├── .idea
│   ├── Face_add_Mask.iml
│   ├── inspectionProfiles
│   │   ├── Project_Default.xml
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   └── modules.xml
├── 222.jpg
├── Data
│   ├── images
│   │   ├── FMA-3D.jpg
│   │   └── mask-sample.jpg
│   ├── mask-data
│   │   ├── 0.png
│   │   ├── 1.png
│   │   ├── 2.png
│   │   ├── 3.png
│   │   ├── 4.png
│   │   ├── 5.png
│   │   ├── 6.png
│   │   └── 7.png
│   ├── test-data
│   │   ├── test1.jpg
│   │   └── test1_landmark.txt
│   └── uv-data
│       ├── .ipynb_checkpoints
│       │   ├── face_ind-checkpoint.txt
│       │   ├── triangles-checkpoint.txt
│       │   ├── uv_face-checkpoint.png
│       │   ├── uv_face_eyes-checkpoint.png
│       │   ├── uv_face_mask-checkpoint.png
│       │   ├── uv_kpt_ind-checkpoint.txt
│       │   ├── uv_kpt_mask-checkpoint.png
│       │   └── uv_weight_mask-checkpoint.png
│       ├── face_ind.txt
│       ├── triangles.txt
│       └── uv_face_mask.png
├── README.md
├── __init__.py
├── __pycache__
│   ├── face_align.cpython-36.pyc
│   ├── face_align.cpython-38.pyc
│   ├── face_det.cpython-36.pyc
│   ├── face_det.cpython-38.pyc
│   ├── face_masker.cpython-36.pyc
│   └── face_masker.cpython-38.pyc
├── config
│   ├── logging.conf
│   └── model_conf.yaml
├── core
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-36.pyc
│   │   └── __init__.cpython-38.pyc
│   ├── image_cropper
│   │   ├── BaseImageCropper.py
│   │   └── arcface_cropper
│   │       └── FaceRecImageCropper.py
│   ├── model_handler
│   │   ├── BaseModelHandler.py
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── BaseModelHandler.cpython-36.pyc
│   │   │   ├── BaseModelHandler.cpython-38.pyc
│   │   │   ├── __init__.cpython-36.pyc
│   │   │   └── __init__.cpython-38.pyc
│   │   ├── face_alignment
│   │   │   ├── FaceAlignModelHandler.py
│   │   │   └── __pycache__
│   │   │       ├── FaceAlignModelHandler.cpython-36.pyc
│   │   │       └── FaceAlignModelHandler.cpython-38.pyc
│   │   ├── face_detection
│   │   │   ├── FaceDetModelHandler.py
│   │   │   └── __pycache__
│   │   │       ├── FaceDetModelHandler.cpython-36.pyc
│   │   │       └── FaceDetModelHandler.cpython-38.pyc
│   │   └── face_recognition
│   │       └── FaceRecModelHandler.py
│   └── model_loader
│       ├── BaseModelLoader.py
│       ├── __init__.py
│       ├── __pycache__
│       │   ├── BaseModelLoader.cpython-36.pyc
│       │   ├── BaseModelLoader.cpython-38.pyc
│       │   ├── __init__.cpython-36.pyc
│       │   └── __init__.cpython-38.pyc
│       ├── face_alignment
│       │   ├── FaceAlignModelLoader.py
│       │   └── __pycache__
│       │       ├── FaceAlignModelLoader.cpython-36.pyc
│       │       └── FaceAlignModelLoader.cpython-38.pyc
│       ├── face_detection
│       │   ├── FaceDetModelLoader.py
│       │   └── __pycache__
│       │       ├── FaceDetModelLoader.cpython-36.pyc
│       │       └── FaceDetModelLoader.cpython-38.pyc
│       └── face_recognition
│           └── FaceRecModelLoader.py
├── face_align.py
├── face_det.py
├── face_masker.py
├── logs
│   └── sdk.log
├── main.py
├── models
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-36.pyc
│   │   ├── __init__.cpython-38.pyc
│   │   ├── prnet.cpython-36.pyc
│   │   └── prnet.cpython-38.pyc
│   ├── face_alignment
│   │   └── face_alignment_1.0
│   │       ├── face_landmark_pfld.pkl
│   │       └── model_meta.json
│   ├── face_detection
│   │   └── face_detection_1.0
│   │       ├── face_detection_retina.pkl
│   │       └── model_meta.json
│   ├── network_def
│   │   ├── __pycache__
│   │   │   ├── mobilev3_pfld.cpython-36.pyc
│   │   │   ├── mobilev3_pfld.cpython-38.pyc
│   │   │   ├── retinaface_def.cpython-36.pyc
│   │   │   └── retinaface_def.cpython-38.pyc
│   │   ├── mobilefacenet_def.py
│   │   ├── mobilev3_pfld.py
│   │   └── retinaface_def.py
│   ├── prnet.py
│   └── prnet_pytorch.pth
├── test1_mask1.jpg
└── utils
    ├── BuzException.py
    ├── __pycache__
    │   ├── BuzException.cpython-36.pyc
    │   ├── BuzException.cpython-38.pyc
    │   ├── read_info.cpython-36.pyc
    │   └── read_info.cpython-38.pyc
    ├── lms_trans.py
    ├── mesh
    │   ├── .ipynb_checkpoints
    │   │   ├── __init__-checkpoint.py
    │   │   ├── io-checkpoint.py
    │   │   ├── light-checkpoint.py
    │   │   ├── render-checkpoint.py
    │   │   ├── transform-checkpoint.py
    │   │   └── vis-checkpoint.py
    │   ├── __init__.py
    │   ├── __pycache__
    │   │   ├── __init__.cpython-36.pyc
    │   │   ├── __init__.cpython-38.pyc
    │   │   ├── render.cpython-36.pyc
    │   │   └── render.cpython-38.pyc
    │   ├── cython
    │   │   ├── .ipynb_checkpoints
    │   │   │   ├── mesh_core-checkpoint.h
    │   │   │   └── mesh_core_cython-checkpoint.pyx
    │   │   ├── build
    │   │   │   └── temp.linux-x86_64-3.6
    │   │   │       ├── mesh_core.o
    │   │   │       └── mesh_core_cython.o
    │   │   ├── mesh_core.cpp
    │   │   ├── mesh_core.h
    │   │   ├── mesh_core_cython.cpp
    │   │   ├── mesh_core_cython.cpython-36m-x86_64-linux-gnu.so
    │   │   ├── mesh_core_cython.pyx
    │   │   └── setup.py
    │   ├── mesh_core_cython.cpython-36m-x86_64-linux-gnu.so
    │   └── render.py
    └── read_info.py
/.idea/Face_add_Mask.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/222.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/222.jpg
--------------------------------------------------------------------------------
/Data/images/FMA-3D.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/images/FMA-3D.jpg
--------------------------------------------------------------------------------
/Data/images/mask-sample.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/images/mask-sample.jpg
--------------------------------------------------------------------------------
/Data/mask-data/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/mask-data/0.png
--------------------------------------------------------------------------------
/Data/mask-data/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/mask-data/1.png
--------------------------------------------------------------------------------
/Data/mask-data/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/mask-data/2.png
--------------------------------------------------------------------------------
/Data/mask-data/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/mask-data/3.png
--------------------------------------------------------------------------------
/Data/mask-data/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/mask-data/4.png
--------------------------------------------------------------------------------
/Data/mask-data/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/mask-data/5.png
--------------------------------------------------------------------------------
/Data/mask-data/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/mask-data/6.png
--------------------------------------------------------------------------------
/Data/mask-data/7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/mask-data/7.png
--------------------------------------------------------------------------------
/Data/test-data/test1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/test-data/test1.jpg
--------------------------------------------------------------------------------
/Data/test-data/test1_landmark.txt:
--------------------------------------------------------------------------------
1 | 99 194 99 211 100 226 102 242 104 258 107 273 110 289 114 304 120 319 126 333 135 346 145 358 157 369 169 379 183 387 199 392 217 393 236 390 252 384 265 375 277 363 287 351 296 339 303 324 308 310 311 295 314 279 315 263 316 247 316 232 316 216 316 200 313 184 114 200 127 186 146 182 165 183 183 187 182 196 164 194 146 193 130 195 226 185 243 179 263 176 282 179 296 192 280 189 263 187 245 189 227 193 205 198 206 219 206 240 207 262 190 206 183 247 176 265 185 273 196 276 209 279 222 274 232 271 241 262 232 245 222 205 137 206 146 199 158 195 170 198 179 206 169 207 158 209 147 208 158 202 233 203 242 195 253 191 265 193 275 200 266 203 255 205 243 204 255 196 162 303 180 301 199 300 209 302 219 299 241 298 263 299 250 315 234 326 212 332 190 328 174 317 169 304 185 306 210 308 236 304 255 301 238 312 211 318 185 314 158 202 255 197
--------------------------------------------------------------------------------
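Note: test1_landmark.txt above is a single line of 212 integers, i.e. 106 (x, y) landmark pairs in image coordinates (matching the 106-point alignment model used later in this repo). A minimal parsing sketch:

    import numpy as np

    # 212 whitespace-separated values -> 106 (x, y) pairs
    lms = np.loadtxt('Data/test-data/test1_landmark.txt')
    lms = lms.reshape(-1, 2)   # shape (106, 2)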
/Data/uv-data/.ipynb_checkpoints/uv_face-checkpoint.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/uv-data/.ipynb_checkpoints/uv_face-checkpoint.png
--------------------------------------------------------------------------------
/Data/uv-data/.ipynb_checkpoints/uv_face_eyes-checkpoint.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/uv-data/.ipynb_checkpoints/uv_face_eyes-checkpoint.png
--------------------------------------------------------------------------------
/Data/uv-data/.ipynb_checkpoints/uv_face_mask-checkpoint.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/uv-data/.ipynb_checkpoints/uv_face_mask-checkpoint.png
--------------------------------------------------------------------------------
/Data/uv-data/.ipynb_checkpoints/uv_kpt_ind-checkpoint.txt:
--------------------------------------------------------------------------------
1 | 1.500000000000000000e+01 2.200000000000000000e+01 2.600000000000000000e+01 3.200000000000000000e+01 4.500000000000000000e+01 6.700000000000000000e+01 9.100000000000000000e+01 1.120000000000000000e+02 1.280000000000000000e+02 1.430000000000000000e+02 1.640000000000000000e+02 1.880000000000000000e+02 2.100000000000000000e+02 2.230000000000000000e+02 2.290000000000000000e+02 2.330000000000000000e+02 2.400000000000000000e+02 5.800000000000000000e+01 7.100000000000000000e+01 8.500000000000000000e+01 9.700000000000000000e+01 1.060000000000000000e+02 1.490000000000000000e+02 1.580000000000000000e+02 1.700000000000000000e+02 1.840000000000000000e+02 1.970000000000000000e+02 1.280000000000000000e+02 1.280000000000000000e+02 1.280000000000000000e+02 1.280000000000000000e+02 1.170000000000000000e+02 1.220000000000000000e+02 1.280000000000000000e+02 1.330000000000000000e+02 1.380000000000000000e+02 7.800000000000000000e+01 8.600000000000000000e+01 9.500000000000000000e+01 1.020000000000000000e+02 9.600000000000000000e+01 8.700000000000000000e+01 1.530000000000000000e+02 1.600000000000000000e+02 1.690000000000000000e+02 1.770000000000000000e+02 1.680000000000000000e+02 1.590000000000000000e+02 1.080000000000000000e+02 1.160000000000000000e+02 1.240000000000000000e+02 1.280000000000000000e+02 1.310000000000000000e+02 1.390000000000000000e+02 1.460000000000000000e+02 1.370000000000000000e+02 1.320000000000000000e+02 1.280000000000000000e+02 1.230000000000000000e+02 1.180000000000000000e+02 1.100000000000000000e+02 1.220000000000000000e+02 1.280000000000000000e+02 1.330000000000000000e+02 1.450000000000000000e+02 1.320000000000000000e+02 1.280000000000000000e+02 1.230000000000000000e+02
2 | 9.600000000000000000e+01 1.180000000000000000e+02 1.410000000000000000e+02 1.650000000000000000e+02 1.830000000000000000e+02 1.900000000000000000e+02 1.880000000000000000e+02 1.870000000000000000e+02 1.930000000000000000e+02 1.870000000000000000e+02 1.880000000000000000e+02 1.900000000000000000e+02 1.830000000000000000e+02 1.650000000000000000e+02 1.410000000000000000e+02 1.180000000000000000e+02 9.600000000000000000e+01 4.900000000000000000e+01 4.200000000000000000e+01 3.900000000000000000e+01 4.000000000000000000e+01 4.200000000000000000e+01 4.200000000000000000e+01 4.000000000000000000e+01 3.900000000000000000e+01 4.200000000000000000e+01 4.900000000000000000e+01 5.900000000000000000e+01 7.300000000000000000e+01 8.600000000000000000e+01 9.600000000000000000e+01 1.110000000000000000e+02 1.130000000000000000e+02 1.150000000000000000e+02 1.130000000000000000e+02 1.110000000000000000e+02 6.700000000000000000e+01 6.000000000000000000e+01 6.100000000000000000e+01 6.500000000000000000e+01 6.800000000000000000e+01 6.900000000000000000e+01 6.500000000000000000e+01 6.100000000000000000e+01 6.000000000000000000e+01 6.700000000000000000e+01 6.900000000000000000e+01 6.800000000000000000e+01 1.420000000000000000e+02 1.310000000000000000e+02 1.270000000000000000e+02 1.280000000000000000e+02 1.270000000000000000e+02 1.310000000000000000e+02 1.420000000000000000e+02 1.480000000000000000e+02 1.500000000000000000e+02 1.500000000000000000e+02 1.500000000000000000e+02 1.480000000000000000e+02 1.410000000000000000e+02 1.350000000000000000e+02 1.340000000000000000e+02 1.350000000000000000e+02 1.420000000000000000e+02 1.430000000000000000e+02 1.420000000000000000e+02 1.430000000000000000e+02
3 |
--------------------------------------------------------------------------------
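Note: following the PRNet convention, this checkpoint file holds two rows of 68 values each: the u (column) and then v (row) coordinates of the 68 facial keypoints inside the 256x256 UV position map. A hedged read sketch (an assumption based on PRNet; only the .ipynb_checkpoints copy of the file appears in this tree):

    import numpy as np

    path = 'Data/uv-data/.ipynb_checkpoints/uv_kpt_ind-checkpoint.txt'
    uv_kpt_ind = np.loadtxt(path).astype(np.int32)   # shape (2, 68)
    u, v = uv_kpt_ind[0], uv_kpt_ind[1]              # keypoint columns and rows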
/Data/uv-data/.ipynb_checkpoints/uv_kpt_mask-checkpoint.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/uv-data/.ipynb_checkpoints/uv_kpt_mask-checkpoint.png
--------------------------------------------------------------------------------
/Data/uv-data/.ipynb_checkpoints/uv_weight_mask-checkpoint.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/uv-data/.ipynb_checkpoints/uv_weight_mask-checkpoint.png
--------------------------------------------------------------------------------
/Data/uv-data/uv_face_mask.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/Data/uv-data/uv_face_mask.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Face_Mask_Add
2 | Blog walkthrough (Chinese): https://blog.csdn.net/zengwubbb/article/details/113417485
3 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/__init__.py
--------------------------------------------------------------------------------
/__pycache__/face_align.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/__pycache__/face_align.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/face_align.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/__pycache__/face_align.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/face_det.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/__pycache__/face_det.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/face_det.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/__pycache__/face_det.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/face_masker.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/__pycache__/face_masker.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/face_masker.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/__pycache__/face_masker.cpython-38.pyc
--------------------------------------------------------------------------------
/config/logging.conf:
--------------------------------------------------------------------------------
1 | [loggers] # loggers object list
2 | keys = root, sdk, api
3 |
4 | [handlers] # handlers object list
5 | keys = consoleHandlers, fileHandlers
6 |
7 | [formatters] # formatters list
8 | keys = fmt
9 |
10 | [logger_root]
11 | level = DEBUG
12 | handlers = consoleHandlers, fileHandlers
13 |
14 | [logger_sdk] # sdk logger
15 | level = DEBUG
16 | handlers = fileHandlers
17 | qualname = sdk
18 | propagate = 0
19 |
20 | [logger_api] # api logger
21 | level = DEBUG
22 | handlers = consoleHandlers
23 | qualname = api
24 | propagate = 0
25 |
26 | [handler_consoleHandlers] # consoleHandlers
27 | class = StreamHandler
28 | level = DEBUG
29 | formatter = fmt
30 | args = (sys.stdout,)
31 |
32 | [handler_fileHandlers] # fileHandlers
33 | class = logging.handlers.RotatingFileHandler
34 | level = DEBUG
35 | formatter = fmt
36 | args = ('logs/sdk.log', 'a', 10000, 3, 'UTF-8')
37 |
38 | [formatter_fmt] # fmt format
39 | format = %(levelname)s %(asctime)s %(filename)s: %(lineno)d] %(message)s
40 | datefmt = %Y-%m-%d %H:%M:%S
--------------------------------------------------------------------------------
/config/model_conf.yaml:
--------------------------------------------------------------------------------
1 | non-mask:
2 | face_detection: face_detection_1.0
3 | face_alignment: face_alignment_1.0
4 | face_recognition: face_recognition_1.0
5 | mask:
6 | face_detection: face_detection_2.0
7 | face_alignment: face_alignment_2.0
8 | face_recognition: face_recognition_2.0
9 |
--------------------------------------------------------------------------------
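Note: face_det.py and face_align.py below resolve model names through this file; the lookup is plain nested dict access after loading the YAML:

    import yaml

    with open('./config/model_conf.yaml') as f:
        model_conf = yaml.load(f, Loader=yaml.FullLoader)
    model_name = model_conf['non-mask']['face_detection']   # 'face_detection_1.0'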
/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/__init__.py
--------------------------------------------------------------------------------
/core/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/core/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/core/image_cropper/BaseImageCropper.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201015
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | from abc import ABCMeta, abstractmethod
7 |
8 | class BaseImageCropper(metaclass=ABCMeta):
9 |     """Base class for all image croppers.
10 |     All image cropper classes need to inherit this base class.
11 | """
12 | def __init__(self):
13 | pass
14 |
15 | @abstractmethod
16 | def crop_image_by_mat(self, image, landmarks):
17 | """Should be overridden by all subclasses.
18 | Used for online image cropping, input the original Mat,
19 | and return the Mat obtained from the image cropping.
20 | """
21 | pass
22 |
--------------------------------------------------------------------------------
/core/image_cropper/arcface_cropper/FaceRecImageCropper.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201015
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | # based on:
7 | # https://github.com/deepinsight/insightface/blob/master/recognition/common/face_align.py
8 |
9 | import os
10 | import cv2
11 | import numpy as np
12 | from skimage import transform as trans
13 |
14 | from core.image_cropper.BaseImageCropper import BaseImageCropper
15 | from utils.lms_trans import lms106_2_lms5, lms25_2_lms5
16 |
17 | src1 = np.array([
18 | [51.642,50.115],
19 | [57.617,49.990],
20 | [35.740,69.007],
21 | [51.157,89.050],
22 | [57.025,89.702]], dtype=np.float32)
23 | #<--left
24 | src2 = np.array([
25 | [45.031,50.118],
26 | [65.568,50.872],
27 | [39.677,68.111],
28 | [45.177,86.190],
29 | [64.246,86.758]], dtype=np.float32)
30 |
31 | #---frontal
32 | src3 = np.array([
33 | [39.730,51.138],
34 | [72.270,51.138],
35 | [56.000,68.493],
36 | [42.463,87.010],
37 | [69.537,87.010]], dtype=np.float32)
38 |
39 | #-->right
40 | src4 = np.array([
41 | [46.845,50.872],
42 | [67.382,50.118],
43 | [72.737,68.111],
44 | [48.167,86.758],
45 | [67.236,86.190]], dtype=np.float32)
46 |
47 | #-->right profile
48 | src5 = np.array([
49 | [54.796,49.990],
50 | [60.771,50.115],
51 | [76.673,69.007],
52 | [55.388,89.702],
53 | [61.257,89.050]], dtype=np.float32)
54 |
55 | src = np.array([src1,src2,src3,src4,src5])
56 | src_map = {112 : src, 224 : src*2}
57 |
58 | arcface_src = np.array([
59 | [38.2946, 51.6963],
60 | [73.5318, 51.5014],
61 | [56.0252, 71.7366],
62 | [41.5493, 92.3655],
63 | [70.7299, 92.2041] ], dtype=np.float32 )
64 |
65 | arcface_src = np.expand_dims(arcface_src, axis=0)
66 |
67 | # In[66]:
68 |
69 | # lmk is prediction; src is template
70 | def estimate_norm(lmk, image_size = 112, mode='arcface'):
71 | assert lmk.shape==(5,2)
72 | tform = trans.SimilarityTransform()
73 | lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)
74 | min_M = []
75 | min_index = []
76 | min_error = float('inf')
77 | if mode=='arcface':
78 | assert image_size==112
79 | src = arcface_src
80 | else:
81 | src = src_map[image_size]
82 | for i in np.arange(src.shape[0]):
83 | tform.estimate(lmk, src[i])
84 | M = tform.params[0:2,:]
85 | results = np.dot(M, lmk_tran.T)
86 | results = results.T
87 | error = np.sum(np.sqrt(np.sum((results - src[i]) ** 2,axis=1)))
88 | # print(error)
89 | if error< min_error:
90 | min_error = error
91 | min_M = M
92 | min_index = i
93 | return min_M, min_index
94 |
95 | def norm_crop(img, landmark, image_size=112, mode='arcface'):
96 | M, pose_index = estimate_norm(landmark, image_size, mode)
97 | warped = cv2.warpAffine(img,M, (image_size, image_size), borderValue = 0.0)
98 | return warped
99 |
100 | # my class wrapper
101 | class FaceRecImageCropper(BaseImageCropper):
102 | """Implementation of image cropper
103 |
104 | Attributes:
105 | image: the input image.
106 | landmarks: using landmarks information to crop.
107 | """
108 | def __init__(self):
109 | super().__init__()
110 |
111 | def crop_image_by_mat(self, image, landmarks):
112 | if len(landmarks) == 106 * 2:
113 | landmarks = lms106_2_lms5(landmarks)
114 | if len(landmarks) == 25 * 2:
115 | landmarks = lms25_2_lms5(landmarks)
116 | assert(len(landmarks) == 5 * 2)
117 | landmarks = np.array(landmarks)
118 | height, width, channel = image.shape
119 | if channel != 3:
120 | print('Error input.')
121 | landmarks = landmarks.reshape((5,2))
122 | cropped_image = norm_crop(image, landmarks)
123 | return cropped_image
124 |
--------------------------------------------------------------------------------
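Note: a minimal usage sketch for the cropper above, run from the repo root; the landmark values are made up for illustration (five (x, y) points: eyes, nose tip, mouth corners, flattened to a length-10 list):

    import cv2
    from core.image_cropper.arcface_cropper.FaceRecImageCropper import FaceRecImageCropper

    image = cv2.imread('Data/test-data/test1.jpg', cv2.IMREAD_COLOR)
    landmarks = [193, 240, 259, 236, 225, 280, 201, 315, 258, 312]  # illustrative values
    cropper = FaceRecImageCropper()
    cropped = cropper.crop_image_by_mat(image, landmarks)  # 112x112 aligned crop
    cv2.imwrite('test1_cropped.jpg', cropped)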
/core/model_handler/BaseModelHandler.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201015
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | from abc import ABCMeta, abstractmethod
7 | import torch
8 |
9 | class BaseModelHandler(metaclass=ABCMeta):
10 |     """Base class for all neural network model handlers.
11 |     All the model handlers need to inherit this base class,
12 |     and each new model needs to implement the "inference_on_image" method.
13 | """
14 | def __init__(self, model, device, cfg):
15 | """
16 | Generate the model by loading the configuration file.
17 |         :param cfg: Cfg Node
18 | """
19 | self.model = model
20 | self.model.eval()
21 | self.cfg = cfg
22 | self.device = torch.device(device)
23 |
24 | @abstractmethod
25 | def inference_on_image(self, image):
26 | pass
27 |
28 | def _preprocess(self, image):
29 | pass
30 |
31 | def _postprocess(self, output):
32 | pass
33 |
--------------------------------------------------------------------------------
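Note: the concrete handlers below all follow the same template; a minimal hypothetical subclass, for orientation only (not part of the repo):

    import torch
    from core.model_handler.BaseModelHandler import BaseModelHandler

    class DummyModelHandler(BaseModelHandler):
        # Hypothetical handler: shows the _preprocess -> model -> _postprocess flow.
        def inference_on_image(self, image):
            tensor = self._preprocess(image)
            with torch.no_grad():
                output = self.model(tensor.to(self.device))
            return self._postprocess(output)

        def _preprocess(self, image):
            # HWC uint8 ndarray -> NCHW float tensor
            return torch.from_numpy(image).float().permute(2, 0, 1).unsqueeze(0)

        def _postprocess(self, output):
            return output.cpu().numpy()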
/core/model_handler/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_handler/__init__.py
--------------------------------------------------------------------------------
/core/model_handler/__pycache__/BaseModelHandler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_handler/__pycache__/BaseModelHandler.cpython-36.pyc
--------------------------------------------------------------------------------
/core/model_handler/__pycache__/BaseModelHandler.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_handler/__pycache__/BaseModelHandler.cpython-38.pyc
--------------------------------------------------------------------------------
/core/model_handler/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_handler/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/core/model_handler/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_handler/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/core/model_handler/face_alignment/FaceAlignModelHandler.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201023
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | import logging.config
7 | logging.config.fileConfig("./config/logging.conf")
8 | logger = logging.getLogger('sdk')
9 |
10 | import cv2
11 | import torch
12 | import numpy as np
13 | import torch.backends.cudnn as cudnn
14 |
15 | from core.model_handler.BaseModelHandler import BaseModelHandler
16 | from utils.BuzException import *
17 | from torchvision import transforms
18 |
19 | class FaceAlignModelHandler(BaseModelHandler):
20 | """Implementation of face landmark model handler
21 |
22 | Attributes:
23 | model: the face landmark model.
24 | device: use cpu or gpu to process.
25 | cfg(dict): testing config, inherit from the parent class.
26 | """
27 | def __init__(self, model, device, cfg):
28 | """
29 | Init FaceLmsModelHandler settings.
30 | """
31 | super().__init__(model, device, cfg)
32 | self.img_size = self.cfg['img_size']
33 |
34 | def inference_on_image(self, image, dets):
35 | """Get the inference of the image and process the inference result.
36 |
37 | Returns:
38 |             A numpy array, the landmark predictions mapped back to the original image, shape: (106, 2).
39 | """
40 | cudnn.benchmark = True
41 | try:
42 | image_pre = self._preprocess(image, dets)
43 | except Exception as e:
44 | raise e
45 | self.model = self.model.to(self.device)
46 | image_pre = image_pre.unsqueeze(0)
47 | with torch.no_grad():
48 | image_pre = image_pre.to(self.device)
49 | _, landmarks_normal = self.model(image_pre)
50 | landmarks = self._postprocess(landmarks_normal)
51 | return landmarks
52 |
53 | #Adapted from https://github.com/Hsintao/pfld_106_face_landmarks/blob/master/data/prepare.py
54 | def _preprocess(self, image, det):
55 |         """Preprocess the input image by cropping it according to the face detection result.
56 |         The detection box (det) gives the face position in the input image.
57 |         After determining the center and the box size of the face, the image is cropped,
58 |         padded where needed, and resized to the preset size.
59 |
60 | Returns:
61 |             A torch tensor, the image after preprocessing, shape: (3, 112, 112).
62 | """
63 | if not isinstance(image, np.ndarray):
64 | logger.error('The input should be the ndarray read by cv2!')
65 | raise InputError()
66 | img = image.copy()
67 | self.image_org = image.copy()
68 | img = np.float32(img)
69 |
70 | xy = np.array([det[0], det[1]])
71 | zz = np.array([det[2], det[3]])
72 | wh = zz - xy + 1
73 | center = (xy + wh / 2).astype(np.int32)
74 | boxsize = int(np.max(wh) * 1.2)
75 | xy = center - boxsize // 2
76 | self.xy = xy
77 | self.boxsize = boxsize
78 | x1, y1 = xy
79 | x2, y2 = xy + boxsize
80 | height, width, _ = img.shape
81 | dx = max(0, -x1)
82 | dy = max(0, -y1)
83 | x1 = max(0, x1)
84 | y1 = max(0, y1)
85 | edx = max(0, x2 - width)
86 | edy = max(0, y2 - height)
87 | x2 = min(width, x2)
88 | y2 = min(height, y2)
89 | imageT = image[y1:y2, x1:x2]
90 | if dx > 0 or dy > 0 or edx > 0 or edy > 0:
91 | imageT = cv2.copyMakeBorder(
92 | imageT, dy, edy, dx, edx, cv2.BORDER_CONSTANT, 0)
93 |
94 | imageT = cv2.resize(imageT, (self.img_size, self.img_size))
95 | t = transforms.Compose([transforms.ToTensor()])
96 | img_after = t(imageT)
97 | return img_after
98 |
99 | def _postprocess(self, landmarks_normal):
100 | """Process the predicted landmarks into the form of the original image.
101 |
102 | Returns:
103 |             A numpy array, the landmarks mapped back to the original image, shape: (106, 2).
104 | """
105 | landmarks_normal = landmarks_normal.cpu().numpy()
106 | landmarks_normal = landmarks_normal.reshape(landmarks_normal.shape[0], -1, 2)
107 | landmarks = landmarks_normal[0] * [self.boxsize, self.boxsize] + self.xy
108 | return landmarks
109 |
--------------------------------------------------------------------------------
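Note: the crop arithmetic in _preprocess above, traced on a made-up detection box (values are illustrative):

    import numpy as np

    det = np.array([100, 120, 220, 260])        # hypothetical x1, y1, x2, y2
    xy, zz = det[:2], det[2:]
    wh = zz - xy + 1                            # box width/height: (121, 141)
    center = (xy + wh / 2).astype(np.int32)     # (160, 190)
    boxsize = int(np.max(wh) * 1.2)             # 169: square crop with a 20% margin
    xy = center - boxsize // 2                  # crop top-left: (76, 106)
    # If the crop extends past the image border, the handler pads with
    # cv2.copyMakeBorder before resizing to img_size x img_size.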
/core/model_handler/face_alignment/__pycache__/FaceAlignModelHandler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_handler/face_alignment/__pycache__/FaceAlignModelHandler.cpython-36.pyc
--------------------------------------------------------------------------------
/core/model_handler/face_alignment/__pycache__/FaceAlignModelHandler.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_handler/face_alignment/__pycache__/FaceAlignModelHandler.cpython-38.pyc
--------------------------------------------------------------------------------
/core/model_handler/face_detection/FaceDetModelHandler.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201019
4 | @contact: jun21wangustc@gmail.com
5 | """
6 |
7 | import logging.config
8 | logging.config.fileConfig("./config/logging.conf")
9 | logger = logging.getLogger('sdk')
10 |
11 | import torch
12 | import numpy as np
13 | from math import ceil
14 | from itertools import product as product
15 | import torch.backends.cudnn as cudnn
16 |
17 | from core.model_handler.BaseModelHandler import BaseModelHandler
18 | from utils.BuzException import *
19 |
20 |
21 | class FaceDetModelHandler(BaseModelHandler):
22 | """Implementation of face detection model handler
23 |
24 | Attributes:
25 | model: the face detection model.
26 | device: use cpu or gpu to process.
27 | cfg(dict): testing config, inherit from the parent class.
28 | """
29 | def __init__(self, model, device, cfg):
30 | """
31 | Init FaceDetModelHandler settings.
32 | """
33 | super().__init__(model, device, cfg)
34 | self.variance = self.cfg['variance']
35 |
36 | def inference_on_image(self, image):
37 | """Get the inference of the image and process the inference result.
38 |
39 | Returns:
40 |             A numpy array of shape N * (x1, y1, x2, y2, confidence),
41 |             where N is the number of detected boxes.
42 | """
43 | cudnn.benchmark = True
44 | input_height, input_width, _ = image.shape
45 | try:
46 | image, scale = self._preprocess(image)
47 | except Exception as e:
48 | raise e
49 | self.model = self.model.to(self.device)
50 | image = torch.from_numpy(image).unsqueeze(0)
51 | with torch.no_grad():
52 | image = image.to(self.device)
53 | scale = scale.to(self.device)
54 | loc, conf, landms = self.model(image)
55 | dets = self._postprocess(loc, conf, scale, input_height, input_width)
56 | return dets
57 |
58 | def _preprocess(self, image):
59 | """Preprocess the image, such as standardization and other operations.
60 |
61 | Returns:
62 |             A numpy array, the image in channel * h * w layout.
63 |             A torch tensor of shape (4,), the scale used to map boxes back to the input size.
64 | """
65 | if not isinstance(image, np.ndarray):
66 | logger.error('The input should be the ndarray read by cv2!')
67 | raise InputError()
68 | img = np.float32(image)
69 | scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
70 | img -= (104, 117, 123)
71 | img = img.transpose(2, 0, 1)
72 | return img, scale
73 |
74 | def _postprocess(self, loc, conf, scale, input_height, input_width):
75 |         """Postprocess the prediction result.
76 |         Decode the detections, apply the confidence threshold, and run NMS
77 |         to keep the appropriate detection boxes.
78 |
79 | Returns:
80 |             A numpy array of shape N * (x1, y1, x2, y2, confidence),
81 |             where N is the number of detected boxes.
82 | """
83 | priorbox = PriorBox(self.cfg, image_size=(input_height, input_width))
84 | priors = priorbox.forward()
85 | priors = priors.to(self.device)
86 | prior_data = priors.data
87 | boxes = self.decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
88 | boxes = boxes * scale
89 | boxes = boxes.cpu().numpy()
90 | scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
91 |
92 | # ignore low scores
93 | inds = np.where(scores > self.cfg['confidence_threshold'])[0]
94 | boxes = boxes[inds]
95 | scores = scores[inds]
96 |
97 | # keep top-K before NMS
98 | order = scores.argsort()[::-1]
99 | boxes = boxes[order]
100 | scores = scores[order]
101 |
102 | # do NMS
103 | nms_threshold = 0.2
104 | dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
105 | keep = self.py_cpu_nms(dets, nms_threshold)
106 | dets = dets[keep, :]
107 | return dets
108 |
109 | # Adapted from https://github.com/chainer/chainercv
110 | def decode(self, loc, priors, variances):
111 | """Decode locations from predictions using priors to undo
112 | the encoding we did for offset regression at train time.
113 | Args:
114 | loc (tensor): location predictions for loc layers,
115 | Shape: [num_priors,4]
116 | priors (tensor): Prior boxes in center-offset form.
117 | Shape: [num_priors,4].
118 | variances: (list[float]) Variances of priorboxes
119 |
120 | Return:
121 | decoded bounding box predictions
122 | """
123 | boxes = torch.cat((priors[:, :2], priors[:, 2:]), 1)
124 | boxes[:, :2] = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]
125 | boxes[:, 2:] = priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])
126 | boxes[:, :2] -= boxes[:, 2:] / 2
127 | boxes[:, 2:] += boxes[:, :2]
128 | return boxes
129 |
130 | # Adapted from https://github.com/biubug6/Pytorch_Retinaface
131 | def py_cpu_nms(self, dets, thresh):
132 | """Python version NMS.
133 |
134 | Returns:
135 | The kept index after NMS.
136 | """
137 | x1 = dets[:, 0]
138 | y1 = dets[:, 1]
139 | x2 = dets[:, 2]
140 | y2 = dets[:, 3]
141 | scores = dets[:, 4]
142 | areas = (x2 - x1 + 1) * (y2 - y1 + 1)
143 | order = scores.argsort()[::-1]
144 | keep = []
145 | while order.size > 0:
146 | i = order[0]
147 | keep.append(i)
148 | xx1 = np.maximum(x1[i], x1[order[1:]])
149 | yy1 = np.maximum(y1[i], y1[order[1:]])
150 | xx2 = np.minimum(x2[i], x2[order[1:]])
151 | yy2 = np.minimum(y2[i], y2[order[1:]])
152 | w = np.maximum(0.0, xx2 - xx1 + 1)
153 | h = np.maximum(0.0, yy2 - yy1 + 1)
154 | inter = w * h
155 | ovr = inter / (areas[i] + areas[order[1:]] - inter)
156 | inds = np.where(ovr <= thresh)[0]
157 | order = order[inds + 1]
158 | return keep
159 |
160 | # Adapted from https://github.com/biubug6/Pytorch_Retinaface
161 | class PriorBox(object):
162 |     """Compute the anchor (prior box) parameters used by the later decode step.
163 |
164 | Attributes:
165 | cfg(dict): testing config.
166 | image_size(tuple): the input image size.
167 | """
168 | def __init__(self, cfg, image_size=None):
169 | """
170 | Init priorBox settings related to the generation of anchors.
171 | """
172 | super(PriorBox, self).__init__()
173 | self.min_sizes = cfg['min_sizes']
174 | self.steps = cfg['steps']
175 | self.image_size = image_size
176 | self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps]
177 | self.name = "s"
178 |
179 | def forward(self):
180 | anchors = []
181 | for k, f in enumerate(self.feature_maps):
182 | min_sizes = self.min_sizes[k]
183 | for i, j in product(range(f[0]), range(f[1])):
184 | for min_size in min_sizes:
185 | s_kx = min_size / self.image_size[1]
186 | s_ky = min_size / self.image_size[0]
187 | dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
188 | dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
189 | for cy, cx in product(dense_cy, dense_cx):
190 | anchors += [cx, cy, s_kx, s_ky]
191 | # back to torch land
192 | output = torch.Tensor(anchors).view(-1, 4)
193 | return output
194 |
--------------------------------------------------------------------------------
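Note: decode() above is the standard SSD-style box decoding used by RetinaFace. The same formula applied to a single made-up prior and offset, in plain NumPy:

    import numpy as np

    variances = [0.1, 0.2]                  # the 'variance' config entry
    prior = np.array([0.5, 0.5, 0.2, 0.2])  # cx, cy, w, h (normalized, illustrative)
    loc = np.array([0.1, -0.2, 0.05, 0.0])  # predicted offsets (illustrative)

    cxcy = prior[:2] + loc[:2] * variances[0] * prior[2:]  # shift the center
    wh = prior[2:] * np.exp(loc[2:] * variances[1])        # rescale the size
    x1y1 = cxcy - wh / 2                                   # center form -> corner form
    x2y2 = x1y1 + wh
    print(np.concatenate([x1y1, x2y2]))     # multiplied by the image scale afterwards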
/core/model_handler/face_detection/__pycache__/FaceDetModelHandler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_handler/face_detection/__pycache__/FaceDetModelHandler.cpython-36.pyc
--------------------------------------------------------------------------------
/core/model_handler/face_detection/__pycache__/FaceDetModelHandler.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_handler/face_detection/__pycache__/FaceDetModelHandler.cpython-38.pyc
--------------------------------------------------------------------------------
/core/model_handler/face_recognition/FaceRecModelHandler.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201015
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | import logging.config
7 | logging.config.fileConfig("config/logging.conf")
8 | logger = logging.getLogger('sdk')
9 |
10 | import numpy as np
11 | import torch
12 |
13 | from core.model_handler.BaseModelHandler import BaseModelHandler
14 | from utils.BuzException import *
15 |
16 | class FaceRecModelHandler(BaseModelHandler):
17 | """Implementation of face recognition model handler
18 |
19 | Attributes:
20 | model: the face recognition model.
21 | device: use cpu or gpu to process.
22 | cfg(dict): testing config, inherit from the parent class.
23 | """
24 | def __init__(self, model, device, cfg):
25 | """
26 | Init FaceRecModelHandler settings.
27 | """
28 | super().__init__(model, device, cfg)
29 | self.mean = self.cfg['mean']
30 | self.std = self.cfg['std']
31 | self.input_height = self.cfg['input_height']
32 | self.input_width = self.cfg['input_width']
33 |
34 | def inference_on_image(self, image):
35 | """Get the inference of the image.
36 |
37 | Returns:
38 |             A numpy array, the output feature, shape (512,).
39 | """
40 | try:
41 | image = self._preprocess(image)
42 | except Exception as e:
43 | raise e
44 | image = torch.unsqueeze(image, 0)
45 | image = image.to(self.device)
46 | with torch.no_grad():
47 | feature = self.model(image).cpu().numpy()
48 | feature = np.squeeze(feature)
49 | return feature
50 |
51 | def _preprocess(self, image):
52 | """Preprocess the input image.
53 |
54 | Returns:
55 |             A torch tensor, the input after preprocessing, shape: (3, 112, 112).
56 | """
57 | if not isinstance(image, np.ndarray):
58 | logger.error('The input should be the ndarray read by cv2!')
59 | raise InputError()
60 | height, width, channels = image.shape
61 | if height != self.input_height or width != self.input_width:
62 | raise FalseImageSizeError()
63 | if image.ndim == 2:
64 | image = image[:, :, np.newaxis]
65 | if image.ndim == 4:
66 | image = image[:,:,:3]
67 | if image.ndim > 4:
68 | raise FaseChannelError(image.ndim)
69 | image = (image.transpose((2, 0, 1)) - self.mean) / self.std
70 | image = image.astype(np.float32)
71 | image = torch.from_numpy(image)
72 | return image
73 |
--------------------------------------------------------------------------------
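Note: the (512,) features returned by this handler are compared with a similarity measure; a hedged sketch (the decision threshold is illustrative and must be tuned per model and dataset):

    import numpy as np

    def cosine_similarity(feat1, feat2):
        # feat1, feat2: (512,) features from FaceRecModelHandler.inference_on_image
        feat1 = feat1 / np.linalg.norm(feat1)
        feat2 = feat2 / np.linalg.norm(feat2)
        return float(np.dot(feat1, feat2))

    # same_person = cosine_similarity(f1, f2) > 0.5   # illustrative threshold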
/core/model_loader/BaseModelLoader.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201015
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | import os
7 | import sys
8 | sys.path.append('models/network_def')
9 | import logging.config
10 | logging.config.fileConfig("./config/logging.conf")
11 | logger = logging.getLogger('sdk')
12 | from abc import ABCMeta, abstractmethod
13 |
14 | import json
15 |
16 |
17 | class BaseModelLoader(metaclass=ABCMeta):
18 |     """Base class for all model loaders.
19 |     All the model loaders need to inherit this base class,
20 |     and each new model needs to implement the "load_model" method.
21 | """
22 | def __init__(self, model_path, model_category, model_name, meta_file='model_meta.json'):
23 | model_root_dir = os.path.join(model_path, model_category, model_name)
24 | meta_file_path = os.path.join(model_root_dir, meta_file)
25 | # print(model_root_dir, meta_file_path)
26 | self.cfg = {}
27 | try:
28 | self.meta_conf = json.load(open(meta_file_path, 'r'))
29 | except IOError as e:
30 |             logger.error('The configuration file model_meta.json was not found or could not be read!')
31 | raise e
32 | except Exception as e:
33 | logger.info('The configuration file format is wrong!')
34 | raise e
35 | else:
36 |             logger.info('Successfully parsed the model configuration file model_meta.json!')
37 | # common configs for all model
38 | self.cfg['model_path'] = model_path
39 | self.cfg['model_category'] = model_category
40 | self.cfg['model_name'] = model_name
41 | self.cfg['model_type'] = self.meta_conf['model_type']
42 | self.cfg['model_info'] = self.meta_conf['model_info']
43 | self.cfg['model_file_path'] = os.path.join(model_root_dir, self.meta_conf['model_file'])
44 | self.cfg['release_date'] = self.meta_conf['release_date']
45 | self.cfg['input_height'] = self.meta_conf['input_height']
46 | self.cfg['input_width'] = self.meta_conf['input_width']
47 |
48 | @abstractmethod
49 | def load_model(self):
50 | """Should be overridden by all subclasses.
51 | Different models may have different configuration information,
52 | such as mean, so each model implements its own loader
53 | """
54 | pass
55 |
--------------------------------------------------------------------------------
/core/model_loader/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_loader/__init__.py
--------------------------------------------------------------------------------
/core/model_loader/__pycache__/BaseModelLoader.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_loader/__pycache__/BaseModelLoader.cpython-36.pyc
--------------------------------------------------------------------------------
/core/model_loader/__pycache__/BaseModelLoader.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_loader/__pycache__/BaseModelLoader.cpython-38.pyc
--------------------------------------------------------------------------------
/core/model_loader/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_loader/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/core/model_loader/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_loader/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/core/model_loader/face_alignment/FaceAlignModelLoader.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201023
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | import sys
7 | import logging.config
8 | logging.config.fileConfig("./config/logging.conf")
9 | logger = logging.getLogger('sdk')
10 |
11 | import torch
12 |
13 | from core.model_loader.BaseModelLoader import BaseModelLoader
14 |
15 | class FaceAlignModelLoader(BaseModelLoader):
16 | def __init__(self, model_path, model_category, model_name, meta_file='model_meta.json'):
17 |         logger.info('Start to analyze the face landmark model, model path: %s, model category: %s, model name: %s' %
18 | (model_path, model_category, model_name))
19 | super().__init__(model_path, model_category, model_name, meta_file)
20 | self.cfg['img_size'] = self.meta_conf['input_width']
21 |
22 | def load_model(self):
23 | try:
24 | model = torch.load(self.cfg['model_file_path'])
25 | except Exception as e:
26 | logger.error('The model failed to load, please check the model path: %s!'
27 | % self.cfg['model_file_path'])
28 | raise e
29 | else:
30 | logger.info('Successfully loaded the face landmark model!')
31 | return model, self.cfg
32 |
--------------------------------------------------------------------------------
/core/model_loader/face_alignment/__pycache__/FaceAlignModelLoader.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_loader/face_alignment/__pycache__/FaceAlignModelLoader.cpython-36.pyc
--------------------------------------------------------------------------------
/core/model_loader/face_alignment/__pycache__/FaceAlignModelLoader.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_loader/face_alignment/__pycache__/FaceAlignModelLoader.cpython-38.pyc
--------------------------------------------------------------------------------
/core/model_loader/face_detection/FaceDetModelLoader.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201019
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | import sys
7 | import logging.config
8 | logging.config.fileConfig("./config/logging.conf")
9 | logger = logging.getLogger('sdk')
10 |
11 | import torch
12 |
13 | from core.model_loader.BaseModelLoader import BaseModelLoader
14 |
15 | class FaceDetModelLoader(BaseModelLoader):
16 | def __init__(self, model_path, model_category, model_name, meta_file='model_meta.json'):
17 | # print(model_path)
18 |         logger.info('Start to analyze the face detection model, model path: %s, model category: %s, model name: %s' %
19 | (model_path, model_category, model_name))
20 | super().__init__(model_path, model_category, model_name, meta_file)
21 | self.cfg['min_sizes'] = self.meta_conf['min_sizes']
22 | self.cfg['steps'] = self.meta_conf['steps']
23 | self.cfg['variance'] = self.meta_conf['variance']
24 | self.cfg['in_channel'] = self.meta_conf['in_channel']
25 | self.cfg['out_channel'] = self.meta_conf['out_channel']
26 | self.cfg['confidence_threshold'] = self.meta_conf['confidence_threshold']
27 |
28 | def load_model(self):
29 | try:
30 | print(self.cfg['model_file_path'])
31 | model = torch.load(self.cfg['model_file_path'])
32 | except Exception as e:
33 | logger.error('The model failed to load, please check the model path: %s!'
34 | % self.cfg['model_file_path'])
35 | raise e
36 | else:
37 | logger.info('Successfully loaded the face detection model!')
38 | return model, self.cfg
39 |
--------------------------------------------------------------------------------
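Note: combining the keys read by BaseModelLoader and FaceDetModelLoader above, the detector's model_meta.json is expected to look roughly like this; every value below is illustrative, not copied from the repo:

    {
        "model_type": "face_detection",
        "model_info": "RetinaFace-style face detector",
        "model_file": "face_detection_retina.pkl",
        "release_date": "20201019",
        "input_height": 120,
        "input_width": 120,
        "min_sizes": [[16, 32], [64, 128], [256, 512]],
        "steps": [8, 16, 32],
        "variance": [0.1, 0.2],
        "in_channel": 32,
        "out_channel": 64,
        "confidence_threshold": 0.7
    }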
/core/model_loader/face_detection/__pycache__/FaceDetModelLoader.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_loader/face_detection/__pycache__/FaceDetModelLoader.cpython-36.pyc
--------------------------------------------------------------------------------
/core/model_loader/face_detection/__pycache__/FaceDetModelLoader.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/core/model_loader/face_detection/__pycache__/FaceDetModelLoader.cpython-38.pyc
--------------------------------------------------------------------------------
/core/model_loader/face_recognition/FaceRecModelLoader.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201015
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | import sys
7 | import logging.config
8 | logging.config.fileConfig("config/logging.conf")
9 | logger = logging.getLogger('sdk')
10 |
11 | import torch
12 |
13 | from core.model_loader.BaseModelLoader import BaseModelLoader
14 |
15 | class FaceRecModelLoader(BaseModelLoader):
16 | def __init__(self, model_path, model_category, model_name, meta_file='model_meta.json'):
17 |         logger.info('Start to analyze the face recognition model, model path: %s, model category: %s, model name: %s' %
18 | (model_path, model_category, model_name))
19 | super().__init__(model_path, model_category, model_name, meta_file)
20 | self.cfg['mean'] = self.meta_conf['mean']
21 | self.cfg['std'] = self.meta_conf['std']
22 |
23 | def load_model(self):
24 | try:
25 | model = torch.load(self.cfg['model_file_path'])
26 | except Exception as e:
27 | logger.error('The model failed to load, please check the model path: %s!'
28 | % self.cfg['model_file_path'])
29 | raise e
30 | else:
31 | logger.info('Successfully loaded the face recognition model!')
32 | return model, self.cfg
33 |
--------------------------------------------------------------------------------
/face_align.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 | # -*- coding: utf-8 -*-
3 | # @Time    : 2021/1/28 5:27 PM
4 | # @Author : zengwb
5 |
6 | import sys
7 | import logging.config
8 | logging.config.fileConfig("./config/logging.conf")
9 | logger = logging.getLogger('api')
10 |
11 | import yaml
12 | import cv2
13 | import numpy as np
14 | from core.model_loader.face_alignment.FaceAlignModelLoader import FaceAlignModelLoader
15 | from core.model_handler.face_alignment.FaceAlignModelHandler import FaceAlignModelHandler
16 |
17 | with open('./config/model_conf.yaml') as f:
18 | model_conf = yaml.load(f, Loader=yaml.FullLoader)
19 |
20 |
21 | class FaceAlign():
22 | def __init__(self):
23 | model_dir = './models'
24 | # model setting, modified along with model
25 | scene = 'non-mask'
26 | model_category = 'face_alignment'
27 | model_name = model_conf[scene][model_category]
28 | logger.info('Start to load the face landmark model...')
29 | # load model
30 | try:
31 | faceAlignModelLoader = FaceAlignModelLoader(model_dir, model_category, model_name)
32 | except Exception as e:
33 | logger.info('Failed to parse model configuration file!')
34 | sys.exit(-1)
35 | else:
36 | logger.info('Successfully parsed the model configuration file model_meta.json!')
37 |
38 | try:
39 | model, cfg = faceAlignModelLoader.load_model()
40 | except Exception as e:
41 | logger.error('Model loading failed!')
42 | sys.exit(-1)
43 | else:
44 | logger.info('Successfully loaded the face landmark model!')
45 |
46 | self.faceAlignModelHandler = FaceAlignModelHandler(model, 'cuda:0', cfg)
47 |
48 | def __call__(self, image, bbox):
49 | det = np.asarray(list(map(int, bbox[0:4])), dtype=np.int32)
50 | # print(det)
51 | landmarks = self.faceAlignModelHandler.inference_on_image(image, det)
52 | # print(landmarks)
53 | # image_show = image.copy()
54 | # for (x, y) in landmarks.astype(np.int32):
55 | # cv2.circle(image_show, (x, y), 2, (255, 0, 0), -1)
56 | # cv2.imshow('lms', image_show)
57 | # cv2.waitKey(0)
58 | return landmarks
59 |
60 |
61 | if __name__ == '__main__':
62 | face_align = FaceAlign()
63 | image_path = '/home/zengwb/Documents/FaceX-Zoo/face_sdk/api_usage/temp/test1_detect_res.jpg'
64 | image_det_txt_path = '/home/zengwb/Documents/FaceX-Zoo/face_sdk/api_usage/temp/test1_detect_res.txt'
65 | image = cv2.imread(image_path, cv2.IMREAD_COLOR)
66 | with open(image_det_txt_path, 'r') as f:
67 | lines = f.readlines()
68 | for i, line in enumerate(lines):
69 |             landmarks = face_align(image, line.strip().split())
70 |
71 |
72 |
--------------------------------------------------------------------------------
/face_det.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 | # -*- coding: utf-8 -*-
3 | # @Time    : 2021/1/28 5:00 PM
4 | # @Author : zengwb
5 |
6 |
7 | import logging.config
8 | logging.config.fileConfig("./config/logging.conf")
9 | logger = logging.getLogger('api')
10 |
11 | import yaml
12 | import cv2
13 | import sys
14 | from core.model_loader.face_detection.FaceDetModelLoader import FaceDetModelLoader
15 | from core.model_handler.face_detection.FaceDetModelHandler import FaceDetModelHandler
16 |
17 | with open('./config/model_conf.yaml') as f:
18 | model_conf = yaml.load(f, Loader=yaml.FullLoader)
19 |
20 |
21 | class FaceDet():
22 | def __init__(self):
23 | model_dir = './models'
24 | # model setting, modified along with model
25 | scene = 'non-mask'
26 | model_category = 'face_detection'
27 | model_name = model_conf[scene][model_category]
28 | logger.info('Start to load the face detection model...')
29 | try:
30 | faceDetModelLoader = FaceDetModelLoader(model_dir, model_category, model_name)
31 | except Exception as e:
32 | logger.error('Failed to parse model configuration file: %s' % e)
33 | sys.exit(-1)
34 | else:
35 | logger.info('Successfully parsed the model configuration file model_meta.json!')
36 |
37 | try:
38 | self.model, self.cfg = faceDetModelLoader.load_model()
39 | except Exception as e:
40 | logger.error('Model loading failed!')
41 | sys.exit(-1)
42 | else:
43 | logger.info('Successfully loaded the face detection model!')
44 |
45 | self.faceDetModelHandler = FaceDetModelHandler(self.model, 'cuda:0', self.cfg)
46 |
47 | def __call__(self, image):
48 | # read image
49 | # image_path = '/home/zengwb/Documents/FaceX-Zoo/face_sdk/api_usage/test_images/test1.jpg'
50 |
51 | try:
52 | dets = self.faceDetModelHandler.inference_on_image(image)
53 | except Exception as e:
54 | logger.error('Face detection failed!')
55 | sys.exit(-1)
56 | else:
57 | logger.info('Successful face detection!')
58 |
59 | save_path_txt = './temp/test1_detect_res.txt'
60 | bboxs = dets
61 | # with open(save_path_txt, "w") as fd:
62 | # for box in bboxs:
63 | # line = str(int(box[0])) + " " + str(int(box[1])) + " " + \
64 | # str(int(box[2])) + " " + str(int(box[3])) + " " + \
65 | # str(box[4]) + " \n"
66 | # fd.write(line)
67 | #
68 | # for box in bboxs:
69 | # box = list(map(int, box))
70 | # cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2)
71 | # cv2.imwrite(save_path_img, image)
72 | logger.info('Successfully generated face detection results!')
73 |
74 | return bboxs
75 |
76 |
77 | if __name__ == '__main__':
78 | image_path = '/home/zengwb/Documents/FaceX-Zoo/face_sdk/api_usage/test_images/test1.jpg'
79 | face_detector = FaceDet()
80 | bboxs = face_detector(cv2.imread(image_path, cv2.IMREAD_COLOR))  # __call__ expects an image array, not a path
81 | print(bboxs)
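
A hedged sketch of persisting the detector output in the format face_align.py's __main__ consumes; the [x1, y1, x2, y2, score] column layout is inferred from the commented-out writer above, not from a documented API.

    import os
    import cv2
    from face_det import FaceDet

    face_detector = FaceDet()
    image = cv2.imread('Data/test-data/test1.jpg', cv2.IMREAD_COLOR)
    bboxs = face_detector(image)
    os.makedirs('./temp', exist_ok=True)
    with open('./temp/test1_detect_res.txt', 'w') as fd:
        for box in bboxs:
            fd.write('%d %d %d %d %f\n' % (int(box[0]), int(box[1]),
                                           int(box[2]), int(box[3]), box[4]))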
--------------------------------------------------------------------------------
/face_masker.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: Yinglu Liu, Jun Wang
3 | @date: 20201012
4 | @contact: jun21wangustc@gmail.com
5 | """
6 |
7 | import os
8 | from random import randint
9 | import warnings
10 | import copy
11 | warnings.filterwarnings('ignore')
12 | import cv2
13 | import torch
14 | import numpy as np
15 | from skimage.io import imread, imsave
16 | from skimage.transform import estimate_transform, warp
17 | from utils import mesh
18 | from utils import read_info
19 | from models.prnet import PRNet
20 |
21 | class PRN:
22 | """Process of PRNet.
23 | based on:
24 | https://github.com/YadiraF/PRNet/blob/master/api.py
25 | """
26 | def __init__(self, model_path):
27 | self.resolution = 256
28 | self.MaxPos = self.resolution*1.1
29 | self.face_ind = np.loadtxt('Data/uv-data/face_ind.txt').astype(np.int32)
30 | self.triangles = np.loadtxt('Data/uv-data/triangles.txt').astype(np.int32)
31 | self.net = PRNet(3, 3)
32 | state_dict = torch.load(model_path)
33 | self.net.load_state_dict(state_dict)
34 | self.net.eval()
35 | if torch.cuda.is_available():
36 | self.net = self.net.to('cuda')
37 | def process(self, image, image_info):
38 | if np.max(image_info.shape) > 4: # key points to get bounding box
39 | kpt = image_info
40 | if kpt.shape[0] > 3:
41 | kpt = kpt.T
42 | left = np.min(kpt[0, :]); right = np.max(kpt[0, :]);
43 | top = np.min(kpt[1,:]); bottom = np.max(kpt[1,:])
44 | else: # bounding box
45 | bbox = image_info
46 | left = bbox[0]; right = bbox[1]; top = bbox[2]; bottom = bbox[3]
47 | old_size = (right - left + bottom - top)/2
48 | center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])
49 | size = int(old_size*1.6)
50 | # crop image
51 | src_pts = np.array([[center[0]-size/2, center[1]-size/2],
52 | [center[0] - size/2, center[1]+size/2],
53 | [center[0]+size/2, center[1]-size/2]])
54 | DST_PTS = np.array([[0,0], [0,self.resolution - 1], [self.resolution - 1, 0]])
55 | tform = estimate_transform('similarity', src_pts, DST_PTS)
56 | cropped_image = warp(image, tform.inverse, output_shape=(self.resolution, self.resolution))
57 | cropped_image = np.transpose(cropped_image[np.newaxis, :,:,:], (0, 3, 1, 2)).astype(np.float32)
58 | cropped_image = torch.from_numpy(cropped_image)
59 | if torch.cuda.is_available():
60 | cropped_image = cropped_image.cuda()
61 | cropped_pos = self.net(cropped_image)
62 | cropped_pos = cropped_pos.cpu().detach().numpy()
63 | cropped_pos = np.transpose(cropped_pos, (0, 2, 3, 1)).squeeze() * self.MaxPos
64 | # restore
65 | cropped_vertices = np.reshape(cropped_pos, [-1, 3]).T
66 | z = cropped_vertices[2,:].copy()/tform.params[0,0]
67 | cropped_vertices[2,:] = 1
68 | vertices = np.dot(np.linalg.inv(tform.params), cropped_vertices)
69 | vertices = np.vstack((vertices[:2,:], z))
70 | pos = np.reshape(vertices.T, [self.resolution, self.resolution, 3])
71 | return pos
72 | def get_vertices(self, pos):
73 | all_vertices = np.reshape(pos, [self.resolution ** 2, -1])
74 | vertices = all_vertices[self.face_ind, :]
75 | return vertices
76 | def get_colors_from_texture(self, texture):
77 | all_colors = np.reshape(texture, [self.resolution**2, -1])
78 | colors = all_colors[self.face_ind, :]
79 | return colors
80 |
81 |
82 | class FaceMasker:
83 | """Add a virtual mask in face.
84 |
85 | Attributes:
86 | uv_face_path(str): the path of the uv face mask.
87 | mask_template_folder(str): the directory containing all mask templates.
88 | prn(object): PRN object, https://github.com/YadiraF/PRNet.
89 | template_name2ref_texture_src(dict): key is the template name, value is the mask image loaded by skimage.io.
90 | template_name2uv_mask_src(dict): key is the template name, value is the uv mask.
91 | is_aug(bool): whether to apply augmentation operations to the mask.
92 | """
93 | def __init__(self, is_aug):
94 | """init for FaceMasker
95 |
96 | Args:
97 | is_aug(bool): whether to apply augmentation operations to the mask.
98 | """
99 | self.uv_face_path = 'Data/uv-data/uv_face_mask.png'
100 | self.mask_template_folder = 'Data/mask-data'
101 | self.prn = PRN('models/prnet_pytorch.pth')
102 | self.template_name2ref_texture_src, self.template_name2uv_mask_src = self.get_ref_texture_src()
103 | self.is_aug = is_aug
104 |
105 | def get_ref_texture_src(self):
106 | template_name2ref_texture_src = {}
107 | template_name2uv_mask_src = {}
108 | mask_template_list = os.listdir(self.mask_template_folder)
109 | uv_face = imread(self.uv_face_path, as_gray=True)/255.
110 | for mask_template in mask_template_list:
111 | mask_template_path = os.path.join(self.mask_template_folder, mask_template)
112 | ref_texture_src = imread(mask_template_path, as_gray=False)/255.
113 | if ref_texture_src.shape[2] == 4: # the alpha channel serves as the mask
114 | uv_mask_src = ref_texture_src[:,:,3]
115 | ref_texture_src = ref_texture_src[:,:,:3]
116 | else:
117 | raise ValueError('Mask template must have an alpha channel: %s' % mask_template_path)
118 | uv_mask_src[uv_face == 0] = 0
119 | template_name2ref_texture_src[mask_template] = ref_texture_src
120 | template_name2uv_mask_src[mask_template] = uv_mask_src
121 | return template_name2ref_texture_src, template_name2uv_mask_src
122 |
123 | def add_mask(self, face_root, image_name2lms, image_name2template_name, masked_face_root):
124 | for image_name, face_lms in image_name2lms.items():
125 | image_path = os.path.join(face_root, image_name)
126 | masked_face_path = os.path.join(masked_face_root, image_name)
127 | template_name = image_name2template_name[image_name]
128 | self.add_mask_one(cv2.imread(image_path), face_lms, template_name, masked_face_path)
129 |
130 | def add_mask_two(self, image, face_lms, template_name, masked_face_path):
131 | """Add mask to one image.
132 |
133 | Args:
134 | image_path(str): the image to add mask.
135 | face_lms(str): face landmarks, [x1, y1, x2, y2, ..., x106, y106]
136 | template_name(str): the mask template to be added on the current image,
137 | got to '/Data/mask-data' for all template.
138 | masked_face_path(str): the path to save masked image.
139 | """
140 | # image = imread(image_path)
141 | image = image[:, :, ::-1]
142 | ref_texture_src = self.template_name2ref_texture_src[template_name]
143 | uv_mask_src = self.template_name2uv_mask_src[template_name]
144 | if image.ndim == 2:
145 | image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
146 | [h, w, c] = image.shape
147 | if c == 4:
148 | image = image[:,:,:3]
149 | # for face_lm in face_lms:
150 | pos, vertices = self.get_vertices(face_lms[0], image) # 3d reconstruction -> get texture.
151 | image = image/255. #!!
152 | texture = cv2.remap(image, pos[:,:,:2].astype(np.float32), None,
153 | interpolation=cv2.INTER_NEAREST,
154 | borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
155 | new_texture = self.get_new_texture(ref_texture_src, uv_mask_src, texture)
156 | #remap to input image.(render)
157 | vis_colors = np.ones((vertices.shape[0], 1))
158 | face_mask = mesh.render.render_colors(vertices, self.prn.triangles, vis_colors, h, w, c = 1)
159 | face_mask = np.squeeze(face_mask > 0).astype(np.float32)
160 | new_colors = self.prn.get_colors_from_texture(new_texture)
161 | new_image = mesh.render.render_colors(vertices, self.prn.triangles, new_colors, h, w, c = 3)
162 | new_image = image * (1 - face_mask[:, :, np.newaxis]) + new_image * face_mask[:, :, np.newaxis]
163 | new_image = np.clip(new_image, -1, 1) #must clip to (-1, 1)!
164 | imsave(masked_face_path, new_image)
165 | # second face
166 | iii = copy.deepcopy(new_image) # first-pass result, already normalized to [0, 1]
167 | pos, vertices = self.get_vertices(face_lms[1], iii) # 3d reconstruction -> get texture.
168 | texture = cv2.remap(iii, pos[:, :, :2].astype(np.float32), None,
169 | interpolation=cv2.INTER_NEAREST,
170 | borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
171 | new_texture = self.get_new_texture(ref_texture_src, uv_mask_src, texture)
172 | # remap to input image.(render)
173 | vis_colors = np.ones((vertices.shape[0], 1))
174 | face_mask = mesh.render.render_colors(vertices, self.prn.triangles, vis_colors, h, w, c=1)
175 | face_mask = np.squeeze(face_mask > 0).astype(np.float32)
176 | new_colors = self.prn.get_colors_from_texture(new_texture)
177 | new_image = mesh.render.render_colors(vertices, self.prn.triangles, new_colors, h, w, c=3)
178 | new_image = iii * (1 - face_mask[:, :, np.newaxis]) + new_image * face_mask[:, :, np.newaxis] # composite over the first-pass result so the first mask is kept
179 | new_image = np.clip(new_image, -1, 1) # must clip to (-1, 1)!
180 | imsave('222.png', new_image)
181 | return new_image
185 |
186 | def add_mask_one(self, image, face_lms, template_name, masked_face_path):
187 | """Add mask to one image.
188 |
189 | Args:
190 | image(numpy.ndarray): the image to add a mask to.
191 | face_lms(numpy.ndarray): face landmarks, [x1, y1, x2, y2, ..., x106, y106]
192 | template_name(str): the mask template to be added on the current image,
193 | see 'Data/mask-data' for all templates.
194 | masked_face_path(str): the path to save the masked image.
195 | """
196 | # image = imread(image_path)
197 | image = image[:, :, ::-1]
198 | ref_texture_src = self.template_name2ref_texture_src[template_name]
199 | uv_mask_src = self.template_name2uv_mask_src[template_name]
200 | if image.ndim == 2:
201 | image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
202 | [h, w, c] = image.shape
203 | if c == 4:
204 | image = image[:,:,:3]
205 | pos, vertices = self.get_vertices(face_lms, image) # 3d reconstruction -> get texture.
206 | image = image/255. #!!
207 | texture = cv2.remap(image, pos[:,:,:2].astype(np.float32), None,
208 | interpolation=cv2.INTER_NEAREST,
209 | borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
210 | new_texture = self.get_new_texture(ref_texture_src, uv_mask_src, texture)
211 | #remap to input image.(render)
212 | vis_colors = np.ones((vertices.shape[0], 1))
213 | face_mask = mesh.render.render_colors(vertices, self.prn.triangles, vis_colors, h, w, c = 1)
214 | face_mask = np.squeeze(face_mask > 0).astype(np.float32)
215 | new_colors = self.prn.get_colors_from_texture(new_texture)
216 | new_image = mesh.render.render_colors(vertices, self.prn.triangles, new_colors, h, w, c = 3)
217 | new_image = image * (1 - face_mask[:, :, np.newaxis]) + new_image * face_mask[:, :, np.newaxis]
218 | new_image = np.clip(new_image, -1, 1) # must clip to (-1, 1)!
219 | imsave(masked_face_path, new_image)
220 | return new_image
221 |
222 | def get_vertices(self, face_lms, image):
223 | """Get vertices
224 |
225 | Args:
226 | face_lms: flattened face landmarks, [x1, y1, ..., x106, y106].
227 | image: input image with pixel values in [0, 255].
228 | """
229 | # print(face_lms.shape) # (212,)
230 | lms_info = read_info.read_landmark_106_array(face_lms)
231 | # print(lms_info.shape) # (68, 2)
232 | pos = self.prn.process(image, lms_info)
233 | vertices = self.prn.get_vertices(pos)
234 | # print(pos.shape, vertices.shape) # (256, 256, 3) (43867, 3)
235 | return pos, vertices
236 | # pos_ = []
237 | # vertices_ = []
238 | # for face_lm in face_lms:
239 | # lms_info = read_info.read_landmark_106_array(face_lm)
240 | # pos = self.prn.process(image, lms_info)
241 | # vertices = self.prn.get_vertices(pos)
242 | # pos_.append(pos)
243 | # vertices_.append(vertices)
244 | # print(pos.shape, vertices.shape)
245 | # return pos_, vertices_
246 |
247 | def get_new_texture(self, ref_texture_src, uv_mask_src, texture):
248 | """Get new texture
249 | Mainly for data augmentation.
250 | """
251 | x_offset = 5
252 | y_offset = 5
253 | alpha = '0.5,0.8'
254 | beta = 0
255 | erode_iter = 5
256 |
257 | # random augmentation
258 | ref_texture = ref_texture_src.copy()
259 | uv_mask = uv_mask_src.copy()
260 | if self.is_aug:
261 | # random flip
262 | if np.random.rand()>0.5:
263 | ref_texture = cv2.flip(ref_texture, 1, dst=None)
264 | uv_mask = cv2.flip(uv_mask, 1, dst=None)
265 | # random scale,
266 | if np.random.rand()>0.5:
267 | x_offset = np.random.randint(x_offset)
268 | y_offset = np.random.randint(y_offset)
269 | ref_texture_temp = np.zeros_like(ref_texture)
270 | uv_mask_temp = np.zeros_like(uv_mask)
271 | target_size = (256-x_offset*2, 256-y_offset*2)
272 | ref_texture_temp[y_offset:256-y_offset, x_offset:256-x_offset,:] = cv2.resize(ref_texture, target_size)
273 | uv_mask_temp[y_offset:256-y_offset, x_offset:256-x_offset] = cv2.resize(uv_mask, target_size)
274 | ref_texture = ref_texture_temp
275 | uv_mask = uv_mask_temp
276 | # random erode
277 | if np.random.rand()>0.8:
278 | t = np.random.randint(erode_iter)
279 | kernel = np.ones((5,5),np.uint8)
280 | uv_mask = cv2.erode(uv_mask,kernel,iterations = t)
281 | # random contrast and brightness
282 | if np.random.rand()>0.5:
283 | alpha_r = [float(_) for _ in alpha.split(',')]
284 | alpha = (alpha_r[1] - alpha_r[0])*np.random.rand() + alpha_r[0]
286 | img = ref_texture*255
287 | blank = np.zeros(img.shape, img.dtype)
288 | # dst = alpha * img + beta * blank
289 | dst = cv2.addWeighted(img, alpha, blank, 1-alpha, beta)
290 | ref_texture = dst.clip(0,255) / 255
291 | new_texture = texture*(1 - uv_mask[:,:,np.newaxis]) + ref_texture[:,:,:3]*uv_mask[:,:,np.newaxis]
292 | return new_texture
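
The core compositing step in get_new_texture (and in the final render step of add_mask_one) is a per-pixel alpha blend: mask==1 picks the template, mask==0 keeps the original. A tiny self-contained numpy sketch, with toy shapes standing in for the real 256x256 UV maps:

    import numpy as np

    h, w = 4, 4
    texture = np.full((h, w, 3), 0.2)      # stand-in for the unwrapped face texture
    ref_texture = np.full((h, w, 3), 0.9)  # stand-in for the mask template
    uv_mask = np.zeros((h, w))
    uv_mask[2:, :] = 1.0                   # lower half covered by the mask

    blended = texture * (1 - uv_mask[:, :, np.newaxis]) \
            + ref_texture * uv_mask[:, :, np.newaxis]
    print(blended[0, 0], blended[3, 0])    # [0.2 0.2 0.2] vs [0.9 0.9 0.9]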
--------------------------------------------------------------------------------
/logs/sdk.log:
--------------------------------------------------------------------------------
1 | DEBUG 2021-01-30 11:27:21 font_manager.py: 1436] Using fontManager instance from /home/zengwb/.cache/matplotlib/fontlist-v330.json
2 | DEBUG 2021-01-30 11:27:21 pyplot.py: 290] Loaded backend module://backend_interagg version unknown.
3 | INFO 2021-01-30 11:27:22 FaceDetModelLoader.py: 19] Start to analyze the face detection model, model path: ./models, model category: face_detection,model name: face_detection_1.0
4 | INFO 2021-01-30 11:27:22 BaseModelLoader.py: 36] Successfully parsed the model configuration file meta.json!
5 | INFO 2021-01-30 11:27:22 FaceDetModelLoader.py: 37] Successfully loaded the face detection model!
6 | INFO 2021-01-30 11:27:22 FaceAlignModelLoader.py: 18] Start to analyze the face landmark model, model path: ./models, model category: face_alignment,model name: face_alignment_1.0
7 | INFO 2021-01-30 11:27:22 BaseModelLoader.py: 36] Successfully parsed the model configuration file meta.json!
8 | INFO 2021-01-30 11:27:24 FaceAlignModelLoader.py: 30] Successfully loaded the face landmark model!
9 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 | # -*- coding: utf-8 -*-
3 | # @Time : 2021/1/28 4:58 PM
4 | # @Author : zengwb
5 |
6 |
7 | import logging.config
8 | logging.config.fileConfig("./config/logging.conf")
9 | logger = logging.getLogger('api')
10 |
11 | import yaml
12 | import cv2
13 | import numpy as np
14 | import sys
15 | from skimage.io import imread, imsave, imshow
16 | import matplotlib.pyplot as plt
17 | from core.model_loader.face_detection.FaceDetModelLoader import FaceDetModelLoader
18 | from core.model_handler.face_detection.FaceDetModelHandler import FaceDetModelHandler
19 | import copy
20 | from face_det import FaceDet
21 | from face_align import FaceAlign
22 | from face_masker import FaceMasker
23 |
24 |
25 | def main():
26 | show_result = False
27 | image_path = './Data/test-data/test1.jpg'
28 | # image_path = './1.png'
29 | image = cv2.imread(image_path, cv2.IMREAD_COLOR)
30 | face_detector = FaceDet()
31 | face_align = FaceAlign()
32 |
33 | bboxs = face_detector(image)
34 | # print(bboxs)
35 | image_show = image.copy()
36 | face_lms = []
37 | for box in bboxs:
38 | # print(box)
39 | landmarks = face_align(image, box)
40 | # print(landmarks, landmarks.shape)
41 | lms = np.reshape(landmarks.astype(np.int32), (-1))
42 | # print(lms, lms.shape)
43 | face_lms.append(lms)
44 | cv2.rectangle(image_show, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 0, 255), 2) # cv2 requires integer coordinates
45 | for (x, y) in landmarks.astype(np.int32):
46 | cv2.circle(image_show, (x, y), 2, (255, 0, 0), -1)
47 | if show_result:
48 | cv2.imshow('lms', image_show)
49 | cv2.waitKey(0)
50 |
51 | # face masker
52 | is_aug = True
53 | mask_template_name = '0.png'
54 | mask_template_name2 = '1.png'
55 | masked_face_path = 'test1_mask1.jpg'
56 | face_masker = FaceMasker(is_aug)
57 | # ======masked one face========
58 | new_image = face_masker.add_mask_one(image, face_lms[0], mask_template_name, masked_face_path)
59 | # imsave(mask_template_name, new_image)
60 | plt.imshow(new_image)
61 | plt.show()
62 |
63 | # masked two face
64 | # new_image = face_masker.add_mask_two(image, face_lms, mask_template_name, masked_face_path)
65 | # plt.imshow(new_image)
66 | # plt.show()
67 |
68 |
75 | if __name__ == '__main__':
76 | main()
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/__init__.py
--------------------------------------------------------------------------------
/models/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/models/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/models/__pycache__/prnet.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/__pycache__/prnet.cpython-36.pyc
--------------------------------------------------------------------------------
/models/__pycache__/prnet.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/__pycache__/prnet.cpython-38.pyc
--------------------------------------------------------------------------------
/models/face_alignment/face_alignment_1.0/face_landmark_pfld.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/face_alignment/face_alignment_1.0/face_landmark_pfld.pkl
--------------------------------------------------------------------------------
/models/face_alignment/face_alignment_1.0/model_meta.json:
--------------------------------------------------------------------------------
1 | {
2 | "model_type" : "pfld face landmark nets",
3 | "model_info" : "some model info",
4 | "model_file" : "face_landmark_pfld.pkl",
5 | "release_date" : "20201023",
6 | "input_height" : 112,
7 | "input_width" : 112
8 | }
9 |
10 |
--------------------------------------------------------------------------------
/models/face_detection/face_detection_1.0/face_detection_retina.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/face_detection/face_detection_1.0/face_detection_retina.pkl
--------------------------------------------------------------------------------
/models/face_detection/face_detection_1.0/model_meta.json:
--------------------------------------------------------------------------------
1 | {
2 | "model_type" : "retina face detect nets",
3 | "model_info" : "some model info",
4 | "model_file" : "face_detection_retina.pkl",
5 | "release_date" : "20201019",
6 | "input_height" : 120,
7 | "input_width" : 120,
8 | "min_sizes": [[16, 32], [64, 128], [256, 512]],
9 | "steps": [8, 16, 32],
10 | "variance": [0.1, 0.2],
11 | "in_channel": 256,
12 | "out_channel": 256,
13 | "confidence_threshold": 0.7
14 | }
15 |
16 |
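
For illustration, a sketch of reading this metadata with the standard json module; the field names come from the file above, while the repo's actual parsing lives in BaseModelLoader and may differ:

    import json

    with open('models/face_detection/face_detection_1.0/model_meta.json') as f:
        meta = json.load(f)
    print(meta['model_file'])                         # face_detection_retina.pkl
    print(meta['input_height'], meta['input_width'])  # 120 120
    print(meta['confidence_threshold'])               # 0.7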
--------------------------------------------------------------------------------
/models/network_def/__pycache__/mobilev3_pfld.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/network_def/__pycache__/mobilev3_pfld.cpython-36.pyc
--------------------------------------------------------------------------------
/models/network_def/__pycache__/mobilev3_pfld.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/network_def/__pycache__/mobilev3_pfld.cpython-38.pyc
--------------------------------------------------------------------------------
/models/network_def/__pycache__/retinaface_def.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/network_def/__pycache__/retinaface_def.cpython-36.pyc
--------------------------------------------------------------------------------
/models/network_def/__pycache__/retinaface_def.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/network_def/__pycache__/retinaface_def.cpython-38.pyc
--------------------------------------------------------------------------------
/models/network_def/mobilefacenet_def.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: Jun Wang
3 | @date: 20201019
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | # based on:
7 | # https://github.com/TreB1eN/InsightFace_Pytorch/blob/master/model.py
8 |
9 | from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Sequential, Module
10 | import torch
11 |
12 | class Flatten(Module):
13 | def forward(self, input):
14 | return input.view(input.size(0), -1)
15 |
16 | def l2_norm(input,axis=1):
17 | norm = torch.norm(input,2,axis,True)
18 | output = torch.div(input, norm)
19 | return output
20 |
21 | class Conv_block(Module):
22 | def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
23 | super(Conv_block, self).__init__()
24 | self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding, bias=False)
25 | self.bn = BatchNorm2d(out_c)
26 | self.prelu = PReLU(out_c)
27 | def forward(self, x):
28 | x = self.conv(x)
29 | x = self.bn(x)
30 | x = self.prelu(x)
31 | return x
32 |
33 | class Linear_block(Module):
34 | def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
35 | super(Linear_block, self).__init__()
36 | self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding, bias=False)
37 | self.bn = BatchNorm2d(out_c)
38 | def forward(self, x):
39 | x = self.conv(x)
40 | x = self.bn(x)
41 | return x
42 |
43 | class Depth_Wise(Module):
44 | def __init__(self, in_c, out_c, residual = False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1):
45 | super(Depth_Wise, self).__init__()
46 | self.conv = Conv_block(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
47 | self.conv_dw = Conv_block(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride)
48 | self.project = Linear_block(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
49 | self.residual = residual
50 | def forward(self, x):
51 | if self.residual:
52 | short_cut = x
53 | x = self.conv(x)
54 | x = self.conv_dw(x)
55 | x = self.project(x)
56 | if self.residual:
57 | output = short_cut + x
58 | else:
59 | output = x
60 | return output
61 |
62 | class Residual(Module):
63 | def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)):
64 | super(Residual, self).__init__()
65 | modules = []
66 | for _ in range(num_block):
67 | modules.append(Depth_Wise(c, c, residual=True, kernel=kernel, padding=padding, stride=stride, groups=groups))
68 | self.model = Sequential(*modules)
69 | def forward(self, x):
70 | return self.model(x)
71 |
72 | class MobileFaceNet(Module):
73 | def __init__(self, embedding_size, out_h, out_w):
74 | super(MobileFaceNet, self).__init__()
75 | self.conv1 = Conv_block(3, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1))
76 | self.conv2_dw = Conv_block(64, 64, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=64)
77 | self.conv_23 = Depth_Wise(64, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128)
78 | self.conv_3 = Residual(64, num_block=4, groups=128, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
79 | self.conv_34 = Depth_Wise(64, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256)
80 | self.conv_4 = Residual(128, num_block=6, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
81 | self.conv_45 = Depth_Wise(128, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=512)
82 | self.conv_5 = Residual(128, num_block=2, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
83 | self.conv_6_sep = Conv_block(128, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0))
84 | #self.conv_6_dw = Linear_block(512, 512, groups=512, kernel=(7,7), stride=(1, 1), padding=(0, 0))
85 | #self.conv_6_dw = Linear_block(512, 512, groups=512, kernel=(4,7), stride=(1, 1), padding=(0, 0))
86 | self.conv_6_dw = Linear_block(512, 512, groups=512, kernel=(out_h, out_w), stride=(1, 1), padding=(0, 0))
87 | self.conv_6_flatten = Flatten()
88 | self.linear = Linear(512, embedding_size, bias=False)
89 | self.bn = BatchNorm1d(embedding_size)
90 |
91 | def forward(self, x):
92 | out = self.conv1(x)
93 | out = self.conv2_dw(out)
94 | out = self.conv_23(out)
95 | out = self.conv_3(out)
96 | out = self.conv_34(out)
97 | out = self.conv_4(out)
98 | out = self.conv_45(out)
99 | out = self.conv_5(out)
100 | out = self.conv_6_sep(out)
101 | out = self.conv_6_dw(out)
102 | out = self.conv_6_flatten(out)
103 | out = self.linear(out)
104 | out = self.bn(out)
105 | return l2_norm(out)
106 |
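
A shape sanity check (a sketch, not part of the repo): with a 112x112 input the feature map entering conv_6_dw is 7x7, so out_h = out_w = 7. eval() matters here because BatchNorm1d cannot run in train mode on a batch of one.

    import torch

    net = MobileFaceNet(embedding_size=512, out_h=7, out_w=7)
    net.eval()
    with torch.no_grad():
        emb = net(torch.randn(1, 3, 112, 112))
    print(emb.shape)        # torch.Size([1, 512])
    print(emb.norm(dim=1))  # ~1.0, thanks to l2_norm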
--------------------------------------------------------------------------------
/models/network_def/mobilev3_pfld.py:
--------------------------------------------------------------------------------
1 | # derive from:
2 | # https://github.com/Hsintao/pfld_106_face_landmarks/blob/master/models/mobilev3_pfld.py
3 |
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 |
8 |
9 | def conv_bn(inp, oup, kernel_size, stride, padding=1, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
10 | return nn.Sequential(
11 | conv_layer(inp, oup, kernel_size, stride, padding, bias=False),
12 | norm_layer(oup),
13 | nlin_layer(inplace=True)
14 | )
15 |
16 |
17 | def conv_1x1_bn(inp, oup, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
18 | return nn.Sequential(
19 | conv_layer(inp, oup, 1, 1, 0, bias=False),
20 | norm_layer(oup),
21 | nlin_layer(inplace=True)
22 | )
23 |
24 |
25 | class Hswish(nn.Module):
26 | def __init__(self, inplace=True):
27 | super(Hswish, self).__init__()
28 | self.inplace = inplace
29 |
30 | def forward(self, x):
31 | return x * F.relu6(x + 3., inplace=self.inplace) / 6.
32 |
33 |
34 | class Hsigmoid(nn.Module):
35 | def __init__(self, inplace=True):
36 | super(Hsigmoid, self).__init__()
37 | self.inplace = inplace
38 |
39 | def forward(self, x):
40 | return F.relu6(x + 3., inplace=self.inplace) / 6.
41 |
42 |
43 | class SEModule(nn.Module):
44 | def __init__(self, channel, reduction=4):
45 | super(SEModule, self).__init__()
46 | self.avg_pool = nn.AdaptiveAvgPool2d(1)
47 | self.fc = nn.Sequential(
48 | nn.Linear(channel, channel // reduction, bias=False),
49 | nn.ReLU(inplace=True),
50 | nn.Linear(channel // reduction, channel, bias=False),
51 | Hsigmoid()
52 | )
53 |
54 | def forward(self, x):
55 | b, c, h, w = x.size()
56 | # F.avg_pool2d()
57 | y = self.avg_pool(x).view(b, c)
58 | y = self.fc(y).view(b, c, 1, 1)
59 | return x * y
60 |
61 |
62 | class Identity(nn.Module):
63 | def __init__(self, channel):
64 | super(Identity, self).__init__()
65 |
66 | def forward(self, x):
67 | return x
68 |
69 |
70 | class MobileBottleneck(nn.Module):
71 | def __init__(self, inp, oup, kernel, stride, exp, se=False, nl='RE'):
72 | super(MobileBottleneck, self).__init__()
73 | assert stride in [1, 2]
74 | assert kernel in [3, 5]
75 | padding = (kernel - 1) // 2
76 | self.use_res_connect = stride == 1 and inp == oup
77 |
78 | conv_layer = nn.Conv2d
79 | norm_layer = nn.BatchNorm2d
80 | if nl == 'RE':
81 | nlin_layer = nn.ReLU # or ReLU6
82 | elif nl == 'HS':
83 | nlin_layer = Hswish
84 | else:
85 | raise NotImplementedError
86 | if se:
87 | SELayer = SEModule
88 | else:
89 | SELayer = Identity
90 |
91 | self.conv = nn.Sequential(
92 | # pw
93 | conv_layer(inp, exp, 1, 1, 0, bias=False),
94 | norm_layer(exp),
95 | nlin_layer(inplace=True),
96 | # dw
97 | conv_layer(exp, exp, kernel, stride, padding, groups=exp, bias=False),
98 | norm_layer(exp),
99 | SELayer(exp),
100 | nlin_layer(inplace=True),
101 | # pw-linear
102 | conv_layer(exp, oup, 1, 1, 0, bias=False),
103 | norm_layer(oup),
104 | )
105 |
106 | def forward(self, x):
107 | if self.use_res_connect:
108 | return x + self.conv(x)
109 | else:
110 | return self.conv(x)
111 |
112 |
113 | class PFLDInference(nn.Module):
114 | def __init__(self):
115 | super(PFLDInference, self).__init__()
116 | self.use_attention = True
117 | self.conv_bn1 = conv_bn(3, 16, 3, stride=1, nlin_layer=Hswish)
118 | self.conv_bn2 = MobileBottleneck(16, 16, 3, 1, 16, False, 'RE')
119 |
120 | self.conv3_1 = MobileBottleneck(16, 24, 3, 2, 64, False, 'RE')
121 |
122 | self.block3_2 = MobileBottleneck(24, 24, 3, 1, 72, False, "RE")
123 | self.block3_3 = MobileBottleneck(24, 40, 5, 2, 72, self.use_attention, "RE")
124 | self.block3_4 = MobileBottleneck(40, 40, 5, 1, 120, self.use_attention, "RE")
125 | self.block3_5 = MobileBottleneck(40, 40, 5, 1, 120, self.use_attention, "RE")
126 |
127 | self.conv4_1 = MobileBottleneck(40, 80, 3, 2, 240, False, "RE")
128 |
129 | self.conv5_1 = MobileBottleneck(80, 80, 3, 1, 200, False, "HS")
130 | self.block5_2 = MobileBottleneck(80, 112, 3, 1, 480, self.use_attention, "HS")
131 | self.block5_3 = MobileBottleneck(112, 112, 3, 1, 672, self.use_attention, "HS")
132 | self.block5_4 = MobileBottleneck(112, 160, 3, 1, 672, self.use_attention, "HS")
133 |
134 | self.conv6_1 = MobileBottleneck(160, 16, 3, 1, 320, False, "HS") # [16, 14, 14]
135 |
136 | self.conv7 = nn.Conv2d(16, 32, 3, 2, padding=1)
137 | self.conv8 = nn.Conv2d(32, 128, 7, 1, 0)
138 | self.avg_pool1 = nn.AvgPool2d(14)
139 | self.avg_pool2 = nn.AvgPool2d(7)
140 | self.fc = nn.Linear(176, 106 * 2)
141 |
142 | def forward(self, x): # x: 3, 112, 112
143 | x = self.conv_bn1(x) # [16, 112, 112]
144 | x = self.conv_bn2(x) # [16, 112, 112]
145 | x = self.conv3_1(x)
146 | x = self.block3_2(x)
147 | x = self.block3_3(x)
148 | x = self.block3_4(x)
149 | out1 = self.block3_5(x)
150 |
151 | x = self.conv4_1(out1)
152 |
153 | x = self.conv5_1(x)
154 | x = self.block5_2(x)
155 | x = self.block5_3(x)
156 | x = self.block5_4(x)
157 | x = self.conv6_1(x)
158 | x1 = self.avg_pool1(x)
159 | x1 = x1.view(x1.size(0), -1)
160 |
161 | x = self.conv7(x)
162 | x2 = self.avg_pool2(x)
163 | x2 = x2.view(x2.size(0), -1)
164 |
165 | x3 = self.conv8(x)
166 | x3 = x3.view(x1.size(0), -1)
167 |
168 | multi_scale = torch.cat([x1, x2, x3], 1)
169 | landmarks = self.fc(multi_scale)
170 |
171 | return out1, landmarks
172 |
173 |
174 | class AuxiliaryNet(nn.Module):
175 | def __init__(self):
176 | super(AuxiliaryNet, self).__init__()
177 | self.conv1 = conv_bn(40, 128, 3, 2)
178 | self.conv2 = conv_bn(128, 128, 3, 1)
179 | self.conv3 = conv_bn(128, 32, 3, 2)
180 | self.conv4 = conv_bn(32, 128, 3, 1, padding=0)
181 | self.max_pool1 = nn.MaxPool2d(5)
182 | self.fc1 = nn.Linear(128, 32)
183 | self.fc2 = nn.Linear(32, 3)
184 |
185 | def forward(self, x):
186 | x = self.conv1(x)
187 | x = self.conv2(x)
188 | x = self.conv3(x)
189 | x = self.conv4(x)
190 | x = self.max_pool1(x)
191 | x = x.view(x.size(0), -1)
192 | x = self.fc1(x)
193 | x = self.fc2(x)
194 |
195 | return x
196 |
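
A quick shape sanity check (a sketch): PFLDInference expects a 112x112 crop and regresses 106 two-dimensional landmarks, while AuxiliaryNet consumes the block3_5 feature map:

    import torch

    net = PFLDInference()
    aux = AuxiliaryNet()
    net.eval(); aux.eval()
    with torch.no_grad():
        feat, landmarks = net(torch.randn(1, 3, 112, 112))
    print(feat.shape)       # torch.Size([1, 40, 28, 28])
    print(landmarks.shape)  # torch.Size([1, 212]) == 106 * 2
    print(aux(feat).shape)  # torch.Size([1, 3]), the pose branch used in training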
--------------------------------------------------------------------------------
/models/network_def/retinaface_def.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201019
4 | @contact: jun21wangustc@gmail.com
5 | """
6 |
7 | # based on:
8 | # https://github.com/biubug6/Pytorch_Retinaface/blob/master/models/retinaface.py
9 |
10 | import torch
11 | import torch.nn as nn
12 | import torchvision.models._utils as _utils
13 | import torch.nn.functional as F
14 | from collections import OrderedDict
15 |
16 | def conv_bn(inp, oup, stride = 1, leaky = 0):
17 | return nn.Sequential(
18 | nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
19 | nn.BatchNorm2d(oup),
20 | nn.LeakyReLU(negative_slope=leaky, inplace=True)
21 | )
22 |
23 | def conv_bn_no_relu(inp, oup, stride):
24 | return nn.Sequential(
25 | nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
26 | nn.BatchNorm2d(oup),
27 | )
28 |
29 | def conv_bn1X1(inp, oup, stride, leaky=0):
30 | return nn.Sequential(
31 | nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False),
32 | nn.BatchNorm2d(oup),
33 | nn.LeakyReLU(negative_slope=leaky, inplace=True)
34 | )
35 |
36 | def conv_dw(inp, oup, stride, leaky=0.1):
37 | return nn.Sequential(
38 | nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
39 | nn.BatchNorm2d(inp),
40 | nn.LeakyReLU(negative_slope= leaky,inplace=True),
41 |
42 | nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
43 | nn.BatchNorm2d(oup),
44 | nn.LeakyReLU(negative_slope= leaky,inplace=True),
45 | )
46 |
47 | class SSH(nn.Module):
48 | def __init__(self, in_channel, out_channel):
49 | super(SSH, self).__init__()
50 | assert out_channel % 4 == 0
51 | leaky = 0
52 | if (out_channel <= 64):
53 | leaky = 0.1
54 | self.conv3X3 = conv_bn_no_relu(in_channel, out_channel//2, stride=1)
55 |
56 | self.conv5X5_1 = conv_bn(in_channel, out_channel//4, stride=1, leaky = leaky)
57 | self.conv5X5_2 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1)
58 |
59 | self.conv7X7_2 = conv_bn(out_channel//4, out_channel//4, stride=1, leaky = leaky)
60 | self.conv7x7_3 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1)
61 |
62 | def forward(self, input):
63 | conv3X3 = self.conv3X3(input)
64 |
65 | conv5X5_1 = self.conv5X5_1(input)
66 | conv5X5 = self.conv5X5_2(conv5X5_1)
67 |
68 | conv7X7_2 = self.conv7X7_2(conv5X5_1)
69 | conv7X7 = self.conv7x7_3(conv7X7_2)
70 |
71 | out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)
72 | out = F.relu(out)
73 | return out
74 |
75 | class FPN(nn.Module):
76 | def __init__(self,in_channels_list,out_channels):
77 | super(FPN,self).__init__()
78 | leaky = 0
79 | if (out_channels <= 64):
80 | leaky = 0.1
81 | self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride = 1, leaky = leaky)
82 | self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride = 1, leaky = leaky)
83 | self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride = 1, leaky = leaky)
84 |
85 | self.merge1 = conv_bn(out_channels, out_channels, leaky = leaky)
86 | self.merge2 = conv_bn(out_channels, out_channels, leaky = leaky)
87 |
88 | def forward(self, input):
89 | # names = list(input.keys())
90 | input = list(input.values())
91 |
92 | output1 = self.output1(input[0])
93 | output2 = self.output2(input[1])
94 | output3 = self.output3(input[2])
95 |
96 | up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode="nearest")
97 | output2 = output2 + up3
98 | output2 = self.merge2(output2)
99 |
100 | up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode="nearest")
101 | output1 = output1 + up2
102 | output1 = self.merge1(output1)
103 |
104 | out = [output1, output2, output3]
105 | return out
106 |
107 | class MobileNetV1(nn.Module):
108 | def __init__(self):
109 | super(MobileNetV1, self).__init__()
110 | self.stage1 = nn.Sequential(
111 | conv_bn(3, 8, 2, leaky = 0.1), # 3
112 | conv_dw(8, 16, 1), # 7
113 | conv_dw(16, 32, 2), # 11
114 | conv_dw(32, 32, 1), # 19
115 | conv_dw(32, 64, 2), # 27
116 | conv_dw(64, 64, 1), # 43
117 | )
118 | self.stage2 = nn.Sequential(
119 | conv_dw(64, 128, 2), # 43 + 16 = 59
120 | conv_dw(128, 128, 1), # 59 + 32 = 91
121 | conv_dw(128, 128, 1), # 91 + 32 = 123
122 | conv_dw(128, 128, 1), # 123 + 32 = 155
123 | conv_dw(128, 128, 1), # 155 + 32 = 187
124 | conv_dw(128, 128, 1), # 187 + 32 = 219
125 | )
126 | self.stage3 = nn.Sequential(
127 | conv_dw(128, 256, 2), # 219 + 32 = 251
128 | conv_dw(256, 256, 1), # 251 + 64 = 315
129 | )
130 | self.avg = nn.AdaptiveAvgPool2d((1,1))
131 | self.fc = nn.Linear(256, 1000)
132 |
133 | def forward(self, x):
134 | x = self.stage1(x)
135 | x = self.stage2(x)
136 | x = self.stage3(x)
137 | x = self.avg(x)
138 | # x = self.model(x)
139 | x = x.view(-1, 256)
140 | x = self.fc(x)
141 | return x
142 |
143 | class ClassHead(nn.Module):
144 | def __init__(self,inchannels=512,num_anchors=3):
145 | super(ClassHead,self).__init__()
146 | self.num_anchors = num_anchors
147 | self.conv1x1 = nn.Conv2d(inchannels,self.num_anchors*2,kernel_size=(1,1),stride=1,padding=0)
148 |
149 | def forward(self,x):
150 | out = self.conv1x1(x)
151 | out = out.permute(0,2,3,1).contiguous()
152 |
153 | return out.view(out.shape[0], -1, 2)
154 |
155 | class BboxHead(nn.Module):
156 | def __init__(self,inchannels=512,num_anchors=3):
157 | super(BboxHead,self).__init__()
158 | self.conv1x1 = nn.Conv2d(inchannels,num_anchors*4,kernel_size=(1,1),stride=1,padding=0)
159 |
160 | def forward(self,x):
161 | out = self.conv1x1(x)
162 | out = out.permute(0,2,3,1).contiguous()
163 |
164 | return out.view(out.shape[0], -1, 4)
165 |
166 | class LandmarkHead(nn.Module):
167 | def __init__(self,inchannels=512,num_anchors=3):
168 | super(LandmarkHead,self).__init__()
169 | self.conv1x1 = nn.Conv2d(inchannels,num_anchors*10,kernel_size=(1,1),stride=1,padding=0)
170 |
171 | def forward(self,x):
172 | out = self.conv1x1(x)
173 | out = out.permute(0,2,3,1).contiguous()
174 |
175 | return out.view(out.shape[0], -1, 10)
176 |
177 | class RetinaFace(nn.Module):
178 | def __init__(self, cfg = None, phase = 'train'):
179 | """
180 | :param cfg: Network related settings.
181 | :param phase: train or test.
182 | """
183 | super(RetinaFace,self).__init__()
184 | self.phase = phase
185 | backbone = MobileNetV1()
186 |
187 | self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers'])
188 | in_channels_stage2 = cfg['in_channel']
189 | in_channels_list = [
190 | in_channels_stage2 * 2,
191 | in_channels_stage2 * 4,
192 | in_channels_stage2 * 8,
193 | ]
194 | out_channels = cfg['out_channel']
195 | self.fpn = FPN(in_channels_list,out_channels)
196 | self.ssh1 = SSH(out_channels, out_channels)
197 | self.ssh2 = SSH(out_channels, out_channels)
198 | self.ssh3 = SSH(out_channels, out_channels)
199 |
200 | self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
201 | self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
202 | self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])
203 |
204 | def _make_class_head(self,fpn_num=3,inchannels=64,anchor_num=2):
205 | classhead = nn.ModuleList()
206 | for i in range(fpn_num):
207 | classhead.append(ClassHead(inchannels,anchor_num))
208 | return classhead
209 |
210 | def _make_bbox_head(self,fpn_num=3,inchannels=64,anchor_num=2):
211 | bboxhead = nn.ModuleList()
212 | for i in range(fpn_num):
213 | bboxhead.append(BboxHead(inchannels,anchor_num))
214 | return bboxhead
215 |
216 | def _make_landmark_head(self,fpn_num=3,inchannels=64,anchor_num=2):
217 | landmarkhead = nn.ModuleList()
218 | for i in range(fpn_num):
219 | landmarkhead.append(LandmarkHead(inchannels,anchor_num))
220 | return landmarkhead
221 |
222 | def forward(self,inputs):
223 | out = self.body(inputs)
224 |
225 | # FPN
226 | fpn = self.fpn(out)
227 |
228 | # SSH
229 | feature1 = self.ssh1(fpn[0])
230 | feature2 = self.ssh2(fpn[1])
231 | feature3 = self.ssh3(fpn[2])
232 | features = [feature1, feature2, feature3]
233 |
234 | bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
235 | classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)],dim=1)
236 | ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1)
237 |
238 | if self.phase == 'train':
239 | output = (bbox_regressions, classifications, ldm_regressions)
240 | else:
241 | output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
242 | return output
243 |
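
An instantiation sketch. The cfg values below follow the mobilenet0.25 configuration of the upstream Pytorch_Retinaface repo and are assumptions here; in this repo the cfg is assembled by the loader from model_meta.json.

    import torch

    cfg = {
        'return_layers': {'stage1': 1, 'stage2': 2, 'stage3': 3},
        'in_channel': 32,   # MobileNetV1 stage outputs are then [64, 128, 256]
        'out_channel': 64,
    }
    net = RetinaFace(cfg=cfg, phase='test')
    net.eval()
    with torch.no_grad():
        boxes, scores, ldms = net(torch.randn(1, 3, 120, 120))
    print(boxes.shape, scores.shape, ldms.shape)  # (1, N, 4) (1, N, 2) (1, N, 10)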
--------------------------------------------------------------------------------
/models/prnet.py:
--------------------------------------------------------------------------------
1 | # derive from:
2 | # https://github.com/liguohao96/pytorch-prnet/blob/master/prnet.py
3 |
4 | from torch import nn
5 |
6 | def padding_same_conv2d(input_size, in_c, out_c, kernel_size=4, stride=1):
7 | output_size = input_size // stride
8 | padding_num = stride * (output_size - 1) - input_size + kernel_size
9 | if padding_num % 2 == 0:
10 | return nn.Sequential(nn.Conv2d(in_c, out_c, kernel_size=kernel_size, stride=stride, padding=padding_num // 2, bias=False))
11 | else:
12 | return nn.Sequential(
13 | nn.ConstantPad2d((padding_num // 2, padding_num // 2 + 1, padding_num // 2, padding_num // 2 + 1), 0),
14 | nn.Conv2d(in_c, out_c, kernel_size=kernel_size, stride=stride, padding=0, bias=False)
15 | )
16 |
17 | class resBlock(nn.Module):
18 | def __init__(self, in_c, out_c, kernel_size=4, stride=1, input_size=None):
19 | super().__init__()
20 | assert kernel_size == 4
21 | self.shortcut = lambda x: x
22 | if in_c != out_c:
23 | self.shortcut = nn.Conv2d(in_c, out_c, kernel_size=1, stride=stride, bias=False)
24 |
25 | main_layers = [
26 | nn.Conv2d(in_c, out_c // 2, kernel_size=1, stride=1, padding=0, bias=False),
27 | nn.BatchNorm2d(out_c // 2, eps=0.001, momentum=0.001),
28 | nn.ReLU(inplace=True),
29 | ]
30 |
31 | main_layers.extend([
32 | *padding_same_conv2d(input_size, out_c // 2, out_c // 2, kernel_size=kernel_size, stride=stride),
33 | nn.BatchNorm2d(out_c // 2, eps=0.001, momentum=0.001),
34 | nn.ReLU(inplace=True)])
35 |
36 | main_layers.extend(
37 | padding_same_conv2d(input_size, out_c // 2, out_c, kernel_size=1, stride=1)
38 | )
39 | self.main = nn.Sequential(*main_layers)
40 | self.activate = nn.Sequential(
41 | nn.BatchNorm2d(out_c, eps=0.001, momentum=0.001),
42 | nn.ReLU(inplace=True)
43 | )
44 |
45 | def forward(self, x):
46 | shortcut_x = self.shortcut(x)
47 | main_x = self.main(x)
48 | x = self.activate(shortcut_x + main_x)
49 | return x
50 |
51 |
52 | class upBlock(nn.Module):
53 | def __init__(self, in_c, out_c, conv_num=2):
54 | super().__init__()
55 | additional_conv = []
56 | layer_length = 4
57 |
58 | for i in range(1, conv_num+1):
59 | additional_conv += [
60 | nn.ConstantPad2d((2, 1, 2, 1), 0),
61 | nn.ConvTranspose2d(out_c, out_c, kernel_size=4, stride=1, padding=3, bias=False),
62 | nn.BatchNorm2d(out_c, eps=0.001, momentum=0.001),
63 | nn.ReLU(inplace=True)
64 | ]
65 | self.main = nn.Sequential(
66 | # nn.ConstantPad2d((0, 1, 0, 1), 0),
67 | nn.ConvTranspose2d(in_c, out_c, kernel_size=4, stride=2, padding=1, bias=False),
68 | nn.BatchNorm2d(out_c, eps=0.001, momentum=0.001),
69 | nn.ReLU(inplace=True),
70 | *additional_conv
71 | )
72 |
73 | def forward(self, x):
74 | x = self.main(x)
75 | return x
76 |
77 | class PRNet(nn.Module):
78 | def __init__(self, in_channel, out_channel=3):
79 | super().__init__()
80 | size = 16
81 | self.input_conv = nn.Sequential( #*[
82 | *padding_same_conv2d(256, in_channel, size, kernel_size=4, stride=1), # 256x256x16
83 | nn.BatchNorm2d(size, eps=0.001, momentum=0.001),
84 | nn.ReLU(inplace=True)
85 | # ]
86 | )
87 | self.down_conv_1 = resBlock(size, size * 2, kernel_size=4, stride=2, input_size=256) # 128x128x32
88 | self.down_conv_2 = resBlock(size * 2, size * 2, kernel_size=4, stride=1, input_size=128) # 128x128x32
89 | self.down_conv_3 = resBlock(size * 2, size * 4, kernel_size=4, stride=2, input_size=128) # 64x64x64
90 | self.down_conv_4 = resBlock(size * 4, size * 4, kernel_size=4, stride=1, input_size=64) # 64x64x64
91 | self.down_conv_5 = resBlock(size * 4, size * 8, kernel_size=4, stride=2, input_size=64) # 32x32x128
92 | self.down_conv_6 = resBlock(size * 8, size * 8, kernel_size=4, stride=1, input_size=32) # 32x32x128
93 | self.down_conv_7 = resBlock(size * 8, size * 16, kernel_size=4, stride=2, input_size=32) # 16x16x256
94 | self.down_conv_8 = resBlock(size * 16, size * 16, kernel_size=4, stride=1, input_size=16) # 16x16x256
95 | self.down_conv_9 = resBlock(size * 16, size * 32, kernel_size=4, stride=2, input_size=16) # 8x8x512
96 | self.down_conv_10 = resBlock(size * 32, size * 32, kernel_size=4, stride=1, input_size=8) # 8x8x512
97 |
98 | self.center_conv = nn.Sequential(
99 | nn.ConstantPad2d((2, 1, 2, 1), 0),
100 | nn.ConvTranspose2d(size * 32, size * 32, kernel_size=4, stride=1, padding=3, bias=False), # 8x8x512
101 | nn.BatchNorm2d(size * 32, eps=0.001, momentum=0.001),
102 | nn.ReLU(inplace=True)
103 | )
104 |
105 | self.up_conv_5 = upBlock(size * 32, size * 16) # 16x16x256
106 | self.up_conv_4 = upBlock(size * 16, size * 8) # 32x32x128
107 | self.up_conv_3 = upBlock(size * 8, size * 4) # 64x64x64
108 |
109 | self.up_conv_2 = upBlock(size * 4, size * 2, 1) # 128x128x32
110 | self.up_conv_1 = upBlock(size * 2, size, 1) # 256x256x16
111 |
112 | self.output_conv = nn.Sequential(
113 | nn.ConstantPad2d((2, 1, 2, 1), 0),
114 | nn.ConvTranspose2d(size, 3, kernel_size=4, stride=1, padding=3, bias=False),
115 | nn.BatchNorm2d(3, eps=0.001, momentum=0.001),
116 | nn.ReLU(inplace=True),
117 |
118 | nn.ConstantPad2d((2, 1, 2, 1), 0),
119 | nn.ConvTranspose2d(3, 3, kernel_size=4, stride=1, padding=3, bias=False),
120 | nn.BatchNorm2d(3, eps=0.001, momentum=0.001),
121 | nn.ReLU(inplace=True),
122 |
123 | nn.ConstantPad2d((2, 1, 2, 1), 0),
124 | nn.ConvTranspose2d(3, 3, kernel_size=4, stride=1, padding=3, bias=False),
125 | nn.BatchNorm2d(3, eps=0.001, momentum=0.001),
126 | nn.Sigmoid()
127 | )
128 |
129 | def forward(self, x):
130 | x = self.input_conv(x)
131 | x = self.down_conv_1(x)
132 | x = self.down_conv_2(x)
133 | x = self.down_conv_3(x)
134 | x = self.down_conv_4(x)
135 | x = self.down_conv_5(x)
136 | x = self.down_conv_6(x)
137 | x = self.down_conv_7(x)
138 | x = self.down_conv_8(x)
139 | x = self.down_conv_9(x)
140 | x = self.down_conv_10(x)
141 |
142 | x = self.center_conv(x)
143 |
144 | x = self.up_conv_5(x)
145 | x = self.up_conv_4(x)
146 | x = self.up_conv_3(x)
147 | x = self.up_conv_2(x)
148 | x = self.up_conv_1(x)
149 | x = self.output_conv(x)
150 | return x
151 |
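
A forward-pass sketch: PRNet maps a 256x256 crop to a 256x256 UV position map, and the final Sigmoid keeps raw outputs in (0, 1); PRN.process later rescales them by MaxPos.

    import torch

    net = PRNet(3, 3)
    net.eval()
    with torch.no_grad():
        pos = net(torch.randn(1, 3, 256, 256))
    print(pos.shape)                           # torch.Size([1, 3, 256, 256])
    print(float(pos.min()), float(pos.max()))  # both inside (0, 1)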
--------------------------------------------------------------------------------
/models/prnet_pytorch.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/models/prnet_pytorch.pth
--------------------------------------------------------------------------------
/test1_mask1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/test1_mask1.jpg
--------------------------------------------------------------------------------
/utils/BuzException.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201015
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | # all self defined exception is derived from BuzException
7 | class BuzException(Exception):
8 | pass
9 |
10 | class InputError(BuzException):
11 | def __init__(self):
12 | pass
13 | def __str__(self):
14 | return ("Input type error!")
15 |
16 | ###############################################
17 | #all image related exception.
18 | ###############################################
19 | class ImageException(BuzException):
20 | pass
21 |
22 | class EmptyImageError(ImageException):
23 | def __init__(self):
24 | pass
25 | def __str__(self):
26 | return ("The input image is empty.")
27 |
28 | class FalseImageSizeError(ImageException):
29 | def __init__(self):
30 | pass
31 | def __str__(self):
32 | return ("The input image size is false.")
33 |
34 | class FaseChannelError(ImageException):
35 | def __init__(self, channel):
36 | self.channel = channel
37 | def __str__(self):
38 | return ("Input channel {} is invalid(only 2, 3, 4 channel is support.),".format(repr(self.channel)))
39 |
--------------------------------------------------------------------------------
/utils/__pycache__/BuzException.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/__pycache__/BuzException.cpython-36.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/BuzException.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/__pycache__/BuzException.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/read_info.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/__pycache__/read_info.cpython-36.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/read_info.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/__pycache__/read_info.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/lms_trans.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: JiXuan Xu, Jun Wang
3 | @date: 20201015
4 | @contact: jun21wangustc@gmail.com
5 | """
6 | # it's an approximate map
7 | # 15 --> (99+103)/2
8 | # 17, 19; 20, 22; 16; 9 will be used in face crop(25 points)
9 | lms25_2_lms106 = {1:105, 2:106, 3:34, 4:38, 5:43,
10 | 6:47, 7:52, 8:55, 9:88, 10:94,
11 | 11:85, 12:91, 13:63, 14:59, 15:99,
12 | 16:61, 17:71, 18:73, 19:67, 20:80,
13 | 21:82, 22:76, 23:36, 24:45, 25:17}
14 |
15 | # 1: left eye center
16 | # 2: right eye center
17 | # 3: nose tip
18 | # 4: left mouth corner
19 | # 5: right mouth corner
20 | lms5_2_lms25 = {1:1, 2:2, 3:8, 4:11, 5:12}
21 | lms5_2_lms106 = {1:105, 2:106, 3:55, 4:85, 5:91}
22 |
23 | def lms106_2_lms25(lms_106):
24 | lms25 = []
25 | for cur_point_index in range(25):
26 | cur_point_id = cur_point_index + 1
27 | point_id_106 = lms25_2_lms106[cur_point_id]
28 | cur_point_index_106 = point_id_106 - 1
29 | cur_point_x = lms_106[cur_point_index_106 * 2]
30 | cur_point_y = lms_106[cur_point_index_106 * 2 + 1]
31 | lms25.append(cur_point_x)
32 | lms25.append(cur_point_y)
33 | return lms25
34 |
35 | def lms106_2_lms5(lms_106):
36 | lms5 = []
37 | for cur_point_index in range(5):
38 | cur_point_id = cur_point_index + 1
39 | point_id_106 = lms5_2_lms106[cur_point_id]
40 | cur_point_index_106 = point_id_106 - 1
41 | cur_point_x = lms_106[cur_point_index_106 * 2]
42 | cur_point_y = lms_106[cur_point_index_106 * 2 + 1]
43 | lms5.append(cur_point_x)
44 | lms5.append(cur_point_y)
45 | return lms5
46 |
47 | def lms25_2_lms5(lms_25):
48 | lms5 = []
49 | for cur_point_index in range(5):
50 | cur_point_id = cur_point_index + 1
51 | point_id_25 = lms5_2_lms25[cur_point_id]
52 | cur_point_index_25 = point_id_25 - 1
53 | cur_point_x = lms_25[cur_point_index_25 * 2]
54 | cur_point_y = lms_25[cur_point_index_25 * 2 + 1]
55 | lms5.append(cur_point_x)
56 | lms5.append(cur_point_y)
57 | return lms5
58 |
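
A usage sketch with dummy data: the maps above collapse a flattened 106-point landmark list (212 floats, x/y interleaved) to the 25- and 5-point layouts used for cropping.

    from utils.lms_trans import lms106_2_lms25, lms106_2_lms5

    lms106 = [float(v) for v in range(212)]  # dummy interleaved x/y values
    lms25 = lms106_2_lms25(lms106)
    lms5 = lms106_2_lms5(lms106)
    print(len(lms25), len(lms5))  # 50 10
    print(lms5[:2])               # [208.0, 209.0] -> point id 105 (left eye center)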
--------------------------------------------------------------------------------
/utils/mesh/.ipynb_checkpoints/__init__-checkpoint.py:
--------------------------------------------------------------------------------
1 | #import sys
2 | #sys.path.append('../face3d/mesh/cython')
3 | #import mesh_core_cython
4 | from . import io
5 | from . import vis
6 | from . import transform
7 | from . import light
8 | from . import render
9 |
10 |
--------------------------------------------------------------------------------
/utils/mesh/.ipynb_checkpoints/io-checkpoint.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import os, sys
7 | from skimage import io
8 | from time import time
9 |
10 | sys.path.insert(0, '/home/liuyinglu2/notespace/code/face3d/face3d/mesh')
11 | import cython
12 | import mesh_core_cython
13 |
14 | ## TODO
15 | ## TODO: c++ version
16 | def read_obj(obj_name):
17 | ''' read mesh
18 | '''
19 | return 0
20 |
21 | # ------------------------- write
22 | def write_asc(path, vertices):
23 | '''
24 | Args:
25 | vertices: shape = (nver, 3)
26 | '''
27 | if path.split('.')[-1] == 'asc':
28 | np.savetxt(path, vertices)
29 | else:
30 | np.savetxt(path + '.asc', vertices)
31 |
32 | def write_obj_with_colors(obj_name, vertices, triangles, colors):
33 | ''' Save 3D face model with texture represented by colors.
34 | Args:
35 | obj_name: str
36 | vertices: shape = (nver, 3)
37 | triangles: shape = (ntri, 3)
38 | colors: shape = (nver, 3)
39 | '''
40 | triangles = triangles.copy()
41 |     triangles += 1 # OBJ/meshlab indices start at 1
42 |
43 | if obj_name.split('.')[-1] != 'obj':
44 | obj_name = obj_name + '.obj'
45 |
46 | # write obj
47 | with open(obj_name, 'w') as f:
48 |
49 | # write vertices & colors
50 | for i in range(vertices.shape[0]):
51 | # s = 'v {} {} {} \n'.format(vertices[0,i], vertices[1,i], vertices[2,i])
52 | s = 'v {} {} {} {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2], colors[i, 0], colors[i, 1], colors[i, 2])
53 | f.write(s)
54 |
55 |         # write f: vertex indices (1-based)
56 |         # triangles: shape = (ntri, 3)
57 | for i in range(triangles.shape[0]):
58 | # s = 'f {} {} {}\n'.format(triangles[i, 0], triangles[i, 1], triangles[i, 2])
59 | s = 'f {} {} {}\n'.format(triangles[i, 2], triangles[i, 1], triangles[i, 0])
60 | f.write(s)
61 |
62 | ## TODO: c++ version
63 | def write_obj_with_texture(obj_name, vertices, triangles, texture, uv_coords):
64 | ''' Save 3D face model with texture represented by texture map.
65 | Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
66 | Args:
67 | obj_name: str
68 | vertices: shape = (nver, 3)
69 | triangles: shape = (ntri, 3)
70 | texture: shape = (256,256,3)
71 |         uv_coords: shape = (nver, 2) or (nver, 3); only the first two columns are used, values in [0, 1]
72 | '''
73 | if obj_name.split('.')[-1] != 'obj':
74 | obj_name = obj_name + '.obj'
75 | mtl_name = obj_name.replace('.obj', '.mtl')
76 | texture_name = obj_name.replace('.obj', '_texture.png')
77 |
78 | triangles = triangles.copy()
79 |     triangles += 1 # OBJ/meshlab indices start at 1
80 |
81 | # write obj
82 | with open(obj_name, 'w') as f:
83 | # first line: write mtlib(material library)
84 | s = "mtllib {}\n".format(os.path.abspath(mtl_name))
85 | f.write(s)
86 |
87 | # write vertices
88 | for i in range(vertices.shape[0]):
89 | s = 'v {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2])
90 | f.write(s)
91 |
92 | # write uv coords
93 | for i in range(uv_coords.shape[0]):
94 | s = 'vt {} {}\n'.format(uv_coords[i,0], 1 - uv_coords[i,1])
95 | f.write(s)
96 |
97 | f.write("usemtl FaceTexture\n")
98 |
99 | # write f: ver ind/ uv ind
100 | for i in range(triangles.shape[0]):
101 | s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i,2], triangles[i,2], triangles[i,1], triangles[i,1], triangles[i,0], triangles[i,0])
102 | f.write(s)
103 |
104 | # write mtl
105 | with open(mtl_name, 'w') as f:
106 | f.write("newmtl FaceTexture\n")
107 | s = 'map_Kd {}\n'.format(os.path.abspath(texture_name)) # map to image
108 | f.write(s)
109 |
110 | # write texture as png
111 |     io.imsave(texture_name, texture)
112 |
113 | # c++ version
114 | def write_obj_with_colors_texture(obj_name, vertices, triangles, colors, texture, uv_coords):
115 | ''' Save 3D face model with texture.
116 | Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
117 | Args:
118 | obj_name: str
119 | vertices: shape = (nver, 3)
120 | triangles: shape = (ntri, 3)
121 | colors: shape = (nver, 3)
122 | texture: shape = (256,256,3)
123 |         uv_coords: shape = (nver, 2) or (nver, 3); only the first two columns are used, values in [0, 1]
124 | '''
125 | if obj_name.split('.')[-1] != 'obj':
126 | obj_name = obj_name + '.obj'
127 | mtl_name = obj_name.replace('.obj', '.mtl')
128 | texture_name = obj_name.replace('.obj', '_texture.png')
129 |
130 | triangles = triangles.copy()
131 |     triangles += 1 # OBJ/meshlab indices start at 1
132 |
133 | # write obj
134 | vertices, colors, uv_coords = vertices.astype(np.float32).copy(), colors.astype(np.float32).copy(), uv_coords.astype(np.float32).copy()
135 | mesh_core_cython.write_obj_with_colors_texture_core(str.encode(obj_name), str.encode(os.path.abspath(mtl_name)), vertices, triangles, colors, uv_coords, vertices.shape[0], triangles.shape[0], uv_coords.shape[0])
136 |
137 | # write mtl
138 | with open(mtl_name, 'w') as f:
139 | f.write("newmtl FaceTexture\n")
140 | s = 'map_Kd {}\n'.format(os.path.abspath(texture_name)) # map to image
141 | f.write(s)
142 |
143 | # write texture as png
144 | io.imsave(texture_name, texture)
--------------------------------------------------------------------------------
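Note: write_obj_with_colors above is pure Python (only the *_texture variants need the compiled cython core), so its OBJ layout can be checked in isolation. A self-contained sketch of the same v/f format (dummy triangle, illustration only):

    import numpy as np

    vertices = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])  # (nver, 3)
    colors = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])    # per-vertex RGB in [0, 1]
    triangles = np.array([[0, 1, 2]]) + 1                            # OBJ indices start at 1

    with open('tri.obj', 'w') as f:
        for v, c in zip(vertices, colors):
            f.write('v {} {} {} {} {} {}\n'.format(v[0], v[1], v[2], c[0], c[1], c[2]))
        for t in triangles:
            f.write('f {} {} {}\n'.format(t[2], t[1], t[0]))         # reversed winding, as above
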
/utils/mesh/.ipynb_checkpoints/light-checkpoint.py:
--------------------------------------------------------------------------------
1 | '''
2 | Functions for lighting a mesh (changing the colors/texture of the mesh).
3 | 1. add light to colors/texture (shade each vertex)
4 | 2. fit light according to colors/texture & image.
5 | '''
6 |
7 | from __future__ import absolute_import
8 | from __future__ import division
9 | from __future__ import print_function
10 |
11 | import numpy as np
12 | import sys
13 | sys.path.insert(0, '/home/liuyinglu2/notespace/code/face3d/face3d/mesh')
14 | import cython
15 | import mesh_core_cython
16 |
17 | def get_normal(vertices, triangles):
18 | ''' calculate normal direction in each vertex
19 | Args:
20 | vertices: [nver, 3]
21 | triangles: [ntri, 3]
22 | Returns:
23 | normal: [nver, 3]
24 | '''
25 | pt0 = vertices[triangles[:, 0], :] # [ntri, 3]
26 | pt1 = vertices[triangles[:, 1], :] # [ntri, 3]
27 | pt2 = vertices[triangles[:, 2], :] # [ntri, 3]
28 | tri_normal = np.cross(pt0 - pt1, pt0 - pt2) # [ntri, 3]. normal of each triangle
29 |
30 | normal = np.zeros_like(vertices, dtype = np.float32).copy() # [nver, 3]
31 | # for i in range(triangles.shape[0]):
32 | # normal[triangles[i, 0], :] = normal[triangles[i, 0], :] + tri_normal[i, :]
33 | # normal[triangles[i, 1], :] = normal[triangles[i, 1], :] + tri_normal[i, :]
34 | # normal[triangles[i, 2], :] = normal[triangles[i, 2], :] + tri_normal[i, :]
35 | mesh_core_cython.get_normal_core(normal, tri_normal.astype(np.float32).copy(), triangles.copy(), triangles.shape[0])
36 |
37 | # normalize to unit length
38 | mag = np.sum(normal**2, 1) # [nver]
39 | zero_ind = (mag == 0)
40 |     mag[zero_ind] = 1
41 | normal[zero_ind, 0] = np.ones((np.sum(zero_ind)))
42 |
43 | normal = normal/np.sqrt(mag[:,np.newaxis])
44 |
45 | return normal
46 |
47 | # TODO: test
48 | def add_light_sh(vertices, triangles, colors, sh_coeff):
49 | '''
50 | In 3d face, usually assume:
51 | 1. The surface of face is Lambertian(reflect only the low frequencies of lighting)
52 | 2. Lighting can be an arbitrary combination of point sources
53 | --> can be expressed in terms of spherical harmonics(omit the lighting coefficients)
54 | I = albedo * (sh(n) x sh_coeff)
55 |
56 | albedo: n x 1
57 | sh_coeff: 9 x 1
58 | Y(n) = (1, n_x, n_y, n_z, n_xn_y, n_xn_z, n_yn_z, n_x^2 - n_y^2, 3n_z^2 - 1)': n x 9
59 | # Y(n) = (1, n_x, n_y, n_z)': n x 4
60 |
61 | Args:
62 | vertices: [nver, 3]
63 | triangles: [ntri, 3]
64 | colors: [nver, 3] albedo
65 | sh_coeff: [9, 1] spherical harmonics coefficients
66 |
67 | Returns:
68 | lit_colors: [nver, 3]
69 | '''
70 | assert vertices.shape[0] == colors.shape[0]
71 | nver = vertices.shape[0]
72 | normal = get_normal(vertices, triangles) # [nver, 3]
73 |     sh = np.array((np.ones(nver), normal[:,0], normal[:,1], normal[:,2], normal[:,0]*normal[:,1], normal[:,0]*normal[:,2], normal[:,1]*normal[:,2], normal[:,0]**2 - normal[:,1]**2, 3*(normal[:,2]**2) - 1)).T # [nver, 9]
74 | ref = sh.dot(sh_coeff) #[nver, 1]
75 | lit_colors = colors*ref
76 | return lit_colors
77 |
78 |
79 | def add_light(vertices, triangles, colors, light_positions = 0, light_intensities = 0):
80 | ''' Gouraud shading. add point lights.
81 | In 3d face, usually assume:
82 | 1. The surface of face is Lambertian(reflect only the low frequencies of lighting)
83 | 2. Lighting can be an arbitrary combination of point sources
84 |     3. No specular (unless the skin is oily)
85 |
86 | Ref: https://cs184.eecs.berkeley.edu/lecture/pipeline
87 | Args:
88 | vertices: [nver, 3]
89 | triangles: [ntri, 3]
90 | light_positions: [nlight, 3]
91 | light_intensities: [nlight, 3]
92 | Returns:
93 | lit_colors: [nver, 3]
94 | '''
95 | nver = vertices.shape[0]
96 | normals = get_normal(vertices, triangles) # [nver, 3]
97 |
98 | # ambient
99 | # La = ka*Ia
100 |
101 | # diffuse
102 | # Ld = kd*(I/r^2)max(0, nxl)
103 | direction_to_lights = vertices[np.newaxis, :, :] - light_positions[:, np.newaxis, :] # [nlight, nver, 3]
104 | direction_to_lights_n = np.sqrt(np.sum(direction_to_lights**2, axis = 2)) # [nlight, nver]
105 | direction_to_lights = direction_to_lights/direction_to_lights_n[:, :, np.newaxis]
106 | normals_dot_lights = normals[np.newaxis, :, :]*direction_to_lights # [nlight, nver, 3]
107 | normals_dot_lights = np.sum(normals_dot_lights, axis = 2) # [nlight, nver]
108 | diffuse_output = colors[np.newaxis, :, :]*normals_dot_lights[:, :, np.newaxis]*light_intensities[:, np.newaxis, :]
109 | diffuse_output = np.sum(diffuse_output, axis = 0) # [nver, 3]
110 |
111 | # specular
112 | # h = (v + l)/(|v + l|) bisector
113 | # Ls = ks*(I/r^2)max(0, nxh)^p
114 |     # increasing p narrows the reflection lobe
115 |
116 | lit_colors = diffuse_output # only diffuse part here.
117 | lit_colors = np.minimum(np.maximum(lit_colors, 0), 1)
118 | return lit_colors
119 |
120 |
121 |
122 | ## TODO. estimate light(sh coeff)
123 | ## -------------------------------- estimation. not usable yet: unlike the functions above, it assumes [3, nver] / [c, nver] layouts.
124 | def fit_light(image, vertices, colors, triangles, vis_ind, lamb = 10, max_iter = 3):
125 | [h, w, c] = image.shape
126 |
127 | # surface normal
128 | norm = get_normal(vertices, triangles)
129 |
130 | nver = vertices.shape[1]
131 |
132 | # vertices --> corresponding image pixel
133 | pt2d = vertices[:2, :]
134 |
135 | pt2d[0,:] = np.minimum(np.maximum(pt2d[0,:], 0), w - 1)
136 | pt2d[1,:] = np.minimum(np.maximum(pt2d[1,:], 0), h - 1)
137 | pt2d = np.round(pt2d).astype(np.int32) # 2 x nver
138 |
139 | image_pixel = image[pt2d[1,:], pt2d[0,:], :] # nver x 3
140 | image_pixel = image_pixel.T # 3 x nver
141 |
142 | # vertices --> corresponding mean texture pixel with illumination
143 | # Spherical Harmonic Basis
144 | harmonic_dim = 9
145 |     nx = norm[0,:]
146 |     ny = norm[1,:]
147 |     nz = norm[2,:]
148 | harmonic = np.zeros((nver, harmonic_dim))
149 |
150 | pi = np.pi
151 |     harmonic[:,0] = np.sqrt(1/(4*pi)) * np.ones((nver,))
152 |     harmonic[:,1] = np.sqrt(3/(4*pi)) * nx
153 |     harmonic[:,2] = np.sqrt(3/(4*pi)) * ny
154 |     harmonic[:,3] = np.sqrt(3/(4*pi)) * nz
155 |     harmonic[:,4] = 1/2. * np.sqrt(3/(4*pi)) * (2*nz**2 - nx**2 - ny**2)
156 |     harmonic[:,5] = 3 * np.sqrt(5/(12*pi)) * (ny*nz)
157 |     harmonic[:,6] = 3 * np.sqrt(5/(12*pi)) * (nx*nz)
158 |     harmonic[:,7] = 3 * np.sqrt(5/(12*pi)) * (nx*ny)
159 |     harmonic[:,8] = 3/2. * np.sqrt(5/(12*pi)) * (nx*nx - ny*ny)
160 |
161 | '''
162 | I' = sum(albedo * lj * hj) j = 0:9 (albedo = tex)
163 | set A = albedo*h (n x 9)
164 | alpha = lj (9 x 1)
165 | Y = I (n x 1)
166 | Y' = A.dot(alpha)
167 |
168 | opt function:
169 |         ||Y - A*alpha||^2 + lambda*(alpha'*alpha)
170 |     result (set the gradient to zero):
171 |         A'*(A*alpha - Y) + lambda*alpha = 0
172 |         ==>
173 |         (A'*A + lambda*I)*alpha = A'*Y
174 | left: 9 x 9
175 | right: 9 x 1
176 | '''
177 | n_vis_ind = len(vis_ind)
178 | n = n_vis_ind*c
179 |
180 | Y = np.zeros((n, 1))
181 | A = np.zeros((n, 9))
182 | light = np.zeros((3, 1))
183 |
184 | for k in range(c):
185 | Y[k*n_vis_ind:(k+1)*n_vis_ind, :] = image_pixel[k, vis_ind][:, np.newaxis]
186 |         A[k*n_vis_ind:(k+1)*n_vis_ind, :] = colors[k, vis_ind][:, np.newaxis] * harmonic[vis_ind, :]
187 |         Ac = colors[k, vis_ind][:, np.newaxis]
188 | Yc = image_pixel[k, vis_ind][:, np.newaxis]
189 | light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))
190 |
191 | for i in range(max_iter):
192 |
193 | Yc = Y.copy()
194 | for k in range(c):
195 | Yc[k*n_vis_ind:(k+1)*n_vis_ind, :] /= light[k]
196 |
197 | # update alpha
198 |         equation_left = np.dot(A.T, A) + lamb*np.eye(harmonic_dim) # ridge term from lambda*(alpha'*alpha)
199 | equation_right = np.dot(A.T, Yc)
200 | alpha = np.dot(np.linalg.inv(equation_left), equation_right)
201 |
202 | # update light
203 | for k in range(c):
204 | Ac = A[k*n_vis_ind:(k+1)*n_vis_ind, :].dot(alpha)
205 | Yc = Y[k*n_vis_ind:(k+1)*n_vis_ind, :]
206 | light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))
207 |
208 |     appearance = np.zeros_like(colors)
209 |     for k in range(c):
210 |         tmp = np.dot(harmonic*colors[k, :][:, np.newaxis], alpha*light[k])
211 |         appearance[k,:] = tmp.T
212 |
213 | appearance = np.minimum(np.maximum(appearance, 0), 1)
214 |
215 | return appearance
216 |
217 |
--------------------------------------------------------------------------------
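Note: once normals are available, the diffuse term in add_light above is plain NumPy. A self-contained sketch of the same broadcasting with stand-in normals (dummy data, no cython extension needed):

    import numpy as np

    nver = 4
    colors = np.full((nver, 3), 0.8)                      # albedo
    vertices = np.random.rand(nver, 3)
    normals = np.tile([0., 0., 1.], (nver, 1))            # stand-in unit normals
    light_positions = np.array([[0., 0., 5.]])            # one light, [nlight, 3]
    light_intensities = np.array([[1., 1., 1.]])          # [nlight, 3]

    d = vertices[np.newaxis] - light_positions[:, np.newaxis]   # [nlight, nver, 3]
    d = d / np.linalg.norm(d, axis=2, keepdims=True)
    n_dot_l = np.sum(normals[np.newaxis] * d, axis=2)           # [nlight, nver]
    diffuse = np.sum(colors[np.newaxis] * n_dot_l[..., np.newaxis]
                     * light_intensities[:, np.newaxis], axis=0)
    lit_colors = np.clip(diffuse, 0, 1)                         # [nver, 3]
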
/utils/mesh/.ipynb_checkpoints/render-checkpoint.py:
--------------------------------------------------------------------------------
1 | '''
2 | functions about rendering mesh(from 3d obj to 2d image).
3 | only use rasterization render here.
4 | Note that:
5 | 1. Generally, a render func includes camera, light and rasterization. Here there is no camera or light (I write these in other files)
6 | 2. Generally, the input vertices are normalized to [-1,1] and centered on [0, 0]. (in world space)
7 |    Here, the vertices use image coords, which center on [w/2, h/2] with the y-axis pointing in the opposite direction.
8 | Means: render here only conducts interpolation. (I just want to make the input flexible)
9 |
10 | Author: Yao Feng
11 | Mail: yaofeng1995@gmail.com
12 | '''
13 | from __future__ import absolute_import
14 | from __future__ import division
15 | from __future__ import print_function
16 |
17 | import numpy as np
18 | from time import time
19 | import sys
20 | # import ipdb       # debug hook, disabled: a module-level set_trace() stops every import
21 | # ipdb.set_trace()
22 | sys.path.insert(0, '/home/liuyinglu2/notespace/code/face3d/face3d/mesh')
23 | import cython
24 | import mesh_core_cython
25 | #from .cython import mesh_core_cython
26 |
27 | def rasterize_triangles(vertices, triangles, h, w):
28 | '''
29 | Args:
30 | vertices: [nver, 3]
31 | triangles: [ntri, 3]
32 | h: height
33 | w: width
34 | Returns:
35 |         depth_buffer: [h, w] saves the depth; here, the bigger the z, the nearer the point.
36 | triangle_buffer: [h, w] saves the tri id(-1 for no triangle).
37 | barycentric_weight: [h, w, 3] saves corresponding barycentric weight.
38 |
39 | # Each triangle has 3 vertices & Each vertex has 3 coordinates x, y, z.
40 | # h, w is the size of rendering
41 | '''
42 |
43 | # initial
44 |     depth_buffer = np.zeros([h, w]) - 999999. # set the initial z to the farthest position
45 |     triangle_buffer = np.zeros([h, w], dtype = np.int32) - 1 # if tri id = -1, the pixel has no triangle correspondence
46 | barycentric_weight = np.zeros([h, w, 3], dtype = np.float32) #
47 |
48 | vertices = vertices.astype(np.float32).copy()
49 | triangles = triangles.astype(np.int32).copy()
50 |
51 | mesh_core_cython.rasterize_triangles_core(
52 | vertices, triangles,
53 | depth_buffer, triangle_buffer, barycentric_weight,
54 | vertices.shape[0], triangles.shape[0],
55 | h, w)
56 |     return depth_buffer, triangle_buffer, barycentric_weight
57 | def render_colors(vertices, triangles, colors, h, w, c = 3, BG = None):
58 | ''' render mesh with colors
59 | Args:
60 | vertices: [nver, 3]
61 | triangles: [ntri, 3]
62 | colors: [nver, 3]
63 | h: height
64 | w: width
65 | c: channel
66 | BG: background image
67 | Returns:
68 | image: [h, w, c]. rendered image./rendering.
69 | '''
70 |
71 | # initial
72 | if BG is None:
73 | image = np.zeros((h, w, c), dtype = np.float32)
74 | else:
75 | assert BG.shape[0] == h and BG.shape[1] == w and BG.shape[2] == c
76 | image = BG
77 | depth_buffer = np.zeros([h, w], dtype = np.float32, order = 'C') - 999999.
78 |
79 |     # change orders. --> C-contiguous order (row major)
80 | vertices = vertices.astype(np.float32).copy()
81 | triangles = triangles.astype(np.int32).copy()
82 | colors = colors.astype(np.float32).copy()
83 | ###
84 | st = time()
85 | mesh_core_cython.render_colors_core(
86 | image, vertices, triangles,
87 | colors,
88 | depth_buffer,
89 | vertices.shape[0], triangles.shape[0],
90 | h, w, c)
91 | return image
92 |
93 |
94 | def render_texture(vertices, triangles, texture, tex_coords, tex_triangles, h, w, c = 3, mapping_type = 'nearest', BG = None):
95 | ''' render mesh with texture map
96 | Args:
97 |         vertices: [nver, 3]
98 |         triangles: [ntri, 3]
99 | texture: [tex_h, tex_w, 3]
100 | tex_coords: [ntexcoords, 3]
101 | tex_triangles: [ntri, 3]
102 | h: height of rendering
103 | w: width of rendering
104 | c: channel
105 | mapping_type: 'bilinear' or 'nearest'
106 | '''
107 | # initial
108 | if BG is None:
109 | image = np.zeros((h, w, c), dtype = np.float32)
110 | else:
111 | assert BG.shape[0] == h and BG.shape[1] == w and BG.shape[2] == c
112 | image = BG
113 |
114 | depth_buffer = np.zeros([h, w], dtype = np.float32, order = 'C') - 999999.
115 |
116 | tex_h, tex_w, tex_c = texture.shape
117 | if mapping_type == 'nearest':
118 | mt = int(0)
119 | elif mapping_type == 'bilinear':
120 | mt = int(1)
121 | else:
122 | mt = int(0)
123 |
124 | # -> C order
125 | vertices = vertices.astype(np.float32).copy()
126 | triangles = triangles.astype(np.int32).copy()
127 | texture = texture.astype(np.float32).copy()
128 | tex_coords = tex_coords.astype(np.float32).copy()
129 | tex_triangles = tex_triangles.astype(np.int32).copy()
130 |
131 | mesh_core_cython.render_texture_core(
132 | image, vertices, triangles,
133 | texture, tex_coords, tex_triangles,
134 | depth_buffer,
135 | vertices.shape[0], tex_coords.shape[0], triangles.shape[0],
136 | h, w, c,
137 | tex_h, tex_w, tex_c,
138 | mt)
139 | return image
140 |
141 |
--------------------------------------------------------------------------------
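Note: with the cython extension built (see /utils/mesh/cython/setup.py below), render_colors is the usual entry point. A minimal sketch with one triangle already in image coordinates (dummy geometry):

    import numpy as np
    from utils.mesh import render   # needs the compiled mesh_core_cython extension

    h = w = 64
    vertices = np.array([[10., 10., 0.], [50., 12., 0.], [30., 50., 0.]])  # image coords, z for depth
    triangles = np.array([[0, 1, 2]])
    colors = np.ones((3, 3))                                               # one white triangle
    image = render.render_colors(vertices, triangles, colors, h, w)        # (64, 64, 3) float32
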
/utils/mesh/.ipynb_checkpoints/transform-checkpoint.py:
--------------------------------------------------------------------------------
1 | '''
2 | Functions about transforming mesh(changing the position: modify vertices).
3 | 1. forward: transform(transform, camera, project).
4 | 2. backward: estimate transform matrix from correspondences.
5 |
6 | Author: Yao Feng
7 | Mail: yaofeng1995@gmail.com
8 | '''
9 |
10 | from __future__ import absolute_import
11 | from __future__ import division
12 | from __future__ import print_function
13 |
14 | import numpy as np
15 | import math
16 | from math import cos, sin
17 |
18 | def angle2matrix(angles):
19 | ''' get rotation matrix from three rotation angles(degree). right-handed.
20 | Args:
21 | angles: [3,]. x, y, z angles
22 | x: pitch. positive for looking down.
23 | y: yaw. positive for looking left.
24 | z: roll. positive for tilting head right.
25 | Returns:
26 | R: [3, 3]. rotation matrix.
27 | '''
28 | x, y, z = np.deg2rad(angles[0]), np.deg2rad(angles[1]), np.deg2rad(angles[2])
29 | # x
30 | Rx=np.array([[1, 0, 0],
31 | [0, cos(x), -sin(x)],
32 | [0, sin(x), cos(x)]])
33 | # y
34 | Ry=np.array([[ cos(y), 0, sin(y)],
35 | [ 0, 1, 0],
36 | [-sin(y), 0, cos(y)]])
37 | # z
38 | Rz=np.array([[cos(z), -sin(z), 0],
39 | [sin(z), cos(z), 0],
40 | [ 0, 0, 1]])
41 |
42 | R=Rz.dot(Ry.dot(Rx))
43 | return R.astype(np.float32)
44 |
45 | def angle2matrix_3ddfa(angles):
46 | ''' get rotation matrix from three rotation angles(radian). The same as in 3DDFA.
47 | Args:
48 | angles: [3,]. x, y, z angles
49 | x: pitch.
50 | y: yaw.
51 | z: roll.
52 | Returns:
53 | R: 3x3. rotation matrix.
54 | '''
55 | # x, y, z = np.deg2rad(angles[0]), np.deg2rad(angles[1]), np.deg2rad(angles[2])
56 | x, y, z = angles[0], angles[1], angles[2]
57 |
58 | # x
59 | Rx=np.array([[1, 0, 0],
60 | [0, cos(x), sin(x)],
61 | [0, -sin(x), cos(x)]])
62 | # y
63 | Ry=np.array([[ cos(y), 0, -sin(y)],
64 | [ 0, 1, 0],
65 | [sin(y), 0, cos(y)]])
66 | # z
67 | Rz=np.array([[cos(z), sin(z), 0],
68 | [-sin(z), cos(z), 0],
69 | [ 0, 0, 1]])
70 | R = Rx.dot(Ry).dot(Rz)
71 | return R.astype(np.float32)
72 |
73 |
74 | ## ------------------------------------------ 1. transform(transform, project, camera).
75 | ## ---------- 3d-3d transform. Transform obj in world space
76 | def rotate(vertices, angles):
77 | ''' rotate vertices.
78 | X_new = R.dot(X). X: 3 x 1
79 | Args:
80 | vertices: [nver, 3].
81 | rx, ry, rz: degree angles
82 | rx: pitch. positive for looking down
83 | ry: yaw. positive for looking left
84 | rz: roll. positive for tilting head right
85 | Returns:
86 | rotated vertices: [nver, 3]
87 | '''
88 | R = angle2matrix(angles)
89 | rotated_vertices = vertices.dot(R.T)
90 |
91 | return rotated_vertices
92 |
93 | def similarity_transform(vertices, s, R, t3d):
94 | ''' similarity transform. dof = 7.
95 | 3D: s*R.dot(X) + t
96 | Homo: M = [[sR, t],[0^T, 1]]. M.dot(X)
97 | Args:(float32)
98 | vertices: [nver, 3].
99 | s: [1,]. scale factor.
100 | R: [3,3]. rotation matrix.
101 | t3d: [3,]. 3d translation vector.
102 | Returns:
103 | transformed vertices: [nver, 3]
104 | '''
105 | t3d = np.squeeze(np.array(t3d, dtype = np.float32))
106 | transformed_vertices = s * vertices.dot(R.T) + t3d[np.newaxis, :]
107 |
108 | return transformed_vertices
109 |
110 |
111 | ## -------------- Camera. from world space to camera space
112 | # Ref: https://cs184.eecs.berkeley.edu/lecture/transforms-2
113 | def normalize(x):
114 | epsilon = 1e-12
115 | norm = np.sqrt(np.sum(x**2, axis = 0))
116 | norm = np.maximum(norm, epsilon)
117 | return x/norm
118 |
119 | def lookat_camera(vertices, eye, at = None, up = None):
120 | """ 'look at' transformation: from world space to camera space
121 | standard camera space:
122 | camera located at the origin.
123 | looking down negative z-axis.
124 | vertical vector is y-axis.
125 | Xcam = R(X - C)
126 | Homo: [[R, -RC], [0, 1]]
127 | Args:
128 | vertices: [nver, 3]
129 | eye: [3,] the XYZ world space position of the camera.
130 | at: [3,] a position along the center of the camera's gaze.
131 | up: [3,] up direction
132 | Returns:
133 | transformed_vertices: [nver, 3]
134 | """
135 | if at is None:
136 | at = np.array([0, 0, 0], np.float32)
137 | if up is None:
138 | up = np.array([0, 1, 0], np.float32)
139 |
140 | eye = np.array(eye).astype(np.float32)
141 | at = np.array(at).astype(np.float32)
142 |     z_axis = -normalize(at - eye) # look forward
143 |     x_axis = normalize(np.cross(up, z_axis)) # look right
144 |     y_axis = np.cross(z_axis, x_axis) # look up
145 | 
146 |     R = np.stack((x_axis, y_axis, z_axis))#, axis = 0) # 3 x 3
147 | transformed_vertices = vertices - eye # translation
148 | transformed_vertices = transformed_vertices.dot(R.T) # rotation
149 | return transformed_vertices
150 |
151 | ## --------- 3d-2d project. from camera space to image plane
152 | # generally, image plane only keeps x,y channels, here reserve z channel for calculating z-buffer.
153 | def orthographic_project(vertices):
154 | ''' scaled orthographic projection(just delete z)
155 | assumes: variations in depth over the object is small relative to the mean distance from camera to object
156 |     x -> x*f/z, y -> y*f/z, z -> f.
157 | for point i,j. zi~=zj. so just delete z
158 | ** often used in face
159 | Homo: P = [[1,0,0,0], [0,1,0,0], [0,0,1,0]]
160 | Args:
161 | vertices: [nver, 3]
162 | Returns:
163 |         projected_vertices: [nver, 3]. z is kept for the z-buffer.
164 | '''
165 | return vertices.copy()
166 |
167 | def perspective_project(vertices, fovy, aspect_ratio = 1., near = 0.1, far = 1000.):
168 | ''' perspective projection.
169 | Args:
170 | vertices: [nver, 3]
171 | fovy: vertical angular field of view. degree.
172 | aspect_ratio : width / height of field of view
173 | near : depth of near clipping plane
174 | far : depth of far clipping plane
175 | Returns:
176 | projected_vertices: [nver, 3]
177 | '''
178 | fovy = np.deg2rad(fovy)
179 | top = near*np.tan(fovy)
180 | bottom = -top
181 | right = top*aspect_ratio
182 | left = -right
183 |
184 | #-- homo
185 | P = np.array([[near/right, 0, 0, 0],
186 | [0, near/top, 0, 0],
187 | [0, 0, -(far+near)/(far-near), -2*far*near/(far-near)],
188 | [0, 0, -1, 0]])
189 | vertices_homo = np.hstack((vertices, np.ones((vertices.shape[0], 1)))) # [nver, 4]
190 | projected_vertices = vertices_homo.dot(P.T)
191 | projected_vertices = projected_vertices/projected_vertices[:,3:]
192 | projected_vertices = projected_vertices[:,:3]
193 | projected_vertices[:,2] = -projected_vertices[:,2]
194 |
195 | #-- non homo. only fovy
196 | # projected_vertices = vertices.copy()
197 | # projected_vertices[:,0] = -(near/right)*vertices[:,0]/vertices[:,2]
198 | # projected_vertices[:,1] = -(near/top)*vertices[:,1]/vertices[:,2]
199 | return projected_vertices
200 |
201 |
202 | def to_image(vertices, h, w, is_perspective = False):
203 | ''' change vertices to image coord system
204 | 3d system: XYZ, center(0, 0, 0)
205 | 2d image: x(u), y(v). center(w/2, h/2), flip y-axis.
206 | Args:
207 | vertices: [nver, 3]
208 | h: height of the rendering
209 | w : width of the rendering
210 | Returns:
211 | projected_vertices: [nver, 3]
212 | '''
213 | image_vertices = vertices.copy()
214 | if is_perspective:
215 | # if perspective, the projected vertices are normalized to [-1, 1]. so change it to image size first.
216 | image_vertices[:,0] = image_vertices[:,0]*w/2
217 | image_vertices[:,1] = image_vertices[:,1]*h/2
218 | # move to center of image
219 | image_vertices[:,0] = image_vertices[:,0] + w/2
220 | image_vertices[:,1] = image_vertices[:,1] + h/2
221 | # flip vertices along y-axis.
222 | image_vertices[:,1] = h - image_vertices[:,1] - 1
223 | return image_vertices
224 |
225 |
226 | #### -------------------------------------------2. estimate transform matrix from correspondences.
227 | def estimate_affine_matrix_3d23d(X, Y):
228 | ''' Using least-squares solution
229 | Args:
230 | X: [n, 3]. 3d points(fixed)
231 | Y: [n, 3]. corresponding 3d points(moving). Y = PX
232 | Returns:
233 | P_Affine: (3, 4). Affine camera matrix (the third row is [0, 0, 0, 1]).
234 | '''
235 |     X_homo = np.hstack((X, np.ones([X.shape[0],1]))) # n x 4
236 |     P = np.linalg.lstsq(X_homo, Y, rcond = None)[0].T # Affine matrix. 3 x 4
237 | return P
238 |
239 | def estimate_affine_matrix_3d22d(X, x):
240 | ''' Using Golden Standard Algorithm for estimating an affine camera
241 | matrix P from world to image correspondences.
242 | See Alg.7.2. in MVGCV
243 | Code Ref: https://github.com/patrikhuber/eos/blob/master/include/eos/fitting/affine_camera_estimation.hpp
244 | x_homo = X_homo.dot(P_Affine)
245 | Args:
246 | X: [n, 3]. corresponding 3d points(fixed)
247 | x: [n, 2]. n>=4. 2d points(moving). x = PX
248 | Returns:
249 | P_Affine: [3, 4]. Affine camera matrix
250 | '''
251 | X = X.T; x = x.T
252 | assert(x.shape[1] == X.shape[1])
253 | n = x.shape[1]
254 | assert(n >= 4)
255 |
256 | #--- 1. normalization
257 | # 2d points
258 | mean = np.mean(x, 1) # (2,)
259 | x = x - np.tile(mean[:, np.newaxis], [1, n])
260 | average_norm = np.mean(np.sqrt(np.sum(x**2, 0)))
261 | scale = np.sqrt(2) / average_norm
262 | x = scale * x
263 |
264 | T = np.zeros((3,3), dtype = np.float32)
265 | T[0, 0] = T[1, 1] = scale
266 | T[:2, 2] = -mean*scale
267 | T[2, 2] = 1
268 |
269 | # 3d points
270 | X_homo = np.vstack((X, np.ones((1, n))))
271 | mean = np.mean(X, 1) # (3,)
272 | X = X - np.tile(mean[:, np.newaxis], [1, n])
273 |     # m = X_homo[:3,:] - X  # unused
274 | average_norm = np.mean(np.sqrt(np.sum(X**2, 0)))
275 | scale = np.sqrt(3) / average_norm
276 | X = scale * X
277 |
278 | U = np.zeros((4,4), dtype = np.float32)
279 | U[0, 0] = U[1, 1] = U[2, 2] = scale
280 | U[:3, 3] = -mean*scale
281 | U[3, 3] = 1
282 |
283 | # --- 2. equations
284 |     A = np.zeros((n*2, 8), dtype = np.float32)
285 | X_homo = np.vstack((X, np.ones((1, n)))).T
286 | A[:n, :4] = X_homo
287 | A[n:, 4:] = X_homo
288 | b = np.reshape(x, [-1, 1])
289 |
290 | # --- 3. solution
291 | p_8 = np.linalg.pinv(A).dot(b)
292 | P = np.zeros((3, 4), dtype = np.float32)
293 | P[0, :] = p_8[:4, 0]
294 | P[1, :] = p_8[4:, 0]
295 | P[-1, -1] = 1
296 |
297 | # --- 4. denormalization
298 | P_Affine = np.linalg.inv(T).dot(P.dot(U))
299 | return P_Affine
300 |
301 | def P2sRt(P):
302 | ''' decompositing camera matrix P
303 | Args:
304 | P: (3, 4). Affine Camera Matrix.
305 | Returns:
306 | s: scale factor.
307 | R: (3, 3). rotation matrix.
308 | t: (3,). translation.
309 | '''
310 | t = P[:, 3]
311 | R1 = P[0:1, :3]
312 | R2 = P[1:2, :3]
313 | s = (np.linalg.norm(R1) + np.linalg.norm(R2))/2.0
314 | r1 = R1/np.linalg.norm(R1)
315 | r2 = R2/np.linalg.norm(R2)
316 | r3 = np.cross(r1, r2)
317 |
318 | R = np.concatenate((r1, r2, r3), 0)
319 | return s, R, t
320 |
321 | #Ref: https://www.learnopencv.com/rotation-matrix-to-euler-angles/
322 | def isRotationMatrix(R):
323 |     ''' checks whether a matrix is a valid rotation matrix (i.e. orthogonal: R'R = I)
324 | '''
325 | Rt = np.transpose(R)
326 | shouldBeIdentity = np.dot(Rt, R)
327 | I = np.identity(3, dtype = R.dtype)
328 | n = np.linalg.norm(I - shouldBeIdentity)
329 | return n < 1e-6
330 |
331 | def matrix2angle(R):
332 | ''' get three Euler angles from Rotation Matrix
333 | Args:
334 | R: (3,3). rotation matrix
335 | Returns:
336 | x: pitch
337 | y: yaw
338 | z: roll
339 | '''
340 |     assert(isRotationMatrix(R))
341 | sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
342 |
343 | singular = sy < 1e-6
344 |
345 | if not singular :
346 | x = math.atan2(R[2,1] , R[2,2])
347 | y = math.atan2(-R[2,0], sy)
348 | z = math.atan2(R[1,0], R[0,0])
349 | else :
350 | x = math.atan2(-R[1,2], R[1,1])
351 | y = math.atan2(-R[2,0], sy)
352 | z = 0
353 |
354 | # rx, ry, rz = np.rad2deg(x), np.rad2deg(y), np.rad2deg(z)
355 | rx, ry, rz = x*180/np.pi, y*180/np.pi, z*180/np.pi
356 | return rx, ry, rz
357 |
358 | # def matrix2angle(R):
359 | # ''' compute three Euler angles from a Rotation Matrix. Ref: http://www.gregslabaugh.net/publications/euler.pdf
360 | # Args:
361 | # R: (3,3). rotation matrix
362 | # Returns:
363 | # x: yaw
364 | # y: pitch
365 | # z: roll
366 | # '''
367 | # # assert(isRotationMatrix(R))
368 |
369 | # if R[2,0] != 1 and R[2,0] != -1:
370 | # x = math.asin(R[2,0])
371 | # y = math.atan2(R[2,1]/cos(x), R[2,2]/cos(x))
372 | # z = math.atan2(R[1,0]/cos(x), R[0,0]/cos(x))
373 |
374 | # else:# Gimbal lock
375 | # z = 0 #can be anything
376 | # if R[2,0] == -1:
377 | # x = np.pi/2
378 | # y = z + math.atan2(R[0,1], R[0,2])
379 | # else:
380 | # x = -np.pi/2
381 | # y = -z + math.atan2(-R[0,1], -R[0,2])
382 |
383 | # return x, y, z
--------------------------------------------------------------------------------
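Note: the transform helpers are pure NumPy, so they can be sanity-checked directly. A sketch assuming utils/mesh/transform.py matches the checkpoint above:

    import numpy as np
    from utils.mesh.transform import angle2matrix, matrix2angle

    angles = [10., -20., 5.]                     # pitch, yaw, roll in degrees
    R = angle2matrix(angles)
    assert np.allclose(R.dot(R.T), np.eye(3), atol=1e-6)   # R is a valid rotation
    print(matrix2angle(R))                       # ~ (10.0, -20.0, 5.0)
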
/utils/mesh/.ipynb_checkpoints/vis-checkpoint.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 | from skimage import measure
8 | from mpl_toolkits.mplot3d import Axes3D
9 |
10 | def plot_mesh(vertices, triangles, subplot = [1,1,1], title = 'mesh', el = 90, az = -90, lwdt=.1, dist = 6, color = "grey"):
11 | '''
12 | plot the mesh
13 | Args:
14 | vertices: [nver, 3]
15 | triangles: [ntri, 3]
16 | '''
17 | ax = plt.subplot(subplot[0], subplot[1], subplot[2], projection = '3d')
18 | ax.plot_trisurf(vertices[:, 0], vertices[:, 1], vertices[:, 2], triangles = triangles, lw = lwdt, color = color, alpha = 1)
19 | ax.axis("off")
20 | ax.view_init(elev = el, azim = az)
21 | ax.dist = dist
22 | plt.title(title)
23 |
24 | ### -------------- Todo: use vtk to visualize mesh? or visvis? or VisPy?
25 |
--------------------------------------------------------------------------------
/utils/mesh/__init__.py:
--------------------------------------------------------------------------------
1 | from . import render
2 |
3 |
--------------------------------------------------------------------------------
/utils/mesh/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/mesh/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/utils/mesh/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/mesh/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/mesh/__pycache__/render.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/mesh/__pycache__/render.cpython-36.pyc
--------------------------------------------------------------------------------
/utils/mesh/__pycache__/render.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/mesh/__pycache__/render.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/mesh/cython/.ipynb_checkpoints/mesh_core-checkpoint.h:
--------------------------------------------------------------------------------
1 | #ifndef MESH_CORE_HPP_
2 | #define MESH_CORE_HPP_
3 |
4 | #include <stdio.h>
5 | #include <cmath>
6 | #include <algorithm>
7 | #include <string>
8 | #include <iostream>
9 | #include <fstream>
10 |
11 | using namespace std;
12 |
13 | class point
14 | {
15 | public:
16 | float x;
17 | float y;
18 |
19 | float dot(point p)
20 | {
21 | return this->x * p.x + this->y * p.y;
22 | }
23 |
24 | point operator-(const point& p)
25 | {
26 | point np;
27 | np.x = this->x - p.x;
28 | np.y = this->y - p.y;
29 | return np;
30 | }
31 |
32 | point operator+(const point& p)
33 | {
34 | point np;
35 | np.x = this->x + p.x;
36 | np.y = this->y + p.y;
37 | return np;
38 | }
39 |
40 | point operator*(float s)
41 | {
42 | point np;
43 | np.x = s * this->x;
44 | np.y = s * this->y;
45 | return np;
46 | }
47 | };
48 |
49 |
50 | bool isPointInTri(point p, point p0, point p1, point p2);
51 | void get_point_weight(float* weight, point p, point p0, point p1, point p2);
52 |
53 | void _get_normal_core(
54 | float* normal, float* tri_normal, int* triangles,
55 | int ntri);
56 |
57 | void _rasterize_triangles_core(
58 | float* vertices, int* triangles,
59 | float* depth_buffer, int* triangle_buffer, float* barycentric_weight,
60 | int nver, int ntri,
61 | int h, int w);
62 |
63 | void _render_colors_core(
64 | float* image, float* vertices, int* triangles,
65 | float* colors,
66 | float* depth_buffer,
67 | int nver, int ntri,
68 | int h, int w, int c);
69 |
70 | void _render_texture_core(
71 | float* image, float* vertices, int* triangles,
72 | float* texture, float* tex_coords, int* tex_triangles,
73 | float* depth_buffer,
74 | int nver, int tex_nver, int ntri,
75 | int h, int w, int c,
76 | int tex_h, int tex_w, int tex_c,
77 | int mapping_type);
78 |
79 | void _write_obj_with_colors_texture(string filename, string mtl_name,
80 | float* vertices, int* triangles, float* colors, float* uv_coords,
81 | int nver, int ntri, int ntexver);
82 |
83 | #endif
--------------------------------------------------------------------------------
/utils/mesh/cython/.ipynb_checkpoints/mesh_core_cython-checkpoint.pyx:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | cimport numpy as np
3 | from libcpp.string cimport string
4 |
5 | # use the Numpy-C-API from Cython
6 | np.import_array()
7 |
8 | # cdefine the signature of our c function
9 | cdef extern from "mesh_core.h":
10 | void _rasterize_triangles_core(
11 | float* vertices, int* triangles,
12 | float* depth_buffer, int* triangle_buffer, float* barycentric_weight,
13 | int nver, int ntri,
14 | int h, int w)
15 |
16 | void _render_colors_core(
17 | float* image, float* vertices, int* triangles,
18 | float* colors,
19 | float* depth_buffer,
20 | int nver, int ntri,
21 | int h, int w, int c)
22 |
23 | void _render_texture_core(
24 | float* image, float* vertices, int* triangles,
25 | float* texture, float* tex_coords, int* tex_triangles,
26 | float* depth_buffer,
27 | int nver, int tex_nver, int ntri,
28 | int h, int w, int c,
29 | int tex_h, int tex_w, int tex_c,
30 | int mapping_type)
31 |
32 | void _get_normal_core(
33 | float* normal, float* tri_normal, int* triangles,
34 | int ntri)
35 |
36 | void _write_obj_with_colors_texture(string filename, string mtl_name,
37 | float* vertices, int* triangles, float* colors, float* uv_coords,
38 | int nver, int ntri, int ntexver)
39 |
40 | def get_normal_core(np.ndarray[float, ndim=2, mode = "c"] normal not None,
41 | np.ndarray[float, ndim=2, mode = "c"] tri_normal not None,
42 | np.ndarray[int, ndim=2, mode="c"] triangles not None,
43 | int ntri
44 | ):
45 | _get_normal_core(
46 |         <float*> np.PyArray_DATA(normal), <float*> np.PyArray_DATA(tri_normal), <int*> np.PyArray_DATA(triangles),
47 | ntri)
48 |
49 | def rasterize_triangles_core(
50 | np.ndarray[float, ndim=2, mode = "c"] vertices not None,
51 | np.ndarray[int, ndim=2, mode="c"] triangles not None,
52 | np.ndarray[float, ndim=2, mode = "c"] depth_buffer not None,
53 | np.ndarray[int, ndim=2, mode = "c"] triangle_buffer not None,
54 | np.ndarray[float, ndim=2, mode = "c"] barycentric_weight not None,
55 | int nver, int ntri,
56 | int h, int w
57 | ):
58 | _rasterize_triangles_core(
59 |         <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles),
60 |         <float*> np.PyArray_DATA(depth_buffer), <int*> np.PyArray_DATA(triangle_buffer), <float*> np.PyArray_DATA(barycentric_weight),
61 | nver, ntri,
62 | h, w)
63 |
64 | def render_colors_core(np.ndarray[float, ndim=3, mode = "c"] image not None,
65 | np.ndarray[float, ndim=2, mode = "c"] vertices not None,
66 | np.ndarray[int, ndim=2, mode="c"] triangles not None,
67 | np.ndarray[float, ndim=2, mode = "c"] colors not None,
68 | np.ndarray[float, ndim=2, mode = "c"] depth_buffer not None,
69 | int nver, int ntri,
70 | int h, int w, int c
71 | ):
72 | _render_colors_core(
73 |         <float*> np.PyArray_DATA(image), <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles),
74 |         <float*> np.PyArray_DATA(colors),
75 |         <float*> np.PyArray_DATA(depth_buffer),
76 | nver, ntri,
77 | h, w, c)
78 |
79 | def render_texture_core(np.ndarray[float, ndim=3, mode = "c"] image not None,
80 | np.ndarray[float, ndim=2, mode = "c"] vertices not None,
81 | np.ndarray[int, ndim=2, mode="c"] triangles not None,
82 | np.ndarray[float, ndim=3, mode = "c"] texture not None,
83 | np.ndarray[float, ndim=2, mode = "c"] tex_coords not None,
84 | np.ndarray[int, ndim=2, mode="c"] tex_triangles not None,
85 | np.ndarray[float, ndim=2, mode = "c"] depth_buffer not None,
86 | int nver, int tex_nver, int ntri,
87 | int h, int w, int c,
88 | int tex_h, int tex_w, int tex_c,
89 | int mapping_type
90 | ):
91 | _render_texture_core(
92 |         <float*> np.PyArray_DATA(image), <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles),
93 |         <float*> np.PyArray_DATA(texture), <float*> np.PyArray_DATA(tex_coords), <int*> np.PyArray_DATA(tex_triangles),
94 |         <float*> np.PyArray_DATA(depth_buffer),
95 | nver, tex_nver, ntri,
96 | h, w, c,
97 | tex_h, tex_w, tex_c,
98 | mapping_type)
99 |
100 | def write_obj_with_colors_texture_core(string filename, string mtl_name,
101 | np.ndarray[float, ndim=2, mode = "c"] vertices not None,
102 | np.ndarray[int, ndim=2, mode="c"] triangles not None,
103 | np.ndarray[float, ndim=2, mode = "c"] colors not None,
104 | np.ndarray[float, ndim=2, mode = "c"] uv_coords not None,
105 | int nver, int ntri, int ntexver
106 | ):
107 | _write_obj_with_colors_texture(filename, mtl_name,
108 |         <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles), <float*> np.PyArray_DATA(colors), <float*> np.PyArray_DATA(uv_coords),
109 | nver, ntri, ntexver)
110 |
--------------------------------------------------------------------------------
/utils/mesh/cython/build/temp.linux-x86_64-3.6/mesh_core.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/mesh/cython/build/temp.linux-x86_64-3.6/mesh_core.o
--------------------------------------------------------------------------------
/utils/mesh/cython/build/temp.linux-x86_64-3.6/mesh_core_cython.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/mesh/cython/build/temp.linux-x86_64-3.6/mesh_core_cython.o
--------------------------------------------------------------------------------
/utils/mesh/cython/mesh_core.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | functions that can not be optimized by vectorization in python.
3 | 1. rasterization. (need to process each triangle)
4 | 2. normal of each vertex. (use one-ring, need to process each vertex)
5 | 3. write obj (seems it could be vectorized? anyway, writing it in c++ is simple, so the function is added here too. --> however, why is writing it in c++ still slow?)
6 |
7 | Author: Yao Feng
8 | Mail: yaofeng1995@gmail.com
9 | */
10 |
11 | #include "mesh_core.h"
12 |
13 |
14 | /* Judge whether the point is in the triangle
15 | Method:
16 | http://blackpawn.com/texts/pointinpoly/
17 | Args:
18 | point: [x, y]
19 | tri_points: three vertices(2d points) of a triangle. 2 coords x 3 vertices
20 | Returns:
21 | bool: true for in triangle
22 | */
23 | bool isPointInTri(point p, point p0, point p1, point p2)
24 | {
25 | // vectors
26 | point v0, v1, v2;
27 | v0 = p2 - p0;
28 | v1 = p1 - p0;
29 | v2 = p - p0;
30 |
31 | // dot products
32 | float dot00 = v0.dot(v0); //v0.x * v0.x + v0.y * v0.y //np.dot(v0.T, v0)
33 | float dot01 = v0.dot(v1); //v0.x * v1.x + v0.y * v1.y //np.dot(v0.T, v1)
34 | float dot02 = v0.dot(v2); //v0.x * v2.x + v0.y * v2.y //np.dot(v0.T, v2)
35 | float dot11 = v1.dot(v1); //v1.x * v1.x + v1.y * v1.y //np.dot(v1.T, v1)
36 | float dot12 = v1.dot(v2); //v1.x * v2.x + v1.y * v2.y//np.dot(v1.T, v2)
37 |
38 | // barycentric coordinates
39 | float inverDeno;
40 | if(dot00*dot11 - dot01*dot01 == 0)
41 | inverDeno = 0;
42 | else
43 | inverDeno = 1/(dot00*dot11 - dot01*dot01);
44 |
45 | float u = (dot11*dot02 - dot01*dot12)*inverDeno;
46 | float v = (dot00*dot12 - dot01*dot02)*inverDeno;
47 |
48 | // check if point in triangle
49 | return (u >= 0) && (v >= 0) && (u + v < 1);
50 | }
51 |
52 |
53 | void get_point_weight(float* weight, point p, point p0, point p1, point p2)
54 | {
55 | // vectors
56 | point v0, v1, v2;
57 | v0 = p2 - p0;
58 | v1 = p1 - p0;
59 | v2 = p - p0;
60 |
61 | // dot products
62 | float dot00 = v0.dot(v0); //v0.x * v0.x + v0.y * v0.y //np.dot(v0.T, v0)
63 | float dot01 = v0.dot(v1); //v0.x * v1.x + v0.y * v1.y //np.dot(v0.T, v1)
64 | float dot02 = v0.dot(v2); //v0.x * v2.x + v0.y * v2.y //np.dot(v0.T, v2)
65 | float dot11 = v1.dot(v1); //v1.x * v1.x + v1.y * v1.y //np.dot(v1.T, v1)
66 | float dot12 = v1.dot(v2); //v1.x * v2.x + v1.y * v2.y//np.dot(v1.T, v2)
67 |
68 | // barycentric coordinates
69 | float inverDeno;
70 | if(dot00*dot11 - dot01*dot01 == 0)
71 | inverDeno = 0;
72 | else
73 | inverDeno = 1/(dot00*dot11 - dot01*dot01);
74 |
75 | float u = (dot11*dot02 - dot01*dot12)*inverDeno;
76 | float v = (dot00*dot12 - dot01*dot02)*inverDeno;
77 |
78 | // weight
79 | weight[0] = 1 - u - v;
80 | weight[1] = v;
81 | weight[2] = u;
82 | }
83 |
84 |
85 | void _get_normal_core(
86 | float* normal, float* tri_normal, int* triangles,
87 | int ntri)
88 | {
89 | int i, j;
90 | int tri_p0_ind, tri_p1_ind, tri_p2_ind;
91 |
92 | for(i = 0; i < ntri; i++)
93 | {
94 | tri_p0_ind = triangles[3*i];
95 | tri_p1_ind = triangles[3*i + 1];
96 | tri_p2_ind = triangles[3*i + 2];
97 |
98 | for(j = 0; j < 3; j++)
99 | {
100 | normal[3*tri_p0_ind + j] = normal[3*tri_p0_ind + j] + tri_normal[3*i + j];
101 | normal[3*tri_p1_ind + j] = normal[3*tri_p1_ind + j] + tri_normal[3*i + j];
102 | normal[3*tri_p2_ind + j] = normal[3*tri_p2_ind + j] + tri_normal[3*i + j];
103 | }
104 | }
105 | }
106 |
107 |
108 | void _rasterize_triangles_core(
109 | float* vertices, int* triangles,
110 | float* depth_buffer, int* triangle_buffer, float* barycentric_weight,
111 | int nver, int ntri,
112 | int h, int w)
113 | {
114 | int i;
115 | int x, y, k;
116 | int tri_p0_ind, tri_p1_ind, tri_p2_ind;
117 | point p0, p1, p2, p;
118 | int x_min, x_max, y_min, y_max;
119 | float p_depth, p0_depth, p1_depth, p2_depth;
120 | float weight[3];
121 |
122 | for(i = 0; i < ntri; i++)
123 | {
124 | tri_p0_ind = triangles[3*i];
125 | tri_p1_ind = triangles[3*i + 1];
126 | tri_p2_ind = triangles[3*i + 2];
127 |
128 | p0.x = vertices[3*tri_p0_ind]; p0.y = vertices[3*tri_p0_ind + 1]; p0_depth = vertices[3*tri_p0_ind + 2];
129 | p1.x = vertices[3*tri_p1_ind]; p1.y = vertices[3*tri_p1_ind + 1]; p1_depth = vertices[3*tri_p1_ind + 2];
130 | p2.x = vertices[3*tri_p2_ind]; p2.y = vertices[3*tri_p2_ind + 1]; p2_depth = vertices[3*tri_p2_ind + 2];
131 |
132 | x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0);
133 | x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1);
134 |
135 | y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0);
136 | y_max = min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1);
137 |
138 | if(x_max < x_min || y_max < y_min)
139 | {
140 | continue;
141 | }
142 |
143 | for(y = y_min; y <= y_max; y++) //h
144 | {
145 | for(x = x_min; x <= x_max; x++) //w
146 | {
147 | p.x = x; p.y = y;
148 | if(p.x < 2 || p.x > w - 3 || p.y < 2 || p.y > h - 3 || isPointInTri(p, p0, p1, p2))
149 | {
150 | get_point_weight(weight, p, p0, p1, p2);
151 | p_depth = weight[0]*p0_depth + weight[1]*p1_depth + weight[2]*p2_depth;
152 |
153 | if((p_depth > depth_buffer[y*w + x]))
154 | {
155 | depth_buffer[y*w + x] = p_depth;
156 | triangle_buffer[y*w + x] = i;
157 | for(k = 0; k < 3; k++)
158 | {
159 | barycentric_weight[y*w*3 + x*3 + k] = weight[k];
160 | }
161 | }
162 | }
163 | }
164 | }
165 | }
166 | }
167 |
168 |
169 | void _render_colors_core(
170 | float* image, float* vertices, int* triangles,
171 | float* colors,
172 | float* depth_buffer,
173 | int nver, int ntri,
174 | int h, int w, int c)
175 | {
176 | int i;
177 | int x, y, k;
178 | int tri_p0_ind, tri_p1_ind, tri_p2_ind;
179 | point p0, p1, p2, p;
180 | int x_min, x_max, y_min, y_max;
181 | float p_depth, p0_depth, p1_depth, p2_depth;
182 | float p_color, p0_color, p1_color, p2_color;
183 | float weight[3];
184 |
185 | for(i = 0; i < ntri; i++)
186 | {
187 | tri_p0_ind = triangles[3*i];
188 | tri_p1_ind = triangles[3*i + 1];
189 | tri_p2_ind = triangles[3*i + 2];
190 |
191 | p0.x = vertices[3*tri_p0_ind]; p0.y = vertices[3*tri_p0_ind + 1]; p0_depth = vertices[3*tri_p0_ind + 2];
192 | p1.x = vertices[3*tri_p1_ind]; p1.y = vertices[3*tri_p1_ind + 1]; p1_depth = vertices[3*tri_p1_ind + 2];
193 | p2.x = vertices[3*tri_p2_ind]; p2.y = vertices[3*tri_p2_ind + 1]; p2_depth = vertices[3*tri_p2_ind + 2];
194 |
195 | x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0);
196 | x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1);
197 |
198 | y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0);
199 | y_max = min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1);
200 |
201 | if(x_max < x_min || y_max < y_min)
202 | {
203 | continue;
204 | }
205 |
206 | for(y = y_min; y <= y_max; y++) //h
207 | {
208 | for(x = x_min; x <= x_max; x++) //w
209 | {
210 | p.x = x; p.y = y;
211 | if(p.x < 2 || p.x > w - 3 || p.y < 2 || p.y > h - 3 || isPointInTri(p, p0, p1, p2))
212 | {
213 | get_point_weight(weight, p, p0, p1, p2);
214 | p_depth = weight[0]*p0_depth + weight[1]*p1_depth + weight[2]*p2_depth;
215 |
216 | if((p_depth > depth_buffer[y*w + x]))
217 | {
218 | for(k = 0; k < c; k++) // c
219 | {
220 | p0_color = colors[c*tri_p0_ind + k];
221 | p1_color = colors[c*tri_p1_ind + k];
222 | p2_color = colors[c*tri_p2_ind + k];
223 |
224 | p_color = weight[0]*p0_color + weight[1]*p1_color + weight[2]*p2_color;
225 | image[y*w*c + x*c + k] = p_color;
226 | }
227 |
228 | depth_buffer[y*w + x] = p_depth;
229 | }
230 | }
231 | }
232 | }
233 | }
234 | }
235 |
236 |
237 | void _render_texture_core(
238 | float* image, float* vertices, int* triangles,
239 | float* texture, float* tex_coords, int* tex_triangles,
240 | float* depth_buffer,
241 | int nver, int tex_nver, int ntri,
242 | int h, int w, int c,
243 | int tex_h, int tex_w, int tex_c,
244 | int mapping_type)
245 | {
246 | int i;
247 | int x, y, k;
248 | int tri_p0_ind, tri_p1_ind, tri_p2_ind;
249 | int tex_tri_p0_ind, tex_tri_p1_ind, tex_tri_p2_ind;
250 | point p0, p1, p2, p;
251 | point tex_p0, tex_p1, tex_p2, tex_p;
252 | int x_min, x_max, y_min, y_max;
253 | float weight[3];
254 | float p_depth, p0_depth, p1_depth, p2_depth;
255 | float xd, yd;
256 | float ul, ur, dl, dr;
257 | for(i = 0; i < ntri; i++)
258 | {
259 | // mesh
260 | tri_p0_ind = triangles[3*i];
261 | tri_p1_ind = triangles[3*i + 1];
262 | tri_p2_ind = triangles[3*i + 2];
263 |
264 | p0.x = vertices[3*tri_p0_ind]; p0.y = vertices[3*tri_p0_ind + 1]; p0_depth = vertices[3*tri_p0_ind + 2];
265 | p1.x = vertices[3*tri_p1_ind]; p1.y = vertices[3*tri_p1_ind + 1]; p1_depth = vertices[3*tri_p1_ind + 2];
266 | p2.x = vertices[3*tri_p2_ind]; p2.y = vertices[3*tri_p2_ind + 1]; p2_depth = vertices[3*tri_p2_ind + 2];
267 |
268 | // texture
269 | tex_tri_p0_ind = tex_triangles[3*i];
270 | tex_tri_p1_ind = tex_triangles[3*i + 1];
271 | tex_tri_p2_ind = tex_triangles[3*i + 2];
272 |
273 |         tex_p0.x = tex_coords[3*tex_tri_p0_ind]; tex_p0.y = tex_coords[3*tex_tri_p0_ind + 1];
274 |         tex_p1.x = tex_coords[3*tex_tri_p1_ind]; tex_p1.y = tex_coords[3*tex_tri_p1_ind + 1];
275 |         tex_p2.x = tex_coords[3*tex_tri_p2_ind]; tex_p2.y = tex_coords[3*tex_tri_p2_ind + 1];
276 |
277 |
278 | x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0);
279 | x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1);
280 |
281 | y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0);
282 | y_max = min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1);
283 |
284 |
285 | if(x_max < x_min || y_max < y_min)
286 | {
287 | continue;
288 | }
289 |
290 | for(y = y_min; y <= y_max; y++) //h
291 | {
292 | for(x = x_min; x <= x_max; x++) //w
293 | {
294 | p.x = x; p.y = y;
295 | if(p.x < 2 || p.x > w - 3 || p.y < 2 || p.y > h - 3 || isPointInTri(p, p0, p1, p2))
296 | {
297 | get_point_weight(weight, p, p0, p1, p2);
298 | p_depth = weight[0]*p0_depth + weight[1]*p1_depth + weight[2]*p2_depth;
299 |
300 | if((p_depth > depth_buffer[y*w + x]))
301 | {
302 | // -- color from texture
303 | // cal weight in mesh tri
304 | get_point_weight(weight, p, p0, p1, p2);
305 | // cal coord in texture
306 | tex_p = tex_p0*weight[0] + tex_p1*weight[1] + tex_p2*weight[2];
307 | tex_p.x = max(min(tex_p.x, float(tex_w - 1)), float(0));
308 | tex_p.y = max(min(tex_p.y, float(tex_h - 1)), float(0));
309 |
310 | yd = tex_p.y - floor(tex_p.y);
311 | xd = tex_p.x - floor(tex_p.x);
312 | for(k = 0; k < c; k++)
313 | {
314 | if(mapping_type==0)// nearest
315 | {
316 | image[y*w*c + x*c + k] = texture[int(round(tex_p.y))*tex_w*tex_c + int(round(tex_p.x))*tex_c + k];
317 | }
318 | else//bilinear interp
319 | {
320 | ul = texture[(int)floor(tex_p.y)*tex_w*tex_c + (int)floor(tex_p.x)*tex_c + k];
321 | ur = texture[(int)floor(tex_p.y)*tex_w*tex_c + (int)ceil(tex_p.x)*tex_c + k];
322 | dl = texture[(int)ceil(tex_p.y)*tex_w*tex_c + (int)floor(tex_p.x)*tex_c + k];
323 | dr = texture[(int)ceil(tex_p.y)*tex_w*tex_c + (int)ceil(tex_p.x)*tex_c + k];
324 |
325 | image[y*w*c + x*c + k] = ul*(1-xd)*(1-yd) + ur*xd*(1-yd) + dl*(1-xd)*yd + dr*xd*yd;
326 | }
327 |
328 | }
329 |
330 | depth_buffer[y*w + x] = p_depth;
331 | }
332 | }
333 | }
334 | }
335 | }
336 | }
337 |
338 |
339 |
340 | // ------------------------------------------------- write
341 | // obj write
342 | // Ref: https://github.com/patrikhuber/eos/blob/master/include/eos/core/Mesh.hpp
343 | void _write_obj_with_colors_texture(string filename, string mtl_name,
344 | float* vertices, int* triangles, float* colors, float* uv_coords,
345 | int nver, int ntri, int ntexver)
346 | {
347 | int i;
348 |
349 | ofstream obj_file(filename.c_str());
350 |
351 | // first line of the obj file: the mtl name
352 | obj_file << "mtllib " << mtl_name << endl;
353 |
354 | // write vertices
355 | for (i = 0; i < nver; ++i)
356 | {
357 | obj_file << "v " << vertices[3*i] << " " << vertices[3*i + 1] << " " << vertices[3*i + 2] << colors[3*i] << " " << colors[3*i + 1] << " " << colors[3*i + 2] << endl;
358 | }
359 |
360 | // write uv coordinates
361 | for (i = 0; i < ntexver; ++i)
362 | {
363 | //obj_file << "vt " << uv_coords[2*i] << " " << (1 - uv_coords[2*i + 1]) << endl;
364 | obj_file << "vt " << uv_coords[2*i] << " " << uv_coords[2*i + 1] << endl;
365 | }
366 |
367 | obj_file << "usemtl FaceTexture" << endl;
368 | // write triangles
369 | for (i = 0; i < ntri; ++i)
370 | {
371 | // obj_file << "f " << triangles[3*i] << "/" << triangles[3*i] << " " << triangles[3*i + 1] << "/" << triangles[3*i + 1] << " " << triangles[3*i + 2] << "/" << triangles[3*i + 2] << endl;
372 | obj_file << "f " << triangles[3*i + 2] << "/" << triangles[3*i + 2] << " " << triangles[3*i + 1] << "/" << triangles[3*i + 1] << " " << triangles[3*i] << "/" << triangles[3*i] << endl;
373 | }
374 |
375 | }
376 |
--------------------------------------------------------------------------------
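Note: the barycentric logic shared by isPointInTri and get_point_weight above is easy to cross-check from Python. An equivalent NumPy sketch (illustration only, not part of the repo):

    import numpy as np

    def point_weight(p, p0, p1, p2):
        # same construction as get_point_weight: barycentric coords via dot products
        v0, v1, v2 = p2 - p0, p1 - p0, p - p0
        dot00, dot01, dot02 = v0 @ v0, v0 @ v1, v0 @ v2
        dot11, dot12 = v1 @ v1, v1 @ v2
        deno = dot00 * dot11 - dot01 * dot01
        inver_deno = 0. if deno == 0 else 1. / deno
        u = (dot11 * dot02 - dot01 * dot12) * inver_deno
        v = (dot00 * dot12 - dot01 * dot02) * inver_deno
        return np.array([1 - u - v, v, u])      # weights for p0, p1, p2

    w = point_weight(np.array([1., 1.]), np.array([0., 0.]),
                     np.array([3., 0.]), np.array([0., 3.]))
    assert np.isclose(w.sum(), 1.0) and (w >= 0).all()   # point lies inside the triangle
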
/utils/mesh/cython/mesh_core.h:
--------------------------------------------------------------------------------
1 | #ifndef MESH_CORE_HPP_
2 | #define MESH_CORE_HPP_
3 |
4 | #include <stdio.h>
5 | #include <cmath>
6 | #include <algorithm>
7 | #include <string>
8 | #include <iostream>
9 | #include <fstream>
10 |
11 | using namespace std;
12 |
13 | class point
14 | {
15 | public:
16 | float x;
17 | float y;
18 |
19 | float dot(point p)
20 | {
21 | return this->x * p.x + this->y * p.y;
22 | }
23 |
24 | point operator-(const point& p)
25 | {
26 | point np;
27 | np.x = this->x - p.x;
28 | np.y = this->y - p.y;
29 | return np;
30 | }
31 |
32 | point operator+(const point& p)
33 | {
34 | point np;
35 | np.x = this->x + p.x;
36 | np.y = this->y + p.y;
37 | return np;
38 | }
39 |
40 | point operator*(float s)
41 | {
42 | point np;
43 | np.x = s * this->x;
44 | np.y = s * this->y;
45 | return np;
46 | }
47 | };
48 |
49 |
50 | bool isPointInTri(point p, point p0, point p1, point p2);
51 | void get_point_weight(float* weight, point p, point p0, point p1, point p2);
52 |
53 | void _get_normal_core(
54 | float* normal, float* tri_normal, int* triangles,
55 | int ntri);
56 |
57 | void _rasterize_triangles_core(
58 | float* vertices, int* triangles,
59 | float* depth_buffer, int* triangle_buffer, float* barycentric_weight,
60 | int nver, int ntri,
61 | int h, int w);
62 |
63 | void _render_colors_core(
64 | float* image, float* vertices, int* triangles,
65 | float* colors,
66 | float* depth_buffer,
67 | int nver, int ntri,
68 | int h, int w, int c);
69 |
70 | void _render_texture_core(
71 | float* image, float* vertices, int* triangles,
72 | float* texture, float* tex_coords, int* tex_triangles,
73 | float* depth_buffer,
74 | int nver, int tex_nver, int ntri,
75 | int h, int w, int c,
76 | int tex_h, int tex_w, int tex_c,
77 | int mapping_type);
78 |
79 | void _write_obj_with_colors_texture(string filename, string mtl_name,
80 | float* vertices, int* triangles, float* colors, float* uv_coords,
81 | int nver, int ntri, int ntexver);
82 |
83 | #endif
--------------------------------------------------------------------------------
/utils/mesh/cython/mesh_core_cython.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/mesh/cython/mesh_core_cython.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/utils/mesh/cython/mesh_core_cython.pyx:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | cimport numpy as np
3 | from libcpp.string cimport string
4 |
5 | # use the Numpy-C-API from Cython
6 | np.import_array()
7 |
8 | # cdefine the signature of our c function
9 | cdef extern from "mesh_core.h":
10 | void _rasterize_triangles_core(
11 | float* vertices, int* triangles,
12 | float* depth_buffer, int* triangle_buffer, float* barycentric_weight,
13 | int nver, int ntri,
14 | int h, int w)
15 |
16 | void _render_colors_core(
17 | float* image, float* vertices, int* triangles,
18 | float* colors,
19 | float* depth_buffer,
20 | int nver, int ntri,
21 | int h, int w, int c)
22 |
23 | void _render_texture_core(
24 | float* image, float* vertices, int* triangles,
25 | float* texture, float* tex_coords, int* tex_triangles,
26 | float* depth_buffer,
27 | int nver, int tex_nver, int ntri,
28 | int h, int w, int c,
29 | int tex_h, int tex_w, int tex_c,
30 | int mapping_type)
31 |
32 | void _get_normal_core(
33 | float* normal, float* tri_normal, int* triangles,
34 | int ntri)
35 |
36 | void _write_obj_with_colors_texture(string filename, string mtl_name,
37 | float* vertices, int* triangles, float* colors, float* uv_coords,
38 | int nver, int ntri, int ntexver)
39 |
40 | def get_normal_core(np.ndarray[float, ndim=2, mode = "c"] normal not None,
41 | np.ndarray[float, ndim=2, mode = "c"] tri_normal not None,
42 | np.ndarray[int, ndim=2, mode="c"] triangles not None,
43 | int ntri
44 | ):
45 | _get_normal_core(
46 | <float*> np.PyArray_DATA(normal), <float*> np.PyArray_DATA(tri_normal), <int*> np.PyArray_DATA(triangles),
47 | ntri)
48 |
49 | def rasterize_triangles_core(
50 | np.ndarray[float, ndim=2, mode = "c"] vertices not None,
51 | np.ndarray[int, ndim=2, mode="c"] triangles not None,
52 | np.ndarray[float, ndim=2, mode = "c"] depth_buffer not None,
53 | np.ndarray[int, ndim=2, mode = "c"] triangle_buffer not None,
54 | np.ndarray[float, ndim=2, mode = "c"] barycentric_weight not None,
55 | int nver, int ntri,
56 | int h, int w
57 | ):
58 | _rasterize_triangles_core(
59 | <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles),
60 | <float*> np.PyArray_DATA(depth_buffer), <int*> np.PyArray_DATA(triangle_buffer), <float*> np.PyArray_DATA(barycentric_weight),
61 | nver, ntri,
62 | h, w)
63 |
64 | def render_colors_core(np.ndarray[float, ndim=3, mode = "c"] image not None,
65 | np.ndarray[float, ndim=2, mode = "c"] vertices not None,
66 | np.ndarray[int, ndim=2, mode="c"] triangles not None,
67 | np.ndarray[float, ndim=2, mode = "c"] colors not None,
68 | np.ndarray[float, ndim=2, mode = "c"] depth_buffer not None,
69 | int nver, int ntri,
70 | int h, int w, int c
71 | ):
72 | _render_colors_core(
73 | <float*> np.PyArray_DATA(image), <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles),
74 | <float*> np.PyArray_DATA(colors),
75 | <float*> np.PyArray_DATA(depth_buffer),
76 | nver, ntri,
77 | h, w, c)
78 |
79 | def render_texture_core(np.ndarray[float, ndim=3, mode = "c"] image not None,
80 | np.ndarray[float, ndim=2, mode = "c"] vertices not None,
81 | np.ndarray[int, ndim=2, mode="c"] triangles not None,
82 | np.ndarray[float, ndim=3, mode = "c"] texture not None,
83 | np.ndarray[float, ndim=2, mode = "c"] tex_coords not None,
84 | np.ndarray[int, ndim=2, mode="c"] tex_triangles not None,
85 | np.ndarray[float, ndim=2, mode = "c"] depth_buffer not None,
86 | int nver, int tex_nver, int ntri,
87 | int h, int w, int c,
88 | int tex_h, int tex_w, int tex_c,
89 | int mapping_type
90 | ):
91 | _render_texture_core(
92 | <float*> np.PyArray_DATA(image), <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles),
93 | <float*> np.PyArray_DATA(texture), <float*> np.PyArray_DATA(tex_coords), <int*> np.PyArray_DATA(tex_triangles),
94 | <float*> np.PyArray_DATA(depth_buffer),
95 | nver, tex_nver, ntri,
96 | h, w, c,
97 | tex_h, tex_w, tex_c,
98 | mapping_type)
99 |
100 | def write_obj_with_colors_texture_core(string filename, string mtl_name,
101 | np.ndarray[float, ndim=2, mode = "c"] vertices not None,
102 | np.ndarray[int, ndim=2, mode="c"] triangles not None,
103 | np.ndarray[float, ndim=2, mode = "c"] colors not None,
104 | np.ndarray[float, ndim=2, mode = "c"] uv_coords not None,
105 | int nver, int ntri, int ntexver
106 | ):
107 | _write_obj_with_colors_texture(filename, mtl_name,
108 | <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles), <float*> np.PyArray_DATA(colors), <float*> np.PyArray_DATA(uv_coords),
109 | nver, ntri, ntexver)
110 |
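
One practical note on these wrappers: every argument typed as np.ndarray[..., mode="c"] must be C-contiguous and carry exactly the declared dtype (float32 for float*, int32 for int*), otherwise Cython rejects the call with a ValueError about a buffer dtype mismatch. A minimal sketch of the conversion a caller should perform first; the *_any names are illustrative stand-ins for whatever arrays the caller holds:

import numpy as np

vertices_any = np.random.rand(100, 3)               # float64 by default
triangles_any = np.random.randint(0, 100, (50, 3))  # platform-dependent int dtype

# ascontiguousarray copies only when needed and fixes both layout and dtype
vertices = np.ascontiguousarray(vertices_any, dtype=np.float32)
triangles = np.ascontiguousarray(triangles_any, dtype=np.int32)
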
--------------------------------------------------------------------------------
/utils/mesh/cython/setup.py:
--------------------------------------------------------------------------------
1 | '''
2 | Compile the extension in place with:
3 | python setup.py build_ext -i
4 | '''
5 |
6 | # setup.py
7 | from distutils.core import setup, Extension
8 | from Cython.Build import cythonize
9 | from Cython.Distutils import build_ext
10 | import numpy
11 |
12 | setup(
13 | name = 'mesh_core_cython',
14 | cmdclass={'build_ext': build_ext},
15 | ext_modules=[Extension("mesh_core_cython",
16 | sources=["mesh_core_cython.pyx", "mesh_core.cpp"],
17 | language='c++',
18 | include_dirs=[numpy.get_include()])],
19 | )
20 |
21 |
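
After running "python setup.py build_ext -i" in this directory, a quick smoke test confirms the module imports and accepts the dtypes it expects. A minimal sketch, assuming the build succeeded, with arbitrary all-zero arrays:

import numpy as np
import mesh_core_cython  # the freshly built extension

normal = np.zeros((4, 3), dtype=np.float32)      # per-vertex normals [nver, 3]
tri_normal = np.zeros((2, 3), dtype=np.float32)  # per-triangle normals [ntri, 3]
triangles = np.zeros((2, 3), dtype=np.int32)     # vertex indices [ntri, 3]
mesh_core_cython.get_normal_core(normal, tri_normal, triangles, triangles.shape[0])
print('mesh_core_cython loaded and callable')
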
--------------------------------------------------------------------------------
/utils/mesh/mesh_core_cython.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zengwb-lx/Face_Mask_Add/5ca88ae246d00388fba44d32a59b79654b12a710/utils/mesh/mesh_core_cython.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/utils/mesh/render.py:
--------------------------------------------------------------------------------
1 | '''
2 | Functions for rendering a mesh (from a 3D object to a 2D image).
3 | Only rasterization rendering is used here.
4 | Note that:
5 | 1. Generally, a render function involves a camera, lights, and rasterization. There is no camera or light here (those are written in other files).
6 | 2. Generally, the input vertices are normalized to [-1, 1] and centered on [0, 0] (in world space).
7 | Here, the vertices use image coordinates, which center on [w/2, h/2] with the y-axis pointing in the opposite direction.
8 | This means the render here only performs interpolation (the input is kept flexible on purpose).
9 |
10 | Author: Yao Feng
11 | Mail: yaofeng1995@gmail.com
12 | '''
13 | from __future__ import absolute_import
14 | from __future__ import division
15 | from __future__ import print_function
16 |
17 | import numpy as np
18 | from time import time
19 | import sys
20 | #sys.path.insert(0, '/home/liuyinglu2/notespace/code/face3d/face3d/mesh')
21 | import cython
22 | from . import mesh_core_cython
23 | #from .cython import mesh_core_cython
24 |
25 | def rasterize_triangles(vertices, triangles, h, w):
26 | '''
27 | Args:
28 | vertices: [nver, 3]
29 | triangles: [ntri, 3]
30 | h: height
31 | w: width
32 | Returns:
33 | depth_buffer: [h, w] saves the depth; here, the larger the z, the nearer the point.
34 | triangle_buffer: [h, w] saves the triangle id (-1 where no triangle covers the pixel).
35 | barycentric_weight: [h, w, 3] saves corresponding barycentric weight.
36 |
37 | # Each triangle has 3 vertices & Each vertex has 3 coordinates x, y, z.
38 | # h, w is the size of rendering
39 | '''
40 |
41 | # initial
42 | depth_buffer = np.zeros([h, w], dtype = np.float32, order = 'C') - 999999. # set the initial z to the farthest position
43 | triangle_buffer = np.zeros([h, w], dtype = np.int32) - 1 # if tri id = -1, the pixel has no triangle correspondence
44 | barycentric_weight = np.zeros([h, w, 3], dtype = np.float32)
45 |
46 | vertices = vertices.astype(np.float32).copy()
47 | triangles = triangles.astype(np.int32).copy()
48 |
49 | mesh_core_cython.rasterize_triangles_core(
50 | vertices, triangles,
51 | depth_buffer, triangle_buffer, barycentric_weight,
52 | vertices.shape[0], triangles.shape[0],
53 | h, w)
54 | return depth_buffer, triangle_buffer, barycentric_weight
55 | def render_colors(vertices, triangles, colors, h, w, c = 3, BG = None):
56 | ''' render mesh with colors
57 | Args:
58 | vertices: [nver, 3]
59 | triangles: [ntri, 3]
60 | colors: [nver, 3]
61 | h: height
62 | w: width
63 | c: channel
64 | BG: background image
65 | Returns:
66 | image: [h, w, c]. The rendered image.
67 | '''
68 |
69 | # initial
70 | if BG is None:
71 | image = np.zeros((h, w, c), dtype = np.float32)
72 | else:
73 | assert BG.shape[0] == h and BG.shape[1] == w and BG.shape[2] == c
74 | image = BG
75 | depth_buffer = np.zeros([h, w], dtype = np.float32, order = 'C') - 999999.
76 |
77 | # change order --> C-contiguous (row major)
78 | vertices = vertices.astype(np.float32).copy()
79 | triangles = triangles.astype(np.int32).copy()
80 | colors = colors.astype(np.float32).copy()
81 | ###
82 | st = time()
83 | mesh_core_cython.render_colors_core(
84 | image, vertices, triangles,
85 | colors,
86 | depth_buffer,
87 | vertices.shape[0], triangles.shape[0],
88 | h, w, c)
89 | return image
90 |
91 |
92 | def render_texture(vertices, triangles, texture, tex_coords, tex_triangles, h, w, c = 3, mapping_type = 'nearest', BG = None):
93 | ''' render mesh with texture map
94 | Args:
95 | vertices: [nver, 3]
96 | triangles: [ntri, 3]
97 | texture: [tex_h, tex_w, 3]
98 | tex_coords: [ntexcoords, 3]
99 | tex_triangles: [ntri, 3]
100 | h: height of rendering
101 | w: width of rendering
102 | c: channel
103 | mapping_type: 'bilinear' or 'nearest'
104 | '''
105 | # initial
106 | if BG is None:
107 | image = np.zeros((h, w, c), dtype = np.float32)
108 | else:
109 | assert BG.shape[0] == h and BG.shape[1] == w and BG.shape[2] == c
110 | image = BG
111 |
112 | depth_buffer = np.zeros([h, w], dtype = np.float32, order = 'C') - 999999.
113 |
114 | tex_h, tex_w, tex_c = texture.shape
115 | if mapping_type == 'nearest':
116 | mt = int(0)
117 | elif mapping_type == 'bilinear':
118 | mt = int(1)
119 | else:
120 | mt = int(0)
121 |
122 | # -> C order
123 | vertices = vertices.astype(np.float32).copy()
124 | triangles = triangles.astype(np.int32).copy()
125 | texture = texture.astype(np.float32).copy()
126 | tex_coords = tex_coords.astype(np.float32).copy()
127 | tex_triangles = tex_triangles.astype(np.int32).copy()
128 |
129 | mesh_core_cython.render_texture_core(
130 | image, vertices, triangles,
131 | texture, tex_coords, tex_triangles,
132 | depth_buffer,
133 | vertices.shape[0], tex_coords.shape[0], triangles.shape[0],
134 | h, w, c,
135 | tex_h, tex_w, tex_c,
136 | mt)
137 | return image
138 |
139 |
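
A minimal usage sketch for render_colors, with dummy data: one red triangle rasterized onto a 64x64 black background. The import path is an assumption based on this repo's layout; the Cython extension must be built first.

import numpy as np
from utils.mesh.render import render_colors  # path assumed from this repo's layout

# one triangle in image coordinates, z = 0 everywhere
vertices = np.array([[10, 10, 0],
                     [50, 10, 0],
                     [30, 50, 0]], dtype=np.float32)   # [nver, 3]
triangles = np.array([[0, 1, 2]], dtype=np.int32)      # [ntri, 3]
colors = np.array([[1, 0, 0]] * 3, dtype=np.float32)   # per-vertex RGB in [0, 1]

image = render_colors(vertices, triangles, colors, h=64, w=64)
print(image.shape)  # (64, 64, 3); pixels inside the triangle are red
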
--------------------------------------------------------------------------------
/utils/read_info.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: Yinglu Liu, Jun Wang
3 | @date: 20201012
4 | @contact: jun21wangustc@gmail.com
5 | """
6 |
7 | import numpy as np
8 |
9 |
10 | def read_landmark_106_file(filepath):
11 | map = [[1,2],[3,4],[5,6],7,9,11,[12,13],14,16,18,[19,20],21,23,25,[26,27],[28,29],[30,31],33,34,35,36,37,42,43,44,45,46,51,52,53,54,58,59,60,61,62,66,67,69,70,71,73,75,76,78,79,80,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103]
12 | line = open(filepath).readline().strip()
13 | pts1 = line.split(' ')[58:-1]
14 | assert(len(pts1) == 106*2)
15 | pts1 = np.array(pts1, dtype = np.float64) # np.float was removed in NumPy 1.24
16 | pts1 = pts1.reshape((106, 2))
17 | pts = np.zeros((68,2)) # map 106 to 68
18 | for ii in range(len(map)):
19 | if isinstance(map[ii],list):
20 | pts[ii] = np.mean(pts1[map[ii]], axis=0)
21 | else:
22 | pts[ii] = pts1[map[ii]]
23 | return pts
24 |
25 |
26 | def read_landmark_106_array(face_lms):
27 | map = [[1,2],[3,4],[5,6],7,9,11,[12,13],14,16,18,[19,20],21,23,25,[26,27],[28,29],[30,31],33,34,35,36,37,42,43,44,45,46,51,52,53,54,58,59,60,61,62,66,67,69,70,71,73,75,76,78,79,80,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103]
28 | pts1 = np.array(face_lms, dtype = np.float64)
29 | pts1 = pts1.reshape((106, 2))
30 | pts = np.zeros((68,2)) # map 106 to 68
31 | for ii in range(len(map)):
32 | if isinstance(map[ii],list):
33 | pts[ii] = np.mean(pts1[map[ii]], axis=0)
34 | else:
35 | pts[ii] = pts1[map[ii]]
36 | return pts
37 |
38 | def read_landmark_106(filepath):
39 | map = [[1,2],[3,4],[5,6],7,9,11,[12,13],14,16,18,[19,20],21,23,25,[26,27],[28,29],[30,31],33,34,35,36,37,42,43,44,45,46,51,52,53,54,58,59,60,61,62,66,67,69,70,71,73,75,76,78,79,80,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103]
40 | lines = open(filepath).readlines() # load landmarks
41 | pts1 = [_.strip().split() for _ in lines[1:107]]
42 | pts1 = np.array(pts1, dtype = np.float64)
43 | pts = np.zeros((68,2)) # map 106 to 68
44 | for ii in range(len(map)):
45 | if isinstance(map[ii],list):
46 | pts[ii] = np.mean(pts1[map[ii]], axis=0)
47 | else:
48 | pts[ii] = pts1[map[ii]]
49 | return pts
50 |
51 | def read_bbox(filepath):
52 | lines = open(filepath).readlines()
53 | bbox = lines[0].strip().split()
54 | bbox = [int(float(_)) for _ in bbox]
55 | return np.array(bbox)
56 |
57 |
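
All three readers above share the same 106-to-68 mapping rule: a plain index copies a single landmark, while a list of indices is averaged, collapsing pairs of 106-point landmarks into one 68-point landmark. A small sketch with dummy data, showing only the first two map entries for brevity:

import numpy as np

pts106 = np.arange(106 * 2, dtype=np.float64).reshape(106, 2)  # fake landmarks
mapping = [[1, 2], 7]  # first two entries of the full 68-entry map
pts68 = np.zeros((len(mapping), 2))
for ii, m in enumerate(mapping):
    pts68[ii] = np.mean(pts106[m], axis=0) if isinstance(m, list) else pts106[m]
print(pts68)  # row 0 = midpoint of landmarks 1 and 2; row 1 = landmark 7
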
--------------------------------------------------------------------------------