├── README.md
├── benchmark
├── README.md
├── code
│ ├── batch_eval.sh
│ ├── evaluator.py
│ ├── export_table.py
│ ├── mesh_util.py
│ ├── pred_loader.py
│ ├── predef
│ │ ├── lab_head_centers.npy
│ │ ├── lab_rotations.npy
│ │ ├── wild_head_centers.npy
│ │ ├── wild_pupil_scales.npy
│ │ └── wild_rotations.npy
│ ├── renderer.py
│ └── show.py
├── data
│ └── download_data.sh
├── eval_result
│ ├── fslab_3DDFA_V2
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_DECA
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_DF2Net
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_DFDN
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_Deep3DFaceRec
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_LAP
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_MGCNet
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_PRNet
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_RingNet
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_SADRNet
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_UDL
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_extreme3dface
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_facescape_deep
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fslab_facescape_opti
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_3DDFA_V2
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_DECA
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_DF2Net
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_DFDN
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_Deep3DFaceRec
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_LAP
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_MGCNet
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_PRNet
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_RingNet
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_SADRNet
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_UDL
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_extreme3dface
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ ├── fswild_facescape_deep
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
│ └── fswild_facescape_opti
│ │ ├── CD_full.txt
│ │ ├── CR_full.txt
│ │ └── MNE_full.txt
├── pred
│ └── download_pred.sh
└── requirements.txt
├── doc
├── License_Agreement.pdf
├── doc_bilinear_model.md
├── doc_mview_model.md
├── doc_tu_model.md
├── external_link_fsbm.md
└── facescape_googledrive.md
├── figures
├── benchmark_eval.jpg
├── cam_coord_fs.jpg
├── facescape_all.jpg
├── facescape_bm.jpg
├── facescape_info.jpg
├── facescape_mview.jpg
└── facescape_tu.jpg
├── samples
├── download_sample.sh
└── sample_keypoint3d.txt
└── toolkit
├── README.md
├── demo_align.ipynb
├── demo_bilinear_basic.ipynb
├── demo_bilinear_fit.ipynb
├── demo_landmark.ipynb
├── demo_mask.ipynb
├── demo_mview_projection.ipynb
├── demo_output
├── fm_result.jpg
├── lm_result.jpg
├── mview_depth_view49.jpg
├── mview_rend_view49.jpg
├── tu_color.jpg
├── tu_depth.jpg
├── tu_shade.jpg
└── tu_tex.jpg
├── demo_render.ipynb
├── demo_rig.ipynb
├── demo_symmetry.ipynb
├── predef
├── Rt_scale_dict.json
├── facial_mask_v10.png
├── facial_mask_v16.png
├── landmark_indices.npz
├── landmark_indices.txt
├── sym_dict.npy
└── sym_dict_old.npy
├── requirements.txt
├── src
├── camera.py
├── facescape_bm.py
├── facescape_fitter.py
├── mesh_obj.py
├── mesh_proc.py
├── renderer.py
├── rig.py
└── utility.py
├── test_data
└── chan.jpg
└── tex_unwrap_test.py
/README.md:
--------------------------------------------------------------------------------
1 | # FaceScape
2 |
3 | *FaceScape* provides large-scale, high-quality 3D face datasets, parametric models, documentation, and toolkits for 3D-face-related research. [[CVPR2020 paper]](https://openaccess.thecvf.com/content_CVPR_2020/papers/Yang_FaceScape_A_Large-Scale_High_Quality_3D_Face_Dataset_and_Detailed_CVPR_2020_paper.pdf) [[extended arXiv Report]](https://arxiv.org/pdf/2111.01082.pdf) [[supplementary]](https://openaccess.thecvf.com/content_CVPR_2020/supplemental/Yang_FaceScape_A_Large-Scale_CVPR_2020_supplemental.zip)
4 |
5 | This repository is continually updated with our latest progress - *[latest update: 2023/10/20]*
6 |
7 | ### Data
8 |
9 | The data can be downloaded at https://facescape.nju.edu.cn/ after requesting a license key.
10 | *New:* A share link on Google Drive is available after requesting the license key; see [here](https://github.com/zhuhao-nju/facescape/blob/master/doc/facescape_googledrive.md) for details.
11 | *New:* The bilinear model ver1.6 can be downloaded without requesting a license key; see [here](https://github.com/zhuhao-nju/facescape/blob/master/doc/external_link_fsbm.md) for the link and rules.
12 |
13 |
14 |
15 | The available sources include:
16 |
17 | | Item (Docs) | Description | Quantity | Quality |
18 | |-------------------|---------------------------------------------------------------------|------------------------------------------------|---------|
19 | | [TU models](/doc/doc_tu_model.md) | Topologically uniformed 3D face models with displacement map and texture map. | **16940 models** (847 id × 20 exp) | Detailed geometry, 4K dp/tex maps |
20 | | [Multi-view data](/doc/doc_mview_model.md) | Multi-view images, camera parameters and corresponding 3D face mesh. | **>400k images** (359 id × 20 exp × ≈60 view) | 4M~12M pixels |
21 | | [Bilinear model](/doc/doc_bilinear_model.md) | The statistical model to transform the base shape into the vector space. | 4 for different settings | Only for base shape. |
22 | | [Info list](/doc/doc_tu_model.md) | Gender / age of the subjects. | 847 subjects | -- |
23 |
24 | The datasets are only released for non-commercial research use. As facial data involves the privacy of participants, we use strict license terms to ensure that the dataset is not abused.
25 |
26 | ### Benchmark for SVFR
27 | We present a benchmark to evaluate the accuracy of single-view face 3D reconstruction (SVFR) methods, view [here](/benchmark/README.md) for the details.
28 |
29 | ### ToolKit
30 | Start using the Python toolkit [here](/toolkit/README.md); the demos include:
31 |
32 | * [bilinear_model-basic](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_bilinear_basic.ipynb) - use facescape bilinear model to generate 3D mesh models.
33 | * [bilinear_model-fit](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_bilinear_fit.ipynb) - fit the bilinear model to 2D/3D landmarks.
34 | * [multi-view-project](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_mview_projection.ipynb) - Project 3D models to multi-view images.
35 | * [landmark](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_landmark.ipynb) - extract landmarks using predefined vertex index.
36 | * [facial_mask](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_mask.ipynb) - extract facial region from the full head TU-models.
37 | * [render](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_render.ipynb) - render TU-models to color images and depth map.
38 | * [alignment](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_align.ipynb) - align all the multi-view models.
39 | * [symmetry](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_symmetry.ipynb) - get the correspondence of the vertices on TU-models from left side to right side.
40 | * [rig](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_rig.ipynb) - rig 20 expressions to 52 expressions.
41 |
42 |
43 | ### Open-Source Projects using FaceScape
44 |
45 | **[High-fidelity 3D Face Generation from Natural Language Descriptions (CVPR 2023)](https://github.com/zhuhao-nju/describe3d)**
46 | *Menghua Wu, Hao Zhu#, Linjia Huang, Yiyu Zhuang, Yuanxun Lu, Xun Cao*
47 |
48 | **[RAFaRe: Learning Robust and Accurate Non-parametric 3D Face Reconstruction from Pseudo 2D&3D Pairs (AAAI 2023)](https://github.com/zhuhao-nju/rafare)**
49 | *Longwei Guo, Hao Zhu#, Yuanxun Lu, Menghua Wu, Xun Cao*
50 |
51 | **[Structure-aware Editable Morphable Model for 3D Facial Detail Animation and Manipulation (ECCV2022)](https://github.com/gerwang/facial-detail-manipulation)**
52 | *Jingwang Ling, Zhibo Wang, Ming Lu, Quan Wang, Chen Qian, Feng Xu*
53 |
54 | **[HeadNeRF: A Real-Time NeRF-Based Parametric Head Model (CVPR2022)](https://github.com/CrisHY1995/headnerf)**
55 | *Yang Hong, Bo Peng, Haiyao Xiao, Ligang Liu, Juyong Zhang*
56 |
57 | **[ImFace: A Nonlinear 3D Morphable Face Model with Implicit Neural Representations (CVPR2022)](https://github.com/MingwuZheng/ImFace)**
58 | *Mingwu Zheng, Hongyu Yang, Di Huang, Liming Chen*
59 |
60 | **[Detailed Facial Geometry Recovery from Multi-view Images by Learning an Implicit Function (AAAI 2022)](https://github.com/zhuhao-nju/mvfr)**
61 | *Yunze Xiao\*, Hao Zhu\*, Haotian Yang, Zhengyu Diao, Xiangju Lu, Xun Cao*
62 |
63 | **[Deep Unsupervised 3D SfM Face Reconstruction Based on Massive Landmark Bundle Adjustment (ACM MM 2021)](https://github.com/BoomStarcuc/3DSfMFaceReconstruction)**
64 | *Yuxing Wang, Yawen Lu, Zhihua Xie, Guoyu Lu*
65 |
66 | **[Detailed Riggable 3D face Prediction Code of FaceScape (CVPR2020)](https://github.com/yanght321/Detailed3DFace.git)**
67 | *Haotian Yang\*, Hao Zhu\*, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, Xun Cao*
68 |
69 |
70 |
71 | ### ChangeLog
72 | * **2023/10/20**
73 | Benchmark data and results have been updated to be consistent with the experiments in the latest journal version paper.
74 | * **2022/9/9**
75 | One section is added to introduce open-source projects that use FaceScape data or models, and will be continuously updated.
76 | * **2022/7/26**
77 | The data for training and testing [MoFaNeRF](https://github.com/zhuhao-nju/mofanerf) is added to the [download page](https://facescape.nju.edu.cn/).
78 | * **2021/12/2**
79 | A benchmark to evaluate single-view face reconstruction is available, view [here](https://github.com/zhuhao-nju/facescape/blob/master/benchmark/README.md) for detail.
80 | * **2021/8/16**
81 | Share link on google drive is available after requesting the license key, view [here](https://github.com/zhuhao-nju/facescape/blob/master/doc/facescape_googledrive.md) for detail.
82 | * **2021/5/13**
83 | The fitting demo is added to the toolkit. Please note that if you downloaded the bilinear model v1.6 before 2021/5/13, you need to download it again, because some parameters required by the fitting demo have been added.
84 | * **2021/4/14**
85 | The bilinear model has been updated to 1.6, check it [here](/doc/doc_bilinear_model.md).
86 | The new bilinear model now can be downloaded from *NJU Drive* or *Google Drive* without requesting a license key. Check it [here](/doc/external_link_fsbm.md).
87 | ToolKit and Doc have been updated with new content.
88 | Some wrong ages and genders in the info list are corrected in "info_list_v2.txt".
89 | * **2020/9/27**
90 | The code of detailed riggable 3D face prediction is released, check it [here](https://github.com/yanght321/Detailed3DFace.git).
91 | * **2020/7/25**
92 | Multi-view data is available for download.
93 | The bilinear model is updated to ver 1.3, with vertex-color added.
94 | Info list including gender and age is available on the download page.
95 | Tools and samples are added to this repository.
96 | * **2020/7/7**
97 | The bilinear model is updated to ver 1.2.
98 | * **2020/6/13**
99 | The [website](https://facescape.nju.edu.cn/) of FaceScape is online.
3D models and bilinear models are available for download.
100 | * **2020/3/31**
101 | The pre-print paper is available on [arXiv](https://arxiv.org/abs/2003.13989).
102 |
103 | ### Bibtex
104 | If you find this project helpful to your research, please consider citing:
105 |
106 | ```
107 | @article{zhu2023facescape,
108 | title={FaceScape: 3D Facial Dataset and Benchmark for Single-View 3D Face Reconstruction},
109 | author={Zhu, Hao and Yang, Haotian and Guo, Longwei and Zhang, Yidi and Wang, Yanru and Huang, Mingkai and Wu, Menghua and Shen, Qiu and Yang, Ruigang and Cao, Xun},
110 | journal={IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
111 | year={2023},
112 | publisher={IEEE}}
113 | ```
114 | ```
115 | @inproceedings{yang2020facescape,
116 | author = {Yang, Haotian and Zhu, Hao and Wang, Yanru and Huang, Mingkai and Shen, Qiu and Yang, Ruigang and Cao, Xun},
117 | title = {FaceScape: A Large-Scale High Quality 3D Face Dataset and Detailed Riggable 3D Face Prediction},
118 | booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
119 | month = {June},
120 | year = {2020},
121 | pages = {601--610}}
122 | ```
123 |
124 | ### Acknowledgement
125 | The project is supported by [CITE Lab](https://cite.nju.edu.cn/) of Nanjing University, Baidu Research, and Aiqiyi Inc. Student contributors: Shengyu Ji, Wei Jin, Mingkai Huang, Yanru Wang, Haotian Yang, Yidi Zhang, Yunze Xiao, Yuxin Ding, Longwei Guo, Menghua Wu, Yiyu Zhuang.
126 |
--------------------------------------------------------------------------------
/benchmark/README.md:
--------------------------------------------------------------------------------
1 | # Benchmark for SVFR methods
2 | *This is a beta version*. We present a benchmark to evaluate the accuracy of single-view face 3D reconstruction (SVFR) methods on our in-the-wild data and in-the-lab data. Different from [NoW benchmark](https://ringnet.is.tue.mpg.de/challenge.html) that evaluates the shape recovery for the expression-free canonical face, our benchmark takes various poses, expressions, environments and focal lengths into consideration. More details about the benchmark can be found in Sec. 6 in our [journal paper](https://arxiv.org/abs/2111.01082).
3 |
4 | ### Environment
5 |
6 | The code is tested on Ubuntu with python 3.x. [Anaconda](https://www.anaconda.com/products/individual) is recommended to build a virtual environment:
7 | ```
8 | conda create -n fs_eval python=3.6 -y
9 | conda activate fs_eval
10 | ```
11 | Install the required packages:
12 | ```
13 | pip install --upgrade pip
14 | pip install -r ../requirements.txt
15 | ```
16 |
17 | Install pyembree to accelerate the intersection check, otherwise the evaluation will be very slow:
18 | ```
19 | conda install -c conda-forge pyembree
20 | ```
21 |
22 | Install [psbody-mesh](https://github.com/MPI-IS/mesh) following its instruction.
23 |
24 | ### Download Benchmark Data
25 | The benchmark data (207 MB) that contains images, meshes, and parameters can be downloaded from NJU Drive:
26 | ```
27 | cd data
28 | ./download_data.sh
29 | ```
30 | or downloaded from [Google Drive](https://drive.google.com/file/d/1-aZjHXpofKsDEa-rNunE2HsgUMlilKAY/view?usp=share_link). Using these data indicates that you agree to obey the [License Agreement](https://github.com/zhuhao-nju/facescape/blob/master/doc/License_Agreement.pdf) of FaceScape.
31 |
32 | ### Show Pre-evaluated Results
33 | The quantitative evaluations are stored in './eval_result/'. You can display the quantitative results in the command line:
34 | ```
35 | cd code
36 | python show.py
37 | ```
38 | or save the quantitative results as a CSV table:
39 | ```
40 | cd code
41 | python export_table.py
42 | ```
43 |
44 | Heat meshes, in which the per-vertex distance error is visualized as vertex color, have also been generated.
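The metric files can also be read directly. Below is a minimal sketch, assuming the format used by 'export_table.py' (one value per evaluated tuple, NaN marking a failed reconstruction), which prints the mean of a single metric file:
```python
import numpy as np

# Example: chamfer distance of DECA on the in-the-wild set; the path is
# relative to ./code/, matching export_table.py.
values = np.loadtxt("../eval_result/fswild_DECA/CD_full.txt")
valid = values[~np.isnan(values)]   # NaN marks failed reconstructions

print("mean CD: %.2f mm over %d/%d tuples" % (valid.mean(), len(valid), len(values)))
```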
45 |
46 | ### Run Evaluation Code
47 |
48 | The evaluation code can be validated by running it on the 14 reported methods; the results should be the same as the pre-evaluated results above.
49 |
50 | Firstly, download the result models of the 14 methods (14.3 GB) from NJU Drive:
51 | ```
52 | cd pred
53 | ./download_pred.sh
54 | ```
55 | or from Google Drive ([lab_pred.zip](https://drive.google.com/file/d/1catZZb8XTCIess_aea-46VieL197tzNh/view?usp=share_link), [wild_pred.zip](https://drive.google.com/file/d/1pY0Asfal7SPBfdRX4D4ecPQfXEhPo1R_/view?usp=share_link)).
56 |
57 | Then run the following code to get the evaluation results:
58 | ```
59 | cd code
60 | ./batch_eval.sh
61 | ```
62 | This will take a few hours and the results will be saved to './eval_result/'.
63 |
64 | ### Evaluate a New Method
65 | As various SVFR methods output their resulting models in different formats and settings, we define a specific loader function for each method in 'pred_loader.py' to load the result models and process them. The processing projects the resulting 3D model to align with the source image using the standard projection parameters, which involves scaling and a depth shift but no rotation.
66 |
67 | To evaluate a method on the in-the-wild data, the resulting model should be placed in an orthogonal camera coordinate system as used in [trimesh](https://github.com/mikedh/trimesh). The orthogonal space is normalized to the cube [-1, -1, -1] ~ [1, 1, 1].
68 | To evaluate a method on the in-the-lab data, the resulting model should be placed in a perspective camera coordinate system with the ground-truth focal length. The depth will be automatically shifted to align the resulting model and the ground-truth model. As some methods use orthogonal projection while others use perspective projection, an approximate transformation between orthogonal and perspective is required in the loader function. Please refer to the existing 14 loader functions to write and register a new loader (a minimal sketch is given at the end of this section), then run the evaluation code for the specific method:
69 | ```
70 | # in-the-wild evaluation
71 | python ./evaluator.py --dataset fswild --method $METHOD_NAME$
72 | # in-the-lab evaluation
73 | python ./evaluator.py --dataset fslab --method $METHOD_NAME$
74 | ```
75 | Use the option '--heat_mesh False' if the heat mesh is not required. Use the option '--num $IDX$' to evaluate only one tuple with index = $IDX$.
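A minimal loader sketch is shown below. The function name, the normalization, and the assumption that the prediction needs no further alignment are illustrative only; follow the 14 existing loaders in 'pred_loader.py' for the exact interface and how a loader is registered.
```python
import numpy as np
import trimesh

# Hypothetical loader for a new method "mynet"; everything except the trimesh
# calls is an assumption made for illustration.
def load_mynet(mesh_path):
    mesh = trimesh.load(mesh_path, process=False)
    verts = np.asarray(mesh.vertices, dtype=np.float64)

    # Bring the prediction into the normalized orthogonal space ([-1, 1] cube):
    # center it, then scale it into the unit cube. A real loader would apply the
    # method-specific scaling and depth shift described above.
    verts = verts - verts.mean(axis=0)
    verts = verts / np.abs(verts).max()

    return trimesh.Trimesh(vertices=verts, faces=mesh.faces, process=False)
```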
76 |
77 | ### Visualize
78 |
79 | The quantitative evaluations are plotted as:
80 |
81 |
82 |
83 | Please note:
84 | * The ground-truth shapes for evaluation are normalized in scale: all pupil distances are scaled to 62.85 mm, which is a statistical mean value of the pupil distance (see the sketch after this list).
85 | * The main difference from the [NoW Benchmark](https://ringnet.is.tue.mpg.de/challenge.html) is that our evaluation takes the predicted pose and expression into account, while the NoW benchmark mainly evaluates the neutralized face, excluding the influence of facial expressions and poses.
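The scale normalization can be illustrated by the following sketch. The landmark indices are hypothetical; the benchmark itself relies on the precomputed values under './code/predef/' (e.g. 'wild_pupil_scales.npy').
```python
import numpy as np

MEAN_PUPIL_DIST = 62.85  # mm, statistical mean pupil distance

# Scale a mesh so that the distance between its two pupil-center landmarks
# equals the canonical value; left_idx / right_idx are hypothetical indices.
def normalize_scale(verts, left_idx, right_idx):
    d = np.linalg.norm(verts[left_idx] - verts[right_idx])
    return verts * (MEAN_PUPIL_DIST / d)
```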
86 |
--------------------------------------------------------------------------------
/benchmark/code/batch_eval.sh:
--------------------------------------------------------------------------------
1 | #=================================================================
2 | python ./evaluator.py --dataset fswild --method facescape_opti --num -1
3 | python ./evaluator.py --dataset fswild --method facescape_deep --num -1
4 | python ./evaluator.py --dataset fswild --method 3DDFA_V2 --num -1
5 | python ./evaluator.py --dataset fswild --method DECA --num -1
6 | python ./evaluator.py --dataset fswild --method extreme3dface --num -1
7 | python ./evaluator.py --dataset fswild --method MGCNet --num -1
8 | python ./evaluator.py --dataset fswild --method PRNet --num -1
9 | python ./evaluator.py --dataset fswild --method RingNet --num -1
10 | python ./evaluator.py --dataset fswild --method SADRNet --num -1
11 | python ./evaluator.py --dataset fswild --method UDL --num -1
12 | python ./evaluator.py --dataset fswild --method DF2Net --num -1
13 | python ./evaluator.py --dataset fswild --method Deep3DFaceRec --num -1
14 | python ./evaluator.py --dataset fswild --method DFDN --num -1
15 | python ./evaluator.py --dataset fswild --method LAP --num -1
16 | #=================================================================
17 | python ./evaluator.py --dataset fslab --method facescape_opti --num -1
18 | python ./evaluator.py --dataset fslab --method facescape_deep --num -1
19 | python ./evaluator.py --dataset fslab --method 3DDFA_V2 --num -1
20 | python ./evaluator.py --dataset fslab --method DECA --num -1
21 | python ./evaluator.py --dataset fslab --method extreme3dface --num -1
22 | python ./evaluator.py --dataset fslab --method MGCNet --num -1
23 | python ./evaluator.py --dataset fslab --method PRNet --num -1
24 | python ./evaluator.py --dataset fslab --method RingNet --num -1
25 | python ./evaluator.py --dataset fslab --method SADRNet --num -1
26 | python ./evaluator.py --dataset fslab --method UDL --num -1
27 | python ./evaluator.py --dataset fslab --method DF2Net --num -1
28 | python ./evaluator.py --dataset fslab --method Deep3DFaceRec --num -1
29 | python ./evaluator.py --dataset fslab --method DFDN --num -1
30 | python ./evaluator.py --dataset fslab --method LAP --num -1
31 | #=================================================================
32 | python ./show.py
33 |
--------------------------------------------------------------------------------
/benchmark/code/export_table.py:
--------------------------------------------------------------------------------
1 | ## ================================================================================
2 | ## Description: save results to csv table
3 | ##
4 | ## Usage: 'python export_table.py' to export csv table to '../eval_result/*.csv'
5 | ## csv table can be further transformed to Latex, Excel, etc.
6 | ##
7 | ## Authors: Hao Zhu (zhuhaoese@nju.edu.cn)
8 | ##
9 | ## License: MIT
10 | ## ================================================================================
11 |
12 | import numpy as np
13 |
14 | require_visi = False
15 |
16 | dataset_list = ['fswild', 'fslab']
17 | method_list = ['extreme3dface', # CVPR 2018
18 | 'PRNet', # ECCV 2018
19 | 'Deep3DFaceRec', # CVPRW 2019
20 | 'RingNet', # CVPR 2019
21 | 'DFDN', # ICCV 2019
22 | 'DF2Net', # ICCV 2019
23 | 'UDL', # TIP 2020
24 | 'facescape_opti', # CVPR 2020
25 | 'facescape_deep', # CVPR 2020
26 | 'MGCNet', # ECCV 2020
27 | '3DDFA_V2', # ECCV 2020
28 | 'SADRNet', # TIP 2021
29 | 'LAP', # CVPR 2021
30 | 'DECA', # SIGGRAPH 2021
31 | ]
32 |
33 | metric_list = ['CD_full', 'MNE_full', 'CR_full']
34 |
35 | # Table 1, fs_wild table - angle
36 | dataset_name = 'fswild'
37 | with open("../eval_result/table_1_fswild_angle.csv", 'w') as f:
38 | # table header
39 | f.write(",,$\pm5$,,,$\pm30$,,,$\pm60$,,,$\pm90$,,\n")
40 | f.write(",CD/$mm$,MNE/$rad$,CR/$\%$,CD/$mm$,MNE/$rad$,CR/$\%$,CD/$mm$,MNE/$rad$,CR/$\%$," +
41 | "CD/$mm$,MNE/$rad$,CR/$\%$,done/$\%$\n")
42 |
43 | for method_name in method_list:
44 | f.write("%s," % method_name)
45 |
46 | value_arr = np.zeros((13))
47 | for m_i, metric_name in enumerate(metric_list):
48 | this_list = np.loadtxt("../eval_result/%s_%s/%s.txt" % (dataset_name,
49 | method_name,
50 | metric_name))
51 |
52 | angel_0_5 = np.mean(this_list[0:100][~np.isnan(this_list[0:100])])
53 | angel_5_30 = np.mean(this_list[100:200][~np.isnan(this_list[100:200])])
54 | angel_30_60 = np.mean(this_list[200:300][~np.isnan(this_list[200:300])])
55 | angel_60_90 = np.mean(this_list[300:][~np.isnan(this_list[300:])])
56 | angle_all = np.mean(this_list[~np.isnan(this_list)])
57 | done_rate = float(len(this_list[~np.isnan(this_list)])) / len(this_list)
58 |
59 | value_arr[0*3+m_i] = angel_0_5
60 | value_arr[1*3+m_i] = angel_5_30
61 | value_arr[2*3+m_i] = angel_30_60
62 | value_arr[3*3+m_i] = angel_60_90
63 | value_arr[12] = done_rate
64 |
65 | f.write("%1.2f,%1.3f,%2.1f," % (value_arr[0], value_arr[1], value_arr[2]*100))
66 | f.write("%1.2f,%1.3f,%2.1f," % (value_arr[3], value_arr[4], value_arr[5]*100))
67 | f.write("%1.2f,%1.3f,%2.1f," % (value_arr[6], value_arr[7], value_arr[8]*100))
68 | f.write("%1.2f,%1.3f,%2.1f," % (value_arr[9], value_arr[10], value_arr[11]*100))
69 | f.write("%2.1f\n" % (value_arr[12]*100))
70 |
71 |
72 | # Table 2, fs_lab table - angle
73 | dataset_name = 'fslab'
74 |
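# Index layout of the fslab results (as used below): each subject contributes
# 3 focal-length settings x 11 views = 33 tuples. The angle table uses only the
# first focal setting (f_idx = 0); view 0 feeds the +-5 deg column, views 1-8
# the +-30 deg column, and views 9-10 the +-60 deg column.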
75 | angle_0_indices, angle_30_indices, angle_60_indices = [], [], []
76 | for sub_idx in range(20):
77 | for f_idx in range(1):
78 | for v_idx in [0]:
79 | angle_0_indices.append(sub_idx*33 + f_idx*11 + v_idx)
80 | for v_idx in list(range(1, 9)):
81 | angle_30_indices.append(sub_idx*33 + f_idx*11 + v_idx)
82 | for v_idx in list(range(9, 11)):
83 | angle_60_indices.append(sub_idx*33 + f_idx*11 + v_idx)
84 |
85 | with open("../eval_result/table_2_fslab_angle.csv", 'w') as f:
86 | # table header
87 | f.write(",,$\pm5$,,,$\pm30$,,,$\pm60$,,\n")
88 | f.write(",CD/$mm$,MNE/$rad$,CR/$\%$,CD/$mm$,MNE/$rad$,CR/$\%$,CD/$mm$,MNE/$rad$,CR/$\%$," +
89 | "done/$\%$\n")
90 |
91 | for method_name in method_list:
92 | f.write("%s," % method_name)
93 |
94 | value_arr = np.zeros((10))
95 | for m_i, metric_name in enumerate(metric_list):
96 | this_list = np.loadtxt("../eval_result/%s_%s/%s.txt" % (dataset_name,
97 | method_name,
98 | metric_name))
99 |
100 | angel_0 = np.mean(this_list[angle_0_indices][~np.isnan(this_list[angle_0_indices])])
101 | angel_30 = np.mean(this_list[angle_30_indices][~np.isnan(this_list[angle_30_indices])])
102 | if len(this_list[angle_60_indices][~np.isnan(this_list[angle_60_indices])]) == 0:
103 | angel_60 = -1
104 | else:
105 | angel_60 = np.mean(this_list[angle_60_indices][~np.isnan(this_list[angle_60_indices])])
106 | angle_all = np.mean(this_list[~np.isnan(this_list)])
107 | done_rate = float(len(this_list[~np.isnan(this_list)])) / len(this_list)
108 |
109 | value_arr[0*3+m_i] = angel_0
110 | value_arr[1*3+m_i] = angel_30
111 | value_arr[2*3+m_i] = angel_60
112 | value_arr[9] = done_rate
113 |
114 | f.write("%1.2f,%1.3f,%2.1f," % (value_arr[0], value_arr[1], value_arr[2]*100))
115 | f.write("%1.2f,%1.3f,%2.1f," % (value_arr[3], value_arr[4], value_arr[5]*100))
116 | f.write("%1.2f,%1.3f,%2.1f," % (value_arr[6], value_arr[7], value_arr[8]*100))
117 | f.write("%2.1f\n" % (value_arr[9]*100))
118 |
119 |
120 | # Table 3, fs_lab table - focal length
121 | dataset_name = 'fslab'
122 |
123 | f_1200_indices, f_600_indices, f_300_indices = [], [], []
124 | for sub_idx in range(20):
125 | for v_idx in range(11):
126 | f_1200_indices.append(sub_idx*33 + 0*11 + v_idx)
127 | f_600_indices.append(sub_idx*33 + 1*11 + v_idx)
128 | f_300_indices.append(sub_idx*33 + 2*11 + v_idx)
129 |
130 | with open("../eval_result/table_3_fslab_f.csv", 'w') as f:
131 | # table header
132 | f.write(",,$1200$,,,$600$,,,$300$,,\n")
133 | f.write(",CD/$mm$,MNE/$rad$,CR/$\%$,CD/$mm$,MNE/$rad$,CR/$\%$,CD/$mm$,MNE/$rad$,CR/$\%$," +
134 | "done/$\%$\n")
135 |
136 | for method_name in method_list:
137 | f.write("%s," % method_name)
138 |
139 | value_arr = np.zeros((10))
140 | for m_i, metric_name in enumerate(metric_list):
141 | this_list = np.loadtxt("../eval_result/%s_%s/%s.txt" % (dataset_name,
142 | method_name,
143 | metric_name))
144 |
145 | f_1200 = np.mean(this_list[f_1200_indices][~np.isnan(this_list[f_1200_indices])])
146 | f_600 = np.mean(this_list[f_600_indices][~np.isnan(this_list[f_600_indices])])
147 | f_300 = np.mean(this_list[f_300_indices][~np.isnan(this_list[f_300_indices])])
148 | f_all = np.mean(this_list[~np.isnan(this_list)])
149 | done_rate = float(len(this_list[~np.isnan(this_list)])) / len(this_list)
150 |
151 | value_arr[0*3+m_i] = f_1200
152 | value_arr[1*3+m_i] = f_600
153 | value_arr[2*3+m_i] = f_300
154 | value_arr[9] = done_rate
155 |
156 | f.write("%1.2f,%1.3f,%2.1f," % (value_arr[0], value_arr[1], value_arr[2]*100))
157 | f.write("%1.2f,%1.3f,%2.1f," % (value_arr[3], value_arr[4], value_arr[5]*100))
158 | f.write("%1.2f,%1.3f,%2.1f," % (value_arr[6], value_arr[7], value_arr[8]*100))
159 | f.write("%2.1f\n" % (value_arr[9]*100))
160 |
161 |
--------------------------------------------------------------------------------
/benchmark/code/mesh_util.py:
--------------------------------------------------------------------------------
1 | import trimesh, numpy as np
2 | from psbody.mesh import Mesh
3 |
4 | # make trimesh from vertices and faces
5 | def make_trimesh(verts, faces, vert_colors = None):
6 | if vert_colors is None:
7 | return trimesh.Trimesh(vertices=verts, faces=faces, process=False)
8 | else:
9 | return trimesh.Trimesh(vertices=verts, faces=faces, vertex_colors=vert_colors, process=False)
10 |
11 | # to replace trimesh.load
12 | def load_ori_mesh(fn):
13 | return trimesh.load(fn,
14 | resolver = None,
15 | split_object = False,
16 | group_material = False,
17 | skip_materials = False,
18 | maintain_order = True,
19 | process = False)
20 |
21 | # mesh_A and mesh_B are trimesh class
22 | # warn: if require_array == True, the outputs errors_B2A and errors_A2B may contain NaN
23 | def compute_chamfer(mesh_A, mesh_B, require_array = False):
24 | # B to A
25 | mesh_A_aabb = Mesh(v=mesh_A.vertices, f=mesh_A.faces).compute_aabb_tree()
26 | _, closests_B2A = mesh_A_aabb.nearest(mesh_B.vertices)
27 | errors_B2A = np.linalg.norm(mesh_B.vertices - closests_B2A, axis=1)
28 |
29 | # A to B
30 | mesh_B_aabb = Mesh(v=mesh_B.vertices, f=mesh_B.faces).compute_aabb_tree()
31 | _, closests_A2B = mesh_B_aabb.nearest(mesh_A.vertices)
32 | errors_A2B = np.linalg.norm(mesh_A.vertices - closests_A2B, axis=1)
33 |
34 | errors_all = np.concatenate((errors_A2B, errors_B2A))
35 | chamfer_dist = np.mean(errors_all[~np.isnan(errors_all)])
36 | if require_array is False:
37 | return chamfer_dist
38 | else:
39 | return chamfer_dist, errors_B2A, errors_A2B
40 |
41 | # depth==bg_val means background, will not be visualized
42 | def depth2mesh(depth, threshold=15, bg_val=0.0):
43 | h, w = depth.shape
44 | indices_map = np.indices((h, w))
45 |
46 | # make vertices
47 | verts = np.concatenate((indices_map.transpose((1, 2, 0)),
48 | np.expand_dims(depth, axis = 2)), axis = 2).reshape((h * w, 3))
49 |
50 | # generate valid mask according to difference of depth and bg_val
51 | depth_diff = np.zeros((h, w, 3))
52 | depth_diff[:,:,0] = depth - np.roll(depth, -1, axis=0)
53 | depth_diff[:,:,1] = depth - np.roll(depth, -1, axis=1)
54 | depth_diff[:,:,2] = depth - np.roll(np.roll(depth, -1, axis=0), -1, axis=1)
55 | depth_diff_max = np.max(np.abs(depth_diff), axis=2)
56 |     valid_mask = (depth_diff_max
--------------------------------------------------------------------------------
/doc/doc_bilinear_model.md:
--------------------------------------------------------------------------------
6 |
7 | ### Description (ver1.6)
8 |
9 | Compared to previous versions, ver1.6 has been updated in the following aspects:
10 |
11 | * Resolves the problem of weird fitting around the ears.
12 | * A new model with better representation ability for the frontal face is provided.
13 | * A new symmetric mesh topology is used.
14 | * All parameters are integrated into an npz file, and a Python class is provided to use them.
15 |
16 | Ver1.6 provides four models:
17 |
18 | * *facescape_bm_v1.6_847_50_52_id_front.npz* - Bilinear model with 52 expression parameters and 50 identity parameters. PCA is applied to the identity dimension, which reduces it from 847 to 50. The frontal facial vertices are given higher weights (10:1) for better representation ability. *This model is **recommended** in general cases.*
19 | * *facescape_bm_v1.6_847_50_52_id.npz* - Same as above, except that the higher frontal weights are not adopted.
20 | * *facescape_bm_v1.6_847_300_52_id.npz* - Same as above, except that the identity dimension is reduced to 300 instead of 50.
21 | * *facescape_bm_v1.6_847_50_52_id_exp.npz* - Same as above, except that PCA is applied to both the identity dimension (50) and the expression dimension (52). Please note that this model doesn't work with our 'bilinear_model-fit' code.
22 |
23 | The demo code to use the facescape bilinear model ver1.6 can be found here: [basic usage](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_bilinear_basic.ipynb) and [model fitting](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_bilinear_fit.ipynb).
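As a rough sketch of the underlying idea, a bilinear model generates a face by contracting its core tensor with an identity vector and an expression vector. The npz key name and tensor layout below are assumptions made for illustration; the actual interface is the facescape_bm class in the toolkit and the notebooks linked above.
```python
import numpy as np

model = np.load("facescape_bm_v1.6_847_50_52_id_front.npz")
core = model["core"]   # assumed key and layout: (n_id=50, n_exp=52, n_verts*3)

id_vec = np.zeros(50); id_vec[0] = 1.0    # toy identity coefficients
exp_vec = np.zeros(52); exp_vec[0] = 1.0  # toy expression coefficients

# verts = core contracted with id_vec and exp_vec, reshaped to (n_verts, 3)
verts = np.einsum("i,j,ijk->k", id_vec, exp_vec, core).reshape(-1, 3)
```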
24 |
25 | Please refer to our [paper](https://openaccess.thecvf.com/content_CVPR_2020/papers/Yang_FaceScape_A_Large-Scale_High_Quality_3D_Face_Dataset_and_Detailed_CVPR_2020_paper.pdf) for more about the bilinear model.
26 |
27 |
28 | ### Description (ver1.0/1.2/1.3)
29 |
30 | Our bilinear model is a statistical model that transforms the base shape of the faces into a vector space representation. We provide two 3DMMs with different numbers of identity parameters:
31 |
32 | - *core_847_50_52.npy* - bilinear model with 52 expression parameters and 50 identity parameters.
33 | - *core_847_300_52.npy* - bilinear model with 52 expression parameters and 300 identity parameters.
34 | - *factors_id_847_50_52.npy* and *factors_id_847_300_52.npy* - identity parameters corresponding to the 847 subjects in the dataset.
35 |
36 | The demo code to use the facescape bilinear model ver1.0/1.2/1.3 can be found [here](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_bilinear_basic.ipynb).
37 |
38 | ### Version Log
39 |
40 | - **v1.6** - The mesh topology has been updated to be fully symmetric. The facial mask is refined. The problem that may produce weird fitting results around the ears has been solved. The parameters required by the fitting demo are also attached. (2021/12/08: The wrong id_mean, id_var, and the missing ft_indices_front have been fixed.)
41 | - **v1.3** - Bilinear model with vertex color is supplemented.
42 | - **v1.2** - The previous v1.0 (core_50_52.npy) was built from a different training set of 888 subjects. This version uses indices consistent with the indices of the released TU-models.
43 | - **v1.0** - Initial release.
44 |
--------------------------------------------------------------------------------
/doc/doc_mview_model.md:
--------------------------------------------------------------------------------
1 | ## FaceScape - Multi-View
2 |
3 | FaceScape provides multi-view images, camera parameters and reconstructed 3D shapes. There are 359 subjects x 20 expressions = 7180 tuples of data. The number of available images exceeds 400k. Please visit [https://facescape.nju.edu.cn/](https://facescape.nju.edu.cn/) for data access. Users who have already applied for FaceScape can log in and download the multi-view data directly.
4 |
5 |
6 |
7 | ### Data Structure
8 | The multi-view images are stored as:
9 | ```
10 | IMGS_ROOT
11 | # ↓ID ↓EXP ↓VIEW
12 | ├───── 1
13 | │ ├── 1_neutral
14 | │ │ ├───── 0.jpg
15 | │ │ ├───── 1.jpg
16 | │ │ ├───── ...
17 | │ │ ├───── N.jpg
18 | │ │ └───── params.json
19 | │ ├── 2_smile
20 | │ ├── ...
21 | │ └── 20_brow_lower
22 | ├───── 2
23 | ├───── ...
24 | └───── 359
25 | ```
26 | The corresponding 3D models are stored as:
27 | ```
28 | MESH_ROOT
29 | # ↓ID ↓EXP
30 | ├───── 1
31 | │ ├── 1_neutral.ply
32 | │ ├── 2_smile.ply
33 | │ ├── ...
34 | │ └── 20_brow_lower.ply
35 | ├───── 2
36 | ├───── ...
37 | └───── 359
38 | ```
39 | The 'params.json' files store various information about the data. The dictionary read from 'params.json' is organized as:
40 |
41 | - '$VIEW$_K' - Intrinsic Matrix [4x4 float]
42 | - '$VIEW$_Rt' - Extrinsic Matrix [3x4 float]
43 | - '$VIEW$_distortion' - Distortion Parameters (k1 k2 p1 p2 k3) [5 float]
44 | - '$VIEW$_width' - Image Width [int]
45 | - '$VIEW$_height' - Image Height [int]
46 | - '$VIEW$_matches' - Number of valid matches [int]
47 | - '$VIEW$_valid' - Image is valid or not [bool]
48 | - '$VIEW$_sn' - Serial Number of Camera [string]
49 | - '$VIEW$_ori' - Original Filename [string]
50 |
51 | where $VIEW$ is the view index in the range [0, N], and N+1 is the number of images in this tuple.
52 |
53 | ### Implementation Notes
54 |
55 | - **Distortion**: To project the reconstructed model onto the images, the images must be undistorted in advance with the provided distortion parameters (see the sketch after this list and the Projection Test below).
56 | - **Valid_label**: Camera parameters whose '$VIEW$_valid' label is False are not reliable. When reconstructing from the multi-view images, please ignore the views whose '$VIEW$_valid' label is False.
57 | - **Publishing limit**: All the images and models in FaceScape cannot be shown in publications except for the subjects listed in the 'publishable_list' (available on the download page after login). This is stated in the license agreement.
58 | - **Camera Model**: Please see Camera Model section below.
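A minimal undistortion sketch is given below. The paths are placeholders; K and the distortion coefficients are read from 'params.json' as described above, and OpenCV is used for the undistortion itself.
```python
import cv2, json
import numpy as np

with open("params.json", 'r') as f:      # placeholder path
    params = json.load(f)

K = np.array(params['0_K'])[:3, :3]      # intrinsics of view 0 (stored as 4x4)
dist = np.array(params['0_distortion'])  # (k1, k2, p1, p2, k3)

img = cv2.imread("0.jpg")                # placeholder image path
undist = cv2.undistort(img, K, dist)     # image is now ready for projection
```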
59 |
60 | ### Parameter Parser
61 | A simple demo is provided to parse the data; it only uses Python's built-in json module, so no extra package needs to be installed.
65 | Parameters can be parsed by the following code in Python:
66 |
67 | ```python
68 | import json
69 |
70 | with open("img/$id$/$exp$.json", 'r') as f:
71 | params = json.load(f) # read parameters
72 | img_num = len(params)//9 # get image number
73 |
74 | test_view = 0 # select a view in 0 ~ img_num-1
75 |
76 | K = params['%d_K' % test_view] # intrinsic mat
77 | Rt = params['%d_Rt' % test_view] # extrinsic mat
78 | dist = params['%d_distortion' % test_view] # distortion parameter
79 | h_src = params['%d_height' % test_view] # height
80 | w_src = params['%d_width' % test_view] # width
81 | valid = params['%d_valid' % test_view] # valid or not
82 | ```
83 |
84 |
85 | ### Camera Model
86 |
87 | The camera parameters of the FaceScape multi-view data use the camera model that is widely used in the computer vision field (CV-Cam for short). This camera coordinate system is different from the one defined in OpenGL (GL-Cam for short) and many other rendering tools. CV-Cam and GL-Cam are shown in the figure below.
88 |
89 |
90 |
91 | Our extrinsic matrix (Rt) can be transformed from CV-Cam to GL-Cam by the following calculation:
92 | ```python
93 | import numpy as np
94 | Rt_gl = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]).dot(Rt_cv)
95 | ```
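For reference, projecting a 3D point with the CV-Cam convention follows the usual pinhole formulation. This is a minimal sketch that ignores distortion; K and Rt are read from 'params.json' as above.
```python
import numpy as np

def project_point(X_world, K, Rt):
    """Project a 3D world point to pixel coordinates (u, v) in the CV-Cam convention."""
    K, Rt = np.asarray(K), np.asarray(Rt)
    X_cam = Rt[:, :3] @ X_world + Rt[:, 3]   # world -> camera coordinates
    uvw = K[:3, :3] @ X_cam                  # apply intrinsics
    return uvw[:2] / uvw[2]                  # perspective divide
```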
96 |
97 | ### Projection Test
98 | A simple demo is provided to render the mesh model to fit the image. The code can be found [here](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_mview_projection.ipynb).
99 |
--------------------------------------------------------------------------------
/doc/doc_tu_model.md:
--------------------------------------------------------------------------------
1 | # Facescape TU model
2 |
3 | The data available for download contains 847 subjects × 20 expressions, for a total of 16940 models, which is roughly 90% of the complete data. The remaining 10% is withheld for potential evaluation or benchmarking in the future.
4 |
5 |
6 |
7 | ### Models
8 | There are 847 tuples of topologically uniformed models. Each tuple of data consists of:
9 |
10 | * 20 base mesh models (/models_reg/$IDENTITY$_$EXPRESSION$.obj)
11 | * 20 displacement maps (/dpmap/$IDENTITY$_$EXPRESSION$.png)
12 | * 1 base material file (/models_reg/$IDENTITY$_$EXPRESSION$.obj.mtl)
13 | * 1 texture (/models_reg/$IDENTITY$_$EXPRESSION$.jpg), where $IDENTITY$ is the index of identity (1 - 847) and $EXPRESSION$ is the index of expression (1 - 20). Please note that some of the models' texture maps (index: 360 - 847) are mosaicked around the eyes to protect the privacy of the participants.
14 |
15 | ### Feature
16 | * Topologically uniformed.
17 | The geometric models of different identities and different expressions share the same mesh topology, which makes facial features easy to align. This also helps in building a 3D morphable model.
18 | * Displacement map + base mesh.
19 | We use base shapes to represent rough geometry and displacement maps to represent detailed geometry, a two-layer representation for our extremely detailed face shapes. Some lightweight software such as MeshLab can only visualize the base mesh model/texture. Displacement maps can be loaded and visualized in Maya, ZBrush, 3ds Max, etc.
20 | * 20 specific expressions.
21 | The subjects are asked to perform 20 specific expressions for capturing: neutral, smile, mouth-stretch, anger, jaw-left, jaw-right, jaw-forward, mouth-left, mouth-right, dimpler, chin-raiser, lip-puckerer, lip-funneler, sadness, lip-roll, grin, cheek-blowing, eye-closed, brow-raiser, brow-lower.
22 | * High resolution.
23 | The texture maps and displacement maps reach 4K resolution, preserving highly detailed texture and geometry.
24 |
25 |
26 |
27 | ### Information list
28 | A text file containing the ages and genders of the subjects. From left to right, each row gives the index, gender (m-male, f-female), age, and valid label; '-' means the information is not provided. The valid label is a [1 + 4]-digit binary number (1-True, 0-False): the first digit indicates whether the model for this person is complete and valid, and the remaining four indicate whether the obj model, mtl material, jpg texture, and png dpmap are missing (a parsing sketch is given below).
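A hypothetical parsing sketch is given below; the filename and the whitespace delimiter are assumptions, while the field order follows the description above.
```python
# Hypothetical parser for the information list.
def parse_info_list(path="info_list_v2.txt"):
    subjects = {}
    with open(path) as f:
        for line in f:
            if not line.strip():
                continue
            idx, gender, age, valid = line.split()[:4]
            subjects[int(idx)] = {
                "gender": gender,                        # 'm', 'f', or '-'
                "age": None if age == '-' else int(age),
                "complete": valid[0] == '1',             # first digit: complete & valid
                "flags": valid[1:],                      # obj / mtl / jpg / dpmap digits
            }
    return subjects
```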
29 |
30 | ### Publishable list
31 | A text file containing the indices of the models that can be used in paper publications or presentations. Please read the 4th term of the license for more about this policy. The publishable list may be updated in the future.
32 |
33 |
--------------------------------------------------------------------------------
/doc/external_link_fsbm.md:
--------------------------------------------------------------------------------
1 | ## External Link to download Bilinear model v1.6
2 |
3 | Considering that many researchers only require the bilinear model for running the demos or other purposes, we provide external download links for bilinear model v1.6: [Google Drive](https://drive.google.com/drive/folders/1nI5rI2lxSdJ4jv3o3026GWmZcbtf6OSc?usp=sharing) or [NJU Drive](https://box.nju.edu.cn/d/b8ca3f2d4a95437993f5/).
4 |
5 | By downloading or using the data from these links, you agree to the [license agreement](https://github.com/zhuhao-nju/facescape/blob/master/doc/License_Agreement.pdf) and its rules, including no commercial use and no redistribution.
6 |
7 | The links above only provide bilinear model v1.6, which is identical to the copy that can be downloaded from https://facescape.nju.edu.cn/ using the license key. Previous versions and other data still need to be downloaded from https://facescape.nju.edu.cn/ after requesting a license key. The description of the FaceScape bilinear model can be found [here](https://github.com/zhuhao-nju/facescape_debug/blob/master/doc/doc_bilinear_model.md).
8 |
--------------------------------------------------------------------------------
/doc/facescape_googledrive.md:
--------------------------------------------------------------------------------
1 | # FaceScape on Google Drive
2 |
3 | ### Request share link
4 |
5 | Considering that downloading from our server may be unstable in some regions outside China, we use Google Drive to share the data as an alternative option. The share link can be requested and is only valid for **two weeks** after you receive it. The data behind the Google Drive share link is identical to the data on the FaceScape website.
6 |
7 | To request the share link, the following points should be noted:
8 |
9 | * For anyone who has already applied for and received the license key, just send the request to facescape@outlook.com. Please use the email address that was used to apply for the license key.
10 |
11 | * For anyone who does not have a license key yet, please first apply for it at https://facescape.nju.edu.cn and mention the request for a share link in the application email.
12 |
13 | * The share link is only valid for **two weeks** after you receive it. Please do NOT distribute the share link to others.
14 |
15 |
16 | ### Download in command line
17 |
18 | The content in the Google Drive shared folder can be downloaded from the command line using tools like [gdrive](https://github.com/prasmussen/gdrive), a tiny multi-platform downloading tool. The gdrive executable for Linux or Windows can be obtained [here](https://github.com/prasmussen/gdrive/releases/tag/2.1.1).
19 |
20 | Usage of gdrive:
21 | ```
22 | gdrive download [fileID or folderID] --recursive
23 | ```
24 |
25 | The [fileID] can be identified from the share link, like:
26 | `https://drive.google.com/file/d/[fileID]/view?usp=sharing`
27 |
28 | The [folderID] can be identified from the share link, like:
29 | `https://drive.google.com/drive/folders/[folderID]?usp=sharing`
30 |
31 | The share link can be viewed by "right-click --> get share link" in google drive.
32 |
33 | ### Content list
34 | ```
35 | ├─── Information (2 files, 16k)
36 | ├─── Parametric Model (3 files, 15.4G)
37 | ├─── TU-Model (9 files, 120G)
38 | ├─── Multi-View Data (36 files, 689G)
39 | ├─── License_Agreement.pdf
40 | └─── facescape_google_drive.md
41 | ```
42 | Please refer to our [project page](https://github.com/zhuhao-nju/facescape) for detailed description.
43 |
44 |
--------------------------------------------------------------------------------
/figures/benchmark_eval.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/figures/benchmark_eval.jpg
--------------------------------------------------------------------------------
/figures/cam_coord_fs.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/figures/cam_coord_fs.jpg
--------------------------------------------------------------------------------
/figures/facescape_all.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/figures/facescape_all.jpg
--------------------------------------------------------------------------------
/figures/facescape_bm.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/figures/facescape_bm.jpg
--------------------------------------------------------------------------------
/figures/facescape_info.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/figures/facescape_info.jpg
--------------------------------------------------------------------------------
/figures/facescape_mview.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/figures/facescape_mview.jpg
--------------------------------------------------------------------------------
/figures/facescape_tu.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/figures/facescape_tu.jpg
--------------------------------------------------------------------------------
/samples/download_sample.sh:
--------------------------------------------------------------------------------
1 | # check if samples.tar.gz exists
2 | if [ -f ./samples.tar.gz ] ; then
3 | echo "samples.tar.gz has already been downloaded."
4 | fi
5 |
6 | # download samples.tar.gz
7 | if [ ! -f ./samples.tar.gz ] ; then
8 | wget --no-check-certificate 'https://box.nju.edu.cn/f/e33302f9b9ce4c7597d0/?dl=1' -O ./samples.tar.gz
9 | fi
10 |
11 | # extract files
12 | tar -zxf samples.tar.gz -k
13 | echo "samples have been extracted."
14 |
--------------------------------------------------------------------------------
/samples/sample_keypoint3d.txt:
--------------------------------------------------------------------------------
1 | -1.749958970588235130e-01 -1.422514999999999752e-01 3.182118264705881217e+00
2 | -1.657168970588235313e-01 -8.745849999999998070e-02 3.186478264705881358e+00
3 | -1.578048970588235567e-01 -3.706549999999997347e-02 3.190362264705880690e+00
4 | -1.461938970588235187e-01 9.430500000000036076e-03 3.196720264705881220e+00
5 | -1.363898970588235393e-01 6.177750000000004071e-02 3.187260264705881418e+00
6 | -1.150398970588235315e-01 1.067215000000000247e-01 3.146095264705881078e+00
7 | -9.429689705882353390e-02 1.438075000000000325e-01 3.097821264705880928e+00
8 | -6.590189705882353033e-02 1.714535000000000364e-01 3.046801264705881529e+00
9 | -1.003289705882353007e-02 1.913815000000000377e-01 3.020528264705880872e+00
10 | 5.528310294117647072e-02 1.800155000000000505e-01 3.033636264705880770e+00
11 | 1.061471029411764633e-01 1.547725000000000350e-01 3.065823264705881179e+00
12 | 1.441731029411764675e-01 1.301805000000000323e-01 3.093274264705881293e+00
13 | 1.789781029411764424e-01 9.024250000000000327e-02 3.106993264705881330e+00
14 | 2.003381029411764880e-01 4.090050000000003405e-02 3.106300264705881276e+00
15 | 2.129841029411764786e-01 -1.986499999999974397e-03 3.091864264705881382e+00
16 | 2.248881029411764487e-01 -4.943349999999996358e-02 3.086024264705881315e+00
17 | 2.317211029411764822e-01 -1.045394999999999797e-01 3.085023264705880841e+00
18 | -1.439928970588235102e-01 -1.562494999999999856e-01 3.014971264705881282e+00
19 | -1.233048970588235260e-01 -1.665304999999999702e-01 2.978027264705881194e+00
20 | -9.969089705882352992e-02 -1.683844999999999648e-01 2.959304264705880705e+00
21 | -7.365689705882352833e-02 -1.653084999999999694e-01 2.948828264705880997e+00
22 | -5.106389705882353330e-02 -1.549544999999999673e-01 2.944215264705881019e+00
23 | 4.374310294117646908e-02 -1.466574999999999684e-01 2.929077264705880701e+00
24 | 6.888510294117647370e-02 -1.543984999999999663e-01 2.932146264705880689e+00
25 | 9.740410294117646250e-02 -1.561984999999999624e-01 2.933953264705881026e+00
26 | 1.315081029411764857e-01 -1.527774999999999828e-01 2.946040264705881206e+00
27 | 1.585201029411764662e-01 -1.396324999999999927e-01 2.965632264705881482e+00
28 | -7.907897058823529918e-03 -9.247749999999997639e-02 2.934213264705880952e+00
29 | -1.151989705882353054e-02 -5.289949999999998820e-02 2.917562264705881425e+00
30 | -1.489789705882353134e-02 -1.678049999999997599e-02 2.905608264705881183e+00
31 | -1.876689705882353051e-02 1.308750000000002967e-02 2.901294264705881254e+00
32 | -4.072089705882353527e-02 2.382750000000002921e-02 2.959961264705881057e+00
33 | -2.960789705882353046e-02 3.236350000000001725e-02 2.953616264705881456e+00
34 | -1.229589705882353119e-02 3.764650000000002716e-02 2.949657264705881410e+00
35 | 4.610102941176467961e-03 3.501550000000003271e-02 2.943093264705881396e+00
36 | 2.067010294117646585e-02 3.174350000000003558e-02 2.950079264705880888e+00
37 | -1.098288970588235242e-01 -1.127524999999999777e-01 2.997800264705881013e+00
38 | -9.511489705882353318e-02 -1.171094999999999775e-01 2.978996264705880748e+00
39 | -7.142589705882353146e-02 -1.147934999999999789e-01 2.974793264705881235e+00
40 | -5.096389705882353044e-02 -1.019734999999999808e-01 2.978184264705880935e+00
41 | -7.020689705882353360e-02 -9.850749999999997009e-02 2.980221264705881445e+00
42 | -9.169489705882352681e-02 -1.013134999999999730e-01 2.992095264705881164e+00
43 | 4.683410294117647255e-02 -9.904449999999997978e-02 2.964778264705881128e+00
44 | 7.092310294117647185e-02 -1.047284999999999744e-01 2.955093264705880962e+00
45 | 9.475910294117646793e-02 -1.038524999999999726e-01 2.956299264705880780e+00
46 | 1.136221029411764727e-01 -9.846849999999997272e-02 2.963942264705881513e+00
47 | 9.491710294117647329e-02 -8.923049999999997650e-02 2.961290264705881192e+00
48 | 6.682410294117646643e-02 -8.976549999999997032e-02 2.955751264705881454e+00
49 | -6.529089705882352990e-02 8.611350000000000948e-02 3.009020264705880798e+00
50 | -5.019889705882352865e-02 8.428350000000001119e-02 2.975986264705881013e+00
51 | -2.734989705882353087e-02 8.376650000000002150e-02 2.951984264705880712e+00
52 | -1.454089705882352992e-02 8.625650000000001372e-02 2.951353264705881330e+00
53 | 3.591102941176468888e-03 8.405250000000002997e-02 2.955353264705880889e+00
54 | 3.019210294117646853e-02 8.878750000000001918e-02 2.960834264705881402e+00
55 | 6.172010294117646900e-02 9.618150000000003086e-02 2.990527264705881372e+00
56 | 3.239410294117647110e-02 1.075415000000000121e-01 2.965249264705881238e+00
57 | 8.921102941176470291e-03 1.148585000000000300e-01 2.964963264705881230e+00
58 | -1.414889705882352994e-02 1.167755000000000321e-01 2.962712264705881005e+00
59 | -3.253089705882353244e-02 1.147965000000000235e-01 2.968982264705880780e+00
60 | -4.788389705882353103e-02 1.044645000000000157e-01 2.986104264705881306e+00
61 | -6.381789705882352781e-02 8.622150000000000647e-02 3.008641264705881113e+00
62 | -2.885289705882353173e-02 9.492050000000001875e-02 2.971416264705880828e+00
63 | -1.045389705882353130e-02 9.690650000000000652e-02 2.965152264705881002e+00
64 | 9.064102941176467587e-03 9.833950000000002412e-02 2.970884264705881073e+00
65 | 5.647210294117646634e-02 9.562049999999999716e-02 2.992537264705880773e+00
66 | 8.688102941176469723e-03 9.453150000000004605e-02 2.968360264705880880e+00
67 | -1.114089705882353142e-02 9.536150000000004345e-02 2.962014264705881139e+00
68 | -2.953089705882352978e-02 9.337750000000000217e-02 2.968273264705881154e+00
69 |
--------------------------------------------------------------------------------
/toolkit/README.md:
--------------------------------------------------------------------------------
1 | # README - toolkit
2 |
3 | ### Environment
4 |
5 | The toolkit demos have been tested with Python 3.6 on Ubuntu. We recommend creating an environment using [Anaconda](https://www.anaconda.com/products/individual#Downloads):
6 |
7 | ```
8 | conda create -n facescape python=3.6 -y
9 | conda activate facescape
10 | ```
11 |
12 | Install the required packages:
13 | ```
14 | pip install -r requirements.txt
15 | conda install jupyter -y # for running in local jupyter
16 | conda install nb_conda_kernels -y # enable notebook kernels
17 | ```
18 |
19 | Then you can view the demos locally by starting Jupyter Notebook:
20 | ```
21 | jupyter notebook
22 | ```
23 |
24 | ### Download sample data
25 |
26 | Run the following script to download sample data:
27 | ```
28 | cd samples/ && ./download_sample.sh
29 | ```
30 |
31 | ### Demos
32 |
33 | The links below use an external Jupyter renderer, because GitHub often fails to render Jupyter notebooks online.
34 |
35 | * [bilinear_model-basic](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_bilinear_basic.ipynb) - use facescape bilinear model to generate 3D mesh models.
36 | * [bilinear_model-fit](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_bilinear_fit.ipynb) - fit the bilinear model to 2D/3D landmarks.
37 | * [multi-view-project](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_mview_projection.ipynb) - Project 3D models to multi-view images.
38 | * [landmark](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_landmark.ipynb) - extract landmarks using predefined vertex index.
39 | * [facial_mask](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_mask.ipynb) - extract facial region from the full head TU-models.
40 | * [render](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_render.ipynb) - render TU-models to color images and depth map.
41 | * [alignment](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_align.ipynb) - align all the multi-view models.
42 | * [symmetry](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_symmetry.ipynb) - get the correspondence of the vertices on TU-models from left side to right side.
43 | * [rig](https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_rig.ipynb) - rig 20 expressions to 52 expressions.
44 |
45 |
46 |
--------------------------------------------------------------------------------
/toolkit/demo_output/fm_result.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/demo_output/fm_result.jpg
--------------------------------------------------------------------------------
/toolkit/demo_output/lm_result.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/demo_output/lm_result.jpg
--------------------------------------------------------------------------------
/toolkit/demo_output/mview_depth_view49.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/demo_output/mview_depth_view49.jpg
--------------------------------------------------------------------------------
/toolkit/demo_output/mview_rend_view49.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/demo_output/mview_rend_view49.jpg
--------------------------------------------------------------------------------
/toolkit/demo_output/tu_color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/demo_output/tu_color.jpg
--------------------------------------------------------------------------------
/toolkit/demo_output/tu_depth.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/demo_output/tu_depth.jpg
--------------------------------------------------------------------------------
/toolkit/demo_output/tu_shade.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/demo_output/tu_shade.jpg
--------------------------------------------------------------------------------
/toolkit/demo_output/tu_tex.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/demo_output/tu_tex.jpg
--------------------------------------------------------------------------------
/toolkit/demo_render.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Render\n",
8 | "\n",
9 | "This demo shows how to render TU-models and multi-view models to color images and depth maps. Firstly, make sure the environment and sample data have been prepared following [README-toolkit](https://github.com/zhuhao-nju/facescape/blob/master/toolkit/README.md).\n"
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "### (1) Render multi-view model"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": 1,
22 | "metadata": {},
23 | "outputs": [
24 | {
25 | "name": "stdout",
26 | "output_type": "stream",
27 | "text": [
28 | "results saved to ./demo_output/\n"
29 | ]
30 | }
31 | ],
32 | "source": [
33 | "# render multi-view model\n",
34 | "import cv2, json, os\n",
35 | "import numpy as np\n",
36 | "import src.renderer as renderer\n",
37 | "\n",
38 | "cam_idx = 49\n",
39 | "mesh_dirname = \"../samples/sample_mview_data/4_anger.ply\"\n",
40 | "\n",
41 | "# read params to find a camera setting\n",
42 | "with open(\"../samples/sample_mview_data/4_anger/params.json\", 'r') as f:\n",
43 | " params = json.load(f)\n",
44 | "\n",
45 | "# extract KRt\n",
46 | "K = np.array(params['%d_K' % cam_idx])\n",
47 | "Rt = np.array(params['%d_Rt' % cam_idx])\n",
48 | "h_src = params['%d_height' % cam_idx]\n",
49 | "w_src = params['%d_width' % cam_idx]\n",
50 | "\n",
51 | "# scale K RT h w\n",
52 | "scale = 0.2\n",
53 | "h, w = int(h_src * scale), int(w_src * scale)\n",
54 | "K[:2,:] = K[:2,:] * scale\n",
55 | "\n",
56 | "# render\n",
57 | "rend_depth, rend_img = renderer.render_cvcam(mesh_dirname, K, Rt, rend_size=(h, w))\n",
58 | "\n",
59 | "# save image and depth\n",
60 | "os.makedirs(\"./demo_output/\", exist_ok = True)\n",
61 | "cv2.imwrite(\"./demo_output/mview_rend_view%d.jpg\" % cam_idx, rend_img)\n",
62 |     "rend_depth_vis = np.where(rend_depth != 0, rend_depth - np.min(rend_depth[rend_depth != 0]), 0)\n",
63 |     "rend_depth_vis = (rend_depth_vis / np.max(rend_depth_vis) * 255).astype(np.uint8)\n",
64 | "cv2.imwrite(\"./demo_output/mview_depth_view%d.jpg\" % cam_idx, rend_depth_vis)\n",
65 | "print(\"results saved to ./demo_output/\")"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {},
71 | "source": [
72 | "### (2) Render TU-model (base)"
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": 2,
78 | "metadata": {},
79 | "outputs": [
80 | {
81 | "name": "stdout",
82 | "output_type": "stream",
83 | "text": [
84 | "results saved to ./demo_output/\n"
85 | ]
86 | }
87 | ],
88 | "source": [
89 |     "# render TU-model (base)\n",
90 | "import cv2, json, os, trimesh\n",
91 | "import numpy as np\n",
92 | "import src.renderer as renderer\n",
93 | "\n",
94 | "# read tu base mesh\n",
95 | "tu_base_mesh = trimesh.load(\"../samples/sample_tu_model/1_neutral.obj\", process=False)\n",
96 | "\n",
97 | "# extract K Rt\n",
98 | "K = np.array([[2000, 0, 256],\n",
99 | " [0, 2000, 256],\n",
100 | " [0, 0, 1]], dtype=np.float64)\n",
101 | "\n",
102 | "Rt = np.array([[1, 0, 0, 0],\n",
103 | " [0, -1, 0, 0],\n",
104 | " [0, 0, -1, 1200]], dtype=np.float64)\n",
105 | "h, w = 512, 512\n",
106 | "tu_base_mesh.visual.material.diffuse = np.array([255, 255, 255, 255], dtype=np.uint8)\n",
107 | "\n",
108 | "\n",
109 | "# render texture image and depth\n",
110 | "rend_depth, rend_tex = renderer.render_cvcam(tu_base_mesh, K, Rt, rend_size=(h, w), \n",
111 | " flat_shading=True)\n",
112 | "# render color image\n",
113 | "_, rend_color = renderer.render_cvcam(tu_base_mesh, K, Rt, rend_size=(h, w), \n",
114 | " flat_shading=False)\n",
115 | "\n",
116 | "# render shade image\n",
117 | "tu_base_mesh.visual.material.image = np.ones((1, 1, 3), dtype=np.uint8)*255\n",
118 | "_, rend_shade = renderer.render_cvcam(tu_base_mesh, K, Rt, rend_size=(h, w), \n",
119 | " flat_shading=False)\n",
120 | "\n",
121 | "# save all\n",
122 | "rend_depth_vis = rend_depth.copy()\n",
123 | "rend_depth_vis[rend_depth!=0] = rend_depth_vis[rend_depth!=0] - np.min(rend_depth[rend_depth!=0])\n",
124 | "rend_depth_vis = (rend_depth_vis / np.max(rend_depth_vis) * 255).astype(np.uint8)\n",
125 | "\n",
126 | "# save image and depth\n",
127 | "os.makedirs(\"./demo_output/\", exist_ok = True)\n",
128 | "cv2.imwrite(\"./demo_output/tu_tex.jpg\", rend_tex)\n",
129 | "cv2.imwrite(\"./demo_output/tu_color.jpg\", rend_color)\n",
130 | "cv2.imwrite(\"./demo_output/tu_shade.jpg\", rend_shade)\n",
131 |     "# rend_depth_vis was computed above with the zero background masked out\n",
132 |     "\n",
133 | "cv2.imwrite(\"./demo_output/tu_depth.jpg\", rend_depth_vis)\n",
134 | "print(\"results saved to ./demo_output/\")"
135 | ]
136 | },
137 | {
138 | "cell_type": "markdown",
139 | "metadata": {},
140 | "source": [
141 | "\n",
142 | "### (3) Render TU-model (base mesh + displacement map)\n",
143 | "\n",
144 |     "Rendering with a displacement map is not supported by many light-weight rendering libraries. Professional rendering software is required to render the displacement map, such as Blender, 3ds Max, MAYA, ZBrush, etc. ~~TODO: Blender is an open-source rendering software; here we provide a simple project for rendering the base mesh + displacement map in Blender.~~\n"
145 | ]
146 | }
147 | ],
148 | "metadata": {
149 | "kernelspec": {
150 | "display_name": "Python [conda env:facescape]",
151 | "language": "python",
152 | "name": "conda-env-facescape-py"
153 | },
154 | "language_info": {
155 | "codemirror_mode": {
156 | "name": "ipython",
157 | "version": 3
158 | },
159 | "file_extension": ".py",
160 | "mimetype": "text/x-python",
161 | "name": "python",
162 | "nbconvert_exporter": "python",
163 | "pygments_lexer": "ipython3",
164 | "version": "3.6.13"
165 | }
166 | },
167 | "nbformat": 4,
168 | "nbformat_minor": 4
169 | }
170 |
--------------------------------------------------------------------------------
/toolkit/demo_rig.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "d2ede359",
6 | "metadata": {},
7 | "source": [
8 | "# Expression Rig \n",
9 | "\n",
10 |     "This demo shows how to rig the 20 captured expressions to the 52 blendshape expressions. Please make sure the environment has been prepared following [README-toolkit](https://github.com/zhuhao-nju/facescape/blob/master/toolkit/README.md), and that the test data has been downloaded from this [link](https://box.nju.edu.cn/f/95f48b181f394c248790/?dl=1) and extracted to 'facescape/toolkit/test_data/'."
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 1,
16 | "id": "fcfd5365",
17 | "metadata": {},
18 | "outputs": [
19 | {
20 | "name": "stderr",
21 | "output_type": "stream",
22 | "text": [
23 | "[step 1/8] Loading templates: 100%|██████████| 51/51 [00:05<00:00, 9.66it/s]\n",
24 | "[step 2/8] Computing M for templates: 100%|██████████| 52/52 [03:44<00:00, 4.32s/it]\n"
25 | ]
26 | },
27 | {
28 | "name": "stdout",
29 | "output_type": "stream",
30 | "text": [
31 | "[step 3/8] Building align_mesh\n"
32 | ]
33 | },
34 | {
35 | "name": "stderr",
36 | "output_type": "stream",
37 | "text": [
38 | "[step 4/8] Loading TU models: 100%|██████████| 19/19 [00:02<00:00, 8.77it/s]\n",
39 | "[step 5/8] Computing M for TU models: 100%|██████████| 20/20 [01:25<00:00, 4.27s/it]\n"
40 | ]
41 | },
42 | {
43 | "name": "stdout",
44 | "output_type": "stream",
45 | "text": [
46 | "[step 6/8] Building A\n"
47 | ]
48 | },
49 | {
50 | "name": "stderr",
51 | "output_type": "stream",
52 | "text": [
53 | "[step 7/8] Optimizing: 100%|██████████| 5/5 [32:44<00:00, 392.90s/it]\n",
54 | "[step 8/8] Saving result models: 100%|██████████| 51/51 [06:36<00:00, 7.77s/it]"
55 | ]
56 | },
57 | {
58 | "name": "stdout",
59 | "output_type": "stream",
60 | "text": [
61 | "Done, results saved to ./test_data/rig_data/1/rigging/\n"
62 | ]
63 | },
64 | {
65 | "name": "stderr",
66 | "output_type": "stream",
67 | "text": [
68 | "\n"
69 | ]
70 | }
71 | ],
72 | "source": [
73 | "from src.rig import rig_20to52\n",
74 | "\n",
75 | "rig_20to52(id_dir = \"./test_data/rig_data/1/\", \n",
76 | " tplt_dir = \"./test_data/rig_data/templates/\", \n",
77 | " params_file = \"./test_data/rig_data/rig_params.json\")"
78 | ]
79 | },
80 | {
81 | "cell_type": "markdown",
82 | "id": "68ebf9e4",
83 | "metadata": {},
84 | "source": [
85 | "### Name of Expressions before Rigging\n",
86 | "\n",
87 | "| ID | Name | | ID | Name | | ID | Name | | ID | Name | | ID | Name |\n",
88 | "|:--:|:-------------:|:-:|:--:|:-----------:|:-:|:--:|:------------:|:-:|:--:|:------------:|:-:|:--:|:-------------:|\n",
89 | "| 1 | Neutral | | 5 | Jaw_Left | | 9 | Mouth_Right | | 13 | Lip_Funneler | | 17 | Cheek_Blowing |\n",
90 | "| 2 | Smile | | 6 | Jaw_Right | | 10 | Dimpler | | 14 | Sadness | | 18 | Eye_Closed |\n",
91 | "| 3 | Mouth_Stretch | | 7 | Jaw_Forward | | 11 | Chin_Raiser | | 15 | Lip_Roll | | 19 | Brow_Raiser |\n",
92 | "| 4 | Anger | | 8 | Mouth_Left | | 12 | Lip_Puckerer | | 16 | Grin | | 20 | Brow_Lower |\n",
93 | "\n",
94 | "### Name of Expressions after Rigging\n",
95 | "\n",
96 | "| ID | Name | | ID | Name | | ID | Name | | ID | Name | | ID | Name |\n",
97 | "|:--:|:-----------:|:-:|:--:|:--------:|:-:|:--:|:---------------:|:-:|:--:|:-------------:|:-:|:--:|:--------------:|\n",
98 | "| 1 | EyeBlink_L | | 11 | EyeOut_L | | 21 | LipsTogether | | 31 | MouthSmile_L | | 41 | LipsPucker |\n",
99 | "| 2 | EyeBlink_R | | 12 | EyeOut_R | | 22 | JawLeft | | 32 | MouthSmile_R | | 42 | LipsFunnel |\n",
100 | "| 3 | EyeSquint_L | | 13 | EyeUp_L | | 23 | JawRight | | 33 | MouthDimple_L | | 43 | MouthLeft |\n",
101 | "| 4 | EyeSquint_R | | 14 | EyeUp_R | | 24 | JawFwd | | 34 | MouthDimple_R | | 44 | MouthRight |\n",
102 | "| 5 | EyeDown_L | | 15 | BrowsD_L | | 25 | LipsUpperUp_L | | 35 | LipsStretch_L | | 45 | ChinLowerRaise |\n",
103 | "| 6 | EyeDown_R | | 16 | BrowsD_R | | 26 | LipsUpperUp_R | | 36 | LipsStretch_R | | 46 | ChinUpperRaise |\n",
104 | "| 7 | EyeIn_L | | 17 | BrowsU_C | | 27 | LipsLowerDown_L | | 37 | MouthFrown_L | | 47 | Sneer_L |\n",
105 | "| 8 | EyeIn_R | | 18 | BrowsU_L | | 28 | LipsLowerDown_R | | 38 | MouthFrown_R | | 48 | Sneer_R |\n",
106 | "| 9 | EyeOpen_L | | 19 | BrowsU_R | | 29 | LipsUpperClose | | 39 | MouthPress_L | | 49 | Puff |\n",
107 | "| 10 | EyeOpen_R | | 20 | JawOpen | | 30 | LipsLowerClose | | 40 | MouthPress_R | | 50 | CheekSquint_L |\n",
108 | "| 0 | Neutral | | | | | | | | | | | 51 | CheekSquint_R |"
109 | ]
110 | }
111 | ],
112 | "metadata": {
113 | "kernelspec": {
114 | "display_name": "Python [conda env:facescape] *",
115 | "language": "python",
116 | "name": "conda-env-facescape-py"
117 | },
118 | "language_info": {
119 | "codemirror_mode": {
120 | "name": "ipython",
121 | "version": 3
122 | },
123 | "file_extension": ".py",
124 | "mimetype": "text/x-python",
125 | "name": "python",
126 | "nbconvert_exporter": "python",
127 | "pygments_lexer": "ipython3",
128 | "version": "3.6.13"
129 | }
130 | },
131 | "nbformat": 4,
132 | "nbformat_minor": 5
133 | }
134 |
--------------------------------------------------------------------------------
/toolkit/predef/facial_mask_v10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/predef/facial_mask_v10.png
--------------------------------------------------------------------------------
/toolkit/predef/facial_mask_v16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/predef/facial_mask_v16.png
--------------------------------------------------------------------------------
/toolkit/predef/landmark_indices.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/predef/landmark_indices.npz
--------------------------------------------------------------------------------
/toolkit/predef/landmark_indices.txt:
--------------------------------------------------------------------------------
1 | 23404
2 | 4607
3 | 4615
4 | 4655
5 | 20356
6 | 4643
7 | 5022
8 | 5013
9 | 1681
10 | 1692
11 | 11470
12 | 10441
13 | 1336
14 | 1343
15 | 1303
16 | 1295
17 | 2372
18 | 6143
19 | 6141
20 | 6126
21 | 6113
22 | 6109
23 | 2844
24 | 2762
25 | 2765
26 | 2774
27 | 2789
28 | 6053
29 | 6041
30 | 1870
31 | 1855
32 | 4728
33 | 4870
34 | 1807
35 | 1551
36 | 1419
37 | 3434
38 | 3414
39 | 3447
40 | 3457
41 | 3309
42 | 3373
43 | 3179
44 | 151
45 | 127
46 | 143
47 | 3236
48 | 47
49 | 21018
50 | 4985
51 | 4898
52 | 6571
53 | 1575
54 | 1663
55 | 1599
56 | 1899
57 | 12138
58 | 5231
59 | 21978
60 | 5101
61 | 21067
62 | 21239
63 | 11378
64 | 11369
65 | 11553
66 | 12048
67 | 5212
68 | 21892
69 |
--------------------------------------------------------------------------------
/toolkit/predef/sym_dict.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/predef/sym_dict.npy
--------------------------------------------------------------------------------
/toolkit/predef/sym_dict_old.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/predef/sym_dict_old.npy
--------------------------------------------------------------------------------
/toolkit/requirements.txt:
--------------------------------------------------------------------------------
1 | # python requirements
2 | cmake
3 | numpy
4 | dlib
5 | opencv-python
6 | Pillow
7 | scipy
8 | trimesh
9 | pyrender
10 | openmesh
11 | scikit-learn
12 | tqdm
13 |
--------------------------------------------------------------------------------
/toolkit/src/camera.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2020, Hao Zhu, NJU.
3 | Camera projection and inverse-projection.
4 | """
5 |
6 | import numpy as np
7 |
8 | # Basic camera projection and inv-projection
9 | class CamPara():
10 | def __init__(self, K=None, Rt=None):
11 | img_size = [200,200]
12 | if K is None:
13 | K = np.array([[500, 0, 99.5],
14 | [0, 500, 99.5],
15 | [0, 0, 1]])
16 | else:
17 | K = np.array(K)
18 | if Rt is None:
19 | Rt = np.array([[1, 0, 0, 0],
20 | [0, 1, 0, 0],
21 | [0, 0, 1, 0]])
22 | else:
23 | Rt = np.array(Rt)
24 | R = Rt[:,:3]
25 | t = Rt[:,3]
26 | self.cam_center = -np.dot(R.T,t)
27 |
28 | # compute projection and inv-projection matrix
29 | self.proj_mat = np.dot(K, Rt)
30 | self.inv_proj_mat = np.linalg.pinv(self.proj_mat)
31 |
32 | # compute ray directions of camera center pixel
33 | c_uv = np.array([float(K[0, 2]), float(K[1, 2])])
34 | self.center_dir = self.inv_project(c_uv)
35 |
36 | def get_camcenter(self):
37 | return self.cam_center
38 |
39 | def get_center_dir(self):
40 | return self.center_dir
41 |
42 | def project(self, p_xyz):
43 | p_xyz = np.double(p_xyz)
44 | p_uv_1 = np.dot(self.proj_mat, np.append(p_xyz, 1))
45 | if p_uv_1[2] == 0:
46 | return 0
47 | p_uv = (p_uv_1/p_uv_1[2])[:2]
48 | return p_uv
49 |
50 | # inverse projection, if depth is None, return a normalized direction
51 | def inv_project(self, p_uv, depth = None, plane_correct = False):
52 | p_uv = np.double(p_uv)
53 | p_xyz_1 = np.dot(self.inv_proj_mat, np.append(p_uv, 1))
54 | if p_xyz_1[3] == 0:
55 | return 0
56 | p_xyz = (p_xyz_1/p_xyz_1[3])[:3]
57 | p_dir = p_xyz - self.cam_center
58 | p_dir = p_dir / np.linalg.norm(p_dir)
59 | if depth is None:
60 | return p_dir
61 | else:
62 | if plane_correct is True:
63 | depth_c = depth/np.dot(self.center_dir, p_dir)
64 | else:
65 | depth_c = depth
66 | real_xyz = self.cam_center + p_dir * depth_c
67 | return real_xyz
68 |
69 |
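70 | # Minimal usage sketch: project an arbitrary 3D point with the default camera, then
71 | # back-project the pixel at the same z-depth (plane_correct converts z-depth to ray length).
72 | if __name__ == "__main__":
73 |     cam = CamPara()
74 |     p_xyz = np.array([0.1, -0.2, 3.0])
75 |     p_uv = cam.project(p_xyz)                                      # pixel coordinates (u, v)
76 |     p_rec = cam.inv_project(p_uv, depth=3.0, plane_correct=True)   # recovers p_xyz
77 |     print(p_uv, p_rec)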
--------------------------------------------------------------------------------
/toolkit/src/facescape_bm.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2020, Hao Zhu, Haotian Yang, NJU.
3 | Bilinear model.
4 | """
5 |
6 | import numpy as np
7 | from src.mesh_obj import mesh_obj
8 |
9 | class facescape_bm(object):
10 | def __init__(self, filename):
11 | bm_model = np.load(filename, allow_pickle=True)
12 | self.shape_bm_core = bm_model['shape_bm_core'] # shape core
13 |
14 | # Calculating the residual converts the shape core into the residual representation
15 | sub_tensor = np.stack((self.shape_bm_core[:, 0, :],) * self.shape_bm_core.shape[1], 1)
16 | res_tensor = self.shape_bm_core - sub_tensor
17 | res_tensor[:, 0, :] = self.shape_bm_core[:, 0, :]
18 | self.shape_bm_core = res_tensor
19 |
20 | self.color_bm_core = bm_model['color_bm_core'] # color core
21 | self.color_bm_mean = bm_model['color_bm_mean'] # color mean
22 |
23 | self.fv_indices = bm_model['fv_indices'] # face - vertex indices
24 | self.ft_indices = bm_model['ft_indices'] # face - texture_coordinate indices
25 | self.fv_indices_front = bm_model['fv_indices_front'] # frontal face-vertex indices
26 | self.ft_indices_front = bm_model['ft_indices_front'] # frontal face-texture_coordinate indices
27 |
28 | self.vc_dict_front = bm_model['vc_dict_front'] # frontal vertex color dictionary
29 | self.v_indices_front = bm_model['v_indices_front'] # frontal vertex indices
30 |
31 | self.vert_num = bm_model['vert_num'] # vertex number
32 | self.face_num = bm_model['face_num'] # face number
33 | self.frontal_vert_num = bm_model['frontal_vert_num'] # frontal vertex number
34 | self.frontal_face_num = bm_model['frontal_face_num'] # frontal face number
35 |
36 | self.texcoords = bm_model['texcoords'] # texture coordinates (constant)
37 | self.facial_mask = bm_model['facial_mask'] # UV facial mask
38 | self.sym_dict = bm_model['sym_dict'] # symmetry dictionary
39 | self.lm_list_v16 = bm_model['lm_list_v16'] # landmark indices
40 |
41 | self.vert_10to16_dict = bm_model['vert_10to16_dict'] # vertex indices dictionary (v1.0 to v1.6)
42 | self.vert_16to10_dict = bm_model['vert_16to10_dict'] # vertex indices dictionary (v1.6 to v1.0)
43 |
44 | if 'id_mean' in bm_model.files:
45 | self.id_mean = bm_model['id_mean'] # identity factors mean
46 | if 'id_var' in bm_model.files:
47 | self.id_var = bm_model['id_var'] # identity factors variance
48 |
49 | # make expression GaussianMixture model
50 | if 'exp_gmm_weights' in bm_model.files:
51 | self.exp_gmm_weights = bm_model['exp_gmm_weights']
52 | if 'exp_gmm_means' in bm_model.files:
53 | self.exp_gmm_means = bm_model['exp_gmm_means']
54 | if 'exp_gmm_covariances' in bm_model.files:
55 | self.exp_gmm_covariances = bm_model['exp_gmm_covariances']
56 |
57 | if 'contour_line_right' in bm_model.files:
58 | self.contour_line_right = bm_model['contour_line_right'].tolist() # contour line - right
59 | if 'contour_line_left' in bm_model.files:
60 | self.contour_line_left = bm_model['contour_line_left'].tolist() # contour line - left
61 | if 'bottom_cand' in bm_model.files:
62 | self.bottom_cand = bm_model['bottom_cand'].tolist() # bottom cand
63 |
64 | # generate full mesh
65 | def gen_full(self, id_vec, exp_vec):
66 | verts = self.shape_bm_core.dot(id_vec).dot(exp_vec).reshape((-1, 3))
67 | mesh = mesh_obj()
68 | mesh.create(vertices = verts,
69 | texcoords = self.texcoords,
70 | faces_v = self.fv_indices,
71 | faces_vt = self.ft_indices)
72 | return mesh
73 |
74 | # generate facial mesh
75 | def gen_face(self, id_vec, exp_vec):
76 | verts = self.shape_bm_core.dot(id_vec).dot(exp_vec).reshape((-1, 3))
77 | mesh = mesh_obj()
78 | mesh.create(vertices = verts[self.v_indices_front],
79 | texcoords = self.texcoords,
80 | faces_v = self.fv_indices_front,
81 | faces_vt = self.ft_indices_front)
82 | return mesh
83 |
84 | # generate facial mesh with vertex color
85 | def gen_face_color(self, id_vec, exp_vec, vc_vec):
86 |
87 | verts = self.shape_bm_core.dot(id_vec).dot(exp_vec).reshape((-1, 3))
88 | vert_colors = self.color_bm_mean + self.color_bm_core.dot(vc_vec)
89 | vert_colors = vert_colors.reshape((-1, 3)) / 255
90 | mesh = mesh_obj()
91 |
92 | new_vert_colors = vert_colors[self.vc_dict_front][:,[2,1,0]]
93 | new_vert_colors[(self.vc_dict_front == -1)] = np.array([0, 0, 0], dtype = np.float32)
94 |
95 | mesh.create(vertices = verts[self.v_indices_front],
96 | vert_colors = new_vert_colors,
97 | texcoords = self.texcoords,
98 | faces_v = self.fv_indices_front,
99 | faces_vt = self.ft_indices_front)
100 | return mesh
101 |
102 |
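103 | # Minimal usage sketch: generate and export a neutral full-head mesh. The model file path
104 | # follows the one used in tex_unwrap_test.py; the 50-dim identity / 52-dim expression sizes,
105 | # the presence of 'id_mean', and the one-hot neutral expression vector are assumptions.
106 | if __name__ == "__main__":
107 |     model = facescape_bm("./bilinear_model_v1.6/facescape_bm_v1.6_847_50_52_id_front.npz")
108 |     id_vec = model.id_mean                    # mean identity factors stored in the model file
109 |     exp_vec = np.zeros(52)
110 |     exp_vec[0] = 1                            # expression 0 = neutral
111 |     full_mesh = model.gen_full(id_vec, exp_vec)
112 |     full_mesh.export("./demo_output", "bm_neutral")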
--------------------------------------------------------------------------------
/toolkit/src/mesh_obj.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2020, Hao Zhu, Haotian Yang, NJU.
3 | OBJ file loader and writer.
4 | """
5 |
6 | import numpy as np, os
7 |
8 | class mesh_obj:
9 | def __init__(self, filename=None):
10 | """Loads a Wavefront OBJ file. """
11 | self.vertices = []
12 | self.vert_colors = []
13 | self.normals = []
14 | self.texcoords = []
15 | self.faces = []
16 | self.adjacent_list = []
17 | material = None
18 |
19 | if filename != None:
20 | for line in open(filename, "r"):
21 | if line.startswith('#'): continue
22 | values = line.split()
23 | if not values: continue
24 | if values[0] == 'v':
25 | if len(values) == 4:
26 | self.vertices.append(list(map(float, values[1:4])))
27 | elif len(values) == 7:
28 | self.vertices.append(list(map(float, values[1:4])))
29 | self.vert_colors.append(list(map(float, values[4:7])))
30 | elif values[0] == 'vn':
31 | self.normals.append(list(map(float, values[1:4])))
32 | elif values[0] == 'vt':
33 | self.texcoords.append(list(map(float, values[1:3])))
34 | elif values[0] in ('usemtl', 'usemat'):
35 | material = values[1]
36 | elif values[0] == 'mtllib':
37 | fn = os.path.dirname(filename) + '/' + os.path.basename(values[1])
38 | if os.path.isfile(fn) is True:
39 |                     self.mtl = self.read_mtl(fn)
40 | else:
41 | print("mtl file not found: %s" % fn)
42 | elif values[0] == 'f':
43 | face = []
44 | texcoords = []
45 | norms = []
46 | for v in values[1:]:
47 | w = v.split('/')
48 | face.append(int(w[0]))
49 | if len(w) >= 2 and len(w[1]) > 0:
50 | texcoords.append(int(w[1]))
51 | else:
52 | texcoords.append(0)
53 | if len(w) >= 3 and len(w[2]) > 0:
54 | norms.append(int(w[2]))
55 | else:
56 | norms.append(0)
57 | self.faces.append((face, norms, texcoords, material))
58 |
59 | def create(self, vertices = [], vert_colors = [], normals = [], texcoords = [],
60 | faces_v = [], faces_vn = [], faces_vt = []):
61 | self.vertices = vertices
62 | self.vert_colors = vert_colors
63 | self.normals = normals
64 | self.texcoords = texcoords
65 | self.faces_v = faces_v
66 | self.faces_vn = faces_vn
67 | self.faces_vt = faces_vt
68 | self.faces = []
69 | material = None
70 |
71 | face_num = max(len(faces_v), len(faces_vn), len(faces_vt))
72 | if face_num > 0:
73 | if len(faces_v) != face_num:
74 | faces_v = [[-1, -1, -1]] * face_num
75 | if len(faces_vn) != face_num:
76 | faces_vn = [[-1, -1, -1]] * face_num
77 | if len(faces_vt) != face_num:
78 | faces_vt = [[-1, -1, -1]] * face_num
79 | for i in range(face_num):
80 | self.faces.append((faces_v[i], faces_vn[i], faces_vt[i], material))
81 |
82 | def get_adjacent(self, index):
83 | if not self.adjacent_list:
84 | adjacent_list = [[] for i in range(len(self.vertices))]
85 | for face in self.faces:
86 | face_vertices, face_normals, face_texture_coords, material = face
87 | adjacent_list[face_vertices[0] - 1].append(face_vertices[1] - 1)
88 | adjacent_list[face_vertices[0] - 1].append(face_vertices[2] - 1)
89 | adjacent_list[face_vertices[1] - 1].append(face_vertices[0] - 1)
90 | adjacent_list[face_vertices[1] - 1].append(face_vertices[2] - 1)
91 | adjacent_list[face_vertices[2] - 1].append(face_vertices[0] - 1)
92 | adjacent_list[face_vertices[2] - 1].append(face_vertices[1] - 1)
93 |
94 | adjacent_list = list(map(set, adjacent_list))
95 | self.adjacent_list = list(map(list, adjacent_list))
96 | return self.adjacent_list[index]
97 |
98 | def export(self, output_dir, file_name, texture_name=None, enable_vc=False, enable_vt=True):
99 |
100 |         # make sure the output directory exists
101 |         output_file = os.path.join(output_dir, file_name) + '.obj'
102 |         mtl_file = os.path.join(output_dir, file_name) + '.mtl'
103 | 
104 |         if len(output_dir) != 0:
105 |             os.makedirs(output_dir, exist_ok=True)
106 |
107 | with open(output_file, "w") as f:
108 | if texture_name is not None:
109 | f.write('mtllib ./%s.mtl\n' % file_name)
110 | if enable_vc is True:
111 | for idx, vert in enumerate(self.vertices):
112 | f.write("v %f %f %f %f %f %f\n" % (vert[0], vert[1], vert[2],
113 | self.vert_colors[idx][0],
114 | self.vert_colors[idx][1],
115 | self.vert_colors[idx][2]))
116 | else:
117 | for vert in self.vertices:
118 | f.write("v %f %f %f\n" % (vert[0], vert[1], vert[2]))
119 | if enable_vt is True:
120 | for tc in self.texcoords:
121 | f.write("vt %.6f %.6f\n" % (tc[0], tc[1]))
122 | if texture_name is not None:
123 | f.write('usemtl material_0\n')
124 | for face in self.faces:
125 | face_vertices, face_normals, face_texture_coords, material = face
126 | f.write("f %d/%d %d/%d %d/%d\n" % (face_vertices[0], face_texture_coords[0],
127 | face_vertices[1], face_texture_coords[1],
128 | face_vertices[2], face_texture_coords[2]))
129 | else:
130 | for face in self.faces:
131 | face_vertices, face_normals, face_texture_coords, material = face
132 | f.write("f %d %d %d\n" % (face_vertices[0], face_vertices[1], face_vertices[2]))
133 |
134 | if texture_name is not None:
135 | with open(mtl_file, 'w') as f:
136 | f.write('newmtl material_0\nKa 0.200000 0.200000 0.200000\nKd 0.000000 0.000000 0.000000\n')
137 | f.write(
138 | 'Ks 1.000000 1.000000 1.000000\nTr 0.000000\nillum 2\nNs 0.000000\nmap_Kd %s' % texture_name)
139 |
140 | def read_mtl(self, filename):
141 | contents = {}
142 | mtl = None
143 | for line in open(filename, "r"):
144 | if line.startswith('#'): continue
145 | values = line.split()
146 | if not values: continue
147 | if values[0] == 'newmtl':
148 | mtl = contents[values[1]] = {}
149 | elif mtl is None:
150 | raise ValueError('mtl file doesn\'t start with newmtl stmt')
151 | elif values[0] == 'map_Kd':
152 | # load the texture referred to by this declaration
153 | mtl[values[0]] = values[1]
154 | else:
155 | mtl[values[0]] = map(float, values[1:])
156 | return contents
157 |
158 |
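159 | # Minimal usage sketch: build a single-triangle mesh, export it as OBJ, and load it back;
160 | # the vertices, texture coordinates and output path are arbitrary examples.
161 | if __name__ == "__main__":
162 |     tri = mesh_obj()
163 |     tri.create(vertices=[[0, 0, 0], [1, 0, 0], [0, 1, 0]],
164 |                texcoords=[[0, 0], [1, 0], [0, 1]],
165 |                faces_v=[[1, 2, 3]], faces_vt=[[1, 2, 3]])
166 |     tri.export("./demo_output", "triangle")
167 |     reloaded = mesh_obj("./demo_output/triangle.obj")
168 |     print(len(reloaded.vertices), "vertices,", len(reloaded.faces), "faces")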
--------------------------------------------------------------------------------
/toolkit/src/mesh_proc.py:
--------------------------------------------------------------------------------
1 | import numpy as np, pickle, json
2 | from scipy import sparse
3 | from scipy.linalg import orthogonal_procrustes
4 | from scipy.sparse.linalg import lsqr
5 |
6 | WEIGHT = 1.0
7 |
8 | # Laplacian mesh editing
9 | class Laplacian_deformer():
10 | def getLaplacianMatrixUmbrella(self, mesh, anchorsIdx):
11 | n = len(mesh.vertices)
12 | k = anchorsIdx.shape[0]
13 | I = []
14 | J = []
15 | V = []
16 |
17 | # Build sparse Laplacian Matrix coordinates and values
18 | for i in range(n):
19 | neighbors = mesh.vertex_neighbors[i]
20 | z = len(neighbors)
21 | I = I + ([i] * (z + 1)) # repeated row
22 | J = J + neighbors + [i] # column indices and this row
23 | V = V + ([-1] * z) + [z] # negative weights and row degree
24 |
25 | # augment Laplacian matrix with anchor weights
26 | for i in range(k):
27 | I = I + [n + i]
28 | J = J + [anchorsIdx[i]]
29 | V = V + [WEIGHT] # default anchor weight
30 |
31 | L = sparse.coo_matrix((V, (I, J)), shape=(n + k, n)).tocsr()
32 |
33 | return L
34 |
35 | def solveLaplacianMesh(self, raw_mesh, anchors, anchorsIdx):
36 | mesh = raw_mesh.copy()
37 |
38 | vertices = np.array(mesh.vertices)
39 | n = vertices.shape[0] # N x 3
40 | k = anchorsIdx.shape[0]
41 |
42 | L = self.getLaplacianMatrixUmbrella(mesh, anchorsIdx)
43 | delta = np.array(L.dot(vertices))
44 |
45 | # augment delta solution matrix with weighted anchors
46 | for i in range(k):
47 | delta[n + i, :] = WEIGHT * anchors[i, :]
48 |
49 | # update mesh vertices with least-squares solution
50 | for i in range(3):
51 | vertices[:, i] = lsqr(L, delta[:, i])[0]
52 |
53 | mesh.vertices = vertices.tolist()
54 |
55 | return mesh
56 |
57 | def register_mesh(tplt_markers, markers_3d, src_mesh):
58 | tplt_center = np.mean(tplt_markers, axis=0)
59 | markers_center = np.mean(markers_3d, axis=0)
60 | markers_3d_centered = markers_3d - markers_center
61 | tplt_markers_centered = tplt_markers - tplt_center
62 | scale_tgt = np.linalg.norm(markers_3d_centered) / np.linalg.norm(tplt_markers_centered)
63 | markers_3d_centered = markers_3d_centered / scale_tgt
64 | translate = tplt_center
65 | rotation, _ = orthogonal_procrustes(tplt_markers_centered, markers_3d_centered)
66 |
67 | # transform verts
68 | src_verts = np.array(src_mesh.vertices)
69 | tgt_verts = src_verts.copy()
70 | tgt_verts = tgt_verts - markers_center
71 | tgt_verts = np.dot(rotation, tgt_verts.T).T
72 | tgt_verts = tgt_verts / scale_tgt
73 | tgt_verts = tgt_verts + translate
74 |
75 | markers_3d = markers_3d - markers_center
76 | markers_3d = np.dot(rotation, markers_3d.T).T
77 | markers_3d = markers_3d / scale_tgt
78 | markers_3d = markers_3d + translate
79 |
80 | tgt_verts_list = []
81 | for i in range(tgt_verts.shape[0]):
82 | tgt_verts_list.append([tgt_verts[i, 0], tgt_verts[i, 1], tgt_verts[i, 2]])
83 | src_mesh.vertices = tgt_verts_list
84 | return src_mesh, markers_3d
85 |
86 |
87 | class mesh_aligner:
88 | def __init__(self, templates, rig_params):
89 | self.seam_indices = []
90 | self.new_vertices_idx = []
91 | self.new_faces = []
92 | self.new_seam_indices = []
93 | self.mesh_deformer = Laplacian_deformer()
94 | self.predef = templates[0].copy()
95 | self.seam_indices = rig_params['seam_indices']
96 | self.new_vertices_idx = rig_params['new_vertices_idx']
97 | self.new_faces = rig_params['new_faces']
98 | self.new_seam_indices = rig_params['new_seam_indices']
99 |
100 | def align(self, scan_neutral, scan_exp, idx):
101 | self.predef.vertices = np.array(scan_exp)[self.new_vertices_idx[idx]]
102 | self.predef.faces = self.new_faces[idx]
103 | movable, _ = register_mesh(np.array(scan_neutral)[self.seam_indices[idx]],
104 | np.array(scan_exp)[self.seam_indices[idx]], self.predef)
105 | anchors = np.array(scan_neutral)[self.seam_indices[idx]]
106 | deformed_movable = self.mesh_deformer.solveLaplacianMesh(movable, anchors,
107 | np.array(self.new_seam_indices[idx]))
108 | scan_neutral = np.array(scan_neutral)
109 | scan_neutral[self.new_vertices_idx[idx]] = np.array(deformed_movable.vertices)
110 | return scan_neutral
111 |
112 |
--------------------------------------------------------------------------------
/toolkit/src/renderer.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2020, Hao Zhu, NJU.
3 | Mesh rendering functions based on pyrender.
4 | """
5 |
6 | import numpy as np
7 | import pyrender
8 | import trimesh
9 |
10 | # render with gl camera
11 | def render_glcam(model_in, # model name or trimesh
12 | K = None,
13 | Rt = None,
14 | scale = 1.0,
15 | rend_size = (512, 512),
16 | light_trans = np.array([[0], [100], [0]]),
17 | flat_shading = False):
18 |
19 | # Mesh creation
20 | if isinstance(model_in, str) is True:
21 | mesh = trimesh.load(model_in, process=False)
22 | else:
23 | mesh = model_in.copy()
24 | pr_mesh = pyrender.Mesh.from_trimesh(mesh)
25 |
26 | # Scene creation
27 | scene = pyrender.Scene()
28 |
29 | # Adding objects to the scene
30 | face_node = scene.add(pr_mesh)
31 |
32 |     # Calculate fx fy cx cy from K
33 | fx, fy = K[0][0] * scale, K[1][1] * scale
34 | cx, cy = K[0][2] * scale, K[1][2] * scale
35 |
36 | # Camera Creation
37 | cam = pyrender.IntrinsicsCamera(fx, fy, cx, cy,
38 | znear=0.1, zfar=100000)
39 | cam_pose = np.eye(4)
40 | cam_pose[:3, :3] = Rt[:3, :3].T
41 | cam_pose[:3, 3] = -Rt[:3, :3].T.dot(Rt[:, 3])
42 | scene.add(cam, pose=cam_pose)
43 |
44 | # Set up the light
45 | light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=10.0)
46 | light_pose = cam_pose.copy()
47 | light_pose[0:3, :] += light_trans
48 | scene.add(light, pose=light_pose)
49 |
50 | # Rendering offscreen from that camera
51 | r = pyrender.OffscreenRenderer(viewport_width=rend_size[1],
52 | viewport_height=rend_size[0],
53 | point_size=1.0)
54 | if flat_shading is True:
55 | color, depth = r.render(scene, flags=pyrender.constants.RenderFlags.FLAT)
56 | else:
57 | color, depth = r.render(scene)
58 |
59 | # rgb to bgr for cv2
60 | color = color[:, :, [2, 1, 0]]
61 |
62 | return depth, color
63 |
64 |
65 | # render with cv camera
66 | def render_cvcam(model_in, # model name or trimesh
67 | K = None,
68 | Rt = None,
69 | scale = 1.0,
70 | rend_size = (512, 512),
71 | light_trans = np.array([[0], [100], [0]]),
72 | flat_shading = False):
73 |
74 |     if K is None:
75 | K = np.array([[2000, 0, 256],
76 | [0, 2000, 256],
77 | [0, 0, 1]], dtype=np.float64)
78 |
79 |     if Rt is None:
80 | Rt = np.array([[1, 0, 0, 0],
81 | [0, 1, 0, 0],
82 | [0, 0, 1, 0]], dtype=np.float64)
83 |
84 | # define R to transform from cvcam to glcam
85 | R_cv2gl = np.array([[1, 0, 0],
86 | [0, -1, 0],
87 | [0, 0, -1]])
88 | Rt_cv = R_cv2gl.dot(Rt)
89 |
90 | return render_glcam(model_in, K, Rt_cv, scale, rend_size, light_trans, flat_shading)
91 |
92 | # render with orth camera
93 | def render_orthcam(model_in, # model name or trimesh
94 | xy_mag,
95 | rend_size,
96 | flat_shading=False,
97 | zfar = 10000,
98 | znear = 0.05):
99 |
100 | # Mesh creation
101 | if isinstance(model_in, str) is True:
102 | mesh = trimesh.load(model_in, process=False)
103 | else:
104 | mesh = model_in.copy()
105 | pr_mesh = pyrender.Mesh.from_trimesh(mesh)
106 |
107 | # Scene creation
108 | scene = pyrender.Scene()
109 |
110 | # Adding objects to the scene
111 | face_node = scene.add(pr_mesh)
112 |
113 | # Camera Creation
114 | if type(xy_mag) == float:
115 | cam = pyrender.OrthographicCamera(xmag = xy_mag, ymag = xy_mag,
116 | znear=znear, zfar=zfar)
117 | elif type(xy_mag) == tuple:
118 | cam = pyrender.OrthographicCamera(xmag = xy_mag[0], ymag = xy_mag[1],
119 | znear=znear, zfar=zfar)
120 | else:
121 | print("Error: xy_mag should be float or tuple")
122 | return False
123 |
124 | scene.add(cam, pose=np.eye(4))
125 |
126 | # Set up the light
127 | light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=10.0)
128 | scene.add(light, pose=np.eye(4))
129 |
130 | # Rendering offscreen from that camera
131 | r = pyrender.OffscreenRenderer(viewport_width=rend_size[1],
132 | viewport_height=rend_size[0],
133 | point_size=1.0)
134 | if flat_shading is True:
135 | color, depth = r.render(scene, flags=pyrender.constants.RenderFlags.FLAT)
136 | else:
137 | color, depth = r.render(scene)
138 |
139 | # rgb to bgr for cv2
140 | color = color[:, :, [2, 1, 0]]
141 |
142 | # fix pyrender BUG of depth rendering, pyrender version: 0.1.43
143 | depth[depth!=0] = (zfar + znear - ((2.0 * znear * zfar) / depth[depth!=0]) ) / (zfar - znear)
144 | depth[depth!=0] = ( ( depth[depth!=0] + (zfar + znear) / (zfar - znear) ) * (zfar - znear) ) / 2.0
145 |
146 | return depth, color
147 |
148 |
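149 | # Minimal usage sketch: render a sphere primitive with the orthographic camera; the sphere,
150 | # its position and the magnification are arbitrary example values.
151 | if __name__ == "__main__":
152 |     import cv2, os
153 |     sphere = trimesh.creation.icosphere(radius=1.0)
154 |     sphere.apply_translation([0, 0, -5])       # place it in front of the GL camera (looking down -z)
155 |     depth, color = render_orthcam(sphere, xy_mag=1.5, rend_size=(256, 256))
156 |     os.makedirs("./demo_output", exist_ok=True)
157 |     cv2.imwrite("./demo_output/orth_sphere.jpg", color)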
--------------------------------------------------------------------------------
/toolkit/src/rig.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2021, Haotian Yang, Hao Zhu, NJU.
3 | Expression rigging and PCA related functions.
4 | """
5 |
6 | import numpy as np, trimesh, scipy, tqdm, json, os
7 | from scipy.optimize import minimize
8 | from scipy.sparse import coo_matrix, linalg
9 | from src.mesh_proc import mesh_aligner
10 |
11 | num_scan = 19
12 | num_bs = 51
13 |
14 | def get_M(mesh):
15 | result_M = np.zeros((len(mesh.faces) * 3, 3))
16 | for tri_index, face in enumerate(mesh.faces):
17 | v1 = mesh.vertices[face[0]]
18 | v2 = mesh.vertices[face[1]]
19 | v3 = mesh.vertices[face[2]]
20 | M = np.zeros((3, 3))
21 | M[:, 0] = v2 - v1
22 | M[:, 1] = v3 - v1
23 | M[:, 2] = np.cross(v2 - v1, v3 - v1)
24 | result_M[tri_index * 3:tri_index * 3 + 3, :] = M
25 | return result_M
26 |
27 | def get_vertices(bs_M, neutral_M, tri_num, A):
28 | c = np.zeros((3 * tri_num, 3))
29 | for tri_index in range(tri_num):
30 | c[3 * tri_index:3 * tri_index + 3, :] = (
31 | np.matmul(bs_M[3 * tri_index:3 * tri_index + 3] + neutral_M[3 * tri_index:3 * tri_index + 3],
32 | np.linalg.inv(neutral_M[3 * tri_index:3 * tri_index + 3]))).T
33 | return linalg.spsolve(A.T.dot(A), A.T.dot(c))
34 |
35 |
36 | def optimize_bs(bs_weight, bs_M, scan_M, templates_M, tri_num, beta):
37 | least_sq_A = np.zeros((num_scan + num_bs, num_bs))
38 | least_sq_b = np.zeros((num_scan + num_bs, 3 * 3 * tri_num))
39 | least_sq_A[0:num_scan, :] = bs_weight
40 | for i in range(num_scan):
41 | least_sq_b[i, :] = (scan_M[i + 1] - scan_M[0]).flatten()
42 |
43 | for i in range(num_bs):
44 | omega = np.power(
45 | (1 + np.linalg.norm(templates_M[i + 1]) / 40) / (0.1 + np.linalg.norm(templates_M[i + 1]) / 40), 2)
46 | template_M_res = np.zeros((tri_num * 3, 3))
47 |
48 | for j in range(tri_num):
49 | template_M_res[j * 3:j * 3 + 3, :] = np.matmul(
50 | np.matmul(templates_M[0, j * 3:j * 3 + 3, :] + templates_M[i + 1, j * 3:j * 3 + 3, :],
51 | np.linalg.inv(templates_M[0, j * 3:j * 3 + 3, :])),
52 | scan_M[0, j * 3:j * 3 + 3, :]) - scan_M[0, j * 3:j * 3 + 3, :]
53 | least_sq_A[num_scan + i, i] = np.sqrt(omega * beta)
54 | least_sq_b[num_scan + i, :] = np.sqrt(omega * beta) * template_M_res.flatten()
55 |
56 | result_M = scipy.linalg.solve(np.matmul(least_sq_A.T, least_sq_A), np.matmul(least_sq_A.T, least_sq_b))
57 | for i in range(num_bs):
58 | bs_M[i] = np.reshape(result_M[i], (-1, 3))
59 |
60 | def compute_res_weight(bs_weight, bs_vertices, scan_vertex, init_weight, gama):
61 | return np.power(np.linalg.norm(bs_vertices.dot(bs_weight) - scan_vertex), 2) + gama * np.power(
62 | np.linalg.norm(bs_weight - init_weight), 2)
63 |
64 |
65 | def optimize_weight(bs_weights, bs_vertices, scan_vertices, init_weights, bounds, gama):
66 | for i in range(num_scan):
67 | init_weight = init_weights[i, :]
68 | bs_weight = init_weight
69 | scan_vertex = scan_vertices[i, :, :].flatten()
70 | result = minimize(compute_res_weight, bs_weight, method='L-BFGS-B', bounds=bounds,
71 | args=(bs_vertices, scan_vertex, init_weight, gama),
72 | options={'ftol': 1e-5, 'maxiter': 1000})
73 | bs_weights[i, :] = result.x
74 |
75 |
76 | def get_fixed_vertices(templates):
77 |     fixed_vertices = np.zeros((num_bs, len(templates[0].vertices)), dtype=bool)
78 | for i in range(num_bs):
79 | bs_vertices = np.array(templates[i + 1].vertices)
80 | fixed_vertices[i, :] = np.sum(np.abs(bs_vertices), axis=1) == 0
81 | return fixed_vertices
82 |
83 | # rig 52 expressions from 20 expressions
84 | def rig_20to52(id_dir, tplt_dir, params_file, mesh_ver = "1.0"):
85 |
86 | with open(params_file, 'r') as f:
87 | rig_params = json.load(f)
88 |
89 | exp_list = rig_params['exp_list']
90 | bs_weight = np.array(rig_params['bs_weight'])
91 | vert_16to10_dict = rig_params["vert_16to10_dict"]
92 | faces_v16 = rig_params["faces_v16"]
93 |
94 | templates = []
95 | template_neutral = trimesh.load_mesh(tplt_dir + "Neutral.obj",
96 | maintain_order=True, process = False)
97 | templates.append(template_neutral)
98 |
99 | expt = [0, 1, 4, 5]
100 | for i in tqdm.trange(num_bs, desc="[step 1/8] Loading templates"):
101 | template = trimesh.load_mesh(tplt_dir + str(i) + ".obj",
102 | maintain_order=True, process = False)
103 | if i in expt:
104 | template.vertices = (np.array(template.vertices) - np.array(template_neutral.vertices)).tolist()
105 | templates.append(template)
106 |
107 | weight_bounds = [(0, 1)]*num_bs
108 |
109 | templates_M = np.zeros((num_bs + 1, len(template_neutral.faces) * 3, 3))
110 | for i in tqdm.trange(len(templates), desc="[step 2/8] Computing M for templates"):
111 | templates_M[i, :, :] = get_M(templates[i])
112 | if i > 0 and i - 1 not in expt:
113 | templates_M[i, :, :] = templates_M[i, :, :] - templates_M[0, :, :]
114 |
115 | print('[step 3/8] Building align_mesh')
116 | aligner = mesh_aligner(templates, rig_params)
117 |
118 | scans = []
119 | scan_neutral = trimesh.load_mesh(id_dir + "aligned/1_neutral.obj",
120 | maintain_order=True, process = False)
121 | scans.append(scan_neutral)
122 | for i in tqdm.trange(num_scan, desc="[step 4/8] Loading TU models"):
123 | scan = trimesh.load_mesh(id_dir + "aligned/" + exp_list[i + 1] + ".obj",
124 | maintain_order=True, process = False)
125 | scans.append(scan)
126 |
127 | if mesh_ver == "1.0":
128 | scans_v16 = []
129 | scan_neutral = trimesh.Trimesh(vertices = scan_neutral.vertices[vert_16to10_dict],
130 | faces = faces_v16,
131 | maintain_order = True, process = False)
132 | for scan in scans:
133 | scan = trimesh.Trimesh(vertices = scan.vertices[vert_16to10_dict],
134 | faces = faces_v16,
135 | maintain_order = True, process = False)
136 | scans_v16.append(scan)
137 | scans = scans_v16
138 |
139 | scan_M = np.zeros((num_scan + 1, len(scan_neutral.faces) * 3, 3))
140 | for i in tqdm.trange(len(scans), desc="[step 5/8] Computing M for TU models"):
141 | scan_M[i, :, :] = get_M(scans[i])
142 |
143 | print('[step 6/8] Building A')
144 | row = np.zeros(9 * len(scan_neutral.faces))
145 | column = np.zeros(9 * len(scan_neutral.faces))
146 | A_data = np.zeros(9 * len(scan_neutral.faces))
147 | for tri_index, face in enumerate(scan_neutral.faces):
148 |
149 | v1 = scan_neutral.vertices[face[0]]
150 | v2 = scan_neutral.vertices[face[1]]
151 | v3 = scan_neutral.vertices[face[2]]
152 |
153 | V = np.zeros((3, 2))
154 | V[:, 0] = v2 - v1
155 | V[:, 1] = v3 - v1
156 | Q, R = np.linalg.qr(V)
157 | affine = np.matmul(np.linalg.inv(R), Q.T)
158 |
159 | row[tri_index * 9:tri_index * 9 + 3] = tri_index * 3
160 | row[tri_index * 9 + 3:tri_index * 9 + 6] = tri_index * 3 + 1
161 | row[tri_index * 9 + 6:tri_index * 9 + 9] = tri_index * 3 + 2
162 | column[tri_index * 9:tri_index * 9 + 9:3] = face[0]
163 | column[tri_index * 9 + 1:tri_index * 9 + 9:3] = face[1]
164 | column[tri_index * 9 + 2:tri_index * 9 + 9:3] = face[2]
165 | A_data[tri_index * 9] = -affine[0, 0] - affine[1, 0]
166 | A_data[tri_index * 9 + 1: tri_index * 9 + 3] = affine[0:2, 0]
167 | A_data[tri_index * 9 + 3] = -affine[0, 1] - affine[1, 1]
168 | A_data[tri_index * 9 + 4:tri_index * 9 + 6] = affine[0:2, 1]
169 | A_data[tri_index * 9 + 6] = -affine[0, 2] - affine[1, 2]
170 | A_data[tri_index * 9 + 7:tri_index * 9 + 9] = affine[0:2, 2]
171 |
172 | A = coo_matrix((A_data, (row, column)), shape=(3 * len(scan_neutral.faces), len(scan_neutral.vertices))).tocsr()
173 |
174 | scan_neutral_vertices = np.array(scan_neutral.vertices)
175 | scan_vertices = np.zeros((num_scan, len(scan_neutral.vertices), 3))
176 | for i in range(num_scan):
177 | scan_vertices[i, :, :] = np.array(scans[i + 1].vertices) - scan_neutral_vertices
178 |
179 | bs_M = np.zeros((num_bs, len(template_neutral.faces) * 3, 3))
180 |
181 | init_weights = bs_weight.copy()
182 | gama, beta = 20000, 0.5
183 | for loop in tqdm.trange(5, desc="[step 7/8] Optimizing"):
184 | optimize_bs(bs_weight, bs_M, scan_M, templates_M, len(scan_neutral.faces), beta)
185 | bs_vertices = np.zeros((3 * len(scan_neutral.vertices), num_bs))
186 |
187 | if loop != 4:
188 | for i in range(num_bs):
189 | vertices = get_vertices(bs_M[i], scan_M[0], len(scan_neutral.faces), A)
190 | vertices = aligner.align(scan_neutral_vertices, vertices, i)
191 | bs_vertices[:, i] = (vertices - scan_neutral_vertices).flatten()
192 |
193 | optimize_weight(bs_weight, bs_vertices, scan_vertices, init_weights, weight_bounds, gama)
194 |
195 | beta -= 0.1
196 | gama -= 5000
197 |
198 | # save results
199 | os.makedirs(id_dir + 'rigging', exist_ok=True)
200 | scan_neutral.export(id_dir + 'rigging/Neutral.obj');
201 | for i in tqdm.trange(num_bs, desc="[step 8/8] Saving result models"):
202 | vertices = get_vertices(bs_M[i], scan_M[0], len(scan_neutral.faces), A)
203 | vertices = aligner.align(scan_neutral_vertices, vertices, i)
204 | scan_neutral.vertices = vertices.tolist()
205 | scan_neutral.export(id_dir + 'rigging/' + str(i) + '.obj')
206 |
207 | print("Done, results saved to %srigging/" % id_dir)
208 |
209 |
210 | # generate core of bilinear model from rigged meshes
211 | # the shape of verts_arr should be [vert_num, id_num, exp_num]
212 | # exp_dims=0 means PCA is not applied to the expression dimension
213 | def make_bm(verts_arr, id_dims=50, exp_dims=0):
214 |     import torch, tensorly
215 |     from tensorly.decomposition import partial_tucker
216 |     tensorly.set_backend('pytorch')
217 |     verts_tensor = torch.from_numpy(verts_arr)
218 |     if exp_dims == 0:
219 |         core, factors = partial_tucker(tensor = verts_tensor, modes = [0, 1],
220 |                                        rank=[id_dims])
221 |     else:
222 |         core, factors = partial_tucker(tensor = verts_tensor, modes = [0, 1],
223 | rank=[id_dims, exp_dims])
224 |
225 | return core, factors
226 |
227 |
--------------------------------------------------------------------------------
/toolkit/src/utility.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2020, Hao Zhu, NJU.
3 | Utility functions.
4 | """
5 |
6 | import numpy as np
7 | import cv2, PIL.Image
8 |
9 | # show image in Jupyter Notebook (work inside loop)
10 | from io import BytesIO
11 | from IPython.display import display, Image
12 | def show_img_arr(arr, bgr_mode = False):
13 | if bgr_mode is True:
14 | arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
15 | im = PIL.Image.fromarray(arr)
16 | bio = BytesIO()
17 | im.save(bio, format='png')
18 | display(Image(bio.getvalue(), format='png'))
19 |
20 | # show depth array in Jupyter Notebook (work inside loop)
21 | def show_depth_arr(depth_map):
22 | depth_max = np.max(depth_map)
23 | depth_min = np.min(depth_map)
24 | depth_map = (depth_map - depth_min)/(depth_max - depth_min)*255
25 | show_img_arr(depth_map.astype(np.uint8))
26 |
27 | # rotate verts along y axis
28 | def rotate_verts_y(verts, y):
29 | verts_mean = np.mean(verts, axis = 0)
30 | verts = verts - verts_mean
31 |
32 | angle = y*np.math.pi/180
33 | R = np.array([[np.cos(angle), 0, np.sin(angle)],
34 | [0, 1, 0],
35 | [-np.sin(angle), 0, np.cos(angle)]])
36 |
37 | verts = np.tensordot(R, verts.T, axes = 1).T + verts_mean
38 | return verts
39 |
40 | # rotate verts along x axis
41 | def rotate_verts_x(verts, x):
42 | verts_mean = np.mean(verts, axis = 0)
43 | verts = verts - verts_mean
44 |
45 | angle = x*np.math.pi/180
46 | R = np.array([[1, 0, 0],
47 | [0, np.cos(angle), -np.sin(angle)],
48 | [0, np.sin(angle), np.cos(angle)]])
49 |
50 | verts = np.tensordot(R, verts.T, axes = 1).T + verts_mean
51 | return verts
52 |
53 | # rotate verts along z axis
54 | def rotate_verts_z(verts, z):
55 | verts_mean = np.mean(verts, axis = 0)
56 | verts = verts - verts_mean
57 |
58 | angle = z*np.math.pi/180
59 | R = np.array([[np.cos(angle), -np.sin(angle), 0],
60 | [np.sin(angle), np.cos(angle), 0],
61 | [0, 0, 1]])
62 |
63 | verts = np.tensordot(R, verts.T, axes = 1).T + verts_mean
64 | return verts
65 |
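66 | # Minimal usage sketch: the rotation helpers take angles in degrees and rotate the
67 | # vertices around their centroid; the random vertices below are only for demonstration.
68 | if __name__ == "__main__":
69 |     verts = np.random.rand(100, 3)
70 |     verts_y30 = rotate_verts_y(verts, 30)
71 |     verts_x90 = rotate_verts_x(verts, 90)
72 |     print(verts_y30.shape, verts_x90.shape)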
--------------------------------------------------------------------------------
/toolkit/test_data/chan.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuhao-nju/facescape/19a9fffd68a79c48fec31ee9949e10a8a412abca/toolkit/test_data/chan.jpg
--------------------------------------------------------------------------------
/toolkit/tex_unwrap_test.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import csv
3 | import timeit
4 | import numpy as np
5 | import trimesh
6 | # import cupy as cp
7 | 
8 | from src.facescape_bm import facescape_bm
9 | from src.facescape_fitter import facescape_fitter
10 | from src.renderer import render_cvcam
11 | from src.renderer import render_orthcam
12 | 
13 |
14 |
15 | np.random.seed(1000)
16 |
17 | # Initialize model and fitter
18 | fs_fitter = facescape_fitter(fs_file="./bilinear_model_v1.6/facescape_bm_v1.6_847_50_52_id_front.npz",
19 | kp2d_backend='dlib') # or 'face_alignment'
20 |
21 | # Fit id to image
22 | src_path = "./test_data/chan.jpg"
23 | src_img = cv2.imread(src_path)
24 | kp2d = fs_fitter.detect_kp2d(src_img) # extract 2D key points
25 | mesh, params, mesh_verts_img = fs_fitter.fit_kp2d(kp2d) # fit model
26 | id, _, scale, trans, rot_vector = params
27 |
28 | # Get texture
29 | texture = fs_fitter.get_texture(src_img, mesh_verts_img, mesh)
30 | filename = './demo_output/test_mesh.jpg'
31 | cv2.imwrite(filename, texture)
32 |
33 | # Save base mesh
34 | mesh.export(output_dir='./demo_output', file_name='test_mesh', texture_name='test_mesh.jpg', enable_vc=False, enable_vt=True)
35 |
--------------------------------------------------------------------------------