├── data_scans
│   └── placeholder
├── data_posmaps
│   └── placeholder
├── step3_point_avatar
│   ├── __init__.py
│   ├── lib
│   │   ├── __init__.py
│   │   ├── lbs.py
│   │   ├── losses.py
│   │   ├── fite_model.py
│   │   ├── shader_utils.py
│   │   ├── utils.py
│   │   └── dataset.py
│   ├── posmap_shaders
│   │   ├── f.glsl
│   │   └── v.glsl
│   ├── LICENSE_POP
│   ├── render_posmaps.py
│   ├── train_fite_point_avatar.py
│   └── test_fite_point_avatar.py
├── step1_diffused_skinning
│   ├── __init__.py
│   ├── compile_lbs_surf_grad.sh
│   ├── src_lbs_surf_grad
│   │   ├── DTypes.h
│   │   ├── HEMath.h
│   │   ├── main_lbs_surf_grad.cpp
│   │   ├── HEMath.cpp
│   │   └── HEMesh.h
│   ├── lbs.py
│   └── compute_diffused_skinning.py
├── teaser
│   └── pipeline.png
├── step2_implicit_template
│   ├── lib
│   │   ├── model
│   │   │   ├── metrics.py
│   │   │   ├── broyden.py
│   │   │   ├── helpers.py
│   │   │   ├── sample.py
│   │   │   ├── smpl.py
│   │   │   ├── network.py
│   │   │   └── deformer_diffused_skinning.py
│   │   ├── smpl
│   │   │   ├── utils.py
│   │   │   ├── vertex_ids.py
│   │   │   ├── vertex_joint_selector.py
│   │   │   └── lbs.py
│   │   ├── utils
│   │   │   ├── meshing.py
│   │   │   └── render.py
│   │   ├── dataset
│   │   │   └── fite.py
│   │   └── libmise
│   │       └── mise.pyx
│   ├── __init__.py
│   ├── setup.py
│   ├── LICENSE_SNARF
│   ├── train_fite_implicit_template.py
│   └── extract_fite_implicit_template.py
├── configs
│   ├── common.yaml
│   ├── step1.yaml
│   ├── projection_list.yaml
│   ├── resynth_pretrained_subject_list.yaml
│   ├── cape_pretrained_subject_list.yaml
│   ├── step3.yaml
│   └── step2.yaml
├── data_templates
│   └── gender_list.yaml
├── LICENSE
├── .gitignore
├── measure_error.py
└── README.md
/data_scans/placeholder:
--------------------------------------------------------------------------------
1 | 2 |
--------------------------------------------------------------------------------
/data_posmaps/placeholder:
--------------------------------------------------------------------------------
1 | 2 |
--------------------------------------------------------------------------------
/step3_point_avatar/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/step1_diffused_skinning/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/step3_point_avatar/lib/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/teaser/pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jsnln/fite/HEAD/teaser/pipeline.png
--------------------------------------------------------------------------------
/step1_diffused_skinning/compile_lbs_surf_grad.sh:
--------------------------------------------------------------------------------
1 | g++ src_lbs_surf_grad/main_lbs_surf_grad.cpp src_lbs_surf_grad/HEMath.cpp src_lbs_surf_grad/HEMeshStruct.cpp -o lbs_surf_grad -O2
--------------------------------------------------------------------------------
/step3_point_avatar/posmap_shaders/f.glsl:
--------------------------------------------------------------------------------
1 | #version 460 core 2 | 3 | in vec3 outColor; 4 | 5 | out vec4 color; 6 | 7 | void main() 8 | { 9 | color = vec4(outColor, 1.0f); 10 | }
--------------------------------------------------------------------------------
/step1_diffused_skinning/src_lbs_surf_grad/DTypes.h:
--------------------------------------------------------------------------------
1 | #ifndef __DTYPES_H__ 2 | #define
__DTYPES_H__ 3 | 4 | typedef float HEfloat; 5 | typedef int HEint; 6 | const HEfloat HEFLOAT_INF = 1e8f; 7 | 8 | #endif 9 | -------------------------------------------------------------------------------- /step2_implicit_template/lib/model/metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def calculate_iou(gt, prediction): 4 | intersection = torch.logical_and(gt, prediction) 5 | union = torch.logical_or(gt, prediction) 6 | return torch.sum(intersection) / torch.sum(union) -------------------------------------------------------------------------------- /configs/common.yaml: -------------------------------------------------------------------------------- 1 | expname: resynth_pretrained 2 | result_folder: results 3 | device: 'cuda' 4 | 5 | smpl_model_path: smpl_models 6 | data_scans_path: data_scans 7 | data_templates_path: data_templates 8 | 9 | num_joints: 24 # doesn't support other values yet 10 | leg_angle: 15.0 11 | n_cano_points: 50000 12 | -------------------------------------------------------------------------------- /configs/step1.yaml: -------------------------------------------------------------------------------- 1 | point_interpolant_exe: PoissonRecon/Bin/Linux/PointInterpolant 2 | skinning_grid_depth: 8 3 | lbs_surf_grad_exe: step1_diffused_skinning/lbs_surf_grad 4 | tmp_folder_constraints: data_tmp_constraints 5 | tmp_folder_skinning_grid: data_tmp_skinning_grid 6 | 7 | ask_before_os_system: true 8 | subject: rp_anna_posed_001 -------------------------------------------------------------------------------- /step3_point_avatar/posmap_shaders/v.glsl: -------------------------------------------------------------------------------- 1 | #version 460 core 2 | 3 | layout (location = 0) in vec3 position; 4 | layout (location = 1) in vec3 color; 5 | 6 | out vec3 outColor; 7 | 8 | uniform mat4 projection; 9 | 10 | void main() 11 | { 12 | gl_Position = projection * vec4(position, 1.0f); 13 | outColor = color; 14 | } -------------------------------------------------------------------------------- /configs/projection_list.yaml: -------------------------------------------------------------------------------- 1 | - dirc: rightfront 2 | y_rot: 45 3 | x_rot: 30 4 | y_shift: 0.25 5 | x_stretch: 1.25 6 | - dirc: rightback 7 | y_rot: 135 8 | x_rot: -30 9 | y_shift: 0.25 10 | x_stretch: 1.25 11 | - dirc: leftback 12 | y_rot: 225 13 | x_rot: 30 14 | y_shift: 0.25 15 | x_stretch: 1.25 16 | - dirc: leftfront 17 | y_rot: 315 18 | x_rot: -30 19 | y_shift: 0.25 20 | x_stretch: 1.25 -------------------------------------------------------------------------------- /step2_implicit_template/__init__.py: -------------------------------------------------------------------------------- 1 | # from .lib.dataset.fite import ReSynthDataSet, ReSynthDataProcessor 2 | # from lib.snarf_model_queried import SNARFModelQueried 3 | # from lib.model.smpl import SMPLServer 4 | # from lib.model.sample import PointOnBones, PointInSpace 5 | # from lib.model.network import ImplicitNetwork 6 | # from lib.model.metrics import calculate_iou 7 | # from lib.utils.meshing import generate_mesh 8 | # from lib.model.helpers import masked_softmax 9 | # from lib.model.deformer_queried import ForwardDeformerQueried, skinning 10 | # from lib.utils.render import render_trimesh, render_joint, weights2colors -------------------------------------------------------------------------------- /configs/resynth_pretrained_subject_list.yaml: 
-------------------------------------------------------------------------------- 1 | - name: rp_anna_posed_001 2 | gender: female 3 | - name: rp_beatrice_posed_025 4 | gender: female 5 | - name: rp_christine_posed_027 6 | gender: female 7 | - name: rp_celina_posed_005 8 | gender: female 9 | - name: rp_alexandra_posed_006 10 | gender: female 11 | - name: rp_carla_posed_006 12 | gender: female 13 | - name: rp_cindy_posed_005 14 | gender: female 15 | - name: rp_corey_posed_006 16 | gender: male 17 | - name: rp_eric_posed_006 18 | gender: male 19 | - name: rp_carla_posed_004 20 | gender: female 21 | - name: rp_eric_posed_035 22 | gender: male 23 | - name: rp_felice_posed_004 24 | gender: female 25 | -------------------------------------------------------------------------------- /configs/cape_pretrained_subject_list.yaml: -------------------------------------------------------------------------------- 1 | - name: 00096_jerseyshort 2 | gender: male 3 | - name: 00096_longshort 4 | gender: male 5 | - name: 00096_shirtlong 6 | gender: male 7 | - name: 00096_shirtshort 8 | gender: male 9 | - name: 00096_shortlong 10 | gender: male 11 | - name: 00096_shortshort 12 | gender: male 13 | - name: 00215_jerseyshort 14 | gender: male 15 | - name: 00215_longshort 16 | gender: male 17 | - name: 00215_poloshort 18 | gender: male 19 | - name: 00215_shortlong 20 | gender: male 21 | - name: 03375_blazerlong 22 | gender: male 23 | - name: 03375_longlong 24 | gender: male 25 | - name: 03375_shortlong 26 | gender: male 27 | - name: 03375_shortshort 28 | gender: male 29 | -------------------------------------------------------------------------------- /data_templates/gender_list.yaml: -------------------------------------------------------------------------------- 1 | rp_alexandra_posed_006: female 2 | rp_anna_posed_001: female 3 | rp_beatrice_posed_025: female 4 | rp_carla_posed_004: female 5 | rp_carla_posed_006: female 6 | rp_celina_posed_005: female 7 | rp_christine_posed_027: female 8 | rp_cindy_posed_005: female 9 | rp_corey_posed_006: male 10 | rp_eric_posed_006: male 11 | rp_eric_posed_035: male 12 | rp_felice_posed_004: female 13 | 00032_shortlong: male 14 | 00096_jerseyshort: male 15 | 00096_longshort: male 16 | 00096_shirtlong: male 17 | 00096_shirtshort: male 18 | 00096_shortlong: male 19 | 00096_shortshort: male 20 | 00215_jerseyshort: male 21 | 00215_longshort: male 22 | 00215_poloshort: male 23 | 00215_shortlong: male 24 | 03375_blazerlong: male 25 | 03375_longlong: male 26 | 03375_shortlong: male 27 | 03375_shortshort: male 28 | -------------------------------------------------------------------------------- /configs/step3.yaml: -------------------------------------------------------------------------------- 1 | # data 2 | data_posmaps_path: data_posmaps 3 | 4 | # loss func related 5 | w_m2s: 10000.0 6 | w_s2m: 10000.0 7 | w_normal: 1.0 8 | w_rgl: 2000.0 9 | w_latent_rgl: 1.0 10 | 11 | # training / eval related 12 | epochs: 401 13 | train_normals_from_start: false 14 | decay_start: 250 15 | rise_start: 250 16 | batch_size: 4 17 | decay_every: 400 18 | rise_every: 400 19 | 20 | save_ckpt_every: 25 21 | save_pcd_every: 500 22 | lr: 0.0003 23 | lr_geomfeat: 0.0005 24 | predeform: true 25 | save_cano: false 26 | load_epoch: null 27 | eval_use_gt: false 28 | 29 | # architecture related 30 | hsize: 256 31 | nf: 16 32 | c_geom: 64 33 | c_pose: 64 34 | up_mode: upconv 35 | residual_scaling: 0.01 36 | predeform_scaling: 0.005 37 | use_dropout: false 38 | 39 | # data related 40 | data_spacing: 1 41 | 42 | 
posmap_size: 256 43 | num_workers: 10 44 | 45 | selected_subjects: null 46 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Siyou Lin, Hongwen Zhang, Zerong Zheng, Ruizhi Shao, Yebin Liu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /step2_implicit_template/setup.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 The TensorFlow Authors 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | """Set-up script for installing extension modules.""" 15 | from Cython.Build import cythonize 16 | import numpy 17 | from setuptools import Extension 18 | from setuptools import setup 19 | 20 | # Get the numpy include directory. 21 | numpy_include_dir = numpy.get_include() 22 | 23 | # mise (efficient mesh extraction) 24 | mise_module = Extension( 25 | "lib.libmise.mise", 26 | sources=["lib/libmise/mise.pyx"], 27 | ) 28 | 29 | # Gather all extension modules 30 | ext_modules = [ 31 | mise_module, 32 | ] 33 | 34 | setup(ext_modules=cythonize(ext_modules),) -------------------------------------------------------------------------------- /step2_implicit_template/LICENSE_SNARF: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Xu Chen, Yufeng Zheng, Michael J. 
Black, Otmar Hilliges, Andreas Geiger 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /step1_diffused_skinning/src_lbs_surf_grad/HEMath.h: -------------------------------------------------------------------------------- 1 | #ifndef __HEMATH_H__ 2 | #define __HEMATH_H__ 3 | 4 | #include "DTypes.h" 5 | #include 6 | 7 | namespace HEMath { 8 | class Vec3; 9 | HEfloat inner_prod(const Vec3 & u, const Vec3 & v); 10 | Vec3 cross_prod(const Vec3 & u, const Vec3 & v); 11 | }; 12 | 13 | class HEMath::Vec3 { 14 | HEfloat x = 0.0f; 15 | HEfloat y = 0.0f; 16 | HEfloat z = 0.0f; 17 | public: 18 | Vec3(); 19 | Vec3(const Vec3 & other); 20 | Vec3(HEfloat X, HEfloat Y, HEfloat Z); 21 | Vec3 operator+(const Vec3 & other) const; 22 | Vec3 operator-(const Vec3 & other) const; 23 | Vec3 operator-() const; // unary 24 | Vec3 operator*(HEfloat other) const; // scalar 25 | Vec3 operator=(const Vec3 & other); 26 | Vec3 operator=(const HEfloat & other); 27 | bool operator==(const Vec3 & other) const; 28 | HEfloat operator[](int i) const; 29 | HEfloat & operator[](int i); 30 | 31 | HEfloat norm() const; 32 | Vec3 vertical() const; 33 | Vec3 normalize() const; 34 | void normalize_(); 35 | friend HEfloat inner_prod(const Vec3 & v, const Vec3 & w); 36 | friend Vec3 cross_prod(const Vec3 & v, const Vec3 & w); 37 | 38 | void print(); 39 | }; 40 | 41 | 42 | #endif -------------------------------------------------------------------------------- /step1_diffused_skinning/src_lbs_surf_grad/main_lbs_surf_grad.cpp: -------------------------------------------------------------------------------- 1 | #include "HEMath.h" 2 | #include "HEMesh.h" 3 | #include 4 | 5 | int main(int argc, char* argv[]) { 6 | 7 | if (argc != 4) { 8 | std::cerr << "Please provide three arguments:\n" 9 | << " 1. the path to the canonically posed SMPL template (obj format)\n" 10 | << " 2. the path to the lbs_weights (txt format)\n" 11 | << " 3. 
the path to output file (txt format)\n"; 12 | return 0; 13 | } 14 | 15 | std::vector verts; 16 | std::vector faces; 17 | std::vector weights; 18 | 19 | bool data_loaded = HEMesh::loadFromOBJ(argv[1], verts, faces) && HEMesh::loadWeights(argv[2], weights); 20 | if (data_loaded) { 21 | 22 | HEMesh::MeshStruct mesh; 23 | mesh.setFromVertsFacesWithAdditionalVertAttr(verts, faces, weights); 24 | mesh.computeVertexNormals(); 25 | for (int jid = 0; jid < N_SMPL_JOINTS; jid++) { 26 | mesh.computeComputeLBSGradients(jid); 27 | } 28 | 29 | bool grads_computed = mesh.exportVertsAndAttrs(argv[3]); 30 | if (grads_computed) { 31 | std::cout << "Outputted to: " << argv[3] << "\n"; 32 | } 33 | // mesh.exportVertsAndAttrs("cano_data_with_lbs_grad.txt"); 34 | } 35 | } -------------------------------------------------------------------------------- /configs/step2.yaml: -------------------------------------------------------------------------------- 1 | 2 | datamodule: 3 | num_workers: 10 4 | subject: 'rp_anna_posed_001' 5 | clothing: '' 6 | 7 | batch_size: 8 8 | processor: 9 | points_per_frame: 5000 10 | sampler: 11 | global_sigma: 1.8 12 | local_sigma: 0.025 13 | 14 | # epoch: last 15 | # resume: false 16 | 17 | trainer: 18 | gradient_clip_val: 0.1 19 | check_val_every_n_epoch: 4 20 | save_vis_every_n_iters: 500 21 | save_ckpt_every_n_iters: 1000 22 | deterministic: true 23 | max_steps: 8001 24 | gpus: 1 25 | model: 26 | # shape MLP 27 | network: 28 | d_in: 3 29 | d_out: 1 30 | depth: 8 31 | width: 256 32 | multires: 0 33 | skip_layer: [4] 34 | cond_layer: [4] 35 | dim_cond_embed: 8 36 | weight_norm: true 37 | geometric_init: true 38 | bias: 1 39 | deformer: 40 | softmax_mode: hierarchical 41 | # LBS MLP 42 | network: 43 | d_in: 3 44 | d_out: 25 45 | depth: 4 46 | width: 128 47 | multires: 0 48 | skip_layer: [] 49 | cond_layer: [] 50 | dim_cond_embed: 0 51 | weight_norm: true 52 | geometric_init: false 53 | bias: 1 54 | optim: 55 | lr: 0.001 56 | soft_blend: 5 57 | pose_conditioning: true 58 | nepochs_pretrain: 0 59 | lambda_bone_occ: 1 60 | lambda_bone_w: 10 61 | 62 | extraction: 63 | resolution_low: 256 64 | resolution_high: 512 65 | save_mesh: true 66 | every_n_frames: 1 67 | fast_mode: true 68 | -------------------------------------------------------------------------------- /step2_implicit_template/lib/smpl/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems and the Max Planck Institute for Biological 14 | # Cybernetics. All rights reserved. 
15 | # 16 | # Contact: ps-license@tuebingen.mpg.de 17 | 18 | from __future__ import print_function 19 | from __future__ import absolute_import 20 | from __future__ import division 21 | 22 | import numpy as np 23 | import torch 24 | 25 | 26 | def to_tensor(array, dtype=torch.float32): 27 | if 'torch.tensor' not in str(type(array)): 28 | return torch.tensor(array, dtype=dtype) 29 | 30 | 31 | class Struct(object): 32 | def __init__(self, **kwargs): 33 | for key, val in kwargs.items(): 34 | setattr(self, key, val) 35 | 36 | 37 | def to_np(array, dtype=np.float32): 38 | if 'scipy.sparse' in str(type(array)): 39 | array = array.todense() 40 | return np.array(array, dtype=dtype) 41 | 42 | 43 | def rot_mat_to_euler(rot_mats): 44 | # Calculates rotation matrix to euler angles 45 | # Careful for extreme cases of eular angles like [0.0, pi, 0.0] 46 | 47 | sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] + 48 | rot_mats[:, 1, 0] * rot_mats[:, 1, 0]) 49 | return torch.atan2(-rot_mats[:, 2, 0], sy) 50 | -------------------------------------------------------------------------------- /step2_implicit_template/lib/smpl/vertex_ids.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems and the Max Planck Institute for Biological 14 | # Cybernetics. All rights reserved. 15 | # 16 | # Contact: ps-license@tuebingen.mpg.de 17 | 18 | from __future__ import print_function 19 | from __future__ import absolute_import 20 | from __future__ import division 21 | 22 | # Joint name to vertex mapping. 
SMPL/SMPL-H/SMPL-X vertices that correspond to 23 | # MSCOCO and OpenPose joints 24 | vertex_ids = { 25 | 'smplh': { 26 | 'nose': 332, 27 | 'reye': 6260, 28 | 'leye': 2800, 29 | 'rear': 4071, 30 | 'lear': 583, 31 | 'rthumb': 6191, 32 | 'rindex': 5782, 33 | 'rmiddle': 5905, 34 | 'rring': 6016, 35 | 'rpinky': 6133, 36 | 'lthumb': 2746, 37 | 'lindex': 2319, 38 | 'lmiddle': 2445, 39 | 'lring': 2556, 40 | 'lpinky': 2673, 41 | 'LBigToe': 3216, 42 | 'LSmallToe': 3226, 43 | 'LHeel': 3387, 44 | 'RBigToe': 6617, 45 | 'RSmallToe': 6624, 46 | 'RHeel': 6787 47 | }, 48 | 'smplx': { 49 | 'nose': 9120, 50 | 'reye': 9929, 51 | 'leye': 9448, 52 | 'rear': 616, 53 | 'lear': 6, 54 | 'rthumb': 8079, 55 | 'rindex': 7669, 56 | 'rmiddle': 7794, 57 | 'rring': 7905, 58 | 'rpinky': 8022, 59 | 'lthumb': 5361, 60 | 'lindex': 4933, 61 | 'lmiddle': 5058, 62 | 'lring': 5169, 63 | 'lpinky': 5286, 64 | 'LBigToe': 5770, 65 | 'LSmallToe': 5780, 66 | 'LHeel': 8846, 67 | 'RBigToe': 8463, 68 | 'RSmallToe': 8474, 69 | 'RHeel': 8635 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /step3_point_avatar/lib/lbs.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn 3 | 4 | from smplx.lbs import batch_rodrigues, batch_rigid_transform 5 | 6 | def lbs(v_shaped, j_shaped, pose, parents, lbs_weights): 7 | """ 8 | This implementation is based on https://github.com/vchoutas/smplx/smplx/lbs.py. 9 | 10 | weights: [bs, n_pts, 24] 11 | body_lbs_weights: [6890, 24], used for similarity weighting between cloth and smpl verts 12 | posedirs: will add pose correctives if not None, shape: [207, 20670] 13 | """ 14 | batch_size = max(v_shaped.shape[0], pose.shape[0]) 15 | device, dtype = v_shaped.device, v_shaped.dtype 16 | 17 | v_posed = v_shaped # [N, 6890, 3] 18 | J = j_shaped # [N, 24, 3] 19 | rot_mats = batch_rodrigues(pose.view(-1, 3)).view([batch_size, -1, 3, 3]) # [N, 24, 3, 3] 20 | J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype) 21 | 22 | # do skinning: 23 | W = lbs_weights # [N, 6890, 24] 24 | T = torch.einsum('bvj,bjrc->bvrc', W, A) 25 | 26 | homogen_coord = torch.ones(batch_size, v_posed.shape[1], 1, dtype=dtype, device=device) # [N, 6890, 1], all ones 27 | v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2) # [N, 6890, 4] 28 | v_homo = torch.einsum('bvrc,bvc->bvr', T, v_posed_homo) 29 | 30 | verts = v_homo[:, :, :3]# / v_homo[:, :, 3:] 31 | 32 | return {'v_posed': verts, 'j_posed': J_transformed, 'v_tfs': T} 33 | 34 | 35 | def inv_lbs(v_posed, j_unposed, pose, parents, lbs_weights): 36 | """ 37 | This implementation is based on https://github.com/vchoutas/smplx/smplx/lbs.py. 
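    v_posed: [N, n_pts, 3], points in the posed space to be unposed
    j_unposed: [N, 24, 3], joint locations of the canonical (unposed) body
    pose: [N, 72], axis-angle pose parameters; parents: kinematic-tree parent indices; lbs_weights: [N, n_pts, 24]
    returns 'v_unposed', 'j_posed' and the per-point inverse transforms 'v_tfs_inv'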
38 | """ 39 | 40 | batch_size = max(v_posed.shape[0], pose.shape[0]) 41 | device, dtype = v_posed.device, v_posed.dtype 42 | 43 | v_posed = v_posed # [N, 6890, 3] 44 | J = j_unposed # [N, 24, 3] 45 | 46 | rot_mats = batch_rodrigues(pose.reshape(-1, 3)).reshape([batch_size, -1, 3, 3]) # [N, 24, 3, 3] 47 | J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype) 48 | 49 | ### NOTE Do skinning: 50 | W = lbs_weights # [N, 6890, 24] 51 | T = torch.einsum('bvj,bjrc->bvrc', W, A) 52 | T_inv = torch.linalg.inv(T) 53 | 54 | homogen_coord = torch.ones(batch_size, v_posed.shape[1], 1, dtype=dtype, device=device) # [N, 6890, 1], all ones 55 | v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2) # [N, 6890, 4] 56 | v_homo = torch.einsum('bvrc,bvc->bvr', T_inv, v_posed_homo) 57 | 58 | verts = v_homo[:, :, :3] # / v_homo[:, :, 3:] 59 | 60 | return {'v_unposed': verts, 'j_posed': J_transformed, 'v_tfs_inv': T_inv} 61 | -------------------------------------------------------------------------------- /step1_diffused_skinning/lbs.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn 3 | 4 | from smplx.lbs import batch_rodrigues, batch_rigid_transform 5 | 6 | def lbs(v_shaped, j_shaped, pose, parents, lbs_weights): 7 | """ 8 | This implementation is based on https://github.com/vchoutas/smplx/smplx/lbs.py. 9 | 10 | weights: [bs, n_pts, 24] 11 | body_lbs_weights: [6890, 24], used for similarity weighting between cloth and smpl verts 12 | posedirs: will add pose correctives if not None, shape: [207, 20670] 13 | """ 14 | batch_size = max(v_shaped.shape[0], pose.shape[0]) 15 | device, dtype = v_shaped.device, v_shaped.dtype 16 | 17 | v_posed = v_shaped # [N, 6890, 3] 18 | J = j_shaped # [N, 24, 3] 19 | rot_mats = batch_rodrigues(pose.view(-1, 3)).view([batch_size, -1, 3, 3]) # [N, 24, 3, 3] 20 | J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype) 21 | 22 | # do skinning: 23 | W = lbs_weights # [N, 6890, 24] 24 | T = torch.einsum('bvj,bjrc->bvrc', W, A) 25 | 26 | homogen_coord = torch.ones(batch_size, v_posed.shape[1], 1, dtype=dtype, device=device) # [N, 6890, 1], all ones 27 | v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2) # [N, 6890, 4] 28 | v_homo = torch.einsum('bvrc,bvc->bvr', T, v_posed_homo) 29 | 30 | verts = v_homo[:, :, :3]# / v_homo[:, :, 3:] 31 | 32 | return {'v_posed': verts, 'j_posed': J_transformed, 'v_tfs': T} 33 | 34 | 35 | def inv_lbs(v_posed, j_unposed, pose, parents, lbs_weights): 36 | """ 37 | This implementation is based on https://github.com/vchoutas/smplx/smplx/lbs.py. 
38 | """ 39 | 40 | batch_size = max(v_posed.shape[0], pose.shape[0]) 41 | device, dtype = v_posed.device, v_posed.dtype 42 | 43 | v_posed = v_posed # [N, 6890, 3] 44 | J = j_unposed # [N, 24, 3] 45 | 46 | rot_mats = batch_rodrigues(pose.reshape(-1, 3)).reshape([batch_size, -1, 3, 3]) # [N, 24, 3, 3] 47 | J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype) 48 | 49 | ### NOTE Do skinning: 50 | W = lbs_weights # [N, 6890, 24] 51 | T = torch.einsum('bvj,bjrc->bvrc', W, A) 52 | T_inv = torch.linalg.inv(T) 53 | 54 | homogen_coord = torch.ones(batch_size, v_posed.shape[1], 1, dtype=dtype, device=device) # [N, 6890, 1], all ones 55 | v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2) # [N, 6890, 4] 56 | v_homo = torch.einsum('bvrc,bvc->bvr', T_inv, v_posed_homo) 57 | 58 | verts = v_homo[:, :, :3] # / v_homo[:, :, 3:] 59 | 60 | return {'v_unposed': verts, 'j_posed': J_transformed, 'v_tfs_inv': T_inv} 61 | -------------------------------------------------------------------------------- /step2_implicit_template/lib/utils/meshing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from skimage import measure 4 | from lib.libmise import mise 5 | import trimesh 6 | 7 | ''' Code adapted from NASA https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/projects/nasa/lib/utils.py''' 8 | def generate_mesh(func, verts, level_set=0, res_init=32, res_up=3): 9 | 10 | scale = 1.1 # Scale of the padded bbox regarding the tight one. 11 | 12 | verts = verts.data.cpu().numpy() 13 | gt_bbox = np.stack([verts.min(axis=0), verts.max(axis=0)], axis=0) 14 | gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5 15 | gt_scale = (gt_bbox[1] - gt_bbox[0]).max() 16 | 17 | mesh_extractor = mise.MISE(res_init, res_up, level_set) 18 | points = mesh_extractor.query() 19 | 20 | # query occupancy grid 21 | with torch.no_grad(): 22 | while points.shape[0] != 0: 23 | 24 | orig_points = points 25 | points = points.astype(np.float32) 26 | points = (points / mesh_extractor.resolution - 0.5) * scale 27 | points = points * gt_scale + gt_center 28 | points = torch.tensor(points).float().cuda() 29 | 30 | values = func(points.unsqueeze(0))[:,0] 31 | values = values.data.cpu().numpy().astype(np.float64) 32 | 33 | mesh_extractor.update(orig_points, values) 34 | 35 | points = mesh_extractor.query() 36 | 37 | value_grid = mesh_extractor.to_dense() 38 | # value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6) 39 | 40 | # marching cube 41 | # verts, faces, normals, values = measure.marching_cubes_lewiner( 42 | # volume=value_grid, 43 | # gradient_direction='ascent', 44 | # level=min(level_set, value_grid.max())) 45 | verts, faces, normals, values = measure.marching_cubes( 46 | volume=value_grid, 47 | gradient_direction='ascent', 48 | level=min(level_set, value_grid.max()), 49 | method='lewiner') 50 | 51 | verts = (verts / mesh_extractor.resolution - 0.5) * scale 52 | verts = verts * gt_scale + gt_center 53 | 54 | meshexport = trimesh.Trimesh(verts, faces, normals, vertex_colors=values, process=False) 55 | 56 | # remove disconnect part 57 | connected_comp = meshexport.split(only_watertight=False) 58 | max_area = 0 59 | max_comp = None 60 | for comp in connected_comp: 61 | if comp.area > max_area: 62 | max_area = comp.area 63 | max_comp = comp 64 | meshexport = max_comp 65 | 66 | return meshexport -------------------------------------------------------------------------------- /step3_point_avatar/lib/losses.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | 4 | def chamfer_loss_separate(output, target, weight=1e4, phase='train', debug=False): 5 | from chamferdist.chamferdist import ChamferDistance 6 | cdist = ChamferDistance() 7 | model2scan, scan2model, idx1, idx2 = cdist(output, target) 8 | if phase == 'train': 9 | return model2scan, scan2model, idx1, idx2 10 | else: # in test, show both directions, average over points, but keep batch 11 | return torch.mean(model2scan, dim=-1)* weight, torch.mean(scan2model, dim=-1)* weight, 12 | 13 | 14 | def normal_loss(output_normals, target_normals, nearest_idx, weight=1.0, phase='train'): 15 | ''' 16 | Given the set of nearest neighbors found by chamfer distance, calculate the 17 | L1 discrepancy between the predicted and GT normals on each nearest neighbor point pairs. 18 | Note: the input normals are already normalized (length==1). 19 | ''' 20 | nearest_idx = nearest_idx.expand(3, -1, -1).permute([1,2,0]).long() # [batch, N] --> [batch, N, 3], repeat for the last dim 21 | target_normals_chosen = torch.gather(target_normals, dim=1, index=nearest_idx) 22 | 23 | assert output_normals.shape == target_normals_chosen.shape 24 | 25 | if phase == 'train': 26 | lnormal = F.l1_loss(output_normals, target_normals_chosen, reduction='mean') # [batch, 8000, 3]) 27 | return lnormal, target_normals_chosen 28 | else: 29 | lnormal = F.l1_loss(output_normals, target_normals_chosen, reduction='none') 30 | lnormal = lnormal.mean(-1).mean(-1) # avg over all but batch axis 31 | return lnormal, target_normals_chosen 32 | 33 | 34 | def color_loss(output_colors, target_colors, nearest_idx, weight=1.0, phase='train', excl_holes=False): 35 | ''' 36 | Similar to normal loss, used in training a color prediction model. 37 | ''' 38 | nearest_idx = nearest_idx.expand(3, -1, -1).permute([1,2,0]).long() # [batch, N] --> [batch, N, 3], repeat for the last dim 39 | target_colors_chosen = torch.gather(target_colors, dim=1, index=nearest_idx) 40 | 41 | assert output_colors.shape == target_colors_chosen.shape 42 | 43 | if excl_holes: 44 | # scan holes have rgb all=0, exclude these from supervision 45 | colorsum = target_colors_chosen.sum(-1) 46 | mask = (colorsum!=0).float().unsqueeze(-1) 47 | else: 48 | mask = 1. 
49 | 50 | if phase == 'train': 51 | lcolor = F.l1_loss(output_colors, target_colors_chosen, reduction='none') # [batch, 8000, 3]) 52 | lcolor = lcolor * mask 53 | lcolor = lcolor.mean() 54 | return lcolor, target_colors_chosen 55 | else: 56 | lcolor = F.l1_loss(output_colors, target_colors_chosen, reduction='none') 57 | lcolor = lcolor * mask 58 | lcolor = lcolor.mean(-1).mean(-1) # avg over all but batch axis 59 | return lcolor, target_colors_chosen -------------------------------------------------------------------------------- /step1_diffused_skinning/src_lbs_surf_grad/HEMath.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "HEMath.h" 6 | #define NORMALIZE_EPS 1e-8f 7 | #define HOMOGENIZE_EPS 1e-8f 8 | 9 | HEMath::Vec3::Vec3(): x(0.0f), y(0.0f), z(0.0f) {} 10 | HEMath::Vec3::Vec3(const HEMath::Vec3 & other): x(other.x), y(other.y), z(other.z) {} 11 | HEMath::Vec3::Vec3(HEfloat X, HEfloat Y, HEfloat Z): x(X), y(Y), z(Z) {} 12 | HEMath::Vec3 HEMath::Vec3::operator+(const HEMath::Vec3 & other) const { return HEMath::Vec3(x+other.x, y+other.y, z+other.z); } 13 | HEMath::Vec3 HEMath::Vec3::operator-(const HEMath::Vec3 & other) const { return HEMath::Vec3(x-other.x, y-other.y, z-other.z); } 14 | HEMath::Vec3 HEMath::Vec3::operator-() const { return HEMath::Vec3(-x, -y, -z); } 15 | HEMath::Vec3 HEMath::Vec3::operator*(HEfloat other) const { return HEMath::Vec3(x * other, y * other, z * other); } // scalar 16 | HEMath::Vec3 HEMath::Vec3::operator=(const HEMath::Vec3 & other) { x = other.x; y = other.y; z = other.z; return *this; } 17 | HEMath::Vec3 HEMath::Vec3::operator=(const HEfloat & other) { x = y = z = other; return *this; } 18 | bool HEMath::Vec3::operator==(const HEMath::Vec3 & other) const { return (x == other.x) && (y == other.y) && (z == other.z); } 19 | HEfloat HEMath::Vec3::operator[](int i) const { 20 | assert((0 <= i) && (i <= 2)); 21 | if (i == 0) return x; 22 | if (i == 1) return y; 23 | if (i == 2) return z; 24 | } 25 | HEfloat & HEMath::Vec3::operator[](int i) { 26 | assert((0 <= i) && (i <= 2)); 27 | if (i == 0) return x; 28 | if (i == 1) return y; 29 | if (i == 2) return z; 30 | } 31 | HEfloat HEMath::Vec3::norm() const { return sqrt(HEMath::inner_prod(*this, *this)); } 32 | HEMath::Vec3 HEMath::Vec3::vertical() const { 33 | Vec3 result = cross_prod(*this, Vec3(0.0f, 0.0f, 1.0f)); 34 | if ( result.norm() < NORMALIZE_EPS ) 35 | return Vec3(1.0f, 0.0f, 0.0f); 36 | return result.normalize(); 37 | } 38 | 39 | HEMath::Vec3 HEMath::Vec3::normalize() const { 40 | HEfloat vec_len = std::sqrt(inner_prod(*this, *this)); 41 | if (vec_len < NORMALIZE_EPS) { 42 | return Vec3(0.0f, 0.0f, 0.0f); 43 | } 44 | return Vec3(x / vec_len, y / vec_len, z / vec_len); 45 | } 46 | 47 | void HEMath::Vec3::normalize_() { 48 | HEfloat vec_len = std::sqrt(inner_prod(*this, *this)); 49 | if (vec_len < NORMALIZE_EPS) { 50 | x = y = z = 0.0f; 51 | } 52 | x /= vec_len; 53 | y /= vec_len; 54 | z /= vec_len; 55 | } 56 | HEfloat HEMath::inner_prod(const HEMath::Vec3 & v, const HEMath::Vec3 & w) { return v.x * w.x + v.y * w.y + v.z * w.z; } // friend function 57 | HEMath::Vec3 HEMath::cross_prod(const HEMath::Vec3 & v, const HEMath::Vec3 & w) { // friend function 58 | return HEMath::Vec3( 59 | v.y * w.z - v.z * w.y, 60 | v.z * w.x - v.x * w.z, 61 | v.x * w.y - v.y * w.x 62 | ); 63 | } 64 | void HEMath::Vec3::print() { printf("Vec4(%f, %f, %f)", x, y, z); } 65 | 
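For orientation, a minimal sketch (not code from this repository) of how the losses defined in step3_point_avatar/lib/losses.py shown earlier are typically combined into the step-3 objective, using the w_m2s / w_s2m / w_normal / w_rgl weights from configs/step3.yaml. All tensors are random stand-ins with the shapes the loss functions expect; the import assumes the repository root is on PYTHONPATH and that the chamferdist dependency used by losses.py is installed. The actual training loop lives in step3_point_avatar/train_fite_point_avatar.py.

import torch
import torch.nn.functional as F
from step3_point_avatar.lib.losses import chamfer_loss_separate, normal_loss

# Random stand-ins: predictions [B, N, 3], scan points/normals [B, M, 3].
# The chamferdist kernels expect CUDA tensors, hence .cuda().
pred_pts     = torch.rand(2, 8000, 3).cuda()
pred_normals = F.normalize(torch.rand(2, 8000, 3), dim=-1).cuda()
scan_pts     = torch.rand(2, 10000, 3).cuda()
scan_normals = F.normalize(torch.rand(2, 10000, 3), dim=-1).cuda()
residuals    = torch.rand(2, 8000, 3).cuda()   # predicted per-point offsets

# Bidirectional Chamfer terms; idx1 is assumed to hold, for every predicted
# point, the index of its nearest scan point (what normal_loss expects).
m2s, s2m, idx1, _ = chamfer_loss_separate(pred_pts, scan_pts)

# L1 discrepancy between predicted normals and the scan normals of those
# nearest-neighbour points.
lnormal, _ = normal_loss(pred_normals, scan_normals, idx1)

# Weighted sum with the values from configs/step3.yaml
# (w_m2s=10000, w_s2m=10000, w_normal=1, w_rgl=2000).
loss = (10000.0 * m2s.mean() + 10000.0 * s2m.mean()
        + 1.0 * lnormal + 2000.0 * (residuals ** 2).mean())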
-------------------------------------------------------------------------------- /step2_implicit_template/lib/smpl/vertex_joint_selector.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems and the Max Planck Institute for Biological 14 | # Cybernetics. All rights reserved. 15 | # 16 | # Contact: ps-license@tuebingen.mpg.de 17 | 18 | from __future__ import absolute_import 19 | from __future__ import print_function 20 | from __future__ import division 21 | 22 | import numpy as np 23 | 24 | import torch 25 | import torch.nn as nn 26 | 27 | from .utils import to_tensor 28 | 29 | 30 | class VertexJointSelector(nn.Module): 31 | 32 | def __init__(self, vertex_ids=None, 33 | use_hands=True, 34 | use_feet_keypoints=True, **kwargs): 35 | super(VertexJointSelector, self).__init__() 36 | 37 | extra_joints_idxs = [] 38 | 39 | face_keyp_idxs = np.array([ 40 | vertex_ids['nose'], 41 | vertex_ids['reye'], 42 | vertex_ids['leye'], 43 | vertex_ids['rear'], 44 | vertex_ids['lear']], dtype=np.int64) 45 | 46 | extra_joints_idxs = np.concatenate([extra_joints_idxs, 47 | face_keyp_idxs]) 48 | 49 | if use_feet_keypoints: 50 | feet_keyp_idxs = np.array([vertex_ids['LBigToe'], 51 | vertex_ids['LSmallToe'], 52 | vertex_ids['LHeel'], 53 | vertex_ids['RBigToe'], 54 | vertex_ids['RSmallToe'], 55 | vertex_ids['RHeel']], dtype=np.int32) 56 | 57 | extra_joints_idxs = np.concatenate( 58 | [extra_joints_idxs, feet_keyp_idxs]) 59 | 60 | if use_hands: 61 | self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky'] 62 | 63 | tips_idxs = [] 64 | for hand_id in ['l', 'r']: 65 | for tip_name in self.tip_names: 66 | tips_idxs.append(vertex_ids[hand_id + tip_name]) 67 | 68 | extra_joints_idxs = np.concatenate( 69 | [extra_joints_idxs, tips_idxs]) 70 | 71 | self.register_buffer('extra_joints_idxs', 72 | to_tensor(extra_joints_idxs, dtype=torch.long)) 73 | 74 | def forward(self, vertices, joints): 75 | extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs) 76 | joints = torch.cat([joints, extra_joints], dim=1) 77 | return joints 78 | -------------------------------------------------------------------------------- /step1_diffused_skinning/src_lbs_surf_grad/HEMesh.h: -------------------------------------------------------------------------------- 1 | #ifndef __HEMESH_H__ 2 | #define __HEMESH_H__ 3 | 4 | #include 5 | #include "DTypes.h" 6 | #include "HEMath.h" 7 | #include 8 | #include 9 | 10 | #define N_SMPL_JOINTS 24 11 | 12 | namespace HEMesh { 13 | class MeshStruct; 14 | struct HEdge; 15 | struct Face; 16 | struct Vertex; 17 | bool loadFromOBJ(const char * obj_fn, std::vector & out_verts, std::vector & out_v_indices); 18 | bool loadWeights(const char * txt_fn, std::vector & out_weights); 19 | bool writeToOBJ(const char * obj_fn, const std::vector & out_verts, const std::vector & out_faces); 20 | bool 
writeLBSWeightsToTxt(const char * obj_fn); 21 | } 22 | 23 | struct HEMesh::HEdge { 24 | HEint pair; 25 | HEint next; 26 | HEint f; 27 | HEint v; 28 | 29 | HEdge(HEint PAIR, HEint NEXT, HEint F, HEint V): pair(PAIR), next(NEXT), f(F), v(V) {} 30 | }; 31 | 32 | struct HEMesh::Face { 33 | HEint h; 34 | HEMath::Vec3 n; 35 | // other data 36 | Face(HEint H): h(H), n(0.0f, 0.0f, 0.0f) {} 37 | }; 38 | 39 | struct HEMesh::Vertex { 40 | HEint h; // any hedge that points to this vertex 41 | HEMath::Vec3 p; 42 | HEMath::Vec3 n; // normal vector 43 | HEMath::Vec3 tx; // tangent vector 44 | HEMath::Vec3 ty; // tangent vector 45 | HEfloat lbs_weights[N_SMPL_JOINTS] = {}; 46 | HEfloat lbs_weights_grad_tx[N_SMPL_JOINTS] = {}; 47 | HEfloat lbs_weights_grad_ty[N_SMPL_JOINTS] = {}; 48 | // other data 49 | Vertex(HEint H, HEfloat X, HEfloat Y, HEfloat Z): h(H), p(X, Y, Z), n(0.0f, 0.0f, 0.0f) {} 50 | Vertex(HEint H, HEfloat X, HEfloat Y, HEfloat Z, const HEfloat* LBS_WEIGHTS): h(H), p(X, Y, Z), n(0.0f, 0.0f, 0.0f) { 51 | for (int i = 0; i < N_SMPL_JOINTS; i++) 52 | lbs_weights[i] = LBS_WEIGHTS[i]; 53 | } 54 | }; 55 | 56 | class HEMesh::MeshStruct { 57 | protected: 58 | std::vector verts; 59 | std::vector faces; 60 | std::vector hedges; 61 | 62 | public: 63 | // constructors 64 | MeshStruct() {} 65 | MeshStruct(const MeshStruct & mesh); 66 | MeshStruct(const char * obj_fn); 67 | MeshStruct(const std::vector & verts, const std::vector & faces); 68 | 69 | // operators 70 | MeshStruct operator=(const MeshStruct & obj_fn); 71 | 72 | // set 73 | void setFromOBJ(const char * obj_fn); 74 | void setFromVertsFaces(const std::vector & verts, const std::vector & faces); 75 | void setFromVertsFacesWithAdditionalVertAttr(const std::vector & in_verts, const std::vector & in_faces, const std::vector & in_weights); 76 | bool exportVertsAndAttrs(const char * fn) const; 77 | 78 | // get 79 | size_t n_verts() const { return verts.size(); } 80 | size_t n_faces() const { return faces.size(); } 81 | size_t n_hedges() const { return hedges.size(); } 82 | size_t n_edges() const { return hedges.size() / 2; } 83 | size_t n_edges_euler() const { return verts.size() + faces.size() - 2; } 84 | 85 | // computations 86 | void computeVertexNormals(); 87 | void computeFaceNormals(); 88 | void computeComputeLBSGradients(HEint joint_id); 89 | 90 | size_t n_triangles() { return faces.size(); } 91 | // display 92 | void print(); 93 | }; 94 | 95 | 96 | #endif 97 | -------------------------------------------------------------------------------- /step2_implicit_template/lib/model/broyden.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def broyden(g, x_init, J_inv_init, max_steps=50, cvg_thresh=1e-5, dvg_thresh=1, eps=1e-6): 4 | """Find roots of the given function g(x) = 0. 5 | This function is impleneted based on https://github.com/locuslab/deq. 6 | 7 | Tensor shape abbreviation: 8 | N: number of points 9 | D: space dimension 10 | Args: 11 | g (function): the function of which the roots are to be determined. shape: [N, D, 1]->[N, D, 1] 12 | x_init (tensor): initial value of the parameters. shape: [N, D, 1] 13 | J_inv_init (tensor): initial value of the inverse Jacobians. shape: [N, D, D] 14 | 15 | max_steps (int, optional): max number of iterations. Defaults to 50. 16 | cvg_thresh (float, optional): covergence threshold. Defaults to 1e-5. 17 | dvg_thresh (float, optional): divergence threshold. Defaults to 1. 
18 | eps (float, optional): a small number added to the denominator to prevent numerical error. Defaults to 1e-6. 19 | 20 | Returns: 21 | result (tensor): root of the given function. shape: [N, D, 1] 22 | diff (tensor): corresponding loss. [N] 23 | valid_ids (tensor): identifiers of converged points. [N] 24 | """ 25 | 26 | # initialization 27 | x = x_init.clone().detach() 28 | J_inv = J_inv_init.clone().detach() 29 | 30 | ids_val = torch.ones(x.shape[0]).bool() 31 | 32 | gx = g(x, mask=ids_val) 33 | update = -J_inv.bmm(gx) 34 | 35 | x_opt = x.clone() 36 | gx_norm_opt = torch.linalg.norm(gx.squeeze(-1), dim=-1) 37 | 38 | delta_gx = torch.zeros_like(gx) 39 | delta_x = torch.zeros_like(x) 40 | 41 | ids_val = torch.ones_like(gx_norm_opt).bool() 42 | 43 | for solvestep in range(max_steps): 44 | # ic(solvestep) 45 | 46 | # update paramter values 47 | delta_x[ids_val] = update 48 | x[ids_val] += delta_x[ids_val] 49 | delta_gx[ids_val] = g(x, mask=ids_val) - gx[ids_val] 50 | gx[ids_val] += delta_gx[ids_val] 51 | 52 | # store values with minial loss 53 | gx_norm = torch.linalg.norm(gx.squeeze(-1), dim=-1) 54 | ids_opt = gx_norm < gx_norm_opt 55 | gx_norm_opt[ids_opt] = gx_norm.clone().detach()[ids_opt] 56 | x_opt[ids_opt] = x.clone().detach()[ids_opt] 57 | 58 | # exclude converged and diverged points from furture iterations 59 | ids_val = (gx_norm_opt > cvg_thresh) & (gx_norm < dvg_thresh) 60 | if ids_val.sum() <= 0: 61 | break 62 | 63 | # compute paramter update for next iter 64 | vT = (delta_x[ids_val]).transpose(-1, -2).bmm(J_inv[ids_val]) 65 | a = delta_x[ids_val] - J_inv[ids_val].bmm(delta_gx[ids_val]) 66 | b = vT.bmm(delta_gx[ids_val]) 67 | b[b >= 0] += eps 68 | b[b < 0] -= eps 69 | u = a / b 70 | 71 | # ic(ids_val) 72 | # ic(delta_x[ids_val].shape, J_inv[ids_val].shape, u.bmm(vT).shape) 73 | # ids_val_expanded = ids_val.reshape(-1,1,1).expand(-1,3,3) 74 | ubmmvT = u.bmm(vT) 75 | # ic(ids_val_expanded.shape, J_inv.shape) 76 | 77 | # J_inv[ids_val_expanded].add_(ubmmvT.reshape(-1).contiguous()) 78 | # J_inv[ids_val].add_(ubmmvT) 79 | J_inv[ids_val] += ubmmvT 80 | 81 | 82 | 83 | 84 | update = -J_inv[ids_val].bmm(gx[ids_val]) 85 | 86 | return {'result': x_opt, 'diff': gx_norm_opt, 'valid_ids': gx_norm_opt < cvg_thresh} 87 | -------------------------------------------------------------------------------- /step3_point_avatar/lib/fite_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from .modules import UnetNoCond5DS, UnetNoCond6DS, UnetNoCond7DS, ShapeDecoder, PreDeformer 6 | 7 | class FITEModel(nn.Module): 8 | def __init__( 9 | self, 10 | projection_list, 11 | input_nc=3, # num channels of the unet input 12 | c_geom=64, # channels of the geometric features 13 | c_pose=64, # channels of the pose features 14 | nf=64, # num filters for the unet 15 | posmap_size=256, # size of UV positional map (pose conditioning), i.e. 
the input to the pose unet 16 | hsize=256, # hidden layer size of the ShapeDecoder MLP 17 | up_mode='upconv', # upconv or upsample for the upsampling layers in the pose feature UNet 18 | use_dropout=False, # whether use dropout in the pose feature UNet 19 | ): 20 | 21 | super().__init__() 22 | 23 | self.posmap_size = posmap_size 24 | self.projection_list = projection_list 25 | 26 | unets = {32: UnetNoCond5DS, 64: UnetNoCond6DS, 128: UnetNoCond7DS, 256: UnetNoCond7DS} 27 | unet_loaded = unets[self.posmap_size] 28 | 29 | # U-net: for extracting pixel-aligned pose features from the input UV positional maps 30 | n_proj_directions = len(projection_list) 31 | self.poseencode_unets = [] 32 | for proj_id in range(n_proj_directions): 33 | self.poseencode_unets.append(unet_loaded(input_nc, c_pose//n_proj_directions, nf, up_mode=up_mode, use_dropout=use_dropout)) 34 | self.poseencode_unets = nn.ModuleList(self.poseencode_unets) 35 | 36 | self.decoder = ShapeDecoder(in_size=c_pose + c_geom + 3, hsize=hsize, actv_fn='softplus') 37 | self.predeformer = PreDeformer(in_size=c_geom) 38 | 39 | def query_feats(self, projected_pts, pose_featmaps): 40 | query_grid = projected_pts[:, :, None] # [bs, n_pts, 1, 3] 41 | queried_feats = F.grid_sample(pose_featmaps, query_grid)[..., 0] # [bs, n_feat, n_pts] 42 | 43 | # ic(queried_feats_front.shape, queried_feats_back.shape) 44 | return queried_feats 45 | 46 | def forward(self, geom_feats_batch, projected_pts_batch, basepoints_batch, posmaps_batch, posmap_weights_batch=None, return_featmap=False): 47 | 48 | queried_feats_batch = [] 49 | for proj_id in range(len(self.projection_list)): 50 | proj_direction = self.projection_list[proj_id]['dirc'] 51 | # projected_pts_batch = projected_pts_all[proj_direction][subject_id] 52 | pose_featmap = self.poseencode_unets[proj_id](posmaps_batch[proj_direction]) 53 | 54 | # ic(projected_pts_batch[proj_direction].shape, pose_featmap.shape) 55 | queried_feats = self.query_feats(projected_pts_batch[proj_direction], pose_featmap) 56 | if posmap_weights_batch is not None: 57 | queried_feats_batch.append(queried_feats * posmap_weights_batch[proj_direction][:, None]) 58 | else: 59 | queried_feats_batch.append(queried_feats) 60 | 61 | pose_feat_final = torch.cat(queried_feats_batch + [geom_feats_batch, basepoints_batch.permute(0, 2, 1)], 1) 62 | 63 | residuals, normals = self.decoder(pose_feat_final) 64 | 65 | if return_featmap: 66 | return residuals, normals, queried_feats_batch 67 | return residuals, normals 68 | -------------------------------------------------------------------------------- /step2_implicit_template/lib/model/helpers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import math 3 | import cv2 4 | import numpy as np 5 | 6 | def masked_softmax(vec, mask, dim=-1, mode='softmax', soft_blend=1): 7 | if mode == 'softmax': 8 | 9 | vec = torch.distributions.Bernoulli(logits=vec).probs 10 | 11 | masked_exps = torch.exp(soft_blend*vec) * mask.float() 12 | masked_exps_sum = masked_exps.sum(dim) 13 | 14 | output = torch.zeros_like(vec) 15 | output[masked_exps_sum>0,:] = masked_exps[masked_exps_sum>0,:]/ masked_exps_sum[masked_exps_sum>0].unsqueeze(-1) 16 | 17 | output = (output * vec).sum(dim, keepdim=True) 18 | 19 | output = torch.distributions.Bernoulli(probs=output).logits 20 | 21 | elif mode == 'max': 22 | vec[~mask] = -math.inf 23 | output = torch.max(vec, dim, keepdim=True)[0] 24 | 25 | return output 26 | 27 | 28 | ''' Hierarchical softmax following the kinematic tree of 
the human body. Imporves convergence speed''' 29 | def hierarchical_softmax(x): 30 | def softmax(x): 31 | return torch.nn.functional.softmax(x, dim=-1) 32 | 33 | def sigmoid(x): 34 | return torch.sigmoid(x) 35 | 36 | n_batch, n_point, n_dim = x.shape 37 | x = x.flatten(0,1) 38 | 39 | prob_all = torch.ones(n_batch * n_point, 24, device=x.device) 40 | 41 | prob_all[:, [1, 2, 3]] = prob_all[:, [0]] * sigmoid(x[:, [0]]) * softmax(x[:, [1, 2, 3]]) 42 | prob_all[:, [0]] = prob_all[:, [0]] * (1 - sigmoid(x[:, [0]])) 43 | 44 | prob_all[:, [4, 5, 6]] = prob_all[:, [1, 2, 3]] * (sigmoid(x[:, [4, 5, 6]])) 45 | prob_all[:, [1, 2, 3]] = prob_all[:, [1, 2, 3]] * (1 - sigmoid(x[:, [4, 5, 6]])) 46 | 47 | prob_all[:, [7, 8, 9]] = prob_all[:, [4, 5, 6]] * (sigmoid(x[:, [7, 8, 9]])) 48 | prob_all[:, [4, 5, 6]] = prob_all[:, [4, 5, 6]] * (1 - sigmoid(x[:, [7, 8, 9]])) 49 | 50 | prob_all[:, [10, 11]] = prob_all[:, [7, 8]] * (sigmoid(x[:, [10, 11]])) 51 | prob_all[:, [7, 8]] = prob_all[:, [7, 8]] * (1 - sigmoid(x[:, [10, 11]])) 52 | 53 | prob_all[:, [12, 13, 14]] = prob_all[:, [9]] * sigmoid(x[:, [24]]) * softmax(x[:, [12, 13, 14]]) 54 | prob_all[:, [9]] = prob_all[:, [9]] * (1 - sigmoid(x[:, [24]])) 55 | 56 | prob_all[:, [15]] = prob_all[:, [12]] * (sigmoid(x[:, [15]])) 57 | prob_all[:, [12]] = prob_all[:, [12]] * (1 - sigmoid(x[:, [15]])) 58 | 59 | prob_all[:, [16, 17]] = prob_all[:, [13, 14]] * (sigmoid(x[:, [16, 17]])) 60 | prob_all[:, [13, 14]] = prob_all[:, [13, 14]] * (1 - sigmoid(x[:, [16, 17]])) 61 | 62 | prob_all[:, [18, 19]] = prob_all[:, [16, 17]] * (sigmoid(x[:, [18, 19]])) 63 | prob_all[:, [16, 17]] = prob_all[:, [16, 17]] * (1 - sigmoid(x[:, [18, 19]])) 64 | 65 | prob_all[:, [20, 21]] = prob_all[:, [18, 19]] * (sigmoid(x[:, [20, 21]])) 66 | prob_all[:, [18, 19]] = prob_all[:, [18, 19]] * (1 - sigmoid(x[:, [20, 21]])) 67 | 68 | prob_all[:, [22, 23]] = prob_all[:, [20, 21]] * (sigmoid(x[:, [22, 23]])) 69 | prob_all[:, [20, 21]] = prob_all[:, [20, 21]] * (1 - sigmoid(x[:, [22, 23]])) 70 | 71 | prob_all = prob_all.reshape(n_batch, n_point, prob_all.shape[-1]) 72 | return prob_all 73 | 74 | def rectify_pose(pose, root_abs): 75 | """ 76 | Rectify AMASS pose in global coord adapted from https://github.com/akanazawa/hmr/issues/50. 77 | 78 | Args: 79 | pose (72,): Pose. 80 | 81 | Returns: 82 | Rotated pose. 
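        Note: root_abs is the absolute root orientation as a (3,) axis-angle vector; the first three entries of pose (the root rotation) are re-expressed relative to it via inv(R_abs) @ R_root.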
83 | """ 84 | pose = pose.copy() 85 | R_abs = cv2.Rodrigues(root_abs)[0] 86 | R_root = cv2.Rodrigues(pose[:3])[0] 87 | new_root = np.linalg.inv(R_abs).dot(R_root) 88 | pose[:3] = cv2.Rodrigues(new_root)[0].reshape(3) 89 | return pose -------------------------------------------------------------------------------- /step2_implicit_template/lib/dataset/fite.py: -------------------------------------------------------------------------------- 1 | import os 2 | from os.path import join 3 | import glob 4 | import yaml 5 | import trimesh 6 | 7 | import numpy as np 8 | import torch 9 | from torch.utils.data import Dataset 10 | 11 | import kaolin 12 | from ...lib.model.smpl import SMPLServer 13 | from ...lib.model.sample import PointInSpace 14 | 15 | class FITEDataSet(Dataset): 16 | 17 | def __init__(self, dataset_path, data_templ_path, opt, subject, clothing, split): 18 | 19 | 20 | self.regstr_list = glob.glob(join(dataset_path, subject, split, clothing+'*.npz'), recursive=True) 21 | 22 | with open(join(data_templ_path, 'gender_list.yaml') ,'r') as f: 23 | self.gender = yaml.safe_load(f)[subject] 24 | 25 | minimal_body_path = os.path.join(data_templ_path, subject, f'{subject}_minimal_tpose.ply') 26 | self.v_template = np.array(trimesh.load(minimal_body_path, process=False).vertices) 27 | self.meta_info = {'v_template': self.v_template, 'gender': self.gender} 28 | 29 | self.max_verts = 40000 30 | self.points_per_frame = opt['processor']['points_per_frame'] 31 | 32 | self.sampler = PointInSpace(**opt['processor']['sampler']) 33 | 34 | 35 | def __getitem__(self, index): 36 | 37 | data = {} 38 | 39 | while True: 40 | try: 41 | regstr = np.load(self.regstr_list[index]) 42 | poses = regstr['pose'] 43 | break 44 | except: 45 | index = np.random.randint(self.__len__()) 46 | print('corrupted npz') 47 | 48 | verts = regstr['scan_v'] - regstr['transl'][None,:] 49 | verts = torch.tensor(verts).float() 50 | 51 | faces = torch.tensor(regstr['scan_f']).long() 52 | 53 | ### NOTE remove foot poses 54 | if 'felice' in self.regstr_list[index]: 55 | poses[21:27] = 0 # foot 56 | poses[30:36] = 0 # toes 57 | 58 | smpl_params = torch.zeros([86]).float() 59 | smpl_params[0] = 1 60 | smpl_params[4:76] = torch.tensor(poses).float() 61 | 62 | # data['scan_verts'] = verts 63 | data['smpl_params'] = smpl_params 64 | data['smpl_thetas'] = smpl_params[4:76] 65 | data['smpl_betas'] = smpl_params[76:] 66 | 67 | num_verts, num_dim = verts.shape 68 | random_idx = torch.randint(0, num_verts, [self.points_per_frame, 1], device=verts.device) 69 | random_pts = torch.gather(verts, 0, random_idx.expand(-1, num_dim)) 70 | data['pts_d'] = self.sampler.get_points(random_pts[None])[0] 71 | data['occ_gt'] = kaolin.ops.mesh.check_sign(verts[None], faces, data['pts_d'][None]).float()[0].unsqueeze(-1) 72 | 73 | return data 74 | 75 | def __len__(self): 76 | return len(self.regstr_list) 77 | 78 | ''' Used to generate groud-truth occupancy and bone transformations in batchs during training ''' 79 | class FITEDataProcessor(): 80 | 81 | def __init__(self, opt, smpl_model_path, meta_info, device, **kwargs): 82 | 83 | self.opt = opt 84 | self.gender = meta_info['gender'] 85 | self.v_template = meta_info['v_template'] 86 | 87 | self.smpl_server = SMPLServer(smpl_model_path=smpl_model_path, gender=self.gender, v_template=self.v_template) 88 | self.smpl_faces = torch.tensor(self.smpl_server.smpl.faces.astype('int')).unsqueeze(0).to(device) 89 | 90 | self.sampler = PointInSpace(**opt['sampler']) 91 | 92 | def process(self, data): 93 | 94 | smpl_output = 
self.smpl_server(data['smpl_params'], absolute=True) 95 | data.update(smpl_output) 96 | return data 97 | -------------------------------------------------------------------------------- /step3_point_avatar/lib/shader_utils.py: -------------------------------------------------------------------------------- 1 | # Mario Rosasco, 2016 2 | # adapted from framework.cpp, Copyright (C) 2010-2012 by Jason L. McKesson 3 | # This file is licensed under the MIT License. 4 | # 5 | # NB: Unlike in the framework.cpp organization, the main loop is contained 6 | # in the tutorial files, not in this framework file. Additionally, a copy of 7 | # this module file must exist in the same directory as the tutorial files 8 | # to be imported properly. 9 | 10 | import os 11 | 12 | from OpenGL.GL import * 13 | 14 | 15 | # Function that creates and compiles shaders according to the given type (a GL enum value) and 16 | # shader program (a file containing a GLSL program). 17 | def loadShader(shaderType, shaderFile): 18 | # check if file exists, get full path name 19 | strFilename = findFileOrThrow(shaderFile) 20 | shaderData = None 21 | 22 | print(f"Found shader filename = {strFilename}") 23 | 24 | with open(strFilename, 'r') as f: 25 | shaderData = f.read() 26 | 27 | shader = glCreateShader(shaderType) 28 | glShaderSource(shader, shaderData) # note that this is a simpler function call than in C 29 | 30 | # This shader compilation is more explicit than the one used in 31 | # framework.cpp, which relies on a glutil wrapper function. 32 | # This is made explicit here mainly to decrease dependence on pyOpenGL 33 | # utilities and wrappers, which docs caution may change in future versions. 34 | glCompileShader(shader) 35 | 36 | status = glGetShaderiv(shader, GL_COMPILE_STATUS) 37 | if status == GL_FALSE: 38 | # Note that getting the error log is much simpler in Python than in C/C++ 39 | # and does not require explicit handling of the string buffer 40 | strInfoLog = glGetShaderInfoLog(shader) 41 | strShaderType = "" 42 | if shaderType is GL_VERTEX_SHADER: 43 | strShaderType = "vertex" 44 | elif shaderType is GL_GEOMETRY_SHADER: 45 | strShaderType = "geometry" 46 | elif shaderType is GL_FRAGMENT_SHADER: 47 | strShaderType = "fragment" 48 | 49 | print("Compilation failure for " + strShaderType + " shader:\n" + str(strInfoLog)) 50 | 51 | return shader 52 | 53 | 54 | # Function that accepts a list of shaders, compiles them, and returns a handle to the compiled program 55 | def createProgram(shaderList): 56 | program = glCreateProgram() 57 | 58 | for shader in shaderList: 59 | glAttachShader(program, shader) 60 | 61 | glLinkProgram(program) 62 | 63 | status = glGetProgramiv(program, GL_LINK_STATUS) 64 | if status == GL_FALSE: 65 | # Note that getting the error log is much simpler in Python than in C/C++ 66 | # and does not require explicit handling of the string buffer 67 | strInfoLog = glGetProgramInfoLog(program) 68 | print("Linker failure: \n" + str(strInfoLog)) 69 | 70 | for shader in shaderList: 71 | glDetachShader(program, shader) 72 | 73 | return program 74 | 75 | 76 | # Helper function to locate and open the target file (passed in as a string). 77 | # Returns the full path to the file as a string. 78 | def findFileOrThrow(strBasename): 79 | # Keep constant names in C-style convention, for readability 80 | # when comparing to C(/C++) code. 
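The two helpers above (`loadShader`, `createProgram`) follow the standard GL pattern: compile each stage, link, then detach and delete the shader objects. Below is a minimal sketch of how the posmap shaders shipped in this repo could be compiled with them; the hidden-window GLFW setup and the shader paths are assumptions for illustration (the actual wiring lives in `render_posmaps.py`, which is not reproduced here).

```python
import glfw
from OpenGL.GL import GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, glDeleteShader
# loadShader / createProgram are the functions defined in this module (shader_utils.py)

glfw.init()
glfw.window_hint(glfw.VISIBLE, glfw.FALSE)          # hidden window: we only need a GL context
window = glfw.create_window(512, 512, "posmap", None, None)
glfw.make_context_current(window)                   # every GL call requires a current context

shader_list = [
    loadShader(GL_VERTEX_SHADER, 'step3_point_avatar/posmap_shaders/v.glsl'),
    loadShader(GL_FRAGMENT_SHADER, 'step3_point_avatar/posmap_shaders/f.glsl'),
]
program = createProgram(shader_list)                # linked program handle, ready for glUseProgram
for shader in shader_list:
    glDeleteShader(shader)                          # safe once the program has been linked
```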
81 | if os.path.isfile(strBasename): 82 | return strBasename 83 | 84 | LOCAL_FILE_DIR = "data" + os.sep 85 | GLOBAL_FILE_DIR = os.path.dirname(os.path.abspath(__file__)) + os.sep + "data" + os.sep 86 | 87 | strFilename = LOCAL_FILE_DIR + strBasename 88 | if os.path.isfile(strFilename): 89 | return strFilename 90 | 91 | strFilename = GLOBAL_FILE_DIR + strBasename 92 | if os.path.isfile(strFilename): 93 | return strFilename 94 | 95 | raise IOError('Could not find target file ' + strBasename) 96 | -------------------------------------------------------------------------------- /step2_implicit_template/lib/model/sample.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class PointOnBones: 5 | def __init__(self, bone_ids): 6 | self.bone_ids = bone_ids 7 | 8 | def get_points(self, joints, num_per_bone=5): 9 | """Sample points on bones in canonical space. 10 | 11 | Args: 12 | joints (tensor): joint positions to define the bone positions. shape: [B, J, D] 13 | num_per_bone (int, optional): number of sample points on each bone. Defaults to 5. 14 | 15 | Returns: 16 | samples (tensor): sampled points in canoncial space. shape: [B, ?, 3] 17 | probs (tensor): ground truth occupancy for samples (all 1). shape: [B, ?] 18 | """ 19 | 20 | num_batch, _, _ = joints.shape 21 | 22 | samples = [] 23 | 24 | for bone_id in self.bone_ids: 25 | 26 | if bone_id[0] < 0 or bone_id[1] < 0: 27 | continue 28 | 29 | bone_dir = joints[:, bone_id[1]] - joints[:, bone_id[0]] 30 | 31 | scalars = ( 32 | torch.linspace(0, 1, steps=num_per_bone, device=joints.device) 33 | .unsqueeze(0) 34 | .expand(num_batch, -1) 35 | ) 36 | scalars = ( 37 | scalars + torch.randn((num_batch, num_per_bone), device=joints.device) * 0.1 38 | ).clamp_(0, 1) 39 | 40 | samples.append( 41 | joints[:, bone_id[0]].unsqueeze(1).expand(-1, scalars.shape[-1], -1) 42 | + torch.einsum("bn,bi->bni", scalars, bone_dir) # b: num_batch, n: num_per_bone, i: 3-dim 43 | ) 44 | 45 | samples = torch.cat(samples, dim=1) 46 | 47 | probs = torch.ones((num_batch, samples.shape[1]), device=joints.device) 48 | 49 | return samples, probs 50 | 51 | def get_joints(self, joints): 52 | """Sample joints in canonical space. 53 | 54 | Args: 55 | joints (tensor): joint positions to define the bone positions. shape: [B, J, D] 56 | 57 | Returns: 58 | samples (tensor): sampled points in canoncial space. shape: [B, ?, 3] 59 | weights (tensor): ground truth skinning weights for samples (all 1). shape: [B, ?, J] 60 | """ 61 | num_batch, num_joints, _ = joints.shape 62 | 63 | samples = [] 64 | weights = [] 65 | 66 | for k in range(num_joints): 67 | samples.append(joints[:, k]) 68 | weight = torch.zeros((num_batch, num_joints), device=joints.device) 69 | weight[:, k] = 1 70 | weights.append(weight) 71 | 72 | for bone_id in self.bone_ids: 73 | 74 | if bone_id[0] < 0 or bone_id[1] < 0: 75 | continue 76 | 77 | samples.append(joints[:, bone_id[1]]) 78 | 79 | weight = torch.zeros((num_batch, num_joints), device=joints.device) 80 | weight[:, bone_id[0]] = 1 81 | weights.append(weight) 82 | 83 | samples = torch.stack(samples, dim=1) 84 | weights = torch.stack(weights, dim=1) 85 | 86 | return samples, weights 87 | 88 | 89 | class PointInSpace: 90 | def __init__(self, global_sigma=1.8, local_sigma=0.01, **kwargs): 91 | self.global_sigma = global_sigma 92 | self.local_sigma = local_sigma 93 | 94 | def get_points(self, pc_input): 95 | """Sample one point near each of the given point + 1/8 uniformly. 
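`PointOnBones` above is driven by the parent–child bone list that `SMPLServer` builds (one `[parent, child]` pair per SMPL joint). A small sketch of its output shapes, using a truncated, illustrative bone list rather than the full 24-entry one:

```python
import torch
# Illustrative subset of SMPL bone pairs; SMPLServer provides the full list.
bone_ids = [[-1, 0], [0, 1], [0, 2], [0, 3]]
sampler = PointOnBones(bone_ids)                    # class defined above

joints = torch.rand(2, 24, 3)                       # [B, J, 3] canonical joint positions
pts, occ = sampler.get_points(joints, num_per_bone=5)
# the [-1, 0] pair is skipped, so 3 bones * 5 samples each:
print(pts.shape, occ.shape)                         # torch.Size([2, 15, 3]) torch.Size([2, 15])

jnt_samples, jnt_weights = sampler.get_joints(joints)
print(jnt_samples.shape, jnt_weights.shape)         # torch.Size([2, 27, 3]) torch.Size([2, 27, 24])
```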
96 | 97 | Args: 98 | pc_input (tensor): sampling centers. shape: [B, N, D] 99 | 100 | Returns: 101 | samples (tensor): sampled points. shape: [B, N + N / 8, D] 102 | """ 103 | 104 | batch_size, sample_size, dim = pc_input.shape 105 | 106 | sample_local = pc_input + (torch.randn_like(pc_input) * self.local_sigma) 107 | 108 | sample_global = ( 109 | torch.rand(batch_size, sample_size // 8, dim, device=pc_input.device) 110 | * (self.global_sigma * 2) 111 | ) - self.global_sigma 112 | 113 | sample = torch.cat([sample_local, sample_global], dim=1) 114 | 115 | return sample 116 | -------------------------------------------------------------------------------- /step2_implicit_template/lib/model/smpl.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import trimesh 4 | from ...lib.smpl.body_models import SMPL 5 | from os.path import join 6 | 7 | class SMPLServer(torch.nn.Module): 8 | 9 | def __init__(self, smpl_model_path, gender='neutral', betas=None, v_template=None, device='cuda'): 10 | super().__init__() 11 | 12 | 13 | self.smpl = SMPL(model_path=join(smpl_model_path, 'smpl'), 14 | gender=gender, 15 | batch_size=1, 16 | use_hands=False, 17 | use_feet_keypoints=False, 18 | dtype=torch.float32).to(device) 19 | 20 | self.bone_parents = self.smpl.bone_parents.astype(int) 21 | self.bone_parents[0] = -1 22 | self.bone_ids = [] 23 | for i in range(24): self.bone_ids.append([self.bone_parents[i], i]) 24 | 25 | if v_template is not None: 26 | self.v_template = torch.tensor(v_template).float().to(device) 27 | else: 28 | self.v_template = None 29 | 30 | if betas is not None: 31 | self.betas = torch.tensor(betas).float().to(device) 32 | else: 33 | self.betas = None 34 | 35 | # define the canonical pose 36 | param_canonical = torch.zeros((1, 86),dtype=torch.float32).to(device) 37 | param_canonical[0, 0] = 1 38 | param_canonical[0, 9] = 15/180*np.pi#np.pi / 6 39 | param_canonical[0, 12] = -15/180*np.pi#np.pi / 6 40 | if self.betas is not None and self.v_template is None: 41 | param_canonical[0,-10:] = self.betas 42 | self.param_canonical = param_canonical 43 | 44 | output = self.forward(param_canonical, absolute=True) 45 | 46 | ### NOTE add normals 47 | smpl_mesh = trimesh.Trimesh(output['smpl_verts'][0].cpu().numpy(), self.smpl.faces, process=False) 48 | 49 | self.vnormals_c = torch.from_numpy(smpl_mesh.vertex_normals.copy()).float()[None].to(device) 50 | self.faces_c = torch.from_numpy(self.smpl.faces.astype(np.int32)).long().to(device) 51 | self.verts_c = output['smpl_verts'] 52 | self.joints_c = output['smpl_jnts'] 53 | self.tfs_c_inv = output['smpl_tfs'].squeeze(0).inverse() 54 | 55 | 56 | def forward(self, smpl_params, absolute=False): 57 | """return SMPL output from params 58 | 59 | Args: 60 | smpl_params : smpl parameters. shape: [B, 86]. [0-scale,1:4-trans, 4:76-thetas,76:86-betas] 61 | absolute (bool): if true return smpl_tfs wrt thetas=0. else wrt thetas=thetas_canonical. 62 | 63 | Returns: 64 | smpl_verts: vertices. shape: [B, 6893. 3] 65 | smpl_tfs: bone transformations. shape: [B, 24, 4, 4] 66 | smpl_jnts: joint positions. 
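The 86-dim parameter layout documented just above ([scale, transl, thetas, betas]) is the same vector assembled by the step-2 dataset and the extraction script. A minimal sketch of building it and calling the server, assuming the SMPL model files are installed under `smpl_models` as described in the README:

```python
import torch
# [0] scale, [1:4] translation, [4:76] axis-angle pose (24 joints * 3), [76:86] betas
smpl_params = torch.zeros(1, 86)
smpl_params[0, 0] = 1.0                # unit scale, as the dataset does
# smpl_params[0, 4:76] = ...           # real code copies the scan's 'pose' parameters here
# betas are ignored by SMPLServer.forward when a personalized v_template is supplied

server = SMPLServer(smpl_model_path='smpl_models', gender='neutral')   # class defined above
out = server(smpl_params.to('cuda'), absolute=True)
print(out['smpl_verts'].shape, out['smpl_jnts'].shape, out['smpl_tfs'].shape)
# vertices [1, 6890, 3], joint positions, and [1, 24, 4, 4] bone transformations
```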
shape: [B, 25, 3] 67 | """ 68 | 69 | output = {} 70 | 71 | scale, transl, thetas, betas = torch.split(smpl_params, [1, 3, 72, 10], dim=1) 72 | 73 | # ignore betas if v_template is provided 74 | if self.v_template is not None: 75 | betas = torch.zeros_like(betas) 76 | 77 | smpl_output = self.smpl.forward(betas=betas, 78 | transl=torch.zeros_like(transl), 79 | body_pose=thetas[:, 3:], 80 | global_orient=thetas[:, :3], 81 | return_verts=True, 82 | return_full_pose=True, 83 | v_template=self.v_template) 84 | 85 | verts = smpl_output.vertices.clone() 86 | output['smpl_verts'] = verts * scale.unsqueeze(1) + transl.unsqueeze(1) 87 | 88 | joints = smpl_output.joints.clone() 89 | output['smpl_jnts'] = joints * scale.unsqueeze(1) + transl.unsqueeze(1) 90 | 91 | tf_mats = smpl_output.T.clone() 92 | tf_mats[:, :, :3, :] *= scale.unsqueeze(1).unsqueeze(1) 93 | tf_mats[:, :, :3, 3] += transl.unsqueeze(1) 94 | 95 | if not absolute: 96 | tf_mats = torch.einsum('bnij,njk->bnik', tf_mats, self.tfs_c_inv) 97 | 98 | output['smpl_tfs'] = tf_mats 99 | 100 | return output 101 | -------------------------------------------------------------------------------- /step2_implicit_template/train_fite_implicit_template.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | from tqdm import tqdm 3 | import cv2 4 | import torch 5 | from torch.nn.utils import clip_grad_norm_ 6 | from torch.utils.data import DataLoader 7 | 8 | import os 9 | from os.path import join 10 | 11 | from .lib.dataset.fite import FITEDataSet, FITEDataProcessor 12 | from .lib.snarf_model_diffused_skinning import SNARFModelDiffusedSkinning 13 | 14 | if __name__ == '__main__': 15 | 16 | opt = {} 17 | with open(join('configs', 'common.yaml'), 'r') as common_opt_f: 18 | common_opt = yaml.safe_load(common_opt_f) 19 | opt.update(common_opt) 20 | with open(join('configs', f'step2.yaml'), 'r') as step_opt_f: 21 | step_opt = yaml.safe_load(step_opt_f) 22 | opt.update(step_opt) 23 | 24 | exp_folder = join(opt['result_folder'], opt['expname'], 'step2-results') 25 | checkpoint_folder = join(opt['result_folder'], opt['expname'], 'step2-checkpoints') 26 | 27 | if not os.path.exists(exp_folder): 28 | os.makedirs(exp_folder) 29 | if not os.path.exists(checkpoint_folder): 30 | os.makedirs(checkpoint_folder) 31 | 32 | ### NOTE dataset 33 | dataset_train = FITEDataSet(dataset_path=opt['data_scans_path'], 34 | data_templ_path=opt['data_templates_path'], 35 | opt=opt['datamodule'], 36 | subject=opt['datamodule']['subject'], 37 | clothing=opt['datamodule']['clothing'], 38 | split='train') 39 | 40 | dataloader = DataLoader(dataset_train, 41 | batch_size=opt['datamodule']['batch_size'], 42 | num_workers=opt['datamodule']['num_workers'], 43 | shuffle=True, 44 | drop_last=True, 45 | pin_memory=True) 46 | 47 | ### NOTE data processor 48 | data_processor = FITEDataProcessor(opt['datamodule']['processor'], 49 | smpl_model_path=opt['smpl_model_path'], 50 | meta_info=dataset_train.meta_info, 51 | device=opt['device']) 52 | 53 | 54 | model = SNARFModelDiffusedSkinning(opt['model']['soft_blend'], 55 | opt['smpl_model_path'], 56 | opt['model']['pose_conditioning'], 57 | opt['model']['network'], 58 | subject=opt['datamodule']['subject'], 59 | cpose_smpl_mesh_path=join(opt['data_templates_path'], opt['datamodule']['subject'], opt['datamodule']['subject'] + '_minimal_cpose.ply'), 60 | cpose_weight_grid_path=join(opt['data_templates_path'], opt['datamodule']['subject'], opt['datamodule']['subject'] + '_cano_lbs_weights_grid_float32.npy'), 
61 | meta_info=dataset_train.meta_info, 62 | device=opt['device'], 63 | data_processor=data_processor).to(opt['device']) 64 | 65 | 66 | optimizer = torch.optim.Adam(model.parameters(), lr=opt['model']['optim']['lr']) 67 | 68 | loader = dataloader 69 | max_steps = opt['trainer']['max_steps'] 70 | 71 | total_steps = 0 72 | total_epochs = 0 73 | while total_steps < max_steps: 74 | tloader = tqdm(dataloader) 75 | for batch in tloader: 76 | for data_key in batch: 77 | batch[data_key] = batch[data_key].to(opt['device']) 78 | loss = model.training_step(batch, 0) 79 | 80 | optimizer.zero_grad() 81 | loss.backward() 82 | clip_grad_norm_(model.parameters(), max_norm=opt['trainer']['gradient_clip_val']) 83 | optimizer.step() 84 | 85 | tloader.set_description(f'[Epoch: {total_epochs:03d}; Step: {total_steps:05d}/{max_steps:05d}] loss_bce = {loss.item():.4e}') 86 | 87 | if total_steps % opt['trainer']['save_vis_every_n_iters'] == 0: 88 | with torch.no_grad(): 89 | img_all = model.validation_step(batch, 0)['img_all'] 90 | cv2.imwrite(join(exp_folder, f'vis-step-{total_steps:05d}.png'), img_all[..., :3][..., ::-1]) 91 | 92 | if total_steps % opt['trainer']['save_ckpt_every_n_iters'] == 0: 93 | torch.save(model.state_dict(), join(checkpoint_folder, 'checkpoint-latest.pt')) 94 | torch.save(model.state_dict(), join(checkpoint_folder, f'checkpoint-{total_steps:05d}.pt')) 95 | 96 | total_steps += 1 97 | total_epochs += 1 98 | -------------------------------------------------------------------------------- /step1_diffused_skinning/compute_diffused_skinning.py: -------------------------------------------------------------------------------- 1 | import os 2 | from os.path import join 3 | import math 4 | import numpy as np 5 | import trimesh 6 | import torch 7 | import yaml 8 | from glob import glob 9 | 10 | import array 11 | import tqdm 12 | 13 | import smplx 14 | from smplx.lbs import vertices2joints 15 | from .lbs import lbs 16 | 17 | if __name__ == '__main__': 18 | 19 | ### NOTE useful options 20 | opt = {} 21 | with open(join('configs', 'common.yaml'), 'r') as common_opt_f: 22 | common_opt = yaml.safe_load(common_opt_f) 23 | opt.update(common_opt) 24 | with open(join('configs', f'step1.yaml'), 'r') as step_opt_f: 25 | step_opt = yaml.safe_load(step_opt_f) 26 | opt.update(step_opt) 27 | 28 | data_templates_path = opt['data_templates_path'] 29 | subject = opt['subject'] 30 | smpl_model_path = opt['smpl_model_path'] 31 | num_joints = opt['num_joints'] 32 | leg_angle = opt['leg_angle'] 33 | point_interpolant_exe = opt['point_interpolant_exe'] 34 | skinning_grid_depth = opt['skinning_grid_depth'] 35 | lbs_surf_grad_exe = opt['lbs_surf_grad_exe'] 36 | ask_before_os_system = bool(opt['ask_before_os_system']) 37 | 38 | tmp_folder_constraints = opt['tmp_folder_constraints'] 39 | tmp_folder_skinning_grid = opt['tmp_folder_skinning_grid'] 40 | 41 | if not os.path.exists(tmp_folder_constraints): 42 | os.makedirs(tmp_folder_constraints) 43 | if not os.path.exists(tmp_folder_skinning_grid): 44 | os.makedirs(tmp_folder_skinning_grid) 45 | 46 | 47 | 48 | # ### NOTE get a canonical-pose SMPL template 49 | smpl_tpose_mesh_path = join(data_templates_path, subject, f'{subject}_minimal_tpose.ply') 50 | with open(join(data_templates_path, 'gender_list.yaml') ,'r') as f: 51 | gender = yaml.safe_load(f)[subject] 52 | 53 | cpose_param = torch.zeros(1, 72) 54 | cpose_param[:, 5] = leg_angle / 180 * math.pi 55 | cpose_param[:, 8] = -leg_angle / 180 * math.pi 56 | 57 | tpose_mesh = trimesh.load(smpl_tpose_mesh_path, process=False) 58 | 
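For orientation: the end product of this script (see the grid concatenation further down) is `{subject}_cano_lbs_weights_grid_float32.npy`, a `(24, 256, 256, 256)` array of diffused skinning weights. The sketch below shows one way such a grid could be queried with trilinear interpolation. The world-space bounding box and the axis ordering are placeholders, since both are fixed by PointInterpolant and by the step-2 deformer that actually consumes the grid (not shown here), and the subject name simply mirrors `configs/step1.yaml`.

```python
import numpy as np
import torch
import torch.nn.functional as F

grid = torch.from_numpy(np.load(
    'data_templates/rp_anna_posed_001/rp_anna_posed_001_cano_lbs_weights_grid_float32.npy'
))[None]                                   # [1, 24, 256, 256, 256]

BBOX_MIN, BBOX_MAX = -1.2, 1.2             # placeholder extent, NOT the value used by the deformer

def query_skinning_weights(pts):           # pts: [N, 3] canonical-space points
    coords = (pts - BBOX_MIN) / (BBOX_MAX - BBOX_MIN) * 2.0 - 1.0   # normalize to [-1, 1]
    # grid_sample reads the last coordinate dim as (x, y, z) against (W, H, D);
    # whether the saved grid needs an axis flip is left open here as an assumption.
    coords = coords.view(1, 1, 1, -1, 3).float()
    w = F.grid_sample(grid, coords, mode='bilinear', align_corners=True)
    return w[0, :, 0, 0, :].T              # [N, 24] soft skinning weights
```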
smpl_model = smplx.create(smpl_model_path, model_type='smpl', gender=gender) 59 | 60 | tpose_verts = torch.from_numpy(tpose_mesh.vertices).float()[None] 61 | tpose_joints = vertices2joints(smpl_model.J_regressor, tpose_verts) 62 | 63 | out = lbs(tpose_verts, tpose_joints, cpose_param, smpl_model.parents, smpl_model.lbs_weights[None]) 64 | cpose_verts = out['v_posed'][0].cpu().numpy() 65 | 66 | # np.savetxt('cano_data_grad_constraints.xyz', out['v_posed'][0], fmt="%.8f") 67 | cpose_mesh = trimesh.Trimesh(cpose_verts, smpl_model.faces, process=False) 68 | cpose_mesh.export(join(data_templates_path, subject, f'{subject}_minimal_cpose.obj')) 69 | cpose_mesh.export(join(data_templates_path, subject, f'{subject}_minimal_cpose.ply')) 70 | np.savetxt(join(data_templates_path, subject, f'{subject}_lbs_weights.txt'), smpl_model.lbs_weights.numpy(), fmt="%.8f") 71 | 72 | ### NOTE compute the along-surface gradients of skinning 73 | cmd = f'{lbs_surf_grad_exe} ' + \ 74 | f'{join(data_templates_path, subject, subject + "_minimal_cpose.obj")} ' + \ 75 | f'{join(data_templates_path, subject, subject + "_lbs_weights.txt")} ' + \ 76 | f'{join(data_templates_path, subject, subject + "_cpose_lbs_grads.txt")} ' 77 | 78 | if ask_before_os_system: 79 | go_on = input(f'\n[WILL EXECUTE with os.system] {cmd}\nContinue? (y/n)') 80 | else: 81 | go_on = 'y' 82 | if go_on == 'y': 83 | os.system(cmd) 84 | 85 | ### NOTE reorganize data 86 | data = np.loadtxt(join(data_templates_path, subject, subject + "_cpose_lbs_grads.txt")) 87 | 88 | position = data[:, 0:3] 89 | normals = data[:, 3:6] 90 | tx = data[:, 6:9] 91 | ty = data[:, 9:12] 92 | lbs_w = data[:, 12:36] 93 | lbs_tx = data[:, 36:60] 94 | lbs_ty = data[:, 60:84] 95 | 96 | if not os.path.exists(tmp_folder_constraints): 97 | os.mkdir(tmp_folder_constraints) 98 | 99 | for jid in tqdm.tqdm(range(num_joints)): 100 | out_fn_grad = os.path.join(tmp_folder_constraints, f"cano_data_lbs_grad_{jid:02d}.xyz") 101 | out_fn_val = os.path.join(tmp_folder_constraints, f"cano_data_lbs_val_{jid:02d}.xyz") 102 | 103 | grad_field = lbs_tx[:, jid:jid+1] * tx + lbs_ty[:, jid:jid+1] * ty 104 | 105 | out_data_grad = np.concatenate([position, grad_field], 1) 106 | out_data_val = np.concatenate([position, lbs_w[:, jid:jid+1]], 1) 107 | np.savetxt(out_fn_grad, out_data_grad, fmt="%.8f") 108 | np.savetxt(out_fn_val, out_data_val, fmt="%.8f") 109 | 110 | 111 | ### NOTE solve for the diffused skinning fields 112 | for jid in range(num_joints): 113 | cmd = f'{point_interpolant_exe} ' + \ 114 | f'--inValues {join(tmp_folder_constraints, f"cano_data_lbs_val_{jid:02d}.xyz")} ' + \ 115 | f'--inGradients {join(tmp_folder_constraints, f"cano_data_lbs_grad_{jid:02d}.xyz")} ' + \ 116 | f'--gradientWeight 0.05 --dim 3 --verbose ' + \ 117 | f'--grid {join(tmp_folder_skinning_grid, f"grid_{jid:02d}.grd")} ' + \ 118 | f'--depth {skinning_grid_depth} ' 119 | 120 | if ask_before_os_system: 121 | go_on = input(f'\n[WILL EXECUTE with os.system] {cmd}\nContinue? 
(y/n)') 122 | else: 123 | go_on = 'y' 124 | if go_on == 'y': 125 | os.system(cmd) 126 | 127 | ### NOTE concatenate all grids 128 | fn_list = sorted(list(glob(join(tmp_folder_skinning_grid, 'grid_*.grd')))) 129 | print(fn_list) 130 | 131 | grids = [] 132 | for fn in fn_list: 133 | with open(fn, 'rb') as f: 134 | bytes = f.read() 135 | grid_res = 2 ** skinning_grid_depth 136 | grid_header_len = len(bytes) - grid_res ** 3 * 8 137 | grid_np = np.array(array.array('d', bytes[grid_header_len:])).reshape(256, 256, 256) 138 | grids.append(grid_np) 139 | 140 | 141 | grids_all = np.stack(grids, 0) 142 | grids_all = np.clip(grids_all, 0.0, 1.0) 143 | grids_all = grids_all / grids_all.sum(0)[None] 144 | np.save(join(data_templates_path, subject, subject + '_cano_lbs_weights_grid_float32.npy'), grids_all.astype(np.float32)) 145 | -------------------------------------------------------------------------------- /step3_point_avatar/LICENSE_POP: -------------------------------------------------------------------------------- 1 | License 2 | 3 | Software Copyright License for non-commercial scientific research purposes 4 | Please read carefully the following terms and conditions and any accompanying documentation before you download and/or use the SCALE software, (the "Software"), including 3D meshes, images, videos, textures, software, scripts, and animations. By downloading and/or using the Software (including downloading, cloning, installing, and any other use of the corresponding github repository), you acknowledge that you have read these terms and conditions, understand them, and agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use the Software. Any infringement of the terms of this agreement will automatically terminate your rights under this License. 5 | 6 | 7 | Ownership / Licensees 8 | The Software and the associated materials has been developed at the 9 | 10 | Max Planck Institute for Intelligent Systems (hereinafter "MPI"). 11 | 12 | Any copyright or patent right is owned by and proprietary material of the 13 | 14 | Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (hereinafter “MPG”; MPI and MPG hereinafter collectively “Max-Planck”) 15 | 16 | hereinafter the “Licensor”. 17 | 18 | 19 | License Grant 20 | Licensor grants you (Licensee) personally a single-user, non-exclusive, non-transferable, free of charge right: 21 | 22 | To install the Software on computers owned, leased or otherwise controlled by you and/or your organization; 23 | To use the Software for the sole purpose of performing non-commercial scientific research, non-commercial education, or non-commercial artistic projects; 24 | Any other use, in particular any use for commercial, pornographic, military, or surveillance, purposes is prohibited. This includes, without limitation, incorporation in a commercial product, use in a commercial service, or production of other artefacts for commercial purposes. The Software may not be used to create fake, libelous, misleading, or defamatory content of any kind excluding analyses in peer-reviewed scientific research. The Software may not be reproduced, modified and/or made available in any form to any third party without Max-Planck’s prior written permission. 25 | 26 | The Software may not be used for pornographic purposes or to generate pornographic material whether commercial or not. This license also prohibits the use of the Software to train methods/algorithms/neural networks/etc. 
for commercial, pornographic, military, surveillance, or defamatory use of any kind. By downloading the Software, you agree not to reverse engineer it. 27 | 28 | 29 | No Distribution 30 | The Software and the license herein granted shall not be copied, shared, distributed, re-sold, offered for re-sale, transferred or sub-licensed in whole or in part except that you may make one copy for archive purposes only. 31 | 32 | 33 | Disclaimer of Representations and Warranties 34 | You expressly acknowledge and agree that the Software results from basic research, is provided “AS IS”, may contain errors, and that any use of the Software is at your sole risk. LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE SOFTWARE, NEITHER EXPRESS NOR IMPLIED, AND THE ABSENCE OF ANY LEGAL OR ACTUAL DEFECTS, WHETHER DISCOVERABLE OR NOT. Specifically, and not to limit the foregoing, licensor makes no representations or warranties (i) regarding the merchantability or fitness for a particular purpose of the Software, (ii) that the use of the Software will not infringe any patents, copyrights or other intellectual property rights of a third party, and (iii) that the use of the Software will not cause any damage of any kind to you or a third party. 35 | 36 | 37 | Limitation of Liability 38 | Because this Software License Agreement qualifies as a donation, according to Section 521 of the German Civil Code (Bürgerliches Gesetzbuch – BGB) Licensor as a donor is liable for intent and gross negligence only. If the Licensor fraudulently conceals a legal or material defect, they are obliged to compensate the Licensee for the resulting damage. 39 | Licensor shall be liable for loss of data only up to the amount of typical recovery costs which would have arisen had proper and regular data backup measures been taken. For the avoidance of doubt Licensor shall be liable in accordance with the German Product Liability Act in the event of product liability. The foregoing applies also to Licensor’s legal representatives or assistants in performance. Any further liability shall be excluded. 40 | Patent claims generated through the usage of the Software cannot be directed towards the copyright holders. 41 | The Software is provided in the state of development the licensor defines. If modified or extended by Licensee, the Licensor makes no claims about the fitness of the Software and is not responsible for any problems such modifications cause. 42 | 43 | 44 | No Maintenance Services 45 | You understand and agree that Licensor is under no obligation to provide either maintenance services, update services, notices of latent defects, or corrections of defects with regard to the Software. Licensor nevertheless reserves the right to update, modify, or discontinue the Software at any time. 46 | 47 | Defects of the Software must be notified in writing to the Licensor with a comprehensible description of the error symptoms. The notification of the defect should enable the reproduction of the error. The Licensee is encouraged to communicate any use, results, modification or publication. 48 | 49 | 50 | Publications using the Software 51 | You acknowledge that the Software is a valuable scientific resource and agree to appropriately reference the following paper in any publication making use of the Software. 
52 | 53 | Citation: 54 | 55 | @inproceedings{Ma:CVPR:2021, 56 | title = {{SCALE}: Modeling Clothed Humans with a Surface Codec of Articulated Local Elements}, 57 | author = {Ma, Qianli and Saito, Shunsuke and Yang, Jinlong and Tang, Siyu and Black, Michael J.}, 58 | booktitle = {Proceedings IEEE/CVF Conf.~on Computer Vision and Pattern Recognition (CVPR)}, 59 | month = jun, 60 | year = {2021}, 61 | month_numeric = {6} 62 | } 63 | 64 | 65 | Commercial licensing opportunities 66 | For commercial uses of the Software, please send email to ps-license@tue.mpg.de 67 | 68 | This Agreement shall be governed by the laws of the Federal Republic of Germany except for the UN Sales Convention. -------------------------------------------------------------------------------- /step2_implicit_template/lib/utils/render.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import torch 4 | import cv2 5 | 6 | from pytorch3d.renderer import ( 7 | FoVOrthographicCameras, 8 | RasterizationSettings, 9 | MeshRenderer, 10 | MeshRasterizer, 11 | HardPhongShader, 12 | PointLights 13 | ) 14 | from pytorch3d.structures import Meshes 15 | from pytorch3d.renderer.mesh import Textures 16 | 17 | 18 | class Renderer(): 19 | def __init__(self, image_size=512): 20 | super().__init__() 21 | 22 | self.image_size = image_size 23 | 24 | self.device = torch.device("cuda:0") 25 | torch.cuda.set_device(self.device) 26 | 27 | R = torch.from_numpy(np.array([[-1., 0., 0.], 28 | [0., 1., 0.], 29 | [0., 0., -1.]])).cuda().float().unsqueeze(0) 30 | 31 | 32 | t = torch.from_numpy(np.array([[0., 0.3, 5.]])).cuda().float() 33 | 34 | self.cameras = FoVOrthographicCameras(R=R, T=t,device=self.device) 35 | 36 | self.lights = PointLights(device=self.device,location=[[0.0, 0.0, 3.0]], 37 | ambient_color=((1,1,1),),diffuse_color=((0,0,0),),specular_color=((0,0,0),)) 38 | 39 | self.raster_settings = RasterizationSettings(image_size=image_size,faces_per_pixel=100,blur_radius=0) 40 | self.rasterizer = MeshRasterizer(cameras=self.cameras, raster_settings=self.raster_settings) 41 | 42 | self.shader = HardPhongShader(device=self.device, cameras=self.cameras, lights=self.lights) 43 | 44 | self.renderer = MeshRenderer(rasterizer=self.rasterizer, shader=self.shader) 45 | 46 | def render_mesh(self, verts, faces, colors=None, mode='npat'): 47 | ''' 48 | mode: normal, phong, texture 49 | ''' 50 | with torch.no_grad(): 51 | 52 | mesh = Meshes(verts, faces) 53 | 54 | normals = torch.stack(mesh.verts_normals_list()) 55 | front_light = torch.tensor([0,0,1]).float().to(verts.device) 56 | shades = (normals * front_light.view(1,1,3)).sum(-1).clamp(min=0).unsqueeze(-1).expand(-1,-1,3) 57 | results = [] 58 | 59 | # normal 60 | if 'n' in mode: 61 | normals_vis = normals* 0.5 + 0.5 62 | mesh_normal = Meshes(verts, faces, textures=Textures(verts_rgb=normals_vis)) 63 | image_normal = self.renderer(mesh_normal) 64 | results.append(image_normal) 65 | 66 | # shading 67 | if 'p' in mode: 68 | mesh_shading = Meshes(verts, faces, textures=Textures(verts_rgb=shades)) 69 | image_phong = self.renderer(mesh_shading) 70 | results.append(image_phong) 71 | 72 | # albedo 73 | if 'a' in mode: 74 | assert(colors is not None) 75 | mesh_albido = Meshes(verts, faces, textures=Textures(verts_rgb=colors)) 76 | image_color = self.renderer(mesh_albido) 77 | results.append(image_color) 78 | 79 | # albedo*shading 80 | if 't' in mode: 81 | assert(colors is not None) 82 | mesh_teture = Meshes(verts, faces, 
textures=Textures(verts_rgb=colors*shades)) 83 | image_color = self.renderer(mesh_teture) 84 | results.append(image_color) 85 | 86 | return torch.cat(results, axis=1) 87 | 88 | image_size = 512 89 | torch.cuda.set_device(torch.device("cuda:0")) 90 | renderer = Renderer(image_size) 91 | 92 | def render(verts, faces, colors=None): 93 | return renderer.render_mesh(verts, faces, colors) 94 | 95 | def render_trimesh(mesh, mode='npta'): 96 | verts = torch.tensor(mesh.vertices).cuda().float()[None] 97 | faces = torch.tensor(mesh.faces).cuda()[None] 98 | colors = torch.tensor(mesh.visual.vertex_colors).float().cuda()[None,...,:3]/255 99 | image = renderer.render_mesh(verts, faces, colors=colors, mode=mode)[0] 100 | image = (255*image).data.cpu().numpy().astype(np.uint8) 101 | return image 102 | 103 | 104 | def render_joint(smpl_jnts, bone_ids): 105 | marker_sz = 6 106 | line_wd = 2 107 | 108 | image = np.ones((image_size, image_size,3), dtype=np.uint8)*255 109 | smpl_jnts[:,1] += 0.3 110 | smpl_jnts[:,1] = -smpl_jnts[:,1] 111 | smpl_jnts = smpl_jnts[:,:2]*image_size/2 + image_size/2 112 | 113 | for b in bone_ids: 114 | if b[0]<0 : continue 115 | joint = smpl_jnts[b[0]] 116 | cv2.circle(image, joint.astype('int32'), color=(0,0,0), radius=marker_sz, thickness=-1) 117 | 118 | joint2 = smpl_jnts[b[1]] 119 | cv2.circle(image, joint2.astype('int32'), color=(0,0,0), radius=marker_sz, thickness=-1) 120 | 121 | cv2.line(image, joint2.astype('int32'), joint.astype('int32'), color=(0,0,0), thickness=int(line_wd)) 122 | 123 | return image 124 | 125 | 126 | 127 | def weights2colors(weights): 128 | import matplotlib.pyplot as plt 129 | 130 | cmap = plt.get_cmap('Paired') 131 | 132 | colors = [ 'pink', #0 133 | 'blue', #1 134 | 'green', #2 135 | 'red', #3 136 | 'pink', #4 137 | 'pink', #5 138 | 'pink', #6 139 | 'green', #7 140 | 'blue', #8 141 | 'red', #9 142 | 'pink', #10 143 | 'pink', #11 144 | 'pink', #12 145 | 'blue', #13 146 | 'green', #14 147 | 'red', #15 148 | 'cyan', #16 149 | 'darkgreen', #17 150 | 'pink', #18 151 | 'pink', #19 152 | 'blue', #20 153 | 'green', #21 154 | 'pink', #22 155 | 'pink' #23 156 | ] 157 | 158 | 159 | color_mapping = {'cyan': cmap.colors[3], 160 | 'blue': cmap.colors[1], 161 | 'darkgreen': cmap.colors[1], 162 | 'green':cmap.colors[3], 163 | 'pink': [1,1,1], 164 | 'red':cmap.colors[5], 165 | } 166 | 167 | for i in range(len(colors)): 168 | colors[i] = np.array(color_mapping[colors[i]]) 169 | 170 | colors = np.stack(colors)[None]# [1x24x3] 171 | verts_colors = weights[:,:,None] * colors 172 | verts_colors = verts_colors.sum(1) 173 | return verts_colors -------------------------------------------------------------------------------- /measure_error.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from glob import glob 3 | from tqdm import tqdm 4 | import matplotlib.pyplot as plt 5 | import pymeshlab 6 | import argparse 7 | from scipy.spatial import cKDTree as KDTree 8 | import trimesh 9 | 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument('gt_dataset', type=str) 12 | parser.add_argument('recon_dataset', type=str) 13 | args = parser.parse_args() 14 | 15 | 16 | def angular_error(gt_mesh_name, gen_mesh_name, sample_num): 17 | """ 18 | This function computes a symmetric chamfer distance, i.e. the sum of both chamfers. 
19 | 20 | gt_mesh: trimesh.base.Trimesh of output mesh from whichever autoencoding reconstruction 21 | method (see compute_metrics.py for more) 22 | 23 | gen_mesh: trimesh.base.Trimesh of output mesh from whichever autoencoding reconstruction 24 | method (see compute_metrics.py for more) 25 | 26 | """ 27 | gt_mesh = trimesh.load_mesh(gt_mesh_name) 28 | gen_mesh = trimesh.load_mesh(gen_mesh_name) 29 | 30 | gt_points, gt_face_index = trimesh.sample.sample_surface(gt_mesh, sample_num) 31 | gen_points, gen_face_index = trimesh.sample.sample_surface(gen_mesh, sample_num) 32 | 33 | gt_normals = gt_mesh.face_normals[gt_face_index] 34 | gen_normals = gen_mesh.face_normals[gen_face_index] 35 | 36 | # one direction 37 | gen_points_kd_tree = KDTree(gen_points) 38 | gt2gen_dist, gt2gen_vert_ids = gen_points_kd_tree.query(gt_points) 39 | gt2gen_closest_normals_on_gen = gen_normals[gt2gen_vert_ids] 40 | gt2gen_cos_sim = np.mean(np.einsum('nk,nk->n', gt_normals, gt2gen_closest_normals_on_gen)) 41 | 42 | # other direction 43 | gt_points_kd_tree = KDTree(gt_points) 44 | gen2gt_dist, gen2gt_vert_ids = gt_points_kd_tree.query(gen_points) 45 | gen2gt_closest_normals_on_gen = gt_normals[gen2gt_vert_ids] 46 | gen2gt_cos_sim = np.mean(np.einsum('nk,nk->n', gen_normals, gen2gt_closest_normals_on_gen)) 47 | cos_sim = (np.abs(gt2gen_cos_sim) + np.abs(gen2gt_cos_sim)) / 2 48 | 49 | str_ang = f"angle: {gt2gen_cos_sim:.6f} {gen2gt_cos_sim:.6f} {cos_sim:.6f}\n" 50 | 51 | return str_ang, cos_sim 52 | 53 | def print_matching(list_a, list_b): 54 | counter = 0 55 | for a, b in zip(list_a, list_b): 56 | counter += 1 57 | print(f'Matched {a} and {b}') 58 | print(f'Matched {counter} of {len(list_a)} and {len(list_b)}') 59 | 60 | 61 | def res2str(name_a, name_b, res_a2b, res_b2a, ms): 62 | """ 63 | this normalizes the results by bounding box diagonal 64 | and put into a new dict 65 | """ 66 | 67 | # error field extraction and normalization 68 | a2b_error_field = ms.mesh(3).vertex_quality_array() # float64, (100000,) 69 | b2a_error_field = ms.mesh(5).vertex_quality_array() # float64, (100000,) 70 | a2b_error_field /= res_a2b['diag_mesh_0'] 71 | b2a_error_field /= res_b2a['diag_mesh_0'] 72 | 73 | dist_Haus_a2b = a2b_error_field.max() 74 | dist_Haus_b2a = b2a_error_field.max() 75 | dist_symHausd = max(dist_Haus_a2b, dist_Haus_b2a) 76 | 77 | dist_Cham_a2b = (a2b_error_field ** 2).mean() 78 | dist_Cham_b2a = (b2a_error_field ** 2).mean() 79 | dist_symChamf = (dist_Cham_a2b + dist_Cham_b2a) / 2 80 | 81 | str_nma = f"name_a: {name_a}\n" 82 | str_nmb = f"name_b: {name_b}\n" 83 | str_itm = f"---- a2b b2a sym\n" 84 | str_hau = f"haus: {dist_Haus_a2b:.6e} {dist_Haus_b2a:.6e} {dist_symHausd:.6e}\n" 85 | str_chm = f"chamfer: {dist_Cham_a2b:.6e} {dist_Cham_b2a:.6e} {dist_symChamf:.6e}\n" 86 | str_dg0 = f"diag a: {res_a2b['diag_mesh_0']:.6e}\n" 87 | str_dg1 = f"diag b: {res_a2b['diag_mesh_1']:.6e}\n" 88 | str_num = f"n_samples: {res_a2b['n_samples']}\n" 89 | 90 | str_all = str_nma + str_nmb + str_itm + str_hau + str_chm + str_dg0 + str_dg1 + str_num 91 | return str_all, dist_symHausd, dist_Haus_a2b, dist_Haus_b2a, dist_symChamf, dist_Cham_a2b, dist_Cham_b2a 92 | 93 | def compare_meshes(meshfile_a, meshfile_b, sample_num): 94 | ms = pymeshlab.MeshSet() 95 | ms.load_new_mesh(meshfile_a) 96 | ms.load_new_mesh(meshfile_b) 97 | 98 | res_a2b = ms.hausdorff_distance( 99 | sampledmesh=0, 100 | targetmesh=1, 101 | savesample=True, 102 | samplevert=False, 103 | sampleedge=False, 104 | samplefauxedge=False, 105 | sampleface=True, 106 | 
samplenum=sample_num 107 | ) 108 | 109 | # 2 is closest from a to b (on b) 110 | # 3 is sampled from a to b (on a) 111 | 112 | res_b2a = ms.hausdorff_distance( 113 | sampledmesh=1, 114 | targetmesh=0, 115 | savesample=True, 116 | samplevert=False, 117 | sampleedge=False, 118 | samplefauxedge=False, 119 | sampleface=True, 120 | samplenum=sample_num 121 | ) 122 | 123 | # 4 is closest from b to a (on a) 124 | # 5 is sampled from b to a (on b) 125 | 126 | str_res, d_haus, d_haus_a2b, d_haus_b2a, d_cham, d_cham_a2b, d_cham_b2a = res2str(meshfile_a, meshfile_b, res_a2b, res_b2a, ms) 127 | 128 | del ms 129 | return str_res, d_haus, d_cham 130 | 131 | if __name__ == '__main__': 132 | folder_GT = args.gt_dataset 133 | folder_recon = args.recon_dataset 134 | 135 | list_GT = sorted(list(glob(f'{folder_GT}/*_poisson.ply'))) 136 | list_recon = sorted(list(glob(f'{folder_recon}/*_poisson.ply'))) 137 | 138 | log_file = f'{folder_recon}/log_file_ver2.txt' 139 | log_file_stream = open(log_file, 'w') 140 | 141 | csv_file = f'{folder_recon}/hausandchamfer.csv' 142 | csv_file_stream = open(csv_file, 'w') 143 | 144 | assert len(list_GT) == len(list_recon), f'you have {len(list_GT)} GT and {len(list_recon)} recon files' 145 | print_matching(list_GT, list_recon) 146 | 147 | for i in tqdm(range(len(list_GT))): 148 | print(f'Trying to compare \n {list_GT[i]}\n {list_recon[i]}') 149 | 150 | sample_num = 200000 151 | 152 | str_res, d_haus, d_cham = compare_meshes(list_GT[i], list_recon[i], sample_num) 153 | str_ang, cos_sim = angular_error(list_GT[i], list_recon[i], sample_num) 154 | 155 | log_file_stream.write(str_res + str_ang + '---------------\n') 156 | print(str_res + str_ang + '---------------\n') 157 | csv_file_stream.write(f'{list_recon[i]},{d_haus},{d_cham},{cos_sim}\n') 158 | 159 | log_file_stream.close() 160 | csv_file_stream.close() 161 | -------------------------------------------------------------------------------- /step2_implicit_template/extract_fite_implicit_template.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import torch 4 | import numpy as np 5 | 6 | import yaml 7 | import trimesh 8 | from os.path import join 9 | 10 | from pytorch3d.ops import sample_farthest_points 11 | 12 | from .lib.snarf_model_diffused_skinning import SNARFModelDiffusedSkinning 13 | from .lib.model.helpers import rectify_pose 14 | 15 | if __name__ == '__main__': 16 | opt = {} 17 | with open(join('configs', 'common.yaml'), 'r') as common_opt_f: 18 | common_opt = yaml.safe_load(common_opt_f) 19 | opt.update(common_opt) 20 | with open(join('configs', f'step2.yaml'), 'r') as step_opt_f: 21 | step_opt = yaml.safe_load(step_opt_f) 22 | opt.update(step_opt) 23 | 24 | exp_folder = join(opt['result_folder'], opt['expname'], 'step2-results') 25 | checkpoint_folder = join(opt['result_folder'], opt['expname'], 'step2-checkpoints') 26 | checkpoint_path = join(checkpoint_folder, 'checkpoint-latest.pt') 27 | 28 | # set subject info 29 | subject = opt['datamodule']['subject'] 30 | minimal_body_path = os.path.join(opt['data_templates_path'], subject, f'{subject}_minimal_tpose.ply') 31 | v_template = np.array(trimesh.load(minimal_body_path, process=False).vertices) 32 | with open(join(opt['data_templates_path'], 'gender_list.yaml') ,'r') as f: 33 | gender = yaml.safe_load(f)[subject] 34 | meta_info = {'v_template': v_template.copy(), 'gender': gender} 35 | # meta_info = np.load('meta_info.npz') 36 | 37 | model = SNARFModelDiffusedSkinning(opt['model']['soft_blend'], 38 
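The two metric helpers above can also be called directly on a single pair of meshes. Note that, its docstring notwithstanding, `angular_error` measures normal (cosine) consistency between nearest-neighbour surface samples, while `compare_meshes` produces the bounding-box-normalized Hausdorff and Chamfer numbers. A small sketch with hypothetical file names:

```python
sample_num = 200000                                   # same sample count as the __main__ loop above
gt_file    = 'gt_meshes/frame_0000_poisson.ply'       # hypothetical paths
recon_file = 'recon_meshes/frame_0000_poisson.ply'

str_res, d_haus, d_cham = compare_meshes(gt_file, recon_file, sample_num)
str_ang, cos_sim = angular_error(gt_file, recon_file, sample_num)
print(str_res + str_ang)
# d_cham averages the two directions (a->b, b->a), d_haus takes their maximum,
# and cos_sim averages the absolute normal consistencies of both directions.
```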
| opt['smpl_model_path'], 39 | opt['model']['pose_conditioning'], 40 | opt['model']['network'], 41 | subject=opt['datamodule']['subject'], 42 | cpose_smpl_mesh_path=join(opt['data_templates_path'], opt['datamodule']['subject'], opt['datamodule']['subject'] + '_minimal_cpose.ply'), 43 | cpose_weight_grid_path=join(opt['data_templates_path'], opt['datamodule']['subject'], opt['datamodule']['subject'] + '_cano_lbs_weights_grid_float32.npy'), 44 | meta_info=meta_info, 45 | device=opt['device'], 46 | data_processor=None).to(opt['device']) 47 | 48 | model.load_state_dict(torch.load(checkpoint_path)) 49 | model.deformer.init_bones = np.arange(24) 50 | model.eval() 51 | 52 | # pose format conversion 53 | smplx_to_smpl = list(range(66)) + [72, 73, 74, 117, 118, 119] # SMPLH to SMPL 54 | 55 | # load motion sequence 56 | motion_path = join(opt['data_scans_path'], opt['datamodule']['subject'], 'train') 57 | if os.path.isdir(motion_path): 58 | motion_files = sorted(glob.glob(os.path.join(motion_path, '*.npz'))) 59 | smpl_params_all = [] 60 | for f in motion_files: 61 | f = np.load(f) 62 | smpl_params = np.zeros(86) 63 | smpl_params[0], smpl_params[4:76] = 1, f['pose'] 64 | smpl_params = torch.tensor(smpl_params).float().to(opt['device']) 65 | smpl_params_all.append(smpl_params) 66 | smpl_params_all = torch.stack(smpl_params_all) 67 | 68 | elif '.npz' in motion_path: 69 | f = np.load(motion_path) 70 | smpl_params_all = np.zeros( (f['poses'].shape[0], 86) ) 71 | smpl_params_all[:,0] = 1 72 | if f['poses'].shape[-1] == 72: 73 | smpl_params_all[:, 4:76] = f['poses'] 74 | elif f['poses'].shape[-1] == 156: 75 | smpl_params_all[:, 4:76] = f['poses'][:,smplx_to_smpl] 76 | 77 | root_abs = smpl_params_all[0, 4:7].copy() 78 | for i in range(smpl_params_all.shape[0]): 79 | smpl_params_all[i, 4:7] = rectify_pose(smpl_params_all[i, 4:7], root_abs) 80 | 81 | smpl_params_all = torch.tensor(smpl_params_all).float().to(opt['device']) 82 | 83 | ### NOTE choose only min l1-norm pose 84 | l1_norm_pose = smpl_params_all.abs().sum(-1) # [n_poses,] 85 | min_pose_id = l1_norm_pose.argmin() 86 | min_pose_id = 0 87 | smpl_params_all = smpl_params_all[min_pose_id][None] 88 | 89 | # generate data batch 90 | smpl_params = smpl_params_all 91 | data = model.smpl_server.forward(smpl_params, absolute=True) 92 | data['smpl_thetas'] = smpl_params[:, 4:76] 93 | 94 | print(f'[LOG] extracting implicit templates') 95 | # low resolution mesh 96 | results_lowres = model.plot(data, res=opt['extraction']['resolution_low']) 97 | results_lowres['mesh_cano'].export(join(opt['data_templates_path'], subject, f'{subject}_cano_mesh_{opt["extraction"]["resolution_low"]}.ply')) 98 | print(f'[LOG] extracted mesh at resolution {opt["extraction"]["resolution_low"]}. saving it at ' + join(opt['data_templates_path'], subject, f'{subject}_cano_mesh_{opt["extraction"]["resolution_low"]}.ply')) 99 | 100 | # high resolution mesh 101 | results_highres = model.plot(data, res=opt['extraction']['resolution_high']) 102 | results_highres['mesh_cano'].export(join(opt['data_templates_path'], subject, f'{subject}_cano_mesh_{opt["extraction"]["resolution_high"]}.ply')) 103 | print(f'[LOG] extracted mesh at resolution {opt["extraction"]["resolution_high"]}. 
saving it at ' + join(opt['data_templates_path'], subject, f'{subject}_cano_mesh_{opt["extraction"]["resolution_high"]}.ply')) 104 | 105 | 106 | print(f'[LOG] downsampling the high resolution mesh and packing the clothed template') 107 | verts_mesh = results_lowres['mesh_cano'].vertices 108 | faces_mesh = results_lowres['mesh_cano'].faces 109 | weights_mesh = results_lowres['weights_cano'].cpu().numpy() 110 | 111 | points = torch.from_numpy(results_highres['mesh_cano'].vertices).float()[None].to(opt['device']) 112 | downsampled_points, downsampled_indices = sample_farthest_points(points, K=opt['n_cano_points']) 113 | 114 | verts_downsampled = downsampled_points[0].cpu().numpy() 115 | normals_downsampled = results_highres['mesh_cano'].vertex_normals[downsampled_indices[0].cpu().numpy()] 116 | weights_downsampled = results_highres['weights_cano'].cpu().numpy()[downsampled_indices[0].cpu().numpy()] 117 | 118 | verts_tpose = v_template 119 | 120 | np.savez(join(opt['data_templates_path'], f'{subject}_clothed_template.npz'), 121 | verts_mesh=verts_mesh, 122 | faces_mesh=faces_mesh, 123 | weights_mesh=weights_mesh, 124 | verts_downsampled=verts_downsampled, 125 | normals_downsampled=normals_downsampled, 126 | weights_downsampled=weights_downsampled, 127 | verts_tpose=verts_tpose) 128 | 129 | print(f'[LOG] clothed template packed at ' + join(opt['data_templates_path'], f'{subject}_clothed_template.npz')) 130 | -------------------------------------------------------------------------------- /step2_implicit_template/lib/model/network.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | """ MLP for neural implicit shapes. The code is based on https://github.com/lioryariv/idr with adaption. 
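The `.npz` packed above can be inspected directly; a small sketch (the subject name is an example, matching `configs/step1.yaml`):

```python
import numpy as np

tmpl = np.load('data_templates/rp_anna_posed_001_clothed_template.npz')
for key in ['verts_mesh', 'faces_mesh', 'weights_mesh',
            'verts_downsampled', 'normals_downsampled', 'weights_downsampled',
            'verts_tpose']:
    print(key, tmpl[key].shape)
# verts/faces/weights_mesh:  low-res canonical template mesh and its per-vertex skinning weights
# *_downsampled:             farthest-point-sampled points from the high-res template
#                            (K = n_cano_points in configs/common.yaml), with normals and weights
# verts_tpose:               the subject's minimal-body T-pose vertices
```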
""" 5 | class ImplicitNetwork(torch.nn.Module): 6 | def __init__( 7 | self, 8 | d_in, 9 | d_out, 10 | width, 11 | depth, 12 | geometric_init=True, 13 | bias=1.0, 14 | weight_norm=True, 15 | multires=0, 16 | skip_layer=[], 17 | cond_layer=[], 18 | cond_dim=69, 19 | dim_cond_embed=-1, 20 | ): 21 | super().__init__() 22 | 23 | dims = [d_in] + [width] * depth + [d_out] 24 | self.num_layers = len(dims) 25 | 26 | self.embed_fn = None 27 | if multires > 0: 28 | embed_fn, input_ch = get_embedder(multires) 29 | self.embed_fn = embed_fn 30 | dims[0] = input_ch 31 | 32 | self.cond_layer = cond_layer 33 | self.cond_dim = cond_dim 34 | 35 | self.dim_cond_embed = dim_cond_embed 36 | if dim_cond_embed > 0: 37 | self.lin_p0 = torch.nn.Linear(self.cond_dim, dim_cond_embed) 38 | self.cond_dim = dim_cond_embed 39 | 40 | self.skip_layer = skip_layer 41 | 42 | for l in range(0, self.num_layers - 1): 43 | if l + 1 in self.skip_layer: 44 | out_dim = dims[l + 1] - dims[0] 45 | else: 46 | out_dim = dims[l + 1] 47 | 48 | if l in self.cond_layer: 49 | lin = torch.nn.Linear(dims[l] + self.cond_dim, out_dim) 50 | else: 51 | lin = torch.nn.Linear(dims[l], out_dim) 52 | 53 | if geometric_init: 54 | if l == self.num_layers - 2: 55 | torch.nn.init.normal_( 56 | lin.weight, mean=-np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001 57 | ) 58 | torch.nn.init.constant_(lin.bias, bias) 59 | elif multires > 0 and l == 0: 60 | torch.nn.init.constant_(lin.bias, 0.0) 61 | torch.nn.init.constant_(lin.weight[:, 3:], 0.0) 62 | torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim)) 63 | elif multires > 0 and l in self.skip_layer: 64 | torch.nn.init.constant_(lin.bias, 0.0) 65 | torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)) 66 | torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3) :], 0.0) 67 | else: 68 | torch.nn.init.constant_(lin.bias, 0.0) 69 | torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)) 70 | 71 | if weight_norm: 72 | lin = torch.nn.utils.weight_norm(lin) 73 | 74 | setattr(self, "lin" + str(l), lin) 75 | 76 | self.softplus = torch.nn.Softplus(beta=100) 77 | 78 | def forward(self, input, cond, mask=None): 79 | """MPL query. 80 | 81 | Tensor shape abbreviation: 82 | B: batch size 83 | N: number of points 84 | D: input dimension 85 | 86 | Args: 87 | input (tensor): network input. shape: [B, N, D] 88 | cond (dict): conditional input. 89 | mask (tensor, optional): only masked inputs are fed into the network. shape: [B, N] 90 | 91 | Returns: 92 | output (tensor): network output. Might contains placehold if mask!=None shape: [N, D, ?] 93 | """ 94 | 95 | 96 | n_batch, n_point, n_dim = input.shape 97 | 98 | if n_batch * n_point == 0: 99 | return input 100 | 101 | # reshape to [N,?] 
102 | input = input.reshape(n_batch * n_point, n_dim) 103 | if mask is not None: 104 | input = input[mask] 105 | 106 | input_embed = input if self.embed_fn is None else self.embed_fn(input) 107 | 108 | if len(self.cond_layer): 109 | cond = cond["smpl"] 110 | n_batch, n_cond = cond.shape 111 | input_cond = cond.unsqueeze(1).expand(n_batch, n_point, n_cond) 112 | input_cond = input_cond.reshape(n_batch * n_point, n_cond) 113 | 114 | if mask is not None: 115 | input_cond = input_cond[mask] 116 | 117 | if self.dim_cond_embed > 0: 118 | input_cond = self.lin_p0(input_cond) 119 | 120 | x = input_embed 121 | 122 | for l in range(0, self.num_layers - 1): 123 | lin = getattr(self, "lin" + str(l)) 124 | if l in self.cond_layer: 125 | x = torch.cat([x, input_cond], dim=-1) 126 | 127 | if l in self.skip_layer: 128 | x = torch.cat([x, input_embed], 1) / np.sqrt(2) 129 | 130 | x = lin(x) 131 | 132 | if l < self.num_layers - 2: 133 | x = self.softplus(x) 134 | 135 | # add placeholder for masked prediction 136 | if mask is not None: 137 | x_full = torch.zeros(n_batch * n_point, x.shape[-1], device=x.device) 138 | x_full[mask] = x 139 | else: 140 | x_full = x 141 | 142 | return x_full.reshape(n_batch, n_point, -1) 143 | 144 | 145 | """ Positional encoding embedding. Code was taken from https://github.com/bmild/nerf. """ 146 | class Embedder: 147 | def __init__(self, **kwargs): 148 | self.kwargs = kwargs 149 | self.create_embedding_fn() 150 | 151 | def create_embedding_fn(self): 152 | embed_fns = [] 153 | d = self.kwargs["input_dims"] 154 | out_dim = 0 155 | if self.kwargs["include_input"]: 156 | embed_fns.append(lambda x: x) 157 | out_dim += d 158 | 159 | max_freq = self.kwargs["max_freq_log2"] 160 | N_freqs = self.kwargs["num_freqs"] 161 | 162 | if self.kwargs["log_sampling"]: 163 | freq_bands = 2.0 ** torch.linspace(0.0, max_freq, N_freqs) 164 | else: 165 | freq_bands = torch.linspace(2.0 ** 0.0, 2.0 ** max_freq, N_freqs) 166 | 167 | for freq in freq_bands: 168 | for p_fn in self.kwargs["periodic_fns"]: 169 | embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)) 170 | out_dim += d 171 | 172 | self.embed_fns = embed_fns 173 | self.out_dim = out_dim 174 | 175 | def embed(self, inputs): 176 | return torch.cat([fn(inputs) for fn in self.embed_fns], -1) 177 | 178 | 179 | def get_embedder(multires): 180 | embed_kwargs = { 181 | "include_input": True, 182 | "input_dims": 3, 183 | "max_freq_log2": multires - 1, 184 | "num_freqs": multires, 185 | "log_sampling": True, 186 | "periodic_fns": [torch.sin, torch.cos], 187 | } 188 | 189 | embedder_obj = Embedder(**embed_kwargs) 190 | 191 | def embed(x, eo=embedder_obj): 192 | return eo.embed(x) 193 | 194 | return embed, embedder_obj.out_dim 195 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Learning Implicit Templates for Point-Based Clothed Human Modeling (ECCV 2022) 2 | 3 | This repository contains the implementation of the paper 4 | 5 | **Learning Implicit Templates for Point-Based Clothed Human Modeling (ECCV 2022)** 6 | 7 | *[Siyou Lin](https://jsnln.github.io/), [Hongwen Zhang](https://hongwenzhang.github.io/), [Zerong Zheng](https://zhengzerong.github.io/), [Ruizhi Shao](https://dsaurus.github.io/saurus), [Yebin Liu](http://liuyebin.com/)* 8 | 9 | [Project page](https://jsnln.github.io/fite/index.html) | [Paper](https://arxiv.org/abs/2207.06955) | [Supp](https://jsnln.github.io/fite/assets/fite_supp.pdf) 10 | 11 | A 
**First-Implicit-Then-Explicit (FITE)** pipeline for modeling humans in clothing. 12 | 13 | ![pipeline](./teaser/pipeline.png) 14 | 15 | ### [Update] 16 | 17 | - Uploaded the script for error measuring: `measure_error.py`. There was some typo in Eq. (6) and Eq. (7) in the the supplementary material. We used `pymeshlab` for measuring errors. The point-to-point distance is normalized by the bounding box diagonal when computing the Chamfer distance. Furthermore, both Chamfer distance and cosine similarity are divided by 2. 18 | 19 | 20 | ### Overview 21 | 22 | Our code consists of three main steps: 23 | 24 | 1. Given a minimal T-pose SMPL template, compute the diffused skinning field, stored as a 256^3 voxel grid. 25 | 2. Learn a canonical implicit template using SNARF, but with our diffused skinning field. 26 | 3. Render position maps (posmaps for short) and train the point-based avatars. 27 | 28 | You can get a quick start with our pretrained models, or follow the instructions below to go through the whole pipeline. 29 | 30 | ### Quick Start 31 | 32 | To use our pretrained models (download from [here](https://cloud.tsinghua.edu.cn/d/8a6fe3fa9af341fdae06/)), only the test script of step 3 needs to be executed. 33 | 34 | First, unzip the downloaded pretrained data, put `{subject_name}_clothed_template.npz` in the folder `data_templates`, and put `checkpoint-400.pt` and `geom-feats-400.pt` in `results/{resynth,cape}_pretrained/step3-checkpoints`. Rename the check points to `*-latest.pt` 35 | 36 | To animate a certain subject, say `rp_carla_posed_004`, prepare a pose sequence (`.npz` files containing the `pose` and `transl` parameters) under the directory `data_scans/rp_carla_posed_004/test/`, and then run (assuming the project directory is the working directory): 37 | 38 | ```bash 39 | python -m step3_point_avatar.render_posmaps rp_carla_posed_004 test 40 | ``` 41 | 42 | This will render the posmaps to `data_posmaps/rp_carla_posed_004/test`. Then, run 43 | 44 | ```bash 45 | python -m step3_point_avatar.test_fite_point_avatar 46 | ``` 47 | 48 | The animated point clouds will be saved at `results/resynth_pretrained/step3-test-pcds`. 49 | 50 | ### The Whole Pipeline 51 | 52 | Prior to running the pipeline, set `expname` in `configs/common.yaml` to the name of your experiment and keep it fixed. 53 | 54 | #### 1. Dependencies 55 | 56 | We have tested this code on Ubuntu 20.04 with Python 3.8.10 and CUDA 11.1. 57 | 58 | To run the whole pipeline, the user needs to install the following dependencies. 59 | 60 | 1. The `PointInterpolant` executable from https://github.com/mkazhdan/PoissonRecon. After successfully building `PointInterpolant`, set `point_interpolant_exe` in the config file `configs/step1.yaml` as the path to the executable. 61 | 62 | ```bash 63 | git clone https://github.com/mkazhdan/PoissonRecon 64 | cd PoissonRecon 65 | make pointinterpolant 66 | cd .. 67 | ``` 68 | 69 | 2. Git clone https://github.com/NVIDIAGameWorks/kaolin.git to the FITE project directory and build it. 70 | 71 | ```bash 72 | git clone --recursive https://github.com/NVIDIAGameWorks/kaolin 73 | cd kaolin 74 | git checkout v0.11.0 # optional, other versions should also work 75 | python setup.py install # use --user if needed 76 | cd .. 77 | ``` 78 | 79 | 80 | 3. Download chamfer distance from this implementation: https://github.com/krrish94/chamferdist/tree/97051583f6fe72d5d4a855696dbfda0ea9b73a6a and build it. 81 | 82 | ```bash 83 | cd chamferdist 84 | python setup.py install # use --user if needed 85 | cd .. 
86 | ``` 87 | 88 | 4. Download the SMPL models from https://smpl.is.tue.mpg.de/ (v1.1.0). Put them in some folder structured as: 89 | 90 | ``` 91 | smpl_models/ 92 | smpl/ 93 | SMPL_MALE.pkl 94 | SMPL_FEMALE.pkl 95 | SMPL_NEUTRAL.pkl 96 | ``` 97 | 98 | Then set `smpl_model_path` in `configs/common.yaml` as the path to this folder. 99 | 100 | 5. Install these python packages: 101 | 102 | ```bash 103 | pyyaml # 6.0 104 | tqdm # 4.62.3 105 | smplx # 0.1.28 106 | torch # 1.10.0+cu111 107 | trimesh # 3.10.2 108 | numpy # 1.22.3 109 | opencv-python # 4.5.5 110 | scikit-image # 0.18.1 111 | pytorch3d # 0.6.1 112 | pyglm # 2.5.7 113 | pyopengl # 3.1.0 114 | glfw # 2.1.0 115 | scipy # 1.8.0 116 | ``` 117 | 118 | #### 2. Data Preparation for Training 119 | 120 | To train on your own data, you need to prepare the scans (which must be closed meshes) as `.npz` files containing the following items: 121 | 122 | ``` 123 | 'pose': of shape (72,), SMPL pose parameters 124 | 'transl': of shape (3,), translation of the scan 125 | 'scan_f': of shape (N_faces, 3), triangle faces of the scan mesh 126 | 'scan_v': of shape (N_vertices, 3), vertices of the scan mesh 127 | 'scan_pc': of shape (N_points, 3), points uniformly sampled on the scan 128 | 'scan_n': of shape (N_points, 3), normals of the sampled points (unit length) 129 | ``` 130 | 131 | Note that the number of points of `scan_pc` and `scan_n` must be the same across different scans (for tensor batching), while the number of vertices and faces can be different across scans. These `.npz` files should be placed at `data_scans/{subject_name}/train`. 132 | 133 | A minimal SMPL body matching the scans is also needed. Prepare the T-pose SMPL mesh as `{subject_name}_minimal_tpose.ply`, and put it in `data_templates/{subject_name}/` . Finally, add the gender of the subject to `data_templates/gender_list.yaml`. 134 | 135 | #### 3. Diffused Skinning 136 | 137 | This step computes a voxel grid for diffused skinning. First, run 138 | 139 | ```bash 140 | cd step1_diffused_skinning 141 | sh compile_lbs_surf_grad.sh 142 | cd .. 143 | ``` 144 | 145 | to compile the c++ program for computing surface gradient of LBS weights. Then, change the `subject` option in `configs/step1.yaml` to the name of your subject, and run 146 | 147 | ```bash 148 | python -m step1_diffused_skinning.compute_diffused_skinning 149 | ``` 150 | 151 | After it finishes, a file named `{subject_name}_cano_lbs_weights_grid_float32.npy` will be placed at `data_templates/{subject_name}/`. You can optionally delete the intermediate files in `data_tmp_constraints` and `data_tmp_skinning_grid` if they take up too much space. 152 | 153 | 154 | #### 4. Implicit Templates 155 | 156 | ```bash 157 | cd step2_implicit_templates 158 | python setup.py install # --user if needed 159 | cd .. 160 | ``` 161 | 162 | Change `datamodule.subject` in `configs/step2.yaml` to the name of the subject to train. Then run 163 | 164 | ```bash 165 | python -m step2_implicit_template.train_fite_implicit_template 166 | ``` 167 | 168 | Intermediate visualizations and checkpoints can be found at `results/{expname}/step2-results` and `results/{expname}/step2-results`. After the training is done, run 169 | 170 | ```bash 171 | python -m step2_implicit_template.extract_fite_implicit_template 172 | ``` 173 | 174 | This extracts the implicit templates in canonical poses to `data_templates`. 175 | 176 | #### 5. 
Point Avatar 177 | 178 | Prior to training the point avatar(s), the user needs to render the posmaps: 179 | 180 | ```bash 181 | python -m step3_point_avatar.render_posmaps {subject_name} {dataset_split} 182 | ``` 183 | 184 | Recall that our point avatar stage is a multi-subject model. If you use multiple subjects, each of them should go through all the steps above. After that, collect the names and genders of all subjects in a config file `configs/{expname}_subject_list.yaml`. Then, run 185 | 186 | ```bash 187 | python -m step3_point_avatar.train_fite_point_avatar 188 | ``` 189 | 190 | After training is complete, prepare test poses (only `pose` and `transl` are needed) in `data_scans/{subject_name}/test/` and render the posmaps in the same way as above (for the test split). Then, run 191 | 192 | ```bash 193 | python -m step3_point_avatar.test_fite_point_avatar 194 | ``` 195 | 196 | The output point clouds can be found at `results/{expname}/step3-test-pcds/`. 197 | 198 | ### Acknowledgements & A Note on the License 199 | 200 | This code is partly based on [SNARF](https://github.com/xuchen-ethz/snarf) and [POP](https://github.com/qianlim/POP). We thank those authors for making their code publicly available. Note that the code in `step2_implicit_template` is inherited from [SNARF](https://github.com/xuchen-ethz/snarf), while that in `step3_point_avatar` is inherited from [POP](https://github.com/qianlim/POP). Please follow their original licenses if you intend to use them. 201 | 202 | 203 | 204 | -------------------------------------------------------------------------------- /step3_point_avatar/lib/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | def tensor2numpy(tensor): 5 | if isinstance(tensor, torch.Tensor): 6 | return tensor.detach().cpu().numpy() 7 | return tensor 8 | def vertex_normal_2_vertex_color(vertex_normal): 9 | # Normalize vertex normal 10 | import torch 11 | if torch.is_tensor(vertex_normal): 12 | vertex_normal = vertex_normal.detach().cpu().numpy() 13 | normal_length = ((vertex_normal**2).sum(1))**0.5 14 | normal_length = normal_length.reshape(-1, 1) 15 | vertex_normal /= normal_length 16 | # Convert normal to color: 17 | color = vertex_normal * 255/2.0 + 128 18 | return color.astype(np.ubyte) 19 | 20 | def export_ply_with_vquality(filename, v_array=None, f_array=None, vq_array=None): 21 | """ 22 | v_array: vertex array 23 | vq_array: vertex quality array 24 | """ 25 | 26 | Nv = v_array.shape[0] if v_array is not None else 0 27 | Nf = f_array.shape[0] if f_array is not None else 0 28 | 29 | with open(filename, 'w') as plyfile: 30 | plyfile.write(f'ply\n') 31 | plyfile.write(f'format ascii 1.0\n') 32 | plyfile.write(f'comment trisst custom\n') 33 | plyfile.write(f'element vertex {Nv}\n') 34 | plyfile.write(f'property float x\n') 35 | plyfile.write(f'property float y\n') 36 | plyfile.write(f'property float z\n') 37 | if vq_array is not None: 38 | plyfile.write(f'property float quality\n') 39 | plyfile.write(f'element face {Nf}\n') 40 | plyfile.write(f'property list uchar int vertex_indices\n') 41 | plyfile.write(f'end_header\n') 42 | for i in range(Nv): 43 | plyfile.write(f'{v_array[i][0]} {v_array[i][1]} {v_array[i][2]} ') 44 | 45 | if vq_array is None: 46 | plyfile.write('\n') 47 | continue 48 | 49 | plyfile.write(f'{vq_array[i]} ') 50 | plyfile.write('\n') 51 | continue 52 | 53 | for i in range(Nf): 54 | plyfile.write(f'3 {f_array[i][0]} {f_array[i][1]} {f_array[i][2]}\n') 55 | 56 | 57 | 58 | def
customized_export_ply(outfile_name, v, f = None, v_n = None, v_c = None, f_c = None, e = None): 59 | ''' 60 | Author: Jinlong Yang, jyang@tue.mpg.de 61 | 62 | Exports a point cloud / mesh to a .ply file 63 | supports vertex normal and color export 64 | such that the saved file will be correctly displayed in MeshLab 65 | 66 | # v: Vertex position, N_v x 3 float numpy array 67 | # f: Face, N_f x 3 int numpy array 68 | # v_n: Vertex normal, N_v x 3 float numpy array 69 | # v_c: Vertex color, N_v x (3 or 4) uchar numpy array 70 | # f_n: Face normal, N_f x 3 float numpy array 71 | # f_c: Face color, N_f x (3 or 4) uchar numpy array 72 | # e: Edge, N_e x 2 int numpy array 73 | # mode: ascii or binary ply file. Value is {'ascii', 'binary'} 74 | ''' 75 | 76 | v_n_flag=False 77 | v_c_flag=False 78 | f_c_flag=False 79 | 80 | N_v = v.shape[0] 81 | assert(v.shape[1] == 3) 82 | if not type(v_n) == type(None): 83 | assert(v_n.shape[0] == N_v) 84 | if type(v_n) == 'torch.Tensor': 85 | v_n = v_n.detach().cpu().numpy() 86 | v_n_flag = True 87 | if not type(v_c) == type(None): 88 | assert(v_c.shape[0] == N_v) 89 | v_c_flag = True 90 | if v_c.shape[1] == 3: 91 | # warnings.warn("Vertex color does not provide alpha channel, use default alpha = 255") 92 | alpha_channel = np.zeros((N_v, 1), dtype = np.ubyte)+255 93 | v_c = np.hstack((v_c, alpha_channel)) 94 | 95 | N_f = 0 96 | if not type(f) == type(None): 97 | N_f = f.shape[0] 98 | assert(f.shape[1] == 3) 99 | if not type(f_c) == type(None): 100 | assert(f_c.shape[0] == f.shape[0]) 101 | f_c_flag = True 102 | if f_c.shape[1] == 3: 103 | # warnings.warn("Face color does not provide alpha channel, use default alpha = 255") 104 | alpha_channel = np.zeros((N_f, 1), dtype = np.ubyte)+255 105 | f_c = np.hstack((f_c, alpha_channel)) 106 | N_e = 0 107 | if not type(e) == type(None): 108 | N_e = e.shape[0] 109 | 110 | with open(outfile_name, 'w') as file: 111 | # Header 112 | file.write('ply\n') 113 | file.write('format ascii 1.0\n') 114 | file.write('element vertex %d\n'%(N_v)) 115 | file.write('property float x\n') 116 | file.write('property float y\n') 117 | file.write('property float z\n') 118 | 119 | if v_n_flag: 120 | file.write('property float nx\n') 121 | file.write('property float ny\n') 122 | file.write('property float nz\n') 123 | if v_c_flag: 124 | file.write('property uchar red\n') 125 | file.write('property uchar green\n') 126 | file.write('property uchar blue\n') 127 | file.write('property uchar alpha\n') 128 | 129 | file.write('element face %d\n'%(N_f)) 130 | file.write('property list uchar int vertex_indices\n') 131 | if f_c_flag: 132 | file.write('property uchar red\n') 133 | file.write('property uchar green\n') 134 | file.write('property uchar blue\n') 135 | file.write('property uchar alpha\n') 136 | 137 | if not N_e == 0: 138 | file.write('element edge %d\n'%(N_e)) 139 | file.write('property int vertex1\n') 140 | file.write('property int vertex2\n') 141 | 142 | file.write('end_header\n') 143 | 144 | # Main body: 145 | # Vertex 146 | if v_n_flag and v_c_flag: 147 | for i in range(0, N_v): 148 | file.write('%f %f %f %f %f %f %d %d %d %d\n'%\ 149 | (v[i,0], v[i,1], v[i,2],\ 150 | v_n[i,0], v_n[i,1], v_n[i,2], \ 151 | v_c[i,0], v_c[i,1], v_c[i,2], v_c[i,3])) 152 | elif v_n_flag: 153 | for i in range(0, N_v): 154 | file.write('%f %f %f %f %f %f\n'%\ 155 | (v[i,0], v[i,1], v[i,2],\ 156 | v_n[i,0], v_n[i,1], v_n[i,2])) 157 | elif v_c_flag: 158 | for i in range(0, N_v): 159 | file.write('%f %f %f %d %d %d %d\n'%\ 160 | (v[i,0], v[i,1], v[i,2],\ 161 | 
v_c[i,0], v_c[i,1], v_c[i,2], v_c[i,3])) 162 | else: 163 | for i in range(0, N_v): 164 | file.write('%f %f %f\n'%\ 165 | (v[i,0], v[i,1], v[i,2])) 166 | # Face 167 | if f_c_flag: 168 | for i in range(0, N_f): 169 | file.write('3 %d %d %d %d %d %d %d\n'%\ 170 | (f[i,0], f[i,1], f[i,2],\ 171 | f_c[i,0], f_c[i,1], f_c[i,2], f_c[i,3])) 172 | else: 173 | for i in range(0, N_f): 174 | file.write('3 %d %d %d\n'%\ 175 | (f[i,0], f[i,1], f[i,2])) 176 | 177 | # Edge 178 | if not N_e == 0: 179 | for i in range(0, N_e): 180 | file.write('%d %d\n'%(e[i,0], e[i,1])) 181 | 182 | def save_result_examples(save_dir, model_name, result_name, 183 | points, normals=None, patch_color=None, 184 | texture=None, coarse_pts=None, 185 | gt=None, epoch=None): 186 | # works on single pcl, i.e. [#num_pts, 3], no batch dimension 187 | from os.path import join 188 | import numpy as np 189 | 190 | if epoch is None: 191 | normal_fn = '{}_{}_pred.ply'.format(model_name, result_name) 192 | else: 193 | normal_fn = '{}_epoch{}_{}_pred.ply'.format(model_name, str(epoch).zfill(4), result_name) 194 | normal_fn = join(save_dir, normal_fn) 195 | points = tensor2numpy(points) 196 | 197 | if normals is not None: 198 | normals = tensor2numpy(normals) 199 | color_normal = vertex_normal_2_vertex_color(normals) 200 | customized_export_ply(normal_fn, v=points, v_n=normals, v_c=color_normal) 201 | 202 | if patch_color is not None: 203 | patch_color = tensor2numpy(patch_color) 204 | if patch_color.max() < 1.1: 205 | patch_color = (patch_color*255.).astype(np.ubyte) 206 | pcolor_fn = normal_fn.replace('pred.ply', 'pred_patchcolor.ply') 207 | customized_export_ply(pcolor_fn, v=points, v_c=patch_color) 208 | 209 | if texture is not None: 210 | texture = tensor2numpy(texture) 211 | if texture.max() < 1.1: 212 | texture = (texture*255.).astype(np.ubyte) 213 | texture_fn = normal_fn.replace('pred.ply', 'pred_texture.ply') 214 | customized_export_ply(texture_fn, v=points, v_c=texture) 215 | 216 | if coarse_pts is not None: 217 | coarse_pts = tensor2numpy(coarse_pts) 218 | coarse_fn = normal_fn.replace('pred.ply', 'interm.ply') 219 | customized_export_ply(coarse_fn, v=coarse_pts) 220 | 221 | if gt is not None: 222 | gt = tensor2numpy(gt) 223 | gt_fn = normal_fn.replace('pred.ply', 'gt.ply') 224 | customized_export_ply(gt_fn, v=gt) 225 | 226 | 227 | def adjust_loss_weights(init_weight, current_epoch, mode='decay', start=400, every=20): 228 | # decay or rise the loss weights according to the given policy and current epoch 229 | # mode: decay, rise or binary 230 | 231 | if mode != 'binary': 232 | if current_epoch < start: 233 | if mode == 'rise': 234 | weight = init_weight * 1e-6 # use a very small weight for the normal loss in the beginning until the chamfer dist stabalizes 235 | else: 236 | weight = init_weight 237 | else: 238 | if every == 0: 239 | weight = init_weight # don't rise, keep const 240 | else: 241 | if mode == 'rise': 242 | weight = init_weight * (1.05 ** ((current_epoch - start) // every)) 243 | else: 244 | weight = init_weight * (0.85 ** ((current_epoch - start) // every)) 245 | 246 | return weight -------------------------------------------------------------------------------- /step2_implicit_template/lib/model/deformer_diffused_skinning.py: -------------------------------------------------------------------------------- 1 | import trimesh 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | import numpy as np 6 | from .broyden import broyden 7 | 8 | class 
ForwardDeformerDiffusedSkinning(torch.nn.Module): 9 | """ 10 | Tensor shape abbreviation: 11 | B: batch size 12 | N: number of points 13 | J: number of bones 14 | I: number of init 15 | D: space dimension 16 | """ 17 | 18 | def __init__(self, subject, cpose_smpl_mesh_path, cpose_weight_grid_path, device): 19 | super().__init__() 20 | 21 | self.init_bones = [0, 1, 2, 4, 5, 16, 17, 18, 19] 22 | 23 | ### NOTE query grid data 24 | self.subject = subject 25 | self.bbox_grid_extend = None 26 | self.bbox_grid_center = None 27 | self.weight_grid = None 28 | 29 | if self.bbox_grid_extend is None or self.bbox_grid_center is None or self.weight_grid is None: 30 | cpose_smpl_mesh = trimesh.load(cpose_smpl_mesh_path, process=False) 31 | cpose_verts = torch.from_numpy(np.array(cpose_smpl_mesh.vertices)).float().to(device)[:, :3] 32 | bbox_data_min = cpose_verts.min(0).values 33 | bbox_data_max = cpose_verts.max(0).values 34 | bbox_data_extend = (bbox_data_max - bbox_data_min).max() 35 | bbox_grid_extend = bbox_data_extend * 1.1 36 | center = (bbox_data_min + bbox_data_max) / 2 37 | 38 | grid_pt = torch.from_numpy(np.load(cpose_weight_grid_path)).float().to(device) 39 | 40 | self.bbox_grid_extend = bbox_grid_extend 41 | self.bbox_grid_center = center 42 | self.weight_grid = grid_pt 43 | 44 | 45 | def forward(self, xd, cond, tfs, eval_mode=False): 46 | """Given deformed point return its caonical correspondence 47 | 48 | Args: 49 | xd (tensor): deformed points in batch. shape: [B, N, D] 50 | cond (dict): conditional input. 51 | tfs (tensor): bone transformation matrices. shape: [B, J, D+1, D+1] 52 | 53 | Returns: 54 | xc (tensor): canonical correspondences. shape: [B, N, I, D] 55 | others (dict): other useful outputs. 56 | """ 57 | xc_init = self.init(xd, tfs) 58 | 59 | xc_opt, others = self.search(xd, xc_init, cond, tfs, eval_mode=eval_mode) 60 | 61 | if eval_mode: 62 | return xc_opt, others 63 | 64 | # compute correction term for implicit differentiation during training 65 | 66 | # do not back-prop through broyden 67 | xc_opt = xc_opt.detach() 68 | 69 | # reshape to [B,?,D] for network query 70 | n_batch, n_point, n_init, n_dim = xc_init.shape 71 | xc_opt = xc_opt.reshape((n_batch, n_point * n_init, n_dim)) 72 | 73 | xd_opt = self.forward_skinning(xc_opt, cond, tfs) 74 | 75 | grad_inv = self.gradient(xc_opt, cond, tfs).inverse() 76 | 77 | correction = xd_opt - xd_opt.detach() 78 | correction = torch.einsum("bnij,bnj->bni", -grad_inv.detach(), correction) 79 | 80 | # trick for implicit diff with autodiff: 81 | # xc = xc_opt + 0 and xc' = correction' 82 | xc = xc_opt + correction 83 | 84 | # reshape back to [B,N,I,D] 85 | xc = xc.reshape(xc_init.shape) 86 | 87 | return xc, others 88 | 89 | def init(self, xd, tfs): 90 | """Transform xd to canonical space for initialization 91 | 92 | Args: 93 | xd (tensor): deformed points in batch. shape: [B, N, D] 94 | tfs (tensor): bone transformation matrices. shape: [B, J, D+1, D+1] 95 | 96 | Returns: 97 | xc_init (tensor): gradients. shape: [B, N, I, D] 98 | """ 99 | n_batch, n_point, _ = xd.shape 100 | _, n_joint, _, _ = tfs.shape 101 | 102 | xc_init = [] 103 | for i in self.init_bones: 104 | w = torch.zeros((n_batch, n_point, n_joint), device=xd.device) 105 | w[:, :, i] = 1 106 | xc_init.append(skinning(xd, w, tfs, inverse=True)) 107 | 108 | xc_init = torch.stack(xc_init, dim=2) 109 | 110 | return xc_init 111 | 112 | def search(self, xd, xc_init, cond, tfs, eval_mode=False): 113 | """Search correspondences. 
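        Starting from the per-bone initializations produced by `init`, this runs
        Broyden's method (see `broyden.py`) to find canonical points whose
        forward-skinned positions match the deformed query points `xd`.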
114 | 115 | Args: 116 | xd (tensor): deformed points in batch. shape: [B, N, D] 117 | xc_init (tensor): deformed points in batch. shape: [B, N, I, D] 118 | cond (dict): conditional input. 119 | tfs (tensor): bone transformation matrices. shape: [B, J, D+1, D+1] 120 | 121 | Returns: 122 | xc_opt (tensor): canonoical correspondences of xd. shape: [B, N, I, D] 123 | valid_ids (tensor): identifiers of converged points. [B, N, I] 124 | """ 125 | # reshape to [B,?,D] for other functions 126 | n_batch, n_point, n_init, n_dim = xc_init.shape 127 | xc_init = xc_init.reshape(n_batch, n_point * n_init, n_dim) 128 | xd_tgt = xd.repeat_interleave(n_init, dim=1) 129 | 130 | # compute init jacobians 131 | if not eval_mode: 132 | J_inv_init = self.gradient(xc_init, cond, tfs).inverse() 133 | else: 134 | w = self.query_weights(xc_init, cond, mask=None) 135 | # ic(einsum("bpn,bnij->bpij", w, tfs)[:, :, :3, :3][0, 0]) 136 | # ic(w.shape, tfs.shape) 137 | J_inv_init = torch.einsum("bpn,bnij->bpij", w, tfs)[:, :, :3, :3].inverse() 138 | # J_inv_init = torch.pinverse(einsum("bpn,bnij->bpij", w, tfs)[:, :, :3, :3]) 139 | 140 | # reshape init to [?,D,...] for boryden 141 | xc_init = xc_init.reshape(-1, n_dim, 1) 142 | J_inv_init = J_inv_init.flatten(0, 1) 143 | 144 | # construct function for root finding 145 | def _func(xc_opt, mask=None): 146 | # reshape to [B,?,D] for other functions 147 | xc_opt = xc_opt.reshape(n_batch, n_point * n_init, n_dim) 148 | xd_opt = self.forward_skinning(xc_opt, cond, tfs, mask=mask) 149 | error = xd_opt - xd_tgt 150 | # reshape to [?,D,1] for boryden 151 | error = error.flatten(0, 1)[mask].unsqueeze(-1) 152 | return error 153 | 154 | # run broyden without grad 155 | with torch.no_grad(): 156 | result = broyden(_func, xc_init, J_inv_init) 157 | 158 | # reshape back to [B,N,I,D] 159 | xc_opt = result["result"].reshape(n_batch, n_point, n_init, n_dim) 160 | result["valid_ids"] = result["valid_ids"].reshape(n_batch, n_point, n_init) 161 | 162 | return xc_opt, result 163 | 164 | def forward_skinning(self, xc, cond, tfs, mask=None): 165 | """Canonical point -> deformed point 166 | 167 | Args: 168 | xc (tensor): canonoical points in batch. shape: [B, N, D] 169 | cond (dict): conditional input. 170 | tfs (tensor): bone transformation matrices. shape: [B, J, D+1, D+1] 171 | 172 | Returns: 173 | xd (tensor): deformed point. shape: [B, N, D] 174 | """ 175 | w = self.query_weights(xc, cond, mask=mask) 176 | xd = skinning(xc, w, tfs, inverse=False) 177 | return xd 178 | 179 | def query_weights(self, xc, cond, mask=None): 180 | """Get skinning weights in canonical space 181 | 182 | Args: 183 | xc (tensor): canonical points. shape: [B, N, D] 184 | cond (dict): conditional input. 185 | mask (tensor, optional): valid indices. shape: [B, N] 186 | 187 | Returns: 188 | w (tensor): skinning weights. shape: [B, N, J] 189 | """ 190 | 191 | def get_w(p_xc, p_mask, p_grid): 192 | n_batch, n_point, n_dim = p_xc.shape 193 | 194 | if n_batch * n_point == 0: 195 | return p_xc 196 | 197 | # reshape to [N,?] 
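            # `p_grid` stores the diffused LBS weights (one channel per joint, 24 in total)
            # on a dense voxel grid; the query points are assumed to already lie in the
            # [-1, 1] cube expected by F.grid_sample (see `inv_transform_v` below), so the
            # lookup below is a trilinear interpolation of the weight grid at each point.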
198 | p_xc = p_xc.reshape(n_batch * n_point, n_dim) 199 | if p_mask is not None: 200 | p_xc = p_xc[p_mask] # (n_b*n_p, n_dim) 201 | 202 | x = F.grid_sample(p_grid[None], 203 | p_xc[None, None, None], 204 | align_corners=False, 205 | padding_mode='border')[0, :, 0, 0].T # [Nv, 24] 206 | 207 | # add placeholder for masked prediction 208 | if p_mask is not None: 209 | x_full = torch.zeros(n_batch * n_point, x.shape[-1], device=x.device) 210 | x_full[p_mask] = x 211 | else: 212 | x_full = x 213 | 214 | return x_full.reshape(n_batch, n_point, -1) 215 | 216 | def inv_transform_v(v, scale_grid, transl): 217 | """ 218 | v: [b, n, 3] 219 | """ 220 | v = v - transl[None, None] 221 | v = v / scale_grid 222 | v = v * 2 223 | 224 | return v 225 | 226 | 227 | 228 | v_cano_in_grid_coords = inv_transform_v(xc, self.bbox_grid_extend, self.bbox_grid_center) 229 | 230 | out = get_w(v_cano_in_grid_coords, mask, self.weight_grid) 231 | # out = F.grid_sample(grid_pt[None], v_cano_in_grid_coords[None, None], align_corners=False, padding_mode='border')[0, :, 0, 0].T # [Nv, 24] 232 | w = out 233 | 234 | # ic(xc.shape, w.shape) 235 | # ic(w.sum(-1).max(), w.sum(-1).min()) 236 | return w 237 | 238 | def gradient(self, xc, cond, tfs): 239 | """Get gradients df/dx 240 | 241 | Args: 242 | xc (tensor): canonical points. shape: [B, N, D] 243 | cond (dict): conditional input. 244 | tfs (tensor): bone transformation matrices. shape: [B, J, D+1, D+1] 245 | 246 | Returns: 247 | grad (tensor): gradients. shape: [B, N, D, D] 248 | """ 249 | xc.requires_grad_(True) 250 | 251 | xd = self.forward_skinning(xc, cond, tfs) 252 | 253 | grads = [] 254 | for i in range(xd.shape[-1]): 255 | d_out = torch.zeros_like(xd, requires_grad=False, device=xd.device) 256 | d_out[:, :, i] = 1 257 | grad = torch.autograd.grad( 258 | outputs=xd, 259 | inputs=xc, 260 | grad_outputs=d_out, 261 | create_graph=False, 262 | retain_graph=True, 263 | only_inputs=True, 264 | )[0] 265 | grads.append(grad) 266 | 267 | return torch.stack(grads, dim=-2) 268 | 269 | 270 | def skinning(x, w, tfs, inverse=False): 271 | """Linear blend skinning 272 | 273 | Args: 274 | x (tensor): canonical points. shape: [B, N, D] 275 | w (tensor): conditional input. [B, N, J] 276 | tfs (tensor): bone transformation matrices. shape: [B, J, D+1, D+1] 277 | Returns: 278 | x (tensor): skinned points. 
shape: [B, N, D] 279 | """ 280 | x_h = F.pad(x, (0, 1), value=1.0) 281 | 282 | if inverse: 283 | # p:n_point, n:n_bone, i,k: n_dim+1 284 | w_tf = torch.einsum("bpn,bnij->bpij", w, tfs) 285 | x_h = torch.einsum("bpij,bpj->bpi", w_tf.inverse(), x_h) 286 | else: 287 | x_h = torch.einsum("bpn,bnij,bpj->bpi", w, tfs, x_h) 288 | 289 | return x_h[:, :, :3] 290 | -------------------------------------------------------------------------------- /step3_point_avatar/render_posmaps.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import math 3 | import os 4 | from os.path import join, basename, splitext 5 | import torch 6 | import torch.nn 7 | import numpy as np 8 | import trimesh 9 | from tqdm import tqdm 10 | import glob 11 | import yaml 12 | 13 | import smplx 14 | 15 | import math 16 | import glm 17 | 18 | import numpy as np 19 | from OpenGL.GL import * 20 | import glfw 21 | 22 | from smplx.lbs import vertices2joints 23 | from .lib.lbs import lbs, inv_lbs 24 | from .lib.shader_utils import createProgram, loadShader 25 | 26 | ### NOTE for debugging, get the posed point cloud 27 | def posmap2pcd(img): 28 | color = img[..., :3] 29 | mask = img[..., 3:4].astype(bool) 30 | 31 | mask = np.concatenate([mask, mask, mask], -1) 32 | 33 | pcd = color[mask].reshape(-1, 3) 34 | return pcd 35 | 36 | def reindex_verts(p_verts, p_faces): 37 | reindexed_verts = [] 38 | for fid in range(len(p_faces)): 39 | vid = p_faces[fid] 40 | reindexed_verts.append(p_verts[vid].reshape(-1)) 41 | return np.concatenate(reindexed_verts, 0) 42 | 43 | def shift(p_verts, y_shift): 44 | p_verts = p_verts.reshape(-1, 3) 45 | p_verts = p_verts + np.array([[0, y_shift, 0]], dtype=np.float32) 46 | # p_verts[:, 2] *= 0.1 47 | p_verts = p_verts.reshape(-1) 48 | return p_verts 49 | 50 | @torch.no_grad() 51 | def load_data(fn, cano_data, parents, unposed_joints, cano_pose_param, remove_root_pose, device): 52 | smpl_params = np.load(fn) 53 | transl = smpl_params['transl'] 54 | pose = smpl_params['pose'] 55 | 56 | verts = cano_data['verts_mesh'] 57 | weights = cano_data['weights_mesh'] 58 | 59 | verts = torch.from_numpy(verts).float()[None].to(device) 60 | weights = torch.from_numpy(weights).float()[None].to(device) 61 | pose = torch.from_numpy(pose).float()[None].to(device) 62 | 63 | ### NOTE remove root pose 64 | if remove_root_pose: 65 | pose[:, :3] = 0 66 | 67 | out_unposed = inv_lbs(verts, unposed_joints, cano_pose_param, parents, weights) 68 | 69 | # NOTE use pose correctives or not (no use anyway) 70 | out = lbs(out_unposed['v_unposed'], unposed_joints, pose, parents, lbs_weights=weights) 71 | 72 | mesh = trimesh.Trimesh(out['v_posed'][0].cpu().numpy(), cano_data['faces_mesh'], process=False) 73 | mesh_cano = trimesh.Trimesh(cano_data['verts_mesh'], cano_data['faces_mesh'], process=False) 74 | 75 | # NOTE now preprocess verts data 76 | vertices_cano = reindex_verts(cano_data['verts_mesh'], cano_data['faces_mesh']).astype(np.float32) 77 | vertices_posed = reindex_verts(mesh.vertices, mesh.faces).astype(np.float32) 78 | feats = vertices_posed.copy() 79 | 80 | return vertices_cano, feats, smpl_params, mesh 81 | 82 | 83 | def get_proj_mat(y_rotate_deg, x_rotate_deg, y_shift, x_stretch=None): 84 | """ 85 | y_rotate_deg: rotation angle in degrees 86 | """ 87 | model_y_rot = glm.rotate(y_rotate_deg / 180 * math.pi, [0.0, 1.0, 0.0]) 88 | model_x_rot = glm.rotate(x_rotate_deg / 180 * math.pi, [1.0, 0.0, 0.0]) 89 | ortho = glm.ortho(-1.0, 1.0, -1.0-y_shift, 1.0-y_shift) 90 | if x_stretch is not 
None: 91 | stretch_mat = glm.mat4(1) 92 | stretch_mat[0,0] = x_stretch 93 | return stretch_mat * ortho * model_x_rot * model_y_rot 94 | 95 | return ortho * model_x_rot * model_y_rot 96 | 97 | 98 | def render_one(verts, feats, frame_buffer): 99 | glBindFramebuffer(GL_FRAMEBUFFER, frame_buffer) 100 | 101 | v = np.array(verts, dtype = np.float32) 102 | c = np.array(feats, dtype = np.float32) 103 | 104 | SIZE_OF_FLOAT = 4 105 | 106 | glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * SIZE_OF_FLOAT, v) 107 | glEnableVertexAttribArray(0) 108 | glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 3 * SIZE_OF_FLOAT, c) 109 | glEnableVertexAttribArray(1) 110 | 111 | glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) 112 | glDrawArrays(GL_TRIANGLES, 0, len(v) // 3) 113 | glfw.swap_buffers(window) 114 | 115 | glReadBuffer(GL_COLOR_ATTACHMENT0) 116 | data = glReadPixels(0, 0, width, height, GL_RGBA, GL_FLOAT, outputType=None) 117 | rgb = data.reshape(height, width, -1) 118 | rgb = np.flip(rgb, 0) 119 | 120 | glBindFramebuffer(GL_FRAMEBUFFER, 0) 121 | return rgb 122 | 123 | import argparse 124 | parser = argparse.ArgumentParser() 125 | parser.add_argument('subject', type=str) 126 | parser.add_argument('split', type=str) 127 | args = parser.parse_args() 128 | 129 | if __name__ == '__main__': 130 | 131 | 132 | opt = {} 133 | with open(join('configs', 'common.yaml'), 'r') as common_opt_f: 134 | common_opt = yaml.safe_load(common_opt_f) 135 | opt.update(common_opt) 136 | with open(join('configs', f'step3.yaml'), 'r') as step_opt_f: 137 | step_opt = yaml.safe_load(step_opt_f) 138 | opt.update(step_opt) 139 | with open(join('configs', f'{opt["expname"]}_subject_list.yaml'), 'r') as subject_list_f: 140 | subject_list = yaml.safe_load(subject_list_f) 141 | opt['subject_list'] = subject_list 142 | with open(join('configs', f'projection_list.yaml'), 'r') as projection_list_f: 143 | projection_list = yaml.safe_load(projection_list_f) 144 | opt['projection_list'] = projection_list 145 | 146 | width = opt['posmap_size'] 147 | height = opt['posmap_size'] 148 | 149 | # NOTE init window and context 150 | glfw.init() 151 | window = glfw.create_window(width, height, "LBS and render", None, None) 152 | glfw.set_window_pos(window, 600, 300) 153 | glfw.make_context_current(window) 154 | 155 | # NOTE generate and bind alternative buffers 156 | frame_buffer = glGenFramebuffers(1) 157 | glBindFramebuffer(GL_FRAMEBUFFER, frame_buffer) 158 | 159 | color_buffer = glGenTextures(1) 160 | glBindTexture(GL_TEXTURE_2D, color_buffer) 161 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE) 162 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE) 163 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST) 164 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST) 165 | glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, None) 166 | glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, color_buffer, 0) 167 | 168 | # Configure depth texture map to render to 169 | depth_buffer = glGenTextures(1) 170 | glBindTexture(GL_TEXTURE_2D, depth_buffer) 171 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT) 172 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT) 173 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST) 174 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST) 175 | glTexParameteri(GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE, GL_INTENSITY) 176 | glTexParameteri(GL_TEXTURE_2D, 
GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE) 177 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL) 178 | glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, width, height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, None) 179 | glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth_buffer, 0) 180 | 181 | attachments = [] 182 | attachments.append(GL_COLOR_ATTACHMENT0) 183 | glDrawBuffers(1, attachments) 184 | # NOTE bind it here so you can read it later 185 | glBindFramebuffer(GL_FRAMEBUFFER, frame_buffer) 186 | 187 | glClearColor(0.0, 0.0, 0.0, 0.0) 188 | glClearDepth(1.0) 189 | 190 | glEnable(GL_DEPTH_TEST) 191 | glDepthFunc(GL_LESS) 192 | glDepthRange(-1.0, 1.0) 193 | glDisable(GL_CULL_FACE) 194 | 195 | glClampColor(GL_CLAMP_READ_COLOR, GL_FALSE) 196 | glClampColor(GL_CLAMP_FRAGMENT_COLOR, GL_FALSE) 197 | glClampColor(GL_CLAMP_VERTEX_COLOR, GL_FALSE) 198 | 199 | print(glCheckFramebufferStatus(GL_FRAMEBUFFER)) 200 | 201 | # NOTE load shaders and set shaders (but not used yet) 202 | vertex_shader = loadShader(GL_VERTEX_SHADER, 'step3_point_avatar/posmap_shaders/v.glsl') 203 | fragment_shader = loadShader(GL_FRAGMENT_SHADER, 'step3_point_avatar/posmap_shaders/f.glsl') 204 | shader_program = createProgram([vertex_shader, fragment_shader]) 205 | 206 | glUseProgram(shader_program) 207 | 208 | proj_list = opt['projection_list'] 209 | 210 | ### NOTE loading shared cano data 211 | SUBJECT = args.subject 212 | SPLIT = args.split 213 | 214 | FN_CANO_DATA = join(opt['data_templates_path'], f'{SUBJECT}_clothed_template.npz') # should have keys 'faces', 'verts', 'weights' 215 | # FN_ZERO_POSE_MINIMAL = join(opt['data_templates_path'], SUBJECT, f'{SUBJECT}_minimal_cpose.ply') 216 | 217 | FN_GENDER_LIST = join(opt['data_templates_path'], 'gender_list.yaml') 218 | with open(FN_GENDER_LIST, 'r') as f_gender_list: 219 | gender_list = yaml.safe_load(f_gender_list) 220 | GENDER = gender_list[SUBJECT] 221 | 222 | n_joints = opt['num_joints'] 223 | 224 | smpl_model = smplx.create(opt['smpl_model_path'], model_type='smpl', gender=GENDER) 225 | smpl_parents = smpl_model.parents.clone().to(opt['device']) 226 | 227 | cano_data = np.load(FN_CANO_DATA) 228 | cano_pose_param = torch.zeros(1, 72).to(opt['device']) 229 | cano_pose_param[:, 5] = opt['leg_angle'] / 180*math.pi 230 | cano_pose_param[:, 8] = -opt['leg_angle'] / 180*math.pi 231 | 232 | tpose_verts = torch.from_numpy(cano_data['verts_tpose']).float()[None] 233 | tpose_joints = vertices2joints(smpl_model.J_regressor, tpose_verts).to(opt['device']) 234 | 235 | if not os.path.exists(join(opt['data_posmaps_path'], SUBJECT, SPLIT)): 236 | os.makedirs(join(opt['data_posmaps_path'], SUBJECT, SPLIT), exist_ok=True) 237 | 238 | FN_FOLDER = join('data_scans', SUBJECT, SPLIT) # packed snarf data 239 | fn_list = sorted(list(glob.glob(join(FN_FOLDER, '*.npz')))) 240 | fn_list = tqdm(fn_list) 241 | for fn in fn_list: 242 | ### NOTE load separate poses 243 | fn_list.set_description(fn) 244 | vertices_cano, feats, smpl_params, mesh = load_data(fn, cano_data, 245 | smpl_parents, 246 | tpose_joints, 247 | cano_pose_param, 248 | device=opt['device'], 249 | remove_root_pose=True) 250 | rgb_dict_this_pose = {} 251 | for proj_id in range(len(proj_list)): 252 | proj_direction = proj_list[proj_id]['dirc'] 253 | # NOTE now starts passing data and render 254 | proj_params = proj_list[proj_id] 255 | 256 | proj_mat = get_proj_mat(proj_params['y_rot'], proj_params['x_rot'], proj_params['y_shift'], proj_params['x_stretch']) 257 | 
glUniformMatrix4fv(glGetUniformLocation(shader_program, 'projection'), 1, GL_FALSE, proj_mat.to_bytes()) 258 | 259 | rgb = render_one(vertices_cano, feats, frame_buffer) 260 | pcd = posmap2pcd(rgb) 261 | rgb_dict_this_pose[proj_direction] = rgb.copy() 262 | 263 | npz_savename = join(opt['data_posmaps_path'], SUBJECT, SPLIT, splitext(basename(fn))[0] + f'_posmap') 264 | np.savez(npz_savename, **rgb_dict_this_pose) 265 | 266 | -------------------------------------------------------------------------------- /step3_point_avatar/lib/dataset.py: -------------------------------------------------------------------------------- 1 | from os.path import join, basename 2 | import glob 3 | from tqdm import tqdm 4 | import math 5 | import numpy as np 6 | import smplx 7 | import torch 8 | from torch.utils.data import Dataset 9 | import glm 10 | 11 | from smplx.lbs import vertices2joints 12 | 13 | class FITEPosmapDataset(Dataset): 14 | @torch.no_grad() 15 | def __init__(self, 16 | subject_list, # dict (key: subject name; val: subject gender and maybe other attributes) 17 | projection_list, 18 | split, 19 | root_rendered, 20 | root_packed, 21 | data_spacing, 22 | remove_hand_foot_pose, 23 | data_shift=0, 24 | selected_subjects=None, 25 | ): 26 | super().__init__() 27 | 28 | self.subject_list = subject_list 29 | self.projection_list = projection_list 30 | self.split = split 31 | self.data_root_rendered = root_rendered 32 | self.data_root_packed = root_packed 33 | self.data_spacing = data_spacing 34 | self.data_shift = data_shift 35 | 36 | # dict with subject names as keys 37 | print(f'[LOG] Found data for these subjects:') 38 | self.data_len = 0 39 | 40 | self.basenames = [] 41 | self.subject_ids = [] 42 | for subject_id in range(len(self.subject_list)): 43 | subject_name = self.subject_list[subject_id]['name'] 44 | if selected_subjects is not None and subject_name not in selected_subjects: 45 | continue 46 | # ic(join(self.data_root_rendered, subject_name, split, '*_posmap.npz')) 47 | subject_basename_list = sorted(list(glob.glob(join(self.data_root_rendered, subject_name, split, '*_posmap.npz'))))[self.data_shift:][::self.data_spacing] 48 | # ic(subject_basename_list) 49 | self.data_len += len(subject_basename_list) 50 | 51 | self.basenames = self.basenames + [basename(name)[:-11] for name in subject_basename_list] 52 | self.subject_ids = self.subject_ids + [subject_id] * len(subject_basename_list) 53 | 54 | print(f'[LOG] {subject_name.ljust(25)}: {len(subject_basename_list)}') 55 | 56 | assert self.data_len == len(self.basenames) 57 | assert self.data_len == len(self.subject_ids) 58 | print(f'[LOG] Found {self.data_len} data in total. 
Loading...') 59 | 60 | ### NOTE load posmaps 61 | print(f'[LOG] Loading posmaps...') 62 | self.posmaps = {} 63 | for proj_id in range(len(self.projection_list)): 64 | proj_direction = self.projection_list[proj_id]['dirc'] 65 | self.posmaps[proj_direction] = [] 66 | for i in tqdm(range(len(self.basenames))): 67 | fn_base = self.basenames[i] 68 | subject_id = self.subject_ids[i] 69 | subject_name = self.subject_list[subject_id]['name'] 70 | posmap_file = np.load(join(self.data_root_rendered, subject_name, split, fn_base + '_posmap.npz')) 71 | for proj_id in range(len(self.projection_list)): 72 | proj_direction = self.projection_list[proj_id]['dirc'] 73 | posmap = torch.from_numpy(posmap_file[proj_direction]).float()[..., :3].permute(2,0,1) 74 | self.posmaps[proj_direction].append(posmap) 75 | 76 | ### NOTE load packed points 77 | print(f'[LOG] Loading packed point clouds and smpl data...') 78 | self.point_list = [] 79 | self.normal_list = [] 80 | self.pose_list = [] 81 | self.transl_list = [] 82 | for i in tqdm(range(len(self.basenames))): 83 | fn_base = self.basenames[i] 84 | subject_id = self.subject_ids[i] 85 | subject_name = self.subject_list[subject_id]['name'] 86 | packed_pcd_file = np.load(join(self.data_root_packed, subject_name, split, fn_base + '.npz')) 87 | if self.split == 'train': 88 | self.point_list.append(torch.from_numpy(packed_pcd_file['scan_pc']).float()) 89 | self.normal_list.append(torch.from_numpy(packed_pcd_file['scan_n']).float()) 90 | self.pose_list.append(torch.from_numpy(packed_pcd_file['pose']).float()) 91 | self.transl_list.append(torch.from_numpy(packed_pcd_file['transl']).float()) 92 | 93 | if remove_hand_foot_pose: 94 | self.pose_list[-1][30:36] = 0 # feet 95 | self.pose_list[-1][66:72] = 0 # fingers 96 | # self.pose_list[-1][21:27] = 0 # ankles 97 | # self.pose_list[-1][60:66] = 0 # wrists 98 | 99 | def __getitem__(self, index): 100 | if self.split == 'train': 101 | ret = { 102 | 'points': self.point_list[index] - self.transl_list[index][None], 103 | 'normals': self.normal_list[index], 104 | 'pose': self.pose_list[index], 105 | 'transl': self.transl_list[index], # note that this is already removed from points 106 | } 107 | else: 108 | ret = { 109 | 'pose': self.pose_list[index], 110 | 'transl': self.transl_list[index], 111 | } 112 | for proj_id in range(len(self.projection_list)): 113 | proj_direction = self.projection_list[proj_id]['dirc'] 114 | ret[f'posmap_{proj_direction}'] = self.posmaps[proj_direction][index] 115 | ret['basename'] = self.basenames[index] 116 | ret['subject_id'] = self.subject_ids[index] 117 | ret['subject_name'] = self.subject_list[self.subject_ids[index]]['name'] 118 | 119 | return ret 120 | 121 | def __len__(self): 122 | return self.data_len 123 | 124 | 125 | 126 | class FITECanoDataRepository: 127 | def __init__(self, 128 | subject_list, 129 | projection_list, 130 | root_cano_data, 131 | channels_geom_feat, 132 | n_points_cano_data, 133 | cano_pose_leg_angle, 134 | smpl_model_path): 135 | 136 | self.subject_list = subject_list 137 | self.projection_list = projection_list 138 | self.root_cano_data = root_cano_data 139 | self.channels_geom_feat = channels_geom_feat 140 | self.cano_pose_leg_angle = cano_pose_leg_angle 141 | 142 | n_subjects = len(self.subject_list) 143 | self.geom_feats = torch.ones(n_subjects, channels_geom_feat, n_points_cano_data).normal_(mean=0., std=0.01) 144 | 145 | self.cano_data_list = { 146 | 'verts_mesh': [], 147 | 'faces_mesh': [], 148 | 'weights_mesh': [], 149 | 'verts_downsampled': [], 150 | 
'normals_downsampled': [], 151 | 'weights_downsampled': [], 152 | 'verts_tpose': [], 153 | } 154 | 155 | ### NOTE load cano data 156 | print(f'[LOG] Loading canonical data...') 157 | for subject_id in range(len(self.subject_list)): 158 | subject_name = self.subject_list[subject_id]['name'] 159 | cano_data = np.load(join(root_cano_data, f'{subject_name}_clothed_template.npz')) 160 | for key in cano_data.files: 161 | if 'verts' in key or 'normals' in key or 'weights' in key: 162 | data_tmp = torch.from_numpy(cano_data[key]).float() 163 | else: 164 | data_tmp = torch.from_numpy(cano_data[key]).int() 165 | self.cano_data_list[key].append(data_tmp) 166 | 167 | for key in self.cano_data_list.keys(): 168 | if 'downsampled' in key or 'tpose' in key: 169 | setattr(self, key, torch.stack(self.cano_data_list[key], dim=0)) 170 | 171 | ### NOTE compute unposed joints 172 | self.smpl_model_male = smplx.create(model_path=smpl_model_path, model_type='smpl', gender='male') 173 | self.smpl_model_female = smplx.create(model_path=smpl_model_path, model_type='smpl', gender='female') 174 | self.smpl_model_neutral = smplx.create(model_path=smpl_model_path, model_type='smpl', gender='neutral') 175 | 176 | joints_tpose = [] 177 | for subject_id in range(len(self.subject_list)): 178 | subject_name = self.subject_list[subject_id]['name'] 179 | subject_gender = self.subject_list[subject_id]['gender'] 180 | if subject_gender == 'male': 181 | joints_tpose.append(vertices2joints(self.smpl_model_male.J_regressor, self.verts_tpose[[subject_id]])) 182 | elif subject_gender == 'female': 183 | joints_tpose.append(vertices2joints(self.smpl_model_female.J_regressor, self.verts_tpose[[subject_id]])) 184 | elif subject_gender == 'neutral': 185 | joints_tpose.append(vertices2joints(self.smpl_model_neutral.J_regressor, self.verts_tpose[[subject_id]])) 186 | else: 187 | print(f'[ERROR] Unknown gender type: {subject_gender}') 188 | joints_tpose = torch.cat(joints_tpose, 0) 189 | setattr(self, 'joints_tpose', joints_tpose) 190 | 191 | ### NOTE precompute projected points 192 | self.projected_points = {} 193 | tensor_ones_tmp = torch.ones(self.verts_downsampled.shape[0], self.verts_downsampled.shape[1], 1) 194 | for proj_id in range(len(self.projection_list)): 195 | proj_config = self.projection_list[proj_id] 196 | proj_mat_glm = self.get_proj_mat(proj_config['y_rot'], proj_config['x_rot'], proj_config['y_shift'], proj_config['x_stretch']) 197 | proj_mat = torch.from_numpy(np.array(proj_mat_glm.to_list()).astype(np.float32)) # [4, 4], for this view only 198 | ### NOTE/IMPORTANT projection matric from glm needs to be transposed 199 | proj_mat = proj_mat.t() 200 | 201 | points_homo = torch.cat([self.verts_downsampled, tensor_ones_tmp], dim=-1) 202 | self.projected_points[proj_config['dirc']] = torch.einsum('rc,bnc->bnr', proj_mat, points_homo)[..., :2] # take the projected coords only 203 | ### NOTE/IMPORTANT projected y coords need to be invertible for pytorch interpolation convention 204 | self.projected_points[proj_config['dirc']][..., 1] *= -1 205 | 206 | ### NOTE cano pose param 207 | self.cano_pose_param = torch.zeros(1, 72) 208 | self.cano_pose_param[:, 5] = self.cano_pose_leg_angle/180*math.pi 209 | self.cano_pose_param[:, 8] = -self.cano_pose_leg_angle/180*math.pi 210 | self.cano_pose_param = self.cano_pose_param.expand(len(self.subject_list), -1) 211 | 212 | self.smpl_parents = self.smpl_model_male.parents.clone() 213 | 214 | def get_proj_mat(self, y_rotate_deg, x_rotate_deg, y_shift, x_stretch=None): 215 | """ 216 | 
y_rotate_deg: rotation angle in degrees 217 | """ 218 | model_y_rot = glm.rotate(y_rotate_deg / 180 * math.pi, [0.0, 1.0, 0.0]) 219 | model_x_rot = glm.rotate(x_rotate_deg / 180 * math.pi, [1.0, 0.0, 0.0]) 220 | 221 | ortho = glm.ortho(-1.0, 1.0, -1.0-y_shift, 1.0-y_shift) 222 | 223 | if x_stretch is not None: 224 | stretch_mat = glm.mat4(1) 225 | stretch_mat[0,0] = x_stretch 226 | return stretch_mat * ortho * model_x_rot * model_y_rot 227 | 228 | return ortho * model_x_rot * model_y_rot 229 | 230 | def set_device(self, device): 231 | for key in self.__dict__.keys(): 232 | if isinstance(getattr(self, key), torch.Tensor): 233 | setattr(self, key, getattr(self, key).to(device)) 234 | if isinstance(getattr(self, key), dict): 235 | for key_of_attr in getattr(self, key).keys(): 236 | if isinstance(getattr(self, key)[key_of_attr], torch.Tensor): 237 | getattr(self, key)[key_of_attr] = getattr(self, key)[key_of_attr].to(device) 238 | return self 239 | -------------------------------------------------------------------------------- /step3_point_avatar/train_fite_point_avatar.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import tqdm 3 | import torch 4 | import torch.nn.functional as F 5 | from torch.utils.data import DataLoader 6 | 7 | import os 8 | from os.path import join 9 | 10 | from .lib.fite_model import FITEModel 11 | from .lib.dataset import FITEPosmapDataset, FITECanoDataRepository 12 | from .lib.losses import normal_loss, chamfer_loss_separate 13 | from .lib.utils import save_result_examples, adjust_loss_weights 14 | from .lib.lbs import lbs, inv_lbs 15 | 16 | if __name__ == '__main__': 17 | 18 | opt = {} 19 | with open(join('configs', 'common.yaml'), 'r') as common_opt_f: 20 | common_opt = yaml.safe_load(common_opt_f) 21 | opt.update(common_opt) 22 | with open(join('configs', f'step3.yaml'), 'r') as step_opt_f: 23 | step_opt = yaml.safe_load(step_opt_f) 24 | opt.update(step_opt) 25 | with open(join('configs', f'{opt["expname"]}_subject_list.yaml'), 'r') as subject_list_f: 26 | subject_list = yaml.safe_load(subject_list_f) 27 | opt['subject_list'] = subject_list 28 | with open(join('configs', f'projection_list.yaml'), 'r') as projection_list_f: 29 | projection_list = yaml.safe_load(projection_list_f) 30 | opt['projection_list'] = projection_list 31 | 32 | ### NOTE exp folders 33 | CHECKPOINTS_PATH = join(opt['result_folder'], opt['expname'], 'step3-checkpoints') 34 | TRAINPCD_PATH = join(opt['result_folder'], opt['expname'], 'step3-train-pcds') 35 | 36 | if not os.path.exists(CHECKPOINTS_PATH): 37 | os.makedirs(CHECKPOINTS_PATH) 38 | if not os.path.exists(TRAINPCD_PATH): 39 | os.makedirs(TRAINPCD_PATH) 40 | 41 | 42 | ### NOTE add subject ids 43 | for subject_id_single in range(len(opt['subject_list'])): 44 | opt['subject_list'][subject_id_single]['id'] = subject_id_single 45 | 46 | 47 | cano_data_repo = FITECanoDataRepository( 48 | subject_list=opt['subject_list'], 49 | projection_list=opt['projection_list'], 50 | root_cano_data=opt['data_templates_path'], 51 | channels_geom_feat=opt['c_geom'], 52 | n_points_cano_data=opt['n_cano_points'], 53 | cano_pose_leg_angle=opt['leg_angle'], 54 | smpl_model_path=opt['smpl_model_path'], 55 | ).set_device(opt['device']) 56 | 57 | train_dataset = FITEPosmapDataset( 58 | subject_list=opt['subject_list'], 59 | projection_list=opt['projection_list'], 60 | split='train', 61 | root_rendered=opt['data_posmaps_path'], 62 | root_packed=opt['data_scans_path'], 63 | remove_hand_foot_pose=False, 64 | 
data_spacing=opt['data_spacing'], 65 | selected_subjects=opt['selected_subjects'], 66 | ) 67 | 68 | train_loader = DataLoader(train_dataset, batch_size=opt['batch_size'], shuffle=True, drop_last=True, num_workers=opt['num_workers']) 69 | 70 | ### NOTE model 71 | model = FITEModel(projection_list=opt['projection_list'], 72 | input_nc=3, 73 | hsize=opt['hsize'], 74 | nf=opt['nf'], 75 | c_geom=opt['c_geom'], 76 | c_pose=opt['c_pose'], 77 | up_mode=opt['up_mode'], 78 | use_dropout=opt['use_dropout'] 79 | ).to(opt['device']) 80 | 81 | cano_data_repo.geom_feats.requires_grad_(True) 82 | optimizer = torch.optim.Adam([ 83 | {"params": model.parameters(), "lr": opt['lr']}, 84 | {"params": cano_data_repo.geom_feats, "lr": opt['lr_geomfeat']} 85 | ]) 86 | 87 | total_iters = 0 88 | for epoch in range(opt['epochs']): 89 | train_bar_per_epoch = tqdm.tqdm(enumerate(train_loader)) 90 | 91 | wdecay_rgl = adjust_loss_weights(opt['w_rgl'], epoch, mode='decay', start=opt['decay_start'], every=opt['decay_every']) 92 | if opt['train_normals_from_start']: 93 | wrise_normal = opt['w_normal'] 94 | else: 95 | wrise_normal = adjust_loss_weights(opt['w_normal'], epoch, mode='rise', start=opt['rise_start'], every=opt['rise_every']) 96 | loss_weights = torch.tensor([opt['w_s2m'], opt['w_m2s'], wrise_normal, wdecay_rgl, opt['w_latent_rgl']]) 97 | 98 | for i, batch in train_bar_per_epoch: 99 | points_gt = batch['points'].to(opt['device']) 100 | normals_gt = batch['normals'].to(opt['device']) 101 | pose = batch['pose'].to(opt['device']) 102 | transl = batch['transl'].to(opt['device']) # NOTE already removed from points, needed only for debug 103 | subject_id = batch['subject_id'].to(opt['device']) 104 | 105 | 106 | posmaps_batch = {} 107 | projected_pts_batch = {} 108 | for proj_id in range(len(model.projection_list)): 109 | proj_direction = model.projection_list[proj_id]['dirc'] 110 | posmaps_batch[proj_direction] = batch[f'posmap_{proj_direction}'].to(opt['device']) 111 | projected_pts_batch[proj_direction] = cano_data_repo.projected_points[proj_direction][subject_id] 112 | 113 | ### NOTE get transformations 114 | geom_feats_batch = cano_data_repo.geom_feats[subject_id] 115 | basepoints_batch = cano_data_repo.verts_downsampled[subject_id] 116 | normals_cano_batch = cano_data_repo.normals_downsampled[subject_id] 117 | weights_cano_batch = cano_data_repo.weights_downsampled[subject_id] 118 | cano_smpl_param_batch = cano_data_repo.cano_pose_param[subject_id] 119 | joints_tpose_batch = cano_data_repo.joints_tpose[subject_id] 120 | 121 | if opt['predeform']: 122 | predeform_offsets = model.predeformer(geom_feats_batch).permute(0,2,1) * opt['predeform_scaling'] # [b, n_pts, 3] 123 | out_unposed = inv_lbs(basepoints_batch + predeform_offsets, joints_tpose_batch, cano_smpl_param_batch, cano_data_repo.smpl_parents, weights_cano_batch) 124 | else: 125 | out_unposed = inv_lbs(basepoints_batch, joints_tpose_batch, cano_smpl_param_batch, cano_data_repo.smpl_parents, weights_cano_batch) 126 | 127 | out = lbs(out_unposed['v_unposed'], joints_tpose_batch, pose, cano_data_repo.smpl_parents, lbs_weights=weights_cano_batch) 128 | posed_verts = out['v_posed'] 129 | 130 | unposing_tfs = out_unposed['v_tfs_inv'] # [1, 85722, 4, 4] 131 | posing_tfs = out['v_tfs'] # same as above 132 | 133 | cano_normals_transformed = torch.einsum('bvrc,bvc->bvr', unposing_tfs[:, :, :3, :3], normals_cano_batch) 134 | cano_normals_transformed = torch.einsum('bvrc,bvc->bvr', posing_tfs[:, :, :3, :3], cano_normals_transformed) 135 | cano_normals_transformed = 
F.normalize(cano_normals_transformed, dim=-1) 136 | 137 | 138 | residuals, normals = model.forward(geom_feats_batch, projected_pts_batch, basepoints_batch, posmaps_batch) 139 | 140 | ### NOTE transform outputs (rotation only) 141 | residuals = residuals.permute(0, 2, 1) # [bs, n_pts, 3] 142 | normals = normals.permute(0, 2, 1) # [bs, n_pts, 3] 143 | 144 | residuals = torch.einsum('bvrc,bvc->bvr', unposing_tfs[:, :, :3, :3], residuals) 145 | residuals = torch.einsum('bvrc,bvc->bvr', posing_tfs[:, :, :3, :3], residuals) 146 | residuals = residuals * opt['residual_scaling'] 147 | 148 | normals = torch.einsum('bvrc,bvc->bvr', unposing_tfs[:, :, :3, :3], normals) 149 | normals = torch.einsum('bvrc,bvc->bvr', posing_tfs[:, :, :3, :3], normals) 150 | normals = F.normalize(normals, dim=-1) 151 | 152 | offset_verts = posed_verts + residuals 153 | 154 | offset_verts = offset_verts.contiguous() 155 | points_gt = points_gt.contiguous() 156 | normals = normals.contiguous() 157 | normals_gt = normals_gt.contiguous() 158 | 159 | ### NOTE original 160 | m2s, s2m, idx_closest_gt, _ = chamfer_loss_separate(offset_verts, points_gt) #idx1: [#pred points] 161 | s2m = torch.mean(s2m) 162 | 163 | # normal loss 164 | lnormal, closest_target_normals = normal_loss(normals, normals_gt, idx_closest_gt) 165 | 166 | # dist from the predicted points to their respective closest point on the GT, projected by 167 | # the normal of these GT points, to appxoimate the point-to-surface distance 168 | nearest_idx = idx_closest_gt.expand(3, -1, -1).permute([1,2,0]).long() # [batch, N] --> [batch, N, 3], repeat for the last dim 169 | target_points_chosen = torch.gather(points_gt, dim=1, index=nearest_idx) 170 | pc_diff = target_points_chosen - offset_verts # vectors from prediction to its closest point in gt pcl 171 | m2s = torch.sum(pc_diff * closest_target_normals, dim=-1) # project on direction of the normal of these gt points 172 | m2s = torch.mean(m2s**2) # the length (squared) is the approx. pred point to scan surface dist. 
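            # The remaining terms are regularizers: rgl_len penalizes large predicted
            # residuals, rgl_latent keeps the per-subject geometric feature codes small,
            # and (when pre-deformation is enabled) rgl_predef penalizes the pre-deformation
            # offsets; all terms are combined using the epoch-dependent weights computed above.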
173 | 174 | rgl_len = torch.mean(residuals ** 2) 175 | rgl_latent = torch.mean(geom_feats_batch**2) 176 | if opt['predeform']: 177 | rgl_predef = torch.mean(predeform_offsets ** 2) 178 | 179 | w_s2m, w_m2s, w_normal, w_rgl, w_latent_rgl = loss_weights 180 | if opt['predeform']: 181 | w_predef = w_rgl.clone() / 5 182 | loss = s2m*w_s2m + m2s*w_m2s + lnormal* w_normal + rgl_len*w_rgl + rgl_latent*w_latent_rgl + rgl_predef*w_predef 183 | else: 184 | loss = s2m*w_s2m + m2s*w_m2s + lnormal* w_normal + rgl_len*w_rgl + rgl_latent*w_latent_rgl 185 | 186 | optimizer.zero_grad() 187 | loss.backward() 188 | optimizer.step() 189 | 190 | 191 | if opt['predeform']: 192 | train_bar_per_epoch.set_description(f"[{total_iters}/{epoch}/{opt['epochs']}] m2s: {m2s:.4e}, s2m: {s2m:.4e}, normal: {lnormal:.4e}, rgl_len: {rgl_len:.4e}, rgl_predef: {rgl_predef:.4e}, rgl_latent: {rgl_latent:.4e}") 193 | else: 194 | train_bar_per_epoch.set_description(f"[{total_iters}/{epoch}/{opt['epochs']}] m2s: {m2s:.4e}, s2m: {s2m:.4e}, normal: {lnormal:.4e}, rgl_len: {rgl_len:.4e}, rgl_latent: {rgl_latent:.4e}") 195 | 196 | save_spacing = 1 197 | if total_iters % opt['save_pcd_every'] == 0: 198 | with torch.no_grad(): 199 | debug_pcd_posed_offset = torch.cat([offset_verts[0] + transl[0][None], normals[0]], 1).detach().cpu().numpy() 200 | for j in range(offset_verts.shape[0])[::save_spacing]: 201 | save_result_examples(TRAINPCD_PATH, f'{opt["expname"]}_epoch{epoch:05d}', batch['basename'][j], 202 | points=offset_verts[j]+transl[j][None], normals=normals[j]) 203 | if opt['save_cano'] and opt['predeform']: 204 | save_result_examples(TRAINPCD_PATH, f'{opt["expname"]}_epoch{epoch:05d}', batch['basename'][j] + '_cano', 205 | points=basepoints_batch[j], normals=normals[j]) 206 | save_result_examples(TRAINPCD_PATH, f'{opt["expname"]}_epoch{epoch:05d}', batch['basename'][j] + '_cano_predef', 207 | points=basepoints_batch[j]+predeform_offsets[j], normals=normals[j]) 208 | 209 | total_iters += 1 210 | 211 | if (epoch + 1) % opt['save_ckpt_every'] == 0: 212 | torch.save(model.state_dict(), join(CHECKPOINTS_PATH, f'checkpoint-{epoch+1:03d}.pt')) 213 | torch.save(cano_data_repo.geom_feats, join(CHECKPOINTS_PATH, f'geom-feats-{epoch+1:03d}.pt')) 214 | torch.save(model.state_dict(), join(CHECKPOINTS_PATH, f'checkpoint-latest.pt')) 215 | torch.save(cano_data_repo.geom_feats, join(CHECKPOINTS_PATH, f'geom-feats-latest.pt')) 216 | -------------------------------------------------------------------------------- /step3_point_avatar/test_fite_point_avatar.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | from tqdm import tqdm 3 | import torch 4 | import torch.nn.functional as F 5 | from torch.utils.data import DataLoader 6 | 7 | import os 8 | from os.path import join 9 | 10 | 11 | from .lib.fite_model import FITEModel 12 | from .lib.dataset import FITEPosmapDataset, FITECanoDataRepository 13 | from .lib.losses import normal_loss, chamfer_loss_separate 14 | from .lib.lbs import lbs, inv_lbs 15 | from .lib.utils import save_result_examples 16 | 17 | 18 | if __name__ == '__main__': 19 | 20 | opt = {} 21 | with open(join('configs', 'common.yaml'), 'r') as common_opt_f: 22 | common_opt = yaml.safe_load(common_opt_f) 23 | opt.update(common_opt) 24 | with open(join('configs', f'step3.yaml'), 'r') as step_opt_f: 25 | step_opt = yaml.safe_load(step_opt_f) 26 | opt.update(step_opt) 27 | with open(join('configs', f'{opt["expname"]}_subject_list.yaml'), 'r') as subject_list_f: 28 | subject_list = 
yaml.safe_load(subject_list_f) 29 | opt['subject_list'] = subject_list 30 | with open(join('configs', f'projection_list.yaml'), 'r') as projection_list_f: 31 | projection_list = yaml.safe_load(projection_list_f) 32 | opt['projection_list'] = projection_list 33 | 34 | ### NOTE exp folders 35 | CHECKPOINTS_PATH = join(opt['result_folder'], opt['expname'], 'step3-checkpoints') 36 | TESTPCD_PATH = join(opt['result_folder'], opt['expname'], 'step3-test-pcds') 37 | 38 | if not os.path.exists(CHECKPOINTS_PATH): 39 | os.makedirs(CHECKPOINTS_PATH) 40 | if not os.path.exists(TESTPCD_PATH): 41 | os.makedirs(TESTPCD_PATH) 42 | 43 | ### NOTE add subject ids 44 | for subject_id_single in range(len(opt['subject_list'])): 45 | opt['subject_list'][subject_id_single]['id'] = subject_id_single 46 | 47 | 48 | cano_data_repo = FITECanoDataRepository( 49 | subject_list=opt['subject_list'], 50 | projection_list=opt['projection_list'], 51 | root_cano_data=opt['data_templates_path'], 52 | channels_geom_feat=opt['c_geom'], 53 | n_points_cano_data=opt['n_cano_points'], 54 | cano_pose_leg_angle=opt['leg_angle'], 55 | smpl_model_path=opt['smpl_model_path'] 56 | ).set_device(opt['device']) 57 | 58 | test_dataset = FITEPosmapDataset( 59 | subject_list=opt['subject_list'], 60 | projection_list=opt['projection_list'], 61 | split='test', 62 | root_rendered=opt['data_posmaps_path'], 63 | root_packed=opt['data_scans_path'], 64 | remove_hand_foot_pose=False, 65 | data_spacing=opt['data_spacing'], 66 | selected_subjects=opt['selected_subjects'], 67 | ) 68 | 69 | 70 | # batch_size = args.batch_size 71 | n_test_samples = len(test_dataset) 72 | test_loader = DataLoader(test_dataset, batch_size=opt['batch_size'], shuffle=False, drop_last=False) 73 | 74 | ### NOTE model preparation 75 | # Y_SHIFT=0.3 76 | model = FITEModel(projection_list=opt['projection_list'], 77 | input_nc=3, 78 | hsize=opt['hsize'], 79 | nf=opt['nf'], 80 | c_geom=opt['c_geom'], 81 | c_pose=opt['c_pose'], 82 | up_mode=opt['up_mode'], 83 | use_dropout=opt['use_dropout'] 84 | ).to(opt['device']) 85 | 86 | # model.load_state_dict(torch.load('checkpoints/checkpoint-1000.pt')) 87 | if opt['load_epoch'] is None: 88 | model.load_state_dict(torch.load(join(CHECKPOINTS_PATH, f'checkpoint-latest.pt'))) 89 | cano_data_repo.geom_feats = torch.load(join(CHECKPOINTS_PATH, f'geom-feats-latest.pt')) 90 | else: 91 | model.load_state_dict(torch.load(join(CHECKPOINTS_PATH, f'checkpoint-{opt["load_epoch"]:03d}.pt'))) 92 | cano_data_repo.geom_feats = torch.load(join(CHECKPOINTS_PATH, f'geom-feats-{opt["load_epoch"]:03d}.pt')) 93 | model.eval() 94 | cano_data_repo.geom_feats.requires_grad_(False) 95 | 96 | 97 | test_s2m, test_m2s, test_lnormal, test_rgl, test_latent_rgl = 0, 0, 0, 0, 0 98 | 99 | 100 | # def tqdm(x): 101 | # return x 102 | with torch.no_grad(): 103 | n_tested_samples = 0 104 | test_bar = tqdm(enumerate(test_loader)) 105 | for i, batch in test_bar: 106 | 107 | # ------------------------------------------------------- 108 | # ------------ load batch data and reshaping ------------ 109 | 110 | if opt['eval_use_gt']: 111 | points_gt = batch['points'].to(opt['device']) 112 | normals_gt = batch['normals'].to(opt['device']) 113 | pose = batch['pose'].to(opt['device']) 114 | transl = batch['transl'].to(opt['device']) # NOTE already removed from points, needed only for debug 115 | subject_id = batch['subject_id'].to(opt['device']) 116 | 117 | 118 | posmaps_batch = {} 119 | # posmap_weights_batch = {} 120 | projected_pts_batch = {} 121 | # ic(projected_pts_batch) 122 | for 
proj_id in range(len(model.projection_list)): 123 | proj_direction = model.projection_list[proj_id]['dirc'] 124 | posmaps_batch[proj_direction] = batch[f'posmap_{proj_direction}'].to(opt['device']) 125 | # posmap_weights_batch[proj_direction] = cano_data_repo.posmap_weights[proj_direction][subject_id] 126 | projected_pts_batch[proj_direction] = cano_data_repo.projected_points[proj_direction][subject_id] 127 | # ic(cano_data_repo.projected_points.shape, projected_pts_batch[proj_direction].shape) 128 | 129 | ### NOTE get transformations 130 | # verts_cano_batch = cano_data_repo.verts_downsampled[subject_id] 131 | geom_feats_batch = cano_data_repo.geom_feats[subject_id] 132 | basepoints_batch = cano_data_repo.verts_downsampled[subject_id] 133 | normals_cano_batch = cano_data_repo.normals_downsampled[subject_id] 134 | weights_cano_batch = cano_data_repo.weights_downsampled[subject_id] 135 | cano_smpl_param_batch = cano_data_repo.cano_pose_param[subject_id] 136 | joints_tpose_batch = cano_data_repo.joints_tpose[subject_id] 137 | 138 | if opt['predeform']: 139 | predeform_offsets = model.predeformer(geom_feats_batch).permute(0,2,1) * opt['predeform_scaling'] # [b, n_pts, 3] 140 | out_unposed = inv_lbs(basepoints_batch + predeform_offsets, joints_tpose_batch, cano_smpl_param_batch, cano_data_repo.smpl_parents, weights_cano_batch) 141 | else: 142 | out_unposed = inv_lbs(basepoints_batch, joints_tpose_batch, cano_smpl_param_batch, cano_data_repo.smpl_parents, weights_cano_batch) 143 | out = lbs(out_unposed['v_unposed'], joints_tpose_batch, pose, cano_data_repo.smpl_parents, lbs_weights=weights_cano_batch) 144 | posed_verts = out['v_posed'] 145 | 146 | unposing_tfs = out_unposed['v_tfs_inv'] # [1, 85722, 4, 4] 147 | posing_tfs = out['v_tfs'] # same as above 148 | 149 | cano_normals_transformed = torch.einsum('bvrc,bvc->bvr', unposing_tfs[:, :, :3, :3], normals_cano_batch) 150 | cano_normals_transformed = torch.einsum('bvrc,bvc->bvr', posing_tfs[:, :, :3, :3], cano_normals_transformed) 151 | cano_normals_transformed = F.normalize(cano_normals_transformed, dim=-1) 152 | 153 | residuals, normals = model.forward(geom_feats_batch, projected_pts_batch, basepoints_batch, posmaps_batch) 154 | 155 | ### NOTE transform outputs (rotation only) 156 | residuals = residuals.permute(0, 2, 1) # [bs, n_pts, 3] 157 | normals = normals.permute(0, 2, 1) # [bs, n_pts, 3] 158 | 159 | residuals = torch.einsum('bvrc,bvc->bvr', unposing_tfs[:, :, :3, :3], residuals) 160 | residuals = torch.einsum('bvrc,bvc->bvr', posing_tfs[:, :, :3, :3], residuals) 161 | residuals = residuals * opt['residual_scaling'] 162 | 163 | normals = torch.einsum('bvrc,bvc->bvr', unposing_tfs[:, :, :3, :3], normals) 164 | normals = torch.einsum('bvrc,bvc->bvr', posing_tfs[:, :, :3, :3], normals) 165 | normals = F.normalize(normals, dim=-1) 166 | 167 | offset_verts = posed_verts + residuals 168 | 169 | offset_verts = offset_verts.contiguous() 170 | 171 | if opt['eval_use_gt']: 172 | points_gt = points_gt.contiguous() 173 | normals = normals.contiguous() 174 | normals_gt = normals_gt.contiguous() 175 | 176 | 177 | # -------------------------------- 178 | # ------------ losses ------------ 179 | bs = points_gt.shape[0] 180 | 181 | m2s_real, s2m, idx_closest_gt, _ = chamfer_loss_separate(offset_verts, points_gt) #idx1: [#pred points] 182 | s2m = s2m.mean(1) 183 | lnormal, closest_target_normals = normal_loss(normals, normals_gt, idx_closest_gt, phase='test') 184 | nearest_idx = idx_closest_gt.expand(3, -1, -1).permute([1,2,0]).long() # [batch, N] --> 
[batch, N, 3], repeat for the last dim 185 | target_points_chosen = torch.gather(points_gt, dim=1, index=nearest_idx) 186 | pc_diff = target_points_chosen - offset_verts # vectors from prediction to its closest point in gt pcl 187 | m2s = torch.sum(pc_diff * closest_target_normals, dim=-1) # project on direction of the normal of these gt points 188 | m2s = torch.mean(m2s**2, 1) # the length (squared) is the approx. pred point to scan surface dist. 189 | 190 | rgl_len = torch.mean((residuals ** 2).reshape(bs, -1), 1) 191 | rgl_latent = torch.mean(geom_feats_batch**2) 192 | 193 | # ------------------------------------------ 194 | # ------------ accumulate stats ------------ 195 | 196 | test_m2s += torch.sum(m2s) 197 | test_s2m += torch.sum(s2m) 198 | test_lnormal += torch.sum(lnormal) 199 | test_rgl += torch.sum(rgl_len) 200 | test_latent_rgl += rgl_latent 201 | 202 | 203 | save_spacing = 1 204 | 205 | for j in range(offset_verts.shape[0])[::save_spacing]: 206 | ### NOTE save pred 207 | if not os.path.exists(join(TESTPCD_PATH, batch['subject_name'][j])): 208 | os.makedirs(join(TESTPCD_PATH, batch['subject_name'][j])) 209 | save_result_examples(join(TESTPCD_PATH, batch['subject_name'][j]), opt["expname"], batch['basename'][j], 210 | points=offset_verts[j]+transl[j][None], normals=normals[j]) 211 | save_result_examples(join(TESTPCD_PATH, batch['subject_name'][j]), opt["expname"], batch['basename'][j] + '_base', 212 | points=posed_verts[j]+transl[j][None], normals=cano_normals_transformed[j]) 213 | 214 | ### NOTE save_gt 215 | if opt['save_cano'] and opt['predeform']: 216 | save_result_examples(TESTPCD_PATH, opt["expname"], batch['basename'][j] + '_cano', 217 | points=basepoints_batch[j], normals=normals[j]) 218 | save_result_examples(TESTPCD_PATH, opt["expname"], batch['basename'][j] + '_cano_predef', 219 | points=basepoints_batch[j]+predeform_offsets[j], normals=normals[j]) 220 | n_tested_samples += offset_verts.shape[0] 221 | test_bar.set_description(f'[LOG] tested {n_tested_samples}/{n_test_samples} samples') 222 | 223 | if opt['eval_use_gt']: 224 | 225 | test_m2s /= n_test_samples 226 | test_s2m /= n_test_samples 227 | test_lnormal /= n_test_samples 228 | test_rgl /= n_test_samples 229 | test_latent_rgl /= n_test_samples 230 | 231 | test_s2m, test_m2s, test_lnormal, test_rgl, test_latent_rgl = list(map(lambda x: x.detach().cpu().numpy(), [test_s2m, test_m2s, test_lnormal, test_rgl, test_latent_rgl])) 232 | 233 | print("model2scan dist: {:.3e}, scan2model dist: {:.3e}, normal loss: {:.3e}," 234 | " rgl term: {:.3e}, latent rgl term: {:.3e}".format(test_m2s.item(), test_s2m.item(), test_lnormal.item(), 235 | test_rgl.item(), test_latent_rgl.item())) 236 | -------------------------------------------------------------------------------- /step2_implicit_template/lib/libmise/mise.pyx: -------------------------------------------------------------------------------- 1 | # distutils: language = c++ 2 | cimport cython 3 | from cython.operator cimport dereference as dref 4 | from libcpp.vector cimport vector 5 | from libcpp.map cimport map 6 | from libc.math cimport isnan, NAN 7 | import numpy as np 8 | 9 | 10 | cdef struct Vector3D: 11 | int x, y, z 12 | 13 | 14 | cdef struct Voxel: 15 | Vector3D loc 16 | unsigned int level 17 | bint is_leaf 18 | unsigned long children[2][2][2] 19 | 20 | 21 | cdef struct GridPoint: 22 | Vector3D loc 23 | double value 24 | bint known 25 | 26 | 27 | cdef inline unsigned long vec_to_idx(Vector3D coord, long resolution): 28 | cdef unsigned long idx 29 | idx =
resolution * resolution * coord.x + resolution * coord.y + coord.z 30 | return idx 31 | 32 | 33 | cdef class MISE: 34 | cdef vector[Voxel] voxels 35 | cdef vector[GridPoint] grid_points 36 | cdef map[long, long] grid_point_hash 37 | cdef readonly int resolution_0 38 | cdef readonly int depth 39 | cdef readonly double threshold 40 | cdef readonly int voxel_size_0 41 | cdef readonly int resolution 42 | 43 | def __cinit__(self, int resolution_0, int depth, double threshold): 44 | self.resolution_0 = resolution_0 45 | self.depth = depth 46 | self.threshold = threshold 47 | self.voxel_size_0 = (1 << depth) 48 | self.resolution = resolution_0 * self.voxel_size_0 49 | 50 | # Create initial voxels 51 | self.voxels.reserve(resolution_0 * resolution_0 * resolution_0) 52 | 53 | cdef Voxel voxel 54 | cdef GridPoint point 55 | cdef Vector3D loc 56 | cdef int i, j, k 57 | for i in range(resolution_0): 58 | for j in range(resolution_0): 59 | for k in range (resolution_0): 60 | loc = Vector3D( 61 | i * self.voxel_size_0, 62 | j * self.voxel_size_0, 63 | k * self.voxel_size_0, 64 | ) 65 | voxel = Voxel( 66 | loc=loc, 67 | level=0, 68 | is_leaf=True, 69 | ) 70 | 71 | assert(self.voxels.size() == vec_to_idx(Vector3D(i, j, k), resolution_0)) 72 | self.voxels.push_back(voxel) 73 | 74 | # Create initial grid points 75 | self.grid_points.reserve((resolution_0 + 1) * (resolution_0 + 1) * (resolution_0 + 1)) 76 | for i in range(resolution_0 + 1): 77 | for j in range(resolution_0 + 1): 78 | for k in range(resolution_0 + 1): 79 | loc = Vector3D( 80 | i * self.voxel_size_0, 81 | j * self.voxel_size_0, 82 | k * self.voxel_size_0, 83 | ) 84 | assert(self.grid_points.size() == vec_to_idx(Vector3D(i, j, k), resolution_0 + 1)) 85 | self.add_grid_point(loc) 86 | 87 | def update(self, long[:, :] points, double[:] values): 88 | """Update points and set their values. 
Also determine all active voxels and subdivide them.""" 89 | assert(points.shape[0] == values.shape[0]) 90 | assert(points.shape[1] == 3) 91 | cdef Vector3D loc 92 | cdef long idx 93 | cdef int i 94 | 95 | # Find all indices of point and set value 96 | for i in range(points.shape[0]): 97 | loc = Vector3D(points[i, 0], points[i, 1], points[i, 2]) 98 | idx = self.get_grid_point_idx(loc) 99 | if idx == -1: 100 | raise ValueError('Point not in grid!') 101 | self.grid_points[idx].value = values[i] 102 | self.grid_points[idx].known = True 103 | # Subdivide activate voxels and add new points 104 | self.subdivide_voxels() 105 | 106 | def query(self): 107 | """Query points to evaluate.""" 108 | # Find all points with unknown value 109 | cdef vector[Vector3D] points 110 | cdef int n_unknown = 0 111 | for p in self.grid_points: 112 | if not p.known: 113 | n_unknown += 1 114 | 115 | points.reserve(n_unknown) 116 | for p in self.grid_points: 117 | if not p.known: 118 | points.push_back(p.loc) 119 | 120 | # Convert to numpy 121 | points_np = np.zeros((points.size(), 3), dtype=np.int64) 122 | cdef long[:, :] points_view = points_np 123 | for i in range(points.size()): 124 | points_view[i, 0] = points[i].x 125 | points_view[i, 1] = points[i].y 126 | points_view[i, 2] = points[i].z 127 | 128 | return points_np 129 | 130 | def to_dense(self): 131 | """Output dense matrix at highest resolution.""" 132 | out_array = np.full((self.resolution + 1,) * 3, np.nan) 133 | cdef double[:, :, :] out_view = out_array 134 | cdef GridPoint point 135 | cdef int i, j, k 136 | 137 | for point in self.grid_points: 138 | # Take voxel for which points is upper left corner 139 | # assert(point.known) 140 | out_view[point.loc.x, point.loc.y, point.loc.z] = point.value 141 | 142 | # Complete along x axis 143 | for i in range(1, self.resolution + 1): 144 | for j in range(self.resolution + 1): 145 | for k in range(self.resolution + 1): 146 | if isnan(out_view[i, j, k]): 147 | out_view[i, j, k] = out_view[i-1, j, k] 148 | 149 | # Complete along y axis 150 | for i in range(self.resolution + 1): 151 | for j in range(1, self.resolution + 1): 152 | for k in range(self.resolution + 1): 153 | if isnan(out_view[i, j, k]): 154 | out_view[i, j, k] = out_view[i, j-1, k] 155 | 156 | 157 | # Complete along z axis 158 | for i in range(self.resolution + 1): 159 | for j in range(self.resolution + 1): 160 | for k in range(1, self.resolution + 1): 161 | if isnan(out_view[i, j, k]): 162 | out_view[i, j, k] = out_view[i, j, k-1] 163 | assert(not isnan(out_view[i, j, k])) 164 | return out_array 165 | 166 | def get_points(self): 167 | points_np = np.zeros((self.grid_points.size(), 3), dtype=np.int64) 168 | values_np = np.zeros((self.grid_points.size()), dtype=np.float64) 169 | 170 | cdef long[:, :] points_view = points_np 171 | cdef double[:] values_view = values_np 172 | cdef Vector3D loc 173 | cdef int i 174 | 175 | for i in range(self.grid_points.size()): 176 | loc = self.grid_points[i].loc 177 | points_view[i, 0] = loc.x 178 | points_view[i, 1] = loc.y 179 | points_view[i, 2] = loc.z 180 | values_view[i] = self.grid_points[i].value 181 | 182 | return points_np, values_np 183 | 184 | cdef void subdivide_voxels(self) except +: 185 | cdef vector[bint] next_to_positive 186 | cdef vector[bint] next_to_negative 187 | cdef int i, j, k 188 | cdef long idx 189 | cdef Vector3D loc, adj_loc 190 | 191 | # Initialize vectors 192 | next_to_positive.resize(self.voxels.size(), False) 193 | next_to_negative.resize(self.voxels.size(), False) 194 | 195 | # Iterate over 
grid points and mark voxels active 196 | # TODO: can move this to update operation and add attibute to voxel 197 | for grid_point in self.grid_points: 198 | loc = grid_point.loc 199 | if not grid_point.known: 200 | continue 201 | 202 | # Iterate over the 8 adjacent voxels 203 | for i in range(-1, 1): 204 | for j in range(-1, 1): 205 | for k in range(-1, 1): 206 | adj_loc = Vector3D( 207 | x=loc.x + i, 208 | y=loc.y + j, 209 | z=loc.z + k, 210 | ) 211 | idx = self.get_voxel_idx(adj_loc) 212 | if idx == -1: 213 | continue 214 | 215 | if grid_point.value >= self.threshold: 216 | next_to_positive[idx] = True 217 | if grid_point.value <= self.threshold: 218 | next_to_negative[idx] = True 219 | 220 | cdef int n_subdivide = 0 221 | 222 | for idx in range(self.voxels.size()): 223 | if not self.voxels[idx].is_leaf or self.voxels[idx].level == self.depth: 224 | continue 225 | if next_to_positive[idx] and next_to_negative[idx]: 226 | n_subdivide += 1 227 | 228 | self.voxels.reserve(self.voxels.size() + 8 * n_subdivide) 229 | self.grid_points.reserve(self.voxels.size() + 19 * n_subdivide) 230 | 231 | for idx in range(self.voxels.size()): 232 | if not self.voxels[idx].is_leaf or self.voxels[idx].level == self.depth: 233 | continue 234 | if next_to_positive[idx] and next_to_negative[idx]: 235 | self.subdivide_voxel(idx) 236 | 237 | cdef void subdivide_voxel(self, long idx): 238 | cdef Voxel voxel 239 | cdef GridPoint point 240 | cdef Vector3D loc0 = self.voxels[idx].loc 241 | cdef Vector3D loc 242 | cdef int new_level = self.voxels[idx].level + 1 243 | cdef int new_size = 1 << (self.depth - new_level) 244 | assert(new_level <= self.depth) 245 | assert(1 <= new_size <= self.voxel_size_0) 246 | 247 | # Current voxel is not leaf anymore 248 | self.voxels[idx].is_leaf = False 249 | # Add new voxels 250 | cdef int i, j, k 251 | for i in range(2): 252 | for j in range(2): 253 | for k in range(2): 254 | loc = Vector3D( 255 | x=loc0.x + i * new_size, 256 | y=loc0.y + j * new_size, 257 | z=loc0.z + k * new_size, 258 | ) 259 | voxel = Voxel( 260 | loc=loc, 261 | level=new_level, 262 | is_leaf=True 263 | ) 264 | 265 | self.voxels[idx].children[i][j][k] = self.voxels.size() 266 | self.voxels.push_back(voxel) 267 | 268 | # Add new grid points 269 | for i in range(3): 270 | for j in range(3): 271 | for k in range(3): 272 | loc = Vector3D( 273 | loc0.x + i * new_size, 274 | loc0.y + j * new_size, 275 | loc0.z + k * new_size, 276 | ) 277 | 278 | # Only add new grid points 279 | if self.get_grid_point_idx(loc) == -1: 280 | self.add_grid_point(loc) 281 | 282 | 283 | @cython.cdivision(True) 284 | cdef long get_voxel_idx(self, Vector3D loc) except +: 285 | """Utility function for getting voxel index corresponding to 3D coordinates.""" 286 | # Shorthands 287 | cdef long resolution = self.resolution 288 | cdef long resolution_0 = self.resolution_0 289 | cdef long depth = self.depth 290 | cdef long voxel_size_0 = self.voxel_size_0 291 | 292 | # Return -1 if point lies outside bounds 293 | if not (0 <= loc.x < resolution and 0<= loc.y < resolution and 0 <= loc.z < resolution): 294 | return -1 295 | 296 | # Coordinates in coarse voxel grid 297 | cdef Vector3D loc0 = Vector3D( 298 | x=loc.x >> depth, 299 | y=loc.y >> depth, 300 | z=loc.z >> depth, 301 | ) 302 | 303 | # Initial voxels 304 | cdef int idx = vec_to_idx(loc0, resolution_0) 305 | cdef Voxel voxel = self.voxels[idx] 306 | assert(voxel.loc.x == loc0.x * voxel_size_0) 307 | assert(voxel.loc.y == loc0.y * voxel_size_0) 308 | assert(voxel.loc.z == loc0.z * voxel_size_0) 
309 | 310 | # Relative coordinates 311 | cdef Vector3D loc_rel = Vector3D( 312 | x=loc.x - (loc0.x << depth), 313 | y=loc.y - (loc0.y << depth), 314 | z=loc.z - (loc0.z << depth), 315 | ) 316 | 317 | cdef Vector3D loc_offset 318 | cdef long voxel_size = voxel_size_0 319 | 320 | while not voxel.is_leaf: 321 | voxel_size = voxel_size >> 1 322 | assert(voxel_size >= 1) 323 | 324 | # Determine child 325 | loc_offset = Vector3D( 326 | x=1 if (loc_rel.x >= voxel_size) else 0, 327 | y=1 if (loc_rel.y >= voxel_size) else 0, 328 | z=1 if (loc_rel.z >= voxel_size) else 0, 329 | ) 330 | # New voxel 331 | idx = voxel.children[loc_offset.x][loc_offset.y][loc_offset.z] 332 | voxel = self.voxels[idx] 333 | 334 | # New relative coordinates 335 | loc_rel = Vector3D( 336 | x=loc_rel.x - loc_offset.x * voxel_size, 337 | y=loc_rel.y - loc_offset.y * voxel_size, 338 | z=loc_rel.z - loc_offset.z * voxel_size, 339 | ) 340 | 341 | assert(0<= loc_rel.x < voxel_size) 342 | assert(0<= loc_rel.y < voxel_size) 343 | assert(0<= loc_rel.z < voxel_size) 344 | 345 | 346 | # Return idx 347 | return idx 348 | 349 | 350 | cdef inline void add_grid_point(self, Vector3D loc): 351 | cdef GridPoint point = GridPoint( 352 | loc=loc, 353 | value=0., 354 | known=False, 355 | ) 356 | self.grid_point_hash[vec_to_idx(loc, self.resolution + 1)] = self.grid_points.size() 357 | self.grid_points.push_back(point) 358 | 359 | cdef inline int get_grid_point_idx(self, Vector3D loc): 360 | p_idx = self.grid_point_hash.find(vec_to_idx(loc, self.resolution + 1)) 361 | if p_idx == self.grid_point_hash.end(): 362 | return -1 363 | 364 | cdef int idx = dref(p_idx).second 365 | assert(self.grid_points[idx].loc.x == loc.x) 366 | assert(self.grid_points[idx].loc.y == loc.y) 367 | assert(self.grid_points[idx].loc.z == loc.z) 368 | 369 | return idx -------------------------------------------------------------------------------- /step2_implicit_template/lib/smpl/lbs.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems and the Max Planck Institute for Biological 14 | # Cybernetics. All rights reserved. 15 | # 16 | # Contact: ps-license@tuebingen.mpg.de 17 | 18 | from __future__ import absolute_import 19 | from __future__ import print_function 20 | from __future__ import division 21 | 22 | import numpy as np 23 | 24 | import torch 25 | import torch.nn.functional as F 26 | 27 | from .utils import rot_mat_to_euler 28 | 29 | 30 | def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx, 31 | dynamic_lmk_b_coords, 32 | neck_kin_chain, dtype=torch.float32): 33 | ''' Compute the faces, barycentric coordinates for the dynamic landmarks 34 | 35 | 36 | To do so, we first compute the rotation of the neck around the y-axis 37 | and then use a pre-computed look-up table to find the faces and the 38 | barycentric coordinates that will be used. 
39 | 40 | Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de) 41 | for providing the original TensorFlow implementation and for the LUT. 42 | 43 | Parameters 44 | ---------- 45 | vertices: torch.tensor BxVx3, dtype = torch.float32 46 | The tensor of input vertices 47 | pose: torch.tensor Bx(Jx3), dtype = torch.float32 48 | The current pose of the body model 49 | dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long 50 | The look-up table from neck rotation to faces 51 | dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32 52 | The look-up table from neck rotation to barycentric coordinates 53 | neck_kin_chain: list 54 | A python list that contains the indices of the joints that form the 55 | kinematic chain of the neck. 56 | dtype: torch.dtype, optional 57 | 58 | Returns 59 | ------- 60 | dyn_lmk_faces_idx: torch.tensor, dtype = torch.long 61 | A tensor of size BxL that contains the indices of the faces that 62 | will be used to compute the current dynamic landmarks. 63 | dyn_lmk_b_coords: torch.tensor, dtype = torch.float32 64 | A tensor of size BxL that contains the indices of the faces that 65 | will be used to compute the current dynamic landmarks. 66 | ''' 67 | 68 | batch_size = vertices.shape[0] 69 | 70 | aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1, 71 | neck_kin_chain) 72 | rot_mats = batch_rodrigues( 73 | aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3) 74 | 75 | rel_rot_mat = torch.eye(3, device=vertices.device, 76 | dtype=dtype).unsqueeze_(dim=0) 77 | for idx in range(len(neck_kin_chain)): 78 | rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat) 79 | 80 | y_rot_angle = torch.round( 81 | torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi, 82 | max=39)).to(dtype=torch.long) 83 | neg_mask = y_rot_angle.lt(0).to(dtype=torch.long) 84 | mask = y_rot_angle.lt(-39).to(dtype=torch.long) 85 | neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle) 86 | y_rot_angle = (neg_mask * neg_vals + 87 | (1 - neg_mask) * y_rot_angle) 88 | 89 | dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx, 90 | 0, y_rot_angle) 91 | dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords, 92 | 0, y_rot_angle) 93 | 94 | return dyn_lmk_faces_idx, dyn_lmk_b_coords 95 | 96 | 97 | def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords): 98 | ''' Calculates landmarks by barycentric interpolation 99 | 100 | Parameters 101 | ---------- 102 | vertices: torch.tensor BxVx3, dtype = torch.float32 103 | The tensor of input vertices 104 | faces: torch.tensor Fx3, dtype = torch.long 105 | The faces of the mesh 106 | lmk_faces_idx: torch.tensor L, dtype = torch.long 107 | The tensor with the indices of the faces used to calculate the 108 | landmarks. 
109 | lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32 110 | The tensor of barycentric coordinates that are used to interpolate 111 | the landmarks 112 | 113 | Returns 114 | ------- 115 | landmarks: torch.tensor BxLx3, dtype = torch.float32 116 | The coordinates of the landmarks for each mesh in the batch 117 | ''' 118 | # Extract the indices of the vertices for each face 119 | # BxLx3 120 | batch_size, num_verts = vertices.shape[:2] 121 | device = vertices.device 122 | 123 | lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).expand( 124 | batch_size, -1, -1).long() 125 | 126 | lmk_faces = lmk_faces + torch.arange( 127 | batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts 128 | 129 | lmk_vertices = vertices.view(-1, 3)[lmk_faces].view( 130 | batch_size, -1, 3, 3) 131 | 132 | landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords]) 133 | return landmarks 134 | 135 | 136 | def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents, 137 | lbs_weights, pose2rot=True, dtype=torch.float32, pose_blend=True): 138 | ''' Performs Linear Blend Skinning with the given shape and pose parameters 139 | 140 | Parameters 141 | ---------- 142 | betas : torch.tensor BxNB 143 | The tensor of shape parameters 144 | pose : torch.tensor Bx(J + 1) * 3 145 | The pose parameters in axis-angle format 146 | v_template torch.tensor BxVx3 147 | The template mesh that will be deformed 148 | shapedirs : torch.tensor 1xNB 149 | The tensor of PCA shape displacements 150 | posedirs : torch.tensor Px(V * 3) 151 | The pose PCA coefficients 152 | J_regressor : torch.tensor JxV 153 | The regressor array that is used to calculate the joints from 154 | the position of the vertices 155 | parents: torch.tensor J 156 | The array that describes the kinematic tree for the model 157 | lbs_weights: torch.tensor N x V x (J + 1) 158 | The linear blend skinning weights that represent how much the 159 | rotation matrix of each part affects each vertex 160 | pose2rot: bool, optional 161 | Flag on whether to convert the input pose tensor to rotation 162 | matrices. The default value is True. If False, then the pose tensor 163 | should already contain rotation matrices and have a size of 164 | Bx(J + 1)x9 165 | dtype: torch.dtype, optional 166 | 167 | Returns 168 | ------- 169 | verts: torch.tensor BxVx3 170 | The vertices of the mesh after applying the shape and pose 171 | displacements. 172 | joints: torch.tensor BxJx3 173 | The joints of the model 174 | ''' 175 | 176 | batch_size = max(betas.shape[0], pose.shape[0]) 177 | device = betas.device 178 | 179 | # Add shape contribution 180 | v_shaped = v_template + blend_shapes(betas, shapedirs) 181 | 182 | # Get the joints 183 | # NxJx3 array 184 | J = vertices2joints(J_regressor, v_shaped) 185 | 186 | # 3. 
Add pose blend shapes 187 | # N x J x 3 x 3 188 | ident = torch.eye(3, dtype=dtype, device=device) 189 | 190 | 191 | if pose2rot: 192 | rot_mats = batch_rodrigues( 193 | pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3]) 194 | 195 | pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1]) 196 | # (N x P) x (P, V * 3) -> N x V x 3 197 | pose_offsets = torch.matmul(pose_feature, posedirs) \ 198 | .view(batch_size, -1, 3) 199 | else: 200 | pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident 201 | rot_mats = pose.view(batch_size, -1, 3, 3) 202 | 203 | pose_offsets = torch.matmul(pose_feature.view(batch_size, -1), 204 | posedirs).view(batch_size, -1, 3) 205 | 206 | if pose_blend: 207 | v_posed = pose_offsets + v_shaped 208 | else: 209 | v_posed = v_shaped 210 | 211 | # 4. Get the global joint location 212 | J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype) 213 | 214 | # 5. Do skinning: 215 | # W is N x V x (J + 1) 216 | W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1]) 217 | # (N x V x (J + 1)) x (N x (J + 1) x 16) 218 | num_joints = J_regressor.shape[0] 219 | T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \ 220 | .view(batch_size, -1, 4, 4) 221 | 222 | homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1], 223 | dtype=dtype, device=device) 224 | v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2) 225 | v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1)) 226 | 227 | verts = v_homo[:, :, :3, 0] 228 | 229 | return verts, J_transformed, T, W, A.view(batch_size, num_joints, 4,4) 230 | 231 | 232 | def vertices2joints(J_regressor, vertices): 233 | ''' Calculates the 3D joint locations from the vertices 234 | 235 | Parameters 236 | ---------- 237 | J_regressor : torch.tensor JxV 238 | The regressor array that is used to calculate the joints from the 239 | position of the vertices 240 | vertices : torch.tensor BxVx3 241 | The tensor of mesh vertices 242 | 243 | Returns 244 | ------- 245 | torch.tensor BxJx3 246 | The location of the joints 247 | ''' 248 | 249 | return torch.einsum('bik,ji->bjk', [vertices, J_regressor]) 250 | 251 | 252 | def blend_shapes(betas, shape_disps): 253 | ''' Calculates the per vertex displacement due to the blend shapes 254 | 255 | 256 | Parameters 257 | ---------- 258 | betas : torch.tensor Bx(num_betas) 259 | Blend shape coefficients 260 | shape_disps: torch.tensor Vx3x(num_betas) 261 | Blend shapes 262 | 263 | Returns 264 | ------- 265 | torch.tensor BxVx3 266 | The per-vertex displacement due to shape deformation 267 | ''' 268 | 269 | # Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l] 270 | # i.e. Multiply each shape displacement by its corresponding beta and 271 | # then sum them. 
272 | blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps]) 273 | return blend_shape 274 | 275 | 276 | def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32): 277 | ''' Calculates the rotation matrices for a batch of rotation vectors 278 | Parameters 279 | ---------- 280 | rot_vecs: torch.tensor Nx3 281 | array of N axis-angle vectors 282 | Returns 283 | ------- 284 | R: torch.tensor Nx3x3 285 | The rotation matrices for the given axis-angle parameters 286 | ''' 287 | 288 | batch_size = rot_vecs.shape[0] 289 | device = rot_vecs.device 290 | 291 | angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True) 292 | rot_dir = rot_vecs / angle 293 | 294 | cos = torch.unsqueeze(torch.cos(angle), dim=1) 295 | sin = torch.unsqueeze(torch.sin(angle), dim=1) 296 | 297 | # Bx1 arrays 298 | rx, ry, rz = torch.split(rot_dir, 1, dim=1) 299 | K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device) 300 | 301 | zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device) 302 | K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \ 303 | .view((batch_size, 3, 3)) 304 | 305 | ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0) 306 | rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K) 307 | return rot_mat 308 | 309 | 310 | def transform_mat(R, t): 311 | ''' Creates a batch of transformation matrices 312 | Args: 313 | - R: Bx3x3 array of a batch of rotation matrices 314 | - t: Bx3x1 array of a batch of translation vectors 315 | Returns: 316 | - T: Bx4x4 Transformation matrix 317 | ''' 318 | # No padding left or right, only add an extra row 319 | return torch.cat([F.pad(R, [0, 0, 0, 1]), 320 | F.pad(t, [0, 0, 0, 1], value=1)], dim=2) 321 | 322 | 323 | def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32): 324 | """ 325 | Applies a batch of rigid transformations to the joints 326 | 327 | Parameters 328 | ---------- 329 | rot_mats : torch.tensor BxNx3x3 330 | Tensor of rotation matrices 331 | joints : torch.tensor BxNx3 332 | Locations of joints 333 | parents : torch.tensor BxN 334 | The kinematic tree of each object 335 | dtype : torch.dtype, optional: 336 | The data type of the created tensors, the default is torch.float32 337 | 338 | Returns 339 | ------- 340 | posed_joints : torch.tensor BxNx3 341 | The locations of the joints after applying the pose rotations 342 | rel_transforms : torch.tensor BxNx4x4 343 | The relative (with respect to the root joint) rigid transformations 344 | for all the joints 345 | """ 346 | 347 | joints = torch.unsqueeze(joints, dim=-1) 348 | 349 | rel_joints = joints.clone() 350 | rel_joints[:, 1:] -= joints[:, parents[1:]] 351 | 352 | transforms_mat = transform_mat( 353 | rot_mats.reshape(-1, 3, 3), 354 | rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4) 355 | 356 | transform_chain = [transforms_mat[:, 0]] 357 | for i in range(1, parents.shape[0]): 358 | # Subtract the joint location at the rest pose 359 | # No need for rotation, since it's identity when at rest 360 | curr_res = torch.matmul(transform_chain[parents[i]], 361 | transforms_mat[:, i]) 362 | transform_chain.append(curr_res) 363 | 364 | transforms = torch.stack(transform_chain, dim=1) 365 | 366 | # The last column of the transformations contains the posed joints 367 | posed_joints = transforms[:, :, :3, 3] 368 | 369 | # The last column of the transformations contains the posed joints 370 | posed_joints = transforms[:, :, :3, 3] 371 | 372 | joints_homogen = F.pad(joints, [0, 0, 0, 1]) 373 | 374 | rel_transforms = transforms 
- F.pad( 375 | torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0]) 376 | 377 | return posed_joints, rel_transforms 378 | --------------------------------------------------------------------------------
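Usage sketch (not part of the repository): a minimal, hedged example of calling the SMPL-style lbs() defined in step2_implicit_template/lib/smpl/lbs.py above, using randomly generated tensors whose shapes follow the docstrings. The import path and every size below are assumptions; in practice v_template, shapedirs, posedirs, J_regressor, parents and lbs_weights would come from a SMPL model file.

import torch
from step2_implicit_template.lib.smpl.lbs import lbs   # assumed importable from the repository root

B, V, J, NB = 2, 6890, 24, 10                   # batch, vertices, joints, shape coefficients (assumed sizes)
betas = torch.zeros(B, NB)                      # neutral body shape
pose = torch.zeros(B, J * 3)                    # rest pose, axis-angle per joint
v_template = torch.rand(V, 3)                   # toy template vertices
shapedirs = torch.rand(V, 3, NB)                # shape blend-shape basis
posedirs = torch.rand((J - 1) * 9, V * 3)       # pose blend-shape basis
J_regressor = torch.rand(J, V)                  # joint regressor
parents = torch.zeros(J, dtype=torch.long)      # toy kinematic tree: every joint parented to the root
lbs_weights = torch.softmax(torch.rand(V, J), dim=-1)   # skinning weights, rows sum to 1

verts, joints, T, W, A = lbs(betas, pose, v_template, shapedirs, posedirs,
                             J_regressor, parents, lbs_weights)
print(verts.shape, joints.shape, T.shape)       # (B, V, 3), (B, J, 3), (B, V, 4, 4)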