├── .gitignore ├── HandObjectImageSynthesizer ├── common │ ├── __init__.py │ └── config.py ├── data │ ├── __init__.py │ └── preprocess.py ├── dataset │ ├── DexYCB.py │ ├── HO3D.py │ └── __init__.py ├── denoising_diffusion_pytorch │ ├── __init__.py │ ├── classifier_free_guidance_normal_wloss_grcond.py │ ├── tools.py │ └── version.py ├── experiment │ ├── dexycb_s0 │ │ └── cfg.json │ └── ho3d │ │ └── cfg.json ├── hand_recon │ ├── common │ │ ├── __init__.py │ │ ├── config.py │ │ ├── tool.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── coor_converter.py │ │ │ ├── dir.py │ │ │ ├── mano.py │ │ │ ├── manopth │ │ │ ├── .gitignore │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ ├── assets │ │ │ │ ├── mano_layer.png │ │ │ │ └── random_hand.png │ │ │ ├── environment.yml │ │ │ ├── examples │ │ │ │ ├── manopth_demo.py │ │ │ │ └── manopth_mindemo.py │ │ │ ├── mano │ │ │ │ ├── __init__.py │ │ │ │ └── webuser │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── lbs.py │ │ │ │ │ ├── posemapper.py │ │ │ │ │ ├── serialization.py │ │ │ │ │ ├── smpl_handpca_wrapper_HAND_only.py │ │ │ │ │ └── verts.py │ │ │ ├── manopth.egg-info │ │ │ │ ├── PKG-INFO │ │ │ │ ├── SOURCES.txt │ │ │ │ ├── dependency_links.txt │ │ │ │ └── top_level.txt │ │ │ ├── manopth │ │ │ │ ├── __init__.py │ │ │ │ ├── argutils.py │ │ │ │ ├── demo.py │ │ │ │ ├── manolayer.py │ │ │ │ ├── rodrigues_layer.py │ │ │ │ ├── rot6d.py │ │ │ │ ├── rotproj.py │ │ │ │ └── tensutils.py │ │ │ ├── setup.py │ │ │ └── test │ │ │ │ └── test_demo.py │ │ │ ├── preprocessing.py │ │ │ ├── render.py │ │ │ ├── seg.py │ │ │ ├── smpl.py │ │ │ ├── smplpytorch │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ ├── assets │ │ │ │ └── image.png │ │ │ ├── demo.py │ │ │ ├── display_utils.py │ │ │ ├── environment.yml │ │ │ ├── image.png │ │ │ ├── setup.py │ │ │ └── smplpytorch │ │ │ │ ├── __init__.py │ │ │ │ ├── native │ │ │ │ ├── __init__.py │ │ │ │ ├── models │ │ │ │ │ └── README.md │ │ │ │ └── webuser │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── posemapper.py │ │ │ │ │ └── serialization.py │ │ │ │ └── pytorch │ │ │ │ ├── __init__.py │ │ │ │ ├── rodrigues_layer.py │ │ │ │ ├── smpl_layer.py │ │ │ │ └── tensutils.py │ │ │ ├── transforms.py │ │ │ └── vis.py │ ├── loss │ │ ├── __init__.py │ │ └── loss.py │ └── model │ │ ├── __init__.py │ │ ├── layer.py │ │ ├── mob_recon │ │ ├── conv │ │ │ ├── __init__.py │ │ │ ├── dsconv.py │ │ │ └── spiralconv.py │ │ ├── models │ │ │ ├── densestack.py │ │ │ ├── generate_pth.py │ │ │ ├── loss.py │ │ │ ├── mobrecon_ds.py │ │ │ ├── modules.py │ │ │ ├── resnet.py │ │ │ ├── resnetstack.py │ │ │ └── transformer.py │ │ ├── template │ │ │ ├── j_reg.npy │ │ │ ├── right_faces.npy │ │ │ ├── template.ply │ │ │ ├── template_body.ply │ │ │ ├── transform.pkl │ │ │ └── transform_body.pkl │ │ └── utils │ │ │ ├── alter_pretrain.py │ │ │ ├── augmentation.py │ │ │ ├── draw3d.py │ │ │ ├── fh_utils.py │ │ │ ├── generate_spiral_seq.py │ │ │ ├── mesh_sampling.py │ │ │ ├── preprocessing.py │ │ │ ├── progress │ │ │ ├── __init__.py │ │ │ ├── bar.py │ │ │ ├── counter.py │ │ │ └── spinner.py │ │ │ ├── read.py │ │ │ ├── smpl.py │ │ │ ├── test.py │ │ │ ├── transforms.py │ │ │ ├── utils.py │ │ │ ├── vis.py │ │ │ ├── warmup_scheduler.py │ │ │ ├── writer.py │ │ │ └── zimeval.py │ │ ├── model.py │ │ └── module.py ├── inference_dexycb.py ├── inference_ho3d.py ├── train.py └── trainer.py ├── HandReconstruction ├── common │ ├── __init__.py │ ├── config.py │ ├── tool.py │ └── utils │ │ ├── __init__.py │ │ ├── coor_converter.py │ │ ├── dir.py │ │ ├── mano.py │ │ ├── manopth │ │ ├── .gitignore │ │ ├── LICENSE │ │ 
├── README.md │ │ ├── assets │ │ │ ├── mano_layer.png │ │ │ └── random_hand.png │ │ ├── environment.yml │ │ ├── examples │ │ │ ├── manopth_demo.py │ │ │ └── manopth_mindemo.py │ │ ├── mano │ │ │ ├── __init__.py │ │ │ └── webuser │ │ │ │ ├── __init__.py │ │ │ │ ├── lbs.py │ │ │ │ ├── posemapper.py │ │ │ │ ├── serialization.py │ │ │ │ ├── smpl_handpca_wrapper_HAND_only.py │ │ │ │ └── verts.py │ │ ├── manopth.egg-info │ │ │ ├── PKG-INFO │ │ │ ├── SOURCES.txt │ │ │ ├── dependency_links.txt │ │ │ └── top_level.txt │ │ ├── manopth │ │ │ ├── __init__.py │ │ │ ├── argutils.py │ │ │ ├── demo.py │ │ │ ├── manolayer.py │ │ │ ├── rodrigues_layer.py │ │ │ ├── rot6d.py │ │ │ ├── rotproj.py │ │ │ └── tensutils.py │ │ ├── setup.py │ │ └── test │ │ │ └── test_demo.py │ │ ├── preprocessing.py │ │ ├── render.py │ │ ├── seg.py │ │ ├── smpl.py │ │ ├── smplpytorch │ │ ├── LICENSE │ │ ├── README.md │ │ ├── assets │ │ │ └── image.png │ │ ├── demo.py │ │ ├── display_utils.py │ │ ├── environment.yml │ │ ├── image.png │ │ ├── setup.py │ │ └── smplpytorch │ │ │ ├── __init__.py │ │ │ ├── native │ │ │ ├── __init__.py │ │ │ ├── models │ │ │ │ └── README.md │ │ │ └── webuser │ │ │ │ ├── __init__.py │ │ │ │ ├── posemapper.py │ │ │ │ └── serialization.py │ │ │ └── pytorch │ │ │ ├── __init__.py │ │ │ ├── rodrigues_layer.py │ │ │ ├── smpl_layer.py │ │ │ └── tensutils.py │ │ ├── transforms.py │ │ └── vis.py ├── data_loader │ ├── DEX_YCB.py │ ├── HO3D.py │ ├── __init__.py │ ├── data_loader.py │ └── transforms.py ├── experiment │ ├── dexycb_s0 │ │ ├── h2onet │ │ │ └── cfg.json │ │ ├── handoccnet │ │ │ └── cfg.json │ │ └── mobrecon │ │ │ └── cfg.json │ ├── dexycb_s1 │ │ ├── h2onet │ │ │ └── cfg.json │ │ ├── handoccnet │ │ │ └── cfg.json │ │ └── mobrecon │ │ │ └── cfg.json │ └── ho3d │ │ ├── h2onet │ │ └── cfg.json │ │ ├── handoccnet │ │ └── cfg.json │ │ └── mobrecon │ │ └── cfg.json ├── loss │ ├── __init__.py │ └── loss.py ├── model │ ├── __init__.py │ ├── h2o_net │ │ ├── conv │ │ │ ├── __init__.py │ │ │ ├── dsconv.py │ │ │ └── spiralconv.py │ │ ├── models │ │ │ ├── densestack.py │ │ │ └── modules.py │ │ ├── template │ │ │ ├── j_reg.npy │ │ │ ├── right_faces.npy │ │ │ ├── template.ply │ │ │ ├── template_body.ply │ │ │ ├── transform.pkl │ │ │ └── transform_body.pkl │ │ └── utils │ │ │ ├── alter_pretrain.py │ │ │ ├── augmentation.py │ │ │ ├── draw3d.py │ │ │ ├── fh_utils.py │ │ │ ├── generate_spiral_seq.py │ │ │ ├── mesh_sampling.py │ │ │ ├── preprocessing.py │ │ │ ├── progress │ │ │ ├── __init__.py │ │ │ ├── bar.py │ │ │ ├── counter.py │ │ │ └── spinner.py │ │ │ ├── read.py │ │ │ ├── smpl.py │ │ │ ├── test.py │ │ │ ├── transforms.py │ │ │ ├── utils.py │ │ │ ├── vis.py │ │ │ ├── warmup_scheduler.py │ │ │ ├── writer.py │ │ │ └── zimeval.py │ ├── hand_occ_net │ │ ├── backbone.py │ │ ├── cbam.py │ │ ├── hand_head.py │ │ ├── mano_head.py │ │ ├── regressor.py │ │ └── transformer.py │ ├── layer.py │ ├── mob_recon │ │ ├── conv │ │ │ ├── __init__.py │ │ │ ├── dsconv.py │ │ │ └── spiralconv.py │ │ ├── models │ │ │ ├── densestack.py │ │ │ ├── generate_pth.py │ │ │ ├── loss.py │ │ │ ├── mobrecon_ds.py │ │ │ ├── modules.py │ │ │ ├── resnet.py │ │ │ ├── resnetstack.py │ │ │ └── transformer.py │ │ ├── template │ │ │ ├── j_reg.npy │ │ │ ├── right_faces.npy │ │ │ ├── template.ply │ │ │ ├── template_body.ply │ │ │ ├── transform.pkl │ │ │ └── transform_body.pkl │ │ └── utils │ │ │ ├── alter_pretrain.py │ │ │ ├── augmentation.py │ │ │ ├── draw3d.py │ │ │ ├── fh_utils.py │ │ │ ├── generate_spiral_seq.py │ │ │ ├── mesh_sampling.py │ │ │ ├── 
preprocessing.py │ │ │ ├── progress │ │ │ ├── __init__.py │ │ │ ├── bar.py │ │ │ ├── counter.py │ │ │ └── spinner.py │ │ │ ├── read.py │ │ │ ├── smpl.py │ │ │ ├── test.py │ │ │ ├── transforms.py │ │ │ ├── utils.py │ │ │ ├── vis.py │ │ │ ├── warmup_scheduler.py │ │ │ ├── writer.py │ │ │ └── zimeval.py │ ├── model.py │ └── module.py ├── optimizer │ ├── __init__.py │ └── optimizer.py ├── test.py └── train.py ├── LICENSE ├── NovelConditionCreator ├── DexGraspNet │ ├── asset_process │ │ ├── README.md │ │ ├── decompose.py │ │ ├── decompose_list.py │ │ ├── extract.py │ │ ├── manifold.py │ │ ├── normalize.py │ │ ├── poolrun.py │ │ └── utils │ │ │ └── extract_utils.py │ └── grasp_generation │ │ ├── main.py │ │ ├── mano │ │ └── contact_indices.json │ │ ├── pose_validation.py │ │ ├── scripts │ │ └── generate_object_pose.py │ │ ├── tests │ │ ├── visualize_hand_model.py │ │ ├── visualize_initialization.py │ │ ├── visualize_object_pose.py │ │ └── visualize_result.py │ │ └── utils │ │ ├── energy.py │ │ ├── hand_model.py │ │ ├── initializations.py │ │ ├── logger.py │ │ ├── object_model.py │ │ └── optimizer.py ├── dexycb │ ├── __init__.py │ ├── dexycb_preprocess.py │ ├── mano.py │ ├── manopth │ │ ├── examples │ │ │ ├── manopth_demo.py │ │ │ └── manopth_mindemo.py │ │ ├── mano │ │ │ ├── __init__.py │ │ │ └── webuser │ │ │ │ ├── __init__.py │ │ │ │ ├── lbs.py │ │ │ │ ├── posemapper.py │ │ │ │ ├── serialization.py │ │ │ │ ├── smpl_handpca_wrapper_HAND_only.py │ │ │ │ └── verts.py │ │ └── manopth │ │ │ ├── __init__.py │ │ │ ├── argutils.py │ │ │ ├── demo.py │ │ │ ├── manolayer.py │ │ │ ├── rodrigues_layer.py │ │ │ ├── rot6d.py │ │ │ ├── rotproj.py │ │ │ └── tensutils.py │ ├── preprocessing.py │ ├── seg.py │ ├── shaders │ │ ├── mesh.frag │ │ └── mesh.vert │ └── vis.py └── ho3d │ ├── __init__.py │ ├── ho3d_preprocess.py │ ├── mano.py │ ├── manopth │ ├── .gitignore │ ├── LICENSE │ ├── README.md │ ├── environment.yml │ ├── examples │ │ ├── manopth_demo.py │ │ └── manopth_mindemo.py │ ├── mano │ │ ├── __init__.py │ │ └── webuser │ │ │ ├── __init__.py │ │ │ ├── lbs.py │ │ │ ├── posemapper.py │ │ │ ├── serialization.py │ │ │ ├── smpl_handpca_wrapper_HAND_only.py │ │ │ └── verts.py │ ├── manopth │ │ ├── __init__.py │ │ ├── argutils.py │ │ ├── demo.py │ │ ├── manolayer.py │ │ ├── rodrigues_layer.py │ │ ├── rot6d.py │ │ ├── rotproj.py │ │ └── tensutils.py │ ├── setup.py │ └── test │ │ └── test_demo.py │ ├── preprocessing.py │ ├── seg.py │ ├── shaders │ ├── mesh.frag │ └── mesh.vert │ └── vis.py ├── README.md ├── assets └── poster.png └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | MANO_LEFT.pkl 3 | MANO_RIGHT.pkl 4 | *.pth 5 | *.pt 6 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/common/__init__.py -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/common/config.py: -------------------------------------------------------------------------------- 1 | import json 2 | from easydict import EasyDict as edict 3 | 4 | 5 | class Config(): 6 | 7 | def __init__(self, json_path): 8 | with open(json_path) as f: 9 | self.cfg = json.load(f) 10 | self.cfg = edict(self.cfg) 11 | 12 | def save(self, 
json_path): 13 | with open(json_path, "w") as f: 14 | json.dump(self.cfg, f, indent=4) 15 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/data/__init__.py -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/dataset/__init__.py -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/denoising_diffusion_pytorch/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/denoising_diffusion_pytorch/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '1.5.4' 2 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/experiment/dexycb_s0/cfg.json: -------------------------------------------------------------------------------- 1 | { 2 | "base": { 3 | "exp_name": "dexycb_s0", 4 | "model_dir": "experiment/dexycb_s0" 5 | }, 6 | "data": { 7 | "image_size": 128, 8 | "timesteps": 1000, 9 | "dataset_name": "dexycb", 10 | "data_split": "s0_train", 11 | "version": 2 12 | }, 13 | "model": { 14 | "name": "cf_normal_cond_v2_wloss_grcond", 15 | "beta_schedule": "linear" 16 | }, 17 | "loss": { 18 | "name": "l1", 19 | "objective": "pred_x0" 20 | }, 21 | "train": { 22 | "num_steps": 700000, 23 | "batch_size": 256, 24 | "gradient_accumulate_every": 2, 25 | "ema_decay": 0.995 26 | }, 27 | "test": { 28 | "num_samples": 16, 29 | "sampling_timesteps": 1000 30 | }, 31 | "optimizer": { 32 | "lr": 8e-5 33 | }, 34 | "summary": { 35 | "save_and_sample_every": 1000 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/experiment/ho3d/cfg.json: -------------------------------------------------------------------------------- 1 | { 2 | "base": { 3 | "exp_name": "ho3d", 4 | "model_dir": "experiment/ho3d" 5 | }, 6 | "data": { 7 | "image_size": 128, 8 | "timesteps": 1000, 9 | "dataset_name": "ho3d", 10 | "data_split": "train", 11 | "version": 2 12 | }, 13 | "model": { 14 | "name": "cf_normal_cond_v2_wloss_grcond", 15 | "beta_schedule": "linear" 16 | }, 17 | "loss": { 18 | "name": "l1", 19 | "objective": "pred_x0" 20 | }, 21 | "train": { 22 | "num_steps": 700000, 23 | "batch_size": 256, 24 | "gradient_accumulate_every": 2, 25 | "ema_decay": 0.995 26 | }, 27 | "test": { 28 | "num_samples": 16, 29 | "sampling_timesteps": 1000 30 | }, 31 | "optimizer": { 32 | "lr": 8e-5 33 | }, 34 | "summary": { 35 | "save_and_sample_every": 1000 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/common/__init__.py
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/config.py:
--------------------------------------------------------------------------------
1 | import json
2 | from easydict import EasyDict as edict
3 |
4 |
5 | class Config():
6 |
7 |     def __init__(self, json_path):
8 |         with open(json_path) as f:
9 |             self.cfg = json.load(f)
10 |             self.cfg = edict(self.cfg)
11 |
12 |     def save(self, json_path):
13 |         with open(json_path, 'w') as f:
14 |             json.dump(self.cfg, f, indent=4)
15 |
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/common/utils/__init__.py
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/coor_converter.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 |
5 | def row_col2theta_phi(row, col, width, height):
6 |     theta = ((row + 0.5) / height) * np.pi
7 |     phi = (0.5 - (col + 0.5) / width) * 2.0 * np.pi
8 |     # note: a second, sign-flipped recomputation of phi used to overwrite the line above; removed, since only the formula above is the inverse of theta_phi2row_col_array below
9 |     return theta, phi
10 |
11 |
12 | def theta_phi2row_col_array(theta, phi, width, height):
13 |     row = (theta / np.pi) * height - 0.5
14 |     col = (0.5 - phi / (2.0 * np.pi)) * width - 0.5
15 |     row = row.astype(int)
16 |     col = col.astype(int)
17 |     row = np.clip(row, 0, height - 1)  # make sure the lights do not sink to bottom
18 |     col = col % width  # handle the negative cols
19 |     return row, col
20 |
21 |
22 | def np_theta_phi2xyz(theta, phi):
23 |     x = np.sin(theta) * np.sin(phi)
24 |     y = np.cos(theta)
25 |     z = np.sin(theta) * np.cos(phi)
26 |     return np.array((x, y, z))
27 |
28 |
29 | def np_xyz2theta_phi(x, y, z):
30 |     theta = np.arccos(y)
31 |     phi = np.arctan2(x, z)  # quadrant awareness
32 |     return theta, phi
33 |
34 |
35 | def torch_theta_phi2xyz(theta, phi):
36 |     x = torch.sin(theta) * torch.sin(phi)  # torch ops (was np.sin/np.cos) so tensor inputs keep their device and grad
37 |     y = torch.cos(theta)
38 |     z = torch.sin(theta) * torch.cos(phi)
39 |     return torch.stack((x, y, z)).cuda()
40 |
41 |
42 | def torch_xyz2theta_phi(x, y, z):
43 |     theta = torch.arccos(y)
44 |     phi = torch.arctan2(x, z)  # quadrant awareness
45 |     return theta, phi
46 |
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/dir.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 |
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/manopth/.gitignore:
--------------------------------------------------------------------------------
1 | *.sw*
2 | *.bak
3 | *_bak.py
4 |
5 | .cache/
6 | __pycache__/
7 | build/
8 | dist/
9 | manopth_hassony2.egg-info/
10 | mano/models/
11 | assets/mano_layer.svg
12 |
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/manopth/assets/mano_layer.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/common/utils/manopth/assets/mano_layer.png -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/assets/random_hand.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/common/utils/manopth/assets/random_hand.png -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/environment.yml: -------------------------------------------------------------------------------- 1 | name: manopth 2 | 3 | dependencies: 4 | - opencv 5 | - python=3.7 6 | - matplotlib 7 | - numpy 8 | - pytorch 9 | - tqdm 10 | - git 11 | - pip: 12 | - git+https://github.com/hassony2/chumpy.git 13 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/examples/manopth_mindemo.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from manopth.manolayer import ManoLayer 3 | from manopth import demo 4 | 5 | batch_size = 10 6 | # Select number of principal components for pose space 7 | ncomps = 6 8 | 9 | # Initialize MANO layer 10 | mano_layer = ManoLayer( 11 | mano_root='mano/models', use_pca=True, ncomps=ncomps, flat_hand_mean=False) 12 | 13 | # Generate random shape parameters 14 | random_shape = torch.rand(batch_size, 10) 15 | # Generate random pose parameters, including 3 values for global axis-angle rotation 16 | random_pose = torch.rand(batch_size, ncomps + 3) 17 | 18 | # Forward pass through MANO layer 19 | hand_verts, hand_joints = mano_layer(random_pose, random_shape) 20 | demo.display_hand({ 21 | 'verts': hand_verts, 22 | 'joints': hand_joints 23 | }, 24 | mano_faces=mano_layer.th_faces) 25 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/mano/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/common/utils/manopth/mano/__init__.py -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/mano/webuser/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/common/utils/manopth/mano/webuser/__init__.py -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/mano/webuser/posemapper.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright 2017 Javier Romero, Dimitrios Tzionas, Michael J Black and the Max Planck Gesellschaft. All rights reserved. 3 | This software is provided for research purposes only. 
4 | By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license
5 |
6 | More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de.
7 | For comments or questions, please email us at: mano@tue.mpg.de
8 |
9 |
10 | About this file:
11 | ================
12 | This file defines the pose-mapping utilities of the MANO model.
13 |
14 | Modules included:
15 | - posemap:
16 | given a pose-mapping name (e.g. 'lrotmin'), returns the function that maps
17 | pose parameters to pose blend-shape coefficients.
18 |
19 | '''
20 |
21 |
22 | import chumpy as ch
23 | import numpy as np
24 | import cv2
25 |
26 |
27 | class Rodrigues(ch.Ch):
28 |     dterms = 'rt'
29 |
30 |     def compute_r(self):
31 |         return cv2.Rodrigues(self.rt.r)[0]
32 |
33 |     def compute_dr_wrt(self, wrt):
34 |         if wrt is self.rt:
35 |             return cv2.Rodrigues(self.rt.r)[1].T
36 |
37 |
38 | def lrotmin(p):
39 |     if isinstance(p, np.ndarray):
40 |         p = p.ravel()[3:]
41 |         return np.concatenate(
42 |             [(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel()
43 |              for pp in p.reshape((-1, 3))]).ravel()
44 |     if p.ndim != 2 or p.shape[1] != 3:
45 |         p = p.reshape((-1, 3))
46 |     p = p[1:]
47 |     return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel()
48 |                            for pp in p]).ravel()
49 |
50 |
51 | def posemap(s):
52 |     if s == 'lrotmin':
53 |         return lrotmin
54 |     else:
55 |         raise Exception('Unknown posemapping: %s' % (str(s), ))
56 |
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/manopth/manopth.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------
1 | README.md
2 | setup.py
3 | mano/__init__.py
4 | mano/webuser/__init__.py
5 | mano/webuser/lbs.py
6 | mano/webuser/posemapper.py
7 | mano/webuser/serialization.py
8 | mano/webuser/smpl_handpca_wrapper_HAND_only.py
9 | mano/webuser/verts.py
10 | manopth/__init__.py
11 | manopth/argutils.py
12 | manopth/demo.py
13 | manopth/manolayer.py
14 | manopth/rodrigues_layer.py
15 | manopth/rot6d.py
16 | manopth/rotproj.py
17 | manopth/tensutils.py
18 | manopth.egg-info/PKG-INFO
19 | manopth.egg-info/SOURCES.txt
20 | manopth.egg-info/dependency_links.txt
21 | manopth.egg-info/top_level.txt
22 | test/test_demo.py
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/manopth/manopth.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/manopth/manopth.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | mano
2 | manopth
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/manopth/manopth/__init__.py:
--------------------------------------------------------------------------------
1 | name = 'manopth'
2 |
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/manopth/manopth/argutils.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import os
3 | import pickle
4 | import subprocess
5 | import sys
6 |
7 |
8 | def print_args(args):
9 |     opts = vars(args)
10 |     print('======= Options ========')
11 |     for k, v
in sorted(opts.items()): 12 | print('{}: {}'.format(k, v)) 13 | print('========================') 14 | 15 | 16 | def save_args(args, save_folder, opt_prefix='opt', verbose=True): 17 | opts = vars(args) 18 | # Create checkpoint folder 19 | if not os.path.exists(save_folder): 20 | os.makedirs(save_folder, exist_ok=True) 21 | 22 | # Save options 23 | opt_filename = '{}.txt'.format(opt_prefix) 24 | opt_path = os.path.join(save_folder, opt_filename) 25 | with open(opt_path, 'a') as opt_file: 26 | opt_file.write('====== Options ======\n') 27 | for k, v in sorted(opts.items()): 28 | opt_file.write( 29 | '{option}: {value}\n'.format(option=str(k), value=str(v))) 30 | opt_file.write('=====================\n') 31 | opt_file.write('launched {} at {}\n'.format( 32 | str(sys.argv[0]), str(datetime.datetime.now()))) 33 | 34 | # Add git info 35 | label = subprocess.check_output(["git", "describe", 36 | "--always"]).strip() 37 | if subprocess.call( 38 | ["git", "branch"], 39 | stderr=subprocess.STDOUT, 40 | stdout=open(os.devnull, 'w')) == 0: 41 | opt_file.write('=== Git info ====\n') 42 | opt_file.write('{}\n'.format(label)) 43 | commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']) 44 | opt_file.write('commit : {}\n'.format(commit.strip())) 45 | 46 | opt_picklename = '{}.pkl'.format(opt_prefix) 47 | opt_picklepath = os.path.join(save_folder, opt_picklename) 48 | with open(opt_picklepath, 'wb') as opt_file: 49 | pickle.dump(opts, opt_file) 50 | if verbose: 51 | print('Saved options to {}'.format(opt_path)) 52 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/manopth/demo.py: -------------------------------------------------------------------------------- 1 | from matplotlib import pyplot as plt 2 | from mpl_toolkits.mplot3d import Axes3D 3 | from mpl_toolkits.mplot3d.art3d import Poly3DCollection 4 | import numpy as np 5 | import torch 6 | 7 | from manopth.manolayer import ManoLayer 8 | 9 | 10 | def generate_random_hand(batch_size=1, ncomps=6, mano_root='mano/models'): 11 | nfull_comps = ncomps + 3 # Add global orientation dims to PCA 12 | random_pcapose = torch.rand(batch_size, nfull_comps) 13 | mano_layer = ManoLayer(mano_root=mano_root) 14 | verts, joints = mano_layer(random_pcapose) 15 | return {'verts': verts, 'joints': joints, 'faces': mano_layer.th_faces} 16 | 17 | 18 | def display_hand(hand_info, mano_faces=None, ax=None, alpha=0.2, batch_idx=0, show=True): 19 | """ 20 | Displays hand batch_idx in batch of hand_info, hand_info as returned by 21 | generate_random_hand 22 | """ 23 | if ax is None: 24 | fig = plt.figure() 25 | ax = fig.add_subplot(111, projection='3d') 26 | verts, joints = hand_info['verts'][batch_idx], hand_info['joints'][ 27 | batch_idx] 28 | if mano_faces is None: 29 | ax.scatter(verts[:, 0], verts[:, 1], verts[:, 2], alpha=0.1) 30 | else: 31 | mesh = Poly3DCollection(verts[mano_faces], alpha=alpha) 32 | face_color = (141 / 255, 184 / 255, 226 / 255) 33 | edge_color = (50 / 255, 50 / 255, 50 / 255) 34 | mesh.set_edgecolor(edge_color) 35 | mesh.set_facecolor(face_color) 36 | ax.add_collection3d(mesh) 37 | ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], color='r') 38 | cam_equal_aspect_3d(ax, verts.numpy()) 39 | if show: 40 | plt.show() 41 | 42 | 43 | def cam_equal_aspect_3d(ax, verts, flip_x=False): 44 | """ 45 | Centers view on cuboid containing hand and flips y and z axis 46 | and fixes azimuth 47 | """ 48 | extents = np.stack([verts.min(0), verts.max(0)], axis=1) 49 | sz = 
extents[:, 1] - extents[:, 0] 50 | centers = np.mean(extents, axis=1) 51 | maxsize = max(abs(sz)) 52 | r = maxsize / 2 53 | if flip_x: 54 | ax.set_xlim(centers[0] + r, centers[0] - r) 55 | else: 56 | ax.set_xlim(centers[0] - r, centers[0] + r) 57 | # Invert y and z axis 58 | ax.set_ylim(centers[1] + r, centers[1] - r) 59 | ax.set_zlim(centers[2] + r, centers[2] - r) 60 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/manopth/rot6d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def compute_rotation_matrix_from_ortho6d(poses): 5 | """ 6 | Code from 7 | https://github.com/papagina/RotationContinuity 8 | On the Continuity of Rotation Representations in Neural Networks 9 | Zhou et al. CVPR19 10 | https://zhouyisjtu.github.io/project_rotation/rotation.html 11 | """ 12 | x_raw = poses[:, 0:3] # batch*3 13 | y_raw = poses[:, 3:6] # batch*3 14 | 15 | x = normalize_vector(x_raw) # batch*3 16 | z = cross_product(x, y_raw) # batch*3 17 | z = normalize_vector(z) # batch*3 18 | y = cross_product(z, x) # batch*3 19 | 20 | x = x.view(-1, 3, 1) 21 | y = y.view(-1, 3, 1) 22 | z = z.view(-1, 3, 1) 23 | matrix = torch.cat((x, y, z), 2) # batch*3*3 24 | return matrix 25 | 26 | def robust_compute_rotation_matrix_from_ortho6d(poses): 27 | """ 28 | Instead of making 2nd vector orthogonal to first 29 | create a base that takes into account the two predicted 30 | directions equally 31 | """ 32 | x_raw = poses[:, 0:3] # batch*3 33 | y_raw = poses[:, 3:6] # batch*3 34 | 35 | x = normalize_vector(x_raw) # batch*3 36 | y = normalize_vector(y_raw) # batch*3 37 | middle = normalize_vector(x + y) 38 | orthmid = normalize_vector(x - y) 39 | x = normalize_vector(middle + orthmid) 40 | y = normalize_vector(middle - orthmid) 41 | # Their scalar product should be small ! 42 | # assert torch.einsum("ij,ij->i", [x, y]).abs().max() < 0.00001 43 | z = normalize_vector(cross_product(x, y)) 44 | 45 | x = x.view(-1, 3, 1) 46 | y = y.view(-1, 3, 1) 47 | z = z.view(-1, 3, 1) 48 | matrix = torch.cat((x, y, z), 2) # batch*3*3 49 | # Check for reflection in matrix ! 
If found, flip last vector TODO 50 | assert (torch.stack([torch.det(mat) for mat in matrix ])< 0).sum() == 0 51 | return matrix 52 | 53 | 54 | def normalize_vector(v): 55 | batch = v.shape[0] 56 | v_mag = torch.sqrt(v.pow(2).sum(1)) # batch 57 | v_mag = torch.max(v_mag, v.new([1e-8])) 58 | v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1]) 59 | v = v/v_mag 60 | return v 61 | 62 | 63 | def cross_product(u, v): 64 | batch = u.shape[0] 65 | i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1] 66 | j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2] 67 | k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0] 68 | 69 | out = torch.cat((i.view(batch, 1), j.view(batch, 1), k.view(batch, 1)), 1) 70 | 71 | return out 72 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/manopth/rotproj.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def batch_rotprojs(batches_rotmats): 5 | proj_rotmats = [] 6 | for batch_idx, batch_rotmats in enumerate(batches_rotmats): 7 | proj_batch_rotmats = [] 8 | for rot_idx, rotmat in enumerate(batch_rotmats): 9 | # GPU implementation of svd is VERY slow 10 | # ~ 2 10^-3 per hit vs 5 10^-5 on cpu 11 | U, S, V = rotmat.cpu().svd() 12 | rotmat = torch.matmul(U, V.transpose(0, 1)) 13 | orth_det = rotmat.det() 14 | # Remove reflection 15 | if orth_det < 0: 16 | rotmat[:, 2] = -1 * rotmat[:, 2] 17 | 18 | rotmat = rotmat.cuda() 19 | proj_batch_rotmats.append(rotmat) 20 | proj_rotmats.append(torch.stack(proj_batch_rotmats)) 21 | return torch.stack(proj_rotmats) 22 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/manopth/tensutils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from hand_recon.common.utils.manopth.manopth import rodrigues_layer 4 | 5 | 6 | def th_posemap_axisang(pose_vectors): 7 | rot_nb = int(pose_vectors.shape[1] / 3) 8 | pose_vec_reshaped = pose_vectors.contiguous().view(-1, 3) 9 | rot_mats = rodrigues_layer.batch_rodrigues(pose_vec_reshaped) 10 | rot_mats = rot_mats.view(pose_vectors.shape[0], rot_nb * 9) 11 | pose_maps = subtract_flat_id(rot_mats) 12 | return pose_maps, rot_mats 13 | 14 | 15 | def th_with_zeros(tensor): 16 | batch_size = tensor.shape[0] 17 | padding = tensor.new([0.0, 0.0, 0.0, 1.0]) 18 | padding.requires_grad = False 19 | 20 | concat_list = [tensor, padding.view(1, 1, 4).repeat(batch_size, 1, 1)] 21 | cat_res = torch.cat(concat_list, 1) 22 | return cat_res 23 | 24 | 25 | def th_pack(tensor): 26 | batch_size = tensor.shape[0] 27 | padding = tensor.new_zeros((batch_size, 4, 3)) 28 | padding.requires_grad = False 29 | pack_list = [padding, tensor] 30 | pack_res = torch.cat(pack_list, 2) 31 | return pack_res 32 | 33 | 34 | def subtract_flat_id(rot_mats): 35 | # Subtracts identity as a flattened tensor 36 | rot_nb = int(rot_mats.shape[1] / 9) 37 | id_flat = torch.eye(3, dtype=rot_mats.dtype, device=rot_mats.device).view(1, 9).repeat(rot_mats.shape[0], rot_nb) 38 | # id_flat.requires_grad = False 39 | results = rot_mats - id_flat 40 | return results 41 | 42 | 43 | def make_list(tensor): 44 | # type: (List[int]) -> List[int] 45 | return tensor 46 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/setup.py: 
-------------------------------------------------------------------------------- 1 | from setuptools import find_packages, setup 2 | import warnings 3 | 4 | DEPENDENCY_PACKAGE_NAMES = ["matplotlib", "torch", "tqdm", "numpy", "cv2", 5 | "chumpy"] 6 | 7 | 8 | def check_dependencies(): 9 | missing_dependencies = [] 10 | for package_name in DEPENDENCY_PACKAGE_NAMES: 11 | try: 12 | __import__(package_name) 13 | except ImportError: 14 | missing_dependencies.append(package_name) 15 | 16 | if missing_dependencies: 17 | warnings.warn( 18 | 'Missing dependencies: {}. We recommend you follow ' 19 | 'the installation instructions at ' 20 | 'https://github.com/hassony2/manopth#installation'.format( 21 | missing_dependencies)) 22 | 23 | 24 | with open("README.md", "r") as fh: 25 | long_description = fh.read() 26 | 27 | check_dependencies() 28 | 29 | setup( 30 | name="manopth", 31 | version="0.0.1", 32 | author="Yana Hasson", 33 | author_email="yana.hasson.inria@gmail.com", 34 | packages=find_packages(exclude=('tests',)), 35 | python_requires=">=3.5.0", 36 | description="PyTorch mano layer", 37 | long_description=long_description, 38 | long_description_content_type="text/markdown", 39 | url="https://github.com/hassony2/manopth", 40 | classifiers=[ 41 | "Programming Language :: Python :: 3", 42 | "License :: OSI Approved :: GNU GENERAL PUBLIC LICENSE", 43 | "Operating System :: OS Independent", 44 | ], 45 | ) 46 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/manopth/test/test_demo.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from manopth.demo import generate_random_hand 4 | 5 | 6 | def test_generate_random_hand(): 7 | batch_size = 3 8 | hand_info = generate_random_hand(batch_size=batch_size, ncomps=6) 9 | verts = hand_info['verts'] 10 | joints = hand_info['joints'] 11 | assert verts.shape == (batch_size, 778, 3) 12 | assert joints.shape == (batch_size, 21, 3) 13 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/smpl.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import os.path as osp 4 | import json 5 | from config import cfg 6 | 7 | import sys 8 | sys.path.insert(0, cfg.smpl_path) 9 | from smplpytorch.pytorch.smpl_layer import SMPL_Layer 10 | 11 | class SMPL(object): 12 | def __init__(self): 13 | self.layer = {'neutral': self.get_layer(), 'male': self.get_layer('male'), 'female': self.get_layer('female')} 14 | self.vertex_num = 6890 15 | self.face = self.layer['neutral'].th_faces.numpy() 16 | self.joint_regressor = self.layer['neutral'].th_J_regressor.numpy() 17 | 18 | # add nose, L/R eye, L/R ear 19 | self.face_kps_vertex = (331, 2802, 6262, 3489, 3990) # mesh vertex idx 20 | nose_onehot = np.array([1 if i == 331 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) 21 | left_eye_onehot = np.array([1 if i == 2802 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) 22 | right_eye_onehot = np.array([1 if i == 6262 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) 23 | left_ear_onehot = np.array([1 if i == 3489 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) 24 | right_ear_onehot = np.array([1 if i == 3990 else 0 for i in 
range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) 25 | self.joint_regressor = np.concatenate((self.joint_regressor, nose_onehot, left_eye_onehot, right_eye_onehot, left_ear_onehot, right_ear_onehot)) 26 | 27 | self.joint_num = 29 # original: 24. manually add nose, L/R eye, L/R ear 28 | self.joints_name = ('Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', 'Neck', 'L_Thorax', 'R_Thorax', 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear') 29 | self.flip_pairs = ( (1,2), (4,5), (7,8), (10,11), (13,14), (16,17), (18,19), (20,21), (22,23) , (25,26), (27,28) ) 30 | self.skeleton = ( (0,1), (1,4), (4,7), (7,10), (0,2), (2,5), (5,8), (8,11), (0,3), (3,6), (6,9), (9,14), (14,17), (17,19), (19, 21), (21,23), (9,13), (13,16), (16,18), (18,20), (20,22), (9,12), (12,24), (24,15), (24,25), (24,26), (25,27), (26,28) ) 31 | self.root_joint_idx = self.joints_name.index('Pelvis') 32 | 33 | def get_layer(self, gender='neutral'): 34 | return SMPL_Layer(gender=gender, model_root=cfg.smpl_path + '/smplpytorch/native/models') 35 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/assets/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/assets/image.png -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/demo.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from smplpytorch.pytorch.smpl_layer import SMPL_Layer 4 | from display_utils import display_model 5 | 6 | 7 | if __name__ == '__main__': 8 | cuda = False 9 | batch_size = 1 10 | 11 | # Create the SMPL layer 12 | smpl_layer = SMPL_Layer( 13 | center_idx=0, 14 | gender='neutral', 15 | model_root='smplpytorch/native/models') 16 | 17 | # Generate random pose and shape parameters 18 | pose_params = torch.rand(batch_size, 72) * 0.2 19 | shape_params = torch.rand(batch_size, 10) * 0.03 20 | 21 | # GPU mode 22 | if cuda: 23 | pose_params = pose_params.cuda() 24 | shape_params = shape_params.cuda() 25 | smpl_layer.cuda() 26 | 27 | # Forward from the SMPL layer 28 | verts, Jtr = smpl_layer(pose_params, th_betas=shape_params) 29 | 30 | # Draw output vertices and joints 31 | display_model( 32 | {'verts': verts.cpu().detach(), 33 | 'joints': Jtr.cpu().detach()}, 34 | model_faces=smpl_layer.th_faces, 35 | with_joints=True, 36 | kintree_table=smpl_layer.kintree_table, 37 | savepath='image.png', 38 | show=True) 39 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/environment.yml: -------------------------------------------------------------------------------- 1 | name: smplpytorch 2 | 3 | dependencies: 4 | - opencv 5 | - python=3.7 6 | - matplotlib 7 | - numpy 8 | - pytorch 9 | - pip 10 | - pip: 11 | - git+https://github.com/hassony2/chumpy.git 12 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/image.png: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/image.png
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 |
3 | with open("README.md", "r") as fh:
4 |     long_description = fh.read()
5 |
6 | REQUIREMENTS = [
7 |     "opencv-python",
8 |     "matplotlib",
9 |     "numpy",
10 |     "torch",
11 |     "chumpy @ git+ssh://git@github.com/hassony2/chumpy"]
12 |
13 | setuptools.setup(
14 |     name="smplpytorch",
15 |     version="0.0.1",
16 |     author="Gul Varol",
17 |     author_email="gulvarols@gmail.com",
18 |     python_requires=">=3.5.0",
19 |     install_requires=REQUIREMENTS,
20 |     description="SMPL human body model as a differentiable PyTorch layer",
21 |     long_description=long_description,
22 |     long_description_content_type="text/markdown",
23 |     url="https://github.com/gulvarol/smplpytorch",
24 |     packages=setuptools.find_packages(),
25 |     classifiers=[
26 |         "Programming Language :: Python :: 3",
27 |         "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
28 |         "Operating System :: OS Independent",
29 |     ],
30 | )
31 |
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/smplpytorch/__init__.py:
--------------------------------------------------------------------------------
1 | name = "smplpytorch"
2 |
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/smplpytorch/native/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/smplpytorch/native/__init__.py
--------------------------------------------------------------------------------
/HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/smplpytorch/native/models/README.md:
--------------------------------------------------------------------------------
1 | Copy the SMPL .pkl model files into this folder (smpl_layer.py loads them from here via its model_root argument, and smpl.py builds the neutral, male and female layers).
2 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/smplpytorch/native/webuser/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/smplpytorch/native/webuser/__init__.py -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/smplpytorch/native/webuser/posemapper.py: -------------------------------------------------------------------------------- 1 | import chumpy as ch 2 | import numpy as np 3 | import cv2 4 | 5 | 6 | class Rodrigues(ch.Ch): 7 | dterms = 'rt' 8 | 9 | def compute_r(self): 10 | return cv2.Rodrigues(self.rt.r)[0] 11 | 12 | def compute_dr_wrt(self, wrt): 13 | if wrt is self.rt: 14 | return cv2.Rodrigues(self.rt.r)[1].T 15 | 16 | 17 | def lrotmin(p): 18 | if isinstance(p, np.ndarray): 19 | p = p.ravel()[3:] 20 | return np.concatenate([(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel() for pp in p.reshape((-1, 3))]).ravel() 21 | if p.ndim != 2 or p.shape[1] != 3: 22 | p = p.reshape((-1, 3)) 23 | p = p[1:] 24 | return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() for pp in p]).ravel() 25 | 26 | 27 | def posemap(s): 28 | if s == 'lrotmin': 29 | return lrotmin 30 | else: 31 | raise Exception('Unknown posemapping: %s' % (str(s),)) 32 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/smplpytorch/native/webuser/serialization.py: -------------------------------------------------------------------------------- 1 | def ready_arguments(fname_or_dict): 2 | import numpy as np 3 | import pickle 4 | import chumpy as ch 5 | from chumpy.ch import MatVecMult 6 | from smplpytorch.native.webuser.posemapper import posemap 7 | 8 | if not isinstance(fname_or_dict, dict): 9 | dd = pickle.load(open(fname_or_dict, 'rb'), encoding='latin1') 10 | # dd = pickle.load(open(fname_or_dict, 'rb')) 11 | else: 12 | dd = fname_or_dict 13 | 14 | want_shapemodel = 'shapedirs' in dd 15 | nposeparms = dd['kintree_table'].shape[1] * 3 16 | 17 | if 'trans' not in dd: 18 | dd['trans'] = np.zeros(3) 19 | if 'pose' not in dd: 20 | dd['pose'] = np.zeros(nposeparms) 21 | if 'shapedirs' in dd and 'betas' not in dd: 22 | dd['betas'] = np.zeros(dd['shapedirs'].shape[-1]) 23 | 24 | for s in ['v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', 'betas', 'J']: 25 | if (s in dd) and not hasattr(dd[s], 'dterms'): 26 | dd[s] = ch.array(dd[s]) 27 | 28 | if want_shapemodel: 29 | dd['v_shaped'] = dd['shapedirs'].dot(dd['betas']) + dd['v_template'] 30 | v_shaped = dd['v_shaped'] 31 | J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:, 0]) 32 | J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:, 1]) 33 | J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:, 2]) 34 | dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T 35 | dd['v_posed'] = v_shaped + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose'])) 36 | else: 37 | dd['v_posed'] = dd['v_template'] + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose'])) 38 | 39 | return dd 40 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/smplpytorch/pytorch/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/smplpytorch/pytorch/__init__.py -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/common/utils/smplpytorch/smplpytorch/pytorch/tensutils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from smplpytorch.pytorch import rodrigues_layer 4 | 5 | 6 | def th_posemap_axisang(pose_vectors): 7 | ''' 8 | Converts axis-angle to rotmat 9 | pose_vectors (Tensor (batch_size x 72)): pose parameters in axis-angle representation 10 | ''' 11 | rot_nb = int(pose_vectors.shape[1] / 3) 12 | rot_mats = [] 13 | for joint_idx in range(rot_nb): 14 | axis_ang = pose_vectors[:, joint_idx * 3:(joint_idx + 1) * 3] 15 | rot_mat = rodrigues_layer.batch_rodrigues(axis_ang) 16 | rot_mats.append(rot_mat) 17 | 18 | rot_mats = torch.cat(rot_mats, 1) 19 | return rot_mats 20 | 21 | 22 | def th_with_zeros(tensor): 23 | batch_size = tensor.shape[0] 24 | padding = tensor.new([0.0, 0.0, 0.0, 1.0]) 25 | padding.requires_grad = False 26 | 27 | concat_list = [tensor, padding.view(1, 1, 4).repeat(batch_size, 1, 1)] 28 | cat_res = torch.cat(concat_list, 1) 29 | return cat_res 30 | 31 | 32 | def th_pack(tensor): 33 | batch_size = tensor.shape[0] 34 | padding = tensor.new_zeros((batch_size, 4, 3)) 35 | padding.requires_grad = False 36 | pack_list = [padding, tensor] 37 | pack_res = torch.cat(pack_list, 2) 38 | return pack_res 39 | 40 | 41 | def subtract_flat_id(rot_mats): 42 | # Subtracts identity as a flattened tensor 43 | id_flat = torch.eye( 44 | 3, dtype=rot_mats.dtype, device=rot_mats.device).view(1, 9).repeat( 45 | rot_mats.shape[0], 23) 46 | # id_flat.requires_grad = False 47 | results = rot_mats - id_flat 48 | return results 49 | 50 | 51 | def make_list(tensor): 52 | # type: (List[int]) -> List[int] 53 | return tensor 54 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/loss/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/loss/__init__.py -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/model/__init__.py -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/layer.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | def make_linear_layers(feat_dims, relu_final=True, use_bn=False): 5 | layers = [] 6 | for i in range(len(feat_dims) - 1): 7 | layers.append(nn.Linear(feat_dims[i], feat_dims[i + 1])) 8 | 9 | # Do not use ReLU for final estimation 10 | if i < len(feat_dims) - 2 or (i == len(feat_dims) - 2 and relu_final): 11 | if use_bn: 12 | layers.append(nn.BatchNorm1d(feat_dims[i + 1])) 13 | layers.append(nn.ReLU(inplace=True)) 14 | 15 | return nn.Sequential(*layers) 
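# --- Added usage sketch, not part of the original layer.py: a minimal, hypothetical
# example of make_linear_layers, assuming a 512-d input feature and a 63-d output
# (21 joints x 3, matching the MANO joint layout used elsewhere in this repo).
# relu_final=False keeps the last layer linear, which suits regression heads.
#
#   import torch
#   mlp = make_linear_layers([512, 256, 63], relu_final=False)
#   y = mlp(torch.rand(8, 512))  # -> shape (8, 63)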
16 | 17 | 18 | def make_conv_layers(feat_dims, kernel=3, stride=1, padding=1, bnrelu_final=True): 19 | layers = [] 20 | for i in range(len(feat_dims) - 1): 21 | layers.append(nn.Conv2d(in_channels=feat_dims[i], out_channels=feat_dims[i + 1], kernel_size=kernel, stride=stride, 22 | padding=padding)) 23 | # Do not use BN and ReLU for final estimation 24 | if i < len(feat_dims) - 2 or (i == len(feat_dims) - 2 and bnrelu_final): 25 | layers.append(nn.BatchNorm2d(feat_dims[i + 1])) 26 | layers.append(nn.ReLU(inplace=True)) 27 | 28 | return nn.Sequential(*layers) 29 | 30 | 31 | def make_conv1d_layers(feat_dims, kernel=3, stride=1, padding=1, bnrelu_final=True): 32 | layers = [] 33 | for i in range(len(feat_dims) - 1): 34 | layers.append(nn.Conv1d(in_channels=feat_dims[i], out_channels=feat_dims[i + 1], kernel_size=kernel, stride=stride, 35 | padding=padding)) 36 | # Do not use BN and ReLU for final estimation 37 | if i < len(feat_dims) - 2 or (i == len(feat_dims) - 2 and bnrelu_final): 38 | layers.append(nn.BatchNorm1d(feat_dims[i + 1])) 39 | layers.append(nn.ReLU(inplace=True)) 40 | 41 | return nn.Sequential(*layers) 42 | 43 | 44 | def make_deconv_layers(feat_dims, bnrelu_final=True): 45 | layers = [] 46 | for i in range(len(feat_dims) - 1): 47 | layers.append( 48 | nn.ConvTranspose2d(in_channels=feat_dims[i], 49 | out_channels=feat_dims[i + 1], 50 | kernel_size=4, 51 | stride=2, 52 | padding=1, 53 | output_padding=0, 54 | bias=False)) 55 | 56 | # Do not use BN and ReLU for final estimation 57 | if i < len(feat_dims) - 2 or (i == len(feat_dims) - 2 and bnrelu_final): 58 | layers.append(nn.BatchNorm2d(feat_dims[i + 1])) 59 | layers.append(nn.ReLU(inplace=True)) 60 | 61 | return nn.Sequential(*layers) 62 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/conv/__init__.py: -------------------------------------------------------------------------------- 1 | from .spiralconv import SpiralConv 2 | from .dsconv import DSConv 3 | 4 | __all__ = [ 5 | 'SpiralConv', 6 | ] 7 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/conv/dsconv.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Xingyu Chen. All Rights Reserved. 
2 | 3 | """ 4 | * @file dsconv.py 5 | * @author chenxingyu (chenxy.sean@gmail.com) 6 | * @brief Depth-separable spiral convolution 7 | * @version 0.1 8 | * @date 2022-04-28 9 | * 10 | * @copyright Copyright (c) 2022 chenxingyu 11 | * 12 | """ 13 | 14 | import torch 15 | import torch.nn as nn 16 | import numpy as np 17 | 18 | 19 | class DSConv(nn.Module): 20 | def __init__(self, in_channels, out_channels, indices, dim=1): 21 | super(DSConv, self).__init__() 22 | self.dim = dim 23 | self.indices = indices 24 | self.in_channels = in_channels 25 | self.out_channels = out_channels 26 | self.seq_length = indices.size(1) 27 | self.spatial_layer = nn.Conv2d(self.in_channels, self.in_channels, int(np.sqrt(self.seq_length)), 1, 0, groups=self.in_channels, bias=False) 28 | self.channel_layer = nn.Linear(self.in_channels, self.out_channels, bias=False) 29 | torch.nn.init.xavier_uniform_(self.channel_layer.weight) 30 | 31 | def reset_parameters(self): 32 | torch.nn.init.xavier_uniform_(self.spatial_layer.weight) 33 | torch.nn.init.xavier_uniform_(self.channel_layer.weight) 34 | 35 | def forward(self, x): 36 | n_nodes, _ = self.indices.size() 37 | bs = x.size(0) 38 | x = torch.index_select(x, self.dim, self.indices.to(x.device).view(-1)) 39 | x = x.view(bs * n_nodes, self.seq_length, -1).transpose(1, 2) 40 | x = x.view(x.size(0), x.size(1), int(np.sqrt(self.seq_length)), int(np.sqrt(self.seq_length))) 41 | x = self.spatial_layer(x).view(bs, n_nodes, -1) 42 | x = self.channel_layer(x) 43 | 44 | return x 45 | 46 | def __repr__(self): 47 | return '{}({}, {}, seq_length={})'.format(self.__class__.__name__, 48 | self.in_channels, 49 | self.out_channels, 50 | self.seq_length) -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/conv/spiralconv.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class SpiralConv(nn.Module): 6 | def __init__(self, in_channels, out_channels, indices, dim=1): 7 | super(SpiralConv, self).__init__() 8 | self.dim = dim 9 | self.indices = indices 10 | self.in_channels = in_channels 11 | self.out_channels = out_channels 12 | self.seq_length = indices.size(1) 13 | 14 | self.layer = nn.Linear(in_channels * self.seq_length, out_channels) 15 | self.reset_parameters() 16 | 17 | def reset_parameters(self): 18 | torch.nn.init.xavier_uniform_(self.layer.weight) 19 | torch.nn.init.constant_(self.layer.bias, 0) 20 | 21 | def forward(self, x): 22 | n_nodes, _ = self.indices.size() 23 | if x.dim() == 2: 24 | x = torch.index_select(x, 0, self.indices.to(x.device).view(-1)) 25 | x = x.view(n_nodes, -1) 26 | elif x.dim() == 3: 27 | bs = x.size(0) 28 | x = torch.index_select(x, self.dim, self.indices.to(x.device).reshape(-1)) 29 | x = x.view(bs, n_nodes, -1) 30 | else: 31 | raise RuntimeError( 32 | 'x.dim() is expected to be 2 or 3, but received {}'.format( 33 | x.dim())) 34 | x = self.layer(x) 35 | return x 36 | 37 | def __repr__(self): 38 | return '{}({}, {}, seq_length={})'.format(self.__class__.__name__, 39 | self.in_channels, 40 | self.out_channels, 41 | self.seq_length) 42 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/models/generate_pth.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import OrderedDict 3 | import torch 4 | import torch.nn as nn 5 | from resnetstack 
import ResnetStack_Backbone 6 | from resnetstack import Bottleneck 7 | 8 | 9 | def show_weight(Weight: OrderedDict): 10 | for k, v in Weight.items(): 11 | # if k.startswith('resnet_stack2'): 12 | # break 13 | print(f'{k: <50}', end='') 14 | print(v.shape) 15 | 16 | 17 | def update_weights(pretrained, edited): 18 | ''' 19 | pretrained: pretrained ResNet50 weights 20 | edited: edited model weights, model.state_dict() 21 | return: updated new model weights 22 | ''' 23 | pretrained_layer_name_size = 60 24 | 25 | for k, v in pretrained.items(): 26 | names = k.split('.') 27 | 28 | if names[0] in ['conv1', 'bn1']: 29 | # same name 30 | assert (k in edited) 31 | edited[k] = v 32 | print(f'{k: <{pretrained_layer_name_size}}', end='') 33 | print(k) # pretrained layer 34 | 35 | elif names[0] in ['layer1', 'layer2', 'layer3', 'layer4']: 36 | for i in range(1, 3): 37 | new_layer_name = f'resnet_stack{i}.' + k 38 | assert (new_layer_name in edited) # make sure the layer is in new model 39 | edited[new_layer_name] = v 40 | print(f'{new_layer_name: <{pretrained_layer_name_size}}', end='') 41 | print(k) # pretrained layer 42 | 43 | else: # no mapped layer 44 | print(' ' * pretrained_layer_name_size, end='') 45 | print(k) # pretrained layer 46 | 47 | return edited 48 | 49 | 50 | # def check_if_weights_are_updated(pretrained, edited): 51 | # name = 'resnet_stack1.layer2.2.bn1.bias' 52 | # origin = edited[name] 53 | 54 | # edited = update_weights(pretrained, edited) 55 | # edited = edited[name] 56 | 57 | # pretrained = Weight_Resnet['layer2.2.bn1.bias'] 58 | 59 | # print(origin) 60 | # print(edited) 61 | # print(pretrained) 62 | 63 | 64 | def generate_pth(): 65 | PATH_RESNET = '/uac/gds/xuhao/.cache/torch/hub/checkpoints/resnet50-0676ba61.pth' 66 | assert (os.path.isfile(PATH_RESNET)) 67 | 68 | print('torchvision - resnet50 weight found, loading...') 69 | Weight_Resnet = torch.load(PATH_RESNET) 70 | 71 | # my model 72 | model = ResnetStack_Backbone(Bottleneck, [3, 4, 6, 3]) 73 | Weight_RNStack = model.state_dict() 74 | 75 | # same_name = ['conv1', 'bn1'] 76 | # stk1_name = ['layer1', 'layer2', 'layer3', 'layer4'] 77 | # stk2_name = ['layer1', 'layer2', 'layer3', 'layer4'] 78 | 79 | Weight_RNStack = update_weights(Weight_Resnet, Weight_RNStack) 80 | torch.save(Weight_RNStack, 'resnetstack.pth') 81 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/template/j_reg.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/model/mob_recon/template/j_reg.npy -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/template/right_faces.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/model/mob_recon/template/right_faces.npy -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/template/transform.pkl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/model/mob_recon/template/transform.pkl -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/template/transform_body.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandObjectImageSynthesizer/hand_recon/model/mob_recon/template/transform_body.pkl -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/utils/alter_pretrain.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from collections import OrderedDict 3 | 4 | path = 'out/Human36M/cmr_pg_h36m_resumepp_lr5/checkpoints/' 5 | old_name = 'checkpoint_best.pt' 6 | new_name = 'cmr_pg_res18_h36m.pt' 7 | new_weight = OrderedDict() 8 | checkpoint = torch.load(path+old_name, map_location='cpu')['model_state_dict'] 9 | 10 | for k, v in checkpoint.items(): 11 | if 'backbone.' not in k and 'reduce.' not in k: 12 | new_weight[k] = v 13 | 14 | torch.save(new_weight, path+new_name) 15 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/utils/generate_spiral_seq.py: -------------------------------------------------------------------------------- 1 | import openmesh as om 2 | from sklearn.neighbors import KDTree 3 | import numpy as np 4 | 5 | 6 | def _next_ring(mesh, last_ring, other): 7 | res = [] 8 | 9 | def is_new_vertex(idx): 10 | return (idx not in last_ring and idx not in other and idx not in res) 11 | 12 | for vh1 in last_ring: 13 | vh1 = om.VertexHandle(vh1) 14 | after_last_ring = False 15 | for vh2 in mesh.vv(vh1): 16 | if after_last_ring: 17 | if is_new_vertex(vh2.idx()): 18 | res.append(vh2.idx()) 19 | if vh2.idx() in last_ring: 20 | after_last_ring = True 21 | for vh2 in mesh.vv(vh1): 22 | if vh2.idx() in last_ring: 23 | break 24 | if is_new_vertex(vh2.idx()): 25 | res.append(vh2.idx()) 26 | return res 27 | 28 | 29 | def extract_spirals(mesh, seq_length, dilation=1): 30 | # output: spirals.size() = [N, seq_length] 31 | spirals = [] 32 | for vh0 in mesh.vertices(): 33 | reference_one_ring = [] 34 | for vh1 in mesh.vv(vh0): 35 | reference_one_ring.append(vh1.idx()) 36 | spiral = [vh0.idx()] 37 | one_ring = list(reference_one_ring) 38 | last_ring = one_ring 39 | next_ring = _next_ring(mesh, last_ring, spiral) 40 | spiral.extend(last_ring) 41 | while len(spiral) + len(next_ring) < seq_length * dilation: 42 | if len(next_ring) == 0: 43 | break 44 | last_ring = next_ring 45 | next_ring = _next_ring(mesh, last_ring, spiral) 46 | spiral.extend(last_ring) 47 | if len(next_ring) > 0: 48 | spiral.extend(next_ring) 49 | else: 50 | kdt = KDTree(mesh.points(), metric='euclidean') 51 | spiral = kdt.query(np.expand_dims(mesh.points()[spiral[0]], 52 | axis=0), 53 | k=seq_length * dilation, 54 | return_distance=False).tolist() 55 | spiral = [item for subspiral in spiral for item in subspiral] 56 | spirals.append(spiral[:seq_length * dilation][::dilation]) 57 | return spirals 58 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/utils/progress/counter.py: 
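A toy invocation of extract_spirals() above (illustrative only; preprocess_spiral() in utils.py later in this listing wraps exactly this pattern):

import numpy as np
import openmesh as om

faces = np.array([[0, 1, 2], [0, 2, 3]])                   # two triangles sharing an edge
mesh = om.TriMesh(np.random.rand(faces.max() + 1, 3), faces)
spirals = extract_spirals(mesh, seq_length=4, dilation=1)  # one spiral per vertex
print(len(spirals), len(spirals[0]))                       # 4 4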
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Copyright (c) 2012 Giorgos Verigakis 4 | # 5 | # Permission to use, copy, modify, and distribute this software for any 6 | # purpose with or without fee is hereby granted, provided that the above 7 | # copyright notice and this permission notice appear in all copies. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 | 17 | from __future__ import unicode_literals 18 | from . import Infinite, Progress 19 | 20 | 21 | class Counter(Infinite): 22 | def update(self): 23 | self.write(str(self.index)) 24 | 25 | 26 | class Countdown(Progress): 27 | def update(self): 28 | self.write(str(self.remaining)) 29 | 30 | 31 | class Stack(Progress): 32 | phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█') 33 | 34 | def update(self): 35 | nphases = len(self.phases) 36 | i = min(nphases - 1, int(self.progress * nphases)) 37 | self.write(self.phases[i]) 38 | 39 | 40 | class Pie(Stack): 41 | phases = ('○', '◔', '◑', '◕', '●') 42 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/utils/progress/spinner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Copyright (c) 2012 Giorgos Verigakis 4 | # 5 | # Permission to use, copy, modify, and distribute this software for any 6 | # purpose with or without fee is hereby granted, provided that the above 7 | # copyright notice and this permission notice appear in all copies. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 | 17 | from __future__ import unicode_literals 18 | from . 
import Infinite 19 | 20 | 21 | class Spinner(Infinite): 22 | phases = ('-', '\\', '|', '/') 23 | hide_cursor = True 24 | 25 | def update(self): 26 | i = self.index % len(self.phases) 27 | self.write(self.phases[i]) 28 | 29 | 30 | class PieSpinner(Spinner): 31 | phases = ['◷', '◶', '◵', '◴'] 32 | 33 | 34 | class MoonSpinner(Spinner): 35 | phases = ['◑', '◒', '◐', '◓'] 36 | 37 | 38 | class LineSpinner(Spinner): 39 | phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻'] 40 | 41 | 42 | class PixelSpinner(Spinner): 43 | phases = ['⣾', '⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽'] 44 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/utils/smpl.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import os.path as osp 4 | import json 5 | # from config import cfg 6 | 7 | import sys 8 | 9 | from smplpytorch.pytorch.smpl_layer import SMPL_Layer 10 | 11 | 12 | class SMPL(object): 13 | def __init__(self, root): 14 | self.root = root 15 | self.layer = {'neutral': self.get_layer(), 'male': self.get_layer('male'), 'female': self.get_layer('female')} 16 | self.vertex_num = 6890 17 | self.face = self.layer['neutral'].th_faces.numpy() 18 | self.joint_regressor = self.layer['neutral'].th_J_regressor.numpy() 19 | 20 | # add nose, L/R eye, L/R ear 21 | self.face_kps_vertex = (331, 2802, 6262, 3489, 3990) # mesh vertex idx 22 | nose_onehot = np.array([1 if i == 331 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 23 | left_eye_onehot = np.array([1 if i == 2802 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 24 | right_eye_onehot = np.array([1 if i == 6262 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 25 | left_ear_onehot = np.array([1 if i == 3489 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 26 | right_ear_onehot = np.array([1 if i == 3990 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 27 | self.joint_regressor = np.concatenate( 28 | (self.joint_regressor, nose_onehot, left_eye_onehot, right_eye_onehot, left_ear_onehot, right_ear_onehot)) 29 | 30 | self.joint_num = 29 # original: 24. 
manually add nose, L/R eye, L/R ear 31 | self.joints_name = ( 32 | 'Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 33 | 'R_Toe', 'Neck', 'L_Thorax', 'R_Thorax', 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 34 | 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear') 35 | self.flip_pairs = ( 36 | (1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23), (25, 26), (27, 28)) 37 | self.skeleton = ( 38 | (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17), 39 | (17, 19), (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 24), (24, 15), 40 | (24, 25), (24, 26), (25, 27), (26, 28)) 41 | self.root_joint_idx = self.joints_name.index('Pelvis') 42 | 43 | def get_layer(self, gender='neutral'): 44 | return SMPL_Layer(gender=gender, model_root=osp.join(self.root, 'template')) 45 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/utils/test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import matplotlib.tri as mtri 4 | from mpl_toolkits.mplot3d import Axes3D 5 | 6 | xy = [[0.3,0.5], 7 | [0.6,0.8], 8 | [0.5,0.1], 9 | [0.1,0.2]] 10 | xy = np.array(xy) 11 | 12 | triangles = [[0,2,1], 13 | [2,0,3]] 14 | 15 | triang = mtri.Triangulation(xy[:,0], xy[:,1], triangles=triangles) 16 | 17 | z = [0.1,0.2,0.3,0.4] 18 | 19 | fig, ax = plt.subplots(subplot_kw =dict(projection="3d")) 20 | ax.plot_trisurf(triang, z) 21 | 22 | plt.show() -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/utils/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | import numpy as np 4 | import openmesh as om 5 | 6 | 7 | def makedirs(folder): 8 | if not os.path.exists(folder): 9 | os.makedirs(folder) 10 | 11 | 12 | def count_parameters(model): 13 | return sum(p.numel() for p in model.parameters() if p.requires_grad) 14 | 15 | 16 | def to_edge_index(mat): 17 | return torch.LongTensor(np.vstack(mat.nonzero())) 18 | 19 | 20 | def to_sparse(spmat): 21 | return torch.sparse.FloatTensor(torch.LongTensor(np.copy(np.array([spmat.tocoo().row, spmat.tocoo().col]))), 22 | torch.FloatTensor(spmat.tocoo().data), torch.Size(spmat.tocoo().shape)) 23 | 24 | 25 | def preprocess_spiral(face, seq_length, vertices=None, dilation=1): 26 | from .generate_spiral_seq import extract_spirals 27 | assert face.shape[1] == 3 28 | if vertices is not None: 29 | mesh = om.TriMesh(np.array(vertices), np.array(face)) 30 | else: 31 | n_vertices = face.max() + 1 32 | mesh = om.TriMesh(np.ones([n_vertices, 3]), np.array(face)) 33 | spirals = torch.tensor(extract_spirals(mesh, seq_length=seq_length, dilation=dilation)) 34 | return spirals 35 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/utils/warmup_scheduler.py: -------------------------------------------------------------------------------- 1 | from bisect import bisect_right 2 | 3 | def adjust_learning_rate(optimizer, epoch, step, len_epoch, lr, lr_decay, decay_step, warmup_epochs): 4 | """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" 5 | # lr = args.lr * 
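The one-hot rows appended to joint_regressor in smpl.py above make the regressor return the selected mesh vertices verbatim, i.e. the extra nose/eye/ear "joints" are read straight off the SMPL mesh. A quick self-contained check of that trick (illustrative values):

import numpy as np

verts = np.random.rand(6890, 3)                   # a posed SMPL mesh
nose_row = np.zeros((1, 6890), dtype=np.float32)
nose_row[0, 331] = 1.0                            # vertex 331 = nose (see above)
assert np.allclose(nose_row @ verts, verts[331])  # regressed joint == mesh vertex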
(0.1 ** (epoch // 30)) 6 | # for param_group in optimizer.param_groups: 7 | # param_group["lr"] = lr 8 | lr = lr * (lr_decay ** bisect_right(decay_step, epoch)) 9 | 10 | """Warmup""" 11 | if epoch < warmup_epochs: 12 | lr = ( 13 | lr 14 | * float(1 + step + epoch * len_epoch) 15 | / float(warmup_epochs * len_epoch) 16 | ) 17 | 18 | # if args.rank == 0: 19 | # writer.print_str("epoch = {}, step = {}, lr = {}".format(epoch, step, lr)) 20 | 21 | for param_group in optimizer.param_groups: 22 | param_group["lr"] = lr 23 | 24 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/hand_recon/model/mob_recon/utils/writer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import torch 4 | import json 5 | from glob import glob 6 | import logging 7 | logging.getLogger('PIL').setLevel(logging.WARNING) 8 | 9 | 10 | class Writer: 11 | def __init__(self, args=None): 12 | self.args = args 13 | if self.args is not None: 14 | log_filename = os.path.join( 15 | args.out_dir, 'log.log') 16 | 17 | logging.basicConfig( 18 | filename=log_filename, 19 | level=logging.DEBUG, 20 | format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s') 21 | 22 | 23 | def print_str(self, info): 24 | logging.info(info) 25 | 26 | def print_info(self, info): 27 | message = 'Epoch: {}/{}, Duration: {:.3f}s, Train Loss: {:.4f}, Test Loss: {:.4f}' \ 28 | .format(info['current_epoch'], info['epochs'], info['t_duration'], info['train_loss'], info['test_loss']) 29 | logging.info(message) 30 | 31 | def print_step(self, info): 32 | message = 'Epoch: {}/{}, Step: {}/{}, Total_step: {}, Duration: {:.3f}s, Train Loss: {:.4f}, L1 Loss: {:.4f}, Lr: {:.6f}' \ 33 | .format(info['epoch'], info['max_epoch'], info['step'], info['max_step'], info['total_step'], info['step_duration'], info['train_loss'], info['l1_loss'], info['lr']) 34 | logging.info(message) 35 | 36 | def print_step_ft(self, info): 37 | message = 'Epoch: {}/{}, Step: {}/{}, Total: {}, Dur: {:.3f}s, FDur: {:.3f}s, BDur: {:.3f}s,, Train Loss: {:.4f}, L1 Loss: {:.4f}, Lr: {:.6f}' \ 38 | .format(info['epoch'], info['max_epoch'], info['step'], info['max_step'], info['total_step'], 39 | info['step_duration'], info['forward_duration'] ,info['backward_duration'], info['train_loss'], info['l1_loss'], info['lr']) 40 | logging.info(message) 41 | 42 | def save_checkpoint(self, model, optimizer, scheduler, epoch, best=False, last=False): 43 | if best: 44 | save_path = os.path.join(self.args.checkpoints_dir, 'checkpoint_best.pt') 45 | elif last: 46 | save_path = os.path.join(self.args.checkpoints_dir, 'checkpoint_last.pt') 47 | else: 48 | save_path = os.path.join(self.args.checkpoints_dir, 'checkpoint_{:03d}.pt'.format(epoch)) 49 | scheduler_state_dict = {} if scheduler is None else scheduler.state_dict() 50 | torch.save( 51 | { 52 | 'epoch': epoch, 53 | 'model_state_dict': model.state_dict(), 54 | 'optimizer_state_dict': optimizer.state_dict(), 55 | 'scheduler_state_dict': scheduler_state_dict, 56 | }, save_path) 57 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/inference_dexycb.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | 4 | from common.config import Config 5 | from trainer import Tester_DexYCB 6 | from train import fetch_model 7 | import sys 8 | 9 | sys.path.append('./') 10 | from hand_recon.model.model import 
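How adjust_learning_rate() in warmup_scheduler.py above is typically driven (optimizer and hyper-parameters here are hypothetical): the learning rate ramps linearly over the first warmup_epochs, then decays by lr_decay at each milestone in decay_step via bisect_right.

import torch

opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=1e-3)
len_epoch = 100  # batches per epoch
for epoch in range(40):
    for step in range(len_epoch):
        adjust_learning_rate(opt, epoch, step, len_epoch, lr=1e-3,
                             lr_decay=0.1, decay_step=[30], warmup_epochs=2)
        # epochs 0-1: linear ramp from ~0 up to 1e-3; epoch >= 30: decayed to 1e-4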
fetch_model as fetch_hand_recon_model 11 | 12 | parser = argparse.ArgumentParser('Inference', add_help=True) 13 | parser.add_argument('--model_dir', default='', type=str, help='Directory containing cfg.json') 14 | parser.add_argument('--data_split', default='', type=str, help='data split') 15 | parser.add_argument('--total_split', '-ts', type=int, default=0, help='total split of data list') 16 | parser.add_argument('--current_split', '-pi', type=int, default=1000, help='current split of data list') 17 | parser.add_argument('--version', '-v', type=int, default=1, help='generation version') 18 | parser.add_argument('--part', '-pt', type=int, default=1, help='generation part') 19 | parser.add_argument('--sampling_timesteps', '-t', type=int, default=250, help='sample step') 20 | parser.add_argument('--batch_size', '-bs', type=int, default=25, help='batch size') 21 | parser.add_argument('--resume', default=-1, type=int, help='index of model weights') 22 | args = parser.parse_args() 23 | 24 | if __name__ == '__main__': 25 | # Load the parameters from json file 26 | args = parser.parse_args() 27 | json_path = os.path.join(args.model_dir, 'cfg.json') 28 | assert os.path.isfile(json_path), f'No json configuration file found at {json_path}' 29 | cfg = Config(json_path).cfg 30 | 31 | # Update args into cfg.test 32 | cfg.test.update(vars(args)) 33 | diffusion_model = fetch_model(cfg) 34 | 35 | # hand reconstruction model 36 | hand_recon_model = fetch_hand_recon_model(model_name='mobrecon', model_path='./hand_recon/experiment/dexycb.mobrecon.ori/filter/test_model_best.pth') 37 | 38 | tester = Tester_DexYCB( 39 | diffusion_model=diffusion_model, 40 | hand_recon_model=hand_recon_model, 41 | folder= 42 | f'{cfg.test.data_split}_name_{cfg.model.name}_condition_v{cfg.test.version}_p{cfg.test.part}_res_{cfg.data.image_size}_resume_{cfg.test.resume}_step_{cfg.test.sampling_timesteps}', 43 | batch_size=cfg.test.batch_size, 44 | split_batches=True, 45 | data_split=cfg.test.data_split, 46 | version=cfg.test.version, 47 | part=cfg.test.part, 48 | total_split=cfg.test.total_split, 49 | current_split=cfg.test.current_split, 50 | results_folder=cfg.base.model_dir, 51 | ) 52 | 53 | tester.load(milestone=cfg.test.resume) 54 | tester.inference_single_gpu() 55 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/inference_ho3d.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | 4 | from common.config import Config 5 | from trainer import Tester_HO3D 6 | from train import fetch_model 7 | import sys 8 | 9 | sys.path.append('./') 10 | from hand_recon.model.model import fetch_model as fetch_hand_recon_model 11 | 12 | parser = argparse.ArgumentParser('Inference', add_help=True) 13 | parser.add_argument('--model_dir', default='', type=str, help='Directory containing cfg.json') 14 | parser.add_argument('--data_split', default='', type=str, help='data split') 15 | parser.add_argument('--total_split', '-ts', type=int, default=0, help='total split of data list') 16 | parser.add_argument('--current_split', '-pi', type=int, default=1000, help='current split of data list') 17 | parser.add_argument('--version', '-v', type=int, default=1, help='generation version') 18 | parser.add_argument('--part', '-pt', type=int, default=1, help='generation part') 19 | parser.add_argument('--sampling_timesteps', '-t', type=int, default=250, help='sample step') 20 | parser.add_argument('--batch_size', '-bs', type=int, 
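An example launch of inference_dexycb.py above (all paths and flag values are hypothetical; inference_ho3d.py below exposes the same flags):

    python inference_dexycb.py --model_dir experiment/dexycb_s0 --data_split train -v 1 -pt 1 -t 250 -bs 25 --resume 100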
default=25, help='batch size') 21 | parser.add_argument('--resume', default=-1, type=int, help='index of model weights') 22 | args = parser.parse_args() 23 | 24 | if __name__ == '__main__': 25 | # Load the parameters from json file 26 | args = parser.parse_args() 27 | json_path = os.path.join(args.model_dir, 'cfg.json') 28 | assert os.path.isfile(json_path), f'No json configuration file found at {json_path}' 29 | cfg = Config(json_path).cfg 30 | 31 | # Update args into cfg.test 32 | cfg.test.update(vars(args)) 33 | diffusion_model = fetch_model(cfg) 34 | 35 | # hand reconstruction model 36 | hand_recon_model = fetch_hand_recon_model(model_name='mobrecon', model_path='./hand_recon/experiment/ho3d.mobrecon.ori/filter/val_model_best.pth') 37 | 38 | tester = Tester_HO3D( 39 | diffusion_model=diffusion_model, 40 | hand_recon_model=hand_recon_model, 41 | folder= 42 | f'{cfg.test.data_split}_name_{cfg.model.name}_condition_v{cfg.test.version}_p{cfg.test.part}_res_{cfg.data.image_size}_resume_{cfg.test.resume}_step_{cfg.test.sampling_timesteps}', 43 | batch_size=cfg.test.batch_size, 44 | split_batches=True, 45 | data_split=cfg.test.data_split, 46 | version=cfg.test.version, 47 | part=cfg.test.part, 48 | total_split=cfg.test.total_split, 49 | current_split=cfg.test.current_split, 50 | results_folder=cfg.base.model_dir, 51 | ) 52 | 53 | tester.load(milestone=cfg.test.resume) 54 | tester.inference_single_gpu() 55 | -------------------------------------------------------------------------------- /HandObjectImageSynthesizer/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | from common.config import Config 4 | from trainer import Trainer 5 | 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument('--model_dir', default='', type=str, help='Directory containing cfg.json') 8 | parser.add_argument('--resume', default=-1, type=int, help='index of model weights') 9 | 10 | 11 | def fetch_model(cfg): 12 | if cfg.model.name in ['cf_normal_cond_v2_wloss_grcond']: 13 | from denoising_diffusion_pytorch.classifier_free_guidance_normal_wloss_grcond import Unet, GaussianDiffusion 14 | unet_model = Unet(dim=64, num_classes=8, dim_mults=(1, 2, 4, 8)) 15 | diffusion_model = GaussianDiffusion( 16 | unet_model, 17 | image_size=cfg.data.image_size, 18 | timesteps=cfg.data.timesteps, # number of steps 19 | sampling_timesteps=cfg.test.sampling_timesteps, # number of sampling timesteps (using ddim for faster inference [see citation for ddim paper]) 20 | loss_type=cfg.loss.name, # L1 or L2 21 | objective=cfg.loss.objective, 22 | beta_schedule=cfg.model.beta_schedule, 23 | ) 24 | 25 | else: 26 | return NotImplementedError(f'No implement {cfg.model.name}') 27 | 28 | return diffusion_model 29 | 30 | 31 | if __name__ == '__main__': 32 | # Load the parameters from json file 33 | args = parser.parse_args() 34 | json_path = os.path.join(args.model_dir, 'cfg.json') 35 | assert os.path.isfile(json_path), f'No json configuration file found at {json_path}' 36 | cfg = Config(json_path).cfg 37 | 38 | # Update args into cfg.base 39 | cfg.base.update(vars(args)) 40 | diffusion_model = fetch_model(cfg) 41 | trainer = Trainer( 42 | diffusion_model, 43 | dataset_name=cfg.data.dataset_name, 44 | train_batch_size=cfg.train.batch_size, 45 | train_lr=cfg.optimizer.lr, 46 | train_num_steps=cfg.train.num_steps, # total training steps 47 | gradient_accumulate_every=cfg.train.gradient_accumulate_every, # gradient accumulation steps 48 | ema_decay=cfg.train.ema_decay, # 
exponential moving average decay 49 | amp=False, # turn on mixed precision 50 | calculate_fid=True, # whether to calculate fid during training 51 | split_batches=True, 52 | save_and_sample_every=cfg.summary.save_and_sample_every, 53 | num_samples=cfg.test.num_samples, 54 | results_folder=cfg.base.model_dir, 55 | data_split=cfg.data.data_split, 56 | version=cfg.data.version, 57 | ) 58 | 59 | trainer.load(milestone=cfg.base.resume) 60 | trainer.train() 61 | -------------------------------------------------------------------------------- /HandReconstruction/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/common/__init__.py -------------------------------------------------------------------------------- /HandReconstruction/common/config.py: -------------------------------------------------------------------------------- 1 | import json 2 | from easydict import EasyDict as edict 3 | 4 | 5 | class Config(): 6 | 7 | def __init__(self, json_path): 8 | with open(json_path) as f: 9 | self.cfg = json.load(f) 10 | self.cfg = edict(self.cfg) 11 | 12 | def save(self, json_path): 13 | with open(json_path, 'w') as f: 14 | json.dump(self.cfg, f, indent=4) 15 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/common/utils/__init__.py -------------------------------------------------------------------------------- /HandReconstruction/common/utils/coor_converter.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | 5 | def row_col2theta_phi(row, col, width, height): 6 | theta = ((row + 0.5) / height) * np.pi 7 | phi = (0.5 - (col + 0.5) / width) * 2.0 * np.pi 8 | phi = (col - (width - 1) / 2) / width * 2 * np.pi 9 | return theta, phi 10 | 11 | 12 | def theta_phi2row_col_array(theta, phi, width, height): 13 | row = (theta / np.pi) * height - 0.5 14 | col = (0.5 - phi / (2.0 * np.pi)) * width - 0.5 15 | row = row.astype(int) 16 | col = col.astype(int) 17 | row = np.clip(row, 0, height - 1) # make sure the lights do not sink to bottom 18 | col = col % width # handle the negative cols 19 | return row, col 20 | 21 | 22 | def np_theta_phi2xyz(theta, phi): 23 | x = np.sin(theta) * np.sin(phi) 24 | y = np.cos(theta) 25 | z = np.sin(theta) * np.cos(phi) 26 | return np.array((x, y, z)) 27 | 28 | 29 | def np_xyz2theta_phi(x, y, z): 30 | theta = np.arccos(y) 31 | phi = np.arctan2(x, z) # quadrant awareness 32 | return theta, phi 33 | 34 | 35 | def torch_theta_phi2xyz(theta, phi): 36 | x = np.sin(theta) * np.sin(phi) 37 | y = np.cos(theta) 38 | z = np.sin(theta) * np.cos(phi) 39 | return torch.tensor((x, y, z)).cuda() 40 | 41 | 42 | def torch_xyz2theta_phi(x, y, z): 43 | theta = torch.arccos(y) 44 | phi = torch.arctan2(x, z) # quadrant awareness 45 | return theta, phi 46 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/dir.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | -------------------------------------------------------------------------------- 
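Two details of coor_converter.py above worth noting: row_col2theta_phi assigns phi twice, so only the second (centered-column) formula takes effect, and the xyz convention treats y as the polar axis. A round-trip sanity sketch (illustrative only):

import numpy as np

theta, phi = 1.0, 0.5
x, y, z = np_theta_phi2xyz(theta, phi)      # y = cos(theta) is the polar axis
t2, p2 = np_xyz2theta_phi(x, y, z)
assert np.allclose((t2, p2), (theta, phi))  # holds for theta in (0, pi), phi in (-pi, pi]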
/HandReconstruction/common/utils/manopth/.gitignore: -------------------------------------------------------------------------------- 1 | *.sw* 2 | *.bak 3 | *_bak.py 4 | 5 | .cache/ 6 | __pycache__/ 7 | build/ 8 | dist/ 9 | manopth_hassony2.egg-info/ 10 | mano/models/ 11 | assets/mano_layer.svg 12 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/assets/mano_layer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/common/utils/manopth/assets/mano_layer.png -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/assets/random_hand.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/common/utils/manopth/assets/random_hand.png -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/environment.yml: -------------------------------------------------------------------------------- 1 | name: manopth 2 | 3 | dependencies: 4 | - opencv 5 | - python=3.7 6 | - matplotlib 7 | - numpy 8 | - pytorch 9 | - tqdm 10 | - git 11 | - pip: 12 | - git+https://github.com/hassony2/chumpy.git 13 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/examples/manopth_mindemo.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from manopth.manolayer import ManoLayer 3 | from manopth import demo 4 | 5 | batch_size = 10 6 | # Select number of principal components for pose space 7 | ncomps = 6 8 | 9 | # Initialize MANO layer 10 | mano_layer = ManoLayer( 11 | mano_root='mano/models', use_pca=True, ncomps=ncomps, flat_hand_mean=False) 12 | 13 | # Generate random shape parameters 14 | random_shape = torch.rand(batch_size, 10) 15 | # Generate random pose parameters, including 3 values for global axis-angle rotation 16 | random_pose = torch.rand(batch_size, ncomps + 3) 17 | 18 | # Forward pass through MANO layer 19 | hand_verts, hand_joints = mano_layer(random_pose, random_shape) 20 | demo.display_hand({ 21 | 'verts': hand_verts, 22 | 'joints': hand_joints 23 | }, 24 | mano_faces=mano_layer.th_faces) 25 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/mano/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/common/utils/manopth/mano/__init__.py -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/mano/webuser/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/common/utils/manopth/mano/webuser/__init__.py -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/mano/webuser/posemapper.py: -------------------------------------------------------------------------------- 1 
| ''' 2 | Copyright 2017 Javier Romero, Dimitrios Tzionas, Michael J Black and the Max Planck Gesellschaft. All rights reserved. 3 | This software is provided for research purposes only. 4 | By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license 5 | 6 | More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de. 7 | For comments or questions, please email us at: mano@tue.mpg.de 8 | 9 | 10 | About this file: 11 | ================ 12 | This file defines a wrapper for the loading functions of the MANO model. 13 | 14 | Modules included: 15 | - load_model: 16 | loads the MANO model from a given file location (i.e. a .pkl file location), 17 | or a dictionary object. 18 | 19 | ''' 20 | 21 | 22 | import chumpy as ch 23 | import numpy as np 24 | import cv2 25 | 26 | 27 | class Rodrigues(ch.Ch): 28 | dterms = 'rt' 29 | 30 | def compute_r(self): 31 | return cv2.Rodrigues(self.rt.r)[0] 32 | 33 | def compute_dr_wrt(self, wrt): 34 | if wrt is self.rt: 35 | return cv2.Rodrigues(self.rt.r)[1].T 36 | 37 | 38 | def lrotmin(p): 39 | if isinstance(p, np.ndarray): 40 | p = p.ravel()[3:] 41 | return np.concatenate( 42 | [(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel() 43 | for pp in p.reshape((-1, 3))]).ravel() 44 | if p.ndim != 2 or p.shape[1] != 3: 45 | p = p.reshape((-1, 3)) 46 | p = p[1:] 47 | return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() 48 | for pp in p]).ravel() 49 | 50 | 51 | def posemap(s): 52 | if s == 'lrotmin': 53 | return lrotmin 54 | else: 55 | raise Exception('Unknown posemapping: %s' % (str(s), )) 56 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/manopth.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | README.md 2 | setup.py 3 | mano/__init__.py 4 | mano/webuser/__init__.py 5 | mano/webuser/lbs.py 6 | mano/webuser/posemapper.py 7 | mano/webuser/serialization.py 8 | mano/webuser/smpl_handpca_wrapper_HAND_only.py 9 | mano/webuser/verts.py 10 | manopth/__init__.py 11 | manopth/argutils.py 12 | manopth/demo.py 13 | manopth/manolayer.py 14 | manopth/rodrigues_layer.py 15 | manopth/rot6d.py 16 | manopth/rotproj.py 17 | manopth/tensutils.py 18 | manopth.egg-info/PKG-INFO 19 | manopth.egg-info/SOURCES.txt 20 | manopth.egg-info/dependency_links.txt 21 | manopth.egg-info/top_level.txt 22 | test/test_demo.py -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/manopth.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/manopth.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | mano 2 | manopth 3 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/manopth/__init__.py: -------------------------------------------------------------------------------- 1 | name = 'manopth' 2 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/manopth/argutils.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import os 3 | import pickle 4 | import subprocess 5 | import sys 6 | 7 | 8 | def 
print_args(args): 9 | opts = vars(args) 10 | print('======= Options ========') 11 | for k, v in sorted(opts.items()): 12 | print('{}: {}'.format(k, v)) 13 | print('========================') 14 | 15 | 16 | def save_args(args, save_folder, opt_prefix='opt', verbose=True): 17 | opts = vars(args) 18 | # Create checkpoint folder 19 | if not os.path.exists(save_folder): 20 | os.makedirs(save_folder, exist_ok=True) 21 | 22 | # Save options 23 | opt_filename = '{}.txt'.format(opt_prefix) 24 | opt_path = os.path.join(save_folder, opt_filename) 25 | with open(opt_path, 'a') as opt_file: 26 | opt_file.write('====== Options ======\n') 27 | for k, v in sorted(opts.items()): 28 | opt_file.write( 29 | '{option}: {value}\n'.format(option=str(k), value=str(v))) 30 | opt_file.write('=====================\n') 31 | opt_file.write('launched {} at {}\n'.format( 32 | str(sys.argv[0]), str(datetime.datetime.now()))) 33 | 34 | # Add git info 35 | label = subprocess.check_output(["git", "describe", 36 | "--always"]).strip() 37 | if subprocess.call( 38 | ["git", "branch"], 39 | stderr=subprocess.STDOUT, 40 | stdout=open(os.devnull, 'w')) == 0: 41 | opt_file.write('=== Git info ====\n') 42 | opt_file.write('{}\n'.format(label)) 43 | commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']) 44 | opt_file.write('commit : {}\n'.format(commit.strip())) 45 | 46 | opt_picklename = '{}.pkl'.format(opt_prefix) 47 | opt_picklepath = os.path.join(save_folder, opt_picklename) 48 | with open(opt_picklepath, 'wb') as opt_file: 49 | pickle.dump(opts, opt_file) 50 | if verbose: 51 | print('Saved options to {}'.format(opt_path)) 52 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/manopth/demo.py: -------------------------------------------------------------------------------- 1 | from matplotlib import pyplot as plt 2 | from mpl_toolkits.mplot3d import Axes3D 3 | from mpl_toolkits.mplot3d.art3d import Poly3DCollection 4 | import numpy as np 5 | import torch 6 | 7 | from manopth.manolayer import ManoLayer 8 | 9 | 10 | def generate_random_hand(batch_size=1, ncomps=6, mano_root='mano/models'): 11 | nfull_comps = ncomps + 3 # Add global orientation dims to PCA 12 | random_pcapose = torch.rand(batch_size, nfull_comps) 13 | mano_layer = ManoLayer(mano_root=mano_root) 14 | verts, joints = mano_layer(random_pcapose) 15 | return {'verts': verts, 'joints': joints, 'faces': mano_layer.th_faces} 16 | 17 | 18 | def display_hand(hand_info, mano_faces=None, ax=None, alpha=0.2, batch_idx=0, show=True): 19 | """ 20 | Displays hand batch_idx in batch of hand_info, hand_info as returned by 21 | generate_random_hand 22 | """ 23 | if ax is None: 24 | fig = plt.figure() 25 | ax = fig.add_subplot(111, projection='3d') 26 | verts, joints = hand_info['verts'][batch_idx], hand_info['joints'][ 27 | batch_idx] 28 | if mano_faces is None: 29 | ax.scatter(verts[:, 0], verts[:, 1], verts[:, 2], alpha=0.1) 30 | else: 31 | mesh = Poly3DCollection(verts[mano_faces], alpha=alpha) 32 | face_color = (141 / 255, 184 / 255, 226 / 255) 33 | edge_color = (50 / 255, 50 / 255, 50 / 255) 34 | mesh.set_edgecolor(edge_color) 35 | mesh.set_facecolor(face_color) 36 | ax.add_collection3d(mesh) 37 | ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], color='r') 38 | cam_equal_aspect_3d(ax, verts.numpy()) 39 | if show: 40 | plt.show() 41 | 42 | 43 | def cam_equal_aspect_3d(ax, verts, flip_x=False): 44 | """ 45 | Centers view on cuboid containing hand and flips y and z axis 46 | and fixes azimuth 47 | 
""" 48 | extents = np.stack([verts.min(0), verts.max(0)], axis=1) 49 | sz = extents[:, 1] - extents[:, 0] 50 | centers = np.mean(extents, axis=1) 51 | maxsize = max(abs(sz)) 52 | r = maxsize / 2 53 | if flip_x: 54 | ax.set_xlim(centers[0] + r, centers[0] - r) 55 | else: 56 | ax.set_xlim(centers[0] - r, centers[0] + r) 57 | # Invert y and z axis 58 | ax.set_ylim(centers[1] + r, centers[1] - r) 59 | ax.set_zlim(centers[2] + r, centers[2] - r) 60 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/manopth/rot6d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def compute_rotation_matrix_from_ortho6d(poses): 5 | """ 6 | Code from 7 | https://github.com/papagina/RotationContinuity 8 | On the Continuity of Rotation Representations in Neural Networks 9 | Zhou et al. CVPR19 10 | https://zhouyisjtu.github.io/project_rotation/rotation.html 11 | """ 12 | x_raw = poses[:, 0:3] # batch*3 13 | y_raw = poses[:, 3:6] # batch*3 14 | 15 | x = normalize_vector(x_raw) # batch*3 16 | z = cross_product(x, y_raw) # batch*3 17 | z = normalize_vector(z) # batch*3 18 | y = cross_product(z, x) # batch*3 19 | 20 | x = x.view(-1, 3, 1) 21 | y = y.view(-1, 3, 1) 22 | z = z.view(-1, 3, 1) 23 | matrix = torch.cat((x, y, z), 2) # batch*3*3 24 | return matrix 25 | 26 | def robust_compute_rotation_matrix_from_ortho6d(poses): 27 | """ 28 | Instead of making 2nd vector orthogonal to first 29 | create a base that takes into account the two predicted 30 | directions equally 31 | """ 32 | x_raw = poses[:, 0:3] # batch*3 33 | y_raw = poses[:, 3:6] # batch*3 34 | 35 | x = normalize_vector(x_raw) # batch*3 36 | y = normalize_vector(y_raw) # batch*3 37 | middle = normalize_vector(x + y) 38 | orthmid = normalize_vector(x - y) 39 | x = normalize_vector(middle + orthmid) 40 | y = normalize_vector(middle - orthmid) 41 | # Their scalar product should be small ! 42 | # assert torch.einsum("ij,ij->i", [x, y]).abs().max() < 0.00001 43 | z = normalize_vector(cross_product(x, y)) 44 | 45 | x = x.view(-1, 3, 1) 46 | y = y.view(-1, 3, 1) 47 | z = z.view(-1, 3, 1) 48 | matrix = torch.cat((x, y, z), 2) # batch*3*3 49 | # Check for reflection in matrix ! 
If found, flip last vector TODO 50 | assert (torch.stack([torch.det(mat) for mat in matrix ])< 0).sum() == 0 51 | return matrix 52 | 53 | 54 | def normalize_vector(v): 55 | batch = v.shape[0] 56 | v_mag = torch.sqrt(v.pow(2).sum(1)) # batch 57 | v_mag = torch.max(v_mag, v.new([1e-8])) 58 | v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1]) 59 | v = v/v_mag 60 | return v 61 | 62 | 63 | def cross_product(u, v): 64 | batch = u.shape[0] 65 | i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1] 66 | j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2] 67 | k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0] 68 | 69 | out = torch.cat((i.view(batch, 1), j.view(batch, 1), k.view(batch, 1)), 1) 70 | 71 | return out 72 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/manopth/rotproj.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def batch_rotprojs(batches_rotmats): 5 | proj_rotmats = [] 6 | for batch_idx, batch_rotmats in enumerate(batches_rotmats): 7 | proj_batch_rotmats = [] 8 | for rot_idx, rotmat in enumerate(batch_rotmats): 9 | # GPU implementation of svd is VERY slow 10 | # ~ 2 10^-3 per hit vs 5 10^-5 on cpu 11 | U, S, V = rotmat.cpu().svd() 12 | rotmat = torch.matmul(U, V.transpose(0, 1)) 13 | orth_det = rotmat.det() 14 | # Remove reflection 15 | if orth_det < 0: 16 | rotmat[:, 2] = -1 * rotmat[:, 2] 17 | 18 | rotmat = rotmat.cuda() 19 | proj_batch_rotmats.append(rotmat) 20 | proj_rotmats.append(torch.stack(proj_batch_rotmats)) 21 | return torch.stack(proj_rotmats) 22 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/manopth/tensutils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from common.utils.manopth.manopth import rodrigues_layer 4 | 5 | 6 | def th_posemap_axisang(pose_vectors): 7 | rot_nb = int(pose_vectors.shape[1] / 3) 8 | pose_vec_reshaped = pose_vectors.contiguous().view(-1, 3) 9 | rot_mats = rodrigues_layer.batch_rodrigues(pose_vec_reshaped) 10 | rot_mats = rot_mats.view(pose_vectors.shape[0], rot_nb * 9) 11 | pose_maps = subtract_flat_id(rot_mats) 12 | return pose_maps, rot_mats 13 | 14 | 15 | def th_with_zeros(tensor): 16 | batch_size = tensor.shape[0] 17 | padding = tensor.new([0.0, 0.0, 0.0, 1.0]) 18 | padding.requires_grad = False 19 | 20 | concat_list = [tensor, padding.view(1, 1, 4).repeat(batch_size, 1, 1)] 21 | cat_res = torch.cat(concat_list, 1) 22 | return cat_res 23 | 24 | 25 | def th_pack(tensor): 26 | batch_size = tensor.shape[0] 27 | padding = tensor.new_zeros((batch_size, 4, 3)) 28 | padding.requires_grad = False 29 | pack_list = [padding, tensor] 30 | pack_res = torch.cat(pack_list, 2) 31 | return pack_res 32 | 33 | 34 | def subtract_flat_id(rot_mats): 35 | # Subtracts identity as a flattened tensor 36 | rot_nb = int(rot_mats.shape[1] / 9) 37 | id_flat = torch.eye(3, dtype=rot_mats.dtype, device=rot_mats.device).view(1, 9).repeat(rot_mats.shape[0], rot_nb) 38 | # id_flat.requires_grad = False 39 | results = rot_mats - id_flat 40 | return results 41 | 42 | 43 | def make_list(tensor): 44 | # type: (List[int]) -> List[int] 45 | return tensor 46 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import find_packages, setup 2 | import 
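A quick property check for compute_rotation_matrix_from_ortho6d() in rot6d.py above: for generic 6D inputs the output is a proper rotation, i.e. orthonormal with determinant +1 (illustrative only):

import torch

poses = torch.rand(4, 6)                         # random 6D rotation parameters
R = compute_rotation_matrix_from_ortho6d(poses)  # shape (4, 3, 3)
eye = torch.eye(3).expand(4, 3, 3)
assert torch.allclose(R.transpose(1, 2) @ R, eye, atol=1e-5)
assert torch.allclose(torch.det(R), torch.ones(4), atol=1e-5)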
warnings 3 | 4 | DEPENDENCY_PACKAGE_NAMES = ["matplotlib", "torch", "tqdm", "numpy", "cv2", 5 | "chumpy"] 6 | 7 | 8 | def check_dependencies(): 9 | missing_dependencies = [] 10 | for package_name in DEPENDENCY_PACKAGE_NAMES: 11 | try: 12 | __import__(package_name) 13 | except ImportError: 14 | missing_dependencies.append(package_name) 15 | 16 | if missing_dependencies: 17 | warnings.warn( 18 | 'Missing dependencies: {}. We recommend you follow ' 19 | 'the installation instructions at ' 20 | 'https://github.com/hassony2/manopth#installation'.format( 21 | missing_dependencies)) 22 | 23 | 24 | with open("README.md", "r") as fh: 25 | long_description = fh.read() 26 | 27 | check_dependencies() 28 | 29 | setup( 30 | name="manopth", 31 | version="0.0.1", 32 | author="Yana Hasson", 33 | author_email="yana.hasson.inria@gmail.com", 34 | packages=find_packages(exclude=('tests',)), 35 | python_requires=">=3.5.0", 36 | description="PyTorch mano layer", 37 | long_description=long_description, 38 | long_description_content_type="text/markdown", 39 | url="https://github.com/hassony2/manopth", 40 | classifiers=[ 41 | "Programming Language :: Python :: 3", 42 | "License :: OSI Approved :: GNU GENERAL PUBLIC LICENSE", 43 | "Operating System :: OS Independent", 44 | ], 45 | ) 46 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/manopth/test/test_demo.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from manopth.demo import generate_random_hand 4 | 5 | 6 | def test_generate_random_hand(): 7 | batch_size = 3 8 | hand_info = generate_random_hand(batch_size=batch_size, ncomps=6) 9 | verts = hand_info['verts'] 10 | joints = hand_info['joints'] 11 | assert verts.shape == (batch_size, 778, 3) 12 | assert joints.shape == (batch_size, 21, 3) 13 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smpl.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import os.path as osp 4 | import json 5 | from config import cfg 6 | 7 | import sys 8 | sys.path.insert(0, cfg.smpl_path) 9 | from smplpytorch.pytorch.smpl_layer import SMPL_Layer 10 | 11 | class SMPL(object): 12 | def __init__(self): 13 | self.layer = {'neutral': self.get_layer(), 'male': self.get_layer('male'), 'female': self.get_layer('female')} 14 | self.vertex_num = 6890 15 | self.face = self.layer['neutral'].th_faces.numpy() 16 | self.joint_regressor = self.layer['neutral'].th_J_regressor.numpy() 17 | 18 | # add nose, L/R eye, L/R ear 19 | self.face_kps_vertex = (331, 2802, 6262, 3489, 3990) # mesh vertex idx 20 | nose_onehot = np.array([1 if i == 331 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) 21 | left_eye_onehot = np.array([1 if i == 2802 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) 22 | right_eye_onehot = np.array([1 if i == 6262 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) 23 | left_ear_onehot = np.array([1 if i == 3489 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) 24 | right_ear_onehot = np.array([1 if i == 3990 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) 25 | self.joint_regressor = np.concatenate((self.joint_regressor, nose_onehot, left_eye_onehot, right_eye_onehot, left_ear_onehot, 
right_ear_onehot)) 26 | 27 | self.joint_num = 29 # original: 24. manually add nose, L/R eye, L/R ear 28 | self.joints_name = ('Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', 'Neck', 'L_Thorax', 'R_Thorax', 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear') 29 | self.flip_pairs = ( (1,2), (4,5), (7,8), (10,11), (13,14), (16,17), (18,19), (20,21), (22,23) , (25,26), (27,28) ) 30 | self.skeleton = ( (0,1), (1,4), (4,7), (7,10), (0,2), (2,5), (5,8), (8,11), (0,3), (3,6), (6,9), (9,14), (14,17), (17,19), (19, 21), (21,23), (9,13), (13,16), (16,18), (18,20), (20,22), (9,12), (12,24), (24,15), (24,25), (24,26), (25,27), (26,28) ) 31 | self.root_joint_idx = self.joints_name.index('Pelvis') 32 | 33 | def get_layer(self, gender='neutral'): 34 | return SMPL_Layer(gender=gender, model_root=cfg.smpl_path + '/smplpytorch/native/models') 35 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/assets/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/common/utils/smplpytorch/assets/image.png -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/demo.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from smplpytorch.pytorch.smpl_layer import SMPL_Layer 4 | from display_utils import display_model 5 | 6 | 7 | if __name__ == '__main__': 8 | cuda = False 9 | batch_size = 1 10 | 11 | # Create the SMPL layer 12 | smpl_layer = SMPL_Layer( 13 | center_idx=0, 14 | gender='neutral', 15 | model_root='smplpytorch/native/models') 16 | 17 | # Generate random pose and shape parameters 18 | pose_params = torch.rand(batch_size, 72) * 0.2 19 | shape_params = torch.rand(batch_size, 10) * 0.03 20 | 21 | # GPU mode 22 | if cuda: 23 | pose_params = pose_params.cuda() 24 | shape_params = shape_params.cuda() 25 | smpl_layer.cuda() 26 | 27 | # Forward from the SMPL layer 28 | verts, Jtr = smpl_layer(pose_params, th_betas=shape_params) 29 | 30 | # Draw output vertices and joints 31 | display_model( 32 | {'verts': verts.cpu().detach(), 33 | 'joints': Jtr.cpu().detach()}, 34 | model_faces=smpl_layer.th_faces, 35 | with_joints=True, 36 | kintree_table=smpl_layer.kintree_table, 37 | savepath='image.png', 38 | show=True) 39 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/environment.yml: -------------------------------------------------------------------------------- 1 | name: smplpytorch 2 | 3 | dependencies: 4 | - opencv 5 | - python=3.7 6 | - matplotlib 7 | - numpy 8 | - pytorch 9 | - pip 10 | - pip: 11 | - git+https://github.com/hassony2/chumpy.git 12 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/common/utils/smplpytorch/image.png -------------------------------------------------------------------------------- 
/HandReconstruction/common/utils/smplpytorch/setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | with open("README.md", "r") as fh: 4 | long_description = fh.read() 5 | 6 | REQUIREMENTS = [ 7 | "opencv-python", 8 | "matplotlib", 9 | "numpy", 10 | "torch", 11 | "chumpy @ git+ssh://git@github.com/hassony2/chumpy"] 12 | 13 | setuptools.setup( 14 | name="smplpytorch", 15 | version="0.0.1", 16 | author="Gul Varol", 17 | author_email="gulvarols@gmail.com", 18 | python_requires=">=3.5.0", 19 | install_requires=REQUIREMENTS, 20 | description="SMPL human body layer for PyTorch is a differentiable PyTorch layer", 21 | long_description=long_description, 22 | long_description_content_type="text/markdown", 23 | url="https://github.com/gulvarol/smplpytorch", 24 | packages=setuptools.find_packages(), 25 | classifiers=[ 26 | "Programming Language :: Python :: 3", 27 | "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", 28 | "Operating System :: OS Independent", 29 | ], 30 | ) 31 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/smplpytorch/__init__.py: -------------------------------------------------------------------------------- 1 | name = "smplpytorch" 2 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/smplpytorch/native/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/common/utils/smplpytorch/smplpytorch/native/__init__.py -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/smplpytorch/native/models/README.md: -------------------------------------------------------------------------------- 1 | Here copy the .pkl model files. 
2 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/smplpytorch/native/webuser/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/common/utils/smplpytorch/smplpytorch/native/webuser/__init__.py -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/smplpytorch/native/webuser/posemapper.py: -------------------------------------------------------------------------------- 1 | import chumpy as ch 2 | import numpy as np 3 | import cv2 4 | 5 | 6 | class Rodrigues(ch.Ch): 7 | dterms = 'rt' 8 | 9 | def compute_r(self): 10 | return cv2.Rodrigues(self.rt.r)[0] 11 | 12 | def compute_dr_wrt(self, wrt): 13 | if wrt is self.rt: 14 | return cv2.Rodrigues(self.rt.r)[1].T 15 | 16 | 17 | def lrotmin(p): 18 | if isinstance(p, np.ndarray): 19 | p = p.ravel()[3:] 20 | return np.concatenate([(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel() for pp in p.reshape((-1, 3))]).ravel() 21 | if p.ndim != 2 or p.shape[1] != 3: 22 | p = p.reshape((-1, 3)) 23 | p = p[1:] 24 | return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() for pp in p]).ravel() 25 | 26 | 27 | def posemap(s): 28 | if s == 'lrotmin': 29 | return lrotmin 30 | else: 31 | raise Exception('Unknown posemapping: %s' % (str(s),)) 32 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/smplpytorch/native/webuser/serialization.py: -------------------------------------------------------------------------------- 1 | def ready_arguments(fname_or_dict): 2 | import numpy as np 3 | import pickle 4 | import chumpy as ch 5 | from chumpy.ch import MatVecMult 6 | from smplpytorch.native.webuser.posemapper import posemap 7 | 8 | if not isinstance(fname_or_dict, dict): 9 | dd = pickle.load(open(fname_or_dict, 'rb'), encoding='latin1') 10 | # dd = pickle.load(open(fname_or_dict, 'rb')) 11 | else: 12 | dd = fname_or_dict 13 | 14 | want_shapemodel = 'shapedirs' in dd 15 | nposeparms = dd['kintree_table'].shape[1] * 3 16 | 17 | if 'trans' not in dd: 18 | dd['trans'] = np.zeros(3) 19 | if 'pose' not in dd: 20 | dd['pose'] = np.zeros(nposeparms) 21 | if 'shapedirs' in dd and 'betas' not in dd: 22 | dd['betas'] = np.zeros(dd['shapedirs'].shape[-1]) 23 | 24 | for s in ['v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', 'betas', 'J']: 25 | if (s in dd) and not hasattr(dd[s], 'dterms'): 26 | dd[s] = ch.array(dd[s]) 27 | 28 | if want_shapemodel: 29 | dd['v_shaped'] = dd['shapedirs'].dot(dd['betas']) + dd['v_template'] 30 | v_shaped = dd['v_shaped'] 31 | J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:, 0]) 32 | J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:, 1]) 33 | J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:, 2]) 34 | dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T 35 | dd['v_posed'] = v_shaped + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose'])) 36 | else: 37 | dd['v_posed'] = dd['v_template'] + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose'])) 38 | 39 | return dd 40 | -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/smplpytorch/pytorch/__init__.py: -------------------------------------------------------------------------------- 
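lrotmin() in posemapper.py above maps a 72-dim axis-angle SMPL pose to the 207-dim pose-blendshape feature: the 3 global-rotation dims are dropped and each of the remaining 23 joint rotations contributes a flattened (R - I), i.e. 23 x 9 = 207 values. A shape check (illustrative):

import numpy as np

pose = np.zeros(72)  # 24 joints x 3 axis-angle dims, rest pose
feat = lrotmin(pose)
assert feat.shape == (207,) and np.allclose(feat, 0)  # R = I at rest pose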
https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/common/utils/smplpytorch/smplpytorch/pytorch/__init__.py -------------------------------------------------------------------------------- /HandReconstruction/common/utils/smplpytorch/smplpytorch/pytorch/tensutils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from smplpytorch.pytorch import rodrigues_layer 4 | 5 | 6 | def th_posemap_axisang(pose_vectors): 7 | ''' 8 | Converts axis-angle to rotmat 9 | pose_vectors (Tensor (batch_size x 72)): pose parameters in axis-angle representation 10 | ''' 11 | rot_nb = int(pose_vectors.shape[1] / 3) 12 | rot_mats = [] 13 | for joint_idx in range(rot_nb): 14 | axis_ang = pose_vectors[:, joint_idx * 3:(joint_idx + 1) * 3] 15 | rot_mat = rodrigues_layer.batch_rodrigues(axis_ang) 16 | rot_mats.append(rot_mat) 17 | 18 | rot_mats = torch.cat(rot_mats, 1) 19 | return rot_mats 20 | 21 | 22 | def th_with_zeros(tensor): 23 | batch_size = tensor.shape[0] 24 | padding = tensor.new([0.0, 0.0, 0.0, 1.0]) 25 | padding.requires_grad = False 26 | 27 | concat_list = [tensor, padding.view(1, 1, 4).repeat(batch_size, 1, 1)] 28 | cat_res = torch.cat(concat_list, 1) 29 | return cat_res 30 | 31 | 32 | def th_pack(tensor): 33 | batch_size = tensor.shape[0] 34 | padding = tensor.new_zeros((batch_size, 4, 3)) 35 | padding.requires_grad = False 36 | pack_list = [padding, tensor] 37 | pack_res = torch.cat(pack_list, 2) 38 | return pack_res 39 | 40 | 41 | def subtract_flat_id(rot_mats): 42 | # Subtracts identity as a flattened tensor 43 | id_flat = torch.eye( 44 | 3, dtype=rot_mats.dtype, device=rot_mats.device).view(1, 9).repeat( 45 | rot_mats.shape[0], 23) 46 | # id_flat.requires_grad = False 47 | results = rot_mats - id_flat 48 | return results 49 | 50 | 51 | def make_list(tensor): 52 | # type: (List[int]) -> List[int] 53 | return tensor 54 | -------------------------------------------------------------------------------- /HandReconstruction/data_loader/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/data_loader/__init__.py -------------------------------------------------------------------------------- /HandReconstruction/data_loader/data_loader.py: -------------------------------------------------------------------------------- 1 | import random 2 | import torch 3 | import os 4 | import numpy as np 5 | from multiprocessing import cpu_count 6 | from torch.utils.data import DataLoader 7 | 8 | from data_loader.HO3D import HO3D 9 | from data_loader.DEX_YCB import DEX_YCB 10 | 11 | from data_loader.transforms import fetch_transforms 12 | 13 | 14 | def worker_init_fn(worker_id): 15 | rand_seed = random.randint(0, 2**32 - 1) 16 | random.seed(rand_seed) 17 | np.random.seed(rand_seed) 18 | torch.manual_seed(rand_seed) 19 | torch.cuda.manual_seed(rand_seed) 20 | torch.cuda.manual_seed_all(rand_seed) 21 | 22 | 23 | def fetch_dataloader(cfg): 24 | # Train and test transforms 25 | train_transforms, test_transforms = fetch_transforms(cfg) 26 | # Train dataset 27 | train_ds = eval(cfg.data.name)(cfg, train_transforms, 'train') 28 | # Val dataset 29 | if 'val' in cfg.data.eval_type: 30 | val_ds = eval(cfg.data.name)(cfg, test_transforms, 'val') 31 | elif 'train' in cfg.data.eval_type: 32 | val_ds = eval(cfg.data.name)(cfg, test_transforms, 
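th_posemap_axisang() in tensutils.py above converts a batch of 72-dim axis-angle SMPL poses into 24 flattened per-joint rotation matrices via rodrigues_layer.batch_rodrigues; unlike the manopth variant earlier in this listing, this version returns only the rotation matrices. A shape sketch (illustrative):

import torch

pose = torch.zeros(2, 72)            # 24 joints x 3 axis-angle dims
rot_mats = th_posemap_axisang(pose)  # shape (2, 216) = 24 flattened 3x3 matrices
assert torch.allclose(rot_mats.view(2, 24, 3, 3)[0, 0], torch.eye(3))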
'train') 33 | # Test dataset 34 | if 'test' in cfg.data.eval_type: 35 | test_ds = eval(cfg.data.name)(cfg, test_transforms, 'test') 36 | 37 | # Data loader 38 | train_dl = DataLoader(train_ds, batch_size=cfg.train.batch_size, num_workers=cpu_count(), pin_memory=True, shuffle=True, drop_last=True, worker_init_fn=worker_init_fn) 39 | 40 | if 'val' in cfg.data.eval_type: 41 | val_dl = DataLoader(val_ds, batch_size=cfg.test.batch_size, num_workers=cpu_count(), pin_memory=True, shuffle=False) 42 | elif 'train' in cfg.data.eval_type: 43 | val_dl = DataLoader(val_ds, batch_size=cfg.test.batch_size, num_workers=cpu_count(), pin_memory=True, shuffle=False)  # val_ds is the train split built with test transforms (no augmentation) 44 | else: 45 | val_dl = None 46 | 47 | if 'test' in cfg.data.eval_type: 48 | test_dl = DataLoader(test_ds, batch_size=cfg.test.batch_size, num_workers=cpu_count(), pin_memory=True, shuffle=False) 49 | else: 50 | test_dl = None 51 | 52 | dl, ds = {}, {} 53 | dl['train'], ds['train'] = train_dl, train_ds 54 | if val_dl is not None: 55 | dl['val'], ds['val'] = val_dl, val_ds 56 | if test_dl is not None: 57 | dl['test'], ds['test'] = test_dl, test_ds 58 | 59 | return dl, ds 60 | -------------------------------------------------------------------------------- /HandReconstruction/data_loader/transforms.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import torch 3 | import torchvision 4 | import numpy as np 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | class Template: 10 | 11 | def __init__(self, a=0.01, b=0.05): 12 | self.a = a 13 | self.b = b 14 | 15 | def __call__(self, sample): 16 | 17 | return sample 18 | 19 | 20 | def fetch_transforms(cfg): 21 | 22 | train_transforms = [torchvision.transforms.ToTensor()] 23 | test_transforms = [torchvision.transforms.ToTensor()] 24 | 25 | logger.info("Train transforms: {}".format(", ".join([type(t).__name__ for t in train_transforms]))) 26 | logger.info("Val/Test transforms: {}".format(", ".join([type(t).__name__ for t in test_transforms]))) 27 | train_transforms = torchvision.transforms.Compose(train_transforms) 28 | test_transforms = torchvision.transforms.Compose(test_transforms) 29 | return train_transforms, test_transforms 30 | -------------------------------------------------------------------------------- /HandReconstruction/experiment/dexycb_s0/h2onet/cfg.json: -------------------------------------------------------------------------------- 1 | { 2 | "base": { 3 | "exp_name": "dexycb_s0.h2onet", 4 | "model_dir": "experiment/dexycb_s0/h2onet" 5 | }, 6 | "data": { 7 | "name": "DEX_YCB", 8 | "eval_type": [ 9 | "test" 10 | ], 11 | "input_img_shape": [ 12 | 128, 13 | 128 14 | ] 15 | }, 16 | "model": { 17 | "name": "h2onet", 18 | "pretrain": true 19 | }, 20 | "loss": { 21 | "w_gr": true, 22 | "name": "h2onet" 23 | }, 24 | "metric": { 25 | "major_metric": "score" 26 | }, 27 | "train": { 28 | "num_epochs": 38, 29 | "batch_size": 32, 30 | "aug": true 31 | }, 32 | "test": { 33 | "batch_size": 48 34 | }, 35 | "summary": { 36 | "save_summary_steps": 50, 37 | "save_latest_freq": 1, 38 | "save_best_after": 0 39 | }, 40 | "optimizer": { 41 | "name": "adam", 42 | "lr": 1e-4 43 | }, 44 | "scheduler": { 45 | "name": "step", 46 | "milestones": [ 47 | 30 48 | ], 49 | "gamma": 0.1 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /HandReconstruction/experiment/dexycb_s0/handoccnet/cfg.json: -------------------------------------------------------------------------------- 1 | { 2 | "base": { 3 |
"exp_name": "dexycb_s0.handoccnet", 4 | "model_dir": "experiment/dexycb_s0/handoccnet" 5 | }, 6 | "data": { 7 | "name": "DEX_YCB", 8 | "eval_type": [ 9 | "test" 10 | ], 11 | "input_img_shape": [ 12 | 256, 13 | 256 14 | ] 15 | }, 16 | "model": { 17 | "name": "hand_occ_net", 18 | "pretrain": true 19 | }, 20 | "loss": { 21 | "name": "hand_occ_net", 22 | "lambda_mano_verts": 1e4, 23 | "lambda_mano_joints": 1e4, 24 | "lambda_mano_pose": 10, 25 | "lambda_mano_shape": 0.1, 26 | "lambda_joints_img": 100 27 | }, 28 | "metric": { 29 | "major_metric": "score" 30 | }, 31 | "train": { 32 | "num_epochs": 50, 33 | "batch_size": 32, 34 | "grad_norm_clip": 0.1, 35 | "aug": true 36 | }, 37 | "test": { 38 | "batch_size": 48, 39 | "num_workers": 20 40 | }, 41 | "summary": { 42 | "save_summary_steps": 50, 43 | "save_latest_freq": 1, 44 | "save_best_after": 0 45 | }, 46 | "optimizer": { 47 | "name": "adam", 48 | "lr": 1e-4 49 | }, 50 | "scheduler": { 51 | "name": "step", 52 | "milestones": [ 53 | 2, 54 | 4, 55 | 6, 56 | 8, 57 | 10, 58 | 12, 59 | 14, 60 | 16, 61 | 18, 62 | 20, 63 | 22, 64 | 24, 65 | 26, 66 | 28, 67 | 30, 68 | 32, 69 | 34, 70 | 36, 71 | 38, 72 | 40, 73 | 42, 74 | 44, 75 | 46, 76 | 48 77 | ], 78 | "gamma": 0.9 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /HandReconstruction/experiment/dexycb_s0/mobrecon/cfg.json: -------------------------------------------------------------------------------- 1 | { 2 | "base": { 3 | "exp_name": "dexycb_s0.mobrecon", 4 | "model_dir": "experiment/dexycb_s0/mobrecon" 5 | }, 6 | "data": { 7 | "name": "DEX_YCB", 8 | "eval_type": [ 9 | "test" 10 | ], 11 | "input_img_shape": [ 12 | 128, 13 | 128 14 | ] 15 | }, 16 | "model": { 17 | "name": "mobrecon", 18 | "pretrain": true 19 | }, 20 | "loss": { 21 | "name": "mobrecon" 22 | }, 23 | "metric": { 24 | "major_metric": "score" 25 | }, 26 | "train": { 27 | "num_epochs": 76, 28 | "batch_size": 32, 29 | "grad_norm_clip": 0.1, 30 | "aug": true 31 | }, 32 | "test": { 33 | "batch_size": 64 34 | }, 35 | "summary": { 36 | "save_summary_steps": 50, 37 | "save_latest_freq": 1, 38 | "save_best_after": 0 39 | }, 40 | "optimizer": { 41 | "name": "adam", 42 | "lr": 1e-3 43 | }, 44 | "scheduler": { 45 | "name": "step", 46 | "milestones": [ 47 | 60 48 | ], 49 | "gamma": 0.1 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /HandReconstruction/experiment/dexycb_s1/h2onet/cfg.json: -------------------------------------------------------------------------------- 1 | { 2 | "base": { 3 | "exp_name": "dexycb_s1.h2onet", 4 | "model_dir": "experiment/dexycb_s1/h2onet" 5 | }, 6 | "data": { 7 | "name": "DEX_YCB", 8 | "eval_type": [ 9 | "test" 10 | ], 11 | "input_img_shape": [ 12 | 128, 13 | 128 14 | ] 15 | }, 16 | "model": { 17 | "name": "h2onet", 18 | "pretrain": true 19 | }, 20 | "loss": { 21 | "w_gr": true, 22 | "name": "h2onet" 23 | }, 24 | "metric": { 25 | "major_metric": "score" 26 | }, 27 | "train": { 28 | "num_epochs": 76, 29 | "batch_size": 32, 30 | "aug": true 31 | }, 32 | "test": { 33 | "batch_size": 48 34 | }, 35 | "summary": { 36 | "save_summary_steps": 50, 37 | "save_latest_freq": 1, 38 | "save_best_after": 0 39 | }, 40 | "optimizer": { 41 | "name": "adam", 42 | "lr": 1e-4 43 | }, 44 | "scheduler": { 45 | "name": "step", 46 | "milestones": [ 47 | 60 48 | ], 49 | "gamma": 0.1 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /HandReconstruction/experiment/dexycb_s1/handoccnet/cfg.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "base": { 3 | "exp_name": "dexycb_s1.handoccnet", 4 | "model_dir": "experiment/dexycb_s1/handoccnet" 5 | }, 6 | "data": { 7 | "name": "DEX_YCB", 8 | "eval_type": [ 9 | "test" 10 | ], 11 | "input_img_shape": [ 12 | 256, 13 | 256 14 | ] 15 | }, 16 | "model": { 17 | "name": "hand_occ_net", 18 | "pretrain": true 19 | }, 20 | "loss": { 21 | "name": "hand_occ_net", 22 | "lambda_mano_verts": 1e4, 23 | "lambda_mano_joints": 1e4, 24 | "lambda_mano_pose": 10, 25 | "lambda_mano_shape": 0.1, 26 | "lambda_joints_img": 100 27 | }, 28 | "metric": { 29 | "major_metric": "score" 30 | }, 31 | "train": { 32 | "num_epochs": 50, 33 | "batch_size": 32, 34 | "grad_norm_clip": 0.1, 35 | "aug": true 36 | }, 37 | "test": { 38 | "batch_size": 48 39 | }, 40 | "summary": { 41 | "save_summary_steps": 50, 42 | "save_latest_freq": 1, 43 | "save_best_after": 0 44 | }, 45 | "optimizer": { 46 | "name": "adam", 47 | "lr": 1e-4 48 | }, 49 | "scheduler": { 50 | "name": "step", 51 | "milestones": [ 52 | 2, 53 | 4, 54 | 6, 55 | 8, 56 | 10, 57 | 12, 58 | 14, 59 | 16, 60 | 18, 61 | 20, 62 | 22, 63 | 24, 64 | 26, 65 | 28, 66 | 30, 67 | 32, 68 | 34, 69 | 36, 70 | 38, 71 | 40, 72 | 42, 73 | 44, 74 | 46, 75 | 48 76 | ], 77 | "gamma": 0.9 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /HandReconstruction/experiment/dexycb_s1/mobrecon/cfg.json: -------------------------------------------------------------------------------- 1 | { 2 | "base": { 3 | "exp_name": "dexycb_s1.mobrecon", 4 | "model_dir": "experiment/dexycb_s1/mobrecon" 5 | }, 6 | "data": { 7 | "name": "DEX_YCB", 8 | "eval_type": [ 9 | "test" 10 | ], 11 | "input_img_shape": [ 12 | 128, 13 | 128 14 | ] 15 | }, 16 | "model": { 17 | "name": "mobrecon", 18 | "pretrain": true 19 | }, 20 | "loss": { 21 | "name": "mobrecon" 22 | }, 23 | "metric": { 24 | "major_metric": "score" 25 | }, 26 | "train": { 27 | "num_epochs": 76, 28 | "batch_size": 32, 29 | "grad_norm_clip": 0.1, 30 | "aug": true 31 | }, 32 | "test": { 33 | "batch_size": 64 34 | }, 35 | "summary": { 36 | "save_summary_steps": 50, 37 | "save_latest_freq": 1, 38 | "save_best_after": 0 39 | }, 40 | "optimizer": { 41 | "name": "adam", 42 | "lr": 1e-3 43 | }, 44 | "scheduler": { 45 | "name": "step", 46 | "milestones": [ 47 | 60 48 | ], 49 | "gamma": 0.1 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /HandReconstruction/experiment/ho3d/h2onet/cfg.json: -------------------------------------------------------------------------------- 1 | { 2 | "base": { 3 | "exp_name": "ho3d.h2onet", 4 | "model_dir": "experiment/ho3d/h2onet" 5 | }, 6 | "data": { 7 | "name": "HO3D", 8 | "data_split": "train", 9 | "eval_type": [ 10 | "test" 11 | ], 12 | "input_img_shape": [ 13 | 128, 14 | 128 15 | ] 16 | }, 17 | "model": { 18 | "name": "h2onet", 19 | "pretrain": true 20 | }, 21 | "loss": { 22 | "w_gr": true, 23 | "name": "h2onet" 24 | }, 25 | "metric": { 26 | "major_metric": "score" 27 | }, 28 | "train": { 29 | "num_epochs": 38, 30 | "batch_size": 32, 31 | "aug": true 32 | }, 33 | "test": { 34 | "batch_size": 48 35 | }, 36 | "summary": { 37 | "save_summary_steps": 50, 38 | "save_latest_freq": 1, 39 | "save_best_after": 0 40 | }, 41 | "optimizer": { 42 | "name": "adam", 43 | "lr": 1e-4 44 | }, 45 | "scheduler": { 46 | "name": "step", 47 | "milestones": [ 48 | 30 49 | ], 50 | "gamma": 0.1 51 | } 52 | } 53 | 
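Note on the experiment configs: every cfg.json in this tree follows the same schema (base / data / model / loss / metric / train / test / summary / optimizer / scheduler), and the code reads it through attribute access (cfg.data.name, cfg.optimizer.lr, as in fetch_dataloader above). The repository's actual config loader is not reproduced in this dump; the snippet below is only a minimal sketch of that pattern, and the DotDict helper is a hypothetical stand-in, not project code:

import json

class DotDict(dict):
    # Hypothetical helper: a dict that also supports attribute access, built recursively.
    __getattr__ = dict.__getitem__

    @classmethod
    def from_obj(cls, obj):
        if isinstance(obj, dict):
            return cls({k: cls.from_obj(v) for k, v in obj.items()})
        return obj

with open("experiment/ho3d/h2onet/cfg.json") as f:
    cfg = DotDict.from_obj(json.load(f))

print(cfg.data.name, cfg.train.batch_size, cfg.scheduler.milestones)  # HO3D 32 [30]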
-------------------------------------------------------------------------------- /HandReconstruction/experiment/ho3d/handoccnet/cfg.json: -------------------------------------------------------------------------------- 1 | { 2 | "base": { 3 | "exp_name": "ho3d.handoccnet", 4 | "model_dir": "experiment/ho3d/handoccnet" 5 | }, 6 | "data": { 7 | "name": "HO3D", 8 | "eval_type": [ 9 | "test" 10 | ], 11 | "input_img_shape": [ 12 | 256, 13 | 256 14 | ] 15 | }, 16 | "model": { 17 | "name": "hand_occ_net", 18 | "pretrain": true 19 | }, 20 | "loss": { 21 | "name": "hand_occ_net", 22 | "lambda_mano_verts": 1e4, 23 | "lambda_mano_joints": 1e4, 24 | "lambda_mano_pose": 10, 25 | "lambda_mano_shape": 0.1, 26 | "lambda_joints_img": 100 27 | }, 28 | "metric": { 29 | "major_metric": "score" 30 | }, 31 | "train": { 32 | "num_epochs": 70, 33 | "batch_size": 64, 34 | "grad_norm_clip": 0.1, 35 | "aug": true 36 | }, 37 | "test": { 38 | "batch_size": 64 39 | }, 40 | "summary": { 41 | "save_summary_steps": 50, 42 | "save_latest_freq": 1, 43 | "save_best_after": 0 44 | }, 45 | "optimizer": { 46 | "name": "adam", 47 | "lr": 1e-4 48 | }, 49 | "scheduler": { 50 | "name": "step", 51 | "milestones": [ 52 | 10, 53 | 20, 54 | 30, 55 | 40, 56 | 50, 57 | 60 58 | ], 59 | "gamma": 0.7 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /HandReconstruction/experiment/ho3d/mobrecon/cfg.json: -------------------------------------------------------------------------------- 1 | { 2 | "base": { 3 | "exp_name": "ho3d.mobrecon", 4 | "model_dir": "experiment/ho3d/mobrecon" 5 | }, 6 | "data": { 7 | "name": "HO3D", 8 | "data_split": "train", 9 | "eval_type": [ 10 | "test" 11 | ], 12 | "input_img_shape": [ 13 | 128, 14 | 128 15 | ] 16 | }, 17 | "model": { 18 | "name": "mobrecon", 19 | "pretrain": true 20 | }, 21 | "loss": { 22 | "name": "mobrecon" 23 | }, 24 | "metric": { 25 | "major_metric": "score" 26 | }, 27 | "train": { 28 | "num_epochs": 38, 29 | "batch_size": 32, 30 | "grad_norm_clip": 0.1, 31 | "aug": true 32 | }, 33 | "test": { 34 | "batch_size": 48 35 | }, 36 | "summary": { 37 | "save_summary_steps": 50, 38 | "save_latest_freq": 1, 39 | "save_best_after": 0 40 | }, 41 | "optimizer": { 42 | "name": "adam", 43 | "lr": 1e-3 44 | }, 45 | "scheduler": { 46 | "name": "step", 47 | "milestones": [ 48 | 30 49 | ], 50 | "gamma": 0.1 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /HandReconstruction/loss/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/loss/__init__.py -------------------------------------------------------------------------------- /HandReconstruction/model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/model/__init__.py -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/conv/__init__.py: -------------------------------------------------------------------------------- 1 | from .spiralconv import SpiralConv 2 | from .dsconv import DSConv 3 | 4 | __all__ = [ 5 | 'SpiralConv', 6 | ] 7 | -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/conv/dsconv.py: 
-------------------------------------------------------------------------------- 1 | # Copyright (c) Xingyu Chen. All Rights Reserved. 2 | 3 | """ 4 | * @file dsconv.py 5 | * @author chenxingyu (chenxy.sean@gmail.com) 6 | * @brief Depth-separable spiral convolution 7 | * @version 0.1 8 | * @date 2022-04-28 9 | * 10 | * @copyright Copyright (c) 2022 chenxingyu 11 | * 12 | """ 13 | 14 | import torch 15 | import torch.nn as nn 16 | import numpy as np 17 | 18 | 19 | class DSConv(nn.Module): 20 | def __init__(self, in_channels, out_channels, indices, dim=1): 21 | super(DSConv, self).__init__() 22 | self.dim = dim 23 | self.indices = indices 24 | self.in_channels = in_channels 25 | self.out_channels = out_channels 26 | self.seq_length = indices.size(1) 27 | self.spatial_layer = nn.Conv2d(self.in_channels, self.in_channels, int(np.sqrt(self.seq_length)), 1, 0, groups=self.in_channels, bias=False) 28 | self.channel_layer = nn.Linear(self.in_channels, self.out_channels, bias=False) 29 | torch.nn.init.xavier_uniform_(self.channel_layer.weight) 30 | 31 | def reset_parameters(self): 32 | torch.nn.init.xavier_uniform_(self.spatial_layer.weight) 33 | torch.nn.init.xavier_uniform_(self.channel_layer.weight) 34 | 35 | def forward(self, x): 36 | n_nodes, _ = self.indices.size() 37 | bs = x.size(0) 38 | x = torch.index_select(x, self.dim, self.indices.to(x.device).view(-1)) 39 | x = x.view(bs * n_nodes, self.seq_length, -1).transpose(1, 2) 40 | x = x.view(x.size(0), x.size(1), int(np.sqrt(self.seq_length)), int(np.sqrt(self.seq_length))) 41 | x = self.spatial_layer(x).view(bs, n_nodes, -1) 42 | x = self.channel_layer(x) 43 | 44 | return x 45 | 46 | def __repr__(self): 47 | return '{}({}, {}, seq_length={})'.format(self.__class__.__name__, 48 | self.in_channels, 49 | self.out_channels, 50 | self.seq_length) -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/conv/spiralconv.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class SpiralConv(nn.Module): 6 | def __init__(self, in_channels, out_channels, indices, dim=1): 7 | super(SpiralConv, self).__init__() 8 | self.dim = dim 9 | self.indices = indices 10 | self.in_channels = in_channels 11 | self.out_channels = out_channels 12 | self.seq_length = indices.size(1) 13 | 14 | self.layer = nn.Linear(in_channels * self.seq_length, out_channels) 15 | self.reset_parameters() 16 | 17 | def reset_parameters(self): 18 | torch.nn.init.xavier_uniform_(self.layer.weight) 19 | torch.nn.init.constant_(self.layer.bias, 0) 20 | 21 | def forward(self, x): 22 | n_nodes, _ = self.indices.size() 23 | if x.dim() == 2: 24 | x = torch.index_select(x, 0, self.indices.to(x.device).view(-1)) 25 | x = x.view(n_nodes, -1) 26 | elif x.dim() == 3: 27 | bs = x.size(0) 28 | x = torch.index_select(x, self.dim, self.indices.to(x.device).reshape(-1)) 29 | x = x.view(bs, n_nodes, -1) 30 | else: 31 | raise RuntimeError( 32 | 'x.dim() is expected to be 2 or 3, but received {}'.format( 33 | x.dim())) 34 | x = self.layer(x) 35 | return x 36 | 37 | def __repr__(self): 38 | return '{}({}, {}, seq_length={})'.format(self.__class__.__name__, 39 | self.in_channels, 40 | self.out_channels, 41 | self.seq_length) 42 | -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/template/j_reg.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/model/h2o_net/template/j_reg.npy -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/template/right_faces.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/model/h2o_net/template/right_faces.npy -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/template/transform.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/model/h2o_net/template/transform.pkl -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/template/transform_body.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/model/h2o_net/template/transform_body.pkl -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/utils/alter_pretrain.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from collections import OrderedDict 3 | 4 | path = 'out/Human36M/cmr_pg_h36m_resumepp_lr5/checkpoints/' 5 | old_name = 'checkpoint_best.pt' 6 | new_name = 'cmr_pg_res18_h36m.pt' 7 | new_weight = OrderedDict() 8 | checkpoint = torch.load(path+old_name, map_location='cpu')['model_state_dict'] 9 | 10 | for k, v in checkpoint.items(): 11 | if 'backbone.' not in k and 'reduce.' 
not in k: 12 | new_weight[k] = v 13 | 14 | torch.save(new_weight, path+new_name) 15 | -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/utils/generate_spiral_seq.py: -------------------------------------------------------------------------------- 1 | import openmesh as om 2 | from sklearn.neighbors import KDTree 3 | import numpy as np 4 | 5 | 6 | def _next_ring(mesh, last_ring, other): 7 | res = [] 8 | 9 | def is_new_vertex(idx): 10 | return (idx not in last_ring and idx not in other and idx not in res) 11 | 12 | for vh1 in last_ring: 13 | vh1 = om.VertexHandle(vh1) 14 | after_last_ring = False 15 | for vh2 in mesh.vv(vh1): 16 | if after_last_ring: 17 | if is_new_vertex(vh2.idx()): 18 | res.append(vh2.idx()) 19 | if vh2.idx() in last_ring: 20 | after_last_ring = True 21 | for vh2 in mesh.vv(vh1): 22 | if vh2.idx() in last_ring: 23 | break 24 | if is_new_vertex(vh2.idx()): 25 | res.append(vh2.idx()) 26 | return res 27 | 28 | 29 | def extract_spirals(mesh, seq_length, dilation=1): 30 | # output: spirals.size() = [N, seq_length] 31 | spirals = [] 32 | for vh0 in mesh.vertices(): 33 | reference_one_ring = [] 34 | for vh1 in mesh.vv(vh0): 35 | reference_one_ring.append(vh1.idx()) 36 | spiral = [vh0.idx()] 37 | one_ring = list(reference_one_ring) 38 | last_ring = one_ring 39 | next_ring = _next_ring(mesh, last_ring, spiral) 40 | spiral.extend(last_ring) 41 | while len(spiral) + len(next_ring) < seq_length * dilation: 42 | if len(next_ring) == 0: 43 | break 44 | last_ring = next_ring 45 | next_ring = _next_ring(mesh, last_ring, spiral) 46 | spiral.extend(last_ring) 47 | if len(next_ring) > 0: 48 | spiral.extend(next_ring) 49 | else: 50 | kdt = KDTree(mesh.points(), metric='euclidean') 51 | spiral = kdt.query(np.expand_dims(mesh.points()[spiral[0]], axis=0), k=seq_length * dilation, return_distance=False).tolist() 52 | spiral = [item for subspiral in spiral for item in subspiral] 53 | spirals.append(spiral[:seq_length * dilation][::dilation]) 54 | return spirals 55 | -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/utils/progress/counter.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Copyright (c) 2012 Giorgos Verigakis 4 | # 5 | # Permission to use, copy, modify, and distribute this software for any 6 | # purpose with or without fee is hereby granted, provided that the above 7 | # copyright notice and this permission notice appear in all copies. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 | 17 | from __future__ import unicode_literals 18 | from . 
import Infinite, Progress 19 | 20 | 21 | class Counter(Infinite): 22 | def update(self): 23 | self.write(str(self.index)) 24 | 25 | 26 | class Countdown(Progress): 27 | def update(self): 28 | self.write(str(self.remaining)) 29 | 30 | 31 | class Stack(Progress): 32 | phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█') 33 | 34 | def update(self): 35 | nphases = len(self.phases) 36 | i = min(nphases - 1, int(self.progress * nphases)) 37 | self.write(self.phases[i]) 38 | 39 | 40 | class Pie(Stack): 41 | phases = ('○', '◔', '◑', '◕', '●') 42 | -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/utils/progress/spinner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Copyright (c) 2012 Giorgos Verigakis 4 | # 5 | # Permission to use, copy, modify, and distribute this software for any 6 | # purpose with or without fee is hereby granted, provided that the above 7 | # copyright notice and this permission notice appear in all copies. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 | 17 | from __future__ import unicode_literals 18 | from . import Infinite 19 | 20 | 21 | class Spinner(Infinite): 22 | phases = ('-', '\\', '|', '/') 23 | hide_cursor = True 24 | 25 | def update(self): 26 | i = self.index % len(self.phases) 27 | self.write(self.phases[i]) 28 | 29 | 30 | class PieSpinner(Spinner): 31 | phases = ['◷', '◶', '◵', '◴'] 32 | 33 | 34 | class MoonSpinner(Spinner): 35 | phases = ['◑', '◒', '◐', '◓'] 36 | 37 | 38 | class LineSpinner(Spinner): 39 | phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻'] 40 | 41 | 42 | class PixelSpinner(Spinner): 43 | phases = ['⣾', '⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽'] 44 | -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/utils/smpl.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import os.path as osp 4 | import json 5 | # from config import cfg 6 | 7 | import sys 8 | 9 | from smplpytorch.pytorch.smpl_layer import SMPL_Layer 10 | 11 | 12 | class SMPL(object): 13 | def __init__(self, root): 14 | self.root = root 15 | self.layer = {'neutral': self.get_layer(), 'male': self.get_layer('male'), 'female': self.get_layer('female')} 16 | self.vertex_num = 6890 17 | self.face = self.layer['neutral'].th_faces.numpy() 18 | self.joint_regressor = self.layer['neutral'].th_J_regressor.numpy() 19 | 20 | # add nose, L/R eye, L/R ear 21 | self.face_kps_vertex = (331, 2802, 6262, 3489, 3990) # mesh vertex idx 22 | nose_onehot = np.array([1 if i == 331 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 23 | left_eye_onehot = np.array([1 if i == 2802 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 24 | right_eye_onehot = np.array([1 if i == 6262 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 25 | left_ear_onehot = 
np.array([1 if i == 3489 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 26 | right_ear_onehot = np.array([1 if i == 3990 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 27 | self.joint_regressor = np.concatenate( 28 | (self.joint_regressor, nose_onehot, left_eye_onehot, right_eye_onehot, left_ear_onehot, right_ear_onehot)) 29 | 30 | self.joint_num = 29 # original: 24. manually add nose, L/R eye, L/R ear 31 | self.joints_name = ( 32 | 'Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 33 | 'R_Toe', 'Neck', 'L_Thorax', 'R_Thorax', 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 34 | 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear') 35 | self.flip_pairs = ( 36 | (1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23), (25, 26), (27, 28)) 37 | self.skeleton = ( 38 | (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17), 39 | (17, 19), (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 24), (24, 15), 40 | (24, 25), (24, 26), (25, 27), (26, 28)) 41 | self.root_joint_idx = self.joints_name.index('Pelvis') 42 | 43 | def get_layer(self, gender='neutral'): 44 | return SMPL_Layer(gender=gender, model_root=osp.join(self.root, 'template')) 45 | -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/utils/test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import matplotlib.tri as mtri 4 | from mpl_toolkits.mplot3d import Axes3D 5 | 6 | xy = [[0.3,0.5], 7 | [0.6,0.8], 8 | [0.5,0.1], 9 | [0.1,0.2]] 10 | xy = np.array(xy) 11 | 12 | triangles = [[0,2,1], 13 | [2,0,3]] 14 | 15 | triang = mtri.Triangulation(xy[:,0], xy[:,1], triangles=triangles) 16 | 17 | z = [0.1,0.2,0.3,0.4] 18 | 19 | fig, ax = plt.subplots(subplot_kw =dict(projection="3d")) 20 | ax.plot_trisurf(triang, z) 21 | 22 | plt.show() -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/utils/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | import numpy as np 4 | import openmesh as om 5 | 6 | 7 | def makedirs(folder): 8 | if not os.path.exists(folder): 9 | os.makedirs(folder) 10 | 11 | 12 | def count_parameters(model): 13 | return sum(p.numel() for p in model.parameters() if p.requires_grad) 14 | 15 | 16 | def to_edge_index(mat): 17 | return torch.LongTensor(np.vstack(mat.nonzero())) 18 | 19 | 20 | def to_sparse(spmat): 21 | return torch.sparse.FloatTensor( 22 | torch.LongTensor([spmat.tocoo().row, 23 | spmat.tocoo().col]), 24 | torch.FloatTensor(spmat.tocoo().data), torch.Size(spmat.tocoo().shape)) 25 | 26 | 27 | def preprocess_spiral(face, seq_length, vertices=None, dilation=1): 28 | from .generate_spiral_seq import extract_spirals 29 | assert face.shape[1] == 3 30 | if vertices is not None: 31 | mesh = om.TriMesh(np.array(vertices), np.array(face)) 32 | else: 33 | n_vertices = face.max() + 1 34 | mesh = om.TriMesh(np.ones([n_vertices, 3]), np.array(face)) 35 | spirals = torch.tensor( 36 | extract_spirals(mesh, seq_length=seq_length, dilation=dilation)) 37 | return spirals 38 | 39 | 40 | 41 | 
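preprocess_spiral above is the entry point that turns a triangle face template into the precomputed spiral index tensor consumed by SpiralConv and DSConv. A minimal usage sketch; the template path matches this tree, but seq_length=9 is an assumption rather than a value taken from the training code, and the import assumes the HandReconstruction root is on sys.path:

import numpy as np
from model.h2o_net.utils.utils import preprocess_spiral

faces = np.load("model/h2o_net/template/right_faces.npy")  # (F, 3) triangle index array
spirals = preprocess_spiral(faces, seq_length=9)           # LongTensor of shape [num_vertices, seq_length]
print(spirals.shape)

With vertices=None, the helper builds the mesh from dummy coordinates, so the spiral ordering comes from mesh connectivity; the KDTree fallback only fires for spirals shorter than seq_length.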
-------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/utils/warmup_scheduler.py: -------------------------------------------------------------------------------- 1 | from bisect import bisect_right 2 | 3 | def adjust_learning_rate(optimizer, epoch, step, len_epoch, lr, lr_decay, decay_step, warmup_epochs): 4 | """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" 5 | # lr = args.lr * (0.1 ** (epoch // 30)) 6 | # for param_group in optimizer.param_groups: 7 | # param_group["lr"] = lr 8 | lr = lr * (lr_decay ** bisect_right(decay_step, epoch)) 9 | 10 | """Warmup""" 11 | if epoch < warmup_epochs: 12 | lr = ( 13 | lr 14 | * float(1 + step + epoch * len_epoch) 15 | / float(warmup_epochs * len_epoch) 16 | ) 17 | 18 | # if args.rank == 0: 19 | # writer.print_str("epoch = {}, step = {}, lr = {}".format(epoch, step, lr)) 20 | 21 | for param_group in optimizer.param_groups: 22 | param_group["lr"] = lr 23 | 24 | -------------------------------------------------------------------------------- /HandReconstruction/model/h2o_net/utils/writer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import torch 4 | import json 5 | from glob import glob 6 | import logging 7 | logging.getLogger('PIL').setLevel(logging.WARNING) 8 | 9 | 10 | class Writer: 11 | def __init__(self, args=None): 12 | self.args = args 13 | if self.args is not None: 14 | log_filename = os.path.join( 15 | args.out_dir, 'log.log') 16 | 17 | logging.basicConfig( 18 | filename=log_filename, 19 | level=logging.DEBUG, 20 | format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s') 21 | 22 | 23 | def print_str(self, info): 24 | logging.info(info) 25 | 26 | def print_info(self, info): 27 | message = 'Epoch: {}/{}, Duration: {:.3f}s, Train Loss: {:.4f}, Test Loss: {:.4f}' \ 28 | .format(info['current_epoch'], info['epochs'], info['t_duration'], info['train_loss'], info['test_loss']) 29 | logging.info(message) 30 | 31 | def print_step(self, info): 32 | message = 'Epoch: {}/{}, Step: {}/{}, Total_step: {}, Duration: {:.3f}s, Train Loss: {:.4f}, L1 Loss: {:.4f}, Lr: {:.6f}' \ 33 | .format(info['epoch'], info['max_epoch'], info['step'], info['max_step'], info['total_step'], info['step_duration'], info['train_loss'], info['l1_loss'], info['lr']) 34 | logging.info(message) 35 | 36 | def print_step_ft(self, info): 37 | message = 'Epoch: {}/{}, Step: {}/{}, Total: {}, Dur: {:.3f}s, FDur: {:.3f}s, BDur: {:.3f}s, Train Loss: {:.4f}, L1 Loss: {:.4f}, Lr: {:.6f}' \ 38 | .format(info['epoch'], info['max_epoch'], info['step'], info['max_step'], info['total_step'], 39 | info['step_duration'], info['forward_duration'], info['backward_duration'], info['train_loss'], info['l1_loss'], info['lr']) 40 | logging.info(message) 41 | 42 | def save_checkpoint(self, model, optimizer, scheduler, epoch, best=False, last=False): 43 | if best: 44 | save_path = os.path.join(self.args.checkpoints_dir, 'checkpoint_best.pt') 45 | elif last: 46 | save_path = os.path.join(self.args.checkpoints_dir, 'checkpoint_last.pt') 47 | else: 48 | save_path = os.path.join(self.args.checkpoints_dir, 'checkpoint_{:03d}.pt'.format(epoch)) 49 | scheduler_state_dict = {} if scheduler is None else scheduler.state_dict() 50 | torch.save( 51 | { 52 | 'epoch': epoch, 53 | 'model_state_dict': model.state_dict(), 54 | 'optimizer_state_dict': optimizer.state_dict(), 55 | 'scheduler_state_dict': scheduler_state_dict, 56 | }, save_path) 57 |
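Writer.save_checkpoint above writes a flat dict of state_dicts; restoring is the mirror image. A minimal resume sketch, assuming model, optimizer and scheduler objects have already been constructed and that the checkpoint path (hypothetical here) exists:

import torch

ckpt = torch.load("checkpoints/checkpoint_best.pt", map_location="cpu")
model.load_state_dict(ckpt["model_state_dict"])
optimizer.load_state_dict(ckpt["optimizer_state_dict"])
if ckpt["scheduler_state_dict"]:  # saved as {} when no scheduler was used
    scheduler.load_state_dict(ckpt["scheduler_state_dict"])
start_epoch = ckpt["epoch"] + 1  # resume from the epoch after the saved one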
-------------------------------------------------------------------------------- /HandReconstruction/model/hand_occ_net/regressor.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import functional as F 4 | from common.utils.mano import MANO 5 | from model.hand_occ_net.hand_head import hand_regHead, hand_Encoder 6 | from model.hand_occ_net.mano_head import mano_regHead 7 | 8 | 9 | class Regressor(nn.Module): 10 | 11 | def __init__(self): 12 | super(Regressor, self).__init__() 13 | self.hand_regHead = hand_regHead() 14 | self.hand_Encoder = hand_Encoder() 15 | self.mano_regHead = mano_regHead() 16 | 17 | def forward(self, feats): 18 | out_hm, encoding, preds_joints_img = self.hand_regHead(feats) 19 | mano_encoding = self.hand_Encoder(out_hm, encoding) 20 | pred_mano_results = self.mano_regHead(mano_encoding) 21 | 22 | return pred_mano_results, preds_joints_img 23 | -------------------------------------------------------------------------------- /HandReconstruction/model/layer.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | def make_linear_layers(feat_dims, relu_final=True, use_bn=False): 5 | layers = [] 6 | for i in range(len(feat_dims) - 1): 7 | layers.append(nn.Linear(feat_dims[i], feat_dims[i + 1])) 8 | 9 | # Do not use ReLU for final estimation 10 | if i < len(feat_dims) - 2 or (i == len(feat_dims) - 2 and relu_final): 11 | if use_bn: 12 | layers.append(nn.BatchNorm1d(feat_dims[i + 1])) 13 | layers.append(nn.ReLU(inplace=True)) 14 | 15 | return nn.Sequential(*layers) 16 | 17 | 18 | def make_conv_layers(feat_dims, kernel=3, stride=1, padding=1, bnrelu_final=True): 19 | layers = [] 20 | for i in range(len(feat_dims) - 1): 21 | layers.append(nn.Conv2d(in_channels=feat_dims[i], out_channels=feat_dims[i + 1], kernel_size=kernel, stride=stride, padding=padding)) 22 | # Do not use BN and ReLU for final estimation 23 | if i < len(feat_dims) - 2 or (i == len(feat_dims) - 2 and bnrelu_final): 24 | layers.append(nn.BatchNorm2d(feat_dims[i + 1])) 25 | layers.append(nn.ReLU(inplace=True)) 26 | 27 | return nn.Sequential(*layers) 28 | 29 | 30 | def make_conv1d_layers(feat_dims, kernel=3, stride=1, padding=1, bnrelu_final=True): 31 | layers = [] 32 | for i in range(len(feat_dims) - 1): 33 | layers.append(nn.Conv1d(in_channels=feat_dims[i], out_channels=feat_dims[i + 1], kernel_size=kernel, stride=stride, padding=padding)) 34 | # Do not use BN and ReLU for final estimation 35 | if i < len(feat_dims) - 2 or (i == len(feat_dims) - 2 and bnrelu_final): 36 | layers.append(nn.BatchNorm1d(feat_dims[i + 1])) 37 | layers.append(nn.ReLU(inplace=True)) 38 | 39 | return nn.Sequential(*layers) 40 | 41 | 42 | def make_deconv_layers(feat_dims, bnrelu_final=True): 43 | layers = [] 44 | for i in range(len(feat_dims) - 1): 45 | layers.append(nn.ConvTranspose2d(in_channels=feat_dims[i], out_channels=feat_dims[i + 1], kernel_size=4, stride=2, padding=1, output_padding=0, bias=False)) 46 | 47 | # Do not use BN and ReLU for final estimation 48 | if i < len(feat_dims) - 2 or (i == len(feat_dims) - 2 and bnrelu_final): 49 | layers.append(nn.BatchNorm2d(feat_dims[i + 1])) 50 | layers.append(nn.ReLU(inplace=True)) 51 | 52 | return nn.Sequential(*layers) 53 | -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/conv/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .spiralconv import SpiralConv 2 | from .dsconv import DSConv 3 | 4 | __all__ = [ 5 | 'SpiralConv', 6 | ] 7 | -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/conv/dsconv.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Xingyu Chen. All Rights Reserved. 2 | 3 | """ 4 | * @file dsconv.py 5 | * @author chenxingyu (chenxy.sean@gmail.com) 6 | * @brief Depth-separable spiral convolution 7 | * @version 0.1 8 | * @date 2022-04-28 9 | * 10 | * @copyright Copyright (c) 2022 chenxingyu 11 | * 12 | """ 13 | 14 | import torch 15 | import torch.nn as nn 16 | import numpy as np 17 | 18 | 19 | class DSConv(nn.Module): 20 | def __init__(self, in_channels, out_channels, indices, dim=1): 21 | super(DSConv, self).__init__() 22 | self.dim = dim 23 | self.indices = indices 24 | self.in_channels = in_channels 25 | self.out_channels = out_channels 26 | self.seq_length = indices.size(1) 27 | self.spatial_layer = nn.Conv2d(self.in_channels, self.in_channels, int(np.sqrt(self.seq_length)), 1, 0, groups=self.in_channels, bias=False) 28 | self.channel_layer = nn.Linear(self.in_channels, self.out_channels, bias=False) 29 | torch.nn.init.xavier_uniform_(self.channel_layer.weight) 30 | 31 | def reset_parameters(self): 32 | torch.nn.init.xavier_uniform_(self.spatial_layer.weight) 33 | torch.nn.init.xavier_uniform_(self.channel_layer.weight) 34 | 35 | def forward(self, x): 36 | n_nodes, _ = self.indices.size() 37 | bs = x.size(0) 38 | x = torch.index_select(x, self.dim, self.indices.to(x.device).view(-1)) 39 | x = x.view(bs * n_nodes, self.seq_length, -1).transpose(1, 2) 40 | x = x.view(x.size(0), x.size(1), int(np.sqrt(self.seq_length)), int(np.sqrt(self.seq_length))) 41 | x = self.spatial_layer(x).view(bs, n_nodes, -1) 42 | x = self.channel_layer(x) 43 | 44 | return x 45 | 46 | def __repr__(self): 47 | return '{}({}, {}, seq_length={})'.format(self.__class__.__name__, 48 | self.in_channels, 49 | self.out_channels, 50 | self.seq_length) -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/conv/spiralconv.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class SpiralConv(nn.Module): 6 | def __init__(self, in_channels, out_channels, indices, dim=1): 7 | super(SpiralConv, self).__init__() 8 | self.dim = dim 9 | self.indices = indices 10 | self.in_channels = in_channels 11 | self.out_channels = out_channels 12 | self.seq_length = indices.size(1) 13 | 14 | self.layer = nn.Linear(in_channels * self.seq_length, out_channels) 15 | self.reset_parameters() 16 | 17 | def reset_parameters(self): 18 | torch.nn.init.xavier_uniform_(self.layer.weight) 19 | torch.nn.init.constant_(self.layer.bias, 0) 20 | 21 | def forward(self, x): 22 | n_nodes, _ = self.indices.size() 23 | if x.dim() == 2: 24 | x = torch.index_select(x, 0, self.indices.to(x.device).view(-1)) 25 | x = x.view(n_nodes, -1) 26 | elif x.dim() == 3: 27 | bs = x.size(0) 28 | x = torch.index_select(x, self.dim, self.indices.to(x.device).reshape(-1)) 29 | x = x.view(bs, n_nodes, -1) 30 | else: 31 | raise RuntimeError( 32 | 'x.dim() is expected to be 2 or 3, but received {}'.format( 33 | x.dim())) 34 | x = self.layer(x) 35 | return x 36 | 37 | def __repr__(self): 38 | return '{}({}, {}, 
seq_length={})'.format(self.__class__.__name__, 39 | self.in_channels, 40 | self.out_channels, 41 | self.seq_length) 42 | -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/models/generate_pth.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import OrderedDict 3 | import torch 4 | import torch.nn as nn 5 | from resnetstack import ResnetStack_Backbone 6 | from resnetstack import Bottleneck 7 | 8 | 9 | def show_weight(Weight: OrderedDict): 10 | for k, v in Weight.items(): 11 | # if k.startswith('resnet_stack2'): 12 | # break 13 | print(f'{k: <50}', end='') 14 | print(v.shape) 15 | 16 | 17 | def update_weights(pretrained, edited): 18 | ''' 19 | pretrained: pretrained ResNet50 weights 20 | edited: edited model weights, model.state_dict() 21 | return: updated new model weights 22 | ''' 23 | pretrained_layer_name_size = 60 24 | 25 | for k, v in pretrained.items(): 26 | names = k.split('.') 27 | 28 | if names[0] in ['conv1', 'bn1']: 29 | # same name 30 | assert (k in edited) 31 | edited[k] = v 32 | print(f'{k: <{pretrained_layer_name_size}}', end='') 33 | print(k) # pretrained layer 34 | 35 | elif names[0] in ['layer1', 'layer2', 'layer3', 'layer4']: 36 | for i in range(1, 3): 37 | new_layer_name = f'resnet_stack{i}.' + k 38 | assert (new_layer_name in edited) # make sure the layer is in new model 39 | edited[new_layer_name] = v 40 | print(f'{new_layer_name: <{pretrained_layer_name_size}}', end='') 41 | print(k) # pretrained layer 42 | 43 | else: # no mapped layer 44 | print(' ' * pretrained_layer_name_size, end='') 45 | print(k) # pretrained layer 46 | 47 | return edited 48 | 49 | 50 | # def check_if_weights_are_updated(pretrained, edited): 51 | # name = 'resnet_stack1.layer2.2.bn1.bias' 52 | # origin = edited[name] 53 | 54 | # edited = update_weights(pretrained, edited) 55 | # edited = edited[name] 56 | 57 | # pretrained = Weight_Resnet['layer2.2.bn1.bias'] 58 | 59 | # print(origin) 60 | # print(edited) 61 | # print(pretrained) 62 | 63 | 64 | def generate_pth(): 65 | PATH_RESNET = '/uac/gds/xuhao/.cache/torch/hub/checkpoints/resnet50-0676ba61.pth' 66 | assert (os.path.isfile(PATH_RESNET)) 67 | 68 | print('torchvision - resnet50 weight found, loading...') 69 | Weight_Resnet = torch.load(PATH_RESNET) 70 | 71 | # my model 72 | model = ResnetStack_Backbone(Bottleneck, [3, 4, 6, 3]) 73 | Weight_RNStack = model.state_dict() 74 | 75 | # same_name = ['conv1', 'bn1'] 76 | # stk1_name = ['layer1', 'layer2', 'layer3', 'layer4'] 77 | # stk2_name = ['layer1', 'layer2', 'layer3', 'layer4'] 78 | 79 | Weight_RNStack = update_weights(Weight_Resnet, Weight_RNStack) 80 | torch.save(Weight_RNStack, 'resnetstack.pth') 81 | -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/template/j_reg.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/model/mob_recon/template/j_reg.npy -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/template/right_faces.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/model/mob_recon/template/right_faces.npy 
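The update_weights routine in generate_pth.py above maps torchvision's ResNet-50 checkpoint onto the stacked backbone: the stem keys ('conv1', 'bn1') keep their names, every 'layer1'..'layer4' key is duplicated into both resnet_stack1 and resnet_stack2, and unmapped keys such as 'fc.*' are dropped. A tiny self-contained illustration of that key mapping, using placeholder values instead of real tensors:

from collections import OrderedDict

pretrained = OrderedDict([("conv1.weight", "w0"), ("layer1.0.conv1.weight", "w1"), ("fc.weight", "w2")])
edited = OrderedDict([("conv1.weight", None),
                      ("resnet_stack1.layer1.0.conv1.weight", None),
                      ("resnet_stack2.layer1.0.conv1.weight", None)])

for k, v in pretrained.items():
    head = k.split(".")[0]
    if head in ("conv1", "bn1"):
        edited[k] = v  # shared stem keeps its original key
    elif head in ("layer1", "layer2", "layer3", "layer4"):
        for i in (1, 2):  # mirror of range(1, 3) in update_weights
            edited["resnet_stack{}.{}".format(i, k)] = v  # each stack receives a copy
    # keys like 'fc.weight' have no counterpart in the stacked backbone and are skipped

print(edited)  # conv1 and both stack copies now hold the pretrained values; fc was dropped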
-------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/template/transform.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/model/mob_recon/template/transform.pkl -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/template/transform_body.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/model/mob_recon/template/transform_body.pkl -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/utils/alter_pretrain.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from collections import OrderedDict 3 | 4 | path = 'out/Human36M/cmr_pg_h36m_resumepp_lr5/checkpoints/' 5 | old_name = 'checkpoint_best.pt' 6 | new_name = 'cmr_pg_res18_h36m.pt' 7 | new_weight = OrderedDict() 8 | checkpoint = torch.load(path+old_name, map_location='cpu')['model_state_dict'] 9 | 10 | for k, v in checkpoint.items(): 11 | if 'backbone.' not in k and 'reduce.' not in k: 12 | new_weight[k] = v 13 | 14 | torch.save(new_weight, path+new_name) 15 | -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/utils/generate_spiral_seq.py: -------------------------------------------------------------------------------- 1 | import openmesh as om 2 | from sklearn.neighbors import KDTree 3 | import numpy as np 4 | 5 | 6 | def _next_ring(mesh, last_ring, other): 7 | res = [] 8 | 9 | def is_new_vertex(idx): 10 | return (idx not in last_ring and idx not in other and idx not in res) 11 | 12 | for vh1 in last_ring: 13 | vh1 = om.VertexHandle(vh1) 14 | after_last_ring = False 15 | for vh2 in mesh.vv(vh1): 16 | if after_last_ring: 17 | if is_new_vertex(vh2.idx()): 18 | res.append(vh2.idx()) 19 | if vh2.idx() in last_ring: 20 | after_last_ring = True 21 | for vh2 in mesh.vv(vh1): 22 | if vh2.idx() in last_ring: 23 | break 24 | if is_new_vertex(vh2.idx()): 25 | res.append(vh2.idx()) 26 | return res 27 | 28 | 29 | def extract_spirals(mesh, seq_length, dilation=1): 30 | # output: spirals.size() = [N, seq_length] 31 | spirals = [] 32 | for vh0 in mesh.vertices(): 33 | reference_one_ring = [] 34 | for vh1 in mesh.vv(vh0): 35 | reference_one_ring.append(vh1.idx()) 36 | spiral = [vh0.idx()] 37 | one_ring = list(reference_one_ring) 38 | last_ring = one_ring 39 | next_ring = _next_ring(mesh, last_ring, spiral) 40 | spiral.extend(last_ring) 41 | while len(spiral) + len(next_ring) < seq_length * dilation: 42 | if len(next_ring) == 0: 43 | break 44 | last_ring = next_ring 45 | next_ring = _next_ring(mesh, last_ring, spiral) 46 | spiral.extend(last_ring) 47 | if len(next_ring) > 0: 48 | spiral.extend(next_ring) 49 | else: 50 | kdt = KDTree(mesh.points(), metric='euclidean') 51 | spiral = kdt.query(np.expand_dims(mesh.points()[spiral[0]], 52 | axis=0), 53 | k=seq_length * dilation, 54 | return_distance=False).tolist() 55 | spiral = [item for subspiral in spiral for item in subspiral] 56 | spirals.append(spiral[:seq_length * dilation][::dilation]) 57 | return spirals 58 | -------------------------------------------------------------------------------- 
/HandReconstruction/model/mob_recon/utils/progress/counter.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Copyright (c) 2012 Giorgos Verigakis 4 | # 5 | # Permission to use, copy, modify, and distribute this software for any 6 | # purpose with or without fee is hereby granted, provided that the above 7 | # copyright notice and this permission notice appear in all copies. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 | 17 | from __future__ import unicode_literals 18 | from . import Infinite, Progress 19 | 20 | 21 | class Counter(Infinite): 22 | def update(self): 23 | self.write(str(self.index)) 24 | 25 | 26 | class Countdown(Progress): 27 | def update(self): 28 | self.write(str(self.remaining)) 29 | 30 | 31 | class Stack(Progress): 32 | phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█') 33 | 34 | def update(self): 35 | nphases = len(self.phases) 36 | i = min(nphases - 1, int(self.progress * nphases)) 37 | self.write(self.phases[i]) 38 | 39 | 40 | class Pie(Stack): 41 | phases = ('○', '◔', '◑', '◕', '●') 42 | -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/utils/progress/spinner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Copyright (c) 2012 Giorgos Verigakis 4 | # 5 | # Permission to use, copy, modify, and distribute this software for any 6 | # purpose with or without fee is hereby granted, provided that the above 7 | # copyright notice and this permission notice appear in all copies. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 | 17 | from __future__ import unicode_literals 18 | from . 
import Infinite 19 | 20 | 21 | class Spinner(Infinite): 22 | phases = ('-', '\\', '|', '/') 23 | hide_cursor = True 24 | 25 | def update(self): 26 | i = self.index % len(self.phases) 27 | self.write(self.phases[i]) 28 | 29 | 30 | class PieSpinner(Spinner): 31 | phases = ['◷', '◶', '◵', '◴'] 32 | 33 | 34 | class MoonSpinner(Spinner): 35 | phases = ['◑', '◒', '◐', '◓'] 36 | 37 | 38 | class LineSpinner(Spinner): 39 | phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻'] 40 | 41 | 42 | class PixelSpinner(Spinner): 43 | phases = ['⣾', '⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽'] 44 | -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/utils/smpl.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import os.path as osp 4 | import json 5 | # from config import cfg 6 | 7 | import sys 8 | 9 | from smplpytorch.pytorch.smpl_layer import SMPL_Layer 10 | 11 | 12 | class SMPL(object): 13 | def __init__(self, root): 14 | self.root = root 15 | self.layer = {'neutral': self.get_layer(), 'male': self.get_layer('male'), 'female': self.get_layer('female')} 16 | self.vertex_num = 6890 17 | self.face = self.layer['neutral'].th_faces.numpy() 18 | self.joint_regressor = self.layer['neutral'].th_J_regressor.numpy() 19 | 20 | # add nose, L/R eye, L/R ear 21 | self.face_kps_vertex = (331, 2802, 6262, 3489, 3990) # mesh vertex idx 22 | nose_onehot = np.array([1 if i == 331 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 23 | left_eye_onehot = np.array([1 if i == 2802 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 24 | right_eye_onehot = np.array([1 if i == 6262 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 25 | left_ear_onehot = np.array([1 if i == 3489 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 26 | right_ear_onehot = np.array([1 if i == 3990 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 27 | self.joint_regressor = np.concatenate( 28 | (self.joint_regressor, nose_onehot, left_eye_onehot, right_eye_onehot, left_ear_onehot, right_ear_onehot)) 29 | 30 | self.joint_num = 29 # original: 24. 
manually add nose, L/R eye, L/R ear 31 | self.joints_name = ( 32 | 'Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 33 | 'R_Toe', 'Neck', 'L_Thorax', 'R_Thorax', 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 34 | 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear') 35 | self.flip_pairs = ( 36 | (1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23), (25, 26), (27, 28)) 37 | self.skeleton = ( 38 | (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17), 39 | (17, 19), (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 24), (24, 15), 40 | (24, 25), (24, 26), (25, 27), (26, 28)) 41 | self.root_joint_idx = self.joints_name.index('Pelvis') 42 | 43 | def get_layer(self, gender='neutral'): 44 | return SMPL_Layer(gender=gender, model_root=osp.join(self.root, 'template')) 45 | -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/utils/test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import matplotlib.tri as mtri 4 | from mpl_toolkits.mplot3d import Axes3D 5 | 6 | xy = [[0.3,0.5], 7 | [0.6,0.8], 8 | [0.5,0.1], 9 | [0.1,0.2]] 10 | xy = np.array(xy) 11 | 12 | triangles = [[0,2,1], 13 | [2,0,3]] 14 | 15 | triang = mtri.Triangulation(xy[:,0], xy[:,1], triangles=triangles) 16 | 17 | z = [0.1,0.2,0.3,0.4] 18 | 19 | fig, ax = plt.subplots(subplot_kw =dict(projection="3d")) 20 | ax.plot_trisurf(triang, z) 21 | 22 | plt.show() -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/utils/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | import numpy as np 4 | import openmesh as om 5 | 6 | 7 | def makedirs(folder): 8 | if not os.path.exists(folder): 9 | os.makedirs(folder) 10 | 11 | 12 | def count_parameters(model): 13 | return sum(p.numel() for p in model.parameters() if p.requires_grad) 14 | 15 | 16 | def to_edge_index(mat): 17 | return torch.LongTensor(np.vstack(mat.nonzero())) 18 | 19 | 20 | def to_sparse(spmat): 21 | return torch.sparse.FloatTensor( 22 | torch.LongTensor([spmat.tocoo().row, 23 | spmat.tocoo().col]), 24 | torch.FloatTensor(spmat.tocoo().data), torch.Size(spmat.tocoo().shape)) 25 | 26 | 27 | def preprocess_spiral(face, seq_length, vertices=None, dilation=1): 28 | from .generate_spiral_seq import extract_spirals 29 | assert face.shape[1] == 3 30 | if vertices is not None: 31 | mesh = om.TriMesh(np.array(vertices), np.array(face)) 32 | else: 33 | n_vertices = face.max() + 1 34 | mesh = om.TriMesh(np.ones([n_vertices, 3]), np.array(face)) 35 | spirals = torch.tensor( 36 | extract_spirals(mesh, seq_length=seq_length, dilation=dilation)) 37 | return spirals 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/utils/warmup_scheduler.py: -------------------------------------------------------------------------------- 1 | from bisect import bisect_right 2 | 3 | def adjust_learning_rate(optimizer, epoch, step, len_epoch, lr, lr_decay, decay_step, warmup_epochs): 4 | """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" 5 | # lr = args.lr * (0.1 ** (epoch // 30)) 6 | # for param_group 
in optimizer.param_groups: 7 | # param_group["lr"] = lr 8 | lr = lr * (lr_decay ** bisect_right(decay_step, epoch)) 9 | 10 | """Warmup""" 11 | if epoch < warmup_epochs: 12 | lr = ( 13 | lr 14 | * float(1 + step + epoch * len_epoch) 15 | / float(warmup_epochs * len_epoch) 16 | ) 17 | 18 | # if args.rank == 0: 19 | # writer.print_str("epoch = {}, step = {}, lr = {}".format(epoch, step, lr)) 20 | 21 | for param_group in optimizer.param_groups: 22 | param_group["lr"] = lr 23 | 24 | -------------------------------------------------------------------------------- /HandReconstruction/model/mob_recon/utils/writer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import torch 4 | import json 5 | from glob import glob 6 | import logging 7 | logging.getLogger('PIL').setLevel(logging.WARNING) 8 | 9 | 10 | class Writer: 11 | def __init__(self, args=None): 12 | self.args = args 13 | if self.args is not None: 14 | log_filename = os.path.join( 15 | args.out_dir, 'log.log') 16 | 17 | logging.basicConfig( 18 | filename=log_filename, 19 | level=logging.DEBUG, 20 | format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s') 21 | 22 | 23 | def print_str(self, info): 24 | logging.info(info) 25 | 26 | def print_info(self, info): 27 | message = 'Epoch: {}/{}, Duration: {:.3f}s, Train Loss: {:.4f}, Test Loss: {:.4f}' \ 28 | .format(info['current_epoch'], info['epochs'], info['t_duration'], info['train_loss'], info['test_loss']) 29 | logging.info(message) 30 | 31 | def print_step(self, info): 32 | message = 'Epoch: {}/{}, Step: {}/{}, Total_step: {}, Duration: {:.3f}s, Train Loss: {:.4f}, L1 Loss: {:.4f}, Lr: {:.6f}' \ 33 | .format(info['epoch'], info['max_epoch'], info['step'], info['max_step'], info['total_step'], info['step_duration'], info['train_loss'], info['l1_loss'], info['lr']) 34 | logging.info(message) 35 | 36 | def print_step_ft(self, info): 37 | message = 'Epoch: {}/{}, Step: {}/{}, Total: {}, Dur: {:.3f}s, FDur: {:.3f}s, BDur: {:.3f}s, Train Loss: {:.4f}, L1 Loss: {:.4f}, Lr: {:.6f}' \ 38 | .format(info['epoch'], info['max_epoch'], info['step'], info['max_step'], info['total_step'], 39 | info['step_duration'], info['forward_duration'], info['backward_duration'], info['train_loss'], info['l1_loss'], info['lr']) 40 | logging.info(message) 41 | 42 | def save_checkpoint(self, model, optimizer, scheduler, epoch, best=False, last=False): 43 | if best: 44 | save_path = os.path.join(self.args.checkpoints_dir, 'checkpoint_best.pt') 45 | elif last: 46 | save_path = os.path.join(self.args.checkpoints_dir, 'checkpoint_last.pt') 47 | else: 48 | save_path = os.path.join(self.args.checkpoints_dir, 'checkpoint_{:03d}.pt'.format(epoch)) 49 | scheduler_state_dict = {} if scheduler is None else scheduler.state_dict() 50 | torch.save( 51 | { 52 | 'epoch': epoch, 53 | 'model_state_dict': model.state_dict(), 54 | 'optimizer_state_dict': optimizer.state_dict(), 55 | 'scheduler_state_dict': scheduler_state_dict, 56 | }, save_path) 57 | -------------------------------------------------------------------------------- /HandReconstruction/optimizer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/HandReconstruction/optimizer/__init__.py -------------------------------------------------------------------------------- /HandReconstruction/optimizer/optimizer.py:
-------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | 3 | 4 | def fetch_optimizer(cfg, model): 5 | total_params = [p for p in model.parameters() if p.requires_grad] 6 | if cfg.optimizer.name == "adam": 7 | # optimizer = optim.Adam(total_params, lr=cfg.optimizer.lr, weight_decay=1e-4) 8 | optimizer = optim.Adam(total_params, lr=cfg.optimizer.lr) 9 | elif cfg.optimizer.name == "sgd": 10 | optimizer = optim.SGD(total_params, lr=cfg.optimizer.lr) 11 | else: 12 | raise NotImplementedError("Unknown optimizer type: {}.".format(cfg.optimizer.name)) 13 | 14 | if cfg.scheduler.name == "exp": 15 | scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=cfg.scheduler.gamma) 16 | elif cfg.scheduler.name == "step": 17 | scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.scheduler.milestones, gamma=cfg.scheduler.gamma) 18 | else: 19 | raise NotImplementedError("Unknown scheduler type: {}.".format(cfg.scheduler.name)) 20 | return optimizer, scheduler 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 HaoXu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /NovelConditionCreator/DexGraspNet/asset_process/decompose_list.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from utils.extract_utils import * 3 | 4 | 5 | if __name__ == "__main__": 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("--src", type=str, required=True) 8 | parser.add_argument("--dst", type=str, required=True) 9 | parser.add_argument("--coacd_path", type=str, required=True) 10 | args = parser.parse_args() 11 | 12 | os.makedirs(args.dst, exist_ok=True) 13 | 14 | with open("run.sh", "w") as f: 15 | for mesh in os.listdir(args.src): 16 | f.write( 17 | f"python decompose.py --data_root_path {args.src} --result_path {args.dst} --object_code {mesh[:-4]} --coacd_path {args.coacd_path}\n") 18 | -------------------------------------------------------------------------------- /NovelConditionCreator/DexGraspNet/asset_process/extract.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from utils.extract_utils import * 3 | 4 | 5 | if __name__ == "__main__": 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("--src", type=str, required=True) 8 | parser.add_argument("--dst", type=str, required=True) 9 | parser.add_argument('--set', type=str, 10 | choices=["core", "sem", "mujoco", "ddg"], required=True) 11 | parser.add_argument('--meta', type=str) 12 | args = parser.parse_args() 13 | 14 | if(args.set == "core"): 15 | extract_core(args.src, args.dst) 16 | elif(args.set == "sem"): 17 | extract_sem(args.src, args.dst, args.meta) 18 | elif(args.set == "mujoco"): 19 | extract_mujoco(args.src, args.dst) 20 | elif(args.set == "ddg"): 21 | extract_ddg(args.src, args.dst) 22 | -------------------------------------------------------------------------------- /NovelConditionCreator/DexGraspNet/asset_process/manifold.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from utils.extract_utils import * 3 | 4 | 5 | if __name__ == "__main__": 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("--src", type=str, required=True) 8 | parser.add_argument("--dst", type=str, required=True) 9 | parser.add_argument("--manifold_path", type=str, required=True) 10 | args = parser.parse_args() 11 | 12 | os.makedirs(args.dst, exist_ok=True) 13 | 14 | with open("run.sh", "w") as f: 15 | for mesh in os.listdir(args.src): 16 | f.write( 17 | f"{args.manifold_path} --input {os.path.join(args.src, mesh)} --output {os.path.join(args.dst, mesh)}\n") 18 | -------------------------------------------------------------------------------- /NovelConditionCreator/DexGraspNet/asset_process/normalize.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import trimesh 3 | import numpy as np 4 | from tqdm import tqdm 5 | from utils.extract_utils import * 6 | 7 | 8 | if __name__ == "__main__": 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument("--src", type=str, required=True) 11 | parser.add_argument("--dst", type=str, required=True) 12 | args = parser.parse_args() 13 | 14 | os.makedirs(args.dst, exist_ok=True) 15 | 16 | for code in tqdm(os.listdir(args.src)): 17 | mesh = trimesh.load(os.path.join(args.src, code), 18 | force="mesh", process=False) 19 | verts = np.array(mesh.vertices) 20 | xcenter = (np.max(verts[:, 0]) + np.min(verts[:, 0])) / 2 21 | ycenter = (np.max(verts[:, 1]) +
np.min(verts[:, 1])) / 2 22 | zcenter = (np.max(verts[:, 2]) + np.min(verts[:, 2])) / 2 23 | verts_ = verts - np.array([xcenter, ycenter, zcenter]) 24 | dmax = np.max(np.sqrt(np.sum(np.square(verts_), axis=1))) * 1.03 25 | verts_ /= dmax 26 | mesh_ = trimesh.Trimesh( 27 | vertices=verts_, faces=mesh.faces, process=False) 28 | if(mesh_.is_watertight and mesh_.volume > 0.05): 29 | mesh_.export(os.path.join(args.dst, code)) 30 | -------------------------------------------------------------------------------- /NovelConditionCreator/DexGraspNet/asset_process/poolrun.py: -------------------------------------------------------------------------------- 1 | from multiprocessing import Pool 2 | import os 3 | import time 4 | import argparse 5 | 6 | 7 | def run_cmd(cmd): 8 | if cmd[-1] == "\n": 9 | cmd = cmd[:-1] 10 | os.system(cmd) 11 | 12 | 13 | if __name__ == "__main__": 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument("-i", 16 | "--input", 17 | type=str, 18 | help="input shell script", 19 | default="run.sh") 20 | parser.add_argument("-p", 21 | "--process", 22 | type=int, 23 | help="num of process to run", 24 | default=4) 25 | args = parser.parse_args() 26 | 27 | p = Pool(args.process) 28 | 29 | cmds = None 30 | 31 | with open(args.input, "r") as f: 32 | cmds = f.readlines() 33 | 34 | t1 = time.time() 35 | 36 | for i in cmds: 37 | if i[0] == '\n': 38 | continue 39 | p.apply_async(run_cmd, args=(i, )) 40 | 41 | p.close() 42 | p.join() 43 | 44 | t2 = time.time() 45 | 46 | print(f"Finished in {(t2-t1):.2f}s") 47 | -------------------------------------------------------------------------------- /NovelConditionCreator/DexGraspNet/grasp_generation/mano/contact_indices.json: -------------------------------------------------------------------------------- 1 | [ 2 | 699, 700, 753, 754, 714, 741, 755, 757, 739, 756, 760, 740, 762, 763, 3 | 194, 195, 165, 48, 49, 166, 46, 47, 280, 237, 238, 340, 341, 330, 342, 328, 343, 4 | 375, 386, 387, 358, 359, 376, 356, 357, 402, 396, 397, 452, 453, 440, 454, 438, 455, 5 | 485, 496, 497, 470, 471, 486, 468, 469, 513, 506, 507, 563, 564, 551, 565, 549, 566, 6 | 614, 615, 582, 583, 580, 581, 681, 681, 625, 666, 683, 7 | 73, 96, 98, 99, 772, 774, 775, 777 8 | ] -------------------------------------------------------------------------------- /NovelConditionCreator/DexGraspNet/grasp_generation/tests/visualize_hand_model.py: -------------------------------------------------------------------------------- 1 | """ 2 | Last modified date: 2023.02.23 3 | Author: Jialiang Zhang 4 | Description: visualize hand model using plotly.graph_objects 5 | """ 6 | 7 | import os 8 | import sys 9 | 10 | os.chdir(os.path.dirname(os.path.dirname(__file__))) 11 | sys.path.append(os.path.realpath('.')) 12 | 13 | import numpy as np 14 | import torch 15 | import transforms3d 16 | import plotly.graph_objects as go 17 | from utils.hand_model import HandModel 18 | 19 | 20 | torch.manual_seed(1) 21 | 22 | os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' 23 | 24 | if __name__ == '__main__': 25 | device = torch.device('cpu') 26 | 27 | # hand model 28 | 29 | hand_model = HandModel( 30 | mano_root='mano', 31 | contact_indices_path='mano/contact_indices.json', 32 | pose_distrib_path='mano/pose_distrib.pt', 33 | device=device 34 | ) 35 | 36 | vec, angle = transforms3d.euler.euler2axangle(-np.pi / 2, -np.pi / 2, np.pi / 6, axes='rzxz') 37 | hand_pose = torch.concat([ 38 | torch.tensor([-0.1, -0.05, 0], dtype=torch.float, device=device), 39 | torch.tensor(vec * angle, dtype=torch.float, 
device=device), 40 | torch.tensor([ 41 | 0, 0, torch.pi / 6, 42 | 0, 0, 0, 43 | 0, 0, 0, 44 | 45 | 0, 0, torch.pi / 6, 46 | 0, 0, 0, 47 | 0, 0, 0, 48 | 49 | 0, 0, torch.pi / 6, 50 | 0, 0, 0, 51 | 0, 0, 0, 52 | 53 | 0, 0, torch.pi / 6, 54 | 0, 0, 0, 55 | 0, 0, 0, 56 | 57 | *(torch.pi / 2 * torch.tensor([2, 1, 0], dtype=torch.float) / torch.tensor([2, 1, 0], dtype=torch.float).norm()), 58 | 0, 0, 0, 59 | 0, 0, 0, 60 | ], dtype=torch.float, device=device), 61 | ]) 62 | hand_model.set_parameters(hand_pose.unsqueeze(0)) 63 | 64 | # info 65 | contact_candidates = hand_model.get_contact_candidates() 66 | print(f'n_contact_candidates: {hand_model.n_contact_candidates}') 67 | 68 | # visualize 69 | 70 | hand_plotly = hand_model.get_plotly_data(i=0, opacity=0.5, color='lightblue', with_keypoints=True) 71 | v = contact_candidates[0].detach().cpu().numpy() 72 | contact_candidates_plotly = [go.Scatter3d(x=v[:, 0], y=v[:, 1], z=v[:, 2], mode='markers', marker=dict(size=2, color='white'))] 73 | 74 | fig = go.Figure(hand_plotly + contact_candidates_plotly) 75 | fig.update_layout(scene_aspectmode='data') 76 | fig.show() 77 | -------------------------------------------------------------------------------- /NovelConditionCreator/DexGraspNet/grasp_generation/tests/visualize_object_pose.py: -------------------------------------------------------------------------------- 1 | """ 2 | Last modified date: 2023.02.23 3 | Author: Jialiang Zhang 4 | Description: visualize object in world frame using plotly.graph_objects 5 | """ 6 | 7 | import os 8 | 9 | os.chdir(os.path.dirname(os.path.dirname(__file__))) 10 | 11 | import argparse 12 | import numpy as np 13 | import trimesh as tm 14 | import plotly.graph_objects as go 15 | 16 | 17 | if __name__ == '__main__': 18 | parser = argparse.ArgumentParser() 19 | parser.add_argument('--data_root_path', type=str, default='../data/meshdata') 20 | parser.add_argument('--poses', type=str, default='../data/poses') 21 | parser.add_argument('--object_code', type=str, default='core-mug-8570d9a8d24cb0acbebd3c0c0c70fb03') 22 | parser.add_argument('--num', type=int, default=0) 23 | parser.add_argument('--scale', type=float, default=0.1234567890) 24 | args = parser.parse_args() 25 | 26 | # load data 27 | pose_matrices = np.load(os.path.join(args.poses, args.object_code + '.npy')) 28 | print(f'n_data: {len(pose_matrices)}') 29 | pose_matrix = pose_matrices[args.num] 30 | pose_matrix[:3, 3] *= args.scale 31 | object_mesh = tm.load(os.path.join(args.data_root_path, args.object_code, 'coacd', 'decomposed.obj')).apply_scale(args.scale) 32 | 33 | # visualize 34 | v = object_mesh.vertices @ pose_matrix[:3, :3].T + pose_matrix[:3, 3] 35 | f = object_mesh.faces 36 | object_plotly = go.Mesh3d(x=v[:, 0], y=v[:, 1], z=v[:, 2], i=f[:, 0], j=f[:, 1], k=f[:, 2], color='lightgreen', opacity=1) 37 | fig = go.Figure(object_plotly) 38 | fig.show() 39 | -------------------------------------------------------------------------------- /NovelConditionCreator/DexGraspNet/grasp_generation/utils/energy.py: -------------------------------------------------------------------------------- 1 | """ 2 | Last modified date: 2023.04.12 3 | Author: Jialiang Zhang 4 | Description: energy functions 5 | """ 6 | 7 | import torch 8 | 9 | 10 | def cal_energy(hand_model, object_model, w_dis=100.0, w_pen=100.0, w_prior=0.5, w_spen=10.0, w_tpen=40.0, on_table=False, verbose=False): 11 | 12 | # E_dis 13 | batch_size, n_contact, _ = hand_model.contact_points.shape 14 | device = object_model.device 15 | distance, contact_normal = 
object_model.cal_distance(hand_model.contact_points) 16 | E_dis = torch.sum(distance.abs(), dim=-1, dtype=torch.float).to(device) 17 | 18 | # E_fc 19 | contact_normal = contact_normal.reshape(batch_size, 1, 3 * n_contact) 20 | transformation_matrix = torch.tensor([[0, 0, 0, 0, 0, -1, 0, 1, 0], [0, 0, 1, 0, 0, 0, -1, 0, 0], [0, -1, 0, 1, 0, 0, 0, 0, 0]], dtype=torch.float, device=device) 21 | g = torch.cat([ 22 | torch.eye(3, dtype=torch.float, device=device).expand(batch_size, n_contact, 3, 3).reshape(batch_size, 3 * n_contact, 3), 23 | (hand_model.contact_points @ transformation_matrix).view(batch_size, 3 * n_contact, 3) 24 | ], 25 | dim=2).float().to(device) 26 | norm = torch.norm(contact_normal @ g, dim=[1, 2]) 27 | E_fc = norm * norm 28 | 29 | # E_pen 30 | object_scale = object_model.object_scale_tensor.flatten().unsqueeze(1).unsqueeze(2) 31 | object_surface_points = object_model.surface_points_tensor * object_scale # (n_objects * batch_size_each, num_samples, 3) 32 | distances = hand_model.cal_distance(object_surface_points) 33 | distances[distances <= 0] = 0 34 | E_pen = distances.sum(-1) 35 | 36 | # E_prior 37 | E_prior = torch.norm((hand_model.hand_pose[:, 6:] - hand_model.pose_distrib[0]) / hand_model.pose_distrib[1], dim=-1) 38 | 39 | # E_spen 40 | E_spen = hand_model.self_penetration() 41 | 42 | if on_table: 43 | # E_tpen 44 | plane_distances = hand_model.cal_dis_plane(object_model.plane_parameters) # [B, 778] 45 | plane_distances[plane_distances > 0] = 0 46 | E_tpen = -plane_distances.sum(-1) 47 | else: 48 | E_tpen = torch.zeros_like(E_spen, device=E_spen.device) 49 | 50 | if verbose: 51 | return E_fc + w_dis * E_dis + w_pen * E_pen + w_prior * E_prior + w_spen * E_spen + w_tpen * E_tpen, E_fc, E_dis, E_pen, E_prior, E_spen, E_tpen 52 | else: 53 | return E_fc + w_dis * E_dis + w_pen * E_pen + w_prior * E_prior + w_spen * E_spen + w_tpen * E_tpen 54 | -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/NovelConditionCreator/dexycb/__init__.py -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/mano.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os.path as osp 3 | import sys 4 | 5 | sys.path.append('../') 6 | from dexycb.manopth.manopth.manolayer import ManoLayer 7 | 8 | 9 | class MANO(object): 10 | 11 | def __init__(self, side, flat_hand_mean=False): 12 | self.layer = self.get_layer(side, flat_hand_mean) 13 | self.vertex_num = 778 14 | self.face = self.layer.th_faces.numpy() 15 | self.joint_regressor = self.layer.th_J_regressor.numpy() 16 | 17 | self.joint_num = 21 18 | self.joints_name = ('Wrist', 'Thumb_1', 'Thumb_2', 'Thumb_3', 'Thumb_4', 'Index_1', 'Index_2', 'Index_3', 'Index_4', 'Middle_1', 'Middle_2', 'Middle_3', 'Middle_4', 19 | 'Ring_1', 'Ring_2', 'Ring_3', 'Ring_4', 'Pinky_1', 'Pinky_2', 'Pinky_3', 'Pinky_4') 20 | 21 | self.skeleton = ((0, 1), (0, 5), (0, 9), (0, 13), (0, 17), (1, 2), (2, 3), (3, 4), (5, 6), (6, 7), (7, 8), (9, 10), (10, 11), (11, 12), (13, 14), (14, 15), (15, 16), 22 | (17, 18), (18, 19), (19, 20)) 23 | self.root_joint_idx = self.joints_name.index('Wrist') 24 | 25 | # add fingertips to joint_regressor 26 | self.fingertip_vertex_idx = [745, 317, 444, 556, 673] # mesh vertex
idx (right hand) version0 27 | thumbtip_onehot = np.array([1 if i == 745 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 28 | indextip_onehot = np.array([1 if i == 317 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 29 | middletip_onehot = np.array([1 if i == 445 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 30 | ringtip_onehot = np.array([1 if i == 556 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 31 | pinkytip_onehot = np.array([1 if i == 673 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 32 | 33 | self.joint_regressor = np.concatenate((self.joint_regressor, thumbtip_onehot, indextip_onehot, middletip_onehot, ringtip_onehot, pinkytip_onehot)) 34 | self.joint_regressor = self.joint_regressor[[0, 13, 14, 15, 16, 1, 2, 3, 17, 4, 5, 6, 18, 10, 11, 12, 19, 7, 8, 9, 20], :] 35 | 36 | def get_layer(self, side, flat_hand_mean=False): 37 | return ManoLayer(mano_root=osp.join('./manopth', 'mano', 'models'), flat_hand_mean=flat_hand_mean, use_pca=False, side=side) # load right hand MANO model 38 | -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/manopth/examples/manopth_mindemo.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from manopth.manolayer import ManoLayer 3 | from manopth import demo 4 | 5 | batch_size = 10 6 | # Select number of principal components for pose space 7 | ncomps = 6 8 | 9 | # Initialize MANO layer 10 | mano_layer = ManoLayer( 11 | mano_root='mano/models', use_pca=True, ncomps=ncomps, flat_hand_mean=False) 12 | 13 | # Generate random shape parameters 14 | random_shape = torch.rand(batch_size, 10) 15 | # Generate random pose parameters, including 3 values for global axis-angle rotation 16 | random_pose = torch.rand(batch_size, ncomps + 3) 17 | 18 | # Forward pass through MANO layer 19 | hand_verts, hand_joints = mano_layer(random_pose, random_shape) 20 | demo.display_hand({ 21 | 'verts': hand_verts, 22 | 'joints': hand_joints 23 | }, 24 | mano_faces=mano_layer.th_faces) 25 | -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/manopth/mano/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/NovelConditionCreator/dexycb/manopth/mano/__init__.py -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/manopth/mano/webuser/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/NovelConditionCreator/dexycb/manopth/mano/webuser/__init__.py -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/manopth/mano/webuser/posemapper.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright 2017 Javier Romero, Dimitrios Tzionas, Michael J Black and the Max Planck Gesellschaft. All rights reserved. 3 | This software is provided for research purposes only. 
4 | By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license 5 | 6 | More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de. 7 | For comments or questions, please email us at: mano@tue.mpg.de 8 | 9 | 10 | About this file: 11 | ================ 12 | This file defines a wrapper for the loading functions of the MANO model. 13 | 14 | Modules included: 15 | - load_model: 16 | loads the MANO model from a given file location (i.e. a .pkl file location), 17 | or a dictionary object. 18 | 19 | ''' 20 | 21 | 22 | import chumpy as ch 23 | import numpy as np 24 | import cv2 25 | 26 | 27 | class Rodrigues(ch.Ch): 28 | dterms = 'rt' 29 | 30 | def compute_r(self): 31 | return cv2.Rodrigues(self.rt.r)[0] 32 | 33 | def compute_dr_wrt(self, wrt): 34 | if wrt is self.rt: 35 | return cv2.Rodrigues(self.rt.r)[1].T 36 | 37 | 38 | def lrotmin(p): 39 | if isinstance(p, np.ndarray): 40 | p = p.ravel()[3:] 41 | return np.concatenate( 42 | [(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel() 43 | for pp in p.reshape((-1, 3))]).ravel() 44 | if p.ndim != 2 or p.shape[1] != 3: 45 | p = p.reshape((-1, 3)) 46 | p = p[1:] 47 | return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() 48 | for pp in p]).ravel() 49 | 50 | 51 | def posemap(s): 52 | if s == 'lrotmin': 53 | return lrotmin 54 | else: 55 | raise Exception('Unknown posemapping: %s' % (str(s), )) 56 | -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/manopth/manopth/__init__.py: -------------------------------------------------------------------------------- 1 | name = 'manopth' 2 | -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/manopth/manopth/argutils.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import os 3 | import pickle 4 | import subprocess 5 | import sys 6 | 7 | 8 | def print_args(args): 9 | opts = vars(args) 10 | print('======= Options ========') 11 | for k, v in sorted(opts.items()): 12 | print('{}: {}'.format(k, v)) 13 | print('========================') 14 | 15 | 16 | def save_args(args, save_folder, opt_prefix='opt', verbose=True): 17 | opts = vars(args) 18 | # Create checkpoint folder 19 | if not os.path.exists(save_folder): 20 | os.makedirs(save_folder, exist_ok=True) 21 | 22 | # Save options 23 | opt_filename = '{}.txt'.format(opt_prefix) 24 | opt_path = os.path.join(save_folder, opt_filename) 25 | with open(opt_path, 'a') as opt_file: 26 | opt_file.write('====== Options ======\n') 27 | for k, v in sorted(opts.items()): 28 | opt_file.write( 29 | '{option}: {value}\n'.format(option=str(k), value=str(v))) 30 | opt_file.write('=====================\n') 31 | opt_file.write('launched {} at {}\n'.format( 32 | str(sys.argv[0]), str(datetime.datetime.now()))) 33 | 34 | # Add git info 35 | label = subprocess.check_output(["git", "describe", 36 | "--always"]).strip() 37 | if subprocess.call( 38 | ["git", "branch"], 39 | stderr=subprocess.STDOUT, 40 | stdout=open(os.devnull, 'w')) == 0: 41 | opt_file.write('=== Git info ====\n') 42 | opt_file.write('{}\n'.format(label)) 43 | commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']) 44 | opt_file.write('commit : {}\n'.format(commit.strip())) 45 | 46 | opt_picklename = '{}.pkl'.format(opt_prefix) 47 | opt_picklepath = os.path.join(save_folder, opt_picklename) 48 | with open(opt_picklepath, 'wb') as opt_file: 49 | pickle.dump(opts, 
opt_file) 50 | if verbose: 51 | print('Saved options to {}'.format(opt_path)) 52 | -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/manopth/manopth/demo.py: -------------------------------------------------------------------------------- 1 | from matplotlib import pyplot as plt 2 | from mpl_toolkits.mplot3d import Axes3D 3 | from mpl_toolkits.mplot3d.art3d import Poly3DCollection 4 | import numpy as np 5 | import torch 6 | 7 | from manopth.manolayer import ManoLayer 8 | 9 | 10 | def generate_random_hand(batch_size=1, ncomps=6, mano_root='mano/models'): 11 | nfull_comps = ncomps + 3 # Add global orientation dims to PCA 12 | random_pcapose = torch.rand(batch_size, nfull_comps) 13 | mano_layer = ManoLayer(mano_root=mano_root) 14 | verts, joints = mano_layer(random_pcapose) 15 | return {'verts': verts, 'joints': joints, 'faces': mano_layer.th_faces} 16 | 17 | 18 | def display_hand(hand_info, mano_faces=None, ax=None, alpha=0.2, batch_idx=0, show=True): 19 | """ 20 | Displays hand batch_idx in batch of hand_info, hand_info as returned by 21 | generate_random_hand 22 | """ 23 | if ax is None: 24 | fig = plt.figure() 25 | ax = fig.add_subplot(111, projection='3d') 26 | verts, joints = hand_info['verts'][batch_idx], hand_info['joints'][ 27 | batch_idx] 28 | if mano_faces is None: 29 | ax.scatter(verts[:, 0], verts[:, 1], verts[:, 2], alpha=0.1) 30 | else: 31 | mesh = Poly3DCollection(verts[mano_faces], alpha=alpha) 32 | face_color = (141 / 255, 184 / 255, 226 / 255) 33 | edge_color = (50 / 255, 50 / 255, 50 / 255) 34 | mesh.set_edgecolor(edge_color) 35 | mesh.set_facecolor(face_color) 36 | ax.add_collection3d(mesh) 37 | ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], color='r') 38 | cam_equal_aspect_3d(ax, verts.numpy()) 39 | if show: 40 | plt.show() 41 | 42 | 43 | def cam_equal_aspect_3d(ax, verts, flip_x=False): 44 | """ 45 | Centers view on cuboid containing hand and flips y and z axis 46 | and fixes azimuth 47 | """ 48 | extents = np.stack([verts.min(0), verts.max(0)], axis=1) 49 | sz = extents[:, 1] - extents[:, 0] 50 | centers = np.mean(extents, axis=1) 51 | maxsize = max(abs(sz)) 52 | r = maxsize / 2 53 | if flip_x: 54 | ax.set_xlim(centers[0] + r, centers[0] - r) 55 | else: 56 | ax.set_xlim(centers[0] - r, centers[0] + r) 57 | # Invert y and z axis 58 | ax.set_ylim(centers[1] + r, centers[1] - r) 59 | ax.set_zlim(centers[2] + r, centers[2] - r) 60 | -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/manopth/manopth/rot6d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def compute_rotation_matrix_from_ortho6d(poses): 5 | """ 6 | Code from 7 | https://github.com/papagina/RotationContinuity 8 | On the Continuity of Rotation Representations in Neural Networks 9 | Zhou et al. 
CVPR19 10 | https://zhouyisjtu.github.io/project_rotation/rotation.html 11 | """ 12 | x_raw = poses[:, 0:3] # batch*3 13 | y_raw = poses[:, 3:6] # batch*3 14 | 15 | x = normalize_vector(x_raw) # batch*3 16 | z = cross_product(x, y_raw) # batch*3 17 | z = normalize_vector(z) # batch*3 18 | y = cross_product(z, x) # batch*3 19 | 20 | x = x.view(-1, 3, 1) 21 | y = y.view(-1, 3, 1) 22 | z = z.view(-1, 3, 1) 23 | matrix = torch.cat((x, y, z), 2) # batch*3*3 24 | return matrix 25 | 26 | def robust_compute_rotation_matrix_from_ortho6d(poses): 27 | """ 28 | Instead of making 2nd vector orthogonal to first 29 | create a base that takes into account the two predicted 30 | directions equally 31 | """ 32 | x_raw = poses[:, 0:3] # batch*3 33 | y_raw = poses[:, 3:6] # batch*3 34 | 35 | x = normalize_vector(x_raw) # batch*3 36 | y = normalize_vector(y_raw) # batch*3 37 | middle = normalize_vector(x + y) 38 | orthmid = normalize_vector(x - y) 39 | x = normalize_vector(middle + orthmid) 40 | y = normalize_vector(middle - orthmid) 41 | # Their scalar product should be small ! 42 | # assert torch.einsum("ij,ij->i", [x, y]).abs().max() < 0.00001 43 | z = normalize_vector(cross_product(x, y)) 44 | 45 | x = x.view(-1, 3, 1) 46 | y = y.view(-1, 3, 1) 47 | z = z.view(-1, 3, 1) 48 | matrix = torch.cat((x, y, z), 2) # batch*3*3 49 | # Check for reflection in matrix ! If found, flip last vector TODO 50 | assert (torch.stack([torch.det(mat) for mat in matrix ])< 0).sum() == 0 51 | return matrix 52 | 53 | 54 | def normalize_vector(v): 55 | batch = v.shape[0] 56 | v_mag = torch.sqrt(v.pow(2).sum(1)) # batch 57 | v_mag = torch.max(v_mag, v.new([1e-8])) 58 | v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1]) 59 | v = v/v_mag 60 | return v 61 | 62 | 63 | def cross_product(u, v): 64 | batch = u.shape[0] 65 | i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1] 66 | j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2] 67 | k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0] 68 | 69 | out = torch.cat((i.view(batch, 1), j.view(batch, 1), k.view(batch, 1)), 1) 70 | 71 | return out 72 | -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/manopth/manopth/rotproj.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def batch_rotprojs(batches_rotmats): 5 | proj_rotmats = [] 6 | for batch_idx, batch_rotmats in enumerate(batches_rotmats): 7 | proj_batch_rotmats = [] 8 | for rot_idx, rotmat in enumerate(batch_rotmats): 9 | # GPU implementation of svd is VERY slow 10 | # ~ 2 10^-3 per hit vs 5 10^-5 on cpu 11 | U, S, V = rotmat.cpu().svd() 12 | rotmat = torch.matmul(U, V.transpose(0, 1)) 13 | orth_det = rotmat.det() 14 | # Remove reflection 15 | if orth_det < 0: 16 | rotmat[:, 2] = -1 * rotmat[:, 2] 17 | 18 | rotmat = rotmat.cuda() 19 | proj_batch_rotmats.append(rotmat) 20 | proj_rotmats.append(torch.stack(proj_batch_rotmats)) 21 | return torch.stack(proj_rotmats) 22 | -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/manopth/manopth/tensutils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from dexycb.manopth.manopth import rodrigues_layer 4 | 5 | 6 | def th_posemap_axisang(pose_vectors): 7 | rot_nb = int(pose_vectors.shape[1] / 3) 8 | pose_vec_reshaped = pose_vectors.contiguous().view(-1, 3) 9 | rot_mats = rodrigues_layer.batch_rodrigues(pose_vec_reshaped) 10 | rot_mats = rot_mats.view(pose_vectors.shape[0], rot_nb * 9) 
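# rot_mats now has shape (batch, 9 * n_joints): one row-major flattened 3x3
# rotation matrix per axis-angle segment. subtract_flat_id (defined below)
# subtracts the identity from each 9-block, so a zero (rest) pose produces an
# all-zero pose map.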
11 | pose_maps = subtract_flat_id(rot_mats) 12 | return pose_maps, rot_mats 13 | 14 | 15 | def th_with_zeros(tensor): 16 | batch_size = tensor.shape[0] 17 | padding = tensor.new([0.0, 0.0, 0.0, 1.0]) 18 | padding.requires_grad = False 19 | 20 | concat_list = [tensor, padding.view(1, 1, 4).repeat(batch_size, 1, 1)] 21 | cat_res = torch.cat(concat_list, 1) 22 | return cat_res 23 | 24 | 25 | def th_pack(tensor): 26 | batch_size = tensor.shape[0] 27 | padding = tensor.new_zeros((batch_size, 4, 3)) 28 | padding.requires_grad = False 29 | pack_list = [padding, tensor] 30 | pack_res = torch.cat(pack_list, 2) 31 | return pack_res 32 | 33 | 34 | def subtract_flat_id(rot_mats): 35 | # Subtracts identity as a flattened tensor 36 | rot_nb = int(rot_mats.shape[1] / 9) 37 | id_flat = torch.eye(3, dtype=rot_mats.dtype, device=rot_mats.device).view(1, 9).repeat(rot_mats.shape[0], rot_nb) 38 | # id_flat.requires_grad = False 39 | results = rot_mats - id_flat 40 | return results 41 | 42 | 43 | def make_list(tensor): 44 | # type: (List[int]) -> List[int] 45 | return tensor 46 | -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/shaders/mesh.frag: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | in vec3 frag_position; 4 | in vec3 frag_normal; 5 | 6 | out vec4 frag_color; 7 | 8 | void main() 9 | { 10 | vec3 normal = normalize(frag_normal); 11 | 12 | frag_color = vec4(normal * 0.5 + 0.5, 1.0); 13 | } -------------------------------------------------------------------------------- /NovelConditionCreator/dexycb/shaders/mesh.vert: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | // Vertex Attributes 4 | layout(location = 0) in vec3 position; 5 | layout(location = NORMAL_LOC) in vec3 normal; 6 | layout(location = INST_M_LOC) in mat4 inst_m; 7 | 8 | // Uniforms 9 | uniform mat4 M; 10 | uniform mat4 V; 11 | uniform mat4 P; 12 | 13 | // Outputs 14 | out vec3 frag_position; 15 | out vec3 frag_normal; 16 | 17 | void main() 18 | { 19 | gl_Position = P * V * M * inst_m * vec4(position, 1); 20 | frag_position = vec3(M * inst_m * vec4(position, 1.0)); 21 | 22 | mat4 N = transpose(inverse(M * inst_m)); 23 | frag_normal = normalize(vec3(N * vec4(normal, 0.0))); 24 | } -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/NovelConditionCreator/ho3d/__init__.py -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/mano.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os.path as osp 3 | import sys 4 | 5 | sys.path.append("../") 6 | from dexycb.manopth.manopth.manolayer import ManoLayer 7 | 8 | 9 | class MANO(object): 10 | 11 | def __init__(self, side, flat_hand_mean=False): 12 | self.layer = self.get_layer(side, flat_hand_mean) 13 | self.vertex_num = 778 14 | self.face = self.layer.th_faces.numpy() 15 | self.joint_regressor = self.layer.th_J_regressor.numpy() 16 | 17 | self.joint_num = 21 18 | self.joints_name = ('Wrist', 'Thumb_1', 'Thumb_2', 'Thumb_3', 'Thumb_4', 'Index_1', 'Index_2', 'Index_3', 'Index_4', 'Middle_1', 'Middle_2', 'Middle_3', 'Middle_4', 19 | 'Ring_1', 'Ring_2', 
'Ring_3', 'Ring_4', 'Pinky_1', 'Pinky_2', 'Pinky_3', 'Pinky_4') 20 | 21 | self.skeleton = ((0, 1), (0, 5), (0, 9), (0, 13), (0, 17), (1, 2), (2, 3), (3, 4), (5, 6), (6, 7), (7, 8), (9, 10), (10, 11), (11, 12), (13, 14), (14, 15), (15, 16), 22 | (17, 18), (18, 19), (19, 20)) 23 | self.root_joint_idx = self.joints_name.index("Wrist") 24 | 25 | # add fingertips to joint_regressor 26 | self.fingertip_vertex_idx = [745, 317, 444, 556, 673] # mesh vertex idx (right hand) version0 27 | thumbtip_onehot = np.array([1 if i == 745 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 28 | indextip_onehot = np.array([1 if i == 317 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 29 | middletip_onehot = np.array([1 if i == 445 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 30 | ringtip_onehot = np.array([1 if i == 556 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 31 | pinkytip_onehot = np.array([1 if i == 673 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1) 32 | 33 | self.joint_regressor = np.concatenate((self.joint_regressor, thumbtip_onehot, indextip_onehot, middletip_onehot, ringtip_onehot, pinkytip_onehot)) 34 | self.joint_regressor = self.joint_regressor[[0, 13, 14, 15, 16, 1, 2, 3, 17, 4, 5, 6, 18, 10, 11, 12, 19, 7, 8, 9, 20], :] 35 | 36 | def get_layer(self, side, flat_hand_mean=False): 37 | return ManoLayer(mano_root=osp.join("./manopth", "mano", "models"), flat_hand_mean=flat_hand_mean, use_pca=False, side=side) # load right hand MANO model 38 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/.gitignore: -------------------------------------------------------------------------------- 1 | *.sw* 2 | *.bak 3 | *_bak.py 4 | 5 | .cache/ 6 | __pycache__/ 7 | build/ 8 | dist/ 9 | manopth_hassony2.egg-info/ 10 | mano/models/ 11 | assets/mano_layer.svg 12 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/environment.yml: -------------------------------------------------------------------------------- 1 | name: manopth 2 | 3 | dependencies: 4 | - opencv 5 | - python=3.7 6 | - matplotlib 7 | - numpy 8 | - pytorch 9 | - tqdm 10 | - git 11 | - pip: 12 | - git+https://github.com/hassony2/chumpy.git 13 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/examples/manopth_mindemo.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from manopth.manolayer import ManoLayer 3 | from manopth import demo 4 | 5 | batch_size = 10 6 | # Select number of principal components for pose space 7 | ncomps = 6 8 | 9 | # Initialize MANO layer 10 | mano_layer = ManoLayer( 11 | mano_root='mano/models', use_pca=True, ncomps=ncomps, flat_hand_mean=False) 12 | 13 | # Generate random shape parameters 14 | random_shape = torch.rand(batch_size, 10) 15 | # Generate random pose parameters, including 3 values for global axis-angle rotation 16 | random_pose = torch.rand(batch_size, ncomps + 3) 17 | 18 | # Forward pass through MANO layer 19 | hand_verts, hand_joints = mano_layer(random_pose, random_shape) 20 | demo.display_hand({ 21 | 'verts': hand_verts, 22 | 'joints': hand_joints 23 | }, 24 | mano_faces=mano_layer.th_faces) 25 |
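The `MANO` wrapper above augments the 16-row MANO joint regressor with five one-hot fingertip rows and reorders it into a 21-joint convention, so 3D joints come out of a single matrix product with the posed mesh vertices. A minimal sketch of that product, using random stand-in values for the regressor weights and vertices (the real ones come from `ManoLayer`):

import numpy as np

# Stand-in shapes only: the real joint_regressor is built in MANO.__init__ above
# (16 MANO rows + 5 fingertip one-hot rows, reordered to 21 joints).
joint_regressor = np.random.rand(21, 778).astype(np.float32)
verts = np.random.rand(778, 3).astype(np.float32)  # posed mesh vertices

joints = joint_regressor @ verts  # (21, 778) @ (778, 3) -> (21, 3) 3D joints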
-------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/mano/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/NovelConditionCreator/ho3d/manopth/mano/__init__.py -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/mano/webuser/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/NovelConditionCreator/ho3d/manopth/mano/webuser/__init__.py -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/mano/webuser/posemapper.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright 2017 Javier Romero, Dimitrios Tzionas, Michael J Black and the Max Planck Gesellschaft. All rights reserved. 3 | This software is provided for research purposes only. 4 | By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license 5 | 6 | More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de. 7 | For comments or questions, please email us at: mano@tue.mpg.de 8 | 9 | 10 | About this file: 11 | ================ 12 | This file defines a wrapper for the loading functions of the MANO model. 13 | 14 | Modules included: 15 | - load_model: 16 | loads the MANO model from a given file location (i.e. a .pkl file location), 17 | or a dictionary object. 18 | 19 | ''' 20 | 21 | 22 | import chumpy as ch 23 | import numpy as np 24 | import cv2 25 | 26 | 27 | class Rodrigues(ch.Ch): 28 | dterms = 'rt' 29 | 30 | def compute_r(self): 31 | return cv2.Rodrigues(self.rt.r)[0] 32 | 33 | def compute_dr_wrt(self, wrt): 34 | if wrt is self.rt: 35 | return cv2.Rodrigues(self.rt.r)[1].T 36 | 37 | 38 | def lrotmin(p): 39 | if isinstance(p, np.ndarray): 40 | p = p.ravel()[3:] 41 | return np.concatenate( 42 | [(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel() 43 | for pp in p.reshape((-1, 3))]).ravel() 44 | if p.ndim != 2 or p.shape[1] != 3: 45 | p = p.reshape((-1, 3)) 46 | p = p[1:] 47 | return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() 48 | for pp in p]).ravel() 49 | 50 | 51 | def posemap(s): 52 | if s == 'lrotmin': 53 | return lrotmin 54 | else: 55 | raise Exception('Unknown posemapping: %s' % (str(s), )) 56 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/manopth/__init__.py: -------------------------------------------------------------------------------- 1 | name = 'manopth' 2 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/manopth/argutils.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import os 3 | import pickle 4 | import subprocess 5 | import sys 6 | 7 | 8 | def print_args(args): 9 | opts = vars(args) 10 | print('======= Options ========') 11 | for k, v in sorted(opts.items()): 12 | print('{}: {}'.format(k, v)) 13 | print('========================') 14 | 15 | 16 | def save_args(args, save_folder, opt_prefix='opt', verbose=True): 17 | opts = vars(args) 18 | # Create checkpoint folder 19 | if not os.path.exists(save_folder): 20 | 
os.makedirs(save_folder, exist_ok=True) 21 | 22 | # Save options 23 | opt_filename = '{}.txt'.format(opt_prefix) 24 | opt_path = os.path.join(save_folder, opt_filename) 25 | with open(opt_path, 'a') as opt_file: 26 | opt_file.write('====== Options ======\n') 27 | for k, v in sorted(opts.items()): 28 | opt_file.write( 29 | '{option}: {value}\n'.format(option=str(k), value=str(v))) 30 | opt_file.write('=====================\n') 31 | opt_file.write('launched {} at {}\n'.format( 32 | str(sys.argv[0]), str(datetime.datetime.now()))) 33 | 34 | # Add git info 35 | label = subprocess.check_output(["git", "describe", 36 | "--always"]).strip() 37 | if subprocess.call( 38 | ["git", "branch"], 39 | stderr=subprocess.STDOUT, 40 | stdout=open(os.devnull, 'w')) == 0: 41 | opt_file.write('=== Git info ====\n') 42 | opt_file.write('{}\n'.format(label)) 43 | commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']) 44 | opt_file.write('commit : {}\n'.format(commit.strip())) 45 | 46 | opt_picklename = '{}.pkl'.format(opt_prefix) 47 | opt_picklepath = os.path.join(save_folder, opt_picklename) 48 | with open(opt_picklepath, 'wb') as opt_file: 49 | pickle.dump(opts, opt_file) 50 | if verbose: 51 | print('Saved options to {}'.format(opt_path)) 52 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/manopth/demo.py: -------------------------------------------------------------------------------- 1 | from matplotlib import pyplot as plt 2 | from mpl_toolkits.mplot3d import Axes3D 3 | from mpl_toolkits.mplot3d.art3d import Poly3DCollection 4 | import numpy as np 5 | import torch 6 | 7 | from manopth.manolayer import ManoLayer 8 | 9 | 10 | def generate_random_hand(batch_size=1, ncomps=6, mano_root='mano/models'): 11 | nfull_comps = ncomps + 3 # Add global orientation dims to PCA 12 | random_pcapose = torch.rand(batch_size, nfull_comps) 13 | mano_layer = ManoLayer(mano_root=mano_root) 14 | verts, joints = mano_layer(random_pcapose) 15 | return {'verts': verts, 'joints': joints, 'faces': mano_layer.th_faces} 16 | 17 | 18 | def display_hand(hand_info, mano_faces=None, ax=None, alpha=0.2, batch_idx=0, show=True): 19 | """ 20 | Displays hand batch_idx in batch of hand_info, hand_info as returned by 21 | generate_random_hand 22 | """ 23 | if ax is None: 24 | fig = plt.figure() 25 | ax = fig.add_subplot(111, projection='3d') 26 | verts, joints = hand_info['verts'][batch_idx], hand_info['joints'][ 27 | batch_idx] 28 | if mano_faces is None: 29 | ax.scatter(verts[:, 0], verts[:, 1], verts[:, 2], alpha=0.1) 30 | else: 31 | mesh = Poly3DCollection(verts[mano_faces], alpha=alpha) 32 | face_color = (141 / 255, 184 / 255, 226 / 255) 33 | edge_color = (50 / 255, 50 / 255, 50 / 255) 34 | mesh.set_edgecolor(edge_color) 35 | mesh.set_facecolor(face_color) 36 | ax.add_collection3d(mesh) 37 | ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], color='r') 38 | cam_equal_aspect_3d(ax, verts.numpy()) 39 | if show: 40 | plt.show() 41 | 42 | 43 | def cam_equal_aspect_3d(ax, verts, flip_x=False): 44 | """ 45 | Centers view on cuboid containing hand and flips y and z axis 46 | and fixes azimuth 47 | """ 48 | extents = np.stack([verts.min(0), verts.max(0)], axis=1) 49 | sz = extents[:, 1] - extents[:, 0] 50 | centers = np.mean(extents, axis=1) 51 | maxsize = max(abs(sz)) 52 | r = maxsize / 2 53 | if flip_x: 54 | ax.set_xlim(centers[0] + r, centers[0] - r) 55 | else: 56 | ax.set_xlim(centers[0] - r, centers[0] + r) 57 | # Invert y and z axis 58 | ax.set_ylim(centers[1] + r, 
centers[1] - r) 59 | ax.set_zlim(centers[2] + r, centers[2] - r) 60 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/manopth/rot6d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def compute_rotation_matrix_from_ortho6d(poses): 5 | """ 6 | Code from 7 | https://github.com/papagina/RotationContinuity 8 | On the Continuity of Rotation Representations in Neural Networks 9 | Zhou et al. CVPR19 10 | https://zhouyisjtu.github.io/project_rotation/rotation.html 11 | """ 12 | x_raw = poses[:, 0:3] # batch*3 13 | y_raw = poses[:, 3:6] # batch*3 14 | 15 | x = normalize_vector(x_raw) # batch*3 16 | z = cross_product(x, y_raw) # batch*3 17 | z = normalize_vector(z) # batch*3 18 | y = cross_product(z, x) # batch*3 19 | 20 | x = x.view(-1, 3, 1) 21 | y = y.view(-1, 3, 1) 22 | z = z.view(-1, 3, 1) 23 | matrix = torch.cat((x, y, z), 2) # batch*3*3 24 | return matrix 25 | 26 | def robust_compute_rotation_matrix_from_ortho6d(poses): 27 | """ 28 | Instead of making 2nd vector orthogonal to first 29 | create a base that takes into account the two predicted 30 | directions equally 31 | """ 32 | x_raw = poses[:, 0:3] # batch*3 33 | y_raw = poses[:, 3:6] # batch*3 34 | 35 | x = normalize_vector(x_raw) # batch*3 36 | y = normalize_vector(y_raw) # batch*3 37 | middle = normalize_vector(x + y) 38 | orthmid = normalize_vector(x - y) 39 | x = normalize_vector(middle + orthmid) 40 | y = normalize_vector(middle - orthmid) 41 | # Their scalar product should be small ! 42 | # assert torch.einsum("ij,ij->i", [x, y]).abs().max() < 0.00001 43 | z = normalize_vector(cross_product(x, y)) 44 | 45 | x = x.view(-1, 3, 1) 46 | y = y.view(-1, 3, 1) 47 | z = z.view(-1, 3, 1) 48 | matrix = torch.cat((x, y, z), 2) # batch*3*3 49 | # Check for reflection in matrix ! 
If found, flip last vector TODO 50 | assert (torch.stack([torch.det(mat) for mat in matrix ])< 0).sum() == 0 51 | return matrix 52 | 53 | 54 | def normalize_vector(v): 55 | batch = v.shape[0] 56 | v_mag = torch.sqrt(v.pow(2).sum(1)) # batch 57 | v_mag = torch.max(v_mag, v.new([1e-8])) 58 | v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1]) 59 | v = v/v_mag 60 | return v 61 | 62 | 63 | def cross_product(u, v): 64 | batch = u.shape[0] 65 | i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1] 66 | j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2] 67 | k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0] 68 | 69 | out = torch.cat((i.view(batch, 1), j.view(batch, 1), k.view(batch, 1)), 1) 70 | 71 | return out 72 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/manopth/rotproj.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def batch_rotprojs(batches_rotmats): 5 | proj_rotmats = [] 6 | for batch_idx, batch_rotmats in enumerate(batches_rotmats): 7 | proj_batch_rotmats = [] 8 | for rot_idx, rotmat in enumerate(batch_rotmats): 9 | # GPU implementation of svd is VERY slow 10 | # ~ 2 10^-3 per hit vs 5 10^-5 on cpu 11 | U, S, V = rotmat.cpu().svd() 12 | rotmat = torch.matmul(U, V.transpose(0, 1)) 13 | orth_det = rotmat.det() 14 | # Remove reflection 15 | if orth_det < 0: 16 | rotmat[:, 2] = -1 * rotmat[:, 2] 17 | 18 | rotmat = rotmat.cuda() 19 | proj_batch_rotmats.append(rotmat) 20 | proj_rotmats.append(torch.stack(proj_batch_rotmats)) 21 | return torch.stack(proj_rotmats) 22 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/manopth/tensutils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from dexycb.manopth.manopth import rodrigues_layer 4 | 5 | 6 | def th_posemap_axisang(pose_vectors): 7 | rot_nb = int(pose_vectors.shape[1] / 3) 8 | pose_vec_reshaped = pose_vectors.contiguous().view(-1, 3) 9 | rot_mats = rodrigues_layer.batch_rodrigues(pose_vec_reshaped) 10 | rot_mats = rot_mats.view(pose_vectors.shape[0], rot_nb * 9) 11 | pose_maps = subtract_flat_id(rot_mats) 12 | return pose_maps, rot_mats 13 | 14 | 15 | def th_with_zeros(tensor): 16 | batch_size = tensor.shape[0] 17 | padding = tensor.new([0.0, 0.0, 0.0, 1.0]) 18 | padding.requires_grad = False 19 | 20 | concat_list = [tensor, padding.view(1, 1, 4).repeat(batch_size, 1, 1)] 21 | cat_res = torch.cat(concat_list, 1) 22 | return cat_res 23 | 24 | 25 | def th_pack(tensor): 26 | batch_size = tensor.shape[0] 27 | padding = tensor.new_zeros((batch_size, 4, 3)) 28 | padding.requires_grad = False 29 | pack_list = [padding, tensor] 30 | pack_res = torch.cat(pack_list, 2) 31 | return pack_res 32 | 33 | 34 | def subtract_flat_id(rot_mats): 35 | # Subtracts identity as a flattened tensor 36 | rot_nb = int(rot_mats.shape[1] / 9) 37 | id_flat = torch.eye(3, dtype=rot_mats.dtype, device=rot_mats.device).view(1, 9).repeat(rot_mats.shape[0], rot_nb) 38 | # id_flat.requires_grad = False 39 | results = rot_mats - id_flat 40 | return results 41 | 42 | 43 | def make_list(tensor): 44 | # type: (List[int]) -> List[int] 45 | return tensor 46 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import find_packages, setup 2 | import warnings 3 | 4 | 
DEPENDENCY_PACKAGE_NAMES = ["matplotlib", "torch", "tqdm", "numpy", "cv2", 5 | "chumpy"] 6 | 7 | 8 | def check_dependencies(): 9 | missing_dependencies = [] 10 | for package_name in DEPENDENCY_PACKAGE_NAMES: 11 | try: 12 | __import__(package_name) 13 | except ImportError: 14 | missing_dependencies.append(package_name) 15 | 16 | if missing_dependencies: 17 | warnings.warn( 18 | 'Missing dependencies: {}. We recommend you follow ' 19 | 'the installation instructions at ' 20 | 'https://github.com/hassony2/manopth#installation'.format( 21 | missing_dependencies)) 22 | 23 | 24 | with open("README.md", "r") as fh: 25 | long_description = fh.read() 26 | 27 | check_dependencies() 28 | 29 | setup( 30 | name="manopth", 31 | version="0.0.1", 32 | author="Yana Hasson", 33 | author_email="yana.hasson.inria@gmail.com", 34 | packages=find_packages(exclude=('tests',)), 35 | python_requires=">=3.5.0", 36 | description="PyTorch mano layer", 37 | long_description=long_description, 38 | long_description_content_type="text/markdown", 39 | url="https://github.com/hassony2/manopth", 40 | classifiers=[ 41 | "Programming Language :: Python :: 3", 42 | "License :: OSI Approved :: GNU GENERAL PUBLIC LICENSE", 43 | "Operating System :: OS Independent", 44 | ], 45 | ) 46 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/manopth/test/test_demo.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from manopth.demo import generate_random_hand 4 | 5 | 6 | def test_generate_random_hand(): 7 | batch_size = 3 8 | hand_info = generate_random_hand(batch_size=batch_size, ncomps=6) 9 | verts = hand_info['verts'] 10 | joints = hand_info['joints'] 11 | assert verts.shape == (batch_size, 778, 3) 12 | assert joints.shape == (batch_size, 21, 3) 13 | -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/shaders/mesh.frag: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | in vec3 frag_position; 4 | in vec3 frag_normal; 5 | 6 | out vec4 frag_color; 7 | 8 | void main() 9 | { 10 | vec3 normal = normalize(frag_normal); 11 | 12 | frag_color = vec4(normal * 0.5 + 0.5, 1.0); 13 | } -------------------------------------------------------------------------------- /NovelConditionCreator/ho3d/shaders/mesh.vert: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | // Vertex Attributes 4 | layout(location = 0) in vec3 position; 5 | layout(location = NORMAL_LOC) in vec3 normal; 6 | layout(location = INST_M_LOC) in mat4 inst_m; 7 | 8 | // Uniforms 9 | uniform mat4 M; 10 | uniform mat4 V; 11 | uniform mat4 P; 12 | 13 | // Outputs 14 | out vec3 frag_position; 15 | out vec3 frag_normal; 16 | 17 | void main() 18 | { 19 | gl_Position = P * V * M * inst_m * vec4(position, 1); 20 | frag_position = vec3(M * inst_m * vec4(position, 1.0)); 21 | 22 | mat4 N = transpose(inverse(M * inst_m)); 23 | frag_normal = normalize(vec3(N * vec4(normal, 0.0))); 24 | } -------------------------------------------------------------------------------- /assets/poster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hxwork/HandBooster_Pytorch/125c7538bee42f3835b92d09507ff3bb1509f473/assets/poster.png -------------------------------------------------------------------------------- /requirements.txt: 
-------------------------------------------------------------------------------- 1 | accelerate==0.16.0 2 | chumpy==0.70 3 | coloredlogs==15.0.1 4 | easydict==1.10 5 | einops==0.8.0 6 | ema_pytorch==0.2.2 7 | imageio==2.22.1 8 | lxml==5.3.0 9 | matplotlib==3.6.2 10 | nori2==1.12.1 11 | numpy==1.23.4 12 | open3d==0.16.0 13 | opencv_python==4.6.0.66 14 | openmesh==1.2.1 15 | Pillow==9.2.0 16 | plotly==5.11.0 17 | psbody_mesh==0.4 18 | pycocotools==2.0.4 19 | pymeshlab==2023.12.post2 20 | pyrender==0.1.45 21 | pytorch3d==0.7.4 22 | pytorch_fid==0.3.0 23 | PyYAML==6.0 24 | refile==7.0.10 25 | sapien==2.2.2 26 | scikit_image==0.19.3 27 | scikit_learn==1.1.2 28 | scipy==1.14.1 29 | setuptools==65.5.0 30 | termcolor==2.5.0 31 | torch==1.12.1 32 | torch_geometric==2.1.0.post1 33 | torchgeometry==0.1.2 34 | torchvision==0.13.1 35 | tqdm==4.64.1 36 | transforms3d==0.4.2 37 | trimesh==3.15.3 38 | --------------------------------------------------------------------------------
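Taken together, fetch_optimizer (HandReconstruction/optimizer/optimizer.py), adjust_learning_rate (mob_recon/utils/warmup_scheduler.py), and Writer (mob_recon/utils/writer.py) cover the optimizer, LR schedule, and logging of a training run. A minimal sketch of how they might be wired together, assuming HandReconstruction is on PYTHONPATH; the cfg object here is a hypothetical stand-in for the EasyDict loaded from one of the experiment cfg.json files:

import torch.nn as nn
from easydict import EasyDict

from optimizer.optimizer import fetch_optimizer  # HandReconstruction/optimizer/optimizer.py above

# Hypothetical config: only the fields that fetch_optimizer actually reads.
cfg = EasyDict({
    "optimizer": {"name": "adam", "lr": 1e-4},
    "scheduler": {"name": "step", "milestones": [30, 40], "gamma": 0.1},
})

model = nn.Linear(10, 1)  # stand-in for the hand reconstruction network
optimizer, scheduler = fetch_optimizer(cfg, model)

for epoch in range(50):
    # ... one epoch of training: forward, loss, backward, optimizer.step() ...
    scheduler.step()  # MultiStepLR decays the lr by gamma at epochs 30 and 40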