├── apps
│   ├── ICON.py
│   ├── Normal.py
│   ├── infer.py
│   ├── infer_normal.py
│   ├── train-normal.py
│   └── train.py
├── configs
│   ├── d_if.yaml
│   ├── pamir.yaml
│   ├── pifu.yaml
│   └── train
│       ├── d_if.yaml
│       ├── pamir.yaml
│       └── pifu.yaml
├── environment.yaml
├── examples
│   ├── fashion cloth2.jpg
│   ├── fashion_cloth1.jpg
│   ├── handsome boy.jpg
│   ├── loose cloth.jpg
│   ├── strange_pose.jpg
│   └── tight cloth.jpg
├── lib
│   ├── __init__.py
│   ├── common
│   │   ├── __init__.py
│   │   ├── cloth_extraction.py
│   │   ├── config.py
│   │   ├── render.py
│   │   ├── render_utils.py
│   │   ├── seg3d_lossless.py
│   │   ├── seg3d_utils.py
│   │   ├── smpl_vert_segmentation.json
│   │   └── train_util.py
│   ├── dataloader_demo.py
│   ├── dataset
│   │   ├── Evaluator.py
│   │   ├── NormalDataset.py
│   │   ├── NormalModule.py
│   │   ├── PIFuDataModule.py
│   │   ├── PIFuDataset-KNNsample.py
│   │   ├── PIFuDataset-uncertainty.py
│   │   ├── PIFuDataset.py
│   │   ├── PIFuDataset_occ.py
│   │   ├── PointFeat.py
│   │   ├── TestDataset.py
│   │   ├── __init__.py
│   │   ├── body_model.py
│   │   ├── hoppeMesh.py
│   │   ├── mesh_util.py
│   │   └── tbfo.ttf
│   ├── hybrik
│   │   └── models
│   │       ├── layers
│   │       │   ├── Resnet.py
│   │       │   └── smpl
│   │       │       ├── SMPL.py
│   │       │       └── lbs.py
│   │       └── simple3dpose.py
│   ├── net
│   │   ├── BasePIFuNet.py
│   │   ├── FBNet.py
│   │   ├── HGFilters.py
│   │   ├── HGPIFuNet.py
│   │   ├── MLP_DIF.py
│   │   ├── NormalNet.py
│   │   ├── VE.py
│   │   ├── __init__.py
│   │   ├── geometry.py
│   │   ├── local_affine.py
│   │   ├── net_util.py
│   │   ├── spatial.py
│   │   └── voxelize.py
│   ├── pare
│   │   ├── LICENSE
│   │   ├── __init__.py
│   │   └── pare
│   │       ├── __init__.py
│   │       ├── core
│   │       │   ├── __init__.py
│   │       │   ├── config.py
│   │       │   ├── constants.py
│   │       │   └── tester.py
│   │       ├── models
│   │       │   ├── __init__.py
│   │       │   ├── backbone
│   │       │   │   ├── __init__.py
│   │       │   │   ├── hrnet.py
│   │       │   │   ├── hrnet_hmr.py
│   │       │   │   ├── hrnet_legacy.py
│   │       │   │   ├── hrnet_pare.py
│   │       │   │   ├── mobilenet.py
│   │       │   │   ├── resnet.py
│   │       │   │   └── utils.py
│   │       │   ├── head
│   │       │   │   ├── __init__.py
│   │       │   │   ├── hmr_head.py
│   │       │   │   ├── pare_head.py
│   │       │   │   ├── smpl_cam_head.py
│   │       │   │   └── smpl_head.py
│   │       │   ├── hmr.py
│   │       │   ├── layers
│   │       │   │   ├── __init__.py
│   │       │   │   ├── attention.py
│   │       │   │   ├── coattention.py
│   │       │   │   ├── interpolate.py
│   │       │   │   ├── keypoint_attention.py
│   │       │   │   ├── locallyconnected2d.py
│   │       │   │   ├── non_local
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   └── dot_product.py
│   │       │   │   ├── nonlocalattention.py
│   │       │   │   └── softargmax.py
│   │       │   └── pare.py
│   │       └── utils
│   │           ├── __init__.py
│   │           ├── geometry.py
│   │           ├── kp_utils.py
│   │           └── train_utils.py
│   ├── pixielib
│   │   ├── __init__.py
│   │   ├── models
│   │   │   ├── FLAME.py
│   │   │   ├── SMPLX.py
│   │   │   ├── __init__.py
│   │   │   ├── encoders.py
│   │   │   ├── hrnet.py
│   │   │   ├── lbs.py
│   │   │   ├── moderators.py
│   │   │   └── resnet.py
│   │   ├── pixie.py
│   │   └── utils
│   │       ├── array_cropper.py
│   │       ├── config.py
│   │       ├── renderer.py
│   │       ├── rotation_converter.py
│   │       ├── tensor_cropper.py
│   │       └── util.py
│   ├── pymaf
│   │   ├── configs
│   │   │   └── pymaf_config.yaml
│   │   ├── core
│   │   │   ├── __init__.py
│   │   │   ├── base_trainer.py
│   │   │   ├── cfgs.py
│   │   │   ├── constants.py
│   │   │   ├── fits_dict.py
│   │   │   ├── path_config.py
│   │   │   └── train_options.py
│   │   ├── models
│   │   │   ├── __init__.py
│   │   │   ├── hmr.py
│   │   │   ├── maf_extractor.py
│   │   │   ├── pymaf_net.py
│   │   │   ├── res_module.py
│   │   │   └── smpl.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── geometry.py
│   │       ├── imutils.py
│   │       ├── streamer.py
│   │       └── transforms.py
│   ├── renderer
│   │   ├── __init__.py
│   │   ├── camera.py
│   │   ├── gl
│   │   │   ├── __init__.py
│   │   │   ├── cam_render.py
│   │   │   ├── color_render.py
│   │   │   ├── data
│   │   │   │   ├── color.fs
│   │   │   │   ├── color.vs
│   │   │   │   ├── normal.fs
│   │   │   │   ├── normal.vs
│   │   │   │   ├── prt.fs
│   │   │   │   ├── prt.vs
│   │   │   │   ├── prt_uv.fs
│   │   │   │   ├── prt_uv.vs
│   │   │   │   ├── quad.fs
│   │   │   │   └── quad.vs
│   │   │   ├── framework.py
│   │   │   ├── glcontext.py
│   │   │   ├── init_gl.py
│   │   │   ├── norm_render.py
│   │   │   ├── normal_render.py
│   │   │   ├── prt_render.py
│   │   │   ├── render.py
│   │   │   └── render2.py
│   │   ├── glm.py
│   │   ├── mesh.py
│   │   ├── opengl_util.py
│   │   └── prt_util.py
│   └── smplx
│       ├── .gitignore
│       ├── LICENSE
│       ├── README.md
│       ├── __init__.py
│       ├── body_models.py
│       ├── joint_names.py
│       ├── lbs.py
│       ├── utils.py
│       ├── vertex_ids.py
│       └── vertex_joint_selector.py
├── media
│   ├── DIF-pipeline .png
│   └── psy_logo.png
├── readme.md
├── requirements.txt
└── scripts
    ├── __init__.py
    ├── env_sh.npy
    ├── render_batch.py
    ├── tetrahedronize_scripts
    │   ├── body_model.py
    │   └── tedrahedronize.py
    └── visibility_batch.py
--------------------------------------------------------------------------------
/apps/train.py:
--------------------------------------------------------------------------------
# ignore all the warnings
import warnings
import logging

warnings.filterwarnings('ignore')
logging.getLogger("wandb").setLevel(logging.ERROR)
logging.getLogger("lightning").setLevel(logging.ERROR)
logging.getLogger("trimesh").setLevel(logging.ERROR)

from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import loggers as pl_loggers
from apps.ICON import ICON
# from ICON import ICON
from lib.dataset.PIFuDataModule import PIFuDataModule
from lib.common.config import get_cfg_defaults
from lib.common.train_util import SubTrainer, load_networks
import os
import os.path as osp
import argparse
import numpy as np

if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("-cfg", "--config_file", type=str, help="path of the yaml config file")
    parser.add_argument("-test", "--test_mode", action="store_true")
    args = parser.parse_args()
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.config_file)
    # cfg.merge_from_file('./configs/train/icon-filter.yaml')
    cfg.freeze()

    os.makedirs(osp.join(cfg.results_path, cfg.name), exist_ok=True)
    os.makedirs(osp.join(cfg.ckpt_dir, cfg.name), exist_ok=True)

    tb_logger = pl_loggers.TensorBoardLogger(
        save_dir=cfg.results_path, name=cfg.name, default_hp_metric=False
    )

    if cfg.overfit:
        cfg_overfit_list = ["batch_size", 1]
        cfg.merge_from_list(cfg_overfit_list)
        save_k = 0

    checkpoint = ModelCheckpoint(
        dirpath=osp.join(cfg.ckpt_dir, cfg.name),
        save_top_k=1,
        verbose=False,
        save_weights_only=True,
        monitor="val/avgacc",
        mode="max",
        filename="{epoch:02d}",
    )

    if cfg.test_mode or args.test_mode:

        cfg_test_mode = [
            "test_mode",
            True,
            "dataset.types",
            ["cape"],
            "dataset.scales",
            [100.0],
            "dataset.rotation_num",
            3,
            "mcube_res",
            256,
            "clean_mesh",
            True,
        ]
        cfg.merge_from_list(cfg_test_mode)

    freq_eval = cfg.freq_eval
    if cfg.fast_dev > 0:
        freq_eval = cfg.fast_dev

    trainer_kwargs = {
        "gpus": cfg.gpus,
        "auto_select_gpus": True,
        "reload_dataloaders_every_epoch": True,
        "sync_batchnorm": True,
        "benchmark": True,
        "logger": tb_logger,
        "track_grad_norm": -1,
        "num_sanity_val_steps": cfg.num_sanity_val_steps,
        "checkpoint_callback": checkpoint,
        "limit_train_batches": cfg.dataset.train_bsize,
        "limit_val_batches": cfg.dataset.val_bsize if not cfg.overfit else 0.001,
        "limit_test_batches": cfg.dataset.test_bsize if not cfg.overfit else 0.0,
        "profiler": None,
        "fast_dev_run": cfg.fast_dev,
        "max_epochs": cfg.num_epoch,
        "callbacks": [LearningRateMonitor(logging_interval="step")],
    }

    datamodule = PIFuDataModule(cfg)
"log_every_n_steps": 106 | int(cfg.freq_plot * train_len // cfg.batch_size), 107 | "val_check_interval": 108 | int(freq_eval * train_len // cfg.batch_size) if freq_eval > 10 else freq_eval, 109 | } 110 | ) 111 | 112 | if cfg.overfit: 113 | cfg_show_list = ["freq_show_train", 100.0, "freq_show_val", 10.0] 114 | else: 115 | cfg_show_list = [ 116 | "freq_show_train", 117 | cfg.freq_show_train * train_len // cfg.batch_size, 118 | "freq_show_val", 119 | max(cfg.freq_show_val * val_len, 1.0), 120 | ] 121 | 122 | cfg.merge_from_list(cfg_show_list) 123 | 124 | model = ICON(cfg) 125 | 126 | trainer = SubTrainer(**trainer_kwargs) 127 | 128 | # load checkpoints 129 | load_networks(cfg, model, mlp_path=cfg.resume_path, normal_path=cfg.normal_path) 130 | 131 | if not cfg.test_mode: 132 | trainer.fit(model=model, datamodule=datamodule) 133 | trainer.test(model=model, datamodule=datamodule) 134 | else: 135 | np.random.seed(1993) 136 | trainer.test(model=model, datamodule=datamodule) 137 | -------------------------------------------------------------------------------- /configs/d_if.yaml: -------------------------------------------------------------------------------- 1 | name: d_if 2 | ckpt_dir: "./data/ckpt/" 3 | # resume_path: "/data/yangxueting/D_IF_opensource/data/ckpt/d_if/final.ckpt" 4 | normal_path: "/data/yangxueting/ICON_orl/data/ckpt/normal.ckpt" 5 | 6 | 7 | test_mode: True 8 | batch_size: 1 9 | 10 | net: 11 | mlp_dim: [256, 512, 256, 128, 1] 12 | res_layers: [2,3,4] 13 | num_stack: 2 14 | prior_type: "icon" # icon/pamir/icon 15 | use_filter: True 16 | in_geo: (('normal_F',3), ('normal_B',3)) 17 | in_nml: (('image',3), ('T_normal_F',3), ('T_normal_B',3)) 18 | smpl_feats: ['sdf', 'cmap', 'norm', 'vis'] 19 | gtype: 'HGPIFuNet' 20 | norm_mlp: 'batch' 21 | hourglass_dim: 6 22 | smpl_dim: 7 23 | 24 | # user defined 25 | mcube_res: 512 # occupancy field resolution, higher --> more details 26 | clean_mesh: False # if True, will remove floating pieces -------------------------------------------------------------------------------- /configs/pamir.yaml: -------------------------------------------------------------------------------- 1 | name: a_wild_pamir 2 | ckpt_dir: "./data/ckpt/" 3 | resume_path: "/data/yangxueting/ICONforPIFU/data/ckpt/pamir_orl/epoch=07.ckpt" 4 | normal_path: "/data/yangxueting/ICON_orl/data/ckpt/normal.ckpt" 5 | 6 | test_mode: True 7 | batch_size: 1 8 | 9 | net: 10 | mlp_dim: [256, 512, 256, 128, 1] 11 | res_layers: [2,3,4] 12 | num_stack: 2 13 | prior_type: "pamir" # icon/pamir/icon 14 | use_filter: True 15 | in_geo: (('image',3), ('normal_F',3), ('normal_B',3)) 16 | in_nml: (('image',3), ('T_normal_F',3), ('T_normal_B',3)) 17 | gtype: 'HGPIFuNet' 18 | norm_mlp: 'batch' 19 | hourglass_dim: 6 20 | voxel_dim: 7 21 | 22 | # user defined 23 | mcube_res: 256 # occupancy field resolution, higher --> more details 24 | clean_mesh: False # if True, will remove floating pieces -------------------------------------------------------------------------------- /configs/pifu.yaml: -------------------------------------------------------------------------------- 1 | name: pifuwoR 2 | ckpt_dir: "./data/ckpt/" 3 | # resume_path: "/data/yangxueting/ICONforPIFU/data/ckpt/pifu_orl/epoch=03.ckpt" 4 | normal_path: "/data/yangxueting/ICON_orl/data/ckpt/normal.ckpt" 5 | 6 | test_mode: True 7 | batch_size: 1 8 | 9 | net: 10 | mlp_dim: [256, 512, 256, 128, 1] 11 | res_layers: [2,3,4] 12 | num_stack: 2 13 | prior_type: "pifu" # icon/pamir/icon 14 | use_filter: True 15 | in_geo: (('image',3), ('normal_F',3), 
--------------------------------------------------------------------------------
/configs/train/d_if.yaml:
--------------------------------------------------------------------------------
name: d_if
ckpt_dir: "./data/ckpt/"
# resume_path: "/data/yangxueting/D_IF_opensource/data/ckpt/d_if/final.ckpt"
normal_path: "/data/yangxueting/ICON_orl/data/ckpt/normal.ckpt"
results_path: "./results"

dataset:
  root: "./data/"
  rotation_num: 36
  num_sample_geo: 8000
  num_sample_color: 0
  num_sample_seg: 0
  num_sample_knn: 0
  sigma_geo: 5.0
  sigma_seg: 0.00
  sigma_color: 0.00
  train_bsize: 1.0
  val_bsize: 1.0
  test_bsize: 1.0
  ray_sample_num: 1
  zray_type: False
  semantic_p: False
  remove_outlier: False

  noise_type: ['pose', 'beta']
  noise_scale: [0.0, 0.0]

  types: ["thuman2"]
  scales: [100.0]

net:
  mlp_dim: [256, 512, 256, 128, 1]
  res_layers: [2,3,4]
  num_stack: 2
  prior_type: "icon"
  use_filter: True
  in_geo: (('normal_F',3), ('normal_B',3))
  in_nml: (('image',3), ('T_normal_F',3), ('T_normal_B',3))
  smpl_feats: ['sdf', 'cmap', 'norm', 'vis']
  gtype: 'HGPIFuNet'
  ctype: 'resnet34'
  norm_mlp: 'batch'
  N_freqs: 10
  geo_w: 0.1
  norm_w: 0.001
  dc_w: 1.0
  hourglass_dim: 6
  voxel_dim: 32
  smpl_dim: 7

lr_G: 1e-4
weight_decay: 0.0
momentum: 0.0
batch_size: 10
num_threads: 8
gpus: [0]
test_gpus: [0]

sdf: True
sdf_clip: 15.0

fast_dev: 0
resume: False
test_mode: False
mcube_res: 512
clean_mesh: True
num_sanity_val_steps: 1

momentum: 0.0
optim: RMSprop

# training (batch=4, set=agora, rot-6)
overfit: False
num_epoch: 9
freq_show_train: 0.1
freq_show_val: 0.3
freq_plot: 0.01
freq_eval: 0.2
schedule: [3, 8]

--------------------------------------------------------------------------------
/configs/train/pamir.yaml:
--------------------------------------------------------------------------------
name: pamirwoR
ckpt_dir: "./data/ckpt/"
# resume_path: "/data/yangxueting/ICONforPIFU/data/ckpt/pamir_orl/epoch=07.ckpt"
normal_path: "/data/yangxueting/ICON_orl/data/ckpt/normal.ckpt"
results_path: "./results"

dataset:
  root: "./data/"
  rotation_num: 36
  num_sample_geo: 8000
  num_sample_color: 0
  num_sample_seg: 0
  num_sample_knn: 0
  sigma_geo: 5.0
  sigma_seg: 0.00
  sigma_color: 0.00
  train_bsize: 1.0
  val_bsize: 1.0
  test_bsize: 1.0
  ray_sample_num: 1
  zray_type: False
  semantic_p: False
  remove_outlier: False

  noise_type: ['pose', 'beta']
  noise_scale: [0.0, 0.0]

  types: ["thuman2"]
  scales: [100.0]

net:
  mlp_dim: [256, 512, 256, 128, 1]
  res_layers: [2,3,4]
  num_stack: 2
  prior_type: "pamir"
  use_filter: True
  in_geo: (('image',3), ('normal_F',3), ('normal_B',3))
  in_nml: (('image',3), ('T_normal_F',3), ('T_normal_B',3))
  gtype: 'HGPIFuNet'
  ctype: 'resnet34'
  norm_mlp: 'batch'
  N_freqs: 10
  geo_w: 0.1
  norm_w: 0.001
  dc_w: 1.0
  hourglass_dim: 6
  voxel_dim: 7

lr_G: 1e-4
weight_decay: 0.0
momentum: 0.0
batch_size: 10
num_threads: 8
gpus: [0]
test_gpus: [0]

sdf: False
sdf_clip: 15.0

fast_dev: 0
resume: False
test_mode: False
mcube_res: 512
clean_mesh: True
num_sanity_val_steps: 1

momentum: 0.0
optim: RMSprop

# training (batch=4, set=agora, rot-6)
overfit: False
num_epoch: 10
freq_show_train: 0.1
freq_show_val: 0.3
freq_plot: 0.01
freq_eval: 0.2
schedule: [3, 8]
--------------------------------------------------------------------------------
/configs/train/pifu.yaml:
--------------------------------------------------------------------------------
name: pifuwoR
ckpt_dir: "./data/ckpt/"
# resume_path: "/data/yangxueting/ICONforPIFU/data/ckpt/pifu_orl/epoch=03.ckpt"
normal_path: "/data/yangxueting/ICON_orl/data/ckpt/normal.ckpt"
results_path: "./results"

dataset:
  root: "./data/"
  rotation_num: 36
  num_sample_geo: 8000
  num_sample_color: 0
  num_sample_seg: 0
  num_sample_knn: 0
  sigma_geo: 5.0
  sigma_seg: 0.00
  sigma_color: 0.00
  train_bsize: 1.0
  val_bsize: 1.0
  test_bsize: 1.0
  ray_sample_num: 1
  zray_type: False
  semantic_p: False
  remove_outlier: False

  noise_type: ['pose', 'beta']
  noise_scale: [0.0, 0.0]

  types: ["thuman2"]
  scales: [100.0]

net:
  mlp_dim: [256, 512, 256, 128, 1]
  res_layers: [2,3,4]
  num_stack: 2
  prior_type: "pifu"
  use_filter: True
  in_geo: (('image',3), ('normal_F',3), ('normal_B',3))
  in_nml: (('image',3), ('T_normal_F',3), ('T_normal_B',3))
  gtype: 'HGPIFuNet'
  ctype: 'resnet34'
  norm_mlp: 'batch'
  N_freqs: 10
  geo_w: 0.1
  norm_w: 0.001
  dc_w: 1.0
  hourglass_dim: 12
  voxel_dim: 32
  smpl_dim: 7

lr_G: 1e-4
weight_decay: 0.0
momentum: 0.0
batch_size: 10
num_threads: 8
gpus: [0]
test_gpus: [0]

sdf: False
sdf_clip: 15.0

fast_dev: 0
resume: False
test_mode: False
mcube_res: 512
clean_mesh: True
num_sanity_val_steps: 1

momentum: 0.0
optim: RMSprop

# training (batch=4, set=agora, rot-6)
overfit: False
num_epoch: 10
freq_show_train: 0.1
freq_show_val: 0.3
freq_plot: 0.01
freq_eval: 0.2
schedule: [3, 8]
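
The freq_* entries in these training configs are fractions of an epoch, which apps/train.py converts into PyTorch Lightning step counts. A worked sketch with made-up sizes (train_len and batch_size below are hypothetical numbers, not values from this repo):

# How apps/train.py turns the freq_* fractions into Trainer arguments.
# train_len is hypothetical here; the real value comes from the datamodule.
train_len, batch_size = 3600, 10    # e.g. 100 scans x 36 rotations (made up)
freq_plot, freq_eval = 0.01, 0.2    # values from the training configs above

log_every_n_steps = int(freq_plot * train_len // batch_size)   # -> 3
# small values (<= 10) pass straight through as an epoch fraction:
val_check_interval = (int(freq_eval * train_len // batch_size)
                      if freq_eval > 10 else freq_eval)        # -> 0.2 (5x per epoch)
print(log_every_n_steps, val_check_interval)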
--------------------------------------------------------------------------------
/environment.yaml:
--------------------------------------------------------------------------------
name: D-IF
channels:
  - pytorch-lts
  - nvidia
  - conda-forge
  - fvcore
  - iopath
  - bottler
  - defaults
dependencies:
  - python=3.8
  - pytorch
  - torchvision
  - fvcore
  - iopath
  - nvidiacub
  - pyembree
  - pip

--------------------------------------------------------------------------------
/examples/fashion cloth2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/examples/fashion cloth2.jpg

--------------------------------------------------------------------------------
/examples/fashion_cloth1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/examples/fashion_cloth1.jpg

--------------------------------------------------------------------------------
/examples/handsome boy.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/examples/handsome boy.jpg

--------------------------------------------------------------------------------
/examples/loose cloth.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/examples/loose cloth.jpg

--------------------------------------------------------------------------------
/examples/strange_pose.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/examples/strange_pose.jpg

--------------------------------------------------------------------------------
/examples/tight cloth.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/examples/tight cloth.jpg

--------------------------------------------------------------------------------
/lib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/__init__.py

--------------------------------------------------------------------------------
/lib/common/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/common/__init__.py
--------------------------------------------------------------------------------
/lib/dataloader_demo.py:
--------------------------------------------------------------------------------
import argparse
from lib.common.config import get_cfg_defaults
from lib.dataset.PIFuDataset import PIFuDataset

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--show', action='store_true', help='visualize the 3D sampling')
    parser.add_argument('-s', '--speed', action='store_true', help='benchmark the dataloader speed')
    parser.add_argument('-l', '--list', action='store_true', help='list the keys of one data dict')
    parser.add_argument(
        '-c', '--config', default='./configs/train/d_if.yaml', help='path of the yaml config file'
    )
    parser.add_argument('-d', '--dataset', default='thuman2')
    args_c = parser.parse_args()

    args = get_cfg_defaults()
    args.merge_from_file(args_c.config)

    if args_c.dataset == 'cape':

        # for cape test set
        cfg_test_mode = [
            "test_mode", True, "dataset.types", ["cape"], "dataset.scales", [100.0],
            "dataset.rotation_num", 3
        ]
        args.merge_from_list(cfg_test_mode)

    # dataset sampler
    dataset = PIFuDataset(args, split='test', vis=args_c.show)
    print(f"Number of subjects: {len(dataset.subject_list)}")
    data_dict = dataset[1]

    if args_c.list:
        for k in data_dict.keys():
            if not hasattr(data_dict[k], "shape"):
                print(f"{k}: {data_dict[k]}")
            else:
                print(f"{k}: {data_dict[k].shape}")

    if args_c.show:
        # for item in dataset:
        item = dataset[0]
        dataset.visualize_sampling3D(item, mode='cmap')
        # dataset.visualize_sampling3D(item, mode='occ')
        # dataset.visualize_sampling3D(item, mode='normal')
        # dataset.visualize_sampling3D(item, mode='sdf')
        # dataset.visualize_sampling3D(item, mode='vis')
    if args_c.speed:
        # original: 2 it/s
        # smpl online compute: 2 it/s
        # normal online compute: 1.5 it/s
        from tqdm import tqdm
        for item in tqdm(dataset):
            # pass
            for k in item.keys():
                if 'voxel' in k:
                    if not hasattr(item[k], "shape"):
                        print(f"{k}: {item[k]}")
                    else:
                        print(f"{k}: {item[k].shape}")
            print("--------------------")

--------------------------------------------------------------------------------
/lib/dataset/NormalModule.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de

import numpy as np
from torch.utils.data import DataLoader
from .NormalDataset import NormalDataset

# pytorch lightning related libs
import pytorch_lightning as pl


class NormalModule(pl.LightningDataModule):
    def __init__(self, cfg):
        super(NormalModule, self).__init__()
        self.cfg = cfg
        self.overfit = self.cfg.overfit

        if self.overfit:
            self.batch_size = 1
        else:
            self.batch_size = self.cfg.batch_size

        self.data_size = {}

    def prepare_data(self):

        pass

    @staticmethod
    def worker_init_fn(worker_id):
        np.random.seed(np.random.get_state()[1][0] + worker_id)

    def setup(self, stage):

        if stage == 'fit' or stage is None:
            self.train_dataset = NormalDataset(cfg=self.cfg, split="train")
            self.val_dataset = NormalDataset(cfg=self.cfg, split="val")
            self.data_size = {'train': len(self.train_dataset), 'val': len(self.val_dataset)}

        if stage == 'test' or stage is None:
            self.test_dataset = NormalDataset(cfg=self.cfg, split="test")

    def train_dataloader(self):

        train_data_loader = DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=not self.overfit,
            num_workers=self.cfg.num_threads,
            pin_memory=True,
            worker_init_fn=self.worker_init_fn
        )

        return train_data_loader

    def val_dataloader(self):

        if self.overfit:
            current_dataset = self.train_dataset
        else:
            current_dataset = self.val_dataset

        val_data_loader = DataLoader(
            current_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.cfg.num_threads,
            pin_memory=True
        )

        return val_data_loader

    def test_dataloader(self):

        test_data_loader = DataLoader(
            self.test_dataset,
            batch_size=1,
            shuffle=False,
            num_workers=self.cfg.num_threads,
            pin_memory=True
        )

        return test_data_loader
--------------------------------------------------------------------------------
/lib/dataset/PIFuDataModule.py:
--------------------------------------------------------------------------------
import numpy as np
from torch.utils.data import DataLoader
from .PIFuDataset import PIFuDataset
import pytorch_lightning as pl


class PIFuDataModule(pl.LightningDataModule):
    def __init__(self, cfg):
        super(PIFuDataModule, self).__init__()
        self.cfg = cfg
        self.overfit = self.cfg.overfit

        if self.overfit:
            self.batch_size = 1
        else:
            self.batch_size = self.cfg.batch_size

        self.data_size = {}

    def prepare_data(self):

        pass

    @staticmethod
    def worker_init_fn(worker_id):
        np.random.seed(np.random.get_state()[1][0] + worker_id)

    def setup(self, stage):

        if stage == 'fit':
            self.train_dataset = PIFuDataset(cfg=self.cfg, split="train")
            self.val_dataset = PIFuDataset(cfg=self.cfg, split="val")
            self.data_size = {'train': len(self.train_dataset), 'val': len(self.val_dataset)}

        if stage == 'test':
            self.test_dataset = PIFuDataset(cfg=self.cfg, split="test")

    def train_dataloader(self):

        train_data_loader = DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.cfg.num_threads,
            pin_memory=True,
            worker_init_fn=self.worker_init_fn
        )

        return train_data_loader

    def val_dataloader(self):

        if self.overfit:
            current_dataset = self.train_dataset
        else:
            current_dataset = self.val_dataset

        val_data_loader = DataLoader(
            current_dataset,
            batch_size=1,
            shuffle=False,
            num_workers=self.cfg.num_threads,
            pin_memory=True,
            worker_init_fn=self.worker_init_fn
        )

        return val_data_loader

    def test_dataloader(self):

        test_data_loader = DataLoader(
            self.test_dataset,
            batch_size=1,
            shuffle=False,
            num_workers=self.cfg.num_threads,
            pin_memory=True
        )

        return test_data_loader
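
A minimal sketch of driving this datamodule outside a Lightning Trainer (NormalModule above follows the same pattern). It assumes one of the training presets from earlier in this dump and the THuman2 data layout under ./data:

# Sketch: exercising PIFuDataModule by hand.
from lib.common.config import get_cfg_defaults
from lib.dataset.PIFuDataModule import PIFuDataModule

cfg = get_cfg_defaults()
cfg.merge_from_file("./configs/train/d_if.yaml")

dm = PIFuDataModule(cfg)
dm.setup(stage="fit")                       # builds the train/val PIFuDataset splits
print(dm.data_size)                         # {'train': ..., 'val': ...}
batch = next(iter(dm.train_dataloader()))   # one dict-style batch of cfg.batch_size items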
--------------------------------------------------------------------------------
/lib/dataset/PointFeat.py:
--------------------------------------------------------------------------------
from pytorch3d.structures import Meshes
import torch.nn.functional as F
import torch
from lib.common.render_utils import face_vertices
from lib.dataset.mesh_util import SMPLX, barycentric_coordinates_of_projection
from kaolin.ops.mesh import check_sign
from kaolin.metrics.trianglemesh import point_to_mesh_distance


class PointFeat:
    def __init__(self, verts, faces):

        # verts [B, N_vert, 3]
        # faces [B, N_face, 3]
        # triangles [B, N_face, 3, 3]

        self.Bsize = verts.shape[0]
        self.mesh = Meshes(verts, faces)
        self.device = verts.device
        self.faces = faces

        # SMPL has watertight mesh, but SMPL-X has two eyeballs and open mouth
        # 1. remove eye_ball faces from SMPL-X: 9928-9383, 10474-9929
        # 2. fill mouth holes with 30 more faces

        if verts.shape[1] == 10475:
            faces = faces[:, ~SMPLX().smplx_eyeball_fid]
            mouth_faces = (
                torch.as_tensor(SMPLX().smplx_mouth_fid).unsqueeze(0).repeat(self.Bsize, 1,
                                                                             1).to(self.device)
            )
            self.faces = torch.cat([faces, mouth_faces], dim=1).long()

        self.verts = verts
        self.triangles = face_vertices(self.verts, self.faces)

    def query(self, points, feats={}):

        # points [B, N, 3]
        # feats {'feat_name': [B, N, C]}

        del_keys = ["smpl_verts", "smpl_faces", "smpl_joint"]

        residues, pts_ind, _ = point_to_mesh_distance(points, self.triangles)
        closest_triangles = torch.gather(
            self.triangles, 1, pts_ind[:, :, None, None].expand(-1, -1, 3, 3)
        ).view(-1, 3, 3)
        bary_weights = barycentric_coordinates_of_projection(points.view(-1, 3), closest_triangles)

        out_dict = {}

        for feat_key in feats.keys():

            if feat_key in del_keys:
                continue

            elif feats[feat_key] is not None:
                feat_arr = feats[feat_key]
                feat_dim = feat_arr.shape[-1]
                feat_tri = face_vertices(feat_arr, self.faces)
                closest_feats = torch.gather(
                    feat_tri, 1, pts_ind[:, :, None, None].expand(-1, -1, 3, feat_dim)
                ).view(-1, 3, feat_dim)
                pts_feats = ((closest_feats * bary_weights[:, :, None]).sum(1).unsqueeze(0))
                out_dict[feat_key.split("_")[1]] = pts_feats

            else:
                out_dict[feat_key.split("_")[1]] = None

        if "sdf" in out_dict.keys():
            pts_dist = torch.sqrt(residues) / torch.sqrt(torch.tensor(3))
            pts_signs = 2.0 * (check_sign(self.verts, self.faces[0], points).float() - 0.5)
            pts_sdf = (pts_dist * pts_signs).unsqueeze(-1)
            out_dict["sdf"] = pts_sdf

        if "vis" in out_dict.keys():
            out_dict["vis"] = out_dict["vis"].ge(1e-1).float()

        if "norm" in out_dict.keys():
            pts_norm = out_dict["norm"] * torch.tensor([-1.0, 1.0, -1.0]).to(self.device)
            out_dict["norm"] = F.normalize(pts_norm, dim=2)

        if "cmap" in out_dict.keys():
            out_dict["cmap"] = out_dict["cmap"].clamp_(min=0.0, max=1.0)

        for out_key in out_dict.keys():
            out_dict[out_key] = out_dict[out_key].view(self.Bsize, -1, out_dict[out_key].shape[-1])

        return out_dict

--------------------------------------------------------------------------------
/lib/dataset/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/dataset/__init__.py
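
The signed-distance branch of PointFeat.query() is self-contained enough to run standalone. A sketch with a toy watertight tetrahedron standing in for the batched body mesh (all tensors below are invented inputs; move them to CUDA if your kaolin build requires it):

# Standalone sketch of the SDF logic inside PointFeat.query().
import torch
from kaolin.ops.mesh import check_sign
from kaolin.metrics.trianglemesh import point_to_mesh_distance
from lib.common.render_utils import face_vertices

verts = torch.tensor([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]])
faces = torch.tensor([[[0, 2, 1], [0, 1, 3], [0, 3, 2], [1, 2, 3]]])
points = torch.tensor([[[0.1, 0.1, 0.1], [2.0, 2.0, 2.0]]])   # inside, outside

triangles = face_vertices(verts, faces)                       # [1, F, 3, 3]
residues, _, _ = point_to_mesh_distance(points, triangles)    # squared distances
dist = torch.sqrt(residues) / torch.sqrt(torch.tensor(3.0))   # same scaling as query()
sign = 2.0 * (check_sign(verts, faces[0], points).float() - 0.5)  # +1 inside, -1 outside
print((dist * sign).unsqueeze(-1))                            # [1, N, 1] signed distances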
--------------------------------------------------------------------------------
/lib/dataset/hoppeMesh.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de

import numpy as np
from scipy.spatial import cKDTree
import trimesh

import logging

logging.getLogger("trimesh").setLevel(logging.ERROR)


def save_obj_mesh(mesh_path, verts, faces):
    file = open(mesh_path, 'w')
    for v in verts:
        file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2]))
    for f in faces:
        f_plus = f + 1
        file.write('f %d %d %d\n' % (f_plus[0], f_plus[1], f_plus[2]))
    file.close()


def save_obj_mesh_with_color(mesh_path, verts, faces, colors):
    file = open(mesh_path, 'w')

    for idx, v in enumerate(verts):
        c = colors[idx]
        file.write('v %.4f %.4f %.4f %.4f %.4f %.4f\n' % (v[0], v[1], v[2], c[0], c[1], c[2]))
    for f in faces:
        f_plus = f + 1
        file.write('f %d %d %d\n' % (f_plus[0], f_plus[1], f_plus[2]))
    file.close()


def save_ply(mesh_path, points, rgb):
    '''
    Save the visualization of sampling to a ply file.
    Red points represent positive predictions.
    Green points represent negative predictions.
    :param mesh_path: File name to save
    :param points: [N, 3] array of points
    :param rgb: [N, 3] array of rgb values in the range [0, 1]
    :return:
    '''
    to_save = np.concatenate([points, rgb * 255], axis=-1)
    return np.savetxt(
        mesh_path,
        to_save,
        fmt='%.6f %.6f %.6f %d %d %d',
        comments='',
        header=(
            'ply\nformat ascii 1.0\nelement vertex {:d}\n' +
            'property float x\nproperty float y\nproperty float z\n' +
            'property uchar red\nproperty uchar green\nproperty uchar blue\n' + 'end_header'
        ).format(points.shape[0])
    )


class HoppeMesh:
    def __init__(self, verts, faces, vert_normals, face_normals):
        '''
        The HoppeSDF calculates signed distance towards a predefined oriented point cloud
        http://hhoppe.com/recon.pdf
        For clean and high-resolution pcl data, this is the fastest and most accurate approximation of SDF
        :param verts: [n, 3] mesh vertices
        :param faces: [m, 3] triangle indices
        :param vert_normals: [n, 3] per-vertex normals
        :param face_normals: [m, 3] per-face normals
        '''
        self.verts = verts    # [n, 3]
        self.faces = faces    # [m, 3]
        self.vert_normals = vert_normals    # [n, 3]
        self.face_normals = face_normals    # [m, 3]
        self.colors = None    # optional [n, >=3] per-vertex colors, used by export()/export_ply()

        self.kd_tree = cKDTree(self.verts)
        self.len = len(self.verts)

    def query(self, points):
        dists, idx = self.kd_tree.query(points, n_jobs=1)
        # FIXME: because the eyebrows are removed, the cKDTree around the eyebrows
        # is not accurate. Causes a few false-inside labels here.
        dirs = points - self.verts[idx]
        signs = (dirs * self.vert_normals[idx]).sum(axis=1)
        signs = (signs > 0) * 2 - 1
        return signs * dists

    def contains(self, points):

        labels = trimesh.Trimesh(vertices=self.verts, faces=self.faces).contains(points)
        return labels

    def export(self, path):
        if self.colors is not None:
            save_obj_mesh_with_color(path, self.verts, self.faces, self.colors[:, 0:3] / 255.0)
        else:
            save_obj_mesh(path, self.verts, self.faces)

    def export_ply(self, path):
        # requires self.colors to be set beforehand
        save_ply(path, self.verts, self.colors[:, 0:3] / 255.0)

    def triangles(self):
        return self.verts[self.faces]    # [n, 3, 3]
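
A small usage sketch for HoppeMesh; the icosphere is an arbitrary stand-in for a scan, and the import assumes the repo root is on PYTHONPATH:

# Sketch: kNN-based signed distance from HoppeMesh vs. exact containment.
import numpy as np
import trimesh
from lib.dataset.hoppeMesh import HoppeMesh

sphere = trimesh.creation.icosphere(subdivisions=2, radius=1.0)   # stand-in scan
hoppe = HoppeMesh(
    verts=np.asarray(sphere.vertices),
    faces=np.asarray(sphere.faces),
    vert_normals=np.asarray(sphere.vertex_normals),
    face_normals=np.asarray(sphere.face_normals),
)

pts = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])   # inside, outside
print(hoppe.query(pts))      # negative inside, positive outside (nearest-normal dot sign)
print(hoppe.contains(pts))   # [True, False], exact test via trimesh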
--------------------------------------------------------------------------------
/lib/dataset/tbfo.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/dataset/tbfo.ttf

--------------------------------------------------------------------------------
/lib/net/BasePIFuNet.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de

import torch.nn as nn
import pytorch_lightning as pl

from .geometry import index, orthogonal, perspective


class BasePIFuNet(pl.LightningModule):
    def __init__(
        self,
        projection_mode='orthogonal',
        error_term=nn.MSELoss(),
    ):
        """
        :param projection_mode:
            Either orthogonal or perspective.
            It will call the corresponding function for projection.
        :param error_term:
            nn Loss between the predicted [B, Res, N] and the label [B, Res, N]
        """
        super(BasePIFuNet, self).__init__()
        self.name = 'base'

        self.error_term = error_term

        self.index = index
        self.projection = orthogonal if projection_mode == 'orthogonal' else perspective
    def forward(self, points, images, calibs, transforms=None):
        '''
        :param points: [B, 3, N] world space coordinates of points
        :param images: [B, C, H, W] input images
        :param calibs: [B, 3, 4] calibration matrices for each image
        :param transforms: Optional [B, 2, 3] image space coordinate transforms
        :return: [B, Res, N] predictions for each point
        '''
        features = self.filter(images)
        preds = self.query(features, points, calibs, transforms)
        return preds

    def filter(self, images):
        '''
        Filter the input images,
        store all intermediate features.
        :param images: [B, C, H, W] input images
        '''
        return None

    def query(self, features, points, calibs, transforms=None):
        '''
        Given 3D points, query the network predictions for each point.
        Image features should be pre-computed before this call.
        store all intermediate features.
        query() function may behave differently during training/testing.
        :param points: [B, 3, N] world space coordinates of points
        :param calibs: [B, 3, 4] calibration matrices for each image
        :param transforms: Optional [B, 2, 3] image space coordinate transforms
        :return: [B, Res, N] predictions for each point
        '''
        return None

    def get_error(self, preds, labels):
        '''
        Get the network loss from the last query
        :return: loss term
        '''
        return self.error_term(preds, labels)

--------------------------------------------------------------------------------
/lib/net/NormalNet.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de

from lib.net.FBNet import define_G
from lib.net.net_util import init_net, VGGLoss
from lib.net.HGFilters import *
from lib.net.BasePIFuNet import BasePIFuNet
import torch
import torch.nn as nn


class NormalNet(BasePIFuNet):
    '''
    NormalNet predicts the front- and back-side clothed normal maps.
    It does the following:
        1. netF consumes the image together with the front body normal map (T_normal_F)
        2. netB consumes the image together with the back body normal map (T_normal_B)
        3. both outputs are normalized to unit length and masked by the input silhouette
        4. during training, each side is supervised with a smooth-L1 term plus a
           VGG perceptual term (see get_norm_error)
    netF and netB are pix2pixHD-style "global" generators built by define_G.
    '''
    def __init__(self, cfg, error_term=nn.SmoothL1Loss()):

        super(NormalNet, self).__init__(error_term=error_term)

        self.l1_loss = nn.SmoothL1Loss()

        self.opt = cfg.net

        if self.training:
            self.vgg_loss = [VGGLoss()]

        self.in_nmlF = [
            item[0] for item in self.opt.in_nml if '_F' in item[0] or item[0] == 'image'
        ]
        self.in_nmlB = [
            item[0] for item in self.opt.in_nml if '_B' in item[0] or item[0] == 'image'
        ]
        self.in_nmlF_dim = sum(
            [item[1] for item in self.opt.in_nml if '_F' in item[0] or item[0] == 'image']
        )
        self.in_nmlB_dim = sum(
            [item[1] for item in self.opt.in_nml if '_B' in item[0] or item[0] == 'image']
        )

        self.netF = define_G(self.in_nmlF_dim, 3, 64, "global", 4, 9, 1, 3, "instance")
        self.netB = define_G(self.in_nmlB_dim, 3, 64, "global", 4, 9, 1, 3, "instance")

        init_net(self)

    def forward(self, in_tensor):

        inF_list = []
        inB_list = []

        for name in self.in_nmlF:
            inF_list.append(in_tensor[name])
        for name in self.in_nmlB:
            inB_list.append(in_tensor[name])

        nmlF = self.netF(torch.cat(inF_list, dim=1))
        nmlB = self.netB(torch.cat(inB_list, dim=1))

        # ||normal|| == 1
        nmlF = nmlF / torch.norm(nmlF, dim=1, keepdim=True)
        nmlB = nmlB / torch.norm(nmlB, dim=1, keepdim=True)

        # output: float_arr [-1,1] with [B, C, H, W]

        mask = (in_tensor['image'].abs().sum(dim=1, keepdim=True) != 0.0).detach().float()

        nmlF = nmlF * mask
        nmlB = nmlB * mask

        return nmlF, nmlB

    def get_norm_error(self, prd_F, prd_B, tgt):
        """calculate normal loss

        Args:
            prd_F, prd_B (torch.tensor): [B, 3, 512, 512] predicted front/back normal maps
            tgt (dict): ground truth, contains 'normal_F' and 'normal_B' maps
        """

        tgt_F, tgt_B = tgt['normal_F'], tgt['normal_B']

        l1_F_loss = self.l1_loss(prd_F, tgt_F)
        l1_B_loss = self.l1_loss(prd_B, tgt_B)

        with torch.no_grad():
            vgg_F_loss = self.vgg_loss[0](prd_F, tgt_F)
            vgg_B_loss = self.vgg_loss[0](prd_B, tgt_B)

        total_loss = [5.0 * l1_F_loss + vgg_F_loss, 5.0 * l1_B_loss + vgg_B_loss]

        return total_loss
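
A forward-pass sketch for NormalNet with random stand-ins for a real batch. Shapes follow the in_nml spec in the configs; this assumes the repo's config loader hands NormalNet the parsed in_nml tuples it expects:

# Sketch: one NormalNet forward pass on random stand-in inputs.
import torch
from lib.common.config import get_cfg_defaults
from lib.net.NormalNet import NormalNet

cfg = get_cfg_defaults()
cfg.merge_from_file("./configs/d_if.yaml")

net = NormalNet(cfg).eval()
in_tensor = {    # [-1, 1] float maps, as in the real pipeline
    "image": torch.rand(1, 3, 512, 512) * 2 - 1,
    "T_normal_F": torch.rand(1, 3, 512, 512) * 2 - 1,
    "T_normal_B": torch.rand(1, 3, 512, 512) * 2 - 1,
}
with torch.no_grad():
    nmlF, nmlB = net(in_tensor)    # unit-length, silhouette-masked normal maps
print(nmlF.shape, nmlB.shape)      # torch.Size([1, 3, 512, 512]) twice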
--------------------------------------------------------------------------------
/lib/net/__init__.py:
--------------------------------------------------------------------------------
from .BasePIFuNet import BasePIFuNet
from .HGPIFuNet import HGPIFuNet
from .NormalNet import NormalNet
from .VE import VolumeEncoder
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/BasePIFuNet.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/FBNet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/FBNet.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/HGFilters.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/HGFilters.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/HGPIFuNet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/HGPIFuNet.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/MLP.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/MLP.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/MLP_DIF.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/MLP_DIF.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/MLP_DIFwoR.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/MLP_DIFwoR.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/MLP_v3_1_1.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/MLP_v3_1_1.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/MLP_v3_1_1_xyz.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/MLP_v3_1_1_xyz.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/NormalNet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/NormalNet.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/VE.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/VE.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/geometry.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/geometry.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/local_affine.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/local_affine.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/net_util.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/net_util.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/spatial.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/spatial.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/__pycache__/voxelize.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/net/__pycache__/voxelize.cpython-38.pyc -------------------------------------------------------------------------------- /lib/net/geometry.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 
14 | #
15 | # Contact: ps-license@tuebingen.mpg.de
16 | 
17 | import torch
18 | 
19 | 
20 | def index(feat, uv):
21 |     '''
22 |     :param feat: [B, C, H, W] image features
23 |     :param uv: [B, 2, N] uv coordinates in the image plane, range [-1, 1]
24 |     :return: [B, C, N] image features at the uv coordinates
25 |     '''
26 |     uv = uv.transpose(1, 2)  # [B, N, 2]
27 | 
28 |     (B, N, _) = uv.shape
29 |     C = feat.shape[1]
30 | 
31 |     if uv.shape[-1] == 3:
32 |         # uv = uv[:,:,[2,1,0]]
33 |         # uv = uv * torch.tensor([1.0,-1.0,1.0]).type_as(uv)[None,None,...]
34 |         uv = uv.unsqueeze(2).unsqueeze(3)  # [B, N, 1, 1, 3]
35 |     else:
36 |         uv = uv.unsqueeze(2)  # [B, N, 1, 2]
37 | 
38 |     # NOTE: for newer PyTorch, it seems that training results are degraded due to implementation diff in F.grid_sample
39 |     # for old versions, simply remove the align_corners argument.
40 |     samples = torch.nn.functional.grid_sample(feat, uv, align_corners=True)  # [B, C, N, 1]
41 |     return samples.view(B, C, N)  # [B, C, N]
42 | 
43 | 
44 | def orthogonal(points, calibrations, transforms=None):
45 |     '''
46 |     Compute the orthogonal projections of 3D points into the image plane with the given projection matrix
47 |     :param points: [B, 3, N] Tensor of 3D points
48 |     :param calibrations: [B, 3, 4] Tensor of projection matrix
49 |     :param transforms: [B, 2, 3] Tensor of image transform matrix
50 |     :return: xyz: [B, 3, N] Tensor of xyz coordinates in the image plane
51 |     '''
52 |     rot = calibrations[:, :3, :3]
53 |     trans = calibrations[:, :3, 3:4]
54 |     pts = torch.baddbmm(trans, rot, points)  # [B, 3, N]
55 |     if transforms is not None:
56 |         scale = transforms[:2, :2]
57 |         shift = transforms[:2, 2:3]
58 |         pts[:, :2, :] = torch.baddbmm(shift, scale, pts[:, :2, :])
59 |     return pts
60 | 
61 | 
62 | def perspective(points, calibrations, transforms=None):
63 |     '''
64 |     Compute the perspective projections of 3D points into the image plane with the given projection matrix
65 |     :param points: [B, 3, N] Tensor of 3D points
66 |     :param calibrations: [B, 3, 4] Tensor of projection matrix
67 |     :param transforms: [B, 2, 3] Tensor of image transform matrix
68 |     :return: xyz: [B, 3, N] Tensor of xy coordinates in the image plane, plus the depth
69 |     '''
70 |     rot = calibrations[:, :3, :3]
71 |     trans = calibrations[:, :3, 3:4]
72 |     homo = torch.baddbmm(trans, rot, points)  # [B, 3, N]
73 |     xy = homo[:, :2, :] / homo[:, 2:3, :]
74 |     if transforms is not None:
75 |         scale = transforms[:2, :2]
76 |         shift = transforms[:2, 2:3]
77 |         xy = torch.baddbmm(shift, scale, xy)
78 | 
79 |     xyz = torch.cat([xy, homo[:, 2:3, :]], 1)
80 |     return xyz
81 | 
--------------------------------------------------------------------------------
/lib/net/local_affine.py:
--------------------------------------------------------------------------------
1 | # Copyright 2021 by Haozhe Wu, Tsinghua University, Department of Computer Science and Technology.
2 | # All rights reserved.
3 | # This file is part of the pytorch-nicp,
4 | # and is released under the "MIT License Agreement". Please see the LICENSE
5 | # file that should have been included as part of this package.
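# NOTE (illustrative sketch, not part of the original sources): with an identity
# [B, 3, 4] calibration, orthogonal() returns the points unchanged, and index()
# then bilinearly samples image features at their xy locations (in [-1, 1]).
import torch
from lib.net.geometry import index, orthogonal

B, N = 2, 128
points = torch.rand(B, 3, N) * 2 - 1                   # world coords in [-1, 1]
calibs = torch.eye(3, 4).unsqueeze(0).repeat(B, 1, 1)  # [B, 3, 4] identity calibration
xyz = orthogonal(points, calibs)                       # equals points here
feat = torch.randn(B, 32, 64, 64)
samples = index(feat, xyz[:, :2, :])                   # [B, 32, N]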
6 | 
7 | import torch
8 | import torch.nn as nn
9 | import torch.sparse as sp
10 | 
11 | 
12 | # reference: https://github.com/wuhaozhe/pytorch-nicp
13 | class LocalAffine(nn.Module):
14 |     def __init__(self, num_points, batch_size=1, edges=None):
15 |         '''
16 |         num_points must be constant across the batch, and edges is a
17 |         torch.LongTensor of shape [N, 2].
18 |         The local affine operator supports batched operation, but the
19 |         batch size must be constant;
20 |         additional pooling may be added on top of the weight matrix.
21 |         '''
22 |         super(LocalAffine, self).__init__()
23 |         self.A = nn.Parameter(
24 |             torch.eye(3).unsqueeze(0).unsqueeze(0).repeat(batch_size, num_points, 1, 1)
25 |         )
26 |         self.b = nn.Parameter(
27 |             torch.zeros(3).unsqueeze(0).unsqueeze(0).unsqueeze(3).repeat(
28 |                 batch_size, num_points, 1, 1
29 |             )
30 |         )
31 |         self.edges = edges
32 |         self.num_points = num_points
33 | 
34 |     def stiffness(self):
35 |         '''
36 |         Calculate the stiffness of the local affine transformation.
37 |         (The Frobenius norm has an unbounded gradient when the weight matrix is zero.)
38 |         '''
39 |         if self.edges is None:
40 |             raise Exception("edges cannot be None when calculating stiffness")
41 |         idx1 = self.edges[:, 0]
42 |         idx2 = self.edges[:, 1]
43 |         affine_weight = torch.cat((self.A, self.b), dim=3)
44 |         w1 = torch.index_select(affine_weight, dim=1, index=idx1)
45 |         w2 = torch.index_select(affine_weight, dim=1, index=idx2)
46 |         w_diff = (w1 - w2)**2
47 |         w_rigid = (torch.linalg.det(self.A) - 1.0)**2
48 |         return w_diff, w_rigid
49 | 
50 |     def forward(self, x, return_stiff=False):
51 |         '''
52 |         x should have a shape of [B, N, 3]
53 |         '''
54 |         x = x.unsqueeze(3)
55 |         out_x = torch.matmul(self.A, x)
56 |         out_x = out_x + self.b
57 |         out_x.squeeze_(3)
58 |         if return_stiff:
59 |             stiffness, rigid = self.stiffness()
60 |             return out_x, stiffness, rigid
61 |         else:
62 |             return out_x
63 | 
--------------------------------------------------------------------------------
/lib/net/spatial.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # All rights reserved.
3 | 
4 | # This source code is licensed under the license found in the
5 | # LICENSE file in the root directory of this source tree.
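# NOTE (illustrative sketch with dummy data, not part of the original sources):
# LocalAffine deforms template vertices with one 3x4 affine per vertex; the
# stiffness/rigidity terms regularize neighboring affines toward each other,
# as in NICP fitting. The loss below is a toy placeholder, not the repo's objective.
import torch
from lib.net.local_affine import LocalAffine

verts = torch.rand(1, 100, 3)                # [B, N, 3] template vertices
edges = torch.randint(0, 100, (300, 2))      # [E, 2] dummy mesh edges
model = LocalAffine(num_points=100, batch_size=1, edges=edges)
optim = torch.optim.Adam(model.parameters(), lr=1e-3)

deformed, stiff, rigid = model(verts, return_stiff=True)   # [1, 100, 3], ...
loss = deformed.pow(2).mean() + 0.1 * stiff.mean() + 0.1 * rigid.mean()  # toy loss
loss.backward()
optim.step()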
6 | import torch 7 | import pytorch_lightning as pl 8 | import numpy as np 9 | 10 | 11 | class SpatialEncoder(pl.LightningModule): 12 | def __init__(self, sp_level=1, sp_type="rel_z_decay", scale=1.0, n_kpt=24, sigma=0.2): 13 | 14 | super().__init__() 15 | 16 | self.sp_type = sp_type 17 | self.sp_level = sp_level 18 | self.n_kpt = n_kpt 19 | self.scale = scale 20 | self.sigma = sigma 21 | 22 | @staticmethod 23 | def position_embedding(x, nlevels, scale=1.0): 24 | """ 25 | args: 26 | x: (B, N, C) 27 | return: 28 | (B, N, C * n_levels * 2) 29 | """ 30 | if nlevels <= 0: 31 | return x 32 | vec = SpatialEncoder.pe_vector(nlevels, x.device, scale) 33 | 34 | B, N, _ = x.shape 35 | y = x[:, :, None, :] * vec[None, None, :, None] 36 | z = torch.cat((torch.sin(y), torch.cos(y)), axis=-1).view(B, N, -1) 37 | 38 | return torch.cat([x, z], -1) 39 | 40 | @staticmethod 41 | def pe_vector(nlevels, device, scale=1.0): 42 | v, val = [], 1 43 | for _ in range(nlevels): 44 | v.append(scale * np.pi * val) 45 | val *= 2 46 | return torch.from_numpy(np.asarray(v, dtype=np.float32)).to(device) 47 | 48 | def get_dim(self): 49 | if self.sp_type in ["z", "rel_z", "rel_z_decay"]: 50 | if "rel" in self.sp_type: 51 | return (1 + 2 * self.sp_level) * self.n_kpt 52 | else: 53 | return 1 + 2 * self.sp_level 54 | elif "xyz" in self.sp_type: 55 | if "rel" in self.sp_type: 56 | return (1 + 2 * self.sp_level) * 3 * self.n_kpt 57 | else: 58 | return (1 + 2 * self.sp_level) * 3 59 | 60 | return 0 61 | 62 | def forward(self, cxyz, kptxyz): 63 | 64 | B, N = cxyz.shape[:2] 65 | K = kptxyz.shape[1] 66 | 67 | dz = cxyz[:, :, None, 2:3] - kptxyz[:, None, :, 2:3] 68 | dxyz = cxyz[:, :, None] - kptxyz[:, None, :] 69 | 70 | # (B, N, K) 71 | weight = torch.exp(-(dxyz**2).sum(-1) / (2.0 * (self.sigma**2))) 72 | 73 | # position embedding ( B, N, K * (2*n_levels+1) ) 74 | out = self.position_embedding(dz.view(B, N, K), self.sp_level) 75 | 76 | # BV,N,K,(2*n_levels+1) * B,N,K,1 = B,N,K*(2*n_levels+1) -> BV,K*(2*n_levels+1),N 77 | out = (out.view(B, N, -1, K) * weight[:, :, None]).view(B, N, -1).permute(0, 2, 1) 78 | 79 | return out 80 | 81 | 82 | if __name__ == "__main__": 83 | pts = torch.randn(2, 10000, 3).to("cuda") 84 | kpts = torch.randn(2, 24, 3).to("cuda") 85 | 86 | sp_encoder = SpatialEncoder(sp_level=3, sp_type="rel_z_decay", scale=1.0, n_kpt=24, 87 | sigma=0.1).to("cuda") 88 | out = sp_encoder(pts, kpts) 89 | print(out.shape) 90 | -------------------------------------------------------------------------------- /lib/pare/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/__init__.py -------------------------------------------------------------------------------- /lib/pare/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/__init__.py -------------------------------------------------------------------------------- /lib/pare/pare/__pycache__/__init__.cpython-38.pyc: 
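# NOTE (illustrative sanity check, not part of the original sources): for
# sp_type="rel_z_decay", each of the n_kpt keypoints contributes its raw dz plus
# sin/cos at sp_level frequencies, so the output dim is n_kpt * (1 + 2*sp_level),
# e.g. 24 * (1 + 2*3) = 168 — exactly what get_dim() reports.
import torch
from lib.net.spatial import SpatialEncoder

enc = SpatialEncoder(sp_level=3, sp_type="rel_z_decay", n_kpt=24)
out = enc(torch.rand(2, 1000, 3), torch.rand(2, 24, 3))
assert out.shape == (2, enc.get_dim(), 1000)   # (2, 168, 1000)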
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/core/__init__.py -------------------------------------------------------------------------------- /lib/pare/pare/core/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/core/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/core/__pycache__/config.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/core/__pycache__/config.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/core/__pycache__/constants.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/core/__pycache__/constants.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/core/__pycache__/tester.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/core/__pycache__/tester.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/core/tester.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 
14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | import torch 18 | from loguru import logger 19 | 20 | from ..models import PARE 21 | from .config import update_hparams 22 | from ..utils.train_utils import load_pretrained_model 23 | 24 | MIN_NUM_FRAMES = 0 25 | 26 | 27 | class PARETester: 28 | def __init__(self, cfg, ckpt): 29 | self.model_cfg = update_hparams(cfg) 30 | self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') 31 | self.model = self._build_model() 32 | self._load_pretrained_model(ckpt) 33 | self.model.eval() 34 | 35 | def _build_model(self): 36 | # ========= Define PARE model ========= # 37 | model_cfg = self.model_cfg 38 | 39 | if model_cfg.METHOD == 'pare': 40 | model = PARE( 41 | backbone=model_cfg.PARE.BACKBONE, 42 | num_joints=model_cfg.PARE.NUM_JOINTS, 43 | softmax_temp=model_cfg.PARE.SOFTMAX_TEMP, 44 | num_features_smpl=model_cfg.PARE.NUM_FEATURES_SMPL, 45 | focal_length=model_cfg.DATASET.FOCAL_LENGTH, 46 | img_res=model_cfg.DATASET.IMG_RES, 47 | pretrained=model_cfg.TRAINING.PRETRAINED, 48 | iterative_regression=model_cfg.PARE.ITERATIVE_REGRESSION, 49 | num_iterations=model_cfg.PARE.NUM_ITERATIONS, 50 | iter_residual=model_cfg.PARE.ITER_RESIDUAL, 51 | shape_input_type=model_cfg.PARE.SHAPE_INPUT_TYPE, 52 | pose_input_type=model_cfg.PARE.POSE_INPUT_TYPE, 53 | pose_mlp_num_layers=model_cfg.PARE.POSE_MLP_NUM_LAYERS, 54 | shape_mlp_num_layers=model_cfg.PARE.SHAPE_MLP_NUM_LAYERS, 55 | pose_mlp_hidden_size=model_cfg.PARE.POSE_MLP_HIDDEN_SIZE, 56 | shape_mlp_hidden_size=model_cfg.PARE.SHAPE_MLP_HIDDEN_SIZE, 57 | use_keypoint_features_for_smpl_regression=model_cfg.PARE. 58 | USE_KEYPOINT_FEATURES_FOR_SMPL_REGRESSION, 59 | use_heatmaps=model_cfg.DATASET.USE_HEATMAPS, 60 | use_keypoint_attention=model_cfg.PARE.USE_KEYPOINT_ATTENTION, 61 | use_postconv_keypoint_attention=model_cfg.PARE.USE_POSTCONV_KEYPOINT_ATTENTION, 62 | use_scale_keypoint_attention=model_cfg.PARE.USE_SCALE_KEYPOINT_ATTENTION, 63 | keypoint_attention_act=model_cfg.PARE.KEYPOINT_ATTENTION_ACT, 64 | use_final_nonlocal=model_cfg.PARE.USE_FINAL_NONLOCAL, 65 | use_branch_nonlocal=model_cfg.PARE.USE_BRANCH_NONLOCAL, 66 | use_hmr_regression=model_cfg.PARE.USE_HMR_REGRESSION, 67 | use_coattention=model_cfg.PARE.USE_COATTENTION, 68 | num_coattention_iter=model_cfg.PARE.NUM_COATTENTION_ITER, 69 | coattention_conv=model_cfg.PARE.COATTENTION_CONV, 70 | use_upsampling=model_cfg.PARE.USE_UPSAMPLING, 71 | deconv_conv_kernel_size=model_cfg.PARE.DECONV_CONV_KERNEL_SIZE, 72 | use_soft_attention=model_cfg.PARE.USE_SOFT_ATTENTION, 73 | num_branch_iteration=model_cfg.PARE.NUM_BRANCH_ITERATION, 74 | branch_deeper=model_cfg.PARE.BRANCH_DEEPER, 75 | num_deconv_layers=model_cfg.PARE.NUM_DECONV_LAYERS, 76 | num_deconv_filters=model_cfg.PARE.NUM_DECONV_FILTERS, 77 | use_resnet_conv_hrnet=model_cfg.PARE.USE_RESNET_CONV_HRNET, 78 | use_position_encodings=model_cfg.PARE.USE_POS_ENC, 79 | use_mean_camshape=model_cfg.PARE.USE_MEAN_CAMSHAPE, 80 | use_mean_pose=model_cfg.PARE.USE_MEAN_POSE, 81 | init_xavier=model_cfg.PARE.INIT_XAVIER, 82 | ).to(self.device) 83 | else: 84 | logger.error(f'{model_cfg.METHOD} is undefined!') 85 | exit() 86 | 87 | return model 88 | 89 | def _load_pretrained_model(self, ckpt_path): 90 | # ========= Load pretrained weights ========= # 91 | logger.info(f'Loading pretrained model from {ckpt_path}') 92 | ckpt = torch.load(ckpt_path)['state_dict'] 93 | load_pretrained_model( 94 | self.model, ckpt, overwrite_shape_mismatch=True, remove_lightning=True 95 | ) 96 | 
logger.info(f'Loaded pretrained weights from \"{ckpt_path}\"') 97 | -------------------------------------------------------------------------------- /lib/pare/pare/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .hmr import HMR 2 | from .pare import PARE 3 | from .head.smpl_head import SMPL 4 | -------------------------------------------------------------------------------- /lib/pare/pare/models/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/__pycache__/hmr.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/__pycache__/hmr.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/__pycache__/pare.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/__pycache__/pare.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | # from .hrnet_pare import * 2 | from .resnet import * 3 | from .mobilenet import * 4 | -------------------------------------------------------------------------------- /lib/pare/pare/models/backbone/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/backbone/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/backbone/__pycache__/hrnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/backbone/__pycache__/hrnet.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/backbone/__pycache__/mobilenet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/backbone/__pycache__/mobilenet.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/backbone/__pycache__/resnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/backbone/__pycache__/resnet.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/backbone/__pycache__/utils.cpython-38.pyc: 
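# NOTE (illustrative sketch, not part of the original sources): usage of
# PARETester above; the config/checkpoint paths are hypothetical placeholders,
# not files shipped with this repo.
from lib.pare.pare.core.tester import PARETester

tester = PARETester('data/pare/pare_config.yaml', 'data/pare/pare_ckpt.pt')
# tester.model is a PARE module, already in eval() mode, on GPU when available.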
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/backbone/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/backbone/hrnet_legacy.py: -------------------------------------------------------------------------------- 1 | import timm 2 | from torch import nn 3 | 4 | models = [ 5 | 'hrnet_w18_small', 6 | 'hrnet_w18_small_v2', 7 | 'hrnet_w18', 8 | 'hrnet_w30', 9 | 'hrnet_w32', 10 | 'hrnet_w40', 11 | 'hrnet_w44', 12 | 'hrnet_w48', 13 | 'hrnet_w64', 14 | ] 15 | 16 | 17 | class HRNet(nn.Module): 18 | def __init__(self, arch, pretrained=True): 19 | super(HRNet, self).__init__() 20 | self.m = timm.create_model(arch, pretrained=pretrained) 21 | 22 | def forward(self, x): 23 | return self.m.forward_features(x) 24 | 25 | 26 | def hrnet_w32(pretrained=True): 27 | return HRNet('hrnet_w32', pretrained) 28 | 29 | 30 | def hrnet_w48(pretrained=True): 31 | return HRNet('hrnet_w48', pretrained) 32 | 33 | 34 | def hrnet_w64(pretrained=True): 35 | return HRNet('hrnet_w64', pretrained) 36 | 37 | 38 | def dla34(pretrained=True): 39 | return HRNet('dla34', pretrained) 40 | -------------------------------------------------------------------------------- /lib/pare/pare/models/backbone/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 
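# NOTE (illustrative sketch, not part of the original sources): the wrapper
# above delegates to timm, so any arch string timm knows can be created the
# same way; forward() returns timm's backbone feature map, whose exact shape
# depends on the architecture.
import torch
from lib.pare.pare.models.backbone.hrnet_legacy import hrnet_w32

net = hrnet_w32(pretrained=False)          # False avoids a weight download here
feats = net(torch.rand(1, 3, 224, 224))    # backbone feature tensor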
14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | 18 | def get_backbone_info(backbone): 19 | info = { 20 | 'resnet18': { 21 | 'n_output_channels': 512, 22 | 'downsample_rate': 4 23 | }, 24 | 'resnet34': { 25 | 'n_output_channels': 512, 26 | 'downsample_rate': 4 27 | }, 28 | 'resnet50': { 29 | 'n_output_channels': 2048, 30 | 'downsample_rate': 4 31 | }, 32 | 'resnet50_adf_dropout': { 33 | 'n_output_channels': 2048, 34 | 'downsample_rate': 4 35 | }, 36 | 'resnet50_dropout': { 37 | 'n_output_channels': 2048, 38 | 'downsample_rate': 4 39 | }, 40 | 'resnet101': { 41 | 'n_output_channels': 2048, 42 | 'downsample_rate': 4 43 | }, 44 | 'resnet152': { 45 | 'n_output_channels': 2048, 46 | 'downsample_rate': 4 47 | }, 48 | 'resnext50_32x4d': { 49 | 'n_output_channels': 2048, 50 | 'downsample_rate': 4 51 | }, 52 | 'resnext101_32x8d': { 53 | 'n_output_channels': 2048, 54 | 'downsample_rate': 4 55 | }, 56 | 'wide_resnet50_2': { 57 | 'n_output_channels': 2048, 58 | 'downsample_rate': 4 59 | }, 60 | 'wide_resnet101_2': { 61 | 'n_output_channels': 2048, 62 | 'downsample_rate': 4 63 | }, 64 | 'mobilenet_v2': { 65 | 'n_output_channels': 1280, 66 | 'downsample_rate': 4 67 | }, 68 | 'hrnet_w32': { 69 | 'n_output_channels': 480, 70 | 'downsample_rate': 4 71 | }, 72 | 'hrnet_w48': { 73 | 'n_output_channels': 720, 74 | 'downsample_rate': 4 75 | }, 76 | # 'hrnet_w64': {'n_output_channels': 2048, 'downsample_rate': 4}, 77 | 'dla34': { 78 | 'n_output_channels': 512, 79 | 'downsample_rate': 4 80 | }, 81 | } 82 | return info[backbone] 83 | -------------------------------------------------------------------------------- /lib/pare/pare/models/head/__init__.py: -------------------------------------------------------------------------------- 1 | from .pare_head import PareHead 2 | from .hmr_head import HMRHead 3 | from .smpl_head import SMPLHead 4 | from .smpl_cam_head import SMPLCamHead 5 | -------------------------------------------------------------------------------- /lib/pare/pare/models/head/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/head/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/head/__pycache__/hmr_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/head/__pycache__/hmr_head.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/head/__pycache__/pare_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/head/__pycache__/pare_head.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/head/__pycache__/smpl_cam_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/head/__pycache__/smpl_cam_head.cpython-38.pyc -------------------------------------------------------------------------------- 
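# NOTE (illustrative sketch, not part of the original sources): size downstream
# layers from the lookup table above instead of hard-coding per-backbone constants.
from lib.pare.pare.models.backbone.utils import get_backbone_info

info = get_backbone_info('resnet50')
n_channels = info['n_output_channels']   # 2048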
/lib/pare/pare/models/head/__pycache__/smpl_head.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/head/__pycache__/smpl_head.cpython-38.pyc
--------------------------------------------------------------------------------
/lib/pare/pare/models/head/smpl_head.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
4 | # holder of all proprietary rights on this computer program.
5 | # You can only use this computer program if you have closed
6 | # a license agreement with MPG or you get the right to use the computer
7 | # program from someone who is authorized to grant you that right.
8 | # Any use of the computer program without a valid license is prohibited and
9 | # liable to prosecution.
10 | #
11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
13 | # for Intelligent Systems. All rights reserved.
14 | #
15 | # Contact: ps-license@tuebingen.mpg.de
16 | 
17 | import torch
18 | import numpy as np
19 | import torch.nn as nn
20 | 
21 | from lib.smplx import SMPL as _SMPL
22 | from lib.smplx.utils import SMPLOutput
23 | from lib.smplx.lbs import vertices2joints
24 | 
25 | from ...core import config, constants
26 | from ...utils.geometry import perspective_projection, convert_weak_perspective_to_perspective
27 | 
28 | 
29 | class SMPL(_SMPL):
30 |     """ Extension of the official SMPL implementation to support more joints """
31 |     def __init__(self, *args, **kwargs):
32 |         super(SMPL, self).__init__(*args, **kwargs)
33 |         joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]
34 |         J_regressor_extra = np.load(config.JOINT_REGRESSOR_TRAIN_EXTRA)
35 |         self.register_buffer(
36 |             'J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32)
37 |         )
38 |         self.joint_map = torch.tensor(joints, dtype=torch.long)
39 | 
40 |     def forward(self, *args, **kwargs):
41 |         kwargs['get_skin'] = True
42 |         smpl_output = super(SMPL, self).forward(*args, **kwargs)
43 |         extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)
44 |         joints = torch.cat([smpl_output.joints, extra_joints], dim=1)
45 |         joints = joints[:, self.joint_map, :]
46 |         output = SMPLOutput(
47 |             vertices=smpl_output.vertices,
48 |             global_orient=smpl_output.global_orient,
49 |             body_pose=smpl_output.body_pose,
50 |             joints=joints,
51 |             betas=smpl_output.betas,
52 |             full_pose=smpl_output.full_pose
53 |         )
54 |         return output
55 | 
56 | 
57 | class SMPLHead(nn.Module):
58 |     def __init__(self, focal_length=5000., img_res=224):
59 |         super(SMPLHead, self).__init__()
60 |         self.smpl = SMPL(config.SMPL_MODEL_DIR, create_transl=False)
61 |         self.add_module('smpl', self.smpl)
62 |         self.focal_length = focal_length
63 |         self.img_res = img_res
64 | 
65 |     def forward(self, rotmat, shape, cam=None, normalize_joints2d=False):
66 |         '''
67 |         :param rotmat: rotation matrices of shape (N, J, 3, 3) (not Euler angles; pose2rot=False below)
68 |         :param shape: smpl betas
69 |         :param cam: weak perspective camera
70 |         :param normalize_joints2d: bool, normalize joints between -1, 1 if true
71 |         :return: dict with keys 'smpl_vertices' and 'smpl_joints3d', plus 'smpl_joints2d' and 'pred_cam_t' if cam is not None
72 |         '''
73 |         smpl_output = self.smpl(
74 |             betas=shape,
75 |             body_pose=rotmat[:, 1:].contiguous(),
76 |             global_orient=rotmat[:,
0].unsqueeze(1).contiguous(), 77 | pose2rot=False, 78 | ) 79 | 80 | output = { 81 | 'smpl_vertices': smpl_output.vertices, 82 | 'smpl_joints3d': smpl_output.joints, 83 | } 84 | if cam is not None: 85 | joints3d = smpl_output.joints 86 | batch_size = joints3d.shape[0] 87 | device = joints3d.device 88 | cam_t = convert_weak_perspective_to_perspective( 89 | cam, 90 | focal_length=self.focal_length, 91 | img_res=self.img_res, 92 | ) 93 | joints2d = perspective_projection( 94 | joints3d, 95 | rotation=torch.eye(3, device=device).unsqueeze(0).expand(batch_size, -1, -1), 96 | translation=cam_t, 97 | focal_length=self.focal_length, 98 | camera_center=torch.zeros(batch_size, 2, device=device) 99 | ) 100 | if normalize_joints2d: 101 | # Normalize keypoints to [-1,1] 102 | joints2d = joints2d / (self.img_res / 2.) 103 | 104 | output['smpl_joints2d'] = joints2d 105 | output['pred_cam_t'] = cam_t 106 | 107 | return output 108 | -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/__init__.py: -------------------------------------------------------------------------------- 1 | from .locallyconnected2d import LocallyConnected2d 2 | from .interpolate import interpolate 3 | from .nonlocalattention import NonLocalAttention 4 | from .keypoint_attention import KeypointAttention 5 | -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/layers/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/__pycache__/coattention.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/layers/__pycache__/coattention.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/__pycache__/interpolate.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/layers/__pycache__/interpolate.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/__pycache__/keypoint_attention.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/layers/__pycache__/keypoint_attention.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/__pycache__/locallyconnected2d.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/layers/__pycache__/locallyconnected2d.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/__pycache__/nonlocalattention.cpython-38.pyc: 
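# NOTE (illustrative sketch, not part of the original sources; assumes the SMPL
# model files referenced by config.SMPL_MODEL_DIR are installed): identity
# rotation matrices give the rest pose, and a weak-perspective cam [s, tx, ty]
# additionally yields projected 2D joints.
import torch
from lib.pare.pare.models.head.smpl_head import SMPLHead

head = SMPLHead(focal_length=5000., img_res=224)
rotmat = torch.eye(3).repeat(1, 24, 1, 1)   # [1, 24, 3, 3]: global orient + 23 body joints
betas = torch.zeros(1, 10)                  # SMPL shape coefficients
cam = torch.tensor([[0.9, 0.0, 0.0]])       # weak-perspective [s, tx, ty]
out = head(rotmat, betas, cam=cam, normalize_joints2d=True)
# out keys: 'smpl_vertices', 'smpl_joints3d', 'smpl_joints2d', 'pred_cam_t'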
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/layers/__pycache__/nonlocalattention.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/__pycache__/softargmax.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/layers/__pycache__/softargmax.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/attention.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | import torch 18 | from torch import nn 19 | 20 | 21 | def init_weights(m): 22 | if type(m) == nn.Linear: 23 | torch.nn.init.uniform_(m.weight, -0.1, 0.1) 24 | m.bias.data.fill_(0.01) 25 | 26 | 27 | class SelfAttention(nn.Module): 28 | def __init__( 29 | self, attention_size, batch_first=False, layers=1, dropout=.0, non_linearity='tanh' 30 | ): 31 | super(SelfAttention, self).__init__() 32 | 33 | self.batch_first = batch_first 34 | 35 | if non_linearity == 'relu': 36 | activation = nn.ReLU() 37 | else: 38 | activation = nn.Tanh() 39 | 40 | modules = [] 41 | for i in range(layers - 1): 42 | modules.append(nn.Linear(attention_size, attention_size)) 43 | modules.append(activation) 44 | modules.append(nn.Dropout(dropout)) 45 | 46 | # last attention layer must output 1 47 | modules.append(nn.Linear(attention_size, 1)) 48 | modules.append(activation) 49 | modules.append(nn.Dropout(dropout)) 50 | 51 | self.attention = nn.Sequential(*modules) 52 | self.attention.apply(init_weights) 53 | self.softmax = nn.Softmax(dim=-1) 54 | 55 | def forward(self, inputs): 56 | 57 | ################################################################## 58 | # STEP 1 - perform dot product 59 | # of the attention vector and each hidden state 60 | ################################################################## 61 | 62 | # inputs is a 3D Tensor: batch, len, hidden_size 63 | # scores is a 2D Tensor: batch, len 64 | scores = self.attention(inputs).squeeze() 65 | scores = self.softmax(scores) 66 | 67 | ################################################################## 68 | # Step 2 - Weighted sum of hidden states, by the attention scores 69 | ################################################################## 70 | 71 | # multiply each hidden state with the attention weights 72 | weighted = torch.mul(inputs, scores.unsqueeze(-1).expand_as(inputs)) 73 | 74 | # sum the hidden states 75 | # representations = weighted.sum(1).squeeze() 76 | 
representations = weighted.sum(1).squeeze()
77 |         return representations, scores
78 | 
--------------------------------------------------------------------------------
/lib/pare/pare/models/layers/interpolate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
4 | # holder of all proprietary rights on this computer program.
5 | # You can only use this computer program if you have closed
6 | # a license agreement with MPG or you get the right to use the computer
7 | # program from someone who is authorized to grant you that right.
8 | # Any use of the computer program without a valid license is prohibited and
9 | # liable to prosecution.
10 | #
11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
13 | # for Intelligent Systems. All rights reserved.
14 | #
15 | # Contact: ps-license@tuebingen.mpg.de
16 | 
17 | import torch
18 | 
19 | 
20 | def interpolate(feat, uv):
21 |     '''
22 | 
23 |     :param feat: [B, C, H, W] image features
24 |     :param uv: [B, 2, N] uv coordinates in the image plane, range [-1, 1]
25 |     :return: [B, C, N] image features at the uv coordinates
26 |     '''
27 |     if uv.shape[-1] != 2:
28 |         uv = uv.transpose(1, 2)  # [B, N, 2]
29 |     uv = uv.unsqueeze(2)  # [B, N, 1, 2]
30 |     # NOTE: for newer PyTorch, it seems that training results are degraded due to implementation diff in F.grid_sample
31 |     # grid_sample only gained align_corners in torch 0.4, so compare (major, minor) rather than the minor version alone.
32 |     major, minor = (int(v) for v in torch.__version__.split('.')[:2])
33 |     if (major, minor) < (0, 4):
34 |         samples = torch.nn.functional.grid_sample(feat, uv)  # [B, C, N, 1]
35 |     else:
36 |         samples = torch.nn.functional.grid_sample(feat, uv, align_corners=True)  # [B, C, N, 1]
37 |     return samples[:, :, :, 0]  # [B, C, N]
38 | 
--------------------------------------------------------------------------------
/lib/pare/pare/models/layers/keypoint_attention.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
4 | # holder of all proprietary rights on this computer program.
5 | # You can only use this computer program if you have closed
6 | # a license agreement with MPG or you get the right to use the computer
7 | # program from someone who is authorized to grant you that right.
8 | # Any use of the computer program without a valid license is prohibited and
9 | # liable to prosecution.
10 | #
11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
13 | # for Intelligent Systems. All rights reserved.
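# NOTE (illustrative sanity check, not part of the original sources): with
# align_corners=True, uv = (-1, -1) maps to pixel (0, 0) and uv = (1, 1) to
# pixel (W-1, H-1).
import torch
from lib.pare.pare.models.layers.interpolate import interpolate

feat = torch.arange(16.).view(1, 1, 4, 4)        # feat[0, 0, y, x] = 4*y + x
uv = torch.tensor([[[-1.0, -1.0], [1.0, 1.0]]])  # [B, N, 2]: two opposite corners
print(interpolate(feat, uv))                     # tensor([[[ 0., 15.]]])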
14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | import torch 18 | import numpy as np 19 | import torch.nn as nn 20 | import torch.nn.functional as F 21 | 22 | 23 | class KeypointAttention(nn.Module): 24 | def __init__( 25 | self, 26 | use_conv=False, 27 | in_channels=(256, 64), 28 | out_channels=(256, 64), 29 | act='softmax', 30 | use_scale=False 31 | ): 32 | super(KeypointAttention, self).__init__() 33 | self.use_conv = use_conv 34 | self.in_channels = in_channels 35 | self.out_channels = out_channels 36 | self.act = act 37 | self.use_scale = use_scale 38 | if use_conv: 39 | self.conv1x1_pose = nn.Conv1d(in_channels[0], out_channels[0], kernel_size=1) 40 | self.conv1x1_shape_cam = nn.Conv1d(in_channels[1], out_channels[1], kernel_size=1) 41 | 42 | def forward(self, features, heatmaps): 43 | batch_size, num_joints, height, width = heatmaps.shape 44 | 45 | if self.use_scale: 46 | scale = 1.0 / np.sqrt(height * width) 47 | heatmaps = heatmaps * scale 48 | 49 | if self.act == 'softmax': 50 | normalized_heatmap = F.softmax(heatmaps.reshape(batch_size, num_joints, -1), dim=-1) 51 | elif self.act == 'sigmoid': 52 | normalized_heatmap = torch.sigmoid(heatmaps.reshape(batch_size, num_joints, -1)) 53 | features = features.reshape(batch_size, -1, height * width) 54 | 55 | attended_features = torch.matmul(normalized_heatmap, features.transpose(2, 1)) 56 | attended_features = attended_features.transpose(2, 1) 57 | 58 | if self.use_conv: 59 | if attended_features.shape[1] == self.in_channels[0]: 60 | attended_features = self.conv1x1_pose(attended_features) 61 | else: 62 | attended_features = self.conv1x1_shape_cam(attended_features) 63 | 64 | return attended_features 65 | -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/locallyconnected2d.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 
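# NOTE (illustrative shapes, not part of the original sources): each joint
# heatmap is softmax-normalized into spatial weights that pool the feature map
# into one vector per joint.
import torch
from lib.pare.pare.models.layers.keypoint_attention import KeypointAttention

attn = KeypointAttention(use_conv=False)
features = torch.rand(2, 256, 56, 56)   # [B, C, H, W]
heatmaps = torch.rand(2, 24, 56, 56)    # [B, J, H, W]
out = attn(features, heatmaps)          # [B, C, J] -> torch.Size([2, 256, 24])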
14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | import torch 18 | import torch.nn as nn 19 | from torch.nn.modules.utils import _pair 20 | 21 | 22 | class LocallyConnected2d(nn.Module): 23 | def __init__(self, in_channels, out_channels, output_size, kernel_size, stride, bias=False): 24 | super(LocallyConnected2d, self).__init__() 25 | output_size = _pair(output_size) 26 | self.weight = nn.Parameter( 27 | torch.randn( 28 | 1, out_channels, in_channels, output_size[0], output_size[1], kernel_size**2 29 | ), 30 | requires_grad=True, 31 | ) 32 | if bias: 33 | self.bias = nn.Parameter( 34 | torch.randn(1, out_channels, output_size[0], output_size[1]), requires_grad=True 35 | ) 36 | else: 37 | self.register_parameter('bias', None) 38 | self.kernel_size = _pair(kernel_size) 39 | self.stride = _pair(stride) 40 | 41 | def forward(self, x): 42 | _, c, h, w = x.size() 43 | kh, kw = self.kernel_size 44 | dh, dw = self.stride 45 | x = x.unfold(2, kh, dh).unfold(3, kw, dw) 46 | x = x.contiguous().view(*x.size()[:-2], -1) 47 | # Sum in in_channel and kernel_size dims 48 | out = (x.unsqueeze(1) * self.weight).sum([2, -1]) 49 | if self.bias is not None: 50 | out += self.bias 51 | return out 52 | -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/non_local/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/layers/non_local/__init__.py -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/non_local/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/layers/non_local/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/non_local/__pycache__/dot_product.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/models/layers/non_local/__pycache__/dot_product.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/models/layers/nonlocalattention.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 
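# NOTE (illustrative sketch, not part of the original sources): unlike Conv2d,
# every output location of LocallyConnected2d gets its own kernel, so
# output_size must equal the unfolded spatial size (H - kernel)/stride + 1.
import torch
from lib.pare.pare.models.layers.locallyconnected2d import LocallyConnected2d

lc = LocallyConnected2d(in_channels=8, out_channels=4, output_size=(6, 6),
                        kernel_size=3, stride=1, bias=True)
y = lc(torch.rand(2, 8, 8, 8))   # (8 - 3)/1 + 1 = 6, so y has shape [2, 4, 6, 6]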
14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | import torch 18 | import torch.nn as nn 19 | import torch.nn.functional as F 20 | 21 | 22 | class NonLocalAttention(nn.Module): 23 | def __init__( 24 | self, 25 | in_channels=256, 26 | out_channels=256, 27 | ): 28 | super(NonLocalAttention, self).__init__() 29 | self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1) 30 | 31 | def forward(self, input): 32 | ''' 33 | input [N, Feats, J, 1] 34 | output [N, Feats, J, 1] 35 | ''' 36 | batch_size, n_feats, n_joints, _ = input.shape 37 | input = input.squeeze(-1) 38 | 39 | # Compute attention weights 40 | attention = torch.matmul(input.transpose(2, 1), input) 41 | norm_attention = F.softmax(attention, dim=-1) 42 | 43 | # Compute final dot product 44 | out = torch.matmul(input, norm_attention) 45 | out = self.conv1x1(out) 46 | 47 | out = out.unsqueeze(-1) # [N, F, J, 1] 48 | return out 49 | 50 | 51 | if __name__ == '__main__': 52 | nla = NonLocalAttention() 53 | 54 | inp = torch.rand(32, 256, 24, 1) 55 | 56 | out = nla(inp) 57 | print(out.shape) 58 | -------------------------------------------------------------------------------- /lib/pare/pare/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/utils/__init__.py -------------------------------------------------------------------------------- /lib/pare/pare/utils/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/utils/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/utils/__pycache__/geometry.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/utils/__pycache__/geometry.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/utils/__pycache__/kp_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/utils/__pycache__/kp_utils.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pare/pare/utils/__pycache__/train_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pare/pare/utils/__pycache__/train_utils.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pixielib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/__init__.py -------------------------------------------------------------------------------- /lib/pixielib/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pixielib/__pycache__/pixie.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/__pycache__/pixie.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pixielib/models/FLAME.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # Using this computer program means that you agree to the terms 6 | # in the LICENSE file included with this software distribution. 7 | # Any use not explicitly granted by the LICENSE is prohibited. 8 | # 9 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 10 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 11 | # for Intelligent Systems. All rights reserved. 12 | # 13 | # For comments or questions, please email us at pixie@tue.mpg.de 14 | # For commercial licensing contact, please contact ps-license@tuebingen.mpg.de 15 | 16 | import torch 17 | import torch.nn as nn 18 | import numpy as np 19 | import pickle 20 | import torch.nn.functional as F 21 | 22 | 23 | class FLAMETex(nn.Module): 24 | """ 25 | FLAME texture: 26 | https://github.com/TimoBolkart/TF_FLAME/blob/ade0ab152300ec5f0e8555d6765411555c5ed43d/sample_texture.py#L64 27 | FLAME texture converted from BFM: 28 | https://github.com/TimoBolkart/BFM_to_FLAME 29 | """ 30 | def __init__(self, config): 31 | super(FLAMETex, self).__init__() 32 | if config.tex_type == 'BFM': 33 | mu_key = 'MU' 34 | pc_key = 'PC' 35 | n_pc = 199 36 | tex_path = config.tex_path 37 | tex_space = np.load(tex_path) 38 | texture_mean = tex_space[mu_key].reshape(1, -1) 39 | texture_basis = tex_space[pc_key].reshape(-1, n_pc) 40 | 41 | elif config.tex_type == 'FLAME': 42 | mu_key = 'mean' 43 | pc_key = 'tex_dir' 44 | n_pc = 200 45 | tex_path = config.flame_tex_path 46 | tex_space = np.load(tex_path) 47 | texture_mean = tex_space[mu_key].reshape(1, -1) / 255. 48 | texture_basis = tex_space[pc_key].reshape(-1, n_pc) / 255. 49 | else: 50 | print('texture type ', config.tex_type, 'not exist!') 51 | raise NotImplementedError 52 | 53 | n_tex = config.n_tex 54 | num_components = texture_basis.shape[1] 55 | texture_mean = torch.from_numpy(texture_mean).float()[None, ...] 56 | texture_basis = torch.from_numpy(texture_basis[:, :n_tex]).float()[None, ...] 
57 |         self.register_buffer('texture_mean', texture_mean)
58 |         self.register_buffer('texture_basis', texture_basis)
59 | 
60 |     def forward(self, texcode=None):
61 |         '''
62 |         texcode: [batchsize, n_tex]
63 |         texture: [bz, 3, 256, 256], range: 0-1
64 |         '''
65 |         texture = self.texture_mean + \
66 |             (self.texture_basis*texcode[:, None, :]).sum(-1)
67 |         texture = texture.reshape(texcode.shape[0], 512, 512, 3).permute(0, 3, 1, 2)
68 |         texture = F.interpolate(texture, [256, 256])
69 |         texture = texture[:, [2, 1, 0], :, :]
70 |         return texture
71 | 
72 | 
73 | def texture_flame2smplx(cached_data, flame_texture, smplx_texture):
74 |     ''' Convert a FLAME texture map (face only) into an SMPL-X texture map (which includes body texture)
75 |     TODO: pytorch version ==> grid sample
76 |     '''
77 |     if smplx_texture.shape[0] != smplx_texture.shape[1]:
78 |         print('SMPL-X texture is not square (%d != %d)' % (smplx_texture.shape[0], smplx_texture.shape[1]))
79 |         return
80 |     if smplx_texture.shape[0] != cached_data['target_resolution']:
81 |         print(
82 |             'SMPL-X texture size does not match cached image resolution (%d != %d)' %
83 |             (smplx_texture.shape[0], cached_data['target_resolution'])
84 |         )
85 |         return
86 |     x_coords = cached_data['x_coords']
87 |     y_coords = cached_data['y_coords']
88 |     target_pixel_ids = cached_data['target_pixel_ids']
89 |     source_uv_points = cached_data['source_uv_points']
90 | 
91 |     source_tex_coords = np.zeros_like(source_uv_points).astype(int)
92 |     source_tex_coords[:, 0] = np.clip(
93 |         flame_texture.shape[0] * (1.0 - source_uv_points[:, 1]), 0.0, flame_texture.shape[0] - 1
94 |     ).astype(int)
95 |     source_tex_coords[:, 1] = np.clip(
96 |         flame_texture.shape[1] * (source_uv_points[:, 0]), 0.0, flame_texture.shape[1] - 1
97 |     ).astype(int)
98 | 
99 |     smplx_texture[y_coords[target_pixel_ids].astype(int),
100 |                   x_coords[target_pixel_ids].astype(int), :] = \
101 |         flame_texture[source_tex_coords[:, 0], source_tex_coords[:, 1]]
102 | 
103 |     return smplx_texture
104 | 
--------------------------------------------------------------------------------
/lib/pixielib/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/models/__init__.py
--------------------------------------------------------------------------------
/lib/pixielib/models/__pycache__/SMPLX.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/models/__pycache__/SMPLX.cpython-38.pyc
--------------------------------------------------------------------------------
/lib/pixielib/models/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/models/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/lib/pixielib/models/__pycache__/encoders.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/models/__pycache__/encoders.cpython-38.pyc
--------------------------------------------------------------------------------
/lib/pixielib/models/__pycache__/hrnet.cpython-38.pyc:
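# NOTE (illustrative sketch for FLAMETex above, kept entirely as comments
# because it needs the external texture-space file referenced by the config;
# all paths/values below are placeholders, not shipped with this repo):
#   cfg.tex_type = 'FLAME'
#   cfg.flame_tex_path = '<path/to/FLAME_texture_space.npz>'
#   cfg.n_tex = 50
#   flametex = FLAMETex(cfg)
#   albedo = flametex(torch.zeros(1, cfg.n_tex))  # [1, 3, 256, 256]; note the channel flip at the end of forward()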
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/models/__pycache__/hrnet.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pixielib/models/__pycache__/lbs.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/models/__pycache__/lbs.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pixielib/models/__pycache__/moderators.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/models/__pycache__/moderators.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pixielib/models/__pycache__/resnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/models/__pycache__/resnet.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pixielib/models/encoders.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import torch 4 | import torch.nn.functional as F 5 | 6 | 7 | class ResnetEncoder(nn.Module): 8 | def __init__(self, append_layers=None): 9 | super(ResnetEncoder, self).__init__() 10 | from . import resnet 11 | # feature_size = 2048 12 | self.feature_dim = 2048 13 | self.encoder = resnet.load_ResNet50Model() # out: 2048 14 | # regressor 15 | self.append_layers = append_layers 16 | # for normalize input images 17 | MEAN = [0.485, 0.456, 0.406] 18 | STD = [0.229, 0.224, 0.225] 19 | self.register_buffer('MEAN', torch.tensor(MEAN)[None, :, None, None]) 20 | self.register_buffer('STD', torch.tensor(STD)[None, :, None, None]) 21 | 22 | def forward(self, inputs): 23 | ''' inputs: [bz, 3, h, w], range: [0,1] 24 | ''' 25 | inputs = (inputs - self.MEAN) / self.STD 26 | features = self.encoder(inputs) 27 | if self.append_layers: 28 | features = self.last_op(features) 29 | return features 30 | 31 | 32 | class MLP(nn.Module): 33 | def __init__(self, channels=[2048, 1024, 1], last_op=None): 34 | super(MLP, self).__init__() 35 | layers = [] 36 | 37 | for l in range(0, len(channels) - 1): 38 | layers.append(nn.Linear(channels[l], channels[l + 1])) 39 | if l < len(channels) - 2: 40 | layers.append(nn.ReLU()) 41 | if last_op: 42 | layers.append(last_op) 43 | 44 | self.layers = nn.Sequential(*layers) 45 | 46 | def forward(self, inputs): 47 | outs = self.layers(inputs) 48 | return outs 49 | 50 | 51 | class HRNEncoder(nn.Module): 52 | def __init__(self, append_layers=None): 53 | super(HRNEncoder, self).__init__() 54 | from . 
import hrnet 55 | self.feature_dim = 2048 56 | self.encoder = hrnet.load_HRNet(pretrained=True) # out: 2048 57 | # regressor 58 | self.append_layers = append_layers 59 | # for normalize input images 60 | MEAN = [0.485, 0.456, 0.406] 61 | STD = [0.229, 0.224, 0.225] 62 | self.register_buffer('MEAN', torch.tensor(MEAN)[None, :, None, None]) 63 | self.register_buffer('STD', torch.tensor(STD)[None, :, None, None]) 64 | 65 | def forward(self, inputs): 66 | ''' inputs: [bz, 3, h, w], range: [0,1] 67 | ''' 68 | inputs = (inputs - self.MEAN) / self.STD 69 | features = self.encoder(inputs)['concat'] 70 | if self.append_layers: 71 | features = self.last_op(features) 72 | return features 73 | -------------------------------------------------------------------------------- /lib/pixielib/models/moderators.py: -------------------------------------------------------------------------------- 1 | ''' Moderator 2 | # Input feature: body, part(head, hand) 3 | # output: fused feature, weight 4 | ''' 5 | import numpy as np 6 | import torch.nn as nn 7 | import torch 8 | import torch.nn.functional as F 9 | 10 | # MLP + temperature softmax 11 | # w = SoftMax(w^\prime * temperature) 12 | 13 | 14 | class TempSoftmaxFusion(nn.Module): 15 | def __init__(self, channels=[2048 * 2, 1024, 1], detach_inputs=False, detach_feature=False): 16 | super(TempSoftmaxFusion, self).__init__() 17 | self.detach_inputs = detach_inputs 18 | self.detach_feature = detach_feature 19 | # weight 20 | layers = [] 21 | for l in range(0, len(channels) - 1): 22 | layers.append(nn.Linear(channels[l], channels[l + 1])) 23 | if l < len(channels) - 2: 24 | layers.append(nn.ReLU()) 25 | self.layers = nn.Sequential(*layers) 26 | # temperature 27 | self.register_parameter('temperature', nn.Parameter(torch.ones(1))) 28 | 29 | def forward(self, x, y, work=True): 30 | ''' 31 | x: feature from body 32 | y: feature from part(head/hand) 33 | work: whether to fuse features 34 | ''' 35 | if work: 36 | # 1. cat input feature, predict the weights 37 | f_in = torch.cat([x, y], dim=1) 38 | if self.detach_inputs: 39 | f_in = f_in.detach() 40 | f_temp = self.layers(f_in) 41 | f_weight = F.softmax(f_temp * self.temperature, dim=1) 42 | 43 | # 2. feature fusion 44 | if self.detach_feature: 45 | x = x.detach() 46 | y = y.detach() 47 | f_out = f_weight[:, [0]] * x + f_weight[:, [1]] * y 48 | x_out = f_out 49 | y_out = f_out 50 | else: 51 | x_out = x 52 | y_out = y 53 | f_weight = None 54 | return x_out, y_out, f_weight 55 | 56 | 57 | # MLP + Gumbel-Softmax trick 58 | # w = w^{\prime} - w^{\prime}\text{.detach()} + w^{\prime}\text{.gt(0.5)} 59 | 60 | 61 | class GumbelSoftmaxFusion(nn.Module): 62 | def __init__(self, channels=[2048 * 2, 1024, 1], detach_inputs=False, detach_feature=False): 63 | super(GumbelSoftmaxFusion, self).__init__() 64 | self.detach_inputs = detach_inputs 65 | self.detach_feature = detach_feature 66 | 67 | # weight 68 | layers = [] 69 | for l in range(0, len(channels) - 1): 70 | layers.append(nn.Linear(channels[l], channels[l + 1])) 71 | if l < len(channels) - 2: 72 | layers.append(nn.ReLU()) 73 | layers.append(nn.Softmax()) 74 | self.layers = nn.Sequential(*layers) 75 | 76 | def forward(self, x, y, work=True): 77 | ''' 78 | x: feature from body 79 | y: feature from part(head/hand) 80 | work: whether to fuse features 81 | ''' 82 | if work: 83 | # 1. 
cat input feature, predict the weights 84 | f_in = torch.cat([x, y], dim=-1) 85 | if self.detach_inputs: 86 | f_in = f_in.detach() 87 | f_weight = self.layers(f_in) 88 | # weight to be hard 89 | f_weight = f_weight - f_weight.detach() + f_weight.gt(0.5) 90 | 91 | # 2. feature fusion 92 | if self.detach_feature: 93 | x = x.detach() 94 | y = y.detach() 95 | f_out = f_weight[:, [0]] * x + f_weight[:, [1]] * y 96 | x_out = f_out 97 | y_out = f_out 98 | else: 99 | x_out = x 100 | y_out = y 101 | f_weight = None 102 | return x_out, y_out, f_weight 103 | -------------------------------------------------------------------------------- /lib/pixielib/utils/__pycache__/config.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/utils/__pycache__/config.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pixielib/utils/__pycache__/rotation_converter.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/utils/__pycache__/rotation_converter.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pixielib/utils/__pycache__/tensor_cropper.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/utils/__pycache__/tensor_cropper.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pixielib/utils/__pycache__/util.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pixielib/utils/__pycache__/util.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pixielib/utils/array_cropper.py: -------------------------------------------------------------------------------- 1 | ''' 2 | crop 3 | for numpy array 4 | Given image, bbox(center, bboxsize) 5 | return: cropped image, tform(used for transform the keypoint accordingly) 6 | 7 | only support crop to squared images 8 | ''' 9 | 10 | import numpy as np 11 | from skimage.transform import estimate_transform, warp, resize, rescale 12 | 13 | 14 | def points2bbox(points, points_scale=None): 15 | # recover range 16 | if points_scale: 17 | points[:, 0] = points[:, 0] * points_scale[1] / 2 + points_scale[1] / 2 18 | points[:, 1] = points[:, 1] * points_scale[0] / 2 + points_scale[0] / 2 19 | 20 | left = np.min(points[:, 0]) 21 | right = np.max(points[:, 0]) 22 | top = np.min(points[:, 1]) 23 | bottom = np.max(points[:, 1]) 24 | size = max(right - left, bottom - top) 25 | # + old_size*0.1]) 26 | center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0]) 27 | return center, size 28 | # translate center 29 | 30 | 31 | def augment_bbox(center, bbox_size, scale=[1.0, 1.0], trans_scale=0.): 32 | trans_scale = (np.random.rand(2) * 2 - 1) * trans_scale 33 | center = center + trans_scale * bbox_size # 0.5 34 | scale = np.random.rand() * (scale[1] - scale[0]) + scale[0] 35 | size = int(bbox_size * scale) 36 | return center, size 37 | 38 | 39 | def crop_array(image, center, bboxsize, 
crop_size): 40 | ''' for a single image only 41 | Args: 42 | image (numpy.ndarray): the reference array of shape HxWxC. 43 | center (numpy.ndarray): bbox center (x, y) in pixels. 44 | bboxsize (float), crop_size (int): bbox side length and the output height/width of the extracted patch. 45 | Returns: 46 | cropped_image 47 | tform: 3x3 affine matrix 48 | ''' 49 | # points: top-left, top-right, bottom-right 50 | src_pts = np.array( 51 | [ 52 | [center[0] - bboxsize / 2, center[1] - bboxsize / 2], 53 | [center[0] + bboxsize / 2, center[1] - bboxsize / 2], 54 | [center[0] + bboxsize / 2, center[1] + bboxsize / 2] 55 | ] 56 | ) 57 | DST_PTS = np.array([[0, 0], [crop_size - 1, 0], [crop_size - 1, crop_size - 1]]) 58 | 59 | # estimate transformation between points 60 | tform = estimate_transform('similarity', src_pts, DST_PTS) 61 | 62 | # warp images 63 | cropped_image = warp(image, tform.inverse, output_shape=(crop_size, crop_size)) 64 | 65 | return cropped_image, tform.params.T 66 | 67 | 68 | class Cropper(object): 69 | def __init__(self, crop_size, scale=[1, 1], trans_scale=0.): 70 | self.crop_size = crop_size 71 | self.scale = scale 72 | self.trans_scale = trans_scale 73 | 74 | def crop(self, image, points, points_scale=None): 75 | # points to bbox 76 | center, bbox_size = points2bbox(points, points_scale) 77 | # augment bbox. 78 | center, bbox_size = augment_bbox( 79 | center, bbox_size, scale=self.scale, trans_scale=self.trans_scale 80 | ) 81 | # crop 82 | cropped_image, tform = crop_array(image, center, bbox_size, self.crop_size) 83 | return cropped_image, tform 84 | -------------------------------------------------------------------------------- /lib/pymaf/configs/pymaf_config.yaml: -------------------------------------------------------------------------------- 1 | SOLVER: 2 | MAX_ITER: 500000 3 | TYPE: Adam 4 | BASE_LR: 0.00005 5 | GAMMA: 0.1 6 | STEPS: [0] 7 | EPOCHS: [0] 8 | DEBUG: False 9 | LOGDIR: '' 10 | DEVICE: cuda 11 | NUM_WORKERS: 8 12 | SEED_VALUE: -1 13 | LOSS: 14 | KP_2D_W: 300.0 15 | KP_3D_W: 300.0 16 | SHAPE_W: 0.06 17 | POSE_W: 60.0 18 | VERT_W: 0.0 19 | INDEX_WEIGHTS: 2.0 20 | # Loss weights for surface parts. (24 Parts) 21 | PART_WEIGHTS: 0.3 22 | # Loss weights for UV regression.
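# (these LOSS defaults mirror cfg.LOSS in lib/pymaf/core/cfgs.py; each weight scales its corresponding term in the training objective)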
23 | POINT_REGRESSION_WEIGHTS: 0.5 24 | TRAIN: 25 | NUM_WORKERS: 8 26 | BATCH_SIZE: 64 27 | PIN_MEMORY: True 28 | TEST: 29 | BATCH_SIZE: 32 30 | MODEL: 31 | PyMAF: 32 | BACKBONE: 'res50' 33 | MLP_DIM: [256, 128, 64, 5] 34 | N_ITER: 3 35 | AUX_SUPV_ON: True 36 | DP_HEATMAP_SIZE: 56 37 | RES_MODEL: 38 | DECONV_WITH_BIAS: False 39 | NUM_DECONV_LAYERS: 3 40 | NUM_DECONV_FILTERS: 41 | - 256 42 | - 256 43 | - 256 44 | NUM_DECONV_KERNELS: 45 | - 4 46 | - 4 47 | - 4 48 | -------------------------------------------------------------------------------- /lib/pymaf/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/core/__init__.py -------------------------------------------------------------------------------- /lib/pymaf/core/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/core/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/core/__pycache__/cfgs.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/core/__pycache__/cfgs.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/core/__pycache__/constants.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/core/__pycache__/constants.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/core/__pycache__/path_config.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/core/__pycache__/path_config.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/core/base_trainer.py: -------------------------------------------------------------------------------- 1 | # This script is borrowed and extended from https://github.com/nkolot/SPIN/blob/master/utils/base_trainer.py 2 | from __future__ import division 3 | import logging 4 | from utils import CheckpointSaver 5 | from tensorboardX import SummaryWriter 6 | 7 | import torch 8 | from tqdm import tqdm 9 | 10 | tqdm.monitor_interval = 0 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class BaseTrainer(object): 16 | """Base class for Trainer objects. 17 | Takes care of checkpointing/logging/resuming training. 18 | """ 19 | def __init__(self, options): 20 | self.options = options 21 | if options.multiprocessing_distributed: 22 | self.device = torch.device('cuda', options.gpu) 23 | else: 24 | self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 25 | # override this function to define your model, optimizers etc. 
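# Construction order: build the checkpoint saver, create the rank-0 TensorBoard writer, let the subclass build its models/optimizers in init_fn(), then restore the epoch/step counters from the latest checkpoint when --resume is set (see below).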
26 | self.saver = CheckpointSaver(save_dir=options.checkpoint_dir, overwrite=options.overwrite) 27 | if options.rank == 0: 28 | self.summary_writer = SummaryWriter(self.options.summary_dir) 29 | self.init_fn() 30 | 31 | self.checkpoint = None 32 | if options.resume and self.saver.exists_checkpoint(): 33 | self.checkpoint = self.saver.load_checkpoint(self.models_dict, self.optimizers_dict) 34 | 35 | if self.checkpoint is None: 36 | self.epoch_count = 0 37 | self.step_count = 0 38 | else: 39 | self.epoch_count = self.checkpoint['epoch'] 40 | self.step_count = self.checkpoint['total_step_count'] 41 | 42 | if self.checkpoint is not None: 43 | self.checkpoint_batch_idx = self.checkpoint['batch_idx'] 44 | else: 45 | self.checkpoint_batch_idx = 0 46 | 47 | self.best_performance = float('inf') 48 | 49 | def load_pretrained(self, checkpoint_file=None): 50 | """Load a pretrained checkpoint. 51 | This is different from resuming training using --resume. 52 | """ 53 | if checkpoint_file is not None: 54 | checkpoint = torch.load(checkpoint_file) 55 | for model in self.models_dict: 56 | if model in checkpoint: 57 | self.models_dict[model].load_state_dict(checkpoint[model], strict=True) 58 | print(f'Checkpoint {model} loaded') 59 | 60 | def move_dict_to_device(self, dict, device, tensor2float=False): 61 | for k, v in dict.items(): 62 | if isinstance(v, torch.Tensor): 63 | if tensor2float: 64 | dict[k] = v.float().to(device) 65 | else: 66 | dict[k] = v.to(device) 67 | 68 | # The following methods (with the possible exception of test) have to be implemented in the derived classes 69 | def train(self, epoch): 70 | raise NotImplementedError('You need to provide a train method') 71 | 72 | def init_fn(self): 73 | raise NotImplementedError('You need to provide an init_fn method') 74 | 75 | def train_step(self, input_batch): 76 | raise NotImplementedError('You need to provide a train_step method') 77 | 78 | def train_summaries(self, input_batch): 79 | raise NotImplementedError('You need to provide a train_summaries method') 80 | 81 | def visualize(self, input_batch): 82 | raise NotImplementedError('You need to provide a visualize method') 83 | 84 | def validate(self): 85 | pass 86 | 87 | def test(self): 88 | pass 89 | 90 | def evaluate(self): 91 | pass 92 | 93 | def fit(self): 94 | # Run training for num_epochs epochs 95 | for epoch in tqdm( 96 | range(self.epoch_count, self.options.num_epochs), 97 | total=self.options.num_epochs, 98 | initial=self.epoch_count 99 | ): 100 | self.epoch_count = epoch 101 | self.train(epoch) 102 | return 103 | -------------------------------------------------------------------------------- /lib/pymaf/core/cfgs.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved.
14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | import os 18 | import json 19 | from yacs.config import CfgNode as CN 20 | 21 | # Configuration variables 22 | cfg = CN(new_allowed=True) 23 | 24 | cfg.OUTPUT_DIR = 'results' 25 | cfg.DEVICE = 'cuda' 26 | cfg.DEBUG = False 27 | cfg.LOGDIR = '' 28 | cfg.VAL_VIS_BATCH_FREQ = 200 29 | cfg.TRAIN_VIS_ITER_FERQ = 1000 30 | cfg.SEED_VALUE = -1 31 | 32 | cfg.TRAIN = CN(new_allowed=True) 33 | 34 | cfg.LOSS = CN(new_allowed=True) 35 | cfg.LOSS.KP_2D_W = 300.0 36 | cfg.LOSS.KP_3D_W = 300.0 37 | cfg.LOSS.SHAPE_W = 0.06 38 | cfg.LOSS.POSE_W = 60.0 39 | cfg.LOSS.VERT_W = 0.0 40 | 41 | # Loss weights for dense correspondences 42 | cfg.LOSS.INDEX_WEIGHTS = 2.0 43 | # Loss weights for surface parts. (24 Parts) 44 | cfg.LOSS.PART_WEIGHTS = 0.3 45 | # Loss weights for UV regression. 46 | cfg.LOSS.POINT_REGRESSION_WEIGHTS = 0.5 47 | 48 | cfg.MODEL = CN(new_allowed=True) 49 | 50 | cfg.MODEL.PyMAF = CN(new_allowed=True) 51 | 52 | # switch 53 | cfg.TRAIN.VAL_LOOP = True 54 | 55 | cfg.TEST = CN(new_allowed=True) 56 | 57 | 58 | def get_cfg_defaults(): 59 | """Get a yacs CfgNode object with default values for my_project.""" 60 | # Return a clone so that the defaults will not be altered 61 | # This is for the "local variable" use pattern 62 | # return cfg.clone() 63 | return cfg 64 | 65 | 66 | def update_cfg(cfg_file): 67 | # cfg = get_cfg_defaults() 68 | cfg.merge_from_file(cfg_file) 69 | # return cfg.clone() 70 | return cfg 71 | 72 | 73 | def parse_args(args): 74 | cfg_file = args.cfg_file 75 | if args.cfg_file is not None: 76 | cfg = update_cfg(args.cfg_file) 77 | else: 78 | cfg = get_cfg_defaults() 79 | 80 | # if args.misc is not None: 81 | # cfg.merge_from_list(args.misc) 82 | 83 | return cfg 84 | 85 | 86 | def parse_args_extend(args): 87 | if args.resume: 88 | if not os.path.exists(args.log_dir): 89 | raise ValueError('Experiment is set to resume mode, but the log directory does not exist.') 90 | 91 | # load log's cfg 92 | cfg_file = os.path.join(args.log_dir, 'cfg.yaml') 93 | cfg = update_cfg(cfg_file) 94 | 95 | if args.misc is not None: 96 | cfg.merge_from_list(args.misc) 97 | else: 98 | parse_args(args) 99 | -------------------------------------------------------------------------------- /lib/pymaf/core/constants.py: -------------------------------------------------------------------------------- 1 | # This script is borrowed and extended from https://github.com/nkolot/SPIN/blob/master/constants.py 2 | FOCAL_LENGTH = 5000. 3 | IMG_RES = 224 4 | 5 | # Mean and standard deviation for normalizing input image 6 | IMG_NORM_MEAN = [0.485, 0.456, 0.406] 7 | IMG_NORM_STD = [0.229, 0.224, 0.225] 8 | """ 9 | We create a superset of joints containing the OpenPose joints together with the ones that each dataset provides. 10 | We keep a superset of 24 joints such that we include all joints from every dataset. 11 | If a dataset doesn't provide annotations for a specific joint, we simply ignore it.
12 | The joints used here are the following: 13 | """ 14 | JOINT_NAMES = [ 15 | # 25 OpenPose joints (in the order provided by OpenPose) 16 | 'OP Nose', 17 | 'OP Neck', 18 | 'OP RShoulder', 19 | 'OP RElbow', 20 | 'OP RWrist', 21 | 'OP LShoulder', 22 | 'OP LElbow', 23 | 'OP LWrist', 24 | 'OP MidHip', 25 | 'OP RHip', 26 | 'OP RKnee', 27 | 'OP RAnkle', 28 | 'OP LHip', 29 | 'OP LKnee', 30 | 'OP LAnkle', 31 | 'OP REye', 32 | 'OP LEye', 33 | 'OP REar', 34 | 'OP LEar', 35 | 'OP LBigToe', 36 | 'OP LSmallToe', 37 | 'OP LHeel', 38 | 'OP RBigToe', 39 | 'OP RSmallToe', 40 | 'OP RHeel', 41 | # 24 Ground Truth joints (superset of joints from different datasets) 42 | 'Right Ankle', 43 | 'Right Knee', 44 | 'Right Hip', # 2 45 | 'Left Hip', 46 | 'Left Knee', # 4 47 | 'Left Ankle', 48 | 'Right Wrist', # 6 49 | 'Right Elbow', 50 | 'Right Shoulder', # 8 51 | 'Left Shoulder', 52 | 'Left Elbow', # 10 53 | 'Left Wrist', 54 | 'Neck (LSP)', # 12 55 | 'Top of Head (LSP)', 56 | 'Pelvis (MPII)', # 14 57 | 'Thorax (MPII)', 58 | 'Spine (H36M)', # 16 59 | 'Jaw (H36M)', 60 | 'Head (H36M)', # 18 61 | 'Nose', 62 | 'Left Eye', 63 | 'Right Eye', 64 | 'Left Ear', 65 | 'Right Ear' 66 | ] 67 | 68 | # Dict containing the joints in numerical order 69 | JOINT_IDS = {JOINT_NAMES[i]: i for i in range(len(JOINT_NAMES))} 70 | 71 | # Map joints to SMPL joints 72 | JOINT_MAP = { 73 | 'OP Nose': 24, 74 | 'OP Neck': 12, 75 | 'OP RShoulder': 17, 76 | 'OP RElbow': 19, 77 | 'OP RWrist': 21, 78 | 'OP LShoulder': 16, 79 | 'OP LElbow': 18, 80 | 'OP LWrist': 20, 81 | 'OP MidHip': 0, 82 | 'OP RHip': 2, 83 | 'OP RKnee': 5, 84 | 'OP RAnkle': 8, 85 | 'OP LHip': 1, 86 | 'OP LKnee': 4, 87 | 'OP LAnkle': 7, 88 | 'OP REye': 25, 89 | 'OP LEye': 26, 90 | 'OP REar': 27, 91 | 'OP LEar': 28, 92 | 'OP LBigToe': 29, 93 | 'OP LSmallToe': 30, 94 | 'OP LHeel': 31, 95 | 'OP RBigToe': 32, 96 | 'OP RSmallToe': 33, 97 | 'OP RHeel': 34, 98 | 'Right Ankle': 8, 99 | 'Right Knee': 5, 100 | 'Right Hip': 45, 101 | 'Left Hip': 46, 102 | 'Left Knee': 4, 103 | 'Left Ankle': 7, 104 | 'Right Wrist': 21, 105 | 'Right Elbow': 19, 106 | 'Right Shoulder': 17, 107 | 'Left Shoulder': 16, 108 | 'Left Elbow': 18, 109 | 'Left Wrist': 20, 110 | 'Neck (LSP)': 47, 111 | 'Top of Head (LSP)': 48, 112 | 'Pelvis (MPII)': 49, 113 | 'Thorax (MPII)': 50, 114 | 'Spine (H36M)': 51, 115 | 'Jaw (H36M)': 52, 116 | 'Head (H36M)': 53, 117 | 'Nose': 24, 118 | 'Left Eye': 26, 119 | 'Right Eye': 25, 120 | 'Left Ear': 28, 121 | 'Right Ear': 27 122 | } 123 | 124 | # Joint selectors 125 | # Indices to get the 14 LSP joints from the 17 H36M joints 126 | H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9] 127 | H36M_TO_J14 = H36M_TO_J17[:14] 128 | # Indices to get the 14 LSP joints from the ground truth joints 129 | J24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17] 130 | J24_TO_J14 = J24_TO_J17[:14] 131 | J24_TO_J19 = J24_TO_J17[:14] + [19, 20, 21, 22, 23] 132 | J24_TO_JCOCO = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0] 133 | 134 | # Permutation of SMPL pose parameters when flipping the shape 135 | SMPL_JOINTS_FLIP_PERM = [ 136 | 0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22 137 | ] 138 | SMPL_POSE_FLIP_PERM = [] 139 | for i in SMPL_JOINTS_FLIP_PERM: 140 | SMPL_POSE_FLIP_PERM.append(3 * i) 141 | SMPL_POSE_FLIP_PERM.append(3 * i + 1) 142 | SMPL_POSE_FLIP_PERM.append(3 * i + 2) 143 | # Permutation indices for the 24 ground truth joints 144 | J24_FLIP_PERM = [ 145 | 5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 
16, 17, 18, 19, 21, 20, 23, 22 146 | ] 147 | # Permutation indices for the full set of 49 joints 148 | J49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\ 149 | + [25+i for i in J24_FLIP_PERM] 150 | SMPL_J49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\ 151 | + [25+i for i in SMPL_JOINTS_FLIP_PERM] 152 | -------------------------------------------------------------------------------- /lib/pymaf/core/path_config.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script is borrowed and extended from https://github.com/nkolot/SPIN/blob/master/path_config.py 3 | path configuration 4 | This file contains definitions of useful data structures and the paths 5 | for the datasets and data files necessary to run the code. 6 | Things you need to change: the *_ROOT entries that indicate the path to each dataset 7 | """ 8 | import os 9 | 10 | # pymaf 11 | pymaf_data_dir = os.path.join(os.path.dirname(__file__), "../../../data/HPS/pymaf_data") 12 | 13 | SMPL_MEAN_PARAMS = os.path.join(pymaf_data_dir, "smpl_mean_params.npz") 14 | SMPL_MODEL_DIR = os.path.join(pymaf_data_dir, "../../smpl_related/models/smpl") 15 | MESH_DOWNSAMPLEING = os.path.join(pymaf_data_dir, "mesh_downsampling.npz") 16 | 17 | CUBE_PARTS_FILE = os.path.join(pymaf_data_dir, "cube_parts.npy") 18 | JOINT_REGRESSOR_TRAIN_EXTRA = os.path.join(pymaf_data_dir, "J_regressor_extra.npy") 19 | JOINT_REGRESSOR_H36M = os.path.join(pymaf_data_dir, "J_regressor_h36m.npy") 20 | VERTEX_TEXTURE_FILE = os.path.join(pymaf_data_dir, "vertex_texture.npy") 21 | SMPL_MEAN_PARAMS = os.path.join(pymaf_data_dir, "smpl_mean_params.npz") 22 | CHECKPOINT_FILE = os.path.join(pymaf_data_dir, "pretrained_model/PyMAF_model_checkpoint.pt") 23 | 24 | # pare 25 | pare_data_dir = os.path.join(os.path.dirname(__file__), "../../../data/HPS/pare_data") 26 | CFG = os.path.join(pare_data_dir, "pare/checkpoints/pare_w_3dpw_config.yaml") 27 | CKPT = os.path.join(pare_data_dir, "pare/checkpoints/pare_w_3dpw_checkpoint.ckpt") 28 | 29 | # hybrik 30 | hybrik_data_dir = os.path.join(os.path.dirname(__file__), "../../../data/HPS/hybrik_data") 31 | HYBRIK_CFG = os.path.join(hybrik_data_dir, "hybrik_config.yaml") 32 | HYBRIK_CKPT = os.path.join(hybrik_data_dir, "pretrained_w_cam.pth") 33 | -------------------------------------------------------------------------------- /lib/pymaf/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .hmr import hmr 2 | from .pymaf_net import pymaf_net 3 | from .smpl import SMPL 4 | -------------------------------------------------------------------------------- /lib/pymaf/models/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/models/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/models/__pycache__/hmr.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/models/__pycache__/hmr.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/models/__pycache__/maf_extractor.cpython-38.pyc:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/models/__pycache__/maf_extractor.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/models/__pycache__/pymaf_net.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/models/__pycache__/pymaf_net.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/models/__pycache__/res_module.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/models/__pycache__/res_module.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/models/__pycache__/smpl.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/models/__pycache__/smpl.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/models/smpl.py: -------------------------------------------------------------------------------- 1 | # This script is borrowed from https://github.com/nkolot/SPIN/blob/master/models/smpl.py 2 | 3 | import torch 4 | import numpy as np 5 | from lib.smplx import SMPL as _SMPL 6 | from lib.smplx.body_models import ModelOutput 7 | from lib.smplx.lbs import vertices2joints 8 | from collections import namedtuple 9 | 10 | from lib.pymaf.core import path_config, constants 11 | 12 | SMPL_MEAN_PARAMS = path_config.SMPL_MEAN_PARAMS 13 | SMPL_MODEL_DIR = path_config.SMPL_MODEL_DIR 14 | 15 | # Indices to get the 14 LSP joints from the 17 H36M joints 16 | H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9] 17 | H36M_TO_J14 = H36M_TO_J17[:14] 18 | 19 | 20 | class SMPL(_SMPL): 21 | """ Extension of the official SMPL implementation to support more joints """ 22 | def __init__(self, *args, **kwargs): 23 | super().__init__(*args, **kwargs) 24 | joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES] 25 | J_regressor_extra = np.load(path_config.JOINT_REGRESSOR_TRAIN_EXTRA) 26 | self.register_buffer( 27 | 'J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32) 28 | ) 29 | self.joint_map = torch.tensor(joints, dtype=torch.long) 30 | self.ModelOutput = namedtuple( 31 | 'ModelOutput_', ModelOutput._fields + ( 32 | 'smpl_joints', 33 | 'joints_J19', 34 | ) 35 | ) 36 | self.ModelOutput.__new__.__defaults__ = (None, ) * len(self.ModelOutput._fields) 37 | 38 | def forward(self, *args, **kwargs): 39 | kwargs['get_skin'] = True 40 | smpl_output = super().forward(*args, **kwargs) 41 | extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices) 42 | # smpl_output.joints: [B, 45, 3] extra_joints: [B, 9, 3] 43 | vertices = smpl_output.vertices 44 | joints = torch.cat([smpl_output.joints, extra_joints], dim=1) 45 | smpl_joints = smpl_output.joints[:, :24] 46 | joints = joints[:, self.joint_map, :] # [B, 49, 3] 47 | joints_J24 = joints[:, -24:, :] 48 | joints_J19 = joints_J24[:, constants.J24_TO_J19, :] 49 | output = 
self.ModelOutput( 50 | vertices=vertices, 51 | global_orient=smpl_output.global_orient, 52 | body_pose=smpl_output.body_pose, 53 | joints=joints, 54 | joints_J19=joints_J19, 55 | smpl_joints=smpl_joints, 56 | betas=smpl_output.betas, 57 | full_pose=smpl_output.full_pose 58 | ) 59 | return output 60 | 61 | 62 | def get_smpl_faces(): 63 | smpl = SMPL(SMPL_MODEL_DIR, batch_size=1, create_transl=False) 64 | return smpl.faces 65 | 66 | 67 | def get_part_joints(smpl_joints): 68 | batch_size = smpl_joints.shape[0] 69 | 70 | # part_joints = torch.zeros().to(smpl_joints.device) 71 | 72 | one_seg_pairs = [ 73 | (0, 1), (0, 2), (0, 3), (3, 6), (9, 12), (9, 13), (9, 14), (12, 15), (13, 16), (14, 17) 74 | ] 75 | two_seg_pairs = [(1, 4), (2, 5), (4, 7), (5, 8), (16, 18), (17, 19), (18, 20), (19, 21)] 76 | 77 | one_seg_pairs.extend(two_seg_pairs) 78 | 79 | single_joints = [10, 11, 15, 22, 23] 80 | 81 | part_joints = [] 82 | 83 | for j_p in one_seg_pairs: 84 | new_joint = torch.mean(smpl_joints[:, j_p], dim=1, keepdim=True) 85 | part_joints.append(new_joint) 86 | 87 | for j_p in single_joints: 88 | part_joints.append(smpl_joints[:, j_p:j_p + 1]) 89 | 90 | part_joints = torch.cat(part_joints, dim=1) 91 | 92 | return part_joints 93 | -------------------------------------------------------------------------------- /lib/pymaf/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/utils/__init__.py -------------------------------------------------------------------------------- /lib/pymaf/utils/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/utils/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/utils/__pycache__/geometry.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/utils/__pycache__/geometry.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/utils/__pycache__/imutils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/utils/__pycache__/imutils.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/utils/__pycache__/streamer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/pymaf/utils/__pycache__/streamer.cpython-38.pyc -------------------------------------------------------------------------------- /lib/pymaf/utils/streamer.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import torch 3 | import numpy as np 4 | import imageio 5 | 6 | 7 | def aug_matrix(w1, h1, w2, h2, pad=True): 8 | dx = (w2 - w1) / 2.0 9 | dy = (h2 - h1) / 2.0 10 | 11 | matrix_trans = np.array([[1.0, 0, dx], [0, 1.0, dy], [0, 0, 1.0]]) 12 | 13 | scale = np.min([float(w2) / w1, float(h2) / h1]) if pad else np.max([float(w2) / w1, float(h2) / h1])  # pad: fit inside (letterbox) vs fill (may crop) 14 | 15 | M = 
get_affine_matrix(center=(w2 / 2.0, h2 / 2.0), translate=(0, 0), scale=scale) 16 | 17 | M = np.array(M + [0., 0., 1.]).reshape(3, 3) 18 | M = M.dot(matrix_trans) 19 | 20 | return M 21 | 22 | 23 | def get_affine_matrix(center, translate, scale): 24 | cx, cy = center 25 | tx, ty = translate 26 | 27 | M = [1, 0, 0, 0, 1, 0] 28 | M = [x * scale for x in M] 29 | 30 | # Apply translation and of center translation: RSS * C^-1 31 | M[2] += M[0] * (-cx) + M[1] * (-cy) 32 | M[5] += M[3] * (-cx) + M[4] * (-cy) 33 | 34 | # Apply center translation: T * C * RSS * C^-1 35 | M[2] += cx + tx 36 | M[5] += cy + ty 37 | return M 38 | 39 | 40 | class BaseStreamer(): 41 | """This streamer will return images at 512x512 size. 42 | """ 43 | def __init__( 44 | self, width=512, height=512, pad=True, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), **kwargs 45 | ): 46 | self.width = width 47 | self.height = height 48 | self.pad = pad 49 | self.mean = np.array(mean) 50 | self.std = np.array(std) 51 | 52 | self.loader = self.create_loader() 53 | 54 | def create_loader(self): 55 | raise NotImplementedError 56 | yield np.zeros((600, 400, 3)) # in RGB (0, 255) 57 | 58 | def __getitem__(self, index): 59 | image = next(self.loader) 60 | in_height, in_width, _ = image.shape 61 | M = aug_matrix(in_width, in_height, self.width, self.height, self.pad) 62 | image = cv2.warpAffine(image, M[0:2, :], (self.width, self.height), flags=cv2.INTER_CUBIC) 63 | 64 | input = np.float32(image) 65 | input = (input / 255.0 - self.mean) / self.std # TO [-1.0, 1.0] 66 | input = input.transpose(2, 0, 1) # TO [3 x H x W] 67 | return torch.from_numpy(input).float() 68 | 69 | def __len__(self): 70 | raise NotImplementedError 71 | 72 | 73 | class CaptureStreamer(BaseStreamer): 74 | """This streamer takes webcam as input. 75 | """ 76 | def __init__(self, id=0, width=512, height=512, pad=True, **kwargs): 77 | super().__init__(width, height, pad, **kwargs) 78 | self.capture = cv2.VideoCapture(id) 79 | 80 | def create_loader(self): 81 | while True: 82 | _, image = self.capture.read() 83 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # RGB 84 | yield image 85 | 86 | def __len__(self): 87 | return 100_000_000 88 | 89 | def __del__(self): 90 | self.capture.release() 91 | 92 | 93 | class VideoListStreamer(BaseStreamer): 94 | """This streamer takes a list of video files as input. 95 | """ 96 | def __init__(self, files, width=512, height=512, pad=True, **kwargs): 97 | super().__init__(width, height, pad, **kwargs) 98 | self.files = files 99 | self.captures = [imageio.get_reader(f) for f in files] 100 | self.nframes = sum([int(cap._meta["fps"] * cap._meta["duration"]) for cap in self.captures]) 101 | 102 | def create_loader(self): 103 | for capture in self.captures: 104 | for image in capture: # RGB 105 | yield image 106 | 107 | def __len__(self): 108 | return self.nframes 109 | 110 | def __del__(self): 111 | for capture in self.captures: 112 | capture.close() 113 | 114 | 115 | class ImageListStreamer(BaseStreamer): 116 | """This streamer takes a list of image files as input. 
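Files are read with cv2.imread (BGR) and converted to RGB before the base-class crop and normalization.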
117 | """ 118 | def __init__(self, files, width=512, height=512, pad=True, **kwargs): 119 | super().__init__(width, height, pad, **kwargs) 120 | self.files = files 121 | 122 | def create_loader(self): 123 | for f in self.files: 124 | image = cv2.imread(f, cv2.IMREAD_UNCHANGED)[:, :, 0:3] 125 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # RGB 126 | yield image 127 | 128 | def __len__(self): 129 | return len(self.files) 130 | -------------------------------------------------------------------------------- /lib/pymaf/utils/transforms.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import cv2 12 | import numpy as np 13 | 14 | 15 | def transform_preds(coords, center, scale, output_size): 16 | target_coords = np.zeros(coords.shape) 17 | trans = get_affine_transform(center, scale, 0, output_size, inv=1) 18 | for p in range(coords.shape[0]): 19 | target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans) 20 | return target_coords 21 | 22 | 23 | def get_affine_transform( 24 | center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0 25 | ): 26 | if not isinstance(scale, np.ndarray) and not isinstance(scale, list): 27 | # print(scale) 28 | scale = np.array([scale, scale]) 29 | 30 | scale_tmp = scale * 200.0 31 | src_w = scale_tmp[0] 32 | dst_w = output_size[0] 33 | dst_h = output_size[1] 34 | 35 | rot_rad = np.pi * rot / 180 36 | src_dir = get_dir([0, src_w * -0.5], rot_rad) 37 | dst_dir = np.array([0, dst_w * -0.5], np.float32) 38 | 39 | src = np.zeros((3, 2), dtype=np.float32) 40 | dst = np.zeros((3, 2), dtype=np.float32) 41 | src[0, :] = center + scale_tmp * shift 42 | src[1, :] = center + src_dir + scale_tmp * shift 43 | dst[0, :] = [dst_w * 0.5, dst_h * 0.5] 44 | dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir 45 | 46 | src[2:, :] = get_3rd_point(src[0, :], src[1, :]) 47 | dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) 48 | 49 | if inv: 50 | trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) 51 | else: 52 | trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) 53 | 54 | return trans 55 | 56 | 57 | def affine_transform(pt, t): 58 | new_pt = np.array([pt[0], pt[1], 1.]).T 59 | new_pt = np.dot(t, new_pt) 60 | return new_pt[:2] 61 | 62 | 63 | def get_3rd_point(a, b): 64 | direct = a - b 65 | return b + np.array([-direct[1], direct[0]], dtype=np.float32) 66 | 67 | 68 | def get_dir(src_point, rot_rad): 69 | sn, cs = np.sin(rot_rad), np.cos(rot_rad) 70 | 71 | src_result = [0, 0] 72 | src_result[0] = src_point[0] * cs - src_point[1] * sn 73 | src_result[1] = src_point[0] * sn + src_point[1] * cs 74 | 75 | return src_result 76 | -------------------------------------------------------------------------------- /lib/renderer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/__init__.py -------------------------------------------------------------------------------- /lib/renderer/__pycache__/__init__.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/__pycache__/camera.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/__pycache__/camera.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/__pycache__/glm.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/__pycache__/glm.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/__pycache__/mesh.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/__pycache__/mesh.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/__pycache__/opengl_util.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/__pycache__/opengl_util.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/__pycache__/prt_util.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/__pycache__/prt_util.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/gl/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/gl/__init__.py -------------------------------------------------------------------------------- /lib/renderer/gl/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/gl/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/gl/__pycache__/cam_render.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/gl/__pycache__/cam_render.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/gl/__pycache__/color_render.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/gl/__pycache__/color_render.cpython-38.pyc -------------------------------------------------------------------------------- 
/lib/renderer/gl/__pycache__/framework.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/gl/__pycache__/framework.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/gl/__pycache__/glcontext.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/gl/__pycache__/glcontext.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/gl/__pycache__/init_gl.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/gl/__pycache__/init_gl.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/gl/__pycache__/prt_render.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/gl/__pycache__/prt_render.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/gl/__pycache__/render.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/lib/renderer/gl/__pycache__/render.cpython-38.pyc -------------------------------------------------------------------------------- /lib/renderer/gl/cam_render.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 
14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | from .render import Render 18 | 19 | GLUT = None 20 | 21 | 22 | class CamRender(Render): 23 | def __init__( 24 | self, 25 | width=1600, 26 | height=1200, 27 | name='Cam Renderer', 28 | program_files=['simple.fs', 'simple.vs'], 29 | color_size=1, 30 | ms_rate=1, 31 | egl=False 32 | ): 33 | Render.__init__( 34 | self, width, height, name, program_files, color_size, ms_rate=ms_rate, egl=egl 35 | ) 36 | self.camera = None 37 | 38 | if not egl: 39 | global GLUT 40 | import OpenGL.GLUT as GLUT 41 | GLUT.glutDisplayFunc(self.display) 42 | GLUT.glutKeyboardFunc(self.keyboard) 43 | 44 | def set_camera(self, camera): 45 | self.camera = camera 46 | self.projection_matrix, self.model_view_matrix = camera.get_gl_matrix() 47 | 48 | def keyboard(self, key, x, y): 49 | # up 50 | eps = 1 51 | # print(key) 52 | if key == b'w': 53 | self.camera.center += eps * self.camera.direction 54 | elif key == b's': 55 | self.camera.center -= eps * self.camera.direction 56 | if key == b'a': 57 | self.camera.center -= eps * self.camera.right 58 | elif key == b'd': 59 | self.camera.center += eps * self.camera.right 60 | if key == b' ': 61 | self.camera.center += eps * self.camera.up 62 | elif key == b'x': 63 | self.camera.center -= eps * self.camera.up 64 | elif key == b'i': 65 | self.camera.near += 0.1 * eps 66 | self.camera.far += 0.1 * eps 67 | elif key == b'o': 68 | self.camera.near -= 0.1 * eps 69 | self.camera.far -= 0.1 * eps 70 | 71 | self.projection_matrix, self.model_view_matrix = self.camera.get_gl_matrix() 72 | 73 | def show(self): 74 | if GLUT is not None: 75 | GLUT.glutMainLoop() 76 | -------------------------------------------------------------------------------- /lib/renderer/gl/data/color.fs: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | layout (location = 0) out vec4 FragColor; 4 | layout (location = 1) out vec4 FragNormal; 5 | layout (location = 2) out vec4 FragDepth; 6 | 7 | in vec3 Color; 8 | in vec3 CamNormal; 9 | in vec3 depth; 10 | 11 | 12 | void main() 13 | { 14 | FragColor = vec4(Color,1.0); 15 | 16 | vec3 cam_norm_normalized = normalize(CamNormal); 17 | vec3 rgb = (cam_norm_normalized + 1.0) / 2.0; 18 | FragNormal = vec4(rgb, 1.0); 19 | FragDepth = vec4(depth.xyz, 1.0); 20 | } 21 | -------------------------------------------------------------------------------- /lib/renderer/gl/data/color.vs: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | layout (location = 0) in vec3 a_Position; 4 | layout (location = 1) in vec3 a_Color; 5 | layout (location = 2) in vec3 a_Normal; 6 | 7 | out vec3 CamNormal; 8 | out vec3 CamPos; 9 | out vec3 Color; 10 | out vec3 depth; 11 | 12 | 13 | uniform mat3 RotMat; 14 | uniform mat4 NormMat; 15 | uniform mat4 ModelMat; 16 | uniform mat4 PerspMat; 17 | 18 | void main() 19 | { 20 | vec3 a_Position = (NormMat * vec4(a_Position,1.0)).xyz; 21 | gl_Position = PerspMat * ModelMat * vec4(RotMat * a_Position, 1.0); 22 | Color = a_Color; 23 | 24 | mat3 R = mat3(ModelMat) * RotMat; 25 | CamNormal = (R * a_Normal); 26 | 27 | depth = vec3(gl_Position.z / gl_Position.w); 28 | 29 | } -------------------------------------------------------------------------------- /lib/renderer/gl/data/normal.fs: -------------------------------------------------------------------------------- 1 | #version 330 2 | 3 | out vec4 FragColor; 4 | 5 | in vec3 CamNormal; 6 | 7 | void main() 8 | { 9 | vec3 cam_norm_normalized 
= normalize(CamNormal); 10 | vec3 rgb = (cam_norm_normalized + 1.0) / 2.0; 11 | FragColor = vec4(rgb, 1.0); 12 | } -------------------------------------------------------------------------------- /lib/renderer/gl/data/normal.vs: -------------------------------------------------------------------------------- 1 | #version 330 2 | 3 | layout (location = 0) in vec3 Position; 4 | layout (location = 1) in vec3 Normal; 5 | 6 | out vec3 CamNormal; 7 | 8 | uniform mat4 ModelMat; 9 | uniform mat4 PerspMat; 10 | 11 | void main() 12 | { 13 | gl_Position = PerspMat * ModelMat * vec4(Position, 1.0); 14 | CamNormal = (ModelMat * vec4(Normal, 0.0)).xyz; 15 | } -------------------------------------------------------------------------------- /lib/renderer/gl/data/prt.fs: -------------------------------------------------------------------------------- 1 | #version 330 2 | 3 | uniform vec3 SHCoeffs[9]; 4 | uniform uint analytic; 5 | 6 | uniform uint hasNormalMap; 7 | uniform uint hasAlbedoMap; 8 | 9 | uniform sampler2D AlbedoMap; 10 | uniform sampler2D NormalMap; 11 | 12 | in VertexData { 13 | vec3 Position; 14 | vec3 Depth; 15 | vec3 ModelNormal; 16 | vec2 Texcoord; 17 | vec3 Tangent; 18 | vec3 Bitangent; 19 | vec3 PRT1; 20 | vec3 PRT2; 21 | vec3 PRT3; 22 | vec3 Label; 23 | } VertexIn; 24 | 25 | layout (location = 0) out vec4 FragColor; 26 | layout (location = 1) out vec4 FragNormal; 27 | layout (location = 2) out vec4 FragPosition; 28 | layout (location = 3) out vec4 FragAlbedo; 29 | layout (location = 4) out vec4 FragShading; 30 | layout (location = 5) out vec4 FragPRT1; 31 | layout (location = 6) out vec4 FragPRT2; 32 | // layout (location = 7) out vec4 FragPRT3; 33 | layout (location = 7) out vec4 FragLabel; 34 | 35 | 36 | vec4 gammaCorrection(vec4 vec, float g) 37 | { 38 | return vec4(pow(vec.x, 1.0/g), pow(vec.y, 1.0/g), pow(vec.z, 1.0/g), vec.w); 39 | } 40 | 41 | vec3 gammaCorrection(vec3 vec, float g) 42 | { 43 | return vec3(pow(vec.x, 1.0/g), pow(vec.y, 1.0/g), pow(vec.z, 1.0/g)); 44 | } 45 | 46 | void evaluateH(vec3 n, out float H[9]) 47 | { 48 | float c1 = 0.429043, c2 = 0.511664, 49 | c3 = 0.743125, c4 = 0.886227, c5 = 0.247708; 50 | 51 | H[0] = c4; 52 | H[1] = 2.0 * c2 * n[1]; 53 | H[2] = 2.0 * c2 * n[2]; 54 | H[3] = 2.0 * c2 * n[0]; 55 | H[4] = 2.0 * c1 * n[0] * n[1]; 56 | H[5] = 2.0 * c1 * n[1] * n[2]; 57 | H[6] = c3 * n[2] * n[2] - c5; 58 | H[7] = 2.0 * c1 * n[2] * n[0]; 59 | H[8] = c1 * (n[0] * n[0] - n[1] * n[1]); 60 | } 61 | 62 | vec3 evaluateLightingModel(vec3 normal) 63 | { 64 | float H[9]; 65 | evaluateH(normal, H); 66 | vec3 res = vec3(0.0); 67 | for (int i = 0; i < 9; i++) { 68 | res += H[i] * SHCoeffs[i]; 69 | } 70 | return res; 71 | } 72 | 73 | // nC: coarse geometry normal, nH: fine normal from normal map 74 | vec3 evaluateLightingModelHybrid(vec3 nC, vec3 nH, mat3 prt) 75 | { 76 | float HC[9], HH[9]; 77 | evaluateH(nC, HC); 78 | evaluateH(nH, HH); 79 | 80 | vec3 res = vec3(0.0); 81 | vec3 shadow = vec3(0.0); 82 | vec3 unshadow = vec3(0.0); 83 | for(int i = 0; i < 3; ++i){ 84 | for(int j = 0; j < 3; ++j){ 85 | int id = i*3+j; 86 | res += HH[id]* SHCoeffs[id]; 87 | shadow += prt[i][j] * SHCoeffs[id]; 88 | unshadow += HC[id] * SHCoeffs[id]; 89 | } 90 | } 91 | vec3 ratio = clamp(shadow/unshadow,0.0,1.0); 92 | res = ratio * res; 93 | 94 | return res; 95 | } 96 | 97 | vec3 evaluateLightingModelPRT(mat3 prt) 98 | { 99 | vec3 res = vec3(0.0); 100 | for(int i = 0; i < 3; ++i){ 101 | for(int j = 0; j < 3; ++j){ 102 | res += prt[i][j] * SHCoeffs[i*3+j]; 103 | } 104 | } 105 | 106 | return 
res; 107 | } 108 | 109 | void main() 110 | { 111 | vec2 uv = VertexIn.Texcoord; 112 | vec3 nC = normalize(VertexIn.ModelNormal); 113 | vec3 nml = nC; 114 | mat3 prt = mat3(VertexIn.PRT1, VertexIn.PRT2, VertexIn.PRT3); 115 | 116 | if(hasAlbedoMap == uint(0)) 117 | FragAlbedo = vec4(1.0); 118 | else 119 | FragAlbedo = texture(AlbedoMap, uv);//gammaCorrection(texture(AlbedoMap, uv), 1.0/2.2); 120 | 121 | if(hasNormalMap == uint(0)) 122 | { 123 | if(analytic == uint(0)) 124 | FragShading = vec4(evaluateLightingModelPRT(prt), 1.0f); 125 | else 126 | FragShading = vec4(evaluateLightingModel(nC), 1.0f); 127 | } 128 | else 129 | { 130 | vec3 n_tan = normalize(texture(NormalMap, uv).rgb*2.0-vec3(1.0)); 131 | 132 | mat3 TBN = mat3(normalize(VertexIn.Tangent),normalize(VertexIn.Bitangent),nC); 133 | vec3 nH = normalize(TBN * n_tan); 134 | 135 | if(analytic == uint(0)) 136 | FragShading = vec4(evaluateLightingModelHybrid(nC,nH,prt),1.0f); 137 | else 138 | FragShading = vec4(evaluateLightingModel(nH), 1.0f); 139 | 140 | nml = nH; 141 | } 142 | 143 | FragShading = gammaCorrection(FragShading, 2.2); 144 | FragColor = clamp(FragAlbedo * FragShading, 0.0, 1.0); 145 | FragNormal = vec4(0.5*(nml+vec3(1.0)), 1.0); 146 | FragPosition = vec4(VertexIn.Depth.xyz, 1.0); 147 | FragShading = vec4(clamp(0.5*FragShading.xyz, 0.0, 1.0),1.0); 148 | // FragColor = gammaCorrection(clamp(FragAlbedo * FragShading, 0.0, 1.0),2.2); 149 | // FragNormal = vec4(0.5*(nml+vec3(1.0)), 1.0); 150 | // FragPosition = vec4(VertexIn.Position,VertexIn.Depth.x); 151 | // FragShading = vec4(gammaCorrection(clamp(0.5*FragShading.xyz, 0.0, 1.0),2.2),1.0); 152 | // FragAlbedo = gammaCorrection(FragAlbedo,2.2); 153 | FragPRT1 = vec4(VertexIn.PRT1,1.0); 154 | FragPRT2 = vec4(VertexIn.PRT2,1.0); 155 | // FragPRT3 = vec4(VertexIn.PRT3,1.0); 156 | FragLabel = vec4(VertexIn.Label,1.0); 157 | } -------------------------------------------------------------------------------- /lib/renderer/gl/data/prt_uv.fs: -------------------------------------------------------------------------------- 1 | #version 330 2 | 3 | uniform vec3 SHCoeffs[9]; 4 | uniform uint analytic; 5 | 6 | uniform uint hasNormalMap; 7 | uniform uint hasAlbedoMap; 8 | 9 | uniform sampler2D AlbedoMap; 10 | uniform sampler2D NormalMap; 11 | 12 | in VertexData { 13 | vec3 Position; 14 | vec3 ModelNormal; 15 | vec3 CameraNormal; 16 | vec2 Texcoord; 17 | vec3 Tangent; 18 | vec3 Bitangent; 19 | vec3 PRT1; 20 | vec3 PRT2; 21 | vec3 PRT3; 22 | } VertexIn; 23 | 24 | layout (location = 0) out vec4 FragColor; 25 | layout (location = 1) out vec4 FragPosition; 26 | layout (location = 2) out vec4 FragNormal; 27 | 28 | vec4 gammaCorrection(vec4 vec, float g) 29 | { 30 | return vec4(pow(vec.x, 1.0/g), pow(vec.y, 1.0/g), pow(vec.z, 1.0/g), vec.w); 31 | } 32 | 33 | vec3 gammaCorrection(vec3 vec, float g) 34 | { 35 | return vec3(pow(vec.x, 1.0/g), pow(vec.y, 1.0/g), pow(vec.z, 1.0/g)); 36 | } 37 | 38 | void evaluateH(vec3 n, out float H[9]) 39 | { 40 | float c1 = 0.429043, c2 = 0.511664, 41 | c3 = 0.743125, c4 = 0.886227, c5 = 0.247708; 42 | 43 | H[0] = c4; 44 | H[1] = 2.0 * c2 * n[1]; 45 | H[2] = 2.0 * c2 * n[2]; 46 | H[3] = 2.0 * c2 * n[0]; 47 | H[4] = 2.0 * c1 * n[0] * n[1]; 48 | H[5] = 2.0 * c1 * n[1] * n[2]; 49 | H[6] = c3 * n[2] * n[2] - c5; 50 | H[7] = 2.0 * c1 * n[2] * n[0]; 51 | H[8] = c1 * (n[0] * n[0] - n[1] * n[1]); 52 | } 53 | 54 | vec3 evaluateLightingModel(vec3 normal) 55 | { 56 | float H[9]; 57 | evaluateH(normal, H); 58 | vec3 res = vec3(0.0); 59 | for (int i = 0; i < 9; i++) { 60 | res 
+= H[i] * SHCoeffs[i]; 61 | } 62 | return res; 63 | } 64 | 65 | // nC: coarse geometry normal, nH: fine normal from normal map 66 | vec3 evaluateLightingModelHybrid(vec3 nC, vec3 nH, mat3 prt) 67 | { 68 | float HC[9], HH[9]; 69 | evaluateH(nC, HC); 70 | evaluateH(nH, HH); 71 | 72 | vec3 res = vec3(0.0); 73 | vec3 shadow = vec3(0.0); 74 | vec3 unshadow = vec3(0.0); 75 | for(int i = 0; i < 3; ++i){ 76 | for(int j = 0; j < 3; ++j){ 77 | int id = i*3+j; 78 | res += HH[id]* SHCoeffs[id]; 79 | shadow += prt[i][j] * SHCoeffs[id]; 80 | unshadow += HC[id] * SHCoeffs[id]; 81 | } 82 | } 83 | vec3 ratio = clamp(shadow/unshadow,0.0,1.0); 84 | res = ratio * res; 85 | 86 | return res; 87 | } 88 | 89 | vec3 evaluateLightingModelPRT(mat3 prt) 90 | { 91 | vec3 res = vec3(0.0); 92 | for(int i = 0; i < 3; ++i){ 93 | for(int j = 0; j < 3; ++j){ 94 | res += prt[i][j] * SHCoeffs[i*3+j]; 95 | } 96 | } 97 | 98 | return res; 99 | } 100 | 101 | void main() 102 | { 103 | vec2 uv = VertexIn.Texcoord; 104 | vec3 nM = normalize(VertexIn.ModelNormal); 105 | vec3 nC = normalize(VertexIn.CameraNormal); 106 | vec3 nml = nC; 107 | mat3 prt = mat3(VertexIn.PRT1, VertexIn.PRT2, VertexIn.PRT3); 108 | 109 | vec4 albedo, shading; 110 | if(hasAlbedoMap == uint(0)) 111 | albedo = vec4(1.0); 112 | else 113 | albedo = texture(AlbedoMap, uv);//gammaCorrection(texture(AlbedoMap, uv), 1.0/2.2); 114 | 115 | if(hasNormalMap == uint(0)) 116 | { 117 | if(analytic == uint(0)) 118 | shading = vec4(evaluateLightingModelPRT(prt), 1.0f); 119 | else 120 | shading = vec4(evaluateLightingModel(nC), 1.0f); 121 | } 122 | else 123 | { 124 | vec3 n_tan = normalize(texture(NormalMap, uv).rgb*2.0-vec3(1.0)); 125 | 126 | mat3 TBN = mat3(normalize(VertexIn.Tangent),normalize(VertexIn.Bitangent),nC); 127 | vec3 nH = normalize(TBN * n_tan); 128 | 129 | if(analytic == uint(0)) 130 | shading = vec4(evaluateLightingModelHybrid(nC,nH,prt),1.0f); 131 | else 132 | shading = vec4(evaluateLightingModel(nH), 1.0f); 133 | 134 | nml = nH; 135 | } 136 | 137 | shading = gammaCorrection(shading, 2.2); 138 | FragColor = clamp(albedo * shading, 0.0, 1.0); 139 | FragPosition = vec4(VertexIn.Position,1.0); 140 | FragNormal = vec4(0.5*(nM+vec3(1.0)),1.0); 141 | } -------------------------------------------------------------------------------- /lib/renderer/gl/data/quad.fs: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | out vec4 FragColor; 3 | 4 | in vec2 TexCoord; 5 | 6 | uniform sampler2D screenTexture; 7 | 8 | void main() 9 | { 10 | FragColor = texture(screenTexture, TexCoord); 11 | } -------------------------------------------------------------------------------- /lib/renderer/gl/data/quad.vs: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | layout (location = 0) in vec2 aPos; 3 | layout (location = 1) in vec2 aTexCoord; 4 | 5 | out vec2 TexCoord; 6 | 7 | void main() 8 | { 9 | gl_Position = vec4(aPos.x, aPos.y, 0.0, 1.0); 10 | TexCoord = aTexCoord; 11 | } -------------------------------------------------------------------------------- /lib/renderer/gl/framework.py: -------------------------------------------------------------------------------- 1 | # Mario Rosasco, 2016 2 | # adapted from framework.cpp, Copyright (C) 2010-2012 by Jason L. McKesson 3 | # This file is licensed under the MIT License. 4 | # 5 | # NB: Unlike in the framework.cpp organization, the main loop is contained 6 | # in the tutorial files, not in this framework file. 
Additionally, a copy of 7 | # this module file must exist in the same directory as the tutorial files 8 | # to be imported properly. 9 | 10 | import os 11 | from OpenGL.GL import * 12 | 13 | 14 | # Function that creates and compiles shaders according to the given type (a GL enum value) and 15 | # shader program (a file containing a GLSL program). 16 | def loadShader(shaderType, shaderFile): 17 | # check if file exists, get full path name 18 | strFilename = findFileOrThrow(shaderFile) 19 | shaderData = None 20 | with open(strFilename, 'r') as f: 21 | shaderData = f.read() 22 | 23 | shader = glCreateShader(shaderType) 24 | glShaderSource(shader, shaderData) # note that this is a simpler function call than in C 25 | 26 | # This shader compilation is more explicit than the one used in 27 | # framework.cpp, which relies on a glutil wrapper function. 28 | # This is made explicit here mainly to decrease dependence on pyOpenGL 29 | # utilities and wrappers, which docs caution may change in future versions. 30 | glCompileShader(shader) 31 | 32 | status = glGetShaderiv(shader, GL_COMPILE_STATUS) 33 | if status == GL_FALSE: 34 | # Note that getting the error log is much simpler in Python than in C/C++ 35 | # and does not require explicit handling of the string buffer 36 | strInfoLog = glGetShaderInfoLog(shader) 37 | strShaderType = "" 38 | if shaderType is GL_VERTEX_SHADER: 39 | strShaderType = "vertex" 40 | elif shaderType is GL_GEOMETRY_SHADER: 41 | strShaderType = "geometry" 42 | elif shaderType is GL_FRAGMENT_SHADER: 43 | strShaderType = "fragment" 44 | 45 | print("Compilation failure for " + strShaderType + " shader:\n" + str(strInfoLog)) 46 | 47 | return shader 48 | 49 | 50 | # Function that accepts a list of shaders, compiles them, and returns a handle to the compiled program 51 | def createProgram(shaderList): 52 | program = glCreateProgram() 53 | 54 | for shader in shaderList: 55 | glAttachShader(program, shader) 56 | 57 | glLinkProgram(program) 58 | 59 | status = glGetProgramiv(program, GL_LINK_STATUS) 60 | if status == GL_FALSE: 61 | # Note that getting the error log is much simpler in Python than in C/C++ 62 | # and does not require explicit handling of the string buffer 63 | strInfoLog = glGetProgramInfoLog(program) 64 | print("Linker failure: \n" + str(strInfoLog)) 65 | 66 | for shader in shaderList: 67 | glDetachShader(program, shader) 68 | 69 | return program 70 | 71 | 72 | # Helper function to locate and open the target file (passed in as a string). 73 | # Returns the full path to the file as a string. 74 | def findFileOrThrow(strBasename): 75 | # Keep constant names in C-style convention, for readability 76 | # when comparing to C(/C++) code. 77 | if os.path.isfile(strBasename): 78 | return strBasename 79 | 80 | LOCAL_FILE_DIR = "data" + os.sep 81 | GLOBAL_FILE_DIR = os.path.dirname(os.path.abspath(__file__)) + os.sep + "data" + os.sep 82 | 83 | strFilename = LOCAL_FILE_DIR + strBasename 84 | if os.path.isfile(strFilename): 85 | return strFilename 86 | 87 | strFilename = GLOBAL_FILE_DIR + strBasename 88 | if os.path.isfile(strFilename): 89 | return strFilename 90 | 91 | raise IOError('Could not find target file ' + strBasename) 92 | -------------------------------------------------------------------------------- /lib/renderer/gl/glcontext.py: -------------------------------------------------------------------------------- 1 | """Headless GPU-accelerated OpenGL context creation on Google Colaboratory. 2 | 3 | Typical usage: 4 | 5 | # Optional PyOpenGL configuration can be done here.
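# (These flags are plain module attributes of the PyOpenGL package; they only take effect if set before OpenGL.GL and the other submodules are first imported, which is why this hook sits above the imports.)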
6 | # import OpenGL 7 | # OpenGL.ERROR_CHECKING = True 8 | 9 | # 'glcontext' must be imported before any OpenGL.* API. 10 | from lucid.misc.gl.glcontext import create_opengl_context 11 | 12 | # Now it's safe to import OpenGL and EGL functions 13 | import OpenGL.GL as gl 14 | 15 | # create_opengl_context() creates a GL context that is attached to an 16 | # offscreen surface of the specified size. Note that rendering to buffers 17 | # of other sizes and formats is still possible with OpenGL Framebuffers. 18 | # 19 | # Users are expected to directly use the EGL API in case more advanced 20 | # context management is required. 21 | width, height = 640, 480 22 | create_opengl_context((width, height)) 23 | 24 | # OpenGL context is available here. 25 | 26 | """ 27 | 28 | from __future__ import print_function 29 | 30 | # pylint: disable=unused-import,g-import-not-at-top,g-statement-before-imports 31 | 32 | try: 33 | import OpenGL 34 | except: 35 | print('This module depends on PyOpenGL.') 36 | print('Please run "\033[1m!pip install -q pyopengl\033[0m" ' 37 | 'prior to importing this module.') 38 | raise 39 | 40 | import ctypes 41 | from ctypes import pointer, util 42 | import os 43 | 44 | os.environ['PYOPENGL_PLATFORM'] = 'egl' 45 | 46 | # OpenGL loading workaround. 47 | # 48 | # * PyOpenGL tries to load libGL, but we need libOpenGL, see [1,2]. 49 | # This could have been solved by a symlink libGL->libOpenGL, but: 50 | # 51 | # * Python 2.7 can't find libGL and libEGL due to a bug (see [3]) 52 | # in ctypes.util, which was only fixed in Python 3.6. 53 | # 54 | # So, the only solution I've found is to monkeypatch ctypes.util. 55 | # [1] https://devblogs.nvidia.com/egl-eye-opengl-visualization-without-x-server/ 56 | # [2] https://devblogs.nvidia.com/linking-opengl-server-side-rendering/ 57 | # [3] https://bugs.python.org/issue9998 58 | _find_library_old = ctypes.util.find_library 59 | try: 60 | 61 | def _find_library_new(name): 62 | return { 63 | 'GL': 'libOpenGL.so', 64 | 'EGL': 'libEGL.so', 65 | }.get(name, _find_library_old(name)) 66 | 67 | util.find_library = _find_library_new 68 | import OpenGL.GL as gl 69 | import OpenGL.EGL as egl 70 | except: 71 | print('Unable to load OpenGL libraries. ' 72 | 'Make sure you use a GPU-enabled backend.') 73 | print('Press "Runtime->Change runtime type" and set ' 74 | '"Hardware accelerator" to GPU.') 75 | raise 76 | finally: 77 | util.find_library = _find_library_old 78 | 79 | 80 | def create_opengl_context(surface_size=(640, 480)): 81 | """Create offscreen OpenGL context and make it current. 82 | 83 | Users are expected to directly use EGL API in case more advanced 84 | context management is required. 85 | 86 | Args: 87 | surface_size: (width, height), size of the offscreen rendering surface.
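    Example (a minimal sketch, assuming the repository root is on sys.path;
    the glGetString query is only a sanity check that the context is current):

        from lib.renderer.gl.glcontext import create_opengl_context
        import OpenGL.GL as gl

        create_opengl_context((512, 512))
        print(gl.glGetString(gl.GL_VERSION))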
88 | """ 89 | egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY) 90 | 91 | major, minor = egl.EGLint(), egl.EGLint() 92 | egl.eglInitialize(egl_display, pointer(major), pointer(minor)) 93 | 94 | config_attribs = [ 95 | egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8, egl.EGL_GREEN_SIZE, 8, 96 | egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24, egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, 97 | egl.EGL_NONE 98 | ] 99 | config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs) 100 | 101 | num_configs = egl.EGLint() 102 | egl_cfg = egl.EGLConfig() 103 | egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1, pointer(num_configs)) 104 | 105 | width, height = surface_size 106 | pbuffer_attribs = [ 107 | egl.EGL_WIDTH, 108 | width, 109 | egl.EGL_HEIGHT, 110 | height, 111 | egl.EGL_NONE, 112 | ] 113 | pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs) 114 | egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs) 115 | 116 | egl.eglBindAPI(egl.EGL_OPENGL_API) 117 | 118 | context_attribs = None 119 | # context_attribs = [ 120 | # egl.EGL_CONTEXT_MAJOR_VERSION, 121 | # 4, 122 | # egl.EGL_CONTEXT_MINOR_VERSION, 123 | # 1, 124 | # egl.EGL_NONE, 125 | # ] 126 | 127 | egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT, context_attribs) 128 | egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context) 129 | 130 | buffer_type = egl.EGLint() 131 | out = egl.eglQueryContext(egl_display, egl_context, egl.EGL_CONTEXT_CLIENT_VERSION, buffer_type) 132 | # print(buffer_type) 133 | -------------------------------------------------------------------------------- /lib/renderer/gl/init_gl.py: -------------------------------------------------------------------------------- 1 | _glut_window = None 2 | _context_inited = None 3 | 4 | 5 | def initialize_GL_context(width=512, height=512, egl=False): 6 | ''' 7 | default context uses GLUT 8 | ''' 9 | if not egl: 10 | import OpenGL.GLUT as GLUT 11 | display_mode = GLUT.GLUT_DOUBLE | GLUT.GLUT_RGB | GLUT.GLUT_DEPTH 12 | global _glut_window 13 | if _glut_window is None: 14 | GLUT.glutInit() 15 | GLUT.glutInitDisplayMode(display_mode) 16 | GLUT.glutInitWindowSize(width, height) 17 | GLUT.glutInitWindowPosition(0, 0) 18 | _glut_window = GLUT.glutCreateWindow("My Render.") 19 | else: 20 | from .glcontext import create_opengl_context 21 | global _context_inited 22 | if _context_inited is None: 23 | create_opengl_context((width, height)) 24 | _context_inited = True 25 | -------------------------------------------------------------------------------- /lib/renderer/gl/norm_render.py: -------------------------------------------------------------------------------- 1 | ''' 2 | MIT License 3 | 4 | Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 
15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | ''' 24 | from OpenGL.GLUT import * 25 | 26 | from .render2 import Render 27 | 28 | 29 | class NormRender(Render): 30 | def __init__( 31 | self, 32 | width=1600, 33 | height=1200, 34 | name='Cam Renderer', 35 | program_files=['simple.fs', 'simple.vs'], 36 | color_size=1, 37 | ms_rate=1 38 | ): 39 | Render.__init__(self, width, height, name, program_files, color_size, ms_rate) 40 | self.camera = None 41 | 42 | glutDisplayFunc(self.display) 43 | glutKeyboardFunc(self.keyboard) 44 | 45 | def set_camera(self, camera): 46 | self.camera = camera 47 | self.projection_matrix, self.model_view_matrix = camera.get_gl_matrix() 48 | 49 | def set_matrices(self, projection, modelview): 50 | self.projection_matrix = projection 51 | self.model_view_matrix = modelview 52 | 53 | def keyboard(self, key, x, y): 54 | # up 55 | eps = 1 56 | # print(key) 57 | if key == b'w': 58 | self.camera.center += eps * self.camera.direction 59 | elif key == b's': 60 | self.camera.center -= eps * self.camera.direction 61 | if key == b'a': 62 | self.camera.center -= eps * self.camera.right 63 | elif key == b'd': 64 | self.camera.center += eps * self.camera.right 65 | if key == b' ': 66 | self.camera.center += eps * self.camera.up 67 | elif key == b'x': 68 | self.camera.center -= eps * self.camera.up 69 | elif key == b'i': 70 | self.camera.near += 0.1 * eps 71 | self.camera.far += 0.1 * eps 72 | elif key == b'o': 73 | self.camera.near -= 0.1 * eps 74 | self.camera.far -= 0.1 * eps 75 | 76 | self.projection_matrix, self.model_view_matrix = self.camera.get_gl_matrix() 77 | 78 | def show(self): 79 | glutMainLoop() 80 | -------------------------------------------------------------------------------- /lib/renderer/gl/normal_render.py: -------------------------------------------------------------------------------- 1 | ''' 2 | MIT License 3 | 4 | Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 
23 | ''' 24 | import numpy as np 25 | import math 26 | 27 | from .framework import * 28 | from .norm_render import NormRender 29 | 30 | 31 | class NormalRender(NormRender): 32 | def __init__(self, width=1600, height=1200, name='Normal Renderer'): 33 | NormRender.__init__(self, width, height, name, program_files=['normal.vs', 'normal.fs']) 34 | 35 | self.norm_buffer = glGenBuffers(1) 36 | 37 | self.norm_data = None 38 | 39 | def set_normal_mesh(self, vertices, faces, norms, face_normals): 40 | NormRender.set_mesh(self, vertices, faces) 41 | 42 | self.norm_data = norms[face_normals.reshape([-1])] 43 | 44 | glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer) 45 | glBufferData(GL_ARRAY_BUFFER, self.norm_data, GL_STATIC_DRAW) 46 | 47 | glBindBuffer(GL_ARRAY_BUFFER, 0) 48 | 49 | def euler_to_rot_mat(self, r_x, r_y, r_z): 50 | R_x = np.array( 51 | [[1, 0, 0], [0, math.cos(r_x), -math.sin(r_x)], [0, math.sin(r_x), 52 | math.cos(r_x)]] 53 | ) 54 | 55 | R_y = np.array( 56 | [[math.cos(r_y), 0, math.sin(r_y)], [0, 1, 0], [-math.sin(r_y), 0, 57 | math.cos(r_y)]] 58 | ) 59 | 60 | R_z = np.array( 61 | [[math.cos(r_z), -math.sin(r_z), 0], [math.sin(r_z), math.cos(r_z), 0], [0, 0, 1]] 62 | ) 63 | 64 | R = np.dot(R_z, np.dot(R_y, R_x)) 65 | 66 | return R 67 | 68 | def draw(self): 69 | self.draw_init() 70 | 71 | glUseProgram(self.program) 72 | glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE, self.model_view_matrix.transpose()) 73 | glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE, self.projection_matrix.transpose()) 74 | 75 | # Handle vertex buffer 76 | glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer) 77 | 78 | glEnableVertexAttribArray(0) 79 | glVertexAttribPointer(0, self.vertex_dim, GL_DOUBLE, GL_FALSE, 0, None) 80 | 81 | # Handle normal buffer 82 | glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer) 83 | 84 | glEnableVertexAttribArray(1) 85 | glVertexAttribPointer(1, 3, GL_DOUBLE, GL_FALSE, 0, None) 86 | 87 | glDrawArrays(GL_TRIANGLES, 0, self.n_vertices) 88 | 89 | glDisableVertexAttribArray(1) 90 | glDisableVertexAttribArray(0) 91 | 92 | glBindBuffer(GL_ARRAY_BUFFER, 0) 93 | 94 | glUseProgram(0) 95 | 96 | self.draw_end() 97 | -------------------------------------------------------------------------------- /lib/renderer/glm.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 
14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | import numpy as np 18 | 19 | 20 | def vec3(x, y, z): 21 | return np.array([x, y, z], dtype=np.float32) 22 | 23 | 24 | def radians(v): 25 | return np.radians(v) 26 | 27 | 28 | def identity(): 29 | return np.identity(4, dtype=np.float32) 30 | 31 | 32 | def empty(): 33 | return np.zeros([4, 4], dtype=np.float32) 34 | 35 | 36 | def magnitude(v): 37 | return np.linalg.norm(v) 38 | 39 | 40 | def normalize(v): 41 | m = magnitude(v) 42 | return v if m == 0 else v / m 43 | 44 | 45 | def dot(u, v): 46 | return np.sum(u * v) 47 | 48 | 49 | def cross(u, v): 50 | res = vec3(0, 0, 0) 51 | res[0] = u[1] * v[2] - u[2] * v[1] 52 | res[1] = u[2] * v[0] - u[0] * v[2] 53 | res[2] = u[0] * v[1] - u[1] * v[0] 54 | return res 55 | 56 | 57 | # below functions can be optimized 58 | 59 | 60 | def translate(m, v): 61 | res = np.copy(m) 62 | res[:, 3] = m[:, 0] * v[0] + m[:, 1] * v[1] + m[:, 2] * v[2] + m[:, 3] 63 | return res 64 | 65 | 66 | def rotate(m, angle, v): 67 | a = angle 68 | c = np.cos(a) 69 | s = np.sin(a) 70 | 71 | axis = normalize(v) 72 | temp = (1 - c) * axis 73 | 74 | rot = empty() 75 | rot[0][0] = c + temp[0] * axis[0] 76 | rot[0][1] = temp[0] * axis[1] + s * axis[2] 77 | rot[0][2] = temp[0] * axis[2] - s * axis[1] 78 | 79 | rot[1][0] = temp[1] * axis[0] - s * axis[2] 80 | rot[1][1] = c + temp[1] * axis[1] 81 | rot[1][2] = temp[1] * axis[2] + s * axis[0] 82 | 83 | rot[2][0] = temp[2] * axis[0] + s * axis[1] 84 | rot[2][1] = temp[2] * axis[1] - s * axis[0] 85 | rot[2][2] = c + temp[2] * axis[2] 86 | 87 | res = empty() 88 | res[:, 0] = m[:, 0] * rot[0][0] + m[:, 1] * rot[0][1] + m[:, 2] * rot[0][2] 89 | res[:, 1] = m[:, 0] * rot[1][0] + m[:, 1] * rot[1][1] + m[:, 2] * rot[1][2] 90 | res[:, 2] = m[:, 0] * rot[2][0] + m[:, 1] * rot[2][1] + m[:, 2] * rot[2][2] 91 | res[:, 3] = m[:, 3] 92 | return res 93 | 94 | 95 | def perspective(fovy, aspect, zNear, zFar): 96 | tanHalfFovy = np.tan(fovy / 2) 97 | 98 | res = empty() 99 | res[0][0] = 1 / (aspect * tanHalfFovy) 100 | res[1][1] = 1 / (tanHalfFovy) 101 | res[2][3] = -1 102 | res[2][2] = -(zFar + zNear) / (zFar - zNear) 103 | res[3][2] = -(2 * zFar * zNear) / (zFar - zNear) 104 | 105 | return res.T 106 | 107 | 108 | def ortho(left, right, bottom, top, zNear, zFar): 109 | # res = np.ones([4, 4], dtype=np.float32) 110 | res = identity() 111 | res[0][0] = 2 / (right - left) 112 | res[1][1] = 2 / (top - bottom) 113 | res[2][2] = -2 / (zFar - zNear) 114 | res[3][0] = -(right + left) / (right - left) 115 | res[3][1] = -(top + bottom) / (top - bottom) 116 | res[3][2] = -(zFar + zNear) / (zFar - zNear) 117 | return res.T 118 | 119 | 120 | def lookat(eye, center, up): 121 | f = normalize(center - eye) 122 | s = normalize(cross(f, up)) 123 | u = cross(s, f) 124 | 125 | res = identity() 126 | res[0][0] = s[0] 127 | res[1][0] = s[1] 128 | res[2][0] = s[2] 129 | res[0][1] = u[0] 130 | res[1][1] = u[1] 131 | res[2][1] = u[2] 132 | res[0][2] = -f[0] 133 | res[1][2] = -f[1] 134 | res[2][2] = -f[2] 135 | res[3][0] = -dot(s, eye) 136 | res[3][1] = -dot(u, eye) 137 | res[3][2] = -dot(f, eye) 138 | return res.T 139 | 140 | 141 | def transform(d, m): 142 | return np.dot(m, d.T).T 143 | -------------------------------------------------------------------------------- /lib/smplx/.gitignore: -------------------------------------------------------------------------------- 1 | #### joe made this: http://goel.io/joe 2 | 3 | #####=== Python ===##### 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 
8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # PyBuilder 74 | target/ 75 | 76 | # Jupyter Notebook 77 | .ipynb_checkpoints 78 | 79 | # pyenv 80 | .python-version 81 | 82 | # celery beat schedule file 83 | celerybeat-schedule 84 | 85 | # SageMath parsed files 86 | *.sage.py 87 | 88 | # Environments 89 | .env 90 | .venv 91 | env/ 92 | venv/ 93 | ENV/ 94 | env.bak/ 95 | venv.bak/ 96 | 97 | # Spyder project settings 98 | .spyderproject 99 | .spyproject 100 | 101 | # Rope project settings 102 | .ropeproject 103 | 104 | # mkdocs documentation 105 | /site 106 | 107 | # mypy 108 | .mypy_cache/ 109 | models/ 110 | output/ 111 | outputs/ 112 | transfer_data/ 113 | torch-trust-ncg/ 114 | build/ 115 | -------------------------------------------------------------------------------- /lib/smplx/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | from .body_models import ( 18 | create, 19 | SMPL, 20 | SMPLH, 21 | SMPLX, 22 | MANO, 23 | FLAME, 24 | build_layer, 25 | SMPLLayer, 26 | SMPLHLayer, 27 | SMPLXLayer, 28 | MANOLayer, 29 | FLAMELayer, 30 | ) 31 | -------------------------------------------------------------------------------- /lib/smplx/joint_names.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 
8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | JOINT_NAMES = [ 18 | 'pelvis', 19 | 'left_hip', 20 | 'right_hip', 21 | 'spine1', 22 | 'left_knee', 23 | 'right_knee', 24 | 'spine2', 25 | 'left_ankle', 26 | 'right_ankle', 27 | 'spine3', 28 | 'left_foot', 29 | 'right_foot', 30 | 'neck', 31 | 'left_collar', 32 | 'right_collar', 33 | 'head', 34 | 'left_shoulder', 35 | 'right_shoulder', 36 | 'left_elbow', 37 | 'right_elbow', 38 | 'left_wrist', 39 | 'right_wrist', 40 | 'jaw', 41 | 'left_eye_smplhf', 42 | 'right_eye_smplhf', 43 | 'left_index1', 44 | 'left_index2', 45 | 'left_index3', 46 | 'left_middle1', 47 | 'left_middle2', 48 | 'left_middle3', 49 | 'left_pinky1', 50 | 'left_pinky2', 51 | 'left_pinky3', 52 | 'left_ring1', 53 | 'left_ring2', 54 | 'left_ring3', 55 | 'left_thumb1', 56 | 'left_thumb2', 57 | 'left_thumb3', 58 | 'right_index1', 59 | 'right_index2', 60 | 'right_index3', 61 | 'right_middle1', 62 | 'right_middle2', 63 | 'right_middle3', 64 | 'right_pinky1', 65 | 'right_pinky2', 66 | 'right_pinky3', 67 | 'right_ring1', 68 | 'right_ring2', 69 | 'right_ring3', 70 | 'right_thumb1', 71 | 'right_thumb2', 72 | 'right_thumb3', 73 | 'nose', 74 | 'right_eye', 75 | 'left_eye', 76 | 'right_ear', 77 | 'left_ear', 78 | 'left_big_toe', 79 | 'left_small_toe', 80 | 'left_heel', 81 | 'right_big_toe', 82 | 'right_small_toe', 83 | 'right_heel', 84 | 'left_thumb', 85 | 'left_index', 86 | 'left_middle', 87 | 'left_ring', 88 | 'left_pinky', 89 | 'right_thumb', 90 | 'right_index', 91 | 'right_middle', 92 | 'right_ring', 93 | 'right_pinky', 94 | 'right_eye_brow1', 95 | 'right_eye_brow2', 96 | 'right_eye_brow3', 97 | 'right_eye_brow4', 98 | 'right_eye_brow5', 99 | 'left_eye_brow5', 100 | 'left_eye_brow4', 101 | 'left_eye_brow3', 102 | 'left_eye_brow2', 103 | 'left_eye_brow1', 104 | 'nose1', 105 | 'nose2', 106 | 'nose3', 107 | 'nose4', 108 | 'right_nose_2', 109 | 'right_nose_1', 110 | 'nose_middle', 111 | 'left_nose_1', 112 | 'left_nose_2', 113 | 'right_eye1', 114 | 'right_eye2', 115 | 'right_eye3', 116 | 'right_eye4', 117 | 'right_eye5', 118 | 'right_eye6', 119 | 'left_eye4', 120 | 'left_eye3', 121 | 'left_eye2', 122 | 'left_eye1', 123 | 'left_eye6', 124 | 'left_eye5', 125 | 'right_mouth_1', 126 | 'right_mouth_2', 127 | 'right_mouth_3', 128 | 'mouth_top', 129 | 'left_mouth_3', 130 | 'left_mouth_2', 131 | 'left_mouth_1', 132 | 'left_mouth_5', # 59 in OpenPose output 133 | 'left_mouth_4', # 58 in OpenPose output 134 | 'mouth_bottom', 135 | 'right_mouth_4', 136 | 'right_mouth_5', 137 | 'right_lip_1', 138 | 'right_lip_2', 139 | 'lip_top', 140 | 'left_lip_2', 141 | 'left_lip_1', 142 | 'left_lip_3', 143 | 'lip_bottom', 144 | 'right_lip_3', 145 | # Face contour 146 | 'right_contour_1', 147 | 'right_contour_2', 148 | 'right_contour_3', 149 | 'right_contour_4', 150 | 'right_contour_5', 151 | 'right_contour_6', 152 | 'right_contour_7', 153 | 'right_contour_8', 154 | 'contour_middle', 155 | 'left_contour_8', 156 | 'left_contour_7', 157 | 'left_contour_6', 158 | 'left_contour_5', 159 | 'left_contour_4', 160 | 'left_contour_3', 161 | 'left_contour_2', 162 | 'left_contour_1', 163 | ] 164 | -------------------------------------------------------------------------------- /lib/smplx/utils.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | from typing import NewType, Union, Optional 18 | from dataclasses import dataclass, asdict, fields 19 | import numpy as np 20 | import torch 21 | 22 | Tensor = NewType('Tensor', torch.Tensor) 23 | Array = NewType('Array', np.ndarray) 24 | 25 | 26 | @dataclass 27 | class ModelOutput: 28 | vertices: Optional[Tensor] = None 29 | joints: Optional[Tensor] = None 30 | full_pose: Optional[Tensor] = None 31 | global_orient: Optional[Tensor] = None 32 | transl: Optional[Tensor] = None 33 | 34 | def __getitem__(self, key): 35 | return getattr(self, key) 36 | 37 | def get(self, key, default=None): 38 | return getattr(self, key, default) 39 | 40 | def __iter__(self): 41 | return self.keys() 42 | 43 | def keys(self): 44 | keys = [t.name for t in fields(self)] 45 | return iter(keys) 46 | 47 | def values(self): 48 | values = [getattr(self, t.name) for t in fields(self)] 49 | return iter(values) 50 | 51 | def items(self): 52 | data = [(t.name, getattr(self, t.name)) for t in fields(self)] 53 | return iter(data) 54 | 55 | 56 | @dataclass 57 | class SMPLOutput(ModelOutput): 58 | betas: Optional[Tensor] = None 59 | body_pose: Optional[Tensor] = None 60 | 61 | 62 | @dataclass 63 | class SMPLHOutput(SMPLOutput): 64 | left_hand_pose: Optional[Tensor] = None 65 | right_hand_pose: Optional[Tensor] = None 66 | transl: Optional[Tensor] = None 67 | 68 | 69 | @dataclass 70 | class SMPLXOutput(SMPLHOutput): 71 | expression: Optional[Tensor] = None 72 | jaw_pose: Optional[Tensor] = None 73 | joint_transformation: Optional[Tensor] = None 74 | vertex_transformation: Optional[Tensor] = None 75 | 76 | 77 | @dataclass 78 | class MANOOutput(ModelOutput): 79 | betas: Optional[Tensor] = None 80 | hand_pose: Optional[Tensor] = None 81 | 82 | 83 | @dataclass 84 | class FLAMEOutput(ModelOutput): 85 | betas: Optional[Tensor] = None 86 | expression: Optional[Tensor] = None 87 | jaw_pose: Optional[Tensor] = None 88 | neck_pose: Optional[Tensor] = None 89 | 90 | 91 | def find_joint_kin_chain(joint_id, kinematic_tree): 92 | kin_chain = [] 93 | curr_idx = joint_id 94 | while curr_idx != -1: 95 | kin_chain.append(curr_idx) 96 | curr_idx = kinematic_tree[curr_idx] 97 | return kin_chain 98 | 99 | 100 | def to_tensor(array: Union[Array, Tensor], dtype=torch.float32) -> Tensor: 101 | if torch.is_tensor(array): 102 | return array 103 | else: 104 | return torch.tensor(array, dtype=dtype) 105 | 106 | 107 | class Struct(object): 108 | def __init__(self, **kwargs): 109 | for key, val in kwargs.items(): 110 | setattr(self, key, val) 111 | 112 | 113 | def to_np(array, dtype=np.float32): 114 | if 'scipy.sparse' in str(type(array)): 115 | array = array.todense() 116 | return np.array(array, dtype=dtype) 117 | 118 | 119 | def 
rot_mat_to_euler(rot_mats): 120 | # Calculates Euler angles from a rotation matrix 121 | # Careful for extreme cases of Euler angles like [0.0, pi, 0.0] 122 | 123 | sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] + rot_mats[:, 1, 0] * rot_mats[:, 1, 0]) 124 | return torch.atan2(-rot_mats[:, 2, 0], sy) 125 | -------------------------------------------------------------------------------- /lib/smplx/vertex_ids.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved. 14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | from __future__ import print_function 18 | from __future__ import absolute_import 19 | from __future__ import division 20 | 21 | # Joint name to vertex mapping. SMPL/SMPL-H/SMPL-X vertices that correspond to 22 | # MSCOCO and OpenPose joints 23 | vertex_ids = { 24 | 'smplh': 25 | { 26 | 'nose': 332, 27 | 'reye': 6260, 28 | 'leye': 2800, 29 | 'rear': 4071, 30 | 'lear': 583, 31 | 'rthumb': 6191, 32 | 'rindex': 5782, 33 | 'rmiddle': 5905, 34 | 'rring': 6016, 35 | 'rpinky': 6133, 36 | 'lthumb': 2746, 37 | 'lindex': 2319, 38 | 'lmiddle': 2445, 39 | 'lring': 2556, 40 | 'lpinky': 2673, 41 | 'LBigToe': 3216, 42 | 'LSmallToe': 3226, 43 | 'LHeel': 3387, 44 | 'RBigToe': 6617, 45 | 'RSmallToe': 6624, 46 | 'RHeel': 6787 47 | }, 48 | 'smplx': 49 | { 50 | 'nose': 9120, 51 | 'reye': 9929, 52 | 'leye': 9448, 53 | 'rear': 616, 54 | 'lear': 6, 55 | 'rthumb': 8079, 56 | 'rindex': 7669, 57 | 'rmiddle': 7794, 58 | 'rring': 7905, 59 | 'rpinky': 8022, 60 | 'lthumb': 5361, 61 | 'lindex': 4933, 62 | 'lmiddle': 5058, 63 | 'lring': 5169, 64 | 'lpinky': 5286, 65 | 'LBigToe': 5770, 66 | 'LSmallToe': 5780, 67 | 'LHeel': 8846, 68 | 'RBigToe': 8463, 69 | 'RSmallToe': 8474, 70 | 'RHeel': 8635 71 | }, 72 | 'mano': { 73 | 'thumb': 744, 74 | 'index': 320, 75 | 'middle': 443, 76 | 'ring': 554, 77 | 'pinky': 671, 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /lib/smplx/vertex_joint_selector.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is 4 | # holder of all proprietary rights on this computer program. 5 | # You can only use this computer program if you have closed 6 | # a license agreement with MPG or you get the right to use the computer 7 | # program from someone who is authorized to grant you that right. 8 | # Any use of the computer program without a valid license is prohibited and 9 | # liable to prosecution. 10 | # 11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung 12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute 13 | # for Intelligent Systems. All rights reserved.
14 | # 15 | # Contact: ps-license@tuebingen.mpg.de 16 | 17 | from __future__ import absolute_import 18 | from __future__ import print_function 19 | from __future__ import division 20 | 21 | import numpy as np 22 | 23 | import torch 24 | import torch.nn as nn 25 | 26 | from .utils import to_tensor 27 | 28 | 29 | class VertexJointSelector(nn.Module): 30 | def __init__(self, vertex_ids=None, use_hands=True, use_feet_keypoints=True, **kwargs): 31 | super(VertexJointSelector, self).__init__() 32 | 33 | extra_joints_idxs = [] 34 | 35 | face_keyp_idxs = np.array( 36 | [ 37 | vertex_ids['nose'], vertex_ids['reye'], vertex_ids['leye'], vertex_ids['rear'], 38 | vertex_ids['lear'] 39 | ], 40 | dtype=np.int64 41 | ) 42 | 43 | extra_joints_idxs = np.concatenate([extra_joints_idxs, face_keyp_idxs]) 44 | 45 | if use_feet_keypoints: 46 | feet_keyp_idxs = np.array( 47 | [ 48 | vertex_ids['LBigToe'], vertex_ids['LSmallToe'], vertex_ids['LHeel'], 49 | vertex_ids['RBigToe'], vertex_ids['RSmallToe'], vertex_ids['RHeel'] 50 | ], 51 | dtype=np.int32 52 | ) 53 | 54 | extra_joints_idxs = np.concatenate([extra_joints_idxs, feet_keyp_idxs]) 55 | 56 | if use_hands: 57 | self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky'] 58 | 59 | tips_idxs = [] 60 | for hand_id in ['l', 'r']: 61 | for tip_name in self.tip_names: 62 | tips_idxs.append(vertex_ids[hand_id + tip_name]) 63 | 64 | extra_joints_idxs = np.concatenate([extra_joints_idxs, tips_idxs]) 65 | 66 | self.register_buffer('extra_joints_idxs', to_tensor(extra_joints_idxs, dtype=torch.long)) 67 | 68 | def forward(self, vertices, joints): 69 | extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs) 70 | joints = torch.cat([joints, extra_joints], dim=1) 71 | 72 | return joints 73 | -------------------------------------------------------------------------------- /media/DIF-pipeline .png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/media/DIF-pipeline .png -------------------------------------------------------------------------------- /media/psy_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/psyai-net/D-IF_release/f5b82ff5e18ca42115741c3f808520bf664329d0/media/psy_logo.png -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 |
2 |
3 |
19 |
20 |