├── .gitignore ├── INSTALL.md ├── LICENSE ├── README.md ├── configs ├── default.yaml ├── h36m_exp │ ├── latent_xyzc_s11g.yaml │ ├── latent_xyzc_s11p.yaml │ ├── latent_xyzc_s1p.yaml │ ├── latent_xyzc_s5p.yaml │ ├── latent_xyzc_s6p.yaml │ ├── latent_xyzc_s7p.yaml │ ├── latent_xyzc_s8p.yaml │ └── latent_xyzc_s9p.yaml ├── monocular_custom.yaml ├── multi_view_custom.yaml ├── nerf │ ├── nerf_313.yaml │ ├── nerf_315.yaml │ ├── nerf_377.yaml │ ├── nerf_386.yaml │ ├── nerf_387.yaml │ ├── nerf_390.yaml │ ├── nerf_392.yaml │ ├── nerf_393.yaml │ └── nerf_394.yaml ├── neural_volumes │ ├── neural_volumes_313.yaml │ ├── neural_volumes_315.yaml │ ├── neural_volumes_377.yaml │ ├── neural_volumes_386.yaml │ ├── neural_volumes_387.yaml │ ├── neural_volumes_390.yaml │ ├── neural_volumes_392.yaml │ ├── neural_volumes_393.yaml │ └── neural_volumes_394.yaml ├── snapshot_exp │ ├── snapshot_f1c.yaml │ ├── snapshot_f3c.yaml │ ├── snapshot_f4c.yaml │ ├── snapshot_f6p.yaml │ ├── snapshot_f7p.yaml │ ├── snapshot_f8p.yaml │ ├── snapshot_m2c.yaml │ ├── snapshot_m2o.yaml │ ├── snapshot_m3c.yaml │ └── snapshot_m5o.yaml ├── zju_mocap_exp │ ├── latent_xyzc_313.yaml │ ├── latent_xyzc_315.yaml │ ├── latent_xyzc_377.yaml │ ├── latent_xyzc_386.yaml │ ├── latent_xyzc_387.yaml │ ├── latent_xyzc_390.yaml │ ├── latent_xyzc_392.yaml │ ├── latent_xyzc_393.yaml │ ├── latent_xyzc_394.yaml │ ├── latent_xyzc_395.yaml │ ├── latent_xyzc_396.yaml │ └── xyzc_rotate_demo_313.yaml ├── zju_mocap_frame_exp │ ├── latent_xyzc_313_ni1.yaml │ ├── latent_xyzc_313_ni1200.yaml │ ├── latent_xyzc_313_ni300.yaml │ ├── latent_xyzc_313_ni60.yaml │ ├── latent_xyzc_313_ni600.yaml │ ├── latent_xyzc_315_ni1.yaml │ ├── latent_xyzc_377_ni1.yaml │ ├── latent_xyzc_386_ni1.yaml │ ├── latent_xyzc_387_ni1.yaml │ ├── latent_xyzc_390_ni1.yaml │ ├── latent_xyzc_392_ni1.yaml │ ├── latent_xyzc_393_ni1.yaml │ └── latent_xyzc_394_ni1.yaml └── zju_mocap_view_exp │ ├── latent_xyzc_313_1view.yaml │ ├── latent_xyzc_313_2view.yaml │ └── latent_xyzc_313_6view.yaml ├── docker ├── .condarc ├── Dockerfile ├── README.md ├── apt-sources.list └── spconv.sh ├── eval_whole_img.sh ├── lib ├── __init__.py ├── config │ ├── __init__.py │ ├── config.py │ └── yacs.py ├── datasets │ ├── __init__.py │ ├── collate_batch.py │ ├── light_stage │ │ ├── monocular_dataset.py │ │ ├── monocular_demo_dataset.py │ │ ├── monocular_mesh_dataset.py │ │ ├── multi_view_dataset.py │ │ ├── multi_view_demo_dataset.py │ │ ├── multi_view_mesh_dataset.py │ │ ├── multi_view_perform_dataset.py │ │ └── rotate_smpl_dataset.py │ ├── make_dataset.py │ ├── samplers.py │ └── transforms.py ├── evaluators │ ├── __init__.py │ ├── if_nerf.py │ ├── if_nerf_mesh.py │ ├── make_evaluator.py │ └── neural_volume.py ├── networks │ ├── __init__.py │ ├── embedder.py │ ├── latent_xyzc.py │ ├── make_network.py │ ├── nerf.py │ ├── nerf_mesh.py │ ├── renderer │ │ ├── __init__.py │ │ ├── if_clight_renderer.py │ │ ├── if_clight_renderer_mmsk.py │ │ ├── if_clight_renderer_msk.py │ │ ├── if_mesh_renderer.py │ │ ├── make_renderer.py │ │ ├── nerf_net_utils.py │ │ ├── tpose_renderer.py │ │ ├── volume_mesh_renderer.py │ │ └── volume_renderer.py │ └── tpose_xyzc.py ├── train │ ├── __init__.py │ ├── optimizer.py │ ├── recorder.py │ ├── scheduler.py │ └── trainers │ │ ├── __init__.py │ │ ├── if_nerf_clight.py │ │ ├── make_trainer.py │ │ ├── nerf.py │ │ ├── tpose.py │ │ └── trainer.py ├── utils │ ├── base_utils.py │ ├── blend_utils.py │ ├── data_utils.py │ ├── if_nerf │ │ ├── if_nerf_data_utils.py │ │ ├── if_nerf_net_utils.py │ │ └── voxels.py │ ├── 
img_utils.py │ ├── light_stage │ │ └── ply_to_occupancy.py │ ├── net_utils.py │ ├── optimizer │ │ ├── lr_scheduler.py │ │ └── radam.py │ ├── render_utils.py │ ├── snapshot_data_utils.py │ └── vis_utils.py └── visualizers │ ├── __init__.py │ ├── if_nerf.py │ ├── if_nerf_demo.py │ ├── if_nerf_mesh.py │ ├── if_nerf_perform.py │ ├── if_nerf_test.py │ └── make_visualizer.py ├── requirements.txt ├── run.py ├── run.sh ├── supplementary_material.md ├── test.sh ├── tools ├── custom │ ├── README.md │ ├── camera_params │ │ ├── extri.yml │ │ └── intri.yml │ ├── file_structure.png │ └── get_annots.py ├── prepare_warping.py ├── process_snapshot.py ├── render │ ├── cam_render.py │ ├── camera.py │ ├── color.fs │ ├── color.vs │ ├── color_render.py │ ├── framework.py │ ├── glm.py │ ├── quad.fs │ ├── quad.vs │ └── render.py ├── render_mesh.py ├── snapshot_smpl │ ├── renderer.py │ ├── smpl.py │ └── vendor │ │ ├── __init__.py │ │ └── smpl │ │ ├── __init__.py │ │ ├── lbs.py │ │ ├── posemapper.py │ │ ├── serialization.py │ │ └── verts.py └── vis_snapshot.py ├── train.sh ├── train_net.py ├── visualize.sh └── zju_smpl ├── cfg_model.yml ├── easymocap_to_neuralbody.py ├── example.json ├── extract_vertices.py └── smplmodel ├── body_model.py └── lbs.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .idea/ 3 | .ipynb_checkpoints/ 4 | *.py[cod] 5 | *.so 6 | *.orig 7 | *.o 8 | *.json 9 | *.pth 10 | *.npy 11 | *.ipynb 12 | /data/ 13 | -------------------------------------------------------------------------------- /INSTALL.md: -------------------------------------------------------------------------------- 1 | ### Set up the python environment 2 | 3 | ``` 4 | conda create -n neuralbody python=3.7 5 | conda activate neuralbody 6 | 7 | # make sure that the PyTorch CUDA version is consistent with the system CUDA version 8 | # e.g., if your system CUDA is 10.0, install torch 1.4 built for CUDA 10.0 9 | pip install torch==1.4.0+cu100 -f https://download.pytorch.org/whl/torch_stable.html 10 | 11 | pip install -r requirements.txt 12 | 13 | # install spconv 14 | cd 15 | git clone https://github.com/traveller59/spconv --recursive 16 | cd spconv 17 | git checkout abf0acf30f5526ea93e687e3f424f62d9cd8313a 18 | git submodule update --init --recursive 19 | export CUDA_HOME="/usr/local/cuda-10.0" 20 | python setup.py bdist_wheel 21 | cd dist 22 | pip install spconv-1.2.1-cp37-cp37m-linux_x86_64.whl  # the wheel name depends on your Python version; with python=3.7 the build produces a cp37 wheel 23 | ``` 24 | 25 | ### Set up datasets 26 | 27 | #### People-Snapshot dataset 28 | 29 | 1. Download the People-Snapshot dataset [here](https://graphics.tu-bs.de/people-snapshot). 30 | 2. Process the People-Snapshot dataset using the [script](https://github.com/zju3dv/neuralbody#process-people-snapshot). 31 | 3. Create a soft link: 32 | ``` 33 | ROOT=/path/to/neuralbody 34 | cd $ROOT/data 35 | ln -s /path/to/people_snapshot people_snapshot 36 | ``` 37 | 38 | #### ZJU-MoCap dataset 39 | 40 | 1. To download the ZJU-MoCap dataset, fill in the [form](https://docs.google.com/forms/d/1QcTp5qIbIBn8PCT-EQgG-fOB4HZ9khpRkT3q2OnH2bs) to obtain the download link. Alternatively, fill in the [agreement](https://pengsida.net/project_page_assets/files/ZJU-MoCap_Agreement.pdf) and email me (pengsida@zju.edu.cn), cc'ing Xiaowei Zhou (xwzhou@zju.edu.cn), to request the download link. 41 | 2. 
Create a soft link: 42 | ``` 43 | ROOT=/path/to/neuralbody 44 | cd $ROOT/data 45 | ln -s /path/to/zju_mocap zju_mocap 46 | ``` 47 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | //////////////////////////////////////////////////////////////////////////// 2 | // Copyright 2020-2021 the 3D Vision Group at the State Key Lab of CAD&CG, 3 | // Zhejiang University. All Rights Reserved. 4 | // 5 | // For more information see 6 | // If you use this code, please cite the corresponding publications as 7 | // listed on the above website. 8 | // 9 | // Permission to use, copy, modify and distribute this software and its 10 | // documentation for educational, research and non-profit purposes only. 11 | // Any modification based on this work must be open source and prohibited 12 | // for commercial use. 13 | // You must retain, in the source form of any derivative works that you 14 | // distribute, all copyright, patent, trademark, and attribution notices 15 | // from the source form of this work. 16 | // 17 | // 18 | //////////////////////////////////////////////////////////////////////////// 19 | -------------------------------------------------------------------------------- /configs/default.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zju3dv/neuralbody/3c516b953477006a3d1a7311eb4d51438c982c33/configs/default.yaml -------------------------------------------------------------------------------- /configs/h36m_exp/latent_xyzc_s11g.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/h36m/S11/Greeting' 8 | human: 'S11' 9 | ann_file: 'data/h36m/S11/Greeting/annots.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/h36m/S11/Greeting' 14 | human: 'S11' 15 | ann_file: 'data/h36m/S11/Greeting/annots.npy' 16 | split: 'test' 17 | 18 | # data options 19 | H: 1002 20 | W: 1000 21 | ratio: 1. 22 | training_view: [0, 1, 2, 3] 23 | begin_ith_frame: 1200 24 | num_train_frame: 400 25 | smpl: 'smpl' 26 | vertices: 'vertices' 27 | params: 'params' 28 | big_box: True 29 | -------------------------------------------------------------------------------- /configs/h36m_exp/latent_xyzc_s11p.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/h36m/S11/Posing' 8 | human: 'S11' 9 | ann_file: 'data/h36m/S11/Posing/annots.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/h36m/S11/Posing' 14 | human: 'S11' 15 | ann_file: 'data/h36m/S11/Posing/annots.npy' 16 | split: 'test' 17 | 18 | # data options 19 | H: 1002 20 | W: 1000 21 | ratio: 1. 
22 | training_view: [0, 1, 2] 23 | begin_ith_frame: 0 24 | num_train_frame: 200 25 | frame_interval: 5 26 | smpl: 'smpl' 27 | vertices: 'new_vertices' 28 | params: 'new_params' 29 | big_box: True 30 | -------------------------------------------------------------------------------- /configs/h36m_exp/latent_xyzc_s1p.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/h36m/S1/Posing' 8 | human: 'S1' 9 | ann_file: 'data/h36m/S1/Posing/annots.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/h36m/S1/Posing' 14 | human: 'S1' 15 | ann_file: 'data/h36m/S1/Posing/annots.npy' 16 | split: 'test' 17 | 18 | # data options 19 | H: 1002 20 | W: 1000 21 | ratio: 1. 22 | training_view: [0, 1, 2] 23 | begin_ith_frame: 0 24 | num_train_frame: 150 25 | frame_interval: 5 26 | smpl: 'smpl' 27 | vertices: 'new_vertices' 28 | params: 'new_params' 29 | big_box: True 30 | -------------------------------------------------------------------------------- /configs/h36m_exp/latent_xyzc_s5p.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/h36m/S5/Posing' 8 | human: 'S5' 9 | ann_file: 'data/h36m/S5/Posing/annots.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/h36m/S5/Posing' 14 | human: 'S5' 15 | ann_file: 'data/h36m/S5/Posing/annots.npy' 16 | split: 'test' 17 | 18 | # data options 19 | H: 1002 20 | W: 1000 21 | ratio: 1. 22 | training_view: [0, 1, 2] 23 | begin_ith_frame: 0 24 | num_train_frame: 250 25 | frame_interval: 5 26 | smpl: 'smpl' 27 | vertices: 'new_vertices' 28 | params: 'new_params' 29 | big_box: True 30 | -------------------------------------------------------------------------------- /configs/h36m_exp/latent_xyzc_s6p.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/h36m/S6/Posing' 8 | human: 'S6' 9 | ann_file: 'data/h36m/S6/Posing/annots.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/h36m/S6/Posing' 14 | human: 'S6' 15 | ann_file: 'data/h36m/S6/Posing/annots.npy' 16 | split: 'test' 17 | 18 | # data options 19 | H: 1002 20 | W: 1000 21 | ratio: 1. 22 | training_view: [0, 1, 2] 23 | begin_ith_frame: 0 24 | num_train_frame: 150 25 | frame_interval: 5 26 | smpl: 'smpl' 27 | vertices: 'new_vertices' 28 | params: 'new_params' 29 | big_box: True 30 | -------------------------------------------------------------------------------- /configs/h36m_exp/latent_xyzc_s7p.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/h36m/S7/Posing' 8 | human: 'S7' 9 | ann_file: 'data/h36m/S7/Posing/annots.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/h36m/S7/Posing' 14 | human: 'S7' 15 | ann_file: 'data/h36m/S7/Posing/annots.npy' 16 | split: 'test' 17 | 18 | # data options 19 | H: 1002 20 | W: 1000 21 | ratio: 1. 
22 | training_view: [0, 1, 2] 23 | begin_ith_frame: 0 24 | num_train_frame: 300 25 | frame_interval: 5 26 | smpl: 'smpl' 27 | vertices: 'new_vertices' 28 | params: 'new_params' 29 | big_box: True 30 | -------------------------------------------------------------------------------- /configs/h36m_exp/latent_xyzc_s8p.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/h36m/S8/Posing' 8 | human: 'S8' 9 | ann_file: 'data/h36m/S8/Posing/annots.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/h36m/S8/Posing' 14 | human: 'S8' 15 | ann_file: 'data/h36m/S8/Posing/annots.npy' 16 | split: 'test' 17 | 18 | # data options 19 | H: 1002 20 | W: 1000 21 | ratio: 1. 22 | training_view: [0, 1, 2] 23 | begin_ith_frame: 0 24 | num_train_frame: 250 25 | frame_interval: 5 26 | smpl: 'smpl' 27 | vertices: 'new_vertices' 28 | params: 'new_params' 29 | big_box: True 30 | -------------------------------------------------------------------------------- /configs/h36m_exp/latent_xyzc_s9p.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/h36m/S9/Posing' 8 | human: 'S9' 9 | ann_file: 'data/h36m/S9/Posing/annots.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/h36m/S9/Posing' 14 | human: 'S9' 15 | ann_file: 'data/h36m/S9/Posing/annots.npy' 16 | split: 'test' 17 | 18 | # data options 19 | H: 1002 20 | W: 1000 21 | ratio: 1. 22 | training_view: [0, 1, 2] 23 | begin_ith_frame: 0 24 | num_train_frame: 260 25 | frame_interval: 5 26 | smpl: 'smpl' 27 | vertices: 'new_vertices' 28 | params: 'new_params' 29 | big_box: True 30 | -------------------------------------------------------------------------------- /configs/monocular_custom.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/snapshot_exp/snapshot_f3c.yaml' 5 | 6 | train_dataset: 7 | data_root: 'path/to/custom_data' 8 | human: 'custom' 9 | ann_file: 'path/to/custom_data/params.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'path/to/custom_data' 14 | human: 'custom' 15 | ann_file: 'path/to/custom_data/params.npy' 16 | split: 'test' 17 | 18 | # data options 19 | ratio: 1. 20 | training_view: [0, 6, 12, 18] 21 | num_train_frame: 300 22 | smpl: 'smpl' 23 | vertices: 'vertices' 24 | params: 'params' 25 | big_box: True 26 | -------------------------------------------------------------------------------- /configs/multi_view_custom.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | train_dataset: 7 | data_root: 'path/to/custom_data' 8 | human: 'custom' 9 | ann_file: 'path/to/custom_data/annots.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'path/to/custom_data' 14 | human: 'custom' 15 | ann_file: 'path/to/custom_data/annots.npy' 16 | split: 'test' 17 | 18 | # data options 19 | ratio: 1. 
20 | training_view: [0, 6, 12, 18] 21 | num_train_frame: 300 22 | smpl: 'smpl' 23 | vertices: 'vertices' 24 | params: 'params' 25 | big_box: True 26 | -------------------------------------------------------------------------------- /configs/nerf/nerf_313.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.multi_view_dataset' 5 | train_dataset_path: 'lib/datasets/light_stage/multi_view_dataset.py' 6 | test_dataset_module: 'lib.datasets.light_stage.multi_view_dataset' 7 | test_dataset_path: 'lib/datasets/light_stage/multi_view_dataset.py' 8 | 9 | network_module: 'lib.networks.nerf' 10 | network_path: 'lib/networks/nerf.py' 11 | renderer_module: 'lib.networks.renderer.volume_renderer' 12 | renderer_path: 'lib/networks/renderer/volume_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.nerf' 15 | trainer_path: 'lib/train/trainers/nerf.py' 16 | 17 | evaluator_module: 'lib.evaluators.if_nerf' 18 | evaluator_path: 'lib/evaluators/if_nerf.py' 19 | 20 | visualizer_module: 'lib.visualizers.if_nerf' 21 | visualizer_path: 'lib/visualizers/if_nerf.py' 22 | 23 | human: 313 24 | 25 | train_dataset: 26 | data_root: 'data/zju_mocap/CoreView_313' 27 | human: 'CoreView_313' 28 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 29 | split: 'train' 30 | 31 | test_dataset: 32 | data_root: 'data/zju_mocap/CoreView_313' 33 | human: 'CoreView_313' 34 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 35 | split: 'test' 36 | 37 | train: 38 | batch_size: 1 39 | collator: '' 40 | lr: 5e-4 41 | weight_decay: 0 42 | epoch: 400 43 | scheduler: 44 | type: 'exponential' 45 | gamma: 0.1 46 | decay_epochs: 1000 47 | num_workers: 16 48 | 49 | test: 50 | sampler: 'FrameSampler' 51 | batch_size: 1 52 | collator: '' 53 | 54 | ep_iter: 500 55 | save_ep: 1000 56 | eval_ep: 1000 57 | 58 | # training options 59 | netdepth: 8 60 | netwidth: 256 61 | netdepth_fine: 8 62 | netwidth_fine: 256 63 | netchunk: 65536 64 | chunk: 32768 65 | 66 | no_batching: True 67 | 68 | # rendering options 69 | use_viewdirs: True 70 | i_embed: 0 71 | xyz_res: 10 72 | view_res: 4 73 | raw_noise_std: 0 74 | lindisp: False 75 | 76 | N_samples: 64 77 | N_importance: 128 78 | N_rand: 1024 79 | 80 | perturb: 1 81 | white_bkgd: False 82 | 83 | num_render_views: 50 84 | 85 | # data options 86 | ratio: 0.5 87 | num_train_frame: 1 88 | smpl: 'smpl' 89 | params: 'params' 90 | 91 | voxel_size: [0.005, 0.005, 0.005] # dhw 92 | 93 | # record options 94 | log_interval: 1 95 | 96 | 97 | novel_view_cfg: 98 | train_dataset_module: 'lib.datasets.light_stage.multi_view_demo_dataset' 99 | train_dataset_path: 'lib/datasets/light_stage/multi_view_demo_dataset.py' 100 | test_dataset_module: 'lib.datasets.light_stage.multi_view_demo_dataset' 101 | test_dataset_path: 'lib/datasets/light_stage/multi_view_demo_dataset.py' 102 | 103 | renderer_module: 'lib.networks.renderer.volume_renderer' 104 | renderer_path: 'lib/networks/renderer/volume_renderer.py' 105 | 106 | visualizer_module: 'lib.visualizers.if_nerf_demo' 107 | visualizer_path: 'lib/visualizers/if_nerf_demo.py' 108 | 109 | test: 110 | sampler: '' 111 | 112 | novel_pose_cfg: 113 | train_dataset_module: 'lib.datasets.light_stage.multi_view_perform_dataset' 114 | train_dataset_path: 'lib/datasets/light_stage/multi_view_perform_dataset.py' 115 | test_dataset_module: 'lib.datasets.light_stage.multi_view_perform_dataset' 116 | test_dataset_path: 
'lib/datasets/light_stage/multi_view_perform_dataset.py' 117 | 118 | renderer_module: 'lib.networks.renderer.volume_renderer' 119 | renderer_path: 'lib/networks/renderer/volume_renderer.py' 120 | 121 | visualizer_module: 'lib.visualizers.if_nerf_perform' 122 | visualizer_path: 'lib/visualizers/if_nerf_perform.py' 123 | 124 | test: 125 | sampler: '' 126 | 127 | mesh_cfg: 128 | train_dataset_module: 'lib.datasets.light_stage.multi_view_mesh_dataset' 129 | train_dataset_path: 'lib/datasets/light_stage/multi_view_mesh_dataset.py' 130 | test_dataset_module: 'lib.datasets.light_stage.multi_view_mesh_dataset' 131 | test_dataset_path: 'lib/datasets/light_stage/multi_view_mesh_dataset.py' 132 | 133 | network_module: 'lib.networks.latent_xyzc' 134 | network_path: 'lib/networks/latent_xyzc.py' 135 | renderer_module: 'lib.networks.renderer.volume_mesh_renderer' 136 | renderer_path: 'lib/networks/renderer/volume_mesh_renderer.py' 137 | 138 | visualizer_module: 'lib.visualizers.if_nerf_mesh' 139 | visualizer_path: 'lib/visualizers/if_nerf_mesh.py' 140 | 141 | mesh_th: 5 142 | 143 | test: 144 | sampler: 'FrameSampler' 145 | frame_sampler_interval: 1 146 | -------------------------------------------------------------------------------- /configs/nerf/nerf_315.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/nerf/nerf_313.yaml' 5 | 6 | human: 315 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_315' 10 | human: 'CoreView_315' 11 | ann_file: 'data/zju_mocap/CoreView_315/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_315' 16 | human: 'CoreView_315' 17 | ann_file: 'data/zju_mocap/CoreView_315/annots.npy' 18 | split: 'test' 19 | -------------------------------------------------------------------------------- /configs/nerf/nerf_377.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/nerf/nerf_313.yaml' 5 | 6 | human: 377 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_377' 10 | human: 'CoreView_377' 11 | ann_file: 'data/zju_mocap/CoreView_377/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_377' 16 | human: 'CoreView_377' 17 | ann_file: 'data/zju_mocap/CoreView_377/annots.npy' 18 | split: 'test' 19 | -------------------------------------------------------------------------------- /configs/nerf/nerf_386.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/nerf/nerf_313.yaml' 5 | 6 | human: 386 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_386' 10 | human: 'CoreView_386' 11 | ann_file: 'data/zju_mocap/CoreView_386/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_386' 16 | human: 'CoreView_386' 17 | ann_file: 'data/zju_mocap/CoreView_386/annots.npy' 18 | split: 'test' 19 | -------------------------------------------------------------------------------- /configs/nerf/nerf_387.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/nerf/nerf_313.yaml' 5 | 6 | human: 387 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_387' 10 | human: 'CoreView_387' 11 | ann_file: 'data/zju_mocap/CoreView_387/annots.npy' 12 | split: 'train' 13 | 14 
| test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_387' 16 | human: 'CoreView_387' 17 | ann_file: 'data/zju_mocap/CoreView_387/annots.npy' 18 | split: 'test' 19 | -------------------------------------------------------------------------------- /configs/nerf/nerf_390.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/nerf/nerf_313.yaml' 5 | 6 | human: 390 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_390' 10 | human: 'CoreView_390' 11 | ann_file: 'data/zju_mocap/CoreView_390/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_390' 16 | human: 'CoreView_390' 17 | ann_file: 'data/zju_mocap/CoreView_390/annots.npy' 18 | split: 'test' 19 | -------------------------------------------------------------------------------- /configs/nerf/nerf_392.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/nerf/nerf_313.yaml' 5 | 6 | human: 392 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_392' 10 | human: 'CoreView_392' 11 | ann_file: 'data/zju_mocap/CoreView_392/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_392' 16 | human: 'CoreView_392' 17 | ann_file: 'data/zju_mocap/CoreView_392/annots.npy' 18 | split: 'test' 19 | -------------------------------------------------------------------------------- /configs/nerf/nerf_393.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/nerf/nerf_313.yaml' 5 | 6 | human: 393 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_393' 10 | human: 'CoreView_393' 11 | ann_file: 'data/zju_mocap/CoreView_393/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_393' 16 | human: 'CoreView_393' 17 | ann_file: 'data/zju_mocap/CoreView_393/annots.npy' 18 | split: 'test' 19 | -------------------------------------------------------------------------------- /configs/nerf/nerf_394.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/nerf/nerf_313.yaml' 5 | 6 | human: 394 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_394' 10 | human: 'CoreView_394' 11 | ann_file: 'data/zju_mocap/CoreView_394/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_394' 16 | human: 'CoreView_394' 17 | ann_file: 'data/zju_mocap/CoreView_394/annots.npy' 18 | split: 'test' 19 | -------------------------------------------------------------------------------- /configs/neural_volumes/neural_volumes_313.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.can_smpl' 5 | train_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 6 | test_dataset_module: 'lib.datasets.light_stage.can_smpl' 7 | test_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 8 | 9 | network_module: 'lib.networks.latent_xyzc' 10 | network_path: 'lib/networks/latent_xyzc.py' 11 | renderer_module: 'lib.networks.renderer.if_clight_renderer' 12 | renderer_path: 'lib/networks/renderer/if_clight_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.if_nerf_clight' 15 | trainer_path: 
'lib/train/trainers/if_nerf_clight.py' 16 | 17 | evaluator_module: 'lib.evaluators.neural_volume' 18 | evaluator_path: 'lib/evaluators/neural_volume.py' 19 | 20 | visualizer_module: 'lib.visualizers.if_nerf' 21 | visualizer_path: 'lib/visualizers/if_nerf.py' 22 | 23 | human: 313 24 | 25 | train: 26 | dataset: Human313_0001_Train 27 | batch_size: 1 28 | collator: '' 29 | lr: 5e-4 30 | weight_decay: 0 31 | epoch: 400 32 | scheduler: 33 | type: 'exponential' 34 | gamma: 0.1 35 | decay_epochs: 1000 36 | num_workers: 16 37 | 38 | test: 39 | dataset: Human313_0001_Test 40 | sampler: 'FrameSampler' 41 | batch_size: 1 42 | collator: '' 43 | 44 | ep_iter: 500 45 | save_ep: 1000 46 | eval_ep: 1000 47 | 48 | # training options 49 | netdepth: 8 50 | netwidth: 256 51 | netdepth_fine: 8 52 | netwidth_fine: 256 53 | netchunk: 65536 54 | chunk: 32768 55 | 56 | no_batching: True 57 | 58 | precrop_iters: 500 59 | precrop_frac: 0.5 60 | 61 | # network options 62 | point_feature: 6 63 | 64 | # rendering options 65 | use_viewdirs: True 66 | i_embed: 0 67 | xyz_res: 10 68 | view_res: 4 69 | raw_noise_std: 0 70 | 71 | N_samples: 64 72 | N_importance: 128 73 | N_rand: 1024 74 | 75 | near: 1 76 | far: 3 77 | 78 | perturb: 1 79 | white_bkgd: False 80 | 81 | render_views: 50 82 | 83 | # data options 84 | res: 256 85 | ratio: 0.5 86 | intv: 6 87 | ni: 60 88 | smpl: 'smpl' 89 | params: 'params' 90 | 91 | voxel_size: [0.005, 0.005, 0.005] # dhw 92 | 93 | # record options 94 | log_interval: 1 95 | -------------------------------------------------------------------------------- /configs/neural_volumes/neural_volumes_315.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.can_smpl' 5 | train_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 6 | test_dataset_module: 'lib.datasets.light_stage.can_smpl' 7 | test_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 8 | 9 | network_module: 'lib.networks.latent_xyzc' 10 | network_path: 'lib/networks/latent_xyzc.py' 11 | renderer_module: 'lib.networks.renderer.if_clight_renderer' 12 | renderer_path: 'lib/networks/renderer/if_clight_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.if_nerf_clight' 15 | trainer_path: 'lib/train/trainers/if_nerf_clight.py' 16 | 17 | evaluator_module: 'lib.evaluators.neural_volume' 18 | evaluator_path: 'lib/evaluators/neural_volume.py' 19 | 20 | visualizer_module: 'lib.visualizers.if_nerf' 21 | visualizer_path: 'lib/visualizers/if_nerf.py' 22 | 23 | human: 315 24 | 25 | train: 26 | dataset: Human315_0001_Train 27 | batch_size: 1 28 | collator: '' 29 | lr: 5e-4 30 | weight_decay: 0 31 | epoch: 400 32 | scheduler: 33 | type: 'exponential' 34 | gamma: 0.1 35 | decay_epochs: 1000 36 | num_workers: 16 37 | 38 | test: 39 | dataset: Human315_0001_Test 40 | sampler: 'FrameSampler' 41 | batch_size: 1 42 | collator: '' 43 | 44 | ep_iter: 500 45 | save_ep: 1000 46 | eval_ep: 1000 47 | 48 | # training options 49 | netdepth: 8 50 | netwidth: 256 51 | netdepth_fine: 8 52 | netwidth_fine: 256 53 | netchunk: 65536 54 | chunk: 32768 55 | 56 | no_batching: True 57 | 58 | precrop_iters: 500 59 | precrop_frac: 0.5 60 | 61 | # network options 62 | point_feature: 6 63 | 64 | # rendering options 65 | use_viewdirs: True 66 | i_embed: 0 67 | xyz_res: 10 68 | view_res: 4 69 | raw_noise_std: 0 70 | 71 | N_samples: 64 72 | N_importance: 128 73 | N_rand: 1024 74 | 75 | near: 1 76 | far: 3 77 | 78 | perturb: 1 79 | white_bkgd: False 80 | 81 | 
render_views: 50 82 | 83 | # data options 84 | res: 256 85 | ratio: 0.5 86 | intv: 6 87 | ni: 400 88 | smpl: 'smpl' 89 | params: 'params' 90 | 91 | voxel_size: [0.005, 0.005, 0.005] # dhw 92 | 93 | # record options 94 | log_interval: 1 95 | -------------------------------------------------------------------------------- /configs/neural_volumes/neural_volumes_377.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.can_smpl' 5 | train_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 6 | test_dataset_module: 'lib.datasets.light_stage.can_smpl' 7 | test_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 8 | 9 | network_module: 'lib.networks.latent_xyzc' 10 | network_path: 'lib/networks/latent_xyzc.py' 11 | renderer_module: 'lib.networks.renderer.if_clight_renderer' 12 | renderer_path: 'lib/networks/renderer/if_clight_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.if_nerf_clight' 15 | trainer_path: 'lib/train/trainers/if_nerf_clight.py' 16 | 17 | evaluator_module: 'lib.evaluators.neural_volume' 18 | evaluator_path: 'lib/evaluators/neural_volume.py' 19 | 20 | visualizer_module: 'lib.visualizers.if_nerf' 21 | visualizer_path: 'lib/visualizers/if_nerf.py' 22 | 23 | human: 377 24 | 25 | train: 26 | dataset: Human377_0001_Train 27 | batch_size: 1 28 | collator: '' 29 | lr: 5e-4 30 | weight_decay: 0 31 | epoch: 400 32 | scheduler: 33 | type: 'exponential' 34 | gamma: 0.1 35 | decay_epochs: 1000 36 | num_workers: 16 37 | 38 | test: 39 | dataset: Human377_0001_Test 40 | sampler: 'FrameSampler' 41 | batch_size: 1 42 | collator: '' 43 | 44 | ep_iter: 500 45 | save_ep: 1000 46 | eval_ep: 1000 47 | 48 | # training options 49 | netdepth: 8 50 | netwidth: 256 51 | netdepth_fine: 8 52 | netwidth_fine: 256 53 | netchunk: 65536 54 | chunk: 32768 55 | 56 | no_batching: True 57 | 58 | precrop_iters: 500 59 | precrop_frac: 0.5 60 | 61 | # network options 62 | point_feature: 6 63 | 64 | # rendering options 65 | use_viewdirs: True 66 | i_embed: 0 67 | xyz_res: 10 68 | view_res: 4 69 | raw_noise_std: 0 70 | 71 | N_samples: 64 72 | N_importance: 128 73 | N_rand: 1024 74 | 75 | near: 1 76 | far: 3 77 | 78 | perturb: 1 79 | white_bkgd: False 80 | 81 | render_views: 50 82 | 83 | # data options 84 | res: 256 85 | ratio: 0.5 86 | intv: 6 87 | ni: 300 88 | smpl: 'smpl' 89 | params: 'params' 90 | 91 | voxel_size: [0.005, 0.005, 0.005] # dhw 92 | 93 | # record options 94 | log_interval: 1 95 | -------------------------------------------------------------------------------- /configs/neural_volumes/neural_volumes_386.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.can_smpl' 5 | train_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 6 | test_dataset_module: 'lib.datasets.light_stage.can_smpl' 7 | test_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 8 | 9 | network_module: 'lib.networks.latent_xyzc' 10 | network_path: 'lib/networks/latent_xyzc.py' 11 | renderer_module: 'lib.networks.renderer.if_clight_renderer' 12 | renderer_path: 'lib/networks/renderer/if_clight_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.if_nerf_clight' 15 | trainer_path: 'lib/train/trainers/if_nerf_clight.py' 16 | 17 | evaluator_module: 'lib.evaluators.neural_volume' 18 | evaluator_path: 'lib/evaluators/neural_volume.py' 19 | 20 | visualizer_module: 
'lib.visualizers.if_nerf' 21 | visualizer_path: 'lib/visualizers/if_nerf.py' 22 | 23 | human: 386 24 | 25 | train: 26 | dataset: Human386_0001_Train 27 | batch_size: 1 28 | collator: '' 29 | lr: 5e-4 30 | weight_decay: 0 31 | epoch: 400 32 | scheduler: 33 | type: 'exponential' 34 | gamma: 0.1 35 | decay_epochs: 1000 36 | num_workers: 16 37 | 38 | test: 39 | dataset: Human386_0001_Test 40 | sampler: 'FrameSampler' 41 | batch_size: 1 42 | collator: '' 43 | 44 | ep_iter: 500 45 | save_ep: 1000 46 | eval_ep: 1000 47 | 48 | # training options 49 | netdepth: 8 50 | netwidth: 256 51 | netdepth_fine: 8 52 | netwidth_fine: 256 53 | netchunk: 65536 54 | chunk: 32768 55 | 56 | no_batching: True 57 | 58 | precrop_iters: 500 59 | precrop_frac: 0.5 60 | 61 | # network options 62 | point_feature: 6 63 | 64 | # rendering options 65 | use_viewdirs: True 66 | i_embed: 0 67 | xyz_res: 10 68 | view_res: 4 69 | raw_noise_std: 0 70 | 71 | N_samples: 64 72 | N_importance: 128 73 | N_rand: 1024 74 | 75 | near: 1 76 | far: 3 77 | 78 | perturb: 1 79 | white_bkgd: False 80 | 81 | render_views: 50 82 | 83 | # data options 84 | res: 256 85 | ratio: 0.5 86 | intv: 6 87 | ni: 300 88 | smpl: 'smpl' 89 | params: 'params' 90 | 91 | voxel_size: [0.005, 0.005, 0.005] # dhw 92 | 93 | # record options 94 | log_interval: 1 95 | -------------------------------------------------------------------------------- /configs/neural_volumes/neural_volumes_387.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.can_smpl' 5 | train_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 6 | test_dataset_module: 'lib.datasets.light_stage.can_smpl' 7 | test_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 8 | 9 | network_module: 'lib.networks.latent_xyzc' 10 | network_path: 'lib/networks/latent_xyzc.py' 11 | renderer_module: 'lib.networks.renderer.if_clight_renderer' 12 | renderer_path: 'lib/networks/renderer/if_clight_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.if_nerf_clight' 15 | trainer_path: 'lib/train/trainers/if_nerf_clight.py' 16 | 17 | evaluator_module: 'lib.evaluators.neural_volume' 18 | evaluator_path: 'lib/evaluators/neural_volume.py' 19 | 20 | visualizer_module: 'lib.visualizers.if_nerf' 21 | visualizer_path: 'lib/visualizers/if_nerf.py' 22 | 23 | human: 387 24 | 25 | train: 26 | dataset: Human387_0001_Train 27 | batch_size: 1 28 | collator: '' 29 | lr: 5e-4 30 | weight_decay: 0 31 | epoch: 400 32 | scheduler: 33 | type: 'exponential' 34 | gamma: 0.1 35 | decay_epochs: 1000 36 | num_workers: 16 37 | 38 | test: 39 | dataset: Human387_0001_Test 40 | sampler: 'FrameSampler' 41 | batch_size: 1 42 | collator: '' 43 | 44 | ep_iter: 500 45 | save_ep: 1000 46 | eval_ep: 1000 47 | 48 | # training options 49 | netdepth: 8 50 | netwidth: 256 51 | netdepth_fine: 8 52 | netwidth_fine: 256 53 | netchunk: 65536 54 | chunk: 32768 55 | 56 | no_batching: True 57 | 58 | precrop_iters: 500 59 | precrop_frac: 0.5 60 | 61 | # network options 62 | point_feature: 6 63 | 64 | # rendering options 65 | use_viewdirs: True 66 | i_embed: 0 67 | xyz_res: 10 68 | view_res: 4 69 | raw_noise_std: 0 70 | 71 | N_samples: 64 72 | N_importance: 128 73 | N_rand: 1024 74 | 75 | near: 1 76 | far: 3 77 | 78 | perturb: 1 79 | white_bkgd: False 80 | 81 | render_views: 50 82 | 83 | # data options 84 | res: 256 85 | ratio: 0.5 86 | intv: 6 87 | ni: 300 88 | smpl: 'smpl' 89 | params: 'params' 90 | 91 | voxel_size: [0.005, 0.005, 0.005] 
# dhw 92 | 93 | # record options 94 | log_interval: 1 95 | -------------------------------------------------------------------------------- /configs/neural_volumes/neural_volumes_390.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.can_smpl' 5 | train_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 6 | test_dataset_module: 'lib.datasets.light_stage.can_smpl' 7 | test_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 8 | 9 | network_module: 'lib.networks.latent_xyzc' 10 | network_path: 'lib/networks/latent_xyzc.py' 11 | renderer_module: 'lib.networks.renderer.if_clight_renderer' 12 | renderer_path: 'lib/networks/renderer/if_clight_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.if_nerf_clight' 15 | trainer_path: 'lib/train/trainers/if_nerf_clight.py' 16 | 17 | evaluator_module: 'lib.evaluators.neural_volume' 18 | evaluator_path: 'lib/evaluators/neural_volume.py' 19 | 20 | visualizer_module: 'lib.visualizers.if_nerf' 21 | visualizer_path: 'lib/visualizers/if_nerf.py' 22 | 23 | human: 390 24 | 25 | train: 26 | dataset: Human390_0001_Train 27 | batch_size: 1 28 | collator: '' 29 | lr: 5e-4 30 | weight_decay: 0 31 | epoch: 400 32 | scheduler: 33 | type: 'exponential' 34 | gamma: 0.1 35 | decay_epochs: 1000 36 | num_workers: 16 37 | 38 | test: 39 | dataset: Human390_0001_Test 40 | sampler: 'FrameSampler' 41 | batch_size: 1 42 | collator: '' 43 | 44 | ep_iter: 500 45 | save_ep: 1000 46 | eval_ep: 1000 47 | 48 | # training options 49 | netdepth: 8 50 | netwidth: 256 51 | netdepth_fine: 8 52 | netwidth_fine: 256 53 | netchunk: 65536 54 | chunk: 32768 55 | 56 | no_batching: True 57 | 58 | precrop_iters: 500 59 | precrop_frac: 0.5 60 | 61 | # network options 62 | point_feature: 6 63 | 64 | # rendering options 65 | use_viewdirs: True 66 | i_embed: 0 67 | xyz_res: 10 68 | view_res: 4 69 | raw_noise_std: 0 70 | 71 | N_samples: 64 72 | N_importance: 128 73 | N_rand: 1024 74 | 75 | near: 1 76 | far: 3 77 | 78 | perturb: 1 79 | white_bkgd: False 80 | 81 | render_views: 50 82 | 83 | # data options 84 | res: 256 85 | ratio: 0.5 86 | intv: 6 87 | begin_i: 700 88 | ni: 300 89 | smpl: 'smpl' 90 | params: 'params' 91 | 92 | voxel_size: [0.005, 0.005, 0.005] # dhw 93 | 94 | # record options 95 | log_interval: 1 96 | -------------------------------------------------------------------------------- /configs/neural_volumes/neural_volumes_392.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.can_smpl' 5 | train_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 6 | test_dataset_module: 'lib.datasets.light_stage.can_smpl' 7 | test_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 8 | 9 | network_module: 'lib.networks.latent_xyzc' 10 | network_path: 'lib/networks/latent_xyzc.py' 11 | renderer_module: 'lib.networks.renderer.if_clight_renderer' 12 | renderer_path: 'lib/networks/renderer/if_clight_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.if_nerf_clight' 15 | trainer_path: 'lib/train/trainers/if_nerf_clight.py' 16 | 17 | evaluator_module: 'lib.evaluators.neural_volume' 18 | evaluator_path: 'lib/evaluators/neural_volume.py' 19 | 20 | visualizer_module: 'lib.visualizers.if_nerf' 21 | visualizer_path: 'lib/visualizers/if_nerf.py' 22 | 23 | human: 392 24 | 25 | train: 26 | dataset: Human392_0001_Train 27 | batch_size: 1 28 | collator: 
'' 29 | lr: 5e-4 30 | weight_decay: 0 31 | epoch: 400 32 | scheduler: 33 | type: 'exponential' 34 | gamma: 0.1 35 | decay_epochs: 1000 36 | num_workers: 16 37 | 38 | test: 39 | dataset: Human392_0001_Test 40 | sampler: 'FrameSampler' 41 | batch_size: 1 42 | collator: '' 43 | 44 | ep_iter: 500 45 | save_ep: 1000 46 | eval_ep: 1000 47 | 48 | # training options 49 | netdepth: 8 50 | netwidth: 256 51 | netdepth_fine: 8 52 | netwidth_fine: 256 53 | netchunk: 65536 54 | chunk: 32768 55 | 56 | no_batching: True 57 | 58 | precrop_iters: 500 59 | precrop_frac: 0.5 60 | 61 | # network options 62 | point_feature: 6 63 | 64 | # rendering options 65 | use_viewdirs: True 66 | i_embed: 0 67 | xyz_res: 10 68 | view_res: 4 69 | raw_noise_std: 0 70 | 71 | N_samples: 64 72 | N_importance: 128 73 | N_rand: 1024 74 | 75 | near: 1 76 | far: 3 77 | 78 | perturb: 1 79 | white_bkgd: False 80 | 81 | render_views: 50 82 | 83 | # data options 84 | res: 256 85 | ratio: 0.5 86 | intv: 6 87 | ni: 300 88 | smpl: 'smpl' 89 | params: 'params' 90 | 91 | voxel_size: [0.005, 0.005, 0.005] # dhw 92 | 93 | # record options 94 | log_interval: 1 95 | -------------------------------------------------------------------------------- /configs/neural_volumes/neural_volumes_393.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.can_smpl' 5 | train_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 6 | test_dataset_module: 'lib.datasets.light_stage.can_smpl' 7 | test_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 8 | 9 | network_module: 'lib.networks.latent_xyzc' 10 | network_path: 'lib/networks/latent_xyzc.py' 11 | renderer_module: 'lib.networks.renderer.if_clight_renderer' 12 | renderer_path: 'lib/networks/renderer/if_clight_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.if_nerf_clight' 15 | trainer_path: 'lib/train/trainers/if_nerf_clight.py' 16 | 17 | evaluator_module: 'lib.evaluators.neural_volume' 18 | evaluator_path: 'lib/evaluators/neural_volume.py' 19 | 20 | visualizer_module: 'lib.visualizers.if_nerf' 21 | visualizer_path: 'lib/visualizers/if_nerf.py' 22 | 23 | human: 393 24 | 25 | train: 26 | dataset: Human393_0001_Train 27 | batch_size: 1 28 | collator: '' 29 | lr: 5e-4 30 | weight_decay: 0 31 | epoch: 400 32 | scheduler: 33 | type: 'exponential' 34 | gamma: 0.1 35 | decay_epochs: 1000 36 | num_workers: 16 37 | 38 | test: 39 | dataset: Human393_0001_Test 40 | sampler: 'FrameSampler' 41 | batch_size: 1 42 | collator: '' 43 | 44 | ep_iter: 500 45 | save_ep: 1000 46 | eval_ep: 1000 47 | 48 | # training options 49 | netdepth: 8 50 | netwidth: 256 51 | netdepth_fine: 8 52 | netwidth_fine: 256 53 | netchunk: 65536 54 | chunk: 32768 55 | 56 | no_batching: True 57 | 58 | precrop_iters: 500 59 | precrop_frac: 0.5 60 | 61 | # network options 62 | point_feature: 6 63 | 64 | # rendering options 65 | use_viewdirs: True 66 | i_embed: 0 67 | xyz_res: 10 68 | view_res: 4 69 | raw_noise_std: 0 70 | 71 | N_samples: 64 72 | N_importance: 128 73 | N_rand: 1024 74 | 75 | near: 1 76 | far: 3 77 | 78 | perturb: 1 79 | white_bkgd: False 80 | 81 | render_views: 50 82 | 83 | # data options 84 | res: 256 85 | ratio: 0.5 86 | intv: 6 87 | ni: 300 88 | smpl: 'smpl' 89 | params: 'params' 90 | 91 | voxel_size: [0.005, 0.005, 0.005] # dhw 92 | 93 | # record options 94 | log_interval: 1 95 | -------------------------------------------------------------------------------- 
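Most experiment configs in this listing (the h36m_exp, snapshot_exp, and zju_mocap_exp files) declare only a parent_cfg plus a handful of overridden keys, while the nerf and neural_volumes configs spell out every option. Below is a minimal sketch of how such parent_cfg inheritance is typically resolved: load the child YAML, recursively load its parent, and let child keys win. It is illustrative only; the repository's actual logic lives in lib/config/config.py and lib/config/yacs.py and may differ, and the names deep_merge and load_cfg are assumptions rather than functions from the codebase.

```
# Sketch of parent_cfg resolution (assumed behaviour, not the repo's lib/config code).
import yaml


def deep_merge(parent, child):
    # Child keys override parent keys; nested dicts are merged recursively.
    merged = dict(parent)
    for key, value in child.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged


def load_cfg(path):
    # Load a config file, resolving its parent_cfg chain first.
    with open(path) as f:
        cfg = yaml.safe_load(f)
    parent = cfg.pop('parent_cfg', None)
    if parent is not None:
        cfg = deep_merge(load_cfg(parent), cfg)
    return cfg


# Example: configs/zju_mocap_exp/latent_xyzc_315.yaml overrides only the dataset
# paths and num_train_frame; everything else comes from latent_xyzc_313.yaml.
# cfg = load_cfg('configs/zju_mocap_exp/latent_xyzc_315.yaml')
```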
/configs/neural_volumes/neural_volumes_394.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.can_smpl' 5 | train_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 6 | test_dataset_module: 'lib.datasets.light_stage.can_smpl' 7 | test_dataset_path: 'lib/datasets/light_stage/can_smpl.py' 8 | 9 | network_module: 'lib.networks.latent_xyzc' 10 | network_path: 'lib/networks/latent_xyzc.py' 11 | renderer_module: 'lib.networks.renderer.if_clight_renderer' 12 | renderer_path: 'lib/networks/renderer/if_clight_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.if_nerf_clight' 15 | trainer_path: 'lib/train/trainers/if_nerf_clight.py' 16 | 17 | evaluator_module: 'lib.evaluators.neural_volume' 18 | evaluator_path: 'lib/evaluators/neural_volume.py' 19 | 20 | visualizer_module: 'lib.visualizers.if_nerf' 21 | visualizer_path: 'lib/visualizers/if_nerf.py' 22 | 23 | human: 394 24 | 25 | train: 26 | dataset: Human394_0001_Train 27 | batch_size: 1 28 | collator: '' 29 | lr: 5e-4 30 | weight_decay: 0 31 | epoch: 400 32 | scheduler: 33 | type: 'exponential' 34 | gamma: 0.1 35 | decay_epochs: 1000 36 | num_workers: 16 37 | 38 | test: 39 | dataset: Human394_0001_Test 40 | sampler: 'FrameSampler' 41 | batch_size: 1 42 | collator: '' 43 | 44 | ep_iter: 500 45 | save_ep: 1000 46 | eval_ep: 1000 47 | 48 | # training options 49 | netdepth: 8 50 | netwidth: 256 51 | netdepth_fine: 8 52 | netwidth_fine: 256 53 | netchunk: 65536 54 | chunk: 32768 55 | 56 | no_batching: True 57 | 58 | precrop_iters: 500 59 | precrop_frac: 0.5 60 | 61 | # network options 62 | point_feature: 6 63 | 64 | # rendering options 65 | use_viewdirs: True 66 | i_embed: 0 67 | xyz_res: 10 68 | view_res: 4 69 | raw_noise_std: 0 70 | 71 | N_samples: 64 72 | N_importance: 128 73 | N_rand: 1024 74 | 75 | near: 1 76 | far: 3 77 | 78 | perturb: 1 79 | white_bkgd: False 80 | 81 | render_views: 50 82 | 83 | # data options 84 | res: 256 85 | ratio: 0.5 86 | intv: 6 87 | ni: 300 88 | smpl: 'smpl' 89 | params: 'params' 90 | 91 | voxel_size: [0.005, 0.005, 0.005] # dhw 92 | 93 | # record options 94 | log_interval: 1 95 | -------------------------------------------------------------------------------- /configs/snapshot_exp/snapshot_f1c.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/snapshot_exp/snapshot_f3c.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/people_snapshot/female-1-casual' 8 | human: 'female-1-casual' 9 | ann_file: 'data/people_snapshot/female-1-casual/params.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/people_snapshot/female-1-casual' 14 | human: 'female-1-casual' 15 | ann_file: 'data/people_snapshot/female-1-casual/params.npy' 16 | split: 'test' 17 | 18 | # data options 19 | ratio: 1. 
20 | num_train_frame: 250 21 | -------------------------------------------------------------------------------- /configs/snapshot_exp/snapshot_f3c.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.monocular_dataset' 5 | train_dataset_path: 'lib/datasets/light_stage/monocular_dataset.py' 6 | test_dataset_module: 'lib.datasets.light_stage.monocular_dataset' 7 | test_dataset_path: 'lib/datasets/light_stage/monocular_dataset.py' 8 | 9 | network_module: 'lib.networks.latent_xyzc' 10 | network_path: 'lib/networks/latent_xyzc.py' 11 | renderer_module: 'lib.networks.renderer.if_clight_renderer' 12 | renderer_path: 'lib/networks/renderer/if_clight_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.if_nerf_clight' 15 | trainer_path: 'lib/train/trainers/if_nerf_clight.py' 16 | 17 | evaluator_module: 'lib.evaluators.if_nerf' 18 | evaluator_path: 'lib/evaluators/if_nerf.py' 19 | 20 | visualizer_module: 'lib.visualizers.if_nerf' 21 | visualizer_path: 'lib/visualizers/if_nerf.py' 22 | 23 | train_dataset: 24 | data_root: 'data/people_snapshot/female-3-casual' 25 | human: 'female-3-casual' 26 | ann_file: 'data/people_snapshot/female-3-casual/params.npy' 27 | split: 'train' 28 | 29 | test_dataset: 30 | data_root: 'data/people_snapshot/female-3-casual' 31 | human: 'female-3-casual' 32 | ann_file: 'data/people_snapshot/female-3-casual/params.npy' 33 | split: 'test' 34 | 35 | train: 36 | batch_size: 1 37 | collator: '' 38 | lr: 5e-4 39 | weight_decay: 0 40 | epoch: 400 41 | scheduler: 42 | type: 'exponential' 43 | gamma: 0.1 44 | decay_epochs: 1000 45 | num_workers: 16 46 | 47 | test: 48 | batch_size: 1 49 | collator: '' 50 | 51 | ep_iter: 500 52 | save_ep: 100 53 | eval_ep: 1000 54 | 55 | # rendering options 56 | i_embed: 0 57 | xyz_res: 10 58 | view_res: 4 59 | raw_noise_std: 0 60 | 61 | N_samples: 64 62 | N_importance: 128 63 | N_rand: 1024 64 | 65 | perturb: 1 66 | white_bkgd: False 67 | 68 | num_render_views: 50 69 | 70 | # data options 71 | H: 1080 72 | W: 1080 73 | ratio: 1. 
74 | num_train_frame: 230 75 | 76 | voxel_size: [0.005, 0.005, 0.005] # dhw 77 | 78 | # record options 79 | log_interval: 1 80 | 81 | 82 | novel_view_cfg: 83 | train_dataset_module: 'lib.datasets.light_stage.monocular_demo_dataset' 84 | train_dataset_path: 'lib/datasets/light_stage/monocular_demo_dataset.py' 85 | test_dataset_module: 'lib.datasets.light_stage.monocular_demo_dataset' 86 | test_dataset_path: 'lib/datasets/light_stage/monocular_demo_dataset.py' 87 | 88 | renderer_module: 'lib.networks.renderer.if_clight_renderer_msk' 89 | renderer_path: 'lib/networks/renderer/if_clight_renderer_msk.py' 90 | 91 | visualizer_module: 'lib.visualizers.if_nerf_demo' 92 | visualizer_path: 'lib/visualizers/if_nerf_demo.py' 93 | 94 | ratio: 0.5 95 | 96 | test: 97 | sampler: '' 98 | 99 | novel_pose_cfg: 100 | train_dataset_module: 'lib.datasets.light_stage.monocular_dataset' 101 | train_dataset_path: 'lib/datasets/light_stage/monocular_dataset.py' 102 | test_dataset_module: 'lib.datasets.light_stage.monocular_dataset' 103 | test_dataset_path: 'lib/datasets/light_stage/monocular_dataset.py' 104 | 105 | renderer_module: 'lib.networks.renderer.if_clight_renderer_msk' 106 | renderer_path: 'lib/networks/renderer/if_clight_renderer_msk.py' 107 | 108 | visualizer_module: 'lib.visualizers.if_nerf_perform' 109 | visualizer_path: 'lib/visualizers/if_nerf_perform.py' 110 | 111 | ratio: 0.5 112 | 113 | test: 114 | sampler: '' 115 | 116 | mesh_cfg: 117 | train_dataset_module: 'lib.datasets.light_stage.monocular_mesh_dataset' 118 | train_dataset_path: 'lib/datasets/light_stage/monocular_mesh_dataset.py' 119 | test_dataset_module: 'lib.datasets.light_stage.monocular_mesh_dataset' 120 | test_dataset_path: 'lib/datasets/light_stage/monocular_mesh_dataset.py' 121 | 122 | network_module: 'lib.networks.latent_xyzc' 123 | network_path: 'lib/networks/latent_xyzc.py' 124 | renderer_module: 'lib.networks.renderer.if_mesh_renderer' 125 | renderer_path: 'lib/networks/renderer/if_mesh_renderer.py' 126 | 127 | visualizer_module: 'lib.visualizers.if_nerf_mesh' 128 | visualizer_path: 'lib/visualizers/if_nerf_mesh.py' 129 | 130 | mesh_th: 5 131 | 132 | test: 133 | sampler: 'FrameSampler' 134 | frame_sampler_interval: 1 135 | -------------------------------------------------------------------------------- /configs/snapshot_exp/snapshot_f4c.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/snapshot_exp/snapshot_f3c.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/people_snapshot/female-4-casual' 8 | human: 'female-4-casual' 9 | ann_file: 'data/people_snapshot/female-4-casual/params.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/people_snapshot/female-4-casual' 14 | human: 'female-4-casual' 15 | ann_file: 'data/people_snapshot/female-4-casual/params.npy' 16 | split: 'test' 17 | 18 | # data options 19 | ratio: 1. 
20 | num_train_frame: 200 21 | begin_ith_frame: 10 22 | -------------------------------------------------------------------------------- /configs/snapshot_exp/snapshot_f6p.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/snapshot_exp/snapshot_f3c.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/people_snapshot/female-6-plaza' 8 | human: 'female-6-plaza' 9 | ann_file: 'data/people_snapshot/female-6-plaza/params.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/people_snapshot/female-6-plaza' 14 | human: 'female-6-plaza' 15 | ann_file: 'data/people_snapshot/female-6-plaza/params.npy' 16 | split: 'test' 17 | 18 | # data options 19 | ratio: 1. 20 | num_train_frame: 240 21 | -------------------------------------------------------------------------------- /configs/snapshot_exp/snapshot_f7p.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/snapshot_exp/snapshot_f3c.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/people_snapshot/female-7-plaza' 8 | human: 'female-7-plaza' 9 | ann_file: 'data/people_snapshot/female-7-plaza/params.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/people_snapshot/female-7-plaza' 14 | human: 'female-7-plaza' 15 | ann_file: 'data/people_snapshot/female-7-plaza/params.npy' 16 | split: 'test' 17 | 18 | # data options 19 | ratio: 1. 20 | num_train_frame: 185 21 | -------------------------------------------------------------------------------- /configs/snapshot_exp/snapshot_f8p.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/snapshot_exp/snapshot_f3c.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/people_snapshot/female-8-plaza' 8 | human: 'female-8-plaza' 9 | ann_file: 'data/people_snapshot/female-8-plaza/params.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/people_snapshot/female-8-plaza' 14 | human: 'female-8-plaza' 15 | ann_file: 'data/people_snapshot/female-8-plaza/params.npy' 16 | split: 'test' 17 | 18 | # data options 19 | ratio: 1. 20 | num_train_frame: 200 21 | -------------------------------------------------------------------------------- /configs/snapshot_exp/snapshot_m2c.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/snapshot_exp/snapshot_f3c.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/people_snapshot/male-2-casual' 8 | human: 'male-2-casual' 9 | ann_file: 'data/people_snapshot/male-2-casual/params.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/people_snapshot/male-2-casual' 14 | human: 'male-2-casual' 15 | ann_file: 'data/people_snapshot/male-2-casual/params.npy' 16 | split: 'test' 17 | 18 | # data options 19 | ratio: 1. 
20 | num_train_frame: 180 21 | -------------------------------------------------------------------------------- /configs/snapshot_exp/snapshot_m2o.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/snapshot_exp/snapshot_f3c.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/people_snapshot/male-2-outdoor' 8 | human: 'male-2-outdoor' 9 | ann_file: 'data/people_snapshot/male-2-outdoor/params.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/people_snapshot/male-2-outdoor' 14 | human: 'male-2-outdoor' 15 | ann_file: 'data/people_snapshot/male-2-outdoor/params.npy' 16 | split: 'test' 17 | 18 | # data options 19 | ratio: 1. 20 | num_train_frame: 150 21 | -------------------------------------------------------------------------------- /configs/snapshot_exp/snapshot_m3c.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/snapshot_exp/snapshot_f3c.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/people_snapshot/male-3-casual' 8 | human: 'male-3-casual' 9 | ann_file: 'data/people_snapshot/male-3-casual/params.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/people_snapshot/male-3-casual' 14 | human: 'male-3-casual' 15 | ann_file: 'data/people_snapshot/male-3-casual/params.npy' 16 | split: 'test' 17 | 18 | # data options 19 | ratio: 1. 20 | num_train_frame: 235 21 | -------------------------------------------------------------------------------- /configs/snapshot_exp/snapshot_m5o.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/snapshot_exp/snapshot_f3c.yaml' 5 | 6 | train_dataset: 7 | data_root: 'data/people_snapshot/male-5-outdoor' 8 | human: 'male-5-outdoor' 9 | ann_file: 'data/people_snapshot/male-5-outdoor/params.npy' 10 | split: 'train' 11 | 12 | test_dataset: 13 | data_root: 'data/people_snapshot/male-5-outdoor' 14 | human: 'male-5-outdoor' 15 | ann_file: 'data/people_snapshot/male-5-outdoor/params.npy' 16 | split: 'test' 17 | 18 | # data options 19 | ratio: 1. 
20 | num_train_frame: 295 21 | -------------------------------------------------------------------------------- /configs/zju_mocap_exp/latent_xyzc_315.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 315 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_315' 10 | human: 'CoreView_315' 11 | ann_file: 'data/zju_mocap/CoreView_315/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_315' 16 | human: 'CoreView_315' 17 | ann_file: 'data/zju_mocap/CoreView_315/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 400 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_exp/latent_xyzc_377.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 377 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_377' 10 | human: 'CoreView_377' 11 | ann_file: 'data/zju_mocap/CoreView_377/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_377' 16 | human: 'CoreView_377' 17 | ann_file: 'data/zju_mocap/CoreView_377/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 300 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_exp/latent_xyzc_386.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 386 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_386' 10 | human: 'CoreView_386' 11 | ann_file: 'data/zju_mocap/CoreView_386/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_386' 16 | human: 'CoreView_386' 17 | ann_file: 'data/zju_mocap/CoreView_386/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 300 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_exp/latent_xyzc_387.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 387 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_387' 10 | human: 'CoreView_387' 11 | ann_file: 'data/zju_mocap/CoreView_387/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_387' 16 | human: 'CoreView_387' 17 | ann_file: 'data/zju_mocap/CoreView_387/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 300 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_exp/latent_xyzc_390.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 390 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_390' 10 | human: 'CoreView_390' 11 | ann_file: 'data/zju_mocap/CoreView_390/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_390' 16 | human: 'CoreView_390' 17 | ann_file: 
'data/zju_mocap/CoreView_390/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 300 22 | begin_ith_frame: 700 23 | num_novel_pose_frame: 700 24 | -------------------------------------------------------------------------------- /configs/zju_mocap_exp/latent_xyzc_392.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 392 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_392' 10 | human: 'CoreView_392' 11 | ann_file: 'data/zju_mocap/CoreView_392/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_392' 16 | human: 'CoreView_392' 17 | ann_file: 'data/zju_mocap/CoreView_392/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 300 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_exp/latent_xyzc_393.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 393 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_393' 10 | human: 'CoreView_393' 11 | ann_file: 'data/zju_mocap/CoreView_393/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_393' 16 | human: 'CoreView_393' 17 | ann_file: 'data/zju_mocap/CoreView_393/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 300 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_exp/latent_xyzc_394.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 394 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_394' 10 | human: 'CoreView_394' 11 | ann_file: 'data/zju_mocap/CoreView_394/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_394' 16 | human: 'CoreView_394' 17 | ann_file: 'data/zju_mocap/CoreView_394/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 300 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_exp/latent_xyzc_395.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 395 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_395' 10 | human: 'CoreView_395' 11 | ann_file: 'data/zju_mocap/CoreView_395/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_395' 16 | human: 'CoreView_395' 17 | ann_file: 'data/zju_mocap/CoreView_395/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 300 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_exp/latent_xyzc_396.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 396 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_396' 10 | human: 'CoreView_396' 11 | ann_file: 'data/zju_mocap/CoreView_396/annots.npy' 12 | 
split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_396' 16 | human: 'CoreView_396' 17 | ann_file: 'data/zju_mocap/CoreView_396/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 540 22 | begin_ith_frame: 810 23 | -------------------------------------------------------------------------------- /configs/zju_mocap_exp/xyzc_rotate_demo_313.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | train_dataset_module: 'lib.datasets.light_stage.can_smpl_demo' 5 | train_dataset_path: 'lib/datasets/light_stage/can_smpl_demo.py' 6 | test_dataset_module: 'lib.datasets.light_stage.rotate_smpl' 7 | test_dataset_path: 'lib/datasets/light_stage/rotate_smpl.py' 8 | 9 | network_module: 'lib.networks.latent_xyzc' 10 | network_path: 'lib/networks/latent_xyzc.py' 11 | renderer_module: 'lib.networks.renderer.if_clight_renderer' 12 | renderer_path: 'lib/networks/renderer/if_clight_renderer.py' 13 | 14 | trainer_module: 'lib.train.trainers.if_nerf_clight' 15 | trainer_path: 'lib/train/trainers/if_nerf_clight.py' 16 | 17 | evaluator_module: 'lib.evaluators.if_nerf' 18 | evaluator_path: 'lib/evaluators/if_nerf.py' 19 | 20 | visualizer_module: 'lib.visualizers.if_nerf_demo' 21 | visualizer_path: 'lib/visualizers/if_nerf_demo.py' 22 | 23 | human: 313 24 | 25 | train: 26 | dataset: Human313_0001_Train 27 | batch_size: 1 28 | collator: '' 29 | lr: 5e-4 30 | weight_decay: 0 31 | epoch: 400 32 | scheduler: 33 | type: 'exponential' 34 | gamma: 0.1 35 | decay_epochs: 1000 36 | num_workers: 16 37 | 38 | test: 39 | dataset: Human313_0001_Test 40 | batch_size: 1 41 | collator: '' 42 | 43 | ep_iter: 500 44 | save_ep: 1000 45 | eval_ep: 1000 46 | 47 | # training options 48 | netdepth: 8 49 | netwidth: 256 50 | netdepth_fine: 8 51 | netwidth_fine: 256 52 | netchunk: 65536 53 | chunk: 32768 54 | 55 | no_batching: True 56 | 57 | precrop_iters: 500 58 | precrop_frac: 0.5 59 | 60 | # network options 61 | point_feature: 6 62 | 63 | # rendering options 64 | use_viewdirs: True 65 | i_embed: 0 66 | xyz_res: 10 67 | view_res: 4 68 | raw_noise_std: 0 69 | 70 | N_samples: 64 71 | N_importance: 128 72 | N_rand: 1024 73 | 74 | near: 1 75 | far: 3 76 | 77 | perturb: 1 78 | white_bkgd: False 79 | 80 | render_views: 50 81 | 82 | # data options 83 | res: 256 84 | ratio: 0.5 85 | intv: 6 86 | ni: 60 87 | smpl: 'smpl' 88 | params: 'params' 89 | 90 | voxel_size: [0.005, 0.005, 0.005] # dhw 91 | 92 | # record options 93 | log_interval: 1 94 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_313_ni1.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 313 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_313' 10 | human: 'CoreView_313' 11 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_313' 16 | human: 'CoreView_313' 17 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 1 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_313_ni1200.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | 
parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 313 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_313' 10 | human: 'CoreView_313' 11 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_313' 16 | human: 'CoreView_313' 17 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 1200 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_313_ni300.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 313 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_313' 10 | human: 'CoreView_313' 11 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_313' 16 | human: 'CoreView_313' 17 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 600 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_313_ni60.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 313 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_313' 10 | human: 'CoreView_313' 11 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_313' 16 | human: 'CoreView_313' 17 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 60 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_313_ni600.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 313 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_313' 10 | human: 'CoreView_313' 11 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_313' 16 | human: 'CoreView_313' 17 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 300 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_315_ni1.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 315 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_315' 10 | human: 'CoreView_315' 11 | ann_file: 'data/zju_mocap/CoreView_315/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_315' 16 | human: 'CoreView_315' 17 | ann_file: 'data/zju_mocap/CoreView_315/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 1 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_377_ni1.yaml: 
-------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 377 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_377' 10 | human: 'CoreView_377' 11 | ann_file: 'data/zju_mocap/CoreView_377/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_377' 16 | human: 'CoreView_377' 17 | ann_file: 'data/zju_mocap/CoreView_377/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 1 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_386_ni1.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 386 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_386' 10 | human: 'CoreView_386' 11 | ann_file: 'data/zju_mocap/CoreView_386/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_386' 16 | human: 'CoreView_386' 17 | ann_file: 'data/zju_mocap/CoreView_386/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 1 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_387_ni1.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 387 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_387' 10 | human: 'CoreView_387' 11 | ann_file: 'data/zju_mocap/CoreView_387/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_387' 16 | human: 'CoreView_387' 17 | ann_file: 'data/zju_mocap/CoreView_387/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 1 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_390_ni1.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 377 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_377' 10 | human: 'CoreView_377' 11 | ann_file: 'data/zju_mocap/CoreView_377/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_377' 16 | human: 'CoreView_377' 17 | ann_file: 'data/zju_mocap/CoreView_377/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 1 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_392_ni1.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 392 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_392' 10 | human: 'CoreView_392' 11 | ann_file: 'data/zju_mocap/CoreView_392/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_392' 16 | human: 'CoreView_392' 17 | ann_file: 'data/zju_mocap/CoreView_392/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 1 22 | 
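Each config under configs/zju_mocap_frame_exp inherits from configs/zju_mocap_exp/latent_xyzc_313.yaml via parent_cfg and only overrides the subject's dataset paths and num_train_frame, so a frame-count ablation is selected simply by passing a different YAML file to the training entry point. A minimal sketch of launching one of them, assuming the train_net.py entry point and the yacs-style exp_name / resume command-line overrides used elsewhere in this repository (the experiment name is a hypothetical label):

```shell
# hypothetical run: train the single-frame ablation for subject 313
# (exp_name is an arbitrary label; resume False starts training from scratch)
python train_net.py --cfg_file configs/zju_mocap_frame_exp/latent_xyzc_313_ni1.yaml exp_name xyzc_313_ni1 resume False
```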
-------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_393_ni1.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 393 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_393' 10 | human: 'CoreView_393' 11 | ann_file: 'data/zju_mocap/CoreView_393/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_393' 16 | human: 'CoreView_393' 17 | ann_file: 'data/zju_mocap/CoreView_393/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 1 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_frame_exp/latent_xyzc_394_ni1.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 394 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_394' 10 | human: 'CoreView_394' 11 | ann_file: 'data/zju_mocap/CoreView_394/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_394' 16 | human: 'CoreView_394' 17 | ann_file: 'data/zju_mocap/CoreView_394/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | num_train_frame: 1 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_view_exp/latent_xyzc_313_1view.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 313 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_313' 10 | human: 'CoreView_313' 11 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_313' 16 | human: 'CoreView_313' 17 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | training_view: [0] 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_view_exp/latent_xyzc_313_2view.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 313 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_313' 10 | human: 'CoreView_313' 11 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_313' 16 | human: 'CoreView_313' 17 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | training_view: [0, 12] 22 | -------------------------------------------------------------------------------- /configs/zju_mocap_view_exp/latent_xyzc_313_6view.yaml: -------------------------------------------------------------------------------- 1 | task: 'if_nerf' 2 | gpus: [0] 3 | 4 | parent_cfg: 'configs/zju_mocap_exp/latent_xyzc_313.yaml' 5 | 6 | human: 313 7 | 8 | train_dataset: 9 | data_root: 'data/zju_mocap/CoreView_313' 10 | human: 'CoreView_313' 11 | ann_file: 'data/zju_mocap/CoreView_313/annots.npy' 12 | split: 'train' 13 | 14 | test_dataset: 15 | data_root: 'data/zju_mocap/CoreView_313' 16 | human: 'CoreView_313' 17 | ann_file: 
'data/zju_mocap/CoreView_313/annots.npy' 18 | split: 'test' 19 | 20 | # data options 21 | smpl: 'smpl_6view' 22 | params: 'params_6view' 23 | vertices: 'vertices_6view' 24 | training_view: [0, 3, 6, 12, 15, 18] 25 | -------------------------------------------------------------------------------- /docker/.condarc: -------------------------------------------------------------------------------- 1 | channels: 2 | - defaults 3 | show_channel_urls: true 4 | default_channels: 5 | - https://mirrors.bfsu.edu.cn/anaconda/pkgs/main 6 | - https://mirrors.bfsu.edu.cn/anaconda/pkgs/r 7 | - https://mirrors.bfsu.edu.cn/anaconda/pkgs/msys2 8 | custom_channels: 9 | conda-forge: https://mirrors.bfsu.edu.cn/anaconda/cloud 10 | msys2: https://mirrors.bfsu.edu.cn/anaconda/cloud 11 | bioconda: https://mirrors.bfsu.edu.cn/anaconda/cloud 12 | menpo: https://mirrors.bfsu.edu.cn/anaconda/cloud 13 | pytorch: https://mirrors.bfsu.edu.cn/anaconda/cloud 14 | simpleitk: https://mirrors.bfsu.edu.cn/anaconda/cloud 15 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:11.1.1-cudnn8-devel-ubuntu18.04 2 | 3 | # For the convenience for users in China mainland 4 | COPY docker/apt-sources.list /etc/apt/sources.list 5 | 6 | # Install some basic utilities 7 | RUN rm /etc/apt/sources.list.d/nvidia-ml.list \ 8 | && rm /etc/apt/sources.list.d/cuda.list \ 9 | && apt-get update && apt-get install -y \ 10 | curl \ 11 | ca-certificates \ 12 | sudo \ 13 | git \ 14 | bzip2 \ 15 | libx11-6 \ 16 | gcc \ 17 | g++ \ 18 | libusb-1.0-0 \ 19 | cmake \ 20 | libssl-dev \ 21 | && DEBIAN_FRONTEND=noninteractive apt-get install -y python3-opencv \ 22 | && rm -rf /var/lib/apt/lists/* 23 | 24 | # Create a working directory 25 | RUN mkdir /app 26 | WORKDIR /app 27 | 28 | # Create a non-root user and switch to it 29 | RUN adduser --disabled-password --gecos '' --shell /bin/bash user \ 30 | && chown -R user:user /app 31 | RUN echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-user 32 | USER user 33 | 34 | # All users can use /home/user as their home directory 35 | ENV HOME=/home/user 36 | RUN chmod 777 /home/user 37 | 38 | # Install Miniconda and Python 3.8 39 | ENV CONDA_AUTO_UPDATE_CONDA=false 40 | ENV PATH=/home/user/miniconda/bin:$PATH 41 | RUN curl -sLo ~/miniconda.sh https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py38_4.8.3-Linux-x86_64.sh \ 42 | && chmod +x ~/miniconda.sh \ 43 | && ~/miniconda.sh -b -p ~/miniconda \ 44 | && rm ~/miniconda.sh \ 45 | && conda install -y python==3.8.3 \ 46 | && conda clean -ya 47 | COPY --chown=user docker/.condarc /home/user/.condarc 48 | 49 | # CUDA 11.1-specific steps 50 | RUN conda install -y -c conda-forge cudatoolkit=11.1.1 \ 51 | && conda install -y -c pytorch \ 52 | "pytorch=1.8.1=py3.8_cuda11.1_cudnn8.0.5_0" \ 53 | "torchvision=0.9.1=py38_cu111" \ 54 | && conda clean -ya 55 | 56 | # Alter sources for the convenience of users located in China mainland. 57 | RUN pip config set global.index-url https://pypi.douban.com/simple 58 | COPY requirements.txt requirements.txt 59 | RUN pip install -r requirements.txt 60 | 61 | ENV CUDA_HOME=/usr/local/cuda 62 | RUN bash -c "git clone --recursive https://github.com/traveller59/spconv.git" 63 | # We manually download and install cmake since the requirements of spconv is newer than 64 | # that included in apt for ubuntu18. 
65 | RUN curl -sLo cmake.tar.gz https://github.com/Kitware/CMake/releases/download/v3.20.1/cmake-3.20.1.tar.gz \ 66 | && tar -xvf cmake.tar.gz \ 67 | && cd cmake-3.20.1 \ 68 | && ./configure \ 69 | && make -j4 && sudo make install 70 | RUN sudo apt-get update && sudo apt-get install -y libboost-dev \ 71 | && sudo rm -rf /var/lib/apt/lists/* 72 | COPY docker/spconv.sh spconv.sh 73 | RUN bash spconv.sh 74 | 75 | CMD ["python3"] 76 | -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | ## 1. Build the image 2 | 3 | From the root path of the project: 4 | ```shell 5 | docker build -f docker/Dockerfile -t neuralbody . 6 | ``` 7 | 8 | You may need to try several times, since many packages are downloaded over the Internet and http(s) errors can occur. 9 | 10 | ## 2. Data preparation 11 | 12 | The docker image contains the environment you need to run the project, but you still need to manually download the data as described in [INSTALL.md](https://github.com/zju3dv/neuralbody/blob/master/INSTALL.md). 13 | 14 | Note that the downloaded files are tar.gz archives; you need to extract each of them. 15 | 16 | For example: 17 | 18 | ```shell 19 | for name in $(ls *.tar.gz); do tar -xvf $name; done 20 | ``` 21 | 22 | ## 3. Execution using docker containers 23 | 24 | 25 | Assuming you are at the root path of the project, run a docker container like this: 26 | ```shell 27 | docker run -it --rm --gpus=all \ 28 | --mount type=bind,source="$(pwd)",target=/app \ 29 | --mount type=bind,source=<path_to_data>,target=/app/data \ 30 | neuralbody 31 | ``` 32 | where `<path_to_data>` is your local path to the downloaded data; the commands to run inside the container can be found in [README.md](https://github.com/zju3dv/neuralbody/blob/master/README.md).
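Once the image is built and the data directory is mounted, any command appended after the image name overrides the image's default CMD ["python3"], so an evaluation or training run can be launched in one step. A minimal sketch, reusing the run.py evaluation pattern from eval_whole_img.sh below (the host data path and the config location under configs/zju_mocap_exp are assumptions):

```shell
# hypothetical one-step evaluation inside the container;
# the trailing command replaces the image's default CMD
docker run -it --rm --gpus=all \
  --mount type=bind,source="$(pwd)",target=/app \
  --mount type=bind,source=/path/to/your/data,target=/app/data \
  neuralbody \
  python run.py --type evaluate --cfg_file configs/zju_mocap_exp/latent_xyzc_313.yaml exp_name xyzc_313
```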
33 | -------------------------------------------------------------------------------- /docker/apt-sources.list: -------------------------------------------------------------------------------- 1 | deb http://mirrors.163.com/ubuntu/ bionic main restricted universe multiverse 2 | deb http://mirrors.163.com/ubuntu/ bionic-security main restricted universe multiverse 3 | deb http://mirrors.163.com/ubuntu/ bionic-updates main restricted universe multiverse 4 | deb http://mirrors.163.com/ubuntu/ bionic-proposed main restricted universe multiverse 5 | deb http://mirrors.163.com/ubuntu/ bionic-backports main restricted universe multiverse 6 | deb-src http://mirrors.163.com/ubuntu/ bionic main restricted universe multiverse 7 | deb-src http://mirrors.163.com/ubuntu/ bionic-security main restricted universe multiverse 8 | deb-src http://mirrors.163.com/ubuntu/ bionic-updates main restricted universe multiverse 9 | deb-src http://mirrors.163.com/ubuntu/ bionic-proposed main restricted universe multiverse 10 | deb-src http://mirrors.163.com/ubuntu/ bionic-backports main restricted universe multiverse 11 | -------------------------------------------------------------------------------- /docker/spconv.sh: -------------------------------------------------------------------------------- 1 | cd spconv 2 | git checkout abf0acf30f5526ea93e687e3f424f62d9cd8313a 3 | git submodule update --init --recursive 4 | python setup.py bdist_wheel 5 | pip install dist/spconv-1.2.1-cp38-cp38-linux_x86_64.whl 6 | -------------------------------------------------------------------------------- /eval_whole_img.sh: -------------------------------------------------------------------------------- 1 | python run.py --type evaluate --cfg_file configs/latent_xyzc_313.yaml exp_name xyzc_313 eval_whole_img True gpus "3," 2 | python run.py --type evaluate --cfg_file configs/latent_xyzc_313.yaml exp_name xyzc_313 eval_whole_img True gpus "3," test_novel_pose True novel_pose_ni 1000 3 | 4 | python run.py --type evaluate --cfg_file configs/latent_xyzc_315.yaml exp_name xyzc_315 eval_whole_img True gpus "3," 5 | python run.py --type evaluate --cfg_file configs/latent_xyzc_315.yaml exp_name xyzc_315 eval_whole_img True gpus "3," test_novel_pose True novel_pose_ni 1000 6 | 7 | python run.py --type evaluate --cfg_file configs/latent_xyzc_392.yaml exp_name xyzc_392 eval_whole_img True gpus "3," 8 | python run.py --type evaluate --cfg_file configs/latent_xyzc_392.yaml exp_name xyzc_392 eval_whole_img True gpus "3," test_novel_pose True novel_pose_ni 1000 9 | 10 | python run.py --type evaluate --cfg_file configs/latent_xyzc_393.yaml exp_name xyzc_393 eval_whole_img True gpus "3," 11 | python run.py --type evaluate --cfg_file configs/latent_xyzc_393.yaml exp_name xyzc_393 eval_whole_img True gpus "3," test_novel_pose True novel_pose_ni 1000 12 | 13 | python run.py --type evaluate --cfg_file configs/latent_xyzc_394.yaml exp_name xyzc_394 eval_whole_img True gpus "3," 14 | python run.py --type evaluate --cfg_file configs/latent_xyzc_394.yaml exp_name xyzc_394 eval_whole_img True gpus "3," test_novel_pose True novel_pose_ni 1000 15 | 16 | python run.py --type evaluate --cfg_file configs/latent_xyzc_377.yaml exp_name xyzc_377 eval_whole_img True gpus "3," 17 | python run.py --type evaluate --cfg_file configs/latent_xyzc_377.yaml exp_name xyzc_377 eval_whole_img True gpus "3," test_novel_pose True novel_pose_ni 1000 18 | 19 | python run.py --type evaluate --cfg_file configs/latent_xyzc_386.yaml exp_name xyzc_386 eval_whole_img True gpus "3," 20 | 
python run.py --type evaluate --cfg_file configs/latent_xyzc_386.yaml exp_name xyzc_386 eval_whole_img True gpus "3," test_novel_pose True novel_pose_ni 1000 21 | 22 | python run.py --type evaluate --cfg_file configs/latent_xyzc_390.yaml exp_name xyzc_390 eval_whole_img True gpus "3," 23 | python run.py --type evaluate --cfg_file configs/latent_xyzc_390.yaml exp_name xyzc_390 eval_whole_img True gpus "3," test_novel_pose True novel_pose_ni 700 24 | 25 | python run.py --type evaluate --cfg_file configs/latent_xyzc_387.yaml exp_name xyzc_387 eval_whole_img True gpus "3," 26 | python run.py --type evaluate --cfg_file configs/latent_xyzc_387.yaml exp_name xyzc_387 eval_whole_img True gpus "3," test_novel_pose True novel_pose_ni 1000 27 | -------------------------------------------------------------------------------- /lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zju3dv/neuralbody/3c516b953477006a3d1a7311eb4d51438c982c33/lib/__init__.py -------------------------------------------------------------------------------- /lib/config/__init__.py: -------------------------------------------------------------------------------- 1 | from .config import cfg, args 2 | -------------------------------------------------------------------------------- /lib/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_dataset import make_data_loader 2 | -------------------------------------------------------------------------------- /lib/datasets/collate_batch.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data.dataloader import default_collate 2 | import torch 3 | import numpy as np 4 | 5 | 6 | _collators = { 7 | } 8 | 9 | 10 | def make_collator(cfg, is_train): 11 | collator = cfg.train.collator if is_train else cfg.test.collator 12 | if collator in _collators: 13 | return _collators[collator] 14 | else: 15 | return default_collate 16 | -------------------------------------------------------------------------------- /lib/datasets/light_stage/monocular_mesh_dataset.py: -------------------------------------------------------------------------------- 1 | import torch.utils.data as data 2 | from lib.utils import base_utils 3 | from PIL import Image 4 | import numpy as np 5 | import json 6 | import os 7 | import imageio 8 | import cv2 9 | from lib.config import cfg 10 | from lib.utils.if_nerf import if_nerf_data_utils as if_nerf_dutils 11 | from plyfile import PlyData 12 | from lib.utils import snapshot_data_utils as snapshot_dutils 13 | from . 
import monocular_dataset 14 | 15 | 16 | class Dataset(monocular_dataset.Dataset): 17 | def __init__(self, data_root, human, ann_file, split): 18 | super(Dataset, self).__init__(data_root, human, ann_file, split) 19 | 20 | self.data_root = data_root 21 | self.split = split 22 | 23 | camera_path = os.path.join(self.data_root, 'camera.pkl') 24 | self.cam = snapshot_dutils.get_camera(camera_path) 25 | self.begin_ith_frame = cfg.begin_ith_frame 26 | self.num_train_frame = cfg.num_train_frame 27 | 28 | self.ims = np.arange(self.num_train_frame) 29 | self.num_cams = 1 30 | 31 | params_path = ann_file 32 | self.params = np.load(params_path, allow_pickle=True).item() 33 | 34 | self.nrays = cfg.N_rand 35 | 36 | def prepare_inside_pts(self, pts, msk, K, R, T): 37 | sh = pts.shape 38 | pts3d = pts.reshape(-1, 3) 39 | RT = np.concatenate([R, T], axis=1) 40 | pts2d = base_utils.project(pts3d, K, RT) 41 | 42 | H, W = msk.shape 43 | pts2d = np.round(pts2d).astype(np.int32) 44 | pts2d[:, 0] = np.clip(pts2d[:, 0], 0, W - 1) 45 | pts2d[:, 1] = np.clip(pts2d[:, 1], 0, H - 1) 46 | inside = msk[pts2d[:, 1], pts2d[:, 0]] 47 | inside = inside.reshape(*sh[:-1]) 48 | 49 | return inside 50 | 51 | def __getitem__(self, index): 52 | latent_index = index 53 | index = index + self.begin_ith_frame 54 | frame_index = index 55 | 56 | img_path = os.path.join(self.data_root, 'image', 57 | '{}.jpg'.format(index)) 58 | img = imageio.imread(img_path).astype(np.float32) / 255. 59 | msk_path = os.path.join(self.data_root, 'mask', '{}.png'.format(index)) 60 | msk = imageio.imread(msk_path) 61 | 62 | K = self.cam['K'] 63 | D = self.cam['D'] 64 | img = cv2.undistort(img, K, D) 65 | msk = cv2.undistort(msk, K, D) 66 | 67 | R = self.cam['R'] 68 | T = self.cam['T'][:, None] 69 | 70 | coord, out_sh, can_bounds, bounds, Rh, Th = self.prepare_input( 71 | index) 72 | 73 | # reduce the image resolution by ratio 74 | H, W = int(img.shape[0] * cfg.ratio), int(img.shape[1] * cfg.ratio) 75 | img = cv2.resize(img, (W, H), interpolation=cv2.INTER_AREA) 76 | msk = cv2.resize(msk, (W, H), interpolation=cv2.INTER_NEAREST) 77 | img[msk == 0] = 0 78 | K = K.copy() 79 | K[:2] = K[:2] * cfg.ratio 80 | 81 | voxel_size = cfg.voxel_size 82 | x = np.arange(can_bounds[0, 0], can_bounds[1, 0] + voxel_size[0], 83 | voxel_size[0]) 84 | y = np.arange(can_bounds[0, 1], can_bounds[1, 1] + voxel_size[1], 85 | voxel_size[1]) 86 | z = np.arange(can_bounds[0, 2], can_bounds[1, 2] + voxel_size[2], 87 | voxel_size[2]) 88 | pts = np.stack(np.meshgrid(x, y, z, indexing='ij'), axis=-1) 89 | pts = pts.astype(np.float32) 90 | 91 | inside = self.prepare_inside_pts(pts, msk, K, R, T) 92 | 93 | ret = { 94 | 'coord': coord, 95 | 'out_sh': out_sh, 96 | 'pts': pts, 97 | 'inside': inside 98 | } 99 | 100 | R = cv2.Rodrigues(Rh)[0].astype(np.float32) 101 | meta = { 102 | 'bounds': bounds, 103 | 'R': R, 104 | 'Th': Th, 105 | 'latent_index': latent_index, 106 | 'frame_index': frame_index 107 | } 108 | ret.update(meta) 109 | 110 | return ret 111 | 112 | def __len__(self): 113 | return self.num_train_frame 114 | -------------------------------------------------------------------------------- /lib/datasets/make_dataset.py: -------------------------------------------------------------------------------- 1 | from .transforms import make_transforms 2 | from . 
import samplers 3 | import torch 4 | import torch.utils.data 5 | import imp 6 | import os 7 | from .collate_batch import make_collator 8 | import numpy as np 9 | import time 10 | from lib.config.config import cfg 11 | 12 | 13 | def _dataset_factory(is_train): 14 | if is_train: 15 | module = cfg.train_dataset_module 16 | path = cfg.train_dataset_path 17 | args = cfg.train_dataset 18 | else: 19 | module = cfg.test_dataset_module 20 | path = cfg.test_dataset_path 21 | args = cfg.test_dataset 22 | dataset = imp.load_source(module, path).Dataset(**args) 23 | return dataset 24 | 25 | 26 | def make_dataset(cfg, dataset_name, transforms, is_train=True): 27 | dataset = _dataset_factory(is_train) 28 | return dataset 29 | 30 | 31 | def make_data_sampler(dataset, shuffle, is_distributed, is_train): 32 | if not is_train and cfg.test.sampler == 'FrameSampler': 33 | sampler = samplers.FrameSampler(dataset) 34 | return sampler 35 | if is_distributed: 36 | return samplers.DistributedSampler(dataset, shuffle=shuffle) 37 | if shuffle: 38 | sampler = torch.utils.data.sampler.RandomSampler(dataset) 39 | else: 40 | sampler = torch.utils.data.sampler.SequentialSampler(dataset) 41 | return sampler 42 | 43 | 44 | def make_batch_data_sampler(cfg, sampler, batch_size, drop_last, max_iter, 45 | is_train): 46 | if is_train: 47 | batch_sampler = cfg.train.batch_sampler 48 | sampler_meta = cfg.train.sampler_meta 49 | else: 50 | batch_sampler = cfg.test.batch_sampler 51 | sampler_meta = cfg.test.sampler_meta 52 | 53 | if batch_sampler == 'default': 54 | batch_sampler = torch.utils.data.sampler.BatchSampler( 55 | sampler, batch_size, drop_last) 56 | elif batch_sampler == 'image_size': 57 | batch_sampler = samplers.ImageSizeBatchSampler(sampler, batch_size, 58 | drop_last, sampler_meta) 59 | 60 | if max_iter != -1: 61 | batch_sampler = samplers.IterationBasedBatchSampler( 62 | batch_sampler, max_iter) 63 | return batch_sampler 64 | 65 | 66 | def worker_init_fn(worker_id): 67 | np.random.seed(worker_id + (int(round(time.time() * 1000) % (2**16)))) 68 | 69 | 70 | def make_data_loader(cfg, is_train=True, is_distributed=False, max_iter=-1): 71 | if is_train: 72 | batch_size = cfg.train.batch_size 73 | # shuffle = True 74 | shuffle = cfg.train.shuffle 75 | drop_last = False 76 | else: 77 | batch_size = cfg.test.batch_size 78 | shuffle = True if is_distributed else False 79 | drop_last = False 80 | 81 | dataset_name = cfg.train.dataset if is_train else cfg.test.dataset 82 | 83 | transforms = make_transforms(cfg, is_train) 84 | dataset = make_dataset(cfg, dataset_name, transforms, is_train) 85 | sampler = make_data_sampler(dataset, shuffle, is_distributed, is_train) 86 | batch_sampler = make_batch_data_sampler(cfg, sampler, batch_size, 87 | drop_last, max_iter, is_train) 88 | num_workers = cfg.train.num_workers 89 | collator = make_collator(cfg, is_train) 90 | data_loader = torch.utils.data.DataLoader(dataset, 91 | batch_sampler=batch_sampler, 92 | num_workers=num_workers, 93 | collate_fn=collator, 94 | worker_init_fn=worker_init_fn) 95 | 96 | return data_loader 97 | -------------------------------------------------------------------------------- /lib/datasets/transforms.py: -------------------------------------------------------------------------------- 1 | class Compose(object): 2 | def __init__(self, transforms): 3 | self.transforms = transforms 4 | 5 | def __call__(self, img, kpts=None): 6 | for t in self.transforms: 7 | img, kpts = t(img, kpts) 8 | if kpts is None: 9 | return img 10 | else: 11 | return img, kpts 12 | 13 | 
def __repr__(self): 14 | format_string = self.__class__.__name__ + "(" 15 | for t in self.transforms: 16 | format_string += "\n" 17 | format_string += " {0}".format(t) 18 | format_string += "\n)" 19 | return format_string 20 | 21 | 22 | class ToTensor(object): 23 | def __call__(self, img, kpts): 24 | return img / 255., kpts 25 | 26 | 27 | class Normalize(object): 28 | def __init__(self, mean, std): 29 | self.mean = mean 30 | self.std = std 31 | 32 | def __call__(self, img, kpts): 33 | img -= self.mean 34 | img /= self.std 35 | return img, kpts 36 | 37 | 38 | def make_transforms(cfg, is_train): 39 | if is_train is True: 40 | transform = Compose( 41 | [ 42 | ToTensor(), 43 | Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), 44 | ] 45 | ) 46 | else: 47 | transform = Compose( 48 | [ 49 | ToTensor(), 50 | Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), 51 | ] 52 | ) 53 | 54 | return transform 55 | -------------------------------------------------------------------------------- /lib/evaluators/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_evaluator import make_evaluator 2 | -------------------------------------------------------------------------------- /lib/evaluators/if_nerf.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from lib.config import cfg 3 | from skimage.measure import compare_ssim 4 | import os 5 | import cv2 6 | from termcolor import colored 7 | 8 | 9 | class Evaluator: 10 | def __init__(self): 11 | self.mse = [] 12 | self.psnr = [] 13 | self.ssim = [] 14 | 15 | def psnr_metric(self, img_pred, img_gt): 16 | mse = np.mean((img_pred - img_gt)**2) 17 | psnr = -10 * np.log(mse) / np.log(10) 18 | return psnr 19 | 20 | def ssim_metric(self, img_pred, img_gt, batch): 21 | if not cfg.eval_whole_img: 22 | mask_at_box = batch['mask_at_box'][0].detach().cpu().numpy() 23 | H, W = int(cfg.H * cfg.ratio), int(cfg.W * cfg.ratio) 24 | mask_at_box = mask_at_box.reshape(H, W) 25 | # crop the object region 26 | x, y, w, h = cv2.boundingRect(mask_at_box.astype(np.uint8)) 27 | img_pred = img_pred[y:y + h, x:x + w] 28 | img_gt = img_gt[y:y + h, x:x + w] 29 | 30 | result_dir = os.path.join(cfg.result_dir, 'comparison') 31 | os.system('mkdir -p {}'.format(result_dir)) 32 | frame_index = batch['frame_index'].item() 33 | view_index = batch['cam_ind'].item() 34 | cv2.imwrite( 35 | '{}/frame{:04d}_view{:04d}.png'.format(result_dir, frame_index, 36 | view_index), 37 | (img_pred[..., [2, 1, 0]] * 255)) 38 | cv2.imwrite( 39 | '{}/frame{:04d}_view{:04d}_gt.png'.format(result_dir, frame_index, 40 | view_index), 41 | (img_gt[..., [2, 1, 0]] * 255)) 42 | 43 | # compute the ssim 44 | ssim = compare_ssim(img_pred, img_gt, multichannel=True) 45 | return ssim 46 | 47 | def evaluate(self, output, batch): 48 | rgb_pred = output['rgb_map'][0].detach().cpu().numpy() 49 | rgb_gt = batch['rgb'][0].detach().cpu().numpy() 50 | 51 | mask_at_box = batch['mask_at_box'][0].detach().cpu().numpy() 52 | H, W = int(cfg.H * cfg.ratio), int(cfg.W * cfg.ratio) 53 | mask_at_box = mask_at_box.reshape(H, W) 54 | # convert the pixels into an image 55 | white_bkgd = int(cfg.white_bkgd) 56 | img_pred = np.zeros((H, W, 3)) + white_bkgd 57 | img_pred[mask_at_box] = rgb_pred 58 | img_gt = np.zeros((H, W, 3)) + white_bkgd 59 | img_gt[mask_at_box] = rgb_gt 60 | 61 | if cfg.eval_whole_img: 62 | rgb_pred = img_pred 63 | rgb_gt = img_gt 64 | 65 | mse = np.mean((rgb_pred - rgb_gt)**2) 66 | 
self.mse.append(mse) 67 | 68 | psnr = self.psnr_metric(rgb_pred, rgb_gt) 69 | self.psnr.append(psnr) 70 | 71 | rgb_pred = img_pred 72 | rgb_gt = img_gt 73 | ssim = self.ssim_metric(rgb_pred, rgb_gt, batch) 74 | self.ssim.append(ssim) 75 | 76 | def summarize(self): 77 | result_dir = cfg.result_dir 78 | print( 79 | colored('the results are saved at {}'.format(result_dir), 80 | 'yellow')) 81 | 82 | result_path = os.path.join(cfg.result_dir, 'metrics.npy') 83 | os.system('mkdir -p {}'.format(os.path.dirname(result_path))) 84 | metrics = {'mse': self.mse, 'psnr': self.psnr, 'ssim': self.ssim} 85 | np.save(result_path, metrics) 86 | print('mse: {}'.format(np.mean(self.mse))) 87 | print('psnr: {}'.format(np.mean(self.psnr))) 88 | print('ssim: {}'.format(np.mean(self.ssim))) 89 | self.mse = [] 90 | self.psnr = [] 91 | self.ssim = [] 92 | -------------------------------------------------------------------------------- /lib/evaluators/if_nerf_mesh.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from lib.config import cfg 3 | import os 4 | 5 | 6 | class Evaluator: 7 | def evaluate(self, output, batch): 8 | cube = output['cube'] 9 | cube = cube[10:-10, 10:-10, 10:-10] 10 | 11 | pts = batch['pts'][0].detach().cpu().numpy() 12 | pts = pts[cube > cfg.mesh_th] 13 | 14 | i = batch['i'].item() 15 | result_dir = os.path.join(cfg.result_dir, 'pts') 16 | os.system('mkdir -p {}'.format(result_dir)) 17 | result_path = os.path.join(result_dir, '{}.npy'.format(i)) 18 | np.save(result_path, pts) 19 | 20 | def summarize(self): 21 | return {} 22 | -------------------------------------------------------------------------------- /lib/evaluators/make_evaluator.py: -------------------------------------------------------------------------------- 1 | import imp 2 | import os 3 | 4 | 5 | def _evaluator_factory(cfg): 6 | module = cfg.evaluator_module 7 | path = cfg.evaluator_path 8 | evaluator = imp.load_source(module, path).Evaluator() 9 | return evaluator 10 | 11 | 12 | def make_evaluator(cfg): 13 | if cfg.skip_eval: 14 | return None 15 | else: 16 | return _evaluator_factory(cfg) 17 | -------------------------------------------------------------------------------- /lib/evaluators/neural_volume.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from lib.config import cfg 3 | from skimage.measure import compare_ssim 4 | import os 5 | import cv2 6 | import imageio 7 | 8 | 9 | class Evaluator: 10 | def __init__(self): 11 | self.mse = [] 12 | self.psnr = [] 13 | self.ssim = [] 14 | 15 | def psnr_metric(self, img_pred, img_gt): 16 | mse = np.mean((img_pred - img_gt)**2) 17 | psnr = -10 * np.log(mse) / np.log(10) 18 | return psnr 19 | 20 | def ssim_metric(self, rgb_pred, rgb_gt, batch): 21 | mask_at_box = batch['mask_at_box'][0].detach().cpu().numpy() 22 | H, W = int(cfg.H * cfg.ratio), int(cfg.W * cfg.ratio) 23 | mask_at_box = mask_at_box.reshape(H, W) 24 | # convert the pixels into an image 25 | img_pred = np.zeros((H, W, 3)) 26 | img_pred[mask_at_box] = rgb_pred 27 | img_gt = np.zeros((H, W, 3)) 28 | img_gt[mask_at_box] = rgb_gt 29 | # crop the object region 30 | x, y, w, h = cv2.boundingRect(mask_at_box.astype(np.uint8)) 31 | img_pred = img_pred[y:y + h, x:x + w] 32 | img_gt = img_gt[y:y + h, x:x + w] 33 | # compute the ssim 34 | ssim = compare_ssim(img_pred, img_gt, multichannel=True) 35 | return ssim 36 | 37 | def evaluate(self, batch): 38 | if cfg.human in [302, 313, 315]: 39 | i = batch['i'].item() 
+ 1 40 | else: 41 | i = batch['i'].item() 42 | i = i + cfg.begin_i 43 | cam_ind = batch['cam_ind'].item() 44 | 45 | # obtain the image path 46 | result_dir = 'data/result/neural_volumes/{}_nv'.format(cfg.human) 47 | frame_dir = os.path.join(result_dir, 'frame_{}'.format(i)) 48 | gt_img_path = os.path.join(frame_dir, 'gt_{}.jpg'.format(cam_ind + 1)) 49 | pred_img_path = os.path.join(frame_dir, 50 | 'pred_{}.jpg'.format(cam_ind + 1)) 51 | 52 | mask_at_box = batch['mask_at_box'][0].detach().cpu().numpy() 53 | H, W = int(cfg.H * cfg.ratio), int(cfg.W * cfg.ratio) 54 | mask_at_box = mask_at_box.reshape(H, W) 55 | 56 | # convert the pixels into an image 57 | rgb_gt = batch['rgb'][0].detach().cpu().numpy() 58 | img_gt = np.zeros((H, W, 3)) 59 | img_gt[mask_at_box] = rgb_gt 60 | 61 | # gt_img_path = gt_img_path.replace('neural_volumes', 'gt') 62 | # os.system('mkdir -p {}'.format(os.path.dirname(gt_img_path))) 63 | # img_gt = img_gt[..., [2, 1, 0]] * 255 64 | # cv2.imwrite(gt_img_path, img_gt) 65 | 66 | img_pred = imageio.imread(pred_img_path).astype(np.float32) / 255. 67 | img_pred[mask_at_box != 1] = 0 68 | rgb_pred = img_pred[mask_at_box] 69 | 70 | # import matplotlib.pyplot as plt 71 | # _, (ax1, ax2) = plt.subplots(1, 2) 72 | # ax1.imshow(img_gt) 73 | # ax2.imshow(img_pred) 74 | # plt.show() 75 | # return 76 | 77 | mse = np.mean((rgb_pred - rgb_gt)**2) 78 | self.mse.append(mse) 79 | 80 | psnr = self.psnr_metric(rgb_pred, rgb_gt) 81 | self.psnr.append(psnr) 82 | 83 | ssim = self.ssim_metric(rgb_pred, rgb_gt, batch) 84 | self.ssim.append(ssim) 85 | 86 | def summarize(self): 87 | result_path = os.path.join(cfg.result_dir, 'metrics.npy') 88 | os.system('mkdir -p {}'.format(os.path.dirname(result_path))) 89 | metrics = {'mse': self.mse, 'psnr': self.psnr, 'ssim': self.ssim} 90 | np.save(result_path, self.mse) 91 | print('mse: {}'.format(np.mean(self.mse))) 92 | print('psnr: {}'.format(np.mean(self.psnr))) 93 | print('ssim: {}'.format(np.mean(self.ssim))) 94 | self.mse = [] 95 | self.psnr = [] 96 | self.ssim = [] 97 | -------------------------------------------------------------------------------- /lib/networks/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_network import make_network 2 | -------------------------------------------------------------------------------- /lib/networks/embedder.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from lib.config import cfg 3 | 4 | 5 | class Embedder: 6 | def __init__(self, **kwargs): 7 | self.kwargs = kwargs 8 | self.create_embedding_fn() 9 | 10 | def create_embedding_fn(self): 11 | embed_fns = [] 12 | d = self.kwargs['input_dims'] 13 | out_dim = 0 14 | if self.kwargs['include_input']: 15 | embed_fns.append(lambda x: x) 16 | out_dim += d 17 | 18 | max_freq = self.kwargs['max_freq_log2'] 19 | N_freqs = self.kwargs['num_freqs'] 20 | 21 | if self.kwargs['log_sampling']: 22 | freq_bands = 2.**torch.linspace(0., max_freq, steps=N_freqs) 23 | else: 24 | freq_bands = torch.linspace(2.**0., 2.**max_freq, steps=N_freqs) 25 | 26 | for freq in freq_bands: 27 | for p_fn in self.kwargs['periodic_fns']: 28 | embed_fns.append( 29 | lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)) 30 | out_dim += d 31 | 32 | self.embed_fns = embed_fns 33 | self.out_dim = out_dim 34 | 35 | def embed(self, inputs): 36 | return torch.cat([fn(inputs) for fn in self.embed_fns], -1) 37 | 38 | 39 | def get_embedder(multires, input_dims=3): 40 | embed_kwargs = { 41 | 
'include_input': True, 42 | 'input_dims': input_dims, 43 | 'max_freq_log2': multires - 1, 44 | 'num_freqs': multires, 45 | 'log_sampling': True, 46 | 'periodic_fns': [torch.sin, torch.cos], 47 | } 48 | embedder_obj = Embedder(**embed_kwargs) 49 | embed = lambda x, eo=embedder_obj: eo.embed(x) 50 | return embed, embedder_obj.out_dim 51 | 52 | 53 | xyz_embedder, xyz_dim = get_embedder(cfg.xyz_res) 54 | view_embedder, view_dim = get_embedder(cfg.view_res) 55 | -------------------------------------------------------------------------------- /lib/networks/make_network.py: -------------------------------------------------------------------------------- 1 | import os 2 | import imp 3 | 4 | 5 | def make_network(cfg): 6 | module = cfg.network_module 7 | path = cfg.network_path 8 | network = imp.load_source(module, path).Network() 9 | return network 10 | -------------------------------------------------------------------------------- /lib/networks/renderer/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_renderer import make_renderer -------------------------------------------------------------------------------- /lib/networks/renderer/if_clight_renderer_mmsk.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from lib.config import cfg 3 | from .nerf_net_utils import * 4 | from .. import embedder 5 | from . import if_clight_renderer 6 | 7 | 8 | class Renderer(if_clight_renderer.Renderer): 9 | def __init__(self, net): 10 | super(Renderer, self).__init__(net) 11 | 12 | def prepare_inside_pts(self, pts, batch): 13 | if 'Ks' not in batch: 14 | __import__('ipdb').set_trace() 15 | return raw 16 | 17 | sh = pts.shape 18 | pts = pts.view(sh[0], -1, sh[3]) 19 | 20 | insides = [] 21 | for nv in range(batch['Ks'].size(1)): 22 | # project pts to image space 23 | R = batch['RT'][:, nv, :3, :3] 24 | T = batch['RT'][:, nv, :3, 3] 25 | pts_ = torch.matmul(pts, R.transpose(2, 1)) + T[:, None] 26 | pts_ = torch.matmul(pts_, batch['Ks'][:, nv].transpose(2, 1)) 27 | pts2d = pts_[..., :2] / pts_[..., 2:] 28 | 29 | # ensure that pts2d is inside the image 30 | pts2d = pts2d.round().long() 31 | H, W = int(cfg.H * cfg.ratio), int(cfg.W * cfg.ratio) 32 | pts2d[..., 0] = torch.clamp(pts2d[..., 0], 0, W - 1) 33 | pts2d[..., 1] = torch.clamp(pts2d[..., 1], 0, H - 1) 34 | 35 | # remove the points outside the mask 36 | pts2d = pts2d[0] 37 | msk = batch['msks'][0, nv] 38 | inside = msk[pts2d[:, 1], pts2d[:, 0]][None].bool() 39 | insides.append(inside) 40 | 41 | inside = insides[0] 42 | for i in range(1, len(insides)): 43 | inside = inside * insides[i] 44 | 45 | return inside 46 | 47 | def get_density_color(self, wpts, viewdir, inside, raw_decoder): 48 | n_batch, n_pixel, n_sample = wpts.shape[:3] 49 | wpts = wpts.view(n_batch, n_pixel * n_sample, -1) 50 | viewdir = viewdir[:, :, None].repeat(1, 1, n_sample, 1).contiguous() 51 | viewdir = viewdir.view(n_batch, n_pixel * n_sample, -1) 52 | wpts = wpts[inside][None] 53 | viewdir = viewdir[inside][None] 54 | full_raw = torch.zeros([n_batch, n_pixel * n_sample, 4]).to(wpts) 55 | if inside.sum() == 0: 56 | return full_raw 57 | 58 | raw = raw_decoder(wpts, viewdir) 59 | full_raw[inside] = raw[0] 60 | 61 | return full_raw 62 | 63 | def get_pixel_value(self, ray_o, ray_d, near, far, feature_volume, 64 | sp_input, batch): 65 | # sampling points along camera rays 66 | wpts, z_vals = self.get_sampling_points(ray_o, ray_d, near, far) 67 | inside = self.prepare_inside_pts(wpts, 
batch) 68 | 69 | # viewing direction 70 | viewdir = ray_d / torch.norm(ray_d, dim=2, keepdim=True) 71 | 72 | raw_decoder = lambda x_point, viewdir_val: self.net.calculate_density_color( 73 | x_point, viewdir_val, feature_volume, sp_input) 74 | 75 | # compute the color and density 76 | wpts_raw = self.get_density_color(wpts, viewdir, inside, raw_decoder) 77 | 78 | # volume rendering for wpts 79 | n_batch, n_pixel, n_sample = wpts.shape[:3] 80 | raw = wpts_raw.reshape(-1, n_sample, 4) 81 | z_vals = z_vals.view(-1, n_sample) 82 | ray_d = ray_d.view(-1, 3) 83 | rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs( 84 | raw, z_vals, ray_d, cfg.raw_noise_std, cfg.white_bkgd) 85 | 86 | ret = { 87 | 'rgb_map': rgb_map.view(n_batch, n_pixel, -1), 88 | 'disp_map': disp_map.view(n_batch, n_pixel), 89 | 'acc_map': acc_map.view(n_batch, n_pixel), 90 | 'weights': weights.view(n_batch, n_pixel, -1), 91 | 'depth_map': depth_map.view(n_batch, n_pixel) 92 | } 93 | 94 | return ret 95 | -------------------------------------------------------------------------------- /lib/networks/renderer/if_clight_renderer_msk.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from lib.config import cfg 3 | from .nerf_net_utils import * 4 | from .. import embedder 5 | from . import if_clight_renderer_mmsk 6 | 7 | 8 | class Renderer(if_clight_renderer_mmsk.Renderer): 9 | def __init__(self, net): 10 | super(Renderer, self).__init__(net) 11 | 12 | def prepare_inside_pts(self, wpts, batch): 13 | if 'R0_snap' not in batch: 14 | __import__('ipdb').set_trace() 15 | return raw 16 | 17 | # transform points from the world space to the smpl space 18 | Th = batch['Th'] 19 | can_pts = wpts - Th[:, None, None] 20 | R = batch['R'] 21 | can_pts = torch.matmul(can_pts, R) 22 | 23 | R0 = batch['R0_snap'] 24 | Th0 = batch['Th0_snap'] 25 | 26 | # transform pts from smpl coordinate to the world coordinate 27 | sh = can_pts.shape 28 | can_pts = can_pts.view(sh[0], -1, sh[3]) 29 | pts = torch.matmul(can_pts, R0.transpose(2, 1)) + Th0[:, None] 30 | 31 | # project pts to image space 32 | R = batch['RT'][..., :3] 33 | T = batch['RT'][..., 3] 34 | pts = torch.matmul(pts, R.transpose(2, 1)) + T[:, None] 35 | pts = torch.matmul(pts, batch['K'].transpose(2, 1)) 36 | pts2d = pts[..., :2] / pts[..., 2:] 37 | 38 | # ensure that pts2d is inside the image 39 | pts2d = pts2d.round().long() 40 | H, W = int(cfg.H * cfg.ratio), int(cfg.W * cfg.ratio) 41 | pts2d[..., 0] = torch.clamp(pts2d[..., 0], 0, W - 1) 42 | pts2d[..., 1] = torch.clamp(pts2d[..., 1], 0, H - 1) 43 | 44 | # remove the points outside the mask 45 | pts2d = pts2d[0] 46 | msk = batch['msk'][0] 47 | inside = msk[pts2d[:, 1], pts2d[:, 0]][None].bool() 48 | 49 | return inside 50 | -------------------------------------------------------------------------------- /lib/networks/renderer/if_mesh_renderer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from lib.config import cfg 3 | from .nerf_net_utils import * 4 | from .. import embedder 5 | import numpy as np 6 | import mcubes 7 | import trimesh 8 | from . import if_clight_renderer 9 | 10 | 11 | class Renderer(if_clight_renderer.Renderer): 12 | def __init__(self, net): 13 | super(Renderer, self).__init__(net) 14 | 15 | def batchify_rays(self, wpts, alpha_decoder, chunk=1024 * 32): 16 | """Render rays in smaller minibatches to avoid OOM. 
17 | """ 18 | n_batch, n_point = wpts.shape[:2] 19 | all_ret = [] 20 | for i in range(0, n_point, chunk): 21 | ret = alpha_decoder(wpts[:, i:i + chunk]) 22 | all_ret.append(ret) 23 | all_ret = torch.cat(all_ret, 1) 24 | return all_ret 25 | 26 | def render(self, batch): 27 | pts = batch['pts'] 28 | sh = pts.shape 29 | 30 | inside = batch['inside'][0].bool() 31 | pts = pts[0][inside][None] 32 | 33 | # encode neural body 34 | sp_input = self.prepare_sp_input(batch) 35 | feature_volume = self.net.encode_sparse_voxels(sp_input) 36 | alpha_decoder = lambda x: self.net.calculate_density( 37 | x, feature_volume, sp_input) 38 | 39 | alpha = self.batchify_rays(pts, alpha_decoder, 2048 * 64) 40 | 41 | alpha = alpha[0, :, 0].detach().cpu().numpy() 42 | cube = np.zeros(sh[1:-1]) 43 | inside = inside.detach().cpu().numpy() 44 | cube[inside == 1] = alpha 45 | 46 | cube = np.pad(cube, 10, mode='constant') 47 | vertices, triangles = mcubes.marching_cubes(cube, cfg.mesh_th) 48 | 49 | # vertices = (vertices - 10) * 0.005 50 | # vertices = vertices + batch['wbounds'][0, 0].detach().cpu().numpy() 51 | 52 | mesh = trimesh.Trimesh(vertices, triangles) 53 | 54 | ret = {'cube': cube, 'mesh': mesh} 55 | 56 | return ret 57 | -------------------------------------------------------------------------------- /lib/networks/renderer/make_renderer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import imp 3 | 4 | 5 | def make_renderer(cfg, network): 6 | module = cfg.renderer_module 7 | path = cfg.renderer_path 8 | renderer = imp.load_source(module, path).Renderer(network) 9 | return renderer 10 | -------------------------------------------------------------------------------- /lib/networks/renderer/nerf_net_utils.py: -------------------------------------------------------------------------------- 1 | import torch.nn.functional as F 2 | import torch 3 | from lib.config import cfg 4 | 5 | 6 | def raw2outputs(raw, z_vals, rays_d, raw_noise_std=0, white_bkgd=False): 7 | """Transforms model's predictions to semantically meaningful values. 8 | Args: 9 | raw: [num_rays, num_samples along ray, 4]. Prediction from model. 10 | z_vals: [num_rays, num_samples along ray]. Integration time. 11 | rays_d: [num_rays, 3]. Direction of each ray. 12 | Returns: 13 | rgb_map: [num_rays, 3]. Estimated RGB color of a ray. 14 | disp_map: [num_rays]. Disparity map. Inverse of depth map. 15 | acc_map: [num_rays]. Sum of weights along each ray. 16 | weights: [num_rays, num_samples]. Weights assigned to each sampled color. 17 | depth_map: [num_rays]. Estimated distance to object. 18 | """ 19 | raw2alpha = lambda raw, dists, act_fn=F.relu: 1. - torch.exp(-act_fn(raw) * 20 | dists) 21 | 22 | dists = z_vals[..., 1:] - z_vals[..., :-1] 23 | dists = torch.cat( 24 | [dists, 25 | torch.Tensor([1e10]).expand(dists[..., :1].shape).to(dists)], 26 | -1) # [N_rays, N_samples] 27 | 28 | dists = dists * torch.norm(rays_d[..., None, :], dim=-1) 29 | 30 | rgb = torch.sigmoid(raw[..., :3]) # [N_rays, N_samples, 3] 31 | noise = 0. 32 | if raw_noise_std > 0.: 33 | noise = torch.randn(raw[..., 3].shape) * raw_noise_std 34 | 35 | alpha = raw2alpha(raw[..., 3] + noise, dists) # [N_rays, N_samples] 36 | # weights = alpha * tf.math.cumprod(1.-alpha + 1e-10, -1, exclusive=True) 37 | weights = alpha * torch.cumprod( 38 | torch.cat( 39 | [torch.ones((alpha.shape[0], 1)).to(alpha), 1. 
- alpha + 1e-10], 40 | -1), -1)[:, :-1] 41 | rgb_map = torch.sum(weights[..., None] * rgb, -2) # [N_rays, 3] 42 | 43 | depth_map = torch.sum(weights * z_vals, -1) 44 | disp_map = 1. / torch.max(1e-10 * torch.ones_like(depth_map).to(depth_map), 45 | depth_map / torch.sum(weights, -1)) 46 | acc_map = torch.sum(weights, -1) 47 | 48 | if white_bkgd: 49 | rgb_map = rgb_map + (1. - acc_map[..., None]) 50 | 51 | return rgb_map, disp_map, acc_map, weights, depth_map 52 | 53 | 54 | # Hierarchical sampling (section 5.2) 55 | def sample_pdf(bins, weights, N_samples, det=False): 56 | from torchsearchsorted import searchsorted 57 | 58 | # Get pdf 59 | weights = weights + 1e-5 # prevent nans 60 | pdf = weights / torch.sum(weights, -1, keepdim=True) 61 | cdf = torch.cumsum(pdf, -1) 62 | cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], 63 | -1) # (batch, len(bins)) 64 | 65 | # Take uniform samples 66 | if det: 67 | u = torch.linspace(0., 1., steps=N_samples).to(cdf) 68 | u = u.expand(list(cdf.shape[:-1]) + [N_samples]) 69 | else: 70 | u = torch.rand(list(cdf.shape[:-1]) + [N_samples]).to(cdf) 71 | 72 | # Invert CDF 73 | u = u.contiguous() 74 | inds = searchsorted(cdf, u, side='right') 75 | below = torch.max(torch.zeros_like(inds - 1), inds - 1) 76 | above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds) 77 | inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2) 78 | 79 | # cdf_g = tf.gather(cdf, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2) 80 | # bins_g = tf.gather(bins, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2) 81 | matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] 82 | cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) 83 | bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) 84 | 85 | denom = (cdf_g[..., 1] - cdf_g[..., 0]) 86 | denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom) 87 | t = (u - cdf_g[..., 0]) / denom 88 | samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) 89 | 90 | return samples 91 | -------------------------------------------------------------------------------- /lib/networks/renderer/volume_mesh_renderer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from lib.config import cfg 3 | from .nerf_net_utils import * 4 | import numpy as np 5 | import mcubes 6 | import trimesh 7 | 8 | 9 | class Renderer: 10 | def __init__(self, net): 11 | self.net = net 12 | 13 | def render_rays(self, ray_batch, net_c=None, pytest=False): 14 | """Volumetric rendering. 15 | Args: 16 | ray_batch: array of shape [batch_size, ...]. All information necessary 17 | for sampling along a ray, including: ray origin, ray direction, min 18 | dist, max dist, and unit-magnitude viewing direction. 19 | network_fn: function. Model for predicting RGB and density at each point 20 | in space. 21 | network_query_fn: function used for passing queries to network_fn. 22 | N_samples: int. Number of different times to sample along each ray. 23 | retraw: bool. If True, include model's raw, unprocessed predictions. 24 | lindisp: bool. If True, sample linearly in inverse depth rather than in depth. 25 | perturb: float, 0 or 1. If non-zero, each ray is sampled at stratified 26 | random points in time. 27 | N_importance: int. Number of additional times to sample along each ray. 28 | These samples are only passed to network_fine. 29 | network_fine: "fine" network with same spec as network_fn. 30 | white_bkgd: bool. If True, assume a white background. 
31 | raw_noise_std: ... 32 | verbose: bool. If True, print more debugging info. 33 | Returns: 34 | rgb_map: [num_rays, 3]. Estimated RGB color of a ray. Comes from fine model. 35 | disp_map: [num_rays]. Disparity map. 1 / depth. 36 | acc_map: [num_rays]. Accumulated opacity along each ray. Comes from fine model. 37 | raw: [num_rays, num_samples, 4]. Raw predictions from model. 38 | rgb0: See rgb_map. Output for coarse model. 39 | disp0: See disp_map. Output for coarse model. 40 | acc0: See acc_map. Output for coarse model. 41 | z_std: [num_rays]. Standard deviation of distances along ray for each 42 | sample. 43 | """ 44 | pts = ray_batch 45 | if net_c is None: 46 | alpha = self.net(pts) 47 | else: 48 | alpha = self.net(pts, net_c) 49 | 50 | if cfg.N_importance > 0: 51 | alpha_0 = alpha 52 | if net_c is None: 53 | alpha = self.net(pts, model='fine') 54 | else: 55 | alpha = self.net(pts, net_c, model='fine') 56 | 57 | ret = { 58 | 'alpha': alpha 59 | } 60 | if cfg.N_importance > 0: 61 | ret['alpha0'] = alpha_0 62 | 63 | for k in ret: 64 | DEBUG = False 65 | if (torch.isnan(ret[k]).any() 66 | or torch.isinf(ret[k]).any()) and DEBUG: 67 | print(f"! [Numerical Error] {k} contains nan or inf.") 68 | 69 | return ret 70 | 71 | def batchify_rays(self, rays_flat, chunk=1024 * 32): 72 | """Render rays in smaller minibatches to avoid OOM. 73 | """ 74 | all_ret = {} 75 | for i in range(0, rays_flat.shape[0], chunk): 76 | ret = self.render_rays(rays_flat[i:i + chunk]) 77 | for k in ret: 78 | if k not in all_ret: 79 | all_ret[k] = [] 80 | all_ret[k].append(ret[k]) 81 | all_ret = {k: torch.cat(all_ret[k], 0) for k in all_ret} 82 | return all_ret 83 | 84 | def render(self, batch): 85 | pts = batch['pts'] 86 | sh = pts.shape 87 | 88 | inside = batch['inside'][0].bool() 89 | pts = pts[0][inside][None] 90 | 91 | pts = pts.view(sh[0], -1, 1, 3) 92 | 93 | ret = self.batchify_rays(pts, cfg.chunk) 94 | 95 | alpha = ret['alpha'] 96 | alpha = alpha[0, :, 0, 0].detach().cpu().numpy() 97 | cube = np.zeros(sh[1:-1]) 98 | inside = inside.detach().cpu().numpy() 99 | cube[inside == 1] = alpha 100 | 101 | cube = np.pad(cube, 10, mode='constant') 102 | vertices, triangles = mcubes.marching_cubes(cube, cfg.mesh_th) 103 | mesh = trimesh.Trimesh(vertices, triangles) 104 | 105 | ret = {'cube': cube, 'mesh': mesh} 106 | 107 | return ret 108 | -------------------------------------------------------------------------------- /lib/train/__init__.py: -------------------------------------------------------------------------------- 1 | from .trainers import make_trainer 2 | from .optimizer import make_optimizer 3 | from .scheduler import make_lr_scheduler, set_lr_scheduler 4 | from .recorder import make_recorder 5 | 6 | -------------------------------------------------------------------------------- /lib/train/optimizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from lib.utils.optimizer.radam import RAdam 3 | 4 | 5 | _optimizer_factory = { 6 | 'adam': torch.optim.Adam, 7 | 'radam': RAdam, 8 | 'sgd': torch.optim.SGD 9 | } 10 | 11 | 12 | def make_optimizer(cfg, net, lr=None, weight_decay=None): 13 | params = [] 14 | lr = cfg.train.lr if lr is None else lr 15 | weight_decay = cfg.train.weight_decay if weight_decay is None else weight_decay 16 | 17 | for key, value in net.named_parameters(): 18 | if not value.requires_grad: 19 | continue 20 | params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] 21 | 22 | if 'adam' in cfg.train.optim: 23 | optimizer = 
_optimizer_factory[cfg.train.optim](params, lr, weight_decay=weight_decay) 24 | else: 25 | optimizer = _optimizer_factory[cfg.train.optim](params, lr, momentum=0.9) 26 | 27 | return optimizer 28 | -------------------------------------------------------------------------------- /lib/train/recorder.py: -------------------------------------------------------------------------------- 1 | from collections import deque, defaultdict 2 | import torch 3 | from tensorboardX import SummaryWriter 4 | import os 5 | from lib.config.config import cfg 6 | 7 | from termcolor import colored 8 | 9 | 10 | class SmoothedValue(object): 11 | """Track a series of values and provide access to smoothed values over a 12 | window or the global series average. 13 | """ 14 | 15 | def __init__(self, window_size=20): 16 | self.deque = deque(maxlen=window_size) 17 | self.total = 0.0 18 | self.count = 0 19 | 20 | def update(self, value): 21 | self.deque.append(value) 22 | self.count += 1 23 | self.total += value 24 | 25 | @property 26 | def median(self): 27 | d = torch.tensor(list(self.deque)) 28 | return d.median().item() 29 | 30 | @property 31 | def avg(self): 32 | d = torch.tensor(list(self.deque)) 33 | return d.mean().item() 34 | 35 | @property 36 | def global_avg(self): 37 | return self.total / self.count 38 | 39 | 40 | class Recorder(object): 41 | def __init__(self, cfg): 42 | if cfg.local_rank > 0: 43 | return 44 | 45 | log_dir = cfg.record_dir 46 | # if not cfg.resume: 47 | # print(colored('remove contents of directory %s' % log_dir, 'red')) 48 | # os.system('rm -r %s' % log_dir) 49 | self.writer = SummaryWriter(log_dir=log_dir) 50 | 51 | # scalars 52 | self.epoch = 0 53 | self.step = 0 54 | self.loss_stats = defaultdict(SmoothedValue) 55 | self.batch_time = SmoothedValue() 56 | self.data_time = SmoothedValue() 57 | 58 | # images 59 | self.image_stats = defaultdict(object) 60 | if 'process_' + cfg.task in globals(): 61 | self.processor = globals()['process_' + cfg.task] 62 | else: 63 | self.processor = None 64 | 65 | def update_loss_stats(self, loss_dict): 66 | if cfg.local_rank > 0: 67 | return 68 | for k, v in loss_dict.items(): 69 | self.loss_stats[k].update(v.detach().cpu()) 70 | 71 | def update_image_stats(self, image_stats): 72 | if cfg.local_rank > 0: 73 | return 74 | if self.processor is None: 75 | return 76 | image_stats = self.processor(image_stats) 77 | for k, v in image_stats.items(): 78 | self.image_stats[k] = v.detach().cpu() 79 | 80 | def record(self, prefix, step=-1, loss_stats=None, image_stats=None): 81 | if cfg.local_rank > 0: 82 | return 83 | 84 | pattern = prefix + '/{}' 85 | step = step if step >= 0 else self.step 86 | loss_stats = loss_stats if loss_stats else self.loss_stats 87 | 88 | for k, v in loss_stats.items(): 89 | if isinstance(v, SmoothedValue): 90 | self.writer.add_scalar(pattern.format(k), v.median, step) 91 | else: 92 | self.writer.add_scalar(pattern.format(k), v, step) 93 | 94 | if self.processor is None: 95 | return 96 | image_stats = self.processor(image_stats) if image_stats else self.image_stats 97 | for k, v in image_stats.items(): 98 | self.writer.add_image(pattern.format(k), v, step) 99 | 100 | def state_dict(self): 101 | if cfg.local_rank > 0: 102 | return 103 | scalar_dict = {} 104 | scalar_dict['step'] = self.step 105 | return scalar_dict 106 | 107 | def load_state_dict(self, scalar_dict): 108 | if cfg.local_rank > 0: 109 | return 110 | self.step = scalar_dict['step'] 111 | 112 | def __str__(self): 113 | if cfg.local_rank > 0: 114 | return 115 | loss_state = [] 116 | 
for k, v in self.loss_stats.items(): 117 | loss_state.append('{}: {:.4f}'.format(k, v.avg)) 118 | loss_state = ' '.join(loss_state) 119 | 120 | recording_state = ' '.join(['epoch: {}', 'step: {}', '{}', 'data: {:.4f}', 'batch: {:.4f}']) 121 | return recording_state.format(self.epoch, self.step, loss_state, self.data_time.avg, self.batch_time.avg) 122 | 123 | 124 | def make_recorder(cfg): 125 | return Recorder(cfg) 126 | -------------------------------------------------------------------------------- /lib/train/scheduler.py: -------------------------------------------------------------------------------- 1 | from collections import Counter 2 | from lib.utils.optimizer.lr_scheduler import WarmupMultiStepLR, MultiStepLR, ExponentialLR 3 | 4 | 5 | def make_lr_scheduler(cfg, optimizer): 6 | cfg_scheduler = cfg.train.scheduler 7 | if cfg_scheduler.type == 'multi_step': 8 | scheduler = MultiStepLR(optimizer, 9 | milestones=cfg_scheduler.milestones, 10 | gamma=cfg_scheduler.gamma) 11 | elif cfg_scheduler.type == 'exponential': 12 | scheduler = ExponentialLR(optimizer, 13 | decay_epochs=cfg_scheduler.decay_epochs, 14 | gamma=cfg_scheduler.gamma) 15 | return scheduler 16 | 17 | 18 | def set_lr_scheduler(cfg, scheduler): 19 | cfg_scheduler = cfg.train.scheduler 20 | if cfg_scheduler.type == 'multi_step': 21 | scheduler.milestones = Counter(cfg_scheduler.milestones) 22 | elif cfg_scheduler.type == 'exponential': 23 | scheduler.decay_epochs = cfg_scheduler.decay_epochs 24 | scheduler.gamma = cfg_scheduler.gamma 25 | -------------------------------------------------------------------------------- /lib/train/trainers/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_trainer import make_trainer 2 | -------------------------------------------------------------------------------- /lib/train/trainers/if_nerf_clight.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from lib.config import cfg 3 | import torch 4 | from lib.networks.renderer import if_clight_renderer 5 | from lib.train import make_optimizer 6 | 7 | 8 | class NetworkWrapper(nn.Module): 9 | def __init__(self, net): 10 | super(NetworkWrapper, self).__init__() 11 | 12 | self.net = net 13 | self.renderer = if_clight_renderer.Renderer(self.net) 14 | 15 | self.img2mse = lambda x, y : torch.mean((x - y) ** 2) 16 | self.acc_crit = torch.nn.functional.smooth_l1_loss 17 | 18 | def forward(self, batch): 19 | ret = self.renderer.render(batch) 20 | 21 | scalar_stats = {} 22 | loss = 0 23 | 24 | mask = batch['mask_at_box'] 25 | img_loss = self.img2mse(ret['rgb_map'][mask], batch['rgb'][mask]) 26 | scalar_stats.update({'img_loss': img_loss}) 27 | loss += img_loss 28 | 29 | if 'rgb0' in ret: 30 | img_loss0 = self.img2mse(ret['rgb0'], batch['rgb']) 31 | scalar_stats.update({'img_loss0': img_loss0}) 32 | loss += img_loss0 33 | 34 | scalar_stats.update({'loss': loss}) 35 | image_stats = {} 36 | 37 | return ret, loss, scalar_stats, image_stats 38 | -------------------------------------------------------------------------------- /lib/train/trainers/make_trainer.py: -------------------------------------------------------------------------------- 1 | from .trainer import Trainer 2 | import imp 3 | 4 | 5 | def _wrapper_factory(cfg, network): 6 | module = cfg.trainer_module 7 | path = cfg.trainer_path 8 | network_wrapper = imp.load_source(module, path).NetworkWrapper(network) 9 | return network_wrapper 10 | 11 | 12 | def make_trainer(cfg, network): 
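    # _wrapper_factory (above) loads the task-specific NetworkWrapper -- network, renderer and
    # loss, e.g. trainers/nerf.py -- from cfg.trainer_path via imp.load_source; the generic
    # Trainer below only handles device placement, the optimization loop and logging.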
13 | network = _wrapper_factory(cfg, network) 14 | return Trainer(network) 15 | -------------------------------------------------------------------------------- /lib/train/trainers/nerf.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from lib.config import cfg 3 | import torch 4 | from lib.networks.renderer import volume_renderer 5 | from lib.train import make_optimizer 6 | 7 | 8 | class NetworkWrapper(nn.Module): 9 | def __init__(self, net): 10 | super(NetworkWrapper, self).__init__() 11 | 12 | self.net = net 13 | self.renderer = volume_renderer.Renderer(self.net) 14 | 15 | self.img2mse = lambda x, y : torch.mean((x - y) ** 2) 16 | self.acc_crit = torch.nn.functional.smooth_l1_loss 17 | 18 | def forward(self, batch): 19 | ret = self.renderer.render(batch) 20 | 21 | scalar_stats = {} 22 | loss = 0 23 | 24 | mask = batch['mask_at_box'] 25 | img_loss = self.img2mse(ret['rgb_map'][mask], batch['rgb'][mask]) 26 | scalar_stats.update({'img_loss': img_loss}) 27 | loss += img_loss 28 | 29 | if 'rgb0' in ret: 30 | img_loss0 = self.img2mse(ret['rgb0'], batch['rgb']) 31 | scalar_stats.update({'img_loss0': img_loss0}) 32 | loss += img_loss0 33 | 34 | scalar_stats.update({'loss': loss}) 35 | image_stats = {} 36 | 37 | return ret, loss, scalar_stats, image_stats 38 | -------------------------------------------------------------------------------- /lib/train/trainers/tpose.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from lib.config import cfg 3 | import torch 4 | from lib.networks.renderer import tpose_renderer 5 | from lib.train import make_optimizer 6 | 7 | 8 | class NetworkWrapper(nn.Module): 9 | def __init__(self, net): 10 | super(NetworkWrapper, self).__init__() 11 | 12 | self.net = net 13 | self.renderer = tpose_renderer.Renderer(self.net) 14 | 15 | self.img2mse = lambda x, y : torch.mean((x - y) ** 2) 16 | self.acc_crit = torch.nn.functional.smooth_l1_loss 17 | 18 | def forward(self, batch): 19 | ret = self.renderer.render(batch) 20 | 21 | scalar_stats = {} 22 | loss = 0 23 | 24 | mask = batch['mask_at_box'] 25 | img_loss = self.img2mse(ret['rgb_map'][mask], batch['rgb'][mask]) 26 | scalar_stats.update({'img_loss': img_loss}) 27 | loss += img_loss 28 | 29 | if 'rgb0' in ret: 30 | img_loss0 = self.img2mse(ret['rgb0'], batch['rgb']) 31 | scalar_stats.update({'img_loss0': img_loss0}) 32 | loss += img_loss0 33 | 34 | scalar_stats.update({'loss': loss}) 35 | image_stats = {} 36 | 37 | return ret, loss, scalar_stats, image_stats 38 | -------------------------------------------------------------------------------- /lib/train/trainers/trainer.py: -------------------------------------------------------------------------------- 1 | import time 2 | import datetime 3 | import torch 4 | import tqdm 5 | from torch.nn import DataParallel 6 | from lib.config import cfg 7 | 8 | 9 | class Trainer(object): 10 | def __init__(self, network): 11 | device = torch.device('cuda:{}'.format(cfg.local_rank)) 12 | network = network.to(device) 13 | if cfg.distributed: 14 | network = torch.nn.parallel.DistributedDataParallel( 15 | network, 16 | device_ids=[cfg.local_rank], 17 | output_device=cfg.local_rank 18 | ) 19 | self.network = network 20 | self.local_rank = cfg.local_rank 21 | self.device = device 22 | 23 | def reduce_loss_stats(self, loss_stats): 24 | reduced_losses = {k: torch.mean(v) for k, v in loss_stats.items()} 25 | return reduced_losses 26 | 27 | def to_cuda(self, batch): 28 | 
for k in batch: 29 | if k == 'meta': 30 | continue 31 | if isinstance(batch[k], tuple) or isinstance(batch[k], list): 32 | batch[k] = [b.to(self.device) for b in batch[k]] 33 | else: 34 | batch[k] = batch[k].to(self.device) 35 | return batch 36 | 37 | def train(self, epoch, data_loader, optimizer, recorder): 38 | max_iter = len(data_loader) 39 | self.network.train() 40 | end = time.time() 41 | for iteration, batch in enumerate(data_loader): 42 | data_time = time.time() - end 43 | iteration = iteration + 1 44 | 45 | batch = self.to_cuda(batch) 46 | output, loss, loss_stats, image_stats = self.network(batch) 47 | 48 | # training stage: loss; optimizer; scheduler 49 | optimizer.zero_grad() 50 | loss = loss.mean() 51 | loss.backward() 52 | torch.nn.utils.clip_grad_value_(self.network.parameters(), 40) 53 | optimizer.step() 54 | 55 | if cfg.local_rank > 0: 56 | continue 57 | 58 | # data recording stage: loss_stats, time, image_stats 59 | recorder.step += 1 60 | 61 | loss_stats = self.reduce_loss_stats(loss_stats) 62 | recorder.update_loss_stats(loss_stats) 63 | 64 | batch_time = time.time() - end 65 | end = time.time() 66 | recorder.batch_time.update(batch_time) 67 | recorder.data_time.update(data_time) 68 | 69 | if iteration % cfg.log_interval == 0 or iteration == (max_iter - 1): 70 | # print training state 71 | eta_seconds = recorder.batch_time.global_avg * (max_iter - iteration) 72 | eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) 73 | lr = optimizer.param_groups[0]['lr'] 74 | memory = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 75 | 76 | training_state = ' '.join(['eta: {}', '{}', 'lr: {:.6f}', 'max_mem: {:.0f}']) 77 | training_state = training_state.format(eta_string, str(recorder), lr, memory) 78 | print(training_state) 79 | 80 | if iteration % cfg.record_interval == 0 or iteration == (max_iter - 1): 81 | # record loss_stats and image_dict 82 | recorder.update_image_stats(image_stats) 83 | recorder.record('train') 84 | 85 | def val(self, epoch, data_loader, evaluator=None, recorder=None): 86 | self.network.eval() 87 | torch.cuda.empty_cache() 88 | val_loss_stats = {} 89 | data_size = len(data_loader) 90 | for batch in tqdm.tqdm(data_loader): 91 | batch = self.to_cuda(batch) 92 | with torch.no_grad(): 93 | output, loss, loss_stats, image_stats = self.network(batch) 94 | if evaluator is not None: 95 | evaluator.evaluate(output, batch) 96 | 97 | loss_stats = self.reduce_loss_stats(loss_stats) 98 | for k, v in loss_stats.items(): 99 | val_loss_stats.setdefault(k, 0) 100 | val_loss_stats[k] += v 101 | 102 | loss_state = [] 103 | for k in val_loss_stats.keys(): 104 | val_loss_stats[k] /= data_size 105 | loss_state.append('{}: {:.4f}'.format(k, val_loss_stats[k])) 106 | print(loss_state) 107 | 108 | if evaluator is not None: 109 | result = evaluator.summarize() 110 | val_loss_stats.update(result) 111 | 112 | if recorder: 113 | recorder.record('val', epoch, val_loss_stats, image_stats) 114 | -------------------------------------------------------------------------------- /lib/utils/base_utils.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import os 3 | import numpy as np 4 | 5 | 6 | def read_pickle(pkl_path): 7 | with open(pkl_path, 'rb') as f: 8 | return pickle.load(f) 9 | 10 | 11 | def save_pickle(data, pkl_path): 12 | os.system('mkdir -p {}'.format(os.path.dirname(pkl_path))) 13 | with open(pkl_path, 'wb') as f: 14 | pickle.dump(data, f) 15 | 16 | 17 | def project(xyz, K, RT): 18 | """ 19 | xyz: [N, 3] 20 | K: 
[3, 3] 21 | RT: [3, 4] 22 | """ 23 | xyz = np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T 24 | xyz = np.dot(xyz, K.T) 25 | xy = xyz[:, :2] / xyz[:, 2:] 26 | return xy 27 | 28 | 29 | def write_K_pose_inf(K, poses, img_root): 30 | K = K.copy() 31 | K[:2] = K[:2] * 8 32 | K_inf = os.path.join(img_root, 'Intrinsic.inf') 33 | os.system('mkdir -p {}'.format(os.path.dirname(K_inf))) 34 | with open(K_inf, 'w') as f: 35 | for i in range(len(poses)): 36 | f.write('%d\n'%i) 37 | f.write('%f %f %f\n %f %f %f\n %f %f %f\n' % tuple(K.reshape(9).tolist())) 38 | f.write('\n') 39 | 40 | pose_inf = os.path.join(img_root, 'CamPose.inf') 41 | with open(pose_inf, 'w') as f: 42 | for pose in poses: 43 | pose = np.linalg.inv(pose) 44 | A = pose[0:3,:] 45 | tmp = np.concatenate([A[0:3,2].T, A[0:3,0].T,A[0:3,1].T,A[0:3,3].T]) 46 | f.write('%f %f %f %f %f %f %f %f %f %f %f %f\n' % tuple(tmp.tolist())) 47 | -------------------------------------------------------------------------------- /lib/utils/blend_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | import numpy as np 4 | 5 | 6 | def ppts_to_pts(ppts, bw, A): 7 | """transform points from the pose space to the zero space""" 8 | sh = ppts.shape 9 | bw = bw.permute(0, 2, 1) 10 | A = torch.bmm(bw, A.view(sh[0], 24, -1)) 11 | A = A.view(sh[0], -1, 4, 4) 12 | pts = ppts - A[..., :3, 3] 13 | R_inv = torch.inverse(A[..., :3, :3]) 14 | pts = torch.sum(R_inv * pts[:, :, None], dim=3) 15 | return pts 16 | 17 | 18 | def grid_sample_blend_weights(grid_coords, bw): 19 | # the blend weight is indexed by xyz 20 | grid_coords = grid_coords[:, None, None] 21 | bw = F.grid_sample(bw, 22 | grid_coords, 23 | padding_mode='border', 24 | align_corners=True) 25 | bw = bw[:, :, 0, 0] 26 | return bw 27 | 28 | 29 | def bounds_grid_sample_blend_weights(pts, bw, bounds): 30 | """grid sample blend weights""" 31 | pts = pts.clone() 32 | 33 | # interpolate blend weights 34 | min_xyz = bounds[:, 0] 35 | max_xyz = bounds[:, 1] 36 | bounds = max_xyz[:, None] - min_xyz[:, None] 37 | grid_coords = (pts - min_xyz[:, None]) / bounds 38 | grid_coords = grid_coords * 2 - 1 39 | # convert xyz to zyx, since the blend weight is indexed by xyz 40 | grid_coords = grid_coords[..., [2, 1, 0]] 41 | 42 | # the blend weight is indexed by xyz 43 | bw = bw.permute(0, 4, 1, 2, 3) 44 | grid_coords = grid_coords[:, None, None] 45 | bw = F.grid_sample(bw, 46 | grid_coords, 47 | padding_mode='border', 48 | align_corners=True) 49 | bw = bw[:, :, 0, 0] 50 | 51 | return bw 52 | 53 | 54 | def grid_sample_A_blend_weights(nf_grid_coords, bw): 55 | """ 56 | nf_grid_coords: batch_size x N_samples x 24 x 3 57 | bw: batch_size x 24 x 64 x 64 x 64 58 | """ 59 | bws = [] 60 | for i in range(24): 61 | nf_grid_coords_ = nf_grid_coords[:, :, i] 62 | nf_grid_coords_ = nf_grid_coords_[:, None, None] 63 | bw_ = F.grid_sample(bw[:, i:i + 1], 64 | nf_grid_coords_, 65 | padding_mode='border', 66 | align_corners=True) 67 | bw_ = bw_[:, :, 0, 0] 68 | bws.append(bw_) 69 | bw = torch.cat(bws, dim=1) 70 | return bw 71 | 72 | 73 | def ppts_to_pts(pts, bw, A): 74 | """transform points from the pose space to the t pose""" 75 | sh = pts.shape 76 | bw = bw.permute(0, 2, 1) 77 | A = torch.bmm(bw, A.view(sh[0], 24, -1)) 78 | A = A.view(sh[0], -1, 4, 4) 79 | pts = pts - A[..., :3, 3] 80 | R_inv = torch.inverse(A[..., :3, :3]) 81 | pts = torch.sum(R_inv * pts[:, :, None], dim=3) 82 | return pts 83 | 
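# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original repository file): it only
# demonstrates the tensor shapes bounds_grid_sample_blend_weights expects. The
# batch size, grid resolution and point count below are made-up assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    B, N, res = 1, 4096, 64
    pts = torch.rand(B, N, 3)                     # query points in the SMPL space
    bounds = torch.stack([pts.min(1)[0], pts.max(1)[0]], dim=1)  # [B, 2, 3] min/max corners
    bw_volume = torch.rand(B, res, res, res, 24)  # voxelized blend weights, indexed by xyz
    bw = bounds_grid_sample_blend_weights(pts, bw_volume, bounds)
    assert bw.shape == (B, 24, N)                 # one 24-dim skinning weight per query point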
-------------------------------------------------------------------------------- /lib/utils/if_nerf/if_nerf_net_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import os 4 | from lib.config import cfg 5 | import trimesh 6 | 7 | 8 | def update_loss_img(output, batch): 9 | mse = torch.mean((output['rgb_map'] - batch['rgb'])**2, dim=2)[0] 10 | mse = mse.detach().cpu().numpy().astype(np.float32) 11 | 12 | # load the loss img 13 | img_path = batch['meta']['img_path'][0] 14 | paths = img_path.split('/') 15 | paths[-1] = os.path.basename(img_path).replace('.jpg', '.npy') 16 | loss_img_path = os.path.join(paths[0], 'loss', *paths[1:]) 17 | if os.path.exists(loss_img_path): 18 | loss_img = np.load(loss_img_path) 19 | else: 20 | os.system("mkdir -p '{}'".format(os.path.dirname(loss_img_path))) 21 | H, W = int(cfg.H * cfg.ratio), int(cfg.W * cfg.ratio) 22 | loss_img = mse.mean() * np.ones([H, W]).astype(np.float32) 23 | 24 | coord = batch['img_coord'][0] 25 | coord = coord.detach().cpu().numpy() 26 | loss_img[coord[:, 0], coord[:, 1]] = mse 27 | np.save(loss_img_path, loss_img) 28 | 29 | 30 | def init_smpl(smpl): 31 | data_root = 'data/light_stage' 32 | smpl_dir = os.path.join(data_root, cfg.smpl, cfg.human) 33 | for i in range(cfg.ni): 34 | smpl_path = os.path.join(smpl_dir, '{}.ply'.format(i + 1)) 35 | ply = trimesh.load(smpl_path) 36 | xyz = np.array(ply.vertices).ravel() 37 | smpl.weight.data[i] = torch.FloatTensor(xyz) 38 | return smpl 39 | 40 | 41 | def pts_to_can_pts(pts, batch): 42 | """transform pts from the world coordinate to the smpl coordinate""" 43 | Th = batch['Th'] 44 | pts = pts - Th 45 | R = batch['R'] 46 | pts = torch.matmul(pts, batch['R']) 47 | return pts 48 | 49 | 50 | def pts_to_coords(pts, min_xyz): 51 | pts = pts.clone().detach() 52 | # convert xyz to the voxel coordinate dhw 53 | dhw = pts[..., [2, 1, 0]] 54 | min_dhw = min_xyz[:, [2, 1, 0]] 55 | dhw = dhw - min_dhw[:, None] 56 | dhw = dhw / torch.tensor(cfg.voxel_size).to(dhw) 57 | return dhw 58 | -------------------------------------------------------------------------------- /lib/utils/img_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from matplotlib import cm 3 | import matplotlib.pyplot as plt 4 | import matplotlib.patches as patches 5 | import numpy as np 6 | import cv2 7 | 8 | 9 | def unnormalize_img(img, mean, std): 10 | """ 11 | img: [3, h, w] 12 | """ 13 | img = img.detach().cpu().clone() 14 | # img = img / 255. 
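    # invert the dataset normalization (img was stored as (x - mean) / std),
    # then min-max rescale the result to [0, 1] for display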
15 | img *= torch.tensor(std).view(3, 1, 1) 16 | img += torch.tensor(mean).view(3, 1, 1) 17 | min_v = torch.min(img) 18 | img = (img - min_v) / (torch.max(img) - min_v) 19 | return img 20 | 21 | 22 | def bgr_to_rgb(img): 23 | return img[:, :, [2, 1, 0]] 24 | 25 | 26 | def horizon_concate(inp0, inp1): 27 | h0, w0 = inp0.shape[:2] 28 | h1, w1 = inp1.shape[:2] 29 | if inp0.ndim == 3: 30 | inp = np.zeros((max(h0, h1), w0 + w1, 3), dtype=inp0.dtype) 31 | inp[:h0, :w0, :] = inp0 32 | inp[:h1, w0:(w0 + w1), :] = inp1 33 | else: 34 | inp = np.zeros((max(h0, h1), w0 + w1), dtype=inp0.dtype) 35 | inp[:h0, :w0] = inp0 36 | inp[:h1, w0:(w0 + w1)] = inp1 37 | return inp 38 | 39 | 40 | def vertical_concate(inp0, inp1): 41 | h0, w0 = inp0.shape[:2] 42 | h1, w1 = inp1.shape[:2] 43 | if inp0.ndim == 3: 44 | inp = np.zeros((h0 + h1, max(w0, w1), 3), dtype=inp0.dtype) 45 | inp[:h0, :w0, :] = inp0 46 | inp[h0:(h0 + h1), :w1, :] = inp1 47 | else: 48 | inp = np.zeros((h0 + h1, max(w0, w1)), dtype=inp0.dtype) 49 | inp[:h0, :w0] = inp0 50 | inp[h0:(h0 + h1), :w1] = inp1 51 | return inp 52 | 53 | 54 | def transparent_cmap(cmap): 55 | """Copy colormap and set alpha values""" 56 | mycmap = cmap 57 | mycmap._init() 58 | mycmap._lut[:,-1] = 0.3 59 | return mycmap 60 | 61 | cmap = transparent_cmap(plt.get_cmap('jet')) 62 | 63 | 64 | def set_grid(ax, h, w, interval=8): 65 | ax.set_xticks(np.arange(0, w, interval)) 66 | ax.set_yticks(np.arange(0, h, interval)) 67 | ax.grid() 68 | ax.set_yticklabels([]) 69 | ax.set_xticklabels([]) 70 | 71 | 72 | color_list = np.array( 73 | [ 74 | 0.000, 0.447, 0.741, 75 | 0.850, 0.325, 0.098, 76 | 0.929, 0.694, 0.125, 77 | 0.494, 0.184, 0.556, 78 | 0.466, 0.674, 0.188, 79 | 0.301, 0.745, 0.933, 80 | 0.635, 0.078, 0.184, 81 | 0.300, 0.300, 0.300, 82 | 0.600, 0.600, 0.600, 83 | 1.000, 0.000, 0.000, 84 | 1.000, 0.500, 0.000, 85 | 0.749, 0.749, 0.000, 86 | 0.000, 1.000, 0.000, 87 | 0.000, 0.000, 1.000, 88 | 0.667, 0.000, 1.000, 89 | 0.333, 0.333, 0.000, 90 | 0.333, 0.667, 0.000, 91 | 0.333, 1.000, 0.000, 92 | 0.667, 0.333, 0.000, 93 | 0.667, 0.667, 0.000, 94 | 0.667, 1.000, 0.000, 95 | 1.000, 0.333, 0.000, 96 | 1.000, 0.667, 0.000, 97 | 1.000, 1.000, 0.000, 98 | 0.000, 0.333, 0.500, 99 | 0.000, 0.667, 0.500, 100 | 0.000, 1.000, 0.500, 101 | 0.333, 0.000, 0.500, 102 | 0.333, 0.333, 0.500, 103 | 0.333, 0.667, 0.500, 104 | 0.333, 1.000, 0.500, 105 | 0.667, 0.000, 0.500, 106 | 0.667, 0.333, 0.500, 107 | 0.667, 0.667, 0.500, 108 | 0.667, 1.000, 0.500, 109 | 1.000, 0.000, 0.500, 110 | 1.000, 0.333, 0.500, 111 | 1.000, 0.667, 0.500, 112 | 1.000, 1.000, 0.500, 113 | 0.000, 0.333, 1.000, 114 | 0.000, 0.667, 1.000, 115 | 0.000, 1.000, 1.000, 116 | 0.333, 0.000, 1.000, 117 | 0.333, 0.333, 1.000, 118 | 0.333, 0.667, 1.000, 119 | 0.333, 1.000, 1.000, 120 | 0.667, 0.000, 1.000, 121 | 0.667, 0.333, 1.000, 122 | 0.667, 0.667, 1.000, 123 | 0.667, 1.000, 1.000, 124 | 1.000, 0.000, 1.000, 125 | 1.000, 0.333, 1.000, 126 | 1.000, 0.667, 1.000, 127 | 0.167, 0.000, 0.000, 128 | 0.333, 0.000, 0.000, 129 | 0.500, 0.000, 0.000, 130 | 0.667, 0.000, 0.000, 131 | 0.833, 0.000, 0.000, 132 | 1.000, 0.000, 0.000, 133 | 0.000, 0.167, 0.000, 134 | 0.000, 0.333, 0.000, 135 | 0.000, 0.500, 0.000, 136 | 0.000, 0.667, 0.000, 137 | 0.000, 0.833, 0.000, 138 | 0.000, 1.000, 0.000, 139 | 0.000, 0.000, 0.167, 140 | 0.000, 0.000, 0.333, 141 | 0.000, 0.000, 0.500, 142 | 0.000, 0.000, 0.667, 143 | 0.000, 0.000, 0.833, 144 | 0.000, 0.000, 1.000, 145 | 0.000, 0.000, 0.000, 146 | 0.143, 0.143, 0.143, 147 | 0.286, 0.286, 0.286, 148 | 0.429, 
0.429, 0.429, 149 | 0.571, 0.571, 0.571, 150 | 0.714, 0.714, 0.714, 151 | 0.857, 0.857, 0.857, 152 | 1.000, 1.000, 1.000, 153 | 0.50, 0.5, 0 154 | ] 155 | ).astype(np.float32) 156 | colors = color_list.reshape((-1, 3)) * 255 157 | colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 1, 1, 3) 158 | -------------------------------------------------------------------------------- /lib/utils/light_stage/ply_to_occupancy.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.spatial import cKDTree as KDTree 3 | import os 4 | import tqdm 5 | from lib.utils import data_utils 6 | import glob 7 | from lib.utils.if_nerf.voxels import VoxelGrid 8 | from lib.config import cfg 9 | 10 | 11 | def get_scaled_model(model): 12 | min_xyz = np.min(model, axis=0) 13 | max_xyz = np.max(model, axis=0) 14 | bounds = np.stack([min_xyz, max_xyz], axis=0) 15 | center = (min_xyz + max_xyz) / 2 16 | scale = np.max(max_xyz - min_xyz) 17 | model = (model - center) / scale 18 | return model, bounds 19 | 20 | 21 | def create_grid_points_from_bounds(minimun, maximum, res): 22 | x = np.linspace(minimun, maximum, res) 23 | X, Y, Z = np.meshgrid(x, x, x, indexing='ij') 24 | X = X.reshape((np.prod(X.shape), )) 25 | Y = Y.reshape((np.prod(Y.shape), )) 26 | Z = Z.reshape((np.prod(Z.shape), )) 27 | 28 | points_list = np.column_stack((X, Y, Z)) 29 | del X, Y, Z, x 30 | return points_list 31 | 32 | 33 | def voxelized_pointcloud(model, kdtree, res): 34 | occupancies = np.zeros(res ** 3, dtype=np.int8) 35 | _, idx = kdtree.query(model) 36 | occupancies[idx] = 1 37 | compressed_occupancies = np.packbits(occupancies) 38 | return compressed_occupancies 39 | 40 | 41 | def ply_to_occupancy(): 42 | data_root = 'data/light_stage' 43 | point_cloud_dir = os.path.join(data_root, 'point_cloud') 44 | voxel_dir = os.path.join(data_root, 'voxel') 45 | os.system('mkdir -p {}'.format(voxel_dir)) 46 | 47 | bb_min = -0.5 48 | bb_max = 0.5 49 | res = 256 50 | grid_points = create_grid_points_from_bounds(bb_min, bb_max, res) 51 | kdtree = KDTree(grid_points) 52 | 53 | humans = os.listdir(point_cloud_dir) 54 | for human in humans: 55 | current_pc_dir = os.path.join(point_cloud_dir, human) 56 | current_voxel_dir = os.path.join(voxel_dir, human) 57 | os.system('mkdir -p {}'.format(current_voxel_dir)) 58 | paths = sorted(os.listdir(current_pc_dir)) 59 | for path in tqdm.tqdm(paths): 60 | model = data_utils.load_ply(os.path.join(current_pc_dir, path)) 61 | model, bounds = get_scaled_model(model) 62 | compressed_occupancies = voxelized_pointcloud(model, kdtree, res) 63 | i = int(path.split('.')[0]) 64 | np.savez(os.path.join(current_voxel_dir, '{}.npz'.format(i)), 65 | compressed_occupancies=compressed_occupancies, 66 | bounds=bounds) 67 | 68 | 69 | def create_voxel_off(): 70 | data_root = 'data/light_stage/voxel/CoreView_313' 71 | voxel_paths = glob.glob(os.path.join(data_root, '*.npz')) 72 | res = 256 73 | for voxel_path in voxel_paths: 74 | voxel_data = np.load(voxel_path) 75 | occupancy = np.unpackbits(voxel_data['compressed_occupancies']) 76 | occupancy = occupancy.reshape(res, res, res).astype(np.float32) 77 | i = int(os.path.basename(voxel_path).split('.')[0]) 78 | VoxelGrid(occupancy).to_mesh().export(f'/home/pengsida/{i}.off') 79 | __import__('ipdb').set_trace() 80 | -------------------------------------------------------------------------------- /lib/utils/optimizer/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | from 
bisect import bisect_right 2 | from collections import Counter 3 | 4 | import torch 5 | 6 | 7 | class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): 8 | def __init__( 9 | self, 10 | optimizer, 11 | milestones, 12 | gamma=0.1, 13 | warmup_factor=1.0 / 3, 14 | warmup_iters=5, 15 | warmup_method="linear", 16 | last_epoch=-1, 17 | ): 18 | if not list(milestones) == sorted(milestones): 19 | raise ValueError( 20 | "Milestones should be a list of" " increasing integers. Got {}", 21 | milestones, 22 | ) 23 | 24 | if warmup_method not in ("constant", "linear"): 25 | raise ValueError( 26 | "Only 'constant' or 'linear' warmup_method accepted" 27 | "got {}".format(warmup_method) 28 | ) 29 | self.milestones = milestones 30 | self.gamma = gamma 31 | self.warmup_factor = warmup_factor 32 | self.warmup_iters = warmup_iters 33 | self.warmup_method = warmup_method 34 | super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) 35 | 36 | def get_lr(self): 37 | warmup_factor = 1 38 | if self.last_epoch < self.warmup_iters: 39 | if self.warmup_method == "constant": 40 | warmup_factor = self.warmup_factor 41 | elif self.warmup_method == "linear": 42 | alpha = float(self.last_epoch) / self.warmup_iters 43 | warmup_factor = self.warmup_factor * (1 - alpha) + alpha 44 | return [ 45 | base_lr 46 | * warmup_factor 47 | * self.gamma ** bisect_right(self.milestones, self.last_epoch) 48 | for base_lr in self.base_lrs 49 | ] 50 | 51 | 52 | class MultiStepLR(torch.optim.lr_scheduler._LRScheduler): 53 | 54 | def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1): 55 | self.milestones = Counter(milestones) 56 | self.gamma = gamma 57 | super(MultiStepLR, self).__init__(optimizer, last_epoch) 58 | 59 | def get_lr(self): 60 | if self.last_epoch not in self.milestones: 61 | return [group['lr'] for group in self.optimizer.param_groups] 62 | return [group['lr'] * self.gamma ** self.milestones[self.last_epoch] 63 | for group in self.optimizer.param_groups] 64 | 65 | 66 | class ExponentialLR(torch.optim.lr_scheduler._LRScheduler): 67 | 68 | def __init__(self, optimizer, decay_epochs, gamma=0.1, last_epoch=-1): 69 | self.decay_epochs = decay_epochs 70 | self.gamma = gamma 71 | super(ExponentialLR, self).__init__(optimizer, last_epoch) 72 | 73 | def get_lr(self): 74 | return [base_lr * self.gamma ** (self.last_epoch / self.decay_epochs) 75 | for base_lr in self.base_lrs] 76 | -------------------------------------------------------------------------------- /lib/utils/snapshot_data_utils.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import numpy as np 3 | 4 | 5 | def read_pickle(pkl_path): 6 | with open(pkl_path, 'rb') as f: 7 | u = pickle._Unpickler(f) 8 | u.encoding = 'latin1' 9 | return u.load() 10 | 11 | 12 | def get_camera(camera_path): 13 | camera = read_pickle(camera_path) 14 | K = np.zeros([3, 3]) 15 | K[0, 0] = camera['camera_f'][0] 16 | K[1, 1] = camera['camera_f'][1] 17 | K[:2, 2] = camera['camera_c'] 18 | K[2, 2] = 1 19 | R = np.eye(3) 20 | T = np.zeros([3]) 21 | D = camera['camera_k'] 22 | camera = {'K': K, 'R': R, 'T': T, 'D': D} 23 | return camera 24 | -------------------------------------------------------------------------------- /lib/visualizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_visualizer import make_visualizer 2 | -------------------------------------------------------------------------------- /lib/visualizers/if_nerf.py: 
-------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | from lib.config import cfg 4 | 5 | 6 | class Visualizer: 7 | def visualize(self, output, batch): 8 | rgb_pred = output['rgb_map'][0].detach().cpu().numpy() 9 | rgb_gt = batch['rgb'][0].detach().cpu().numpy() 10 | print('mse: {}'.format(np.mean((rgb_pred - rgb_gt) ** 2))) 11 | 12 | mask_at_box = batch['mask_at_box'][0].detach().cpu().numpy() 13 | H, W = int(cfg.H * cfg.ratio), int(cfg.W * cfg.ratio) 14 | mask_at_box = mask_at_box.reshape(H, W) 15 | 16 | img_pred = np.zeros((H, W, 3)) 17 | if cfg.white_bkgd: 18 | img_pred = img_pred + 1 19 | img_pred[mask_at_box] = rgb_pred 20 | 21 | img_gt = np.zeros((H, W, 3)) 22 | if cfg.white_bkgd: 23 | img_gt = img_gt + 1 24 | img_gt[mask_at_box] = rgb_gt 25 | 26 | _, (ax1, ax2) = plt.subplots(1, 2) 27 | ax1.imshow(img_pred) 28 | ax2.imshow(img_gt) 29 | plt.show() 30 | -------------------------------------------------------------------------------- /lib/visualizers/if_nerf_demo.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | from lib.config import cfg 4 | import cv2 5 | import os 6 | from termcolor import colored 7 | 8 | 9 | class Visualizer: 10 | def __init__(self): 11 | data_dir = 'data/render/{}'.format(cfg.exp_name) 12 | print(colored('the results are saved at {}'.format(data_dir), 13 | 'yellow')) 14 | 15 | def visualize(self, output, batch): 16 | rgb_pred = output['rgb_map'][0].detach().cpu().numpy() 17 | 18 | mask_at_box = batch['mask_at_box'][0].detach().cpu().numpy() 19 | H, W = int(cfg.H * cfg.ratio), int(cfg.W * cfg.ratio) 20 | mask_at_box = mask_at_box.reshape(H, W) 21 | 22 | img_pred = np.zeros((H, W, 3)) 23 | if cfg.white_bkgd: 24 | img_pred = img_pred + 1 25 | img_pred[mask_at_box] = rgb_pred 26 | img_pred = img_pred[..., [2, 1, 0]] 27 | 28 | depth_pred = np.zeros((H, W)) 29 | depth_pred[mask_at_box] = output['depth_map'][0].detach().cpu().numpy() 30 | 31 | img_root = 'data/render/{}/frame_{:04d}'.format( 32 | cfg.exp_name, batch['frame_index'].item()) 33 | os.system('mkdir -p {}'.format(img_root)) 34 | index = batch['view_index'].item() 35 | 36 | # plt.imshow(depth_pred) 37 | # depth_dir = os.path.join(img_root, 'depth') 38 | # os.system('mkdir -p {}'.format(depth_dir)) 39 | # plt.savefig(os.path.join(depth_dir, '{}.jpg'.format(index))) 40 | # plt.close() 41 | 42 | # mask_pred = np.zeros((H, W, 3)) 43 | # mask_pred[acc_pred > 0.5] = 255 44 | 45 | # acc_dir = os.path.join(img_root, 'mask') 46 | # os.system('mkdir -p {}'.format(acc_dir)) 47 | # mask = cv2.resize(mask_pred, (H * 8, W * 8), interpolation=cv2.INTER_NEAREST) 48 | # mask_path = os.path.join(acc_dir, 'img_{:04d}.jpg'.format(index)) 49 | # cv2.imwrite(mask_path, mask) 50 | 51 | cv2.imwrite(os.path.join(img_root, '{:04d}.png'.format(index)), 52 | img_pred * 255) 53 | -------------------------------------------------------------------------------- /lib/visualizers/if_nerf_mesh.py: -------------------------------------------------------------------------------- 1 | from lib.utils.if_nerf import voxels 2 | import numpy as np 3 | from lib.config import cfg 4 | import os 5 | from termcolor import colored 6 | 7 | 8 | class Visualizer: 9 | def __init__(self): 10 | result_dir = os.path.join(cfg.result_dir, 'mesh') 11 | print(colored('the results are saved at {}'.format(result_dir), 'yellow')) 12 | 13 | def visualize_voxel(self, output, batch): 14 | 
cube = output['cube'] 15 | cube = cube[10:-10, 10:-10, 10:-10] 16 | cube[cube < cfg.mesh_th] = 0 17 | cube[cube > cfg.mesh_th] = 1 18 | 19 | sh = cube.shape 20 | square_cube = np.zeros((max(sh), ) * 3) 21 | square_cube[:sh[0], :sh[1], :sh[2]] = cube 22 | voxel_grid = voxels.VoxelGrid(square_cube) 23 | mesh = voxel_grid.to_mesh() 24 | mesh.show() 25 | 26 | def visualize(self, output, batch): 27 | mesh = output['mesh'] 28 | # mesh.show() 29 | 30 | result_dir = os.path.join(cfg.result_dir, 'mesh') 31 | os.system('mkdir -p {}'.format(result_dir)) 32 | i = batch['frame_index'].item() 33 | result_path = os.path.join(result_dir, '{:04d}.ply'.format(i)) 34 | mesh.export(result_path) 35 | -------------------------------------------------------------------------------- /lib/visualizers/if_nerf_perform.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | from lib.config import cfg 4 | import cv2 5 | import os 6 | from termcolor import colored 7 | 8 | 9 | class Visualizer: 10 | def __init__(self): 11 | data_dir = 'data/perform/{}'.format(cfg.exp_name) 12 | print(colored('the results are saved at {}'.format(data_dir), 13 | 'yellow')) 14 | 15 | def visualize(self, output, batch): 16 | rgb_pred = output['rgb_map'][0].detach().cpu().numpy() 17 | 18 | mask_at_box = batch['mask_at_box'][0].detach().cpu().numpy() 19 | H, W = int(cfg.H * cfg.ratio), int(cfg.W * cfg.ratio) 20 | mask_at_box = mask_at_box.reshape(H, W) 21 | 22 | img_pred = np.zeros((H, W, 3)) 23 | if cfg.white_bkgd: 24 | img_pred = img_pred + 1 25 | img_pred[mask_at_box] = rgb_pred 26 | img_pred = img_pred[..., [2, 1, 0]] 27 | 28 | frame_root = 'data/perform/{}/{}'.format(cfg.exp_name, 0) 29 | os.system('mkdir -p {}'.format(frame_root)) 30 | frame_index = batch['frame_index'].item() 31 | view_index = batch['view_index'].item() 32 | cv2.imwrite( 33 | os.path.join( 34 | frame_root, 35 | 'frame{:04d}_view{:04d}.png'.format(frame_index, view_index)), 36 | img_pred * 255) 37 | -------------------------------------------------------------------------------- /lib/visualizers/if_nerf_test.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | from lib.config import cfg 4 | import os 5 | import cv2 6 | 7 | 8 | class Visualizer: 9 | def visualize(self, output, batch): 10 | rgb_pred = output['rgb_map'][0].detach().cpu().numpy() 11 | 12 | mask_at_box = batch['mask_at_box'][0].detach().cpu().numpy() 13 | H, W = int(cfg.H * cfg.ratio), int(cfg.W * cfg.ratio) 14 | mask_at_box = mask_at_box.reshape(H, W) 15 | 16 | img_pred = np.zeros((H, W, 3)) 17 | img_pred[mask_at_box] = rgb_pred 18 | 19 | result_dir = os.path.join('data/result/if-nerf', cfg.exp_name) 20 | 21 | if cfg.human in [302, 313, 315]: 22 | i = batch['i'].item() + 1 23 | else: 24 | i = batch['i'].item() 25 | i = i + cfg.begin_i 26 | cam_ind = batch['cam_ind'].item() 27 | frame_dir = os.path.join(result_dir, 'frame_{}'.format(i)) 28 | pred_img_path = os.path.join(frame_dir, 29 | 'pred_{}.jpg'.format(cam_ind + 1)) 30 | 31 | os.system('mkdir -p {}'.format(os.path.dirname(pred_img_path))) 32 | img_pred = (img_pred * 255)[..., [2, 1, 0]] 33 | cv2.imwrite(pred_img_path, img_pred) 34 | 35 | # _, (ax1, ax2) = plt.subplots(1, 2) 36 | # ax1.imshow(img_pred) 37 | # ax2.imshow(img_gt) 38 | # plt.show() 39 | -------------------------------------------------------------------------------- /lib/visualizers/make_visualizer.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | import imp 3 | 4 | 5 | def make_visualizer(cfg): 6 | module = cfg.visualizer_module 7 | path = cfg.visualizer_path 8 | visualizer = imp.load_source(module, path).Visualizer() 9 | return visualizer 10 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | open3d>=0.9.0.0 2 | PyYAML==5.3.1 3 | tqdm==4.28.1 4 | tensorboardX==1.2 5 | termcolor==1.1.0 6 | scikit-image==0.14.2 7 | opencv-contrib-python>=3.4.2.17 8 | opencv-python>=3.4.2.17,<4 9 | imageio==2.3.0 10 | trimesh==3.8.15 11 | plyfile==0.6 12 | PyMCubes==0.1.0 13 | pyglet==1.4.0b1 14 | chumpy 15 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | from lib.config import cfg, args 2 | 3 | 4 | def run_dataset(): 5 | from lib.datasets import make_data_loader 6 | import tqdm 7 | 8 | cfg.train.num_workers = 0 9 | data_loader = make_data_loader(cfg, is_train=False) 10 | for batch in tqdm.tqdm(data_loader): 11 | pass 12 | 13 | 14 | def run_network(): 15 | from lib.networks import make_network 16 | from lib.datasets import make_data_loader 17 | from lib.utils.net_utils import load_network 18 | import tqdm 19 | import torch 20 | import time 21 | 22 | network = make_network(cfg).cuda() 23 | load_network(network, cfg.trained_model_dir, epoch=cfg.test.epoch) 24 | network.eval() 25 | 26 | data_loader = make_data_loader(cfg, is_train=False) 27 | total_time = 0 28 | for batch in tqdm.tqdm(data_loader): 29 | for k in batch: 30 | if k != 'meta': 31 | batch[k] = batch[k].cuda() 32 | with torch.no_grad(): 33 | torch.cuda.synchronize() 34 | start = time.time() 35 | network(batch) 36 | torch.cuda.synchronize() 37 | total_time += time.time() - start 38 | print(total_time / len(data_loader)) 39 | 40 | 41 | def run_evaluate(): 42 | from lib.datasets import make_data_loader 43 | from lib.evaluators import make_evaluator 44 | import tqdm 45 | import torch 46 | from lib.networks import make_network 47 | from lib.utils import net_utils 48 | from lib.networks.renderer import make_renderer 49 | 50 | cfg.perturb = 0 51 | 52 | network = make_network(cfg).cuda() 53 | net_utils.load_network(network, 54 | cfg.trained_model_dir, 55 | resume=cfg.resume, 56 | epoch=cfg.test.epoch) 57 | network.train() 58 | 59 | data_loader = make_data_loader(cfg, is_train=False) 60 | renderer = make_renderer(cfg, network) 61 | evaluator = make_evaluator(cfg) 62 | for batch in tqdm.tqdm(data_loader): 63 | for k in batch: 64 | if k != 'meta': 65 | batch[k] = batch[k].cuda() 66 | with torch.no_grad(): 67 | output = renderer.render(batch) 68 | evaluator.evaluate(output, batch) 69 | evaluator.summarize() 70 | 71 | 72 | def run_visualize(): 73 | from lib.networks import make_network 74 | from lib.datasets import make_data_loader 75 | from lib.utils.net_utils import load_network 76 | from lib.utils import net_utils 77 | import tqdm 78 | import torch 79 | from lib.visualizers import make_visualizer 80 | from lib.networks.renderer import make_renderer 81 | 82 | cfg.perturb = 0 83 | 84 | network = make_network(cfg).cuda() 85 | load_network(network, 86 | cfg.trained_model_dir, 87 | resume=cfg.resume, 88 | epoch=cfg.test.epoch) 89 | network.train() 90 | 91 | data_loader = make_data_loader(cfg, is_train=False) 92 | renderer = make_renderer(cfg, network) 
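    # the renderer and visualizer classes are loaded dynamically from cfg.renderer_path and
    # cfg.visualizer_path (see make_renderer.py / make_visualizer.py above), so this single
    # entry point serves the test, demo, perform and mesh visualizers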
93 | visualizer = make_visualizer(cfg) 94 | for batch in tqdm.tqdm(data_loader): 95 | for k in batch: 96 | if k != 'meta': 97 | batch[k] = batch[k].cuda() 98 | with torch.no_grad(): 99 | output = renderer.render(batch) 100 | visualizer.visualize(output, batch) 101 | 102 | 103 | def run_light_stage(): 104 | from lib.utils.light_stage import ply_to_occupancy 105 | ply_to_occupancy.ply_to_occupancy() 106 | # ply_to_occupancy.create_voxel_off() 107 | 108 | 109 | def run_evaluate_nv(): 110 | from lib.datasets import make_data_loader 111 | from lib.evaluators import make_evaluator 112 | import tqdm 113 | from lib.utils import net_utils 114 | 115 | data_loader = make_data_loader(cfg, is_train=False) 116 | evaluator = make_evaluator(cfg) 117 | for batch in tqdm.tqdm(data_loader): 118 | for k in batch: 119 | if k != 'meta': 120 | batch[k] = batch[k].cuda() 121 | evaluator.evaluate(batch) 122 | evaluator.summarize() 123 | 124 | 125 | if __name__ == '__main__': 126 | globals()['run_' + args.type]() 127 | -------------------------------------------------------------------------------- /test.sh: -------------------------------------------------------------------------------- 1 | # evaluation 2 | # python run.py --type evaluate --cfg_file configs/latent_xyzc_313.yaml exp_name xyzc_313 3 | # python run.py --type evaluate --cfg_file configs/latent_xyzc_315.yaml exp_name xyzc_315 4 | # python run.py --type evaluate --cfg_file configs/latent_xyzc_392.yaml exp_name xyzc_392 5 | # python run.py --type evaluate --cfg_file configs/latent_xyzc_393.yaml exp_name xyzc_393 6 | # python run.py --type evaluate --cfg_file configs/latent_xyzc_394.yaml exp_name xyzc_394 7 | # python run.py --type evaluate --cfg_file configs/latent_xyzc_377.yaml exp_name xyzc_377 8 | # python run.py --type evaluate --cfg_file configs/latent_xyzc_386.yaml exp_name xyzc_386 9 | # python run.py --type evaluate --cfg_file configs/latent_xyzc_390.yaml exp_name xyzc_390 10 | # python run.py --type evaluate --cfg_file configs/latent_xyzc_387.yaml exp_name xyzc_387 11 | -------------------------------------------------------------------------------- /tools/custom/README.md: -------------------------------------------------------------------------------- 1 | ## Run the code on the custom dataset 2 | 3 | Please inform me if there is any problem to run the code on your own data. 4 | 5 | 1. If your data already have SMPL parameters, just export the SMPL parameters and SMPL vertices to two directories `params` and `vertices`. If you do not have SMPL parameters, you could take the following ways: 6 | * For a multi-view video, you could estimate SMPL parameters using [https://github.com/zju3dv/EasyMocap](https://github.com/zju3dv/EasyMocap). The output parameter files can be processed using the [script](https://github.com/zju3dv/neuralbody/blob/master/zju_smpl/easymocap_to_neuralbody.py). 7 | * For a monocular video, you could estimate SMPL parameters using [https://github.com/thmoa/videoavatars](https://github.com/thmoa/videoavatars). The output `reconstructed_poses.hdf5` file can be processed following [the instruction](https://github.com/zju3dv/neuralbody#process-people-snapshot). 8 | 2. Organize the dataset as the following structure. Please refer to `CoreView_392` of ZJU-MoCap dataset as an example. 9 | The `annots.npy` is generated by [get_annots.py](get_annots.py). This code is used here to show the format of `annots.npy`. Please revise it according to your input camera parameters and image paths. 
10 | Example camera files can be found in [camera_params](camera_params). 11 | 12 | ![file](file_structure.png) 13 | 14 | ``` 15 | ├── /path/to/dataset 16 | │ ├── annots.npy // Store the camera parameters and image paths. 17 | │ ├── params 18 | │ │ ├── 0.npy 19 | │ │ ├── ... 20 | │ │ ├── 1234.npy 21 | │ ├── vertices 22 | │ │ ├── 0.npy 23 | │ │ ├── ... 24 | │ │ ├── 1234.npy 25 | │ ├── Camera_B1 // Store the images. No restrictions on the directory name. 26 | │ │ ├── 00000.jpg 27 | │ │ ├── ... 28 | │ ├── Camera_B2 29 | │ │ ├── 00000.jpg 30 | │ │ ├── ... 31 | │ ├── ... 32 | │ ├── Camera_B23 33 | │ │ ├── 00000.jpg 34 | │ │ ├── ... 35 | │ ├── mask_cihp // Store the foreground segmentation. The directory name must be "mask_cihp". 36 | │ │ ├── Camera_B1 37 | │ │ │ ├── 00000.png 38 | │ │ │ ├── ... 39 | │ │ ├── Camera_B2 40 | │ │ │ ├── 00000.png 41 | │ │ │ ├── ... 42 | │ │ ├── ... 43 | │ │ ├── Camera_B23 44 | │ │ │ ├── 00000.png 45 | │ │ │ ├── ... 46 | ``` 47 | 4. Use `configs/multi_view_custom.yaml` or `configs/monocular_custom.yaml` for training. **Note that you need to revise the `train_dataset` and `test_dataset` in the yaml file.** 48 | ``` 49 | # train from scratch 50 | python train_net.py --cfg_file configs/multi_view_custom.yaml exp_name resume False 51 | # resume training 52 | python train_net.py --cfg_file configs/multi_view_custom.yaml exp_name resume True 53 | ``` 54 | Revise the `num_train_frame` and `training_view` in `custom.yaml` according to your data. Or you could specify it in the command line: 55 | ``` 56 | python train_net.py --cfg_file configs/custom.yaml exp_name num_train_frame 1000 training_view "0, 1, 2, 3" resume False 57 | ``` 58 | 6. Visualization. Please refer to [Visualization on ZJU-MoCap](https://github.com/zju3dv/neuralbody#visualization-on-zju-mocap) as an example. 59 | * Visualize novel views of single frame. 60 | ``` 61 | python run.py --type visualize --cfg_file configs/multi_view_custom.yaml exp_name vis_novel_view True num_render_views 100 62 | ``` 63 | 64 | * Visualize views of dynamic humans with fixed camera 65 | ``` 66 | python run.py --type visualize --cfg_file configs/multi_view_custom.yaml exp_name vis_novel_pose True num_render_frame 1000 num_render_views 1 67 | ``` 68 | 69 | * Visualize views of dynamic humans with rotated camera 70 | ``` 71 | python run.py --type visualize --cfg_file configs/multi_view_custom.yaml exp_name vis_novel_pose True num_render_frame 1000 72 | ``` 73 | 74 | * Visualize mesh. `mesh_th` is the iso-surface threshold of Marching Cube Algorithm. 
75 | ``` 76 | # generate meshes 77 | python run.py --type visualize --cfg_file configs/multi_view_custom.yaml exp_name custom vis_mesh True mesh_th 10 78 | # visualize a specific mesh 79 | python tools/render_mesh.py --exp_name custom --dataset zju_mocap --mesh_ind 0 80 | ``` 81 | -------------------------------------------------------------------------------- /tools/custom/file_structure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zju3dv/neuralbody/3c516b953477006a3d1a7311eb4d51438c982c33/tools/custom/file_structure.png -------------------------------------------------------------------------------- /tools/custom/get_annots.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import glob 4 | import os 5 | import json 6 | 7 | 8 | def get_cams(): 9 | intri = cv2.FileStorage('intri.yml', cv2.FILE_STORAGE_READ) 10 | extri = cv2.FileStorage('extri.yml', cv2.FILE_STORAGE_READ) 11 | cams = {'K': [], 'D': [], 'R': [], 'T': []} 12 | for i in range(23): 13 | cams['K'].append(intri.getNode('K_Camera_B{}'.format(i + 1)).mat()) 14 | cams['D'].append( 15 | intri.getNode('dist_Camera_B{}'.format(i + 1)).mat().T) 16 | cams['R'].append(extri.getNode('Rot_Camera_B{}'.format(i + 1)).mat()) 17 | cams['T'].append(extri.getNode('T_Camera_B{}'.format(i + 1)).mat() * 1000) 18 | return cams 19 | 20 | 21 | def get_img_paths(): 22 | all_ims = [] 23 | for i in range(23): 24 | i = i + 1 25 | data_root = 'Camera_B{}'.format(i) 26 | ims = glob.glob(os.path.join(data_root, '*.jpg')) 27 | ims = np.array(sorted(ims)) 28 | all_ims.append(ims) 29 | num_img = min([len(ims) for ims in all_ims]) 30 | all_ims = [ims[:num_img] for ims in all_ims] 31 | all_ims = np.stack(all_ims, axis=1) 32 | return all_ims 33 | 34 | 35 | cams = get_cams() 36 | img_paths = get_img_paths() 37 | 38 | annot = {} 39 | annot['cams'] = cams 40 | 41 | ims = [] 42 | for img_path in img_paths:  # one entry per frame; img_path holds the image paths of all cameras 43 | data = {} 44 | data['ims'] = img_path.tolist() 45 | ims.append(data) 46 | annot['ims'] = ims 47 | 48 | np.save('annots.npy', annot) 49 | np.save('annots_python2.npy', annot, fix_imports=True) 50 | -------------------------------------------------------------------------------- /tools/process_snapshot.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import os 3 | import h5py 4 | import sys 5 | import numpy as np 6 | import open3d as o3d 7 | from snapshot_smpl.smpl import Smpl 8 | import cv2 9 | import tqdm 10 | 11 | 12 | def read_pickle(pkl_path): 13 | with open(pkl_path, 'rb') as f: 14 | u = pickle._Unpickler(f) 15 | u.encoding = 'latin1' 16 | return u.load() 17 | 18 | 19 | def get_KRTD(camera): 20 | K = np.zeros([3, 3]) 21 | K[0, 0] = camera['camera_f'][0] 22 | K[1, 1] = camera['camera_f'][1] 23 | K[:2, 2] = camera['camera_c'] 24 | K[2, 2] = 1 25 | R = np.eye(3) 26 | T = np.zeros([3]) 27 | D = camera['camera_k'] 28 | return K, R, T, D 29 | 30 | 31 | def get_o3d_mesh(vertices, faces): 32 | mesh = o3d.geometry.TriangleMesh() 33 | mesh.vertices = o3d.utility.Vector3dVector(vertices) 34 | mesh.triangles = o3d.utility.Vector3iVector(faces) 35 | mesh.compute_vertex_normals() 36 | return mesh 37 | 38 | 39 | def get_smpl(base_smpl, betas, poses, trans): 40 | base_smpl.betas = betas 41 | base_smpl.pose = poses 42 | base_smpl.trans = trans 43 | vertices = np.array(base_smpl) 44 | 45 | faces = base_smpl.f 46 | mesh = get_o3d_mesh(vertices, faces) 47 | 48 | return
vertices, mesh 49 | 50 | 51 | def render_smpl(mesh, img, K, R, T): 52 | vertices = np.array(mesh.vertices) 53 | rendered_img = renderer.render_multiview(vertices, K[None], R[None], 54 | T[None, None], [img])[0] 55 | return rendered_img 56 | 57 | 58 | def extract_image(data_path): 59 | data_root = os.path.dirname(data_path) 60 | img_dir = os.path.join(data_root, 'image') 61 | os.system('mkdir -p {}'.format(img_dir)) 62 | 63 | if len(os.listdir(img_dir)) >= 200: 64 | return 65 | 66 | cap = cv2.VideoCapture(data_path) 67 | 68 | ret, frame = cap.read() 69 | i = 0 70 | 71 | while ret: 72 | cv2.imwrite(os.path.join(img_dir, '{}.jpg'.format(i)), frame) 73 | ret, frame = cap.read() 74 | i = i + 1 75 | 76 | cap.release() 77 | 78 | 79 | def extract_mask(masks, mask_dir): 80 | if len(os.listdir(mask_dir)) >= len(masks): 81 | return 82 | 83 | for i in tqdm.tqdm(range(len(masks))): 84 | mask = masks[i].astype(np.uint8) 85 | 86 | # erode the mask 87 | border = 4 88 | kernel = np.ones((border, border), np.uint8) 89 | mask = cv2.erode(mask.copy(), kernel) 90 | 91 | cv2.imwrite(os.path.join(mask_dir, '{}.png'.format(i)), mask) 92 | 93 | 94 | data_root = 'data/people_snapshot' 95 | videos = ['female-3-casual'] 96 | 97 | model_paths = [ 98 | 'basicModel_f_lbs_10_207_0_v1.0.0.pkl', 99 | 'basicmodel_m_lbs_10_207_0_v1.0.0.pkl' 100 | ] 101 | 102 | for video in videos: 103 | camera_path = os.path.join(data_root, video, 'camera.pkl') 104 | camera = read_pickle(camera_path) 105 | K, R, T, D = get_KRTD(camera) 106 | 107 | # process video 108 | video_path = os.path.join(data_root, video, video + '.mp4') 109 | extract_image(video_path) 110 | 111 | # process mask 112 | mask_path = os.path.join(data_root, video, 'masks.hdf5') 113 | masks = h5py.File(mask_path)['masks'] 114 | mask_dir = os.path.join(data_root, video, 'mask') 115 | os.system('mkdir -p {}'.format(mask_dir)) 116 | extract_mask(masks, mask_dir) 117 | 118 | smpl_path = os.path.join(data_root, video, 'reconstructed_poses.hdf5') 119 | smpl = h5py.File(smpl_path) 120 | betas = smpl['betas'] 121 | pose = smpl['pose'] 122 | trans = smpl['trans'] 123 | 124 | pose = pose[len(pose) - len(masks):] 125 | trans = trans[len(trans) - len(masks):] 126 | 127 | # process smpl parameters 128 | params = {'beta': np.array(betas), 'pose': pose, 'trans': trans} 129 | params_path = os.path.join(data_root, video, 'params.npy') 130 | np.save(params_path, params) 131 | 132 | if 'female' in video: 133 | model_path = model_paths[0] 134 | else: 135 | model_path = model_paths[1] 136 | model_data = read_pickle(model_path) 137 | 138 | img_dir = os.path.join(data_root, video, 'image') 139 | vertices_dir = os.path.join(data_root, video, 'vertices') 140 | os.system('mkdir -p {}'.format(vertices_dir)) 141 | 142 | num_img = len(os.listdir(img_dir)) 143 | for i in tqdm.tqdm(range(num_img)): 144 | base_smpl = Smpl(model_data) 145 | vertices, mesh = get_smpl(base_smpl, betas, pose[i], trans[i]) 146 | np.save(os.path.join(vertices_dir, '{}.npy'.format(i)), vertices) 147 | -------------------------------------------------------------------------------- /tools/render/cam_render.py: -------------------------------------------------------------------------------- 1 | ''' 2 | MIT License 3 | 4 | Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | ''' 24 | from OpenGL.GLUT import * 25 | 26 | from .render import Render 27 | 28 | 29 | class CamRender(Render): 30 | def __init__(self, width=1600, height=1200, name='Cam Renderer', 31 | program_files=['simple.fs', 'simple.vs'], color_size=1, ms_rate=1): 32 | Render.__init__(self, width, height, name, program_files, color_size, ms_rate) 33 | self.camera = None 34 | 35 | glutDisplayFunc(self.display) 36 | glutKeyboardFunc(self.keyboard) 37 | 38 | def set_camera(self, camera): 39 | self.camera = camera 40 | self.projection_matrix, self.model_view_matrix = camera.get_gl_matrix() 41 | 42 | def set_matrices(self, projection, modelview): 43 | self.projection_matrix = projection 44 | self.model_view_matrix = modelview 45 | 46 | def keyboard(self, key, x, y): 47 | # up 48 | eps = 1 49 | # print(key) 50 | if key == b'w': 51 | self.camera.center += eps * self.camera.direction 52 | elif key == b's': 53 | self.camera.center -= eps * self.camera.direction 54 | if key == b'a': 55 | self.camera.center -= eps * self.camera.right 56 | elif key == b'd': 57 | self.camera.center += eps * self.camera.right 58 | if key == b' ': 59 | self.camera.center += eps * self.camera.up 60 | elif key == b'x': 61 | self.camera.center -= eps * self.camera.up 62 | elif key == b'i': 63 | self.camera.near += 0.1 * eps 64 | self.camera.far += 0.1 * eps 65 | elif key == b'o': 66 | self.camera.near -= 0.1 * eps 67 | self.camera.far -= 0.1 * eps 68 | 69 | self.projection_matrix, self.model_view_matrix = self.camera.get_gl_matrix() 70 | 71 | def show(self): 72 | glutMainLoop() 73 | -------------------------------------------------------------------------------- /tools/render/color.fs: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | out vec4 FragColor; 4 | 5 | in vec3 Color; 6 | 7 | void main() 8 | { 9 | FragColor = vec4(Color,1.0); 10 | } 11 | -------------------------------------------------------------------------------- /tools/render/color.vs: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | layout (location = 0) in vec3 a_Position; 4 | layout (location = 1) in vec3 a_Color; 5 | 6 | out vec3 CamNormal; 7 | out vec3 CamPos; 8 | out vec3 Color; 9 | 10 | uniform mat4 ModelMat; 11 | uniform mat4 PerspMat; 12 | 13 | void main() 14 | { 15 | gl_Position = PerspMat * ModelMat * vec4(a_Position, 1.0); 16 | Color = a_Color; 17 | } 18 | -------------------------------------------------------------------------------- /tools/render/color_render.py: -------------------------------------------------------------------------------- 1 | ''' 2 | MIT License 3 | 4 | Copyright (c) 
2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | ''' 24 | import numpy as np 25 | import random 26 | 27 | from .framework import * 28 | from .cam_render import CamRender 29 | 30 | 31 | class ColorRender(CamRender): 32 | def __init__(self, width=1600, height=1200, name='Color Renderer'): 33 | program_files = ['color.vs', 'color.fs'] 34 | CamRender.__init__(self, width, height, name, program_files=program_files) 35 | 36 | # WARNING: this differs from vertex_buffer and vertex_data in Render 37 | self.vert_buffer = {} 38 | self.vert_data = {} 39 | 40 | self.color_buffer = {} 41 | self.color_data = {} 42 | 43 | self.vertex_dim = {} 44 | self.n_vertices = {} 45 | 46 | def set_mesh(self, vertices, faces, color, faces_clr, mat_name='all'): 47 | self.vert_data[mat_name] = vertices[faces.reshape([-1])] 48 | self.n_vertices[mat_name] = self.vert_data[mat_name].shape[0] 49 | self.vertex_dim[mat_name] = self.vert_data[mat_name].shape[1] 50 | 51 | if mat_name not in self.vert_buffer.keys(): 52 | self.vert_buffer[mat_name] = glGenBuffers(1) 53 | glBindBuffer(GL_ARRAY_BUFFER, self.vert_buffer[mat_name]) 54 | glBufferData(GL_ARRAY_BUFFER, self.vert_data[mat_name], GL_STATIC_DRAW) 55 | 56 | self.color_data[mat_name] = color[faces_clr.reshape([-1])] 57 | if mat_name not in self.color_buffer.keys(): 58 | self.color_buffer[mat_name] = glGenBuffers(1) 59 | glBindBuffer(GL_ARRAY_BUFFER, self.color_buffer[mat_name]) 60 | glBufferData(GL_ARRAY_BUFFER, self.color_data[mat_name], GL_STATIC_DRAW) 61 | 62 | glBindBuffer(GL_ARRAY_BUFFER, 0) 63 | 64 | def cleanup(self): 65 | 66 | glBindBuffer(GL_ARRAY_BUFFER, 0) 67 | for key in self.vert_data: 68 | glDeleteBuffers(1, [self.vert_buffer[key]]) 69 | glDeleteBuffers(1, [self.color_buffer[key]]) 70 | 71 | self.vert_buffer = {} 72 | self.vert_data = {} 73 | 74 | self.color_buffer = {} 75 | self.color_data = {} 76 | 77 | self.render_texture_mat = {} 78 | 79 | self.vertex_dim = {} 80 | self.n_vertices = {} 81 | 82 | def draw(self): 83 | self.draw_init() 84 | 85 | glEnable(GL_MULTISAMPLE) 86 | 87 | glUseProgram(self.program) 88 | glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE, self.model_view_matrix.transpose()) 89 | glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE, self.projection_matrix.transpose()) 90 | 91 | for mat in self.vert_buffer: 92 | # Handle vertex buffer 93 | glBindBuffer(GL_ARRAY_BUFFER, self.vert_buffer[mat]) 94 | glEnableVertexAttribArray(0) 95 | 
glVertexAttribPointer(0, self.vertex_dim[mat], GL_DOUBLE, GL_FALSE, 0, None) 96 | 97 | # Handle normal buffer 98 | glBindBuffer(GL_ARRAY_BUFFER, self.color_buffer[mat]) 99 | glEnableVertexAttribArray(1) 100 | glVertexAttribPointer(1, 3, GL_DOUBLE, GL_FALSE, 0, None) 101 | 102 | glDrawArrays(GL_TRIANGLES, 0, self.n_vertices[mat]) 103 | 104 | glDisableVertexAttribArray(1) 105 | glDisableVertexAttribArray(0) 106 | 107 | glBindBuffer(GL_ARRAY_BUFFER, 0) 108 | 109 | glUseProgram(0) 110 | 111 | glDisable(GL_MULTISAMPLE) 112 | 113 | self.draw_end() 114 | -------------------------------------------------------------------------------- /tools/render/framework.py: -------------------------------------------------------------------------------- 1 | # Mario Rosasco, 2016 2 | # adapted from framework.cpp, Copyright (C) 2010-2012 by Jason L. McKesson 3 | # This file is licensed under the MIT License. 4 | # 5 | # NB: Unlike in the framework.cpp organization, the main loop is contained 6 | # in the tutorial files, not in this framework file. Additionally, a copy of 7 | # this module file must exist in the same directory as the tutorial files 8 | # to be imported properly. 9 | 10 | 11 | import os 12 | 13 | from OpenGL.GL import * 14 | 15 | 16 | # Function that creates and compiles shaders according to the given type (a GL enum value) and 17 | # shader program (a file containing a GLSL program). 18 | def loadShader(shaderType, shaderFile): 19 | # check if file exists, get full path name 20 | strFilename = findFileOrThrow(shaderFile) 21 | shaderData = None 22 | with open(strFilename, 'r') as f: 23 | shaderData = f.read() 24 | 25 | shader = glCreateShader(shaderType) 26 | glShaderSource(shader, shaderData) # note that this is a simpler function call than in C 27 | 28 | # This shader compilation is more explicit than the one used in 29 | # framework.cpp, which relies on a glutil wrapper function. 30 | # This is made explicit here mainly to decrease dependence on pyOpenGL 31 | # utilities and wrappers, which docs caution may change in future versions. 32 | glCompileShader(shader) 33 | 34 | status = glGetShaderiv(shader, GL_COMPILE_STATUS) 35 | if status == GL_FALSE: 36 | # Note that getting the error log is much simpler in Python than in C/C++ 37 | # and does not require explicit handling of the string buffer 38 | strInfoLog = glGetShaderInfoLog(shader) 39 | strShaderType = "" 40 | if shaderType is GL_VERTEX_SHADER: 41 | strShaderType = "vertex" 42 | elif shaderType is GL_GEOMETRY_SHADER: 43 | strShaderType = "geometry" 44 | elif shaderType is GL_FRAGMENT_SHADER: 45 | strShaderType = "fragment" 46 | 47 | print("Compilation failure for " + strShaderType + " shader:\n" + str(strInfoLog)) 48 | 49 | return shader 50 | 51 | 52 | # Function that accepts a list of shaders, compiles them, and returns a handle to the compiled program 53 | def createProgram(shaderList): 54 | program = glCreateProgram() 55 | 56 | for shader in shaderList: 57 | glAttachShader(program, shader) 58 | 59 | glLinkProgram(program) 60 | 61 | status = glGetProgramiv(program, GL_LINK_STATUS) 62 | if status == GL_FALSE: 63 | # Note that getting the error log is much simpler in Python than in C/C++ 64 | # and does not require explicit handling of the string buffer 65 | strInfoLog = glGetProgramInfoLog(program) 66 | print("Linker failure: \n" + str(strInfoLog)) 67 | 68 | for shader in shaderList: 69 | glDetachShader(program, shader) 70 | 71 | return program 72 | 73 | 74 | # Helper function to locate and open the target file (passed in as a string). 
75 | # Returns the full path to the file as a string. 76 | def findFileOrThrow(strBasename): 77 | # Keep constant names in C-style convention, for readability 78 | # when comparing to C(/C++) code. 79 | if os.path.isfile(strBasename): 80 | return strBasename 81 | 82 | LOCAL_FILE_DIR = "." + os.sep 83 | GLOBAL_FILE_DIR = os.path.dirname(os.path.abspath(__file__)) + os.sep 84 | 85 | strFilename = LOCAL_FILE_DIR + strBasename 86 | if os.path.isfile(strFilename): 87 | return strFilename 88 | 89 | strFilename = GLOBAL_FILE_DIR + strBasename 90 | if os.path.isfile(strFilename): 91 | return strFilename 92 | 93 | raise IOError('Could not find target file ' + strBasename) 94 | -------------------------------------------------------------------------------- /tools/render/glm.py: -------------------------------------------------------------------------------- 1 | ''' 2 | MIT License 3 | 4 | Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 
23 | ''' 24 | import numpy as np 25 | 26 | 27 | def vec3(x, y, z): 28 | return np.array([x, y, z], dtype=np.float32) 29 | 30 | 31 | def radians(v): 32 | return np.radians(v) 33 | 34 | 35 | def identity(): 36 | return np.identity(4, dtype=np.float32) 37 | 38 | 39 | def empty(): 40 | return np.zeros([4, 4], dtype=np.float32) 41 | 42 | 43 | def magnitude(v): 44 | return np.linalg.norm(v) 45 | 46 | 47 | def normalize(v): 48 | m = magnitude(v) 49 | return v if m == 0 else v / m 50 | 51 | 52 | def dot(u, v): 53 | return np.sum(u * v) 54 | 55 | 56 | def cross(u, v): 57 | res = vec3(0, 0, 0) 58 | res[0] = u[1] * v[2] - u[2] * v[1] 59 | res[1] = u[2] * v[0] - u[0] * v[2] 60 | res[2] = u[0] * v[1] - u[1] * v[0] 61 | return res 62 | 63 | 64 | # below functions can be optimized 65 | 66 | def translate(m, v): 67 | res = np.copy(m) 68 | res[:, 3] = m[:, 0] * v[0] + m[:, 1] * v[1] + m[:, 2] * v[2] + m[:, 3] 69 | return res 70 | 71 | 72 | def rotate(m, angle, v): 73 | a = angle 74 | c = np.cos(a) 75 | s = np.sin(a) 76 | 77 | axis = normalize(v) 78 | temp = (1 - c) * axis 79 | 80 | rot = empty() 81 | rot[0][0] = c + temp[0] * axis[0] 82 | rot[0][1] = temp[0] * axis[1] + s * axis[2] 83 | rot[0][2] = temp[0] * axis[2] - s * axis[1] 84 | 85 | rot[1][0] = temp[1] * axis[0] - s * axis[2] 86 | rot[1][1] = c + temp[1] * axis[1] 87 | rot[1][2] = temp[1] * axis[2] + s * axis[0] 88 | 89 | rot[2][0] = temp[2] * axis[0] + s * axis[1] 90 | rot[2][1] = temp[2] * axis[1] - s * axis[0] 91 | rot[2][2] = c + temp[2] * axis[2] 92 | 93 | res = empty() 94 | res[:, 0] = m[:, 0] * rot[0][0] + m[:, 1] * rot[0][1] + m[:, 2] * rot[0][2] 95 | res[:, 1] = m[:, 0] * rot[1][0] + m[:, 1] * rot[1][1] + m[:, 2] * rot[1][2] 96 | res[:, 2] = m[:, 0] * rot[2][0] + m[:, 1] * rot[2][1] + m[:, 2] * rot[2][2] 97 | res[:, 3] = m[:, 3] 98 | return res 99 | 100 | 101 | def perspective(fovy, aspect, zNear, zFar): 102 | tanHalfFovy = np.tan(fovy / 2) 103 | 104 | res = empty() 105 | res[0][0] = 1 / (aspect * tanHalfFovy) 106 | res[1][1] = 1 / (tanHalfFovy) 107 | res[2][3] = -1 108 | res[2][2] = - (zFar + zNear) / (zFar - zNear) 109 | res[3][2] = -(2 * zFar * zNear) / (zFar - zNear) 110 | 111 | return res.T 112 | 113 | 114 | def ortho(left, right, bottom, top, zNear, zFar): 115 | # res = np.ones([4, 4], dtype=np.float32) 116 | res = identity() 117 | res[0][0] = 2 / (right - left) 118 | res[1][1] = 2 / (top - bottom) 119 | res[2][2] = - 2 / (zFar - zNear) 120 | res[3][0] = - (right + left) / (right - left) 121 | res[3][1] = - (top + bottom) / (top - bottom) 122 | res[3][2] = - (zFar + zNear) / (zFar - zNear) 123 | return res.T 124 | 125 | 126 | def lookat(eye, center, up): 127 | f = normalize(center - eye) 128 | s = normalize(cross(f, up)) 129 | u = cross(s, f) 130 | 131 | res = identity() 132 | res[0][0] = s[0] 133 | res[1][0] = s[1] 134 | res[2][0] = s[2] 135 | res[0][1] = u[0] 136 | res[1][1] = u[1] 137 | res[2][1] = u[2] 138 | res[0][2] = -f[0] 139 | res[1][2] = -f[1] 140 | res[2][2] = -f[2] 141 | res[3][0] = -dot(s, eye) 142 | res[3][1] = -dot(u, eye) 143 | res[3][2] = -dot(f, eye) 144 | return res.T 145 | 146 | 147 | def transform(d, m): 148 | return np.dot(m, d.T).T 149 | -------------------------------------------------------------------------------- /tools/render/quad.fs: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | out vec4 FragColor; 4 | 5 | in vec2 TexCoord; 6 | 7 | uniform sampler2D screenTexture; 8 | 9 | void main() 10 | { 11 | FragColor = texture(screenTexture, TexCoord); 
12 | } 13 | -------------------------------------------------------------------------------- /tools/render/quad.vs: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | layout (location = 0) in vec2 aPos; 4 | layout (location = 1) in vec2 aTexCoord; 5 | 6 | out vec2 TexCoord; 7 | 8 | void main() 9 | { 10 | gl_Position = vec4(aPos.x, aPos.y, 0.0, 1.0); 11 | TexCoord = aTexCoord; 12 | } 13 | -------------------------------------------------------------------------------- /tools/snapshot_smpl/vendor/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | -------------------------------------------------------------------------------- /tools/snapshot_smpl/vendor/smpl/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright 2015 Matthew Loper, Naureen Mahmood and the Max Planck Gesellschaft. All rights reserved. 3 | This software is provided for research purposes only. 4 | By using this software you agree to the terms of the SMPL Model license here http://smpl.is.tue.mpg.de/license 5 | 6 | More information about SMPL is available here http://smpl.is.tue.mpg. 7 | For comments or questions, please email us at: smpl@tuebingen.mpg.de 8 | 9 | 10 | About this file: 11 | ================ 12 | This is an initialization file to help python look for submodules in this directory. 13 | ''' -------------------------------------------------------------------------------- /tools/snapshot_smpl/vendor/smpl/lbs.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright 2015 Matthew Loper, Naureen Mahmood and the Max Planck Gesellschaft. All rights reserved. 3 | This software is provided for research purposes only. 4 | By using this software you agree to the terms of the SMPL Model license here http://smpl.is.tue.mpg.de/license 5 | 6 | More information about SMPL is available here http://smpl.is.tue.mpg. 7 | For comments or questions, please email us at: smpl@tuebingen.mpg.de 8 | 9 | 10 | About this file: 11 | ================ 12 | This file defines linear blend skinning for the SMPL loader which 13 | defines the effect of bones and blendshapes on the vertices of the template mesh. 
14 | 15 | Modules included: 16 | - global_rigid_transformation: 17 | computes global rotation & translation of the model 18 | - verts_core: [overloaded function inherited from verts.verts_core] 19 | computes the blending of joint-influences for each vertex based on type of skinning 20 | 21 | ''' 22 | 23 | from .posemapper import posemap 24 | import chumpy 25 | import numpy as np 26 | 27 | def global_rigid_transformation(pose, J, kintree_table, xp): 28 | results = {} 29 | pose = pose.reshape((-1,3)) 30 | id_to_col = {kintree_table[1,i] : i for i in range(kintree_table.shape[1])} 31 | parent = {i : id_to_col[kintree_table[0,i]] for i in range(1, kintree_table.shape[1])} 32 | 33 | if xp == chumpy: 34 | from posemapper import Rodrigues 35 | rodrigues = lambda x : Rodrigues(x) 36 | else: 37 | import cv2 38 | rodrigues = lambda x : cv2.Rodrigues(x)[0] 39 | 40 | with_zeros = lambda x : xp.vstack((x, xp.array([[0.0, 0.0, 0.0, 1.0]]))) 41 | results[0] = with_zeros(xp.hstack((rodrigues(pose[0,:]), J[0,:].reshape((3,1))))) 42 | 43 | for i in range(1, kintree_table.shape[1]): 44 | results[i] = results[parent[i]].dot(with_zeros(xp.hstack(( 45 | rodrigues(pose[i,:]), 46 | ((J[i,:] - J[parent[i],:]).reshape((3,1))) 47 | )))) 48 | 49 | pack = lambda x : xp.hstack([np.zeros((4, 3)), x.reshape((4,1))]) 50 | 51 | results = [results[i] for i in sorted(results.keys())] 52 | results_global = results 53 | 54 | if True: 55 | results2 = [results[i] - (pack( 56 | results[i].dot(xp.concatenate( ( (J[i,:]), 0 ) ))) 57 | ) for i in range(len(results))] 58 | results = results2 59 | result = xp.dstack(results) 60 | return result, results_global 61 | 62 | 63 | def verts_core(pose, v, J, weights, kintree_table, want_Jtr=False, xp=chumpy): 64 | A, A_global = global_rigid_transformation(pose, J, kintree_table, xp) 65 | T = A.dot(weights.T) 66 | 67 | rest_shape_h = xp.vstack((v.T, np.ones((1, v.shape[0])))) 68 | 69 | v =(T[:,0,:] * rest_shape_h[0, :].reshape((1, -1)) + 70 | T[:,1,:] * rest_shape_h[1, :].reshape((1, -1)) + 71 | T[:,2,:] * rest_shape_h[2, :].reshape((1, -1)) + 72 | T[:,3,:] * rest_shape_h[3, :].reshape((1, -1))).T 73 | 74 | v = v[:,:3] 75 | 76 | if not want_Jtr: 77 | return v 78 | Jtr = xp.vstack([g[:3,3] for g in A_global]) 79 | return (v, Jtr) 80 | 81 | -------------------------------------------------------------------------------- /tools/snapshot_smpl/vendor/smpl/posemapper.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright 2015 Matthew Loper, Naureen Mahmood and the Max Planck Gesellschaft. All rights reserved. 3 | This software is provided for research purposes only. 4 | By using this software you agree to the terms of the SMPL Model license here http://smpl.is.tue.mpg.de/license 5 | 6 | More information about SMPL is available here http://smpl.is.tue.mpg. 7 | For comments or questions, please email us at: smpl@tuebingen.mpg.de 8 | 9 | 10 | About this file: 11 | ================ 12 | This module defines the mapping of joint-angles to pose-blendshapes. 
13 | 14 | Modules included: 15 | - posemap: 16 | computes the joint-to-pose blend shape mapping given a mapping type as input 17 | 18 | ''' 19 | 20 | import chumpy as ch 21 | import numpy as np 22 | import cv2 23 | 24 | 25 | class Rodrigues(ch.Ch): 26 | dterms = 'rt' 27 | 28 | def compute_r(self): 29 | return cv2.Rodrigues(self.rt.r)[0] 30 | 31 | def compute_dr_wrt(self, wrt): 32 | if wrt is self.rt: 33 | return cv2.Rodrigues(self.rt.r)[1].T 34 | 35 | 36 | def lrotmin(p): 37 | if isinstance(p, np.ndarray): 38 | p = p.ravel()[3:] 39 | return np.concatenate([(cv2.Rodrigues(np.array(pp))[0]-np.eye(3)).ravel() for pp in p.reshape((-1,3))]).ravel() 40 | if p.ndim != 2 or p.shape[1] != 3: 41 | p = p.reshape((-1,3)) 42 | p = p[1:] 43 | return ch.concatenate([(Rodrigues(pp)-ch.eye(3)).ravel() for pp in p]).ravel() 44 | 45 | def posemap(s): 46 | if s == 'lrotmin': 47 | return lrotmin 48 | else: 49 | raise Exception('Unknown posemapping: %s' % (str(s),)) 50 | -------------------------------------------------------------------------------- /tools/snapshot_smpl/vendor/smpl/verts.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright 2015 Matthew Loper, Naureen Mahmood and the Max Planck Gesellschaft. All rights reserved. 3 | This software is provided for research purposes only. 4 | By using this software you agree to the terms of the SMPL Model license here http://smpl.is.tue.mpg.de/license 5 | 6 | More information about SMPL is available here http://smpl.is.tue.mpg. 7 | For comments or questions, please email us at: smpl@tuebingen.mpg.de 8 | 9 | 10 | About this file: 11 | ================ 12 | This file defines the basic skinning modules for the SMPL loader which 13 | defines the effect of bones and blendshapes on the vertices of the template mesh. 14 | 15 | Modules included: 16 | - verts_decorated: 17 | creates an instance of the SMPL model which inherits model attributes from another 18 | SMPL model. 19 | - verts_core: [overloaded function inherited by lbs.verts_core] 20 | computes the blending of joint-influences for each vertex based on type of skinning 21 | 22 | ''' 23 | 24 | import chumpy 25 | from . 
import lbs 26 | from .posemapper import posemap 27 | import scipy.sparse as sp 28 | from chumpy.ch import MatVecMult 29 | 30 | def ischumpy(x): return hasattr(x, 'dterms') 31 | 32 | def verts_decorated(trans, pose, 33 | v_template, J, weights, kintree_table, bs_style, f, 34 | bs_type=None, posedirs=None, betas=None, shapedirs=None, want_Jtr=False): 35 | 36 | for which in [trans, pose, v_template, weights, posedirs, betas, shapedirs]: 37 | if which is not None: 38 | assert ischumpy(which) 39 | 40 | v = v_template 41 | 42 | if shapedirs is not None: 43 | if betas is None: 44 | betas = chumpy.zeros(shapedirs.shape[-1]) 45 | v_shaped = v + shapedirs.dot(betas) 46 | else: 47 | v_shaped = v 48 | 49 | if posedirs is not None: 50 | v_posed = v_shaped + posedirs.dot(posemap(bs_type)(pose)) 51 | else: 52 | v_posed = v_shaped 53 | 54 | v = v_posed 55 | 56 | if sp.issparse(J): 57 | regressor = J 58 | J_tmpx = MatVecMult(regressor, v_shaped[:,0]) 59 | J_tmpy = MatVecMult(regressor, v_shaped[:,1]) 60 | J_tmpz = MatVecMult(regressor, v_shaped[:,2]) 61 | J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T 62 | else: 63 | assert(ischumpy(J)) 64 | 65 | assert(bs_style=='lbs') 66 | result, Jtr = lbs.verts_core(pose, v, J, weights, kintree_table, want_Jtr=True, xp=chumpy) 67 | 68 | tr = trans.reshape((1,3)) 69 | result = result + tr 70 | Jtr = Jtr + tr 71 | 72 | result.trans = trans 73 | result.f = f 74 | result.pose = pose 75 | result.v_template = v_template 76 | result.J = J 77 | result.weights = weights 78 | result.kintree_table = kintree_table 79 | result.bs_style = bs_style 80 | result.bs_type =bs_type 81 | if posedirs is not None: 82 | result.posedirs = posedirs 83 | result.v_posed = v_posed 84 | if shapedirs is not None: 85 | result.shapedirs = shapedirs 86 | result.betas = betas 87 | result.v_shaped = v_shaped 88 | if want_Jtr: 89 | result.J_transformed = Jtr 90 | return result 91 | 92 | def verts_core(pose, v, J, weights, kintree_table, bs_style, want_Jtr=False, xp=chumpy): 93 | 94 | if xp == chumpy: 95 | assert(hasattr(pose, 'dterms')) 96 | assert(hasattr(v, 'dterms')) 97 | assert(hasattr(J, 'dterms')) 98 | assert(hasattr(weights, 'dterms')) 99 | 100 | assert(bs_style=='lbs') 101 | result = lbs.verts_core(pose, v, J, weights, kintree_table, want_Jtr, xp) 102 | 103 | return result 104 | -------------------------------------------------------------------------------- /tools/vis_snapshot.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import os 3 | import h5py 4 | import numpy as np 5 | import open3d as o3d 6 | from snapshot_smpl.renderer import Renderer 7 | import cv2 8 | import tqdm 9 | 10 | 11 | def read_pickle(pkl_path): 12 | with open(pkl_path, 'rb') as f: 13 | u = pickle._Unpickler(f) 14 | u.encoding = 'latin1' 15 | return u.load() 16 | 17 | 18 | def get_KRTD(camera): 19 | K = np.zeros([3, 3]) 20 | K[0, 0] = camera['camera_f'][0] 21 | K[1, 1] = camera['camera_f'][1] 22 | K[:2, 2] = camera['camera_c'] 23 | K[2, 2] = 1 24 | R = np.eye(3) 25 | T = np.zeros([3]) 26 | D = camera['camera_k'] 27 | return K, R, T, D 28 | 29 | 30 | def get_o3d_mesh(vertices, faces): 31 | mesh = o3d.geometry.TriangleMesh() 32 | mesh.vertices = o3d.utility.Vector3dVector(vertices) 33 | mesh.triangles = o3d.utility.Vector3iVector(faces) 34 | mesh.compute_vertex_normals() 35 | return mesh 36 | 37 | 38 | def get_smpl(base_smpl, betas, poses, trans): 39 | base_smpl.betas = betas 40 | base_smpl.pose = poses 41 | base_smpl.trans = trans 42 | vertices = np.array(base_smpl) 43 | 
44 | faces = base_smpl.f 45 | mesh = get_o3d_mesh(vertices, faces) 46 | 47 | return mesh 48 | 49 | 50 | def render_smpl(vertices, img, K, R, T): 51 | rendered_img = renderer.render_multiview(vertices, K[None], R[None], 52 | T[None, None], [img])[0] 53 | return rendered_img 54 | 55 | 56 | data_root = 'data/people_snapshot' 57 | video = 'female-3-casual' 58 | 59 | # if you do not have these smpl models, you could download them from https://drive.google.com/file/d/1HCVcZPu7UOe1Vv4OHHEoGmVUfKyLFq5d/view?usp=sharing 60 | model_paths = [ 61 | 'basicModel_f_lbs_10_207_0_v1.0.0.pkl', 62 | 'basicmodel_m_lbs_10_207_0_v1.0.0.pkl' 63 | ] 64 | 65 | camera_path = os.path.join(data_root, video, 'camera.pkl') 66 | camera = read_pickle(camera_path) 67 | K, R, T, D = get_KRTD(camera) 68 | 69 | mask_path = os.path.join(data_root, video, 'masks.hdf5') 70 | masks = h5py.File(mask_path)['masks'] 71 | 72 | smpl_path = os.path.join(data_root, video, 'reconstructed_poses.hdf5') 73 | smpl = h5py.File(smpl_path) 74 | betas = smpl['betas'] 75 | pose = smpl['pose'] 76 | trans = smpl['trans'] 77 | 78 | if 'female' in video: 79 | model_path = model_paths[0] 80 | else: 81 | model_path = model_paths[1] 82 | model_data = read_pickle(model_path) 83 | faces = model_data['f'] 84 | renderer = Renderer(height=1080, width=1080, faces=faces) 85 | 86 | img_dir = os.path.join(data_root, video, 'image') 87 | vertices_dir = os.path.join(data_root, video, 'vertices') 88 | 89 | num_img = len(os.listdir(img_dir)) 90 | for i in tqdm.tqdm(range(num_img)): 91 | img = cv2.imread(os.path.join(img_dir, '{}.jpg'.format(i))) 92 | img = cv2.undistort(img, K, D) 93 | vertices = np.load(os.path.join(vertices_dir, '{}.npy'.format(i))) 94 | rendered_img = render_smpl(vertices, img, K, R, T) 95 | cv2.imshow('main', rendered_img) 96 | cv2.waitKey(50) & 0xFF 97 | -------------------------------------------------------------------------------- /train.sh: -------------------------------------------------------------------------------- 1 | # People-Snapshot dataset 2 | 3 | # training 4 | # python train_net.py --cfg_file configs/snapshot_f3c.yaml exp_name female3c resume False 5 | # python train_net.py --cfg_file configs/snapshot_f4c.yaml exp_name female4c resume False 6 | # python train_net.py --cfg_file configs/snapshot_f6p.yaml exp_name female6p resume False 7 | # python train_net.py --cfg_file configs/snapshot_f7p.yaml exp_name female7p resume False 8 | # python train_net.py --cfg_file configs/snapshot_f8p.yaml exp_name female8p resume False 9 | # python train_net.py --cfg_file configs/snapshot_m2c.yaml exp_name male2c resume False 10 | # python train_net.py --cfg_file configs/snapshot_m2o.yaml exp_name male2o resume False 11 | # python train_net.py --cfg_file configs/snapshot_m3c.yaml exp_name male3c resume False 12 | # python train_net.py --cfg_file configs/snapshot_m5o.yaml exp_name male5o resume False 13 | 14 | # ZJU-Mocap dataset 15 | 16 | # training 17 | # python train_net.py --cfg_file configs/latent_xyzc_313.yaml exp_name xyzc_313 resume False 18 | # python train_net.py --cfg_file configs/latent_xyzc_315.yaml exp_name xyzc_315 resume False 19 | # python train_net.py --cfg_file configs/latent_xyzc_392.yaml exp_name xyzc_392 resume False 20 | # python train_net.py --cfg_file configs/latent_xyzc_393.yaml exp_name xyzc_393 resume False 21 | # python train_net.py --cfg_file configs/latent_xyzc_394.yaml exp_name xyzc_394 resume False 22 | # python train_net.py --cfg_file configs/latent_xyzc_377.yaml exp_name xyzc_377 resume False 23 | # python 
train_net.py --cfg_file configs/latent_xyzc_386.yaml exp_name xyzc_386 resume False 24 | # python train_net.py --cfg_file configs/latent_xyzc_390.yaml exp_name xyzc_390 resume False 25 | # python train_net.py --cfg_file configs/latent_xyzc_387.yaml exp_name xyzc_387 resume False 26 | 27 | # distributed training 28 | # python -m torch.distributed.launch --nproc_per_node=4 train_net.py --cfg_file configs/latent_xyzc_313.yaml exp_name xyzc_313 resume False gpus "0, 1, 2, 3" distributed True 29 | # python -m torch.distributed.launch --nproc_per_node=4 train_net.py --cfg_file configs/latent_xyzc_315.yaml exp_name xyzc_315 resume False gpus "0, 1, 2, 3" distributed True 30 | # python -m torch.distributed.launch --nproc_per_node=4 train_net.py --cfg_file configs/latent_xyzc_392.yaml exp_name xyzc_392 resume False gpus "0, 1, 2, 3" distributed True 31 | # python -m torch.distributed.launch --nproc_per_node=4 train_net.py --cfg_file configs/latent_xyzc_393.yaml exp_name xyzc_393 resume False gpus "0, 1, 2, 3" distributed True 32 | # python -m torch.distributed.launch --nproc_per_node=4 train_net.py --cfg_file configs/latent_xyzc_394.yaml exp_name xyzc_394 resume False gpus "0, 1, 2, 3" distributed True 33 | # python -m torch.distributed.launch --nproc_per_node=4 train_net.py --cfg_file configs/latent_xyzc_377.yaml exp_name xyzc_377 resume False gpus "0, 1, 2, 3" distributed True 34 | # python -m torch.distributed.launch --nproc_per_node=4 train_net.py --cfg_file configs/latent_xyzc_386.yaml exp_name xyzc_386 resume False gpus "0, 1, 2, 3" distributed True 35 | # python -m torch.distributed.launch --nproc_per_node=4 train_net.py --cfg_file configs/latent_xyzc_390.yaml exp_name xyzc_390 resume False gpus "0, 1, 2, 3" distributed True 36 | # python -m torch.distributed.launch --nproc_per_node=4 train_net.py --cfg_file configs/latent_xyzc_387.yaml exp_name xyzc_387 resume False gpus "0, 1, 2, 3" distributed True 37 | -------------------------------------------------------------------------------- /train_net.py: -------------------------------------------------------------------------------- 1 | from lib.config import cfg, args 2 | from lib.networks import make_network 3 | from lib.train import make_trainer, make_optimizer, make_lr_scheduler, make_recorder, set_lr_scheduler 4 | from lib.datasets import make_data_loader 5 | from lib.utils.net_utils import load_model, save_model, load_network 6 | from lib.evaluators import make_evaluator 7 | import torch.multiprocessing 8 | import torch 9 | import torch.distributed as dist 10 | import os 11 | 12 | if cfg.fix_random: 13 | torch.manual_seed(0) 14 | torch.backends.cudnn.deterministic = True 15 | torch.backends.cudnn.benchmark = False 16 | 17 | 18 | def train(cfg, network): 19 | trainer = make_trainer(cfg, network) 20 | optimizer = make_optimizer(cfg, network) 21 | scheduler = make_lr_scheduler(cfg, optimizer) 22 | recorder = make_recorder(cfg) 23 | evaluator = make_evaluator(cfg) 24 | 25 | begin_epoch = load_model(network, 26 | optimizer, 27 | scheduler, 28 | recorder, 29 | cfg.trained_model_dir, 30 | resume=cfg.resume) 31 | set_lr_scheduler(cfg, scheduler) 32 | 33 | train_loader = make_data_loader(cfg, 34 | is_train=True, 35 | is_distributed=cfg.distributed, 36 | max_iter=cfg.ep_iter) 37 | val_loader = make_data_loader(cfg, is_train=False) 38 | 39 | for epoch in range(begin_epoch, cfg.train.epoch): 40 | recorder.epoch = epoch 41 | if cfg.distributed: 42 | train_loader.batch_sampler.sampler.set_epoch(epoch) 43 | 44 | trainer.train(epoch, train_loader, 
optimizer, recorder) 45 | scheduler.step() 46 | 47 | if (epoch + 1) % cfg.save_ep == 0 and cfg.local_rank == 0: 48 | save_model(network, optimizer, scheduler, recorder, 49 | cfg.trained_model_dir, epoch) 50 | 51 | if (epoch + 1) % cfg.save_latest_ep == 0 and cfg.local_rank == 0: 52 | save_model(network, 53 | optimizer, 54 | scheduler, 55 | recorder, 56 | cfg.trained_model_dir, 57 | epoch, 58 | last=True) 59 | 60 | if (epoch + 1) % cfg.eval_ep == 0: 61 | trainer.val(epoch, val_loader, evaluator, recorder) 62 | 63 | return network 64 | 65 | 66 | def test(cfg, network): 67 | trainer = make_trainer(cfg, network) 68 | val_loader = make_data_loader(cfg, is_train=False) 69 | evaluator = make_evaluator(cfg) 70 | epoch = load_network(network, 71 | cfg.trained_model_dir, 72 | resume=cfg.resume, 73 | epoch=cfg.test.epoch) 74 | trainer.val(epoch, val_loader, evaluator) 75 | 76 | 77 | def synchronize(): 78 | """ 79 | Helper function to synchronize (barrier) among all processes when 80 | using distributed training 81 | """ 82 | if not dist.is_available(): 83 | return 84 | if not dist.is_initialized(): 85 | return 86 | world_size = dist.get_world_size() 87 | if world_size == 1: 88 | return 89 | dist.barrier() 90 | 91 | 92 | def main(): 93 | if cfg.distributed: 94 | cfg.local_rank = int(os.environ['RANK']) % torch.cuda.device_count() 95 | torch.cuda.set_device(cfg.local_rank) 96 | torch.distributed.init_process_group(backend="nccl", 97 | init_method="env://") 98 | synchronize() 99 | 100 | network = make_network(cfg) 101 | if args.test: 102 | test(cfg, network) 103 | else: 104 | train(cfg, network) 105 | 106 | 107 | if __name__ == "__main__": 108 | main() 109 | -------------------------------------------------------------------------------- /zju_smpl/cfg_model.yml: -------------------------------------------------------------------------------- 1 | module: easymocap.bodymodel.smpl.SMPLModel 2 | args: 3 | model_path: data/bodymodels/SMPL_python_v.1.1.0/smpl/models/basicmodel_neutral_lbs_10_207_0_v1.1.0.pkl 4 | device: cuda 5 | regressor_path: data/smplx/J_regressor_body25.npy 6 | NUM_SHAPES: 10 7 | use_pose_blending: True 8 | -------------------------------------------------------------------------------- /zju_smpl/easymocap_to_neuralbody.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import os 4 | import os.path as osp 5 | import sys 6 | import glob 7 | import numpy as np 8 | import tqdm 9 | import cv2 10 | 11 | 12 | def visualize_o3d_pts(pts): 13 | import open3d as o3d 14 | pts = pts.reshape(-1, 3) 15 | pcd = o3d.geometry.PointCloud() 16 | pcd.points = o3d.utility.Vector3dVector(pts) 17 | o3d.visualization.draw_geometries([pcd]) 18 | 19 | 20 | parser = argparse.ArgumentParser() 21 | parser.add_argument('--input_dir', default='my_313', type=str) 22 | parser.add_argument('--type', default='annots', type=str) 23 | args = parser.parse_args() 24 | 25 | def get_cams(): 26 | intri = cv2.FileStorage('intri.yml', cv2.FILE_STORAGE_READ) 27 | extri = cv2.FileStorage('extri.yml', cv2.FILE_STORAGE_READ) 28 | cams = {'K': [], 'D': [], 'R': [], 'T': []} 29 | for i in num_cams: 30 | cams['K'].append(intri.getNode('K_{}'.format(i)).mat()) 31 | cams['D'].append( 32 | intri.getNode('dist_{}'.format(i)).mat().T) 33 | cams['R'].append(extri.getNode('Rot_{}'.format(i)).mat()) 34 | cams['T'].append(extri.getNode('T_{}'.format(i)).mat() * 1000) 35 | return cams 36 | 37 | 38 | def get_img_paths(): 39 | all_ims = [] 40 | for i in num_cams: 41 | data_root 
= 'images/{}'.format(i) 42 | ims = glob.glob(os.path.join(data_root, '*.jpg')) 43 | ims = sorted(ims) 44 | ims = np.array(ims) 45 | all_ims.append(ims) 46 | num_img = min([len(ims) for ims in all_ims]) 47 | all_ims = [ims[:num_img] for ims in all_ims] 48 | all_ims = np.stack(all_ims, axis=1) 49 | return all_ims 50 | 51 | 52 | def gen_params_vertices(filename, param_in, param_out, vert_out): 53 | param_in_full = osp.join(param_in, filename) 54 | root = int(osp.splitext(filename)[0]) 55 | 56 | params = json.load(open(param_in_full))['annots'][0] 57 | poses = np.array(params['poses']) 58 | Rh = np.array(params['Rh']) 59 | Th = np.array(params['Th']) 60 | shapes = np.array(params['shapes']) 61 | 62 | # the params of neural body 63 | params = {'poses': poses, 'Rh': Rh, 'Th': Th, 'shapes': shapes} 64 | # np.save('params_0.npy', params) 65 | np.save(osp.join(param_out, "{}.npy".format(root)), params) 66 | 67 | ori_poses = np.zeros((1, bodymodel.NUM_POSES_FULL)) 68 | ori_poses[..., 3:] = poses 69 | vertices = bodymodel(poses=ori_poses, shapes=shapes, Rh=Rh, Th=Th)[0].detach().cpu().numpy() 70 | np.save(osp.join(vert_out, "{}.npy".format(root)), vertices) 71 | 72 | 73 | if args.type == 'annots': 74 | os.chdir(args.input_dir) 75 | num_cams = os.listdir('images') 76 | num_cams = sorted(num_cams) 77 | 78 | cams = get_cams() 79 | img_paths = get_img_paths() 80 | 81 | annot = {} 82 | annot['cams'] = cams 83 | 84 | ims = [] 85 | for img_path in img_paths: 86 | data = {} 87 | data['ims'] = img_path.tolist() 88 | ims.append(data) 89 | annot['ims'] = ims 90 | 91 | np.save('annots.npy', annot) 92 | else: 93 | param_in = os.path.join(args.input_dir, 'output-smpl-3d/smpl/') 94 | param_out = osp.join(args.input_dir, 'params') 95 | vert_out = osp.join(args.input_dir, 'vertices') 96 | cfg_path = 'cfg_model.yml' 97 | 98 | os.system(f"mkdir -p {vert_out}") 99 | 100 | from easymocap.config.baseconfig import load_object, Config 101 | from easymocap.bodymodel.smplx import SMPLHModel, SMPLModel 102 | # load smpl model (maybe copy to gpu) 103 | cfg_model = Config.load(cfg_path) 104 | # cfg_model.args.model_path = cfg_model.args.model_path.replace('neutral', args.gender) 105 | cfg_model.module = cfg_model.module.replace('SMPLHModelEmbedding', 'SMPLHModel') 106 | # cfg_model.args.device = 'cpu' 107 | bodymodel: SMPLModel = load_object(cfg_model.module, cfg_model.args) 108 | 109 | for filename in tqdm.tqdm(sorted(os.listdir(param_in))): 110 | gen_params_vertices(filename, param_in, param_out, vert_out) 111 | 112 | # generate annots.npy 113 | # python easymocap_to_neuralbody.py --input_dir {data_dir} --type annots 114 | # generate params and vertices 115 | # python easymocap_to_neuralbody.py --input_dir {data_dir} --type vertices 116 | -------------------------------------------------------------------------------- /zju_smpl/example.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "id": 0, 4 | "Rh": [[-1.210, -0.104, 0.541]], 5 | "Th": [[0.267, 0.188, -0.861]], 6 | "poses": [[0.000, 0.000, 0.000, -1.382, -0.390, 0.088, 0.319, -0.340, -0.549, -0.000, 0.014, -0.000, 0.626, -0.182, -0.036, 0.238, -0.388, 0.520, 0.000, 0.121, 0.000, 0.128, -0.360, -0.035, -0.268, -0.207, -0.098, 0.000, 0.033, -0.000, 0.000, 0.001, -0.000, -0.000, -0.000, -0.000, 0.595, -0.294, 0.183, 0.000, -0.000, -0.000, -0.000, 0.013, 0.000, -0.309, -0.380, 0.262, 0.020, 0.171, 0.027, -0.029, 1.803, 0.233, 0.020, 0.209, 0.252, -0.048, 0.737, -0.012, -0.000, 0.000, 0.000, -0.000, 0.000, -0.000, 0.000, 
0.000, -0.000, 0.000, -0.000, -0.000]], 7 | "shapes": [[-0.940, -0.223, -0.009, 0.196, 0.039, 0.018, -0.005, -0.011, -0.001, -0.004]] 8 | } 9 | ] 10 | -------------------------------------------------------------------------------- /zju_smpl/extract_vertices.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | import numpy as np 5 | import torch 6 | sys.path.append("../") 7 | from smplmodel.body_model import SMPLlayer 8 | 9 | smpl_dir = 'data/zju_mocap/CoreView_313/params' 10 | verts_dir = 'data/zju_mocap/CoreView_313/vertices' 11 | 12 | # Previously, EasyMocap estimated SMPL parameters without pose blend shapes. 13 | # The newly fitted SMPL parameters consider pose blend shapes. 14 | new_params = False 15 | if 'new' in os.path.basename(smpl_dir): 16 | new_params = True 17 | 18 | smpl_path = os.path.join(smpl_dir, "1.npy") 19 | verts_path = os.path.join(verts_dir, "1.npy") 20 | 21 | ## load precomputed vertices 22 | verts_load = np.load(verts_path) 23 | 24 | ## create smpl model 25 | model_folder = 'data/zju_mocap/smplx' 26 | device = torch.device('cpu') 27 | body_model = SMPLlayer(os.path.join(model_folder, 'smpl'), 28 | gender='neutral', 29 | device=device, 30 | regressor_path=os.path.join(model_folder, 31 | 'J_regressor_body25.npy')) 32 | body_model.to(device) 33 | 34 | ## load SMPL zju 35 | params = np.load(smpl_path, allow_pickle=True).item() 36 | 37 | vertices = body_model(return_verts=True, 38 | return_tensor=False, 39 | new_params=new_params, 40 | **params) 41 | --------------------------------------------------------------------------------
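A minimal follow-up sketch for `zju_smpl/extract_vertices.py` above (not part of the repository; it assumes the variable names used in the script): the recomputed `vertices` can be checked against the precomputed `verts_load` loaded earlier.
```
# Hypothetical verification step: compare the recomputed SMPL vertices with the
# precomputed ones loaded from verts_path above.
import numpy as np
diff = np.abs(np.squeeze(np.asarray(vertices)) - np.squeeze(np.asarray(verts_load)))
print('max vertex difference:', diff.max())
```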