├── phc
│   ├── phc
│   │   ├── __init__.py
│   │   ├── env
│   │   │   ├── __init__.py
│   │   │   └── tasks
│   │   │       ├── humanoid_im_mcp_getup.py
│   │   │       ├── __init__.py
│   │   │       ├── vec_task_wrappers.py
│   │   │       ├── humanoid_im_mcp.py
│   │   │       ├── humanoid_amp_task.py
│   │   │       └── vec_task.py
│   │   ├── learning
│   │   │   ├── __init__.py
│   │   │   ├── unrealego
│   │   │   │   ├── __init__.py
│   │   │   │   ├── models.py
│   │   │   │   ├── base_model.py
│   │   │   │   ├── unrealego_heatmap_shared_model.py
│   │   │   │   └── unrealego_autoencoder_model.py
│   │   │   ├── __pycache__
│   │   │   │   ├── pnn.cpython-38.pyc
│   │   │   │   ├── __init__.cpython-38.pyc
│   │   │   │   ├── vq_quantizer.cpython-38.pyc
│   │   │   │   ├── loss_functions.cpython-38.pyc
│   │   │   │   ├── network_builder.cpython-38.pyc
│   │   │   │   └── network_loader.cpython-38.pyc
│   │   │   ├── loss_functions.py
│   │   │   ├── ar_prior.py
│   │   │   ├── running_norm.py
│   │   │   ├── replay_buffer.py
│   │   │   ├── amp_network_mcp_builder.py
│   │   │   ├── amp_network_pnn_builder.py
│   │   │   ├── amp_datasets.py
│   │   │   ├── amp_models.py
│   │   │   ├── pnn.py
│   │   │   └── vq_quantizer.py
│   │   ├── __pycache__
│   │   │   └── __init__.cpython-38.pyc
│   │   ├── utils
│   │   │   ├── __pycache__
│   │   │   │   ├── flags.cpython-38.pyc
│   │   │   │   ├── __init__.cpython-38.pyc
│   │   │   │   ├── motion_lib_g1.cpython-38.pyc
│   │   │   │   ├── motion_lib_h1.cpython-38.pyc
│   │   │   │   ├── torch_utils.cpython-38.pyc
│   │   │   │   ├── motion_lib_base.cpython-38.pyc
│   │   │   │   ├── pytorch3d_transforms.cpython-38.pyc
│   │   │   │   ├── rotation_conversions.cpython-38.pyc
│   │   │   │   ├── torch_g1_humanoid_batch.cpython-38.pyc
│   │   │   │   └── torch_h1_humanoid_batch.cpython-38.pyc
│   │   │   ├── test_motion_file.py
│   │   │   ├── flags.py
│   │   │   ├── o3d_utils.py
│   │   │   ├── __init__.py
│   │   │   ├── benchmarking.py
│   │   │   ├── draw_utils.py
│   │   │   ├── parse_task.py
│   │   │   ├── logger.py
│   │   │   ├── plot_script.py
│   │   │   └── running_mean_std.py
│   │   ├── smpllib
│   │   │   └── __pycache__
│   │   │       ├── smpl_eval.cpython-38.pyc
│   │   │       └── smpl_parser.cpython-38.pyc
│   │   └── data
│   │       ├── assets
│   │       │   ├── mesh
│   │       │   │   └── smpl
│   │       │   │       └── 1c00fde5-abea-4340-b528-921965f3a020
│   │       │   │           └── geom
│   │       │   │               ├── Chest.stl
│   │       │   │               ├── Head.stl
│   │       │   │               ├── L_Hip.stl
│   │       │   │               ├── L_Toe.stl
│   │       │   │               ├── Neck.stl
│   │       │   │               ├── R_Hip.stl
│   │       │   │               ├── R_Toe.stl
│   │       │   │               ├── Spine.stl
│   │       │   │               ├── Torso.stl
│   │       │   │               ├── L_Ankle.stl
│   │       │   │               ├── L_Elbow.stl
│   │       │   │               ├── L_Hand.stl
│   │       │   │               ├── L_Knee.stl
│   │       │   │               ├── L_Thorax.stl
│   │       │   │               ├── L_Wrist.stl
│   │       │   │               ├── Pelvis.stl
│   │       │   │               ├── R_Ankle.stl
│   │       │   │               ├── R_Elbow.stl
│   │       │   │               ├── R_Hand.stl
│   │       │   │               ├── R_Knee.stl
│   │       │   │               ├── R_Thorax.stl
│   │       │   │               ├── R_Wrist.stl
│   │       │   │               ├── L_Shoulder.stl
│   │       │   │               └── R_Shoulder.stl
│   │       │   └── mjcf
│   │       │       ├── ball_medium.urdf
│   │       │       ├── location_marker.urdf
│   │       │       ├── traj_marker.urdf
│   │       │       ├── block_projectile.urdf
│   │       │       ├── heading_marker.urdf
│   │       │       ├── block_projectile_large.urdf
│   │       │       ├── capsule.urdf
│   │       │       └── humanoid_template_local.xml
│   │       └── cfg
│   │           ├── learning
│   │           │   ├── im.yaml
│   │           │   ├── im_pnn.yaml
│   │           │   ├── im_mcp.yaml
│   │           │   ├── task_amp_big.yaml
│   │           │   └── task_no_amp_big.yaml
│   │           └── env
│   │               ├── phc_prim_iccv.yaml
│   │               ├── h1_im_4.yaml
│   │               ├── h1_im_1.yaml
│   │               ├── h1_im_2.yaml
│   │               ├── h1_im_3.yaml
│   │               ├── phc_shape_pnn_train_iccv.yaml
│   │               ├── phc_kp_pnn_iccv.yaml
│   │               ├── phc_shape_pnn_iccv.yaml
│   │               ├── phc_kp_mcp_iccv.yaml
│   │               └── phc_shape_mcp_iccv.yaml
│   ├── phc.egg-info
│   │   ├── top_level.txt
│   │   ├── dependency_links.txt
│   │   ├── PKG-INFO
│   │   └── SOURCES.txt
│   └── setup.py
├── README.md
├── scripts
│   ├── mjcf_to_urdf.py
│   ├── quest_camera.py
│   ├── mdm_test.py
│   ├── ws_client.py
│   ├── pmcp
│   │   └── forward_pmcp.py
│   ├── data_process
│   │   ├── grad_fit_h1_shape.py
│   │   ├── process_amass_raw.py
│   │   ├── convert_data_smpl.py
│   │   ├── grad_fit_g1_shape.py
│   │   ├── convert_amass_isaac.py
│   │   └── convert_data_mdm.py
│   ├── render_smpl_o3d.py
│   └── .ipynb_checkpoints
│       └── render_smpl_o3d-checkpoint.py
└── LICENSE
/phc/phc/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/phc/phc/env/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/phc/phc/learning/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/phc/phc.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | phc
2 |
--------------------------------------------------------------------------------
/phc/phc/learning/unrealego/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/phc/phc.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/phc/phc/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/utils/__pycache__/flags.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/utils/__pycache__/flags.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/learning/__pycache__/pnn.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/learning/__pycache__/pnn.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/utils/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/utils/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/learning/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/learning/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/smpllib/__pycache__/smpl_eval.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/smpllib/__pycache__/smpl_eval.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/smpllib/__pycache__/smpl_parser.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/smpllib/__pycache__/smpl_parser.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/utils/__pycache__/motion_lib_g1.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/utils/__pycache__/motion_lib_g1.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/utils/__pycache__/motion_lib_h1.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/utils/__pycache__/motion_lib_h1.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/utils/__pycache__/torch_utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/utils/__pycache__/torch_utils.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/learning/__pycache__/vq_quantizer.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/learning/__pycache__/vq_quantizer.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/utils/__pycache__/motion_lib_base.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/utils/__pycache__/motion_lib_base.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/learning/__pycache__/loss_functions.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/learning/__pycache__/loss_functions.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/learning/__pycache__/network_builder.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/learning/__pycache__/network_builder.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/learning/__pycache__/network_loader.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/learning/__pycache__/network_loader.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/utils/__pycache__/pytorch3d_transforms.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/utils/__pycache__/pytorch3d_transforms.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/utils/__pycache__/rotation_conversions.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/utils/__pycache__/rotation_conversions.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/utils/__pycache__/torch_g1_humanoid_batch.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/utils/__pycache__/torch_g1_humanoid_batch.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/utils/__pycache__/torch_h1_humanoid_batch.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/utils/__pycache__/torch_h1_humanoid_batch.cpython-38.pyc
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Chest.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Chest.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Head.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Head.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Hip.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Hip.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Toe.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Toe.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Neck.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Neck.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Hip.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Hip.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Toe.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Toe.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Spine.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Spine.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Torso.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Torso.stl
--------------------------------------------------------------------------------
/phc/phc/utils/test_motion_file.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 | path = 'legged_gym/resources/motions/h1/stable_punch.pkl'
3 | if osp.isfile(path):
4 | print(f"{path} 是一个文件")
5 | else:
6 | print(f"{path} 不是一个文件")
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Ankle.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Ankle.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Elbow.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Elbow.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Hand.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Hand.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Knee.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Knee.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Thorax.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Thorax.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Wrist.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Wrist.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Pelvis.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/Pelvis.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Ankle.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Ankle.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Elbow.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Elbow.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Hand.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Hand.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Knee.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Knee.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Thorax.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Thorax.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Wrist.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Wrist.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Shoulder.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/L_Shoulder.stl
--------------------------------------------------------------------------------
/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Shoulder.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HomerIsAFool/G1-retarget/HEAD/phc/phc/data/assets/mesh/smpl/1c00fde5-abea-4340-b528-921965f3a020/geom/R_Shoulder.stl
--------------------------------------------------------------------------------
/phc/phc.egg-info/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 2.1
2 | Name: phc
3 | Version: 1.0.0
4 | Summary: UNKNOWN
5 | Home-page: UNKNOWN
6 | Author:
7 | Author-email:
8 | License: BSD-3-Clause
9 | Platform: UNKNOWN
10 |
11 | UNKNOWN
12 |
13 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # G1-retarget
2 | Uses the PHC framework to retarget motion from the AMASS dataset onto the Unitree G1 humanoid.
3 |
4 | Usage: TBD
5 |
6 | Expected result: TBD
--------------------------------------------------------------------------------
/phc/phc/learning/loss_functions.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | def kl_multi(qm, qv, pm, pv):
4 |     """
5 |     KL(q || p) between diagonal Gaussians.
6 |
7 |     qm, qv: posterior mean and log-variance
8 |     pm, pv: prior mean and log-variance
9 |     """
10 |     element_wise = 0.5 * (pv - qv + qv.exp() / pv.exp() + (qm - pm).pow(2) / pv.exp() - 1)
11 |     kl = element_wise.sum(-1)
12 |     return kl
--------------------------------------------------------------------------------
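
A note on kl_multi: since qv and pv are log-variances, this is the closed-form KL divergence between two diagonal Gaussians. A quick sanity check (a sketch, assuming kl_multi is in scope; std = exp(0.5 * logvar)):

    import torch
    from torch.distributions import Normal, kl_divergence

    qm, qv = torch.randn(4), torch.randn(4)  # posterior mean / log-variance
    pm, pv = torch.randn(4), torch.randn(4)  # prior mean / log-variance
    ref = kl_divergence(Normal(qm, (0.5 * qv).exp()),
                        Normal(pm, (0.5 * pv).exp())).sum(-1)
    assert torch.allclose(kl_multi(qm, qv, pm, pv), ref, atol=1e-5)
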
/phc/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import find_packages
2 | from distutils.core import setup
3 |
4 | setup(
5 |     name='phc',
6 |     version='1.0.0',
7 |     author='',
8 |     license="BSD-3-Clause",
9 |     packages=find_packages(),
10 |     author_email='',
11 |     description='',
12 |     # install_requires=[]
13 | )
--------------------------------------------------------------------------------
/phc/phc/utils/flags.py:
--------------------------------------------------------------------------------
1 | __all__ = ['flags']
2 |
3 | class Flags(object):
4 |     def __init__(self, items):
5 |         for key, val in items.items():
6 |             setattr(self, key, val)
7 |
8 | flags = Flags({
9 |     'test': False,
10 |     'debug': False,
11 |     "real_traj": False,
12 |     "im_eval": False,
13 | })
14 |
--------------------------------------------------------------------------------
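
flags is a process-wide mutable namespace of booleans: callers flip attributes once at startup and read them anywhere (e.g. humanoid_im_mcp_getup.py imports it as `from phc.utils.flags import flags`). A minimal usage sketch:

    from phc.utils.flags import flags

    flags.debug = True      # e.g. set from command-line arguments
    if flags.im_eval:
        ...                 # evaluation-only code path
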
/phc/phc/data/assets/mjcf/ball_medium.urdf:
--------------------------------------------------------------------------------
[URDF markup not captured in this export]
--------------------------------------------------------------------------------
/phc/phc/data/assets/mjcf/location_marker.urdf:
--------------------------------------------------------------------------------
[URDF markup not captured in this export]
--------------------------------------------------------------------------------
/phc/phc/data/assets/mjcf/traj_marker.urdf:
--------------------------------------------------------------------------------
[URDF markup not captured in this export]
--------------------------------------------------------------------------------
/phc/phc/data/assets/mjcf/block_projectile.urdf:
--------------------------------------------------------------------------------
[URDF markup not captured in this export]
--------------------------------------------------------------------------------
/phc/phc/data/assets/mjcf/heading_marker.urdf:
--------------------------------------------------------------------------------
[URDF markup not captured in this export]
--------------------------------------------------------------------------------
/phc/phc/data/assets/mjcf/block_projectile_large.urdf:
--------------------------------------------------------------------------------
[URDF markup not captured in this export]
--------------------------------------------------------------------------------
/phc/phc/data/assets/mjcf/capsule.urdf:
--------------------------------------------------------------------------------
[URDF markup not captured in this export]
--------------------------------------------------------------------------------
/phc/phc/learning/ar_prior.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | class AR1Prior(nn.Module):
5 |     def __init__(self):
6 |         super(AR1Prior, self).__init__()
7 |         # AR(1) coefficient; fixed here, though it could be made learnable:
8 |         # self.phi = nn.Parameter(torch.tensor(0.5))
9 |         self.phi = 0.95
10 |
11 |     def forward(self, series):
12 |         # Unnormalized log-likelihood of the series under the AR(1) model,
13 |         # ignoring the first term since it has no previous element.
14 |         error = series[1:] - self.phi * series[:-1]
15 |         log_likelihood = -0.5 * torch.sum(error**2)
16 |         return log_likelihood
17 |
--------------------------------------------------------------------------------
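
AR1Prior scores how well a sequence follows a first-order autoregressive process with coefficient 0.95; higher output means a smoother, more AR(1)-consistent series. A usage sketch (shapes are illustrative, assuming the class above is in scope):

    import torch

    prior = AR1Prior()
    series = torch.randn(30, 16)   # e.g. 30 frames of 16-D latents
    log_lik = prior(series)        # unnormalized log-likelihood
    penalty = -log_lik             # usable as a smoothness regularizer in a loss
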
/phc/phc/learning/unrealego/models.py:
--------------------------------------------------------------------------------
1 |
2 | def create_model(opt):
3 |     print(opt.model)
4 |
5 |     if opt.model == 'egoglass':
6 |         from .egoglass_model import EgoGlassModel
7 |         model = EgoGlassModel()
8 |
9 |     elif opt.model == "unrealego_heatmap_shared":
10 |         from .unrealego_heatmap_shared_model import UnrealEgoHeatmapSharedModel
11 |         model = UnrealEgoHeatmapSharedModel()
12 |
13 |     elif opt.model == 'unrealego_autoencoder':
14 |         from .unrealego_autoencoder_model import UnrealEgoAutoEncoderModel
15 |         model = UnrealEgoAutoEncoderModel()
16 |
17 |     else:
18 |         raise ValueError('Model [%s] not recognized.' % opt.model)
19 |     model.initialize(opt)
20 |     print("model [%s] was created." % (model.name()))
21 |     return model
--------------------------------------------------------------------------------
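
create_model is a simple string-keyed factory; the only contract on opt is a .model attribute plus whatever each model's initialize(opt) reads. A calling sketch (SimpleNamespace stands in for the repo's option parser, which is not included in this export):

    from types import SimpleNamespace

    opt = SimpleNamespace(model="unrealego_heatmap_shared")
    model = create_model(opt)   # imports, constructs, and initializes the model
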
/scripts/mjcf_to_urdf.py:
--------------------------------------------------------------------------------
1 | # Rudimentary MuJoCo MJCF to ROS URDF converter using the UrdfEditor
2 |
3 | import pybullet_utils.bullet_client as bc
4 | import pybullet_data as pd
5 |
6 | import pybullet_utils.urdfEditor as ed
7 | import argparse
8 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
9 | parser.add_argument('--mjcf', help='MuJoCo xml file to be converted to URDF', default='mjcf/humanoid.xml')
10 | args = parser.parse_args()
11 |
12 | p = bc.BulletClient()
13 | p.setAdditionalSearchPath(pd.getDataPath())
14 | objs = p.loadMJCF(args.mjcf, flags=p.URDF_USE_IMPLICIT_CYLINDER)
15 |
16 | for o in objs:
17 |     # loadMJCF returns one body unique id per body in the file
18 |     humanoid = o
19 |     ed0 = ed.UrdfEditor()
20 |     ed0.initializeFromBulletBody(humanoid, p._client)
21 |     robotName = str(p.getBodyInfo(o)[1], 'utf-8')
22 |     partName = str(p.getBodyInfo(o)[0], 'utf-8')
23 |
24 |     print("robotName=", robotName)
25 |     print("partName=", partName)
26 |
27 |     saveVisuals = False
28 |     ed0.saveUrdf(robotName + "_" + partName + ".urdf", saveVisuals)
--------------------------------------------------------------------------------
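
The script resolves the default --mjcf path against pybullet_data's search path, so `python scripts/mjcf_to_urdf.py` alone converts the bundled humanoid; pass `--mjcf path/to/model.xml` for a custom file. One URDF is written per body, named `<robotName>_<partName>.urdf`.
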
/phc/phc/utils/o3d_utils.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | import imageio
3 |
4 | # Module-level state toggled by the key callbacks below; the viewer script
5 | # is expected to set `control` before using zoom_func.
6 | paused = False
7 | reset = False
8 | recording = False
9 | writer = None
10 | control = None
11 | curr_zoom = 1.0
12 |
13 |
14 | def pause_func(action):
15 |     global paused
16 |     paused = not paused
17 |     print(f"Paused: {paused}")
18 |     return True
19 |
20 |
21 | def reset_func(action):
22 |     global reset
23 |     reset = not reset
24 |     print(f"Reset: {reset}")
25 |     return True
26 |
27 |
28 | def record_func(action):
29 |     global recording, writer
30 |     if not recording:
31 |         fps = 30
32 |         curr_date_time = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
33 |         curr_video_file_name = f"output/renderings/o3d/{curr_date_time}-test.mp4"
34 |         writer = imageio.get_writer(curr_video_file_name, fps=fps, macro_block_size=None)
35 |     elif writer is not None:
36 |         writer.close()
37 |         writer = None
38 |
39 |     recording = not recording
40 |     print(f"Recording: {recording}")
41 |     return True
42 |
43 |
44 | def zoom_func(action):
45 |     global control, curr_zoom
46 |     curr_zoom = curr_zoom * 0.9
47 |     control.set_zoom(curr_zoom)
48 |     print(f"Zoom: {curr_zoom}")
49 |     return True
--------------------------------------------------------------------------------
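
These callbacks are meant to be registered with an Open3D visualizer; a wiring sketch (the key bindings here are illustrative, not taken from this repo):

    import open3d as o3d
    from phc.utils import o3d_utils

    vis = o3d.visualization.VisualizerWithKeyCallback()
    vis.create_window()
    vis.register_key_callback(ord(' '), o3d_utils.pause_func)  # space toggles pause
    vis.register_key_callback(ord('R'), o3d_utils.reset_func)
    vis.register_key_callback(ord('Z'), o3d_utils.zoom_func)
    o3d_utils.control = vis.get_view_control()  # zoom_func needs the view control
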
/phc/phc/env/tasks/humanoid_im_mcp_getup.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | from typing import OrderedDict
4 | import torch
5 | import numpy as np
6 | from phc.utils.torch_utils import quat_to_tan_norm
7 | import phc.env.tasks.humanoid_im_getup as humanoid_im_getup
8 | import phc.env.tasks.humanoid_im_mcp as humanoid_im_mcp
9 | from phc.env.tasks.humanoid_amp import HumanoidAMP, remove_base_rot
10 | from phc.utils.motion_lib_smpl import MotionLibSMPL
11 |
12 | from phc.utils import torch_utils
13 |
14 | from isaacgym import gymapi
15 | from isaacgym import gymtorch
16 | from isaacgym.torch_utils import *
17 | from phc.utils.flags import flags
18 | import joblib
19 | import gc
20 | from smpl_sim.poselib.skeleton.skeleton3d import SkeletonMotion, SkeletonState
21 | from rl_games.algos_torch import torch_ext
22 | import torch.nn as nn
23 | from phc.learning.network_loader import load_mcp_mlp, load_pnn
24 | from collections import deque
25 |
26 | class HumanoidImMCPGetup(humanoid_im_getup.HumanoidImGetup, humanoid_im_mcp.HumanoidImMCP):
27 |
28 |     def __init__(self, cfg, sim_params, physics_engine, device_type, device_id, headless):
29 |         super().__init__(cfg=cfg, sim_params=sim_params, physics_engine=physics_engine, device_type=device_type, device_id=device_id, headless=headless)
30 |         return
31 |
32 |
--------------------------------------------------------------------------------
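
The class merges the getup and MCP behaviors through cooperative multiple inheritance: the single super().__init__ call walks the method resolution order, so both parents (and their shared bases) initialize exactly once. To inspect the order (illustrative):

    print([cls.__name__ for cls in HumanoidImMCPGetup.__mro__])
    # ['HumanoidImMCPGetup', 'HumanoidImGetup', 'HumanoidImMCP', ...]
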
/scripts/quest_camera.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | # Open camera 0
4 | cap = cv2.VideoCapture(0)
5 | cap.set(cv2.CAP_PROP_CONVERT_RGB, 0)
6 | # Get the default video size
7 | width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
8 | height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
9 |
10 | # Define the codec and create a VideoWriter object
11 | fourcc = cv2.VideoWriter_fourcc(*'mp4v')
12 |
13 | out = cv2.VideoWriter('output.mp4', fourcc, 30, (1920, 320))
14 |
15 | # Start capturing and processing frames
16 | while True:
17 |     # Capture frame-by-frame
18 |     ret, frame = cap.read()
19 |
20 |     # If a frame is not available, break the loop
21 |     if not ret:
22 |         break
23 |
24 |     # Optionally rearrange the four 320-px camera views side by side and
25 |     # write them to the output video:
26 |     # gray_frame = np.concatenate([frame[:, :320].transpose(1, 0, 2)[::-1, ], frame[:, 320:640].transpose(1, 0, 2)[::-1, ], frame[:, 640:960].transpose(1, 0, 2)[::-1, ], frame[:, 960:].transpose(1, 0, 2)[::-1, ]], axis=1)
27 |     # out.write(gray_frame)
28 |
29 |     # Display the resulting frame
30 |     cv2.imshow('frame', frame[..., 1])
31 |
32 |     # Exit when the user presses 'q'
33 |     if cv2.waitKey(1) & 0xFF == ord('q'):
34 |         break
35 |
36 | # Release the capture and output objects, and close all windows
37 | cap.release()
38 | out.release()
39 | cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/phc/phc/learning/running_norm.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class RunningNorm(nn.Module):
6 | """
7 | y = (x-mean)/std
8 | using running estimates of mean,std
9 | """
10 |
11 | def __init__(self, dim, demean=True, destd=True, clip=5.0):
12 | super().__init__()
13 | self.dim = dim
14 | self.demean = demean
15 | self.destd = destd
16 | self.clip = clip
17 | self.register_buffer("n", torch.tensor(0, dtype=torch.long))
18 | self.register_buffer("mean", torch.zeros(dim))
19 | self.register_buffer("var", torch.zeros(dim))
20 | self.register_buffer("std", torch.zeros(dim))
21 |
22 | def update(self, x):
23 | var_x, mean_x = torch.var_mean(x, dim=0, unbiased=False)
24 | m = x.shape[0]
25 | w = self.n.to(x.dtype) / (m + self.n).to(x.dtype)
26 | self.var[:] = (
27 | w * self.var + (1 - w) * var_x + w * (1 - w) * (mean_x - self.mean).pow(2)
28 | )
29 | self.mean[:] = w * self.mean + (1 - w) * mean_x
30 | self.std[:] = torch.sqrt(self.var)
31 | self.n += m
32 |
33 | def forward(self, x):
34 | if self.training:
35 | with torch.no_grad():
36 | self.update(x)
37 | if self.n > 0:
38 | if self.demean:
39 | x = x - self.mean
40 | if self.destd:
41 | x = x / (self.std + 1e-8)
42 | if self.clip:
43 | x = torch.clamp(x, -self.clip, self.clip)
44 | return x
45 |
--------------------------------------------------------------------------------
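
update() merges batch statistics into the running ones with the standard parallel-variance combination, weighting the old statistics by w = n / (n + m). A usage sketch (assuming RunningNorm is in scope):

    import torch

    norm = RunningNorm(dim=3)
    norm.train()
    y = norm(torch.randn(128, 3) * 5 + 2)   # updates running stats, then normalizes
    norm.eval()
    z = norm(torch.randn(8, 3))             # frozen stats; normalize and clip only
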
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2025, HomerIsAFool
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright notice, this
9 | list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright notice,
12 | this list of conditions and the following disclaimer in the documentation
13 | and/or other materials provided with the distribution.
14 |
15 | 3. Neither the name of the copyright holder nor the names of its
16 | contributors may be used to endorse or promote products derived from
17 | this software without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
--------------------------------------------------------------------------------
/phc/phc/env/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2023, NVIDIA Corporation
2 | # All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # 1. Redistributions of source code must retain the above copyright notice, this
8 | # list of conditions and the following disclaimer.
9 | #
10 | # 2. Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # 3. Neither the name of the copyright holder nor the names of its
15 | # contributors may be used to endorse or promote products derived from
16 | # this software without specific prior written permission.
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
--------------------------------------------------------------------------------
/phc/phc/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2023, NVIDIA Corporation
2 | # All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # 1. Redistributions of source code must retain the above copyright notice, this
8 | # list of conditions and the following disclaimer.
9 | #
10 | # 2. Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # 3. Neither the name of the copyright holder nor the names of its
15 | # contributors may be used to endorse or promote products derived from
16 | # this software without specific prior written permission.
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
--------------------------------------------------------------------------------
/scripts/mdm_test.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import sys
4 | import pdb
5 | import os.path as osp
6 |
7 | sys.path.append(os.getcwd())
8 | # The replicate client reads REPLICATE_API_TOKEN from the environment.
9 |
10 | import replicate
11 | import joblib
12 |
13 | model = replicate.models.get("daanelson/motion_diffusion_model")
14 | version = model.versions.get("3e2218c061c18b2a7388dd91b6677b6515529d4db4d719a6513a23522d23cfa7")
15 |
16 | # https://replicate.com/daanelson/motion_diffusion_model/versions/3e2218c061c18b2a7388dd91b6677b6515529d4db4d719a6513a23522d23cfa7#input
17 | inputs = {
18 |     # Prompt
19 |     'prompt': "the person walked forward and is picking up his toolbox.",
20 |
21 |     # How many samples to generate
22 |     'num_repetitions': 3,
23 |
24 |     # Choose the format of the output: either an animation or a json file
25 |     # of the animation data. The json format is: {"thetas": [...],
26 |     # "root_translation": [...], "joint_map": [...]}, where "thetas" is an
27 |     # [nframes x njoints x 3] array of joint rotations in degrees,
28 |     # "root_translation" is an [nframes x 3] array of (X, Y, Z) positions of
29 |     # the root, and "joint_map" is a list mapping the SMPL joint index to the
30 |     # corresponding HumanIK joint name.
31 |     # 'output_format': "json_file",
32 |     'output_format': "animation",
33 | }
34 |
35 | # https://replicate.com/daanelson/motion_diffusion_model/versions/3e2218c061c18b2a7388dd91b6677b6515529d4db4d719a6513a23522d23cfa7#output-schema
36 | output = version.predict(**inputs)
37 | import ipdb
38 |
39 | ipdb.set_trace()
40 |
41 | joblib.dump(output, "data/mdm/res.pk")
--------------------------------------------------------------------------------
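
Note: the replicate client authenticates through the REPLICATE_API_TOKEN environment variable, so set it (e.g. `export REPLICATE_API_TOKEN=<your-token>`) before running `python scripts/mdm_test.py`; the script stops at an ipdb breakpoint so the output can be inspected before it is dumped to data/mdm/res.pk.
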
/phc/phc/data/assets/mjcf/humanoid_template_local.xml:
--------------------------------------------------------------------------------
[XML markup not captured in this export]
--------------------------------------------------------------------------------
/phc/phc/utils/benchmarking.py:
--------------------------------------------------------------------------------
1 | from contextlib import contextmanager
2 | import time
3 | from collections import defaultdict
4 | import re
5 | import sys
6 |
7 | average_times = defaultdict(lambda: (0, 0))
8 |
9 | @contextmanager
10 | def timeit(name):
11 |     start = time.time()
12 |     yield
13 |     end = time.time()
14 |     total_time, num_calls = average_times[name]
15 |     total_time += end - start
16 |     num_calls += 1
17 |     print("TIME:", name, end - start, "| AVG", total_time / num_calls, f"| TOTAL {total_time} {num_calls}")
18 |     average_times[name] = (total_time, num_calls)
19 |
20 | def time_decorator(func):
21 |     def with_times(*args, **kwargs):
22 |         with timeit(func.__name__):
23 |             return func(*args, **kwargs)
24 |     return with_times
25 |
26 |
27 | def recover_map(lines):
28 |     info = {}
29 |     pattern = re.compile(r".* (.*) .* \| .* (.*\b) .*\| .* (.*) (.*)")
30 |
31 |     for l in lines:
32 |         if not l.startswith("TIME"):
33 |             continue
34 |
35 |         match = pattern.match(l)
36 |
37 |         name = match.group(1)
38 |         avg = float(match.group(2))
39 |         total_time = float(match.group(3))
40 |         total_calls = float(match.group(4))
41 |         info[name] = (avg, total_time, total_calls)
42 |
43 |     return info
44 |
45 | def compare_files(fileA, fileB):
46 |     with open(fileA) as fA:
47 |         linesA = fA.readlines()
48 |
49 |     with open(fileB) as fB:
50 |         linesB = fB.readlines()
51 |
52 |     mapA = recover_map(linesA)
53 |     mapB = recover_map(linesB)
54 |
55 |     keysA = set(mapA.keys())
56 |     keysB = set(mapB.keys())
57 |
58 |     inter = keysA.intersection(keysB)
59 |     print("Missing A", keysA.difference(inter))
60 |     print("Missing B", keysB.difference(inter))
61 |
62 |     keys_ordered = list(sorted([(mapA[k][1], k) for k in inter], reverse=True))
63 |
64 |     for _, k in keys_ordered:
65 |         print(f"{k} {mapA[k]} {mapB[k]}")
66 |
67 |
68 | if __name__ == "__main__":
69 |     fA = sys.argv[1]
70 |     fB = sys.argv[2]
71 |     compare_files(fA, fB)
--------------------------------------------------------------------------------
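
Typical use of the timing helpers above (a sketch, import path per the tree above):

    from phc.utils.benchmarking import timeit, time_decorator

    with timeit("physics_step"):
        ...   # timed block; prints per-call, running-average, and total time

    @time_decorator
    def update_observations():
        ...   # every call is timed under the function's name
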
/scripts/ws_client.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 |
4 | import aiohttp
5 | import json
6 | import numpy as np
7 |
8 | import subprocess
9 |
10 | HOST = os.getenv('HOST', '172.29.229.220')
11 | # HOST = os.getenv('HOST', '0.0.0.0')
12 | # HOST = os.getenv('HOST', 'KLAB-BUTTER.PC.CS.CMU.EDU')
13 | PORT = int(os.getenv('PORT', 8080))
14 |
15 |
16 | async def main():
17 |     # Context-manage the session so it is closed when main() exits.
18 |     async with aiohttp.ClientSession() as session:
19 |         URL = f'http://{HOST}:{PORT}/ws_talk'
20 |         async with session.ws_connect(URL) as ws:
21 |             await prompt_and_send(ws)
22 |             async for msg in ws:
23 |                 print('Message received from server:', msg.data)
24 |                 await prompt_and_send(ws)
25 |                 if msg.type in (aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.ERROR):
26 |                     break
27 |
28 |     # Alternative: poll the /ws endpoint for pose data at ~30 Hz.
29 |     # URL = f'http://{HOST}:{PORT}/ws'
30 |     # import time
31 |     # async with session.ws_connect(URL) as ws:
32 |     #     await ws.send_str("get_pose")
33 |     #     async for msg in ws:
34 |     #         t_s = time.time()
35 |     #         json_data = json.loads(msg.data)
36 |     #         print(json_data['pose_mat'][0])
37 |     #         await ws.send_str("get_pose")
38 |     #         if msg.type in (aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.ERROR):
39 |     #             break
40 |     #         await asyncio.sleep(1 / 30)
41 |     #         dt = time.time() - t_s
42 |     #         print(1 / dt)
43 |
44 |
45 | async def prompt_and_send(ws):
46 |     new_msg_to_send = input('Type a message to send to the server: ')
47 |     if new_msg_to_send == 'exit':
48 |         print('Exiting!')
49 |         raise SystemExit(0)
50 |     elif new_msg_to_send == "s":
51 |         # subprocess.Popen(["simplescreenrecorder", "--start-recording"])
52 |         pass
53 |     elif new_msg_to_send == "e":
54 |         pass
55 |
56 |     await ws.send_str(new_msg_to_send)
57 |     return new_msg_to_send
58 |
59 |
60 | if __name__ == '__main__':
61 |     print('Type "exit" to quit')
62 |     # loop = asyncio.get_event_loop()
63 |     # loop.run_forever(main())
64 |     asyncio.run(main())
--------------------------------------------------------------------------------
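
Run the client against a server exposing the /ws_talk endpoint, overriding the defaults through the environment if needed: `HOST=127.0.0.1 PORT=8080 python scripts/ws_client.py`. Typing `exit` at the prompt quits.
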
/phc/phc/data/cfg/learning/im.yaml:
--------------------------------------------------------------------------------
1 | params:
2 |   seed: 0
3 |
4 |   algo:
5 |     name: im_amp
6 |
7 |   model:
8 |     name: amp
9 |
10 |   network:
11 |     name: amp
12 |     separate: True
13 |     discrete: False
14 |
15 |     space:
16 |       continuous:
17 |         mu_activation: None
18 |         sigma_activation: None
19 |         mu_init:
20 |           name: default
21 |         sigma_init:
22 |           name: const_initializer
23 |           val: -2.9
24 |         fixed_sigma: True
25 |         learn_sigma: False
26 |
27 |     mlp:
28 |       units: [1024, 512]
29 |       activation: relu
30 |       d2rl: False
31 |
32 |       initializer:
33 |         name: default
34 |       regularizer:
35 |         name: None
36 |
37 |     disc:
38 |       units: [1024, 512]
39 |       activation: relu
40 |
41 |       initializer:
42 |         name: default
43 |
44 |   load_checkpoint: False
45 |
46 |   config:
47 |     name: Humanoid
48 |     env_name: rlgpu
49 |     multi_gpu: False
50 |     ppo: True
51 |     mixed_precision: False
52 |     normalize_input: True
53 |     normalize_value: True
54 |     reward_shaper:
55 |       scale_value: 1
56 |     normalize_advantage: True
57 |     gamma: 0.99
58 |     tau: 0.95
59 |     learning_rate: 2e-5
60 |     lr_schedule: constant
61 |     score_to_win: 20000
62 |     max_epochs: 10000000
63 |     save_best_after: 100
64 |     save_frequency: 2500
65 |     print_stats: False
66 |     save_intermediate: True
67 |     entropy_coef: 0.0
68 |     truncate_grads: True
69 |     grad_norm: 50.0
70 |     e_clip: 0.2
71 |     horizon_length: 32
72 |     minibatch_size: 16384
73 |     mini_epochs: 6
74 |     critic_coef: 5
75 |     clip_value: False
76 |
77 |     bounds_loss_coef: 10
78 |     amp_obs_demo_buffer_size: 200000
79 |     amp_replay_buffer_size: 200000
80 |     amp_replay_keep_prob: 0.01
81 |     amp_batch_size: 512
82 |     amp_minibatch_size: 4096
83 |     disc_coef: 5
84 |     disc_logit_reg: 0.01
85 |     disc_grad_penalty: 5
86 |     disc_reward_scale: 2
87 |     disc_weight_decay: 0.0001
88 |     normalize_amp_input: True
89 |
90 |     task_reward_w: 0.5
91 |     disc_reward_w: 0.5
92 |
93 |     player:
94 |       games_num: 50000000
--------------------------------------------------------------------------------
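
These learning configs follow the rl_games layout (params.algo / model / network / config). A loading sketch (path relative to the repo root):

    import yaml

    with open("phc/phc/data/cfg/learning/im.yaml") as f:
        cfg = yaml.safe_load(f)["params"]

    print(cfg["algo"]["name"])      # im_amp
    print(cfg["config"]["gamma"])   # 0.99
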
/phc/phc/data/cfg/learning/im_pnn.yaml:
--------------------------------------------------------------------------------
1 | params:
2 |   seed: 0
3 |
4 |   algo:
5 |     name: im_amp
6 |
7 |   model:
8 |     name: amp
9 |
10 |   network:
11 |     name: amp_pnn
12 |     separate: True
13 |     discrete: False
14 |
15 |     space:
16 |       continuous:
17 |         mu_activation: None
18 |         sigma_activation: None
19 |         mu_init:
20 |           name: default
21 |         sigma_init:
22 |           name: const_initializer
23 |           val: -2.9
24 |         fixed_sigma: True
25 |         learn_sigma: False
26 |
27 |     mlp:
28 |       units: [1024, 512]
29 |       activation: relu
30 |       d2rl: False
31 |
32 |       initializer:
33 |         name: default
34 |       regularizer:
35 |         name: None
36 |
37 |     disc:
38 |       units: [1024, 512]
39 |       activation: relu
40 |
41 |       initializer:
42 |         name: default
43 |
44 |   load_checkpoint: False
45 |
46 |   config:
47 |     name: Humanoid
48 |     env_name: rlgpu
49 |     multi_gpu: False
50 |     ppo: True
51 |     mixed_precision: False
52 |     normalize_input: True
53 |     normalize_value: True
54 |     reward_shaper:
55 |       scale_value: 1
56 |     normalize_advantage: True
57 |     gamma: 0.99
58 |     tau: 0.95
59 |     learning_rate: 2e-5
60 |     lr_schedule: constant
61 |     score_to_win: 20000
62 |     max_epochs: 10000000
63 |     save_best_after: 100
64 |     save_frequency: 2500
65 |     print_stats: False
66 |     save_intermediate: True
67 |     entropy_coef: 0.0
68 |     truncate_grads: True
69 |     grad_norm: 50.0
70 |     e_clip: 0.2
71 |     horizon_length: 32
72 |     minibatch_size: 16384
73 |     mini_epochs: 6
74 |     critic_coef: 5
75 |     clip_value: False
76 |
77 |     bounds_loss_coef: 10
78 |     amp_obs_demo_buffer_size: 200000
79 |     amp_replay_buffer_size: 200000
80 |     amp_replay_keep_prob: 0.01
81 |     amp_batch_size: 512
82 |     amp_minibatch_size: 4096
83 |     disc_coef: 5
84 |     disc_logit_reg: 0.01
85 |     disc_grad_penalty: 5
86 |     disc_reward_scale: 2
87 |     disc_weight_decay: 0.0001
88 |     normalize_amp_input: True
89 |
90 |     task_reward_w: 0.5
91 |     disc_reward_w: 0.5
92 |
93 |     player:
94 |       games_num: 50000000
--------------------------------------------------------------------------------
/phc/phc/data/cfg/learning/im_mcp.yaml:
--------------------------------------------------------------------------------
1 | # No softmax
2 | params:
3 |   seed: 0
4 |
5 |   algo:
6 |     name: im_amp
7 |
8 |   model:
9 |     name: amp
10 |
11 |   network:
12 |     name: amp_mcp
13 |     separate: True
14 |     discrete: False
15 |     has_softmax: False
16 |
17 |     space:
18 |       continuous:
19 |         mu_activation: None
20 |         sigma_activation: None
21 |         mu_init:
22 |           name: default
23 |         sigma_init:
24 |           name: const_initializer
25 |           val: -2.9
26 |         fixed_sigma: True
27 |         learn_sigma: False
28 |
29 |     mlp:
30 |       units: [1024, 512]
31 |       activation: relu
32 |       d2rl: False
33 |
34 |       initializer:
35 |         name: default
36 |       regularizer:
37 |         name: None
38 |
39 |     disc:
40 |       units: [1024, 512]
41 |       activation: relu
42 |
43 |       initializer:
44 |         name: default
45 |
46 |   load_checkpoint: False
47 |
48 |   config:
49 |     name: Humanoid
50 |     env_name: rlgpu
51 |     multi_gpu: False
52 |     ppo: True
53 |     mixed_precision: False
54 |     normalize_input: True
55 |     normalize_value: True
56 |     reward_shaper:
57 |       scale_value: 1
58 |     normalize_advantage: True
59 |     gamma: 0.99
60 |     tau: 0.95
61 |     learning_rate: 2e-5
62 |     lr_schedule: constant
63 |     score_to_win: 20000
64 |     max_epochs: 10000000
65 |     save_best_after: 100
66 |     save_frequency: 2500
67 |     print_stats: False
68 |     save_intermediate: True
69 |     entropy_coef: 0.0
70 |     truncate_grads: True
71 |     grad_norm: 50.0
72 |     e_clip: 0.2
73 |     horizon_length: 32
74 |     minibatch_size: 16384
75 |     mini_epochs: 6
76 |     critic_coef: 5
77 |     clip_value: False
78 |
79 |     bounds_loss_coef: 10
80 |     amp_obs_demo_buffer_size: 200000
81 |     amp_replay_buffer_size: 200000
82 |     amp_replay_keep_prob: 0.01
83 |     amp_batch_size: 512
84 |     amp_minibatch_size: 4096
85 |     disc_coef: 5
86 |     disc_logit_reg: 0.01
87 |     disc_grad_penalty: 5
88 |     disc_reward_scale: 2
89 |     disc_weight_decay: 0.0001
90 |     normalize_amp_input: True
91 |
92 |     task_reward_w: 0.5
93 |     disc_reward_w: 0.5
94 |
95 |     player:
96 |       games_num: 999999999999999999999999
--------------------------------------------------------------------------------
/phc/phc/data/cfg/learning/task_amp_big.yaml:
--------------------------------------------------------------------------------
1 | params:
2 |   seed: 0
3 |
4 |   algo:
5 |     name: amp
6 |
7 |   model:
8 |     name: amp
9 |
10 |   network:
11 |     name: amp
12 |     separate: True
13 |
14 |     space:
15 |       continuous:
16 |         mu_activation: None
17 |         sigma_activation: None
18 |         mu_init:
19 |           name: default
20 |         sigma_init:
21 |           name: const_initializer
22 |           val: -2.9
23 |         fixed_sigma: True
24 |         learn_sigma: False
25 |
26 |     mlp:
27 |       units: [2048, 1536, 1024, 1024, 512, 512] # comparable parameter count to z_big_task
28 |       activation: silu
29 |       d2rl: False
30 |
31 |       initializer:
32 |         name: default
33 |       regularizer:
34 |         name: None
35 |
36 |     disc:
37 |       # units: [2048, 1024, 512]
38 |       # activation: silu
39 |       units: [1024, 512]
40 |       activation: relu
41 |
42 |       initializer:
43 |         name: default
44 |
45 |   load_checkpoint: False
46 |
47 |   config:
48 |     name: Humanoid
49 |     env_name: rlgpu
50 |     multi_gpu: False
51 |     ppo: True
52 |     mixed_precision: False
53 |     normalize_input: True
54 |     normalize_value: True
55 |     reward_shaper:
56 |       scale_value: 1
57 |     normalize_advantage: True
58 |     gamma: 0.99
59 |     tau: 0.95
60 |     learning_rate: 2e-5
61 |     lr_schedule: constant
62 |     score_to_win: 20000
63 |     max_epochs: 10000000
64 |     save_best_after: 100
65 |     save_frequency: 1500
66 |     print_stats: False
67 |     save_intermediate: True
68 |     entropy_coef: 0.0
69 |     truncate_grads: True
70 |     grad_norm: 50.0
71 |     e_clip: 0.2
72 |     horizon_length: 32
73 |     minibatch_size: 16384
74 |     mini_epochs: 6
75 |     critic_coef: 5
76 |     clip_value: False
77 |
78 |     bounds_loss_coef: 10
79 |     amp_obs_demo_buffer_size: 200000
80 |     amp_replay_buffer_size: 200000
81 |     amp_replay_keep_prob: 0.01
82 |     amp_batch_size: 512
83 |     amp_minibatch_size: 4096
84 |     disc_coef: 5
85 |     disc_logit_reg: 0.01
86 |     disc_grad_penalty: 5
87 |     disc_reward_scale: 2
88 |     disc_weight_decay: 0.0001
89 |     normalize_amp_input: True
90 |
91 |     task_reward_w: 0.5
92 |     disc_reward_w: 0.5
93 |
94 |     player:
95 |       games_num: 50000000
--------------------------------------------------------------------------------
/phc/phc/data/cfg/learning/task_no_amp_big.yaml:
--------------------------------------------------------------------------------
1 | params:
2 |   seed: 0
3 |
4 |   algo:
5 |     name: amp
6 |
7 |   model:
8 |     name: amp
9 |
10 |   network:
11 |     name: amp
12 |     separate: True
13 |
14 |     space:
15 |       continuous:
16 |         mu_activation: None
17 |         sigma_activation: None
18 |         mu_init:
19 |           name: default
20 |         sigma_init:
21 |           name: const_initializer
22 |           val: -2.9
23 |         fixed_sigma: True
24 |         learn_sigma: False
25 |
26 |     mlp:
27 |       units: [2048, 1536, 1024, 1024, 512, 512] # comparable parameter count to z_big_task
28 |       activation: silu
29 |       d2rl: False
30 |
31 |       initializer:
32 |         name: default
33 |       regularizer:
34 |         name: None
35 |
36 |     disc:
37 |       # units: [2048, 1024, 512]
38 |       # activation: silu
39 |       units: [1024, 512]
40 |       activation: relu
41 |
42 |       initializer:
43 |         name: default
44 |
45 |   load_checkpoint: False
46 |
47 |   config:
48 |     name: Humanoid
49 |     env_name: rlgpu
50 |     multi_gpu: False
51 |     ppo: True
52 |     mixed_precision: False
53 |     normalize_input: True
54 |     normalize_value: True
55 |     reward_shaper:
56 |       scale_value: 1
57 |     normalize_advantage: True
58 |     gamma: 0.99
59 |     tau: 0.95
60 |     learning_rate: 2e-5
61 |     lr_schedule: constant
62 |     score_to_win: 20000
63 |     max_epochs: 10000000
64 |     save_best_after: 100
65 |     save_frequency: 1500
66 |     print_stats: False
67 |     save_intermediate: True
68 |     entropy_coef: 0.0
69 |     truncate_grads: True
70 |     grad_norm: 50.0
71 |     e_clip: 0.2
72 |     horizon_length: 32
73 |     minibatch_size: 16384
74 |     mini_epochs: 6
75 |     critic_coef: 5
76 |     clip_value: False
77 |
78 |     bounds_loss_coef: 10
79 |     amp_obs_demo_buffer_size: 200000
80 |     amp_replay_buffer_size: 200000
81 |     amp_replay_keep_prob: 0.01
82 |     amp_batch_size: 512
83 |     amp_minibatch_size: 4096
84 |     disc_coef: 5
85 |     disc_logit_reg: 0.01
86 |     disc_grad_penalty: 5
87 |     disc_reward_scale: 2
88 |     disc_weight_decay: 0.0001
89 |     normalize_amp_input: True
90 |
91 |     task_reward_w: 1
92 |     disc_reward_w: 0
93 |
94 |     player:
95 |       games_num: 50000000
--------------------------------------------------------------------------------
/phc/phc/data/cfg/env/phc_prim_iccv.yaml:
--------------------------------------------------------------------------------
1 | # if given, will override the device setting in gym.
2 | project_name: "EgoQuest_IM"
3 | notes: "PNN, no Laternal connection "
4 | env:
5 | numEnvs: 1536
6 | envSpacing: 5
7 | episodeLength: 300
8 | isFlagrun: False
9 | enableDebugVis: False
10 |
11 | bias_offset: False
12 | has_self_collision: True
13 | has_mesh: False
14 | has_jt_limit: False
15 | has_dof_subset: True
16 | has_upright_start: True
17 | has_smpl_pd_offset: False
18 | remove_toe: False # For humanoid's geom toe
19 | real_weight_porpotion_capsules: True
20 |
21 | sym_loss_coef: 1
22 | big_ankle: True
23 | fut_tracks: False
24 | obs_v: 6
25 |
26 |
27 |
28 | cycle_motion: False
29 | hard_negative: False
30 |
31 | masterfoot: False
32 | freeze_toe: false
33 |
34 | real_weight: True
35 | kp_scale: 1
36 | remove_toe_im: False # For imitation
37 | power_reward: True
38 |
39 | has_shape_obs: True
40 | has_shape_obs_disc: True
41 | has_shape_variation: True
42 | shape_resampling_interval: 500
43 |
44 | pdControl: True
45 | powerScale: 1.0
46 | controlFrequencyInv: 2 # 30 Hz
47 | stateInit: "Random"
48 | hybridInitProb: 0.5
49 | numAMPObsSteps: 10
50 |
51 | localRootObs: True
52 | rootHeightObs: True
53 | keyBodies: ["R_Ankle", "L_Ankle", "R_Wrist", "L_Wrist"]
54 | contactBodies: ["R_Ankle", "L_Ankle", "R_Toe", "L_Toe"]
55 | resetBodies: ['Pelvis', 'L_Hip', 'L_Knee', 'R_Hip', 'R_Knee', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
56 | terminationHeight: 0.15
57 | enableEarlyTermination: True
58 | terminationDistance: 0.25
59 |
60 | ### Fut config
61 | numTrajSamples: 3
62 | trajSampleTimestepInv: 3
63 | enableTaskObs: True
64 |
65 | asset:
66 | assetRoot: "/"
67 | assetFileName: "mjcf/smpl_humanoid.xml"
68 |
69 | plane:
70 | staticFriction: 1.0
71 | dynamicFriction: 1.0
72 | restitution: 0.0
73 |
74 | sim:
75 | substeps: 2
76 | physx:
77 | num_threads: 4
78 | solver_type: 1 # 0: pgs, 1: tgs
79 | num_position_iterations: 4
80 | num_velocity_iterations: 0
81 | contact_offset: 0.02
82 | rest_offset: 0.0
83 | bounce_threshold_velocity: 0.2
84 | max_depenetration_velocity: 10.0
85 | default_buffer_size_multiplier: 10.0
86 |
87 | flex:
88 | num_inner_iterations: 10
89 | warm_start: 0.25
90 |
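
These env YAMLs are plain nested mappings, so they can be inspected outside the training harness. A sketch of loading this one with PyYAML and deriving the control rate implied by controlFrequencyInv (the 60 Hz base simulation rate is an assumption inferred from the "# 30 Hz" comment):

    import yaml

    with open("phc/phc/data/cfg/env/phc_prim_iccv.yaml") as f:
        cfg = yaml.safe_load(f)

    env = cfg["env"]
    sim_hz = 60  # assumed base simulation rate
    control_hz = sim_hz / env["controlFrequencyInv"]  # 60 / 2 = 30 Hz
    print(control_hz, env["keyBodies"])
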
--------------------------------------------------------------------------------
/phc/phc.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------
1 | setup.py
2 | phc/__init__.py
3 | phc/run.py
4 | phc.egg-info/PKG-INFO
5 | phc.egg-info/SOURCES.txt
6 | phc.egg-info/dependency_links.txt
7 | phc.egg-info/top_level.txt
8 | phc/env/__init__.py
9 | phc/env/tasks/__init__.py
10 | phc/env/tasks/base_task.py
11 | phc/env/tasks/humanoid.py
12 | phc/env/tasks/humanoid_amp.py
13 | phc/env/tasks/humanoid_amp_getup.py
14 | phc/env/tasks/humanoid_amp_task.py
15 | phc/env/tasks/humanoid_im.py
16 | phc/env/tasks/humanoid_im_demo.py
17 | phc/env/tasks/humanoid_im_getup.py
18 | phc/env/tasks/humanoid_im_mcp.py
19 | phc/env/tasks/humanoid_im_mcp_demo.py
20 | phc/env/tasks/humanoid_im_mcp_getup.py
21 | phc/env/tasks/humanoid_speed.py
22 | phc/env/tasks/vec_task.py
23 | phc/env/tasks/vec_task_wrappers.py
24 | phc/learning/__init__.py
25 | phc/learning/amp_agent.py
26 | phc/learning/amp_datasets.py
27 | phc/learning/amp_models.py
28 | phc/learning/amp_network_builder.py
29 | phc/learning/amp_network_mcp_builder.py
30 | phc/learning/amp_network_pnn_builder.py
31 | phc/learning/amp_players.py
32 | phc/learning/ar_prior.py
33 | phc/learning/common_agent.py
34 | phc/learning/common_player.py
35 | phc/learning/im_amp.py
36 | phc/learning/im_amp_players.py
37 | phc/learning/loss_functions.py
38 | phc/learning/network_builder.py
39 | phc/learning/network_loader.py
40 | phc/learning/pnn.py
41 | phc/learning/replay_buffer.py
42 | phc/learning/running_norm.py
43 | phc/learning/transformer.py
44 | phc/learning/transformer_layers.py
45 | phc/learning/vq_quantizer.py
46 | phc/learning/unrealego/__init__.py
47 | phc/learning/unrealego/base_model.py
48 | phc/learning/unrealego/egoglass_model.py
49 | phc/learning/unrealego/models.py
50 | phc/learning/unrealego/network.py
51 | phc/learning/unrealego/network_debug.py
52 | phc/learning/unrealego/unrealego_autoencoder_model.py
53 | phc/learning/unrealego/unrealego_heatmap_shared_model.py
54 | phc/utils/__init__.py
55 | phc/utils/benchmarking.py
56 | phc/utils/config.py
57 | phc/utils/data_tree.py
58 | phc/utils/draw_utils.py
59 | phc/utils/flags.py
60 | phc/utils/logger.py
61 | phc/utils/motion_lib_base.py
62 | phc/utils/motion_lib_h1.py
63 | phc/utils/motion_lib_smpl.py
64 | phc/utils/o3d_utils.py
65 | phc/utils/parse_task.py
66 | phc/utils/plot_script.py
67 | phc/utils/pytorch3d_transforms.py
68 | phc/utils/rotation_conversions.py
69 | phc/utils/running_mean_std.py
70 | phc/utils/torch_h1_humanoid_batch.py
71 | phc/utils/torch_utils.py
72 | phc/utils/traj_generator.py
73 | phc/utils/transform_utils.py
74 | phc/utils/uhc_transform_utils.py
--------------------------------------------------------------------------------
/scripts/pmcp/forward_pmcp.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import sys
4 | import pdb
5 | import os.path as osp
6 | sys.path.append(os.getcwd())
7 | from rl_games.algos_torch import torch_ext
8 | import joblib
9 | import numpy as np
10 | import argparse
11 |
12 |
13 |
14 |
15 | if __name__ == '__main__':
16 | parser = argparse.ArgumentParser()
17 | parser.add_argument('--exp', default='')
18 | parser.add_argument('--idx', default=0)
19 | parser.add_argument('--epoch', default=200000)
20 |
21 | args = parser.parse_args()
22 |
23 | trained_idx = int(args.idx)
24 | exp_name = args.exp
25 | epoch = int(args.epoch)
26 | print(f"PNN Processing for: exp_name: {exp_name}, idx: {trained_idx}, epoch: {epoch}")
27 |     # import ipdb; ipdb.set_trace()  # leftover debug breakpoint; disabled so the script can run unattended
28 |
29 |
30 | checkpoint = torch_ext.load_checkpoint(f"output/dgx/{exp_name}/Humanoid_{epoch:08d}.pth")
31 | amass_train_data_take6 = joblib.load("data/amass/pkls/amass_isaac_im_train_take6_upright_slim.pkl")
32 |
33 | failed_keys_dict = {}
34 | termination_history_dict = {}
35 | all_keys = set()
36 | for failed_path in sorted(glob.glob(f"output/dgx/{exp_name}/failed_*"))[:]:
37 | failed_idx = int(failed_path.split("/")[-1].split("_")[-1].split(".")[0])
38 | failed_keys_entry = joblib.load(failed_path)
39 | failed_keys = failed_keys_entry['failed_keys']
40 | failed_keys_dict[failed_idx] = failed_keys
41 | termination_history_dict[failed_idx] = failed_keys_entry['termination_history']
42 |         all_keys.update(failed_keys)
43 |
44 | dump_keys = []
45 | for k, v in failed_keys_dict.items():
46 | if k <= epoch and k >= epoch - 2500 * 5:
47 | dump_keys.append(v)
48 |
49 | dump_keys = np.concatenate(dump_keys)
50 |
51 | network_name_prefix = "a2c_network.pnn.actors"
52 |
53 |
54 | loading_keys = [k for k in checkpoint['model'].keys() if k.startswith(f"{network_name_prefix}.{trained_idx}")]
55 | copy_keys = [k for k in checkpoint['model'].keys() if k.startswith(f"{network_name_prefix}.{trained_idx + 1}")]
56 |
57 |
58 | for idx, key_name in enumerate(copy_keys):
59 | checkpoint['model'][key_name].copy_(checkpoint['model'][loading_keys[idx]])
60 |
61 | torch_ext.save_checkpoint(f"output/dgx/{exp_name}/Humanoid_{epoch + 1:08d}", checkpoint)
62 |
63 | failed_dump = {key: amass_train_data_take6[key] for key in dump_keys if key in amass_train_data_take6}
64 |
65 | os.makedirs(f"data/amass/pkls/auto_pmcp", exist_ok=True)
66 | print(f"dumping {len(failed_dump)} samples to data/amass/pkls/auto_pmcp/{exp_name}_{epoch}.pkl")
67 | joblib.dump(failed_dump, f"data/amass/pkls/auto_pmcp/{exp_name}_{epoch}.pkl")
68 |
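
Run as, e.g., python scripts/pmcp/forward_pmcp.py --exp <exp_name> --idx <trained column> --epoch <checkpoint epoch>. The heart of the script is the state-dict surgery: the weights of the PNN column that just finished training are copied into the next column so progressive training can continue from them. The same idea stripped to toy tensors (a sketch; the real keys come from the loaded rl-games checkpoint):

    import torch

    prefix = "a2c_network.pnn.actors"
    state = {f"{prefix}.0.0.weight": torch.randn(4, 4),   # trained column
             f"{prefix}.1.0.weight": torch.zeros(4, 4)}   # next column to seed

    src = [k for k in state if k.startswith(f"{prefix}.0")]
    dst = [k for k in state if k.startswith(f"{prefix}.1")]
    for s, d in zip(src, dst):
        state[d].copy_(state[s])  # in-place copy, mirroring checkpoint['model'][...].copy_
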
--------------------------------------------------------------------------------
/phc/phc/learning/replay_buffer.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | class ReplayBuffer():
4 | def __init__(self, buffer_size, device):
5 | self._head = 0
6 | self._total_count = 0
7 | self._buffer_size = buffer_size
8 | self._device = device
9 | self._data_buf = None
10 | self._sample_idx = torch.randperm(buffer_size)
11 | self._sample_head = 0
12 |
13 | return
14 |
15 | def reset(self):
16 | self._head = 0
17 | self._total_count = 0
18 | self._reset_sample_idx()
19 | return
20 |
21 | def get_buffer_size(self):
22 | return self._buffer_size
23 |
24 | def get_total_count(self):
25 | return self._total_count
26 |
27 | def store(self, data_dict):
28 | if (self._data_buf is None):
29 | self._init_data_buf(data_dict)
30 |
31 | n = next(iter(data_dict.values())).shape[0]
32 | buffer_size = self.get_buffer_size()
33 | assert(n <= buffer_size)
34 |
35 | for key, curr_buf in self._data_buf.items():
36 | curr_n = data_dict[key].shape[0]
37 | assert(n == curr_n)
38 |
39 | store_n = min(curr_n, buffer_size - self._head)
40 | curr_buf[self._head:(self._head + store_n)] = data_dict[key][:store_n]
41 |
42 | remainder = n - store_n
43 | if (remainder > 0):
44 | curr_buf[0:remainder] = data_dict[key][store_n:]
45 |
46 | self._head = (self._head + n) % buffer_size
47 | self._total_count += n
48 |
49 | return
50 |
51 | def sample(self, n):
52 | total_count = self.get_total_count()
53 | buffer_size = self.get_buffer_size()
54 |
55 | idx = torch.arange(self._sample_head, self._sample_head + n)
56 | idx = idx % buffer_size
57 | rand_idx = self._sample_idx[idx]
58 | if (total_count < buffer_size):
59 | rand_idx = rand_idx % self._head
60 |
61 | samples = dict()
62 | for k, v in self._data_buf.items():
63 | samples[k] = v[rand_idx]
64 |
65 | self._sample_head += n
66 | if (self._sample_head >= buffer_size):
67 | self._reset_sample_idx()
68 |
69 | return samples
70 |
71 | def _reset_sample_idx(self):
72 | buffer_size = self.get_buffer_size()
73 | self._sample_idx[:] = torch.randperm(buffer_size)
74 | self._sample_head = 0
75 | return
76 |
77 | def _init_data_buf(self, data_dict):
78 | buffer_size = self.get_buffer_size()
79 | self._data_buf = dict()
80 |
81 | for k, v in data_dict.items():
82 | v_shape = v.shape[1:]
83 | self._data_buf[k] = torch.zeros((buffer_size,) + v_shape, device=self._device)
84 |
85 | return
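
ReplayBuffer is a fixed-size ring buffer over dicts of tensors: store() writes batches at a moving head with wraparound, and sample() walks a shuffled permutation of indices. A minimal usage sketch:

    import torch

    buf = ReplayBuffer(buffer_size=8, device="cpu")
    buf.store({"amp_obs": torch.randn(4, 6)})  # lazily allocates an (8, 6) backing tensor
    buf.store({"amp_obs": torch.randn(6, 6)})  # wraps around the end of the buffer
    batch = buf.sample(4)                      # {"amp_obs": tensor of shape (4, 6)}
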
--------------------------------------------------------------------------------
/phc/phc/data/cfg/env/h1_im_4.yaml:
--------------------------------------------------------------------------------
1 | # if given, will override the device setting in gym.
2 | project_name: "H1"
3 | notes: "Fitting to three squences"
4 | env:
5 | numEnvs: 1536
6 | envSpacing: 5
7 | episodeLength: 300
8 | isFlagrun: False
9 | enableDebugVis: False
10 |
11 | bias_offset: False
12 | has_self_collision: True
13 | has_mesh: False
14 | has_jt_limit: False
15 | has_dof_subset: False
16 | has_upright_start: True
17 | has_smpl_pd_offset: False
18 | remove_toe: False # For humanoid's geom toe
19 | real_weight_porpotion_capsules: True
20 |
21 | sym_loss_coef: 1
22 | big_ankle: True
23 | fut_tracks: False
24 | obs_v: 6
25 |
26 |
27 | cycle_motion: False
28 | hard_negative: False
29 |
30 | masterfoot: False
31 | freeze_toe: false
32 |
33 | default_humanoid_mass: 51.436 # H1 config
34 | real_weight: True
35 | kp_scale: 1
36 | remove_toe_im: False # For imitation
37 | power_reward: True
38 |
39 | has_shape_obs: False
40 | has_shape_obs_disc: False
41 | has_shape_variation: False
42 | shape_resampling_interval: 500
43 |
44 | pdControl: True
45 | powerScale: 1.0
46 | controlFrequencyInv: 2 # 30 Hz
47 | stateInit: "Random"
48 | hybridInitProb: 0.5
49 | numAMPObsSteps: 10
50 |
51 | localRootObs: True
52 | rootHeightObs: True
53 | keyBodies: ["left_ankle_link", "right_ankle_link", "left_elbow_link", "right_elbow_link"]
54 | contactBodies: ["left_ankle_link", "right_ankle_link"]
55 | resetBodies: [ 'pelvis', 'left_hip_yaw_link', 'left_hip_roll_link','left_hip_pitch_link', 'left_knee_link', 'left_ankle_link', 'right_hip_yaw_link', 'right_hip_roll_link', 'right_hip_pitch_link', 'right_knee_link', 'right_ankle_link', 'torso_link', 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link', 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link']
56 | terminationHeight: 0.15
57 | enableEarlyTermination: True
58 | terminationDistance: 0.25
59 |
60 | ### Fut config
61 | numTrajSamples: 3
62 | trajSampleTimestepInv: 3
63 | enableTaskObs: True
64 |
65 | asset:
66 | assetRoot: "./"
67 | # assetFileName: "resources/robots/h1/h1.xml"
68 | assetFileName: "resources/robots/h1/h1.xml"
69 |
70 | plane:
71 | staticFriction: 1.0
72 | dynamicFriction: 1.0
73 | restitution: 0.0
74 |
75 | sim:
76 | substeps: 2
77 | physx:
78 | num_threads: 4
79 | solver_type: 1 # 0: pgs, 1: tgs
80 | num_position_iterations: 4
81 | num_velocity_iterations: 0
82 | contact_offset: 0.02
83 | rest_offset: 0.0
84 | bounce_threshold_velocity: 0.2
85 | max_depenetration_velocity: 10.0
86 | default_buffer_size_multiplier: 10.0
87 |
88 | flex:
89 | num_inner_iterations: 10
90 | warm_start: 0.25
91 |
--------------------------------------------------------------------------------
/phc/phc/data/cfg/env/h1_im_1.yaml:
--------------------------------------------------------------------------------
1 | # if given, will override the device setting in gym.
2 | project_name: "H1"
3 | notes: "PNN, no Laternal connection "
4 | env:
5 | numEnvs: 1536
6 | envSpacing: 5
7 | episodeLength: 300
8 | isFlagrun: False
9 | enableDebugVis: False
10 |
11 | bias_offset: False
12 | has_self_collision: True
13 | has_mesh: False
14 | has_jt_limit: False
15 | has_dof_subset: False
16 | has_upright_start: True
17 | has_smpl_pd_offset: False
18 | remove_toe: False # For humanoid's geom toe
19 | real_weight_porpotion_capsules: True
20 |
21 | sym_loss_coef: 1
22 | big_ankle: True
23 | fut_tracks: False
24 | obs_v: 6
25 |
26 |
27 | cycle_motion: False
28 | hard_negative: False
29 |
30 | masterfoot: False
31 | freeze_toe: false
32 |
33 | default_humanoid_mass: 51.436 # H1 config
34 | real_weight: True
35 | kp_scale: 1
36 | remove_toe_im: False # For imitation
37 | power_reward: True
38 |
39 | has_shape_obs: False
40 | has_shape_obs_disc: False
41 | has_shape_variation: False
42 | shape_resampling_interval: 500
43 |
44 | pdControl: True
45 | powerScale: 1.0
46 | controlFrequencyInv: 2 # 30 Hz
47 | stateInit: "Random"
48 | hybridInitProb: 0.5
49 | numAMPObsSteps: 10
50 |
51 | localRootObs: True
52 | rootHeightObs: True
53 | keyBodies: ["left_ankle_link", "right_ankle_link", "left_elbow_link", "right_elbow_link"]
54 | contactBodies: ["left_ankle_link", "right_ankle_link"]
55 | resetBodies: [ 'pelvis', 'left_hip_yaw_link', 'left_hip_roll_link','left_hip_pitch_link', 'left_knee_link', 'left_ankle_link', 'right_hip_yaw_link', 'right_hip_roll_link', 'right_hip_pitch_link', 'right_knee_link', 'right_ankle_link', 'torso_link', 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link', 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link']
56 | terminationHeight: 0.15
57 | enableEarlyTermination: True
58 | terminationDistance: 0.25
59 |
60 | ### Fut config
61 | numTrajSamples: 3
62 | trajSampleTimestepInv: 3
63 | enableTaskObs: True
64 |
65 | asset:
66 | assetRoot: "./"
67 | # assetFileName: "resources/robots/h1/h1.xml"
68 | assetFileName: "resources/robots/h1/h1.xml"
69 |
70 | plane:
71 | staticFriction: 1.0
72 | dynamicFriction: 1.0
73 | restitution: 0.0
74 |
75 | sim:
76 | substeps: 2
77 | physx:
78 | num_threads: 4
79 | solver_type: 1 # 0: pgs, 1: tgs
80 | num_position_iterations: 4
81 | num_velocity_iterations: 0
82 | contact_offset: 0.02
83 | rest_offset: 0.0
84 | bounce_threshold_velocity: 0.2
85 | max_depenetration_velocity: 10.0
86 | default_buffer_size_multiplier: 10.0
87 |
88 | flex:
89 | num_inner_iterations: 10
90 | warm_start: 0.25
91 |
--------------------------------------------------------------------------------
/phc/phc/data/cfg/env/h1_im_2.yaml:
--------------------------------------------------------------------------------
1 | # if given, will override the device setting in gym.
2 | project_name: "H1"
3 | notes: "PNN, no Laternal connection "
4 | env:
5 | numEnvs: 1536
6 | envSpacing: 5
7 | episodeLength: 300
8 | isFlagrun: False
9 | enableDebugVis: False
10 |
11 | bias_offset: False
12 | has_self_collision: True
13 | has_mesh: False
14 | has_jt_limit: False
15 | has_dof_subset: False
16 | has_upright_start: True
17 | has_smpl_pd_offset: False
18 | remove_toe: False # For humanoid's geom toe
19 | real_weight_porpotion_capsules: True
20 |
21 | sym_loss_coef: 1
22 | big_ankle: True
23 | fut_tracks: False
24 | obs_v: 6
25 |
26 |
27 | cycle_motion: False
28 | hard_negative: False
29 |
30 | masterfoot: False
31 | freeze_toe: false
32 |
33 | default_humanoid_mass: 51.436 # H1 config
34 | real_weight: True
35 | kp_scale: 1
36 | remove_toe_im: False # For imitation
37 | power_reward: True
38 |
39 | has_shape_obs: False
40 | has_shape_obs_disc: False
41 | has_shape_variation: False
42 | shape_resampling_interval: 500
43 |
44 | pdControl: True
45 | powerScale: 1.0
46 | controlFrequencyInv: 2 # 30 Hz
47 | stateInit: "Random"
48 | hybridInitProb: 0.5
49 | numAMPObsSteps: 10
50 |
51 | localRootObs: True
52 | rootHeightObs: True
53 | keyBodies: ["left_ankle_link", "right_ankle_link", "left_elbow_link", "right_elbow_link"]
54 | contactBodies: ["left_ankle_link", "right_ankle_link"]
55 | resetBodies: [ 'pelvis', 'left_hip_yaw_link', 'left_hip_roll_link','left_hip_pitch_link', 'left_knee_link', 'left_ankle_link', 'right_hip_yaw_link', 'right_hip_roll_link', 'right_hip_pitch_link', 'right_knee_link', 'right_ankle_link', 'torso_link', 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link', 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link']
56 | terminationHeight: 0.15
57 | enableEarlyTermination: True
58 | terminationDistance: 0.25
59 |
60 | ### Fut config
61 | numTrajSamples: 3
62 | trajSampleTimestepInv: 3
63 | enableTaskObs: True
64 |
65 | asset:
66 | assetRoot: "./"
67 | # assetFileName: "resources/robots/h1/h1.xml"
68 | assetFileName: "resources/robots/h1/h1.xml"
69 |
70 | plane:
71 | staticFriction: 1.0
72 | dynamicFriction: 1.0
73 | restitution: 0.0
74 |
75 | sim:
76 | substeps: 2
77 | physx:
78 | num_threads: 4
79 | solver_type: 1 # 0: pgs, 1: tgs
80 | num_position_iterations: 4
81 | num_velocity_iterations: 0
82 | contact_offset: 0.02
83 | rest_offset: 0.0
84 | bounce_threshold_velocity: 0.2
85 | max_depenetration_velocity: 10.0
86 | default_buffer_size_multiplier: 10.0
87 |
88 | flex:
89 | num_inner_iterations: 10
90 | warm_start: 0.25
91 |
--------------------------------------------------------------------------------
/phc/phc/data/cfg/env/h1_im_3.yaml:
--------------------------------------------------------------------------------
1 | # if given, will override the device setting in gym.
2 | project_name: "H1"
3 | notes: "PNN, no Laternal connection "
4 | env:
5 | numEnvs: 1536
6 | envSpacing: 5
7 | episodeLength: 300
8 | isFlagrun: False
9 | enableDebugVis: False
10 |
11 | bias_offset: False
12 | has_self_collision: True
13 | has_mesh: False
14 | has_jt_limit: False
15 | has_dof_subset: False
16 | has_upright_start: True
17 | has_smpl_pd_offset: False
18 | remove_toe: False # For humanoid's geom toe
19 | real_weight_porpotion_capsules: True
20 |
21 | sym_loss_coef: 1
22 | big_ankle: True
23 | fut_tracks: False
24 | obs_v: 6
25 |
26 |
27 | cycle_motion: False
28 | hard_negative: False
29 |
30 | masterfoot: False
31 | freeze_toe: false
32 |
33 | default_humanoid_mass: 51.436 # H1 config
34 | real_weight: True
35 | kp_scale: 1
36 | remove_toe_im: False # For imitation
37 | power_reward: True
38 |
39 | has_shape_obs: False
40 | has_shape_obs_disc: False
41 | has_shape_variation: False
42 | shape_resampling_interval: 500
43 |
44 | pdControl: True
45 | powerScale: 1.0
46 | controlFrequencyInv: 2 # 30 Hz
47 | stateInit: "Random"
48 | hybridInitProb: 0.5
49 | numAMPObsSteps: 10
50 |
51 | localRootObs: True
52 | rootHeightObs: True
53 | keyBodies: ["left_ankle_link", "right_ankle_link", "left_elbow_link", "right_elbow_link"]
54 | contactBodies: ["left_ankle_link", "right_ankle_link"]
55 | resetBodies: [ 'pelvis', 'left_hip_yaw_link', 'left_hip_roll_link','left_hip_pitch_link', 'left_knee_link', 'left_ankle_link', 'right_hip_yaw_link', 'right_hip_roll_link', 'right_hip_pitch_link', 'right_knee_link', 'right_ankle_link', 'torso_link', 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link', 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link']
56 | terminationHeight: 0.15
57 | enableEarlyTermination: True
58 | terminationDistance: 0.25
59 |
60 | ### Fut config
61 | numTrajSamples: 3
62 | trajSampleTimestepInv: 3
63 | enableTaskObs: True
64 |
65 | asset:
66 | assetRoot: "./"
67 | # assetFileName: "resources/robots/h1/h1.xml"
68 | assetFileName: "resources/robots/h1/h1.xml"
69 |
70 | plane:
71 | staticFriction: 1.0
72 | dynamicFriction: 1.0
73 | restitution: 0.0
74 |
75 | sim:
76 | substeps: 2
77 | physx:
78 | num_threads: 4
79 | solver_type: 1 # 0: pgs, 1: tgs
80 | num_position_iterations: 4
81 | num_velocity_iterations: 0
82 | contact_offset: 0.02
83 | rest_offset: 0.0
84 | bounce_threshold_velocity: 0.2
85 | max_depenetration_velocity: 10.0
86 | default_buffer_size_multiplier: 10.0
87 |
88 | flex:
89 | num_inner_iterations: 10
90 | warm_start: 0.25
91 |
--------------------------------------------------------------------------------
/phc/phc/data/cfg/env/phc_shape_pnn_train_iccv.yaml:
--------------------------------------------------------------------------------
1 | # if given, will override the device setting in gym.
2 | project_name: "EgoQuest_IM"
3 | notes: "PNN, no Laternal connection "
4 | env:
5 | numEnvs: 1536
6 | envSpacing: 5
7 | episodeLength: 300
8 | isFlagrun: False
9 | enableDebugVis: False
10 |
11 | bias_offset: False
12 | has_self_collision: True
13 | has_mesh: False
14 | has_jt_limit: False
15 | has_dof_subset: True
16 | has_upright_start: True
17 | has_smpl_pd_offset: False
18 | remove_toe: False # For humanoid's geom toe
19 | real_weight_porpotion_capsules: True
20 |
21 | sym_loss_coef: 1
22 | big_ankle: True
23 | fut_tracks: False
24 | obs_v: 6
25 |
26 | has_pnn: True
27 | fitting: False
28 | num_prim: 4
29 | training_prim: 0
30 | actors_to_load: 0
31 | has_lateral: False
32 |
33 | cycle_motion: False
34 | hard_negative: False
35 |
36 | ######## Getup Configs ########
37 | zero_out_far: False
38 | zero_out_far_train: False
39 |   # cycle_motion: False  # duplicate of the "cycle_motion" key above; kept commented out
40 | getup_udpate_epoch: 78750
41 |
42 | masterfoot: False
43 | freeze_toe: false
44 |
45 | real_weight: True
46 | kp_scale: 1
47 | remove_toe_im: False # For imitation
48 | power_reward: True
49 |
50 | has_shape_obs: True
51 | has_shape_obs_disc: True
52 | has_shape_variation: True
53 | shape_resampling_interval: 500
54 |
55 | pdControl: True
56 | powerScale: 1.0
57 | controlFrequencyInv: 2 # 30 Hz
58 | stateInit: "Random"
59 | hybridInitProb: 0.5
60 | numAMPObsSteps: 10
61 |
62 | localRootObs: True
63 | rootHeightObs: True
64 | keyBodies: ["R_Ankle", "L_Ankle", "R_Wrist", "L_Wrist"]
65 | contactBodies: ["R_Ankle", "L_Ankle", "R_Toe", "L_Toe"]
66 | resetBodies: ['Pelvis', 'L_Hip', 'L_Knee', 'R_Hip', 'R_Knee', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
67 | terminationHeight: 0.15
68 | enableEarlyTermination: True
69 | terminationDistance: 0.25
70 |
71 | ### Fut config
72 | numTrajSamples: 3
73 | trajSampleTimestepInv: 3
74 | enableTaskObs: True
75 |
76 | asset:
77 | assetRoot: "/"
78 | assetFileName: "mjcf/smpl_humanoid.xml"
79 |
80 | plane:
81 | staticFriction: 1.0
82 | dynamicFriction: 1.0
83 | restitution: 0.0
84 |
85 | sim:
86 | substeps: 2
87 | physx:
88 | num_threads: 4
89 | solver_type: 1 # 0: pgs, 1: tgs
90 | num_position_iterations: 4
91 | num_velocity_iterations: 0
92 | contact_offset: 0.02
93 | rest_offset: 0.0
94 | bounce_threshold_velocity: 0.2
95 | max_depenetration_velocity: 10.0
96 | default_buffer_size_multiplier: 10.0
97 |
98 | flex:
99 | num_inner_iterations: 10
100 | warm_start: 0.25
101 |
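
The PNN block (has_pnn, num_prim, training_prim, has_lateral) configures progressive training: four primitive columns, with only the column selected by training_prim trainable, and lateral connections disabled per the note. The real network is phc/learning/pnn.py (not included in this excerpt); the freezing idea looks roughly like this hypothetical stand-in:

    import torch.nn as nn

    class ToyPNN(nn.Module):
        """Hypothetical stand-in for phc.learning.pnn.PNN: one column per primitive."""
        def __init__(self, num_cols, in_dim=8, out_dim=2):
            super().__init__()
            self.actors = nn.ModuleList(nn.Linear(in_dim, out_dim) for _ in range(num_cols))

        def freeze_pnn(self, training_prim):
            # Only the selected column keeps receiving gradients.
            for i, col in enumerate(self.actors):
                for p in col.parameters():
                    p.requires_grad = (i == training_prim)

    pnn = ToyPNN(num_cols=4)         # num_prim: 4
    pnn.freeze_pnn(training_prim=0)  # training_prim: 0
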
--------------------------------------------------------------------------------
/phc/phc/data/cfg/env/phc_kp_pnn_iccv.yaml:
--------------------------------------------------------------------------------
1 | # if given, will override the device setting in gym.
2 | project_name: "EgoQuest_IM"
3 | notes: "PNN, no Laternal connection "
4 | env:
5 | numEnvs: 1536
6 | envSpacing: 5
7 | episodeLength: 300
8 | isFlagrun: False
9 | enableDebugVis: False
10 |
11 | bias_offset: False
12 | has_self_collision: True
13 | has_mesh: False
14 | has_jt_limit: False
15 | has_dof_subset: True
16 | has_upright_start: True
17 | has_smpl_pd_offset: False
18 | remove_toe: False # For humanoid's geom toe
19 | real_weight_porpotion_capsules: True
20 |
21 | sym_loss_coef: 1
22 | big_ankle: True
23 | fut_tracks: False
24 | obs_v: 7
25 |
26 |
27 | has_pnn: True
28 | fitting: True
29 | num_prim: 4
30 | training_prim: 2
31 | actors_to_load: 4
32 | has_lateral: False
33 | models: ['output/phc_kp_pnn_iccv/Humanoid.pth']
34 |
35 | ######## Getup Configs ########
36 | zero_out_far: True
37 | zero_out_far_train: False
38 |   # cycle_motion: False  # duplicate key; the "cycle_motion: True" below takes effect
39 | getup_udpate_epoch: 78750
40 |
41 | cycle_motion: True
42 | hard_negative: False
43 |
44 | masterfoot: False
45 | freeze_toe: false
46 |
47 | real_weight: True
48 | kp_scale: 1
49 | remove_toe_im: False # For imitation
50 | power_reward: True
51 |
52 | has_shape_obs: False
53 | has_shape_obs_disc: False
54 | has_shape_variation: False
55 | shape_resampling_interval: 500
56 |
57 | pdControl: True
58 | powerScale: 1.0
59 | controlFrequencyInv: 2 # 30 Hz
60 | stateInit: "Random"
61 | hybridInitProb: 0.5
62 | numAMPObsSteps: 10
63 |
64 | localRootObs: True
65 | rootHeightObs: True
66 | keyBodies: ["R_Ankle", "L_Ankle", "R_Wrist", "L_Wrist"]
67 | contactBodies: ["R_Ankle", "L_Ankle", "R_Toe", "L_Toe"]
68 | resetBodies: ['Pelvis', 'L_Hip', 'L_Knee', 'R_Hip', 'R_Knee', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
69 | terminationHeight: 0.15
70 | enableEarlyTermination: True
71 | terminationDistance: 0.25
72 |
73 | ### Fut config
74 | numTrajSamples: 3
75 | trajSampleTimestepInv: 3
76 | enableTaskObs: True
77 |
78 | asset:
79 | assetRoot: "/"
80 | assetFileName: "mjcf/smpl_humanoid.xml"
81 |
82 | plane:
83 | staticFriction: 1.0
84 | dynamicFriction: 1.0
85 | restitution: 0.0
86 |
87 | sim:
88 | substeps: 2
89 | physx:
90 | num_threads: 4
91 | solver_type: 1 # 0: pgs, 1: tgs
92 | num_position_iterations: 4
93 | num_velocity_iterations: 0
94 | contact_offset: 0.02
95 | rest_offset: 0.0
96 | bounce_threshold_velocity: 0.2
97 | max_depenetration_velocity: 10.0
98 | default_buffer_size_multiplier: 10.0
99 |
100 | flex:
101 | num_inner_iterations: 10
102 | warm_start: 0.25
103 |
--------------------------------------------------------------------------------
/phc/phc/data/cfg/env/phc_shape_pnn_iccv.yaml:
--------------------------------------------------------------------------------
1 | # if given, will override the device setting in gym.
2 | project_name: "EgoQuest_IM"
3 | notes: "PNN, no Laternal connection "
4 | env:
5 | numEnvs: 1536
6 | envSpacing: 5
7 | episodeLength: 300
8 | isFlagrun: False
9 | enableDebugVis: False
10 |
11 | bias_offset: False
12 | has_self_collision: True
13 | has_mesh: False
14 | has_jt_limit: False
15 | has_dof_subset: True
16 | has_upright_start: True
17 | has_smpl_pd_offset: False
18 | remove_toe: False # For humanoid's geom toe
19 | real_weight_porpotion_capsules: True
20 |
21 | sym_loss_coef: 1
22 | big_ankle: True
23 | fut_tracks: False
24 | obs_v: 6
25 |
26 | has_pnn: True
27 | fitting: True
28 | num_prim: 4
29 | training_prim: 0
30 | actors_to_load: 0
31 | has_lateral: False
32 | models: ['output/phc_shape_pnn_iccv/Humanoid.pth']
33 |
34 |   # cycle_motion: True  # duplicate key; the "cycle_motion: False" below takes effect
35 | hard_negative: False
36 |
37 |
38 | ######## Getup Configs ########
39 | zero_out_far: True
40 | zero_out_far_train: False
41 | cycle_motion: False
42 | getup_udpate_epoch: 78750
43 |
44 | masterfoot: False
45 | freeze_toe: false
46 |
47 | real_weight: True
48 | kp_scale: 1
49 | remove_toe_im: False # For imitation
50 | power_reward: True
51 |
52 | has_shape_obs: True
53 | has_shape_obs_disc: True
54 | has_shape_variation: True
55 | shape_resampling_interval: 500
56 |
57 | pdControl: True
58 | powerScale: 1.0
59 | controlFrequencyInv: 2 # 30 Hz
60 | stateInit: "Random"
61 | hybridInitProb: 0.5
62 | numAMPObsSteps: 10
63 |
64 | localRootObs: True
65 | rootHeightObs: True
66 | keyBodies: ["R_Ankle", "L_Ankle", "R_Wrist", "L_Wrist"]
67 | contactBodies: ["R_Ankle", "L_Ankle", "R_Toe", "L_Toe"]
68 | resetBodies: ['Pelvis', 'L_Hip', 'L_Knee', 'R_Hip', 'R_Knee', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
69 | terminationHeight: 0.15
70 | enableEarlyTermination: True
71 | terminationDistance: 0.25
72 |
73 | ### Fut config
74 | numTrajSamples: 3
75 | trajSampleTimestepInv: 3
76 | enableTaskObs: True
77 |
78 | asset:
79 | assetRoot: "/"
80 | assetFileName: "mjcf/smpl_humanoid.xml"
81 |
82 | plane:
83 | staticFriction: 1.0
84 | dynamicFriction: 1.0
85 | restitution: 0.0
86 |
87 | sim:
88 | substeps: 2
89 | physx:
90 | num_threads: 4
91 | solver_type: 1 # 0: pgs, 1: tgs
92 | num_position_iterations: 4
93 | num_velocity_iterations: 0
94 | contact_offset: 0.02
95 | rest_offset: 0.0
96 | bounce_threshold_velocity: 0.2
97 | max_depenetration_velocity: 10.0
98 | default_buffer_size_multiplier: 10.0
99 |
100 | flex:
101 | num_inner_iterations: 10
102 | warm_start: 0.25
103 |
--------------------------------------------------------------------------------
/phc/phc/utils/draw_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import skimage
3 | from skimage.draw import polygon
4 | from skimage.draw import bezier_curve
5 | from skimage.draw import circle_perimeter
6 | from skimage.draw import disk
7 | from scipy import ndimage
8 | import matplotlib
9 | import matplotlib.pyplot as plt
10 | import matplotlib as mpl
11 |
12 |
13 | def get_color_gradient(percent, color='Blues'):
14 | return mpl.colormaps[color](percent)[:3]
15 |
16 |
17 | def agt_color(aidx):
18 | return matplotlib.colors.to_rgb(plt.rcParams['axes.prop_cycle'].by_key()['color'][aidx % 10])
19 |
20 |
21 | def draw_disk(img_size=80, max_r=10, iterations=3):
22 | shape = (img_size, img_size)
23 | img = np.zeros(shape, dtype=np.uint8)
24 | x, y = np.random.uniform(max_r, img_size - max_r, size=(2))
25 |     radius = int(np.random.uniform(1, max_r))  # sample radius uniformly from [1, max_r)
26 | rr, cc = disk((x, y), radius, shape=shape)
27 | np.clip(rr, 0, img_size - 1, out=rr)
28 | np.clip(cc, 0, img_size - 1, out=cc)
29 | img[rr, cc] = 1
30 | return img
31 |
32 |
33 | def draw_circle(img_size=80, max_r=10, iterations=3):
34 | img = np.zeros((img_size, img_size), dtype=np.uint8)
35 | r, c = np.random.uniform(max_r, img_size - max_r, size=(2,)).astype(int)
36 |     radius = int(np.random.uniform(1, max_r))  # sample radius uniformly from [1, max_r)
37 | rr, cc = circle_perimeter(r, c, radius)
38 | np.clip(rr, 0, img_size - 1, out=rr)
39 | np.clip(cc, 0, img_size - 1, out=cc)
40 | img[rr, cc] = 1
41 | img = ndimage.binary_dilation(img, iterations=1).astype(int)
42 | return img
43 |
44 |
45 | def draw_curve(img_size=80, max_sides=10, iterations=3):
46 | img = np.zeros((img_size, img_size), dtype=np.uint8)
47 | r0, c0, r1, c1, r2, c2 = np.random.uniform(0, img_size, size=(6,)).astype(int)
48 | w = np.random.random()
49 | rr, cc = bezier_curve(r0, c0, r1, c1, r2, c2, w)
50 | np.clip(rr, 0, img_size - 1, out=rr)
51 | np.clip(cc, 0, img_size - 1, out=cc)
52 | img[rr, cc] = 1
53 | img = ndimage.binary_dilation(img, iterations=iterations).astype(int)
54 | return img
55 |
56 |
57 | def draw_polygon(img_size=80, max_sides=10):
58 | img = np.zeros((img_size, img_size), dtype=np.uint8)
59 | num_coord = int(np.random.uniform(3, max_sides))
60 | r = np.random.uniform(0, img_size, size=(num_coord,)).astype(int)
61 | c = np.random.uniform(0, img_size, size=(num_coord,)).astype(int)
62 | rr, cc = polygon(r, c)
63 | np.clip(rr, 0, img_size - 1, out=rr)
64 | np.clip(cc, 0, img_size - 1, out=cc)
65 | img[rr, cc] = 1
66 | return img
67 |
68 |
69 | def draw_ellipse(img_size=80, max_size=10):
70 | img = np.zeros((img_size, img_size), dtype=np.uint8)
71 | r, c, rradius, cradius = np.random.uniform(max_size, img_size - max_size), np.random.uniform(max_size, img_size - max_size),\
72 | np.random.uniform(1, max_size), np.random.uniform(1, max_size)
73 | rr, cc = skimage.draw.ellipse(r, c, rradius, cradius)
74 | np.clip(rr, 0, img_size - 1, out=rr)
75 | np.clip(cc, 0, img_size - 1, out=cc)
76 | img[rr, cc] = 1
77 | return img
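
Each draw_* helper returns an img_size x img_size binary mask containing one randomly placed shape. A quick way to eyeball a few of them:

    import matplotlib.pyplot as plt

    fig, axes = plt.subplots(1, 3, figsize=(9, 3))
    for ax, fn in zip(axes, (draw_disk, draw_circle, draw_polygon)):
        ax.imshow(fn(img_size=80), cmap="gray")
        ax.set_title(fn.__name__)
    plt.show()
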
--------------------------------------------------------------------------------
/phc/phc/data/cfg/env/phc_kp_mcp_iccv.yaml:
--------------------------------------------------------------------------------
1 | # if given, will override the device setting in gym.
2 | project_name: "EgoQuest_IM"
3 | notes: " obs v7, sorry for the confusing name!! This is from im_pnn_1"
4 | env:
5 | numEnvs: 1536
6 | envSpacing: 5
7 | episodeLength: 300
8 | isFlagrun: False
9 | enableDebugVis: False
10 |
11 | bias_offset: False
12 | has_self_collision: True
13 | has_mesh: False
14 | has_jt_limit: False
15 | has_dof_subset: True
16 | has_upright_start: True
17 | has_smpl_pd_offset: False
18 | remove_toe: False # For humanoid's geom toe
19 | real_weight_porpotion_capsules: True
20 |
21 | sym_loss_coef: 1
22 | big_ankle: True
23 | fut_tracks: False
24 | obs_v: 7
25 |
26 | has_pnn: True
27 | fitting: True
28 | num_prim: 4
29 | training_prim: 2
30 | actors_to_load: 4
31 | has_lateral: False
32 | models: ['output/phc_kp_pnn_iccv/Humanoid.pth']
33 |
34 | zero_out_far: True
35 | zero_out_far_train: False
36 | cycle_motion: False
37 |
38 | getup_udpate_epoch: 95000
39 | getup_schedule: True
40 | recoverySteps: 90
41 | zero_out_far_steps: 90
42 | recoveryEpisodeProb: 0.5
43 | fallInitProb: 0.3
44 |
45 | hard_negative: False
46 |
47 | masterfoot: False
48 | freeze_toe: false
49 |
50 | real_weight: True
51 | kp_scale: 1
52 | remove_toe_im: False # For imitation
53 | power_reward: True
54 | power_coefficient: 0.00005
55 |
56 | has_shape_obs: False
57 | has_shape_obs_disc: False
58 | has_shape_variation: False
59 | shape_resampling_interval: 500
60 |
61 | pdControl: True
62 | powerScale: 1.0
63 | controlFrequencyInv: 2 # 30 Hz
64 | stateInit: "Random"
65 | hybridInitProb: 0.5
66 | numAMPObsSteps: 10
67 |
68 | localRootObs: True
69 | rootHeightObs: True
70 | keyBodies: ["R_Ankle", "L_Ankle", "R_Wrist", "L_Wrist"]
71 | contactBodies: ["R_Ankle", "L_Ankle", "R_Toe", "L_Toe"]
72 | resetBodies: ['Pelvis', 'L_Hip', 'L_Knee', 'R_Hip', 'R_Knee', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
73 | terminationHeight: 0.15
74 | enableEarlyTermination: True
75 | terminationDistance: 0.25
76 |
77 | ### Fut config
78 | numTrajSamples: 3
79 | trajSampleTimestepInv: 3
80 | enableTaskObs: True
81 |
82 | asset:
83 | assetRoot: "/"
84 | assetFileName: "mjcf/smpl_humanoid.xml"
85 |
86 | plane:
87 | staticFriction: 1.0
88 | dynamicFriction: 1.0
89 | restitution: 0.0
90 |
91 | sim:
92 | substeps: 2
93 | physx:
94 | num_threads: 4
95 | solver_type: 1 # 0: pgs, 1: tgs
96 | num_position_iterations: 4
97 | num_velocity_iterations: 0
98 | contact_offset: 0.02
99 | rest_offset: 0.0
100 | bounce_threshold_velocity: 0.2
101 | max_depenetration_velocity: 10.0
102 | default_buffer_size_multiplier: 10.0
103 |
104 | flex:
105 | num_inner_iterations: 10
106 | warm_start: 0.25
107 |
--------------------------------------------------------------------------------
/phc/phc/data/cfg/env/phc_shape_mcp_iccv.yaml:
--------------------------------------------------------------------------------
1 | # if given, will override the device setting in gym.
2 | project_name: "EgoQuest_IM"
3 | notes: "Progressive MCP without softmax, zero out far"
4 | env:
5 | numEnvs: 1024
6 | envSpacing: 2
7 | episodeLength: 300
8 | isFlagrun: False
9 | enableDebugVis: False
10 |
11 | bias_offset: False
12 | has_self_collision: True
13 | has_mesh: False
14 | has_jt_limit: False
15 | has_dof_subset: True
16 | has_upright_start: True
17 | has_smpl_pd_offset: False
18 | remove_toe: False # For humanoid's geom toe
19 | real_weight_porpotion_capsules: True
20 |
21 | sym_loss_coef: 1
22 | big_ankle: True
23 | fut_tracks: False
24 | obs_v: 6
25 |
26 | ######## PNN Configs ########
27 | has_pnn: True
28 | fitting: True
29 | num_prim: 4
30 | training_prim: 0
31 | actors_to_load: 4
32 | has_lateral: False
33 | models: ['output/phc_shape_pnn_iccv/Humanoid.pth']
34 |
35 | ######## Getup Configs ########
36 | zero_out_far: True
37 | zero_out_far_train: False
38 | cycle_motion: False
39 | getup_udpate_epoch: 78750
40 |
41 | getup_schedule: True
42 | recoverySteps: 90
43 | zero_out_far_steps: 90
44 | recoveryEpisodeProb: 0.5
45 | fallInitProb: 0.3
46 |
47 | hard_negative: False
48 |
49 | masterfoot: False
50 | freeze_toe: false
51 |
52 | real_weight: True
53 | kp_scale: 1
54 | remove_toe_im: False # For imitation
55 | power_reward: True
56 | power_coefficient: 0.00005
57 |
58 | has_shape_obs: True
59 | has_shape_obs_disc: True
60 | has_shape_variation: True
61 | shape_resampling_interval: 500
62 |
63 | pdControl: True
64 | powerScale: 1.0
65 | controlFrequencyInv: 2 # 30 Hz
66 | stateInit: "Random"
67 | hybridInitProb: 0.5
68 | numAMPObsSteps: 10
69 |
70 | localRootObs: True
71 | rootHeightObs: True
72 | keyBodies: ["R_Ankle", "L_Ankle", "R_Wrist", "L_Wrist"]
73 | contactBodies: ["R_Ankle", "L_Ankle", "R_Toe", "L_Toe"]
74 | resetBodies: ['Pelvis', 'L_Hip', 'L_Knee', 'R_Hip', 'R_Knee', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
75 | terminationHeight: 0.15
76 | enableEarlyTermination: True
77 | terminationDistance: 0.25
78 |
79 | ### Fut config
80 | numTrajSamples: 3
81 | trajSampleTimestepInv: 3
82 | enableTaskObs: True
83 |
84 | asset:
85 | assetRoot: "/"
86 | assetFileName: "mjcf/smpl_humanoid.xml"
87 |
88 | plane:
89 | staticFriction: 1.0
90 | dynamicFriction: 1.0
91 | restitution: 0.0
92 |
93 | sim:
94 | substeps: 2
95 | physx:
96 | num_threads: 4
97 | solver_type: 1 # 0: pgs, 1: tgs
98 | num_position_iterations: 4
99 | num_velocity_iterations: 0
100 | contact_offset: 0.02
101 | rest_offset: 0.0
102 | bounce_threshold_velocity: 0.2
103 | max_depenetration_velocity: 10.0
104 | default_buffer_size_multiplier: 10.0
105 |
106 | flex:
107 | num_inner_iterations: 10
108 | warm_start: 0.25
109 |
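
The getup block schedules fall recovery: recoveryEpisodeProb and fallInitProb control how often an episode is flagged as a recovery episode or starts from a fallen pose, while recoverySteps and zero_out_far_steps bound the recovery window. Per-reset sampling presumably reduces to Bernoulli draws along these lines (illustrative only; the actual logic lives in the getup task classes, not shown in this excerpt):

    import torch

    num_envs = 1024                            # numEnvs in this config
    recovery_ep = torch.rand(num_envs) < 0.5   # recoveryEpisodeProb
    fall_init = torch.rand(num_envs) < 0.3     # fallInitProb
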
--------------------------------------------------------------------------------
/phc/phc/utils/parse_task.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2023, NVIDIA Corporation
2 | # All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # 1. Redistributions of source code must retain the above copyright notice, this
8 | # list of conditions and the following disclaimer.
9 | #
10 | # 2. Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # 3. Neither the name of the copyright holder nor the names of its
15 | # contributors may be used to endorse or promote products derived from
16 | # this software without specific prior written permission.
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
29 | from phc.env.tasks.humanoid import Humanoid
30 | from phc.env.tasks.humanoid_amp import HumanoidAMP
31 | from phc.env.tasks.humanoid_amp_getup import HumanoidAMPGetup
32 | from phc.env.tasks.humanoid_im import HumanoidIm
33 | from phc.env.tasks.humanoid_speed import HumanoidSpeed
34 | from phc.env.tasks.humanoid_im_getup import HumanoidImGetup
35 | from phc.env.tasks.humanoid_im_mcp import HumanoidImMCP
36 | from phc.env.tasks.humanoid_im_mcp_getup import HumanoidImMCPGetup
37 | from phc.env.tasks.vec_task_wrappers import VecTaskPythonWrapper
38 | from phc.env.tasks.humanoid_im_demo import HumanoidImDemo
39 | from phc.env.tasks.humanoid_im_mcp_demo import HumanoidImMCPDemo
40 |
41 | from isaacgym import rlgpu
42 |
43 | import json
44 | import numpy as np
45 |
46 |
47 | def warn_task_name():
48 |     raise Exception("Unrecognized task!\nTask should be one of: [Humanoid, HumanoidAMP, HumanoidAMPGetup, HumanoidIm, HumanoidSpeed, HumanoidImGetup, HumanoidImMCP, HumanoidImMCPGetup, HumanoidImDemo, HumanoidImMCPDemo]")
49 |
50 |
51 | def parse_task(args, cfg, cfg_train, sim_params):
52 |
53 | # create native task and pass custom config
54 | device_id = args.device_id
55 | rl_device = args.rl_device
56 |
57 | cfg["seed"] = cfg_train.get("seed", -1)
58 | cfg_task = cfg["env"]
59 | cfg_task["seed"] = cfg["seed"]
60 |
61 | task = eval(args.task)(cfg=cfg, sim_params=sim_params, physics_engine=args.physics_engine, device_type=args.device, device_id=device_id, headless=args.headless)
62 | env = VecTaskPythonWrapper(task, rl_device, cfg_train.get("clip_observations", np.inf))
63 |
64 | return task, env
65 |
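
parse_task dispatches with eval(args.task), so the task name must exactly match one of the class names imported at the top of the file. A dict-based registry is a common, safer equivalent; a sketch (not the repo's code):

    TASKS = {cls.__name__: cls for cls in (
        Humanoid, HumanoidAMP, HumanoidAMPGetup, HumanoidIm, HumanoidSpeed,
        HumanoidImGetup, HumanoidImMCP, HumanoidImMCPGetup,
        HumanoidImDemo, HumanoidImMCPDemo)}

    task_cls = TASKS.get(args.task)
    if task_cls is None:
        warn_task_name()
    task = task_cls(cfg=cfg, sim_params=sim_params, physics_engine=args.physics_engine,
                    device_type=args.device, device_id=device_id, headless=args.headless)
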
--------------------------------------------------------------------------------
/phc/phc/learning/amp_network_mcp_builder.py:
--------------------------------------------------------------------------------
1 |
2 | from rl_games.algos_torch import torch_ext
3 | from rl_games.algos_torch import layers
4 | from phc.learning.amp_network_builder import AMPBuilder
5 | import torch
6 | import torch.nn as nn
7 | import numpy as np
8 | import copy
9 |
10 | DISC_LOGIT_INIT_SCALE = 1.0
11 |
12 |
13 | class AMPMCPBuilder(AMPBuilder):
14 |
15 | def __init__(self, **kwargs):
16 | super().__init__(**kwargs)
17 | return
18 |
19 | def build(self, name, **kwargs):
20 | net = AMPMCPBuilder.Network(self.params, **kwargs)
21 | return net
22 |
23 | class Network(AMPBuilder.Network):
24 |
25 | def __init__(self, params, **kwargs):
26 | self.self_obs_size = kwargs['self_obs_size']
27 | self.task_obs_size = kwargs['task_obs_size']
28 | self.task_obs_size_detail = kwargs['task_obs_size_detail']
29 | self.fut_tracks = self.task_obs_size_detail['fut_tracks']
30 | self.obs_v = self.task_obs_size_detail['obs_v']
31 | self.num_traj_samples = self.task_obs_size_detail['num_traj_samples']
32 | self.track_bodies = self.task_obs_size_detail['track_bodies']
33 | self.has_softmax = params.get("has_softmax", True)
34 |
35 | kwargs['input_shape'] = (self.self_obs_size + self.task_obs_size,) #
36 |
37 | super().__init__(params, **kwargs)
38 |
39 | self.num_primitive = self.task_obs_size_detail.get("num_prim", 4)
40 |
41 | composer_mlp_args = {
42 | 'input_size': self._calc_input_size((self.self_obs_size + self.task_obs_size,), self.actor_cnn),
43 | 'units': self.units + [self.num_primitive],
44 | 'activation': self.activation,
45 | 'norm_func_name': self.normalization,
46 | 'dense_func': torch.nn.Linear,
47 | 'd2rl': self.is_d2rl,
48 | 'norm_only_first_layer': self.norm_only_first_layer
49 | }
50 |
51 | self.composer = self._build_mlp(**composer_mlp_args)
52 |
53 | if self.has_softmax:
54 | print("!!!Has softmax!!!")
55 | self.composer.append(nn.Softmax(dim=1))
56 |
57 | self.running_mean = kwargs['mean_std'].running_mean
58 | self.running_var = kwargs['mean_std'].running_var
59 |
60 | def load(self, params):
61 | super().load(params)
62 | return
63 |
64 | def eval_actor(self, obs_dict):
65 | obs = obs_dict['obs']
66 | a_out = self.actor_cnn(obs) # This is empty
67 | a_out = a_out.contiguous().view(a_out.size(0), -1)
68 |
69 | a_out = self.composer(a_out)
70 |
71 | if self.is_discrete:
72 | logits = self.logits(a_out)
73 | return logits
74 |
75 | if self.is_multi_discrete:
76 | logits = [logit(a_out) for logit in self.logits]
77 | return logits
78 |
79 | if self.is_continuous:
80 | # mu = self.mu_act(self.mu(a_out))
81 | mu = a_out
82 | if self.space_config['fixed_sigma']:
83 | sigma = mu * 0.0 + self.sigma_act(self.sigma)
84 | else:
85 | sigma = self.sigma_act(self.sigma(a_out))
86 | return mu, sigma
87 | return
88 |
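
Note that the composer MLP maps the concatenated self + task observation to num_prim outputs, optionally softmax-normalized, and eval_actor returns those weights as the policy mean; the weighted mixing of primitive actions happens in the environment (see humanoid_im_mcp.py later in this dump). The composition math with toy tensors (shapes are illustrative):

    import torch

    B, num_prim, act_dim = 4, 4, 69                           # illustrative sizes
    weights = torch.softmax(torch.randn(B, num_prim), dim=1)  # composer output per env
    x_all = torch.randn(B, num_prim, act_dim)                 # stacked primitive actions
    actions = (weights[:, :, None] * x_all).sum(dim=1)        # convex combination
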
--------------------------------------------------------------------------------
/phc/phc/learning/amp_network_pnn_builder.py:
--------------------------------------------------------------------------------
1 |
2 | from rl_games.algos_torch import torch_ext
3 | from rl_games.algos_torch import layers
4 | from phc.learning.amp_network_builder import AMPBuilder
5 | import torch
6 | import torch.nn as nn
7 | import numpy as np
8 | import copy
9 | from phc.learning.pnn import PNN
10 | # from rl_games.algos_torch import torch_ext  # duplicate of the import at the top of this file
11 |
12 | DISC_LOGIT_INIT_SCALE = 1.0
13 |
14 |
15 | class AMPPNNBuilder(AMPBuilder):
16 |
17 | def __init__(self, **kwargs):
18 | super().__init__(**kwargs)
19 | return
20 |
21 | def build(self, name, **kwargs):
22 | net = AMPPNNBuilder.Network(self.params, **kwargs)
23 | return net
24 |
25 | class Network(AMPBuilder.Network):
26 |
27 | def __init__(self, params, **kwargs):
28 | self.self_obs_size = kwargs['self_obs_size']
29 | self.task_obs_size = kwargs['task_obs_size']
30 | self.task_obs_size_detail = kwargs['task_obs_size_detail']
31 | self.fut_tracks = self.task_obs_size_detail['fut_tracks']
32 | self.obs_v = self.task_obs_size_detail['obs_v']
33 | self.num_traj_samples = self.task_obs_size_detail['num_traj_samples']
34 | self.track_bodies = self.task_obs_size_detail['track_bodies']
35 | self.num_prim = self.task_obs_size_detail['num_prim']
36 | self.training_prim = self.task_obs_size_detail['training_prim']
37 | self.model_base = self.task_obs_size_detail['models_path'][0]
38 | self.actors_to_load = self.task_obs_size_detail['actors_to_load']
39 | self.has_lateral = self.task_obs_size_detail['has_lateral']
40 |
41 | kwargs['input_shape'] = (self.self_obs_size + self.task_obs_size,) #
42 |
43 | super().__init__(params, **kwargs)
44 | actor_mlp_args = {
45 | 'input_size': self._calc_input_size((self.self_obs_size + self.task_obs_size,), self.actor_cnn),
46 | 'units': self.units,
47 | 'activation': self.activation,
48 | 'norm_func_name': self.normalization,
49 | 'dense_func': torch.nn.Linear,
50 | }
51 |
52 | del self.actor_mlp
53 | self.discrete = params.get("discrete", False)
54 |
55 | self.pnn = PNN(actor_mlp_args, output_size=kwargs['actions_num'], numCols=self.num_prim, has_lateral=self.has_lateral)
56 | # self.pnn.load_base_net(self.model_base, self.actors_to_load)
57 | self.pnn.freeze_pnn(self.training_prim)
58 |
59 | self.running_mean = kwargs['mean_std'].running_mean
60 | self.running_var = kwargs['mean_std'].running_var
61 |
62 | def eval_actor(self, obs_dict):
63 | obs = obs_dict['obs']
64 |
65 | a_out = self.actor_cnn(obs) # This is empty
66 | a_out = a_out.contiguous().view(a_out.size(0), -1)
67 | a_out, a_outs = self.pnn(a_out, idx=self.training_prim)
68 |
69 | # a_out = a_outs[0]
70 |         # print("debugging")  # Debugging!!!
71 |
72 | if self.is_discrete:
73 | logits = self.logits(a_out)
74 | return logits
75 |
76 | if self.is_multi_discrete:
77 | logits = [logit(a_out) for logit in self.logits]
78 | return logits
79 |
80 | if self.is_continuous:
81 | # mu = self.mu_act(self.mu(a_out))
82 | mu = a_out
83 | if self.space_config['fixed_sigma']:
84 | sigma = mu * 0.0 + self.sigma_act(self.sigma)
85 | else:
86 | sigma = self.sigma_act(self.sigma(a_out))
87 |
88 | return mu, sigma
89 | return
90 |
--------------------------------------------------------------------------------
/phc/phc/utils/logger.py:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------------------------
2 | # @brief:
3 | # The logger here will be called all across the project. It is inspired
4 | # by Yuxin Wu (ppwwyyxx@gmail.com)
5 | #
6 | # @author:
7 | # Tingwu Wang, 2017, Feb, 20th
8 | # -----------------------------------------------------------------------------
9 |
10 | import logging
11 | import sys
12 | import os
13 | import datetime
14 | from termcolor import colored
15 |
16 | __all__ = ['set_file_handler'] # the actual worker is the '_logger'
17 |
18 |
19 | class _MyFormatter(logging.Formatter):
20 |     '''
21 |     @brief:
22 |         a logging.Formatter that colorizes records by severity level
23 |     '''
24 |
25 | def format(self, record):
26 | date = colored('[%(asctime)s @%(filename)s:%(lineno)d]', 'green')
27 | msg = '%(message)s'
28 |
29 | if record.levelno == logging.WARNING:
30 | fmt = date + ' ' + \
31 | colored('WRN', 'red', attrs=[]) + ' ' + msg
32 | elif record.levelno == logging.ERROR or \
33 | record.levelno == logging.CRITICAL:
34 | fmt = date + ' ' + \
35 | colored('ERR', 'red', attrs=['underline']) + ' ' + msg
36 | else:
37 | fmt = date + ' ' + msg
38 |
39 | if hasattr(self, '_style'):
40 |             # Python3 compatibility
41 | self._style._fmt = fmt
42 | self._fmt = fmt
43 |
44 | return super(self.__class__, self).format(record)
45 |
46 |
47 | _logger = logging.getLogger('joint_embedding')
48 | _logger.propagate = False
49 | _logger.setLevel(logging.INFO)
50 |
51 | # set the console output handler
52 | con_handler = logging.StreamHandler(sys.stdout)
53 | con_handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
54 | _logger.addHandler(con_handler)
55 |
56 |
57 | class GLOBAL_PATH(object):
58 |
59 | def __init__(self, path=None):
60 | if path is None:
61 | path = os.getcwd()
62 | self.path = path
63 |
64 | def _set_path(self, path):
65 | self.path = path
66 |
67 | def _get_path(self):
68 | return self.path
69 |
70 |
71 | PATH = GLOBAL_PATH()
72 |
73 |
74 | def set_file_handler(path=None, prefix='', time_str=''):
75 | # set the file output handler
76 | if time_str == '':
77 | file_name = prefix + \
78 | datetime.datetime.now().strftime("%A_%d_%B_%Y_%I:%M%p") + '.log'
79 | else:
80 | file_name = prefix + time_str + '.log'
81 |
82 | if path is None:
83 | mod = sys.modules['__main__']
84 | path = os.path.join(os.path.abspath(mod.__file__), '..', '..', 'log')
85 | else:
86 | path = os.path.join(path, 'log')
87 | path = os.path.abspath(path)
88 |
89 | path = os.path.join(path, file_name)
90 | if not os.path.exists(path):
91 | os.makedirs(path)
92 |
93 | PATH._set_path(path)
94 | path = os.path.join(path, file_name)
95 | from tensorboard_logger import configure
96 | configure(path)
97 |
98 | file_handler = logging.FileHandler(
99 | filename=os.path.join(path, 'logger'), encoding='utf-8', mode='w')
100 | file_handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
101 | _logger.addHandler(file_handler)
102 |
103 | _logger.info('Log file set to {}'.format(path))
104 | return path
105 |
106 |
107 | def _get_path():
108 | return PATH._get_path()
109 |
110 |
111 | _LOGGING_METHOD = ['info', 'warning', 'error', 'critical',
112 | 'warn', 'exception', 'debug']
113 |
114 | # export logger functions
115 | for func in _LOGGING_METHOD:
116 | locals()[func] = getattr(_logger, func)
117 |
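
The final loop exports the underlying logger's methods (info, warning, error, ...) as module-level functions, so the module is used directly. Typical usage, assuming the tensorboard_logger package that set_file_handler imports is installed (a sketch):

    from phc.utils import logger

    logger.set_file_handler(path="output/my_run", prefix="train_")
    logger.info("training started")
    logger.warning("rendered with the colored WRN prefix")
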
--------------------------------------------------------------------------------
/phc/phc/env/tasks/vec_task_wrappers.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2023, NVIDIA Corporation
2 | # All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # 1. Redistributions of source code must retain the above copyright notice, this
8 | # list of conditions and the following disclaimer.
9 | #
10 | # 2. Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # 3. Neither the name of the copyright holder nor the names of its
15 | # contributors may be used to endorse or promote products derived from
16 | # this software without specific prior written permission.
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
29 | from gym import spaces
30 | import numpy as np
31 | import torch
32 | from phc.env.tasks.vec_task import VecTaskCPU, VecTaskGPU, VecTaskPython
33 |
34 | class VecTaskCPUWrapper(VecTaskCPU):
35 | def __init__(self, task, rl_device, sync_frame_time=False, clip_observations=5.0):
36 | super().__init__(task, rl_device, sync_frame_time, clip_observations)
37 | return
38 |
39 | class VecTaskGPUWrapper(VecTaskGPU):
40 | def __init__(self, task, rl_device, clip_observations=5.0):
41 | super().__init__(task, rl_device, clip_observations)
42 | return
43 |
44 |
45 | class VecTaskPythonWrapper(VecTaskPython):
46 | def __init__(self, task, rl_device, clip_observations=5.0):
47 | super().__init__(task, rl_device, clip_observations)
48 |
49 | self._amp_obs_space = spaces.Box(np.ones(task.get_num_amp_obs()) * -np.Inf, np.ones(task.get_num_amp_obs()) * np.Inf)
50 |
51 | self._enc_amp_obs_space = spaces.Box(np.ones(task.get_num_enc_amp_obs()) * -np.Inf, np.ones(task.get_num_enc_amp_obs()) * np.Inf)
52 | return
53 |
54 | def reset(self, env_ids=None):
55 | self.task.reset(env_ids)
56 | return torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
57 |
58 | @property
59 | def amp_observation_space(self):
60 | return self._amp_obs_space
61 |
62 | @property
63 | def enc_amp_observation_space(self):
64 | return self._enc_amp_obs_space
65 |
66 | def fetch_amp_obs_demo(self, num_samples):
67 | return self.task.fetch_amp_obs_demo(num_samples)
68 |
69 |     # @property
70 |     # def enc_amp_observation_space(self):  # duplicate of the property defined above
71 |     #     return self._enc_amp_obs_space
72 |
73 | ################ Calm ################
74 | def fetch_amp_obs_demo_pair(self, num_samples):
75 | return self.task.fetch_amp_obs_demo_pair(num_samples)
76 |
77 | def fetch_amp_obs_demo_enc_pair(self, num_samples):
78 | return self.task.fetch_amp_obs_demo_enc_pair(num_samples)
79 |
80 | def fetch_amp_obs_demo_per_id(self, num_samples, motion_ids):
81 | return self.task.fetch_amp_obs_demo_per_id(num_samples, motion_ids)
82 |
--------------------------------------------------------------------------------
/phc/phc/env/tasks/humanoid_im_mcp.py:
--------------------------------------------------------------------------------
1 | import time
2 | import torch
3 | import phc.env.tasks.humanoid_im as humanoid_im
4 |
5 | from isaacgym.torch_utils import *
6 | from phc.utils.flags import flags
7 | from rl_games.algos_torch import torch_ext
8 | import torch.nn as nn
9 | from phc.learning.pnn import PNN
10 | from collections import deque
11 | from phc.learning.network_loader import load_mcp_mlp, load_pnn
12 |
13 | class HumanoidImMCP(humanoid_im.HumanoidIm):
14 |
15 | def __init__(self, cfg, sim_params, physics_engine, device_type, device_id, headless):
16 | self.num_prim = cfg["env"].get("num_prim", 3)
17 | self.discrete_mcp = cfg["env"].get("discrete_moe", False)
18 | self.has_pnn = cfg["env"].get("has_pnn", False)
19 | self.has_lateral = cfg["env"].get("has_lateral", False)
20 | self.z_activation = cfg["env"].get("z_activation", "relu")
21 |
22 | super().__init__(cfg=cfg, sim_params=sim_params, physics_engine=physics_engine, device_type=device_type, device_id=device_id, headless=headless)
23 |
24 | if self.has_pnn:
25 | assert (len(self.models_path) == 1)
26 | pnn_ck = torch_ext.load_checkpoint(self.models_path[0])
27 | self.pnn = load_pnn(pnn_ck, num_prim = self.num_prim, has_lateral = self.has_lateral, activation = self.z_activation, device = self.device)
28 | self.running_mean, self.running_var = pnn_ck['running_mean_std']['running_mean'], pnn_ck['running_mean_std']['running_var']
29 |
30 | self.fps = deque(maxlen=90)
31 |
32 | return
33 |
34 | def _setup_character_props(self, key_bodies):
35 | super()._setup_character_props(key_bodies)
36 | self._num_actions = self.num_prim
37 | return
38 |
39 | def get_task_obs_size_detail(self):
40 | task_obs_detail = super().get_task_obs_size_detail()
41 | task_obs_detail['num_prim'] = self.num_prim
42 | return task_obs_detail
43 |
44 | def step(self, weights):
45 |
46 | # if self.dr_randomizations.get('actions', None):
47 | # actions = self.dr_randomizations['actions']['noise_lambda'](actions)
48 | # if flags.server_mode:
49 | # t_s = time.time()
50 |
51 | with torch.no_grad():
52 | # Apply the trained primitives: first normalize obs with their running statistics.
53 | curr_obs = ((self.obs_buf - self.running_mean.float()) / torch.sqrt(self.running_var.float() + 1e-05))
54 |
55 | curr_obs = torch.clamp(curr_obs, min=-5.0, max=5.0)
56 | if self.discrete_mcp:
57 | max_idx = torch.argmax(weights, dim=1)
58 | weights = torch.nn.functional.one_hot(max_idx, num_classes=self.num_prim).float()
59 |
60 | if self.has_pnn:
61 | _, actions = self.pnn(curr_obs)
62 |
63 | x_all = torch.stack(actions, dim=1)
64 | else:
65 | x_all = torch.stack([net(curr_obs) for net in self.actors], dim=1)
66 | # print(weights)
67 | actions = torch.sum(weights[:, :, None] * x_all, dim=1)
68 |
69 | # actions = x_all[:, 3] # Debugging
70 | # apply actions
71 | self.pre_physics_step(actions)
72 |
73 | # step physics and render each frame
74 | self._physics_step()
75 |
76 | # on the CPU pipeline, simulation results must be fetched explicitly after stepping physics
77 | if self.device == 'cpu':
78 | self.gym.fetch_results(self.sim, True)
79 |
80 | # compute observations, rewards, resets, ...
81 | self.post_physics_step()
82 | # if flags.server_mode:
83 | # dt = time.time() - t_s
84 | # print(f'\r {1/dt:.2f} fps', end='')
85 |
86 | # dt = time.time() - t_s
87 | # self.fps.append(1/dt)
88 | # print(f'\r {np.mean(self.fps):.2f} fps', end='')
89 |
90 |
91 | if self.dr_randomizations.get('observations', None):
92 | self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf)
93 |
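The heart of step() above is the MCP blend: every primitive proposes a full action, and the policy's weights mix them per environment. A self-contained sketch of that reduction (toy shapes matching the x_all / weights usage above):

    import torch

    B, P, A = 4, 3, 19            # envs, primitives, action dims (toy sizes)
    x_all = torch.randn(B, P, A)  # per-primitive actions, as stacked in step()
    weights = torch.softmax(torch.randn(B, P), dim=1)

    actions = torch.sum(weights[:, :, None] * x_all, dim=1)  # same reduction as step()
    assert actions.shape == (B, A)

    # discrete_mcp variant: route each env to its argmax primitive only
    one_hot = torch.nn.functional.one_hot(weights.argmax(dim=1), num_classes=P).float()
    discrete_actions = torch.sum(one_hot[:, :, None] * x_all, dim=1)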
--------------------------------------------------------------------------------
/phc/phc/learning/unrealego/base_model.py:
--------------------------------------------------------------------------------
2 | import os
3 | import torch
4 | import torch.nn as nn
5 | from collections import OrderedDict
6 | from utils import util
7 |
8 |
9 | class BaseModel(nn.Module):
10 | def name(self):
11 | return 'BaseModel'
12 |
13 | def initialize(self, opt):
14 | self.opt = opt
15 | self.gpu_ids = opt.gpu_ids
16 | self.isTrain = opt.isTrain
17 | self.save_dir = os.path.join(opt.log_dir, opt.experiment_name)
18 | self.loss_names = []
19 | self.model_names = []
20 | self.visual_names = []
21 | self.visual_pose_names = []
22 | self.image_paths = []
23 | self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
24 |
25 | def set_input(self, input):
26 | self.input = input
27 |
28 | # update learning rate
29 | def update_learning_rate(self):
30 | old_lr = self.optimizers[0].param_groups[0]['lr']
31 | for scheduler in self.schedulers:
32 | scheduler.step()
33 | lr = self.optimizers[0].param_groups[0]['lr']
34 | print('learning rate %.7f -> %.7f' % (old_lr, lr))
35 |
36 |
37 | # return training loss
38 | def get_current_errors(self):
39 | errors_ret = OrderedDict()
40 | for name in self.loss_names:
41 | if isinstance(name, str):
42 | errors_ret[name] = getattr(self, 'loss_' + name).item()
43 | return errors_ret
44 |
45 | # return visualization images
46 | def get_current_visuals(self):
47 | visual_ret = OrderedDict()
48 | for name in self.visual_names:
49 | if isinstance(name, str):
50 | value = getattr(self, name)
51 |
52 | if "heatmap" in name:
53 | is_heatmap = True
54 | else:
55 | is_heatmap = False
56 |
57 | visual_ret[name] = util.tensor2im(value.data, is_heatmap=is_heatmap)
58 |
59 | # if isinstance(value, list):
60 | # visual_ret[name] = util.tensor2im(value[-1].data, is_heatmap)
61 | # else:
62 | # visual_ret[name] = util.tensor2im(value.data, is_heatmap)
63 |
64 | return visual_ret
65 |
66 | # save models
67 | def save_networks(self, which_epoch):
68 | for name in self.model_names:
69 | if isinstance(name, str):
70 | save_filename = '%s_net_%s.pth' % (which_epoch, name)
71 | save_path = os.path.join(self.save_dir, save_filename)
72 | net = getattr(self, 'net_' + name)
73 | torch.save(net.cpu().state_dict(), save_path)
74 | if len(self.gpu_ids) > 0 and torch.cuda.is_available():
75 | net.cuda()
76 |
77 | # load models
78 | def load_networks(self, which_epoch=None, net=None, path_to_trained_weights=None):
79 | if which_epoch is not None:
80 | for name in self.model_names:
81 | print(name)
82 | if isinstance(name, str):
83 | save_filename = '%s_net_%s.pth' % (which_epoch, name)
84 | save_path = os.path.join(self.save_dir, save_filename)
85 | net = getattr(self, 'net_'+name)
86 | state_dict = torch.load(save_path)
87 | net.load_state_dict(state_dict)
88 | # net.load_state_dict(self.fix_model_state_dict(state_dict))
89 | if not self.isTrain:
90 | net.eval()
91 | else:
92 | state_dict = torch.load(path_to_trained_weights)
93 | if self.opt.distributed:
94 | net.load_state_dict(self.fix_model_state_dict(state_dict))
95 | else:
96 | net.load_state_dict(state_dict)
97 | print('Loaded pre_trained {}'.format(os.path.basename(path_to_trained_weights)))
98 |
99 | def fix_model_state_dict(self, state_dict):
100 | from collections import OrderedDict
101 | new_state_dict = OrderedDict()
102 | for k, v in state_dict.items():
103 | name = k
104 | if name.startswith('module.'):
105 | name = name[7:] # remove 'module.' of dataparallel
106 | new_state_dict[name] = v
107 | return new_state_dict
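fix_model_state_dict exists because nn.DataParallel registers the wrapped network as .module, so its checkpoints carry a 'module.' prefix that a bare model rejects on load. A quick demonstration of the rename, building the prefixed dict by hand rather than from a real checkpoint:

    import torch.nn as nn
    from collections import OrderedDict

    net = nn.Linear(4, 2)
    # keys as a DataParallel checkpoint would store them: 'module.weight', 'module.bias'
    state_dict = OrderedDict(('module.' + k, v) for k, v in net.state_dict().items())

    fixed = OrderedDict((k[7:] if k.startswith('module.') else k, v)
                        for k, v in state_dict.items())
    nn.Linear(4, 2).load_state_dict(fixed)  # loads cleanly once the prefix is gone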
--------------------------------------------------------------------------------
/phc/phc/env/tasks/humanoid_amp_task.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2023, NVIDIA Corporation
2 | # All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # 1. Redistributions of source code must retain the above copyright notice, this
8 | # list of conditions and the following disclaimer.
9 | #
10 | # 2. Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # 3. Neither the name of the copyright holder nor the names of its
15 | # contributors may be used to endorse or promote products derived from
16 | # this software without specific prior written permission.
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
29 | import torch
30 |
31 | import phc.env.tasks.humanoid_amp as humanoid_amp
32 | from phc.utils.flags import flags
33 | class HumanoidAMPTask(humanoid_amp.HumanoidAMP):
34 | def __init__(self, cfg, sim_params, physics_engine, device_type, device_id, headless):
35 | self._enable_task_obs = cfg["env"]["enableTaskObs"]
36 |
37 | super().__init__(cfg=cfg,
38 | sim_params=sim_params,
39 | physics_engine=physics_engine,
40 | device_type=device_type,
41 | device_id=device_id,
42 | headless=headless)
43 | self.has_task = True
44 | return
45 |
46 |
47 | def get_obs_size(self):
48 | obs_size = super().get_obs_size()
49 | if (self._enable_task_obs):
50 | task_obs_size = self.get_task_obs_size()
51 | obs_size += task_obs_size
52 | return obs_size
53 |
54 | def get_task_obs_size(self):
55 | return 0
56 |
57 | def pre_physics_step(self, actions):
58 | super().pre_physics_step(actions)
59 | self._update_task()
60 |
61 | return
62 |
63 | def render(self, sync_frame_time=False):
64 | super().render(sync_frame_time)
65 |
66 | if self.viewer or flags.server_mode:
67 | self._draw_task()
68 | return
69 |
70 | def _update_task(self):
71 | return
72 |
73 | def _reset_envs(self, env_ids):
74 | super()._reset_envs(env_ids)
75 | self._reset_task(env_ids)
76 | return
77 |
78 | def _reset_task(self, env_ids):
79 | return
80 |
81 | def _compute_observations(self, env_ids=None):
82 | # env_ids is used for resetting
83 | if env_ids is None:
84 | env_ids = torch.arange(self.num_envs).to(self.device)
85 | humanoid_obs = self._compute_humanoid_obs(env_ids)
86 |
87 | if (self._enable_task_obs):
88 | task_obs = self._compute_task_obs(env_ids)
89 | obs = torch.cat([humanoid_obs, task_obs], dim=-1)
90 | else:
91 | obs = humanoid_obs
92 |
93 |
94 | if self.obs_v == 2:
95 | # Double sub will return a copy.
96 | B, N = obs.shape
97 | sums = self.obs_buf[env_ids, 0:self.past_track_steps].abs().sum(dim=1)
98 | zeros = sums == 0
99 | nonzero = ~zeros
100 | obs_slice = self.obs_buf[env_ids]
101 | obs_slice[zeros] = torch.tile(obs[zeros], (1, self.past_track_steps))
102 | obs_slice[nonzero] = torch.cat([obs_slice[nonzero, N:], obs[nonzero]], dim=-1)
103 | self.obs_buf[env_ids] = obs_slice
104 | else:
105 | self.obs_buf[env_ids] = obs
106 |
107 | return
108 |
109 | def _compute_task_obs(self, env_ids=None):
110 | raise NotImplementedError
111 |
112 | def _compute_reward(self, actions):
113 | raise NotImplementedError
114 |
115 | def _draw_task(self):
116 | return
117 |
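The obs_v == 2 branch above keeps a rolling window of the last past_track_steps frames per environment: freshly reset environments (all-zero buffers) are tiled with the current frame, while live ones shift left by one frame and append it. The update in isolation, on a toy buffer:

    import torch

    steps, N = 3, 2                        # history length, per-frame obs size
    obs_buf = torch.zeros(2, steps * N)    # env 0 freshly reset, env 1 live
    obs_buf[1] = torch.arange(steps * N, dtype=torch.float32)
    obs = torch.tensor([[10., 11.], [12., 13.]])  # new frame for each env

    reset = obs_buf[:, :steps].abs().sum(dim=1) == 0
    obs_buf[reset] = obs[reset].tile(1, steps)                               # fill the whole window
    obs_buf[~reset] = torch.cat([obs_buf[~reset, N:], obs[~reset]], dim=-1)  # shift and append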
--------------------------------------------------------------------------------
/phc/phc/learning/amp_datasets.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from rl_games.common import datasets
3 |
4 | class AMPDataset(datasets.PPODataset):
5 | def __init__(self, batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len):
6 | super().__init__(batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len)
7 | self._idx_buf = torch.randperm(self.batch_size)
8 |
9 |
10 |
11 | return
12 |
13 | def update_mu_sigma(self, mu, sigma):
14 | raise NotImplementedError()
15 | return
16 |
17 | # def _get_item_rnn(self, idx):
18 | # gstart = idx * self.num_games_batch
19 | # gend = (idx + 1) * self.num_games_batch
20 | # start = gstart * self.seq_len
21 | # end = gend * self.seq_len
22 | # self.last_range = (start, end)
23 | # input_dict = {}
24 | # for k,v in self.values_dict.items():
25 | # if k not in self.special_names:
26 | # if v is dict:
27 | # v_dict = { kd:vd[start:end] for kd, vd in v.items() }
28 | # input_dict[k] = v_dict
29 | # else:
30 | # input_dict[k] = v[start:end]
31 |
32 | # rnn_states = self.values_dict['rnn_states']
33 | # input_dict['rnn_states'] = [s[:,gstart:gend,:] for s in rnn_states]
34 | # return input_dict
35 |
36 | def update_values_dict(self, values_dict, rnn_format = False, horizon_length = 1, num_envs = 1):
37 | self.values_dict = values_dict
38 | self.horizon_length = horizon_length
39 | self.num_envs = num_envs
40 |
41 | if rnn_format and self.is_rnn:
42 | for k,v in self.values_dict.items():
43 | if k not in self.special_names and v is not None:
44 | self.values_dict[k] = self.values_dict[k].view(self.num_envs, self.horizon_length, -1).squeeze() # Actions are already swapped to the correct format.
45 | if not self.values_dict['rnn_states'] is None:
46 | self.values_dict['rnn_states'] = [s.reshape(self.num_envs, self.horizon_length, -1) for s in self.values_dict['rnn_states']] # rnn_states are not swapped in AMP, so do not swap it here.
47 | self._idx_buf = torch.randperm(self.num_envs) # Update to only shuffle the envs.
48 |
49 | # def _get_item_rnn(self, idx):
50 | # data = super()._get_item_rnn(idx)
51 | # import ipdb; ipdb.set_trace()
52 | # return data
53 |
54 | def _get_item_rnn(self, idx):
55 | # ZL: I am duplicating the get_item_rnn function so that it can also return sequential data. Pretty hacky.
56 | # BPTT: the input dict is [batch, seqlen, features]. This function returns sequences that come from the same episode and environment, in sequential order. Not used at the moment since seq_len is set to 1 for RNN right now.
57 | step_size = int(self.minibatch_size/self.horizon_length)
58 |
59 | start = idx * step_size
60 | end = (idx + 1) * step_size
61 | sample_idx = self._idx_buf[start:end]
62 |
63 | input_dict = {}
64 |
65 | for k,v in self.values_dict.items():
66 | if k not in self.special_names and v is not None:
67 | input_dict[k] = v[sample_idx, :].view(step_size * self.horizon_length, -1).squeeze() # flatten to batch size
68 |
69 | input_dict['old_values'] = input_dict['old_values'][:, None] # ZL Hack: following compute assumes that the old_values is [batch, 1], so has to change this back. Otherwise, the loss will be wrong.
70 | input_dict['returns'] = input_dict['returns'][:, None] # ZL Hack: same as above; returns must also be [batch, 1], otherwise the loss will be wrong.
71 |
72 | if not self.values_dict['rnn_states'] is None:
73 | input_dict['rnn_states'] = [s[sample_idx, :].view(step_size * self.horizon_length, -1) for s in self.values_dict["rnn_states"]]
74 |
75 | if (end >= self.batch_size):
76 | self._shuffle_idx_buf()
77 |
78 |
79 | return input_dict
80 |
81 | def _get_item(self, idx):
82 | start = idx * self.minibatch_size
83 | end = (idx + 1) * self.minibatch_size
84 | sample_idx = self._idx_buf[start:end]
85 |
86 | input_dict = {}
87 | for k,v in self.values_dict.items():
88 | if k not in self.special_names and v is not None:
89 | input_dict[k] = v[sample_idx]
90 |
91 | if (end >= self.batch_size):
92 | self._shuffle_idx_buf()
93 |
94 | return input_dict
95 |
96 | def _shuffle_idx_buf(self):
97 | if self.is_rnn:
98 | self._idx_buf = torch.randperm(self.num_envs)
99 | else:
100 | self._idx_buf[:] = torch.randperm(self.batch_size)
101 | return
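Both _get_item variants walk a shuffled index permutation in fixed windows and reshuffle once it is exhausted, so every transition is visited exactly once per epoch in random order. The pattern in isolation (hypothetical batch of 8, minibatches of 4):

    import torch

    batch_size, minibatch_size = 8, 4
    values = torch.arange(batch_size)      # stand-in for one entry of values_dict
    idx_buf = torch.randperm(batch_size)

    for idx in range(batch_size // minibatch_size):
        start, end = idx * minibatch_size, (idx + 1) * minibatch_size
        minibatch = values[idx_buf[start:end]]
        if end >= batch_size:              # epoch exhausted: reshuffle for the next pass
            idx_buf = torch.randperm(batch_size)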
--------------------------------------------------------------------------------
/phc/phc/utils/plot_script.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy as np
3 | import matplotlib
4 | import matplotlib.pyplot as plt
5 | from mpl_toolkits.mplot3d import Axes3D
6 | from matplotlib.animation import FuncAnimation, FFMpegFileWriter
7 | from mpl_toolkits.mplot3d.art3d import Poly3DCollection
8 | import mpl_toolkits.mplot3d.axes3d as p3
9 | # import cv2
10 | from textwrap import wrap
11 |
12 |
13 | def list_cut_average(ll, intervals):
14 | if intervals == 1:
15 | return ll
16 |
17 | bins = math.ceil(len(ll) * 1.0 / intervals)
18 | ll_new = []
19 | for i in range(bins):
20 | l_low = intervals * i
21 | l_high = l_low + intervals
22 | l_high = l_high if l_high < len(ll) else len(ll)
23 | ll_new.append(np.mean(ll[l_low:l_high]))
24 | return ll_new
25 |
26 |
27 | def plot_3d_motion(save_path, kinematic_tree, joints, title, figsize=(3, 3), fps=120, radius=3,
28 | vis_mode='default', gt_joints=None):
29 | matplotlib.use('Agg')
30 |
31 | title = '\n'.join(wrap(title, 20))
32 |
33 | def init():
34 | ax.set_xlim3d([-radius / 2, radius / 2])
35 | ax.set_ylim3d([0, radius])
36 | ax.set_zlim3d([-radius / 3., radius * 2 / 3.])
37 | # print(title)
38 | fig.suptitle(title, fontsize=10)
39 | ax.grid(visible=False)
40 |
41 | def plot_xzPlane(minx, maxx, miny, minz, maxz):
42 | ## Plot a plane XZ
43 | verts = [
44 | [minx, miny, minz],
45 | [minx, miny, maxz],
46 | [maxx, miny, maxz],
47 | [maxx, miny, minz]
48 | ]
49 | xz_plane = Poly3DCollection([verts])
50 | xz_plane.set_facecolor((0.5, 0.5, 0.5, 0.5))
51 | ax.add_collection3d(xz_plane)
52 |
53 | # return ax
54 |
55 | # (seq_len, joints_num, 3)
56 | data = joints.copy().reshape(len(joints), -1, 3)
57 | if not gt_joints is None:
58 | data_gt = gt_joints.copy().reshape(len(gt_joints), -1, 3)
59 |
60 | fig = plt.figure(figsize=figsize)
61 | plt.tight_layout()
62 | ax = p3.Axes3D(fig)
63 | init()
64 | MINS = data.min(axis=0).min(axis=0)
65 | MAXS = data.max(axis=0).max(axis=0)
66 | colors_blue = ["#4D84AA", "#5B9965", "#61CEB9", "#34C1E2", "#80B79A"] # GT color
67 | colors_orange = ["#DD5A37", "#D69E00", "#B75A39", "#FF6D00", "#DDB50E"] # Generation color
68 | colors = colors_orange
69 | if vis_mode == 'upper_body': # lower body is fixed to the input motion
70 | colors[0] = colors_blue[0]
71 | colors[1] = colors_blue[1]
72 | elif vis_mode == 'gt':
73 | colors = colors_blue
74 |
75 | frame_number = data.shape[0]
76 | # print(dataset.shape)
77 |
78 | height_offset = MINS[1]
79 |
80 | data[:, :, 1] -= height_offset
81 | trajec = data[:, 0, [0, 2]]
82 | data[..., 0] -= data[:, 0:1, 0]
83 | data[..., 2] -= data[:, 0:1, 2]
84 |
85 | if not gt_joints is None:
86 | data_gt[:, :, 1] -= height_offset
87 | data_gt[..., 0] -= data_gt[:, 0:1, 0]
88 | data_gt[..., 2] -= data_gt[:, 0:1, 2]
89 |
90 | # print(trajec.shape)
91 |
92 | def update(index):
93 | # print(index)
94 | # ax.lines = []
95 | # ax.collections = []
96 | ax.lines.clear()
97 | ax.collections.clear()
98 |
99 | ax.view_init(elev=120, azim=-90)
100 | ax.dist = 5
101 | # ax =
102 | plot_xzPlane(MINS[0] - trajec[index, 0], MAXS[0] - trajec[index, 0], 0, MINS[2] - trajec[index, 1],
103 | MAXS[2] - trajec[index, 1])
104 | # ax.scatter(dataset[index, :22, 0], dataset[index, :22, 1], dataset[index, :22, 2], color='black', s=3)
105 |
106 | used_colors = colors
107 | for i, (chain, color) in enumerate(zip(kinematic_tree, used_colors)):
108 | linewidth = 2
109 | ax.plot3D(data[index, chain, 0], data[index, chain, 1], data[index, chain, 2], linewidth=linewidth, color=color)
110 | ax.scatter(data[index, chain, 0], data[index, chain, 1], data[index, chain, 2], color=color, s=50)
111 |
112 | if not gt_joints is None:
113 | ax.plot3D(data_gt[index, chain, 0], data_gt[index, chain, 1], data_gt[index, chain, 2], linewidth=linewidth, color=colors_blue[i])
114 | ax.scatter(data_gt[index, chain, 0], data_gt[index, chain, 1], data_gt[index, chain, 2], color=colors_blue[i], s=50)
115 |
116 |
117 | plt.axis('off')
118 | ax.set_xticklabels([])
119 | ax.set_yticklabels([])
120 | ax.set_zticklabels([])
121 |
122 |
123 | ani = FuncAnimation(fig, update, frames=frame_number, interval=1000 / fps, repeat=False)
124 |
125 | # writer = FFMpegFileWriter(fps=fps)
126 | ani.save(save_path, fps=fps)
127 | # ani = FuncAnimation(fig, update, frames=frame_number, interval=1000 / fps, repeat=False, init_func=init)
128 | # ani.save(save_path, writer='pillow', fps=1000 / fps)
129 |
130 | plt.close()
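A minimal invocation of plot_3d_motion on synthetic data (a single two-bone chain; names and values are made up). Note the module constructs its 3D axes via p3.Axes3D(fig), which requires an older matplotlib (roughly 3.6 or earlier), and saving .mp4 requires ffmpeg:

    import numpy as np
    from phc.utils.plot_script import plot_3d_motion

    kinematic_tree = [[0, 1, 2]]              # one chain through joints 0-1-2
    T = 30
    joints = np.zeros((T, 3, 3), dtype=np.float32)
    joints[:, 1, 1] = 0.5                     # joint 1 half a unit up
    joints[:, 2, 1] = 1.0                     # joint 2 a full unit up
    joints[:, 2, 0] = np.linspace(0, 0.3, T)  # joint 2 drifts sideways over time

    plot_3d_motion("toy_motion.mp4", kinematic_tree, joints, title="toy chain", fps=15)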
--------------------------------------------------------------------------------
/scripts/data_process/grad_fit_h1_shape.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import sys
4 | import pdb
5 | import os.path as osp
6 | sys.path.append(os.getcwd())
7 |
8 | from phc.utils import torch_utils
9 | from smpl_sim.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState
10 | from scipy.spatial.transform import Rotation as sRot
11 | import numpy as np
12 | import torch
13 | from smpl_sim.smpllib.smpl_parser import (
14 | SMPL_Parser,
15 | SMPLH_Parser,
16 | SMPLX_Parser,
17 | )
18 |
19 | import joblib
20 | import torch
21 | import torch.nn.functional as F
22 | import math
23 | from phc.utils.pytorch3d_transforms import axis_angle_to_matrix
24 | from torch.autograd import Variable
25 | from scipy.ndimage import gaussian_filter1d
26 | from tqdm.notebook import tqdm
27 | from smpl_sim.smpllib.smpl_joint_names import SMPL_MUJOCO_NAMES, SMPL_BONE_ORDER_NAMES, SMPLH_BONE_ORDER_NAMES, SMPLH_MUJOCO_NAMES
28 | from phc.utils.torch_h1_humanoid_batch import Humanoid_Batch, H1_ROTATION_AXIS
29 |
30 | h1_joint_names = [ 'pelvis',
31 | 'left_hip_yaw_link', 'left_hip_roll_link','left_hip_pitch_link', 'left_knee_link', 'left_ankle_link',
32 | 'right_hip_yaw_link', 'right_hip_roll_link', 'right_hip_pitch_link', 'right_knee_link', 'right_ankle_link',
33 | 'torso_link', 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link',
34 | 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link']
35 |
36 |
37 | h1_fk = Humanoid_Batch(extend_head=True) # load forward kinematics model
38 | #### Define correspondences between h1 and smpl joints
39 | h1_joint_names_augment = h1_joint_names + ["left_hand_link", "right_hand_link", "head_link"]
40 | h1_joint_pick = ['pelvis', 'left_hip_yaw_link', "left_knee_link", "left_ankle_link", 'right_hip_yaw_link', 'right_knee_link', 'right_ankle_link', "left_shoulder_roll_link", "left_elbow_link", "left_hand_link", "right_shoulder_roll_link", "right_elbow_link", "right_hand_link", "head_link"]
41 | smpl_joint_pick = ["Pelvis", "L_Hip", "L_Knee", "L_Ankle", "R_Hip", "R_Knee", "R_Ankle", "L_Shoulder", "L_Elbow", "L_Hand", "R_Shoulder", "R_Elbow", "R_Hand", "Head"]
42 | h1_joint_pick_idx = [ h1_joint_names_augment.index(j) for j in h1_joint_pick]
43 | smpl_joint_pick_idx = [SMPL_BONE_ORDER_NAMES.index(j) for j in smpl_joint_pick]
44 |
45 |
46 | #### Prepare fitting variables
47 | device = torch.device("cpu")
48 | pose_aa_h1 = np.repeat(np.repeat(sRot.identity().as_rotvec()[None, None, None, ], 22, axis = 2), 1, axis = 1)
49 | pose_aa_h1 = torch.from_numpy(pose_aa_h1).float()
50 |
51 | dof_pos = torch.zeros((1, 19))
52 | pose_aa_h1 = torch.cat([torch.zeros((1, 1, 3)), H1_ROTATION_AXIS * dof_pos[..., None], torch.zeros((1, 2, 3))], axis = 1)
53 |
54 |
55 | root_trans = torch.zeros((1, 1, 3))
56 |
57 | ###### prepare SMPL default pose for H1
58 | pose_aa_stand = np.zeros((1, 72))
59 | rotvec = sRot.from_quat([0.5, 0.5, 0.5, 0.5]).as_rotvec()
60 | pose_aa_stand[:, :3] = rotvec
61 | pose_aa_stand = pose_aa_stand.reshape(-1, 24, 3)
62 | pose_aa_stand[:, SMPL_BONE_ORDER_NAMES.index('L_Shoulder')] = sRot.from_euler("xyz", [0, 0, -np.pi/2], degrees = False).as_rotvec()
63 | pose_aa_stand[:, SMPL_BONE_ORDER_NAMES.index('R_Shoulder')] = sRot.from_euler("xyz", [0, 0, np.pi/2], degrees = False).as_rotvec()
64 | pose_aa_stand[:, SMPL_BONE_ORDER_NAMES.index('L_Elbow')] = sRot.from_euler("xyz", [0, -np.pi/2, 0], degrees = False).as_rotvec()
65 | pose_aa_stand[:, SMPL_BONE_ORDER_NAMES.index('R_Elbow')] = sRot.from_euler("xyz", [0, np.pi/2, 0], degrees = False).as_rotvec()
66 | pose_aa_stand = torch.from_numpy(pose_aa_stand.reshape(-1, 72))
67 |
68 | smpl_parser_n = SMPL_Parser(model_path="data/smpl", gender="neutral")
69 |
70 | ###### Shape fitting
71 | trans = torch.zeros([1, 3])
72 | beta = torch.zeros([1, 10])
73 | verts, joints = smpl_parser_n.get_joints_verts(pose_aa_stand, beta , trans)
74 | offset = joints[:, 0] - trans
75 | root_trans_offset = trans + offset
76 |
77 | fk_return = h1_fk.fk_batch(pose_aa_h1[None, ], root_trans_offset[None, 0:1])
78 |
79 | shape_new = Variable(torch.zeros([1, 10]).to(device), requires_grad=True)
80 | scale = Variable(torch.ones([1]).to(device), requires_grad=True)
81 | optimizer_shape = torch.optim.Adam([shape_new, scale],lr=0.1)
82 |
83 |
84 | for iteration in range(1000):
85 | verts, joints = smpl_parser_n.get_joints_verts(pose_aa_stand, shape_new, trans[0:1])
86 | root_pos = joints[:, 0]
87 | joints = (joints - joints[:, 0]) * scale + root_pos
88 | diff = fk_return.global_translation_extend[:, :, h1_joint_pick_idx] - joints[:, smpl_joint_pick_idx]
89 | loss_g = diff.norm(dim = -1).mean()
90 | loss = loss_g
91 | if iteration % 100 == 0:
92 | print(iteration, loss.item() * 1000)
93 |
94 | optimizer_shape.zero_grad()
95 | loss.backward()
96 | optimizer_shape.step()
97 |
98 | os.makedirs("data/h1", exist_ok=True)
99 | joblib.dump((shape_new.detach(), scale), "data/h1/shape_optimized_v1.pkl")  # v2 has hip joints
100 | print("shape fitted and saved to data/h1/shape_optimized_v1.pkl")
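Assuming the neutral SMPL model files sit under data/smpl (per model_path above), the script takes no arguments and is run from the repo root:

    python scripts/data_process/grad_fit_h1_shape.py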
--------------------------------------------------------------------------------
/phc/phc/utils/running_mean_std.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 | '''
5 | Updates running statistics from full batches of data.
6 | '''
7 |
8 |
9 | class RunningMeanStd(nn.Module):
10 |
11 | def __init__(self,
12 | insize,
13 | epsilon=1e-05,
14 | per_channel=False,
15 | norm_only=False):
16 | super(RunningMeanStd, self).__init__()
17 | print('RunningMeanStd: ', insize)
18 | self.insize = insize
19 | self.mean_size = insize[0]
20 | self.epsilon = epsilon
21 |
22 | self.norm_only = norm_only
23 | self.per_channel = per_channel
24 | if per_channel:
25 | if len(self.insize) == 3:
26 | self.axis = [0, 2, 3]
27 | if len(self.insize) == 2:
28 | self.axis = [0, 2]
29 | if len(self.insize) == 1:
30 | self.axis = [0]
31 | in_size = self.insize[0]
32 | else:
33 | self.axis = [0]
34 | in_size = insize
35 |
36 | self.register_buffer("running_mean",
37 | torch.zeros(in_size, dtype=torch.float64))
38 | self.register_buffer("running_var",
39 | torch.ones(in_size, dtype=torch.float64))
40 | self.register_buffer("count", torch.ones((), dtype=torch.float64))
41 |
42 | self.frozen = False
43 | self.frozen_partial = False
44 |
45 | def freeze(self):
46 | self.frozen = True
47 |
48 | def unfreeze(self):
49 | self.frozen = False
50 |
51 | def freeze_partial(self, diff):
52 | self.frozen_partial = True
53 | self.diff = diff
54 |
55 |
56 | def _update_mean_var_count_from_moments(self, mean, var, count, batch_mean,
57 | batch_var, batch_count):
58 | delta = batch_mean - mean
59 | tot_count = count + batch_count
60 |
61 | new_mean = mean + delta * batch_count / tot_count
62 | m_a = var * count
63 | m_b = batch_var * batch_count
64 | M2 = m_a + m_b + delta**2 * count * batch_count / tot_count
65 | new_var = M2 / tot_count
66 | new_count = tot_count
67 | return new_mean, new_var, new_count
68 |
69 | def forward(self, input, unnorm=False):
70 | # change shape
71 | if self.per_channel:
72 | if len(self.insize) == 3:
73 | current_mean = self.running_mean.view(
74 | [1, self.insize[0], 1, 1]).expand_as(input)
75 | current_var = self.running_var.view([1, self.insize[0], 1,1]).expand_as(input)
76 | if len(self.insize) == 2:
77 | current_mean = self.running_mean.view([1, self.insize[0],1]).expand_as(input)
78 | current_var = self.running_var.view([1, self.insize[0],1]).expand_as(input)
79 | if len(self.insize) == 1:
80 | current_mean = self.running_mean.view([1, self.insize[0]]).expand_as(input)
81 | current_var = self.running_var.view([1, self.insize[0]]).expand_as(input)
82 | else:
83 | current_mean = self.running_mean
84 | current_var = self.running_var
85 | # get output
86 |
87 | if unnorm:
88 | y = torch.clamp(input, min=-5.0, max=5.0)
89 | y = torch.sqrt(current_var.float() +
90 | self.epsilon) * y + current_mean.float()
91 | else:
92 | if self.norm_only:
93 | y = input / torch.sqrt(current_var.float() + self.epsilon)
94 | else:
95 | y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
96 | y = torch.clamp(y, min=-5.0, max=5.0)
97 |
98 | # update After normalization, so that the values used for training and testing are the same.
99 | if self.training and not self.frozen:
100 | mean = input.mean(self.axis) # along channel axis
101 | var = input.var(self.axis)
102 | new_mean, new_var, new_count = self._update_mean_var_count_from_moments(self.running_mean, self.running_var, self.count, mean, var, input.size()[0])
103 | if self.frozen_partial:
104 | # Only update the last bit (futures)
105 | self.running_mean[-self.diff:], self.running_var[-self.diff:], self.count = new_mean[-self.diff:], new_var[-self.diff:], new_count
106 | else:
107 | self.running_mean, self.running_var, self.count = new_mean, new_var, new_count
108 |
109 | return y
110 |
111 |
112 | class RunningMeanStdObs(nn.Module):
113 |
114 | def __init__(self,
115 | insize,
116 | epsilon=1e-05,
117 | per_channel=False,
118 | norm_only=False):
119 | assert isinstance(insize, dict)
120 | super(RunningMeanStdObs, self).__init__()
121 | self.running_mean_std = nn.ModuleDict({
122 | k: RunningMeanStd(v, epsilon, per_channel, norm_only)
123 | for k, v in insize.items()
124 | })
125 |
126 | def forward(self, input, unnorm=False):
127 | res = {k: self.running_mean_std[k](v, unnorm) for k, v in input.items()}
128 | return res
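_update_mean_var_count_from_moments is the standard parallel merge of two sets of moments (Chan et al.); note that the formula assumes population (biased) variances. It can be checked against a direct computation on the concatenated data:

    import torch

    a, b = torch.randn(100), torch.randn(50) + 2.0

    mean, var, count = a.mean(), a.var(unbiased=False), a.numel()
    b_mean, b_var, b_count = b.mean(), b.var(unbiased=False), b.numel()

    delta = b_mean - mean
    tot = count + b_count
    new_mean = mean + delta * b_count / tot
    m2 = var * count + b_var * b_count + delta ** 2 * count * b_count / tot
    new_var = m2 / tot

    full = torch.cat([a, b])
    assert torch.allclose(new_mean, full.mean(), atol=1e-5)
    assert torch.allclose(new_var, full.var(unbiased=False), atol=1e-5)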
--------------------------------------------------------------------------------
/phc/phc/learning/unrealego/unrealego_heatmap_shared_model.py:
--------------------------------------------------------------------------------
3 | import torch
4 | import torch.nn as nn
5 | from torch.autograd import Variable
6 | from torch.cuda.amp import autocast, GradScaler
7 | from torch.nn import MSELoss
8 |
9 | import itertools
10 | from .base_model import BaseModel
11 | from . import network
12 | from utils.loss import LossFuncLimb, LossFuncCosSim, LossFuncMPJPE
13 | from utils.util import batch_compute_similarity_transform_torch
14 |
15 |
16 | class UnrealEgoHeatmapSharedModel(BaseModel):
17 | def name(self):
18 | return 'UnrealEgo Heatmap Shared model'
19 |
20 | def initialize(self, opt):
21 | BaseModel.initialize(self, opt)
22 |
23 | self.opt = opt
24 | self.scaler = GradScaler(enabled=opt.use_amp)
25 |
26 | self.loss_names = [
27 | 'heatmap_left', 'heatmap_right',
28 | ]
29 |
30 | self.visual_names = [
31 | 'input_rgb_left', 'input_rgb_right',
32 | 'pred_heatmap_left', 'pred_heatmap_right',
33 | 'gt_heatmap_left', 'gt_heatmap_right',
34 | ]
35 |
36 | self.visual_pose_names = [
37 | ]
38 |
39 | self.model_names = ['HeatMap']  # same network is used for train and eval
43 |
44 | self.eval_key = "mse_heatmap"
45 | self.cm2mm = 10
46 |
47 |
48 | # define the transform network
49 | print(opt.model)
50 | self.net_HeatMap = network.define_HeatMap(opt, model=opt.model)
51 |
52 | if self.isTrain:
53 | # define loss functions
54 | self.lossfunc_MSE = MSELoss()
55 |
56 | # initialize optimizers
57 | self.optimizer_HeatMap = torch.optim.Adam(
58 | params=self.net_HeatMap.parameters(),
59 | lr=opt.lr,
60 | weight_decay=opt.weight_decay
61 | )
62 |
63 | self.optimizers = []
64 | self.schedulers = []
65 | self.optimizers.append(self.optimizer_HeatMap)
66 | for optimizer in self.optimizers:
67 | self.schedulers.append(network.get_scheduler(optimizer, opt))
68 |
69 | # if not self.isTrain or opt.continue_train:
70 | # self.load_networks(opt.which_epoch)
71 |
72 | def set_input(self, data):
73 | self.data = data
74 | self.input_rgb_left = data['input_rgb_left'].cuda(self.device)
75 | self.input_rgb_right = data['input_rgb_right'].cuda(self.device)
76 | self.gt_heatmap_left = data['gt_heatmap_left'].cuda(self.device)
77 | self.gt_heatmap_right = data['gt_heatmap_right'].cuda(self.device)
78 |
79 | def forward(self):
80 | with autocast(enabled=self.opt.use_amp):
81 | # estimate stereo heatmaps
82 | pred_heatmap_cat = self.net_HeatMap(self.input_rgb_left, self.input_rgb_right)
83 | self.pred_heatmap_left, self.pred_heatmap_right = torch.chunk(pred_heatmap_cat, 2, dim=1)
84 |
85 | def backward_HeatMap(self):
86 | with autocast(enabled=self.opt.use_amp):
87 | loss_heatmap_left = self.lossfunc_MSE(
88 | self.pred_heatmap_left, self.gt_heatmap_left
89 | )
90 | loss_heatmap_right = self.lossfunc_MSE(
91 | self.pred_heatmap_right, self.gt_heatmap_right
92 | )
93 |
94 | self.loss_heatmap_left = loss_heatmap_left * self.opt.lambda_heatmap
95 | self.loss_heatmap_right = loss_heatmap_right * self.opt.lambda_heatmap
96 |
97 | loss_total = self.loss_heatmap_left + self.loss_heatmap_right
98 |
99 | self.scaler.scale(loss_total).backward()
100 |
101 | def optimize_parameters(self):
102 |
103 | # set model trainable
104 | self.net_HeatMap.train()
105 |
106 | # set optimizer.zero_grad()
107 | self.optimizer_HeatMap.zero_grad()
108 |
109 | # forward
110 | self.forward()
111 |
112 | # backward
113 | self.backward_HeatMap()
114 |
115 | # optimizer step
116 | self.scaler.step(self.optimizer_HeatMap)
117 |
118 | self.scaler.update()
119 |
120 | def evaluate(self, running_average_dict):
121 | # set evaluation mode
122 | self.net_HeatMap.eval()
123 |
124 | # forward pass
125 | pred_heatmap_cat = self.net_HeatMap(self.input_rgb_left, self.input_rgb_right)
126 | self.pred_heatmap_left, self.pred_heatmap_right = torch.chunk(pred_heatmap_cat, 2, dim=1)
127 |
128 | # compute metrics
129 | for id in range(self.pred_heatmap_left.size()[0]): # batch size
130 | # calculate mse loss for heatmap
131 | loss_heatmap_left_id = self.lossfunc_MSE(
132 | self.pred_heatmap_left[id], self.gt_heatmap_left[id]
133 | )
134 | loss_heatmap_right_id = self.lossfunc_MSE(
135 | self.pred_heatmap_right[id], self.gt_heatmap_right[id]
136 | )
137 |
138 | mse_heatmap = loss_heatmap_left_id + loss_heatmap_right_id
139 |
140 | # update metrics dict
141 | running_average_dict.update(dict(
142 | mse_heatmap=mse_heatmap
143 | )
144 | )
145 |
146 | return running_average_dict
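optimize_parameters above follows the canonical torch.cuda.amp recipe: forward under autocast, backward on the scaled loss, then scaler.step and scaler.update. The same ordering in a self-contained sketch (requires a CUDA device; toy model and data):

    import torch
    import torch.nn as nn
    from torch.cuda.amp import autocast, GradScaler

    net = nn.Linear(8, 2).cuda()
    opt = torch.optim.Adam(net.parameters(), lr=1e-3)
    scaler = GradScaler(enabled=True)

    x, y = torch.randn(4, 8).cuda(), torch.randn(4, 2).cuda()
    opt.zero_grad()
    with autocast(enabled=True):      # forward in mixed precision
        loss = nn.functional.mse_loss(net(x), y)
    scaler.scale(loss).backward()     # scale the loss to avoid fp16 gradient underflow
    scaler.step(opt)                  # unscales gradients, skips the step on inf/nan
    scaler.update()                   # adapt the scale factor for the next iteration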
--------------------------------------------------------------------------------
/scripts/data_process/process_amass_raw.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
4 | # holder of all proprietary rights on this computer program.
5 | # You can only use this computer program if you have closed
6 | # a license agreement with MPG or you get the right to use the computer
7 | # program from someone who is authorized to grant you that right.
8 | # Any use of the computer program without a valid license is prohibited and
9 | # liable to prosecution.
10 | #
11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
13 | # for Intelligent Systems. All rights reserved.
14 | #
15 | # Contact: ps-license@tuebingen.mpg.de
16 | import glob
17 | import os
18 | import sys
19 | import pdb
20 | import os.path as osp
21 |
22 | sys.path.append(os.getcwd())
23 |
24 |
25 | import os
26 | import joblib
27 | import argparse
28 | import numpy as np
29 | import os.path as osp
30 | from tqdm import tqdm
31 | from pathlib import Path
32 |
33 | dict_keys = ["betas", "dmpls", "gender", "mocap_framerate", "poses", "trans"]
34 |
35 | # extract SMPL joints from SMPL-H model
36 | joints_to_use = np.array(
37 | [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
38 | 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 37]
39 | )
64 | joints_to_use = np.arange(0, 156).reshape((-1, 3))[joints_to_use].reshape(-1)
65 |
66 | all_sequences = [
67 | "ACCAD",
68 | "BMLmovi",
69 | "BioMotionLab_NTroje",
70 | "CMU",
71 | "DFaust_67",
72 | "EKUT",
73 | "Eyes_Japan_Dataset",
74 | "HumanEva",
75 | "KIT",
76 | "MPI_HDM05",
77 | "MPI_Limits",
78 | "MPI_mosh",
79 | "SFU",
80 | "SSM_synced",
81 | "TCD_handMocap",
82 | "TotalCapture",
83 | "Transitions_mocap",
84 | "BMLhandball",
85 | "DanceDB"
86 | ]
87 |
88 | def read_data(folder, sequences):
89 | # sequences = [osp.join(folder, x) for x in sorted(os.listdir(folder)) if osp.isdir(osp.join(folder, x))]
90 |
91 | if sequences == "all":
92 | sequences = all_sequences
93 |
94 | db = {}
95 | print(folder)
96 | for seq_name in sequences:
97 | print(f"Reading {seq_name} sequence...")
98 | seq_folder = osp.join(folder, seq_name)
99 |
100 | datas = read_single_sequence(seq_folder, seq_name)
101 | db.update(datas)
102 | print(seq_name, "number of seqs", len(datas))
103 |
104 | return db
105 |
106 |
107 | def read_single_sequence(folder, seq_name):
108 | subjects = os.listdir(folder)
109 |
110 | datas = {}
111 |
112 | for subject in tqdm(subjects):
113 | actions = [
114 | x for x in os.listdir(osp.join(folder, subject)) if x.endswith(".npz") and osp.isdir(osp.join(folder, subject))
115 | ]
116 |
117 | for action in actions:
118 | fname = osp.join(folder, subject, action)
119 |
120 | if fname.endswith("shape.npz"):
121 | continue
122 |
123 | data = dict(np.load(fname))
124 | # data['poses'] = pose = data['poses'][:, joints_to_use]
125 |
126 | # shape = np.repeat(data['betas'][:10][np.newaxis], pose.shape[0], axis=0)
127 | # theta = np.concatenate([pose,shape], axis=1)
128 | vid_name = f"{seq_name}_{subject}_{action[:-4]}"
129 |
130 | datas[vid_name] = data
131 | # thetas.append(theta)
132 |
133 | return datas
134 |
135 |
136 | def read_seq_data(folder, nsubjects, fps):
137 | subjects = os.listdir(folder)
138 | sequences = {}
139 |
140 | assert nsubjects < len(subjects), "nsubjects should be less than len(subjects)"
141 |
142 | for subject in subjects[:nsubjects]:
143 | actions = os.listdir(osp.join(folder, subject))
144 |
145 | for action in actions:
146 | data = np.load(osp.join(folder, subject, action))
147 | mocap_framerate = int(data["mocap_framerate"])
148 | sampling_freq = mocap_framerate // fps
149 | sequences[(subject, action)] = data["poses"][
150 | 0::sampling_freq, joints_to_use
151 | ]
152 |
153 | train_set = {}
154 | test_set = {}
155 |
156 | for i, (k, v) in enumerate(sequences.items()):
157 | if i < len(sequences.keys()) - len(sequences.keys()) // 4:
158 | train_set[k] = v
159 | else:
160 | test_set[k] = v
161 |
162 | return train_set, test_set
163 |
164 |
165 | if __name__ == "__main__":
166 | parser = argparse.ArgumentParser()
167 | parser.add_argument(
168 | "--dir", type=str, help="dataset directory", default="data/amass"
169 | )
170 | parser.add_argument(
171 | "--out_dir", type=str, help="dataset directory", default="out"
172 | )
173 |
174 | args = parser.parse_args()
175 | out_path = Path(args.out_dir)
176 | out_path.mkdir(exist_ok=True)
177 | db_file = osp.join(out_path, "amass_db_smplh.pt")
178 |
179 | db = read_data(args.dir, sequences=all_sequences)
180 |
181 |
182 | print(f"Saving AMASS dataset to {db_file}")
183 | joblib.dump(db, db_file)
184 |
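With AMASS extracted per-dataset under data/amass (the argparse defaults above), the expected invocation from the repo root is:

    python scripts/data_process/process_amass_raw.py --dir data/amass --out_dir out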
--------------------------------------------------------------------------------
/scripts/data_process/convert_data_smpl.py:
--------------------------------------------------------------------------------
2 | import torch
3 | import joblib
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | from scipy import ndimage
7 | from scipy.spatial.transform import Rotation as sRot
8 | import glob
9 | import os
10 | import sys
11 | import pdb
12 | import os.path as osp
13 |
14 | sys.path.append(os.getcwd())
15 |
16 | from uhc.khrylib.utils import get_body_qposaddr
17 | from uhc.smpllib.smpl_mujoco import SMPL_BONE_ORDER_NAMES as joint_names
18 | from uhc.smpllib.smpl_local_robot import SMPL_Robot as LocalRobot
19 | import scipy.ndimage.filters as filters
20 | from typing import List, Optional
21 | from tqdm import tqdm
22 | from smpl_sim.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState
23 |
24 | robot_cfg = {
25 | "mesh": False,
26 | "model": "smpl",
27 | "upright_start": True,
28 | "body_params": {},
29 | "joint_params": {},
30 | "geom_params": {},
31 | "actuator_params": {},
32 | }
33 | print(robot_cfg)
34 |
35 | smpl_local_robot = LocalRobot(
36 | robot_cfg,
37 | data_dir="data/smpl",
38 | )
39 |
40 | amass_data = joblib.load("insert_your_data")
41 |
42 | double = False
43 |
44 | mujoco_joint_names = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
45 |
46 |
47 |
48 | amass_remove_data = []
49 |
50 | full_motion_dict = {}
51 | for key_name in tqdm(amass_data.keys()):
52 | smpl_data_entry = amass_data[key_name]
53 | B = smpl_data_entry['pose_aa'].shape[0]
54 |
55 | start, end = 0, 0
56 |
57 | pose_aa = smpl_data_entry['pose_aa'].copy()[start:]
58 | root_trans = smpl_data_entry['trans'].copy()[start:]
59 | B = pose_aa.shape[0]
60 |
61 | beta = smpl_data_entry['beta'].copy() if "beta" in smpl_data_entry else smpl_data_entry['betas'].copy()
62 | if len(beta.shape) == 2:
63 | beta = beta[0]
64 |
65 | gender = smpl_data_entry.get("gender", "neutral")
66 | fps = smpl_data_entry.get("fps", 30.0)
67 |
68 | if isinstance(gender, np.ndarray):
69 | gender = gender.item()
70 |
71 | if isinstance(gender, bytes):
72 | gender = gender.decode("utf-8")
73 | if gender == "neutral":
74 | gender_number = [0]
75 | elif gender == "male":
76 | gender_number = [1]
77 | elif gender == "female":
78 | gender_number = [2]
79 | else:
80 | import ipdb
81 | ipdb.set_trace()
82 | raise Exception("Gender Not Supported!!")
83 |
84 | smpl_2_mujoco = [joint_names.index(q) for q in mujoco_joint_names if q in joint_names]
85 | batch_size = pose_aa.shape[0]
86 | pose_aa = np.concatenate([pose_aa[:, :66], np.zeros((batch_size, 6))], axis=1)
87 | pose_aa_mj = pose_aa.reshape(-1, 24, 3)[..., smpl_2_mujoco, :].copy()
88 |
89 | num = 1
90 | if double:
91 | num = 2
92 | for idx in range(num):
93 | pose_quat = sRot.from_rotvec(pose_aa_mj.reshape(-1, 3)).as_quat().reshape(batch_size, 24, 4)
94 |
95 | gender_number, beta[:], gender = [0], 0, "neutral"
96 | print("using neutral model")
97 |
98 | smpl_local_robot.load_from_skeleton(betas=torch.from_numpy(beta[None,]), gender=gender_number, objs_info=None)
99 | smpl_local_robot.write_xml("egoquest/data/assets/mjcf/smpl_humanoid_1.xml")
100 | skeleton_tree = SkeletonTree.from_mjcf("egoquest/data/assets/mjcf/smpl_humanoid_1.xml")
101 |
102 | root_trans_offset = torch.from_numpy(root_trans) + skeleton_tree.local_translation[0]
103 |
104 | new_sk_state = SkeletonState.from_rotation_and_root_translation(
105 | skeleton_tree, # This is the wrong skeleton tree (location wise) here, but it's fine since we only use the parent relationship here.
106 | torch.from_numpy(pose_quat),
107 | root_trans_offset,
108 | is_local=True)
109 |
110 | if robot_cfg['upright_start']:
111 | pose_quat_global = (sRot.from_quat(new_sk_state.global_rotation.reshape(-1, 4).numpy()) * sRot.from_quat([0.5, 0.5, 0.5, 0.5]).inv()).as_quat().reshape(B, -1, 4) # should fix pose_quat as well here...
112 |
113 | new_sk_state = SkeletonState.from_rotation_and_root_translation(skeleton_tree, torch.from_numpy(pose_quat_global), root_trans_offset, is_local=False)
114 | pose_quat = new_sk_state.local_rotation.numpy()
115 |
116 | ############################################################
117 | # key_name_dump = key_name + f"_{idx}"
118 | key_name_dump = key_name
119 | if idx == 1:
120 | left_to_right_index = [0, 5, 6, 7, 8, 1, 2, 3, 4, 9, 10, 11, 12, 13, 19, 20, 21, 22, 23, 14, 15, 16, 17, 18]
121 | pose_quat_global = pose_quat_global[:, left_to_right_index]
122 | pose_quat_global[..., 0] *= -1
123 | pose_quat_global[..., 2] *= -1
124 |
125 | root_trans_offset[..., 1] *= -1
126 | ############################################################
127 |
128 | new_motion_out = {}
129 | new_motion_out['pose_quat_global'] = pose_quat_global
130 | new_motion_out['pose_quat'] = pose_quat
131 | new_motion_out['trans_orig'] = root_trans
132 | new_motion_out['root_trans_offset'] = root_trans_offset
133 | new_motion_out['beta'] = beta
134 | new_motion_out['gender'] = gender
135 | new_motion_out['pose_aa'] = pose_aa
136 | new_motion_out['fps'] = fps
137 | full_motion_dict[key_name_dump] = new_motion_out
138 |
139 | import ipdb; ipdb.set_trace()
140 | joblib.dump(full_motion_dict, "insert_your_data")
141 |
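The upright_start branch above composes every global rotation with the inverse of the quaternion [0.5, 0.5, 0.5, 0.5]. That quaternion is a 120-degree rotation about (1, 1, 1), i.e. a cyclic permutation of the coordinate axes, which (as I read it) re-expresses the y-up SMPL convention in the simulator's z-up, upright-start frame. A quick check of the axis mapping:

    import numpy as np
    from scipy.spatial.transform import Rotation as sRot

    q = sRot.from_quat([0.5, 0.5, 0.5, 0.5])  # 120 degrees about (1, 1, 1)
    print(np.round(q.apply(np.eye(3))))       # rows: x -> y, y -> z, z -> x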
--------------------------------------------------------------------------------
/phc/phc/learning/amp_models.py:
--------------------------------------------------------------------------------
1 | # This is the overall forward pass of the model.
2 |
3 | import torch.nn as nn
4 | from rl_games.algos_torch.models import ModelA2CContinuousLogStd
5 | import torch
6 | class ModelAMPContinuous(ModelA2CContinuousLogStd):
7 | def __init__(self, network):
8 | super().__init__(network)
9 | return
10 |
11 | def build(self, config):
12 | net = self.network_builder.build('amp', **config)
13 | for name, _ in net.named_parameters():
14 | print(name)
15 | return ModelAMPContinuous.Network(net)
16 |
17 | class Network(ModelA2CContinuousLogStd.Network):
18 | def __init__(self, a2c_network):
19 | super().__init__(a2c_network)
20 |
21 | return
22 |
23 | def forward(self, input_dict):
24 | is_train = input_dict.get('is_train', True)
25 | amp_steps = input_dict.get("amp_steps", 2)
26 |
27 |
28 | result = super().forward(input_dict)
29 |
30 | if (is_train):
31 | amp_obs, amp_obs_replay, amp_demo_obs = input_dict['amp_obs'], input_dict['amp_obs_replay'], input_dict['amp_obs_demo']
32 |
33 | disc_agent_logit = self.a2c_network.eval_disc(amp_obs)
34 | result["disc_agent_logit"] = disc_agent_logit
35 |
36 | disc_agent_replay_logit = self.a2c_network.eval_disc(amp_obs_replay)
37 | result["disc_agent_replay_logit"] = disc_agent_replay_logit
38 |
39 | disc_demo_logit = self.a2c_network.eval_disc(amp_demo_obs)
40 | result["disc_demo_logit"] = disc_demo_logit
41 |
42 | # # HACK....
43 | # if input_dict.get("compute_direct_logit", False):
44 | # from phc.utils.torch_utils import project_to_norm
45 | # import ipdb; ipdb.set_trace()
46 | # mus = project_to_norm(result['mus'], input_dict.get("embedding_norm", 1.0))
47 | # mus = mus.view(-1, 32, 64)
48 | # mus = mus.reshape(-1, 2048)
49 | # result['disc_direct_logit'] = self.a2c_network.eval_disc(mus)
50 |
51 |
52 | # amp_obs.requires_grad_(True)
53 | # disc_agent_logit = self.a2c_network.eval_disc(amp_obs)
54 | # import ipdb; ipdb.set_trace()
55 | # torch.autograd.grad(disc_agent_logit, amp_obs, grad_outputs=torch.ones_like(disc_agent_logit), create_graph=False, retain_graph=True, only_inputs=True)
56 | # torch.autograd.grad(disc_agent_replay_logit, amp_obs_replay, grad_outputs=torch.ones_like(disc_agent_replay_logit), create_graph=False, retain_graph=True, only_inputs=True)
57 | # torch.autograd.grad(disc_demo_logit, amp_demo_obs, grad_outputs=torch.ones_like(disc_demo_logit), create_graph=False, retain_graph=True, only_inputs=True)
58 | # (1 / (1 + torch.exp(-disc_demo_logit)))[:50]
59 |
60 | return result
61 |
62 | def dropout_amp_obs(self, amp_obs, dropout_mask):
63 | return amp_obs * dropout_mask
64 |
65 | def get_dropout_mask(self,
66 | amp_obs,
67 | steps,
68 | num_masks=3,
69 | dropout_rate=0.3):
70 | # ZL Hack: amp_obs_dims, should drop out whole joints
71 | # [root_rot 6, root_vel 3, root_ang_vel 3, dof_pos 23 * 6 - 4 * 6, dof_vel 69 - 12, key_body_pos 3 * 4, shape_obs_disc 11]
72 | # [root_rot 6, root_vel 3, root_ang_vel 3, dof_pos 23 * 6 - 4 * 6, dof_vel 69 - 12, key_body_pos 3 * 4, shape_obs_disc 47]
73 | # 6 + 3 + 3 + 19 * 6 + 19 * 3 + 3 * 4 + 11 = 206
74 | # 6 + 3 + 3 + 19 * 6 + 19 * 3 + 3 * 4 = 195 # mean body
75 | # 6 + 3 + 3 + 19 * 6 + 19 * 3 + 3 * 4 = 196 # mean body + height
76 | # 1 + 6 + 3 + 3 + 19 * 6 + 19 * 3 + 3 * 4 + 11 = 207 # shape body + height
77 | # 6 + 3 + 3 + 19 * 6 + 19 * 3 + 3 * 4 + 10 = 205 # concise limb weight
78 | # 6 + 3 + 3 + 19 * 6 + 19 * 3 + 3 * 4 + 47 = 242 # full limb weight
79 | # 6 + 3 + 3 + 19 * 6 + 19 * 3 + 3 * 4 + 59 = 254 - masterfoot
80 | B, F = amp_obs.shape
81 | B, _, amp_f = amp_obs.view(B, steps, -1).shape
82 | try:
83 | assert (F / steps == 205 or F / steps == 254 or F / steps == 242 or F / steps == 206 or F / steps == 197 or F / steps == 188 or F / steps == 195 or F / steps == 196 or F / steps == 207)
84 | except:
85 | print(F/steps)
86 | import ipdb; ipdb.set_trace()
87 | print(F/steps)
88 |
89 | dof_joints_offset = 12 # 6 + 3 + 3
90 | num_joints = 19
91 |
92 | if F / steps == 197: # Remove neck
93 | num_joints = 18
94 | elif F / steps == 188: # Remove hands
95 | num_joints = 17
96 | elif F / steps == 196 or F / steps == 207:
97 | dof_joints_offset = 13 # 1 + 6 + 3 + 3
98 |
99 | dof_vel_offsets = dof_joints_offset + num_joints * 6 # 12 + 19 * 6
100 |
101 | dropout_mask = torch.ones([B, amp_f, num_masks])
102 |
103 | for idx_joint in range(num_joints):
104 | has_drop_out = torch.rand(B, num_masks) > dropout_rate
105 | dropout_mask[:, dof_joints_offset + idx_joint * 6 : dof_joints_offset + idx_joint * 6 + 6, :] = has_drop_out[:, None]
106 | dropout_mask[:, dof_vel_offsets + idx_joint * 3 : dof_vel_offsets + idx_joint * 3 + 3, :] = has_drop_out[:, None]
107 | return dropout_mask.repeat(1, steps, 1).to(amp_obs)
108 |
109 |
110 |
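get_dropout_mask zeroes whole joints at a time: a dropped joint loses both its 6-dim rotation block and its 3-dim velocity block, and the per-frame mask is then repeated across the stacked steps frames. A toy illustration with made-up offsets (2 joints instead of 19):

    import torch

    B, steps, num_masks = 2, 2, 1
    dof_offset, num_joints = 2, 2                 # toy layout: 2 root dims, 2 joints
    vel_offset = dof_offset + num_joints * 6
    amp_f = vel_offset + num_joints * 3           # per-frame feature size

    mask = torch.ones(B, amp_f, num_masks)
    for j in range(num_joints):
        keep = (torch.rand(B, num_masks) > 0.3).float()
        mask[:, dof_offset + j * 6:dof_offset + (j + 1) * 6, :] = keep[:, None]
        mask[:, vel_offset + j * 3:vel_offset + (j + 1) * 3, :] = keep[:, None]

    amp_obs = torch.randn(B, steps * amp_f)
    dropped = amp_obs * mask.repeat(1, steps, 1)[..., 0]  # apply the first mask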
--------------------------------------------------------------------------------
/scripts/data_process/grad_fit_g1_shape.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import sys
4 | import pdb
5 | import os.path as osp
6 | sys.path.append(os.getcwd())
7 |
8 | from phc.utils import torch_utils
9 | from smpl_sim.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState
10 | from scipy.spatial.transform import Rotation as sRot
11 | import numpy as np
12 | import torch
13 | from smpl_sim.smpllib.smpl_parser import (
14 | SMPL_Parser,
15 | SMPLH_Parser,
16 | SMPLX_Parser,
17 | )
18 |
19 | import joblib
20 | import torch
21 | import torch.nn.functional as F
22 | import math
23 | from phc.utils.pytorch3d_transforms import axis_angle_to_matrix
24 | from torch.autograd import Variable
25 | from scipy.ndimage import gaussian_filter1d
26 | from tqdm.notebook import tqdm
27 | from smpl_sim.smpllib.smpl_joint_names import SMPL_MUJOCO_NAMES, SMPL_BONE_ORDER_NAMES, SMPLH_BONE_ORDER_NAMES, SMPLH_MUJOCO_NAMES
28 | from phc.utils.torch_g1_humanoid_batch import Humanoid_Batch, G1_ROTATION_AXIS
29 |
30 | # h1_joint_names = [ 'pelvis',
31 | # 'left_hip_yaw_link', 'left_hip_roll_link','left_hip_pitch_link', 'left_knee_link', 'left_ankle_link',
32 | # 'right_hip_yaw_link', 'right_hip_roll_link', 'right_hip_pitch_link', 'right_knee_link', 'right_ankle_link',
33 | # 'torso_link', 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link',
34 | # 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link']
35 |
36 | g1_joint_names = [ 'pelvis',
37 | 'left_hip_pitch_link', 'left_hip_roll_link','left_hip_yaw_link', 'left_knee_link', 'left_ankle_pitch_link','left_ankle_roll_link',
38 | 'right_hip_pitch_link', 'right_hip_roll_link', 'right_hip_yaw_link', 'right_knee_link', 'right_ankle_pitch_link','right_ankle_roll_link',
39 | 'waist_yaw_link', 'waist_roll_link', 'torso_link', 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link', 'left_wrist_roll_link', 'left_wrist_pitch_link', 'left_wrist_yaw_link',
40 | 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link', 'right_wrist_roll_link', 'right_wrist_pitch_link', 'right_wrist_yaw_link']
41 |
42 |
43 | g1_fk = Humanoid_Batch(extend_head=True) # load forward kinematics model
44 | #### Define correspondences between g1 and smpl joints
45 | g1_joint_names_augment = g1_joint_names + ["left_hand_link", "right_hand_link", "head_link"]
46 | g1_joint_pick = ['pelvis', 'left_hip_yaw_link', "left_knee_link", "left_ankle_pitch_link", 'right_hip_yaw_link', 'right_knee_link', 'right_ankle_pitch_link', "left_shoulder_roll_link", "left_elbow_link", "left_hand_link", "right_shoulder_roll_link", "right_elbow_link", "right_hand_link", "head_link"]
47 | smpl_joint_pick = ["Pelvis", "L_Hip", "L_Knee", "L_Ankle", "R_Hip", "R_Knee", "R_Ankle", "L_Shoulder", "L_Elbow", "L_Hand", "R_Shoulder", "R_Elbow", "R_Hand", "Head"]
48 | g1_joint_pick_idx = [ g1_joint_names_augment.index(j) for j in g1_joint_pick]
49 | smpl_joint_pick_idx = [SMPL_BONE_ORDER_NAMES.index(j) for j in smpl_joint_pick]
50 |
51 |
52 | #### Prepare fitting variables
53 | device = torch.device("cpu")
54 | pose_aa_g1 = np.repeat(np.repeat(sRot.identity().as_rotvec()[None, None, None, ], 32, axis = 2), 1, axis = 1)
55 | pose_aa_g1 = torch.from_numpy(pose_aa_g1).float()
56 |
57 | dof_pos = torch.zeros((1, 29))
58 | pose_aa_g1 = torch.cat([torch.zeros((1, 1, 3)), G1_ROTATION_AXIS * dof_pos[..., None], torch.zeros((1, 2, 3))], axis = 1)
59 |
60 |
61 | root_trans = torch.zeros((1, 1, 3))
62 |
63 | ###### prepare SMPL default pose for G1
64 | pose_aa_stand = np.zeros((1, 72))
65 | rotvec = sRot.from_quat([0.5, 0.5, 0.5, 0.5]).as_rotvec()
66 | pose_aa_stand[:, :3] = rotvec
67 | pose_aa_stand = pose_aa_stand.reshape(-1, 24, 3)
68 | pose_aa_stand[:, SMPL_BONE_ORDER_NAMES.index('L_Shoulder')] = sRot.from_euler("xyz", [0, 0, -np.pi/2], degrees = False).as_rotvec()
69 | pose_aa_stand[:, SMPL_BONE_ORDER_NAMES.index('R_Shoulder')] = sRot.from_euler("xyz", [0, 0, np.pi/2], degrees = False).as_rotvec()
70 | pose_aa_stand[:, SMPL_BONE_ORDER_NAMES.index('L_Elbow')] = sRot.from_euler("xyz", [0, -np.pi/2, 0], degrees = False).as_rotvec()
71 | pose_aa_stand[:, SMPL_BONE_ORDER_NAMES.index('R_Elbow')] = sRot.from_euler("xyz", [0, np.pi/2, 0], degrees = False).as_rotvec()
72 | pose_aa_stand = torch.from_numpy(pose_aa_stand.reshape(-1, 72))
73 |
74 | smpl_parser_n = SMPL_Parser(model_path="data/smpl", gender="neutral")
75 |
76 | ###### Shape fitting
77 | trans = torch.zeros([1, 3])
78 | beta = torch.zeros([1, 10])
79 | verts, joints = smpl_parser_n.get_joints_verts(pose_aa_stand, beta , trans)
80 | offset = joints[:, 0] - trans
81 | root_trans_offset = trans + offset
82 |
83 | fk_return = g1_fk.fk_batch(pose_aa_g1[None, ], root_trans_offset[None, 0:1])
84 |
85 | shape_new = Variable(torch.zeros([1, 10]).to(device), requires_grad=True)
86 | scale = Variable(torch.ones([1]).to(device), requires_grad=True)
87 | optimizer_shape = torch.optim.Adam([shape_new, scale],lr=0.1)
88 |
89 |
90 | for iteration in range(1000):
91 | verts, joints = smpl_parser_n.get_joints_verts(pose_aa_stand, shape_new, trans[0:1])
92 | root_pos = joints[:, 0]
93 | joints = (joints - joints[:, 0]) * scale + root_pos
94 | diff = fk_return.global_translation_extend[:, :, g1_joint_pick_idx] - joints[:, smpl_joint_pick_idx]
95 | loss_g = diff.norm(dim = -1).mean()
96 | loss = loss_g
97 | if iteration % 100 == 0:
98 | print(iteration, loss.item() * 1000)
99 |
100 | optimizer_shape.zero_grad()
101 | loss.backward()
102 | optimizer_shape.step()
103 |
104 | os.makedirs("data/g1", exist_ok=True)
105 | joblib.dump((shape_new.detach(), scale), "data/g1/shape_optimized_v3.pkl")
106 | print("shape fitted and saved to data/g1/shape_optimized_v3.pkl")
--------------------------------------------------------------------------------
/phc/phc/learning/pnn.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | import torch
4 | import torch.nn as nn
5 | from phc.learning.network_builder import NetworkBuilder
6 | from collections import defaultdict
7 | from rl_games.algos_torch import torch_ext
8 | from tqdm import tqdm
9 |
10 |
11 | class PNN(NetworkBuilder.BaseNetwork):
12 |
13 | def __init__(self, mlp_args, output_size=69, numCols=4, has_lateral=True):
14 | super(PNN, self).__init__()
15 | self.numCols = numCols
16 | units = mlp_args['units']
17 | dense_func = mlp_args['dense_func']
18 | self.has_lateral = has_lateral
19 |
20 | self.actors = nn.ModuleList()
21 | for i in range(numCols):
22 | mlp = self._build_sequential_mlp(output_size, **mlp_args)
23 | self.actors.append(mlp)
24 |
25 | if self.has_lateral:
26 |
27 | self.u = nn.ModuleList()
28 |
29 | for i in range(numCols - 1):
30 | self.u.append(nn.ModuleList())
31 | for j in range(i + 1):
32 | u = nn.Sequential()
33 | in_size = units[0]
34 | for unit in units[1:]:
35 | u.append(dense_func(in_size, unit, bias=False))
36 | in_size = unit
37 | u.append(dense_func(units[-1], output_size, bias=False))
38 | # torch.nn.init.zeros_(u[-1].weight)
39 | self.u[i].append(u)
40 |
41 | def freeze_pnn(self, idx):
42 | for param in self.actors[:idx].parameters():
43 | param.requires_grad = False
44 | if self.has_lateral:
45 | for param in self.u[:idx - 1].parameters():
46 | param.requires_grad = False
47 |
48 | def load_base_net(self, model_path, actors=1):
49 | checkpoint = torch_ext.load_checkpoint(model_path)
50 | for idx in range(actors):
51 | self.load_actor(checkpoint, idx)
52 |
53 | def load_actor(self, checkpoint, idx=0):
54 | state_dict = self.actors[idx].state_dict()
55 | state_dict['0.weight'].copy_(checkpoint['model']['a2c_network.actor_mlp.0.weight'])
56 | state_dict['0.bias'].copy_(checkpoint['model']['a2c_network.actor_mlp.0.bias'])
57 | state_dict['2.weight'].copy_(checkpoint['model']['a2c_network.actor_mlp.2.weight'])
58 | state_dict['2.bias'].copy_(checkpoint['model']['a2c_network.actor_mlp.2.bias'])
59 | state_dict['4.weight'].copy_(checkpoint['model']['a2c_network.mu.weight'])
60 | state_dict['4.bias'].copy_(checkpoint['model']['a2c_network.mu.bias'])
61 |
62 | def _build_sequential_mlp(self, actions_num, input_size, units, activation, dense_func, norm_only_first_layer=False, norm_func_name=None, need_norm = True):
63 | print('build mlp:', input_size)
64 | in_size = input_size
65 | layers = []
66 | for unit in units:
67 | layers.append(dense_func(in_size, unit))
68 | layers.append(self.activations_factory.create(activation))
69 | in_size = unit # must be updated even when the norm block below is skipped
70 |
71 | if not need_norm:
72 | continue
73 | if norm_only_first_layer and norm_func_name is not None:
74 | need_norm = False
75 | if norm_func_name == 'layer_norm':
76 | layers.append(torch.nn.LayerNorm(unit))
77 | elif norm_func_name == 'batch_norm':
78 | layers.append(torch.nn.BatchNorm1d(unit))
79 |
80 |
81 | layers.append(nn.Linear(units[-1], actions_num))
82 | return nn.Sequential(*layers)
83 |
84 | def forward(self, x, idx=-1):
85 | if self.has_lateral:
86 | # idx == -1: forward all, output all
87 | # idx == others, forward till idx.
88 | if idx == 0:
89 | actions = self.actors[0](x)
90 | return actions, [actions]
91 | else:
92 | if idx == -1:
93 | idx = self.numCols - 1
94 | activation_cache = defaultdict(list)
95 |
96 | for curr_idx in range(0, idx + 1):
97 | curr_actor = self.actors[curr_idx]
98 | assert len(curr_actor) == 5 # only a 3-layer MLP (Linear, Act, Linear, Act, Linear) is supported right now
99 | activation_1 = curr_actor[:2](x)
100 |
101 | acc_acts_1 = [self.u[curr_idx - 1][col_idx][0](activation_cache[0][col_idx]) for col_idx in range(len(activation_cache[0]))] # curr_idx - 1: lateral weights are indexed by the previous column
102 | activation_2 = curr_actor[3](curr_actor[2](activation_1) + sum(acc_acts_1)) # ReLU, full
103 |
104 | # acc_acts_2 = [self.u[curr_idx - 1][col_idx][1](activation_cache[1][col_idx]) for col_idx in range(len(activation_cache[1]))]
105 | # actions = curr_actor[4](activation_2) + sum(acc_acts_2)
106 |
107 | actions = curr_actor[4](activation_2) # disable action space transfer.
108 |
109 | # acc_acts_1 = []
110 | # for col_idx in range(len(activation_cache[0])):
111 | # acc_acts_1.append(self.u[curr_idx - 1][col_idx][0](activation_cache[0][col_idx]))
112 |
113 | # activation_2 = curr_actor[3](curr_actor[2](activation_1) + sum(acc_acts_1))
114 |
115 | # acc_acts_2 = []
116 | # for col_idx in range(len(activation_cache[1])):
117 | # acc_acts_2.append(self.u[curr_idx - 1][col_idx][1](activation_cache[1][col_idx]))
118 | # actions = curr_actor[4](activation_2) + sum(acc_acts_2)
119 |
120 | activation_cache[0].append(activation_1)
121 | activation_cache[1].append(activation_2)
122 | activation_cache[2].append(actions)
123 |
124 | return actions, activation_cache[2]
125 | else:
126 | if idx != -1:
127 | actions = self.actors[idx](x)
128 | return actions, [actions]
129 | else:
130 | actions = [self.actors[idx](x) for idx in range(self.numCols)]
131 | return actions, actions
132 |
--------------------------------------------------------------------------------
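
A hedged usage sketch of the PNN class above; the argument values are illustrative assumptions. With two hidden units the built actor has exactly five modules (Linear, Act, Linear, Act, Linear), matching the assert in forward(); activations_factory comes from the rl_games-style NetworkBuilder.BaseNetwork, so that package must be importable:

import torch
import torch.nn as nn
from phc.learning.pnn import PNN

mlp_args = {
    'input_size': 512,     # observation feature size (illustrative)
    'units': [1024, 512],  # two hidden layers -> len(actor) == 5
    'activation': 'relu',
    'dense_func': nn.Linear,
}
pnn = PNN(mlp_args, output_size=69, numCols=4, has_lateral=True)

obs = torch.randn(8, 512)
actions, per_column = pnn(obs, idx=-1)  # forward through all columns
print(actions.shape, len(per_column))   # torch.Size([8, 69]) 4

# when training a new column, earlier columns and their lateral adapters are frozen:
pnn.freeze_pnn(idx=2)
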
/scripts/render_smpl_o3d.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import sys
4 | import pdb
5 | import os.path as osp
6 |
7 | sys.path.append(os.getcwd())
8 |
9 | import open3d as o3d
10 | import open3d.visualization.rendering as rendering
11 | import imageio
12 | from tqdm import tqdm
13 | import joblib
14 | import numpy as np
15 | import torch
16 |
17 | from uhc.smpllib.smpl_parser import (
18 | SMPL_Parser,
19 | SMPLH_Parser,
20 | SMPLX_Parser,
21 | )
22 | import random
23 |
24 | from uhc.smpllib.smpl_mujoco import SMPL_BONE_ORDER_NAMES as joint_names
25 | from smpl_sim.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState
26 | from scipy.spatial.transform import Rotation as sRot
27 | import matplotlib.pyplot as plt
28 | from tqdm import tqdm
29 | import cv2
30 |
31 | paused, reset, recording, image_list, writer, control, curr_zoom = False, False, False, [], None, None, 0.01
32 |
33 |
34 | def main():
35 | render = rendering.OffscreenRenderer(2560, 960)
36 | # render.scene.set_clear_color(np.array([0, 0, 0, 1]))
37 | ############ Load SMPL Data ############
38 | pkl_dir = "output/renderings/smpl_im_comp_8-2023-02-05-15:36:14.pkl"
39 | mujoco_joint_names = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
40 | Name = pkl_dir.split("/")[-1].split(".")[0]
41 | pkl_data = joblib.load(pkl_dir)
42 | data_dir = "data/smpl"
43 | mujoco_2_smpl = [mujoco_joint_names.index(q) for q in joint_names if q in mujoco_joint_names]
44 | smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral")
45 | smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male")
46 | smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female")
47 |
48 | data_seq = pkl_data['0_0']
49 | pose_quat, trans = data_seq['body_quat'].numpy()[::2], data_seq['trans'].numpy()[::2]
50 | skeleton_tree = SkeletonTree.from_dict(data_seq['skeleton_tree'])
51 | offset = skeleton_tree.local_translation[0]
52 | root_trans_offset = trans - offset.numpy()
53 | gender, beta = data_seq['betas'][0], data_seq['betas'][1:]
54 |
55 | if gender == 0:
56 | smpl_parser = smpl_parser_n
57 | elif gender == 1:
58 | smpl_parser = smpl_parser_m
59 | else:
60 | smpl_parser = smpl_parser_f
61 |
62 | sk_state = SkeletonState.from_rotation_and_root_translation(skeleton_tree, torch.from_numpy(pose_quat), torch.from_numpy(trans), is_local=True)
63 |
64 | global_rot = sk_state.global_rotation
65 | B, J, N = global_rot.shape
66 | pose_quat = (sRot.from_quat(global_rot.reshape(-1, 4).numpy()) * sRot.from_quat([0.5, 0.5, 0.5, 0.5])).as_quat().reshape(B, -1, 4)
67 | B_down = pose_quat.shape[0]
68 | new_sk_state = SkeletonState.from_rotation_and_root_translation(skeleton_tree, torch.from_numpy(pose_quat), torch.from_numpy(trans), is_local=False)
69 | local_rot = new_sk_state.local_rotation
70 | pose_aa = sRot.from_quat(local_rot.reshape(-1, 4).numpy()).as_rotvec().reshape(B_down, -1, 3)
71 | pose_aa = pose_aa[:, mujoco_2_smpl, :].reshape(B_down, -1)
72 | root_trans_offset[..., :2] = root_trans_offset[..., :2] - root_trans_offset[0:1, :2]
73 | with torch.no_grad():
74 | vertices, joints = smpl_parser.get_joints_verts(pose=torch.from_numpy(pose_aa), th_trans=torch.from_numpy(root_trans_offset), th_betas=torch.from_numpy(beta[None,]))
75 | # vertices, joints = smpl_parser.get_joints_verts(pose=torch.from_numpy(pose_aa), th_betas=torch.from_numpy(beta[None,]))
76 | vertices = vertices.numpy()
77 | faces = smpl_parser.faces
78 | smpl_mesh = o3d.geometry.TriangleMesh()
79 | smpl_mesh.vertices = o3d.utility.Vector3dVector(vertices[0])
80 | smpl_mesh.triangles = o3d.utility.Vector3iVector(faces)
81 | # smpl_mesh.compute_triangle_normals()
82 | smpl_mesh.compute_vertex_normals()
83 |
84 | ground_plane = rendering.MaterialRecord()
85 | ground_plane.base_color = [1, 1, 1, 1]
86 | # ground_plane.shader = "defaultLit"
87 |
88 | box = o3d.geometry.TriangleMesh()
89 | ground_size = 10
90 | box = box.create_box(width=ground_size, height=1, depth=ground_size)
91 | box.compute_triangle_normals()
92 | # box.compute_vertex_normals()
93 | box.translate(np.array([-ground_size / 2, -1, -ground_size / 2]))
94 | box.rotate(sRot.from_euler('x', 90, degrees=True).as_matrix(), center=(0, 0, 0))
95 | render.scene.add_geometry("box", box, groun_plane)
96 |
97 | # cyl.compute_vertex_normals()
98 | # cyl.translate([-2, 0, 1.5])
99 |
100 | ending_color = rendering.MaterialRecord()
101 | ending_color.base_color = np.array([35, 102, 218, 256]) / 256
102 | ending_color.shader = "defaultLit"
103 |
104 | render.scene.add_geometry("cyl", smpl_mesh, ending_color)
105 | eye_level = 1
106 | render.setup_camera(60.0, [0, 0, eye_level], [0, -3, eye_level], [0, 0, 1]) # center (lookat), eye (pos), up
107 |
108 | # render.scene.scene.set_sun_light([0, 1, 0], [1.0, 1.0, 1.0], 100000)
109 | # render.scene.scene.enable_sun_light(True)
110 | # render.scene.scene.enable_light_shadow("sun", True)
111 |
112 | for i in tqdm(range(0, 50, 5)):
113 | smpl_mesh.vertices = o3d.utility.Vector3dVector(vertices[i])
114 | color_rgb = np.array([35, 102, 218, 256]) / 256 * (1 - i / 50)
115 | color_rgb[-1] = 1
116 | ending_color.base_color = color_rgb
117 | render.scene.add_geometry(f"cyl_{i}", smpl_mesh, ending_color)
118 | break # debug: only the first frame of the motion trail is added
119 |
120 | # render.scene.show_axes(True)
121 | img = render.render_to_image()
122 | cv2.imwrite("output/renderings/iccv2023/test_data.png", np.asarray(img)[..., ::-1])
123 | plt.figure(dpi=400)
124 | plt.imshow(np.asarray(img))
125 | plt.show()
126 |
127 | # writer = imageio.get_writer("output/renderings/test_data.mp4", fps=30, macro_block_size=None)
128 |
129 | # for i in tqdm(range(B_down)):
130 |
131 | # smpl_mesh.vertices = o3d.utility.Vector3dVector(vertices[i])
132 |
133 | # render.scene.remove_geometry('cyl')
134 | # render.scene.add_geometry("cyl", smpl_mesh, color)
135 | # img = render.render_to_image()
136 | # writer.append_data(np.asarray(img))
137 |
138 | # writer.close()
139 |
140 |
141 | if __name__ == "__main__":
142 | main()
--------------------------------------------------------------------------------
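
The commented-out block at the end of main() sketches video export. A sketch of a working version, assuming imageio's ffmpeg backend is available and reusing the renderer, mesh, material, and per-frame vertices built in main():

import imageio
import numpy as np
import open3d as o3d

def write_video(render, mesh, material, vertices, path, fps=30):
    """Re-render the mesh once per frame and stream the frames to an mp4."""
    writer = imageio.get_writer(path, fps=fps, macro_block_size=None)
    for i in range(vertices.shape[0]):
        mesh.vertices = o3d.utility.Vector3dVector(vertices[i])
        mesh.compute_vertex_normals()  # normals go stale when the vertices move
        render.scene.remove_geometry("cyl")
        render.scene.add_geometry("cyl", mesh, material)
        writer.append_data(np.asarray(render.render_to_image()))
    writer.close()
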
/scripts/data_process/convert_amass_isaac.py:
--------------------------------------------------------------------------------
1 |
2 | import torch
3 | import joblib
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | from scipy import ndimage
7 | from scipy.spatial.transform import Rotation as sRot
8 | import glob
9 | import os
10 | import sys
11 | import pdb
12 | import os.path as osp
13 | from pathlib import Path
14 |
15 | sys.path.append(os.getcwd())
16 |
17 | from uhc.khrylib.utils import get_body_qposaddr
18 | from uhc.smpllib.smpl_mujoco import SMPL_BONE_ORDER_NAMES as joint_names
19 | from uhc.smpllib.smpl_local_robot import SMPL_Robot as LocalRobot
20 | import scipy.ndimage.filters as filters
21 | from typing import List, Optional
22 | from tqdm import tqdm
23 | from smpl_sim.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState
24 | import argparse
25 |
26 | def run(in_file: str, out_file: str):
27 |
28 | robot_cfg = {
29 | "mesh": False,
30 | "model": "smpl",
31 | "upright_start": True,
32 | "body_params": {},
33 | "joint_params": {},
34 | "geom_params": {},
35 | "actuator_params": {},
36 | }
37 | print(robot_cfg)
38 |
39 | smpl_local_robot = LocalRobot(
40 | robot_cfg,
41 | data_dir="data/smpl",
42 | )
43 |
44 | amass_data = joblib.load(in_file)
45 |
46 | double = False
47 |
48 | mujoco_joint_names = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
49 |
50 |
51 | amass_full_motion_dict = {}
52 | for key_name in tqdm(amass_data.keys()):
53 | smpl_data_entry = amass_data[key_name]
54 | B = smpl_data_entry['pose_aa'].shape[0]
55 |
56 | start, end = 0, 0
57 |
58 | pose_aa = smpl_data_entry['pose_aa'].copy()[start:]
59 | root_trans = smpl_data_entry['trans'].copy()[start:]
60 | B = pose_aa.shape[0]
61 |
62 | beta = smpl_data_entry['beta'].copy() if "beta" in smpl_data_entry else smpl_data_entry['betas'].copy()
63 | if len(beta.shape) == 2:
64 | beta = beta[0]
65 |
66 | gender = smpl_data_entry.get("gender", "neutral")
67 | fps = 30.0
68 |
69 | if isinstance(gender, np.ndarray):
70 | gender = gender.item()
71 |
72 | if isinstance(gender, bytes):
73 | gender = gender.decode("utf-8")
74 | if gender == "neutral":
75 | gender_number = [0]
76 | elif gender == "male":
77 | gender_number = [1]
78 | elif gender == "female":
79 | gender_number = [2]
80 | else:
81 | import ipdb
82 | ipdb.set_trace()
83 | raise Exception("Gender Not Supported!!")
84 |
85 | smpl_2_mujoco = [joint_names.index(q) for q in mujoco_joint_names if q in joint_names]
86 | batch_size = pose_aa.shape[0]
87 | pose_aa = np.concatenate([pose_aa[:, :66], np.zeros((batch_size, 6))], axis=1)
88 | pose_aa_mj = pose_aa.reshape(-1, 24, 3)[..., smpl_2_mujoco, :].copy()
89 |
90 | num = 1
91 | if double:
92 | num = 2
93 | for idx in range(num):
94 | pose_quat = sRot.from_rotvec(pose_aa_mj.reshape(-1, 3)).as_quat().reshape(batch_size, 24, 4)
95 |
96 | gender_number, beta[:], gender = [0], 0, "neutral"
97 | print("using neutral model")
98 |
99 | smpl_local_robot.load_from_skeleton(betas=torch.from_numpy(beta[None,]), gender=gender_number, objs_info=None)
100 | smpl_local_robot.write_xml("phc/data/assets/mjcf/smpl_humanoid_1.xml")
101 | skeleton_tree = SkeletonTree.from_mjcf("phc/data/assets/mjcf/smpl_humanoid_1.xml")
102 |
103 | root_trans_offset = torch.from_numpy(root_trans) + skeleton_tree.local_translation[0]
104 |
105 | new_sk_state = SkeletonState.from_rotation_and_root_translation(
106 | skeleton_tree, # This is the wrong skeleton tree (location wise) here, but it's fine since we only use the parent relationship here.
107 | torch.from_numpy(pose_quat),
108 | root_trans_offset,
109 | is_local=True)
110 |
111 | if robot_cfg['upright_start']:
112 | pose_quat_global = (sRot.from_quat(new_sk_state.global_rotation.reshape(-1, 4).numpy()) * sRot.from_quat([0.5, 0.5, 0.5, 0.5]).inv()).as_quat().reshape(B, -1, 4) # should fix pose_quat as well here...
113 |
114 | new_sk_state = SkeletonState.from_rotation_and_root_translation(skeleton_tree, torch.from_numpy(pose_quat_global), root_trans_offset, is_local=False)
115 | pose_quat = new_sk_state.local_rotation.numpy()
116 |
117 | ############################################################
118 | # key_name_dump = key_name + f"_{idx}"
119 | key_name_dump = key_name
120 | if idx == 1:
121 | left_to_right_index = [0, 5, 6, 7, 8, 1, 2, 3, 4, 9, 10, 11, 12, 13, 19, 20, 21, 22, 23, 14, 15, 16, 17, 18]
122 | pose_quat_global = pose_quat_global[:, left_to_right_index]
123 | pose_quat_global[..., 0] *= -1
124 | pose_quat_global[..., 2] *= -1
125 |
126 | root_trans_offset[..., 1] *= -1
127 | ############################################################
128 |
129 | new_motion_out = {}
130 | new_motion_out['pose_quat_global'] = pose_quat_global
131 | new_motion_out['pose_quat'] = pose_quat
132 | new_motion_out['trans_orig'] = root_trans
133 | new_motion_out['root_trans_offset'] = root_trans_offset
134 | new_motion_out['beta'] = beta
135 | new_motion_out['gender'] = gender
136 | new_motion_out['pose_aa'] = pose_aa
137 | new_motion_out['fps'] = fps
138 | amass_full_motion_dict[key_name_dump] = new_motion_out
139 |
140 | Path(out_file).parents[0].mkdir(parents=True, exist_ok=True)
141 | joblib.dump(amass_full_motion_dict, out_file)
142 | return
143 |
144 | # import ipdb
145 |
146 | # ipdb.set_trace()
147 |
148 | if __name__ == "__main__":
149 | parser = argparse.ArgumentParser()
150 | parser.add_argument("--in_file", type=str, default="sample_data/amass_copycat_take5_train.pkl")
151 | parser.add_argument("--out_file", type=str, default="data/amass/pkls/amass_copycat_take5_train.pkl")
152 | args = parser.parse_args()
153 | run(
154 | in_file=args.in_file,
155 | out_file=args.out_file
156 | )
157 |
--------------------------------------------------------------------------------
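
A quick standalone check (not part of the pipeline) of the upright_start correction used above: in scipy's (x, y, z, w) convention, [0.5, 0.5, 0.5, 0.5] is a 120-degree rotation about the (1, 1, 1) axis, i.e. a cyclic permutation of the coordinate axes, and each body's global rotation is right-multiplied by its inverse to re-express the pose in the upright frame:

import numpy as np
from scipy.spatial.transform import Rotation as sRot

q = sRot.from_quat([0.5, 0.5, 0.5, 0.5])
print(np.degrees(np.linalg.norm(q.as_rotvec())))  # 120.0
print(np.round(q.as_matrix()))                    # cyclic axis-permutation matrix
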
/phc/phc/env/tasks/vec_task.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2023, NVIDIA Corporation
2 | # All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # 1. Redistributions of source code must retain the above copyright notice, this
8 | # list of conditions and the following disclaimer.
9 | #
10 | # 2. Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # 3. Neither the name of the copyright holder nor the names of its
15 | # contributors may be used to endorse or promote products derived from
16 | # this software without specific prior written permission.
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
29 | from gym import spaces
30 |
31 | from isaacgym import gymtorch
32 | from isaacgym.torch_utils import to_torch
33 | import torch
34 | import numpy as np
35 |
36 |
37 | # VecEnv Wrapper for RL training
38 | class VecTask():
39 |
40 | def __init__(self, task, rl_device, clip_observations=5.0):
41 | self.task = task
42 |
43 | self.num_environments = task.num_envs
44 | self.num_agents = 1 # used for multi-agent environments
45 | self.num_observations = task.num_obs
46 | self.num_states = task.num_states
47 | self.num_actions = task.num_actions
48 |
49 | self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.Inf, np.ones(self.num_obs) * np.Inf)
50 | self.state_space = spaces.Box(np.ones(self.num_states) * -np.Inf, np.ones(self.num_states) * np.Inf)
51 | if isinstance(self.num_actions, int):
52 | self.act_space = spaces.Box(np.ones(self.num_actions) * -1., np.ones(self.num_actions) * 1.)
53 | elif isinstance(self.num_actions, list):
54 | self.act_space = spaces.Tuple([spaces.Discrete(num_actions) for num_actions in self.num_actions])
55 |
56 |
57 | self.clip_obs = clip_observations
58 | self.rl_device = rl_device
59 |
60 | print("RL device: ", rl_device)
61 |
62 | def step(self, actions):
63 | raise NotImplementedError
64 |
65 | def reset(self):
66 | raise NotImplementedError
67 |
68 | def get_number_of_agents(self):
69 | return self.num_agents
70 |
71 | @property
72 | def observation_space(self):
73 | return self.obs_space
74 |
75 | @property
76 | def action_space(self):
77 | return self.act_space
78 |
79 | @property
80 | def num_envs(self):
81 | return self.num_environments
82 |
83 | @property
84 | def num_acts(self):
85 | return self.num_actions
86 |
87 | @property
88 | def num_obs(self):
89 | return self.num_observations
90 |
91 |
92 | # C++ CPU Class
93 | class VecTaskCPU(VecTask):
94 |
95 | def __init__(self, task, rl_device, sync_frame_time=False, clip_observations=5.0):
96 | super().__init__(task, rl_device, clip_observations=clip_observations)
97 | self.sync_frame_time = sync_frame_time
98 |
99 | def step(self, actions):
100 | actions = actions.cpu().numpy()
101 | self.task.render(self.sync_frame_time)
102 |
103 | obs, rewards, resets, extras = self.task.step(actions)
104 |
105 | return (to_torch(np.clip(obs, -self.clip_obs, self.clip_obs), dtype=torch.float, device=self.rl_device), to_torch(rewards, dtype=torch.float, device=self.rl_device), to_torch(resets, dtype=torch.uint8, device=self.rl_device), [])
106 |
107 | def reset(self):
108 | actions = 0.01 * (1 - 2 * np.random.rand(self.num_envs, self.num_actions)).astype('f')
109 |
110 | # step the simulator
111 | obs, rewards, resets, extras = self.task.step(actions)
112 |
113 | return to_torch(np.clip(obs, -self.clip_obs, self.clip_obs), dtype=torch.float, device=self.rl_device)
114 |
115 |
116 | # C++ GPU Class
117 | class VecTaskGPU(VecTask):
118 |
119 | def __init__(self, task, rl_device, clip_observations=5.0):
120 | super().__init__(task, rl_device, clip_observations=clip_observations)
121 |
122 | self.obs_tensor = gymtorch.wrap_tensor(self.task.obs_tensor, counts=(self.task.num_envs, self.task.num_obs))
123 | self.rewards_tensor = gymtorch.wrap_tensor(self.task.rewards_tensor, counts=(self.task.num_envs,))
124 | self.resets_tensor = gymtorch.wrap_tensor(self.task.resets_tensor, counts=(self.task.num_envs,))
125 |
126 | def step(self, actions):
127 | self.task.render(False)
128 | actions_tensor = gymtorch.unwrap_tensor(actions)
129 |
130 | self.task.step(actions_tensor)
131 |
132 | return torch.clamp(self.obs_tensor, -self.clip_obs, self.clip_obs), self.rewards_tensor, self.resets_tensor, []
133 |
134 | def reset(self):
135 | actions = 0.01 * (1 - 2 * torch.rand([self.task.num_envs, self.task.num_actions], dtype=torch.float32, device=self.rl_device))
136 | actions_tensor = gymtorch.unwrap_tensor(actions)
137 |
138 | # step the simulator
139 | self.task.step(actions_tensor)
140 |
141 | return torch.clamp(self.obs_tensor, -self.clip_obs, self.clip_obs)
142 |
143 |
144 | # Python CPU/GPU Class
145 | class VecTaskPython(VecTask):
146 |
147 | def get_state(self):
148 | return torch.clamp(self.task.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
149 |
150 | def step(self, actions):
151 |
152 | self.task.step(actions)
153 |
154 | return torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device), self.task.rew_buf.to(self.rl_device), self.task.reset_buf.to(self.rl_device), self.task.extras
155 |
156 | def reset(self):
157 | actions = 0.01 * (1 - 2 * torch.rand([self.task.num_envs, self.task.num_actions], dtype=torch.float32, device=self.rl_device))
158 |
159 | # step the simulator
160 | self.task.step(actions)
161 |
162 | return torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
163 |
--------------------------------------------------------------------------------
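
A hedged sketch of the contract VecTaskPython expects from the wrapped task: just the attributes read in VecTask.__init__ plus step() filling the observation/reward/reset buffers. DummyTask is a hypothetical stand-in for an Isaac Gym task (importing vec_task still requires isaacgym to be installed, since the module imports gymtorch at load time):

import torch
from phc.env.tasks.vec_task import VecTaskPython

class DummyTask:
    num_envs, num_obs, num_states, num_actions = 4, 8, 0, 2

    def __init__(self):
        self.obs_buf = torch.zeros(self.num_envs, self.num_obs)
        self.rew_buf = torch.zeros(self.num_envs)
        self.reset_buf = torch.zeros(self.num_envs, dtype=torch.uint8)
        self.states_buf = torch.zeros(self.num_envs, self.num_states)
        self.extras = {}

    def step(self, actions):
        # a real task would run the physics simulation here
        self.obs_buf = torch.randn(self.num_envs, self.num_obs)
        self.rew_buf = -actions.abs().sum(dim=-1)

env = VecTaskPython(DummyTask(), rl_device="cpu")
obs = env.reset()
obs, rew, done, info = env.step(torch.zeros(env.num_envs, env.num_acts))
print(obs.shape, rew.shape)  # torch.Size([4, 8]) torch.Size([4])
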
/phc/phc/learning/unrealego/unrealego_autoencoder_model.py:
--------------------------------------------------------------------------------
1 | from cProfile import run
2 | from enum import auto
3 | import torch
4 | import torch.nn as nn
5 | from torch.autograd import Variable
6 | from torch.cuda.amp import autocast, GradScaler
7 | from torch.nn import MSELoss
8 |
9 | import itertools
10 | from .base_model import BaseModel
11 | from . import network
12 | from utils.loss import LossFuncLimb, LossFuncCosSim, LossFuncMPJPE
13 | from utils.util import batch_compute_similarity_transform_torch
14 |
15 |
16 | class UnrealEgoAutoEncoderModel(BaseModel):
17 | def name(self):
18 | return 'UnrealEgo AutoEncoder model'
19 |
20 | def initialize(self, opt):
21 | BaseModel.initialize(self, opt)
22 |
23 | self.opt = opt
24 | self.scaler = GradScaler(enabled=opt.use_amp)
25 |
26 | self.loss_names = [
27 | 'heatmap_left_rec', 'heatmap_right_rec',
28 | 'pose', 'cos_sim',
29 | ]
30 |
31 | if self.isTrain:
32 | self.visual_names = [
33 | 'input_rgb_left', 'input_rgb_right',
34 | 'pred_heatmap_left', 'pred_heatmap_right',
35 | 'gt_heatmap_left', 'gt_heatmap_right',
36 | 'pred_heatmap_left_rec', 'pred_heatmap_right_rec'
37 | ]
38 | else:
39 | self.visual_names = [
40 | # 'input_rgb_left', 'input_rgb_right',
41 | 'pred_heatmap_left', 'pred_heatmap_right',
42 | 'gt_heatmap_left', 'gt_heatmap_right',
43 | ]
44 |
45 | self.visual_pose_names = [
46 | "pred_pose", "gt_pose"
47 | ]
48 |
49 | # the HeatMap and AutoEncoder networks are saved/loaded in both
50 | # train and test mode
51 | self.model_names = ['HeatMap', 'AutoEncoder']
52 |
53 |
54 | self.eval_key = "mpjpe"
55 | self.cm2mm = 10
56 |
57 |
58 | # define the transform network
59 | self.net_HeatMap = network.define_HeatMap(opt, model=opt.model)
60 | self.net_AutoEncoder = network.define_AutoEncoder(opt, model=opt.model)
61 |
62 | self.load_networks(
63 | net=self.net_HeatMap,
64 | path_to_trained_weights=opt.path_to_trained_heatmap
65 | )
66 | network._freeze(self.net_HeatMap)
67 |
68 | # define loss functions
69 | self.lossfunc_MSE = MSELoss()
70 | self.lossfunc_limb = LossFuncLimb()
71 | self.lossfunc_cos_sim = LossFuncCosSim()
72 | self.lossfunc_MPJPE = LossFuncMPJPE()
73 |
74 | if self.isTrain:
75 | # initialize optimizers
76 | self.optimizer_AutoEncoder = torch.optim.Adam(
77 | params=self.net_AutoEncoder.parameters(),
78 | lr=opt.lr,
79 | weight_decay=opt.weight_decay
80 | )
81 |
82 | self.optimizers = []
83 | self.schedulers = []
84 | self.optimizers.append(self.optimizer_AutoEncoder)
85 | for optimizer in self.optimizers:
86 | self.schedulers.append(network.get_scheduler(optimizer, opt))
87 |
88 | def set_input(self, data):
89 | self.data = data
90 | self.input_rgb_left = data['input_rgb_left'].cuda(self.device)
91 | self.input_rgb_right = data['input_rgb_right'].cuda(self.device)
92 | self.gt_heatmap_left = data['gt_heatmap_left'].cuda(self.device)
93 | self.gt_heatmap_right = data['gt_heatmap_right'].cuda(self.device)
94 | self.gt_pose = data['gt_local_pose'].cuda(self.device)
95 |
96 | def forward(self):
97 | with autocast(enabled=self.opt.use_amp):
98 | # estimate stereo heatmaps
99 | with torch.no_grad():
100 | pred_heatmap_cat = self.net_HeatMap(self.input_rgb_left, self.input_rgb_right)
101 | self.pred_heatmap_left, self.pred_heatmap_right = torch.chunk(pred_heatmap_cat, 2, dim=1)
102 |
103 | # estimate pose and reconstruct stereo heatmaps
104 | self.pred_pose, pred_heatmap_rec_cat = self.net_AutoEncoder(pred_heatmap_cat)
105 | self.pred_heatmap_left_rec, self.pred_heatmap_right_rec = torch.chunk(pred_heatmap_rec_cat, 2, dim=1)
106 |
107 | def backward_AutoEncoder(self):
108 | with autocast(enabled=self.opt.use_amp):
109 | loss_pose = self.lossfunc_MPJPE(self.pred_pose, self.gt_pose)
110 | loss_cos_sim = self.lossfunc_cos_sim(self.pred_pose, self.gt_pose)
111 | loss_heatmap_left_rec = self.lossfunc_MSE(
112 | self.pred_heatmap_left_rec, self.pred_heatmap_left.detach()
113 | )
114 | loss_heatmap_right_rec = self.lossfunc_MSE(
115 | self.pred_heatmap_right_rec, self.pred_heatmap_right.detach()
116 | )
117 |
118 | self.loss_pose = loss_pose * self.opt.lambda_mpjpe
119 | self.loss_cos_sim = loss_cos_sim * self.opt.lambda_cos_sim * self.opt.lambda_mpjpe
120 | self.loss_heatmap_left_rec = loss_heatmap_left_rec * self.opt.lambda_heatmap_rec
121 | self.loss_heatmap_right_rec = loss_heatmap_right_rec * self.opt.lambda_heatmap_rec
122 |
123 | loss_total = self.loss_pose + self.loss_cos_sim + \
124 | self.loss_heatmap_left_rec + self.loss_heatmap_right_rec
125 |
126 | self.scaler.scale(loss_total).backward()
127 |
128 | def optimize_parameters(self):
129 |
130 | # set model trainable
131 | self.net_AutoEncoder.train()
132 |
133 | # set optimizer.zero_grad()
134 | self.optimizer_AutoEncoder.zero_grad()
135 |
136 | # forward
137 | self.forward()
138 |
139 | # backward
140 | self.backward_AutoEncoder()
141 |
142 | # optimizer step
143 | self.scaler.step(self.optimizer_AutoEncoder)
144 |
145 | self.scaler.update()
146 |
147 | def evaluate(self, running_average_dict):
148 | # set evaluation mode
149 | self.net_HeatMap.eval()
150 | self.net_AutoEncoder.eval()
151 |
152 | # forward pass
153 | pred_heatmap_cat = self.net_HeatMap(self.input_rgb_left, self.input_rgb_right)
154 | self.pred_heatmap_left, self.pred_heatmap_right = torch.chunk(pred_heatmap_cat, 2, dim=1)
155 | self.pred_pose = self.net_AutoEncoder.predict_pose(pred_heatmap_cat)
156 |
157 | S1_hat = batch_compute_similarity_transform_torch(self.pred_pose, self.gt_pose)
158 |
159 | # compute metrics
160 | for id in range(self.pred_pose.size()[0]): # batch size
161 | # calculate mpjpe and p_mpjpe # cm to mm
162 | mpjpe = self.lossfunc_MPJPE(self.pred_pose[id], self.gt_pose[id]) * self.cm2mm
163 | pa_mpjpe = self.lossfunc_MPJPE(S1_hat[id], self.gt_pose[id]) * self.cm2mm
164 |
165 | # update metrics dict
166 | running_average_dict.update(dict(
167 | mpjpe=mpjpe,
168 | pa_mpjpe=pa_mpjpe)
169 | )
170 |
171 | return running_average_dict
172 |
173 |
174 |
--------------------------------------------------------------------------------
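
For context, a sketch of the outer loop these methods are written for (the real loop lives in the UnrealEgo training scripts; the function boundaries here are assumptions):

import torch

def train_one_epoch(model, dataloader):
    for data in dataloader:
        model.set_input(data)        # move the batch to the GPU
        model.optimize_parameters()  # forward, AMP-scaled backward, optimizer step

def validate(model, dataloader, running_average_dict):
    with torch.no_grad():
        for data in dataloader:
            model.set_input(data)
            running_average_dict = model.evaluate(running_average_dict)  # accumulates mpjpe / pa_mpjpe
    return running_average_dict
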
/scripts/data_process/convert_data_mdm.py:
--------------------------------------------------------------------------------
1 |
2 | import torch
3 | import joblib
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | from scipy import ndimage
7 | from scipy.spatial.transform import Rotation as sRot
8 | import glob
9 | import os
10 | import sys
11 | import pdb
12 | import os.path as osp
13 |
14 | sys.path.append(os.getcwd())
15 |
16 | from uhc.utils.config_utils.copycat_config import Config as CC_Config
17 | from uhc.khrylib.utils import get_body_qposaddr
18 | from uhc.smpllib.smpl_mujoco import SMPL_BONE_ORDER_NAMES as joint_names
19 | from uhc.smpllib.smpl_robot import Robot
20 | from uhc.smpllib.smpl_local_robot import SMPL_Robot as LocalRobot
21 | import scipy.ndimage.filters as filters
22 | from typing import List, Optional
23 | from tqdm import tqdm
24 | from smpl_sim.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState
25 |
26 | robot_cfg = {
27 | "mesh": False,
28 | "model": "smpl",
29 | "upright_start": True,
30 | "body_params": {},
31 | "joint_params": {},
32 | "geom_params": {},
33 | "actuator_params": {},
34 | }
35 | print(robot_cfg)
36 |
37 | smpl_local_robot = LocalRobot(
38 | robot_cfg,
39 | data_dir="data/smpl",
40 | )
41 | # res_data = joblib.load("data/mdm/res.pk")
42 | # res_data = joblib.load("data/mdm/res_wave.pk")
43 | # res_data = joblib.load("data/mdm/res_phone.pk")
44 | res_data = joblib.load("data/mdm/res_run.pk")
45 |
46 | # ipdb.set_trace() # disabled debug breakpoint; ipdb is not imported here
47 | amass_data = {}
48 | for i in range(len(res_data['json_file']['thetas'])):
49 | pose_euler = np.array(res_data['json_file']['thetas'])[i].reshape(-1, 24, 3)
50 | B = pose_euler.shape[0]
51 | trans = np.array(res_data['json_file']['root_translation'])[i]
52 | pose_aa = sRot.from_euler('XYZ', pose_euler.reshape(-1, 3), degrees=True).as_rotvec().reshape(B, 72)
53 |
54 | transform = sRot.from_euler('xyz', np.array([np.pi / 2, 0, 0]), degrees=False)
55 | new_root = (transform * sRot.from_rotvec(pose_aa[:, :3])).as_rotvec()
56 | pose_aa[:, :3] = new_root
57 |
58 | trans = trans.dot(transform.as_matrix().T)
59 | trans[:, 2] = trans[:, 2] - (trans[0, 2] - 0.92)
60 |
61 | amass_data[f"{i}"] = {"pose_aa": pose_aa, "trans": trans, 'beta': np.zeros(10)}
62 |
63 | double = False
64 |
65 | mujoco_joint_names = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand']
66 | amass_full_motion_dict = {}
67 | for key_name in tqdm(amass_data.keys()):
68 | key_name_dump = key_name
69 | smpl_data_entry = amass_data[key_name]
70 | file_name = f"data/amass/singles/{key_name}.npy"
71 | B = smpl_data_entry['pose_aa'].shape[0]
72 |
73 | start, end = 0, 0
74 |
75 | pose_aa = smpl_data_entry['pose_aa'].copy()[start:]
76 | root_trans = smpl_data_entry['trans'].copy()[start:]
77 | B = pose_aa.shape[0]
78 |
79 | beta = smpl_data_entry['beta'].copy() if "beta" in smpl_data_entry else smpl_data_entry['betas'].copy()
80 | if len(beta.shape) == 2:
81 | beta = beta[0]
82 |
83 | gender = smpl_data_entry.get("gender", "neutral")
84 | fps = 30.0
85 |
86 | if isinstance(gender, np.ndarray):
87 | gender = gender.item()
88 |
89 | if isinstance(gender, bytes):
90 | gender = gender.decode("utf-8")
91 | if gender == "neutral":
92 | gender_number = [0]
93 | elif gender == "male":
94 | gender_number = [1]
95 | elif gender == "female":
96 | gender_number = [2]
97 | else:
98 | import ipdb
99 | ipdb.set_trace()
100 | raise Exception("Gender Not Supported!!")
101 |
102 | smpl_2_mujoco = [joint_names.index(q) for q in mujoco_joint_names if q in joint_names]
103 | batch_size = pose_aa.shape[0]
104 | pose_aa = np.concatenate([pose_aa[:, :66], np.zeros((batch_size, 6))], axis=1)
105 | pose_aa_mj = pose_aa.reshape(-1, 24, 3)[..., smpl_2_mujoco, :].copy()
106 |
107 | num = 1
108 | pose_quat = sRot.from_rotvec(pose_aa_mj.reshape(-1, 3)).as_quat().reshape(batch_size, 24, 4)
109 |
110 | gender_number, beta[:], gender = [0], 0, "neutral"
111 | print("using neutral model")
112 |
113 | smpl_local_robot.load_from_skeleton(betas=torch.from_numpy(beta[None,]), gender=gender_number, objs_info=None)
114 | smpl_local_robot.write_xml("egoquest/data/assets/mjcf/smpl_humanoid_1.xml")
115 | skeleton_tree = SkeletonTree.from_mjcf("egoquest/data/assets/mjcf/smpl_humanoid_1.xml")
116 |
117 | root_trans_offset = torch.from_numpy(root_trans) + skeleton_tree.local_translation[0]
118 |
119 | new_sk_state = SkeletonState.from_rotation_and_root_translation(
120 | skeleton_tree, # This is the wrong skeleton tree (location wise) here, but it's fine since we only use the parent relationship here.
121 | torch.from_numpy(pose_quat),
122 | root_trans_offset,
123 | is_local=True)
124 |
125 | if robot_cfg['upright_start']:
126 | pose_quat_global = (sRot.from_quat(new_sk_state.global_rotation.reshape(-1, 4).numpy()) * sRot.from_quat([0.5, 0.5, 0.5, 0.5]).inv()).as_quat().reshape(B, -1, 4) # should fix pose_quat as well here...
127 |
128 | print("############### filtering!!! ###############")
129 | import scipy.ndimage.filters as filters
130 | from uhc.utils.transform_utils import quat_correct
131 | root_trans_offset = filters.gaussian_filter1d(root_trans_offset, 3, axis=0, mode="nearest")
132 | root_trans_offset = torch.from_numpy(root_trans_offset)
133 | pose_quat_global = np.stack([quat_correct(pose_quat_global[:, i]) for i in range(pose_quat_global.shape[1])], axis=1)
134 |
135 | # select_quats = np.linalg.norm(pose_quat_global[:-1, :] - pose_quat_global[1:, :], axis=2) > np.linalg.norm(pose_quat_global[:-1, :] + pose_quat_global[1:, :], axis=2) # checkup
136 |
137 | filtered_quats = filters.gaussian_filter1d(pose_quat_global, 2, axis=0, mode="nearest")
138 | pose_quat_global = filtered_quats / np.linalg.norm(filtered_quats, axis=-1)[..., None]
139 | print("############### filtering!!! ###############")
140 | new_sk_state = SkeletonState.from_rotation_and_root_translation(skeleton_tree, torch.from_numpy(pose_quat_global), root_trans_offset, is_local=False)
141 | pose_quat = new_sk_state.local_rotation.numpy()
142 |
143 | new_motion_out = {}
144 | new_motion_out['pose_quat_global'] = pose_quat_global
145 | new_motion_out['pose_quat'] = pose_quat
146 | new_motion_out['trans_orig'] = root_trans
147 | new_motion_out['root_trans_offset'] = root_trans_offset
148 | new_motion_out['beta'] = beta
149 | new_motion_out['gender'] = gender
150 | new_motion_out['pose_aa'] = pose_aa
151 | new_motion_out['fps'] = fps
152 | amass_full_motion_dict[key_name_dump] = new_motion_out
153 |
154 | # import ipdb
155 |
156 | # ipdb.set_trace() # disabled debug breakpoint so the dump below actually runs
157 | joblib.dump(amass_full_motion_dict, "data/mdm/mdm_isaac_run.pkl")
158 | # joblib.dump(amass_full_motion_dict, "data/amass/pkls/hybrik/test_hyberIK_sfv.pkl")
159 |
--------------------------------------------------------------------------------
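
Why quat_correct runs before the Gaussian filter above: q and -q encode the same rotation, so a raw quaternion sequence can flip sign between frames, and smoothing across a flip averages toward near-zero, invalid quaternions. The same idea as a standalone sketch (quat_correct itself lives in uhc.utils.transform_utils):

import numpy as np

def fix_quat_signs(quats):
    """quats: (T, 4). Flip frames so consecutive quaternions share a hemisphere."""
    quats = quats.copy()
    for t in range(1, len(quats)):
        if np.dot(quats[t], quats[t - 1]) < 0:
            quats[t] = -quats[t]
    return quats
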
/phc/phc/learning/vq_quantizer.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 |
6 | class Quantizer(nn.Module):
7 | def __init__(self, n_e, e_dim, beta):
8 | super(Quantizer, self).__init__()
9 |
10 | self.e_dim = e_dim
11 | self.n_e = n_e
12 | self.beta = beta
13 |
14 | self.embedding = nn.Embedding(self.n_e, self.e_dim)
15 | # self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
16 | # self.embedding.weight.data.uniform_(-1.0 / 2, 1.0 / 2)
17 | self.embedding.weight.data.uniform_(-1.0 / 256, 1.0 / 256)
18 | # self.embedding.weight.data = self.embedding.weight.data/self.embedding.weight.data.norm(dim = -1, keepdim=True) # project to sphere
19 | # self.embedding.weight.data[:] *= 10
20 |
21 |
22 | def forward(self, z, return_perplexity=False, return_loss = True):
23 | """
24 | Inputs the output of the encoder network z and maps it to a discrete
25 | one-hot vector that is the index of the closest embedding vector e_j
26 | z (continuous) -> z_q (discrete)
27 | :param z (B, seq_len, channel):
28 | :return z_q:
29 | """
30 | assert z.shape[-1] == self.e_dim
31 | z_flattened = z.contiguous().view(-1, self.e_dim)
32 |
33 | # B x V
34 | d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
35 | torch.sum(self.embedding.weight**2, dim=1) - 2 * \
36 | torch.matmul(z_flattened, self.embedding.weight.t())
37 | # B x 1
38 | min_encoding_indices = torch.argmin(d, dim=1)
39 | z_q = self.embedding(min_encoding_indices).view(z.shape)
40 |
41 | # compute loss for embedding
42 | if return_loss:
43 | loss = torch.mean((z_q - z.detach())**2) + self.beta * torch.mean((z_q.detach() - z)**2)
44 | # loss = self.beta * torch.mean((z_q.detach() - z)**2)
45 |
46 | # preserve gradients
47 | z_q = z + (z_q - z).detach()
48 | else:
49 | loss = torch.tensor(0.0).to(z.device)
50 |
51 | if return_perplexity:
52 | min_encodings = F.one_hot(min_encoding_indices, self.n_e).type(z.dtype) # measuring utilization
53 | e_mean = torch.mean(min_encodings, dim=0)
54 | perplexity = torch.exp(-torch.sum(e_mean*torch.log(e_mean + 1e-10)))
55 | return loss, z_q, min_encoding_indices, perplexity
56 | else:
57 | return loss, z_q, min_encoding_indices
58 |
59 | def map2index(self, z):
60 | """
61 | Inputs the output of the encoder network z and maps it to a discrete
62 | one-hot vector that is the index of the closest embedding vector e_j
63 | z (continuous) -> z_q (discrete)
64 | :param z (B, seq_len, channel):
65 | :return z_q:
66 | """
67 | assert z.shape[-1] == self.e_dim
68 | z_flattened = z.contiguous().view(-1, self.e_dim)
69 |
70 | # B x V
71 | d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
72 | torch.sum(self.embedding.weight**2, dim=1) - 2 * \
73 | torch.matmul(z_flattened, self.embedding.weight.t())
74 | # B x 1
75 | min_encoding_indices = torch.argmin(d, dim=1)
76 | return min_encoding_indices
77 |
78 | def get_codebook_entry(self, indices):
79 | """
80 |
81 | :param indices(B, seq_len):
82 | :return z_q(B, seq_len, e_dim):
83 | """
84 | index_flattened = indices.view(-1)
85 | z_q = self.embedding(index_flattened)
86 | z_q = z_q.view(indices.shape + (self.e_dim, )).contiguous()
87 | return z_q
88 |
89 |
90 | class EmbeddingEMA(nn.Module):
91 | def __init__(self, num_tokens, codebook_dim, decay=0.99, eps=1e-5):
92 | super(EmbeddingEMA, self).__init__()
93 | self.decay = decay
94 | self.eps = eps
95 | weight = torch.randn(num_tokens, codebook_dim)
96 |
97 | # weight = weight/weight.norm(dim = -1, keepdim=True) # project to sphere
98 |
99 | self.weight = nn.Parameter(weight, requires_grad=False)
100 | # self.weight.data.uniform_(-1.0 / num_tokens, 1.0 / num_tokens)
101 | self.weight.data.uniform_(-1.0, 1.0)
102 |
103 | self.cluster_size = nn.Parameter(torch.zeros(num_tokens), requires_grad=False) # counts for how many times the code is used.
104 | self.embed_avg = nn.Parameter(weight.clone(), requires_grad=False)
105 | self.update = True
106 |
107 | def forward(self, embed_id):
108 | return F.embedding(embed_id, self.weight)
109 |
110 | def cluster_size_ema_update(self, new_cluster_size):
111 | self.cluster_size.data.mul_(self.decay).add_(new_cluster_size, alpha=1 - self.decay)
112 |
113 | def embed_avg_ema_update(self, new_emb_avg):
114 | self.update_idxes = new_emb_avg.abs().sum(dim = -1) > 0
115 | self.embed_avg.data[self.update_idxes] = self.embed_avg.data[self.update_idxes].mul_(self.decay).add(new_emb_avg[self.update_idxes], alpha=1 - self.decay)
116 |
117 | def weight_update(self, num_tokens):
118 | n = self.cluster_size.sum()
119 | smoothed_cluster_size = ((self.cluster_size + self.eps) / (n + num_tokens*self.eps) * n)
120 | embed_normalized = self.embed_avg.clone() # clone so the in-place normalization below does not overwrite the EMA average
121 | embed_normalized[self.update_idxes] = self.embed_avg[self.update_idxes] / smoothed_cluster_size.unsqueeze(1)[self.update_idxes]
122 | self.weight.data.copy_(embed_normalized)
123 |
124 |
125 |
126 |
127 | class EMAVectorQuantizer(nn.Module):
128 | def __init__(self, n_embed, embedding_dim, beta, decay=0.99, eps=1e-5):
129 | super(EMAVectorQuantizer, self).__init__()
130 |
131 | self.codebook_dim = embedding_dim
132 | self.num_tokens = n_embed
133 | self.beta = beta
134 | self.embedding = EmbeddingEMA(self.num_tokens, self.codebook_dim, decay, eps)
135 |
136 | def forward(self, z, return_perplexity=False):
137 | z_flattened = z.view(-1, self.codebook_dim)
138 |
139 | d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
140 | torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
141 | torch.matmul(z_flattened, self.embedding.weight.t())
142 |
143 | min_encoding_indices = torch.argmin(d, dim=1)
144 | z_q = self.embedding(min_encoding_indices).view(z.shape)
145 |
146 | min_encodings = F.one_hot(min_encoding_indices, self.num_tokens).type(z.dtype)
147 |
148 | if self.training and self.embedding.update:
149 | encoding_sum = min_encodings.sum(0)
150 | embed_sum = min_encodings.transpose(0, 1) @ z_flattened
151 |
152 | self.embedding.cluster_size_ema_update(encoding_sum)
153 | self.embedding.embed_avg_ema_update(embed_sum)
154 | self.embedding.weight_update(self.num_tokens)
155 |
156 | loss = self.beta * F.mse_loss(z_q.detach(), z)
157 |
158 | z_q = z + (z_q - z).detach()
159 |
160 | if return_perplexity:
161 | e_mean = torch.mean(min_encodings, dim=0)
162 | perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
163 | return loss, z_q, min_encoding_indices, perplexity
164 | else:
165 | return loss, z_q, min_encoding_indices
166 |
167 |
--------------------------------------------------------------------------------
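
A hedged usage sketch of the straight-through Quantizer above inside an encoder/decoder step (the shapes and hyperparameters are illustrative):

import torch
from phc.learning.vq_quantizer import Quantizer

quantizer = Quantizer(n_e=512, e_dim=64, beta=0.25)
z = torch.randn(8, 32, 64)  # (B, seq_len, channel), e.g. from an encoder
vq_loss, z_q, indices, perplexity = quantizer(z, return_perplexity=True)

# z_q passes gradients straight through to z, so a reconstruction loss on the
# decoder output trains the encoder while vq_loss pulls codes and latents together.
print(z_q.shape, indices.shape, perplexity.item())
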