├── .gitignore
├── Readme.md
├── amp
├── __init__.py
├── data
│ ├── assets
│ │ ├── mesh
│ │ │ └── smpl
│ │ │ │ └── 5117b308-a58e-4020-9d6b-4dd1930a10f6
│ │ │ │ └── geom
│ │ │ │ ├── Chest.stl
│ │ │ │ ├── Head.stl
│ │ │ │ ├── Hips.stl
│ │ │ │ ├── L_Ankle.stl
│ │ │ │ ├── L_Elbow.stl
│ │ │ │ ├── L_Hand.stl
│ │ │ │ ├── L_Hip.stl
│ │ │ │ ├── L_Index1.stl
│ │ │ │ ├── L_Index2.stl
│ │ │ │ ├── L_Index3.stl
│ │ │ │ ├── L_Knee.stl
│ │ │ │ ├── L_Middle1.stl
│ │ │ │ ├── L_Middle2.stl
│ │ │ │ ├── L_Middle3.stl
│ │ │ │ ├── L_Pinky1.stl
│ │ │ │ ├── L_Pinky2.stl
│ │ │ │ ├── L_Pinky3.stl
│ │ │ │ ├── L_Ring1.stl
│ │ │ │ ├── L_Ring2.stl
│ │ │ │ ├── L_Ring3.stl
│ │ │ │ ├── L_Shoulder.stl
│ │ │ │ ├── L_Thorax.stl
│ │ │ │ ├── L_Thumb1.stl
│ │ │ │ ├── L_Thumb2.stl
│ │ │ │ ├── L_Thumb3.stl
│ │ │ │ ├── L_Toe.stl
│ │ │ │ ├── L_Wrist.stl
│ │ │ │ ├── LeftArm.stl
│ │ │ │ ├── LeftChest.stl
│ │ │ │ ├── LeftFoot.stl
│ │ │ │ ├── LeftHand.stl
│ │ │ │ ├── LeftLeg.stl
│ │ │ │ ├── LeftShoulder.stl
│ │ │ │ ├── LeftToe.stl
│ │ │ │ ├── LeftUpLeg.stl
│ │ │ │ ├── LeftWrist.stl
│ │ │ │ ├── Mouth.stl
│ │ │ │ ├── Neck.stl
│ │ │ │ ├── Pelvis.stl
│ │ │ │ ├── R_Ankle.stl
│ │ │ │ ├── R_Elbow.stl
│ │ │ │ ├── R_Hand.stl
│ │ │ │ ├── R_Hip.stl
│ │ │ │ ├── R_Index1.stl
│ │ │ │ ├── R_Index2.stl
│ │ │ │ ├── R_Index3.stl
│ │ │ │ ├── R_Knee.stl
│ │ │ │ ├── R_Middle1.stl
│ │ │ │ ├── R_Middle2.stl
│ │ │ │ ├── R_Middle3.stl
│ │ │ │ ├── R_Ring1.stl
│ │ │ │ ├── R_Ring2.stl
│ │ │ │ ├── R_Ring3.stl
│ │ │ │ ├── R_Shoulder.stl
│ │ │ │ ├── R_Thorax.stl
│ │ │ │ ├── R_Thumb1.stl
│ │ │ │ ├── R_Thumb2.stl
│ │ │ │ ├── R_Thumb3.stl
│ │ │ │ ├── R_Toe.stl
│ │ │ │ ├── R_Wrist.stl
│ │ │ │ ├── R_pinky1.stl
│ │ │ │ ├── R_pinky2.stl
│ │ │ │ ├── R_pinky3.stl
│ │ │ │ ├── RightArm.stl
│ │ │ │ ├── RightChest.stl
│ │ │ │ ├── RightFoot.stl
│ │ │ │ ├── RightHand.stl
│ │ │ │ ├── RightLeg.stl
│ │ │ │ ├── RightShoulder.stl
│ │ │ │ ├── RightToe.stl
│ │ │ │ ├── RightUpLeg.stl
│ │ │ │ ├── RightWrist.stl
│ │ │ │ ├── Spine.stl
│ │ │ │ ├── Spine1.stl
│ │ │ │ ├── Spine2.stl
│ │ │ │ └── Torso.stl
│ │ └── mjcf
│ │ │ ├── amp_humanoid.xml
│ │ │ ├── amp_humanoid_sword_shield.xml
│ │ │ ├── ball_medium.urdf
│ │ │ ├── block_feet.xml
│ │ │ ├── block_projectile.urdf
│ │ │ ├── block_projectile_large.urdf
│ │ │ ├── capsule.urdf
│ │ │ ├── capsule_feet.xml
│ │ │ ├── heading_marker.urdf
│ │ │ ├── humanoid_template_local.xml
│ │ │ ├── location_marker.urdf
│ │ │ ├── master_block_feet.xml
│ │ │ ├── mesh_feet.xml
│ │ │ ├── mesh_humanoid.xml
│ │ │ ├── ov_humanoid.xml
│ │ │ ├── ov_humanoid_sword_shield.xml
│ │ │ ├── sensor_marker.urdf
│ │ │ ├── smpl_humanoid.xml
│ │ │ ├── smpl_humanoid_1.xml
│ │ │ ├── smpl_mesh_humanoid_amass_v1.xml
│ │ │ ├── smplh_humanoid_1.xml
│ │ │ ├── strike_target.urdf
│ │ │ └── traj_marker.urdf
│ └── cfg
│ │ └── release
│ │ ├── mlp_slim_exp_v1.yaml
│ │ └── mlp_slim_exp_v2.yaml
├── env
│ ├── tasks
│ │ ├── __init__.py
│ │ ├── base_task.py
│ │ ├── humanoid.py
│ │ ├── humanoid_amp.py
│ │ ├── humanoid_amp_task.py
│ │ ├── humanoid_pedestrain_terrain.py
│ │ ├── humanoid_pedestrain_terrain_im.py
│ │ ├── humanoid_pedestrian.py
│ │ ├── humanoid_traj.py
│ │ ├── vec_task.py
│ │ └── vec_task_wrappers.py
│ └── util
│ │ ├── gym_util.py
│ │ └── traj_generator.py
├── learning
│ ├── amp_continuous.py
│ ├── amp_continuous_value.py
│ ├── amp_datasets.py
│ ├── amp_models.py
│ ├── amp_network_builder.py
│ ├── amp_players.py
│ ├── amp_value_players.py
│ ├── common_agent.py
│ ├── common_player.py
│ ├── network_builder.py
│ └── replay_buffer.py
├── run.py
└── utils
│ ├── __init__.py
│ ├── benchmarking.py
│ ├── config.py
│ ├── data_tree.py
│ ├── draw_utils.py
│ ├── flags.py
│ ├── konia_transform.py
│ ├── logger.py
│ ├── motion_lib.py
│ ├── motion_lib_smpl.py
│ ├── parse_task.py
│ ├── tools.py
│ ├── torch_transforms.py
│ └── torch_utils.py
├── assets
└── teaser.gif
├── poselib
├── .gitignore
├── README.md
├── __init__.py
├── poselib
│ ├── __init__.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── backend
│ │ │ ├── __init__.py
│ │ │ ├── abstract.py
│ │ │ └── logger.py
│ │ ├── rotation3d.py
│ │ ├── tensor_utils.py
│ │ └── tests
│ │ │ ├── __init__.py
│ │ │ └── test_rotation.py
│ ├── skeleton
│ │ ├── __init__.py
│ │ ├── backend
│ │ │ ├── __init__.py
│ │ │ └── fbx
│ │ │ │ ├── __init__.py
│ │ │ │ ├── fbx_py27_backend.py
│ │ │ │ └── fbx_read_wrapper.py
│ │ ├── skeleton3d.py
│ │ └── tests
│ │ │ ├── __init__.py
│ │ │ ├── ant.xml
│ │ │ ├── test_skeleton.py
│ │ │ └── transfer_npy.py
│ └── visualization
│ │ ├── __init__.py
│ │ ├── common.py
│ │ ├── core.py
│ │ ├── plt_plotter.py
│ │ ├── simple_plotter_tasks.py
│ │ ├── skeleton_plotter_tasks.py
│ │ └── tests
│ │ ├── __init__.py
│ │ └── test_plotter.py
└── setup.py
├── requirements.txt
└── uhc
├── __init__.py
├── assets
└── mjcf
│ └── humanoid_template_local.xml
├── khrylib
├── __init__.py
├── mocap
│ ├── mocap_to_mujoco.py
│ ├── pose.py
│ ├── skeleton.py
│ ├── skeleton_bvh.py
│ ├── skeleton_local.py
│ ├── skeleton_mesh.py
│ ├── skeleton_mesh_local.py
│ └── vis_joint_range.py
├── models
│ ├── __init__.py
│ ├── cmlp.py
│ ├── discriminator.py
│ ├── erd_net.py
│ ├── mlp.py
│ ├── mobile_net.py
│ ├── resnet.py
│ ├── rnn.py
│ └── simple_cnn.py
├── rl
│ ├── __init__.py
│ ├── agents
│ │ ├── __init__.py
│ │ ├── agent.py
│ │ ├── agent_pg.py
│ │ ├── agent_ppo.py
│ │ └── agent_trpo.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── common.py
│ │ ├── critic.py
│ │ ├── distributions.py
│ │ ├── logger_rl.py
│ │ ├── policy.py
│ │ ├── policy_disc.py
│ │ ├── policy_gaussian.py
│ │ ├── running_norm.py
│ │ └── trajbatch.py
│ ├── envs
│ │ ├── common
│ │ │ ├── mjviewer.py
│ │ │ └── mujoco_env.py
│ │ └── visual
│ │ │ └── humanoid_vis.py
│ └── utils
│ │ └── visualizer.py
├── scripts
│ └── create_vis_model.py
└── utils
│ ├── __init__.py
│ ├── logger.py
│ ├── math.py
│ ├── memory.py
│ ├── mujoco.py
│ ├── tools.py
│ ├── torch.py
│ ├── transformation.py
│ └── zfilter.py
├── smpllib
├── np_smpl_humanoid_batch.py
├── numpy_smpl_humanoid.py
├── smpl_eval.py
├── smpl_local_robot.py
├── smpl_mujoco.py
├── smpl_parser.py
├── smpl_robot.py
├── torch_smpl_humanoid.py
└── torch_smpl_humanoid_batch.py
└── utils
├── config_utils
├── base_config.py
├── copycat_config.py
└── uhm_config.py
├── convert_amass_isaac.py
├── convert_amass_isaac_slim.py
├── copycat_visualizer.py
├── create_h36m_humanoid.py
├── flags.py
├── geom.py
├── h36m_specs.py
├── image_utils.py
├── kp_utils.py
├── lightning_utils.py
├── math_utils.py
├── math_utils_new.py
├── replay_data.py
├── rotation_conversions.py
├── tools.py
├── torch_ext.py
├── torch_geometry_transforms.py
├── torch_utils.py
├── transform_utils.py
├── transformation.py
└── vis_model_utils.py
/.gitignore:
--------------------------------------------------------------------------------
*.pyc
*.pth
*.png
*.jpg
*.jpeg
*.mp4
*.avi
--------------------------------------------------------------------------------
/amp/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Chest.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Chest.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Head.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Head.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Hips.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Hips.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Ankle.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Ankle.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Elbow.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Elbow.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Hand.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Hand.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Hip.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Hip.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Index1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Index1.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Index2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Index2.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Index3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Index3.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Knee.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Knee.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Middle1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Middle1.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Middle2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Middle2.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Middle3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Middle3.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Pinky1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Pinky1.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Pinky2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Pinky2.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Pinky3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Pinky3.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Ring1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Ring1.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Ring2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Ring2.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Ring3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Ring3.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Shoulder.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Shoulder.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Thorax.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Thorax.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Thumb1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Thumb1.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Thumb2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Thumb2.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Thumb3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Thumb3.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Toe.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Toe.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Wrist.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/L_Wrist.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftArm.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftArm.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftChest.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftChest.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftFoot.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftFoot.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftHand.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftHand.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftLeg.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftLeg.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftShoulder.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftShoulder.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftToe.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftToe.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftUpLeg.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftUpLeg.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftWrist.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/LeftWrist.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Mouth.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Mouth.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Neck.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Neck.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Pelvis.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Pelvis.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Ankle.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Ankle.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Elbow.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Elbow.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Hand.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Hand.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Hip.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Hip.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Index1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Index1.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Index2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Index2.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Index3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Index3.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Knee.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Knee.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Middle1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Middle1.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Middle2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Middle2.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Middle3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Middle3.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Ring1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Ring1.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Ring2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Ring2.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Ring3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Ring3.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Shoulder.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Shoulder.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Thorax.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Thorax.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Thumb1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Thumb1.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Thumb2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Thumb2.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Thumb3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Thumb3.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Toe.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Toe.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Wrist.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_Wrist.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_pinky1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_pinky1.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_pinky2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_pinky2.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_pinky3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/R_pinky3.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightArm.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightArm.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightChest.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightChest.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightFoot.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightFoot.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightHand.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightHand.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightLeg.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightLeg.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightShoulder.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightShoulder.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightToe.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightToe.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightUpLeg.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightUpLeg.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightWrist.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/RightWrist.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Spine.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Spine.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Spine1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Spine1.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Spine2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Spine2.stl
--------------------------------------------------------------------------------
/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Torso.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/amp/data/assets/mesh/smpl/5117b308-a58e-4020-9d6b-4dd1930a10f6/geom/Torso.stl
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/ball_medium.urdf:
--------------------------------------------------------------------------------
[file body not captured: 22 lines of URDF markup were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/block_feet.xml:
--------------------------------------------------------------------------------
[file body not captured: 34 lines of MJCF XML were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/block_projectile.urdf:
--------------------------------------------------------------------------------
[file body not captured: 22 lines of URDF markup were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/block_projectile_large.urdf:
--------------------------------------------------------------------------------
[file body not captured: 22 lines of URDF markup were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/capsule.urdf:
--------------------------------------------------------------------------------
[file body not captured: 21 lines of URDF markup were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/capsule_feet.xml:
--------------------------------------------------------------------------------
[file body not captured: 53 lines of MJCF XML were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/heading_marker.urdf:
--------------------------------------------------------------------------------
[file body not captured: 22 lines of URDF markup were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/humanoid_template_local.xml:
--------------------------------------------------------------------------------
[file body not captured: 40 lines of MJCF XML were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/location_marker.urdf:
--------------------------------------------------------------------------------
[file body not captured: 22 lines of URDF markup were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/master_block_feet.xml:
--------------------------------------------------------------------------------
[file body not captured: 45 lines of MJCF XML were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/mesh_feet.xml:
--------------------------------------------------------------------------------
[file body not captured: 42 lines of MJCF XML were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/sensor_marker.urdf:
--------------------------------------------------------------------------------
[file body not captured: 22 lines of URDF markup were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/strike_target.urdf:
--------------------------------------------------------------------------------
[file body not captured: 63 lines of URDF markup were stripped during extraction]
--------------------------------------------------------------------------------
/amp/data/assets/mjcf/traj_marker.urdf:
--------------------------------------------------------------------------------
[file body not captured: 22 lines of URDF markup were stripped during extraction]
--------------------------------------------------------------------------------
/amp/env/tasks/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
--------------------------------------------------------------------------------
/amp/env/tasks/humanoid_amp_task.py:
--------------------------------------------------------------------------------
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import torch

import env.tasks.humanoid_amp as humanoid_amp


class HumanoidAMPTask(humanoid_amp.HumanoidAMP):
    def __init__(self, cfg, sim_params, physics_engine, device_type, device_id, headless):
        self._enable_task_obs = cfg["env"]["enableTaskObs"]

        super().__init__(cfg=cfg,
                         sim_params=sim_params,
                         physics_engine=physics_engine,
                         device_type=device_type,
                         device_id=device_id,
                         headless=headless)
        self.has_task = True
        return

    def get_obs_size(self):
        obs_size = super().get_obs_size()
        if (self._enable_task_obs):
            task_obs_size = self.get_task_obs_size()
            obs_size += task_obs_size
        return obs_size

    def get_task_obs_size(self):
        return 0

    def get_task_obs_size_detail(self):
        return NotImplemented

    def pre_physics_step(self, actions):
        super().pre_physics_step(actions)
        self._update_task()
        return

    def render(self, sync_frame_time=False):
        super().render(sync_frame_time)

        if self.viewer:
            self._draw_task()
        return

    def _update_task(self):
        return

    def _reset_envs(self, env_ids):
        self._reset_default_env_ids = []
        self._reset_ref_env_ids = []
        if len(env_ids) > 0:
            self._state_reset_happened = True
            self.obs_hist_buf[env_ids] *= 0
            self.temporal_obs_buf[env_ids] *= 0
            self.action_hist_buf[env_ids] *= 0
            self.reward_hist_buf[env_ids] *= 0
            self._reset_actors(env_ids)
            self._reset_env_tensors(env_ids)
            self._refresh_sim_tensors()
            self._reset_task(env_ids)
            self._compute_observations(env_ids)
            self._init_amp_obs(env_ids)
        return

    def _reset_task(self, env_ids):
        return

    def _compute_observations(self, env_ids=None):
        # env_ids is used for resetting
        humanoid_obs = self._compute_humanoid_obs(env_ids)

        if (self._enable_task_obs):
            task_obs = self._compute_task_obs(env_ids)
            obs = torch.cat([humanoid_obs, task_obs], dim=-1)
        else:
            obs = humanoid_obs

        if self.has_flip_observation:
            flip_obs = self._compute_flip_humanoid_obs(env_ids)

            if (self._enable_task_obs):
                flip_task_obs = self._compute_flip_task_obs(task_obs, env_ids)
                flip_obs = torch.cat([flip_obs, flip_task_obs], dim=-1)

            if (env_ids is None):
                self._flip_obs_buf[:] = flip_obs
            else:
                self._flip_obs_buf[env_ids] = flip_obs

        #### added by jingbo: register obs history for transformer
        self.register_obs_hist(env_ids, obs)  ### for all observations
        self.register_obs_buf(env_ids, humanoid_obs)  ### for humanoid observations

        if (env_ids is None):
            if self._temporal_output:
                obs = torch.cat([self.obs_hist_buf, self.action_hist_buf], dim=1).reshape(self.num_envs, -1)
            if self.use_temporal_buf:
                obs = torch.cat([obs, self.temporal_obs_buf.reshape(self.num_envs, -1)], dim=-1)
            self.obs_buf[:] = obs
        else:
            if self._temporal_output:
                obs = torch.cat([self.obs_hist_buf[env_ids], self.action_hist_buf[env_ids]], dim=1).reshape(env_ids.shape[0], -1)
            if self.use_temporal_buf:
                obs = torch.cat([obs, self.temporal_obs_buf[env_ids].reshape(env_ids.shape[0], -1)], dim=-1)
            self.obs_buf[env_ids] = obs
        return

    def _compute_task_obs(self, env_ids=None):
        return NotImplemented

    def _compute_reward(self, actions):
        return NotImplemented

    def _draw_task(self):
        return
--------------------------------------------------------------------------------
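
Note: HumanoidAMPTask is an abstract base; concrete tasks (humanoid_traj.py, humanoid_pedestrain_terrain.py, etc.) override get_task_obs_size, _compute_task_obs, and _compute_reward. A minimal sketch of that override pattern follows; the goal buffer self._goal_pos and the reach-style reward are hypothetical illustrations, not part of this repo, while self._humanoid_root_states and self.rew_buf come from the humanoid base classes:

    # Hypothetical task sketching the overrides HumanoidAMPTask expects.
    # Assumes a (num_envs, 3) goal buffer self._goal_pos maintained in _reset_task.
    import torch
    import env.tasks.humanoid_amp_task as humanoid_amp_task

    class HumanoidReachDemo(humanoid_amp_task.HumanoidAMPTask):
        def get_task_obs_size(self):
            return 3  # 3-D relative goal position

        def _compute_task_obs(self, env_ids=None):
            root_pos = self._humanoid_root_states[..., 0:3]
            goal = self._goal_pos
            if env_ids is not None:
                root_pos, goal = root_pos[env_ids], goal[env_ids]
            return goal - root_pos  # goal expressed relative to the root

        def _compute_reward(self, actions):
            dist = torch.norm(self._goal_pos - self._humanoid_root_states[..., 0:3], dim=-1)
            self.rew_buf[:] = torch.exp(-0.5 * dist)
            return
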
/amp/env/tasks/humanoid_pedestrian.py:
--------------------------------------------------------------------------------
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import torch
import numpy as np

import env.tasks.humanoid_traj as humanoid_traj
from isaacgym import gymapi


class HumanoidPedestrian(humanoid_traj.HumanoidTraj):
    def __init__(self, cfg, sim_params, physics_engine, device_type, device_id, headless):
        super().__init__(cfg=cfg,
                         sim_params=sim_params,
                         physics_engine=physics_engine,
                         device_type=device_type,
                         device_id=device_id,
                         headless=headless)
        return

    def _create_ground_plane(self):
        plane_params = gymapi.PlaneParams()
        plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
        plane_params.distance = 10
        plane_params.static_friction = self.plane_static_friction
        plane_params.dynamic_friction = self.plane_dynamic_friction
        plane_params.restitution = self.plane_restitution
        self.gym.add_ground(self.sim, plane_params)

        mesh_data = np.load("data/mesh/mesh_simplified_3.npz")
        mesh_vertices = mesh_data["vertices"]
        mesh_triangles = mesh_data["faces"].astype(np.uint32)

        tm_params = gymapi.TriangleMeshParams()
        tm_params.nb_vertices = mesh_vertices.shape[0]
        tm_params.nb_triangles = mesh_triangles.shape[0]
        tm_params.transform.p.x = 0.0
        tm_params.transform.p.y = 0.0
        tm_params.transform.p.z = 0.0
        tm_params.static_friction = self.plane_static_friction
        tm_params.dynamic_friction = self.plane_dynamic_friction
        tm_params.restitution = self.plane_restitution

        self.gym.add_triangle_mesh(self.sim, mesh_vertices.flatten(order='C'), mesh_triangles.flatten(order='C'), tm_params)

        return
--------------------------------------------------------------------------------
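
Note: _create_ground_plane expects data/mesh/mesh_simplified_3.npz to hold a vertices array of shape (V, 3) and an integer faces array of shape (F, 3); the loader casts faces to uint32 itself. A minimal sketch of writing a compatible file (the single ground quad is illustrative geometry, not the repo's actual mesh):

    import numpy as np

    # Two triangles forming a 100 m x 100 m quad at z = 0 (illustrative).
    vertices = np.array([[-50, -50, 0], [50, -50, 0], [50, 50, 0], [-50, 50, 0]],
                        dtype=np.float32)
    faces = np.array([[0, 1, 2], [0, 2, 3]], dtype=np.uint32)
    np.savez("data/mesh/mesh_simplified_3.npz", vertices=vertices, faces=faces)
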
/amp/env/tasks/vec_task.py:
--------------------------------------------------------------------------------
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

from gym import spaces

from isaacgym import gymtorch
from isaacgym.torch_utils import to_torch
import torch
import numpy as np


# VecEnv Wrapper for RL training
class VecTask():
    def __init__(self, task, rl_device, clip_observations=5.0, clip_actions=1.0):
        self.task = task

        self.num_environments = task.num_envs
        self.num_agents = 1  # used for multi-agent environments
        self.num_observations = task.num_obs
        self.num_states = task.num_states
        self.num_actions = task.num_actions

        self.obs_space = spaces.Box(np.ones(self.num_obs) * -np.Inf, np.ones(self.num_obs) * np.Inf)
        self.state_space = spaces.Box(np.ones(self.num_states) * -np.Inf, np.ones(self.num_states) * np.Inf)
        self.act_space = spaces.Box(np.ones(self.num_actions) * -1., np.ones(self.num_actions) * 1.)

        self.clip_obs = clip_observations
        self.clip_actions = clip_actions
        self.rl_device = rl_device

        print("RL device: ", rl_device)

    def step(self, actions):
        raise NotImplementedError

    def reset(self):
        raise NotImplementedError

    def get_number_of_agents(self):
        return self.num_agents

    @property
    def observation_space(self):
        return self.obs_space

    @property
    def action_space(self):
        return self.act_space

    @property
    def num_envs(self):
        return self.num_environments

    @property
    def num_acts(self):
        return self.num_actions

    @property
    def num_obs(self):
        return self.num_observations


# C++ CPU Class
class VecTaskCPU(VecTask):
    def __init__(self, task, rl_device, sync_frame_time=False, clip_observations=5.0, clip_actions=1.0):
        super().__init__(task, rl_device, clip_observations=clip_observations, clip_actions=clip_actions)
        self.sync_frame_time = sync_frame_time

    def step(self, actions):
        actions = actions.cpu().numpy()
        self.task.render(self.sync_frame_time)

        obs, rewards, resets, extras = self.task.step(np.clip(actions, -self.clip_actions, self.clip_actions))

        return (to_torch(np.clip(obs, -self.clip_obs, self.clip_obs), dtype=torch.float, device=self.rl_device),
                to_torch(rewards, dtype=torch.float, device=self.rl_device),
                to_torch(resets, dtype=torch.uint8, device=self.rl_device), [])

    def reset(self):
        actions = 0.01 * (1 - 2 * np.random.rand(self.num_envs, self.num_actions)).astype('f')

        # step the simulator
        obs, rewards, resets, extras = self.task.step(actions)

        return to_torch(np.clip(obs, -self.clip_obs, self.clip_obs), dtype=torch.float, device=self.rl_device)


# C++ GPU Class
class VecTaskGPU(VecTask):
    def __init__(self, task, rl_device, clip_observations=5.0, clip_actions=1.0):
        super().__init__(task, rl_device, clip_observations=clip_observations, clip_actions=clip_actions)

        self.obs_tensor = gymtorch.wrap_tensor(self.task.obs_tensor, counts=(self.task.num_envs, self.task.num_obs))
        self.rewards_tensor = gymtorch.wrap_tensor(self.task.rewards_tensor, counts=(self.task.num_envs,))
        self.resets_tensor = gymtorch.wrap_tensor(self.task.resets_tensor, counts=(self.task.num_envs,))

    def step(self, actions):
        self.task.render(False)
        actions_clipped = torch.clamp(actions, -self.clip_actions, self.clip_actions)
        actions_tensor = gymtorch.unwrap_tensor(actions_clipped)

        self.task.step(actions_tensor)

        return torch.clamp(self.obs_tensor, -self.clip_obs, self.clip_obs), self.rewards_tensor, self.resets_tensor, []

    def reset(self):
        actions = 0.01 * (1 - 2 * torch.rand([self.task.num_envs, self.task.num_actions], dtype=torch.float32, device=self.rl_device))
        actions_tensor = gymtorch.unwrap_tensor(actions)

        # step the simulator
        self.task.step(actions_tensor)

        return torch.clamp(self.obs_tensor, -self.clip_obs, self.clip_obs)


# Python CPU/GPU Class
class VecTaskPython(VecTask):

    def get_state(self):
        return torch.clamp(self.task.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)

    def step(self, actions):
        actions_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions)

        self.task.step(actions_tensor)

        return torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device), self.task.rew_buf.to(self.rl_device), self.task.reset_buf.to(self.rl_device), self.task.extras

    def reset(self):
        actions = 0.01 * (1 - 2 * torch.rand([self.task.num_envs, self.task.num_actions], dtype=torch.float32, device=self.rl_device))

        # step the simulator
        self.task.step(actions)

        return torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
--------------------------------------------------------------------------------
/amp/env/tasks/vec_task_wrappers.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | from gym import spaces
9 | import numpy as np
10 | import torch
11 | from env.tasks.vec_task import VecTaskCPU, VecTaskGPU, VecTaskPython
12 |
13 | class VecTaskCPUWrapper(VecTaskCPU):
14 | def __init__(self, task, rl_device, sync_frame_time=False, clip_observations=5.0, clip_actions=1.0):
15 | super().__init__(task, rl_device, sync_frame_time, clip_observations, clip_actions)
16 | return
17 |
18 | class VecTaskGPUWrapper(VecTaskGPU):
19 | def __init__(self, task, rl_device, clip_observations=5.0, clip_actions=1.0):
20 | super().__init__(task, rl_device, clip_observations, clip_actions)
21 | return
22 |
23 |
24 | class VecTaskPythonWrapper(VecTaskPython):
25 | def __init__(self, task, rl_device, clip_observations=5.0, clip_actions=1.0):
26 | super().__init__(task, rl_device, clip_observations, clip_actions)
27 |
28 | self._amp_obs_space = spaces.Box(np.ones(task.get_num_amp_obs()) * -np.Inf, np.ones(task.get_num_amp_obs()) * np.Inf)
29 | return
30 |
31 | def reset(self, env_ids=None):
32 | self.task.reset(env_ids)
33 | return torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)
34 |
35 |
36 | @property
37 | def amp_observation_space(self):
38 | return self._amp_obs_space
39 |
40 | def fetch_amp_obs_demo(self, num_samples):
41 | return self.task.fetch_amp_obs_demo(num_samples)
--------------------------------------------------------------------------------
/amp/learning/amp_datasets.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from rl_games.common import datasets
3 |
4 | class AMPDataset(datasets.PPODataset):
5 | def __init__(self, batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len):
6 | super().__init__(batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len)
7 | self._idx_buf = torch.randperm(batch_size)
8 | return
9 |
10 | def update_mu_sigma(self, mu, sigma):
11 | raise NotImplementedError()
12 | return
13 |
14 | def _get_item(self, idx):
15 | start = idx * self.minibatch_size
16 | end = (idx + 1) * self.minibatch_size
17 | sample_idx = self._idx_buf[start:end]
18 |
19 | input_dict = {}
20 | for k,v in self.values_dict.items():
21 | if k not in self.special_names and v is not None:
22 | input_dict[k] = v[sample_idx]
23 |
24 | if (end >= self.batch_size):
25 | self._shuffle_idx_buf()
26 |
27 | return input_dict
28 |
29 | def _shuffle_idx_buf(self):
30 | self._idx_buf[:] = torch.randperm(self.batch_size)
31 | return
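32 |
33 |
34 | # Illustrative iteration (a sketch, not part of the original file; relies on the
35 | # rl_games PPODataset API this class inherits):
36 | #   dataset = AMPDataset(batch_size=4096, minibatch_size=512, is_discrete=False,
37 | #                        is_rnn=False, device="cuda:0", seq_len=4)
38 | #   dataset.update_values_dict(train_data)  # stash rollout tensors
39 | #   minibatch = dataset[0]                   # random 512-sample slice; reshuffles after the last minibatch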
--------------------------------------------------------------------------------
/amp/learning/amp_models.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from rl_games.algos_torch.models import ModelA2CContinuousLogStd
3 | import torch
4 | class ModelAMPContinuous(ModelA2CContinuousLogStd):
5 | def __init__(self, network):
6 | super().__init__(network)
7 | return
8 |
9 | def build(self, config):
10 | net = self.network_builder.build('amp', **config)
11 | for name, _ in net.named_parameters():
12 | print(name)
13 | return ModelAMPContinuous.Network(net)
14 |
15 | class Network(ModelA2CContinuousLogStd.Network):
16 | def __init__(self, a2c_network):
17 | super().__init__(a2c_network)
18 | return
19 |
20 | def forward(self, input_dict):
21 | is_train = input_dict.get('is_train', True)
22 | amp_dropout = input_dict.get("amp_dropout", False)
23 | amp_steps = input_dict.get("amp_steps", 2)
24 | env_cfg = input_dict.get("env_cfg", None)
25 | result = super().forward(input_dict)
26 |
27 | if (is_train):
28 | amp_obs, amp_obs_replay, amp_demo_obs = input_dict['amp_obs'], input_dict['amp_obs_replay'], input_dict['amp_obs_demo']
29 | if amp_dropout:
30 | dropout_mask = self.get_dropout_mask(input_dict['amp_obs'], amp_steps, env_cfg)
31 | amp_obs, amp_obs_replay, amp_demo_obs = self.dropout_amp_obs(amp_obs, dropout_mask[..., 0]), \
32 | self.dropout_amp_obs(amp_obs_replay, dropout_mask[..., 1]), self.dropout_amp_obs(amp_demo_obs, dropout_mask[..., 2])
33 | del dropout_mask
34 |
35 | disc_agent_logit = self.a2c_network.eval_disc(amp_obs)
36 | result["disc_agent_logit"] = disc_agent_logit
37 |
38 | disc_agent_replay_logit = self.a2c_network.eval_disc(amp_obs_replay)
39 | result["disc_agent_replay_logit"] = disc_agent_replay_logit
40 |
41 | disc_demo_logit = self.a2c_network.eval_disc(amp_demo_obs)
42 | result["disc_demo_logit"] = disc_demo_logit
43 |
44 | return result
45 |
46 | def dropout_amp_obs(self, amp_obs, dropout_mask):
47 | return amp_obs * dropout_mask
48 |
49 | def get_dropout_mask(self,
50 | amp_obs,
51 | steps,
52 | env_cfg,
53 | num_masks=3,
54 | dropout_rate=0.3):
55 | # ZL Hack: amp_obs_dims, should drop out whole joints
56 | # [root_rot 6, root_vel 3, root_ang_vel 3, dof_pos 23 * 6 - 4 * 6, dof_vel 69 - 12, key_body_pos 3 * 4, shape_obs_disc 11]
57 | # [root_rot 6, root_vel 3, root_ang_vel 3, dof_pos 23 * 6 - 4 * 6, dof_vel 69 - 12, key_body_pos 3 * 4, shape_obs_disc 47]
58 | # 6 + 3 + 3 + 19 * 6 + 19 * 3 + 3 * 4 + 11 = 206 # normal with betas
59 | # 6 + 3 + 3 + 19 * 6 + 19 * 3 + 3 * 4 + 10 = 205 # concise limb weight
60 | # 6 + 3 + 3 + 19 * 6 + 19 * 3 + 3 * 4 + 59 = 254 # masterfoot
61 | B, F = amp_obs.shape
62 | B, _, amp_f = amp_obs.view(B, steps, -1).shape
63 | masterfoot, remove_neck = env_cfg["env"].get("masterfoot", False), env_cfg["env"].get("remove_neck", False)
64 | amp_smpl_keypoint_observation = env_cfg["env"].get("ampSMPLKeypointObs", False)
65 |         # sanity-check the per-step AMP observation dim against the known layouts above
66 |         expected_dims = (161, 187, 188, 195, 197, 205, 206, 254)
67 |         if F / steps not in expected_dims:
68 |             print(F / steps)
69 |             import ipdb
70 |             ipdb.set_trace()
71 |
72 | dof_joints_offset = 12 # 6 + 3 + 3
73 | num_joints = 19
74 | if not amp_smpl_keypoint_observation:
75 | dof_vel_offsets = 126 # 12 + 19 * 6
76 | if F / steps == 197: # Remove one joint
77 | dof_vel_offsets = 120
78 | num_joints = 18
79 | elif remove_neck: # Remove two joints
80 | dof_vel_offsets = 114
81 | num_joints = 17
82 |
83 | dropout_mask = torch.ones([B, amp_f, num_masks])
84 |
85 | for idx_joint in range(num_joints):
86 | has_drop_out = torch.rand(B, num_masks) > dropout_rate
87 | dropout_mask[:, dof_joints_offset + idx_joint * 6 : dof_joints_offset + idx_joint * 6 + 6, :] = has_drop_out[:, None]
88 | dropout_mask[:, dof_vel_offsets + idx_joint * 3 : dof_vel_offsets + idx_joint * 3 + 3, :] = has_drop_out[:, None]
89 | else:
90 | num_joints = 23
91 | dropout_mask = torch.ones([B, amp_f, num_masks])
92 | for idx_joint in range(num_joints):
93 | has_drop_out = torch.rand(B, num_masks) > dropout_rate
94 | dropout_mask[:, dof_joints_offset + idx_joint * 6 : dof_joints_offset + idx_joint * 6 + 6, :] = has_drop_out[:, None]
95 | return dropout_mask.repeat(1, steps, 1).to(amp_obs)
96 |
--------------------------------------------------------------------------------
/amp/learning/amp_network_builder.py:
--------------------------------------------------------------------------------
1 | from rl_games.algos_torch import torch_ext
2 | from rl_games.algos_torch import layers
3 | #from rl_games.algos_torch import network_builder
4 | from amp.learning import network_builder
5 |
6 | import torch
7 | import torch.nn as nn
8 | import numpy as np
9 |
10 | DISC_LOGIT_INIT_SCALE = 1.0
11 |
12 | class AMPBuilder(network_builder.A2CBuilder):
13 | def __init__(self, **kwargs):
14 | super().__init__(**kwargs)
15 | return
16 |
17 | class Network(network_builder.A2CBuilder.Network):
18 | def __init__(self, params, **kwargs):
19 | super().__init__(params, **kwargs)
20 |
21 | if self.is_continuous:
22 | if (not self.space_config['learn_sigma']):
23 | actions_num = kwargs.get('actions_num')
24 | sigma_init = self.init_factory.create(**self.space_config['sigma_init'])
25 | self.sigma = nn.Parameter(torch.zeros(actions_num, requires_grad=False, dtype=torch.float32), requires_grad=False)
26 | sigma_init(self.sigma)
27 |
28 | amp_input_shape = kwargs.get('amp_input_shape')
29 | self._build_disc(amp_input_shape)
30 |
31 | return
32 |
33 | def load(self, params):
34 | super().load(params)
35 |
36 | self._disc_units = params['disc']['units']
37 | self._disc_activation = params['disc']['activation']
38 | self._disc_initializer = params['disc']['initializer']
39 | return
40 |
41 | def forward(self, obs_dict):
42 | obs = obs_dict['obs']
43 | states = obs_dict.get('rnn_states', None)
44 |
45 | actor_outputs = self.eval_actor(obs)
46 | value = self.eval_critic(obs)
47 |
48 | output = actor_outputs + (value, states)
49 |
50 | return output
51 |
52 | def eval_actor(self, obs):
53 | #obs = obs_dict['obs']
54 |
55 | a_out = self.actor_cnn(obs)
56 | a_out = a_out.contiguous().view(a_out.size(0), -1)
57 | a_out = self.actor_mlp(a_out)
58 |
59 | if self.is_discrete:
60 | logits = self.logits(a_out)
61 | return logits
62 |
63 | if self.is_multi_discrete:
64 | logits = [logit(a_out) for logit in self.logits]
65 | return logits
66 |
67 | if self.is_continuous:
68 | mu = self.mu_act(self.mu(a_out))
69 | if self.space_config['fixed_sigma']:
70 | sigma = mu * 0.0 + self.sigma_act(self.sigma)
71 | else:
72 | sigma = self.sigma_act(self.sigma(a_out))
73 |
74 |
75 | return mu, sigma
76 | return
77 |
78 | def eval_critic(self, obs):
79 | c_out = self.critic_cnn(obs)
80 | c_out = c_out.contiguous().view(c_out.size(0), -1)
81 | c_out = self.critic_mlp(c_out)
82 | value = self.value_act(self.value(c_out))
83 | return value
84 |
85 | def eval_disc(self, amp_obs):
86 | disc_mlp_out = self._disc_mlp(amp_obs)
87 | disc_logits = self._disc_logits(disc_mlp_out)
88 | return disc_logits
89 |
90 | def get_disc_logit_weights(self):
91 | return torch.flatten(self._disc_logits.weight)
92 |
93 | def get_disc_weights(self):
94 | weights = []
95 | for m in self._disc_mlp.modules():
96 | if isinstance(m, nn.Linear):
97 | weights.append(torch.flatten(m.weight))
98 |
99 | weights.append(torch.flatten(self._disc_logits.weight))
100 | return weights
101 |
102 | def _build_disc(self, input_shape):
103 | self._disc_mlp = nn.Sequential()
104 |
105 | mlp_args = {
106 | 'input_size' : input_shape[0],
107 | 'units' : self._disc_units,
108 | 'activation' : self._disc_activation,
109 | 'dense_func' : torch.nn.Linear
110 | }
111 | self._disc_mlp = self._build_mlp(**mlp_args)
112 |
113 | mlp_out_size = self._disc_units[-1]
114 | self._disc_logits = torch.nn.Linear(mlp_out_size, 1)
115 |
116 | mlp_init = self.init_factory.create(**self._disc_initializer)
117 | for m in self._disc_mlp.modules():
118 | if isinstance(m, nn.Linear):
119 | mlp_init(m.weight)
120 | if getattr(m, "bias", None) is not None:
121 | torch.nn.init.zeros_(m.bias)
122 |
123 | torch.nn.init.uniform_(self._disc_logits.weight, -DISC_LOGIT_INIT_SCALE, DISC_LOGIT_INIT_SCALE)
124 | torch.nn.init.zeros_(self._disc_logits.bias)
125 |
126 | return
127 |
128 | def build(self, name, **kwargs):
129 | net = AMPBuilder.Network(self.params, **kwargs)
130 | return net
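131 |
132 | # Illustrative 'disc' block consumed by Network.load() above (a sketch, not part of
133 | # the original file; the real values live in the training yaml's network config):
134 | #   disc:
135 | #     units: [1024, 512]
136 | #     activation: relu
137 | #     initializer:
138 | #       name: default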
--------------------------------------------------------------------------------
/amp/learning/amp_value_players.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | from rl_games.algos_torch import torch_ext
5 | from rl_games.algos_torch.running_mean_std import RunningMeanStd
6 | from rl_games.common.player import BasePlayer
7 |
8 | import learning.amp_players as amp_players
9 | import matplotlib
10 | import matplotlib.pyplot as plt
11 | import numpy as np
12 |
13 | class AMPPlayerContinuousValue(amp_players.AMPPlayerContinuous):
14 | def __init__(self, config):
15 | super().__init__(config)
16 | return
17 |
18 |
19 | def _post_step(self, info):
20 | super()._post_step(info)
21 | if (self.env.task.viewer):
22 | # self._amp_debug(info)
23 | self._task_value_debug(info)
24 |
25 |
26 | return
27 |
28 |
29 | def _task_value_debug(self, info):
30 | obs = info['obs']
31 | amp_obs = info['amp_obs']
32 | task_value = self._eval_task_value(obs)
33 | amp_obs_single = amp_obs[0:1]
34 |
35 | critic_value = self._eval_critic(obs)
36 | disc_pred = self._eval_disc(amp_obs_single)
37 | amp_rewards = self._calc_amp_rewards(amp_obs_single)
38 | disc_reward = amp_rewards['disc_rewards']
39 | plot_all = torch.cat([critic_value, task_value])
40 |         plotter_names = ("critic_value", "task_value")  # labels follow the concatenation order above
41 |         self.live_plotter(plot_all.cpu().numpy(), plotter_names=plotter_names)
42 | return
43 |
44 | def _eval_task_value(self, input):
45 | input = self._preproc_input(input)
46 | return self.model.a2c_network.eval_task_value(input)
47 |
48 | def live_plotter(self, w, plotter_names, identifier='', pause_time=0.00000001):
49 | matplotlib.use("Qt5agg")
50 | num_lines = len(w)
51 | if not hasattr(self, 'lines'):
52 | size = 100
53 | self.x_vec = np.linspace(0, 1, size + 1)[0:-1]
54 |             self.y_vecs = [np.zeros(len(self.x_vec)) for _ in range(num_lines)]  # one float buffer per plotted line (int arrays would truncate values)
55 | self.lines = [[] for i in range(num_lines)]
56 | # this is the call to matplotlib that allows dynamic plotting
57 | plt.ion()
58 |
59 | self.fig = plt.figure(figsize=(1, 1))
60 | ax = self.fig.add_subplot(111)
61 | # create a variable for the line so we can later update it
62 |
63 | for i in range(num_lines):
64 | l, = ax.plot(self.x_vec, self.y_vecs[i], '-o', alpha=0.8)
65 | self.lines[i] = l
66 |
67 | # update plot label/title
68 | plt.ylabel('Values')
69 |
70 | plt.title('{}'.format(identifier))
71 | plt.ylim((-0.75, 1.5))
72 | plt.gca().legend(plotter_names)
73 | plt.show()
74 |
75 | for i in range(num_lines):
76 | # after the figure, axis, and line are created, we only need to update the y-data
77 | self.y_vecs[i][-1] = w[i]
78 | self.lines[i].set_ydata(self.y_vecs[i])
79 | # this pauses the data so the figure/axis can catch up - the amount of pause can be altered above
80 | self.y_vecs[i] = np.append(self.y_vecs[i][1:], 0.0)
81 |
82 | # plt.pause(pause_time)
83 | self.fig.canvas.start_event_loop(0.001)
--------------------------------------------------------------------------------
/amp/learning/replay_buffer.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | class ReplayBuffer():
4 | def __init__(self, buffer_size, device):
5 | self._head = 0
6 | self._total_count = 0
7 | self._buffer_size = buffer_size
8 | self._device = device
9 | self._data_buf = None
10 | self._sample_idx = torch.randperm(buffer_size)
11 | self._sample_head = 0
12 |
13 | return
14 |
15 | def reset(self):
16 | self._head = 0
17 | self._total_count = 0
18 | self._reset_sample_idx()
19 | return
20 |
21 | def get_buffer_size(self):
22 | return self._buffer_size
23 |
24 | def get_total_count(self):
25 | return self._total_count
26 |
27 | def store(self, data_dict):
28 | if (self._data_buf is None):
29 | self._init_data_buf(data_dict)
30 |
31 | n = next(iter(data_dict.values())).shape[0]
32 | buffer_size = self.get_buffer_size()
33 | assert(n <= buffer_size)
34 |
35 | for key, curr_buf in self._data_buf.items():
36 | curr_n = data_dict[key].shape[0]
37 | assert(n == curr_n)
38 |
39 | store_n = min(curr_n, buffer_size - self._head)
40 | curr_buf[self._head:(self._head + store_n)] = data_dict[key][:store_n]
41 |
42 | remainder = n - store_n
43 | if (remainder > 0):
44 | curr_buf[0:remainder] = data_dict[key][store_n:]
45 |
46 | self._head = (self._head + n) % buffer_size
47 | self._total_count += n
48 |
49 | return
50 |
51 | def sample(self, n):
52 | total_count = self.get_total_count()
53 | buffer_size = self.get_buffer_size()
54 |
55 | idx = torch.arange(self._sample_head, self._sample_head + n)
56 | idx = idx % buffer_size
57 | rand_idx = self._sample_idx[idx]
58 | if (total_count < buffer_size):
59 | rand_idx = rand_idx % self._head
60 |
61 | samples = dict()
62 | for k, v in self._data_buf.items():
63 | samples[k] = v[rand_idx]
64 |
65 | self._sample_head += n
66 | if (self._sample_head >= buffer_size):
67 | self._reset_sample_idx()
68 |
69 | return samples
70 |
71 | def _reset_sample_idx(self):
72 | buffer_size = self.get_buffer_size()
73 | self._sample_idx[:] = torch.randperm(buffer_size)
74 | self._sample_head = 0
75 | return
76 |
77 | def _init_data_buf(self, data_dict):
78 | buffer_size = self.get_buffer_size()
79 | self._data_buf = dict()
80 |
81 | for k, v in data_dict.items():
82 | v_shape = v.shape[1:]
83 | self._data_buf[k] = torch.zeros((buffer_size,) + v_shape, device=self._device)
84 |
85 | return
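86 |
87 | # Illustrative usage (a sketch, not part of the original file):
88 | #   buf = ReplayBuffer(buffer_size=4096, device="cpu")
89 | #   buf.store({"amp_obs": torch.randn(32, 206)})  # writes wrap around circularly once full
90 | #   batch = buf.sample(16)                        # dict keyed like store(); values shaped [16, ...]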
--------------------------------------------------------------------------------
/amp/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
--------------------------------------------------------------------------------
/amp/utils/benchmarking.py:
--------------------------------------------------------------------------------
1 | from contextlib import contextmanager
2 | import time
3 | from collections import defaultdict
4 | import re
5 | import sys
6 |
7 | average_times = defaultdict(lambda: (0,0))
8 |
9 | @contextmanager
10 | def timeit(name):
11 | start = time.time()
12 | yield
13 | end = time.time()
14 | total_time, num_calls = average_times[name]
15 | total_time += end-start
16 | num_calls += 1
17 | print("TIME:", name, end-start, "| AVG", total_time / num_calls, f"| TOTAL {total_time} {num_calls}")
18 | average_times[name] = (total_time, num_calls)
19 |
20 | def time_decorator(func):
21 | def with_times(*args, **kwargs):
22 | with timeit(func.__name__):
23 | return func(*args, **kwargs)
24 | return with_times
25 |
26 |
27 | def recover_map(lines):
28 | info = {}
29 |     pattern = re.compile(r".* (.*) .* \| .* (.*\b) .*\| .* (.*) (.*)")  # parses the "TIME: ..." lines printed by timeit
30 |
31 | for l in lines:
32 | if not l.startswith("TIME"):
33 | continue
34 |
35 | match = pattern.match(l)
36 |
37 | name = match.group(1)
38 | avg = float(match.group(2))
39 | total_time = float(match.group(3))
40 | total_calls = float(match.group(4))
41 | info[name] = (avg, total_time, total_calls)
42 |
43 | return info
44 |
45 | def compare_files(fileA, fileB):
46 | with open(fileA) as fA:
47 | linesA = fA.readlines()
48 |
49 | with open(fileB) as fB:
50 | linesB = fB.readlines()
51 |
52 | mapA = recover_map(linesA)
53 | mapB = recover_map(linesB)
54 |
55 | keysA = set(mapA.keys())
56 | keysB = set(mapB.keys())
57 |
58 | inter = keysA.intersection(keysB)
59 | print("Missing A", keysA.difference(inter))
60 | print("Missing B", keysB.difference(inter))
61 |
62 | keys_ordered = list(sorted([(mapA[k][1], k) for k in inter], reverse=True))
63 |
64 | for _, k in keys_ordered:
65 | print(f"{k} {mapA[k]} {mapB[k]}")
66 |
67 |
68 | if __name__ == "__main__":
69 | fA = sys.argv[1]
70 | fB = sys.argv[2]
71 | compare_files(fA, fB)
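72 |
73 | # Illustrative usage (a sketch, not part of the original file; run_physics is hypothetical):
74 | #   with timeit("physics_step"):
75 | #       run_physics()          # prints per-call time plus running average and totals
76 | #
77 | #   @time_decorator
78 | #   def rollout():
79 | #       ...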
--------------------------------------------------------------------------------
/amp/utils/draw_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import skimage
3 | from skimage.draw import polygon
4 | from skimage.draw import bezier_curve
5 | from skimage.draw import circle_perimeter
6 | from skimage.draw import disk
7 | from scipy import ndimage
8 | import matplotlib
9 | import matplotlib.pyplot as plt
10 |
11 | def agt_color(aidx):
12 | return matplotlib.colors.to_rgb(
13 | plt.rcParams['axes.prop_cycle'].by_key()['color'][aidx % 10])
14 |
15 | def draw_disk(img_size=80, max_r=10, iterations=3):
16 | shape = (img_size, img_size)
17 | img = np.zeros(shape, dtype=np.uint8)
18 | x, y = np.random.uniform(max_r, img_size - max_r, size=(2))
19 |     radius = int(np.random.uniform(1, max_r))  # was uniform(max_r), which relied on the default high=1.0
20 | rr, cc = disk((x, y), radius, shape=shape)
21 | np.clip(rr, 0, img_size - 1, out=rr)
22 | np.clip(cc, 0, img_size - 1, out=cc)
23 | img[rr, cc] = 1
24 | return img
25 |
26 |
27 | def draw_circle(img_size=80, max_r=10, iterations=3):
28 | img = np.zeros((img_size, img_size), dtype=np.uint8)
29 | r, c = np.random.uniform(max_r, img_size - max_r, size=(2, )).astype(int)
30 |     radius = int(np.random.uniform(1, max_r))  # was uniform(max_r), which relied on the default high=1.0
31 | rr, cc = circle_perimeter(r, c, radius)
32 | np.clip(rr, 0, img_size - 1, out=rr); np.clip(cc, 0, img_size - 1, out=cc)
33 | img[rr, cc] = 1
34 | img = ndimage.binary_dilation(img, iterations=1).astype(int)
35 | return img
36 |
37 |
38 | def draw_curve(img_size=80, max_sides=10, iterations=3):
39 | img = np.zeros((img_size, img_size), dtype=np.uint8)
40 | r0, c0, r1, c1, r2, c2 = np.random.uniform(0, img_size,
41 | size=(6, )).astype(int)
42 | w = np.random.random()
43 | rr, cc = bezier_curve(r0, c0, r1, c1, r2, c2, w)
44 | np.clip(rr, 0, img_size - 1, out=rr); np.clip(cc, 0, img_size - 1, out=cc)
45 | img[rr, cc] = 1
46 | img = ndimage.binary_dilation(img, iterations=iterations).astype(int)
47 | return img
48 |
49 |
50 | def draw_polygon(img_size=80, max_sides=10):
51 | img = np.zeros((img_size, img_size), dtype=np.uint8)
52 | num_coord = int(np.random.uniform(3, max_sides))
53 | r = np.random.uniform(0, img_size, size=(num_coord, )).astype(int)
54 | c = np.random.uniform(0, img_size, size=(num_coord, )).astype(int)
55 | rr, cc = polygon(r, c)
56 | np.clip(rr, 0, img_size - 1, out=rr)
57 | np.clip(cc, 0, img_size - 1, out=cc)
58 | img[rr, cc] = 1
59 | return img
60 |
61 |
62 | def draw_ellipse(img_size=80, max_size=10):
63 | img = np.zeros((img_size, img_size), dtype=np.uint8)
64 | r, c, rradius, cradius = np.random.uniform(max_size, img_size - max_size), np.random.uniform(max_size, img_size - max_size),\
65 | np.random.uniform(1, max_size), np.random.uniform(1, max_size)
66 | rr, cc = skimage.draw.ellipse(r, c, rradius, cradius)
67 | np.clip(rr, 0, img_size - 1, out=rr)
68 | np.clip(cc, 0, img_size - 1, out=cc)
69 | img[rr, cc] = 1
70 | return img
--------------------------------------------------------------------------------
/amp/utils/flags.py:
--------------------------------------------------------------------------------
1 | __all__ = ['flags', 'Flags']  # export only names actually defined in this module
2 |
3 | class Flags(object):
4 | def __init__(self, items):
5 | for key, val in items.items():
6 | setattr(self,key,val)
7 |
8 | flags = Flags({
9 | 'test': False,
10 | 'debug': False
11 | })
12 |
--------------------------------------------------------------------------------
/amp/utils/logger.py:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------------------------
2 | # @brief:
3 | # The logger here will be called all across the project. It is inspired
4 | # by Yuxin Wu (ppwwyyxx@gmail.com)
5 | #
6 | # @author:
7 | # Tingwu Wang, 2017, Feb, 20th
8 | # -----------------------------------------------------------------------------
9 |
10 | import logging
11 | import sys
12 | import os
13 | import datetime
14 | from termcolor import colored
15 |
16 | __all__ = ['set_file_handler'] # the actual worker is the '_logger'
17 |
18 |
19 | class _MyFormatter(logging.Formatter):
20 | '''
21 | @brief:
22 |         a formatter that colorizes the date prefix and tags warning/error levels
23 | '''
24 |
25 | def format(self, record):
26 | date = colored('[%(asctime)s @%(filename)s:%(lineno)d]', 'green')
27 | msg = '%(message)s'
28 |
29 | if record.levelno == logging.WARNING:
30 | fmt = date + ' ' + \
31 | colored('WRN', 'red', attrs=[]) + ' ' + msg
32 | elif record.levelno == logging.ERROR or \
33 | record.levelno == logging.CRITICAL:
34 | fmt = date + ' ' + \
35 | colored('ERR', 'red', attrs=['underline']) + ' ' + msg
36 | else:
37 | fmt = date + ' ' + msg
38 |
39 | if hasattr(self, '_style'):
40 |             # Python3 compatibility
41 | self._style._fmt = fmt
42 | self._fmt = fmt
43 |
44 | return super(self.__class__, self).format(record)
45 |
46 |
47 | _logger = logging.getLogger('joint_embedding')
48 | _logger.propagate = False
49 | _logger.setLevel(logging.INFO)
50 |
51 | # set the console output handler
52 | con_handler = logging.StreamHandler(sys.stdout)
53 | con_handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
54 | _logger.addHandler(con_handler)
55 |
56 |
57 | class GLOBAL_PATH(object):
58 |
59 | def __init__(self, path=None):
60 | if path is None:
61 | path = os.getcwd()
62 | self.path = path
63 |
64 | def _set_path(self, path):
65 | self.path = path
66 |
67 | def _get_path(self):
68 | return self.path
69 |
70 |
71 | PATH = GLOBAL_PATH()
72 |
73 |
74 | def set_file_handler(path=None, prefix='', time_str=''):
75 | # set the file output handler
76 | if time_str == '':
77 | file_name = prefix + \
78 | datetime.datetime.now().strftime("%A_%d_%B_%Y_%I:%M%p") + '.log'
79 | else:
80 | file_name = prefix + time_str + '.log'
81 |
82 | if path is None:
83 | mod = sys.modules['__main__']
84 | path = os.path.join(os.path.abspath(mod.__file__), '..', '..', 'log')
85 | else:
86 | path = os.path.join(path, 'log')
87 | path = os.path.abspath(path)
88 |
89 | path = os.path.join(path, file_name)
90 | if not os.path.exists(path):
91 | os.makedirs(path)
92 |
93 | PATH._set_path(path)
94 | path = os.path.join(path, file_name)
95 | from tensorboard_logger import configure
96 | configure(path)
97 |
98 | file_handler = logging.FileHandler(
99 | filename=os.path.join(path, 'logger'), encoding='utf-8', mode='w')
100 | file_handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
101 | _logger.addHandler(file_handler)
102 |
103 | _logger.info('Log file set to {}'.format(path))
104 | return path
105 |
106 |
107 | def _get_path():
108 | return PATH._get_path()
109 |
110 |
111 | _LOGGING_METHOD = ['info', 'warning', 'error', 'critical',
112 | 'warn', 'exception', 'debug']
113 |
114 | # export logger functions
115 | for func in _LOGGING_METHOD:
116 | locals()[func] = getattr(_logger, func)
117 |
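118 | # Illustrative usage (a sketch, not part of the original file):
119 | #   from amp.utils import logger
120 | #   logger.set_file_handler(path="output", prefix="exp_")  # also configures tensorboard_logger
121 | #   logger.info("training started")                        # exported via the locals() loop above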
--------------------------------------------------------------------------------
/amp/utils/parse_task.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | from env.tasks.humanoid import Humanoid
9 | from env.tasks.humanoid_amp import HumanoidAMP
10 | from env.tasks.humanoid_amp_task import HumanoidAMPTask
11 | from env.tasks.humanoid_traj import HumanoidTraj
12 | from env.tasks.humanoid_pedestrian import HumanoidPedestrian
13 | from env.tasks.humanoid_pedestrain_terrain import HumanoidPedestrianTerrain
14 | from amp.env.tasks.humanoid_pedestrain_terrain_im import HumanoidPedestrianTerrainIm
15 | from env.tasks.vec_task_wrappers import VecTaskPythonWrapper
16 | from isaacgym import rlgpu
17 |
18 | import json
19 | import numpy as np
20 |
21 |
22 | def warn_task_name():
23 | raise Exception(
24 | "Unrecognized task!\nTask should be one of: [BallBalance, Cartpole, CartpoleYUp, Ant, Humanoid, Anymal, FrankaCabinet, Quadcopter, ShadowHand, ShadowHandLSTM, ShadowHandFFOpenAI, ShadowHandFFOpenAITest, ShadowHandOpenAI, ShadowHandOpenAITest, Ingenuity]")
25 |
26 | def parse_task(args, cfg, cfg_train, sim_params):
27 |
28 | # create native task and pass custom config
29 | device_id = args.device_id
30 | rl_device = args.rl_device
31 |
32 | cfg["seed"] = cfg_train.get("seed", -1)
33 | cfg_task = cfg["env"]
34 | cfg_task["seed"] = cfg["seed"]
35 |
36 |     # guard the eval so an unrecognized task name fails with a helpful message
37 |     try:
38 |         task = eval(args.task)(
39 |             cfg=cfg,
40 |             sim_params=sim_params,
41 |             physics_engine=args.physics_engine,
42 |             device_type=args.device,
43 |             device_id=device_id,
44 |             headless=args.headless)
45 |     except NameError as e:
46 |         print(e)
47 |         warn_task_name()
48 |     env = VecTaskPythonWrapper(task, rl_device, cfg_train.get("clip_observations", np.inf), cfg_train.get("clip_actions", 1.0))
49 |
50 |     return task, env
51 |
--------------------------------------------------------------------------------
/assets/teaser.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/assets/teaser.gif
--------------------------------------------------------------------------------
/poselib/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | # vscode
107 | .vscode/
108 |
--------------------------------------------------------------------------------
/poselib/README.md:
--------------------------------------------------------------------------------
1 | # poselib
2 |
3 | `poselib` is a library for loading, manipulating, and retargeting skeleton poses and motions. It is separated into three modules: `poselib.poselib.core` for basic data loading and tensor operations, `poselib.poselib.skeleton` for higher-level skeleton operations, and `poselib.poselib.visualization` for displaying skeleton poses.
4 |
5 | ## poselib.poselib.core
6 | - `poselib.poselib.core.rotation3d`: A set of Torch JIT functions for dealing with quaternions, transforms, and rotation/transformation matrices.
7 | - `quat_*` manipulate and create quaternions in [x, y, z, w] format (where w is the real component).
8 | - `transform_*` handle 7D transforms in [quat, pos] format.
9 | - `rot_matrix_*` handle 3x3 rotation matrices.
10 | - `euclidean_*` handle 4x4 Euclidean transformation matrices.
11 | - `poselib.poselib.core.tensor_utils`: Provides loading and saving functions for PyTorch tensors.
12 |
13 | ## poselib.poselib.skeleton
14 | - `poselib.poselib.skeleton.skeleton3d`: Utilities for loading and manipulating skeleton poses, and retargeting poses to different skeletons.
15 | - `SkeletonTree` is a class that stores a skeleton as a tree structure.
16 | - `SkeletonState` describes the static state of a skeleton, and provides both global and local joint angles.
17 | - `SkeletonMotion` describes a time-series of skeleton states and provides utilities for computing joint velocities.
18 |
19 | ## poselib.poselib.visualization
20 | - `poselib.poselib.visualization.common`: Functions used for visualizing skeletons interactively in `matplotlib`.
21 |
--------------------------------------------------------------------------------
/poselib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/poselib/__init__.py
--------------------------------------------------------------------------------
/poselib/poselib/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.0.1"
2 |
3 | from .core import *
4 |
--------------------------------------------------------------------------------
/poselib/poselib/core/__init__.py:
--------------------------------------------------------------------------------
1 | from .tensor_utils import *
2 | from .rotation3d import *
3 | from .backend import Serializable, logger
4 |
--------------------------------------------------------------------------------
/poselib/poselib/core/backend/__init__.py:
--------------------------------------------------------------------------------
1 | from .abstract import Serializable
2 |
3 | from .logger import logger
4 |
--------------------------------------------------------------------------------
/poselib/poselib/core/backend/abstract.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | from abc import ABCMeta, abstractmethod, abstractclassmethod
9 | from collections import OrderedDict
10 | import json
11 |
12 | import numpy as np
13 | import os
14 |
15 | TENSOR_CLASS = {}
16 |
17 |
18 | def register(name):
19 | global TENSOR_CLASS
20 |
21 | def core(tensor_cls):
22 | TENSOR_CLASS[name] = tensor_cls
23 | return tensor_cls
24 |
25 | return core
26 |
27 |
28 | def _get_cls(name):
29 | global TENSOR_CLASS
30 | return TENSOR_CLASS[name]
31 |
32 |
33 | class NumpyEncoder(json.JSONEncoder):
34 | """ Special json encoder for numpy types """
35 |
36 | def default(self, obj):
37 | if isinstance(
38 | obj,
39 | (
40 | np.int_,
41 | np.intc,
42 | np.intp,
43 | np.int8,
44 | np.int16,
45 | np.int32,
46 | np.int64,
47 | np.uint8,
48 | np.uint16,
49 | np.uint32,
50 | np.uint64,
51 | ),
52 | ):
53 | return int(obj)
54 | elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
55 | return float(obj)
56 | elif isinstance(obj, (np.ndarray,)):
57 | return dict(__ndarray__=obj.tolist(), dtype=str(obj.dtype), shape=obj.shape)
58 | return json.JSONEncoder.default(self, obj)
59 |
60 |
61 | def json_numpy_obj_hook(dct):
62 | if isinstance(dct, dict) and "__ndarray__" in dct:
63 | data = np.asarray(dct["__ndarray__"], dtype=dct["dtype"])
64 | return data.reshape(dct["shape"])
65 | return dct
66 |
67 |
68 | class Serializable:
69 | """ Implementation to read/write to file.
70 |     Every class that inherits from this class needs to implement to_dict() and
71 |     from_dict().
72 | """
73 |
74 | @abstractclassmethod
75 | def from_dict(cls, dict_repr, *args, **kwargs):
76 | """ Read the object from an ordered dictionary
77 |
78 | :param dict_repr: the ordered dictionary that is used to construct the object
79 | :type dict_repr: OrderedDict
80 | :param args, kwargs: the arguments that need to be passed into from_dict()
81 | :type args, kwargs: additional arguments
82 | """
83 | pass
84 |
85 | @abstractmethod
86 | def to_dict(self):
87 | """ Construct an ordered dictionary from the object
88 |
89 | :rtype: OrderedDict
90 | """
91 | pass
92 |
93 | @classmethod
94 | def from_file(cls, path, *args, **kwargs):
95 | """ Read the object from a file (either .npy or .json)
96 |
97 | :param path: path of the file
98 | :type path: string
99 | :param args, kwargs: the arguments that need to be passed into from_dict()
100 | :type args, kwargs: additional arguments
101 | """
102 | if path.endswith(".json"):
103 | with open(path, "r") as f:
104 | d = json.load(f, object_hook=json_numpy_obj_hook)
105 | elif path.endswith(".npy"):
106 | d = np.load(path, allow_pickle=True).item()
107 | else:
108 | assert False, "failed to load {} from {}".format(cls.__name__, path)
109 | assert d["__name__"] == cls.__name__, "the file belongs to {}, not {}".format(
110 | d["__name__"], cls.__name__
111 | )
112 | return cls.from_dict(d, *args, **kwargs)
113 |
114 | def to_file(self, path: str) -> None:
115 | """ Write the object to a file (either .npy or .json)
116 |
117 | :param path: path of the file
118 | :type path: string
119 | """
120 | if os.path.dirname(path) != "" and not os.path.exists(os.path.dirname(path)):
121 | os.makedirs(os.path.dirname(path))
122 | d = self.to_dict()
123 | d["__name__"] = self.__class__.__name__
124 | if path.endswith(".json"):
125 | with open(path, "w") as f:
126 | json.dump(d, f, cls=NumpyEncoder, indent=4)
127 | elif path.endswith(".npy"):
128 | np.save(path, d)
129 |
--------------------------------------------------------------------------------
/poselib/poselib/core/backend/logger.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | import logging
9 |
10 | logger = logging.getLogger("poselib")
11 | logger.setLevel(logging.INFO)
12 |
13 | if not len(logger.handlers):
14 | formatter = logging.Formatter(
15 | fmt="%(asctime)-15s - %(levelname)s - %(module)s - %(message)s"
16 | )
17 | handler = logging.StreamHandler()
18 | handler.setFormatter(formatter)
19 | logger.addHandler(handler)
20 | logger.info("logger initialized")
21 |
--------------------------------------------------------------------------------
/poselib/poselib/core/tensor_utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
4 | # NVIDIA CORPORATION and its licensors retain all intellectual property
5 | # and proprietary rights in and to this software, related documentation
6 | # and any modifications thereto. Any use, reproduction, disclosure or
7 | # distribution of this software and related documentation without an express
8 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
9 |
10 | from collections import OrderedDict
11 | from .backend import Serializable
12 | import torch
13 |
14 |
15 | class TensorUtils(Serializable):
16 | @classmethod
17 | def from_dict(cls, dict_repr, *args, **kwargs):
18 | """ Read the object from an ordered dictionary
19 |
20 | :param dict_repr: the ordered dictionary that is used to construct the object
21 | :type dict_repr: OrderedDict
22 | :param kwargs: the arguments that need to be passed into from_dict()
23 | :type kwargs: additional arguments
24 | """
25 | return torch.from_numpy(dict_repr["arr"].astype(dict_repr["context"]["dtype"]))
26 |
27 | def to_dict(self):
28 | """ Construct an ordered dictionary from the object
29 |
30 | :rtype: OrderedDict
31 | """
32 |         raise NotImplementedError  # abstract: subclasses provide their own serialization
33 |
34 | def tensor_to_dict(x):
35 | """ Construct an ordered dictionary from the object
36 |
37 | :rtype: OrderedDict
38 | """
39 | x_np = x.numpy()
40 | return {
41 | "arr": x_np,
42 | "context": {
43 | "dtype": x_np.dtype.name
44 | }
45 | }
46 |
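47 | # Illustrative round trip (a sketch, not part of the original file):
48 | #   d = tensor_to_dict(torch.arange(3, dtype=torch.float32))  # {"arr": ..., "context": {"dtype": "float32"}}
49 | #   t = TensorUtils.from_dict(d)                               # back to a torch.Tensor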
--------------------------------------------------------------------------------
/poselib/poselib/core/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/poselib/poselib/core/tests/__init__.py
--------------------------------------------------------------------------------
/poselib/poselib/core/tests/test_rotation.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | from ..rotation3d import *
9 | import numpy as np
10 | import torch
11 |
12 | q = torch.from_numpy(np.array([[0, 1, 2, 3], [-2, 3, -1, 5]], dtype=np.float32))
13 | print("q", q)
14 | r = quat_normalize(q)
15 | x = torch.from_numpy(np.array([[1, 0, 0], [0, -1, 0]], dtype=np.float32))
16 | print(r)
17 | print(quat_rotate(r, x))
18 |
19 | angle = torch.from_numpy(np.array(np.random.rand() * 10.0, dtype=np.float32))
20 | axis = torch.from_numpy(
21 | np.array([1, np.random.rand() * 10.0, np.random.rand() * 10.0], dtype=np.float32),
22 | )
23 |
24 | print(repr(angle))
25 | print(repr(axis))
26 |
27 | rot = quat_from_angle_axis(angle, axis)
28 | x = torch.from_numpy(np.random.rand(5, 6, 3))
29 | y = quat_rotate(quat_inverse(rot), quat_rotate(rot, x))
30 | print(x.numpy())
31 | print(y.numpy())
32 | assert np.allclose(x.numpy(), y.numpy())
33 |
34 | m = torch.from_numpy(np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]], dtype=np.float32))
35 | r = quat_from_rotation_matrix(m)
36 | t = torch.from_numpy(np.array([0, 1, 0], dtype=np.float32))
37 | se3 = transform_from_rotation_translation(r=r, t=t)
38 | print(se3)
39 | print(transform_apply(se3, t))
40 |
41 | rot = quat_from_angle_axis(
42 | torch.from_numpy(np.array([45, -54], dtype=np.float32)),
43 | torch.from_numpy(np.array([[1, 0, 0], [0, 1, 0]], dtype=np.float32)),
44 | degree=True,
45 | )
46 | trans = torch.from_numpy(np.array([[1, 1, 0], [1, 1, 0]], dtype=np.float32))
47 | transform = transform_from_rotation_translation(r=rot, t=trans)
48 |
49 | t = transform_mul(transform, transform_inverse(transform))
50 | gt = np.zeros((2, 7))
51 | gt[:, 0] = 1.0
52 | print(t.numpy())
53 | print(gt)
54 | # assert np.allclose(t.numpy(), gt)
55 |
56 | transform2 = torch.from_numpy(
57 | np.array(
58 | [[1, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], dtype=np.float32
59 | ),
60 | )
61 | transform2 = euclidean_to_transform(transform2)
62 | print(transform2)
63 |
--------------------------------------------------------------------------------
/poselib/poselib/skeleton/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/poselib/poselib/skeleton/__init__.py
--------------------------------------------------------------------------------
/poselib/poselib/skeleton/backend/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/poselib/poselib/skeleton/backend/__init__.py
--------------------------------------------------------------------------------
/poselib/poselib/skeleton/backend/fbx/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/poselib/poselib/skeleton/backend/fbx/__init__.py
--------------------------------------------------------------------------------
/poselib/poselib/skeleton/backend/fbx/fbx_read_wrapper.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 |
4 | NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary
5 | rights in and to this software, related documentation and any modifications thereto. Any
6 | use, reproduction, disclosure or distribution of this software and related documentation
7 | without an express license agreement from NVIDIA CORPORATION is strictly prohibited.
8 | """
9 |
10 | """
11 | Script that reads in fbx files via a python 2 backend
12 |
13 | This requires a config that provides the command necessary to switch conda
14 | environments and run the fbx reading script under python 2
15 | """
16 |
17 | from ....core import logger
18 |
19 | import inspect
20 | import os
21 |
22 | import numpy as np
23 |
24 | # Get the current folder to import the config file
25 | current_folder = os.path.realpath(
26 | os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])
27 | )
28 |
29 |
30 | def fbx_to_array(fbx_file_path, fbx_configs, root_joint, fps):
31 | """
32 | Reads an fbx file to an array.
33 |
34 |     Reading the frame time is currently not supported; 120 fps is hard-coded (TODO).
35 |
36 | :param fbx_file_path: str, file path to fbx
37 | :return: tuple with joint_names, parents, transforms, frame time
38 | """
39 |
40 | # Ensure the file path is valid
41 | fbx_file_path = os.path.abspath(fbx_file_path)
42 | assert os.path.exists(fbx_file_path)
43 |
44 | # Switch directories to the utils folder to ensure the reading works
45 | previous_cwd = os.getcwd()
46 | os.chdir(current_folder)
47 |
48 | # Call the python 2.7 script
49 | temp_file_path = os.path.join(current_folder, fbx_configs["tmp_path"])
50 | python_path = fbx_configs["fbx_py27_path"]
51 | logger.info("executing python script to read fbx data using Autodesk FBX SDK...")
52 | command = '{} fbx_py27_backend.py "{}" "{}" "{}" "{}"'.format(
53 | python_path, fbx_file_path, temp_file_path, root_joint, fps
54 | )
55 | logger.debug("executing command: {}".format(command))
56 | os.system(command)
57 | logger.info(
58 | "executing python script to read fbx data using Autodesk FBX SDK... done"
59 | )
60 |
61 | with open(temp_file_path, "rb") as f:
62 | data = np.load(f)
63 | output = (
64 | data["names"],
65 | data["parents"],
66 | data["transforms"],
67 | data["fps"],
68 | )
69 |
70 | # Remove the temporary file
71 | os.remove(temp_file_path)
72 |
73 | # Return the os to its previous cwd, otherwise reading multiple files might fail
74 | os.chdir(previous_cwd)
75 | return output
76 |
--------------------------------------------------------------------------------
/poselib/poselib/skeleton/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/poselib/poselib/skeleton/tests/__init__.py
--------------------------------------------------------------------------------
/poselib/poselib/skeleton/tests/ant.xml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/poselib/poselib/skeleton/tests/ant.xml
--------------------------------------------------------------------------------
/poselib/poselib/skeleton/tests/test_skeleton.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | from ...core import *
9 | from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion
10 |
11 | import numpy as np
12 | import torch
13 |
14 | from ...visualization.common import (
15 | plot_skeleton_state,
16 | plot_skeleton_motion_interactive,
17 | )
18 |
19 | from ...visualization.plt_plotter import Matplotlib3DPlotter
20 | from ...visualization.skeleton_plotter_tasks import (
21 | Draw3DSkeletonMotion,
22 | Draw3DSkeletonState,
23 | )
24 |
25 |
26 | def test_skel_tree():
27 | skel_tree = SkeletonTree.from_mjcf(
28 | "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml",
29 | backend="pytorch",
30 | )
31 | skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch")
32 | # assert skel_tree.to_str() == skel_tree_rec.to_str()
33 | print(skel_tree.node_names)
34 | print(skel_tree.local_translation)
35 | print(skel_tree.parent_indices)
36 | skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree)
37 | plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)
38 | skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"])
39 | plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)
40 |
41 |
42 | def test_skel_motion():
43 | skel_motion = SkeletonMotion.from_file(
44 | "/tmp/tmp.npy", backend="pytorch", load_context=True
45 | )
46 |
47 | plot_skeleton_motion_interactive(skel_motion)
48 |
49 |
50 | def test_grad():
51 | source_motion = SkeletonMotion.from_file(
52 | "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy",
53 | backend="pytorch",
54 | device="cuda:0",
55 | )
56 | source_tpose = SkeletonState.from_file(
57 | "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy",
58 | backend="pytorch",
59 | device="cuda:0",
60 | )
61 |
62 | target_tpose = SkeletonState.from_file(
63 | "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy",
64 | backend="pytorch",
65 | device="cuda:0",
66 | )
67 | target_skeleton_tree = target_tpose.skeleton_tree
68 |
69 | joint_mapping = {
70 | "upArm_r": "right_shoulder",
71 | "upArm_l": "left_shoulder",
72 | "loArm_r": "right_elbow",
73 | "loArm_l": "left_elbow",
74 | "upLeg_r": "right_hip",
75 | "upLeg_l": "left_hip",
76 | "loLeg_r": "right_knee",
77 | "loLeg_l": "left_knee",
78 | "foot_r": "right_ankle",
79 | "foot_l": "left_ankle",
80 | "hips": "pelvis",
81 | "neckA": "neck",
82 | "spineA": "abdomen",
83 | }
84 |
85 | rotation_to_target_skeleton = quat_from_angle_axis(
86 | angle=torch.tensor(90.0).float(),
87 | axis=torch.tensor([1, 0, 0]).float(),
88 | degree=True,
89 | )
90 |
91 | target_motion = source_motion.retarget_to(
92 | joint_mapping=joint_mapping,
93 | source_tpose_local_rotation=source_tpose.local_rotation,
94 | source_tpose_root_translation=source_tpose.root_translation,
95 | target_skeleton_tree=target_skeleton_tree,
96 | target_tpose_local_rotation=target_tpose.local_rotation,
97 | target_tpose_root_translation=target_tpose.root_translation,
98 | rotation_to_target_skeleton=rotation_to_target_skeleton,
99 | scale_to_target_skeleton=0.01,
100 | )
101 |
102 | target_state = SkeletonState(
103 | target_motion.tensor[800, :],
104 | target_motion.skeleton_tree,
105 | target_motion.is_local,
106 | )
107 |
108 | skeleton_tree = target_state.skeleton_tree
109 | root_translation = target_state.root_translation
110 | global_translation = target_state.global_translation
111 |
112 | q = np.zeros((len(skeleton_tree), 4), dtype=np.float32)
113 | q[..., 3] = 1.0
114 | q = torch.from_numpy(q)
115 | max_its = 10000
116 |
117 | task = Draw3DSkeletonState(task_name="", skeleton_state=target_state)
118 | plotter = Matplotlib3DPlotter(task)
119 |
120 | for i in range(max_its):
121 | r = quat_normalize(q)
122 | s = SkeletonState.from_rotation_and_root_translation(
123 | skeleton_tree, r=r, t=root_translation, is_local=True
124 | )
125 | print(" quat norm: {}".format(q.norm(p=2, dim=-1).mean().numpy()))
126 |
127 | task.update(s)
128 | plotter.update()
129 | plotter.show()
130 |
131 |
132 | test_grad()
--------------------------------------------------------------------------------
/poselib/poselib/skeleton/tests/transfer_npy.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | import numpy as np
9 | from ...core import Tensor, SO3, Quaternion, Vector3D
10 | from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion
11 |
12 | tpose = np.load(
13 | "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/flex_tpose.npy"
14 | ).item()
15 |
16 | local_rotation = SO3.from_numpy(tpose["local_rotation"], dtype="float32")
17 | root_translation = Vector3D.from_numpy(tpose["root_translation"], dtype="float32")
18 | skeleton_tree = tpose["skeleton_tree"]
19 | parent_indices = Tensor.from_numpy(skeleton_tree["parent_indices"], dtype="int32")
20 | local_translation = Vector3D.from_numpy(
21 | skeleton_tree["local_translation"], dtype="float32"
22 | )
23 | node_names = skeleton_tree["node_names"]
24 | skeleton_tree = SkeletonTree(node_names, parent_indices, local_translation)
25 | skeleton_state = SkeletonState.from_rotation_and_root_translation(
26 | skeleton_tree=skeleton_tree, r=local_rotation, t=root_translation, is_local=True
27 | )
28 |
29 | skeleton_state.to_file(
30 | "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/flex_tpose_new.npy"
31 | )
32 |
--------------------------------------------------------------------------------
/poselib/poselib/visualization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/poselib/poselib/visualization/__init__.py
--------------------------------------------------------------------------------
/poselib/poselib/visualization/core.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | """
9 | The base abstract classes for the plotter and the plotting tasks. They describe how a
10 | plotter deals with tasks in the general case.
11 | """
12 | from typing import List
13 |
14 |
15 | class BasePlotterTask(object):
16 | _task_name: str # unique name of the task
17 |     _task_type: str  # type of the task; identifies which plotting callable handles it
18 |
19 | def __init__(self, task_name: str, task_type: str) -> None:
20 | self._task_name = task_name
21 | self._task_type = task_type
22 |
23 | @property
24 | def task_name(self):
25 | return self._task_name
26 |
27 | @property
28 | def task_type(self):
29 | return self._task_type
30 |
31 | def get_scoped_name(self, name):
32 | return self._task_name + "/" + name
33 |
34 | def __iter__(self):
35 | """Should override this function to return a list of task primitives
36 | """
37 | raise NotImplementedError
38 |
39 |
40 | class BasePlotterTasks(object):
41 | def __init__(self, tasks) -> None:
42 | self._tasks = tasks
43 |
44 | def __iter__(self):
45 | for task in self._tasks:
46 | yield from task
47 |
48 |
49 | class BasePlotter(object):
50 | """An abstract plotter which deals with a plotting task. The children class needs to implement
51 | the functions to create/update the objects according to the task given
52 | """
53 |
54 | _task_primitives: List[BasePlotterTask]
55 |
56 | def __init__(self, task: BasePlotterTask) -> None:
57 | self._task_primitives = []
58 | self.create(task)
59 |
60 | @property
61 | def task_primitives(self):
62 | return self._task_primitives
63 |
64 | def create(self, task: BasePlotterTask) -> None:
65 | """Create more task primitives from a task for the plotter"""
66 | new_task_primitives = list(task) # get all task primitives
67 | self._task_primitives += new_task_primitives # append them
68 | self._create_impl(new_task_primitives)
69 |
70 | def update(self) -> None:
71 | """Update the plotter for any updates in the task primitives"""
72 | self._update_impl(self._task_primitives)
73 |
74 | def _update_impl(self, task_list: List[BasePlotterTask]) -> None:
75 | raise NotImplementedError
76 |
77 | def _create_impl(self, task_list: List[BasePlotterTask]) -> None:
78 | raise NotImplementedError
79 |
--------------------------------------------------------------------------------
/poselib/poselib/visualization/simple_plotter_tasks.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | """
9 | This is where all the task primitives are defined
10 | """
11 | import numpy as np
12 |
13 | from .core import BasePlotterTask
14 |
15 |
16 | class DrawXDLines(BasePlotterTask):
17 | _lines: np.ndarray
18 | _color: str
19 | _line_width: int
20 | _alpha: float
21 | _influence_lim: bool
22 |
23 | def __init__(
24 | self,
25 | task_name: str,
26 | lines: np.ndarray,
27 | color: str = "blue",
28 | line_width: int = 2,
29 | alpha: float = 1.0,
30 | influence_lim: bool = True,
31 | ) -> None:
32 | super().__init__(task_name=task_name, task_type=self.__class__.__name__)
33 | self._color = color
34 | self._line_width = line_width
35 | self._alpha = alpha
36 | self._influence_lim = influence_lim
37 | self.update(lines)
38 |
39 | @property
40 | def influence_lim(self) -> bool:
41 | return self._influence_lim
42 |
43 | @property
44 | def raw_data(self):
45 | return self._lines
46 |
47 | @property
48 | def color(self):
49 | return self._color
50 |
51 | @property
52 | def line_width(self):
53 | return self._line_width
54 |
55 | @property
56 | def alpha(self):
57 | return self._alpha
58 |
59 | @property
60 | def dim(self):
61 | raise NotImplementedError
62 |
63 | @property
64 | def name(self):
65 | return "{}DLines".format(self.dim)
66 |
67 | def update(self, lines):
68 | self._lines = np.array(lines)
69 | shape = self._lines.shape
70 | assert shape[-1] == self.dim and shape[-2] == 2 and len(shape) == 3
71 |
72 | def __getitem__(self, index):
73 | return self._lines[index]
74 |
75 | def __len__(self):
76 | return self._lines.shape[0]
77 |
78 | def __iter__(self):
79 | yield self
80 |
81 |
82 | class DrawXDDots(BasePlotterTask):
83 | _dots: np.ndarray
84 | _color: str
85 | _marker_size: int
86 | _alpha: float
87 | _influence_lim: bool
88 |
89 | def __init__(
90 | self,
91 | task_name: str,
92 | dots: np.ndarray,
93 | color: str = "blue",
94 | marker_size: int = 10,
95 | alpha: float = 1.0,
96 | influence_lim: bool = True,
97 | ) -> None:
98 | super().__init__(task_name=task_name, task_type=self.__class__.__name__)
99 | self._color = color
100 | self._marker_size = marker_size
101 | self._alpha = alpha
102 | self._influence_lim = influence_lim
103 | self.update(dots)
104 |
105 | def update(self, dots):
106 | self._dots = np.array(dots)
107 | shape = self._dots.shape
108 | assert shape[-1] == self.dim and len(shape) == 2
109 |
110 | def __getitem__(self, index):
111 | return self._dots[index]
112 |
113 | def __len__(self):
114 | return self._dots.shape[0]
115 |
116 | def __iter__(self):
117 | yield self
118 |
119 | @property
120 | def influence_lim(self) -> bool:
121 | return self._influence_lim
122 |
123 | @property
124 | def raw_data(self):
125 | return self._dots
126 |
127 | @property
128 | def color(self):
129 | return self._color
130 |
131 | @property
132 | def marker_size(self):
133 | return self._marker_size
134 |
135 | @property
136 | def alpha(self):
137 | return self._alpha
138 |
139 | @property
140 | def dim(self):
141 | raise NotImplementedError
142 |
143 | @property
144 | def name(self):
145 | return "{}DDots".format(self.dim)
146 |
147 |
148 | class DrawXDTrail(DrawXDDots):
149 | @property
150 | def line_width(self):
151 | return self.marker_size
152 |
153 | @property
154 | def name(self):
155 | return "{}DTrail".format(self.dim)
156 |
157 |
158 | class Draw2DLines(DrawXDLines):
159 | @property
160 | def dim(self):
161 | return 2
162 |
163 |
164 | class Draw3DLines(DrawXDLines):
165 | @property
166 | def dim(self):
167 | return 3
168 |
169 |
170 | class Draw2DDots(DrawXDDots):
171 | @property
172 | def dim(self):
173 | return 2
174 |
175 |
176 | class Draw3DDots(DrawXDDots):
177 | @property
178 | def dim(self):
179 | return 3
180 |
181 |
182 | class Draw2DTrail(DrawXDTrail):
183 | @property
184 | def dim(self):
185 | return 2
186 |
187 |
188 | class Draw3DTrail(DrawXDTrail):
189 | @property
190 | def dim(self):
191 | return 3
192 |
193 |
--------------------------------------------------------------------------------
/poselib/poselib/visualization/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/poselib/poselib/visualization/tests/__init__.py
--------------------------------------------------------------------------------
/poselib/poselib/visualization/tests/test_plotter.py:
--------------------------------------------------------------------------------
1 | from typing import cast
2 |
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 |
6 | from ..core import BasePlotterTask, BasePlotterTasks
7 | from ..plt_plotter import Matplotlib3DPlotter
8 | from ..simple_plotter_tasks import Draw3DDots, Draw3DLines
9 |
10 | task = Draw3DLines(task_name="test",
11 | lines=np.array([[[0, 0, 0], [0, 0, 1]], [[0, 1, 1], [0, 1, 0]]]), color="blue")
12 | task2 = Draw3DDots(task_name="test2",
13 | dots=np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1], [0, 1, 0]]), color="red")
14 | task3 = BasePlotterTasks([task, task2])
15 | plotter = Matplotlib3DPlotter(cast(BasePlotterTask, task3))
16 | plt.show()
17 |
--------------------------------------------------------------------------------
/poselib/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | setup(
4 | name="poselib",
5 | packages=["poselib"],
6 | version="0.0.42",
7 | description="Framework Agnostic Tensor Programming",
8 | author="Qiyang Li, Kelly Guo, Brendon Matusch",
9 | classifiers=[
10 | "Programming Language :: Python",
11 | "Programming Language :: Python :: 3",
12 | "License :: OSI Approved :: GNU General Public License (GPL)",
13 | "Operating System :: OS Independent",
14 | "Development Status :: 1 - Planning",
15 | "Environment :: Console",
16 | "Intended Audience :: Science/Research",
17 | "Topic :: Scientific/Engineering :: GIS",
18 | ],
19 | )
20 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy==1.21.1
2 | termcolor==1.1.0
3 | rl-games==1.1.4
4 | tensorboard==1.15.0
5 | chumpy
6 | vtk
7 | numpy-stl
8 | joblib
9 | scikit-image
10 | scikit-learn
11 | scipy
12 | matplotlib
13 | torchgeometry
14 | pillow
15 | tqdm
16 | lxml
17 | opencv-python
18 | wandb
19 | ipdb
20 | patchelf
21 | termcolor
22 | rl-games==1.1.4
23 | pyyaml
24 | mujoco-py<2.2,>=2.1
25 | scikit-image
26 | gym
27 | git+https://github.com/ZhengyiLuo/smplx.git@master
28 | human_body_prior
29 | autograd
30 | sklearn
31 | chumpy
--------------------------------------------------------------------------------
/uhc/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/uhc/__init__.py
--------------------------------------------------------------------------------
/uhc/assets/mjcf/humanoid_template_local.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/uhc/khrylib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/uhc/khrylib/__init__.py
--------------------------------------------------------------------------------
/uhc/khrylib/mocap/pose.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 | from bvh import Bvh
4 | from uhc.khrylib.utils.transformation import quaternion_slerp, quaternion_from_euler, euler_from_quaternion
5 |
6 |
7 | def load_amc_file(fname, scale):
8 |
9 | with open(fname) as f:
10 | content = f.readlines()
11 |
12 | bone_addr = dict()
13 | poses = []
14 | cur_pos = None
15 | fr = 1
16 | for line in content:
17 | line_words = line.split()
18 | cmd = line_words[0]
19 | if cmd == str(fr):
20 | if cur_pos:
21 | poses.append(np.array(cur_pos))
22 | cur_pos = []
23 | fr += 1
24 | elif cur_pos is not None:
25 | start_ind = len(cur_pos)
26 | if cmd == 'root':
27 | cur_pos += [float(word)*scale for word in line_words[1:4]]
28 | cur_pos += [math.radians(float(word)) for word in line_words[4:]]
29 | elif cmd == 'lfoot' or cmd == 'rfoot':
30 | cur_pos += reversed([math.radians(float(word)) for word in line_words[1:]])
31 | if len(cur_pos) < 3:
32 | cur_pos.insert(-1, 0.0)
33 | else:
34 | cur_pos += reversed([math.radians(float(word)) for word in line_words[1:]])
35 | if fr == 2:
36 | end_ind = len(cur_pos)
37 | bone_addr[cmd] = (start_ind, end_ind)
38 |
39 | if cur_pos:
40 | poses.append(np.array(cur_pos))
41 | poses = np.vstack(poses)
42 | return poses, bone_addr
43 |
44 |
45 | def load_bvh_file(fname, skeleton):
46 | with open(fname) as f:
47 | mocap = Bvh(f.read())
48 |
49 | # build bone_addr
50 | bone_addr = dict()
51 | start_ind = 0
52 | for bone in skeleton.bones:
53 | end_ind = start_ind + len(bone.channels)
54 | bone_addr[bone.name] = (start_ind, end_ind)
55 | start_ind = end_ind
56 | dof_num = start_ind
57 |
58 | poses = np.zeros((mocap.nframes, dof_num))
59 | for i in range(mocap.nframes):
60 | for bone in skeleton.bones:
61 | trans = np.array(mocap.frame_joint_channels(i, bone.name, bone.channels))
62 | if bone == skeleton.root:
63 | trans[:3] *= skeleton.len_scale
64 | trans[3:6] = np.deg2rad(trans[3:6])
65 | else:
66 | trans = np.deg2rad(trans)
67 | start_ind, end_ind = bone_addr[bone.name]
68 | poses[i, start_ind:end_ind] = trans
69 |
70 | return poses, bone_addr
71 |
72 |
73 | def lin_interp(pose1, pose2, t):
74 | pose_t = (1 - t) * pose1 + t * pose2
75 | if np.any(np.abs(pose2[3:] - pose1[3:]) > np.pi * 0.5):
76 | pose_t[3:] = pose1[3:] if t < 0.5 else pose2[3:]
77 | return pose_t
78 |
79 |
80 | def interpolated_traj(poses, sample_t=0.030, mocap_fr=120, interp_func=lin_interp):
81 | N = poses.shape[0]
82 | T = float(N-1)/mocap_fr
83 | num = int(math.floor(T/sample_t))
84 | sampling_times = np.arange(num+1)*sample_t*mocap_fr
85 |
86 | poses_samp = []
87 | for t in sampling_times:
88 | start = int(math.floor(t))
89 | end = min(int(math.ceil(t)), poses.shape[0] - 1)
90 | pose_interp = interp_func(poses[start, :], poses[end, :], t-math.floor(t))
91 | poses_samp.append(pose_interp)
92 | poses_samp = np.vstack(poses_samp)
93 |
94 | return poses_samp
95 |
96 |
97 |
--------------------------------------------------------------------------------
/uhc/khrylib/mocap/vis_joint_range.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import sys
4 | sys.path.append(os.getcwd())
5 |
6 | from uhc.khrylib.utils import *
7 | from mujoco_py import load_model_from_path, MjSim
8 | from uhc.khrylib.rl.envs.common.mjviewer import MjViewer
9 |
10 |
11 | parser = argparse.ArgumentParser()
12 | parser.add_argument('--model', default='assets/mujoco_models/human36m_orig.xml')
13 | args = parser.parse_args()
14 |
15 | model = load_model_from_path(args.model)
16 | sim = MjSim(model)
17 | viewer = MjViewer(sim)
18 |
19 | jind = -1
20 | jang = 30.0
21 |
22 |
23 | def key_callback(key, action, mods):
24 | global jind, jang
25 |
26 | if action != glfw.RELEASE:
27 | return False
28 | elif key == glfw.KEY_LEFT:
29 | jind = max(jind - 1, -1)
30 | print('{} {} {}'.format(model.joint_names[jind + 1] if jind >= 0 else 'rest', jind, jang))
31 | return True
32 | elif key == glfw.KEY_RIGHT:
33 | jind = min(jind + 1, len(model.joint_names) - 2)
34 | print('{} {} {}'.format(model.joint_names[jind + 1] if jind >= 0 else 'rest', jind, jang))
35 | return True
36 | elif key == glfw.KEY_UP:
37 | jang += 5.0
38 | print('{} {} {}'.format(model.joint_names[jind + 1] if jind >= 0 else 'rest', jind, jang))
39 | return True
40 | elif key == glfw.KEY_DOWN:
41 | jang -= 5.0
42 | print('{} {} {}'.format(model.joint_names[jind + 1] if jind >= 0 else 'rest', jind, jang))
43 | return True
44 | return False
45 |
46 |
47 | viewer._hide_overlay = True
48 | viewer.custom_key_callback = key_callback
49 | while True:
50 | sim.data.qpos[:] = 0.0
51 | sim.data.qpos[2] = 1.0
52 | if jind >= 0:
53 | sim.data.qpos[7 + jind] = math.radians(jang)
54 | sim.forward()
55 | viewer.render()
56 |
57 |
--------------------------------------------------------------------------------
/uhc/khrylib/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IDC-Flash/PacerPlus/fd66dda3e5062d5efa6ae37b3f7e2a8cc9eafcf8/uhc/khrylib/models/__init__.py
--------------------------------------------------------------------------------
/uhc/khrylib/models/cmlp.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 |
4 |
5 | class CMLP(nn.Module):
6 | def __init__(self, input_dim, cond_dim, hidden_dims=(128, 128), activation='tanh'):
7 | super().__init__()
8 | if activation == 'tanh':
9 | self.activation = torch.tanh
10 | elif activation == 'relu':
11 | self.activation = torch.relu
12 | elif activation == 'sigmoid':
13 | self.activation = torch.sigmoid
14 |
15 | self.cond_dim = cond_dim
16 | self.out_dim = hidden_dims[-1]
17 | self.affine_layers = nn.ModuleList()
18 | last_dim = input_dim
19 | for nh in hidden_dims:
20 | self.affine_layers.append(nn.Linear(last_dim + cond_dim, nh))
21 | last_dim = nh
22 |
23 | def forward(self, c, x):
24 | for affine in self.affine_layers:
25 | x = torch.cat((c, x), dim=1)
26 | x = self.activation(affine(x))
27 | return x
28 |
--------------------------------------------------------------------------------
/uhc/khrylib/models/discriminator.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 |
4 |
5 | class Discriminator(nn.Module):
6 | def __init__(self, net, net_out_dim=None):
7 | super().__init__()
8 | self.net = net
9 | if net_out_dim is None:
10 | net_out_dim = net.out_dim
11 | self.logic = nn.Linear(net_out_dim, 1)
12 | self.logic.weight.data.mul_(0.1)
13 | self.logic.bias.data.mul_(0.0)
14 |
15 | def forward(self, x):
16 | x = self.net(x)
17 | prob = torch.sigmoid(self.logic(x))
18 | return prob
19 |
--------------------------------------------------------------------------------
/uhc/khrylib/models/erd_net.py:
--------------------------------------------------------------------------------
1 | from uhc.khrylib.utils.torch import *
2 | from torch import nn
3 | from uhc.khrylib.models.rnn import RNN
4 | from uhc.khrylib.models.mlp import MLP
5 |
6 |
7 | class ERDNet(nn.Module):
8 |
9 | def __init__(self, state_dim):
10 | super().__init__()
11 | self.state_dim = state_dim
12 | self.encoder_mlp = MLP(state_dim, (500,), 'relu')
13 | self.encoder_linear = nn.Linear(500, 500)
14 | self.lstm1 = RNN(500, 1000, 'lstm')
15 | self.lstm2 = RNN(1000, 1000, 'lstm')
16 | self.decoder_mlp = MLP(1000, (500, 100), 'relu')
17 | self.decoder_linear = nn.Linear(100, state_dim)
18 | self.mode = 'batch'
19 |
20 | def initialize(self, mode):
21 | self.mode = mode
22 | self.lstm1.set_mode(mode)
23 | self.lstm2.set_mode(mode)
24 | self.lstm1.initialize()
25 | self.lstm2.initialize()
26 |
27 | def forward(self, x):
28 | if self.mode == 'batch':
29 | batch_size = x.shape[1]
30 | x = x.view(-1, x.shape[-1])
31 | x = self.encoder_mlp(x)
32 | x = self.encoder_linear(x)
33 | if self.mode == 'batch':
34 | x = x.view(-1, batch_size, x.shape[-1])
35 | x = self.lstm1(x)
36 | x = self.lstm2(x)
37 | if self.mode == 'batch':
38 | x = x.view(-1, x.shape[-1])
39 | x = self.decoder_mlp(x)
40 | x = self.decoder_linear(x)
41 | return x
42 |
43 |
44 | if __name__ == '__main__':
45 | net = ERDNet(64)
46 | input = ones(32, 3, 64)
47 | out = net(input)
48 | print(out.shape)
49 |
--------------------------------------------------------------------------------
/uhc/khrylib/models/mlp.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 |
4 |
5 | class MLP(nn.Module):
6 | def __init__(self, input_dim, hidden_dims=(128, 128), activation='tanh'):
7 | super().__init__()
8 | if activation == 'tanh':
9 | self.activation = torch.tanh
10 | elif activation == 'relu':
11 | self.activation = torch.relu
12 | elif activation == 'sigmoid':
13 | self.activation = torch.sigmoid
14 | elif activation == 'gelu':
15 | self.activation = torch.nn.GELU()
16 |
17 | self.out_dim = hidden_dims[-1]
18 | self.affine_layers = nn.ModuleList()
19 | last_dim = input_dim
20 | for nh in hidden_dims:
21 | self.affine_layers.append(nn.Linear(last_dim, nh))
22 | last_dim = nh
23 |
24 | def forward(self, x):
25 | for affine in self.affine_layers:
26 | x = self.activation(affine(x))
27 | return x
28 |
29 |
30 | class MLPWithInputSkips(torch.nn.Module):
31 | def __init__(
32 | self,
33 | n_layers: int,
34 | input_dim: int,
35 | output_dim: int,
36 | skip_dim: int,
37 | hidden_dim: int,
38 | input_skips,
39 | ):
40 | super().__init__()
41 |
42 | layers = []
43 |
44 | for layeri in range(n_layers):
45 | if layeri == 0:
46 | dimin = input_dim
47 | dimout = hidden_dim
48 | elif layeri in input_skips:
49 | dimin = hidden_dim + skip_dim
50 | dimout = hidden_dim
51 | else:
52 | dimin = hidden_dim
53 | dimout = hidden_dim
54 |
55 | linear = torch.nn.Linear(dimin, dimout)
56 | layers.append(torch.nn.Sequential(linear, torch.nn.ReLU(True)))
57 |
58 | self.mlp = torch.nn.ModuleList(layers)
59 | self._input_skips = set(input_skips)
60 |
61 | def forward(self, x: torch.Tensor, z: torch.Tensor) -> torch.Tensor:
62 | y = x
63 |
64 | for li, layer in enumerate(self.mlp):
65 | if li in self._input_skips:
66 | y = torch.cat((y, z), dim=-1)
67 |
68 | y = layer(y)
69 |
70 | return y
71 |
--------------------------------------------------------------------------------
/uhc/khrylib/models/mobile_net.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 | from uhc.khrylib.utils.torch import *
3 |
4 |
5 | class MobileNet(nn.Module):
6 | def __init__(self, out_dim):
7 | super().__init__()
8 | self.out_dim = out_dim
9 |
10 | def conv_bn(inp, oup, stride):
11 | return nn.Sequential(
12 | nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
13 | nn.BatchNorm2d(oup),
14 | nn.ReLU(inplace=True)
15 | )
16 |
17 | def conv_dw(inp, oup, stride):
18 | return nn.Sequential(
19 | nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
20 | nn.BatchNorm2d(inp),
21 | nn.ReLU(inplace=True),
22 |
23 | nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
24 | nn.BatchNorm2d(oup),
25 | nn.ReLU(inplace=True),
26 | )
27 |
28 | self.model = nn.Sequential(
29 | conv_bn(3, 32, 2),
30 | conv_dw(32, 64, 1),
31 | conv_dw(64, 128, 2),
32 | conv_dw(128, 128, 1),
33 | conv_dw(128, 256, 2),
34 | conv_dw(256, 256, 1),
35 | conv_dw(256, 512, 2),
36 | conv_dw(512, 512, 1),
37 | conv_dw(512, 512, 1),
38 | conv_dw(512, 512, 1),
39 | conv_dw(512, 512, 1),
40 | conv_dw(512, 512, 1),
41 | conv_dw(512, 1024, 2),
42 | conv_dw(1024, 1024, 1),
43 | nn.AvgPool2d(7),
44 | )
45 | self.fc = nn.Linear(1024, out_dim)
46 |
47 | def forward(self, x):
48 | x = self.model(x)
49 | x = x.view(-1, 1024)
50 | x = self.fc(x)
51 | return x
52 |
53 |
54 | if __name__ == '__main__':
55 | import time
56 | torch.set_grad_enabled(False)
57 | net = MobileNet(128)
58 | input = ones(1, 3, 224, 224)
59 | for i in range(10):
60 | t0 = time.time()
61 | out = net(input)
62 | print(time.time() - t0)
63 | print(out.shape)
64 |
--------------------------------------------------------------------------------
/uhc/khrylib/models/resnet.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 | from torchvision import models
3 | from uhc.khrylib.utils.torch import *
4 |
5 |
6 | class ResNet(nn.Module):
7 |
8 | def __init__(self, out_dim, fix_params=False, running_stats=False):
9 | super().__init__()
10 | self.out_dim = out_dim
11 | self.resnet = models.resnet18(pretrained=True)
12 | if fix_params:
13 | for param in self.resnet.parameters():
14 | param.requires_grad = False
15 | self.resnet.fc = nn.Linear(self.resnet.fc.in_features, out_dim)
16 | self.bn_stats(running_stats)
17 |
18 | def forward(self, x):
19 | return self.resnet(x)
20 |
21 | def bn_stats(self, track_running_stats):
22 | for m in self.modules():
23 | if type(m) == nn.BatchNorm2d:
24 | m.track_running_stats = track_running_stats
25 |
26 |
27 | if __name__ == '__main__':
28 | import time
29 | net = ResNet(128)
30 | t0 = time.time()
31 | input = ones(1, 3, 224, 224)
32 | out = net(input)
33 | print(time.time() - t0)
34 | print(out.shape)
35 |
--------------------------------------------------------------------------------
/uhc/khrylib/models/rnn.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from uhc.khrylib.utils.torch import *
3 |
4 |
5 | class RNN(nn.Module):
6 | def __init__(self, input_dim, out_dim, cell_type='lstm', bi_dir=False):
7 | super().__init__()
8 | self.input_dim = input_dim
9 | self.out_dim = out_dim
10 | self.cell_type = cell_type
11 | self.bi_dir = bi_dir
12 | self.mode = 'batch'
13 | rnn_cls = nn.LSTMCell if cell_type == 'lstm' else nn.GRUCell
14 | hidden_dim = out_dim // 2 if bi_dir else out_dim
15 | self.rnn_f = rnn_cls(self.input_dim, hidden_dim)
16 | if bi_dir:
17 | self.rnn_b = rnn_cls(self.input_dim, hidden_dim)
18 | self.hx, self.cx = None, None
19 |
20 | def set_mode(self, mode):
21 | self.mode = mode
22 |
23 | def initialize(self, batch_size=1, hx=None, cx=None):
24 | if self.mode == 'step':
25 | self.hx = zeros((batch_size, self.rnn_f.hidden_size)) if hx is None else hx
26 | if self.cell_type == 'lstm':
27 | self.cx = zeros((batch_size, self.rnn_f.hidden_size)) if cx is None else cx
28 |
29 | def forward(self, x):
30 | if self.mode == 'step':
31 | self.hx, self.cx = batch_to(x.device, self.hx, self.cx)
32 | if self.cell_type == 'lstm':
33 | self.hx, self.cx = self.rnn_f(x, (self.hx, self.cx))
34 | else:
35 | self.hx = self.rnn_f(x, self.hx)
36 | rnn_out = self.hx
37 | else:
38 | rnn_out_f = self.batch_forward(x)
39 | if not self.bi_dir:
40 | return rnn_out_f
41 | rnn_out_b = self.batch_forward(x, reverse=True)
42 | rnn_out = torch.cat((rnn_out_f, rnn_out_b), 2)
43 | return rnn_out
44 |
45 | def batch_forward(self, x, reverse=False):
46 | rnn = self.rnn_b if reverse else self.rnn_f
47 | rnn_out = []
48 | hx = zeros((x.size(1), rnn.hidden_size), device=x.device)
49 | if self.cell_type == 'lstm':
50 | cx = zeros((x.size(1), rnn.hidden_size), device=x.device)
51 | ind = reversed(range(x.size(0))) if reverse else range(x.size(0))
52 | for t in ind:
53 | if self.cell_type == 'lstm':
54 | hx, cx = rnn(x[t, ...], (hx, cx))
55 | else:
56 | hx = rnn(x[t, ...], hx)
57 | rnn_out.append(hx.unsqueeze(0))
58 | if reverse:
59 | rnn_out.reverse()
60 | rnn_out = torch.cat(rnn_out, 0)
61 | return rnn_out
62 |
63 |
64 | if __name__ == '__main__':
65 | rnn = RNN(12, 24, 'gru', bi_dir=True)
66 | input = zeros(5, 3, 12)
67 | out = rnn(input)
68 | print(out.shape)
69 |
--------------------------------------------------------------------------------
/uhc/khrylib/models/simple_cnn.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class SimpleCNN(nn.Module):
6 |
7 | def __init__(self, out_dim):
8 | super().__init__()
9 | self.conv1 = nn.Conv2d(3, 32, kernel_size=4, stride=4)
10 | self.conv2 = nn.Conv2d(32, 32, kernel_size=4, stride=4)
11 | self.conv3 = nn.Conv2d(32, 16, kernel_size=4, stride=4)
12 | self.fc = nn.Linear(144, out_dim)
13 |
14 | def forward(self, x):
15 | x = torch.relu(self.conv1(x))
16 | x = torch.relu(self.conv2(x))
17 | x = torch.relu(self.conv3(x))
18 | x = self.fc(x.view(x.size(0), -1))
19 | return x
20 |
21 |
22 | if __name__ == '__main__':
23 | import time
24 | torch.set_grad_enabled(False)
25 | net = SimpleCNN(128)
26 | t0 = time.time()
27 | input = torch.zeros(1, 3, 224, 224)
28 | out = net(input)
29 | print(time.time() - t0)
30 | print(out.shape)
31 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ['agents', 'core', 'envs']
2 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/agents/__init__.py:
--------------------------------------------------------------------------------
1 | from uhc.khrylib.rl.agents.agent_pg import AgentPG
2 | from uhc.khrylib.rl.agents.agent_ppo import AgentPPO
3 | from uhc.khrylib.rl.agents.agent_trpo import AgentTRPO
4 |
5 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/agents/agent_pg.py:
--------------------------------------------------------------------------------
1 | from uhc.khrylib.rl.core import estimate_advantages
2 | from uhc.khrylib.rl.agents.agent import Agent
3 | from uhc.khrylib.utils.torch import *
4 | import time
5 |
6 |
7 | class AgentPG(Agent):
8 |
9 | def __init__(self, tau=0.95, optimizer_policy=None, optimizer_value=None,
10 | opt_num_epochs=1, value_opt_niter=1, **kwargs):
11 | super().__init__(**kwargs)
12 | self.tau = tau
13 | self.optimizer_policy = optimizer_policy
14 | self.optimizer_value = optimizer_value
15 | self.opt_num_epochs = opt_num_epochs
16 | self.value_opt_niter = value_opt_niter
17 |
18 | def update_value(self, states, returns):
19 | """update critic"""
20 | for _ in range(self.value_opt_niter):
21 | values_pred = self.value_net(self.trans_value(states))
22 | value_loss = (values_pred - returns).pow(2).mean()
23 | self.optimizer_value.zero_grad()
24 | value_loss.backward()
25 | self.optimizer_value.step()
26 |
27 | def update_policy(self, states, actions, returns, advantages, exps):
28 | """update policy"""
29 | # use a2c by default
30 | ind = exps.nonzero().squeeze(1)
31 | for _ in range(self.opt_num_epochs):
32 | self.update_value(states, returns)
33 | log_probs = self.policy_net.get_log_prob(self.trans_policy(states)[ind], actions[ind])
34 | policy_loss = -(log_probs * advantages[ind]).mean()
35 | self.optimizer_policy.zero_grad()
36 | policy_loss.backward()
37 | self.optimizer_policy.step()
38 |
39 | def update_params(self, batch):
40 | t0 = time.time()
41 | to_train(*self.update_modules)
42 | states = torch.from_numpy(batch.states).to(self.dtype).to(self.device)
43 | actions = torch.from_numpy(batch.actions).to(self.dtype).to(self.device)
44 | rewards = torch.from_numpy(batch.rewards).to(self.dtype).to(self.device)
45 | masks = torch.from_numpy(batch.masks).to(self.dtype).to(self.device)
46 | exps = torch.from_numpy(batch.exps).to(self.dtype).to(self.device)
47 | with to_test(*self.update_modules):
48 | with torch.no_grad():
49 | values = self.value_net(self.trans_value(states))
50 |
51 | """get advantage estimation from the trajectories"""
52 | advantages, returns = estimate_advantages(rewards, masks, values, self.gamma, self.tau)
53 |
54 | self.update_policy(states, actions, returns, advantages, exps)
55 |
56 | return time.time() - t0
--------------------------------------------------------------------------------
/uhc/khrylib/rl/agents/agent_ppo.py:
--------------------------------------------------------------------------------
1 | import math
2 | from uhc.khrylib.utils.torch import *
3 | from uhc.khrylib.rl.agents import AgentPG
4 |
5 |
6 | class AgentPPO(AgentPG):
7 |
8 | def __init__(self, clip_epsilon=0.2, mini_batch_size=64, use_mini_batch=False,
9 | policy_grad_clip=None, **kwargs):
10 | super().__init__(**kwargs)
11 | self.clip_epsilon = clip_epsilon
12 | self.mini_batch_size = mini_batch_size
13 | self.use_mini_batch = use_mini_batch
14 | self.policy_grad_clip = policy_grad_clip
15 |
16 | def update_policy(self, states, actions, returns, advantages, exps):
17 | """update policy"""
18 | with to_test(*self.update_modules):
19 | with torch.no_grad():
20 | fixed_log_probs = self.policy_net.get_log_prob(self.trans_policy(states), actions)
21 |
22 | for _ in range(self.opt_num_epochs):
23 | if self.use_mini_batch:
24 | perm = np.arange(states.shape[0])
25 | np.random.shuffle(perm)
26 | perm = LongTensor(perm).to(self.device)
27 |
28 | states, actions, returns, advantages, fixed_log_probs, exps = \
29 | states[perm].clone(), actions[perm].clone(), returns[perm].clone(), advantages[perm].clone(), \
30 | fixed_log_probs[perm].clone(), exps[perm].clone()
31 |
32 | optim_iter_num = int(math.floor(states.shape[0] / self.mini_batch_size))
33 | for i in range(optim_iter_num):
34 | ind = slice(i * self.mini_batch_size, min((i + 1) * self.mini_batch_size, states.shape[0]))
35 | states_b, actions_b, advantages_b, returns_b, fixed_log_probs_b, exps_b = \
36 | states[ind], actions[ind], advantages[ind], returns[ind], fixed_log_probs[ind], exps[ind]
37 | ind = exps_b.nonzero(as_tuple=False).squeeze(1)
38 | self.update_value(states_b, returns_b)
39 | surr_loss = self.ppo_loss(states_b, actions_b, advantages_b, fixed_log_probs_b, ind)
40 | self.optimizer_policy.zero_grad()
41 | surr_loss.backward()
42 | self.clip_policy_grad()
43 | self.optimizer_policy.step()
44 | else:
45 | ind = exps.nonzero(as_tuple=False).squeeze(1)
46 | self.update_value(states, returns)
47 | surr_loss = self.ppo_loss(states, actions, advantages, fixed_log_probs, ind)
48 | self.optimizer_policy.zero_grad()
49 | surr_loss.backward()
50 | self.clip_policy_grad()
51 | self.optimizer_policy.step()
52 |
53 | def clip_policy_grad(self):
54 | if self.policy_grad_clip is not None:
55 | for params, max_norm in self.policy_grad_clip:
56 | torch.nn.utils.clip_grad_norm_(params, max_norm)
57 |
58 | def ppo_loss(self, states, actions, advantages, fixed_log_probs, ind):
59 | log_probs = self.policy_net.get_log_prob(self.trans_policy(states)[ind], actions[ind])
60 | ratio = torch.exp(log_probs - fixed_log_probs[ind])
61 | advantages = advantages[ind]
62 | surr1 = ratio * advantages
63 | surr2 = torch.clamp(ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon) * advantages
64 | surr_loss = -torch.min(surr1, surr2).mean()
65 | return surr_loss
--------------------------------------------------------------------------------
/uhc/khrylib/rl/core/__init__.py:
--------------------------------------------------------------------------------
1 | from uhc.khrylib.rl.core.common import *
2 | from uhc.khrylib.rl.core.critic import Value
3 | from uhc.khrylib.rl.core.distributions import DiagGaussian, Categorical
4 | from uhc.khrylib.rl.core.logger_rl import LoggerRL
5 | from uhc.khrylib.rl.core.policy import Policy
6 | from uhc.khrylib.rl.core.policy_disc import PolicyDiscrete
7 | from uhc.khrylib.rl.core.policy_gaussian import PolicyGaussian
8 | from uhc.khrylib.rl.core.trajbatch import TrajBatch
--------------------------------------------------------------------------------
/uhc/khrylib/rl/core/common.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from uhc.khrylib.utils import batch_to
3 |
4 |
5 | def estimate_advantages(rewards, masks, values, gamma, tau):
6 | device = rewards.device
7 | rewards, masks, values = batch_to(torch.device('cpu'), rewards, masks, values)
8 | tensor_type = type(rewards)
9 | deltas = tensor_type(rewards.size(0), 1)
10 | advantages = tensor_type(rewards.size(0), 1)
11 |
12 | prev_value = 0
13 | prev_advantage = 0
14 | for i in reversed(range(rewards.size(0))):
15 | deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i]
16 | advantages[i] = deltas[i] + gamma * tau * prev_advantage * masks[i]
17 |
18 | prev_value = values[i, 0]
19 | prev_advantage = advantages[i, 0]
20 |
21 | returns = values + advantages
22 | advantages = (advantages - advantages.mean()) / advantages.std()
23 |
24 | advantages, returns = batch_to(device, advantages, returns)
25 | return advantages, returns
26 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/core/critic.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 |
4 |
5 | class Value(nn.Module):
6 | def __init__(self, net, net_out_dim=None):
7 | super().__init__()
8 | self.net = net
9 | if net_out_dim is None:
10 | net_out_dim = net.out_dim
11 | self.value_head = nn.Linear(net_out_dim, 1)
12 | self.value_head.weight.data.mul_(0.1)
13 | self.value_head.bias.data.mul_(0.0)
14 |
15 | def forward(self, x):
16 | x = self.net(x)
17 | value = self.value_head(x)
18 | return value
19 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/core/distributions.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.distributions import Normal
3 | from torch.distributions import Categorical as TorchCategorical
4 |
5 |
6 | class DiagGaussian(Normal):
7 |
8 | def __init__(self, loc, scale):
9 | super().__init__(loc, scale)
10 |
11 | def kl(self):
12 | loc1 = self.loc
13 | scale1 = self.scale
14 | log_scale1 = self.scale.log()
15 | loc0 = self.loc.detach()
16 | scale0 = self.scale.detach()
17 | log_scale0 = log_scale1.detach()
18 | kl = log_scale1 - log_scale0 + (scale0.pow(2) + (loc0 - loc1).pow(2)) / (2.0 * scale1.pow(2)) - 0.5
19 | return kl.sum(1, keepdim=True)
20 |
21 | def log_prob(self, value):
22 | return super().log_prob(value).sum(1, keepdim=True)
23 |
24 | def mean_sample(self):
25 | return self.loc
26 |
27 |
28 | class Categorical(TorchCategorical):
29 |
30 | def __init__(self, probs=None, logits=None):
31 | super().__init__(probs, logits)
32 |
33 | def kl(self):
34 | loc1 = self.loc
35 | scale1 = self.scale
36 | log_scale1 = self.scale.log()
37 | loc0 = self.loc.detach()
38 | scale0 = self.scale.detach()
39 | log_scale0 = log_scale1.detach()
40 | kl = log_scale1 - log_scale0 + (scale0.pow(2) + (loc0 - loc1).pow(2)) / (2.0 * scale1.pow(2)) - 0.5
41 | return kl.sum(1, keepdim=True)
42 |
43 | def log_prob(self, value):
44 | return super().log_prob(value).unsqueeze(1)
45 |
46 | def mean_sample(self):
47 | return self.probs.argmax(dim=1)
48 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/core/logger_rl.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 |
4 | class LoggerRL:
5 |
6 | def __init__(self):
7 | self.num_steps = 0
8 | self.num_episodes = 0
9 | self.avg_episode_len = 0
10 | self.total_reward = 0
11 | self.min_episode_reward = math.inf
12 | self.max_episode_reward = -math.inf
13 | self.total_c_reward = 0
14 | self.min_c_reward = math.inf
15 | self.max_c_reward = -math.inf
16 | self.episode_reward = 0
17 | self.avg_episode_reward = 0
18 | self.avg_c_reward = 0
19 | self.avg_episode_c_reward = 0
20 | self.total_c_info = 0
21 | self.avg_c_info = 0
22 | self.avg_episode_c_info = 0
23 | self.sample_time = 0
24 |
25 | def start_episode(self, env):
26 | self.episode_reward = 0
27 |
28 | def step(self, env, reward, c_reward, c_info, info):
29 | self.episode_reward += reward
30 | self.total_c_reward += c_reward
31 | self.total_c_info += c_info
32 | self.min_c_reward = min(self.min_c_reward, c_reward)
33 | self.max_c_reward = max(self.max_c_reward, c_reward)
34 | self.num_steps += 1
35 |
36 | def end_episode(self, env):
37 | self.num_episodes += 1
38 | self.total_reward += self.episode_reward
39 | self.min_episode_reward = min(self.min_episode_reward, self.episode_reward)
40 | self.max_episode_reward = max(self.max_episode_reward, self.episode_reward)
41 |
42 | def end_sampling(self):
43 | self.avg_episode_len = self.num_steps / self.num_episodes
44 | self.avg_episode_reward = self.total_reward / self.num_episodes
45 | self.avg_c_reward = self.total_c_reward / self.num_steps
46 | self.avg_c_info = self.total_c_info / self.num_steps
47 | self.avg_episode_c_reward = self.total_c_reward / self.num_episodes
48 | self.avg_episode_c_info = self.total_c_info / self.num_episodes
49 |
50 | @classmethod
51 | def merge(cls, logger_list):
52 | logger = cls()
53 | logger.total_reward = sum([x.total_reward for x in logger_list])
54 | logger.num_episodes = sum([x.num_episodes for x in logger_list])
55 | logger.num_steps = sum([x.num_steps for x in logger_list])
56 | logger.avg_episode_len = logger.num_steps / logger.num_episodes
57 | logger.avg_episode_reward = logger.total_reward / logger.num_episodes
58 | logger.max_episode_reward = max([x.max_episode_reward for x in logger_list])
59 | logger.min_episode_reward = max([x.min_episode_reward for x in logger_list])
60 | logger.total_c_reward = sum([x.total_c_reward for x in logger_list])
61 | logger.avg_c_reward = logger.total_c_reward / logger.num_steps
62 | logger.max_c_reward = max([x.max_c_reward for x in logger_list])
63 | logger.min_c_reward = min([x.min_c_reward for x in logger_list])
64 | logger.total_c_info = sum([x.total_c_info for x in logger_list])
65 | logger.avg_c_info = logger.total_c_info / logger.num_steps
66 | logger.avg_episode_c_reward = logger.total_c_reward / logger.num_episodes
67 | logger.avg_episode_c_info = logger.total_c_info / logger.num_episodes
68 | return logger
--------------------------------------------------------------------------------
/uhc/khrylib/rl/core/policy.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 |
4 | class Policy(nn.Module):
5 | def __init__(self):
6 | super().__init__()
7 |
8 | def forward(self, x):
9 | """This function should return a distribution to sample action from"""
10 | raise NotImplementedError
11 |
12 | def select_action(self, x, mean_action=False):
13 | dist = self.forward(x)
14 | action = dist.mean_sample() if mean_action else dist.sample()
15 | return action
16 |
17 | def get_kl(self, x):
18 | dist = self.forward(x)
19 | return dist.kl()
20 |
21 | def get_log_prob(self, x, action):
22 | dist = self.forward(x)
23 | return dist.log_prob(action)
24 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/core/policy_disc.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from uhc.utils.math_utils import *
3 | from uhc.khrylib.rl.core.distributions import Categorical
4 | from uhc.khrylib.rl.core.policy import Policy
5 |
6 |
7 | class PolicyDiscrete(Policy):
8 | def __init__(self, net, action_num, net_out_dim=None):
9 | super().__init__()
10 | self.type = 'discrete'
11 | if net_out_dim is None:
12 | net_out_dim = net.out_dim
13 | self.net = net
14 | self.action_head = nn.Linear(net_out_dim, action_num)
15 | self.action_head.weight.data.mul_(0.1)
16 | self.action_head.bias.data.mul_(0.0)
17 |
18 | def forward(self, x):
19 | x = self.net(x)
20 | action_prob = torch.softmax(self.action_head(x), dim=1)
21 | return Categorical(probs=action_prob)
22 |
23 | def get_fim(self, x):
24 | action_prob = self.forward(x)
25 | M = action_prob.pow(-1).view(-1).detach()
26 | return M, action_prob, {}
27 |
28 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/core/policy_gaussian.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from uhc.khrylib.rl.core.distributions import DiagGaussian
3 | from uhc.khrylib.rl.core.policy import Policy
4 | from uhc.utils.math_utils import *
5 | from uhc.khrylib.models.mlp import MLP
6 |
7 |
8 | class PolicyGaussian(Policy):
9 | def __init__(self, cfg, action_dim, state_dim, net_out_dim=None):
10 | super().__init__()
11 | self.type = "gaussian"
12 | policy_hsize = cfg.policy_hsize
13 | policy_htype = cfg.policy_htype
14 | fix_std = cfg.fix_std
15 | log_std = cfg.log_std
16 | self.net = net = MLP(state_dim, policy_hsize, policy_htype)
17 | if net_out_dim is None:
18 | net_out_dim = net.out_dim
19 | self.action_mean = nn.Linear(net_out_dim, action_dim)
20 | self.action_mean.weight.data.mul_(0.1)
21 | self.action_mean.bias.data.mul_(0.0)
22 | self.action_log_std = nn.Parameter(
23 | torch.ones(1, action_dim) * log_std, requires_grad=not fix_std
24 | )
25 |
26 | def forward(self, x):
27 | x = self.net(x)
28 | action_mean = self.action_mean(x)
29 | action_log_std = self.action_log_std.expand_as(action_mean)
30 | action_std = torch.exp(action_log_std)
31 | return DiagGaussian(action_mean, action_std)
32 |
33 | def get_fim(self, x):
34 | dist = self.forward(x)
35 | cov_inv = self.action_log_std.exp().pow(-2).squeeze(0).repeat(x.size(0))
36 | param_count = 0
37 | std_index = 0
38 | id = 0
39 | for name, param in self.named_parameters():
40 | if name == "action_log_std":
41 | std_id = id
42 | std_index = param_count
43 | param_count += param.view(-1).shape[0]
44 | id += 1
45 | return cov_inv.detach(), dist.loc, {"std_id": std_id, "std_index": std_index}
46 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/core/running_norm.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class RunningNorm(nn.Module):
6 | """
7 | y = (x-mean)/std
8 | using running estimates of mean,std
9 | """
10 |
11 | def __init__(self, dim, demean=True, destd=True, clip=5.0):
12 | super().__init__()
13 | self.dim = dim
14 | self.demean = demean
15 | self.destd = destd
16 | self.clip = clip
17 | self.register_buffer("n", torch.tensor(0, dtype=torch.long))
18 | self.register_buffer("mean", torch.zeros(dim))
19 | self.register_buffer("var", torch.zeros(dim))
20 | self.register_buffer("std", torch.zeros(dim))
21 |
22 | def update(self, x):
23 | var_x, mean_x = torch.var_mean(x, dim=0, unbiased=False)
24 | m = x.shape[0]
25 | w = self.n.to(x.dtype) / (m + self.n).to(x.dtype)
26 | self.var[:] = (
27 | w * self.var + (1 - w) * var_x + w * (1 - w) * (mean_x - self.mean).pow(2)
28 | )
29 | self.mean[:] = w * self.mean + (1 - w) * mean_x
30 | self.std[:] = torch.sqrt(self.var)
31 | self.n += m
32 |
33 | def forward(self, x):
34 | if self.training:
35 | with torch.no_grad():
36 | self.update(x)
37 | if self.n > 0:
38 | if self.demean:
39 | x = x - self.mean
40 | if self.destd:
41 | x = x / (self.std + 1e-8)
42 | if self.clip:
43 | x = torch.clamp(x, -self.clip, self.clip)
44 | return x
45 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/core/trajbatch.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | class TrajBatch:
5 | def __init__(self, memory_list):
6 | memory = memory_list[0]
7 | for x in memory_list[1:]:
8 | memory.append(x)
9 | self.batch = zip(*memory.sample())
10 | self.states = np.stack(next(self.batch))
11 | self.actions = np.stack(next(self.batch))
12 | self.masks = np.stack(next(self.batch))
13 | self.next_states = np.stack(next(self.batch))
14 | self.rewards = np.stack(next(self.batch))
15 | self.exps = np.stack(next(self.batch))
16 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/envs/visual/humanoid_vis.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import mujoco_py
3 |
4 | from uhc.khrylib.rl.envs.common import mujoco_env
5 |
6 |
7 | class HumanoidVisEnv(mujoco_env.MujocoEnv):
8 | def __init__(self, vis_model_file, nframes=6, focus=True):
9 | mujoco_env.MujocoEnv.__init__(self, vis_model_file, nframes)
10 |
11 | self.set_cam_first = set()
12 | self.focus = focus
13 |
14 | def step(self, a):
15 | return np.zeros((10, 1)), 0, False, dict()
16 |
17 | def reset_model(self):
18 | c = 0
19 | self.set_state(
20 | self.np_random.uniform(low=-c, high=c, size=self.model.nq),
21 | self.np_random.uniform(low=-c, high=c, size=self.model.nv),
22 | )
23 | return None
24 |
25 | def sim_forward(self):
26 | self.sim.forward()
27 |
28 | def set_video_path(
29 | self, image_path="/tmp/image_%07d.png", video_path="/tmp/video_%07d.mp4"
30 | ):
31 | self.viewer._image_path = image_path
32 | self.viewer._video_path = video_path
33 |
34 | def viewer_setup(self, mode):
35 | self.viewer.cam.trackbodyid = 1
36 | if self.focus:
37 | self.viewer.cam.lookat[:2] = self.data.qpos[:2]
38 | self.viewer.cam.lookat[2] = 0.8
39 | if mode not in self.set_cam_first:
40 | self.viewer.video_fps = 30
41 | self.viewer.frame_skip = self.frame_skip
42 | self.viewer.cam.distance = self.model.stat.extent * 1.5
43 | self.viewer.cam.elevation = -10
44 | self.viewer.cam.azimuth = 45
45 | self.set_cam_first.add(mode)
46 |
47 | def reload_sim_model(self, xml_str):
48 | del self.sim
49 | del self.model
50 | del self.data
51 | del self.viewer
52 | del self._viewers
53 | self.model = mujoco_py.load_model_from_xml(xml_str)
54 | self.sim = mujoco_py.MjSim(self.model)
55 | self.data = self.sim.data
56 | self.init_qpos = self.sim.data.qpos.copy()
57 | self.init_qvel = self.sim.data.qvel.copy()
58 | self.viewer = None
59 | self._viewers = {}
60 | self._get_viewer("human")._hide_overlay = True
61 | self.reset()
62 | print("Reloading Vis Sim")
63 |
--------------------------------------------------------------------------------
/uhc/khrylib/rl/utils/visualizer.py:
--------------------------------------------------------------------------------
1 | from uhc.khrylib.rl.envs.visual.humanoid_vis import HumanoidVisEnv
2 | import glfw
3 | import math
4 |
5 |
6 | class Visualizer:
7 | def __init__(self, vis_file):
8 | self.fr = 0
9 | self.num_fr = 0
10 | self.T_arr = [1, 2, 4, 6, 8, 10, 12, 15, 20, 30, 40, 50, 60]
11 | self.T = 12
12 | self.paused = False
13 | self.reverse = False
14 | self.repeat = False
15 | self.vis_file = vis_file
16 |
17 | self.env_vis = HumanoidVisEnv(vis_file, 1, focus=False)
18 |
19 | self.env_vis._get_viewer("human")._hide_overlay = True
20 | self.env_vis.set_custom_key_callback(self.key_callback)
21 |
22 | def data_generator(self):
23 | raise NotImplementedError
24 |
25 | def update_pose(self):
26 | raise NotImplementedError
27 |
28 | def key_callback(self, key, action, mods):
29 |
30 | if action != glfw.RELEASE:
31 | return False
32 | if key == glfw.KEY_D:
33 | self.T = self.T_arr[(self.T_arr.index(self.T) + 1) % len(self.T_arr)]
34 | print(f"T: {self.T}")
35 | elif key == glfw.KEY_F:
36 | self.T = self.T_arr[(self.T_arr.index(self.T) - 1) % len(self.T_arr)]
37 | print(f"T: {self.T}")
38 | elif key == glfw.KEY_Q:
39 | self.data = next(self.data_gen, None)
40 | if self.data is None:
41 | print("end of data!!")
42 | exit()
43 | self.fr = 0
44 | self.update_pose()
45 | elif key == glfw.KEY_W:
46 | self.fr = 0
47 | self.update_pose()
48 | elif key == glfw.KEY_E:
49 | self.fr = self.num_fr - 1
50 | self.update_pose()
51 | elif key == glfw.KEY_G:
52 | self.repeat = not self.repeat
53 | self.update_pose()
54 |
55 | elif key == glfw.KEY_S:
56 | self.reverse = not self.reverse
57 | elif key == glfw.KEY_RIGHT:
58 | if self.fr < self.num_fr - 1:
59 | self.fr += 1
60 | self.update_pose()
61 | elif key == glfw.KEY_LEFT:
62 | if self.fr > 0:
63 | self.fr -= 1
64 | self.update_pose()
65 | elif key == glfw.KEY_SPACE:
66 | self.paused = not self.paused
67 | else:
68 | return False
69 | return True
70 |
71 | def render(self):
72 | self.env_vis.render()
73 |
74 | def show_animation(self):
75 | self.t = 0
76 | while True:
77 | if self.t >= math.floor(self.T):
78 | if not self.reverse:
79 | if self.fr < self.num_fr - 1:
80 | self.fr += 1
81 | elif self.repeat:
82 | self.fr = 0
83 | elif self.reverse and self.fr > 0:
84 | self.fr -= 1
85 | self.update_pose()
86 | self.t = 0
87 | self.render()
88 | if not self.paused:
89 | self.t += 1
90 |
--------------------------------------------------------------------------------
/uhc/khrylib/scripts/create_vis_model.py:
--------------------------------------------------------------------------------
1 | from lxml import etree
2 | from lxml.etree import XMLParser, parse, ElementTree, Element, SubElement
3 | from copy import deepcopy
4 | import argparse
5 |
6 |
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument('--in-model', type=str, default='human36m_vis_single_v1')
9 | parser.add_argument('--out-model', type=str, default='human36m_vis_sample_single_v1')
10 | parser.add_argument('--num', type=int, default=10)
11 | parser.add_argument('--trans-range', type=int, default=(-1, -1))
12 |
13 | args = parser.parse_args()
14 |
15 | in_file = 'assets/mujoco_models/%s.xml' % args.in_model
16 | out_file = 'assets/mujoco_models/%s.xml' % args.out_model
17 | parser = XMLParser(remove_blank_text=True)
18 | tree = parse(in_file, parser=parser)
19 | root = tree.getroot().find('worldbody')
20 | body = root.find('body')
21 | for i in range(1, args.num):
22 | new_body = deepcopy(body)
23 | if args.trans_range[0] <= i < args.trans_range[1]:
24 | new_body.attrib['childclass'] = 'trans'
25 | new_body.attrib['name'] = '%d_%s' % (i, new_body.attrib['name'])
26 | for node in new_body.findall(".//body"):
27 | node.attrib['name'] = '%d_%s' % (i, node.attrib['name'])
28 | for node in new_body.findall(".//joint"):
29 | node.attrib['name'] = '%d_%s' % (i, node.attrib['name'])
30 | root.append(new_body)
31 | if args.trans_range[0] == 0:
32 | body.attrib['childclass'] = 'trans'
33 |
34 | tree.write(out_file, pretty_print=True)
--------------------------------------------------------------------------------
/uhc/khrylib/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from uhc.khrylib.utils.memory import *
2 | from uhc.khrylib.utils.zfilter import *
3 | from uhc.khrylib.utils.torch import *
4 | from uhc.khrylib.utils.tools import *
5 | from uhc.khrylib.utils.logger import *
6 | from uhc.khrylib.utils.mujoco import *
7 |
--------------------------------------------------------------------------------
/uhc/khrylib/utils/logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | def create_logger(filename, file_handle=True):
5 | # create logger
6 | for handler in logging.root.handlers[:]:
7 | logging.root.removeHandler(handler)
8 |
9 | logger = logging.getLogger(filename)
10 | if (logger.hasHandlers()):
11 | logger.handlers.clear()
12 | logger.propagate = False
13 | logger.setLevel(logging.DEBUG)
14 | # create console handler with a higher log level
15 | ch = logging.StreamHandler()
16 | ch.setLevel(logging.INFO)
17 | stream_formatter = logging.Formatter('%(message)s')
18 | ch.setFormatter(stream_formatter)
19 | logger.addHandler(ch)
20 |
21 | if file_handle :
22 | # create file handler which logs even debug messages
23 | os.makedirs(os.path.dirname(filename), exist_ok=True)
24 | fh = logging.FileHandler(filename, mode='a')
25 | fh.setLevel(logging.DEBUG)
26 | file_formatter = logging.Formatter('[%(asctime)s] %(message)s')
27 | fh.setFormatter(file_formatter)
28 | logger.addHandler(fh)
29 |
30 | return logger
31 |
--------------------------------------------------------------------------------
/uhc/khrylib/utils/memory.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 |
4 | class Memory(object):
5 | def __init__(self):
6 | self.memory = []
7 |
8 | def push(self, *args):
9 | """Saves a tuple."""
10 | self.memory.append([*args])
11 |
12 | def sample(self, batch_size=None):
13 | if batch_size is None:
14 | return self.memory
15 | else:
16 | random_batch = random.sample(self.memory, batch_size)
17 | return random_batch
18 |
19 | def append(self, new_memory):
20 | self.memory += new_memory.memory
21 |
22 | def __len__(self):
23 | return len(self.memory)
24 |
25 |
--------------------------------------------------------------------------------
/uhc/khrylib/utils/mujoco.py:
--------------------------------------------------------------------------------
1 | from uhc.utils.math_utils import *
2 |
3 | def get_body_qveladdr(model):
4 | body_qposaddr = dict()
5 | for i, body_name in enumerate(model.body_names):
6 | start_joint = model.body_jntadr[i]
7 | if start_joint < 0:
8 | continue
9 | end_joint = start_joint + model.body_jntnum[i]
10 | start_qposaddr = model.jnt_dofadr[start_joint]
11 | if end_joint < len(model.jnt_dofadr):
12 | end_qposaddr = model.jnt_dofadr[end_joint]
13 | else:
14 | end_qposaddr = model.nv
15 | body_qposaddr[body_name] = (start_qposaddr, end_qposaddr)
16 | return body_qposaddr
17 |
18 | def get_body_qposaddr(model):
19 | body_qposaddr = dict()
20 | for i, body_name in enumerate(model.body_names):
21 | start_joint = model.body_jntadr[i]
22 | if start_joint < 0:
23 | continue
24 | end_joint = start_joint + model.body_jntnum[i]
25 | start_qposaddr = model.jnt_qposadr[start_joint]
26 | if end_joint < len(model.jnt_qposadr):
27 | end_qposaddr = model.jnt_qposadr[end_joint]
28 | else:
29 | end_qposaddr = model.nq
30 | body_qposaddr[body_name] = (start_qposaddr, end_qposaddr)
31 | return body_qposaddr
32 |
33 | def align_human_state(qpos, qvel, ref_qpos):
34 | qpos[:2] = ref_qpos[:2]
35 | hq = get_heading_q(ref_qpos[3:7])
36 | qpos[3:7] = quaternion_multiply(hq, qpos[3:7])
37 | qvel[:3] = quat_mul_vec(hq, qvel[:3])
38 |
39 |
40 | def get_traj_pos(orig_traj):
41 | traj_pos = orig_traj[:, 2:].copy()
42 | for i in range(traj_pos.shape[0]):
43 | traj_pos[i, 1:5] = de_heading(traj_pos[i, 1:5])
44 | return traj_pos
45 |
46 |
47 | def get_traj_vel(orig_traj, dt):
48 | traj_vel = []
49 | for i in range(orig_traj.shape[0] - 1):
50 | vel = get_qvel_fd(orig_traj[i, :], orig_traj[i + 1, :], dt, 'heading')
51 | traj_vel.append(vel)
52 | traj_vel.append(traj_vel[-1].copy())
53 | traj_vel = np.vstack(traj_vel)
54 | return traj_vel
--------------------------------------------------------------------------------
/uhc/khrylib/utils/tools.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import shutil
4 | import datetime
5 | import subprocess
6 | from os import path
7 | from PIL import Image
8 | from uhc.utils.math_utils import *
9 | import cv2
10 |
11 |
12 | def assets_dir():
13 | return path.abspath(path.join(path.dirname(path.abspath(__file__)), '../assets'))
14 |
15 |
16 | def out_dir():
17 | return path.abspath(path.join(path.dirname(path.abspath(__file__)), '../out'))
18 |
19 |
20 | def log_dir():
21 | return path.abspath(path.join(path.dirname(path.abspath(__file__)), '../logs'))
22 |
23 |
24 | def recreate_dirs(*dirs):
25 | for d in dirs:
26 | if os.path.exists(d):
27 | shutil.rmtree(d, ignore_errors=True)
28 | os.makedirs(d)
29 |
30 |
31 | def load_img(img_path):  # renamed from `path`, which shadowed the `from os import path` alias
32 |     # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
33 |     with open(img_path, 'rb') as f:
34 |         I = Image.open(f)
35 |         img = I.resize((224, 224), Image.LANCZOS).convert('RGB')  # Image.ANTIALIAS was removed in Pillow 10
36 |     return img
37 |
38 |
39 | def save_screen_shots(window, file_name, transparent=False, autogui=False):
40 | import glfw
41 | xpos, ypos = glfw.get_window_pos(window)
42 | width, height = glfw.get_window_size(window)
43 | if autogui:
44 | import pyautogui
45 | image = pyautogui.screenshot(region=(xpos*2, ypos*2, width*2, height*2))
46 | image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGRA if transparent else cv2.COLOR_RGB2BGR)
47 | if transparent:
48 | image[np.all(image >= [240, 240, 240, 240], axis=2)] = [255, 255, 255, 0]
49 | cv2.imwrite(file_name, image)
50 | else:
51 | print(width*2, height*2)
52 |         subprocess.call(['screencapture', '-x', '-m', '-R', f'{xpos},{ypos},{width},{height}', file_name])  # pass -R and the rect as separate argv entries so the flag parses cleanly
53 |
54 |
55 | def get_eta_str(cur_iter, total_iter, time_per_iter):
56 | eta = time_per_iter * (total_iter - cur_iter - 1)
57 | return str(datetime.timedelta(seconds=round(eta)))
58 |
59 |
60 | class AverageMeter(object):
61 | """Computes and stores the average and current value"""
62 |
63 | def __init__(self):
64 | self.reset()
65 |
66 | def reset(self):
67 | self.val = 0
68 | self.avg = 0
69 | self.sum = 0
70 | self.count = 0
71 |
72 | def update(self, val, n=1):
73 | self.val = val
74 | self.sum += val * n
75 | self.count += n
76 | self.avg = self.sum / self.count
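
A short sketch of AverageMeter and get_eta_str in a training loop (the loss values are made up):

    import time

    meter = AverageMeter()
    start = time.time()
    total_iter = 1000
    for it in range(total_iter):
        loss = 1.0 / (it + 1)   # dummy loss
        meter.update(loss, n=1)
        time_per_iter = (time.time() - start) / (it + 1)
        if it % 100 == 0:
            print(f'iter {it}: cur {meter.val:.4f} avg {meter.avg:.4f} '
                  f'eta {get_eta_str(it, total_iter, time_per_iter)}')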
--------------------------------------------------------------------------------
/uhc/khrylib/utils/zfilter.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | # from https://github.com/joschu/modular_rl
4 | # http://www.johndcook.com/blog/standard_deviation/
5 |
6 |
7 | class RunningStat(object):
8 | def __init__(self, shape):
9 | self._n = 0
10 | self._M = np.zeros(shape)
11 | self._S = np.zeros(shape)
12 |
13 | def push(self, x):
14 | x = np.asarray(x)
15 | assert x.shape == self._M.shape
16 | self._n += 1
17 | if self._n == 1:
18 | self._M[...] = x
19 | else:
20 | oldM = self._M.copy()
21 | self._M[...] = oldM + (x - oldM) / self._n
22 | self._S[...] = self._S + (x - oldM) * (x - self._M)
23 |
24 | @property
25 | def n(self):
26 | return self._n
27 |
28 | @property
29 | def mean(self):
30 | return self._M
31 |
32 | @property
33 | def var(self):
34 | return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)
35 |
36 | @property
37 | def std(self):
38 | return np.sqrt(self.var)
39 |
40 | @property
41 | def shape(self):
42 | return self._M.shape
43 |
44 |
45 | class ZFilter:
46 | """
47 | y = (x-mean)/std
48 | using running estimates of mean,std
49 | """
50 |
51 | def __init__(self, shape, demean=True, destd=True, clip=10.0):
52 | self.demean = demean
53 | self.destd = destd
54 | self.clip = clip
55 |
56 | self.rs = RunningStat(shape)
57 |
58 | def __call__(self, x, update=True):
59 | if update:
60 | self.rs.push(x)
61 | if self.demean:
62 | x = x - self.rs.mean
63 | if self.destd:
64 | x = x / (self.rs.std + 1e-8)
65 | if self.clip:
66 | x = np.clip(x, -self.clip, self.clip)
67 | return x
68 |
69 | def set_mean_std(self, mean, std, n):
70 | self.rs._n = n
71 | self.rs._M[...] = mean
72 |         self.rs._S[...] = np.square(std) * max(n - 1, 1)  # _S holds the sum of squared deviations (var = _S / (n - 1)), not std itself
73 |
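A usage sketch: normalize observations with ZFilter, then save and restore the running statistics (the observations here are random stand-ins):

    obs_filter = ZFilter((4,), clip=5.0)
    for _ in range(1000):
        obs = np.random.randn(4)       # stand-in for an environment observation
        norm_obs = obs_filter(obs)     # updates running mean/std, then normalizes

    mean, std, n = obs_filter.rs.mean.copy(), obs_filter.rs.std.copy(), obs_filter.rs.n
    eval_filter = ZFilter((4,), clip=5.0)
    eval_filter.set_mean_std(mean, std, n)                     # restore stats for evaluation
    frozen = eval_filter(np.random.randn(4), update=False)     # normalize without updating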
--------------------------------------------------------------------------------
/uhc/utils/config_utils/base_config.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | import os
3 | import os.path as osp
4 | import glob
5 | import numpy as np
6 | from uhc.khrylib.utils import recreate_dirs
7 |
8 | class Base_Config:
9 |
10 | def __init__(self, cfg_id, base_dir = "", create_dirs=False, cfg_dict=None):
11 | self.id = cfg_id
12 | base_dir = base_dir if base_dir else ''
13 | self.base_dir = os.path.expanduser(base_dir)
14 |
15 | if cfg_dict is not None:
16 | cfg = cfg_dict
17 | else:
18 | cfg_path = osp.join(self.base_dir, f"config/**/{cfg_id}.yml")
19 | files = glob.glob(cfg_path, recursive=True)
20 |         assert len(files) == 1, f"expected exactly one config matching {cfg_path}, found {len(files)}"
21 |         cfg_name = files[0]
22 |         with open(cfg_name, 'r') as f:
23 |             cfg = yaml.safe_load(f)
24 | # create dirs
25 | self.cfg_dict = cfg
26 | self.main_result_dir = osp.join(self.base_dir, "results")
27 | self.proj_name = proj_name = cfg.get("proj_name", "motion_im")
28 |
29 | self.cfg_dir = osp.join(self.main_result_dir, proj_name, cfg_id)
30 | self.model_dir = osp.join(self.cfg_dir, "models")
31 |
32 | self.output_dir = self.result_dir = osp.join(self.cfg_dir, "results")
33 | self.log_dir = osp.join(self.cfg_dir, "log")
34 | os.makedirs(self.model_dir, exist_ok=True)
35 | os.makedirs(self.output_dir, exist_ok=True)
36 | if create_dirs and not osp.exists(self.log_dir):
37 | recreate_dirs(self.log_dir)
38 | self.seed = self.cfg_dict.get('seed', 1)
39 | self.notes = cfg.get('notes', "exp notes")
40 |
41 | # group specs
42 | self.data_specs = cfg.get('data_specs', {})
43 | self.loss_specs = cfg.get('loss_specs', {})
44 | self.model_specs = cfg.get('model_specs', {})
45 |
46 | # Default training configs
47 | self.lr = self.cfg_dict.get("lr", 3.e-4)
48 | self.num_epoch = self.cfg_dict.get("num_epoch", 100)
49 | self.num_epoch_fix = self.cfg_dict.get("num_epoch_fix", 10)
50 | self.save_n_epochs = self.cfg_dict.get("save_n_epochs", 20)
51 | self.eval_n_epochs = self.cfg_dict.get("eval_n_epochs", 20)
52 |
53 | self.num_samples = self.data_specs.get("num_samples", 5000)
54 | self.batch_size = self.data_specs.get("batch_size", 5000)
55 |
56 |
57 | def get(self, key, default = None):
58 | return self.cfg_dict.get(key, default)
59 |
60 |     def update(self, args):
61 |         # args is typically an argparse.Namespace (the parameter shadowed the dict builtin before)
62 |         for k, v in vars(args).items():
63 |             self.__setattr__(k, v)
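
Constructing a config directly from a dict bypasses the YAML lookup; the keys and paths below are illustrative:

    cfg = Base_Config(
        'demo_exp',
        base_dir='/tmp/uhc_demo',      # results land under /tmp/uhc_demo/results/...
        cfg_dict={
            'proj_name': 'motion_im',
            'seed': 42,
            'lr': 1e-4,
            'data_specs': {'batch_size': 1024},
        },
    )
    print(cfg.model_dir)     # /tmp/uhc_demo/results/motion_im/demo_exp/models
    print(cfg.get('seed'))   # 42
    print(cfg.batch_size)    # 1024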
--------------------------------------------------------------------------------
/uhc/utils/config_utils/uhm_config.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | import os
3 | import os.path as osp
4 | import glob
5 | import numpy as np
6 |
7 | from uhc.khrylib.utils import recreate_dirs
8 | from uhc.utils.config_utils.base_config import Base_Config
9 |
10 | class Config(Base_Config):
11 |
12 | def __init__(self, mujoco_path = '%s.xml', **kwargs):
13 | super().__init__( **kwargs)
14 |
15 | self.mujoco_model_file = mujoco_path % self.cfg_dict['mujoco_model']
16 |
17 |
18 | self.policy_optimizer = self.cfg_dict['policy_optimizer']
19 | self.policy_specs = self.cfg_dict.get("policy_specs", {})
20 | self.scene_specs = self.cfg_dict.get("scene_specs", {})
21 | self.scene_mujoco_file = mujoco_path % self.scene_specs.get("scene_mujoco_file", "humanoid_smpl_neutral_mesh_all_h36m")
22 | self.cc_cfg = self.policy_specs.get("cc_cfg", "copycat_9")
23 | self.agent_name = self.cfg_dict.get("agent_name", "agent_uhm")
24 | self.model_name = self.model_specs.get("model_name", "kin_net")
25 | self.policy_name = self.policy_specs.get("policy_name", "kin_policy")
26 | self.env_name = self.scene_specs.get("env_name", "humanoid_kin_v1")
27 | ## Model Specs
28 | self.autoregressive = self.model_specs.get("autoregressive", True)
29 | self.remove_base = self.model_specs.get("remove_base", True)
30 |
31 | # Policy Specs
32 | self.reward_weights = self.policy_specs.get("reward_weights", {})
33 | self.env_term_body = self.policy_specs.get("env_term_body", "body")
34 |         self.env_episode_len = self.policy_specs.get("env_episode_len", None)  # the original default "body" was an apparent copy-paste from env_term_body
35 | self.obs_vel = self.cfg_dict.get('obs_vel', 'full')
36 |
37 | ## Data Specs
38 | self.fr_num = self.data_specs.get("fr_num", 80)
39 |
40 |
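A construction sketch: 'mujoco_model' and 'policy_optimizer' are the two keys the constructor reads unconditionally; the values below are hypothetical:

    cfg = Config(
        mujoco_path='assets/mujoco_models/%s.xml',   # '%s' is filled with cfg_dict['mujoco_model']
        cfg_id='uhm_demo',
        base_dir='/tmp/uhc_demo',
        cfg_dict={
            'mujoco_model': 'humanoid_smpl_neutral_mesh',   # hypothetical model name
            'policy_optimizer': 'Adam',
            'policy_specs': {'policy_name': 'kin_policy'},
        },
    )
    print(cfg.mujoco_model_file)   # assets/mujoco_models/humanoid_smpl_neutral_mesh.xml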
--------------------------------------------------------------------------------
/uhc/utils/create_h36m_humanoid.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import numpy as np
4 | from mujoco_py import load_model_from_path, MjSim
5 | from uhc.khrylib.rl.envs.common.mjviewer import MjViewer
6 | from uhc.khrylib.mocap.skeleton import Skeleton
7 | from data_process.h36m_specs import *
8 |
9 | parser = argparse.ArgumentParser()
10 | parser.add_argument('--render', action='store_true', default=False)  # with default=True the store_true flag could never be turned off
11 | parser.add_argument('--template_id', type=str, default='humanoid_template')
12 | parser.add_argument('--model_id', type=str, default='human36m_orig')
13 | args = parser.parse_args()
14 |
15 | template_file = 'assets/mujoco_models/template/%s.xml' % args.template_id
16 | model_file = 'assets/mujoco_models/%s.xml' % args.model_id
17 | skeleton = Skeleton()
18 | skeleton.load_from_offsets(offsets, parents, 0.01, exclude_bones, channels, spec_channels)
19 | print(template_file)
20 | skeleton.write_xml(model_file, template_file, offset=np.array([0, 0, 1]))
21 |
22 | # model = load_model_from_path(model_file)
23 | # sim = MjSim(model)
24 | # viewer = MjViewer(sim)
25 | # sim.data.qpos[:] = 0
26 | # sim.data.qpos[2] = 1.0
27 | # sim.forward()
28 |
29 | # while args.render:
30 | # viewer.render()
31 |
--------------------------------------------------------------------------------
/uhc/utils/flags.py:
--------------------------------------------------------------------------------
1 | __all__ = ['flags']  # 'summation' was exported here but never defined
2 |
3 | class Flags(object):
4 |     def __init__(self, *items):
5 |         # items is a flat (name, value, name, value, ...) sequence; pair it up with a stride-2 zip
6 |         # (the original zip(items[:-1], items[1:]) mis-paired anything beyond a single flag)
7 |         for key, val in zip(items[::2], items[1::2]):
8 |             setattr(self, key, val)
9 |
10 | flags = Flags('debug', False)
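
With the pairwise zip above, Flags accepts any number of (name, value) pairs; 'verbose' here is illustrative:

    f = Flags('debug', True, 'verbose', False)
    assert f.debug is True and f.verbose is False
    flags.debug = True   # downstream code typically flips the shared singleton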
--------------------------------------------------------------------------------
/uhc/utils/geom.py:
--------------------------------------------------------------------------------
1 | from vtk import (
2 | vtkQuadricDecimation,
3 | vtkPolyData,
4 | vtkSTLReader,
5 | vtkSTLWriter,
6 | vtkTransform,
7 | vtkCenterOfMass,
8 | vtkTransformPolyDataFilter,
9 | )
10 |
11 |
12 | def quadric_mesh_decimation(fname, reduction_rate, verbose=False):
13 | reader = vtkSTLReader()
14 | reader.SetFileName(fname)
15 | reader.Update()
16 | inputPoly = reader.GetOutput()
17 |
18 | decimate = vtkQuadricDecimation()
19 | decimate.SetInputData(inputPoly)
20 | decimate.SetTargetReduction(reduction_rate)
21 | decimate.Update()
22 | decimatedPoly = vtkPolyData()
23 | decimatedPoly.ShallowCopy(decimate.GetOutput())
24 |
25 | if verbose:
26 |         print(
27 |             f"Mesh Decimation: (points, faces) goes from ({inputPoly.GetNumberOfPoints()}, {inputPoly.GetNumberOfPolys()}) "
28 |             f"to ({decimatedPoly.GetNumberOfPoints()}, {decimatedPoly.GetNumberOfPolys()})"
29 |         )
30 |
31 | stlWriter = vtkSTLWriter()
32 | stlWriter.SetFileName(fname)
33 | stlWriter.SetFileTypeToBinary()
34 | stlWriter.SetInputData(decimatedPoly)
35 | stlWriter.Write()
36 |
37 |
38 | def center_scale_mesh(fname, scale):
39 | reader = vtkSTLReader()
40 | reader.SetFileName(fname)
41 | reader.Update()
42 |     inputPort = reader.GetOutputPort()  # an output port (pipeline connection), not a vtkPolyData
43 |
44 | centerOfMassFilter = vtkCenterOfMass()
45 |     centerOfMassFilter.SetInputConnection(inputPort)
46 | centerOfMassFilter.SetUseScalarsAsWeights(False)
47 | centerOfMassFilter.Update()
48 | center = centerOfMassFilter.GetCenter()
49 |
50 | transform = vtkTransform()
51 | transform.PostMultiply()
52 | transform.Translate(-center[0], -center[1], -center[2])
53 | transform.Scale(scale, scale, scale)
54 | transform.Translate(center[0], center[1], center[2])
55 | transform.Update()
56 |
57 | transformFilter = vtkTransformPolyDataFilter()
58 |     transformFilter.SetInputConnection(inputPort)
59 | transformFilter.SetTransform(transform)
60 | transformFilter.Update()
61 |
62 | stlWriter = vtkSTLWriter()
63 | stlWriter.SetFileName(fname)
64 | stlWriter.SetFileTypeToBinary()
65 | stlWriter.SetInputConnection(transformFilter.GetOutputPort())
66 | stlWriter.Write()
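
Both helpers rewrite the STL in place; a sketch (requires vtk, and the file path is hypothetical):

    mesh_file = 'Head.stl'                                                 # hypothetical STL on disk
    quadric_mesh_decimation(mesh_file, reduction_rate=0.9, verbose=True)   # keep roughly 10% of the faces
    center_scale_mesh(mesh_file, scale=1.05)                               # scale 5% up about the center of mass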
--------------------------------------------------------------------------------
/uhc/utils/h36m_specs.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | names = ['Hips', 'RightUpLeg', 'RightLeg', 'RightFoot', 'RightToeBase', 'Site_RToe', 'LeftUpLeg', 'LeftLeg', 'LeftFoot',
3 | 'LeftToeBase', 'Site_LToe', 'Spine', 'Spine1', 'Neck', 'Head', 'Site_Head', 'LeftShoulder', 'LeftArm', 'LeftForeArm',
4 | 'LeftHand', 'LeftHandThumb', 'Site_LThumb', 'LeftWrist', 'Site_LWrist', 'RightShoulder', 'RightArm', 'RightForeArm',
5 | 'RightHand', 'RightHandThumb', 'Site_RThumb', 'RightWrist', 'Site_RWrist']
6 | offsets = [[ 0. , 0. , 0. ],
7 | [-12.7193936, 0. , 0. ],
8 | [ 0. , -43.4291009, 0. ],
9 | [ 0. , -44.8767017, 0. ],
10 | [ 0. , 0. , 15.1507021],
11 | [ 0. , 0. , 7.4999997],
12 | [ 12.7193940, 0. , 0. ],
13 | [ 0. , -43.4291013, 0. ],
14 | [ 0. , -44.8767017, 0. ],
15 | [ 0. , 0. , 15.1507012],
16 | [ 0. , 0. , 7.5000011],
17 | [ 0. , 0.1 , 0. ],
18 | [ 0. , 24.5913012, 0. ],
19 | [ 0. , 24.8462965, 0. ],
20 | [ 0. , 9.2752478, 0. ],
21 | [ 0. , 11.4999962, 0. ],
22 | [ 0. , 24.8462965, 0. ],
23 | [ 0. , 12.4881980, 0. ],
24 | [ 0. , 25.9758047, 0. ],
25 | [ 0. , 24.5542024, 0. ],
26 | [ 0. , 0. , 0. ],
27 | [ 0. , 0. , 10.000000],
28 | [ 0. , 9.99999671, 0. ],
29 | [ 0. , 0. , 0. ],
30 | [ 0. , 24.8462965, 0. ],
31 | [ 0. , 12.4882004, 0. ],
32 | [ 0. , 25.9757994, 0. ],
33 | [ 0. , 24.5541986, 0. ],
34 | [ 0. , 0. , 0. ],
35 | [ 0. , 0. , 9.999997],
36 | [ 0. , 13.7500031, 0. ],
37 | [ 0. , 0. , 0. ]]
38 | offsets = {names[i]: x for i, x in enumerate(offsets)}
39 | parents = [-1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12,
40 | 16, 17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30]
41 | parents = {names[i]: names[x] for i, x in enumerate(parents)}
42 | parents[names[0]] = None
43 | bone_addr = {
44 | 'Hips': (0, 6),
45 | 'RightUpLeg': (6, 9),
46 | 'RightLeg': (10, 11),
47 | 'RightFoot': (12, 15),
48 | 'RightToeBase': (15, 18),
49 | 'LeftUpLeg': (18, 21),
50 | 'LeftLeg': (22, 23),
51 | 'LeftFoot': (24, 27),
52 | 'LeftToeBase': (27, 30),
53 | 'Spine': (30, 33),
54 | 'Spine1': (33, 36),
55 | 'Neck': (36, 39),
56 | 'Head': (39, 42),
57 | 'LeftShoulder': (42, 45),
58 | 'LeftArm': (45, 48),
59 | 'LeftForeArm': (49, 50),
60 | 'LeftHand': (51, 54),
61 | 'LeftHandThumb': (54, 57),
62 | 'LeftWrist': (57, 60),
63 | 'RightShoulder': (60, 63),
64 | 'RightArm': (63, 66),
65 | 'RightForeArm': (67, 68),
66 | 'RightHand': (69, 72),
67 | 'RightHandThumb': (72, 75),
68 | 'RightWrist': (75, 78)
69 | }
70 | joint_shuffle_ind = np.array([1, 2, 0])
71 | exclude_bones = {'Thumb', 'Site', 'Wrist', 'Toe'}
72 | channels = ['z', 'x', 'y']
73 | spec_channels = {'LeftForeArm': ['x'], 'RightForeArm': ['x'],
74 | 'LeftLeg': ['x'], 'RightLeg': ['x']}
75 | # make offsets symmetric
76 | for bone in names:
77 | if 'Left' in bone:
78 | symm_bone = bone.replace('Left', 'Right')
79 | offset_left = offsets[bone]
80 | offset_right = offsets[symm_bone]
81 | sign_left = offset_left / (np.abs(offset_left) + 1e-12)
82 | sign_right = offset_right / (np.abs(offset_right) + 1e-12)
83 | new_offset = (np.abs(offset_left) + np.abs(offset_right)) * 0.5
84 | offsets[bone] = sign_left * new_offset
85 | offsets[symm_bone] = sign_right * new_offset
86 | # print(offsets)  # debug output; kept disabled so importing this module stays silent
87 |
88 |
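The symmetrization loop above averages offset magnitudes across Left/Right pairs while each side keeps its own sign pattern; with the UpLeg values from this file:

    left_up_leg  = np.array([ 12.7193940, 0.0, 0.0])   # LeftUpLeg offset
    right_up_leg = np.array([-12.7193936, 0.0, 0.0])   # RightUpLeg offset
    mag = (np.abs(left_up_leg) + np.abs(right_up_leg)) * 0.5   # [12.7193938, 0, 0]
    # left stays positive, right stays negative, so the two hips become exact mirror images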
--------------------------------------------------------------------------------
/uhc/utils/image_utils.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import sys
4 | import pdb
5 | import os.path as osp
6 | sys.path.append(os.getcwd())
7 |
8 | import numpy as np
9 | import cv2
10 | from skimage.util.shape import view_as_windows
11 |
12 |
13 | def get_chunk_selects(chunk_idxes, last_chunk, window_size = 80, overlap = 10):
14 |     shift = window_size - int(overlap / 2)
15 |     chunk_selects = []
16 |     for i in range(len(chunk_idxes)):
17 |         # each select is a (start, end) slice into the corresponding window
18 |         if i == 0:
19 |             chunk_selects.append((0, shift))
20 |         elif i == len(chunk_idxes) - 1:
21 |             chunk_selects.append((-last_chunk, window_size))
22 |         else:
23 |             chunk_selects.append((int(overlap / 2), shift))
24 |     return chunk_selects
25 |
26 | def get_chunk_with_overlap(num_frames, window_size = 80, overlap = 10, return_idxes = False):
27 |     assert overlap % 2 == 0
28 |     if num_frames <= window_size:
29 |         chunk_idxes = np.linspace(0, num_frames - 1, num_frames).astype(int)
30 |         return [chunk_idxes], [(0, len(chunk_idxes))]
31 |
32 |     step = window_size - overlap
33 |     chunk_idxes = view_as_windows(np.array(range(num_frames)), window_size, step=step)
34 |     chunk_supp = np.linspace(num_frames - window_size, num_frames - 1, num=window_size).astype(int)
35 |     chunk_idxes = np.concatenate((chunk_idxes, chunk_supp[None, ]))
36 |     last_chunk = chunk_idxes[-1][:step][-1] - chunk_idxes[-2][:step][-1] + int(overlap / 2)
37 |     chunk_selects = get_chunk_selects(chunk_idxes, last_chunk, window_size=window_size, overlap=overlap)
38 |     if return_idxes:
39 |         chunk_boundary = chunk_idxes[:, [0, -1]]
40 |         chunk_boundary[:, -1] += 1
41 |         return chunk_boundary, chunk_selects
42 |     else:
43 |         return chunk_idxes, chunk_selects
44 |
45 | def assemble_videos(videos, grid_size, description, out_file_name, text_color = (255, 255, 255)):
46 |     x_grid_num = grid_size[1]
47 |     y_grid_num = grid_size[0]
48 |     y_shape, x_shape, _ = videos[0][0].shape
49 |     canvas = np.zeros((y_shape * y_grid_num, x_shape * x_grid_num, 3)).astype(np.uint8)
50 |
51 |     out = cv2.VideoWriter(out_file_name, cv2.VideoWriter_fourcc(*'FMP4'), 30, (canvas.shape[1], canvas.shape[0]))
52 |     for i in range(len(videos[0])):
53 |         for x in range(x_grid_num):
54 |             for y in range(y_grid_num):
55 |                 # row-major tile index; the original x * y + x duplicated some videos and skipped others
56 |                 curr_image = videos[y * x_grid_num + x][i]
57 |                 curr_description = description[y * x_grid_num + x]
58 |                 canvas[y_shape * y : y_shape * (y + 1), x_shape * x : x_shape * (x + 1), :] = curr_image
59 |                 cv2.putText(canvas, curr_description, (x_shape * x, y_shape * y + 20), 2, 0.5, text_color)
60 |         out.write(canvas)
61 |     out.release()
62 |
63 | def crop_center(img,cropx,cropy):
64 | y,x, _ = img.shape
65 | startx = x//2-(cropx//2)
66 | starty = y//2-(cropy//2)
67 | return img[starty:starty+cropy,startx:startx+cropx, :]
68 |
69 |
70 | def crop_side(img,cropx,cropy):
71 | y,x, _ = img.shape
72 | startx = x//8-(cropx//2)
73 | starty = y//2-(cropy//2)
74 | return img[starty:starty+cropy,startx:startx+cropx, :]
75 |
76 |
77 | def read_video_frames(vid_dir):
78 |     cap = cv2.VideoCapture(vid_dir)
79 |     frames = []
80 |     while cap.isOpened():
81 |         # Capture frame-by-frame
82 |         ret, frame = cap.read()
83 |         if ret:
84 |             frames.append(frame)
85 |         else:
86 |             # no more frames (end of stream or read error)
87 |             break
88 |     cap.release()
89 |     return frames
90 |
91 | def write_individual_frames(frames, output_dir):
92 |     for i in range(len(frames)):
93 |         cv2.imwrite(os.path.join(output_dir, "frame%06d.png" % i), frames[i])
94 | write_individaul_frames = write_individual_frames  # alias for callers still using the old misspelled name
95 | def write_frames_to_video(frames, out_file_name = "output.mp4", frame_rate = 30, add_text = None, text_color = (255, 255, 255)):
96 | y_shape, x_shape, _ = frames[0].shape
97 | out = cv2.VideoWriter(out_file_name, cv2.VideoWriter_fourcc(*'FMP4'), frame_rate, (x_shape, y_shape))
98 | transform_dtype = False
99 | transform_256 = False
100 |
101 | if frames[0].dtype != np.uint8:
102 | transform_dtype = True
103 | if np.max(frames[0]) < 1:
104 | transform_256 = True
105 |
106 | for i in range(len(frames)):
107 | curr_frame = frames[i]
108 |
109 | if transform_256:
110 | curr_frame = curr_frame * 256
111 | if transform_dtype:
112 | curr_frame = curr_frame.astype(np.uint8)
113 |         if add_text is not None:
114 | cv2.putText(curr_frame, add_text , (0, 20), 3, 1, text_color)
115 |
116 | out.write(curr_frame)
117 | out.release()
118 |
119 | def read_img_dir(img_dir):
120 | images = []
121 | for img_path in sorted(glob.glob(osp.join(img_dir, "*"))):
122 | images.append(cv2.imread(img_path))
123 | return images
124 |
125 | def read_img_list(img_list):
126 | images = []
127 | for img_path in img_list:
128 | images.append(cv2.imread(img_path))
129 | return images
130 |
131 | def resize_frames(frames, size_x = 224, size_y = 224):
132 | new_frames = []
133 | for i in range(len(frames)):
134 | curr_frame = frames[i]
135 | curr_frame = cv2.resize(curr_frame, (size_x, size_y))
136 | new_frames.append(curr_frame)
137 | return new_frames
138 |
139 |
140 |
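A sketch of the chunking round trip: split a sequence into overlapping windows, then reassemble it exactly using the returned selects:

    num_frames = 200
    feats = np.arange(num_frames)
    chunks, selects = get_chunk_with_overlap(num_frames, window_size=80, overlap=10)
    rebuilt = np.concatenate([feats[c][s:e] for c, (s, e) in zip(chunks, selects)])
    assert (rebuilt == feats).all()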
--------------------------------------------------------------------------------
/uhc/utils/lightning_utils.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import sys
4 | import pdb
5 | import os.path as osp
6 | sys.path.append(os.getcwd())
7 |
8 | import pytorch_lightning as pl
9 | from pytorch_lightning.callbacks import ModelCheckpoint
10 | from pathlib import Path
11 | from pytorch_lightning.utilities import rank_zero_only
12 | from pytorch_lightning.loggers import LightningLoggerBase
13 | from pytorch_lightning.loggers.base import rank_zero_experiment
14 | import logging
15 |
16 | class PeriodicCheckpoint(ModelCheckpoint):
17 | def __init__(self, every: int, *args, **kwargs):
18 | super().__init__( *args, **kwargs)
19 | self.every = every
20 |
21 | def on_train_batch_end(
22 | self, trainer: pl.Trainer, pl_module: pl.LightningModule, *args, **kwargs
23 | ):
24 | if (pl_module.global_step + 1) % self.every == 0:
25 | assert self.dirpath is not None
26 | current = Path(self.dirpath) / f"model_{pl_module.global_step}.ckpt"
27 | trainer.save_checkpoint(current)
28 |
29 | class TextLogger(LightningLoggerBase):
30 | def __init__(self, cfg, filename, file_handle = True):
31 | super().__init__()
32 | self.cfg = cfg
33 | self.logger = logger = logging.getLogger(filename)
34 | logger.propagate = False
35 | logger.setLevel(logging.DEBUG)
36 | # create console handler with a higher log level
37 | ch = logging.StreamHandler()
38 | ch.setLevel(logging.INFO)
39 | stream_formatter = logging.Formatter('%(message)s')
40 | ch.setFormatter(stream_formatter)
41 | logger.addHandler(ch)
42 |
43 | if file_handle:
44 | # create file handler which logs even debug messages
45 | os.makedirs(os.path.dirname(filename), exist_ok=True)
46 | fh = logging.FileHandler(filename, mode='a')
47 | fh.setLevel(logging.DEBUG)
48 | file_formatter = logging.Formatter('[%(asctime)s] %(message)s')
49 | fh.setFormatter(file_formatter)
50 | logger.addHandler(fh)
51 |
52 | @property
53 |     def name(self):
54 |         # display name reported to Lightning
55 |         return 'TextLogger'
56 |
57 | @property
58 | @rank_zero_experiment
59 | def experiment(self):
60 | # Return the experiment object associated with this logger.
61 | pass
62 |
63 | @property
64 | def version(self):
65 | # Return the experiment version, int or str.
66 | return '0.1'
67 |
68 | @rank_zero_only
69 | def log_hyperparams(self, params):
70 | # params is an argparse.Namespace
71 | # your code to record hyperparameters goes here
72 | pass
73 |
74 | @rank_zero_only
75 | def log_metrics(self, metrics, step):
76 | # metrics is a dictionary of metric names and values
77 | # your code to record metrics goes here
78 | log_str = "".join([f"{k} : {v:.3f} \t" for k, v in metrics.items()])
79 | self.logger.info(log_str)
80 |
81 | @rank_zero_only
82 | def save(self):
83 | # Optional. Any code necessary to save logger data goes here
84 | # If you implement this, remember to call `super().save()`
85 | # at the start of the method (important for aggregation of metrics)
86 | # super().save()
87 | pass
88 |
89 | @rank_zero_only
90 | def finalize(self, status):
91 | # Optional. Any code that needs to be run after training
92 | # finishes goes here
93 | pass
94 |
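Wiring both into a Trainer; a sketch that assumes pytorch-lightning 1.x (where LightningLoggerBase still exists) and hypothetical output paths:

    logger = TextLogger(cfg=None, filename='results/log/train.log')
    ckpt_cb = PeriodicCheckpoint(every=5000, dirpath='results/models')   # save every 5000 global steps
    trainer = pl.Trainer(max_epochs=100, logger=logger, callbacks=[ckpt_cb])
    # trainer.fit(model, datamodule)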
--------------------------------------------------------------------------------
/uhc/utils/math_utils_new.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import math
3 | import numpy as np
4 | from uhc.utils.transformation import (
5 | quaternion_matrix,
6 | quaternion_about_axis,
7 | quaternion_inverse,
8 | quaternion_multiply,
9 | rotation_from_quaternion,
10 | rotation_from_matrix,
11 | )
12 |
13 |
14 | def ewma(x, alpha=0.05):
15 | avg = x[0]
16 | for i in x[1:]:
17 | avg = alpha * i + (1 - alpha) * avg
18 | return avg
19 |
20 |
21 | def normal_entropy(std):
22 | var = std.pow(2)
23 | entropy = 0.5 + 0.5 * torch.log(2 * var * math.pi)
24 | return entropy.sum(1, keepdim=True)
25 |
26 |
27 | def normal_log_density(x, mean, log_std, std):
28 | var = std.pow(2)
29 | log_density = -(x - mean).pow(2) / (2 * var) - 0.5 * math.log(2 * math.pi) - log_std
30 | return log_density.sum(1, keepdim=True)
31 |
32 |
33 | def get_qvel_fd(cur_qpos, next_qpos, dt, transform=None):
34 | v = (next_qpos[:3] - cur_qpos[:3]) / dt
35 | qrel = quaternion_multiply(next_qpos[3:7], quaternion_inverse(cur_qpos[3:7]))
36 | # qrel /= np.linalg.norm(qrel)
37 | axis, angle = rotation_from_quaternion(qrel, True)
38 |
39 | if angle > np.pi: # -180 < angle < 180
40 | angle -= 2 * np.pi #
41 | elif angle < -np.pi:
42 | angle += 2 * np.pi
43 |
44 | rv = (axis * angle) / dt
45 | rv = transform_vec(rv, cur_qpos[3:7], "root")
46 | qvel = (next_qpos[7:] - cur_qpos[7:]) / dt
47 | qvel = np.concatenate((v, rv, qvel))
48 | if transform is not None:
49 | v = transform_vec(v, cur_qpos[3:7], transform)
50 | qvel[:3] = v
51 | return qvel
52 |
53 |
54 | def get_angvel_fd(prev_bquat, cur_bquat, dt):
55 | q_diff = multi_quat_diff(cur_bquat, prev_bquat)
56 | n_joint = q_diff.shape[0] // 4
57 | body_angvel = np.zeros(n_joint * 3)
58 | for i in range(n_joint):
59 | body_angvel[3 * i : 3 * i + 3] = (
60 | rotation_from_quaternion(q_diff[4 * i : 4 * i + 4]) / dt
61 | )
62 | return body_angvel
63 |
64 |
65 | def transform_vec(v, q, trans="root"):
66 | if trans == "root":
67 | rot = quaternion_matrix(q)[:3, :3]
68 | elif trans == "heading":
69 | hq = q.copy()
70 | hq[1] = 0.0
71 | hq[2] = 0.0
72 | hq /= np.linalg.norm(hq)
73 | rot = quaternion_matrix(hq)[:3, :3]
74 | else:
75 | assert False
76 | v = rot.T.dot(v[:, None]).ravel()
77 | return v
78 |
79 |
80 | def get_heading_q(q):
81 | hq = q.copy()
82 | hq[1] = 0.0
83 | hq[2] = 0.0
84 | hq /= np.linalg.norm(hq)
85 | return hq
86 |
87 |
88 | def get_heading(q):
89 | hq = q.copy()
90 | hq[1] = 0
91 | hq[2] = 0
92 | if hq[3] < 0:
93 | hq *= -1
94 | hq /= np.linalg.norm(hq)
95 | return 2 * math.acos(hq[0])
96 |
97 |
98 | def de_heading(q):
99 | return quaternion_multiply(quaternion_inverse(get_heading_q(q)), q)
100 |
101 |
102 | def multi_quat_diff(nq1, nq0):
103 | """return the relative quaternions q1-q0 of N joints"""
104 |
105 | nq_diff = np.zeros_like(nq0)
106 | for i in range(nq1.shape[0] // 4):
107 | ind = slice(4 * i, 4 * i + 4)
108 | q1 = nq1[ind]
109 | q0 = nq0[ind]
110 | nq_diff[ind] = quaternion_multiply(q1, quaternion_inverse(q0))
111 | return nq_diff
112 |
113 |
114 | def multi_quat_norm(nq):
115 | """return the scalar rotation of a N joints"""
116 |
117 | nq_norm = np.arccos(np.clip(nq[::4], -1.0, 1.0))
118 | return nq_norm
119 |
120 |
121 | def multi_quat_norm_v2(nq):
122 |
123 | _diff = []
124 | for i in range(nq.shape[0] // 4):
125 | q = nq[4 * i : 4 * (i + 1)]
126 | d = np.array([abs(q[0]) - 1.0, q[1], q[2], q[3]])
127 | _diff.append(np.linalg.norm(d))
128 | return np.array(_diff)
129 |
130 |
131 | def quat_mul_vec(q, v):
132 | return quaternion_matrix(q)[:3, :3].dot(v[:, None]).ravel()
133 |
134 |
135 | def quat_to_bullet(q):
136 | return np.array([q[1], q[2], q[3], q[0]])
137 |
138 |
139 | def quat_from_bullet(q):
140 | return np.array([q[3], q[0], q[1], q[2]])
141 |
142 |
143 | def quat_from_expmap(e):
144 | angle = np.linalg.norm(e)
145 | if angle < 1e-8:
146 | axis = np.array([1.0, 0.0, 0.0], dtype=np.float64)
147 | angle = 0.0
148 | else:
149 | axis = e / angle
150 | return quaternion_about_axis(angle, axis)
151 |
152 |
153 | def quat_correct(quat):
154 | """ Converts quaternion to minimize Euclidean distance from previous quaternion (wxyz order) """
155 | for q in range(1, quat.shape[0]):
156 | if np.linalg.norm(quat[q - 1] - quat[q], axis=0) > np.linalg.norm(
157 | quat[q - 1] + quat[q], axis=0
158 | ):
159 | quat[q] = -quat[q]
160 | return quat
161 |
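A quick consistency check on the heading helpers: a pure yaw (z-axis) quaternion's heading is recovered exactly, and de_heading removes it, leaving the identity:

    yaw = 0.7
    q = quat_from_expmap(np.array([0.0, 0.0, yaw]))   # wxyz quaternion for a rotation about z
    assert abs(get_heading(q) - yaw) < 1e-6
    q_local = de_heading(q)                           # residual rotation once heading is removed
    assert np.allclose(q_local, np.array([1.0, 0.0, 0.0, 0.0]), atol=1e-6)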
--------------------------------------------------------------------------------
/uhc/utils/replay_data.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | sys.path.append(os.getcwd())
4 |
5 | from mujoco_py import load_model_from_path, MjSim
6 | from uhc.khrylib.rl.envs.common.mjviewer import MjViewer
7 | import pickle
8 | import argparse
9 | import glfw
10 | import math
11 |
12 | parser = argparse.ArgumentParser()
13 | parser.add_argument('--model_id', type=str, default='human36m_v1')
14 | parser.add_argument('--offset_z', type=float, default=0.0)
15 | parser.add_argument('--start_take', type=str, default=None)
16 | parser.add_argument('--dataset', type=str, default='h36m/data_qpos_h36m')
17 | args = parser.parse_args()
18 |
19 | model_file = f'assets/mujoco_models/{args.model_id}.xml'
20 | model = load_model_from_path(model_file)
21 | sim = MjSim(model)
22 | viewer = MjViewer(sim)
23 |
24 |
25 |
26 | def key_callback(key, action, mods):
27 | global T, fr, paused, stop, offset_z, take_ind, reverse
28 |
29 | if action != glfw.RELEASE:
30 | return False
31 | elif key == glfw.KEY_D:
32 | T *= 1.5
33 | elif key == glfw.KEY_F:
34 | T = max(1, T / 1.5)
35 | elif key == glfw.KEY_R:
36 | stop = True
37 | elif key == glfw.KEY_W:
38 | fr = 0
39 | update_mocap()
40 | elif key == glfw.KEY_S:
41 | reverse = not reverse
42 | elif key == glfw.KEY_C:
43 | take_ind = (take_ind + 1) % len(takes)
44 | load_take()
45 | update_mocap()
46 | elif key == glfw.KEY_Z:
47 | take_ind = (take_ind - 1) % len(takes)
48 | load_take()
49 | update_mocap()
50 | elif key == glfw.KEY_RIGHT:
51 | if fr < qpos_traj.shape[0] - 1:
52 | fr += 1
53 | update_mocap()
54 | elif key == glfw.KEY_LEFT:
55 | if fr > 0:
56 | fr -= 1
57 | update_mocap()
58 | elif key == glfw.KEY_UP:
59 | offset_z += 0.001
60 | update_mocap()
61 | elif key == glfw.KEY_DOWN:
62 | offset_z -= 0.001
63 | update_mocap()
64 | elif key == glfw.KEY_SPACE:
65 | paused = not paused
66 | else:
67 | return False
68 | return True
69 |
70 |
71 | def update_mocap():
72 | print(f'{take[0]} {take[1]}: [{fr}, {qpos_traj.shape[0]}] dz: {offset_z:.3f}')
73 |     # print(qpos_traj.shape)  # extra debug output, disabled
74 | sim.data.qpos[:] = qpos_traj[fr]
75 | sim.data.qpos[2] += offset_z
76 | sim.forward()
77 |
78 |
79 | def load_take():
80 | global qpos_traj, fr, take
81 | take = takes[take_ind]
82 | fr = 0
83 | qpos_traj = data[take[0]][take[1]]
84 |
85 |
86 | data = pickle.load(open(os.path.expanduser('data/{}.p').format(args.dataset), 'rb'))
87 | takes = [(subject, action) for subject, s_data in data.items() for action in s_data.keys()]
88 |
89 | qpos_traj = None
90 | take = None
91 | take_ind = 0 if args.start_take is None else takes.index(tuple(args.start_take.split(',')))
92 | fr = 0
93 | offset_z = args.offset_z
94 | # load_take()
95 |
96 | T = 10
97 | paused = False
98 | stop = False
99 | reverse = False
100 | glfw.set_window_size(viewer.window, 1000, 960)
101 | glfw.set_window_pos(viewer.window, 400, 0)
102 | viewer._hide_overlay = True
103 | viewer.cam.distance = 10
104 | viewer.cam.elevation = -20
105 | viewer.cam.azimuth = 90
106 | viewer.custom_key_callback = key_callback
107 |
108 | load_take()
109 | update_mocap()
110 | t = 0
111 | while not stop:
112 | if t >= math.floor(T):
113 | if not reverse and fr < qpos_traj.shape[0] - 1:
114 | fr += 1
115 | update_mocap()
116 | elif reverse and fr > 0:
117 | fr -= 1
118 | update_mocap()
119 | t = 0
120 |
121 | viewer.render()
122 | if not paused:
123 | t += 1
124 |
125 |
126 |
127 |
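For reference, load_take and update_mocap assume the pickle maps subject → action → a (num_frames, nq) qpos array; a stand-in illustrating the layout (the dof count is hypothetical):

    import numpy as np
    fake_data = {'S1': {'Walking': np.zeros((300, 59))}}   # 59 dofs is hypothetical; must equal model.nq
    takes = [(s, a) for s, s_data in fake_data.items() for a in s_data.keys()]   # [('S1', 'Walking')]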
--------------------------------------------------------------------------------
/uhc/utils/vis_model_utils.py:
--------------------------------------------------------------------------------
1 | from lxml import etree
2 | from lxml.etree import XMLParser, parse, ElementTree, Element, SubElement
3 | from copy import deepcopy
4 |
5 |
6 | def create_vis_model(in_file, out_file, num=10):
7 | xml_parser = XMLParser(remove_blank_text=True)
8 | tree = parse(in_file, parser=xml_parser)
9 | # remove_elements = ['actuator', 'contact', 'equality', 'sensor']
10 | remove_elements = ['actuator', 'contact', 'equality']
11 | for elem in remove_elements:
12 | node = tree.getroot().find(elem)
13 | if node is None:
14 | print(f"has no elem: {elem}")
15 | else:
16 | node.getparent().remove(node)
17 |
18 | option = tree.getroot().find('option')
19 | flag = SubElement(option, 'flag', {'contact': 'disable'})
20 | option.addnext(Element('size', {'njmax': '1000'}))
21 |
22 | worldbody = tree.getroot().find('worldbody')
23 | body = worldbody.find('body')
24 | for i in range(1, num):
25 | new_body = deepcopy(body)
26 | new_body.attrib['name'] = '%d_%s' % (i, new_body.attrib['name'])
27 | for node in new_body.findall(".//body"):
28 | node.attrib['name'] = '%d_%s' % (i, node.attrib['name'])
29 | for node in new_body.findall(".//joint"):
30 | node.attrib['name'] = '%d_%s' % (i, node.attrib['name'])
31 | for node in new_body.findall(".//site"):
32 | node.attrib['name'] = '%d_%s' % (i, node.attrib['name'])
33 | worldbody.append(new_body)
34 | tree.write(out_file, pretty_print=True)
35 |
36 |
37 | if __name__=='__main__':
38 | import argparse
39 | parser = argparse.ArgumentParser()
40 | parser.add_argument('--cfg', type=str, default=None)
41 | parser.add_argument('--in_model', type=str, default='assets/mujoco_models/models/character1/model.xml')
42 | parser.add_argument('--out_model', type=str, default='assets/mujoco_models/models/character1/model_vis.xml')
43 | args = parser.parse_args()
44 |
45 | in_model = f'assets/mujoco_models/models/{args.cfg}/model.xml' if args.cfg is not None else args.in_model
46 | out_model = f'assets/mujoco_models/models/{args.cfg}/model_vis.xml' if args.cfg is not None else args.out_model
47 |
48 | create_vis_model(in_model, out_model)
--------------------------------------------------------------------------------