├── .gitignore
├── INSTALL.md
├── LICENSE-CC-BY-NC-4.0.md
├── README.md
├── demo.jpg
├── description
│   ├── README.md
│   └── robots
│       ├── cfg
│       │   ├── config.yaml
│       │   └── robot
│       │       └── unitree_g1_29dof_anneal_23dof.yaml
│       ├── dtype.py
│       └── g1
│           ├── 29dof_official.urdf
│           ├── dof_axis.npy
│           ├── g1_23dof_lock_wrist.urdf
│           ├── g1_23dof_lock_wrist.xml
│           ├── g1_23dof_lock_wrist_fitmotionONLY.xml
│           ├── g1_29dof_rev_1_0.xml
│           ├── g1_29dof_rev_1_0_with_toe.xml
│           ├── meshes
│           │   ├── head_link.STL
│           │   ├── left_ankle_pitch_link.STL
│           │   ├── left_ankle_roll_link.STL
│           │   ├── left_elbow_link.STL
│           │   ├── left_elbow_link_merge.STL
│           │   ├── left_hand_index_0_link.STL
│           │   ├── left_hand_index_1_link.STL
│           │   ├── left_hand_middle_0_link.STL
│           │   ├── left_hand_middle_1_link.STL
│           │   ├── left_hand_palm_link.STL
│           │   ├── left_hand_thumb_0_link.STL
│           │   ├── left_hand_thumb_1_link.STL
│           │   ├── left_hand_thumb_2_link.STL
│           │   ├── left_hip_pitch_link.STL
│           │   ├── left_hip_roll_link.STL
│           │   ├── left_hip_yaw_link.STL
│           │   ├── left_knee_link.STL
│           │   ├── left_rubber_hand.STL
│           │   ├── left_shoulder_pitch_link.STL
│           │   ├── left_shoulder_roll_link.STL
│           │   ├── left_shoulder_yaw_link.STL
│           │   ├── left_wrist_pitch_link.STL
│           │   ├── left_wrist_roll_link.STL
│           │   ├── left_wrist_roll_rubber_hand.STL
│           │   ├── left_wrist_yaw_link.STL
│           │   ├── logo_link.STL
│           │   ├── pelvis.STL
│           │   ├── pelvis_contour_link.STL
│           │   ├── right_ankle_pitch_link.STL
│           │   ├── right_ankle_roll_link.STL
│           │   ├── right_elbow_link.STL
│           │   ├── right_elbow_link_merge.STL
│           │   ├── right_hand_index_0_link.STL
│           │   ├── right_hand_index_1_link.STL
│           │   ├── right_hand_middle_0_link.STL
│           │   ├── right_hand_middle_1_link.STL
│           │   ├── right_hand_palm_link.STL
│           │   ├── right_hand_thumb_0_link.STL
│           │   ├── right_hand_thumb_1_link.STL
│           │   ├── right_hand_thumb_2_link.STL
│           │   ├── right_hip_pitch_link.STL
│           │   ├── right_hip_roll_link.STL
│           │   ├── right_hip_yaw_link.STL
│           │   ├── right_knee_link.STL
│           │   ├── right_rubber_hand.STL
│           │   ├── right_shoulder_pitch_link.STL
│           │   ├── right_shoulder_roll_link.STL
│           │   ├── right_shoulder_yaw_link.STL
│           │   ├── right_wrist_pitch_link.STL
│           │   ├── right_wrist_roll_link.STL
│           │   ├── right_wrist_roll_rubber_hand.STL
│           │   ├── right_wrist_yaw_link.STL
│           │   ├── torso_constraint_L_link.STL
│           │   ├── torso_constraint_L_rod_link.STL
│           │   ├── torso_constraint_R_link.STL
│           │   ├── torso_constraint_R_rod_link.STL
│           │   ├── torso_link.STL
│           │   ├── torso_link_23dof_rev_1_0.STL
│           │   ├── torso_link_rev_1_0.STL
│           │   ├── waist_constraint_L.STL
│           │   ├── waist_constraint_R.STL
│           │   ├── waist_roll_link.STL
│           │   ├── waist_roll_link_rev_1_0.STL
│           │   ├── waist_support_link.STL
│           │   ├── waist_yaw_link.STL
│           │   └── waist_yaw_link_rev_1_0.STL
│           ├── phc_g1_23dof.yaml
│           └── smpl_humanoid.xml
├── example
│   ├── motion_data
│   │   ├── Bruce_Lee_pose.pkl
│   │   ├── Charleston_dance.pkl
│   │   ├── Hooks_punch.pkl
│   │   ├── Horse-stance_pose.pkl
│   │   ├── Horse-stance_punch.pkl
│   │   ├── Roundhouse_kick.pkl
│   │   └── Side_kick.pkl
│   ├── pretrained_horse_stance_pose
│   │   ├── config.yaml
│   │   ├── exported
│   │   │   └── model_50000.onnx
│   │   └── model_50000.pt
│   ├── pretrained_horse_stance_pose_2
│   │   ├── config.yaml
│   │   ├── exported
│   │   │   └── model_119000.onnx
│   │   └── model_119000.pt
│   └── pretrained_horse_stance_punch
│       ├── config.yaml
│       ├── exported
│       │   └── model_33000.onnx
│       └── model_33000.pt
├── humanoidverse
│   ├── README.md
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── base_algo
│   │   │   └── base_algo.py
│   │   ├── callbacks
│   │   │   ├── __init__.py
│   │   │   ├── analysis_plot_force.py
│   │   │   ├── analysis_plot_force_estimator.py
│   │   │   ├── analysis_plot_locomotion.py
│   │   │   ├── analysis_plot_locomotion_estimate_vel.py
│   │   │   ├── analysis_plot_motion_tracking.py
│   │   │   ├── analysis_plot_motion_tracking_openloop.py
│   │   │   ├── analysis_plot_template.html
│   │   │   └── base_callback.py
│   │   ├── mh_ppo
│   │   │   └── mh_ppo.py
│   │   ├── modules
│   │   │   ├── __init__.py
│   │   │   ├── data_utils.py
│   │   │   ├── encoder_modules.py
│   │   │   ├── modules.py
│   │   │   └── ppo_modules.py
│   │   └── ppo
│   │       └── ppo.py
│   ├── config
│   │   ├── algo
│   │   │   └── mh_ppo.yaml
│   │   ├── base.yaml
│   │   ├── base
│   │   │   ├── fabric.yaml
│   │   │   ├── hydra.yaml
│   │   │   └── structure.yaml
│   │   ├── base_eval.yaml
│   │   ├── deploy
│   │   │   ├── external.yaml
│   │   │   ├── multiple.yaml
│   │   │   └── single.yaml
│   │   ├── domain_rand
│   │   │   ├── NO_domain_rand.yaml
│   │   │   ├── domain_rand_base.yaml
│   │   │   ├── dr_nil.yaml
│   │   │   └── main.yaml
│   │   ├── env
│   │   │   ├── base_task.yaml
│   │   │   ├── legged_base.yaml
│   │   │   └── motion_tracking.yaml
│   │   ├── exp
│   │   │   ├── base_exp.yaml
│   │   │   ├── legged_base.yaml
│   │   │   └── motion_tracking.yaml
│   │   ├── obs
│   │   │   ├── legged_obs.yaml
│   │   │   └── motion_tracking
│   │   │       ├── benchmark.yaml
│   │   │       └── main.yaml
│   │   ├── opt
│   │   │   ├── record.yaml
│   │   │   └── wandb.yaml
│   │   ├── rewards
│   │   │   └── motion_tracking
│   │   │       └── main.yaml
│   │   ├── robot
│   │   │   ├── g1
│   │   │   │   ├── g1_23dof_lock_wrist.yaml
│   │   │   │   └── g1_only_leg.yaml
│   │   │   └── robot_base.yaml
│   │   ├── simulator
│   │   │   ├── genesis.yaml
│   │   │   ├── isaacgym.yaml
│   │   │   ├── isaacsim.yaml
│   │   │   └── mujoco.yaml
│   │   └── terrain
│   │       ├── terrain_base.yaml
│   │       ├── terrain_locomotion.yaml
│   │       └── terrain_locomotion_plane.yaml
│   ├── deploy
│   │   ├── __init__.py
│   │   ├── external
│   │   │   ├── __init__.py
│   │   │   └── core.py
│   │   ├── mujoco.py
│   │   └── urcirobot.py
│   ├── envs
│   │   ├── __init__.py
│   │   ├── base_task
│   │   │   ├── __init__.py
│   │   │   └── base_task.py
│   │   ├── env_utils
│   │   │   ├── __init__.py
│   │   │   ├── history_handler.py
│   │   │   ├── terrain.py
│   │   │   └── visualization.py
│   │   ├── legged_base_task
│   │   │   ├── __init__.py
│   │   │   └── legged_robot_base.py
│   │   └── motion_tracking
│   │       ├── __init__.py
│   │       └── motion_tracking.py
│   ├── eval_agent.py
│   ├── isaac_utils
│   │   ├── isaac_utils
│   │   │   ├── __init__.py
│   │   │   ├── maths.py
│   │   │   └── rotations.py
│   │   └── setup.py
│   ├── measure_traj.py
│   ├── ratio_eps.py
│   ├── sample_eps.py
│   ├── simulator
│   │   ├── base_simulator
│   │   │   └── base_simulator.py
│   │   ├── genesis
│   │   │   ├── genesis.py
│   │   │   ├── genesis_mjdebug.py
│   │   │   ├── genesis_viewer.py
│   │   │   └── tmp_gs_utils.py
│   │   ├── isaacgym
│   │   │   ├── isaacgym.py
│   │   │   └── isaacgym_hoi.py
│   │   └── isaacsim
│   │       ├── event_cfg.py
│   │       ├── events.py
│   │       ├── isaaclab_cfg.py
│   │       ├── isaaclab_viewpoint_camera_controller.py
│   │       ├── isaacsim.py
│   │       └── isaacsim_articulation_cfg.py
│   ├── train_agent.py
│   ├── urci.py
│   └── utils
│       ├── __init__.py
│       ├── average_meters.py
│       ├── common.py
│       ├── config_utils.py
│       ├── devtool.py
│       ├── helpers.py
│       ├── inference_helpers.py
│       ├── logging.py
│       ├── math.py
│       ├── motion_lib
│       │   ├── __init__.py
│       │   ├── dof_axis.npy
│       │   ├── motion_lib_base.py
│       │   ├── motion_lib_robot.py
│       │   ├── motion_lib_robot_WJX.py
│       │   ├── motion_utils
│       │   │   ├── __init__.py
│       │   │   ├── flags.py
│       │   │   └── rotation_conversions.py
│       │   ├── skeleton.py
│       │   └── torch_humanoid_batch.py
│       ├── noise_tool.py
│       ├── real
│       │   ├── __init__.py
│       │   └── rotation_helper.py
│       └── torch_utils.py
├── motion_source
│   ├── README.md
│   ├── convert_lafan_pkl.py
│   ├── count_pkl_contact_mask.py
│   └── utils
│       ├── __init__.py
│       ├── rotation_conversions.py
│       └── torch_humanoid_batch.py
├── overview.jpg
├── robot_motion_process
│   ├── README.md
│   ├── motion_interpolation_pkl.py
│   ├── motion_readpkl.py
│   ├── traj_vis.ipynb
│   ├── vis_q_mj.py
│   └── vis_rr.py
├── setup.py
├── smpl_retarget
│   ├── README.md
│   ├── mink_retarget
│   │   ├── __init__.py
│   │   ├── convert_fit_motion.py
│   │   ├── retargeting
│   │   │   ├── __init__.py
│   │   │   ├── config.py
│   │   │   ├── mink_retarget.py
│   │   │   ├── torch_humanoid_batch.py
│   │   │   └── torch_smplx_humanoid_batch.py
│   │   └── shape_optimized_neutral.pkl
│   ├── motion_filter
│   │   ├── constants.py
│   │   ├── model
│   │   │   ├── __init__.py
│   │   │   └── smpl.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── mesh_utils.py
│   │       ├── motion_filter.py
│   │       ├── part_volumes.py
│   │       └── smpl_paraser.py
│   ├── phc_retarget
│   │   ├── fit_smpl_motion.py
│   │   └── fit_smpl_shape.py
│   ├── poselib
│   │   ├── .gitignore
│   │   ├── README.md
│   │   ├── poselib
│   │   │   ├── __init__.py
│   │   │   ├── core
│   │   │   │   ├── __init__.py
│   │   │   │   ├── backend
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── abstract.py
│   │   │   │   │   └── logger.py
│   │   │   │   ├── rotation3d.py
│   │   │   │   ├── tensor_utils.py
│   │   │   │   └── tests
│   │   │   │       ├── __init__.py
│   │   │   │       └── test_rotation.py
│   │   │   ├── skeleton
│   │   │   │   ├── __init__.py
│   │   │   │   ├── backend
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   └── fbx
│   │   │   │   │       ├── __init__.py
│   │   │   │   │       ├── fbx_py27_backend.py
│   │   │   │   │       └── fbx_read_wrapper.py
│   │   │   │   ├── skeleton3d.py
│   │   │   │   └── tests
│   │   │   │       ├── __init__.py
│   │   │   │       ├── ant.xml
│   │   │   │       ├── test_skeleton.py
│   │   │   │       └── transfer_npy.py
│   │   │   └── visualization
│   │   │       ├── __init__.py
│   │   │       ├── common.py
│   │   │       ├── core.py
│   │   │       ├── plt_plotter.py
│   │   │       ├── simple_plotter_tasks.py
│   │   │       ├── skeleton_plotter_tasks.py
│   │   │       └── tests
│   │   │           ├── __init__.py
│   │   │           └── test_plotter.py
│   │   └── setup.py
│   └── retargeted_motion_data
│       └── phc
│           └── shape_optimized_v1.pkl
└── smpl_vis
    ├── README.md
    ├── import_motion_blender.py
    ├── smpl_render.py
    └── utils
        ├── __init__.py
        ├── body_model_smpl.py
        ├── geo_transform.py
        ├── renderer.py
        ├── renderer_tools.py
        └── smpl_utils.py
/.gitignore:
--------------------------------------------------------------------------------
__*/



isaacgym/
isaac-gym-preview-4

*.pdf
*.csv
*.pkl
*.npz
*.tar.gz



# Compiled Python bytecode
*.py[cod]
# Log files
*.log
# * ONNX files
# *.onnx
output*
*.pdf

playground/robot_description


*.csv
extracted_motions*
*.lprof

# JetBrains IDE
.idea/
.vscode/

logs*/
# Generated by MacOS
.DS_Store

lafan
SharedMotions/ASAP-motion
SharedMotions
PaperMotions

humanoidverse/data
# Generated by Windows
Thumbs.db

logs_real_motion*
*.zip

# Applications
*.app
*.exe
*.war

# Large media files
*.mp4
*.tiff
*.avi
*.flv
*.mov
*.wmv

# logs
logs
logs_eval
runs
outputs
results/
hydra_logs/
wandb/
# other
*.egg-info
__pycache__
# unitree_sdk2_python
roboverse/data/motions
*.pkl
*.pt

*.TXT

sim2real/models/*.onnx
sim2real/models/dec_loco/*.onnx

roboverse/data/smpl
data/shape
data/motions
roboverse/data/Recorded/
roboverse/data/AMASS/
AMASS/*

motion_source/lafan_pkl/
motion_source/lafan_pkl_cont_mask/
smpl_retarget/smpl_model/
smpl_retarget/motion_data/
smpl_retarget/retargeted_motion_data

*.txt
--------------------------------------------------------------------------------
/INSTALL.md:
--------------------------------------------------------------------------------
### Hardware Requirements

We test the code in the following environment:
- **OS**: Ubuntu 20.04
- **GPU**: NVIDIA RTX 4090, Driver Version: 560.35.03
- **CPU**: 13th Gen Intel(R) Core(TM) i7-13700



### Environment Setup
```bash
# Assuming pwd: PBHC/
conda create -n pbhc python=3.8
conda activate pbhc

# Install and Test IsaacGym
wget https://developer.nvidia.com/isaac-gym-preview-4
tar -xvzf isaac-gym-preview-4
pip install -e isaacgym/python
cd isaacgym/python/examples
python 1080_balls_of_solitude.py # or `python joint_monkey.py`
cd ../../..


# Install PBHC
pip install -e .
pip install -e humanoidverse/isaac_utils
```

```bash
# (Optional) Install additional dependencies for motion visualization with rerun (robot_motion_process/vis_rr.py)
pip install rerun-sdk==0.22.0 trimesh
```
--------------------------------------------------------------------------------
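A quick way to verify the install before training: a minimal sanity-check sketch (the script name is hypothetical; it assumes the `pbhc` env above, and that IsaacGym Preview is imported before `torch`, which the library requires):

```python
# sanity_check.py -- hypothetical helper, not part of the repo
import isaacgym  # noqa: F401  -- IsaacGym Preview must be imported before torch
import torch

import humanoidverse   # installed by `pip install -e .`
import isaac_utils     # installed by `pip install -e humanoidverse/isaac_utils`

print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
```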
/demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/demo.jpg
--------------------------------------------------------------------------------
/description/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/README.md
--------------------------------------------------------------------------------
/description/robots/cfg/config.yaml:
--------------------------------------------------------------------------------
defaults:
  - _self_
  # - env: env_im
  - robot: smpl_humanoid
  # - learning: im
  # - sim: default_sim
  # - control: default_control
  # - domain_rand: default_dr

project_name: "PHC"
notes: "Default Notes"
exp_name: &exp_name humanoid_smpl
headless: True
seed: 0
no_log: False
resume_str: null
num_threads: 64
test: False
dataset: False
mlp: False
output_path: output/HumanoidIm/${exp_name}
torch_deterministic: False
epoch: 0
im_eval: False
horovod: False # Use horovod for multi-GPU training; has an effect only with the rl_games RL library
rl_device: "cuda:0"
device: "cuda"
device_id: 0
metadata: false
play: ${test}
train: True
collect_dataset: False
disable_multiprocessing: True


####### Testing Configs. ########
server_mode: False
has_eval: True
no_virtual_display: False
render_o3d: False
debug: False
follow: False
add_proj: False
real_traj: False

hydra:
  job:
    name: ${exp_name}
    env_set:
      OMP_NUM_THREADS: 1
  run:
    dir: output/HumanoidIm/${exp_name}
--------------------------------------------------------------------------------
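The `defaults` list above is ordinary Hydra group composition: `robot: smpl_humanoid` pulls a file from the sibling `cfg/robot/` group, and any key can be overridden from the command line or from code. A minimal sketch of composing this config in Python (standard Hydra `initialize`/`compose` API; assumes it is run from the repo root, and the override names the robot file that actually ships in `cfg/robot/`):

```python
from hydra import initialize, compose

# Compose description/robots/cfg/config.yaml, selecting the G1 config
# from the cfg/robot/ group instead of the default smpl_humanoid.
with initialize(config_path="description/robots/cfg", version_base=None):
    cfg = compose(
        config_name="config",
        overrides=["robot=unitree_g1_29dof_anneal_23dof", "headless=False"],
    )
print(cfg.robot.humanoid_type)  # g1_29dof_anneal_23dof
```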
/description/robots/cfg/robot/unitree_g1_29dof_anneal_23dof.yaml:
--------------------------------------------------------------------------------
humanoid_type: g1_29dof_anneal_23dof
bias_offset: False
has_self_collision: True
has_mesh: False
has_jt_limit: False
has_dof_subset: True
has_upright_start: True
has_smpl_pd_offset: False
remove_toe: False # For humanoid's geom toe
motion_sym_loss: False
sym_loss_coef: 1
big_ankle: True

has_shape_obs: false
has_shape_obs_disc: false
has_shape_variation: False

masterfoot: False
freeze_toe: false
freeze_hand: False
box_body: True
real_weight: True
real_weight_porpotion_capsules: True
real_weight_porpotion_boxes: True

body_names: [ 'pelvis',
              'left_hip_pitch_link', 'left_hip_roll_link', 'left_hip_yaw_link', 'left_knee_link', 'left_ankle_pitch_link', 'left_ankle_roll_link',
              'right_hip_pitch_link', 'right_hip_roll_link', 'right_hip_yaw_link', 'right_knee_link', 'right_ankle_pitch_link', 'right_ankle_roll_link',
              'waist_yaw_link', 'waist_roll_link', 'torso_link',
              'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link',
              'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link' ]

limb_weight_group:
  - [ 'left_hip_pitch_link', 'left_hip_roll_link', 'left_hip_yaw_link', 'left_knee_link', 'left_ankle_pitch_link', 'left_ankle_roll_link' ]
  - [ 'right_hip_pitch_link', 'right_hip_roll_link', 'right_hip_yaw_link', 'right_knee_link', 'right_ankle_pitch_link', 'right_ankle_roll_link' ]
  - [ 'pelvis', 'waist_yaw_link', 'waist_roll_link', 'torso_link' ]
  - [ 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link' ]
  - [ 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link' ]

dof_names: [ 'left_hip_pitch_link', 'left_hip_roll_link', 'left_hip_yaw_link',
             'left_knee_link', 'left_ankle_pitch_link', 'left_ankle_roll_link',
             'right_hip_pitch_link', 'right_hip_roll_link', 'right_hip_yaw_link',
             'right_knee_link', 'right_ankle_pitch_link', 'right_ankle_roll_link',
             'waist_yaw_link', 'waist_roll_link', 'torso_link',
             'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link',
             'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link' ]

right_foot_name: 'r_foot_roll'
left_foot_name: 'l_foot_roll'

asset:
  assetRoot: "./"
  assetFileName: "../description/robots/g1/g1_23dof_lock_wrist_fitmotionONLY.xml"
  urdfFileName: "../description/robots/g1/g1_23dof_lock_wrist_fitmotionONLY.xml"

extend_config:
  - joint_name: "left_hand_link"
    parent_name: "left_elbow_link"
    pos: [ 0.25, 0.0, 0.0 ]
    rot: [ 1.0, 0.0, 0.0, 0.0 ] # w x y z
  - joint_name: "right_hand_link"
    parent_name: "right_elbow_link"
    pos: [ 0.25, 0.0, 0.0 ]
    rot: [ 1.0, 0.0, 0.0, 0.0 ] # w x y z
  - joint_name: "head_link"
    parent_name: "torso_link"
    pos: [ 0.0, 0.0, 0.42 ]
    rot: [ 1.0, 0.0, 0.0, 0.0 ] # w x y z
  - joint_name: "left_toe_link"
    parent_name: "left_ankle_roll_link"
    pos: [ 0.08, 0., -0.03 ] # 0.08 0 -0.06 # amass 0.04, -0.02, -0.02
    rot: [ 1.0, 0.0, 0.0, 0.0 ] # w x y z
  - joint_name: "right_toe_link"
    parent_name: "right_ankle_roll_link"
    pos: [ 0.08, 0., -0.03 ] # amass 0.04, -0.02, -0.02
    rot: [ 1.0, 0.0, 0.0, 0.0 ] # w x y z

base_link: "torso_link"
joint_matches:
  - [ "pelvis", "Pelvis" ]
  - [ "left_hip_pitch_link", "L_Hip" ]
  - [ "left_knee_link", "L_Knee" ]
  - [ "left_ankle_roll_link", "L_Ankle" ]
  # - [ "left_ankle_pitch_link", "L_Ankle" ] # new
  - [ "right_hip_pitch_link", "R_Hip" ]
  - [ "right_knee_link", "R_Knee" ]
  - [ "right_ankle_roll_link", "R_Ankle" ]
  # - [ "right_ankle_pitch_link", "R_Ankle" ] # new
  - [ "left_shoulder_roll_link", "L_Shoulder" ]
  - [ "left_elbow_link", "L_Elbow" ]
  - [ "left_hand_link", "L_Hand" ]
  - [ "right_shoulder_roll_link", "R_Shoulder" ]
  - [ "right_elbow_link", "R_Elbow" ]
  - [ "right_hand_link", "R_Hand" ]
  - [ "head_link", "Head" ]
  - [ "left_toe_link", "L_Toe" ]
  - [ "right_toe_link", "R_Toe" ]


smpl_pose_modifier:
  - Pelvis: "[np.pi/2, 0, np.pi/2]"
  - L_Shoulder: "[0, 0, -np.pi/2]"
  - R_Shoulder: "[0, 0, np.pi/2]"
  - L_Elbow: "[0, -np.pi/2, 0]"
  - R_Elbow: "[0, np.pi/2, 0]"
--------------------------------------------------------------------------------
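Each `extend_config` entry rigidly attaches a virtual joint (hands, head, toes) to a parent body: the joint's world position is the parent's position plus the parent's world rotation applied to `pos`, with `rot` given in w-x-y-z order. A small numpy sketch of that forward transform (illustrative values only; the repo applies this inside its motion library):

```python
import numpy as np

def quat_rotate(q_wxyz, v):
    """Rotate vector v by a unit quaternion given in (w, x, y, z) order."""
    w, x, y, z = q_wxyz
    u = np.array([x, y, z])
    # v' = v + 2w(u x v) + 2(u x (u x v))
    return v + 2.0 * w * np.cross(u, v) + 2.0 * np.cross(u, np.cross(u, v))

# "left_hand_link": 0.25 m along left_elbow_link's local x-axis
parent_pos = np.array([0.10, 0.20, 0.90])     # example world position of left_elbow_link
parent_quat = np.array([1.0, 0.0, 0.0, 0.0])  # example world orientation (identity, w x y z)
hand_pos = parent_pos + quat_rotate(parent_quat, np.array([0.25, 0.0, 0.0]))
print(hand_pos)  # [0.35 0.2  0.9 ]
```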
/description/robots/g1/dof_axis.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/dof_axis.npy
--------------------------------------------------------------------------------
/description/robots/g1/meshes/head_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/head_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_ankle_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_ankle_pitch_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_ankle_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_ankle_roll_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_elbow_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_elbow_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_elbow_link_merge.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_elbow_link_merge.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_hand_index_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_hand_index_0_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_hand_index_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_hand_index_1_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_hand_middle_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_hand_middle_0_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_hand_middle_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_hand_middle_1_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_hand_palm_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_hand_palm_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_hand_thumb_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_hand_thumb_0_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_hand_thumb_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_hand_thumb_1_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_hand_thumb_2_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_hand_thumb_2_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_hip_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_hip_pitch_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_hip_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_hip_roll_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_hip_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_hip_yaw_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_knee_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_knee_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_rubber_hand.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_rubber_hand.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_shoulder_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_shoulder_pitch_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_shoulder_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_shoulder_roll_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_shoulder_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_shoulder_yaw_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_wrist_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_wrist_pitch_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_wrist_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_wrist_roll_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_wrist_roll_rubber_hand.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_wrist_roll_rubber_hand.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/left_wrist_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/left_wrist_yaw_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/logo_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/logo_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/pelvis.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/pelvis.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/pelvis_contour_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/pelvis_contour_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_ankle_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_ankle_pitch_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_ankle_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_ankle_roll_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_elbow_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_elbow_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_elbow_link_merge.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_elbow_link_merge.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_hand_index_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_hand_index_0_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_hand_index_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_hand_index_1_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_hand_middle_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_hand_middle_0_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_hand_middle_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_hand_middle_1_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_hand_palm_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_hand_palm_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_hand_thumb_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_hand_thumb_0_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_hand_thumb_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_hand_thumb_1_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_hand_thumb_2_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_hand_thumb_2_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_hip_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_hip_pitch_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_hip_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_hip_roll_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_hip_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_hip_yaw_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_knee_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_knee_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_rubber_hand.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_rubber_hand.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_shoulder_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_shoulder_pitch_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_shoulder_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_shoulder_roll_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_shoulder_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_shoulder_yaw_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_wrist_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_wrist_pitch_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_wrist_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_wrist_roll_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_wrist_roll_rubber_hand.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_wrist_roll_rubber_hand.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/right_wrist_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/right_wrist_yaw_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/torso_constraint_L_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/torso_constraint_L_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/torso_constraint_L_rod_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/torso_constraint_L_rod_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/torso_constraint_R_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/torso_constraint_R_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/torso_constraint_R_rod_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/torso_constraint_R_rod_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/torso_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/torso_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/torso_link_23dof_rev_1_0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/torso_link_23dof_rev_1_0.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/torso_link_rev_1_0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/torso_link_rev_1_0.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/waist_constraint_L.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/waist_constraint_L.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/waist_constraint_R.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/waist_constraint_R.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/waist_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/waist_roll_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/waist_roll_link_rev_1_0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/waist_roll_link_rev_1_0.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/waist_support_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/waist_support_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/waist_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/waist_yaw_link.STL
--------------------------------------------------------------------------------
/description/robots/g1/meshes/waist_yaw_link_rev_1_0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/description/robots/g1/meshes/waist_yaw_link_rev_1_0.STL
--------------------------------------------------------------------------------
/description/robots/g1/phc_g1_23dof.yaml:
--------------------------------------------------------------------------------
humanoid_type: g1_29dof_anneal_23dof
bias_offset: False
has_self_collision: True
has_mesh: False
has_jt_limit: False
has_dof_subset: True
has_upright_start: True
has_smpl_pd_offset: False
remove_toe: False # For humanoid's geom toe
motion_sym_loss: False
sym_loss_coef: 1
big_ankle: True

has_shape_obs: false
has_shape_obs_disc: false
has_shape_variation: False

masterfoot: False
freeze_toe: false
freeze_hand: False
box_body: True
real_weight: True
real_weight_porpotion_capsules: True
real_weight_porpotion_boxes: True

body_names: [ 'pelvis',
              'left_hip_pitch_link', 'left_hip_roll_link', 'left_hip_yaw_link', 'left_knee_link', 'left_ankle_pitch_link', 'left_ankle_roll_link',
              'right_hip_pitch_link', 'right_hip_roll_link', 'right_hip_yaw_link', 'right_knee_link', 'right_ankle_pitch_link', 'right_ankle_roll_link',
              'waist_yaw_link', 'waist_roll_link', 'torso_link',
              'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link',
              'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link' ]

limb_weight_group:
  - [ 'left_hip_pitch_link', 'left_hip_roll_link', 'left_hip_yaw_link', 'left_knee_link', 'left_ankle_pitch_link', 'left_ankle_roll_link' ]
  - [ 'right_hip_pitch_link', 'right_hip_roll_link', 'right_hip_yaw_link', 'right_knee_link', 'right_ankle_pitch_link', 'right_ankle_roll_link' ]
  - [ 'pelvis', 'waist_yaw_link', 'waist_roll_link', 'torso_link' ]
  - [ 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link' ]
  - [ 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link' ]

dof_names: [ 'left_hip_pitch_link', 'left_hip_roll_link', 'left_hip_yaw_link',
             'left_knee_link', 'left_ankle_pitch_link', 'left_ankle_roll_link',
             'right_hip_pitch_link', 'right_hip_roll_link', 'right_hip_yaw_link',
             'right_knee_link', 'right_ankle_pitch_link', 'right_ankle_roll_link',
             'waist_yaw_link', 'waist_roll_link', 'torso_link',
             'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link',
             'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link' ]

right_foot_name: 'r_foot_roll'
left_foot_name: 'l_foot_roll'

asset:
  assetRoot: "./"
  assetFileName: "description/robots/g1/g1_23dof_lock_wrist_fitmotionONLY.xml"
  urdfFileName: "description/robots/g1/g1_23dof_lock_wrist_fitmotionONLY.xml"

extend_config:
  - joint_name: "left_hand_link"
    parent_name: "left_elbow_link"
    pos: [ 0.25, 0.0, 0.0 ]
    rot: [ 1.0, 0.0, 0.0, 0.0 ] # w x y z
  - joint_name: "right_hand_link"
    parent_name: "right_elbow_link"
    pos: [ 0.25, 0.0, 0.0 ]
    rot: [ 1.0, 0.0, 0.0, 0.0 ] # w x y z
  - joint_name: "head_link"
    parent_name: "torso_link"
    pos: [ 0.0, 0.0, 0.42 ]
    rot: [ 1.0, 0.0, 0.0, 0.0 ] # w x y z
  - joint_name: "left_toe_link"
    parent_name: "left_ankle_roll_link"
    pos: [ 0.08, 0, -0.06 ] # 0.08 0 -0.06 # amass 0.04, -0.02, -0.02
    rot: [ 1.0, 0.0, 0.0, 0.0 ] # w x y z
  - joint_name: "right_toe_link"
    parent_name: "right_ankle_roll_link"
    pos: [ 0.08, 0, -0.06 ] # amass 0.04, -0.02, -0.02
    rot: [ 1.0, 0.0, 0.0, 0.0 ] # w x y z

base_link: "torso_link"
joint_matches:
  - [ "pelvis", "Pelvis" ]
  - [ "left_hip_pitch_link", "L_Hip" ]
  - [ "left_knee_link", "L_Knee" ]
  - [ "left_ankle_roll_link", "L_Ankle" ]
  # - [ "left_ankle_pitch_link", "L_Ankle" ] # new
  - [ "right_hip_pitch_link", "R_Hip" ]
  - [ "right_knee_link", "R_Knee" ]
  - [ "right_ankle_roll_link", "R_Ankle" ]
  # - [ "right_ankle_pitch_link", "R_Ankle" ] # new
  - [ "left_shoulder_roll_link", "L_Shoulder" ]
  - [ "left_elbow_link", "L_Elbow" ]
  - [ "left_hand_link", "L_Hand" ]
  - [ "right_shoulder_roll_link", "R_Shoulder" ]
  - [ "right_elbow_link", "R_Elbow" ]
  - [ "right_hand_link", "R_Hand" ]
  - [ "head_link", "Head" ]
  - [ "left_toe_link", "L_Toe" ]
  - [ "right_toe_link", "R_Toe" ]


smpl_pose_modifier:
  - Pelvis: "[np.pi/2, 0, np.pi/2]"
  - L_Shoulder: "[0, 0, -np.pi/2]"
  - R_Shoulder: "[0, 0, np.pi/2]"
  - L_Elbow: "[0, -np.pi/2, 0]"
  - R_Elbow: "[0, np.pi/2, 0]"
--------------------------------------------------------------------------------
/example/motion_data/Bruce_Lee_pose.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/motion_data/Bruce_Lee_pose.pkl
--------------------------------------------------------------------------------
/example/motion_data/Charleston_dance.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/motion_data/Charleston_dance.pkl
--------------------------------------------------------------------------------
/example/motion_data/Hooks_punch.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/motion_data/Hooks_punch.pkl
--------------------------------------------------------------------------------
/example/motion_data/Horse-stance_pose.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/motion_data/Horse-stance_pose.pkl
--------------------------------------------------------------------------------
/example/motion_data/Horse-stance_punch.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/motion_data/Horse-stance_punch.pkl
--------------------------------------------------------------------------------
/example/motion_data/Roundhouse_kick.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/motion_data/Roundhouse_kick.pkl
--------------------------------------------------------------------------------
/example/motion_data/Side_kick.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/motion_data/Side_kick.pkl
--------------------------------------------------------------------------------
/example/pretrained_horse_stance_pose/exported/model_50000.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/pretrained_horse_stance_pose/exported/model_50000.onnx
--------------------------------------------------------------------------------
/example/pretrained_horse_stance_pose/model_50000.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/pretrained_horse_stance_pose/model_50000.pt
--------------------------------------------------------------------------------
/example/pretrained_horse_stance_pose_2/exported/model_119000.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/pretrained_horse_stance_pose_2/exported/model_119000.onnx
--------------------------------------------------------------------------------
/example/pretrained_horse_stance_pose_2/model_119000.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/pretrained_horse_stance_pose_2/model_119000.pt
--------------------------------------------------------------------------------
/example/pretrained_horse_stance_punch/exported/model_33000.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/pretrained_horse_stance_punch/exported/model_33000.onnx
--------------------------------------------------------------------------------
/example/pretrained_horse_stance_punch/model_33000.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/example/pretrained_horse_stance_punch/model_33000.pt
--------------------------------------------------------------------------------
/humanoidverse/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/agents/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/agents/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/agents/base_algo/base_algo.py:
--------------------------------------------------------------------------------
import torch

from torch import nn, Tensor

import time
import math
from pathlib import Path
from typing import Optional, List, Tuple, Dict


from hydra.utils import instantiate

from humanoidverse.envs.base_task.base_task import BaseTask

class BaseAlgo:
    def __init__(self, env: BaseTask, config, device):
        self.env = env
        self.config = config
        self.device = device

    def setup(self):
        raise NotImplementedError

    def learn(self):
        raise NotImplementedError

    def load(self, path):
        raise NotImplementedError

    @property
    def inference_model(self):
        raise NotImplementedError

    def env_step(self, actions, extra_info=None):
        obs_dict, rewards, dones, extras = self.env.step(actions, extra_info)
        return obs_dict, rewards, dones, extras

    @torch.no_grad()
    def evaluate_policy(self):
        raise NotImplementedError

    @torch.no_grad()
    def evaluate_policy_steps(self, Nsteps: int):
        raise NotImplementedError

    def save(self, path=None, name="last.ckpt"):
        raise NotImplementedError
--------------------------------------------------------------------------------
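`BaseAlgo` is the interface that the concrete agents under `agents/ppo` and `agents/mh_ppo` implement: they override `setup`/`learn`/`load`/`inference_model` and reuse `env_step` for environment interaction. A toy subclass, just to show the contract (the `num_envs`/`num_actions`/`num_steps` attribute names are hypothetical, for illustration only, not the repo's actual fields):

```python
import torch

class RandomAgent(BaseAlgo):
    """Toy agent driving the env with random actions; illustration only."""

    def setup(self):
        self.num_envs = self.env.num_envs        # assumed env attribute
        self.num_actions = self.env.num_actions  # assumed env attribute

    def learn(self):
        for _ in range(self.config.num_steps):   # assumed config field
            actions = torch.randn(self.num_envs, self.num_actions, device=self.device)
            obs_dict, rewards, dones, extras = self.env_step(actions)
```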
/humanoidverse/agents/callbacks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/agents/callbacks/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/agents/callbacks/analysis_plot_template.html:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/agents/callbacks/analysis_plot_template.html
--------------------------------------------------------------------------------
/humanoidverse/agents/callbacks/base_callback.py:
--------------------------------------------------------------------------------
import torch
from torch.nn import Module

class RL_EvalCallback(Module):
    def __init__(self, config, training_loop):
        super().__init__()
        self.config = config
        self.training_loop = training_loop
        self.device = self.training_loop.device

    def on_pre_evaluate_policy(self):
        pass

    def on_pre_eval_env_step(self, actor_state):
        return actor_state

    def on_post_eval_env_step(self, actor_state):
        return actor_state

    def on_post_evaluate_policy(self):
        pass
--------------------------------------------------------------------------------
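The four hooks bracket an evaluation rollout: the pre/post `evaluate_policy` pair runs once, while the per-step pair can inspect or rewrite `actor_state` before and after each env step; the `analysis_plot_*` callbacks in this directory are built on exactly these hooks. A minimal hypothetical callback:

```python
class StepCounter(RL_EvalCallback):
    """Counts evaluation env steps; illustration only, not part of the repo."""

    def on_pre_evaluate_policy(self):
        self.step_count = 0

    def on_post_eval_env_step(self, actor_state):
        self.step_count += 1
        return actor_state  # per-step hooks must pass actor_state through

    def on_post_evaluate_policy(self):
        print(f"evaluated {self.step_count} env steps")
```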
/humanoidverse/agents/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/agents/modules/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/agents/modules/encoder_modules.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import torch
4 | import torch.nn as nn
5 | from .modules import BaseModule
6 |
7 |
8 | class Estimator(nn.Module):
9 | def __init__(self, obs_dim_dict, module_config_dict):
10 | super(Estimator, self).__init__()
11 | self.module = BaseModule(obs_dim_dict, module_config_dict)
12 |
13 | # def estimate(self, obs_history):
14 | # return self.module(obs_history)
15 |
16 | def forward(self, obs_history):
17 | return self.module(obs_history)
--------------------------------------------------------------------------------
/humanoidverse/agents/modules/modules.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import inspect
4 |
5 | class BaseModule(nn.Module):
6 | def __init__(self, obs_dim_dict, module_config_dict):
7 | super(BaseModule, self).__init__()
8 | self.obs_dim_dict = obs_dim_dict
9 | self.module_config_dict = module_config_dict
10 |
11 | self._calculate_input_dim()
12 | self._calculate_output_dim()
13 | self._build_network_layer(self.module_config_dict.layer_config)
14 |
15 | def _calculate_input_dim(self):
16 | # calculate input dimension based on the input specifications
17 | input_dim = 0
18 | for each_input in self.module_config_dict['input_dim']:
19 | if each_input in self.obs_dim_dict:
20 | # atomic observation type
21 | input_dim += self.obs_dim_dict[each_input]
22 | elif isinstance(each_input, (int, float)):
23 | # direct numeric input
24 | input_dim += each_input
25 | else:
26 | current_function_name = inspect.currentframe().f_code.co_name
27 | raise ValueError(f"{current_function_name} - Unknown input type: {each_input}")
28 |
29 | self.input_dim = input_dim
30 |
31 | def _calculate_output_dim(self):
32 | output_dim = 0
33 | for each_output in self.module_config_dict['output_dim']:
34 | if isinstance(each_output, (int, float)):
35 | output_dim += each_output
36 | else:
37 | current_function_name = inspect.currentframe().f_code.co_name
38 | raise ValueError(f"{current_function_name} - Unknown output type: {each_output}")
39 | self.output_dim = output_dim
40 |
41 | def _build_network_layer(self, layer_config):
42 | if layer_config['type'] == 'MLP':
43 | self._build_mlp_layer(layer_config)
44 | else:
45 | raise NotImplementedError(f"Unsupported layer type: {layer_config['type']}")
46 |
47 | def _build_mlp_layer(self, layer_config):
48 | layers = []
49 | hidden_dims = layer_config['hidden_dims']
50 | output_dim = self.output_dim
51 | activation = getattr(nn, layer_config['activation'])()
52 |
53 | layers.append(nn.Linear(self.input_dim, hidden_dims[0]))
54 | layers.append(activation)
55 |
56 | for l in range(len(hidden_dims)):
57 | if l == len(hidden_dims) - 1:
58 | layers.append(nn.Linear(hidden_dims[l], output_dim))
59 | else:
60 | layers.append(nn.Linear(hidden_dims[l], hidden_dims[l + 1]))
61 | layers.append(activation)
62 |
63 | self.module = nn.Sequential(*layers)
64 |
65 | def forward(self, input):
66 | return self.module(input)
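
A minimal usage sketch of `BaseModule` (assumptions: `OmegaConf` for the dot-accessed config, toy dimensions, and `output_dim` already resolved to an integer — in the training pipeline a symbolic entry such as `robot_action_dim` is substituted with the robot's action count before the module is built):

```python
import torch
from omegaconf import OmegaConf
from humanoidverse.agents.modules.modules import BaseModule

obs_dim_dict = {"actor_obs": 76}                    # toy observation width
module_config = OmegaConf.create({
    "input_dim": ["actor_obs"],                     # summed from obs_dim_dict
    "output_dim": [23],                             # stands in for robot_action_dim
    "layer_config": {"type": "MLP",
                     "hidden_dims": [512, 256, 128],
                     "activation": "ELU"},
})

net = BaseModule(obs_dim_dict, module_config)
out = net(torch.randn(4, 76))
print(net.input_dim, net.output_dim, out.shape)     # 76 23 torch.Size([4, 23])
```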
--------------------------------------------------------------------------------
/humanoidverse/config/algo/mh_ppo.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | algo:
4 | _target_: humanoidverse.agents.mh_ppo.mh_ppo.MHPPO
5 | _recursive_: False
6 | config:
7 | num_learning_epochs: 5
8 | num_mini_batches: 4
9 | clip_param: 0.2
10 | gamma: 0.99
11 | lam: 0.95
12 | value_loss_coef: 1.0
13 | entropy_coef: 0.01
14 | l2c2:
15 | enable: False
16 | lambda_value: 1.0
17 | lambda_policy: 0.1
18 |
19 | actor_learning_rate: 1.e-3 # 5e-4 # 1.e-3
20 | critic_learning_rate: 1.e-3 # 5e-4 # 1.e-3
21 | max_grad_norm: 1.0
22 | use_clipped_value_loss: True
23 | schedule: "adaptive"
24 | desired_kl: 0.01
25 |
26 | num_steps_per_env: 24
27 | save_interval: 1000
28 | logging_interval: 25
29 |
30 | load_optimizer: True
31 |
32 | init_noise_std: 0.8
33 |
34 | num_learning_iterations: 1000000
35 | init_at_random_ep_len: True
36 | eval_callbacks: null
37 |
38 | phase_embed:
39 | type: Original # Original, Sinusoidal, SinusoidalV2, Learnable. See 'humanoidverse/agents/modules/ppo_modules.py' for details
40 | dim: 16 # if type==Original, not used
41 | # Other ideas: additive, concat, multiplicative, etc.
42 |
43 | module_dict:
44 | actor:
45 | input_dim: [actor_obs]
46 | output_dim: [robot_action_dim]
47 | layer_config:
48 | type: MLP
49 | hidden_dims: [512, 256, 128]
50 | activation: ELU
51 | critic:
52 | type: MLP
53 | input_dim: [critic_obs]
54 | # output_dim: [1]
55 | output_dim: [num_rew_fn]
56 | layer_config:
57 | type: MLP
58 | hidden_dims: [768, 512, 128]
59 | activation: ELU
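
`schedule: "adaptive"` with `desired_kl: 0.01` points at a KL-driven learning-rate controller. A sketch of the rule as conventionally implemented in legged-robotics PPO trainers — the 2x/0.5x thresholds and the 1.5 factor are the customary values, not verified against this repo's `mh_ppo.py`:

```python
def adapt_lr(lr: float, kl: float, desired_kl: float = 0.01,
             factor: float = 1.5, lr_min: float = 1e-5, lr_max: float = 1e-2) -> float:
    """Shrink the learning rate when the update's KL overshoots the target,
    grow it when the update is overly timid."""
    if kl > desired_kl * 2.0:
        lr = max(lr_min, lr / factor)
    elif 0.0 < kl < desired_kl / 2.0:
        lr = min(lr_max, lr * factor)
    return lr

# e.g. adapt_lr(1e-3, kl=0.05) -> ~6.67e-4; adapt_lr(1e-3, kl=0.001) -> 1.5e-3
```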
--------------------------------------------------------------------------------
/humanoidverse/config/base.yaml:
--------------------------------------------------------------------------------
1 | # First we define the global structures that will be used by all the configs.
2 | defaults:
3 | # - base/fabric
4 | - _self_
5 | - base/hydra
6 | - base/structure
7 |
8 | # These are global variables that all levels of the config can access.
9 | ## Experiment setup
10 | seed: 0
11 | codebase_version: 1.0 # this is recorded to enable auto-conversion of models between different versions of the codebase
12 | headless: True
13 | num_envs: 4096
14 |
15 | ### Checkpoint logic
16 | auto_load_latest: False
17 | checkpoint: null
18 |
19 | ### Naming and dir structure
20 | project_name: TEST
21 | experiment_name: TEST
22 |
23 | base_dir: logs
24 | timestamp: ${now:%Y%m%d_%H%M%S}
25 | experiment_dir: ${base_dir}/${project_name}/${timestamp}-${experiment_name}-${log_task_name}-${robot.asset.robot_type}
26 | save_dir: ${experiment_dir}/.hydra
27 |
28 | force_flat_terrain: False
29 |
30 | use_wandb: False
31 | log_task_name: TEST
32 |
33 | ### Simulation
34 | sim_type: isaacgym
35 | env_spacing: 20
36 | output_dir: ${experiment_dir}/output
37 |
38 | eval_overrides:
39 | headless: False
40 | num_envs: 1
41 | auto_load_latest: False
42 | use_wandb: False
43 |
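
The `${...}` entries are OmegaConf/Hydra interpolations resolved at load time. A sketch of how `experiment_dir` expands, with illustrative values (`${now:...}` is a Hydra resolver, so a literal timestamp is substituted here):

```python
from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "base_dir": "logs",
    "project_name": "TEST",
    "experiment_name": "TEST",
    "timestamp": "20250101_120000",                  # stands in for ${now:...}
    "log_task_name": "motion_tracking",              # illustrative
    "robot": {"asset": {"robot_type": "g1_23dof"}},  # illustrative
    "experiment_dir": "${base_dir}/${project_name}/${timestamp}-"
                      "${experiment_name}-${log_task_name}-${robot.asset.robot_type}",
})
print(cfg.experiment_dir)
# logs/TEST/20250101_120000-TEST-motion_tracking-g1_23dof
```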
--------------------------------------------------------------------------------
/humanoidverse/config/base/fabric.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | fabric:
4 | _target_: lightning.fabric.Fabric
5 | _convert_: all
6 | accelerator: gpu
7 | devices: ${ngpu}
8 | num_nodes: ${nodes}
9 | strategy:
10 | _target_: lightning.fabric.strategies.DDPStrategy
11 | precision: ${amp_precision}
12 | loggers:
13 | - _target_: lightning.fabric.loggers.TensorBoardLogger
14 | root_dir: ${save_dir}
15 |
16 | # These are global variables that all levels of the config can access.
17 | ## Compute setup
18 | ngpu: 1
19 | nodes: 1
20 | torch_deterministic: False
21 | amp_precision: 32-true
22 | force_flat_terrain: False
23 |
24 | eval_overrides:
25 | ngpu: 1
26 | nodes: 1
27 |
28 | fabric:
29 | loggers: null
30 |
--------------------------------------------------------------------------------
/humanoidverse/config/base/hydra.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | hydra: # So hydra will put your config info in the same dir as your checkpoints
4 | run:
5 | dir: ${save_dir}
6 | sweep:
7 | dir: ${save_dir}
8 | job:
9 | chdir: False
10 | # job_logging:
11 | # disable_existing_loggers: true
12 | # hydra_logging:
13 | # disable_existing_loggers: true
14 |
--------------------------------------------------------------------------------
/humanoidverse/config/base/structure.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | algo: ???
4 |
5 | env: ???
6 |
7 | robot: ???
8 |
9 | terrain: ??? # This is defined in the terrain configs
10 |
--------------------------------------------------------------------------------
/humanoidverse/config/base_eval.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | eval_timestamp: ${now:%Y%m%d_%H%M%S}
4 | eval_name: TEST
5 | eval_log_dir: logs_eval/${eval_name}/${eval_timestamp}
6 | hydra:
7 | run:
8 | dir: ${eval_log_dir}
9 |
--------------------------------------------------------------------------------
/humanoidverse/config/deploy/multiple.yaml:
--------------------------------------------------------------------------------
1 |
2 |
3 | deploy:
4 | deploy_mode: "multiple"
5 |
6 | robot:
7 | motion:
8 | motion_file: "example/motion_data"
9 | log_task_name: motion_tracking # kept for compatibility only
10 |
11 |
12 | obs:
13 | obs_dict:
14 | actor_obs:
15 | - base_ang_vel
16 | - projected_gravity
17 | - dof_pos
18 | - dof_vel
19 | - actions
20 | - ref_motion_phase
21 | - history_actor
22 | critic_obs:
23 | - base_lin_vel
24 | - base_ang_vel
25 | - projected_gravity
26 | - dof_pos
27 | - dof_vel
28 | - actions
29 | - ref_motion_phase
30 | - dif_local_rigid_body_pos
31 | - local_ref_rigid_body_pos
32 | - history_critic
33 | obs_auxiliary:
34 | history_actor:
35 | base_ang_vel: 4
36 | projected_gravity: 4
37 | dof_pos: 4
38 | dof_vel: 4
39 | actions: 4
40 | ref_motion_phase: 4
41 | relyaw: 4
42 | history_critic:
43 | base_lin_vel: 4
44 | base_ang_vel: 4
45 | projected_gravity: 4
46 | dof_pos: 4
47 | dof_vel: 4
48 | actions: 4
49 | ref_motion_phase: 4
50 | relyaw: 4
51 | short_history:
52 | base_ang_vel: 5
53 | projected_gravity: 5
54 | dof_pos: 5
55 | dof_vel: 5
56 | actions: 5
57 | command_lin_vel: 5
58 | command_ang_vel: 5
59 | obs_scales:
60 | base_lin_vel: 2.0
61 | base_ang_vel: 0.25
62 | projected_gravity: 1.0
63 | command_lin_vel: 1.0
64 | command_ang_vel: 1.0
65 | dof_pos: 1.0
66 | dof_vel: 0.05
67 | base_ang_vel_noise: 0.25
68 | projected_gravity_noise: 1.0
69 | dof_pos_noise: 1.0
70 | dof_vel_noise: 0.05
71 | history: 1.0
72 | short_history: 1.0
73 | actions: 1.0
74 | dif_joint_angles: 1.0
75 | dif_joint_velocities: 1.0
76 | # dif_joint_velocities: 0.05
77 | local_ref_rigid_body_vel: 1.0
78 | global_ref_rigid_body_vel: 1.0
79 | dif_local_rigid_body_pos: 1.0
80 | local_ref_rigid_body_pos: 1.0
81 | local_ref_rigid_body_pos_relyaw: 1.0
82 | relyaw: 1.0
83 | ref_motion_phase: 1.0
84 | history_actor: 1.0
85 | history_critic: 1.0
86 | dr_base_com: 1.0
87 | dr_link_mass: 1.0
88 | dr_kp: 1.0
89 | dr_kd: 1.0
90 | dr_friction: 1.0
91 | dr_ctrl_delay: 1.0
92 | noise_scales:
93 | base_lin_vel: 0.0
94 | base_ang_vel: 0.0
95 | projected_gravity: 0.0
96 | base_ang_vel_noise: 0.3
97 | projected_gravity_noise: 0.2
98 | dof_pos_noise: 0.01
99 | dof_vel_noise: 1.0
100 | command_lin_vel: 0.0
101 | command_ang_vel: 0.0
102 | dof_pos: 0.0
103 | dof_vel: 0.0
104 | actions: 0.0
105 | dif_joint_angles: 0.0
106 | dif_joint_velocities: 0.0
107 | local_ref_rigid_body_vel: 0.0
108 | global_ref_rigid_body_vel: 0.0
109 | dif_local_rigid_body_pos: 0.0
110 | local_ref_rigid_body_pos: 0.0
111 | local_ref_rigid_body_pos_relyaw: 0.0
112 | relyaw: 0.0
113 | ref_motion_phase: 0.0
114 | history_actor: 0.0
115 | history_critic: 0.0
116 | history: 0.0
117 | short_history: 0.0
118 | dr_base_com: 0.0
119 | dr_link_mass: 0.0
120 | dr_kp: 0.0
121 | dr_kd: 0.0
122 | dr_friction: 0.0
123 | dr_ctrl_delay: 0.0
124 | add_noise_currculum: false
125 | obs_dims:
126 | - base_lin_vel: 3
127 | - base_ang_vel: 3
128 | - projected_gravity: 3
129 | - command_lin_vel: 2
130 | - command_ang_vel: 1
131 | - dof_pos: ${robot.dof_obs_size}
132 | - dof_vel: ${robot.dof_obs_size}
133 | - base_ang_vel_noise: 3
134 | - projected_gravity_noise: 3
135 | - dof_pos_noise: ${robot.dof_obs_size}
136 | - dof_vel_noise: ${robot.dof_obs_size}
137 | - actions: ${robot.dof_obs_size}
138 | - dif_joint_angles: ${robot.dof_obs_size}
139 | - dif_joint_velocities: ${robot.dof_obs_size}
140 | - dif_local_rigid_body_pos: ${eval:'3 * ${robot.num_bodies} + 9'}
141 | - local_ref_rigid_body_pos: ${eval:'3 * ${robot.num_bodies} + 9'}
142 | - local_ref_rigid_body_vel: ${eval:'3 * ${robot.num_bodies} + 9'}
143 | - global_ref_rigid_body_vel: ${eval:'3 * ${robot.num_bodies} + 9'}
144 | - local_ref_rigid_body_pos_relyaw: ${eval:'3 * ${robot.num_bodies} + 9'}
145 | - relyaw: 1
146 | - ref_motion_phase: 1
147 | - dr_base_com: 3
148 | - dr_link_mass: 22
149 | - dr_kp: ${robot.dof_obs_size}
150 | - dr_kd: ${robot.dof_obs_size}
151 | - dr_friction: 1
152 | - dr_ctrl_delay: 1
153 | post_compute_config: {}
154 |
--------------------------------------------------------------------------------
/humanoidverse/config/deploy/single.yaml:
--------------------------------------------------------------------------------
1 |
2 | deploy:
3 | deploy_mode: "single"
4 |
5 | BYPASS_ACT: False
6 | SWITCH_EMA: True
7 |
8 | render: True
9 | defcmd: [0.0, 0.0, 0.0, 0.0]
10 | heading_cmd: True
11 |
12 | ctrl_dt: 0.02
13 | net: ???
14 | msg_type: "hg" # "hg" or "go", currently only "hg" is supported
15 | imu_type: "pelvis" # "torso" or "pelvis", currently only "pelvis" is supported
16 | mode_machine: 5
17 |
18 | lowcmd_topic: "rt/lowcmd"
19 | lowstate_topic: "rt/lowstate"
20 | # lowcmd_topic: "low_cmd_topic"
21 | # lowstate_topic: "low_state_topic"
22 |
23 | dof_idx_23_to_29: [0, 1, 2, 3, 4, 5,
24 | 6, 7, 8, 9, 10, 11,
25 | 12, 13, 14,
26 | 15, 16, 17, 18,
27 | 22, 23, 24, 25]
28 | locked_kp: 40
29 | locked_kd: 1
30 |
31 | # leg_joint2motor_idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
32 | # arm_waist_joint2motor_idx: [12, 13, 14,
33 | # 15, 16, 17, 18, 19, 20, 21,
34 | # 22, 23, 24, 25, 26, 27, 28]
35 |
36 |
37 | env:
38 | config:
39 | save_motion: True
40 | save_note: null
41 |
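
A sketch of how `dof_idx_23_to_29` above is presumably used: scattering the 23-DoF policy output into the 29-motor command while the six unlisted wrist motors stay at their defaults under the `locked_kp`/`locked_kd` gains (the helper below is illustrative, not the repo's deploy code):

```python
import numpy as np

DOF_IDX_23_TO_29 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                    12, 13, 14, 15, 16, 17, 18, 22, 23, 24, 25]
LOCKED = sorted(set(range(29)) - set(DOF_IDX_23_TO_29))  # [19, 20, 21, 26, 27, 28]

def expand_targets(q_target_23: np.ndarray) -> np.ndarray:
    """Scatter 23 policy joint targets into a 29-motor command; the locked
    motors keep a zero target and would be driven with locked_kp/locked_kd."""
    q_29 = np.zeros(29)
    q_29[DOF_IDX_23_TO_29] = q_target_23
    return q_29
```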
--------------------------------------------------------------------------------
/humanoidverse/config/domain_rand/NO_domain_rand.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 |
4 | defaults:
5 | - domain_rand_base
6 |
7 | domain_rand:
8 | push_robots : False
9 | push_interval_s : [5, 10]
10 | max_push_vel_xy : 1.0
11 |
12 | randomize_base_com : False
13 | base_com_range: # m
14 | x : [-0.1, 0.1]
15 | y : [-0.1, 0.1]
16 | z : [-0.01, 0.01]
17 |
18 | randomize_link_mass : False
19 | link_mass_range : [0.7, 1.3] # *factor
20 | randomize_link_body_names : [
21 | 'pelvis', 'left_hip_yaw_link', 'left_hip_roll_link', 'left_hip_pitch_link', 'left_knee_link',
22 | 'right_hip_yaw_link', 'right_hip_roll_link', 'right_hip_pitch_link', 'right_knee_link',
23 | ]
24 |
25 | randomize_pd_gain : False
26 | kp_range : [0.75, 1.25]
27 | kd_range : [0.75, 1.25]
28 |
29 |
30 |
31 | randomize_friction : False
32 | # randomize_friction : False
33 | friction_range : [-0.6, 1.2]
34 |
35 | randomize_base_mass : False # replaced by randomize_link_mass
36 | added_mass_range : [-5., 10.]
37 |
38 | randomize_torque_rfi : False
39 | rfi_lim : 0.1
40 | randomize_rfi_lim : False
41 | rfi_lim_range : [0.5, 1.5]
42 |
43 | randomize_ctrl_delay : False
44 | ctrl_delay_step_range : [0, 2] # integers; max real delay is 90ms
45 |
46 | randomize_motion_ref_xyz: False # head only for now
47 | motion_ref_xyz_range : [[-0.02, 0.02],[-0.02, 0.02],[-0.1, 0.1]]
48 |
49 | motion_package_loss: False
50 | package_loss_range: [1, 10] # dt = 0.02s, delay for 0.02s - 0.2s
51 | package_loss_interval_s : 2
52 |
53 |
54 | born_offset : False
55 | born_offset_curriculum: False
56 | born_offset_level_down_threshold: 50
57 | born_offset_level_up_threshold: 120
58 | level_degree: 0.00005
59 | born_distance : 0.25
60 | born_offset_range: [0.0, 1]
61 | born_offset_possibility : 1.0
62 |
63 | born_heading_curriculum: False
64 | born_heading_randomization : False
65 | born_heading_level_down_threshold: 50
66 | born_heading_level_up_threshold: 120
67 | born_heading_degree: 10
68 | born_heading_range: [0, 180]
69 | born_heading_level_degree: 0.00005
70 |
--------------------------------------------------------------------------------
/humanoidverse/config/domain_rand/domain_rand_base.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | domain_rand:
4 | # push robots
5 | push_robots : True
6 | push_interval_s : [5, 10]
7 | max_push_vel_xy : 1.0
8 |
9 | # base com
10 | randomize_base_com : True
11 | base_com_range:
12 | x : [-0.1, 0.1]
13 | y : [-0.1, 0.1]
14 | z : [-0.1, 0.1]
15 |
16 | # link mass
17 | randomize_link_mass : True
18 | link_mass_range : [0.8, 1.2]
19 |
20 | # pd gain
21 | randomize_pd_gain : True
22 | kp_range : [0.75, 1.25]
23 | kd_range : [0.75, 1.25]
24 |
25 | # friction
26 | randomize_friction : True
27 | friction_range : [0.5, 1.25]
28 |
29 | # base mass
30 | randomize_base_mass : False
31 |
32 | # rfi
33 | randomize_torque_rfi : True
34 | rfi_lim : 0.1
35 | randomize_rfi_lim : True
36 | rfi_lim_range : [0.5, 1.5]
37 |
38 |
39 | # control delay
40 | randomize_ctrl_delay : True
41 | ctrl_delay_step_range : [0, 2] # integers; max real delay is 90ms
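
Ranges like `kp_range`/`kd_range` are per-environment multiplicative factors. A sketch of the typical uniform sampling (illustrative; the repo's own sampling code is not shown here):

```python
import torch

def sample_factor(rand_range, num_envs: int, device: str = "cpu") -> torch.Tensor:
    """Draw one uniform multiplicative factor per environment from [lo, hi]."""
    lo, hi = rand_range
    return (hi - lo) * torch.rand(num_envs, device=device) + lo

kp_factor = sample_factor([0.75, 1.25], num_envs=4096)  # kp_range above
kd_factor = sample_factor([0.75, 1.25], num_envs=4096)  # kd_range above
# effective gains: kp_eff = kp_nominal * kp_factor[:, None], one row per env
```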
--------------------------------------------------------------------------------
/humanoidverse/config/domain_rand/dr_nil.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | # Small version DR, modified from redr1l_inertia_all
3 |
4 | defaults:
5 | - domain_rand_base
6 |
7 | domain_rand:
8 | _push_fixed: true
9 | push_robots : True
10 | push_interval_s : [5, 10]
11 | max_push_vel_xy : 0.001
12 |
13 | randomize_base_com : True
14 | base_com_range: # m
15 | x : [-0.001,0.001]
16 | y : [-0.001,0.001]
17 | z : [-0.001,0.001]
18 |
19 | randomize_link_mass : True
20 | link_mass_range : [0.999, 1.001] # *factor
21 | randomize_link_body_names : [
22 | 'pelvis', 'torso_link',
23 | 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link',
24 | 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link',
25 | 'left_hip_yaw_link', 'left_hip_roll_link', 'left_hip_pitch_link', 'left_knee_link', 'left_ankle_pitch_link', 'left_ankle_roll_link',
26 | 'right_hip_yaw_link', 'right_hip_roll_link', 'right_hip_pitch_link', 'right_knee_link', 'right_ankle_pitch_link', 'right_ankle_roll_link'
27 | ]
28 |
29 | randomize_pd_gain : True
30 | kp_range : [0.999, 1.001]
31 | kd_range : [0.999, 1.001]
32 |
33 |
34 |
35 | randomize_friction : True
36 | # randomize_friction : False
37 | friction_range : [0.4, 1.2]
38 |
39 | randomize_base_mass : False # replaced by randomize_link_mass
40 | added_mass_range : [-5., 10.]
41 |
42 | randomize_torque_rfi : True
43 | use_rao : True
44 | rao_lim : 0.001
45 | rfi_lim : 0.001
46 | randomize_rfi_lim : True
47 | rfi_lim_range : [0.0005, 0.0015]
48 |
49 | randomize_ctrl_delay : True
50 | ctrl_delay_step_range : [0, 0] # integers; max real delay is 90ms
51 |
52 | randomize_motion_ref_xyz: False # head only for now
53 | motion_ref_xyz_range : [[-0.02, 0.02],[-0.02, 0.02],[-0.1, 0.1]]
54 |
55 | motion_package_loss: False
56 | package_loss_range: [1, 10] # dt = 0.02s, delay for 0.02s - 0.2s
57 | package_loss_interval_s : 2
58 |
59 |
60 | born_offset : False
61 | born_offset_curriculum: False
62 | born_offset_level_down_threshold: 50
63 | born_offset_level_up_threshold: 120
64 | level_degree: 0.00005
65 | born_distance : 0.25
66 | born_offset_range: [0.0, 1]
67 | born_offset_possibility : 1.0
68 |
69 | born_heading_curriculum: False
70 | born_heading_randomization : False
71 | born_heading_level_down_threshold: 50
72 | born_heading_level_up_threshold: 120
73 | born_heading_degree: 10
74 | born_heading_range: [0, 180]
75 | born_heading_level_degree: 0.00005
76 |
--------------------------------------------------------------------------------
/humanoidverse/config/domain_rand/main.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | # Modified from `redr1l.yaml`
3 |
4 | defaults:
5 | - domain_rand_base
6 |
7 | domain_rand:
8 | reinit_epis_rand: -1
9 |
10 | _push_fixed: true
11 | push_robots : True
12 | push_interval_s : [5, 10]
13 | max_push_vel_xy : 0.1
14 |
15 | randomize_base_com : True
16 | base_com_range: # m
17 | x : [-0.05, 0.05]
18 | y : [-0.05, 0.05]
19 | z : [-0.01, 0.01]
20 |
21 | randomize_link_mass : True
22 | link_mass_range : [0.9, 1.1] # *factor
23 | randomize_link_com : False
24 | link_com_range: #m
25 | x : [-0.01, 0.01]
26 | y : [-0.01, 0.01]
27 | z : [-0.01, 0.01]
28 | randomize_link_body_names : [
29 | 'pelvis', 'torso_link',
30 | 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link',
31 | 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link',
32 | 'left_hip_yaw_link', 'left_hip_roll_link', 'left_hip_pitch_link', 'left_knee_link', 'left_ankle_pitch_link', 'left_ankle_roll_link',
33 | 'right_hip_yaw_link', 'right_hip_roll_link', 'right_hip_pitch_link', 'right_knee_link', 'right_ankle_pitch_link', 'right_ankle_roll_link'
34 | ]
35 |
36 | randomize_link_inertia : True
37 | link_inertia_range : [0.9,1.1] # *factor
38 | randomize_link_inertia_names : [
39 | 'pelvis', 'torso_link',
40 | 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link',
41 | 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link',
42 | 'left_hip_yaw_link', 'left_hip_roll_link', 'left_hip_pitch_link', 'left_knee_link', 'left_ankle_pitch_link', 'left_ankle_roll_link',
43 | 'right_hip_yaw_link', 'right_hip_roll_link', 'right_hip_pitch_link', 'right_knee_link', 'right_ankle_pitch_link', 'right_ankle_roll_link'
44 | ]
45 |
46 | heavy_upper:
47 | enable: False
48 | ratio: 1.1 # all mass and inertia are scaled by this ratio
49 | body_names: [
50 | 'torso_link',
51 | 'left_shoulder_pitch_link', 'left_shoulder_roll_link', 'left_shoulder_yaw_link', 'left_elbow_link',
52 | 'right_shoulder_pitch_link', 'right_shoulder_roll_link', 'right_shoulder_yaw_link', 'right_elbow_link',
53 | ]
54 | randomize_pd_gain : True
55 | kp_range : [0.9, 1.1]
56 | kd_range : [0.9, 1.1]
57 |
58 | parallel_serial_pd:
59 | # multiplicative composition with randomize_pd_gain, not replacing it
60 | enable: False
61 | ratio: [0.8, 1.2]
62 | joint_idx: [4,5, 10,11, 13,14]
63 |
64 |
65 | randomize_friction : True
66 | # randomize_friction : False
67 | friction_range : [0.2, 1.2]
68 | randomize_restitution : False
69 | restitution_range : [0.0, 1.0]
70 |
71 | randomize_base_mass : False # replaced by randomize_link_mass
72 | added_mass_range : [-5., 10.]
73 |
74 | randomize_torque_rfi : True
75 | use_rao : True
76 | rao_lim : 0.05
77 | rfi_lim : 0.05
78 | randomize_rfi_lim : True
79 | rfi_lim_range : [0.5, 1.5]
80 |
81 | randomize_ctrl_delay : True
82 | ctrl_delay_step_range : [0, 2] # integers; max real delay is 90ms
83 |
84 | randomize_motion_ref_xyz: False # head only for now
85 | motion_ref_xyz_range : [[-0.02, 0.02],[-0.02, 0.02],[-0.1, 0.1]]
86 |
87 | motion_package_loss: False
88 | package_loss_range: [1, 10] # dt = 0.02s, delay for 0.02s - 0.2s
89 | package_loss_interval_s : 2
90 |
91 |
92 | born_offset : False
93 | born_offset_curriculum: False
94 | born_offset_level_down_threshold: 50
95 | born_offset_level_up_threshold: 120
96 | level_degree: 0.00005
97 | born_distance : 0.25
98 | born_offset_range: [0.0, 1]
99 | born_offset_possibility : 1.0
100 |
101 | born_heading_curriculum: False
102 | born_heading_randomization : False
103 | born_heading_level_down_threshold: 50
104 | born_heading_level_up_threshold: 120
105 | born_heading_degree: 10
106 | born_heading_range: [0, 180]
107 | born_heading_level_degree: 0.00005
108 |
--------------------------------------------------------------------------------
/humanoidverse/config/env/base_task.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | env:
4 | _target_: humanoidverse.envs.base_task.base_task.BaseTask
5 | _recursive_: False
6 | config:
7 | experiment_name: ${experiment_name}
8 | num_envs: ${num_envs}
9 | headless: ${headless}
10 | simulator: ${simulator}
11 | save_rendering_dir: null
12 | ckpt_dir: null
13 |
14 | # Globally accessible parameters
15 | log_task_name: base_task
--------------------------------------------------------------------------------
/humanoidverse/config/env/legged_base.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - base_task
5 |
6 | # Env structure
7 | env:
8 | _target_: humanoidverse.envs.legged_base_task.legged_robot_base.LeggedRobotBase
9 | _recursive_: False
10 | config:
11 | robot: ${robot}
12 | domain_rand: ${domain_rand}
13 | rewards: ${rewards}
14 | terrain: ${terrain}
15 | obs: ${obs}
16 | env_spacing: 5.0
17 | max_episode_length_s: 100000
18 | use_vec_reward: False
19 |
20 | normalization:
21 | clip_actions: 100.0
22 | clip_observations: 100.0
23 |
24 | # simulator:
25 | # sim:
26 | # fps: 200 # 1/dt , dt = 0.005
27 | # control_decimation: 4 # decimation
28 | # substeps: 1
29 | termination:
30 | terminate_when_close_to_dof_pos_limit: False
31 | terminate_when_close_to_dof_vel_limit: False
32 | terminate_when_close_to_torque_limit: False
33 | termination_scales:
34 | termination_close_to_dof_pos_limit : 0.98
35 | termination_close_to_dof_vel_limit : 0.98
36 | termination_close_to_torque_limit : 0.98
37 |
38 | # Globally accessible parameters
39 | log_task_name: legged_base
--------------------------------------------------------------------------------
/humanoidverse/config/env/motion_tracking.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - legged_base
5 |
6 | # Env structure
7 | env:
8 | _target_: humanoidverse.envs.motion_tracking.motion_tracking.LeggedRobotMotionTracking
9 | config:
10 | use_vec_reward: True
11 | max_episode_length_s: 100000
12 | termination:
13 | terminate_by_contact: False
14 | terminate_by_gravity: True
15 | terminate_by_low_height: False
16 | terminate_when_motion_end: True
17 | terminate_when_motion_far: True
18 | terminate_when_dof_far: False
19 | terminate_when_close_to_dof_pos_limit: False
20 | terminate_when_close_to_dof_vel_limit: False
21 | terminate_when_close_to_torque_limit: False
22 | termination_scales:
23 | termination_min_base_height : 0.2
24 | termination_gravity_x : 0.8
25 | termination_gravity_y : 0.8
26 | termination_gravity : 0.75
27 | termination_motion_far_threshold : 1.5
28 | termination_close_to_dof_pos_limit : 0.98
29 | termination_close_to_dof_vel_limit : 0.98
30 | termination_close_to_torque_limit : 0.98
31 | termination_probality:
32 | terminate_when_close_to_dof_pos_limit: 0.25
33 | terminate_when_close_to_dof_vel_limit: 0.25
34 | terminate_when_close_to_torque_limit: 0.25
35 | termination_curriculum:
36 | terminate_when_motion_far_curriculum: True
37 | terminate_when_motion_far_initial_threshold: 1.5
38 | terminate_when_motion_far_threshold_max: 2.0
39 | terminate_when_motion_far_threshold_min: 0.3
40 | terminate_when_motion_far_curriculum_degree: 0.000025
41 | terminate_when_motion_far_curriculum_level_down_threshold: 40
42 | terminate_when_motion_far_curriculum_level_up_threshold: 42
43 |
44 | terminate_when_dof_far_curriculum:
45 | enable: True
46 | init: 3.0
47 | max: 3.0
48 | min: 1.0
49 | degree: 0.000002
50 | level_down_threshold: 40
51 | level_up_threshold: 42
52 |
53 | resample_motion_when_training: False
54 | resample_time_interval_s: 2000
55 |
56 | noise_to_initial_level: 0
57 | init_noise_scale:
58 | dof_pos: 0.1
59 | dof_vel: 0.15
60 | root_pos: 0.05
61 | root_rot: 10 # degree, * 3.14 / 180
62 | root_vel: 0.01
63 | root_ang_vel: 0.01
64 |
65 | use_teleop_control: False
66 | enforce_randomize_motion_start_eval: False
67 |
68 |
69 | soft_dynamic_correction:
70 | enable: False
71 | alpha: 0.1
72 | type: "deter" # "prob" or "deter"
73 | curriculum:
74 | enable: True
75 | max_alpha: 0.9
76 | min_alpha: 0.0
77 | degree: 0.00001
78 | level_down_threshold: 40
79 | level_up_threshold: 42
80 |
81 |
82 |
83 | # Globally accessible parameters
84 | log_task_name: motion_tracking
85 |
86 | eval_overrides:
87 | env:
88 | config:
89 | max_episode_length_s: 100000
90 |
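
The `terminate_when_motion_far_curriculum` block drives `termination_motion_far_threshold` between its `min`/`max` bounds based on average episode length. A plausible update rule, under the usual convention that long episodes tighten the threshold by `degree` per step and short ones relax it (the exact rule lives in the env code, so treat this as illustrative):

```python
def update_motion_far_threshold(thr: float, avg_ep_len: float,
                                degree: float = 0.000025,
                                level_down: float = 40.0, level_up: float = 42.0,
                                thr_min: float = 0.3, thr_max: float = 2.0) -> float:
    """Anneal the motion-far termination threshold: tighten (shrink) it while
    episodes run long, relax (grow) it when they terminate early."""
    if avg_ep_len > level_up:
        thr -= degree      # policy is doing well -> stricter termination
    elif avg_ep_len < level_down:
        thr += degree      # struggling -> looser termination
    return min(max(thr, thr_min), thr_max)
```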
--------------------------------------------------------------------------------
/humanoidverse/config/exp/base_exp.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - /algo: base_algo
5 | - /env: base_task
6 |
7 | experiment_name: TESTBaseExp
--------------------------------------------------------------------------------
/humanoidverse/config/exp/legged_base.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - /algo: ppo
5 | - /env: legged_base
6 |
7 | experiment_name: TESTLeggedBase
--------------------------------------------------------------------------------
/humanoidverse/config/exp/motion_tracking.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | # - /algo: ppo
5 | - /algo: mh_ppo
6 | - /env: motion_tracking
7 |
8 | experiment_name: TEST_Motion_Tracking
--------------------------------------------------------------------------------
/humanoidverse/config/obs/legged_obs.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Be careful when using _raw, history
4 | obs:
5 | obs_dict:
6 | actor_obs: ???
7 | critic_obs: ???
8 |
9 | # define the compound observations referenced in obs_dict; for example, long/short histories of different lengths
10 | obs_auxiliary: ???
11 |
12 | obs_scales: ???
13 |
14 | noise_scales: ???
15 |
16 | obs_dims: ???
17 | # Will store values after preprocessing, don't put anything here
18 | post_compute_config: {}
19 | motion_len: -1
--------------------------------------------------------------------------------
/humanoidverse/config/obs/motion_tracking/benchmark.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Modified from 'deepmimic_a2c_nolinvel_LARGEnoise_history.yaml', add priv information to critic
4 |
5 | # Be careful when using _raw, history
6 | obs:
7 | obs_dict:
8 | actor_obs: [
9 | base_lin_vel,
10 | base_ang_vel,
11 | projected_gravity,
12 | dof_pos,
13 | dof_vel,
14 | actions,
15 | ref_motion_phase,
16 | dif_local_rigid_body_pos,
17 | local_ref_rigid_body_pos,
18 | history_actor
19 | ]
20 |
21 | critic_obs: [
22 | base_lin_vel,
23 | base_ang_vel,
24 | projected_gravity,
25 | dof_pos,
26 | dof_vel,
27 | actions,
28 | ref_motion_phase,
29 | dif_local_rigid_body_pos,
30 | local_ref_rigid_body_pos,
31 | dr_base_com,
32 | dr_link_mass,
33 | dr_kp,
34 | dr_kd,
35 | dr_friction,
36 | dr_ctrl_delay,
37 | history_critic
38 | ]
39 |
40 | obs_auxiliary: # define the compound observations referenced in obs_dict; for example, long/short histories of different lengths
41 |
42 | history_actor: {
43 | base_lin_vel: 4,
44 | base_ang_vel: 4,
45 | projected_gravity: 4,
46 | dof_pos: 4,
47 | dof_vel: 4,
48 | actions: 4,
49 | ref_motion_phase: 4
50 | }
51 |
52 | history_critic: {
53 | base_lin_vel: 4,
54 | base_ang_vel: 4,
55 | projected_gravity: 4,
56 | dof_pos: 4,
57 | dof_vel: 4,
58 | actions: 4,
59 | ref_motion_phase: 4
60 | }
61 |
62 | obs_scales: {
63 | base_lin_vel: 2.0,
64 | base_ang_vel: 0.25,
65 | projected_gravity: 1.0,
66 | dof_pos: 1.0,
67 | dof_vel: 0.05,
68 | actions: 1.0,
69 | dif_local_rigid_body_pos: 1.0,
70 | local_ref_rigid_body_pos: 1.0,
71 | ref_motion_phase: 1.0,
72 | history_actor: 1.0,
73 | history_critic: 1.0,
74 | dr_base_com: 1.0,
75 | dr_link_mass: 1.0,
76 | dr_kp: 1.0,
77 | dr_kd: 1.0,
78 | dr_friction: 1.0,
79 | dr_ctrl_delay: 1.0,
80 | }
81 |
82 | noise_scales: {
83 | base_lin_vel: 0.0,
84 | base_ang_vel: 0.0,
85 | projected_gravity: 0.0,
86 | dof_pos: 0.00,
87 | dof_vel: 0.0,
88 | actions: 0.0,
89 | dif_local_rigid_body_pos: 0.0,
90 | local_ref_rigid_body_pos: 0.0,
91 | ref_motion_phase: 0.0,
92 | history_actor: 0.0,
93 | history_critic: 0.0,
94 | dr_base_com: 0.0,
95 | dr_link_mass: 0.0,
96 | dr_kp: 0.0,
97 | dr_kd: 0.0,
98 | dr_friction: 0.0,
99 | dr_ctrl_delay: 0.0,
100 | }
101 |
102 | add_noise_currculum: False
103 | noise_initial_value: 0.05
104 | noise_value_max: 1.0
105 | noise_value_min: 0.00001
106 | soft_dof_pos_curriculum_degree: 0.00001
107 | soft_dof_pos_curriculum_level_down_threshold: 40
108 | soft_dof_pos_curriculum_level_up_threshold: 42
109 |
110 | obs_dims:
111 | - base_lin_vel: 3
112 | - base_ang_vel: 3
113 | - projected_gravity: 3
114 | - dof_pos: ${robot.dof_obs_size}
115 | - dof_vel: ${robot.dof_obs_size}
116 | - actions: ${robot.dof_obs_size}
117 | - dif_local_rigid_body_pos: ${eval:'3 * ${robot.num_bodies} + 9'} # hardcoded for 3 extended bodies
118 | - local_ref_rigid_body_pos: ${eval:'3 * ${robot.num_bodies} + 9'}
119 | - ref_motion_phase: 1
120 | - dr_base_com: 3
121 | - dr_link_mass: 22 # = num of 'randomize_link_body_names'
122 | - dr_kp: ${robot.dof_obs_size}
123 | - dr_kd: ${robot.dof_obs_size}
124 | - dr_friction: 1
125 | - dr_ctrl_delay: 1
126 |
127 | post_compute_config: {} # Will store values after preprocessing, don't put anything here
128 | motion_len: -1
--------------------------------------------------------------------------------
/humanoidverse/config/obs/motion_tracking/main.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Modified from 'deepmimic_a2c_nolinvel_LARGEnoise_history.yaml', add priv information to critic
4 |
5 | # Be careful when using _raw, history
6 | obs:
7 | obs_dict:
8 | actor_obs: [
9 | base_ang_vel,
10 | projected_gravity,
11 | dof_pos,
12 | dof_vel,
13 | actions,
14 | ref_motion_phase,
15 | # dif_local_rigid_body_pos,
16 | # local_ref_rigid_body_pos,
17 | # history,
18 | history_actor
19 | ]
20 |
21 | critic_obs: [
22 | base_lin_vel,
23 | base_ang_vel,
24 | projected_gravity,
25 | dof_pos,
26 | dof_vel,
27 | actions,
28 | ref_motion_phase,
29 | dif_local_rigid_body_pos,
30 | local_ref_rigid_body_pos,
31 | dr_base_com,
32 | dr_link_mass,
33 | dr_kp,
34 | dr_kd,
35 | dr_friction,
36 | dr_ctrl_delay,
37 | history_critic
38 | ]
39 |
40 | obs_auxiliary: # define the compound observations referenced in obs_dict; for example, long/short histories of different lengths
41 |
42 | history_actor: {
43 | base_ang_vel: 4,
44 | projected_gravity: 4,
45 | dof_pos: 4,
46 | dof_vel: 4,
47 | actions: 4,
48 | ref_motion_phase: 4
49 | }
50 |
51 | history_critic: {
52 | base_lin_vel: 4,
53 | base_ang_vel: 4,
54 | projected_gravity: 4,
55 | dof_pos: 4,
56 | dof_vel: 4,
57 | actions: 4,
58 | ref_motion_phase: 4
59 | }
60 |
61 | obs_scales: {
62 | base_lin_vel: 2.0,
63 | base_ang_vel: 0.25,
64 | projected_gravity: 1.0,
65 | dof_pos: 1.0,
66 | dof_vel: 0.05,
67 | actions: 1.0,
68 | dif_local_rigid_body_pos: 1.0,
69 | local_ref_rigid_body_pos: 1.0,
70 | ref_motion_phase: 1.0,
71 | history_actor: 1.0,
72 | history_critic: 1.0,
73 | dr_base_com: 1.0,
74 | dr_link_mass: 1.0,
75 | dr_kp: 1.0,
76 | dr_kd: 1.0,
77 | dr_friction: 1.0,
78 | dr_ctrl_delay: 1.0,
79 | }
80 |
81 | noise_scales: {
82 | base_lin_vel: 0.0,
83 | base_ang_vel: 0.3,
84 | projected_gravity: 0.2,
85 | dof_pos: 0.01,
86 | dof_vel: 1.0,
87 | actions: 0.0,
88 | dif_local_rigid_body_pos: 0.0,
89 | local_ref_rigid_body_pos: 0.0,
90 | ref_motion_phase: 0.0,
91 | history_actor: 0.0,
92 | history_critic: 0.0,
93 | dr_base_com: 0.0,
94 | dr_link_mass: 0.0,
95 | dr_kp: 0.0,
96 | dr_kd: 0.0,
97 | dr_friction: 0.0,
98 | dr_ctrl_delay: 0.0,
99 | }
100 |
101 | add_noise_currculum: False
102 | noise_initial_value: 0.05
103 | noise_value_max: 1.0
104 | noise_value_min: 0.00001
105 | soft_dof_pos_curriculum_degree: 0.00001
106 | soft_dof_pos_curriculum_level_down_threshold: 40
107 | soft_dof_pos_curriculum_level_up_threshold: 42
108 |
109 | obs_dims:
110 | - base_lin_vel: 3
111 | - base_ang_vel: 3
112 | - projected_gravity: 3
113 | - dof_pos: ${robot.dof_obs_size}
114 | - dof_vel: ${robot.dof_obs_size}
115 | - actions: ${robot.dof_obs_size}
116 | - dif_local_rigid_body_pos: ${eval:'3 * ${robot.num_bodies} + 9'} # hardcoded for 3 extended bodies
117 | - local_ref_rigid_body_pos: ${eval:'3 * ${robot.num_bodies} + 9'}
118 | - ref_motion_phase: 1
119 | - dr_base_com: 3
120 | - dr_link_mass: 22 # = num of 'randomize_link_body_names'
121 | - dr_kp: ${robot.dof_obs_size}
122 | - dr_kd: ${robot.dof_obs_size}
123 | - dr_friction: 1
124 | - dr_ctrl_delay: 1
125 |
126 | post_compute_config: {} # Will store values after preprocessing, don't put anything here
127 | motion_len: -1
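
The flattened actor observation width is the sum of the `obs_dims` entries for the keys in `actor_obs`, with the `history_actor` compound contributing `length x dim` per key (consistent with the `(num_envs, length, dim)` buffers in `history_handler.py`). A worked example assuming `dof_obs_size = 23`:

```python
obs_dims = {"base_ang_vel": 3, "projected_gravity": 3, "dof_pos": 23,
            "dof_vel": 23, "actions": 23, "ref_motion_phase": 1}
history_actor = {k: 4 for k in obs_dims}   # the 4-step lengths listed above

direct = sum(obs_dims.values())                                   # 76
history = sum(n * obs_dims[k] for k, n in history_actor.items())  # 4 * 76 = 304
print(direct + history)                    # 380 = actor_obs width for 23 DoF
```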
--------------------------------------------------------------------------------
/humanoidverse/config/opt/record.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | env:
4 | config:
5 | save_motion: True
6 | eval_timestamp: ${eval_timestamp}
7 | save_note: null
8 | ckpt_dir: null
9 | save_total_steps: 10000
--------------------------------------------------------------------------------
/humanoidverse/config/opt/wandb.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | use_wandb: True
4 |
5 | wandb:
6 | wandb_project: null
7 | wandb_tags: 'online' # 'online' or 'offline' or 'disabled'
8 | wandb_group: null
9 | wandb_id: null
10 | wandb_entity: agileh2o
11 | wandb_dir: ${experiment_dir}/.wandb
--------------------------------------------------------------------------------
/humanoidverse/config/rewards/motion_tracking/main.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # defaults:
4 | # - reward_base
5 |
6 |
7 | rewards:
8 | set_reward: Weiji
9 | set_reward_date: 20250417
10 | only_positive_rewards: False
11 | reward_scales:
12 | teleop_contact_mask: 0.5
13 | # teleop_contact_mask: 0.5
14 | teleop_max_joint_position: 1.0
15 |
16 | teleop_body_position_extend: 1.0
17 | teleop_vr_3point: 1.6
18 | teleop_body_position_feet: 1.5
19 | teleop_body_rotation_extend: 0.5
20 | teleop_body_ang_velocity_extend: 0.5
21 | teleop_body_velocity_extend: 0.5
22 | teleop_joint_position: 1.0
23 | teleop_joint_velocity: 1.0
24 | penalty_torques: -0.000001
25 | # penalty_dof_vel: -0.0001
26 | # penalty_dof_acc: -2.5e-8
27 | penalty_action_rate: -0.5 # need to be tuned up to -0.5 for real robot
28 |
29 | feet_air_time: 1.0
30 | penalty_feet_contact_forces: -0.01
31 | penalty_stumble: -2.0
32 | penalty_slippage: -1.0
33 |
34 | limits_dof_pos: -10.0
35 | limits_dof_vel: -5.0
36 | limits_torque: -5.0
37 | termination: -200.0
38 | collision: -30.0
39 |
40 |
41 | teleop_body_pos_lowerbody_weight : 1.0
42 | teleop_body_pos_upperbody_weight : 1.0
43 |
44 | desired_feet_max_height_for_this_air: 0.2
45 |
46 | reward_tracking_sigma:
47 | teleop_max_joint_pos: 1.0
48 |
49 | teleop_upper_body_pos: 0.015
50 | teleop_lower_body_pos: 0.015
51 | teleop_vr_3point_pos: 0.015
52 | teleop_feet_pos: 0.01
53 |
54 | teleop_body_rot: 0.1
55 | teleop_body_vel: 1.0
56 | teleop_body_ang_vel: 15.0
57 | teleop_joint_pos: 0.3
58 | teleop_joint_vel: 30.0
59 |
60 | adaptive_tracking_sigma:
61 | enable: True
62 | alpha: 1e-3
63 | type: origin # origin, mean, scale
64 | scale: 1.0
65 |
66 |
67 | locomotion_max_contact_force: 400.0
68 | desired_feet_air_time: 0.3
69 |
70 | reward_penalty_curriculum: True
71 | reward_initial_penalty_scale : 0.10
72 | reward_min_penalty_scale: 0.0
73 | reward_max_penalty_scale: 1.0
74 | reward_penalty_level_down_threshold: 40
75 | reward_penalty_level_up_threshold: 42
76 | reward_penalty_degree: 0.00001 # 0.00001
77 | num_compute_average_epl: 10000
78 |
79 | reward_limit:
80 | soft_dof_pos_limit: 0.9
81 | soft_dof_vel_limit: 0.9
82 | soft_torque_limit: 0.825
83 |
84 | reward_limits_curriculum:
85 | soft_dof_pos_curriculum: True
86 | soft_dof_pos_initial_limit: 0.95
87 | soft_dof_pos_max_limit: 0.95
88 | soft_dof_pos_min_limit: 0.95
89 | soft_dof_pos_curriculum_degree: 0.00000025
90 | soft_dof_pos_curriculum_level_down_threshold: 40
91 | soft_dof_pos_curriculum_level_up_threshold: 42
92 |
93 | soft_dof_vel_curriculum: True
94 | soft_dof_vel_initial_limit: 0.95
95 | soft_dof_vel_max_limit: 0.95
96 | soft_dof_vel_min_limit: 0.95
97 | soft_dof_vel_curriculum_degree: 0.00000025
98 | soft_dof_vel_curriculum_level_down_threshold: 40
99 | soft_dof_vel_curriculum_level_up_threshold: 42
100 |
101 | soft_torque_curriculum: True
102 | soft_torque_initial_limit: 0.85
103 | soft_torque_max_limit: 0.85
104 | soft_torque_min_limit: 0.85
105 | soft_torque_curriculum_degree: 0.00000025
106 | soft_torque_curriculum_level_down_threshold: 40
107 | soft_torque_curriculum_level_up_threshold: 42
108 |
109 | reward_penalty_reward_names : [
110 | "penalty_torques",
111 | "penalty_dof_acc",
112 | "penalty_dof_vel",
113 | "penalty_action_rate",
114 | "limits_dof_pos",
115 | "limits_dof_vel",
116 | "limits_torque",
117 | "feet_heading_alignment",
118 | "penalty_feet_ori",
119 | "penalty_slippage",
120 | "collision"
121 | ]
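
The `reward_tracking_sigma` entries parameterize Gaussian-style tracking kernels of the form `r = exp(-err / sigma)`; whether the error term is squared or averaged per body is decided in the reward code, so the sketch below is illustrative only:

```python
import torch

def tracking_reward(err: torch.Tensor, sigma: float) -> torch.Tensor:
    """Gaussian-style kernel: 1 at zero tracking error, decaying at rate 1/sigma."""
    return torch.exp(-err / sigma)

# e.g. upper-body position tracking with sigma = 0.015 from above:
err = torch.tensor([0.0, 0.01, 0.05])       # per-env aggregated position error
print(tracking_reward(err, sigma=0.015))    # tensor([1.0000, 0.5134, 0.0357])
```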
--------------------------------------------------------------------------------
/humanoidverse/config/robot/robot_base.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | robot:
4 | only_track_leg: ???
5 | # Observation parameters
6 | dof_obs_size: ???
7 | number_of_actions: ???
8 | self_obs_max_coords: ???
9 | num_bodies: ???
10 |
11 | # Control parameters
12 | # dof_body_ids: ???
13 | # dof_offsets: ???
14 |
15 | algo_obs_dim_dict: ???
16 |
17 | key_bodies: null
18 | contact_bodies: null
19 |
20 | foot_name: null
21 |
22 | init_state: null
23 |
24 | contact_pairs_multiplier: 16
25 |
26 | num_key_bodies: ${len:${robot.key_bodies}}
27 | mimic_small_marker_bodies: null
28 |
29 |
30 | control:
31 | # Can be "isaac_pd" or "P"/"V"/"T" for Proportional, Velocity, Torque control
32 | control_type: isaac_pd
33 | # PD Drive parameters:
34 | stiffness: null
35 | damping: null
36 | # action scale: target angle = actionScale * action + defaultAngle
37 | # only used in manual PD control
38 | action_scale: 1.0
39 | # Used with isaac pd controller
40 | isaac_pd_scale: False # This is needed for the SMPL model due to weight variations
41 | clamp_actions: 1.0
42 | clip_torques: ???
43 |
44 | asset:
45 | collapse_fixed_joints: null
46 | replace_cylinder_with_capsule: null
47 | flip_visual_attachments: null
48 | armature: null
49 | thickness: null
50 | max_angular_velocity: null
51 | max_linear_velocity: null
52 | density: null
53 | angular_damping: null
54 | linear_damping: null
55 | disable_gravity: null
56 | fix_base_link: null
57 | default_dof_drive_mode: 1 # see GymDofDriveModeFlags (0 is none, 1 is pos tgt, 2 is vel tgt, 3 effort)
58 |
59 | robot_type: ???
60 | urdf_file: null
61 | usd_file: null
62 | xml_file: null
63 | asset_root: "description/robots"
64 | self_collisions: 0
65 | filter_ints: null
66 |
67 | # motion_lib:
68 | # _target_: phys_anim.utils.motion_lib.MotionLib
69 | # motion_file: ${motion_file}
70 | # ref_height_adjust: 0.
71 | # fix_motion_heights: True
72 |
--------------------------------------------------------------------------------
/humanoidverse/config/simulator/genesis.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Env structure
4 | simulator:
5 | _target_: humanoidverse.simulator.genesis.genesis.Genesis
6 | _recursive_: False
7 | config:
8 | name: "genesis"
9 | terrain: ${terrain}
10 | sim:
11 | fps: 200
12 | control_decimation: 4
13 | substeps: 1
14 | render_mode: "human" # [None, "human", "rgb_array"]
15 | render_interval: 1
16 |
17 | scene:
18 | num_envs: ${num_envs}
19 | env_spacing: ${env.config.env_spacing}
20 | replicate_physics: True
--------------------------------------------------------------------------------
/humanoidverse/config/simulator/isaacgym.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Env structure
4 | simulator:
5 | _target_: humanoidverse.simulator.isaacgym.isaacgym.IsaacGym
6 | _recursive_: False
7 | config:
8 | name: "isaacgym"
9 | terrain: ${terrain}
10 | sim:
11 | fps: 200
12 | control_decimation: 4
13 | substeps: 1
14 | physx:
15 | num_threads: 4
16 | solver_type: 1 # 0: pgs, 1: tgs
17 | num_position_iterations: 4
18 | num_velocity_iterations: 0
19 | contact_offset: 0.01
20 | rest_offset: 0.0
21 | bounce_threshold_velocity: 0.5
22 | max_depenetration_velocity: 1.0
23 | default_buffer_size_multiplier: 5
24 | contact_collection: 2
--------------------------------------------------------------------------------
/humanoidverse/config/simulator/isaacsim.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Env structure
4 | simulator:
5 | _target_: humanoidverse.simulator.isaacsim.isaacsim.IsaacSim
6 | _recursive_: False
7 | config:
8 | name: "isaacsim"
9 | terrain: ${terrain}
10 | sim:
11 | fps: 200
12 | control_decimation: 4
13 | substeps: 1
14 | physx:
15 | num_threads: 10
16 | solver_type: 1 # 0: pgs, 1: tgs
17 | num_position_iterations: 4
18 | num_velocity_iterations: 0
19 | contact_offset: 0.01
20 | rest_offset: 0.0
21 | bounce_threshold_velocity: 0.5
22 | max_depenetration_velocity: 1.0
23 | default_buffer_size_multiplier: 5
24 | contact_collection: 2
25 | render_mode: "human" # [None, "human", "rgb_array"]
26 | render_interval: 4
27 |
28 | scene:
29 | num_envs: ${num_envs}
30 | env_spacing: ${env.config.env_spacing}
31 | replicate_physics: True
--------------------------------------------------------------------------------
/humanoidverse/config/simulator/mujoco.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Env structure
4 | simulator:
5 | _target_: humanoidverse.simulator.mujoco.mujoco.MuJoCo
6 | _recursive_: False
7 | config:
8 | name: "mujoco"
9 | terrain: ${terrain}
10 | sim:
11 | fps: 500
12 | control_decimation: 10
13 | # fps: 200
14 | # control_decimation: 4
15 | render_mode: "human" # [None, "human", "rgb_array"]
16 | scene:
17 | num_envs: ${num_envs}
18 | env_spacing: ${env.config.env_spacing}
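
Note that the effective control step is `control_decimation / fps` in every simulator config: MuJoCo's 10/500 and IsaacGym's 4/200 both give the 0.02 s (50 Hz) loop that `ctrl_dt: 0.02` assumes in the deploy config:

```python
for name, fps, decimation in [("mujoco", 500, 10), ("isaacgym", 200, 4)]:
    print(f"{name}: ctrl_dt = {decimation / fps} s")  # 0.02 s -> 50 Hz
```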
--------------------------------------------------------------------------------
/humanoidverse/config/terrain/terrain_base.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | terrain:
4 | static_friction: 1.0
5 | dynamic_friction: 1.0
6 | restitution: 0.0
--------------------------------------------------------------------------------
/humanoidverse/config/terrain/terrain_locomotion.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - terrain_base
5 |
6 | terrain:
7 | mesh_type : 'trimesh' # "heightfield" # none, plane, heightfield or trimesh
8 | horizontal_scale : 0.1 # [m]
9 | vertical_scale : 0.005 # [m]
10 | border_size : 40 # [m]
11 | curriculum : False
12 | # curriculum : False
13 | static_friction: 1.0
14 | dynamic_friction: 1.0
15 | restitution: 0.0
16 | # rough terrain only:
17 | measure_heights : False # keep it False
18 | measured_points_x : [ 0.] # 1mx1.6m rectangle (without center line)
19 | measured_points_y : [ 0.]
20 | selected : False # select a unique terrain type and pass all arguments
21 | terrain_kwargs : null # Dict of arguments for selected terrain
22 | max_init_terrain_level : 9 # starting curriculum state
23 | terrain_length : 8.
24 | terrain_width : 8.
25 | num_rows: 10 # number of terrain rows (levels)
26 | num_cols : 20 # number of terrain cols (types)
27 | terrain_types : ['flat', 'rough', 'low_obst', 'smooth_slope', 'rough_slope'] # do not duplicate!
28 | terrain_proportions : [0.2, 0.6, 0.2, 0.0, 0.0]
29 | # trimesh only:
30 | slope_treshold : 0.75 # slopes above this threshold will be corrected to vertical surfaces
--------------------------------------------------------------------------------
/humanoidverse/config/terrain/terrain_locomotion_plane.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - terrain_base
5 |
6 | terrain:
7 | mesh_type : 'plane' # "heightfield" # none, plane, heightfield or trimesh
8 | horizontal_scale : 0.1 # [m]
9 | vertical_scale : 0.005 # [m]
10 | border_size : 40 # [m]
11 | curriculum : False
12 | # curriculum : False
13 | static_friction: 1.0
14 | dynamic_friction: 1.0
15 | restitution: 0.0
16 | # rough terrain only:
17 | measure_heights : False # keep it False
18 | measured_points_x : [ 0.] # 1mx1.6m rectangle (without center line)
19 | measured_points_y : [ 0.]
20 | selected : False # select a unique terrain type and pass all arguments
21 | terrain_kwargs : null # Dict of arguments for selected terrain
22 | max_init_terrain_level : 9 # starting curriculum state
23 | terrain_length : 8.
24 | terrain_width : 8.
25 | num_rows: 10 # number of terrain rows (levels)
26 | num_cols : 20 # number of terrain cols (types)
27 | terrain_types : ['flat', 'rough', 'low_obst', 'smooth_slope', 'rough_slope'] # do not duplicate!
28 | terrain_proportions : [0.2, 0.6, 0.2, 0.0, 0.0]
29 | # trimesh only:
30 | slope_treshold : 0.75 # slopes above this threshold will be corrected to vertical surfaces
--------------------------------------------------------------------------------
/humanoidverse/deploy/__init__.py:
--------------------------------------------------------------------------------
1 | from .urcirobot import URCIRobot, ObsCfg, URCIPolicyObs, CfgType
2 |
3 |
4 |
--------------------------------------------------------------------------------
/humanoidverse/deploy/external/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | # Grammar: ckpt=_external_xxxx
3 | # Then, ckpt is sent to here `GetExternalPolicy(description)` and get parsed
4 |
5 | from humanoidverse.deploy import *
6 | from typing import Dict
7 | from .core import *
8 |
9 |
10 |
--------------------------------------------------------------------------------
/humanoidverse/deploy/external/core.py:
--------------------------------------------------------------------------------
1 |
2 | # Grammar: ckpt=_external_xxxx
3 | # Then, ckpt is sent to here `GetExternalPolicy(description)` and get parsed
4 |
5 | from humanoidverse.deploy import *
6 | from typing import Dict
7 |
8 | def GetZeroPolicy(name: str)->URCIPolicyObs:
9 | # Usage: $EVALMJC=_external_zero
10 | import numpy as np
11 |
12 | def policy_fn(obs_dict: Dict[str, np.ndarray]) -> np.ndarray:
13 | action = np.zeros((1,23))
14 | return action
15 |
16 | def obs_fn(robot: URCIRobot)->np.ndarray:
17 | # print(robot.timer)
18 | q_split = np.split(robot.q, [6, 12, 15, 15+4], axis=0)
19 | print(q_split)
20 | return np.zeros(1)
21 |
22 | return (obs_fn, policy_fn)
23 |
24 |
25 | def GetSinPolicy(name: str)->URCIPolicyObs:
26 | # Usage: $EVALMJC=_external_sin
27 | import numpy as np
28 |
29 | action = np.zeros((1,23))
30 |
31 | def policy_fn(obs_dict: Dict[str, np.ndarray]) -> np.ndarray:
32 | timer = obs_dict['actor_obs'][0]
33 |
34 | # frequency =
35 |
36 | # action[0,3] = np.sin(timer) * 0.3
37 | # action[0,9] = np.cos(timer) * 0.3
38 |
39 |
40 |
41 | action[0, 4] = np.sin(timer) * 0.5 * 4
42 | action[0, 5] = np.cos(timer) * 0.2 * 4
43 |
44 | action[0, 10] = np.cos(timer) * 0.5 * 4
45 | action[0, 11] = np.sin(timer) * 0.2 * 4
46 | return action
47 |
48 | def obs_fn(robot: URCIRobot)->np.ndarray:
49 | # print(robot.timer)
50 | return np.array([robot.timer * robot.dt]) # 1-D so policy_fn can index actor_obs[0]
51 |
52 | return (obs_fn, policy_fn)
53 |
54 | def GetExternalPolicy(description: str, config: CfgType)->URCIPolicyObs:
55 | external_name = description[len('_external'):]
56 |
57 | if external_name.startswith('_zero'):
58 | return GetZeroPolicy(external_name[5:])
59 | elif external_name.startswith('_sin'):
60 | return GetSinPolicy(external_name[4:])
61 | else:
62 | raise ValueError(f"Unknown external policy: {external_name}")
63 |
64 |
65 |
66 |
67 |
68 |
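
Adding a new policy to this grammar means returning another `(obs_fn, policy_fn)` pair and wiring a branch into `GetExternalPolicy`; the constant-pose example below is hypothetical (name, joint index, offset, and branch are all illustrative):

```python
import numpy as np

def GetConstPolicy(name: str):
    # Hypothetical example, not part of the repo: hold joint 4 at a 0.3 rad offset.
    # Intended usage would be: $EVALMJC=_external_const
    def policy_fn(obs_dict):
        action = np.zeros((1, 23))
        action[0, 4] = 0.3
        return action

    def obs_fn(robot):
        return np.zeros(1)  # this policy ignores the robot state

    return (obs_fn, policy_fn)

# and in GetExternalPolicy:
#     elif external_name.startswith('_const'):
#         return GetConstPolicy(external_name[6:])
```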
--------------------------------------------------------------------------------
/humanoidverse/envs/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_task import *
2 | from .env_utils import *
3 | from .legged_base_task import *
4 | from .motion_tracking import *
--------------------------------------------------------------------------------
/humanoidverse/envs/base_task/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/envs/base_task/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/envs/env_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/envs/env_utils/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/envs/env_utils/history_handler.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import Tensor
3 | from termcolor import colored
4 | from loguru import logger
5 |
6 |
7 |
8 | # @time_prot_cls_dec_
9 | # TimePortion: 1.57%
10 | class HistoryHandler:
11 |
12 | def __init__(self, num_envs, history_config, obs_dims, device):
13 | self.obs_dims = obs_dims
14 | self.device = device
15 | self.num_envs = num_envs
16 | self.history = {}
17 |
18 | self.buffer_config = {}
19 | for aux_key, aux_config in history_config.items():
20 | for obs_key, obs_num in aux_config.items():
21 | if obs_key in self.buffer_config:
22 | self.buffer_config[obs_key] = max(self.buffer_config[obs_key], obs_num)
23 | else:
24 | self.buffer_config[obs_key] = obs_num
25 |
26 | for key in self.buffer_config.keys():
27 | self.history[key] = torch.zeros(num_envs, self.buffer_config[key], obs_dims[key], device=self.device)
28 |
29 | logger.info(colored("History Handler Initialized", "green"))
30 | for key, value in self.buffer_config.items():
31 | logger.info(f"Key: {key}, Value: {value}")
32 |
33 | def reset(self, reset_ids):
34 | if len(reset_ids)==0:
35 | return
36 | assert set(self.buffer_config.keys()) == set(self.history.keys()), f"History keys mismatch\n{self.buffer_config.keys()}\n{self.history.keys()}"
37 | for key in self.history.keys():
38 | self.history[key][reset_ids] *= 0.
39 |
40 | def add(self, key: str, value: Tensor):
41 | assert key in self.history.keys(), f"Key {key} not found in history"
42 | val = self.history[key].clone()
43 | self.history[key][:, 1:] = val[:, :-1]
44 | self.history[key][:, 0] = value.clone()
45 |
46 | def query(self, key: str):
47 | assert key in self.history.keys(), f"Key {key} not found in history"
48 | return self.history[key].clone()
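
A minimal usage sketch, tying the handler to the `obs_auxiliary` configs above (toy sizes, CPU device):

```python
import torch
from humanoidverse.envs.env_utils.history_handler import HistoryHandler

history_config = {"history_actor": {"base_ang_vel": 4, "dof_pos": 4}}
obs_dims = {"base_ang_vel": 3, "dof_pos": 23}

hh = HistoryHandler(num_envs=2, history_config=history_config,
                    obs_dims=obs_dims, device="cpu")
hh.add("base_ang_vel", torch.ones(2, 3))  # newest frame is written to slot 0
stacked = hh.query("base_ang_vel")        # (2, 4, 3): slot 0 newest, slot 3 oldest
hh.reset(torch.tensor([0]))               # zero out env 0's buffers
```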
--------------------------------------------------------------------------------
/humanoidverse/envs/env_utils/visualization.py:
--------------------------------------------------------------------------------
1 |
2 | class Point:
3 | # this is for visualization
4 | def __init__(self, pt):
5 | self.x = pt[0]
6 | self.y = pt[1]
7 | self.z = pt[2]
--------------------------------------------------------------------------------
/humanoidverse/envs/legged_base_task/__init__.py:
--------------------------------------------------------------------------------
1 | from .legged_robot_base import *
--------------------------------------------------------------------------------
/humanoidverse/envs/motion_tracking/__init__.py:
--------------------------------------------------------------------------------
1 | from .motion_tracking import *
--------------------------------------------------------------------------------
/humanoidverse/isaac_utils/isaac_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/isaac_utils/isaac_utils/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/isaac_utils/isaac_utils/maths.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | import random
4 | import os
5 |
6 | @torch.jit.script
7 | def normalize(x, eps: float = 1e-9):
8 | return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
9 |
10 | @torch.jit.script
11 | def torch_rand_float(lower, upper, shape, device):
12 | # type: (float, float, Tuple[int, int], str) -> Tensor
13 | return (upper - lower) * torch.rand(*shape, device=device) + lower
14 |
15 | @torch.jit.script
16 | def copysign(a, b):
17 | # type: (float, Tensor) -> Tensor
18 | a = torch.tensor(a, device=b.device, dtype=torch.float).repeat(b.shape[0])
19 | return torch.abs(a) * torch.sign(b)
20 |
21 |
22 |
23 | def set_seed(seed, torch_deterministic=False):
24 | """ set seed across modules """
25 | if seed == -1 and torch_deterministic:
26 | seed = 42
27 | elif seed == -1:
28 | seed = np.random.randint(0, 10000)
29 | print("Setting seed: {}".format(seed))
30 |
31 | random.seed(seed)
32 | np.random.seed(seed)
33 | torch.manual_seed(seed)
34 | os.environ["PYTHONHASHSEED"] = str(seed)
35 | torch.cuda.manual_seed(seed)
36 | torch.cuda.manual_seed_all(seed)
37 |
38 | if torch_deterministic:
39 | # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
40 | os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
41 | torch.backends.cudnn.benchmark = False
42 | torch.backends.cudnn.deterministic = True
43 | torch.use_deterministic_algorithms(True)
44 | else:
45 | torch.backends.cudnn.benchmark = True
46 | torch.backends.cudnn.deterministic = False
47 |
48 | return seed
49 |
50 |
--------------------------------------------------------------------------------
/humanoidverse/isaac_utils/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | setup(
4 | name="isaac_utils",
5 | packages=["isaac_utils"],
6 | version="0.0.1",
7 | description="Unified torch env_utils for IsaacGym and IsaacSim",
8 | author="",
9 | classifiers=[],
10 | )
--------------------------------------------------------------------------------
/humanoidverse/simulator/genesis/genesis_viewer.py:
--------------------------------------------------------------------------------
1 |
2 | class Viewer:
3 | """This class handles controlling the camera associated with a viewport in the simulator.
4 |
5 | It can be used to set the viewpoint camera to track different origin types:
6 |
7 | - **world**: the center of the world (static)
8 | - **env**: the center of an environment (static)
9 | - **asset_root**: the root of an asset in the scene (e.g. tracking a robot moving in the scene)
10 |
11 | On creation, the camera is set to track the origin type specified in the configuration.
12 |
13 | For the :attr:`asset_root` origin type, the camera is updated at each rendering step to track the asset's
14 | root position. For this, it registers a callback to the post update event stream from the simulation app.
15 | """
16 |
17 | def __init__(self):
18 | """Initialize the ViewportCameraController.
19 |
20 | Args:
21 | env: The environment.
22 | cfg: The configuration for the viewport camera controller.
23 |
24 | Raises:
25 | ValueError: If origin type is configured to be "env" but :attr:`cfg.env_index` is out of bounds.
26 | ValueError: If origin type is configured to be "asset_root" but :attr:`cfg.asset_name` is unset.
27 |
28 | """
29 | self._cfg = None
30 | return
31 |
32 | def __del__(self):
33 | """Unsubscribe from the callback."""
34 | # use hasattr to handle case where __init__ has not completed before __del__ is called
35 | return
36 |
37 | """
38 | Properties
39 | """
40 |
41 | @property
42 | def cfg(self):
43 | """The configuration for the viewer."""
44 | return self._cfg
45 |
46 | """
47 | Public Functions
48 | """
49 |
50 | def set_view_env_index(self, env_index: int):
51 | """Sets the environment index for the camera view.
52 |
53 | Args:
54 | env_index: The index of the environment to set the camera view to.
55 |
56 | Raises:
57 | ValueError: If the environment index is out of bounds. It should be between 0 and num_envs - 1.
58 | """
59 | return
60 |
61 | def update_view_to_world(self):
62 | """Updates the viewer's origin to the origin of the world which is (0, 0, 0)."""
63 | return
64 |
65 | def update_view_to_env(self):
66 | """Updates the viewer's origin to the origin of the selected environment."""
67 | return
68 |
69 | def update_view_to_asset_root(self, asset_name: str):
70 | """Updates the viewer's origin based upon the root of an asset in the scene.
71 |
72 | Args:
73 | asset_name: The name of the asset in the scene. The name should match the name of the
74 | asset in the scene.
75 |
76 | Raises:
77 | ValueError: If the asset is not in the scene.
78 | """
79 | return
80 |
81 | def update_view_location(self, eye, lookat):
82 | """Updates the camera view pose based on the current viewer origin and the eye and lookat positions.
83 |
84 | Args:
85 | eye: The eye position of the camera. If None, the current eye position is used.
86 | lookat: The lookat position of the camera. If None, the current lookat position is used.
87 | """
88 | return
89 |
90 | """
91 | Private Functions
92 | """
93 |
94 | def _update_tracking_callback(self, event):
95 | """Updates the camera view at each rendering step."""
96 | return
97 |
--------------------------------------------------------------------------------
/humanoidverse/simulator/genesis/tmp_gs_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 |
5 | def wrap_to_pi(angles):
6 | angles %= 2 * np.pi
7 | angles -= 2 * np.pi * (angles > np.pi)
8 | return angles
9 |
10 |
11 | def gs_rand_float(lower, upper, shape, device):
12 | return (upper - lower) * torch.rand(size=shape, device=device) + lower
13 |
14 |
15 | def gs_inv_quat(quat):
16 | qw, qx, qy, qz = quat.unbind(-1)
17 | inv_quat = torch.stack([1.0 * qw, -qx, -qy, -qz], dim=-1)
18 | return inv_quat
19 |
20 |
21 | def gs_transform_by_quat(pos, quat):
22 | qw, qx, qy, qz = quat.unbind(-1)
23 |
24 | rot_matrix = torch.stack(
25 | [
26 | 1.0 - 2 * qy**2 - 2 * qz**2,
27 | 2 * qx * qy - 2 * qz * qw,
28 | 2 * qx * qz + 2 * qy * qw,
29 | 2 * qx * qy + 2 * qz * qw,
30 | 1 - 2 * qx**2 - 2 * qz**2,
31 | 2 * qy * qz - 2 * qx * qw,
32 | 2 * qx * qz - 2 * qy * qw,
33 | 2 * qy * qz + 2 * qx * qw,
34 | 1 - 2 * qx**2 - 2 * qy**2,
35 | ],
36 | dim=-1,
37 | ).reshape(*quat.shape[:-1], 3, 3)
38 | rotated_pos = torch.matmul(rot_matrix, pos.unsqueeze(-1)).squeeze(-1)
39 |
40 | return rotated_pos
41 |
42 |
43 | def gs_quat2euler(quat): # xyz
44 | # Extract quaternion components
45 | qw, qx, qy, qz = quat.unbind(-1)
46 |
47 | # Roll (x-axis rotation)
48 | sinr_cosp = 2 * (qw * qx + qy * qz)
49 | cosr_cosp = 1 - 2 * (qx * qx + qy * qy)
50 | roll = torch.atan2(sinr_cosp, cosr_cosp)
51 |
52 | # Pitch (y-axis rotation)
53 | sinp = 2 * (qw * qy - qz * qx)
54 | pitch = torch.where(
55 | torch.abs(sinp) >= 1,
56 | torch.sign(sinp) * torch.tensor(torch.pi / 2),
57 | torch.asin(sinp),
58 | )
59 |
60 | # Yaw (z-axis rotation)
61 | siny_cosp = 2 * (qw * qz + qx * qy)
62 | cosy_cosp = 1 - 2 * (qy * qy + qz * qz)
63 | yaw = torch.atan2(siny_cosp, cosy_cosp)
64 |
65 | return torch.stack([roll, pitch, yaw], dim=-1)
66 |
67 |
68 | def gs_euler2quat(xyz): # xyz
69 |
70 | roll, pitch, yaw = xyz.unbind(-1)
71 |
72 | cosr = (roll * 0.5).cos()
73 | sinr = (roll * 0.5).sin()
74 | cosp = (pitch * 0.5).cos()
75 | sinp = (pitch * 0.5).sin()
76 | cosy = (yaw * 0.5).cos()
77 | siny = (yaw * 0.5).sin()
78 |
79 | qw = cosr * cosp * cosy + sinr * sinp * siny
80 | qx = sinr * cosp * cosy - cosr * sinp * siny
81 | qy = cosr * sinp * cosy + sinr * cosp * siny
82 | qz = cosr * cosp * siny - sinr * sinp * cosy
83 |
84 | return torch.stack([qw, qx, qy, qz], dim=-1)
85 |
86 |
87 | def gs_quat_from_angle_axis(angle, axis):
88 | theta = (angle / 2).unsqueeze(-1)
89 | xyz = normalize(axis) * theta.sin()
90 | w = theta.cos()
91 | return normalize(torch.cat([w, xyz], dim=-1))
92 |
93 |
94 | def normalize(x, eps: float = 1e-9):
95 | return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
96 |
97 |
98 | def gs_quat_mul(a, b):
99 | assert a.shape == b.shape
100 | shape = a.shape
101 | a = a.reshape(-1, 4)
102 | b = b.reshape(-1, 4)
103 |
104 | w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
105 | w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
106 | ww = (z1 + x1) * (x2 + y2)
107 | yy = (w1 - y1) * (w2 + z2)
108 | zz = (w1 + y1) * (w2 - z2)
109 | xx = ww + yy + zz
110 | qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
111 | w = qq - ww + (z1 - y1) * (y2 - z2)
112 | x = qq - xx + (x1 + w1) * (x2 + w2)
113 | y = qq - yy + (w1 - x1) * (y2 + z2)
114 | z = qq - zz + (z1 + y1) * (w2 - x2)
115 |
116 | quat = torch.stack([w, x, y, z], dim=-1).view(shape)
117 |
118 | return quat
119 |
120 |
121 | def gs_quat_apply(a, b):
122 | shape = b.shape
123 | a = a.reshape(-1, 4)
124 | b = b.reshape(-1, 3)
125 | xyz = a[:, 1:]
126 | t = xyz.cross(b, dim=-1) * 2
127 | return (b + a[:, :1] * t + xyz.cross(t, dim=-1)).view(shape)
128 |
129 |
130 | def gs_quat_apply_yaw(quat, vec):
131 | quat_yaw = quat.clone().view(-1, 4)
132 | quat_yaw[:, 1:3] = 0.
133 | quat_yaw = normalize(quat_yaw)
134 | return gs_quat_apply(quat_yaw, vec)
135 |
136 |
137 | def gs_quat_conjugate(a):
138 | shape = a.shape
139 | a = a.reshape(-1, 4)
140 | return torch.cat((a[:, :1], -a[:, 1:], ), dim=-1).view(shape)
141 |
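142 | # Sanity-check sketch for the Hamilton-product shortcut in gs_quat_mul above
143 | # (WXYZ convention): multiplying by the identity quaternion returns the input,
144 | # and multiplying a unit quaternion by its conjugate yields the identity.
145 | #   q = torch.tensor([[0.5, 0.5, 0.5, 0.5]])
146 | #   eye = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
147 | #   assert torch.allclose(gs_quat_mul(q, eye), q)
148 | #   assert torch.allclose(gs_quat_mul(q, gs_quat_conjugate(q)), eye)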
--------------------------------------------------------------------------------
/humanoidverse/simulator/isaacsim/event_cfg.py:
--------------------------------------------------------------------------------
1 |
2 | from omni.isaac.lab.managers import EventTermCfg as EventTerm
3 |
4 | from omni.isaac.lab.managers import SceneEntityCfg
5 | from omni.isaac.lab.utils import configclass
6 |
7 |
8 | import omni.isaac.lab.envs.mdp as mdp
9 |
10 | # @configclass
11 | # class EventCfg:
12 | # """Configuration for events."""
13 |
14 | # scale_body_mass = EventTerm(
15 | # func=mdp.randomize_rigid_body_mass,
16 | # mode="startup",
17 | # params={
18 | # "asset_cfg": SceneEntityCfg("robot", body_names=".*"),
19 | # "mass_distribution_params": (0.8, 1.2),
20 | # "operation": "scale",
21 | # },
22 | # )
23 |
24 | # random_joint_friction = EventTerm(
25 | # func=mdp.randomize_joint_parameters,
26 | # mode="startup",
27 | # params={
28 | # "asset_cfg": SceneEntityCfg("robot", joint_names=".*"),
29 | # "friction_distribution_params": (0.5, 1.25),
30 | # "operation": "scale",
31 | # },
32 | # )
33 |
34 | @configclass
35 | class EventCfg:
36 | """Configuration for events."""
37 | scale_body_mass = None
38 | random_joint_friction = None
39 |
40 |
41 |
--------------------------------------------------------------------------------
/humanoidverse/simulator/isaacsim/events.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import numpy as np
4 | import torch
5 | from typing import TYPE_CHECKING, Literal
6 |
7 | import omni.isaac.lab.envs.mdp as mdp
8 | import omni.isaac.lab.utils.math as math_utils
9 | from omni.isaac.lab.assets import Articulation, RigidObject
10 | from omni.isaac.lab.managers import SceneEntityCfg
11 |
12 | from omni.isaac.lab.envs import ManagerBasedEnv
13 |
14 | def resolve_dist_fn(
15 | distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform",
16 | ):
17 | dist_fn = math_utils.sample_uniform
18 |
19 | if distribution == "uniform":
20 | dist_fn = math_utils.sample_uniform
21 | elif distribution == "log_uniform":
22 | dist_fn = math_utils.sample_log_uniform
23 | elif distribution == "gaussian":
24 | dist_fn = math_utils.sample_gaussian
25 | else:
26 | raise ValueError(f"Unrecognized distribution {distribution}")
27 |
28 | return dist_fn
29 |
30 | def randomize_body_com(
31 | env: ManagerBasedEnv,
32 | env_ids: torch.Tensor | None,
33 | asset_cfg: SceneEntityCfg,
34 | distribution_params: tuple[float, float] | tuple[torch.Tensor, torch.Tensor],
35 | operation: Literal["add", "abs", "scale"],
36 | distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform",
37 | num_envs: int = 1, # number of environments
38 | ):
39 | """Randomize the com of the bodies by adding, scaling or setting random values.
40 |
41 | This function allows randomizing the center of mass of the bodies of the asset. The function samples random values from the
42 | given distribution parameters and adds, scales or sets the values into the physics simulation based on the operation.
43 |
44 | .. tip::
45 |         This function uses CPU tensors to assign the body CoMs. It is recommended to use this function
46 | only during the initialization of the environment.
47 | """
48 | # extract the used quantities (to enable type-hinting)
49 | asset: RigidObject | Articulation = env.scene[asset_cfg.name]
50 |
51 | # resolve environment ids
52 | if env_ids is None:
53 | env_ids = torch.arange(num_envs, device="cpu")
54 | else:
55 | env_ids = env_ids.cpu()
56 |
57 | # resolve body indices
58 | if asset_cfg.body_ids == slice(None):
59 | body_ids = torch.arange(asset.num_bodies, dtype=torch.int, device="cpu")
60 | else:
61 | body_ids = torch.tensor(asset_cfg.body_ids, dtype=torch.int, device="cpu")
62 |
63 |     # get the current CoMs of the bodies (num_assets, num_bodies)
64 | coms = asset.root_physx_view.get_coms()
65 | # apply randomization on default values
67 | coms[env_ids[:, None], body_ids] = env.default_coms[env_ids[:, None], body_ids].clone()
68 |
69 | dist_fn = resolve_dist_fn(distribution)
70 |
71 | if isinstance(distribution_params[0], torch.Tensor):
72 | distribution_params = (distribution_params[0].to(coms.device), distribution_params[1].to(coms.device))
73 |
74 | env.base_com_bias[env_ids, :] = dist_fn(
75 | *distribution_params, (env_ids.shape[0], env.base_com_bias.shape[1]), device=coms.device
76 | )
77 |
78 | # sample from the given range
79 | if operation == "add":
80 | coms[env_ids[:, None], body_ids, :3] += env.base_com_bias[env_ids[:, None], :]
81 | elif operation == "abs":
82 | coms[env_ids[:, None], body_ids, :3] = env.base_com_bias[env_ids[:, None], :]
83 | elif operation == "scale":
84 | coms[env_ids[:, None], body_ids, :3] *= env.base_com_bias[env_ids[:, None], :]
85 | else:
86 | raise ValueError(
87 | f"Unknown operation: '{operation}' for property randomization. Please use 'add', 'abs' or 'scale'."
88 | )
89 |     # write the updated CoMs back into the physics simulation
90 | asset.root_physx_view.set_coms(coms, env_ids)
--------------------------------------------------------------------------------
/humanoidverse/simulator/isaacsim/isaacsim_articulation_cfg.py:
--------------------------------------------------------------------------------
1 |
2 | import omni.isaac.lab.sim as sim_utils
3 | from omni.isaac.lab.actuators import ActuatorNetMLPCfg, DCMotorCfg, ImplicitActuatorCfg
4 | from omni.isaac.lab.assets.articulation import ArticulationCfg
5 | from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
6 |
7 | ARTICULATION_CFG = ArticulationCfg(
8 | spawn=sim_utils.UsdFileCfg(
9 | # usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/H1/h1.usd",
10 | usd_path="description/robots/h1/h1.usd",
11 | activate_contact_sensors=True,
12 | rigid_props=sim_utils.RigidBodyPropertiesCfg(
13 | disable_gravity=False,
14 | retain_accelerations=False,
15 | linear_damping=0.0,
16 | angular_damping=0.0,
17 | max_linear_velocity=1000.0,
18 | max_angular_velocity=1000.0,
19 | max_depenetration_velocity=1.0,
20 | ),
21 | articulation_props=sim_utils.ArticulationRootPropertiesCfg(
22 | enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=4
23 | ),
24 | ),
25 | init_state=ArticulationCfg.InitialStateCfg(
26 | pos=(0.0, 0.0, 1.05),
27 | joint_pos={
28 | ".*_hip_yaw_joint": 0.0,
29 | ".*_hip_roll_joint": 0.0,
30 | ".*_hip_pitch_joint": -0.28, # -16 degrees
31 | ".*_knee_joint": 0.79, # 45 degrees
32 | ".*_ankle_joint": -0.52, # -30 degrees
33 | "torso_joint": 0.0,
34 | ".*_shoulder_pitch_joint": 0.28,
35 | ".*_shoulder_roll_joint": 0.0,
36 | ".*_shoulder_yaw_joint": 0.0,
37 | ".*_elbow_joint": 0.52,
38 | },
39 | joint_vel={".*": 0.0},
40 | ),
41 | soft_joint_pos_limit_factor=0.9,
42 | actuators={
43 | "legs": ImplicitActuatorCfg(
44 | joint_names_expr=[".*_hip_yaw_joint", ".*_hip_roll_joint", ".*_hip_pitch_joint", ".*_knee_joint", "torso_joint"],
45 | effort_limit=300,
46 | velocity_limit=100.0,
47 | stiffness={
48 | ".*_hip_yaw_joint": 150.0,
49 | ".*_hip_roll_joint": 150.0,
50 | ".*_hip_pitch_joint": 200.0,
51 | ".*_knee_joint": 200.0,
52 | "torso_joint": 200.0,
53 | },
54 | damping={
55 | ".*_hip_yaw_joint": 5.0,
56 | ".*_hip_roll_joint": 5.0,
57 | ".*_hip_pitch_joint": 5.0,
58 | ".*_knee_joint": 5.0,
59 | "torso_joint": 5.0,
60 | },
61 | ),
62 | "feet": ImplicitActuatorCfg(
63 | joint_names_expr=[".*_ankle_joint"],
64 | effort_limit=100,
65 | velocity_limit=100.0,
66 | stiffness={".*_ankle_joint": 20.0},
67 | damping={".*_ankle_joint": 4.0},
68 | ),
69 | "arms": ImplicitActuatorCfg(
70 | joint_names_expr=[".*_shoulder_pitch_joint", ".*_shoulder_roll_joint", ".*_shoulder_yaw_joint", ".*_elbow_joint"],
71 | effort_limit=300,
72 | velocity_limit=100.0,
73 | stiffness={
74 | ".*_shoulder_pitch_joint": 40.0,
75 | ".*_shoulder_roll_joint": 40.0,
76 | ".*_shoulder_yaw_joint": 40.0,
77 | ".*_elbow_joint": 40.0,
78 | },
79 | damping={
80 | ".*_shoulder_pitch_joint": 10.0,
81 | ".*_shoulder_roll_joint": 10.0,
82 | ".*_shoulder_yaw_joint": 10.0,
83 | ".*_elbow_joint": 10.0,
84 | },
85 | ),
86 | },
87 | )
--------------------------------------------------------------------------------
/humanoidverse/train_agent.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from pathlib import Path
4 |
5 | import hydra
6 | from hydra.core.hydra_config import HydraConfig
7 | from hydra.core.config_store import ConfigStore
8 | from hydra.utils import instantiate
9 | from omegaconf import OmegaConf
10 |
11 | import logging
12 | from loguru import logger
13 |
14 | from utils.devtool import *
15 |
16 |
17 | from utils.config_utils import * # noqa: E402, F403
18 | @hydra.main(config_path="config", config_name="base", version_base="1.1")
19 | def main(config: OmegaConf):
21 | simulator_type = config.simulator['_target_'].split('.')[-1]
23 | if simulator_type == 'IsaacSim':
24 | from omni.isaac.lab.app import AppLauncher
25 | import argparse
26 | parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
27 | AppLauncher.add_app_launcher_args(parser)
28 |
29 | args_cli, hydra_args = parser.parse_known_args()
30 | sys.argv = [sys.argv[0]] + hydra_args
31 | args_cli.num_envs = config.num_envs
32 | args_cli.seed = config.seed
33 | args_cli.env_spacing = config.env.config.env_spacing # config.env_spacing
34 | args_cli.output_dir = config.output_dir
35 | args_cli.headless = config.headless
36 |
37 | app_launcher = AppLauncher(args_cli)
38 | simulation_app = app_launcher.app
39 |
41 | if simulator_type == 'IsaacGym':
42 | import isaacgym # noqa: F401
43 |
44 |
45 | # have to import torch after isaacgym
46 | import torch # noqa: E402
47 | from humanoidverse.envs.base_task.base_task import BaseTask # noqa: E402
48 | from humanoidverse.agents.base_algo.base_algo import BaseAlgo # noqa: E402
49 | from humanoidverse.utils.helpers import pre_process_config
50 | from humanoidverse.utils.logging import HydraLoggerBridge
51 |
52 | # resolve=False is important otherwise overrides
53 | # at inference time won't work properly
54 | # also, I believe this must be done before instantiation
55 |
56 | # logging to hydra log file
57 | hydra_log_path = os.path.join(HydraConfig.get().runtime.output_dir, "train.log")
58 | logger.remove()
59 | logger.add(hydra_log_path, level="DEBUG")
60 |
61 | # Get log level from LOGURU_LEVEL environment variable or use INFO as default
62 | console_log_level = os.environ.get("LOGURU_LEVEL", "INFO").upper()
63 | logger.add(sys.stdout, level=console_log_level, colorize=True)
64 |
65 | logging.basicConfig(level=logging.DEBUG)
66 | logging.getLogger().addHandler(HydraLoggerBridge())
67 |
68 | unresolved_conf = OmegaConf.to_container(config, resolve=False)
69 | os.chdir(hydra.utils.get_original_cwd())
70 |
71 | if config.use_wandb:
72 | import wandb
73 | project_name = f"{config.project_name}"
74 | run_name = f"{config.timestamp}_{config.experiment_name}_{config.log_task_name}_{config.robot.asset.robot_type}"
75 | wandb_dir = Path(config.wandb.wandb_dir)
76 | wandb_dir.mkdir(exist_ok=True, parents=True)
77 | logger.info(f"Saving wandb logs to {wandb_dir}")
78 | wandb.init(project=project_name,
79 | entity=config.wandb.wandb_entity,
80 | name=run_name,
81 | sync_tensorboard=True,
82 | config=unresolved_conf,
83 | dir=wandb_dir)
84 |
85 | if hasattr(config, 'device'):
86 | if config.device is not None:
87 | device = config.device
88 | else:
89 | device = "cuda:0" if torch.cuda.is_available() else "cpu"
90 | else:
91 | device = "cuda:0" if torch.cuda.is_available() else "cpu"
92 |
93 | pre_process_config(config)
94 |
95 | config.env.config.save_rendering_dir = str(Path(config.experiment_dir) / "renderings_training")
96 |     env: BaseTask = instantiate(config=config.env, device=device)
97 |
98 |
99 | experiment_save_dir = Path(config.experiment_dir)
100 | experiment_save_dir.mkdir(exist_ok=True, parents=True)
101 |
102 | logger.info(f"Saving config file to {experiment_save_dir}")
103 | with open(experiment_save_dir / "config.yaml", "w") as file:
104 | OmegaConf.save(unresolved_conf, file)
105 |
106 | algo: BaseAlgo = instantiate(device=device, env=env, config=config.algo, log_dir=experiment_save_dir)
107 | algo.setup()
108 | if config.checkpoint is not None:
109 | algo.load(config.checkpoint)
110 |
112 | algo.learn()
113 |
114 | if simulator_type == 'IsaacSim':
115 | simulation_app.close()
116 |
117 | if __name__ == "__main__":
118 | main()
119 |
--------------------------------------------------------------------------------
/humanoidverse/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/utils/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/utils/average_meters.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn as nn
4 |
5 |
6 | class AverageMeter(nn.Module):
7 | def __init__(self, in_shape, max_size):
8 | super(AverageMeter, self).__init__()
9 | self.max_size = max_size
10 | self.current_size = 0
11 | self.register_buffer("mean", torch.zeros(in_shape, dtype=torch.float32))
12 |
13 | def update(self, values):
14 | size = values.size()[0]
15 | if size == 0:
16 | return
17 | new_mean = torch.mean(values.float(), dim=0)
18 | size = np.clip(size, 0, self.max_size)
19 | old_size = min(self.max_size - size, self.current_size)
20 | size_sum = old_size + size
21 | self.current_size = size_sum
22 | self.mean = (self.mean * old_size + new_mean * size) / size_sum
23 |
24 | def clear(self):
25 | self.current_size = 0
26 | self.mean.fill_(0)
27 |
28 | def __len__(self):
29 | return self.current_size
30 |
31 | def get_mean(self):
32 | return self.mean.squeeze(0).cpu().numpy()
33 |
34 |
35 | class TensorAverageMeter:
36 | def __init__(self):
37 | self.tensors = []
38 |
39 | def add(self, x):
40 | if len(x.shape) == 0:
41 | x = x.unsqueeze(0)
42 | self.tensors.append(x)
43 |
44 | def mean(self):
45 | if len(self.tensors) == 0:
46 | return 0
47 | cat = torch.cat(self.tensors, dim=0)
48 | if cat.numel() == 0:
49 | return 0
50 | else:
51 | return cat.mean()
52 |
53 | def clear(self):
54 | self.tensors = []
55 |
56 | def mean_and_clear(self):
57 | mean = self.mean()
58 | self.clear()
59 | return mean
60 |
61 |
62 | class TensorAverageMeterDict:
63 | def __init__(self):
64 | self.data = {}
65 |
66 | def add(self, data_dict):
67 | for k, v in data_dict.items():
68 |             # Originally this used a defaultdict, but its lambda
69 |             # factory caused pickling issues with DDP.
70 | if k not in self.data:
71 | self.data[k] = TensorAverageMeter()
72 | self.data[k].add(v)
73 |
74 | def mean(self):
75 | mean_dict = {k: v.mean() for k, v in self.data.items()}
76 | return mean_dict
77 |
78 | def clear(self):
79 | self.data = {}
80 |
81 | def mean_and_clear(self):
82 | mean = self.mean()
83 | self.clear()
84 | return mean
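85 |
86 |
87 | # Usage sketch:
88 | #   meters = TensorAverageMeterDict()
89 | #   meters.add({"reward": torch.ones(4)})
90 | #   meters.add({"reward": torch.zeros(4)})
91 | #   meters.mean_and_clear()   # -> {"reward": tensor(0.5)}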
--------------------------------------------------------------------------------
/humanoidverse/utils/common.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2022, NVIDIA Corporation
2 | # All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # 1. Redistributions of source code must retain the above copyright notice, this
8 | # list of conditions and the following disclaimer.
9 | #
10 | # 2. Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # 3. Neither the name of the copyright holder nor the names of its
15 | # contributors may be used to endorse or promote products derived from
16 | # this software without specific prior written permission.
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
29 | import argparse
30 | import os
31 | import random
32 | import sys
33 | from datetime import datetime
34 |
35 | import numpy as np
36 | import torch
37 |
38 |
39 | def seeding(seed=0, torch_deterministic=False):
40 | print("Setting seed: {}".format(seed))
41 |
42 | random.seed(seed)
43 | np.random.seed(seed)
44 | torch.manual_seed(seed)
45 | os.environ["PYTHONHASHSEED"] = str(seed)
46 | torch.cuda.manual_seed(seed)
47 | torch.cuda.manual_seed_all(seed)
48 |
49 | if torch_deterministic:
50 | # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
51 | os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
52 | torch.backends.cudnn.benchmark = False
53 | torch.backends.cudnn.deterministic = True
54 | torch.use_deterministic_algorithms(True)
55 | else:
56 | torch.backends.cudnn.benchmark = True
57 | torch.backends.cudnn.deterministic = False
58 |
59 | return seed
60 |
--------------------------------------------------------------------------------
/humanoidverse/utils/config_utils.py:
--------------------------------------------------------------------------------
1 | import math
2 | from loguru import logger
3 | from omegaconf import OmegaConf
4 |
5 | try:
6 | OmegaConf.register_new_resolver("eval", eval)
7 | OmegaConf.register_new_resolver("if", lambda pred, a, b: a if pred else b)
8 | OmegaConf.register_new_resolver("eq", lambda x, y: x.lower() == y.lower())
9 | OmegaConf.register_new_resolver("sqrt", lambda x: math.sqrt(float(x)))
10 | OmegaConf.register_new_resolver("sum", lambda x: sum(x))
11 | OmegaConf.register_new_resolver("ceil", lambda x: math.ceil(x))
12 | OmegaConf.register_new_resolver("int", lambda x: int(x))
13 | OmegaConf.register_new_resolver("len", lambda x: len(x))
14 | OmegaConf.register_new_resolver("sum_list", lambda lst: sum(lst))
15 | except Exception as e:
16 | logger.warning(f"Warning: Some resolvers already registered: {e}")
17 |
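18 | # Usage sketch: with the resolvers above, a config value such as
19 | #   hidden_dim: ${eval:'128 * 2'}
20 | # resolves to 256 when the OmegaConf config is accessed.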
18 | # OmegaConf.register_new_resolver("eval", eval)
19 | # OmegaConf.register_new_resolver("if", lambda pred, a, b: a if pred else b)
20 | # OmegaConf.register_new_resolver("eq", lambda x, y: x.lower() == y.lower())
21 | # OmegaConf.register_new_resolver("sqrt", lambda x: math.sqrt(float(x)))
22 | # OmegaConf.register_new_resolver("sum", lambda x: sum(x))
23 | # OmegaConf.register_new_resolver("ceil", lambda x: math.ceil(x))
24 | # OmegaConf.register_new_resolver("int", lambda x: int(x))
25 | # OmegaConf.register_new_resolver("len", lambda x: len(x))
26 | # OmegaConf.register_new_resolver("sum_list", lambda lst: sum(lst))
27 |
--------------------------------------------------------------------------------
/humanoidverse/utils/devtool.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | from typing import Callable, Any
4 | from functools import wraps
5 | def pdb_decorator(fn: Callable) -> Callable:
6 | @wraps(fn)
7 | def wrapper(*args: Any, **kwargs: Any) -> Any:
8 | return fn(*args, **kwargs)
9 | return wrapper
10 |
11 |
12 |
--------------------------------------------------------------------------------
/humanoidverse/utils/inference_helpers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import os
4 | import copy
5 |
6 | def export_policy_as_jit(actor_critic, path, exported_policy_name):
7 | os.makedirs(path, exist_ok=True)
8 | path = os.path.join(path, exported_policy_name)
9 | model = copy.deepcopy(actor_critic.actor).to('cpu')
10 | traced_script_module = torch.jit.script(model)
11 | traced_script_module.save(path)
12 |
13 | def export_policy_as_onnx(inference_model, path, exported_policy_name, example_obs_dict):
14 | os.makedirs(path, exist_ok=True)
15 | path = os.path.join(path, exported_policy_name)
16 |
17 | actor = copy.deepcopy(inference_model['actor']).to('cpu')
18 |
19 | class PPOWrapper(nn.Module):
20 | def __init__(self, actor):
21 | """
22 |             actor: the policy module to wrap; only its act_inference()
23 |             path is exported to ONNX.
24 | """
25 | super(PPOWrapper, self).__init__()
26 | self.actor = actor
27 |
28 | def forward(self, actor_obs):
29 | """
30 |             Run the actor in inference mode on the observation tensor.
31 | """
32 | return self.actor.act_inference(actor_obs)
33 |
34 | wrapper = PPOWrapper(actor)
35 | example_input_list = example_obs_dict["actor_obs"]
36 |
37 | # actor.double()
38 | # wrapper.double()
39 | # example_input_list.double()
40 |
41 | torch.onnx.export(
42 | wrapper,
43 |         example_input_list,  # example input used to trace the exported graph
44 | path,
45 | verbose=True,
46 | input_names=["actor_obs"], # Specify the input names
47 | output_names=["action"], # Name the output
48 | # opset_version=17 # Specify the opset version, if needed
49 | export_params=True,
50 | opset_version=13,
51 | do_constant_folding=True,
52 | )
53 |
54 | def export_policy_and_estimator_as_onnx(inference_model, path, exported_policy_name, example_obs_dict):
55 | os.makedirs(path, exist_ok=True)
56 | path = os.path.join(path, exported_policy_name)
57 |
58 | actor = copy.deepcopy(inference_model['actor']).to('cpu')
59 | left_hand_force_estimator = copy.deepcopy(inference_model['left_hand_force_estimator']).to('cpu')
60 | right_hand_force_estimator = copy.deepcopy(inference_model['right_hand_force_estimator']).to('cpu')
61 |
62 | class PPOForceEstimatorWrapper(nn.Module):
63 | def __init__(self, actor, left_hand_force_estimator, right_hand_force_estimator):
64 | """
65 |             actor: the policy module whose act_inference() is exported.
66 |             left/right_hand_force_estimator: estimators whose outputs are concatenated to the actor observation.
67 | """
68 | super(PPOForceEstimatorWrapper, self).__init__()
69 | self.actor = actor
70 | self.left_hand_force_estimator = left_hand_force_estimator
71 | self.right_hand_force_estimator = right_hand_force_estimator
72 |
73 | def forward(self, inputs):
74 | """
75 |             Estimate hand forces from the history, then run the actor on the concatenated input.
76 | """
77 | actor_obs, history_for_estimator = inputs
78 | left_hand_force_estimator_output = self.left_hand_force_estimator(history_for_estimator)
79 | right_hand_force_estimator_output = self.right_hand_force_estimator(history_for_estimator)
80 | input_for_actor = torch.cat([actor_obs, left_hand_force_estimator_output, right_hand_force_estimator_output], dim=-1)
81 | return self.actor.act_inference(input_for_actor), left_hand_force_estimator_output, right_hand_force_estimator_output
82 |
83 | wrapper = PPOForceEstimatorWrapper(actor, left_hand_force_estimator, right_hand_force_estimator)
84 | example_input_list = [example_obs_dict["actor_obs"], example_obs_dict["long_history_for_estimator"]]
85 | torch.onnx.export(
86 | wrapper,
87 |         example_input_list,  # example (actor_obs, history) inputs used to trace the exported graph
88 | path,
89 | verbose=True,
90 | input_names=["actor_obs", "long_history_for_estimator"], # Specify the input names
91 | output_names=["action", "left_hand_force_estimator_output", "right_hand_force_estimator_output"], # Name the output
92 | opset_version=13 # Specify the opset version, if needed
93 | )
--------------------------------------------------------------------------------
/humanoidverse/utils/logging.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from loguru import logger
3 | from contextlib import contextmanager
4 | import sys
5 | import os
6 |
7 | class HydraLoggerBridge(logging.Handler):
8 | def emit(self, record):
9 | # Get corresponding loguru level
10 | try:
11 | level = logger.level(record.levelname).name
12 | except ValueError:
13 | level = record.levelno
14 |
15 | # Find caller from where the logged message originated
16 | frame, depth = logging.currentframe(), 2
17 | while frame and frame.f_code.co_filename == logging.__file__:
18 | frame = frame.f_back
19 | depth += 1
20 |
21 | logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())
22 |
23 | class LoguruStream:
24 | def write(self, message):
25 | if message.strip(): # Only log non-empty messages
26 |             logger.info(message.strip())  # log captured stdout at INFO level
27 |
28 | def flush(self):
29 | pass
30 |
31 | @contextmanager
32 | def capture_stdout_to_loguru():
33 | logger.remove()
34 | logger.add(sys.stdout, level="INFO")
35 | loguru_stream = LoguruStream()
36 | old_stdout = sys.stdout
37 | sys.stdout = loguru_stream
38 | try:
39 | yield
40 | finally:
41 | sys.stdout = old_stdout
42 | logger.remove()
43 | console_log_level = os.environ.get("LOGURU_LEVEL", "INFO").upper()
44 | logger.add(sys.stdout, level=console_log_level, colorize=True)
--------------------------------------------------------------------------------
/humanoidverse/utils/math.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: BSD-3-Clause
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # 1. Redistributions of source code must retain the above copyright notice, this
8 | # list of conditions and the following disclaimer.
9 | #
10 | # 2. Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # 3. Neither the name of the copyright holder nor the names of its
15 | # contributors may be used to endorse or promote products derived from
16 | # this software without specific prior written permission.
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 | #
29 | # Copyright (c) 2021 ETH Zurich, Nikita Rudin
30 |
31 | import torch
32 | from torch import Tensor
33 | import numpy as np
34 | from humanoidverse.utils.torch_utils import quat_apply, normalize
35 | from typing import Tuple
36 |
37 | # xyzw
38 | # @ torch.jit.script
39 | def quat_apply_yaw(quat, vec):
40 | quat_yaw = quat.clone().view(-1, 4)
41 | quat_yaw[:, :2] = 0.
42 | quat_yaw = normalize(quat_yaw)
43 | return quat_apply(quat_yaw, vec)
44 |
45 | # @ torch.jit.script
46 | def wrap_to_pi(angles):
47 | angles %= 2*np.pi
48 | angles -= 2*np.pi * (angles > np.pi)
49 | return angles
50 |
51 | # @ torch.jit.script
52 | def torch_rand_sqrt_float(lower, upper, shape, device):
53 | # type: (float, float, Tuple[int, int], str) -> Tensor
54 | r = 2*torch.rand(*shape, device=device) - 1
55 | r = torch.where(r<0., -torch.sqrt(-r), torch.sqrt(r))
56 | r = (r + 1.) / 2.
57 | return (upper - lower) * r + lower
--------------------------------------------------------------------------------
/humanoidverse/utils/motion_lib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/utils/motion_lib/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/utils/motion_lib/dof_axis.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/utils/motion_lib/dof_axis.npy
--------------------------------------------------------------------------------
/humanoidverse/utils/motion_lib/motion_lib_robot.py:
--------------------------------------------------------------------------------
1 | from humanoidverse.utils.motion_lib.motion_lib_base import MotionLibBase
2 | from humanoidverse.utils.motion_lib.torch_humanoid_batch import Humanoid_Batch
3 | class MotionLibRobot(MotionLibBase):
4 | def __init__(self, motion_lib_cfg, num_envs, device):
5 | super().__init__(motion_lib_cfg = motion_lib_cfg, num_envs = num_envs, device = device)
6 | self.mesh_parsers = Humanoid_Batch(motion_lib_cfg)
7 | return
--------------------------------------------------------------------------------
/humanoidverse/utils/motion_lib/motion_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/utils/motion_lib/motion_utils/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/utils/motion_lib/motion_utils/flags.py:
--------------------------------------------------------------------------------
1 | __all__ = ['flags']
2 |
3 | class Flags(object):
4 | def __init__(self, items):
5 | for key, val in items.items():
6 | setattr(self,key,val)
7 |
8 | flags = Flags({
9 | 'test': False,
10 | 'debug': False,
11 | "real_traj": False,
12 | "im_eval": False,
13 | })
14 |
--------------------------------------------------------------------------------
/humanoidverse/utils/real/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/humanoidverse/utils/real/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/utils/real/rotation_helper.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.spatial.transform import Rotation as R
3 |
4 |
5 | def get_gravity_orientation(quaternion):
6 | # WXYZ
7 | qw = quaternion[0]
8 | qx = quaternion[1]
9 | qy = quaternion[2]
10 | qz = quaternion[3]
11 |
12 | gravity_orientation = np.zeros(3)
13 |
14 | gravity_orientation[0] = 2 * (-qz * qx + qw * qy)
15 | gravity_orientation[1] = -2 * (qz * qy + qw * qx)
16 | gravity_orientation[2] = 1 - 2 * (qw * qw + qz * qz)
17 |
18 | return gravity_orientation
19 |
20 |
21 | def transform_imu_data(waist_yaw, waist_yaw_omega, imu_quat, imu_omega):
22 | # imu_quat: WXYZ
23 | RzWaist = R.from_euler("z", waist_yaw).as_matrix()
24 | R_torso = R.from_quat([imu_quat[1], imu_quat[2], imu_quat[3], imu_quat[0]]).as_matrix()
25 | R_pelvis = np.dot(R_torso, RzWaist.T)
26 | w = np.dot(RzWaist, imu_omega[0]) - np.array([0, 0, waist_yaw_omega])
27 | return R.from_matrix(R_pelvis).as_quat()[[3, 0, 1, 2]], w
28 |
29 |
30 | def quaternion_to_euler_array(quat:np.ndarray)->np.ndarray:
31 | # Ensure quaternion is in the correct format [x, y, z, w]
32 | x, y, z, w = quat
33 |
34 | # Roll (x-axis rotation)
35 | t0 = +2.0 * (w * x + y * z)
36 | t1 = +1.0 - 2.0 * (x * x + y * y)
37 | roll_x = np.arctan2(t0, t1)
38 |
39 | # Pitch (y-axis rotation)
40 | t2 = +2.0 * (w * y - z * x)
41 | t2 = np.clip(t2, -1.0, 1.0)
42 | pitch_y = np.arcsin(t2)
43 |
44 | # Yaw (z-axis rotation)
45 | t3 = +2.0 * (w * z + x * y)
46 | t4 = +1.0 - 2.0 * (y * y + z * z)
47 | yaw_z = np.arctan2(t3, t4)
48 |
49 | # Returns roll, pitch, yaw in a NumPy array in radians
50 | return np.array([roll_x, pitch_y, yaw_z])
51 |
52 |
53 |
54 | def rpy_to_quaternion_array(rpy: np.ndarray) -> np.ndarray:
55 | """Convert roll-pitch-yaw (radians) to quaternion [x, y, z, w]"""
56 | roll, pitch, yaw = rpy
57 |
58 | cy = np.cos(yaw * 0.5)
59 | sy = np.sin(yaw * 0.5)
60 | cp = np.cos(pitch * 0.5)
61 | sp = np.sin(pitch * 0.5)
62 | cr = np.cos(roll * 0.5)
63 | sr = np.sin(roll * 0.5)
64 |
65 | w = cr * cp * cy + sr * sp * sy
66 | x = sr * cp * cy - cr * sp * sy
67 | y = cr * sp * cy + sr * cp * sy
68 | z = cr * cp * sy - sr * sp * cy
69 |
70 | return np.array([x, y, z, w])
71 |
72 |
73 |
74 | def quat_mul_np(q1:np.ndarray, q2:np.ndarray)->np.ndarray:
75 | # q1: XYZW # q2: XYZW
77 | x1, y1, z1, w1 = q1
78 | x2, y2, z2, w2 = q2
79 |
80 | w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
81 | x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
82 | y = w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2
83 | z = w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2
84 |
85 | return np.array([x, y, z, w]) # XYZW
86 |
87 | def random_perturb_quaternion(quat:np.ndarray, max_angle:float)->np.ndarray:
88 | """
89 | Randomly perturb a quaternion by a random axis and angle
90 | """
91 | # quat: XYZW
92 |
93 | # Generate random axis and angle for perturbation
94 | axis = np.random.randn(3)
95 | axis = axis / np.linalg.norm(axis) # Normalize axis
96 | angle = max_angle * np.random.rand() # Random angle within max_angle
97 |
98 | # Convert angle-axis to quaternion
99 | sin_half_angle = np.sin(angle / 2)
100 | cos_half_angle = np.cos(angle / 2)
101 | perturb_quat = np.array([sin_half_angle * axis[0],
102 | sin_half_angle * axis[1],
103 | sin_half_angle * axis[2],
104 | cos_half_angle]) # XYZW format
105 |
106 | # Multiply quaternions to apply perturbation
107 | return quat_mul_np(perturb_quat, quat)
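108 |
109 |
110 | # Usage sketch:
111 | #   q = np.array([0.0, 0.0, 0.0, 1.0])                    # identity, XYZW
112 | #   q_noisy = random_perturb_quaternion(q, max_angle=0.1)
113 | #   np.linalg.norm(q_noisy)                               # ~1.0 for unit input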
--------------------------------------------------------------------------------
/motion_source/README.md:
--------------------------------------------------------------------------------
1 | # Motion Source
2 |
3 | This folder describes how we collect human motion from various sources and process them to the SMPL format.
4 |
5 | ## SMPL Format
6 |
7 | SMPL format data consists of a dictionary. In detail:
8 | ```
9 | 'betas': (10,) - the shape parameter of the SMPL model; not important here
10 | 'gender': str - the gender of the SMPL model; the default is neutral
11 | 'poses': (frames, 66) - joint rotations; 66 = 22 joints x 3 rotation angles each
12 | 'trans': (frames, 3) - root translation (global translation)
13 | 'mocap_framerate': int - motion FPS; AMASS and video data are 30, mocap data is 120
14 | ```
15 |
16 |
17 | ## AMASS
18 |
19 | AMASS motions do not need further processing in this step.
20 |
21 | ## Video
22 |
23 | We use [GVHMR](https://github.com/zju3dv/GVHMR) to extract motions from videos.
24 |
25 | ## LAFAN (Unitree)
26 |
27 | > Note that the Unitree Retargeted `LAFAN` dataset is not publicly available; it was originally released [here](https://huggingface.co/datasets/unitreerobotics/LAFAN1_Retargeting_Dataset).
28 |
29 | The **Unitree LAFAN** dataset is in `csv` format. Processing takes two steps:
30 |
31 | 1. Convert csv to pkl:
32 | ```
33 | python convert_lafan_pkl.py --filepath <filepath> --start 0 --end 100
34 | ```
35 | - `<filepath>` is the path to the csv file, without the `.csv` suffix.
36 | - `start` and `end` are the start and end frame indices; the `[start:end]` frames are selected from the csv file.
37 |
38 | 2. Count the pkl contact mask:
39 |
40 | ```
41 | python count_pkl_contact_mask.py robot=unitree_g1_29dof_anneal_23dof +input_folder=<input_folder> +target_folder=<target_folder>
42 | ```
43 | - It computes the contact mask of the motion data via a velocity- and height-thresholding method.
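44 |
45 | ## Loading Example
46 |
47 | As a quick sanity check of the SMPL format described above, here is a minimal loading sketch (assuming an AMASS-style `.npz`; the file name is hypothetical):
48 |
49 | ```python
50 | import numpy as np
51 |
52 | data = np.load("reverse_spin.npz", allow_pickle=True)
53 | poses = data["poses"][:, :66]       # (frames, 66): 22 joints x 3 axis-angle values
54 | trans = data["trans"]               # (frames, 3): global root translation
55 | fps = int(data["mocap_framerate"])  # 30 for AMASS/video, 120 for mocap
56 | print(poses.shape, trans.shape, fps)
57 | ```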
--------------------------------------------------------------------------------
/motion_source/convert_lafan_pkl.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.spatial.transform import Rotation as sRot
3 | import joblib
4 | import argparse
5 | import os
6 |
7 |
8 | def convert(file_name, data):
9 | data = data.astype(np.float32)
10 | print(data.shape)
11 | root_trans = data[:, :3]
12 | root_qua = data[:, 3:7]
13 | dof = data[:, 7:]
14 |
15 | dof_new = np.concatenate((dof[:, :19], dof[:, 22:26]), axis=1)
16 | root_aa = sRot.from_quat(root_qua).as_rotvec()
17 |
18 | dof_axis = np.load('../description/robots/g1/dof_axis.npy', allow_pickle=True)
19 | dof_axis = dof_axis.astype(np.float32)
20 |
21 | pose_aa = np.concatenate(
22 | (np.expand_dims(root_aa, axis=1), dof_axis * np.expand_dims(dof_new, axis=2), np.zeros((data.shape[0], 3, 3))),
23 | axis=1).astype(np.float32)
24 |
25 | data_dump = {
26 | "root_trans_offset": root_trans,
27 | "pose_aa": pose_aa,
28 | "dof": dof_new,
29 | "root_rot": root_qua,
30 | "smpl_joints": pose_aa,
31 | "fps": 30
32 | }
33 |
34 | all_data = {}
35 | all_data[file_name] = data_dump
36 |
37 | os.makedirs('./lafan_pkl', exist_ok=True)
38 | joblib.dump(all_data, f'./lafan_pkl/{file_name}.pkl')
39 |
40 |
41 | if __name__ == "__main__":
42 | parser = argparse.ArgumentParser()
43 | parser.add_argument('--filepath', type=str, help="File name", required=True)
44 | parser.add_argument('--start', type=int, help="Start frame", default=0)
45 | parser.add_argument('--end', type=int, help="End frame", default=100)
46 | args = parser.parse_args()
47 |
48 | start_frame = args.start
49 | end_frame = args.end
50 | robot_type = 'g1'
51 |
52 | filepath = args.filepath
53 | csv_file = filepath + '.csv'
54 | data = np.genfromtxt(csv_file, delimiter=',')[start_frame:end_frame, :]
55 |
56 | output_name = filepath.split('/')[-1] + '_' + f'{start_frame}' + '_' + f'{end_frame}'
57 |
58 | convert(output_name, data)
--------------------------------------------------------------------------------
/motion_source/count_pkl_contact_mask.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import sys
4 | import pdb
5 | import os.path as osp
6 | import numpy as np
7 |
8 | sys.path.append(os.getcwd())
9 |
10 | from utils.torch_humanoid_batch import Humanoid_Batch
11 | import torch
12 | import joblib
13 | import hydra
14 | from omegaconf import DictConfig, OmegaConf
15 |
16 | from scipy.spatial.transform import Rotation as sRot
17 |
18 | def foot_detect(positions, thres=0.002):
19 | fid_r, fid_l = 12,6
20 | positions = positions.numpy()
21 | velfactor, heightfactor = np.array([thres]), np.array([0.12])
22 | feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2
23 | feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2
24 | feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2
25 | feet_l_h = positions[1:,fid_l,2]
26 | feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor).astype(int) & (feet_l_h < heightfactor).astype(int)).astype(np.float32)
27 | feet_l = np.expand_dims(feet_l,axis=1)
28 | feet_l = np.concatenate([np.array([[1.]]),feet_l],axis=0)
29 |
30 | feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2
31 | feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2
32 | feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2
33 | feet_r_h = positions[1:,fid_r,2]
34 | feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor).astype(int) & (feet_r_h < heightfactor).astype(int)).astype(np.float32)
35 | feet_r = np.expand_dims(feet_r,axis=1)
36 | feet_r = np.concatenate([np.array([[1.]]),feet_r],axis=0)
37 | return feet_l, feet_r
38 |
39 | def count_pose_aa(motion):
40 | dof = motion['dof']
41 | root_qua = motion['root_rot']
42 | dof_new = np.concatenate((dof[:, :19], dof[:, 22:26]), axis=1)
43 | root_aa = sRot.from_quat(root_qua).as_rotvec()
44 |
45 | dof_axis = np.load('../description/robots/g1/dof_axis.npy', allow_pickle=True)
46 | dof_axis = dof_axis.astype(np.float32)
47 |
48 | pose_aa = np.concatenate(
49 | (np.expand_dims(root_aa, axis=1), dof_axis * np.expand_dims(dof_new, axis=2), np.zeros((dof_new.shape[0], 3, 3))),
50 | axis=1).astype(np.float32)
51 |
52 | return pose_aa,dof_new
53 |
54 | def process_motion(motion, cfg):
55 | device = torch.device("cpu")
56 | humanoid_fk = Humanoid_Batch(cfg.robot) # load forward kinematics model
57 |
59 |
60 | if 'pose_aa' not in motion.keys():
61 | pose_aa,dof = count_pose_aa(motion=motion)
62 | motion['pose_aa'] = pose_aa
63 | motion['dof'] = dof
64 | pose_aa = torch.from_numpy(pose_aa).unsqueeze(0)
65 | else:
66 | pose_aa = torch.from_numpy(motion['pose_aa']).unsqueeze(0)
67 | root_trans = torch.from_numpy(motion['root_trans_offset']).unsqueeze(0)
68 |
69 | fk_return = humanoid_fk.fk_batch(pose_aa, root_trans)
70 |
71 | feet_l, feet_r = foot_detect(fk_return.global_translation_extend[0])
72 |
73 | motion['contact_mask'] = np.concatenate([feet_l,feet_r],axis=-1)
74 | motion['smpl_joints'] = fk_return.global_translation_extend[0].detach().numpy()
75 |
76 | return motion
77 |
78 |
79 | @hydra.main(version_base=None, config_path="../description/robots/cfg", config_name="config")
80 | def main(cfg: DictConfig) -> None:
81 | folder_path = cfg.input_folder
82 | if folder_path[-1]=='/':
83 | target_folder_path = folder_path[:-1] + '_contact_mask'
84 | else:
85 | target_folder_path = folder_path+'_contact_mask'
86 | os.makedirs(target_folder_path, exist_ok=True)
87 | target_folder_list = os.listdir(target_folder_path)
88 | for filename in os.listdir(folder_path):
89 | if filename.split('.')[0] + '_cont_mask.pkl' in target_folder_list:
90 | continue
91 | filename = filename.split('.')[0]
92 | motion_file = folder_path + '/' + f'{filename}.pkl'
93 | motion_data = joblib.load(motion_file)
94 | motion_data_keys = list(motion_data.keys())
95 | motion = process_motion(motion_data[motion_data_keys[0]], cfg)
96 | save_data={}
97 | save_data[motion_data_keys[0]] = motion
98 | dumped_file = f'{target_folder_path}/{filename}_cont_mask.pkl'
99 | joblib.dump(save_data, dumped_file)
100 |
101 | if __name__ == "__main__":
102 | main()
--------------------------------------------------------------------------------
/motion_source/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/motion_source/utils/__init__.py
--------------------------------------------------------------------------------
/overview.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/overview.jpg
--------------------------------------------------------------------------------
/robot_motion_process/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Robot Motion Processing
3 |
4 | In this folder, we provide tools for visualizing and analyzing the processed motion and the motion trajectory from simulation.
5 |
6 | All retargeted reference motions and motion trajectories collected from simulation share the same data format. `motion_readpkl.py` provides example code for loading them.
7 |
8 | To collect motion trajectories in IsaacGym, use `humanoidverse/sample_eps.py`; for MuJoCo, use `humanoidverse/urci.py`, which automatically saves the motion trajectory to a pickle file when each episode ends.
9 |
10 |
11 |
12 | ## Visualization
13 |
14 | `vis_q_mj.py` and `vis_rr.py` both visualize the motion data. The two scripts load the same data format but offer different GUIs: `vis_q_mj.py` is based on `mujoco_py`, provides reference-motion and torque visualization, and supports manual correction of the contact mask; `vis_rr.py` is based on `rerun` and provides a more interactive visualization.
15 |
16 |
17 |
18 | Usage:
19 |
20 | ```bash
21 | python robot_motion_process/vis_q_mj.py +motion_file=...
22 | ```
23 | See `def key_call_back(keycode)` in `vis_q_mj.py` for the key mapping.
24 |
25 | ```bash
26 | python robot_motion_process/vis_rr.py --filepath=...
27 | ```
28 |
29 | Note that you need to install the additional dependencies for `vis_rr.py`.
30 |
31 | ```bash
32 | # (Optional) Install additional dependencies for visualization
33 | pip install rerun-sdk==0.22.0 trimesh
34 | ```
35 |
36 |
37 |
38 | ## Interpolation
39 |
40 | `motion_interpolation_pkl.py` preprocesses the motion data. Given a default robot pose, the script adds a linear interpolation between the default pose and the motion frames at both the beginning and the end, so that every processed motion begins and ends in the default pose (a schematic sketch is given at the end of this README).
41 |
42 | This is useful for deployment, since it is inconvenient to initialize the real-world robot to the starting pose of each motion, and some starting poses are so awkward that the robot cannot stand stably in them.
43 |
44 | Usage:
45 | ```bash
46 | python robot_motion_process/motion_interpolation_pkl.py +origin_file_name=... +start=... +end=... +start_inter_frame=... +end_inter_frame=...
47 | ```
48 | - `origin_file_name` is the path to the original motion data; it should be a pickle file.
49 | - `start` and `end` are the start and end frame indices; the `[start:end]` frames are selected from the original motion data.
50 | - `start_inter_frame` and `end_inter_frame` are the numbers of interpolation frames added at the beginning and the end of the motion data.
51 |
52 |
53 | ## Trajectory Analysis
54 |
55 | `traj_vis.ipynb` visualizes motion trajectories. For motion data collected from policy rollouts, you can use it to compare the rollout trajectory with the reference motion.
56 |
57 | Usage: Change the path to motion data in the notebook according to the example cells and run all cells.
58 |
59 |
60 |
61 |
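62 | ## Interpolation Sketch
63 |
64 | As a minimal sketch of the blending idea behind `motion_interpolation_pkl.py` (illustrative only, not the script's exact implementation; shapes assume the 23-DoF G1 used in this repo):
65 |
66 | ```python
67 | import numpy as np
68 |
69 | def blend_in(default_pose: np.ndarray, motion: np.ndarray, n: int) -> np.ndarray:
70 |     """Prepend n frames that move linearly from default_pose to motion[0]."""
71 |     alphas = np.linspace(0.0, 1.0, n, endpoint=False)[:, None]           # (n, 1)
72 |     ramp = (1.0 - alphas) * default_pose[None, :] + alphas * motion[0][None, :]
73 |     return np.concatenate([ramp, motion], axis=0)
74 |
75 | motion = np.zeros((100, 23))      # (frames, dof)
76 | default_pose = np.zeros(23)
77 | assert blend_in(default_pose, motion, n=30).shape == (130, 23)
78 | ```
79 |
80 | The same ramp, reversed, is appended at the end so the motion also finishes in the default pose.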
--------------------------------------------------------------------------------
/robot_motion_process/motion_readpkl.py:
--------------------------------------------------------------------------------
1 | import joblib
2 | import numpy as np
3 | import argparse
4 |
5 | def main():
6 | parser = argparse.ArgumentParser(description="Load a motion file using joblib.")
7 | parser.add_argument('motion_file', type=str, help='Path to the motion file to load.')
8 | args = parser.parse_args()
9 |
10 | out_motion_data = joblib.load(args.motion_file)
11 | motion_data = next(iter(out_motion_data.values()))
12 |
13 | print(out_motion_data.keys())
14 | print(motion_data.keys())
15 | # print(motion_data)
16 | breakpoint()
17 |
18 | if __name__ == "__main__":
19 | main()
20 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import find_packages, setup
2 |
3 | setup(
4 | name='pbhc',
5 | version='0.0.1',
6 | license="MIT",
7 | packages=find_packages(),
8 | description='Physics-Based Humanoid motion Control (PBHC)',
9 | python_requires=">=3.8",
10 | install_requires=[
11 | "hydra-core>=1.2.0",
12 | "numpy==1.24.4",
13 | "mujoco==3.2.3",
14 | "mujoco-python-viewer",
15 | "rich",
16 | "ipdb",
17 | "matplotlib",
18 | "termcolor",
19 | "wandb",
20 | "plotly",
21 | "tqdm",
22 | "loguru",
23 | "meshcat",
24 | "pynput",
25 | "scipy",
26 | "tensorboard",
27 | "onnx",
28 | "onnxruntime",
29 | "opencv-python",
30 | "joblib",
31 | "easydict",
32 | "lxml",
33 | "numpy-stl",
34 | "open3d",
35 | "toolz",
36 | # 'rerun-sdk==0.22.0',
37 | # 'trimesh',
38 | # 'git+https://github.com/ZhengyiLuo/SMPLSim.git@master',
39 | 'typer',
40 | 'dm_control',
41 | 'loop_rate_limiters',
42 | 'mink',
43 | 'quadprog',
44 | 'einops'
45 | ]
46 | )
--------------------------------------------------------------------------------
/smpl_retarget/README.md:
--------------------------------------------------------------------------------
1 | # SMPL Motion Retarget
2 |
3 | Given SMPL-format motion data, this folder describes how we retarget it to the robot, taking the Unitree G1 as an example.
4 |
5 | Our code incorporates the retargeting pipelines from [MaskedMimic](https://github.com/NVlabs/ProtoMotions) and [PHC](https://github.com/ZhengyiLuo/PHC) - the former is built upon the differential inverse kinematics framework [Mink](https://github.com/kevinzakka/mink), while the latter employs gradient-based optimization.
6 |
7 | Both methods can be used to retarget human motion to the robot, with slightly different results; we use the Mink pipeline in our experiments.
8 |
9 | ## Motion Filter (Optional)
10 |
11 | Due to reconstruction inaccuracies and out-of-distribution issues in HMR models, motions extracted from videos may violate physical and biomechanical constraints. Our motion pipeline can therefore filter out such motions using physics-based criteria; this step is optional.
12 |
13 | The motion filter implementation is adapted from the official [IPMAN](https://github.com/sha2nkt/ipman-r) codebase, so please refer to its installation process.
14 |
15 | Usage:
16 | ```
17 | cd motion_filter
18 | python utils/motion_filter.py --folder <motion_folder> --convert_rot True
19 | ```
20 | - If motions are y-up, set `--convert_rot False`. If motions are z-up, set `--convert_rot True`.
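Conceptually, converting z-up data to y-up is a fixed -90° rotation about the x-axis applied to root translations and global orientations. A minimal sketch of that convention change (illustration only, with made-up arrays; this is not the filter's actual implementation):
```
import numpy as np
from scipy.spatial.transform import Rotation as sRot

# fixed change of basis: z-up -> y-up, i.e. (x, y, z) -> (x, z, -y)
R_zup_to_yup = sRot.from_euler("x", -90, degrees=True)

trans_zup = np.array([[0.1, 0.2, 0.9]])          # (N, 3) root translations
root_aa_zup = np.array([[0.0, 0.0, np.pi / 2]])  # (N, 3) root orientations, axis-angle

trans_yup = R_zup_to_yup.apply(trans_zup)
root_aa_yup = (R_zup_to_yup * sRot.from_rotvec(root_aa_zup)).as_rotvec()
```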
21 |
22 | ## Mink Retarget
23 |
24 | First install `poselib`:
25 | ```
26 | cd poselib
27 | pip install -e .
28 | ```
29 |
30 | Retarget command:
31 | ```
32 | python mink_retarget/convert_fit_motion.py <motion_folder>
33 | ```
34 |
35 | Adding `--correct` enables motion correction.
36 |
37 | `<motion_folder>` is the root folder of the motion data. The motion data folder should be organized like this:
38 |
39 | ```
40 | motion_data/
41 | ├── video_motion/
42 | | └── video1.npz
43 | | └── video2.npz
44 | | └── video3.npz
45 | | └── ...
46 | └── amass/
47 | └── reverse_spin.npz
48 | └── wushu_form.npz
49 | └── ...
50 | ```
51 | In the above case, `<motion_folder>` is `motion_data/`.
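For the layout above, a full invocation with motion correction enabled would be:
```
python mink_retarget/convert_fit_motion.py motion_data/ --correct
```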
52 |
53 | ## PHC Retarget
54 |
55 | Download the [SMPL](https://smpl.is.tue.mpg.de/) v1.1.0 parameters and place them in the `smpl_model/smpl/` folder. Rename the files `basicmodel_neutral_lbs_10_207_0_v1.1.0.pkl`, `basicmodel_m_lbs_10_207_0_v1.1.0.pkl`, `basicmodel_f_lbs_10_207_0_v1.1.0.pkl` to `SMPL_NEUTRAL.pkl`, `SMPL_MALE.pkl` and `SMPL_FEMALE.pkl` respectively.
56 |
57 | The folder structure should be organized like this:
58 | ```
59 | smpl_model/smpl/
60 | ├── SMPL_FEMALE.pkl
61 | ├── SMPL_MALE.pkl
62 | └── SMPL_NEUTRAL.pkl
63 | ```
64 |
65 | Retarget command:
66 | ```
67 | python phc_retarget/fit_smpl_motion.py robot=unitree_g1_29dof_anneal_23dof +motion=<motion_path>
68 | ```
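For instance, assuming `+motion=` takes the path to an SMPL motion file (hypothetical; check `fit_smpl_motion.py` for the exact expected value), using the AMASS clip from the layout above:
```
python phc_retarget/fit_smpl_motion.py robot=unitree_g1_29dof_anneal_23dof +motion=motion_data/amass/wushu_form.npz
```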
--------------------------------------------------------------------------------
/smpl_retarget/mink_retarget/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/mink_retarget/__init__.py
--------------------------------------------------------------------------------
/smpl_retarget/mink_retarget/retargeting/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/mink_retarget/retargeting/__init__.py
--------------------------------------------------------------------------------
/smpl_retarget/mink_retarget/retargeting/config.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict
2 |
3 | def g1_mapping():
4 |     #### Config for extension: virtual joints attached to existing robot links so SMPL keypoints such as the hands, head, and toes have robot-side correspondences
5 | extend_config = [
6 | {
7 | "joint_name": "left_rubber_hand_2",
8 | "parent_name": "left_wrist_yaw_link",
9 | "pos": [0.12, 0, 0.0],
10 | "rot": [1.0, 0.0, 0.0, 0.0],
11 | },
12 | {
13 | "joint_name": "right_rubber_hand_2",
14 | "parent_name": "right_wrist_yaw_link",
15 | "pos": [0.12, 0, 0.0],
16 | "rot": [1.0, 0.0, 0.0, 0.0],
17 | },
18 | {
19 | "joint_name": "head",
20 | "parent_name": "pelvis",
21 | "pos": [0.0, 0.0, 0.4],
22 | "rot": [1.0, 0.0, 0.0, 0.0],
23 | },
24 | {
25 | "joint_name": "left_toe_link",
26 | "parent_name": "left_ankle_roll_link",
27 | "pos": [0.08, 0.0, -0.01],
28 | "rot": [1.0, 0.0, 0.0, 0.0],
29 | },
30 | {
31 | "joint_name": "right_toe_link",
32 | "parent_name": "right_ankle_roll_link",
33 | "pos": [0.08, 0.0, -0.01],
34 | "rot": [1.0, 0.0, 0.0, 0.0],
35 | },
36 |
37 | ]
38 |
39 | base_link = "torso_link"
40 | joint_matches = [
41 | ["pelvis", "Pelvis"],
42 | ["left_hip_yaw_link", "L_Hip"],
43 | ["left_knee_link", "L_Knee"],
44 | ["left_ankle_roll_link", "L_Ankle"],
45 | ["right_hip_yaw_link", "R_Hip"],
46 | ["right_knee_link", "R_Knee"],
47 | ["right_ankle_roll_link", "R_Ankle"],
48 | ["left_shoulder_pitch_link", "L_Shoulder"],
49 | ["left_elbow_link", "L_Elbow"],
50 | ["left_rubber_hand_2", "L_Hand"],
51 | ["right_shoulder_pitch_link", "R_Shoulder"],
52 | ["right_elbow_link", "R_Elbow"],
53 | ["right_rubber_hand_2", "R_Hand"],
54 | ["head", "Head"],
55 | ]
56 |
57 | smpl_pose_modifier = [
58 | {"Pelvis": "[np.pi/2, 0, np.pi/2]"},
59 | {"L_Shoulder": "[0, 0, -np.pi/2]"},
60 | {"R_Shoulder": "[0, 0, np.pi/2]"},
61 | {"L_Elbow": "[0, -np.pi/2, 0]"},
62 | {"R_Elbow": "[0, np.pi/2, 0]"},
63 | ]
64 |
65 | asset_file = "../description/robots/g1/g1_29dof_rev_1_0.xml"
66 |
67 | return EasyDict(
68 | extend_config=extend_config,
69 | base_link=base_link,
70 | joint_matches=joint_matches,
71 | smpl_pose_modifier=smpl_pose_modifier,
72 | asset_file=asset_file,
73 | )
74 |
75 | def smplx_with_limits_mapping():
76 | #### Config for extension
77 | extend_config = []
78 |
79 | base_link = "Pelvis"
80 |
81 | smplx_joints = [
82 | "Pelvis",
83 | "L_Hip",
84 | "L_Knee",
85 | "L_Ankle",
86 | "L_Toe",
87 | "R_Hip",
88 | "R_Knee",
89 | "R_Ankle",
90 | "R_Toe",
91 | "Torso",
92 | "Spine",
93 | "Chest",
94 | "Neck",
95 | "Head",
96 | "L_Thorax",
97 | "L_Shoulder",
98 | "L_Elbow",
99 | "L_Wrist",
100 | "L_Index1",
101 | "L_Index2",
102 | "L_Index3",
103 | "L_Middle1",
104 | "L_Middle2",
105 | "L_Middle3",
106 | "L_Pinky1",
107 | "L_Pinky2",
108 | "L_Pinky3",
109 | "L_Ring1",
110 | "L_Ring2",
111 | "L_Ring3",
112 | "L_Thumb1",
113 | "L_Thumb2",
114 | "L_Thumb3",
115 | "R_Thorax",
116 | "R_Shoulder",
117 | "R_Elbow",
118 | "R_Wrist",
119 | "R_Index1",
120 | "R_Index2",
121 | "R_Index3",
122 | "R_Middle1",
123 | "R_Middle2",
124 | "R_Middle3",
125 | "R_Pinky1",
126 | "R_Pinky2",
127 | "R_Pinky3",
128 | "R_Ring1",
129 | "R_Ring2",
130 | "R_Ring3",
131 | "R_Thumb1",
132 | "R_Thumb2",
133 | "R_Thumb3",
134 | ]
135 | joint_matches = [[joint, joint] for joint in smplx_joints]
136 |
137 | smpl_pose_modifier = []
138 |
139 | asset_file = "data/assets/mjcf/smplx_humanoid_with_limits.xml"
140 |
141 | return EasyDict(
142 | extend_config=extend_config,
143 | base_link=base_link,
144 | joint_matches=joint_matches,
145 | smpl_pose_modifier=smpl_pose_modifier,
146 | asset_file=asset_file,
147 | )
148 |
149 |
150 | def get_config(humanoid_type: str):
151 | if humanoid_type == "g1":
152 | return g1_mapping()
153 | elif humanoid_type == "smplx_humanoid_with_limits":
154 | return smplx_with_limits_mapping()
155 | else:
156 | raise NotImplementedError
157 |
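A quick way to inspect a mapping from this module (illustrative; assumes the module is importable as `config`):
```
from config import get_config

cfg = get_config("g1")
print(cfg.base_link)           # torso_link
print(len(cfg.joint_matches))  # 14 SMPL-joint-to-robot-link pairs
print(cfg.asset_file)          # MJCF model used for retargeting
```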
--------------------------------------------------------------------------------
/smpl_retarget/mink_retarget/shape_optimized_neutral.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/mink_retarget/shape_optimized_neutral.pkl
--------------------------------------------------------------------------------
/smpl_retarget/motion_filter/constants.py:
--------------------------------------------------------------------------------
1 | FOCAL_LENGTH = 5000.
2 | IMG_RES = 224
3 | CONTACT_THRESH = 0.1 # gmof_rho in overlay vis = contact thresh
4 | #IMG_RES = 256
5 |
6 | JOINT_REGRESSOR_TRAIN_EXTRA = 'data/J_regressor_extra.npy'
7 |
8 | # Mean and standard deviation for normalizing input image
9 | IMG_NORM_MEAN = [0.485, 0.456, 0.406]
10 | IMG_NORM_STD = [0.229, 0.224, 0.225]
11 |
12 | # Dictionary of ground plane offsets in datasets
13 | GROUND_OFFSETS = {
14 | 'h36m': -0.095,
15 | }
16 |
17 | """
18 | We create a superset of joints containing the OpenPose joints together with the ones that each dataset provides.
19 | We keep a superset of 24 joints such that we include all joints from every dataset.
20 | If a dataset doesn't provide annotations for a specific joint, we simply ignore it.
21 | The joints used here are the following:
22 | """
23 | JOINT_NAMES = [
24 | # 25 OpenPose joints (in the order provided by OpenPose)
25 | 'OP Nose',
26 | 'OP Neck',
27 | 'OP RShoulder',
28 | 'OP RElbow',
29 | 'OP RWrist',
30 | 'OP LShoulder',
31 | 'OP LElbow',
32 | 'OP LWrist',
33 | 'OP MidHip',
34 | 'OP RHip',
35 | 'OP RKnee',
36 | 'OP RAnkle',
37 | 'OP LHip',
38 | 'OP LKnee',
39 | 'OP LAnkle',
40 | 'OP REye',
41 | 'OP LEye',
42 | 'OP REar',
43 | 'OP LEar',
44 | 'OP LBigToe',
45 | 'OP LSmallToe',
46 | 'OP LHeel',
47 | 'OP RBigToe',
48 | 'OP RSmallToe',
49 | 'OP RHeel',
50 | # 24 Ground Truth joints (superset of joints from different datasets)
51 | 'Right Ankle',
52 | 'Right Knee',
53 | 'Right Hip',
54 | 'Left Hip',
55 | 'Left Knee',
56 | 'Left Ankle',
57 | 'Right Wrist',
58 | 'Right Elbow',
59 | 'Right Shoulder',
60 | 'Left Shoulder',
61 | 'Left Elbow',
62 | 'Left Wrist',
63 | 'Neck (LSP)',
64 | 'Top of Head (LSP)',
65 | 'Pelvis (MPII)',
66 | 'Thorax (MPII)',
67 | 'Spine (H36M)',
68 | 'Jaw (H36M)',
69 | 'Head (H36M)',
70 | 'Nose',
71 | 'Left Eye',
72 | 'Right Eye',
73 | 'Left Ear',
74 | 'Right Ear'
75 | ]
76 |
77 | # Dict containing the joints in numerical order
78 | JOINT_IDS = {JOINT_NAMES[i]: i for i in range(len(JOINT_NAMES))}
79 |
80 | # Map joints to SMPL joints
81 | JOINT_MAP = {
82 | 'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17,
83 | 'OP RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16,
84 | 'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0,
85 | 'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8,
86 | 'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7,
87 | 'OP REye': 25, 'OP LEye': 26, 'OP REar': 27,
88 | 'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30,
89 | 'OP LHeel': 31, 'OP RBigToe': 32, 'OP RSmallToe': 33, 'OP RHeel': 34,
90 | 'Right Ankle': 8, 'Right Knee': 5, 'Right Hip': 45,
91 | 'Left Hip': 46, 'Left Knee': 4, 'Left Ankle': 7,
92 | 'Right Wrist': 21, 'Right Elbow': 19, 'Right Shoulder': 17,
93 | 'Left Shoulder': 16, 'Left Elbow': 18, 'Left Wrist': 20,
94 | 'Neck (LSP)': 47, 'Top of Head (LSP)': 48,
95 | 'Pelvis (MPII)': 49, 'Thorax (MPII)': 50,
96 | 'Spine (H36M)': 51, 'Jaw (H36M)': 52,
97 | 'Head (H36M)': 53, 'Nose': 24, 'Left Eye': 26,
98 | 'Right Eye': 25, 'Left Ear': 28, 'Right Ear': 27
99 | }
100 |
101 | # Joint selectors
102 | # Indices to get the 14 LSP joints from the 17 H36M joints
103 | H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]
104 | H36M_TO_J14 = H36M_TO_J17[:14]
105 | # Indices to get the 14 LSP joints from the ground truth joints
106 | J24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]
107 | J24_TO_J14 = J24_TO_J17[:14]
108 |
109 | # Permutation of SMPL pose parameters when flipping the shape
110 | SMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]
111 | SMPL_POSE_FLIP_PERM = []
112 | for i in SMPL_JOINTS_FLIP_PERM:
113 | SMPL_POSE_FLIP_PERM.append(3*i)
114 | SMPL_POSE_FLIP_PERM.append(3*i+1)
115 | SMPL_POSE_FLIP_PERM.append(3*i+2)
116 | # Permutation indices for the 24 ground truth joints
117 | J24_FLIP_PERM = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]
118 | # Permutation indices for the full set of 49 joints
119 | J49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\
120 | + [25+i for i in J24_FLIP_PERM]
121 |
--------------------------------------------------------------------------------
/smpl_retarget/motion_filter/model/__init__.py:
--------------------------------------------------------------------------------
1 | from .smpl import SMPL
--------------------------------------------------------------------------------
/smpl_retarget/motion_filter/model/smpl.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | import smplx
4 | from smplx import SMPL as _SMPL
5 | # from smplx.body_models import ModelOutput
6 | from smplx.utils import SMPLOutput
7 | from smplx.lbs import vertices2joints
8 | import constants
9 |
10 |
11 | class SMPL(_SMPL):
12 | """ Extension of the official SMPL implementation to support more joints """
13 |
14 | def __init__(self, *args, **kwargs):
15 | super(SMPL, self).__init__(*args, **kwargs)
16 | joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]
17 | J_regressor_extra = np.load(constants.JOINT_REGRESSOR_TRAIN_EXTRA)
18 | self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))
19 | self.joint_map = torch.tensor(joints, dtype=torch.long)
20 |
21 | def forward(self, *args, **kwargs):
22 | kwargs['get_skin'] = True
23 | smpl_output = super(SMPL, self).forward(*args, **kwargs)
24 | extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)
25 | joints = torch.cat([smpl_output.joints, extra_joints], dim=1)
26 | joints = joints[:, self.joint_map, :]
27 | output = SMPLOutput(vertices=smpl_output.vertices,
28 | global_orient=smpl_output.global_orient,
29 | body_pose=smpl_output.body_pose,
30 | joints=joints,
31 | betas=smpl_output.betas,
32 | full_pose=smpl_output.full_pose)
33 |
34 | return output
--------------------------------------------------------------------------------
/smpl_retarget/motion_filter/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/motion_filter/utils/__init__.py
--------------------------------------------------------------------------------
/smpl_retarget/motion_filter/utils/mesh_utils.py:
--------------------------------------------------------------------------------
1 | import trimesh
2 | import os.path as osp
3 | import numpy as np
4 | import torch
5 | import os
6 | import sys
7 | import torch.nn as nn
8 |
9 | ESSENTIALS_DIR = 'data/essentials'
10 |
11 |
12 | def sparse_batch_mm(m1, m2):
13 | """
14 |
15 | m1: sparse matrix of size N x M
16 | m2: dense matrix of size B x M x K
17 | returns m1@m2 matrix of size B x N x K
18 | """
19 |
20 | batch_size = m2.shape[0]
21 | # stack m2 into columns: (B x N x K) -> (N, B, K) -> (N, B * K)
22 | m2_stack = m2.transpose(0, 1).reshape(m1.shape[1], -1)
23 | result = m1.mm(m2_stack).reshape(m1.shape[0], batch_size, -1) \
24 | .transpose(1, 0)
25 | return result
26 |
27 |
28 | class HDfier():
29 | def __init__(self, model_type='smpl', device='cuda'):
30 | hd_operator_path = osp.join(ESSENTIALS_DIR, 'hd_model', model_type,
31 | f'{model_type}_neutral_hd_vert_regressor_sparse.npz')
32 | hd_operator = np.load(hd_operator_path)
33 | self.hd_operator = torch.sparse.FloatTensor(
34 | torch.tensor(hd_operator['index_row_col']),
35 | torch.tensor(hd_operator['values']),
36 | torch.Size(hd_operator['size']))
37 | self.model_type = model_type
38 | self.device = device
39 |
40 | def hdfy_mesh(self, vertices):
41 | """
42 | Applies a regressor that maps SMPL vertices to uniformly distributed vertices
43 | """
44 | # device = body.vertices.device
45 |         # convert ndarray input to a torch tensor first (ndarrays have no .dim())
46 |         if isinstance(vertices, np.ndarray):
47 |             vertices = torch.from_numpy(vertices).to(self.device)
48 | 
49 |         # check if vertices ndim is 3; if not, add a batch axis
50 |         if vertices.dim() != 3:
51 |             # batchify the vertices
52 |             vertices = vertices[None, :, :]
53 |
54 | vertices = vertices.to(torch.double)
55 |
56 | if self.hd_operator.device != vertices.device:
57 | self.hd_operator = self.hd_operator.to(vertices.device)
58 | hd_verts = sparse_batch_mm(self.hd_operator, vertices).to(torch.float)
59 | return hd_verts
60 |
--------------------------------------------------------------------------------
/smpl_retarget/motion_filter/utils/smpl_paraser.py:
--------------------------------------------------------------------------------
1 | from smpl_sim.smpllib.smpl_parser import (
2 | SMPL_Parser,
3 | SMPLH_Parser,
4 | SMPLX_Parser,
5 | )
6 | import torch
7 | import argparse
8 | import numpy as np
9 |
10 | if __name__ == '__main__':
11 | parser = argparse.ArgumentParser()
12 | parser.add_argument('--filepath', type=str, required=True)
13 | args = parser.parse_args()
14 | filepath = args.filepath
15 | motion_file = np.load(filepath, allow_pickle=True)
16 | smpl_parser_n = SMPL_Parser(model_path="data/smpl", gender="neutral")
17 |
18 | root_trans = motion_file['trans']
19 | pose_aa = np.concatenate([motion_file['poses'][:, :66], np.zeros((root_trans.shape[0], 6))], axis=-1)
20 | betas = motion_file['betas']
21 | gender = motion_file['gender']
22 |
23 | origin_verts, origin_global_trans = smpl_parser_n.get_joints_verts(torch.from_numpy(pose_aa), torch.from_numpy(betas).unsqueeze(0), torch.from_numpy(root_trans))
24 |
25 | breakpoint()
--------------------------------------------------------------------------------
/smpl_retarget/poselib/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | # vscode
107 | .vscode/
108 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/README.md:
--------------------------------------------------------------------------------
1 | # poselib
2 |
3 | `poselib` is a library for loading, manipulating, and retargeting skeleton poses and motions. It is separated into three modules: `poselib.core` for basic data loading and tensor operations, `poselib.skeleton` for higher-level skeleton operations, and `poselib.visualization` for displaying skeleton poses.
4 |
5 | ## poselib.core
6 | - `poselib.core.rotation3d`: A set of Torch JIT functions for dealing with quaternions, transforms, and rotation/transformation matrices.
7 | - `quat_*` manipulate and create quaternions in [x, y, z, w] format (where w is the real component).
8 | - `transform_*` handle 7D transforms in [quat, pos] format.
9 | - `rot_matrix_*` handle 3x3 rotation matrices.
10 | - `euclidean_*` handle 4x4 Euclidean transformation matrices.
11 | - `poselib.core.tensor_utils`: Provides loading and saving functions for PyTorch tensors.
12 |
13 | ## poselib.skeleton
14 | - `poselib.skeleton.skeleton3d`: Utilities for loading and manipulating skeleton poses, and retargeting poses to different skeletons.
15 | - `SkeletonTree` is a class that stores a skeleton as a tree structure.
16 | - `SkeletonState` describes the static state of a skeleton, and provides both global and local joint angles.
17 | - `SkeletonMotion` describes a time-series of skeleton states and provides utilities for computing joint velocities.
18 |
19 | ## poselib.visualization
20 | - `poselib.visualization.common`: Functions used for visualizing skeletons interactively in `matplotlib`.
21 |
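A minimal example of the quaternion conventions above (a ~90° rotation about z, in [x, y, z, w] format):
```
import torch
from poselib.core.rotation3d import quat_normalize, quat_rotate

q = quat_normalize(torch.tensor([[0.0, 0.0, 1.0, 1.0]]))  # ~90 deg about z
v = torch.tensor([[1.0, 0.0, 0.0]])
print(quat_rotate(q, v))  # approximately [0, 1, 0]
```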
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.0.1"
2 |
3 | from .core import *
4 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/core/__init__.py:
--------------------------------------------------------------------------------
1 | from .tensor_utils import *
2 | from .rotation3d import *
3 | from .backend import Serializable, logger
4 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/core/backend/__init__.py:
--------------------------------------------------------------------------------
1 | from .abstract import Serializable
2 |
3 | from .logger import logger
4 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/core/backend/abstract.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | from abc import ABCMeta, abstractmethod, abstractclassmethod
9 | from collections import OrderedDict
10 | import json
11 |
12 | import numpy as np
13 | import os
14 |
15 | TENSOR_CLASS = {}
16 |
17 |
18 | def register(name):
19 | global TENSOR_CLASS
20 |
21 | def core(tensor_cls):
22 | TENSOR_CLASS[name] = tensor_cls
23 | return tensor_cls
24 |
25 | return core
26 |
27 |
28 | def _get_cls(name):
29 | global TENSOR_CLASS
30 | return TENSOR_CLASS[name]
31 |
32 |
33 | class NumpyEncoder(json.JSONEncoder):
34 | """ Special json encoder for numpy types """
35 |
36 | def default(self, obj):
37 | if isinstance(
38 | obj,
39 | (
40 | np.int_,
41 | np.intc,
42 | np.intp,
43 | np.int8,
44 | np.int16,
45 | np.int32,
46 | np.int64,
47 | np.uint8,
48 | np.uint16,
49 | np.uint32,
50 | np.uint64,
51 | ),
52 | ):
53 | return int(obj)
54 | elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
55 | return float(obj)
56 | elif isinstance(obj, (np.ndarray,)):
57 | return dict(__ndarray__=obj.tolist(), dtype=str(obj.dtype), shape=obj.shape)
58 | return json.JSONEncoder.default(self, obj)
59 |
60 |
61 | def json_numpy_obj_hook(dct):
62 | if isinstance(dct, dict) and "__ndarray__" in dct:
63 | data = np.asarray(dct["__ndarray__"], dtype=dct["dtype"])
64 | return data.reshape(dct["shape"])
65 | return dct
66 |
67 |
68 | class Serializable:
69 | """ Implementation to read/write to file.
70 |     Any class that inherits from this class needs to implement to_dict() and
71 |     from_dict().
72 | """
73 |
74 | @abstractclassmethod
75 | def from_dict(cls, dict_repr, *args, **kwargs):
76 | """ Read the object from an ordered dictionary
77 |
78 | :param dict_repr: the ordered dictionary that is used to construct the object
79 | :type dict_repr: OrderedDict
80 | :param args, kwargs: the arguments that need to be passed into from_dict()
81 | :type args, kwargs: additional arguments
82 | """
83 | pass
84 |
85 | @abstractmethod
86 | def to_dict(self):
87 | """ Construct an ordered dictionary from the object
88 |
89 | :rtype: OrderedDict
90 | """
91 | pass
92 |
93 | @classmethod
94 | def from_file(cls, path, *args, **kwargs):
95 | """ Read the object from a file (either .npy or .json)
96 |
97 | :param path: path of the file
98 | :type path: string
99 | :param args, kwargs: the arguments that need to be passed into from_dict()
100 | :type args, kwargs: additional arguments
101 | """
102 | if path.endswith(".json"):
103 | with open(path, "r") as f:
104 | d = json.load(f, object_hook=json_numpy_obj_hook)
105 | elif path.endswith(".npy"):
106 | d = np.load(path, allow_pickle=True).item()
107 | else:
108 | assert False, "failed to load {} from {}".format(cls.__name__, path)
109 | assert d["__name__"] == cls.__name__, "the file belongs to {}, not {}".format(
110 | d["__name__"], cls.__name__
111 | )
112 | return cls.from_dict(d, *args, **kwargs)
113 |
114 | def to_file(self, path: str) -> None:
115 | """ Write the object to a file (either .npy or .json)
116 |
117 | :param path: path of the file
118 | :type path: string
119 | """
120 | if os.path.dirname(path) != "" and not os.path.exists(os.path.dirname(path)):
121 | os.makedirs(os.path.dirname(path))
122 | d = self.to_dict()
123 | d["__name__"] = self.__class__.__name__
124 | if path.endswith(".json"):
125 | with open(path, "w") as f:
126 | json.dump(d, f, cls=NumpyEncoder, indent=4)
127 | elif path.endswith(".npy"):
128 | np.save(path, d)
129 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/core/backend/logger.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | import logging
9 |
10 | logger = logging.getLogger("poselib")
11 | logger.setLevel(logging.INFO)
12 |
13 | if not len(logger.handlers):
14 | formatter = logging.Formatter(
15 | fmt="%(asctime)-15s - %(levelname)s - %(module)s - %(message)s"
16 | )
17 | handler = logging.StreamHandler()
18 | handler.setFormatter(formatter)
19 | logger.addHandler(handler)
20 | logger.info("logger initialized")
21 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/core/tensor_utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
4 | # NVIDIA CORPORATION and its licensors retain all intellectual property
5 | # and proprietary rights in and to this software, related documentation
6 | # and any modifications thereto. Any use, reproduction, disclosure or
7 | # distribution of this software and related documentation without an express
8 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
9 |
10 | from collections import OrderedDict
11 | from .backend import Serializable
12 | import torch
13 |
14 |
15 | class TensorUtils(Serializable):
16 | @classmethod
17 | def from_dict(cls, dict_repr, *args, **kwargs):
18 | """ Read the object from an ordered dictionary
19 |
20 | :param dict_repr: the ordered dictionary that is used to construct the object
21 | :type dict_repr: OrderedDict
22 | :param kwargs: the arguments that need to be passed into from_dict()
23 | :type kwargs: additional arguments
24 | """
25 | return torch.from_numpy(dict_repr["arr"].astype(dict_repr["context"]["dtype"]))
26 |
27 | def to_dict(self):
28 | """ Construct an ordered dictionary from the object
29 |
30 | :rtype: OrderedDict
31 | """
32 | return NotImplemented
33 |
34 | def tensor_to_dict(x):
35 | """ Construct an ordered dictionary from the object
36 |
37 | :rtype: OrderedDict
38 | """
39 | x_np = x.numpy()
40 | return {
41 | "arr": x_np,
42 | "context": {
43 | "dtype": x_np.dtype.name
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/core/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/poselib/poselib/core/tests/__init__.py
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/core/tests/test_rotation.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | from ..rotation3d import *
9 | import numpy as np
10 | import torch
11 |
12 | q = torch.from_numpy(np.array([[0, 1, 2, 3], [-2, 3, -1, 5]], dtype=np.float32))
13 | print("q", q)
14 | r = quat_normalize(q)
15 | x = torch.from_numpy(np.array([[1, 0, 0], [0, -1, 0]], dtype=np.float32))
16 | print(r)
17 | print(quat_rotate(r, x))
18 |
19 | angle = torch.from_numpy(np.array(np.random.rand() * 10.0, dtype=np.float32))
20 | axis = torch.from_numpy(np.array([1, np.random.rand() * 10.0, np.random.rand() * 10.0], dtype=np.float32),)
21 |
22 | print(repr(angle))
23 | print(repr(axis))
24 |
25 | rot = quat_from_angle_axis(angle, axis)
26 | x = torch.from_numpy(np.random.rand(5, 6, 3))
27 | y = quat_rotate(quat_inverse(rot), quat_rotate(rot, x))
28 | print(x.numpy())
29 | print(y.numpy())
30 | assert np.allclose(x.numpy(), y.numpy())
31 |
32 | m = torch.from_numpy(np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]], dtype=np.float32))
33 | r = quat_from_rotation_matrix(m)
34 | t = torch.from_numpy(np.array([0, 1, 0], dtype=np.float32))
35 | se3 = transform_from_rotation_translation(r=r, t=t)
36 | print(se3)
37 | print(transform_apply(se3, t))
38 |
39 | rot = quat_from_angle_axis(
40 | torch.from_numpy(np.array([45, -54], dtype=np.float32)),
41 | torch.from_numpy(np.array([[1, 0, 0], [0, 1, 0]], dtype=np.float32)),
42 | degree=True,
43 | )
44 | trans = torch.from_numpy(np.array([[1, 1, 0], [1, 1, 0]], dtype=np.float32))
45 | transform = transform_from_rotation_translation(r=rot, t=trans)
46 |
47 | t = transform_mul(transform, transform_inverse(transform))
48 | gt = np.zeros((2, 7))
49 | gt[:, 0] = 1.0
50 | print(t.numpy())
51 | print(gt)
52 | # assert np.allclose(t.numpy(), gt)
53 |
54 | transform2 = torch.from_numpy(np.array([[1, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], dtype=np.float32),)
55 | transform2 = euclidean_to_transform(transform2)
56 | print(transform2)
57 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/skeleton/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/poselib/poselib/skeleton/__init__.py
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/skeleton/backend/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/poselib/poselib/skeleton/backend/__init__.py
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/skeleton/backend/fbx/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/poselib/poselib/skeleton/backend/fbx/__init__.py
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/skeleton/backend/fbx/fbx_read_wrapper.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 |
4 | NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary
5 | rights in and to this software, related documentation and any modifications thereto. Any
6 | use, reproduction, disclosure or distribution of this software and related documentation
7 | without an express license agreement from NVIDIA CORPORATION is strictly prohibited.
8 | """
9 |
10 | """
11 | Script that reads in fbx files from python 2
12 |
13 | This requires a configs file, which contains the command necessary to switch conda
14 | environments to run the fbx reading script from python 2
15 | """
16 |
17 | from ....core import logger
18 |
19 | import inspect
20 | import os
21 |
22 | import numpy as np
23 |
24 | # Get the current folder to import the config file
25 | current_folder = os.path.realpath(
26 | os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])
27 | )
28 |
29 |
30 | def fbx_to_array(fbx_file_path, fbx_configs, root_joint, fps):
31 | """
32 | Reads an fbx file to an array.
33 |
34 | Currently reading of the frame time is not supported. 120 fps is hard coded TODO
35 |
36 | :param fbx_file_path: str, file path to fbx
37 | :return: tuple with joint_names, parents, transforms, frame time
38 | """
39 |
40 | # Ensure the file path is valid
41 | fbx_file_path = os.path.abspath(fbx_file_path)
42 | assert os.path.exists(fbx_file_path)
43 |
44 | # Switch directories to the env_utils folder to ensure the reading works
45 | previous_cwd = os.getcwd()
46 | os.chdir(current_folder)
47 |
48 | # Call the python 2.7 script
49 | temp_file_path = os.path.join(current_folder, fbx_configs["tmp_path"])
50 | python_path = fbx_configs["fbx_py27_path"]
51 | logger.info("executing python script to read fbx data using Autodesk FBX SDK...")
52 | command = '{} fbx_py27_backend.py "{}" "{}" "{}" "{}"'.format(
53 | python_path, fbx_file_path, temp_file_path, root_joint, fps
54 | )
55 | logger.debug("executing command: {}".format(command))
56 | os.system(command)
57 | logger.info(
58 | "executing python script to read fbx data using Autodesk FBX SDK... done"
59 | )
60 |
61 | with open(temp_file_path, "rb") as f:
62 | data = np.load(f)
63 | output = (
64 | data["names"],
65 | data["parents"],
66 | data["transforms"],
67 | data["fps"],
68 | )
69 |
70 | # Remove the temporary file
71 | os.remove(temp_file_path)
72 |
73 | # Return the os to its previous cwd, otherwise reading multiple files might fail
74 | os.chdir(previous_cwd)
75 | return output
76 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/skeleton/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/poselib/poselib/skeleton/tests/__init__.py
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/skeleton/tests/ant.xml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/poselib/poselib/skeleton/tests/ant.xml
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/skeleton/tests/test_skeleton.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | from ...core import *
9 | from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion
10 |
11 | import numpy as np
12 | import torch
13 |
14 | from ...visualization.common import (
15 | plot_skeleton_state,
16 | plot_skeleton_motion_interactive,
17 | )
18 |
19 | from ...visualization.plt_plotter import Matplotlib3DPlotter
20 | from ...visualization.skeleton_plotter_tasks import (
21 | Draw3DSkeletonMotion,
22 | Draw3DSkeletonState,
23 | )
24 |
25 |
26 | def test_skel_tree():
27 | skel_tree = SkeletonTree.from_mjcf(
28 | "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml",
29 | backend="pytorch",
30 | )
31 | skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch")
32 | # assert skel_tree.to_str() == skel_tree_rec.to_str()
33 | print(skel_tree.node_names)
34 | print(skel_tree.local_translation)
35 | print(skel_tree.parent_indices)
36 | skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree)
37 | plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)
38 | skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"])
39 | plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)
40 |
41 |
42 | def test_skel_motion():
43 | skel_motion = SkeletonMotion.from_file(
44 | "/tmp/tmp.npy", backend="pytorch", load_context=True
45 | )
46 |
47 | plot_skeleton_motion_interactive(skel_motion)
48 |
49 |
50 | def test_grad():
51 | source_motion = SkeletonMotion.from_file(
52 | "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy",
53 | backend="pytorch",
54 | device="cuda:0",
55 | )
56 | source_tpose = SkeletonState.from_file(
57 | "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy",
58 | backend="pytorch",
59 | device="cuda:0",
60 | )
61 |
62 | target_tpose = SkeletonState.from_file(
63 | "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy",
64 | backend="pytorch",
65 | device="cuda:0",
66 | )
67 | target_skeleton_tree = target_tpose.skeleton_tree
68 |
69 | joint_mapping = {
70 | "upArm_r": "right_shoulder",
71 | "upArm_l": "left_shoulder",
72 | "loArm_r": "right_elbow",
73 | "loArm_l": "left_elbow",
74 | "upLeg_r": "right_hip",
75 | "upLeg_l": "left_hip",
76 | "loLeg_r": "right_knee",
77 | "loLeg_l": "left_knee",
78 | "foot_r": "right_ankle",
79 | "foot_l": "left_ankle",
80 | "hips": "pelvis",
81 | "neckA": "neck",
82 | "spineA": "abdomen",
83 | }
84 |
85 | rotation_to_target_skeleton = quat_from_angle_axis(
86 | angle=torch.tensor(90.0).float(),
87 | axis=torch.tensor([1, 0, 0]).float(),
88 | degree=True,
89 | )
90 |
91 | target_motion = source_motion.retarget_to(
92 | joint_mapping=joint_mapping,
93 | source_tpose_local_rotation=source_tpose.local_rotation,
94 | source_tpose_root_translation=source_tpose.root_translation,
95 | target_skeleton_tree=target_skeleton_tree,
96 | target_tpose_local_rotation=target_tpose.local_rotation,
97 | target_tpose_root_translation=target_tpose.root_translation,
98 | rotation_to_target_skeleton=rotation_to_target_skeleton,
99 | scale_to_target_skeleton=0.01,
100 | )
101 |
102 | target_state = SkeletonState(
103 | target_motion.tensor[800, :],
104 | target_motion.skeleton_tree,
105 | target_motion.is_local,
106 | )
107 |
108 | skeleton_tree = target_state.skeleton_tree
109 | root_translation = target_state.root_translation
110 | global_translation = target_state.global_translation
111 |
112 | q = np.zeros((len(skeleton_tree), 4), dtype=np.float32)
113 | q[..., 3] = 1.0
114 | q = torch.from_numpy(q)
115 | max_its = 10000
116 |
117 | task = Draw3DSkeletonState(task_name="", skeleton_state=target_state)
118 | plotter = Matplotlib3DPlotter(task)
119 |
120 | for i in range(max_its):
121 | r = quat_normalize(q)
122 | s = SkeletonState.from_rotation_and_root_translation(
123 | skeleton_tree, r=r, t=root_translation, is_local=True
124 | )
125 | print(" quat norm: {}".format(q.norm(p=2, dim=-1).mean().numpy()))
126 |
127 | task.update(s)
128 | plotter.update()
129 | plotter.show()
130 |
131 |
132 | test_grad()
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/skeleton/tests/transfer_npy.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | import numpy as np
9 | from ...core import Tensor, SO3, Quaternion, Vector3D
10 | from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion
11 |
12 | tpose = np.load(
13 | "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/flex_tpose.npy"
14 | ).item()
15 |
16 | local_rotation = SO3.from_numpy(tpose["local_rotation"], dtype="float32")
17 | root_translation = Vector3D.from_numpy(tpose["root_translation"], dtype="float32")
18 | skeleton_tree = tpose["skeleton_tree"]
19 | parent_indices = Tensor.from_numpy(skeleton_tree["parent_indices"], dtype="int32")
20 | local_translation = Vector3D.from_numpy(
21 | skeleton_tree["local_translation"], dtype="float32"
22 | )
23 | node_names = skeleton_tree["node_names"]
24 | skeleton_tree = SkeletonTree(node_names, parent_indices, local_translation)
25 | skeleton_state = SkeletonState.from_rotation_and_root_translation(
26 | skeleton_tree=skeleton_tree, r=local_rotation, t=root_translation, is_local=True
27 | )
28 |
29 | skeleton_state.to_file(
30 | "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/flex_tpose_new.npy"
31 | )
32 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/visualization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/poselib/poselib/visualization/__init__.py
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/visualization/core.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | # NVIDIA CORPORATION and its licensors retain all intellectual property
3 | # and proprietary rights in and to this software, related documentation
4 | # and any modifications thereto. Any use, reproduction, disclosure or
5 | # distribution of this software and related documentation without an express
6 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
7 |
8 | """
9 | The base abstract classes for the plotter and the plotting tasks. They describe how the
10 | plotter deals with tasks in the general case.
11 | """
12 | from typing import List
13 |
14 |
15 | class BasePlotterTask(object):
16 | _task_name: str # unique name of the task
17 |     _task_type: str  # type of the task; used to identify which callable handles it
18 |
19 | def __init__(self, task_name: str, task_type: str) -> None:
20 | self._task_name = task_name
21 | self._task_type = task_type
22 |
23 | @property
24 | def task_name(self):
25 | return self._task_name
26 |
27 | @property
28 | def task_type(self):
29 | return self._task_type
30 |
31 | def get_scoped_name(self, name):
32 | return self._task_name + "/" + name
33 |
34 | def __iter__(self):
35 | """Should override this function to return a list of task primitives
36 | """
37 | raise NotImplementedError
38 |
39 |
40 | class BasePlotterTasks(object):
41 | def __init__(self, tasks) -> None:
42 | self._tasks = tasks
43 |
44 | def __iter__(self):
45 | for task in self._tasks:
46 | yield from task
47 |
48 |
49 | class BasePlotter(object):
50 | """An abstract plotter which deals with a plotting task. The children class needs to implement
51 | the functions to create/update the objects according to the task given
52 | """
53 |
54 | _task_primitives: List[BasePlotterTask]
55 |
56 | def __init__(self, task: BasePlotterTask) -> None:
57 | self._task_primitives = []
58 | self.create(task)
59 |
60 | @property
61 | def task_primitives(self):
62 | return self._task_primitives
63 |
64 | def create(self, task: BasePlotterTask) -> None:
65 | """Create more task primitives from a task for the plotter"""
66 | new_task_primitives = list(task) # get all task primitives
67 | self._task_primitives += new_task_primitives # append them
68 | self._create_impl(new_task_primitives)
69 |
70 | def update(self) -> None:
71 | """Update the plotter for any updates in the task primitives"""
72 | self._update_impl(self._task_primitives)
73 |
74 | def _update_impl(self, task_list: List[BasePlotterTask]) -> None:
75 | raise NotImplementedError
76 |
77 | def _create_impl(self, task_list: List[BasePlotterTask]) -> None:
78 | raise NotImplementedError
79 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/visualization/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/poselib/poselib/visualization/tests/__init__.py
--------------------------------------------------------------------------------
/smpl_retarget/poselib/poselib/visualization/tests/test_plotter.py:
--------------------------------------------------------------------------------
1 | from typing import cast
2 |
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 |
6 | from ..core import BasePlotterTask, BasePlotterTasks
7 | from ..plt_plotter import Matplotlib3DPlotter
8 | from ..simple_plotter_tasks import Draw3DDots, Draw3DLines
9 |
10 | task = Draw3DLines(task_name="test",
11 | lines=np.array([[[0, 0, 0], [0, 0, 1]], [[0, 1, 1], [0, 1, 0]]]), color="blue")
12 | task2 = Draw3DDots(task_name="test2",
13 | dots=np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1], [0, 1, 0]]), color="red")
14 | task3 = BasePlotterTasks([task, task2])
15 | plotter = Matplotlib3DPlotter(cast(BasePlotterTask, task3))
16 | plt.show()
17 |
--------------------------------------------------------------------------------
/smpl_retarget/poselib/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | setup(
4 | name="poselib",
5 | packages=["poselib"],
6 | version="0.0.42",
7 | description="Framework Agnostic Tensor Programming",
8 | author="Qiyang Li, Kelly Guo, Brendon Matusch",
9 | classifiers=[
10 | "Programming Language :: Python",
11 | "Programming Language :: Python :: 3",
12 | "License :: OSI Approved :: GNU General Public License (GPL)",
13 | "Operating System :: OS Independent",
14 | "Development Status :: 1 - Planning",
15 | "Environment :: Console",
16 | "Intended Audience :: Science/Research",
17 | "Topic :: Scientific/Engineering :: GIS",
18 | ],
19 | )
20 |
--------------------------------------------------------------------------------
/smpl_retarget/retargeted_motion_data/phc/shape_optimized_v1.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_retarget/retargeted_motion_data/phc/shape_optimized_v1.pkl
--------------------------------------------------------------------------------
/smpl_vis/README.md:
--------------------------------------------------------------------------------
1 | # SMPL Motions Visualization
2 |
3 | This folder describes how to visualize the SMPL format motion data. We provide two methods: `Blender` and `PyTorch3D`.
4 |
5 | ## Blender Vis
6 |
7 | [Blender](https://www.blender.org/) is open-source software for 3D computer graphics. It is also a powerful tool for motion visualization.
8 | 
9 | Import `npz` motion data into Blender for visualization:
10 | 
11 | 1. Download Blender version `2.9.0` and the [SMPL add-on](https://smpl.is.tue.mpg.de/index.html).
12 | 
13 | 2. Add the SMPL object to the Blender scene, then run the `import_motion_blender.py` script in the `Scripting` tab to bind the motion data to the SMPL object.
14 |
15 | ## PyTorch3D Vis
16 |
17 | This implementation is adapted from the `GVHMR` code, so please refer to its [installation](https://github.com/zju3dv/GVHMR/blob/main/docs/INSTALL.md) process.
18 |
19 | `smpl_neutral_J_regressor.pt` and `smplx2smpl_sparse.pt` must be put in `./body_model` (these two files can be found in the [repo](https://github.com/zju3dv/GVHMR/tree/main/hmr4d/utils/body_model)). Download the [SMPL-X](https://smpl-x.is.tue.mpg.de/) parameters, place them in the `../smpl_retarget/smpl_model/smplx/` folder, and rename the file to `SMPLX_NEUTRAL.npz`. The folder structure of `../smpl_retarget/smpl_model` should be organized like this:
20 | ```
21 | smpl_model/
22 | ├── smpl/
23 | | └── SMPL_FEMALE.pkl
24 | | └── SMPL_MALE.pkl
25 | | └── SMPL_NEUTRAL.pkl
26 | └── smplx/
27 | └── SMPLX_NEUTRAL.npz
28 | ```
29 |
30 | Run the following command to visualize motion:
31 |
32 | ```
33 | python smpl_render.py --filepath <folder_path>
34 | ```
35 |
36 | - `<folder_path>` is the folder containing the `.npz` motion files.
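For example, pointing it at the video-motion folder from the retargeting layout (hypothetical path):
```
python smpl_render.py --filepath motion_data/video_motion/
```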
--------------------------------------------------------------------------------
/smpl_vis/import_motion_blender.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import numpy as np
3 | from pathlib import Path
4 | from scipy.spatial.transform import Rotation as sRot
5 | 
6 |
7 | npz_path = "Path_TO_NPZ"
8 | body_model_type = "SMPL-male"
9 | gender = "neutral"
10 | fps = 30
11 |
12 | try:
13 | data = np.load(npz_path, allow_pickle=True)
14 | print("Loaded keys:", list(data.keys()))
15 | except Exception as e:
16 |     raise Exception(f"cannot import the file: {str(e)}")
17 |
18 | poses = data['poses'][:, :72]
19 | betas = data['betas']
20 | trans = data['trans']
21 |
22 | if body_model_type not in bpy.data.objects:
23 |     raise Exception(f"please first import {body_model_type} into the scene")
24 |
25 | body = bpy.data.objects[body_model_type]
26 |
27 | if 'betas' in data:
28 | if hasattr(body, 'shape_keys'):
29 | for i in range(10):
30 | key = body.shape_keys.key_blocks.get(f'Shape{i:03d}')
31 | if key:
32 | key.value = betas[i]
33 |
34 | bone_list = ['Pelvis', 'L_Hip', 'R_Hip', 'Spine1', 'L_Knee', 'R_Knee', 'Spine2', 'L_Ankle',
35 | 'R_Ankle', 'Spine3', 'L_Foot', 'R_Foot', 'Neck', 'L_Collar', 'R_Collar',
36 | 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist',
37 | 'R_Wrist', 'L_Hand', 'R_Hand']
38 |
39 | bpy.context.scene.frame_end = len(poses)
40 | for frame_idx in range(len(poses)):
41 | pose = poses[frame_idx]
42 | trans_vec = trans[frame_idx]
43 |
44 | i = 0
45 |     for bone_name in bone_list:
46 |         bone = body.pose.bones[bone_name]
47 |         bone.rotation_mode = 'XYZ'
48 |         if bone_name == 'Pelvis':  # compare the name, not the pose-bone object
49 | bone.rotation_euler = (sRot.from_rotvec(pose[:3]) * sRot.from_euler('xyz', np.array([np.pi / 2, 0, np.pi]))).as_rotvec()
50 | else:
51 | bone.rotation_euler = (pose[i*3], pose[i*3+1], pose[i*3+2])
52 | bone.keyframe_insert(data_path="rotation_euler", frame=frame_idx)
53 | i+=1
54 |
55 | root_bone = body.pose.bones['Pelvis']
56 | root_bone.location = (trans_vec[0], trans_vec[1], trans_vec[2])
57 | root_bone.keyframe_insert(data_path="location", frame=frame_idx)
58 | #root_bone.location = trans_vec
59 |
60 | body.keyframe_insert(data_path="location", frame=frame_idx)
61 | body.keyframe_insert(data_path="rotation_euler", frame=frame_idx)
62 |
63 | print(f"Processed frame {frame_idx}/{len(poses)}")
64 |
65 | bpy.context.scene.render.fps = fps
66 | bpy.context.scene.frame_preview_start = 0
67 | bpy.context.scene.frame_preview_end = len(poses)
68 |
69 | print("Import Success!")
70 |
--------------------------------------------------------------------------------
/smpl_vis/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TeleHuman/PBHC/0cb674b452c0ddbe37b5701522b475fac3afafd4/smpl_vis/utils/__init__.py
--------------------------------------------------------------------------------
/smpl_vis/utils/geo_transform.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn.functional as F
4 | from einops import einsum
5 | import imageio.v3 as iio
6 |
7 | def get_writer(video_path, fps=30, crf=17):
8 | """remember to .close()"""
9 | writer = iio.imopen(video_path, "w", plugin="pyav")
10 | writer.init_video_stream("libx264", fps=fps)
11 | writer._video_stream.options = {"crf": str(crf)}
12 | return writer
13 |
14 |
15 | def get_video_lwh(video_path):
16 | L, H, W, _ = iio.improps(video_path, plugin="pyav").shape
17 | return L, W, H
18 |
19 | def create_camera_sensor(width=None, height=None, f_fullframe=None):
20 | if width is None or height is None:
21 | # The 4:3 aspect ratio is widely adopted by image sensors in mobile phones.
22 | if np.random.rand() < 0.5:
23 | width, height = 1200, 1600
24 | else:
25 | width, height = 1600, 1200
26 |
27 | # Sample FOV from common options:
28 | # 1. wide-angle lenses are common in mobile phones,
29 |     # 2. telephoto lenses have less perspective distortion, which should make it easier to learn
30 | if f_fullframe is None:
31 | f_fullframe_options = [24, 26, 28, 30, 35, 40, 50, 60, 70]
32 | f_fullframe = np.random.choice(f_fullframe_options)
33 |
34 | # We use diag to map focal-length: https://www.nikonians.org/reviews/fov-tables
35 | diag_fullframe = (24**2 + 36**2) ** 0.5
36 | diag_img = (width**2 + height**2) ** 0.5
37 | focal_length = diag_img / diag_fullframe * f_fullframe
38 |
39 | K_fullimg = torch.eye(3)
40 | K_fullimg[0, 0] = focal_length
41 | K_fullimg[1, 1] = focal_length
42 | K_fullimg[0, 2] = width / 2
43 | K_fullimg[1, 2] = height / 2
44 |
45 | return width, height, K_fullimg
46 |
47 | def apply_T_on_points(points, T):
48 | """
49 | Args:
50 | points: (..., N, 3)
51 | T: (..., 4, 4)
52 | Returns: (..., N, 3)
53 | """
54 | points_T = torch.einsum("...ki,...ji->...jk", T[..., :3, :3], points) + T[..., None, :3, 3]
55 | return points_T
56 |
57 | def transform_mat(R, t):
58 | """
59 | Args:
60 | R: Bx3x3 array of a batch of rotation matrices
61 | t: Bx3x(1) array of a batch of translation vectors
62 | Returns:
63 | T: Bx4x4 Transformation matrix
64 | """
65 | # No padding left or right, only add an extra row
66 | if len(R.shape) > len(t.shape):
67 | t = t[..., None]
68 | return torch.cat([F.pad(R, [0, 0, 0, 1]), F.pad(t, [0, 0, 0, 1], value=1)], dim=-1)
69 |
70 | def compute_T_ayfz2ay(joints, inverse=False):
71 | """
72 | Args:
73 | joints: (B, J, 3), in the start-frame, ay-coordinate
74 | Returns:
75 | if inverse == False:
76 | T_ayfz2ay: (B, 4, 4)
77 | else :
78 | T_ay2ayfz: (B, 4, 4)
79 | """
80 | t_ayfz2ay = joints[:, 0, :].detach().clone()
81 | t_ayfz2ay[:, 1] = 0 # do not modify y
82 |
83 | RL_xz_h = joints[:, 1, [0, 2]] - joints[:, 2, [0, 2]] # (B, 2), hip point to left side
84 | RL_xz_s = joints[:, 16, [0, 2]] - joints[:, 17, [0, 2]] # (B, 2), shoulder point to left side
85 | RL_xz = RL_xz_h + RL_xz_s
86 |     I_mask = RL_xz.pow(2).sum(-1) < 1e-4  # do not rotate when the facing direction cannot be determined
87 | # if I_mask.sum() > 0:
88 | # Log.warn("{} samples can't decide the face direction".format(I_mask.sum()))
89 |
90 | x_dir = torch.zeros_like(t_ayfz2ay) # (B, 3)
91 | x_dir[:, [0, 2]] = F.normalize(RL_xz, 2, -1)
92 | y_dir = torch.zeros_like(x_dir)
93 | y_dir[..., 1] = 1 # (B, 3)
94 | z_dir = torch.cross(x_dir, y_dir, dim=-1)
95 | R_ayfz2ay = torch.stack([x_dir, y_dir, z_dir], dim=-1) # (B, 3, 3)
96 | R_ayfz2ay[I_mask] = torch.eye(3).to(R_ayfz2ay)
97 |
98 | if inverse:
99 | R_ay2ayfz = R_ayfz2ay.transpose(1, 2)
100 | t_ay2ayfz = -einsum(R_ayfz2ay, t_ayfz2ay, "b i j , b i -> b j")
101 | return transform_mat(R_ay2ayfz, t_ay2ayfz)
102 | else:
103 | return transform_mat(R_ayfz2ay, t_ayfz2ay)
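A small sanity-check sketch for the two helpers above (random joints; any joint count works as long as indices 0, 1, 2, 16, and 17 exist, per the SMPL-style ordering `compute_T_ayfz2ay` assumes; module name in the import is illustrative):
```
import torch
from geo_transform import apply_T_on_points, compute_T_ayfz2ay

joints = torch.randn(2, 22, 3)                      # (B, J, 3), ay-coordinate
T_ay2ayfz = compute_T_ayfz2ay(joints, inverse=True)
joints_fz = apply_T_on_points(joints, T_ay2ayfz)    # re-expressed in the facing-z frame
```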
--------------------------------------------------------------------------------
/smpl_vis/utils/smpl_utils.py:
--------------------------------------------------------------------------------
1 | from .body_model_smpl import BodyModelSMPLH, BodyModelSMPLX
2 | def make_smplx(type="neu_fullpose", **kwargs):
3 | if type == "smpl":
4 | bm_kwargs = {
5 | "model_path": "../smpl_retarget/smpl_model",
6 | "model_type": "smpl",
7 | "gender": "neutral",
8 | "num_betas": 10,
9 | "create_body_pose": False,
10 | "create_betas": False,
11 | "create_global_orient": False,
12 | "create_transl": False,
13 | }
14 | bm_kwargs.update(kwargs)
15 | # model = SMPL(**bm_kwargs)
16 | model = BodyModelSMPLH(**bm_kwargs)
17 | elif type == "smplh":
18 | bm_kwargs = {
19 | "model_type": "smplh",
20 | "gender": kwargs.get("gender", "male"),
21 | "use_pca": False,
22 | "flat_hand_mean": False,
23 | }
24 | model = BodyModelSMPLH(model_path="../body_model", **bm_kwargs)
25 | elif type == "supermotion":
26 | # SuperMotion is trained on BEDLAM dataset, the smplx config is the same except only 10 betas are used
27 | bm_kwargs = {
28 | "model_type": "smplx",
29 | "gender": "neutral",
30 | "num_pca_comps": 12,
31 | "flat_hand_mean": False,
32 | }
33 | bm_kwargs.update(kwargs)
34 | model = BodyModelSMPLX(model_path="../smpl_retarget/smpl_model", **bm_kwargs)
35 | else:
36 | raise NotImplementedError
37 |
38 | return model
--------------------------------------------------------------------------------