├── .gitignore
├── LICENSE
├── README.md
├── assets
│   ├── g1.gif
│   ├── hero_static.gif
│   ├── ip.png
│   └── t1.gif
├── humanoidverse
├── __init__.py
├── agents
│ ├── __init__.py
│ ├── base_algo
│ │ └── base_algo.py
│ ├── callbacks
│ │ ├── __init__.py
│ │ ├── analysis_plot_template.html
│ │ └── base_callback.py
│ ├── decouple
│ │ └── ppo_decoupled_wbc_ma.py
│ ├── modules
│ │ ├── __init__.py
│ │ ├── data_utils.py
│ │ ├── encoder_modules.py
│ │ ├── modules.py
│ │ ├── ppo_modules.py
│ │ └── world_models.py
│ ├── ppo
│ │ └── ppo.py
│ └── ppo_locomanip.py
├── config
│ ├── algo
│ │ └── ppo_decoupled_wbc_ma.yaml
│ ├── base.yaml
│ ├── base
│ │ ├── fabric.yaml
│ │ ├── hydra.yaml
│ │ └── structure.yaml
│ ├── base_eval.yaml
│ ├── domain_rand
│ │ ├── NO_domain_rand.yaml
│ │ └── domain_rand_rl_gym.yaml
│ ├── env
│ │ ├── base_task.yaml
│ │ ├── decoupled_locomotion_stand_height_waist_wbc_ma_diff_force.yaml
│ │ └── legged_base.yaml
│ ├── exp
│ │ ├── base_exp.yaml
│ │ ├── decoupled_locomotion_stand_height_waist_wbc_diff_force_ma_ppo_ma_env.yaml
│ │ └── legged_base.yaml
│ ├── obs
│ │ ├── dec_loco
│ │ │ ├── g1_29dof_obs_diff_force_history_wolinvel_ma.yaml
│ │ │ └── t1_29dof_obs_diff_force_history_wolinvel_ma.yaml
│ │ └── legged_obs.yaml
│ ├── opt
│ │ ├── record.yaml
│ │ └── wandb.yaml
│ ├── rewards
│ │ └── dec_loco
│ │ │ └── reward_dec_loco_stand_height_ma_diff_force.yaml
│ ├── robot
│ │ ├── g1
│ │ │ └── g1_29dof_waist_fakehand.yaml
│ │ ├── robot_base.yaml
│ │ └── t1
│ │ │ └── t1_29dof_waist_wrist.yaml
│ ├── simulator
│ │ ├── genesis.yaml
│ │ ├── isaacgym.yaml
│ │ ├── isaacsim.yaml
│ │ └── mujoco.yaml
│ └── terrain
│ │ ├── terrain_base.yaml
│ │ ├── terrain_locomotion.yaml
│ │ └── terrain_locomotion_plane.yaml
├── data
│ ├── motions
│ │ ├── g1_29dof
│ │ │ └── v1
│ │ │ │ └── accad_all.pkl
│ │ └── t1_29dof
│ │ │ └── v1
│ │ │ └── accad_all.pkl
│ ├── robots
│ │ ├── g1
│ │ │ ├── .asset_hash
│ │ │ ├── g1_29dof.urdf
│ │ │ ├── g1_29dof.xml
│ │ │ ├── g1_29dof_fakehand.urdf
│ │ │ ├── g1_29dof_fakehand_freebase.urdf
│ │ │ ├── g1_29dof_old.xml
│ │ │ ├── g1_29dof_old_freebase.xml
│ │ │ ├── g1_29dof_old_freebase_camera.xml
│ │ │ ├── meshes
│ │ │ │ ├── head_link.STL
│ │ │ │ ├── left_G1_EE_pad.STL
│ │ │ │ ├── left_ankle_pitch_link.STL
│ │ │ │ ├── left_ankle_roll_link.STL
│ │ │ │ ├── left_elbow_link.STL
│ │ │ │ ├── left_elbow_link_merge.STL
│ │ │ │ ├── left_hand_index_0_link.STL
│ │ │ │ ├── left_hand_index_1_link.STL
│ │ │ │ ├── left_hand_middle_0_link.STL
│ │ │ │ ├── left_hand_middle_1_link.STL
│ │ │ │ ├── left_hand_palm_link.STL
│ │ │ │ ├── left_hand_thumb_0_link.STL
│ │ │ │ ├── left_hand_thumb_1_link.STL
│ │ │ │ ├── left_hand_thumb_2_link.STL
│ │ │ │ ├── left_hip_pitch_link.STL
│ │ │ │ ├── left_hip_roll_link.STL
│ │ │ │ ├── left_hip_yaw_link.STL
│ │ │ │ ├── left_knee_link.STL
│ │ │ │ ├── left_rubber_hand.STL
│ │ │ │ ├── left_shoulder_pitch_link.STL
│ │ │ │ ├── left_shoulder_roll_link.STL
│ │ │ │ ├── left_shoulder_yaw_link.STL
│ │ │ │ ├── left_wrist_pitch_link.STL
│ │ │ │ ├── left_wrist_roll_link.STL
│ │ │ │ ├── left_wrist_roll_rubber_hand.STL
│ │ │ │ ├── left_wrist_yaw_link.STL
│ │ │ │ ├── logo_link.STL
│ │ │ │ ├── pelvis.STL
│ │ │ │ ├── pelvis_contour_link.STL
│ │ │ │ ├── right_G1_EE_pad.STL
│ │ │ │ ├── right_ankle_pitch_link.STL
│ │ │ │ ├── right_ankle_roll_link.STL
│ │ │ │ ├── right_elbow_link.STL
│ │ │ │ ├── right_elbow_link_merge.STL
│ │ │ │ ├── right_hand_index_0_link.STL
│ │ │ │ ├── right_hand_index_1_link.STL
│ │ │ │ ├── right_hand_middle_0_link.STL
│ │ │ │ ├── right_hand_middle_1_link.STL
│ │ │ │ ├── right_hand_palm_link.STL
│ │ │ │ ├── right_hand_thumb_0_link.STL
│ │ │ │ ├── right_hand_thumb_1_link.STL
│ │ │ │ ├── right_hand_thumb_2_link.STL
│ │ │ │ ├── right_hip_pitch_link.STL
│ │ │ │ ├── right_hip_roll_link.STL
│ │ │ │ ├── right_hip_yaw_link.STL
│ │ │ │ ├── right_knee_link.STL
│ │ │ │ ├── right_rubber_hand.STL
│ │ │ │ ├── right_shoulder_pitch_link.STL
│ │ │ │ ├── right_shoulder_roll_link.STL
│ │ │ │ ├── right_shoulder_yaw_link.STL
│ │ │ │ ├── right_wrist_pitch_link.STL
│ │ │ │ ├── right_wrist_roll_link.STL
│ │ │ │ ├── right_wrist_roll_rubber_hand.STL
│ │ │ │ ├── right_wrist_yaw_link.STL
│ │ │ │ ├── torso_constraint_L_link.STL
│ │ │ │ ├── torso_constraint_L_rod_link.STL
│ │ │ │ ├── torso_constraint_R_link.STL
│ │ │ │ ├── torso_constraint_R_rod_link.STL
│ │ │ │ ├── torso_link.STL
│ │ │ │ ├── tote_no_bar.STL
│ │ │ │ ├── waist_constraint_L.STL
│ │ │ │ ├── waist_constraint_R.STL
│ │ │ │ ├── waist_roll_link.STL
│ │ │ │ ├── waist_support_link.STL
│ │ │ │ └── waist_yaw_link.STL
│ │ │ ├── scene_29dof_freebase.xml
│ │ │ └── scene_29dof_freebase_camera.xml
│ │ └── t1
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── meshes
│ │ │ ├── AL1.STL
│ │ │ ├── AL2.STL
│ │ │ ├── AL3.STL
│ │ │ ├── AL4.STL
│ │ │ ├── AL5.STL
│ │ │ ├── AL6.STL
│ │ │ ├── AL7.STL
│ │ │ ├── AR1.STL
│ │ │ ├── AR2.STL
│ │ │ ├── AR3.STL
│ │ │ ├── AR4.STL
│ │ │ ├── AR5.STL
│ │ │ ├── AR6.STL
│ │ │ ├── AR7.STL
│ │ │ ├── Ankle_Cross_Left.STL
│ │ │ ├── Ankle_Cross_Right.STL
│ │ │ ├── Crank_Down_Left.STL
│ │ │ ├── Crank_Down_Right.STL
│ │ │ ├── Crank_Up_Left.STL
│ │ │ ├── Crank_Up_Right.STL
│ │ │ ├── Down_Left_XX_Ball.STL
│ │ │ ├── Down_Left_X_Ball.STL
│ │ │ ├── Down_Left_Y_Ball.STL
│ │ │ ├── Down_Right_XX_Ball.STL
│ │ │ ├── Down_Right_X_Ball.STL
│ │ │ ├── Down_Right_Y_Ball.STL
│ │ │ ├── H1.STL
│ │ │ ├── H2.STL
│ │ │ ├── Hip_Pitch_Left.STL
│ │ │ ├── Hip_Pitch_Right.STL
│ │ │ ├── Hip_Roll_Left.STL
│ │ │ ├── Hip_Roll_Right.STL
│ │ │ ├── Hip_Yaw_Left.STL
│ │ │ ├── Hip_Yaw_Right.STL
│ │ │ ├── Link_Long_Left.STL
│ │ │ ├── Link_Long_Right.STL
│ │ │ ├── Link_Short_Left.STL
│ │ │ ├── Link_Short_Right.STL
│ │ │ ├── Shank_Left.STL
│ │ │ ├── Shank_Right.STL
│ │ │ ├── Trunk.STL
│ │ │ ├── Up_Left_XX_Ball.STL
│ │ │ ├── Up_Left_X_Ball.STL
│ │ │ ├── Up_Left_Y_Ball.STL
│ │ │ ├── Up_Right_XX_Ball.STL
│ │ │ ├── Up_Right_X_Ball.STL
│ │ │ ├── Up_Right_Y_Ball.STL
│ │ │ ├── Waist.STL
│ │ │ ├── left_Link1.STL
│ │ │ ├── left_Link11.STL
│ │ │ ├── left_Link2.STL
│ │ │ ├── left_Link22.STL
│ │ │ ├── left_base_link.STL
│ │ │ ├── left_foot_link.STL
│ │ │ ├── left_hand_link.STL
│ │ │ ├── left_rubber_hand.STL
│ │ │ ├── right_Link1.STL
│ │ │ ├── right_Link11.STL
│ │ │ ├── right_Link2.STL
│ │ │ ├── right_Link22.STL
│ │ │ ├── right_base_link.STL
│ │ │ ├── right_foot_link.STL
│ │ │ ├── right_hand_link.STL
│ │ │ └── right_rubber_hand.STL
│ │ │ ├── scene_t1_29dof.xml
│ │ │ ├── t1.png
│ │ │ ├── t1_29dof.urdf
│ │ │ └── t1_29dof.xml
│ └── totes
│ │ ├── meshes
│ │ └── base_link.STL
│ │ └── urdf
│ │ └── tote.urdf
├── envs
│ ├── __init__.py
│ ├── base_task
│ │ ├── __init__.py
│ │ └── base_task.py
│ ├── decoupled_locomotion
│ │ ├── __init__.py
│ │ ├── decoupled_locomotion_stand_height_waist_wbc_ma.py
│ │ ├── decoupled_locomotion_stand_height_waist_wbc_ma_diff_force.py
│ │ └── decoupled_locomotion_stand_ma.py
│ ├── env_utils
│ │ ├── __init__.py
│ │ ├── general.py
│ │ ├── terrain.py
│ │ └── visualization.py
│ ├── legged_base_task
│ │ ├── __init__.py
│ │ └── legged_robot_base_ma.py
│ └── locomotion
│ │ ├── __init__.py
│ │ └── locomotion_ma.py
├── eval_agent.py
├── simulator
│ ├── base_simulator
│ │ └── base_simulator.py
│ ├── genesis
│ │ ├── genesis.py
│ │ ├── genesis_mjdebug.py
│ │ ├── genesis_viewer.py
│ │ └── tmp_gs_utils.py
│ ├── isaacgym
│ │ └── isaacgym.py
│ └── isaacsim
│ │ ├── event_cfg.py
│ │ ├── events.py
│ │ ├── isaaclab_cfg.py
│ │ ├── isaaclab_viewpoint_camera_controller.py
│ │ ├── isaacsim.py
│ │ └── isaacsim_articulation_cfg.py
├── train_agent.py
└── utils
│ ├── __init__.py
│ ├── average_meters.py
│ ├── common.py
│ ├── config_utils.py
│ ├── helpers.py
│ ├── inference_helpers.py
│ ├── logging.py
│ ├── math.py
│ ├── motion_lib
│ ├── __init__.py
│ ├── motion_lib_base.py
│ ├── motion_lib_robot.py
│ ├── motion_utils
│ │ ├── __init__.py
│ │ ├── flags.py
│ │ └── rotation_conversions.py
│ ├── skeleton.py
│ └── torch_humanoid_batch.py
│ ├── terrain.py
│ └── torch_utils.py
├── isaac_utils
│   ├── isaac_utils
│   │   ├── __init__.py
│   │   ├── maths.py
│   │   └── rotations.py
│   └── setup.py
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled Python bytecode
2 | *.py[cod]
3 | # Log files
4 | *.log
5 | # ONNX files
6 | *.onnx
7 | # JetBrains IDE
8 | .idea/
9 | .vscode/
10 |
11 | # Generated by MacOS
12 | .DS_Store
13 |
14 | # Generated by Windows
15 | Thumbs.db
16 |
17 | # Applications
18 | *.app
19 | *.exe
20 | *.war
21 |
22 | # Large media files
23 | *.mp4
24 | *.tiff
25 | *.avi
26 | *.flv
27 | *.mov
28 | *.wmv
29 |
30 | # logs
31 | logs
32 | logs_eval
33 | runs
34 | outputs
35 | results/
36 | hydra_logs/
37 | wandb/
38 | # other
39 | *.egg-info
40 | __pycache__
41 |
42 | *.pkl
43 | *.pt
44 | !humanoidverse/data/motions/g1_29dof/v1/accad_all.pkl
45 | !humanoidverse/data/motions/t1_29dof/v1/accad_all.pkl
46 |
47 | *.TXT
48 |
49 | humanoidverse/data/smpl
50 | data/shape
51 | data/motions
52 | humanoidverse/data/Recorded/
53 | humanoidverse/data/AMASS/
54 | AMASS/*
55 |
56 | *.sh
57 | *.txt
58 | *.zip
59 |
60 | *.jpg
61 | *.pem
62 |
63 | test.py
64 |
65 | *.npz
66 |
67 | README_FULL.md
68 | # sim2real
69 | humanoidverse/agents/callbacks/analysis_plot_decoupled_loco_manip_perf.py
70 | humanoidverse/config/opt/eval_analysis_plot_decoupled_loco_manip_perf.yaml
71 | debug_logs
72 | scripts
73 | sim2real
74 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 Yuanhang Zhang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # FALCON: Learning Force-Adaptive Humanoid Loco-Manipulation
2 |
3 |
4 |
5 |
6 |
7 | [[Website]](https://lecar-lab.github.io/falcon-humanoid/)
8 | [[arXiv]](https://arxiv.org/abs/2505.06776)
9 | [[Video]](https://www.youtube.com/watch?v=OfsvJ5-Fyzg)
10 |
11 |

12 |
13 |
14 |
15 |
16 | [IsaacGym](https://developer.nvidia.com/isaac-gym) [Ubuntu 22.04](https://ubuntu.com/blog/tag/22-04-lts)
17 |
18 |
19 |

20 |
21 |
22 |
23 | ## TODO
24 | - [x] Release training code
25 | - [ ] Release sim2sim code
26 | - [ ] Release sim2real code
27 |
28 |
29 | # Installation
30 |
31 | ## IsaacGym Conda Env
32 |
33 | Create a mamba/conda environment. We use conda in the examples below, but mamba works just as well.
34 |
35 | ```bash
36 | conda create -n fcgym python=3.8
37 | conda activate fcgym
38 | ```
39 | ### Install IsaacGym
40 |
41 | Download [IsaacGym](https://developer.nvidia.com/isaac-gym/download) and extract:
42 |
43 | ```bash
44 | wget https://developer.nvidia.com/isaac-gym-preview-4
45 | tar -xvzf isaac-gym-preview-4
46 | ```
47 |
48 | Install IsaacGym Python API:
49 |
50 | ```bash
51 | pip install -e isaacgym/python
52 | ```
53 |
54 | Test installation:
55 |
56 | ```bash
57 | cd isaacgym/python/examples
58 |
59 | python 1080_balls_of_solitude.py # or
60 | python joint_monkey.py
61 | ```
62 |
63 | If you hit a libpython error (typically `libpython3.8` not found), try the following; a concrete example is sketched after these steps:
64 |
65 | - Check conda path:
66 | ```bash
67 | conda info -e
68 | ```
69 | - Set LD_LIBRARY_PATH:
70 | ```bash
71 | export LD_LIBRARY_PATH=:$LD_LIBRARY_PATH
72 | ```
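For example, a minimal sketch assuming the `fcgym` environment created above is the active one (so that `$CONDA_PREFIX` points at it):

```bash
# Prepend the active conda env's lib/ directory (which contains libpython3.8)
# so the IsaacGym examples can find it at runtime.
conda activate fcgym
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
```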
73 |
74 | ### Install FALCON
75 |
76 | ```bash
77 | git clone https://github.com/LeCAR-Lab/FALCON.git
78 | cd FALCON
79 |
80 | pip install -e .
81 | pip install -e isaac_utils
82 | ```
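As a quick sanity check of the editable installs (a minimal sketch, assuming `pip install -e .` and `pip install -e isaac_utils` expose the `humanoidverse` and `isaac_utils` packages on the Python path):

```bash
# isaacgym is imported first, before anything that pulls in torch.
python -c "import isaacgym, humanoidverse, isaac_utils; print('FALCON import OK')"
```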
83 |
84 | # FALCON Training
85 | ## Unitree G1_29DoF
86 |
87 | Training Command
88 |
89 | ```bash
90 | python humanoidverse/train_agent.py \
91 | +exp=decoupled_locomotion_stand_height_waist_wbc_diff_force_ma_ppo_ma_env \
92 | +simulator=isaacgym \
93 | +domain_rand=domain_rand_rl_gym \
94 | +rewards=dec_loco/reward_dec_loco_stand_height_ma_diff_force \
95 | +robot=g1/g1_29dof_waist_fakehand \
96 | +terrain=terrain_locomotion_plane \
97 | +obs=dec_loco/g1_29dof_obs_diff_force_history_wolinvel_ma \
98 | num_envs=4096 \
99 | project_name=g1_29dof_falcon \
100 | experiment_name=g1_29dof_falcon \
101 | +opt=wandb \
102 | obs.add_noise=True \
103 | env.config.fix_upper_body_prob=0.3 \
104 | robot.dof_effort_limit_scale=0.9 \
105 | rewards.reward_initial_penalty_scale=0.1 \
106 | rewards.reward_penalty_degree=0.0001
107 | ```
108 |
109 |
110 |
111 |
112 | Evaluation Command
113 |
114 | ```bash
115 | python humanoidverse/eval_agent.py \
116 | +checkpoint=
117 | ```
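`+checkpoint` expects the path to a model saved during training. Runs are written to the experiment directory configured in `humanoidverse/config/base.yaml` (`logs/<project_name>/<timestamp>-<experiment_name>-<task>-<robot_type>`); the run directory and checkpoint filename below are placeholders, not fixed names:

```bash
python humanoidverse/eval_agent.py \
+checkpoint=logs/g1_29dof_falcon/<run_dir>/<model_checkpoint>.pt
```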
118 |
119 |
120 |
121 | After around `6k` iterations, in `IsaacGym`:
122 |
123 |
124 |
125 |
126 | |
127 |
128 |
129 |
130 | ## Booster T1_29DoF
131 |
132 | Training Command
133 |
134 | ```bash
135 | python humanoidverse/train_agent.py \
136 | +exp=decoupled_locomotion_stand_height_waist_wbc_diff_force_ma_ppo_ma_env \
137 | +simulator=isaacgym \
138 | +domain_rand=domain_rand_rl_gym \
139 | +rewards=dec_loco/reward_dec_loco_stand_height_ma_diff_force \
140 | +robot=t1/t1_29dof_waist_wrist \
141 | +terrain=terrain_locomotion_plane \
142 | +obs=dec_loco/t1_29dof_obs_diff_force_history_wolinvel_ma \
143 | num_envs=4096 \
144 | project_name=t1_29dof_falcon \
145 | experiment_name=t1_29dof_falcon \
146 | +opt=wandb \
147 | obs.add_noise=True \
148 | env.config.fix_upper_body_prob=0.3 \
149 | robot.dof_effort_limit_scale=0.9 \
150 | rewards.reward_initial_penalty_scale=0.1 \
151 | rewards.reward_penalty_degree=0.0001 \
152 | rewards.feet_height_target=0.08 \
153 | rewards.feet_height_stand=0.02 \
154 | rewards.desired_feet_max_height_for_this_air=0.08 \
155 | rewards.desired_base_height=0.62 \
156 | rewards.reward_scales.penalty_lower_body_action_rate=-0.5 \
157 | rewards.reward_scales.penalty_upper_body_action_rate=-0.5 \
158 | env.config.apply_force_pos_ratio_range=[0.5,2.0]
159 | ```
160 |
161 |
162 |
163 |
164 | Evaluation Command
165 |
166 | ```bash
167 | python humanoidverse/eval_agent.py \
168 | +checkpoint=
169 | ```
170 |
171 |
172 |
173 | After around `6k` iterations, in `IsaacGym`:
174 |
175 |
176 |
177 |
178 | |
179 |
180 |
181 |
182 | # Citation
183 | If you find our work useful, please consider citing us!
184 |
185 | ```bibtex
186 | @article{zhang2025falcon,
187 | title={FALCON: Learning Force-Adaptive Humanoid Loco-Manipulation},
188 | author={Zhang, Yuanhang and Yuan, Yifu and Gurunath, Prajwal and He, Tairan and Omidshafiei, Shayegan and Agha-mohammadi, Ali-akbar and Vazquez-Chanlatte, Marcell and Pedersen, Liam and Shi, Guanya},
189 | journal={arXiv preprint arXiv:2505.06776},
190 | year={2025}
191 | }
192 | ```
193 |
194 | # Acknowledgement
195 | **FALCON** is built upon [ASAP](https://github.com/LeCAR-Lab/ASAP) and [HumanoidVerse](https://github.com/LeCAR-Lab/HumanoidVerse).
196 |
197 | # License
198 |
199 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
200 |
--------------------------------------------------------------------------------
/assets/g1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/assets/g1.gif
--------------------------------------------------------------------------------
/assets/hero_static.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/assets/hero_static.gif
--------------------------------------------------------------------------------
/assets/ip.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/assets/ip.png
--------------------------------------------------------------------------------
/assets/t1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/assets/t1.gif
--------------------------------------------------------------------------------
/humanoidverse/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/agents/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/agents/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/agents/base_algo/base_algo.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from torch import nn, Tensor
4 |
5 | import time
6 | import math
7 | from pathlib import Path
8 | from typing import Optional, List, Tuple, Dict
9 |
10 |
11 | from hydra.utils import instantiate
12 |
13 | from humanoidverse.envs.base_task.base_task import BaseTask
14 |
15 | class BaseAlgo:
16 | def __init__(self, env: BaseTask, config, device):
17 | self.env = env
18 | self.config = config
19 | self.device = device
20 |
21 | def setup(self):
22 |         raise NotImplementedError
23 |
24 | def learn(self):
25 |         raise NotImplementedError
26 |
27 | def load(self, path):
28 |         raise NotImplementedError
29 |
30 | @property
31 | def inference_model(self):
32 |         raise NotImplementedError
33 |
34 | def env_step(self, actions, extra_info=None):
35 | obs_dict, rewards, dones, extras = self.env.step(actions, extra_info)
36 | return obs_dict, rewards, dones, extras
37 |
38 | @torch.no_grad()
39 | def evaluate_policy(self):
40 |         raise NotImplementedError
41 |
42 | def save(self, path=None, name="last.ckpt"):
43 | raise NotImplementedError
44 |
--------------------------------------------------------------------------------
/humanoidverse/agents/callbacks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/agents/callbacks/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/agents/callbacks/analysis_plot_template.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
38 |
39 |
40 |
41 |
Robot State Plots
42 |
43 |
Loading plots...
44 |
45 |
46 |
52 |
53 |
--------------------------------------------------------------------------------
/humanoidverse/agents/callbacks/base_callback.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.nn import Module
3 |
4 | class RL_EvalCallback(Module):
5 | def __init__(self, config, training_loop):
6 | super().__init__()
7 | self.config = config
8 | self.training_loop = training_loop
9 | self.device = self.training_loop.device
10 |
11 | def on_pre_evaluate_policy(self):
12 | pass
13 |
14 | def on_pre_eval_env_step(self, actor_state):
15 | return actor_state
16 |
17 | def on_post_eval_env_step(self, actor_state):
18 | return actor_state
19 |
20 | def on_post_evaluate_policy(self):
21 | pass
22 |
--------------------------------------------------------------------------------
/humanoidverse/agents/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/agents/modules/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/agents/modules/encoder_modules.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import torch
4 | import torch.nn as nn
5 | from .modules import BaseModule
6 |
7 |
8 | class Estimator(nn.Module):
9 | def __init__(self, obs_dim_dict, module_config_dict):
10 | super(Estimator, self).__init__()
11 | self.module = BaseModule(obs_dim_dict, module_config_dict)
12 |
13 | # def estimate(self, obs_history):
14 | # return self.module(obs_history)
15 |
16 | def forward(self, obs_history):
17 | return self.module(obs_history)
--------------------------------------------------------------------------------
/humanoidverse/agents/modules/modules.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import inspect
4 |
5 | class BaseModule(nn.Module):
6 | def __init__(self, obs_dim_dict, module_config_dict):
7 | super(BaseModule, self).__init__()
8 | self.obs_dim_dict = obs_dim_dict
9 | self.module_config_dict = module_config_dict
10 | self.history_length = module_config_dict.get('history_length', {})
11 |
12 | self._calculate_input_dim()
13 | self._calculate_output_dim()
14 | self._build_network_layer(self.module_config_dict.layer_config)
15 |
16 | def _calculate_input_dim(self):
17 | # calculate input dimension based on the input specifications
18 | input_dim = 0
19 | for each_input in self.module_config_dict['input_dim']:
20 | if each_input in self.obs_dim_dict:
21 | # atomic observation type
22 | input_dim += self.obs_dim_dict[each_input] * self.history_length.get(each_input, 1)
23 | elif isinstance(each_input, (int, float)):
24 | # direct numeric input
25 | input_dim += each_input
26 | else:
27 | current_function_name = inspect.currentframe().f_code.co_name
28 | raise ValueError(f"{current_function_name} - Unknown input type: {each_input}")
29 |
30 | self.input_dim = input_dim
31 |
32 | def _calculate_output_dim(self):
33 | output_dim = 0
34 | for each_output in self.module_config_dict['output_dim']:
35 | if isinstance(each_output, (int, float)):
36 | output_dim += each_output
37 | else:
38 | current_function_name = inspect.currentframe().f_code.co_name
39 | raise ValueError(f"{current_function_name} - Unknown output type: {each_output}")
40 | self.output_dim = output_dim
41 |
42 | def _build_network_layer(self, layer_config):
43 | if layer_config['type'] == 'MLP':
44 | self._build_mlp_layer(layer_config)
45 | else:
46 | raise NotImplementedError(f"Unsupported layer type: {layer_config['type']}")
47 |
48 | def _build_mlp_layer(self, layer_config):
49 | layers = []
50 | hidden_dims = layer_config['hidden_dims']
51 | output_dim = self.output_dim
52 | activation = getattr(nn, layer_config['activation'])()
53 |
54 | layers.append(nn.Linear(self.input_dim, hidden_dims[0]))
55 | layers.append(activation)
56 |
57 | dropout = layer_config.get("dropout_prob", 0)
58 | if dropout > 0:
59 | layers.append(nn.Dropout(p=dropout))
60 |
61 | for l in range(len(hidden_dims)):
62 | if l == len(hidden_dims) - 1:
63 | layers.append(nn.Linear(hidden_dims[l], output_dim))
64 | else:
65 | layers.append(nn.Linear(hidden_dims[l], hidden_dims[l + 1]))
66 | layers.append(activation)
67 | if dropout > 0:
68 | layers.append(nn.Dropout(p=dropout))
69 |
70 | self.module = nn.Sequential(*layers)
71 |
72 | def forward(self, input):
73 | return self.module(input)
--------------------------------------------------------------------------------
/humanoidverse/agents/modules/ppo_modules.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from copy import deepcopy
3 |
4 | import torch
5 | import torch.nn as nn
6 | from torch.distributions import Normal
7 |
8 | from .modules import BaseModule
9 | from omegaconf import DictConfig
10 |
11 | class PPOActor(nn.Module):
12 | def __init__(self,
13 | obs_dim_dict,
14 | module_config_dict,
15 | num_actions,
16 | init_noise_std):
17 | super(PPOActor, self).__init__()
18 |
19 | module_config_dict = self._process_module_config(module_config_dict, num_actions)
20 |
21 | self.actor_module = BaseModule(obs_dim_dict, module_config_dict)
22 |
23 | self.num_actions = {'lower_body': 15, 'upper_body': 14} # TODO: Hardcode
24 | # Action noise
25 | if isinstance(init_noise_std, dict) or isinstance(init_noise_std, DictConfig):
26 | std_list = []
27 | for key in self.num_actions.keys():
28 | if key not in init_noise_std:
29 | raise ValueError(f"Key {key} not found in init_noise_std.")
30 | std_value = init_noise_std[key]
31 | num = self.num_actions[key]
32 | std_list.append(std_value * torch.ones(num))
33 | std_tensor = torch.cat(std_list)
34 | else:
35 | std_tensor = init_noise_std * torch.ones(num_actions)
36 |
37 | self.std = nn.Parameter(std_tensor)
38 | self.min_noise_std = module_config_dict.get('min_noise_std', None)
39 | self.min_mean_noise_std = module_config_dict.get('min_mean_noise_std', None)
40 | # self.std = nn.Parameter(init_noise_std * torch.ones(num_actions))
41 | self.distribution = None
42 | # disable args validation for speedup
43 | Normal.set_default_validate_args = False
44 |
45 | def _process_module_config(self, module_config_dict, num_actions):
46 | for idx, output_dim in enumerate(module_config_dict['output_dim']):
47 | if output_dim == 'robot_action_dim':
48 | module_config_dict['output_dim'][idx] = num_actions
49 | return module_config_dict
50 |
51 | @property
52 | def actor(self):
53 | return self.actor_module
54 |
55 | @staticmethod
56 | # not used at the moment
57 | def init_weights(sequential, scales):
58 | [torch.nn.init.orthogonal_(module.weight, gain=scales[idx]) for idx, module in
59 | enumerate(mod for mod in sequential if isinstance(mod, nn.Linear))]
60 |
61 | def reset(self, dones=None):
62 | pass
63 |
64 | def forward(self):
65 | raise NotImplementedError
66 |
67 | @property
68 | def action_mean(self):
69 | return self.distribution.mean
70 |
71 | @property
72 | def action_std(self):
73 | return self.distribution.stddev
74 |
75 | @property
76 | def entropy(self):
77 | return self.distribution.entropy().sum(dim=-1)
78 |
79 | def update_distribution(self, actor_obs):
80 | mean = self.actor(actor_obs)
81 | if self.min_noise_std:
82 | clamped_std = torch.clamp(self.std, min=self.min_noise_std)
83 | self.distribution = Normal(mean, mean * 0. + clamped_std)
84 | elif self.min_mean_noise_std:
85 | current_mean = self.std.mean()
86 | if current_mean < self.min_mean_noise_std:
87 | scale_up = self.min_mean_noise_std / (current_mean + 1e-6)
88 | clamped_std = self.std * scale_up
89 | else:
90 | clamped_std = self.std
91 | self.distribution = Normal(mean, mean * 0. + clamped_std)
92 | else: self.distribution = Normal(mean, mean * 0. + self.std)
93 |
94 | def act(self, actor_obs, **kwargs):
95 | self.update_distribution(actor_obs)
96 | return self.distribution.sample()
97 |
98 | def get_actions_log_prob(self, actions):
99 | return self.distribution.log_prob(actions).sum(dim=-1)
100 |
101 | def act_inference(self, actor_obs):
102 | actions_mean = self.actor(actor_obs)
103 | return actions_mean
104 |
105 | def to_cpu(self):
106 |         self.actor_module = deepcopy(self.actor_module).to('cpu')  # 'actor' is a read-only property; move the underlying module
107 |         self.std.data = self.std.data.to('cpu')
108 |
109 |
110 |
111 | class PPOCritic(nn.Module):
112 | def __init__(self,
113 | obs_dim_dict,
114 | module_config_dict):
115 | super(PPOCritic, self).__init__()
116 |
117 | self.critic_module = BaseModule(obs_dim_dict, module_config_dict)
118 |
119 | @property
120 | def critic(self):
121 | return self.critic_module
122 |
123 | def reset(self, dones=None):
124 | pass
125 |
126 | def evaluate(self, critic_obs, **kwargs):
127 | value = self.critic(critic_obs)
128 | return value
129 |
130 | # Deprecated: TODO: Let Wenli Fix this
131 | class PPOActorFixSigma(PPOActor):
132 | def __init__(self,
133 | obs_dim_dict,
134 | network_dict,
135 | network_load_dict,
136 | num_actions,):
137 | super(PPOActorFixSigma, self).__init__(obs_dim_dict, network_dict, network_load_dict, num_actions, 0.0)
138 |
139 | def update_distribution(self, obs_dict):
140 | mean = self.actor(obs_dict)['head']
141 | self.distribution = mean
142 |
143 | @property
144 | def action_mean(self):
145 | return self.distribution
146 |
147 | def get_actions_log_prob(self, actions):
148 | raise NotImplementedError
149 |
150 | def act(self, obs_dict, **kwargs):
151 | self.update_distribution(obs_dict)
152 | return self.distribution
--------------------------------------------------------------------------------
/humanoidverse/agents/modules/world_models.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from torch.utils.tensorboard import SummaryWriter as TensorboardSummaryWriter
4 | import os
5 | import statistics
6 | from collections import deque
7 | import copy
8 | from typing import Optional, Tuple, Dict
9 | from isaacgym import gymapi
10 | from isaacgym import gymtorch
11 | from hydra.utils import instantiate
12 |
13 |
14 | class BaseWorldModel():
15 | def __init__(self, config, device='cpu'):
16 | self.config = config
17 | self.device = device
18 |
19 | def step(self, action):
20 | raise NotImplementedError
21 |
22 | def next(self, obs, action):
23 | raise NotImplementedError
24 |
25 | def reset(self, state=None, buffer=None):
26 | raise NotImplementedError
27 |
28 |
29 | class SimWorldModel(BaseWorldModel):
30 | def __init__(self,
31 | config,
32 | env,
33 | n_samples:Optional[int]=100,
34 | device='cpu'):
35 | super(SimWorldModel, self).__init__(config, device)
36 | self.env = env
37 | self.env.set_is_evaluating()
38 | self.num_samples = n_samples
39 | self.action_dim = self.env.config.robot.actions_dim
40 | self.obs_dim = self.env.config.robot.policy_obs_dim
41 | self.rollout_ids = torch.arange(self.num_samples, device=self.device) + 1
42 | self.reset()
43 | self.states = None # states for all sim environments
44 |
45 | def reset(self, state=None, buffer=None):
46 | # Remark: privileged_obs is for critic learning, unused in model-based RL algo
47 | if state is None:
48 | self.env.reset_envs_idx(self.rollout_ids)
49 | else:
50 | self.set_envs_to_state(state, self.rollout_ids)
51 |
52 | self.states = {"dof_states": copy.deepcopy(self.env.dof_state.view(1 + self.num_samples, self.env.num_dof, 2)[1:]),
53 | "root_states": copy.deepcopy(self.env.robot_root_states[1:])}
54 |
55 | def set_envs_to_state(self, desired_state, ids=None):
56 | # TODO
57 | if ids is None:
58 | ids = list(range(self.env.num_envs))
59 | root_states = {}
60 | for key in ["dof_states", "root_states"]:
61 | root_states[key] = desired_state[key][1:]
62 |
63 | # Create a numpy array to hold the states for all environments
64 | self.env.reset_envs_idx(self.rollout_ids, target_states=root_states)
65 |
66 | def step(self, actions):
67 | """
68 | parameters:
69 | actions: (n_samples, action_dim)
70 | outputs:
71 | obs: (n_samples, obs_dim)
72 | rewards: (n_samples,)
73 | dones: (n_samples,)
74 | """
75 | actions = torch.cat([torch.zeros_like(actions[[0]], device=self.device), actions], dim=0) # add a zero action for the first env
76 | obs, rewards, dones, infos = self.env.step(actions) # returns all envs' info (self.num_samples + 1)
77 | self.states = {"dof_states": copy.deepcopy(self.env.dof_state.view(1 + self.num_samples, self.env.num_dof, 2)[1:]),
78 | "root_states": copy.deepcopy(self.env.robot_root_states[1:])}
79 | self.obs_buf = obs["actor_obs"]
80 | self.privileged_obs_buf = obs["critic_obs"]
81 | return obs["actor_obs"][1:], rewards[1:], (1 - dones[1:]).bool()
82 |
83 | def next(self, states, actions):
84 | raise NotImplementedError
85 |
86 |
87 | # TODO: Implement the WorldModel class
88 | # class RSSM(BaseWorldModel):
89 | # def __init__(self, env, config, device='cpu'):
90 | # super(RSSM, self).__init__(env, config, device)
91 | # self.model = instantiate(config.model)
92 | # self.model = self.model.to(self.device)
93 | # self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.lr)
94 | # self.loss_fn = torch.nn.MSELoss()
95 | # self._dynamics = None
96 | # self._reward = None
97 | # self._pi = None
98 | # self._task_emb = None
99 | # self._critic = None
100 | # self.obs, self.privileged_obs = self.reset()
101 |
102 | # def next(self, obs, action):
103 | # raise NotImplementedError
104 |
105 |
106 | class MultiSimWorldModel(BaseWorldModel):
107 | def __init__(self,
108 | config,
109 | sim_config,
110 | n_samples:Optional[int]=100,
111 | command:Optional=None,
112 | device='cpu'):
113 | super(MultiSimWorldModel, self).__init__(config, device)
114 | self.env = instantiate(sim_config, device=device)
115 | self.env.set_is_evaluating(command)
116 | self.num_samples = n_samples
117 | self.action_dim = self.env.config.robot.actions_dim
118 | self.obs_dim = self.env.config.robot.policy_obs_dim
119 | self.rollout_ids = torch.arange(self.num_samples, device=self.device)
120 | self.reset()
121 | self.states = None
122 |
123 | def reset(self, state=None, buffer=None):
124 | # Remark: privileged_obs is for critic learning, unused in model-based RL algo
125 | if state is None:
126 | self.env.reset_envs_idx(self.rollout_ids)
127 | else:
128 | self.set_envs_to_state(state, buffer, self.rollout_ids)
129 |
130 | self.states = {"dof_states": copy.deepcopy(self.env.dof_state.view(self.num_samples, self.env.num_dof, 2)),
131 | "root_states": copy.deepcopy(self.env.robot_root_states)}
132 |
133 | def set_envs_to_state(self, desired_state, desired_buf, ids=None):
134 | if ids is None:
135 | ids = list(range(self.env.num_envs))
136 | # Create a numpy array to hold the states for all environments
137 | self.env.reset_envs_idx(self.rollout_ids, target_states=desired_state, target_buf=desired_buf)
138 |
139 | def get_env_dim(self):
140 | # TODO
141 | # get sim setups for all envs
142 | return {
143 | "dof_shape": [self.num_samples, self.env.num_dof, 2],
144 | "root_states_shape": self.env.robot_root_states.shape,
145 | "obs_dim": self.obs_dim,
146 | }
147 |
148 | def step(self, actions):
149 | """
150 | parameters:
151 | actions: (n_samples, action_dim)
152 | outputs:
153 | obs: (n_samples, obs_dim)
154 | rewards: (n_samples,)
155 | dones: (n_samples,)
156 | """
157 | obs, rewards, dones, infos = self.env.step(actions) # returns all envs' info (self.num_samples + 1)
158 | self.states = {"dof_states": copy.deepcopy(self.env.dof_state.view(self.num_samples, self.env.num_dof, 2)),
159 | "root_states": copy.deepcopy(self.env.robot_root_states)}
160 | #print("commands:", self.env.commands)
161 | self.obs_buf = obs["actor_obs"]
162 | self.privileged_obs_buf = obs["critic_obs"]
163 | return obs["actor_obs"], rewards, (1 - dones).bool()
164 |
165 | def next(self, states, actions):
166 | raise NotImplementedError
167 |
168 |
--------------------------------------------------------------------------------
/humanoidverse/agents/ppo_locomanip.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.optim as optim
4 |
5 | from humanoidverse.agents.modules.ppo_modules import PPOActor, PPOCritic
6 | from humanoidverse.agents.modules.data_utils import RolloutStorage
7 | from humanoidverse.envs.base_task.base_task import BaseTask
8 | from humanoidverse.agents.base_algo import BaseAlgo
9 | from humanoidverse.agents.callbacks.base_callback import RL_EvalCallback
10 | from humanoidverse.utils.average_meters import TensorAverageMeterDict
11 |
12 | from torch.utils.tensorboard import SummaryWriter as TensorboardSummaryWriter
13 | import time
14 | import os
15 | import statistics
16 | from collections import deque
17 | from hydra.utils import instantiate
18 | from loguru import logger
19 | from rich.progress import track
20 | from rich.console import Console
21 | from rich.panel import Panel
22 | from rich.live import Live
23 | console = Console()
24 |
25 | ##################################################################
26 | ######## Note: This agent is ONLY for EVALUATION purposes ########
27 | ##################################################################
28 | class PPOLocoManip(BaseAlgo):
29 | def __init__(self,
30 | env: BaseTask,
31 | config,
32 | log_dir=None,
33 | device='cpu'):
34 |
35 | self.device= device
36 | self.env = env
37 | self.config = config
38 | self.log_dir = log_dir
39 |
40 | self._init_config()
41 |
42 | self.tot_timesteps = 0
43 | self.tot_time = 0
44 |
45 | # Book keeping
46 | self.ep_infos = []
47 | self.rewbuffer = deque(maxlen=100)
48 | self.lenbuffer = deque(maxlen=100)
49 | self.cur_reward_sum = torch.zeros(self.env.num_envs, dtype=torch.float, device=self.device)
50 | self.cur_episode_length = torch.zeros(self.env.num_envs, dtype=torch.float, device=self.device)
51 |
52 | self.eval_callbacks: list[RL_EvalCallback] = []
53 | self.episode_env_tensors = TensorAverageMeterDict()
54 | _ = self.env.reset_all()
55 |
56 | # Actor observations for stand and loco policies
57 | self.actor_obs = ['actor_stand_obs', 'actor_loco_obs']
58 |
59 | def _init_config(self):
60 | # Env related Config
61 | self.num_envs: int = self.env.config.num_envs
62 | self.algo_obs_dim_dict = self.env.config.robot.algo_obs_dim_dict
63 | self.num_act = self.env.config.robot.lower_body_actions_dim
64 |
65 | def setup(self):
66 | logger.info("Setting up PPO Loco Manip")
67 | self._setup_models_and_optimizer()
68 |
69 | def _setup_models_and_optimizer(self):
70 | self.actor_stand = PPOActor(
71 | obs_dim_dict=self.algo_obs_dim_dict,
72 | module_config_dict=self.config.module_dict.actor_stand,
73 | num_actions=self.num_act,
74 | init_noise_std=self.config.init_noise_std
75 | ).to(self.device)
76 | self.actor_loco = PPOActor(
77 | obs_dim_dict=self.algo_obs_dim_dict,
78 | module_config_dict=self.config.module_dict.actor_loco,
79 | num_actions=self.num_act,
80 | init_noise_std=self.config.init_noise_std
81 | ).to(self.device)
82 | self.actor = [self.actor_stand, self.actor_loco]
83 |
84 | def _eval_mode(self):
85 | self.actor[0].eval()
86 | self.actor[1].eval()
87 |
88 | def load(self, ckpt_stand_path, ckpt_loco_path):
89 | if ckpt_stand_path is not None:
90 | logger.info(f"Loading standing checkpoint from {ckpt_stand_path}")
91 | loaded_dict_stand = torch.load(ckpt_stand_path, map_location=self.device)
92 | self.actor[0].load_state_dict(loaded_dict_stand["actor_model_state_dict"])
93 | if ckpt_loco_path is not None:
94 | logger.info(f"Loading locomotion checkpoint from {ckpt_loco_path}")
95 | loaded_dict_loco = torch.load(ckpt_loco_path, map_location=self.device)
96 | self.actor[1].load_state_dict(loaded_dict_loco["actor_model_state_dict"])
97 | return loaded_dict_stand['infos'], loaded_dict_loco["infos"]
98 |
99 | def _process_env_step(self, rewards, dones, infos):
100 | self.actor[self.env.control_mode].reset(dones)
101 |
102 | def _actor_act_step(self, obs_dict):
103 | return self.actor[self.env.control_mode].act(obs_dict)
104 |
105 | @property
106 | def inference_model(self):
107 | return self.actor
108 |
109 | ##########################################################################################
110 | # Code for Evaluation
111 | ##########################################################################################
112 |
113 | def env_step(self, actor_state):
114 | actions_lower_body = actor_state["actions"]
115 | actions_upper_body = self.env.ref_upper_dof_pos
116 | actions = torch.cat([actions_lower_body, actions_upper_body], dim=1)
117 | obs_dict, rewards, dones, extras = self.env.step(actions)
118 | actor_state.update(
119 | {"obs": obs_dict, "rewards": rewards, "dones": dones, "extras": extras}
120 | )
121 | return actor_state
122 |
123 | @torch.no_grad()
124 | def get_example_obs(self):
125 | obs_dict = self.env.reset_all()
126 | for obs_key in obs_dict.keys():
127 | print(obs_key, sorted(self.env.config.obs.obs_dict[obs_key]))
128 | # move to cpu
129 | for k in obs_dict:
130 | obs_dict[k] = obs_dict[k].cpu()
131 | return obs_dict
132 |
133 | @torch.no_grad()
134 | def evaluate_loco_manip_policy(self):
135 | self._create_eval_callbacks()
136 | self._pre_evaluate_policy()
137 | actor_state = self._create_actor_state()
138 | step = 0
139 | self.eval_policy = self._get_inference_policy()
140 | obs_dict = self.env.reset_all()
141 | init_actions = torch.zeros(self.env.num_envs, self.num_act, device=self.device)
142 | actor_state.update({"obs": obs_dict, "actions": init_actions})
143 | actor_state = self._pre_eval_env_step(actor_state)
144 | while True:
145 | actor_state["step"] = step
146 | actor_state = self._pre_eval_env_step(actor_state)
147 | actor_state = self.env_step(actor_state)
148 | actor_state = self._post_eval_env_step(actor_state)
149 | step += 1
150 |
151 | def _create_actor_state(self):
152 | return {"done_indices": [], "stop": False}
153 |
154 | def _create_eval_callbacks(self):
155 | if self.config.eval_callbacks is not None:
156 | for cb in self.config.eval_callbacks:
157 | self.eval_callbacks.append(instantiate(self.config.eval_callbacks[cb], training_loop=self))
158 |
159 | def _pre_evaluate_policy(self, reset_env=True):
160 | self._eval_mode()
161 | self.env.set_is_evaluating()
162 | if reset_env:
163 | _ = self.env.reset_all()
164 |
165 | for c in self.eval_callbacks:
166 | c.on_pre_evaluate_policy()
167 |
168 | def _post_evaluate_policy(self):
169 | for c in self.eval_callbacks:
170 | c.on_post_evaluate_policy()
171 |
172 | def _pre_eval_env_step(self, actor_state: dict):
173 | eval_policy = self.eval_policy[self.env.control_mode]
174 | actor_obs = actor_state["obs"][self.actor_obs[self.env.control_mode]]
175 | actions = eval_policy(actor_obs)
176 | actor_state.update({"actions": actions})
177 | for c in self.eval_callbacks:
178 | actor_state = c.on_pre_eval_env_step(actor_state)
179 | return actor_state
180 |
181 | def _post_eval_env_step(self, actor_state):
182 | for c in self.eval_callbacks:
183 | actor_state = c.on_post_eval_env_step(actor_state)
184 | return actor_state
185 |
186 | def _get_inference_policy(self, device=None):
187 | self._eval_mode()
188 | if device is not None:
189 | self.actor[0].to(device)
190 | self.actor[1].to(device)
191 | return [self.actor[0].act_inference,
192 | self.actor[1].act_inference]
--------------------------------------------------------------------------------
/humanoidverse/config/algo/ppo_decoupled_wbc_ma.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | algo:
4 | _target_: humanoidverse.agents.decouple.ppo_decoupled_wbc_ma.PPOMultiActorCritic
5 | _recursive_: False
6 | config:
7 | num_learning_epochs: 5
8 | num_mini_batches: 4
9 | clip_param: 0.2
10 | gamma: 0.99
11 | lam: 0.95
12 | value_loss_coef: 1.0
13 | entropy_coef: 0.01
14 | actor_learning_rate: 1.e-3 # 5e-4 # 1.e-3
15 | critic_learning_rate: 1.e-3 # 5e-4 # 1.e-3
16 | max_grad_norm: 1.0
17 | use_clipped_value_loss: True
18 | schedule: "adaptive"
19 | desired_kl: 0.01
20 |
21 | num_steps_per_env: 24
22 | save_interval: 100
23 |
24 | load_optimizer: True
25 |
26 | init_noise_std: {
27 | lower_body: 0.8,
28 | upper_body: 0.6, # 0.6 for residual, 0.6 for non-residual
29 | }
30 |
31 | num_learning_iterations: 10000
32 | init_at_random_ep_len: True
33 | eval_callbacks: null
34 |
35 | log_all_action_std: True
36 |
37 | module_dict:
38 | actor_lower_body:
39 | input_dim: [actor_obs]
40 | history_length: ${obs.history_length}
41 | output_dim: [robot_action_dim]
42 | layer_config:
43 | type: MLP
44 | hidden_dims: [512, 256, 128]
45 | activation: ELU
46 | # min_noise_std: 0.06
47 | # min_mean_noise_std: 0.20
48 | actor_upper_body:
49 | input_dim: [actor_obs]
50 | history_length: ${obs.history_length}
51 | output_dim: [robot_action_dim]
52 | layer_config:
53 | type: MLP
54 | hidden_dims: [512, 256, 128]
55 | activation: ELU
56 | # min_noise_std: 0.12
57 | # min_mean_noise_std: 0.18
58 | critic:
59 | type: MLP
60 | input_dim: [critic_obs]
61 | history_length: ${obs.history_length}
62 | output_dim: [1]
63 | layer_config:
64 | type: MLP
65 | hidden_dims: [512, 256, 128]
66 | activation: ELU
--------------------------------------------------------------------------------
/humanoidverse/config/base.yaml:
--------------------------------------------------------------------------------
1 | # First we define the global structures that will be used by all the configs.
2 | defaults:
3 | # - base/fabric
4 | - _self_
5 | - base/hydra
6 | - base/structure
7 |
8 | # These are global variables that all levels of the config can access.
9 | ## Experiment setup
10 | seed: 0
11 | codebase_version: 1.0 # this is recorded to enable auto-conversion of models between different versions of the codebase
12 | headless: True
13 | num_envs: 4096
14 |
15 | ### Checkpoint logic
16 | auto_load_latest: False
17 | checkpoint: null
18 |
19 | ### Naming and dir structure
20 | project_name: TEST
21 | experiment_name: TEST
22 |
23 | base_dir: logs
24 | timestamp: ${now:%Y%m%d_%H%M%S}
25 | experiment_dir: ${base_dir}/${project_name}/${timestamp}-${experiment_name}-${log_task_name}-${robot.asset.robot_type}
26 | save_dir: ${experiment_dir}/.hydra
27 |
28 | force_flat_terrain: False
29 |
30 | use_wandb: False
31 | log_task_name: TEST
32 |
33 | ### Simulation
34 | sim_type: isaacgym
35 | env_spacing: 20
36 | output_dir: ${experiment_dir}/output
37 |
38 | eval_overrides:
39 | headless: False
40 | num_envs: 1
41 | auto_load_latest: False
42 | use_wandb: False
43 |
--------------------------------------------------------------------------------
/humanoidverse/config/base/fabric.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | fabric:
4 | _target_: lightning.fabric.Fabric
5 | _convert_: all
6 | accelerator: gpu
7 | devices: ${ngpu}
8 | num_nodes: ${nodes}
9 | strategy:
10 | _target_: lightning.fabric.strategies.DDPStrategy
11 | precision: ${amp_precision}
12 | loggers:
13 | - _target_: lightning.fabric.loggers.TensorBoardLogger
14 | root_dir: ${save_dir}
15 |
16 | # These are global variables that all levels of the config can access.
17 | ## Compute setup
18 | ngpu: 1
19 | nodes: 1
20 | torch_deterministic: False
21 | amp_precision: 32-true
22 | force_flat_terrain: False
23 |
24 | eval_overrides:
25 | ngpu: 1
26 | nodes: 1
27 |
28 | fabric:
29 | loggers: null
30 |
--------------------------------------------------------------------------------
/humanoidverse/config/base/hydra.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | hydra: # So hydra will put your config info in the same dir as your checkpoints
4 | run:
5 | dir: ${save_dir}
6 | sweep:
7 | dir: ${save_dir}
8 | job:
9 | chdir: False
10 | # job_logging:
11 | # disable_existing_loggers: true
12 | # hydra_logging:
13 | # disable_existing_loggers: true
14 |
--------------------------------------------------------------------------------
/humanoidverse/config/base/structure.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | algo: ???
4 |
5 | env: ???
6 |
7 | robot: ???
8 |
9 | terrain: ??? # This is defined in the terrain configs
10 |
--------------------------------------------------------------------------------
/humanoidverse/config/base_eval.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | eval_timestamp: ${now:%Y%m%d_%H%M%S}
4 | eval_name: TEST
5 | eval_log_dir: logs_eval/${eval_name}/${eval_timestamp}
6 | hydra:
7 | run:
8 | dir: ${eval_log_dir}
9 |
--------------------------------------------------------------------------------
/humanoidverse/config/domain_rand/NO_domain_rand.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 |
4 | defaults:
5 | - domain_rand_base
6 |
7 | domain_rand:
8 | push_robots : False
9 | push_interval_s : [5, 10]
10 | max_push_vel_xy : 1.0
11 |
12 | randomize_friction : False
13 | # randomize_friction : False
14 | friction_range : [-0.6, 1.2]
15 |
16 | randomize_base_mass : False # replaced by randomize_link_mass
17 | added_mass_range : [-5., 10.]
18 |
19 |
20 | randomize_base_com : False
21 | base_com_range: #kg
22 | x : [-0.1, 0.1]
23 | y : [-0.1, 0.1]
24 | z : [-0.01, 0.01]
25 |
26 | randomize_link_mass : False
27 | link_mass_range : [0.7, 1.3] # *factor
28 | randomize_link_body_names : [
29 | 'pelvis', 'left_hip_yaw_link', 'left_hip_roll_link', 'left_hip_pitch_link', 'left_knee_link',
30 | 'right_hip_yaw_link', 'right_hip_roll_link', 'right_hip_pitch_link', 'right_knee_link',
31 | ]
32 |
33 | randomize_pd_gain : False
34 | kp_range : [0.75, 1.25]
35 | kd_range : [0.75, 1.25]
36 |
37 |
38 | randomize_torque_rfi : False
39 | rfi_lim : 0.1
40 | randomize_rfi_lim : False
41 | rfi_lim_range : [0.5, 1.5]
42 |
43 | randomize_ctrl_delay : False
44 | ctrl_delay_step_range : [0, 2] # integer max real delay is 90ms
45 |
46 | randomize_motion_ref_xyz: False # head only for now
47 | motion_ref_xyz_range : [[-0.02, 0.02],[-0.02, 0.02],[-0.1, 0.1]]
48 |
49 | motion_package_loss: False
50 | package_loss_range: [1, 10] # dt = 0.02s, delay for 0.02s - 0.2s
51 | package_loss_interval_s : 2
52 |
53 |
54 | born_offset : False
55 | born_offset_curriculum: False
56 | born_offset_level_down_threshold: 50
57 | born_offset_level_up_threshold: 120
58 | level_degree: 0.00005
59 | born_distance : 0.25
60 | born_offset_range: [0.0, 1]
61 | born_offset_possibility : 1.0
62 |
63 | born_heading_curriculum: False
64 | born_heading_randomization : False
65 | born_heading_level_down_threshold: 50
66 | born_heading_level_up_threshold: 120
67 | born_heading_degree: 10
68 | born_heading_range: [0, 180]
69 | born_heading_level_degree: 0.00005
70 |
--------------------------------------------------------------------------------
/humanoidverse/config/domain_rand/domain_rand_rl_gym.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | domain_rand:
4 | # push robots
5 | push_robots : False
6 | push_interval_s : [5, 16]
7 | max_push_vel_xy : 1.0
8 |
9 | # base com
10 | randomize_base_com : False
11 | base_com_range:
12 | x : [-0.01, 0.01]
13 | y : [-0.01, 0.01]
14 | z : [-0.01, 0.01]
15 |
16 | # link mass
17 | randomize_link_mass : True
18 | link_mass_range : [0.9, 1.2]
19 |
20 | # pd gain
21 | randomize_pd_gain : True
22 | kp_range : [0.9, 1.1]
23 | kd_range : [0.9, 1.1]
24 |
25 | # friction
26 | randomize_friction : True
27 | friction_range : [0.5, 1.25] # [0.1, 1.0]
28 |
29 | # base mass
30 | randomize_base_mass : True
31 | added_mass_range : [-1., 3.]
32 |
33 | # rfi
34 | randomize_torque_rfi : False
35 | rfi_lim : 0.1
36 | randomize_rfi_lim : False
37 | rfi_lim_range : [0.5, 1.5]
38 |
39 |
40 | # control delay
41 | randomize_ctrl_delay : True
42 | lower_body_ctrl_delay: True
43 | upper_body_ctrl_delay: True
44 | ctrl_delay_step_range : [0, 1] # integer max real delay is 90ms
--------------------------------------------------------------------------------
/humanoidverse/config/env/base_task.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | env:
4 | _target_: humanoidverse.envs.base_task.base_task.BaseTask
5 | _recursive_: False
6 | config:
7 | experiment_name: ${experiment_name}
8 | num_envs: ${num_envs}
9 | headless: ${headless}
10 | simulator: ${simulator}
11 | save_rendering_dir: null
12 | ckpt_dir: null
13 |
14 | analysis: False
15 | eval_force_level: 0
16 |
17 | # Globally accessible parameters
18 | log_task_name: base_task
--------------------------------------------------------------------------------
/humanoidverse/config/env/decoupled_locomotion_stand_height_waist_wbc_ma_diff_force.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - legged_base
5 |
6 | # Env structure
7 | env:
8 | _target_: humanoidverse.envs.decoupled_locomotion.decoupled_locomotion_stand_height_waist_wbc_ma_diff_force.LeggedRobotDecoupledLocomotionStanceHeightWBCForce
9 | config:
10 | max_episode_length_s: 20
11 | locomotion_obs_scales:
12 | lin_vel: 1.0
13 | ang_vel: 1.0
14 | dof_pos: 1.0
15 | dof_vel: 1.0
16 | humanoid_commands_scale: 1.0
17 | locomotion_command_ranges:
18 | lin_vel_x: [-1.0, 1.0]
19 | lin_vel_y: [-1.0, 1.0]
20 | ang_vel_yaw: [-1.0, 1.0]
21 | heading: [-3.14, 3.14]
22 | base_height: [-0.25, 0.0]
23 | # base_height: [-0.0, 0.0]
24 | stand_prob: 0.4
25 | tapping_in_place_prob: 0.0
26 | # Upper body waist motion setting
27 | fix_upper_body_prob: 0.3
28 | apply_waist_roll_only_when_stance: True
29 | apply_waist_pitch_only_when_stance: True
30 | apply_waist_yaw_only_when_stance: True
31 | fix_waist_yaw: True
32 | fix_waist_roll: True
33 | fix_waist_pitch: True
34 | fix_waist_yaw_range: [-1.0, 1.0]
35 | fix_waist_roll_range: [-0.0, 0.0]
36 | fix_waist_pitch_range: [-0.0, 0.0]
37 | zero_fix_waist_yaw_prob: 0.1
38 | zero_fix_waist_roll_prob: 1.0
39 | zero_fix_waist_pitch_prob: 1.0
40 | # Upper body motion resampling
41 | resample_motion_when_training: True
42 | resample_time_interval_s: 2000
43 | # Locomotion command resampling
44 | locomotion_command_resampling_time: 10.0
45 | obs_history:
46 | history_len: 0
47 |
48 | # Residual upper body action
49 | residual_upper_body_action: True
50 |
51 | # Force
52 | max_force_estimation: True
53 | update_apply_force_phase: True
54 | use_lpf: True
55 | force_filter_alpha: 0.05
56 | zero_tapping_xy_force: False
57 | zero_force_prob: [0.25, 0.25, 0.25] # 0.2
58 | random_force_prob: 0.0
59 | apply_force_x_range: [-40.0, 40.0]
60 | apply_force_y_range: [-40.0, 40.0]
61 | apply_force_z_range: [-50.0, 5.0]
62 | apply_force_pos_ratio_range: [0.0, 1.0]
63 | lower_body_force_compensation: False
64 | apply_force_in_physics_step: True
65 | apply_force_compensation_in_physics_step: False
66 | randomize_force_duration: [150, 250] # The unit is step, where policy runs at 50Hz
67 | only_apply_z_force_when_walking: False
68 | only_apply_resistance_when_walking: True
69 |
70 | termination:
71 | terminate_by_contact: False
72 | # terminate_by_lin_vel: False
73 | # terminate_by_ang_vel: False
74 | terminate_by_gravity: True
75 | terminate_by_low_height: True
76 | terminate_when_close_to_dof_pos_limit: False
77 | terminate_when_close_to_dof_vel_limit: False
78 | terminate_when_close_to_torque_limit: False
79 | terminate_when_motion_end: True
80 | terminate_when_low_upper_dof_tracking: False
81 |
82 | termination_scales:
83 | termination_min_base_height : 0.3
84 | # termination_max_base_vel : 10.0
85 | # termination_max_base_ang_vel : 5.0
86 | termination_gravity_x : 0.8
87 | termination_gravity_y : 0.8
88 | termination_close_to_dof_pos_limit : 0.98
89 | termination_close_to_dof_vel_limit : 0.98
90 | termination_close_to_torque_limit : 0.98
91 | terminate_when_low_upper_dof_tracking_threshold: 0.1
92 |
93 |
94 | # Globally accessible parameters
95 | log_task_name: decoupled_locomotion
96 |
97 | eval_overrides:
98 | env:
99 | config:
100 | max_episode_length_s: 100000
101 |
--------------------------------------------------------------------------------
/humanoidverse/config/env/legged_base.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - base_task
5 |
6 | # Env structure
7 | env:
8 | _target_: humanoidverse.envs.legged_base_task.legged_robot_base.LeggedRobotBase
9 | _recursive_: False
10 | config:
11 | robot: ${robot}
12 | domain_rand: ${domain_rand}
13 | rewards: ${rewards}
14 | terrain: ${terrain}
15 | obs: ${obs}
16 | env_spacing: 5.0
17 | max_episode_length_s: 20
18 |
19 | normalization:
20 | clip_actions: 100.0
21 | clip_observations: 100.0
22 |
23 | # simulator:
24 | # sim:
25 | # fps: 200 # 1/dt , dt = 0.005
26 | # control_decimation: 4 # decimation
27 | # substeps: 1
28 | termination:
29 | terminate_when_close_to_dof_pos_limit: False
30 | terminate_when_close_to_dof_vel_limit: False
31 | terminate_when_close_to_torque_limit: False
32 | termination_scales:
33 | termination_close_to_dof_pos_limit : 0.98
34 | termination_close_to_dof_vel_limit : 0.98
35 | termination_close_to_torque_limit : 0.98
36 |
37 | # Globally accessible parameters
38 | log_task_name: legged_base
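
The `_target_` / `_recursive_: False` pair is Hydra's object-instantiation convention: the class named by the dotted path is constructed, and with recursion disabled it receives `cfg.env.config` as a raw config node rather than pre-instantiated children. A minimal sketch of how such a node is typically instantiated; the entry-point script, config path, and config name are assumptions, not quoted from this repo.

```python
import hydra
from omegaconf import DictConfig


# config_path is relative to this script's location; both it and the
# root config name ("base") are assumptions based on the config tree.
@hydra.main(config_path="humanoidverse/config", config_name="base", version_base=None)
def main(cfg: DictConfig) -> None:
    # cfg.env._target_ -> ...legged_robot_base.LeggedRobotBase
    # _recursive_: False -> the class gets cfg.env.config as a raw DictConfig
    # instead of Hydra pre-instantiating nested nodes such as ${robot} or ${terrain}.
    # Any extra constructor arguments the class needs can be passed as
    # keyword arguments to instantiate().
    env = hydra.utils.instantiate(cfg.env)
    print(type(env).__name__)


if __name__ == "__main__":
    main()
```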
--------------------------------------------------------------------------------
/humanoidverse/config/exp/base_exp.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - /algo: base_algo
5 | - /env: base_task
6 |
7 | experiment_name: TESTBaseExp
--------------------------------------------------------------------------------
/humanoidverse/config/exp/decoupled_locomotion_stand_height_waist_wbc_diff_force_ma_ppo_ma_env.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - /algo: ppo_decoupled_wbc_ma
5 | - /env: decoupled_locomotion_stand_height_waist_wbc_ma_diff_force
6 |
7 | experiment_name: TEST_DecoupledLocomotion
--------------------------------------------------------------------------------
/humanoidverse/config/exp/legged_base.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - /algo: ppo
5 | - /env: legged_base
6 |
7 | experiment_name: TESTLeggedBase
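
The three `exp` files above are thin presets: each pulls one `algo`/`env` pair from the config groups and names the experiment. A hedged sketch of composing one of them with Hydra's Compose API follows; the root config name (`base`) and the exact override spelling (whether the `+` prefix is needed depends on base.yaml's defaults list) are assumptions based on the config tree.

```python
from hydra import compose, initialize


with initialize(config_path="humanoidverse/config", version_base=None):
    cfg = compose(
        config_name="base",
        overrides=["+exp=legged_base", "+simulator=isaacgym"],
    )

print(cfg.experiment_name)                      # -> TESTLeggedBase
print(cfg.env.config.max_episode_length_s)      # -> 20, from legged_base.yaml
```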
--------------------------------------------------------------------------------
/humanoidverse/config/obs/dec_loco/g1_29dof_obs_diff_force_history_wolinvel_ma.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Be careful when using _raw, history
4 | obs:
5 | use_phase: True
6 | obs_dict:
7 | actor_obs: [
8 | # base_lin_vel,
9 | base_ang_vel,
10 | projected_gravity,
11 | command_lin_vel,
12 | command_ang_vel,
13 | command_stand,
14 | command_waist_dofs,
15 | command_base_height,
16 | ref_upper_dof_pos,
17 | dof_pos,
18 | dof_vel,
19 | actions,
20 | # sin_phase,
21 | # cos_phase,
22 | ]
23 |
24 | critic_obs: [
25 | base_orientation,
26 | base_lin_vel,
27 | base_ang_vel,
28 | projected_gravity,
29 | command_lin_vel,
30 | command_ang_vel,
31 | command_stand,
32 | command_waist_dofs,
33 | command_base_height,
34 | ref_upper_dof_pos,
35 | dof_pos,
36 | dof_vel,
37 | actions,
38 | left_ee_apply_force,
39 | right_ee_apply_force,
40 | # sin_phase,
41 | # cos_phase,
42 | ]
43 |
44 | history_length:
45 | actor_obs: 5
46 | critic_obs: 1
47 |
48 | obs_scales: {
49 | base_orientation: 1.0,
50 | base_lin_vel: 2.0,
51 | base_ang_vel: 0.25,
52 | projected_gravity: 1.0,
53 | command_lin_vel: 1.0,
54 | command_ang_vel: 1.0,
55 | command_stand: 1.0,
56 | command_waist_dofs: 1.0,
57 | command_base_height: 2.0,
58 | ref_upper_dof_pos: 1.0,
59 | dof_pos: 1.0,
60 | dof_vel: 0.05,
61 | actions: 1.0,
62 | sin_phase: 1.0,
63 | cos_phase: 1.0,
64 | left_ee_apply_force: 0.1,
65 | right_ee_apply_force: 0.1,
66 | history_actor: 1.0,
67 | }
68 |
69 | add_noise: False
70 | # noise_scales: {
71 | # base_lin_vel: 0.0,
72 | # base_ang_vel: 0.2,
73 | # projected_gravity: 0.05,
74 | # command_lin_vel: 0.0,
75 | # command_ang_vel: 0.0,
76 | # command_stand: 0.0,
77 | # command_base_height: 0.0,
78 | # ref_upper_dof_pos: 0.01,
79 | # dof_pos: 0.01,
80 | # dof_vel: 1.5, # 1.5
81 | # actions: 0.0,
82 | # sin_phase: 0.0,
83 | # cos_phase: 0.0,
84 | # phase_time: 0.0,
85 | # history_actor: 0.0,
86 | # history_critic: 0.0,
87 | # }
88 | noise_scales: {
89 | base_orientation: 0.0,
90 | base_lin_vel: 0.0,
91 | base_ang_vel: 0.0,
92 | projected_gravity: 0.0,
93 | command_lin_vel: 0.0,
94 | command_ang_vel: 0.0,
95 | command_stand: 0.0,
96 | command_waist_dofs: 0.0,
97 | command_base_height: 0.0,
98 | ref_upper_dof_pos: 0.0,
99 | dof_pos: 0.01,
100 | dof_vel: 0.1, # 1.5
101 | actions: 0.0,
102 | sin_phase: 0.0,
103 | cos_phase: 0.0,
104 | left_ee_apply_force: 0.0,
105 | right_ee_apply_force: 0.0,
106 | history_actor: 0.0,
107 | }
108 |
109 | # obs_dims should use list instead of dict
110 | # will be converted to dict in `pre_process_config`
111 | obs_dims:
112 | - base_orientation: 4
113 | - base_lin_vel: 3
114 | - base_ang_vel: 3
115 | - projected_gravity: 3
116 | - command_lin_vel: 2
117 | - command_ang_vel: 1
118 | - command_stand: 1
119 | - command_waist_dofs: 3
120 | - command_base_height: 1
121 | - ref_upper_dof_pos: ${robot.upper_body_actions_dim} # upper body actions
122 | - dof_pos: ${robot.dof_obs_size}
123 | - dof_vel: ${robot.dof_obs_size}
124 | - actions: ${robot.dof_obs_size} # wbc body actions
125 | - sin_phase: 1
126 | - cos_phase: 1
127 | - left_ee_apply_force: 3
128 | - right_ee_apply_force: 3
129 |
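
Per the trailing comment, `obs_dims` is a list of single-key entries that `pre_process_config` flattens into a dict. The sketch below only illustrates the resulting per-group observation sizes; the two `${...}` interpolations are replaced with assumed values, and treating the history as a plain stack of `history_length` copies is likewise an assumption.

```python
# Illustrative only: flatten obs_dims and add up per-group observation sizes.
DOF_OBS_SIZE = 29            # assumed value of ${robot.dof_obs_size}
UPPER_BODY_ACTIONS_DIM = 14  # assumed value of ${robot.upper_body_actions_dim}

obs_dims = {
    "base_orientation": 4, "base_lin_vel": 3, "base_ang_vel": 3,
    "projected_gravity": 3, "command_lin_vel": 2, "command_ang_vel": 1,
    "command_stand": 1, "command_waist_dofs": 3, "command_base_height": 1,
    "ref_upper_dof_pos": UPPER_BODY_ACTIONS_DIM,
    "dof_pos": DOF_OBS_SIZE, "dof_vel": DOF_OBS_SIZE, "actions": DOF_OBS_SIZE,
    "sin_phase": 1, "cos_phase": 1,
    "left_ee_apply_force": 3, "right_ee_apply_force": 3,
}

actor_obs = [
    "base_ang_vel", "projected_gravity", "command_lin_vel", "command_ang_vel",
    "command_stand", "command_waist_dofs", "command_base_height",
    "ref_upper_dof_pos", "dof_pos", "dof_vel", "actions",
]
critic_obs = ["base_orientation", "base_lin_vel"] + actor_obs + [
    "left_ee_apply_force", "right_ee_apply_force",
]
history_length = {"actor_obs": 5, "critic_obs": 1}

for name, terms in (("actor_obs", actor_obs), ("critic_obs", critic_obs)):
    per_step = sum(obs_dims[t] for t in terms)
    stacked = per_step * history_length[name]
    print(f"{name}: {per_step} per step, {stacked} if simply stacked over history")
```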
--------------------------------------------------------------------------------
/humanoidverse/config/obs/dec_loco/t1_29dof_obs_diff_force_history_wolinvel_ma.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Be careful when using _raw, history
4 | obs:
5 | use_phase: True
6 | obs_dict:
7 | actor_obs: [
8 | # base_lin_vel,
9 | base_ang_vel,
10 | projected_gravity,
11 | command_lin_vel,
12 | command_ang_vel,
13 | command_stand,
14 | command_waist_dofs,
15 | command_base_height,
16 | ref_upper_dof_pos,
17 | dof_pos,
18 | dof_vel,
19 | actions,
20 | # sin_phase,
21 | # cos_phase,
22 | ]
23 |
24 | critic_obs: [
25 | base_orientation,
26 | base_lin_vel,
27 | base_ang_vel,
28 | projected_gravity,
29 | command_lin_vel,
30 | command_ang_vel,
31 | command_stand,
32 | command_waist_dofs,
33 | command_base_height,
34 | ref_upper_dof_pos,
35 | dof_pos,
36 | dof_vel,
37 | actions,
38 | left_ee_apply_force,
39 | right_ee_apply_force,
40 | # sin_phase,
41 | # cos_phase,
42 | ]
43 |
44 | history_length:
45 | actor_obs: 5
46 | critic_obs: 1
47 |
48 | obs_scales: {
49 | base_orientation: 1.0,
50 | base_lin_vel: 1.0,
51 | base_ang_vel: 1.0,
52 | projected_gravity: 1.0,
53 | command_lin_vel: 1.0,
54 | command_ang_vel: 1.0,
55 | command_stand: 1.0,
56 | command_waist_dofs: 1.0,
57 | command_base_height: 2.0,
58 | ref_upper_dof_pos: 1.0,
59 | dof_pos: 1.0,
60 | dof_vel: 0.1,
61 | actions: 1.0,
62 | sin_phase: 1.0,
63 | cos_phase: 1.0,
64 | left_ee_apply_force: 0.1,
65 | right_ee_apply_force: 0.1,
66 | history_actor: 1.0,
67 | }
68 |
69 | add_noise: False
70 | # noise_scales: {
71 | # base_lin_vel: 0.0,
72 | # base_ang_vel: 0.2,
73 | # projected_gravity: 0.05,
74 | # command_lin_vel: 0.0,
75 | # command_ang_vel: 0.0,
76 | # command_stand: 0.0,
77 | # command_base_height: 0.0,
78 | # ref_upper_dof_pos: 0.01,
79 | # dof_pos: 0.01,
80 | # dof_vel: 1.5, # 1.5
81 | # actions: 0.0,
82 | # sin_phase: 0.0,
83 | # cos_phase: 0.0,
84 | # phase_time: 0.0,
85 | # history_actor: 0.0,
86 | # history_critic: 0.0,
87 | # }
88 | noise_scales: {
89 | base_orientation: 0.0,
90 | base_lin_vel: 0.0,
91 | base_ang_vel: 0.0,
92 | projected_gravity: 0.0,
93 | command_lin_vel: 0.0,
94 | command_ang_vel: 0.0,
95 | command_stand: 0.0,
96 | command_waist_dofs: 0.0,
97 | command_base_height: 0.0,
98 | ref_upper_dof_pos: 0.0,
99 | dof_pos: 0.01,
100 | dof_vel: 0.1, # 1.5
101 | actions: 0.0,
102 | sin_phase: 0.0,
103 | cos_phase: 0.0,
104 | left_ee_apply_force: 0.0,
105 | right_ee_apply_force: 0.0,
106 | history_actor: 0.0,
107 | }
108 |
109 | # obs_dims should use list instead of dict
110 | # will be converted to dict in `pre_process_config`
111 | obs_dims:
112 | - base_orientation: 4
113 | - base_lin_vel: 3
114 | - base_ang_vel: 3
115 | - projected_gravity: 3
116 | - command_lin_vel: 2
117 | - command_ang_vel: 1
118 | - command_stand: 1
119 | - command_waist_dofs: 3
120 | - command_base_height: 1
121 | - ref_upper_dof_pos: ${robot.upper_body_actions_dim} # upper body actions
122 | - dof_pos: ${robot.dof_obs_size}
123 | - dof_vel: ${robot.dof_obs_size}
124 | - actions: ${robot.dof_obs_size} # wbc body actions
125 | - sin_phase: 1
126 | - cos_phase: 1
127 | - left_ee_apply_force: 3
128 | - right_ee_apply_force: 3
129 |
--------------------------------------------------------------------------------
/humanoidverse/config/obs/legged_obs.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Be careful when using _raw, history
4 | obs:
5 | obs_dict:
6 | actor_obs: ???
7 | critic_obs: ???
8 |
9 | history_length:
10 | actor_obs: ???
11 | critic_obs: ???
12 |
13 | obs_scales: ???
14 |
15 | noise_scales: ???
16 |
17 | obs_dims: ???
18 | # Will store values after preprocessing, don't put anything here
19 | post_compute_config: {}
20 |
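
The `???` entries are OmegaConf's mandatory-missing marker: this file fixes the schema, and a concrete obs config (such as the two `dec_loco` files above) must supply the values before they are accessed. A tiny illustration of that behaviour:

```python
from omegaconf import OmegaConf

cfg = OmegaConf.create({"obs": {"obs_scales": "???"}})
print(OmegaConf.is_missing(cfg.obs, "obs_scales"))  # True: must be overridden

# Merging a concrete obs file (here a hand-written stand-in) fills the value.
cfg2 = OmegaConf.merge(cfg, {"obs": {"obs_scales": {"dof_pos": 1.0}}})
print(cfg2.obs.obs_scales.dof_pos)  # 1.0
```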
--------------------------------------------------------------------------------
/humanoidverse/config/opt/record.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | env:
4 | config:
5 | save_motion: False
6 | eval_timestamp: ${eval_timestamp}
7 | save_note: null
8 | ckpt_dir: null
9 | save_total_steps: 10000
--------------------------------------------------------------------------------
/humanoidverse/config/opt/wandb.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | use_wandb: True
4 |
5 | wandb:
6 | wandb_project: null
7 | wandb_tags: 'online' # 'online' or 'offline' or 'disabled'
8 | wandb_group: null
9 | wandb_id: null
10 | wandb_entity: hang0610 # 'hang0610'
11 | wandb_dir: ${experiment_dir}/.wandb
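
These keys line up with `wandb.init` arguments, and the values listed in the `wandb_tags` comment ('online' / 'offline' / 'disabled') are what wandb calls the run *mode*, which suggests that is how the field is consumed. A hedged sketch of the mapping; the helper function and its placement are assumptions, not quoted from the training code.

```python
import wandb
from omegaconf import OmegaConf


def init_wandb(cfg):
    """Illustrative mapping from this config node to wandb.init arguments."""
    if not cfg.use_wandb:
        return None
    w = cfg.wandb
    return wandb.init(
        project=w.wandb_project,
        entity=w.wandb_entity,
        group=w.wandb_group,
        id=w.wandb_id,
        dir=w.wandb_dir,
        mode=w.wandb_tags,  # 'online' | 'offline' | 'disabled' per the comment
        config=OmegaConf.to_container(cfg, resolve=True),
    )
```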
--------------------------------------------------------------------------------
/humanoidverse/config/rewards/dec_loco/reward_dec_loco_stand_height_ma_diff_force.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # defaults:
4 | # - reward_base
5 |
6 | rewards:
7 | set_reward: Yuanhang
8 | set_reward_date: 20250405
9 | only_positive_rewards: False
10 | reward_scales:
11 | tracking_lin_vel_x: 2.0
12 | tracking_lin_vel_y: 1.5
13 | tracking_ang_vel: 4.0
14 | tracking_walk_base_height: 1.0
15 | tracking_stance_base_height: 4.0 # 1.0
16 | # tracking_waist_dofs: 0.5
17 | tracking_waist_dofs_tapping: 0.5
18 | tracking_waist_dofs_stance: 3.0
19 | # tracking_arm_dofs: 0.5
20 | penalty_lin_vel_z: -2.0
21 | penalty_ang_vel_xy: -0.05 # -0.5
22 | penalty_orientation: -1.5 # -1.0
23 | penalty_torso_orientation: -1.0 # -1.0
24 | penalty_lower_body_torques: -1e-5 #-0.00001
25 | penalty_lower_body_dof_vel: -1.0e-3
26 | penalty_lower_body_dof_acc: -2.5e-7
27 | penalty_lower_body_action_rate: -0.1 # -0.5
28 | # penalty_feet_contact_forces: -0.01
29 | # penalty_stumble: -10.0
30 | # penalty_slippage: -1.0
31 | penalty_contact_no_vel: -0.2 # -0.2
32 | penalty_feet_ori: -2.0
33 | limits_lower_body_dof_pos: -5.0
34 | # limits_lower_body_dof_vel: -5.0
35 | # limits_lower_body_torque: -0.1
36 | feet_air_time: 4.0
37 | base_height: -10.0
38 | termination: -250.0 # -500.0
39 | penalty_feet_height: -5.0
40 | penalty_feet_swing_height: -20.0
41 | feet_heading_alignment: -0.25
42 | penalty_close_feet_xy: -10.
43 | # penalty_close_knees_xy: -2.
44 | penalty_hip_pos: -2.5 # -5.0
45 | penalty_ang_vel_xy_torso: -1.0 # -0.75
46 | penalty_stance_dof: -1.0e-3 # -1.0e-3
47 | # penalty_stance_feet: -2.5
48 | penalty_stance_tap_feet: -5.0
49 | penalty_stance_root: -5.0
50 | # contact: 0.18 # 0.18
51 | # alive: 1.0 # 0.15
52 | # penalty_waist_dofs: -0.5
53 | # penalty_upper_body_dofs_freeze: -0.4
54 | penalty_stand_still: -0.15
55 | penalty_stance_symmetry: -0.5
56 | # penalty_stance_contact_no_ang_vel: -0.1
57 | penalty_ankle_roll: -2.0
58 | # penalty_stance_feet_vel: -1.0e-2
59 | penalty_contact: -4.0
60 | penalty_negative_knee_joint: -1.0
61 | # penalty_torso_roll_orientation: -2.5
62 | penalty_diff_feet_air_time: -5.0
63 | feet_max_height_for_this_air: -0.0
64 | penalty_shift_in_zero_command: -1.0
65 | penalty_ang_shift_in_zero_command: -1.0
66 | # penalty_root_acc_lower_body: -1e-4
67 |
68 | # tracking_arm_dofs: 0.5
69 | tracking_upper_body_dofs: 4.0
70 | penalty_upper_body_torques: -1e-5 #-0.00001
71 | penalty_upper_body_dof_vel: -1.0e-3
72 | penalty_upper_body_dof_acc: -2.5e-7
73 | penalty_upper_body_action_rate: -0.1 # -0.5
74 | limits_upper_body_dof_pos: -5.0
75 | penalty_ee_lin_acc: -1e-1
76 | penalty_ee_ang_acc: -1e-2
77 | # penalty_root_acc_upper_body: -1e-4
78 | limits_upper_body_dof_vel: -5.0
79 | limits_upper_body_torque: -0.1
80 | reward_groups:
81 | lower_body: [
82 | "tracking_lin_vel_x",
83 | "tracking_lin_vel_y",
84 | "tracking_ang_vel",
85 | # "tracking_waist_dofs",
86 | "tracking_waist_dofs_tapping",
87 | "tracking_waist_dofs_stance",
88 | "tracking_walk_base_height",
89 | "tracking_stance_base_height",
90 | "penalty_lin_vel_z",
91 | "penalty_ang_vel_xy",
92 | "penalty_orientation",
93 | "penalty_lower_body_torques",
94 | "penalty_lower_body_dof_vel",
95 | "penalty_lower_body_dof_acc",
96 | "penalty_lower_body_action_rate",
97 | "penalty_contact_no_vel",
98 | "penalty_feet_ori",
99 | "limits_lower_body_dof_pos",
100 | # "limits_lower_body_dof_vel",
101 | # "limits_lower_body_torque",
102 | "feet_air_time",
103 | "penalty_diff_feet_air_time",
104 | "base_height",
105 | "termination",
106 | "penalty_feet_height",
107 | "penalty_feet_swing_height",
108 | # "penalty_feet_contact_forces",
109 | "feet_heading_alignment",
110 | "penalty_close_feet_xy",
111 | "penalty_hip_pos",
112 | "penalty_ang_vel_xy_torso",
113 | "penalty_stance_dof",
114 | "penalty_stance_tap_feet",
115 | "penalty_stance_root",
116 | "penalty_stand_still",
117 | "penalty_stance_symmetry",
118 | "penalty_ankle_roll",
119 | "penalty_contact",
120 | "penalty_negative_knee_joint",
121 | "penalty_torso_orientation",
122 | "feet_max_height_for_this_air",
123 | # "alive",
124 | "penalty_shift_in_zero_command",
125 | "penalty_ang_shift_in_zero_command",
126 | # "penalty_root_acc_lower_body",
127 | # "penalty_stance_contact_no_ang_vel"
128 | ]
129 | upper_body: [
130 | "tracking_upper_body_dofs",
131 | "penalty_upper_body_torques",
132 | "penalty_upper_body_dof_vel",
133 | "penalty_upper_body_dof_acc",
134 | "penalty_upper_body_action_rate",
135 | "limits_upper_body_dof_pos",
136 | "limits_upper_body_dof_vel",
137 | "limits_upper_body_torque",
138 | "penalty_ee_lin_acc",
139 | "penalty_ee_ang_acc",
140 | # "penalty_root_acc_upper_body",
141 | ]
142 |
143 | reward_weights:
144 | lower_body: 1.0
145 | upper_body: 1.0
146 |
147 | # fix upper body
148 | fix_upper_body: False
149 | # gait period
150 | gait_period: 1.0
151 |
152 | reward_tracking_sigma:
153 | lin_vel: 0.25
154 | ang_vel: 0.25
155 | base_height: 0.05
156 | waist_dofs: 0.05
157 | arm_dofs: 0.1
158 | upper_body_dofs: 0.1 # 0.1
159 | reward_limit:
160 | soft_dof_pos_limit: 0.95
161 | soft_dof_vel_limit: 0.95
162 | soft_torque_limit: 0.95
163 |
164 | close_knees_threshold: 0.16
165 | close_feet_threshold: 0.17
166 | locomotion_max_contact_force: 300.0
167 |
168 | # Unitree G1
169 | feet_height_target: 0.12
170 | feet_height_stand: 0.025
171 | desired_feet_max_height_for_this_air: 0.12
172 | desired_base_height: 0.75 # 0.728
173 | # Booster T1
174 | # feet_height_target: 0.08
175 | # feet_height_stand: 0.020
176 | # desired_feet_max_height_for_this_air: 0.08
177 | # desired_base_height: 0.62 # 0.728
178 |
179 | stance_base_height_penalty_scale: 5.0
180 |
181 | reward_penalty_curriculum: True
182 | reward_initial_penalty_scale : 0.5
183 | reward_min_penalty_scale: 0.0
184 | reward_max_penalty_scale: 1.0
185 | reward_penalty_level_down_threshold: 40 # 150 40 20
186 | reward_penalty_level_up_threshold: 210 # 850 230 90
187 | reward_penalty_degree: 0.00001
188 | num_compute_average_epl: 10000
189 |
190 | upper_body_motion_scale_curriculum: False
191 | upper_body_motion_initial_scale: 0.0
192 | upper_body_motion_scale_up_threshold: 210
193 | upper_body_motion_scale_down_threshold: 200
194 | upper_body_motion_scale_up: 0.05
195 | upper_body_motion_scale_down: 0.01
196 | upper_body_motion_scale_max: 1.0
197 | upper_body_motion_scale_min: 0.0
198 |
199 | command_height_scale_curriculum: True
200 | command_height_scale_initial_scale: 0.1
201 | command_height_scale_up_threshold: 210
202 | command_height_scale_down_threshold: 200
203 | command_height_scale_up: 0.02
204 | command_height_scale_down: 0.01
205 | command_height_scale_degree: 0.00005
206 | command_height_scale_max: 1.0
207 | command_height_scale_min: 0.0
208 |
209 | force_scale_curriculum: True
210 | force_scale_initial_scale: 0.1
211 | force_scale_up_threshold: 210
212 | force_scale_down_threshold: 200
213 | force_scale_up: 0.02
214 | force_scale_down: 0.02
215 | force_scale_degree: 0.00005
216 | force_scale_max: 1.0
217 | force_scale_min: 0.0
218 |
219 | upper_body_tracking_sigma_curriculum: False
220 | upper_body_tracking_sigma_initial_scale: 1.0
221 | upper_body_tracking_sigma_scale_up_threshold: 0.45
222 | upper_body_tracking_sigma_scale_down_threshold: 0.5
223 | upper_body_tracking_sigma_scale_up: 0.02
224 | upper_body_tracking_sigma_scale_down: 0.02
225 | upper_body_tracking_sigma_max: 1.0
226 | upper_body_tracking_sigma_min: 0.1
227 |
228 | reward_penalty_reward_names: [
229 | "penalty_upper_body_torques",
230 | "penalty_upper_body_dof_acc",
231 | "penalty_upper_body_dof_vel",
232 | "penalty_upper_body_action_rate",
233 | "penalty_lower_body_torques",
234 | "penalty_lower_body_dof_acc",
235 | "penalty_lower_body_dof_vel",
236 | "penalty_lower_body_action_rate",
237 | "limits_upper_body_dof_pos",
238 | "limits_upper_body_dof_vel",
239 | "limits_upper_body_torque",
240 | "limits_lower_body_dof_pos",
241 | "limits_lower_body_dof_vel",
242 | "limits_lower_body_torque",
243 | # "penalty_stance_contact_no_ang_vel",
244 | # "penalty_feet_ori",
245 | # "feet_heading_alignment",
246 | # "penalty_ang_vel_xy_torso",
247 | # "penalty_torso_roll_orientation",
248 | # "penalty_stance_dof",
249 | # "penalty_stance_tap_feet",
250 | # "penalty_stance_root",
251 | # "penalty_stance_symmetry",
252 | # "penalty_shift_in_zero_command",
253 | # "penalty_ang_shift_in_zero_command",
254 | # "penalty_ee_lin_acc",
255 | # "penalty_ee_ang_acc",
256 | ]
257 |
258 |
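
The `reward_penalty_curriculum` block ramps the penalty terms listed in `reward_penalty_reward_names` up or down depending on how long episodes survive on average. The sketch below is only an assumption-labelled illustration of the kind of update rule the `level_up` / `level_down` thresholds and `reward_penalty_degree` suggest; the real formula lives in the environment code.

```python
def update_penalty_scale(scale: float,
                         avg_episode_length: float,
                         num_steps: int,
                         level_down: float = 40.0,
                         level_up: float = 210.0,
                         degree: float = 1e-5,
                         lo: float = 0.0,
                         hi: float = 1.0) -> float:
    """Illustrative curriculum step (not the repo's exact rule).

    If the robot survives long enough (average episode length above the
    level-up threshold), the penalty scale grows toward 1.0; if it keeps
    failing early (below the level-down threshold), the scale shrinks,
    easing the task. `degree` sets how fast the scale moves per step.
    """
    if avg_episode_length > level_up:
        scale += degree * num_steps
    elif avg_episode_length < level_down:
        scale -= degree * num_steps
    return min(max(scale, lo), hi)


scale = 0.5  # reward_initial_penalty_scale
scale = update_penalty_scale(scale, avg_episode_length=250.0, num_steps=24)
```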
--------------------------------------------------------------------------------
/humanoidverse/config/robot/robot_base.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | robot:
4 | # Observation parameters
5 | dof_obs_size: ???
6 | number_of_actions: ???
7 | self_obs_max_coords: ???
8 | num_bodies: ???
9 |
10 | # Control parameters
11 | # dof_body_ids: ???
12 | # dof_offsets: ???
13 |
14 | algo_obs_dim_dict: ???
15 |
16 | key_bodies: null
17 | contact_bodies: null
18 |
19 | foot_name: null
20 |
21 | init_state: null
22 |
23 | contact_pairs_multiplier: 16
24 |
25 | num_key_bodies: ${len:${robot.key_bodies}}
26 | mimic_small_marker_bodies: null
27 |
28 | randomize_link_body_names: ???
29 |
30 | control:
31 | # Can be "isaac_pd" or "P"/"V"/"T" for Proportional, Velocity, Torque control
32 | control_type: isaac_pd
33 | # PD Drive parameters:
34 | stiffness: null
35 | damping: null
36 | # action scale: target angle = actionScale * action + defaultAngle
37 | # only used in manual PD control
38 | action_scale: 1.0
39 | # Used with isaac pd controller
40 | isaac_pd_scale: False # This is needed for the SMPL model due to weight variations
41 | clamp_actions: 1.0
42 | clip_torques: ???
43 |
44 | asset:
45 | collapse_fixed_joints: null
46 | replace_cylinder_with_capsule: null
47 | flip_visual_attachments: null
48 | armature: null
49 | thickness: null
50 | max_angular_velocity: null
51 | max_linear_velocity: null
52 | density: null
53 | angular_damping: null
54 | linear_damping: null
55 | disable_gravity: null
56 | fix_base_link: null
57 | default_dof_drive_mode: 1 # see GymDofDriveModeFlags (0 is none, 1 is pos tgt, 2 is vel tgt, 3 effort)
58 |
59 | robot_type: ???
60 | urdf_file: null
61 | usd_file: null
62 | xml_file: null
63 | asset_root: "humanoidverse/data/robots"
64 | self_collisions: True
65 | filter_ints: null
66 |
67 | # motion_lib:
68 | # _target_: phys_anim.utils.motion_lib.MotionLib
69 | # motion_file: ${motion_file}
70 | # ref_height_adjust: 0.
71 | # fix_motion_heights: True
72 |
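
`num_key_bodies: ${len:${robot.key_bodies}}` relies on a custom OmegaConf resolver named `len`, which is not built in and must be registered before the config is resolved. The registration call below is real OmegaConf API; where this repo actually performs it is not shown here.

```python
from omegaconf import OmegaConf

# Register once, before loading/resolving any config that uses ${len:...}.
OmegaConf.register_new_resolver("len", lambda seq: len(seq), replace=True)

cfg = OmegaConf.create({
    "robot": {
        "key_bodies": ["left_ankle_roll_link", "right_ankle_roll_link"],
        "num_key_bodies": "${len:${robot.key_bodies}}",
    }
})
print(cfg.robot.num_key_bodies)  # 2
```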
--------------------------------------------------------------------------------
/humanoidverse/config/simulator/genesis.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Env structure
4 | simulator:
5 | _target_: humanoidverse.simulator.genesis.genesis.Genesis
6 | _recursive_: False
7 | config:
8 | name: "genesis"
9 | terrain: ${terrain}
10 | plane:
11 | static_friction: 1.0
12 | dynamic_friction: 1.0
13 | restitution: 0.0
14 | sim:
15 | fps: 200
16 | control_decimation: 4
17 | substeps: 1
18 | render_mode: "human" # [None, "human", "rgb_array"]
19 | render_interval: 1
20 |
21 | scene:
22 | num_envs: ${num_envs}
23 | env_spacing: ${env.config.env_spacing}
24 | replicate_physics: True
--------------------------------------------------------------------------------
/humanoidverse/config/simulator/isaacgym.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Env structure
4 | simulator:
5 | _target_: humanoidverse.simulator.isaacgym.isaacgym.IsaacGym
6 | _recursive_: False
7 | config:
8 | name: "isaacgym"
9 | terrain: ${terrain}
10 | plane:
11 | static_friction: 1.0
12 | dynamic_friction: 1.0
13 | restitution: 0.0
14 | sim:
15 | fps: 200
16 | control_decimation: 4
17 | substeps: 1
18 | physx:
19 | num_threads: 4
20 | solver_type: 1 # 0: pgs, 1: tgs
21 | num_position_iterations: 4
22 | num_velocity_iterations: 0
23 | contact_offset: 0.01
24 | rest_offset: 0.0
25 | bounce_threshold_velocity: 0.5
26 | max_depenetration_velocity: 1.0
27 | default_buffer_size_multiplier: 5
28 | contact_collection: 2
29 | enable_dof_force_sensors: False
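
The `fps` / `control_decimation` pair fixes the two time steps that comments elsewhere in these configs refer to (for example the 50 Hz policy rate mentioned next to `randomize_force_duration`). A quick worked check:

```python
fps = 200               # physics steps per second -> sim dt = 1 / 200 = 0.005 s
control_decimation = 4  # physics steps per policy step

sim_dt = 1.0 / fps
control_dt = control_decimation * sim_dt   # 0.02 s
policy_rate_hz = 1.0 / control_dt          # 50 Hz

# randomize_force_duration: [150, 250] policy steps -> 3.0 to 5.0 seconds
print(sim_dt, control_dt, policy_rate_hz)
print(150 * control_dt, 250 * control_dt)
```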
--------------------------------------------------------------------------------
/humanoidverse/config/simulator/isaacsim.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Env structure
4 | simulator:
5 | _target_: humanoidverse.simulator.isaacsim.isaacsim.IsaacSim
6 | _recursive_: False
7 | config:
8 | name: "isaacsim"
9 | terrain: ${terrain}
10 | plane:
11 | static_friction: 1.0
12 | dynamic_friction: 1.0
13 | restitution: 0.0
14 | sim:
15 | fps: 200
16 | control_decimation: 4
17 | substeps: 1
18 | physx:
19 | num_threads: 10
20 | solver_type: 1 # 0: pgs, 1: tgs
21 | num_position_iterations: 4
22 | num_velocity_iterations: 0
23 | contact_offset: 0.01
24 | rest_offset: 0.0
25 | bounce_threshold_velocity: 0.5
26 | max_depenetration_velocity: 1.0
27 | default_buffer_size_multiplier: 5
28 | contact_collection: 2
29 | render_mode: "human" # [None, "human", "rgb_array"]
30 | render_interval: 4
31 |
32 | scene:
33 | num_envs: ${num_envs}
34 | env_spacing: ${env.config.env_spacing}
35 | replicate_physics: True
--------------------------------------------------------------------------------
/humanoidverse/config/simulator/mujoco.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | # Env structure
4 | simulator:
5 | _target_: humanoidverse.simulator.mujoco.mujoco.MuJoCo
6 | _recursive_: False
7 | config:
8 | name: "mujoco"
9 | terrain: ${terrain}
10 | plane:
11 | static_friction: 1.0
12 | dynamic_friction: 1.0
13 | restitution: 0.0
14 | sim:
15 | fps: 200
16 | control_decimation: 4
17 | substeps: 1
18 | render_mode: "human" # [None, "human", "rgb_array"]
19 | render_interval: 1
20 |
21 | scene:
22 | num_envs: ${num_envs}
23 | env_spacing: ${env.config.env_spacing}
--------------------------------------------------------------------------------
/humanoidverse/config/terrain/terrain_base.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | terrain:
4 | static_friction: 1.0
5 | dynamic_friction: 1.0
6 | restitution: 0.0
--------------------------------------------------------------------------------
/humanoidverse/config/terrain/terrain_locomotion.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - terrain_base
5 |
6 | terrain:
7 | mesh_type : 'trimesh' # "heightfield" # none, plane, heightfield or trimesh
8 | horizontal_scale : 0.1 # [m]
9 | vertical_scale : 0.005 # [m]
10 | border_size : 40 # [m]
11 | curriculum : False
12 | # curriculum : False
13 | static_friction: 1.0
14 | dynamic_friction: 1.0
15 | restitution: 0.0
16 | # rough terrain only:
17 | measure_heights : False # keep it False
18 | measured_points_x : [ 0.] # 1mx1.6m rectangle (without center line)
19 | measured_points_y : [ 0.]
20 | selected : False # select a unique terrain type and pass all arguments
21 | terrain_kwargs : null # Dict of arguments for selected terrain
22 | max_init_terrain_level : 9 # starting curriculum state
23 | terrain_length : 8.
24 | terrain_width : 8.
25 | num_rows: 10 # number of terrain rows (levels)
26 | num_cols : 20 # number of terrain cols (types)
27 | terrain_types : ['flat', 'rough', 'low_obst', 'smooth_slope', 'rough_slope'] # do not duplicate!
28 | terrain_proportions : [0.2, 0.6, 0.2, 0.0, 0.0]
29 | # trimesh only:
30 | slope_treshold : 0.75 # slopes above this threshold will be corrected to vertical surfaces
--------------------------------------------------------------------------------
/humanoidverse/config/terrain/terrain_locomotion_plane.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | defaults:
4 | - terrain_base
5 |
6 | terrain:
7 | mesh_type : 'plane' # "heightfield" # none, plane, heightfield or trimesh
8 | horizontal_scale : 0.1 # [m]
9 | vertical_scale : 0.005 # [m]
10 | border_size : 40 # [m]
11 | curriculum : False
12 | # curriculum : False
13 | static_friction: 1.0
14 | dynamic_friction: 1.0
15 | restitution: 0.0
16 | # rough terrain only:
17 | measure_heights : False # keep it False
18 | measured_points_x : [ 0.] # 1mx1.6m rectangle (without center line)
19 | measured_points_y : [ 0.]
20 | selected : False # select a unique terrain type and pass all arguments
21 | terrain_kwargs : null # Dict of arguments for selected terrain
22 | max_init_terrain_level : 9 # starting curriculum state
23 | terrain_length : 8.
24 | terrain_width : 8.
25 | num_rows: 10 # number of terrain rows (levels)
26 | num_cols : 20 # number of terrain cols (types)
27 | terrain_types : ['flat', 'rough', 'low_obst', 'smooth_slope', 'rough_slope'] # do not duplicate!
28 | terrain_proportions : [0.2, 0.6, 0.2, 0.0, 0.0]
29 | # trimesh only:
30 | slope_treshold : 0.75 # slopes above this threshold will be corrected to vertical surfaces
--------------------------------------------------------------------------------
/humanoidverse/data/motions/g1_29dof/v1/accad_all.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/motions/g1_29dof/v1/accad_all.pkl
--------------------------------------------------------------------------------
/humanoidverse/data/motions/t1_29dof/v1/accad_all.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/motions/t1_29dof/v1/accad_all.pkl
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/.asset_hash:
--------------------------------------------------------------------------------
1 | b3de36f52e27b977487af75b6c3a10bf
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/head_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/head_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_G1_EE_pad.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_G1_EE_pad.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_ankle_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_ankle_pitch_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_ankle_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_ankle_roll_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_elbow_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_elbow_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_elbow_link_merge.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_elbow_link_merge.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_hand_index_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_hand_index_0_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_hand_index_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_hand_index_1_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_hand_middle_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_hand_middle_0_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_hand_middle_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_hand_middle_1_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_hand_palm_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_hand_palm_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_hand_thumb_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_hand_thumb_0_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_hand_thumb_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_hand_thumb_1_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_hand_thumb_2_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_hand_thumb_2_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_hip_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_hip_pitch_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_hip_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_hip_roll_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_hip_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_hip_yaw_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_knee_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_knee_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_rubber_hand.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_rubber_hand.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_shoulder_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_shoulder_pitch_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_shoulder_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_shoulder_roll_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_shoulder_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_shoulder_yaw_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_wrist_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_wrist_pitch_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_wrist_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_wrist_roll_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_wrist_roll_rubber_hand.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_wrist_roll_rubber_hand.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/left_wrist_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/left_wrist_yaw_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/logo_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/logo_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/pelvis.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/pelvis.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/pelvis_contour_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/pelvis_contour_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_G1_EE_pad.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_G1_EE_pad.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_ankle_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_ankle_pitch_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_ankle_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_ankle_roll_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_elbow_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_elbow_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_elbow_link_merge.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_elbow_link_merge.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_hand_index_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_hand_index_0_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_hand_index_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_hand_index_1_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_hand_middle_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_hand_middle_0_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_hand_middle_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_hand_middle_1_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_hand_palm_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_hand_palm_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_hand_thumb_0_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_hand_thumb_0_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_hand_thumb_1_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_hand_thumb_1_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_hand_thumb_2_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_hand_thumb_2_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_hip_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_hip_pitch_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_hip_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_hip_roll_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_hip_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_hip_yaw_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_knee_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_knee_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_rubber_hand.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_rubber_hand.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_shoulder_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_shoulder_pitch_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_shoulder_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_shoulder_roll_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_shoulder_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_shoulder_yaw_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_wrist_pitch_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_wrist_pitch_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_wrist_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_wrist_roll_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_wrist_roll_rubber_hand.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_wrist_roll_rubber_hand.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/right_wrist_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/right_wrist_yaw_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/torso_constraint_L_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/torso_constraint_L_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/torso_constraint_L_rod_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/torso_constraint_L_rod_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/torso_constraint_R_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/torso_constraint_R_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/torso_constraint_R_rod_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/torso_constraint_R_rod_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/torso_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/torso_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/tote_no_bar.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/tote_no_bar.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/waist_constraint_L.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/waist_constraint_L.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/waist_constraint_R.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/waist_constraint_R.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/waist_roll_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/waist_roll_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/waist_support_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/waist_support_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/meshes/waist_yaw_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/meshes/waist_yaw_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/scene_29dof_freebase.xml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/scene_29dof_freebase.xml
--------------------------------------------------------------------------------
/humanoidverse/data/robots/g1/scene_29dof_freebase_camera.xml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/g1/scene_29dof_freebase_camera.xml
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/README.md:
--------------------------------------------------------------------------------
1 | # Booster T1 Description (MJCF)
2 |
3 | > [!IMPORTANT]
4 | > Requires MuJoCo 2.3.4 or later.
5 |
6 | ## Changelog
7 |
8 | - 7/02/2025: Initial release.
9 |
10 | ## Overview
11 |
12 | This package contains a simplified robot description (MJCF) of the [T1 Humanoid
13 | Robot](https://www.boosterobotics.com/robots/) developed by [Booster
14 | Robotics](https://www.boosterobotics.com/). It is derived from the [publicly available
15 | URDF
16 | description](https://github.com/BoosterRobotics/booster_gym/blob/main/resources/T1/T1_serial.urdf).
17 |
18 |
19 |
20 |
21 |
22 | ## MJCF derivation steps
23 |
24 | 1. Started from `t1_serial.urdf`.
25 | 2. Added a `<mujoco>` element to the URDF `<robot>` tag.
26 | 3. Loaded the URDF into MuJoCo and saved a corresponding MJCF.
27 | 4. Added freejoint to trunk.
28 | 5. Added imu site to trunk at position `"0 0 0"`.
29 | 6. Added home keyframe.
30 | 7. Added tracking light.
31 | 8. Added frictionloss and armature to the joints (values not identified) for added stability.
32 | 9. Switched to implicitfast and used position actuators with kp/kv semantics.
33 | 10. Added IMU sensors.
34 |
35 | ## License
36 |
37 | This model is released under an [Apache-2.0 License](LICENSE).
38 |
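
Since the model targets MuJoCo 2.3.4+, the exported MJCF can be sanity-checked directly with the official `mujoco` Python bindings. A small sketch follows; the MJCF filename is an assumption, so point the path at the actual file in this directory.

```python
import mujoco

# Path is illustrative; use the T1 MJCF shipped alongside this README.
model = mujoco.MjModel.from_xml_path("humanoidverse/data/robots/t1/t1_29dof.xml")
data = mujoco.MjData(model)

print("nq:", model.nq, "nv:", model.nv, "nu:", model.nu)

# Reset to the 'home' keyframe added in derivation step 6, then step once.
mujoco.mj_resetDataKeyframe(model, data, 0)
mujoco.mj_step(model, data)
```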
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AL1.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AL1.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AL2.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AL2.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AL3.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AL3.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AL4.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AL4.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AL5.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AL5.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AL6.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AL6.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AL7.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AL7.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AR1.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AR1.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AR2.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AR2.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AR3.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AR3.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AR4.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AR4.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AR5.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AR5.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AR6.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AR6.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/AR7.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/AR7.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Ankle_Cross_Left.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Ankle_Cross_Left.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Ankle_Cross_Right.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Ankle_Cross_Right.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Crank_Down_Left.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Crank_Down_Left.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Crank_Down_Right.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Crank_Down_Right.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Crank_Up_Left.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Crank_Up_Left.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Crank_Up_Right.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Crank_Up_Right.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Down_Left_XX_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Down_Left_XX_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Down_Left_X_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Down_Left_X_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Down_Left_Y_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Down_Left_Y_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Down_Right_XX_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Down_Right_XX_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Down_Right_X_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Down_Right_X_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Down_Right_Y_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Down_Right_Y_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/H1.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/H1.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/H2.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/H2.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Hip_Pitch_Left.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Hip_Pitch_Left.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Hip_Pitch_Right.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Hip_Pitch_Right.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Hip_Roll_Left.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Hip_Roll_Left.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Hip_Roll_Right.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Hip_Roll_Right.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Hip_Yaw_Left.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Hip_Yaw_Left.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Hip_Yaw_Right.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Hip_Yaw_Right.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Link_Long_Left.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Link_Long_Left.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Link_Long_Right.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Link_Long_Right.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Link_Short_Left.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Link_Short_Left.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Link_Short_Right.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Link_Short_Right.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Shank_Left.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Shank_Left.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Shank_Right.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Shank_Right.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Trunk.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Trunk.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Up_Left_XX_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Up_Left_XX_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Up_Left_X_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Up_Left_X_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Up_Left_Y_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Up_Left_Y_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Up_Right_XX_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Up_Right_XX_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Up_Right_X_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Up_Right_X_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Up_Right_Y_Ball.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Up_Right_Y_Ball.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/Waist.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/Waist.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/left_Link1.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/left_Link1.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/left_Link11.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/left_Link11.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/left_Link2.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/left_Link2.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/left_Link22.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/left_Link22.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/left_base_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/left_base_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/left_foot_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/left_foot_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/left_hand_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/left_hand_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/left_rubber_hand.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/left_rubber_hand.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/right_Link1.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/right_Link1.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/right_Link11.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/right_Link11.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/right_Link2.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/right_Link2.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/right_Link22.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/right_Link22.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/right_base_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/right_base_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/right_foot_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/right_foot_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/right_hand_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/right_hand_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/meshes/right_rubber_hand.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/meshes/right_rubber_hand.STL
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/scene_t1_29dof.xml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/scene_t1_29dof.xml
--------------------------------------------------------------------------------
/humanoidverse/data/robots/t1/t1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/robots/t1/t1.png
--------------------------------------------------------------------------------
/humanoidverse/data/totes/meshes/base_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/totes/meshes/base_link.STL
--------------------------------------------------------------------------------
/humanoidverse/data/totes/urdf/tote.urdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/data/totes/urdf/tote.urdf
--------------------------------------------------------------------------------
/humanoidverse/envs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/envs/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/envs/base_task/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/envs/base_task/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/envs/decoupled_locomotion/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/envs/decoupled_locomotion/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/envs/env_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/envs/env_utils/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/envs/env_utils/general.py:
--------------------------------------------------------------------------------
1 | import os
2 | import copy
3 | import torch
4 | import numpy as np
5 |
6 | def class_to_dict(obj) -> dict:
7 | if not hasattr(obj,"__dict__"):
8 | return obj
9 | result = {}
10 | for key in dir(obj):
11 | if key.startswith("_"):
12 | continue
13 | element = []
14 | val = getattr(obj, key)
15 | if isinstance(val, list):
16 | for item in val:
17 | element.append(class_to_dict(item))
18 | else:
19 | element = class_to_dict(val)
20 | result[key] = element
21 | return result
22 |
23 | def update_class_from_dict(obj, dict):
24 | for key, val in dict.items():
25 | attr = getattr(obj, key, None)
26 | if isinstance(attr, type):
27 | update_class_from_dict(attr, val)
28 | else:
29 | setattr(obj, key, val)
30 | return
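A small usage sketch of the two helpers above, assuming they are imported from `humanoidverse.envs.env_utils.general`; `CfgExample` is a made-up config class for illustration.

```python
from humanoidverse.envs.env_utils.general import class_to_dict, update_class_from_dict

class CfgExample:
    # hypothetical attribute-style config
    lr = 1e-3
    gamma = 0.99

cfg = CfgExample()
print(class_to_dict(cfg))               # {'gamma': 0.99, 'lr': 0.001}
update_class_from_dict(cfg, {"lr": 3e-4})
print(cfg.lr)                           # 0.0003
```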
--------------------------------------------------------------------------------
/humanoidverse/envs/env_utils/visualization.py:
--------------------------------------------------------------------------------
1 |
2 | class Point:
3 | # this is for visualization
4 | def __init__(self, pt):
5 | self.x = pt[0]
6 | self.y = pt[1]
7 | self.z = pt[2]
--------------------------------------------------------------------------------
/humanoidverse/envs/legged_base_task/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/envs/legged_base_task/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/envs/locomotion/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/envs/locomotion/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/eval_agent.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from pathlib import Path
4 |
5 | import hydra
6 | from hydra.utils import instantiate
7 | from hydra.core.hydra_config import HydraConfig
8 | from omegaconf import OmegaConf
9 | from humanoidverse.utils.logging import HydraLoggerBridge
10 | import logging
11 | from utils.config_utils import * # noqa: E402, F403
12 |
13 | # add argparse arguments
14 |
15 | from humanoidverse.utils.config_utils import * # noqa: E402, F403
16 | from loguru import logger
17 |
18 | @hydra.main(config_path="config", config_name="base_eval")
19 | def main(override_config: OmegaConf):
20 | # logging to hydra log file
21 | hydra_log_path = os.path.join(HydraConfig.get().runtime.output_dir, "eval.log")
22 | logger.remove()
23 | logger.add(hydra_log_path, level="DEBUG")
24 |
25 | # Get log level from LOGURU_LEVEL environment variable or use INFO as default
26 | console_log_level = os.environ.get("LOGURU_LEVEL", "INFO").upper()
27 | logger.add(sys.stdout, level=console_log_level, colorize=True)
28 |
29 | logging.basicConfig(level=logging.DEBUG)
30 | logging.getLogger().addHandler(HydraLoggerBridge())
31 |
32 | os.chdir(hydra.utils.get_original_cwd())
33 |
34 | if override_config.checkpoint is not None:
35 | has_config = True
36 | checkpoint = Path(override_config.checkpoint)
37 | config_path = checkpoint.parent / "config.yaml"
38 | if not config_path.exists():
39 | config_path = checkpoint.parent.parent / "config.yaml"
40 | if not config_path.exists():
41 | has_config = False
42 | logger.error(f"Could not find config path: {config_path}")
43 |
44 | if has_config:
45 | logger.info(f"Loading training config file from {config_path}")
46 | with open(config_path) as file:
47 | train_config = OmegaConf.load(file)
48 |
49 | if train_config.eval_overrides is not None:
50 | train_config = OmegaConf.merge(
51 | train_config, train_config.eval_overrides
52 | )
53 |
54 | config = OmegaConf.merge(train_config, override_config)
55 | else:
56 | config = override_config
57 | else:
58 | if override_config.eval_overrides is not None:
59 | config = override_config.copy()
60 | eval_overrides = OmegaConf.to_container(config.eval_overrides, resolve=True)
61 | for arg in sys.argv[1:]:
62 | if not arg.startswith("+"):
63 | key = arg.split("=")[0]
64 | if key in eval_overrides:
65 | del eval_overrides[key]
66 | config.eval_overrides = OmegaConf.create(eval_overrides)
67 | config = OmegaConf.merge(config, eval_overrides)
68 | else:
69 | config = override_config
70 |
71 | simulator_type = config.simulator['_target_'].split('.')[-1]
72 | if simulator_type == 'IsaacSim':
73 | from omni.isaac.lab.app import AppLauncher
74 | import argparse
75 | parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
76 | parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.")
77 | parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
78 | parser.add_argument("--env_spacing", type=int, default=20, help="Distance between environments in simulator.")
79 | parser.add_argument("--output_dir", type=str, default="logs", help="Directory to store the training output.")
80 | AppLauncher.add_app_launcher_args(parser)
81 |
82 | # Parse known arguments to get argparse params
83 | args_cli, hydra_args = parser.parse_known_args()
84 |
85 | app_launcher = AppLauncher(args_cli)
86 | simulation_app = app_launcher.app
87 | print('args_cli', args_cli)
88 | print('hydra_args', hydra_args)
89 | sys.argv = [sys.argv[0]] + hydra_args
90 | if simulator_type == 'IsaacGym':
91 | import isaacgym
92 |
93 | from humanoidverse.agents.base_algo.base_algo import BaseAlgo # noqa: E402
94 | from humanoidverse.utils.helpers import pre_process_config
95 | import torch
96 | from humanoidverse.utils.inference_helpers import export_policy_as_jit
97 | from humanoidverse.utils.inference_helpers import export_multi_agent_decouple_policy_as_onnx
98 |
99 | pre_process_config(config)
100 |
101 | device = "cuda:0" if torch.cuda.is_available() else "cpu"
102 |
103 | eval_log_dir = Path(config.eval_log_dir)
104 | eval_log_dir.mkdir(parents=True, exist_ok=True)
105 |
106 | logger.info(f"Saving eval logs to {eval_log_dir}")
107 | with open(eval_log_dir / "config.yaml", "w") as file:
108 | OmegaConf.save(config, file)
109 |
110 | ckpt_num = config.checkpoint.split('/')[-1].split('_')[-1].split('.')[0]
111 | config.env.config.save_rendering_dir = str(checkpoint.parent / "renderings" / f"ckpt_{ckpt_num}")
112 | config.env.config.ckpt_dir = str(checkpoint.parent)  # used when saving motions next to the checkpoint
113 | env = instantiate(config.env, device=device)
114 |
115 | algo: BaseAlgo = instantiate(config.algo, env=env, device=device, log_dir=None)
116 | algo.setup()
117 | algo.load(config.checkpoint)
118 |
119 | EXPORT_ONNX = False
120 | EXPORT_POLICY = False
121 | checkpoint_path = str(checkpoint)
122 |
123 | checkpoint_dir = os.path.dirname(checkpoint_path)
124 |
125 | # from checkpoint path
126 |
127 | humanoidverse_ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
128 | exported_policy_path = os.path.join(humanoidverse_ROOT_DIR, checkpoint_dir, 'exported')
129 | os.makedirs(exported_policy_path, exist_ok=True)
130 | exported_policy_name = checkpoint_path.split('/')[-1]
131 | exported_onnx_name = exported_policy_name.replace('.pt', '.onnx')
132 |
133 | if EXPORT_POLICY:
134 | export_policy_as_jit(algo.alg.actor_critic, exported_policy_path, exported_policy_name)
135 | logger.info(f'Exported policy as jit script to: {os.path.join(exported_policy_path, exported_policy_name)}')
136 | if EXPORT_ONNX:
137 | example_obs_dict = algo.get_example_obs()
138 | export_multi_agent_decouple_policy_as_onnx(algo.inference_model, exported_policy_path, exported_onnx_name, example_obs_dict,
139 | config.robot.get('body_keys', ['lower_body', 'upper_body']))
140 | logger.info(f'Body keys: {config.robot.get("body_keys", ["lower_body", "upper_body"])}')
141 | logger.info(f'Exported policy as onnx to: {os.path.join(exported_policy_path, exported_onnx_name)}')
142 |
143 | algo.evaluate_policy()
144 |
145 |
146 | if __name__ == "__main__":
147 | main()
148 |
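The checkpoint branch above layers three configuration sources. A minimal sketch of that precedence with toy keys (the values are illustrative, not the real training config):

```python
from omegaconf import OmegaConf

train_config = OmegaConf.create(
    {"num_envs": 4096, "headless": True, "eval_overrides": {"num_envs": 1}}
)
cli_config = OmegaConf.create({"headless": False})

# eval_overrides win over the stored training values...
merged = OmegaConf.merge(train_config, train_config.eval_overrides)
# ...and command-line overrides win over both.
config = OmegaConf.merge(merged, cli_config)
print(config.num_envs, config.headless)  # 1 False
```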
--------------------------------------------------------------------------------
/humanoidverse/simulator/base_simulator/base_simulator.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | from loguru import logger
4 | import torch
5 |
6 | class BaseSimulator:
7 | """
8 | Base class for robotic simulation environments, providing a framework for simulation setup,
9 | environment creation, and control over robotic assets and simulation properties.
10 | """
11 | def __init__(self, config, device):
12 | """
13 | Initializes the base simulator with configuration settings and simulation device.
14 |
15 | Args:
16 | config (dict): Configuration dictionary for the simulation.
17 | device (str): Device type for simulation ('cpu' or 'cuda').
18 | """
19 | self.config = config
20 | self.sim_device = device
21 | self.headless = False
22 |
23 | self._rigid_body_pos: torch.Tensor
24 | self._rigid_body_rot: torch.Tensor
25 | self._rigid_body_vel: torch.Tensor
26 | self._rigid_body_ang_vel: torch.Tensor
27 |
28 | # ----- Configuration Setup Methods -----
29 |
30 | def set_headless(self, headless):
31 | """
32 | Sets the headless mode for the simulator.
33 |
34 | Args:
35 | headless (bool): If True, runs the simulation without graphical display.
36 | """
37 | self.headless = headless
38 |
39 | def setup(self):
40 | """
41 | Initializes the simulator parameters and environment. This method should be implemented
42 | by subclasses to set specific simulator configurations.
43 | """
44 | raise NotImplementedError("The 'setup' method must be implemented in subclasses.")
45 |
46 | # ----- Terrain Setup Methods -----
47 |
48 | def setup_terrain(self, mesh_type):
49 | """
50 | Configures the terrain based on specified mesh type.
51 |
52 | Args:
53 | mesh_type (str): Type of terrain mesh ('plane', 'heightfield', 'trimesh').
54 | """
55 | raise NotImplementedError("The 'setup_terrain' method must be implemented in subclasses.")
56 |
57 | # ----- Robot Asset Setup Methods -----
58 |
59 | def load_assets(self, robot_config):
60 | """
61 | Loads the robot assets into the simulation environment.
62 | Sets self.num_dofs, self.num_bodies, self.dof_names, and self.body_names.
63 | Args:
64 | robot_config (dict): HumanoidVerse Configuration for the robot asset.
65 | """
66 | raise NotImplementedError("The 'load_assets' method must be implemented in subclasses.")
67 |
68 | # ----- Environment Creation Methods -----
69 |
70 | def create_envs(self, num_envs, env_origins, base_init_state, env_config):
71 | """
72 | Creates and initializes environments with specified configurations.
73 |
74 | Args:
75 | num_envs (int): Number of environments to create.
76 | env_origins (list): List of origin positions for each environment.
77 | base_init_state (array): Initial state of the base.
78 | env_config (dict): Configuration for each environment.
79 | """
80 | raise NotImplementedError("The 'create_envs' method must be implemented in subclasses.")
81 |
82 | # ----- Property Retrieval Methods -----
83 |
84 | def get_dof_limits_properties(self):
85 | """
86 | Retrieves the DOF (degrees of freedom) limits and properties.
87 |
88 | Returns:
89 | Tuple of tensors representing position limits, velocity limits, and torque limits for each DOF.
90 | """
91 | raise NotImplementedError("The 'get_dof_limits_properties' method must be implemented in subclasses.")
92 |
93 | def find_rigid_body_indice(self, body_name):
94 | """
95 | Finds the index of a specified rigid body.
96 |
97 | Args:
98 | body_name (str): Name of the rigid body to locate.
99 |
100 | Returns:
101 | int: Index of the rigid body.
102 | """
103 | raise NotImplementedError("The 'find_rigid_body_indice' method must be implemented in subclasses.")
104 |
105 | # ----- Simulation Preparation and Refresh Methods -----
106 |
107 | def prepare_sim(self):
108 | """
109 | Prepares the simulation environment and refreshes any relevant tensors.
110 | """
111 | raise NotImplementedError("The 'prepare_sim' method must be implemented in subclasses.")
112 |
113 | def refresh_sim_tensors(self):
114 | """
115 | Refreshes the state tensors in the simulation to ensure they are up-to-date.
116 | """
117 | raise NotImplementedError("The 'refresh_sim_tensors' method must be implemented in subclasses.")
118 |
119 | # ----- Control Application Methods -----
120 |
121 | def apply_torques_at_dof(self, torques):
122 | """
123 | Applies the specified torques to the robot's degrees of freedom (DOF).
124 |
125 | Args:
126 | torques (tensor): Tensor containing torques to apply.
127 | """
128 | raise NotImplementedError("The 'apply_torques_at_dof' method must be implemented in subclasses.")
129 |
130 | def set_actor_root_state_tensor(self, set_env_ids, root_states):
131 | """
132 | Sets the root state tensor for specified actors within environments.
133 |
134 | Args:
135 | set_env_ids (tensor): Tensor of environment IDs where states will be set.
136 | root_states (tensor): New root states to apply.
137 | """
138 | raise NotImplementedError("The 'set_actor_root_state_tensor' method must be implemented in subclasses.")
139 |
140 | def set_dof_state_tensor(self, set_env_ids, dof_states):
141 | """
142 | Sets the DOF state tensor for specified actors within environments.
143 |
144 | Args:
145 | set_env_ids (tensor): Tensor of environment IDs where states will be set.
146 | dof_states (tensor): New DOF states to apply.
147 | """
148 | raise NotImplementedError("The 'set_dof_state_tensor' method must be implemented in subclasses.")
149 |
150 | def simulate_at_each_physics_step(self):
151 | """
152 | Advances the simulation by a single physics step.
153 | """
154 | raise NotImplementedError("The 'simulate_at_each_physics_step' method must be implemented in subclasses.")
155 |
156 | # ----- Viewer Setup and Rendering Methods -----
157 |
158 | def setup_viewer(self):
159 | """
160 | Sets up a viewer for visualizing the simulation, allowing keyboard interactions.
161 | """
162 | raise NotImplementedError("The 'setup_viewer' method must be implemented in subclasses.")
163 |
164 | def render(self, sync_frame_time=True):
165 | """
166 | Renders the simulation frame-by-frame, syncing frame time if required.
167 |
168 | Args:
169 | sync_frame_time (bool): Whether to synchronize the frame time.
170 | """
171 | raise NotImplementedError("The 'render' method must be implemented in subclasses.")
172 |
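A hypothetical skeleton showing how a backend would fill in this interface; the class name and method bodies below are placeholders, not a real physics binding.

```python
import torch
from humanoidverse.simulator.base_simulator.base_simulator import BaseSimulator

class DummySimulator(BaseSimulator):
    """Toy backend used only to illustrate the abstract API."""

    def setup(self):
        self.sim_dt = 0.005  # placeholder physics timestep

    def load_assets(self, robot_config):
        # A real backend would parse the asset referenced by robot_config here.
        self.num_dofs, self.num_bodies = 29, 30
        self.dof_names = [f"dof_{i}" for i in range(self.num_dofs)]
        self.body_names = [f"body_{i}" for i in range(self.num_bodies)]

    def apply_torques_at_dof(self, torques: torch.Tensor):
        self._last_torques = torques.to(self.sim_device)

    def simulate_at_each_physics_step(self):
        pass  # a real backend advances the physics engine by one step here
```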
--------------------------------------------------------------------------------
/humanoidverse/simulator/genesis/genesis_viewer.py:
--------------------------------------------------------------------------------
1 |
2 | class Viewer:
3 | """This class handles controlling the camera associated with a viewport in the simulator.
4 |
5 | It can be used to set the viewpoint camera to track different origin types:
6 |
7 | - **world**: the center of the world (static)
8 | - **env**: the center of an environment (static)
9 | - **asset_root**: the root of an asset in the scene (e.g. tracking a robot moving in the scene)
10 |
11 | On creation, the camera is set to track the origin type specified in the configuration.
12 |
13 | For the :attr:`asset_root` origin type, the camera is updated at each rendering step to track the asset's
14 | root position. For this, it registers a callback to the post update event stream from the simulation app.
15 | """
16 |
17 | def __init__(self):
18 | """Initialize the ViewportCameraController.
19 |
20 | Args:
21 | env: The environment.
22 | cfg: The configuration for the viewport camera controller.
23 |
24 | Raises:
25 | ValueError: If origin type is configured to be "env" but :attr:`cfg.env_index` is out of bounds.
26 | ValueError: If origin type is configured to be "asset_root" but :attr:`cfg.asset_name` is unset.
27 |
28 | """
29 | self._cfg = None
30 | return
31 |
32 | def __del__(self):
33 | """Unsubscribe from the callback."""
34 | # use hasattr to handle case where __init__ has not completed before __del__ is called
35 | return
36 |
37 | """
38 | Properties
39 | """
40 |
41 | @property
42 | def cfg(self):
43 | """The configuration for the viewer."""
44 | return self._cfg
45 |
46 | """
47 | Public Functions
48 | """
49 |
50 | def set_view_env_index(self, env_index: int):
51 | """Sets the environment index for the camera view.
52 |
53 | Args:
54 | env_index: The index of the environment to set the camera view to.
55 |
56 | Raises:
57 | ValueError: If the environment index is out of bounds. It should be between 0 and num_envs - 1.
58 | """
59 | return
60 |
61 | def update_view_to_world(self):
62 | """Updates the viewer's origin to the origin of the world which is (0, 0, 0)."""
63 | return
64 |
65 | def update_view_to_env(self):
66 | """Updates the viewer's origin to the origin of the selected environment."""
67 | return
68 |
69 | def update_view_to_asset_root(self, asset_name: str):
70 | """Updates the viewer's origin based upon the root of an asset in the scene.
71 |
72 | Args:
73 | asset_name: The name of the asset in the scene. The name should match the name of the
74 | asset in the scene.
75 |
76 | Raises:
77 | ValueError: If the asset is not in the scene.
78 | """
79 | return
80 |
81 | def update_view_location(self, eye, lookat):
82 | """Updates the camera view pose based on the current viewer origin and the eye and lookat positions.
83 |
84 | Args:
85 | eye: The eye position of the camera. If None, the current eye position is used.
86 | lookat: The lookat position of the camera. If None, the current lookat position is used.
87 | """
88 | return
89 |
90 | """
91 | Private Functions
92 | """
93 |
94 | def _update_tracking_callback(self, event):
95 | """Updates the camera view at each rendering step."""
96 | return
97 |
--------------------------------------------------------------------------------
/humanoidverse/simulator/genesis/tmp_gs_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 |
5 | def wrap_to_pi(angles):
6 | angles %= 2 * np.pi
7 | angles -= 2 * np.pi * (angles > np.pi)
8 | return angles
9 |
10 |
11 | def gs_rand_float(lower, upper, shape, device):
12 | return (upper - lower) * torch.rand(size=shape, device=device) + lower
13 |
14 |
15 | def gs_inv_quat(quat):
16 | qw, qx, qy, qz = quat.unbind(-1)
17 | inv_quat = torch.stack([1.0 * qw, -qx, -qy, -qz], dim=-1)
18 | return inv_quat
19 |
20 |
21 | def gs_transform_by_quat(pos, quat):
22 | qw, qx, qy, qz = quat.unbind(-1)
23 |
24 | rot_matrix = torch.stack(
25 | [
26 | 1.0 - 2 * qy**2 - 2 * qz**2,
27 | 2 * qx * qy - 2 * qz * qw,
28 | 2 * qx * qz + 2 * qy * qw,
29 | 2 * qx * qy + 2 * qz * qw,
30 | 1 - 2 * qx**2 - 2 * qz**2,
31 | 2 * qy * qz - 2 * qx * qw,
32 | 2 * qx * qz - 2 * qy * qw,
33 | 2 * qy * qz + 2 * qx * qw,
34 | 1 - 2 * qx**2 - 2 * qy**2,
35 | ],
36 | dim=-1,
37 | ).reshape(*quat.shape[:-1], 3, 3)
38 | rotated_pos = torch.matmul(rot_matrix, pos.unsqueeze(-1)).squeeze(-1)
39 |
40 | return rotated_pos
41 |
42 |
43 | def gs_quat2euler(quat): # xyz
44 | # Extract quaternion components
45 | qw, qx, qy, qz = quat.unbind(-1)
46 |
47 | # Roll (x-axis rotation)
48 | sinr_cosp = 2 * (qw * qx + qy * qz)
49 | cosr_cosp = 1 - 2 * (qx * qx + qy * qy)
50 | roll = torch.atan2(sinr_cosp, cosr_cosp)
51 |
52 | # Pitch (y-axis rotation)
53 | sinp = 2 * (qw * qy - qz * qx)
54 | pitch = torch.where(
55 | torch.abs(sinp) >= 1,
56 | torch.sign(sinp) * torch.tensor(torch.pi / 2),
57 | torch.asin(sinp),
58 | )
59 |
60 | # Yaw (z-axis rotation)
61 | siny_cosp = 2 * (qw * qz + qx * qy)
62 | cosy_cosp = 1 - 2 * (qy * qy + qz * qz)
63 | yaw = torch.atan2(siny_cosp, cosy_cosp)
64 |
65 | return torch.stack([roll, pitch, yaw], dim=-1)
66 |
67 |
68 | def gs_euler2quat(xyz): # xyz
69 |
70 | roll, pitch, yaw = xyz.unbind(-1)
71 |
72 | cosr = (roll * 0.5).cos()
73 | sinr = (roll * 0.5).sin()
74 | cosp = (pitch * 0.5).cos()
75 | sinp = (pitch * 0.5).sin()
76 | cosy = (yaw * 0.5).cos()
77 | siny = (yaw * 0.5).sin()
78 |
79 | qw = cosr * cosp * cosy + sinr * sinp * siny
80 | qx = sinr * cosp * cosy - cosr * sinp * siny
81 | qy = cosr * sinp * cosy + sinr * cosp * siny
82 | qz = cosr * cosp * siny - sinr * sinp * cosy
83 |
84 | return torch.stack([qw, qx, qy, qz], dim=-1)
85 |
86 |
87 | def gs_quat_from_angle_axis(angle, axis):
88 | theta = (angle / 2).unsqueeze(-1)
89 | xyz = normalize(axis) * theta.sin()
90 | w = theta.cos()
91 | return normalize(torch.cat([w, xyz], dim=-1))
92 |
93 |
94 | def normalize(x, eps: float = 1e-9):
95 | return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
96 |
97 |
98 | def gs_quat_mul(a, b):
99 | assert a.shape == b.shape
100 | shape = a.shape
101 | a = a.reshape(-1, 4)
102 | b = b.reshape(-1, 4)
103 |
104 | w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
105 | w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
106 | ww = (z1 + x1) * (x2 + y2)
107 | yy = (w1 - y1) * (w2 + z2)
108 | zz = (w1 + y1) * (w2 - z2)
109 | xx = ww + yy + zz
110 | qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
111 | w = qq - ww + (z1 - y1) * (y2 - z2)
112 | x = qq - xx + (x1 + w1) * (x2 + w2)
113 | y = qq - yy + (w1 - x1) * (y2 + z2)
114 | z = qq - zz + (z1 + y1) * (w2 - x2)
115 |
116 | quat = torch.stack([w, x, y, z], dim=-1).view(shape)
117 |
118 | return quat
119 |
120 |
121 | def gs_quat_apply(a, b):
122 | shape = b.shape
123 | a = a.reshape(-1, 4)
124 | b = b.reshape(-1, 3)
125 | xyz = a[:, 1:]
126 | t = xyz.cross(b, dim=-1) * 2
127 | return (b + a[:, :1] * t + xyz.cross(t, dim=-1)).view(shape)
128 |
129 |
130 | def gs_quat_apply_yaw(quat, vec):
131 | quat_yaw = quat.clone().view(-1, 4)
132 | quat_yaw[:, 1:3] = 0.
133 | quat_yaw = normalize(quat_yaw)
134 | return gs_quat_apply(quat_yaw, vec)
135 |
136 |
137 | def gs_quat_conjugate(a):
138 | shape = a.shape
139 | a = a.reshape(-1, 4)
140 | return torch.cat((a[:, :1], -a[:, 1:], ), dim=-1).view(shape)
141 |
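A quick self-check sketch for the helpers above (wxyz quaternion convention), assuming they are imported from this module:

```python
import torch
from humanoidverse.simulator.genesis.tmp_gs_utils import (
    gs_euler2quat, gs_quat2euler, gs_quat_mul, gs_quat_conjugate,
)

rpy = torch.tensor([[0.1, -0.2, 0.3]])
quat = gs_euler2quat(rpy)                        # euler (xyz) -> quaternion (wxyz)
assert torch.allclose(gs_quat2euler(quat), rpy, atol=1e-5)

# q * conj(q) should be the identity rotation for a unit quaternion.
identity = gs_quat_mul(quat, gs_quat_conjugate(quat))
assert torch.allclose(identity, torch.tensor([[1.0, 0.0, 0.0, 0.0]]), atol=1e-5)
```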
--------------------------------------------------------------------------------
/humanoidverse/simulator/isaacsim/event_cfg.py:
--------------------------------------------------------------------------------
1 |
2 | from omni.isaac.lab.managers import EventTermCfg as EventTerm
3 |
4 | from omni.isaac.lab.managers import SceneEntityCfg
5 | from omni.isaac.lab.utils import configclass
6 |
7 |
8 | import omni.isaac.lab.envs.mdp as mdp
9 |
10 | # @configclass
11 | # class EventCfg:
12 | # """Configuration for events."""
13 |
14 | # scale_body_mass = EventTerm(
15 | # func=mdp.randomize_rigid_body_mass,
16 | # mode="startup",
17 | # params={
18 | # "asset_cfg": SceneEntityCfg("robot", body_names=".*"),
19 | # "mass_distribution_params": (0.8, 1.2),
20 | # "operation": "scale",
21 | # },
22 | # )
23 |
24 | # random_joint_friction = EventTerm(
25 | # func=mdp.randomize_joint_parameters,
26 | # mode="startup",
27 | # params={
28 | # "asset_cfg": SceneEntityCfg("robot", joint_names=".*"),
29 | # "friction_distribution_params": (0.5, 1.25),
30 | # "operation": "scale",
31 | # },
32 | # )
33 |
34 | @configclass
35 | class EventCfg:
36 | """Configuration for events."""
37 | scale_body_mass = None
38 | random_joint_friction = None
39 |
40 |
41 |
--------------------------------------------------------------------------------
/humanoidverse/simulator/isaacsim/events.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import numpy as np
4 | import torch
5 | from typing import TYPE_CHECKING, Literal
6 |
7 | import omni.isaac.lab.envs.mdp as mdp
8 | import omni.isaac.lab.utils.math as math_utils
9 | from omni.isaac.lab.assets import Articulation, RigidObject
10 | from omni.isaac.lab.managers import SceneEntityCfg
11 |
12 | from omni.isaac.lab.envs import ManagerBasedEnv
13 |
14 | def resolve_dist_fn(
15 | distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform",
16 | ):
17 | dist_fn = math_utils.sample_uniform
18 |
19 | if distribution == "uniform":
20 | dist_fn = math_utils.sample_uniform
21 | elif distribution == "log_uniform":
22 | dist_fn = math_utils.sample_log_uniform
23 | elif distribution == "gaussian":
24 | dist_fn = math_utils.sample_gaussian
25 | else:
26 | raise ValueError(f"Unrecognized distribution {distribution}")
27 |
28 | return dist_fn
29 |
30 | def randomize_body_com(
31 | env: ManagerBasedEnv,
32 | env_ids: torch.Tensor | None,
33 | asset_cfg: SceneEntityCfg,
34 | distribution_params: tuple[float, float] | tuple[torch.Tensor, torch.Tensor],
35 | operation: Literal["add", "abs", "scale"],
36 | distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform",
37 | num_envs: int = 1, # number of environments
38 | ):
39 | """Randomize the com of the bodies by adding, scaling or setting random values.
40 |
41 | This function allows randomizing the center of mass of the bodies of the asset. The function samples random values from the
42 | given distribution parameters and adds, scales or sets the values into the physics simulation based on the operation.
43 |
44 | .. tip::
45 | This function uses CPU tensors to assign the body COMs. It is recommended to use this function
46 | only during the initialization of the environment.
47 | """
48 | # extract the used quantities (to enable type-hinting)
49 | asset: RigidObject | Articulation = env.scene[asset_cfg.name]
50 |
51 | # resolve environment ids
52 | if env_ids is None:
53 | env_ids = torch.arange(num_envs, device="cpu")
54 | else:
55 | env_ids = env_ids.cpu()
56 |
57 | # resolve body indices
58 | if asset_cfg.body_ids == slice(None):
59 | body_ids = torch.arange(asset.num_bodies, dtype=torch.int, device="cpu")
60 | else:
61 | body_ids = torch.tensor(asset_cfg.body_ids, dtype=torch.int, device="cpu")
62 |
63 | # get the current COMs of the bodies
64 | coms = asset.root_physx_view.get_coms()
65 | # apply randomization on default values
66 | coms[env_ids[:, None], body_ids] = env.default_coms[env_ids[:, None], body_ids].clone()
67 |
68 | dist_fn = resolve_dist_fn(distribution)
69 |
70 | if isinstance(distribution_params[0], torch.Tensor):
71 | distribution_params = (distribution_params[0].to(coms.device), distribution_params[1].to(coms.device))
72 |
73 | env.base_com_bias[env_ids, :] = dist_fn(
74 | *distribution_params, (env_ids.shape[0], env.base_com_bias.shape[1]), device=coms.device
75 | )
76 |
77 | # sample from the given range
78 | if operation == "add":
79 | coms[env_ids[:, None], body_ids, :3] += env.base_com_bias[env_ids[:, None], :]
80 | elif operation == "abs":
81 | coms[env_ids[:, None], body_ids, :3] = env.base_com_bias[env_ids[:, None], :]
82 | elif operation == "scale":
83 | coms[env_ids[:, None], body_ids, :3] *= env.base_com_bias[env_ids[:, None], :]
84 | else:
85 | raise ValueError(
86 | f"Unknown operation: '{operation}' for property randomization. Please use 'add', 'abs' or 'scale'."
87 | )
88 | # write the updated COMs back into the physics simulation
89 | asset.root_physx_view.set_coms(coms, env_ids)
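A pure-PyTorch sketch of the indexing used by the `"add"` branch above, with made-up shapes and no Isaac Lab dependency:

```python
import torch

num_envs, num_bodies = 4, 3
coms = torch.zeros(num_envs, num_bodies, 7)                  # per-body COM entries (first 3 assumed to be position)
com_bias = torch.empty(num_envs, 3).uniform_(-0.02, 0.02)    # per-env COM offset, metres

env_ids = torch.arange(num_envs)
body_ids = torch.tensor([0])                                 # e.g. randomize only the trunk

# Same broadcasting pattern as the "add" operation above: every selected body in
# every selected environment gets that environment's bias added to its COM position.
coms[env_ids[:, None], body_ids, :3] += com_bias[env_ids[:, None], :]
print(coms[:, 0, :3])
```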
--------------------------------------------------------------------------------
/humanoidverse/simulator/isaacsim/isaaclab_viewpoint_camera_controller.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022-2024, The Isaac Lab Project Developers.
2 | # All rights reserved.
3 | #
4 | # SPDX-License-Identifier: BSD-3-Clause
5 |
6 | from __future__ import annotations
7 |
8 | import copy
9 | import numpy as np
10 | import torch
11 | import weakref
12 | from collections.abc import Sequence
13 | from typing import TYPE_CHECKING
14 |
15 | import omni.kit.app
16 | import omni.timeline
17 |
18 | if TYPE_CHECKING:
19 | from omni.isaac.lab.envs import DirectRLEnv, ManagerBasedEnv, ViewerCfg
20 |
21 |
22 |
23 |
24 | class ViewportCameraController:
25 | """This class handles controlling the camera associated with a viewport in the simulator.
26 |
27 | It can be used to set the viewpoint camera to track different origin types:
28 |
29 | - **world**: the center of the world (static)
30 | - **env**: the center of an environment (static)
31 | - **asset_root**: the root of an asset in the scene (e.g. tracking a robot moving in the scene)
32 |
33 | On creation, the camera is set to track the origin type specified in the configuration.
34 |
35 | For the :attr:`asset_root` origin type, the camera is updated at each rendering step to track the asset's
36 | root position. For this, it registers a callback to the post update event stream from the simulation app.
37 | """
38 |
39 | def __init__(self, env, cfg: ViewerCfg):
40 | """Initialize the ViewportCameraController.
41 |
42 | Args:
43 | env: The environment.
44 | cfg: The configuration for the viewport camera controller.
45 |
46 | Raises:
47 | ValueError: If origin type is configured to be "env" but :attr:`cfg.env_index` is out of bounds.
48 | ValueError: If origin type is configured to be "asset_root" but :attr:`cfg.asset_name` is unset.
49 |
50 | """
51 | # store inputs
52 | self._env = env
53 | self._cfg = copy.deepcopy(cfg)
54 | # cast viewer eye and look-at to numpy arrays
55 | self.default_cam_eye = np.array(self._cfg.eye)
56 | self.default_cam_lookat = np.array(self._cfg.lookat)
57 |
58 | # set the camera origins
59 | if self.cfg.origin_type == "env":
60 | # check that the env_index is within bounds
61 | self.set_view_env_index(self.cfg.env_index)
62 | # set the camera origin to the center of the environment
63 | self.update_view_to_env()
64 | elif self.cfg.origin_type == "asset_root":
65 | # note: we do not yet update camera for tracking an asset origin, as the asset may not yet be
66 | # in the scene when this is called. Instead, we subscribe to the post update event to update the camera
67 | # at each rendering step.
68 | if self.cfg.asset_name is None:
69 | raise ValueError(f"No asset name provided for viewer with origin type: '{self.cfg.origin_type}'.")
70 | else:
71 | # set the camera origin to the center of the world
72 | self.update_view_to_world()
73 |
74 | # subscribe to post update event so that camera view can be updated at each rendering step
75 | app_interface = omni.kit.app.get_app_interface()
76 | app_event_stream = app_interface.get_post_update_event_stream()
77 | self._viewport_camera_update_handle = app_event_stream.create_subscription_to_pop(
78 | lambda event, obj=weakref.proxy(self): obj._update_tracking_callback(event)
79 | )
80 |
81 | def __del__(self):
82 | """Unsubscribe from the callback."""
83 | # use hasattr to handle case where __init__ has not completed before __del__ is called
84 | if hasattr(self, "_viewport_camera_update_handle") and self._viewport_camera_update_handle is not None:
85 | self._viewport_camera_update_handle.unsubscribe()
86 | self._viewport_camera_update_handle = None
87 |
88 | """
89 | Properties
90 | """
91 |
92 | @property
93 | def cfg(self) -> ViewerCfg:
94 | """The configuration for the viewer."""
95 | return self._cfg
96 |
97 | """
98 | Public Functions
99 | """
100 |
101 | def set_view_env_index(self, env_index: int):
102 | """Sets the environment index for the camera view.
103 |
104 | Args:
105 | env_index: The index of the environment to set the camera view to.
106 |
107 | Raises:
108 | ValueError: If the environment index is out of bounds. It should be between 0 and num_envs - 1.
109 | """
110 | # check that the env_index is within bounds
111 | if env_index < 0 or env_index >= self._env.config.scene.num_envs:
112 | raise ValueError(
113 | f"Out of range value for attribute 'env_index': {env_index}."
114 | f" Expected a value between 0 and {self._env.config.scene.num_envs - 1} for the current environment."
115 | )
116 | # update the environment index
117 | self.cfg.env_index = env_index
118 | # update the camera view if the origin is set to env type (since, the camera view is static)
119 | # note: for assets, the camera view is updated at each rendering step
120 | if self.cfg.origin_type == "env":
121 | self.update_view_to_env()
122 |
123 | def update_view_to_world(self):
124 | """Updates the viewer's origin to the origin of the world which is (0, 0, 0)."""
125 | # set origin type to world
126 | self.cfg.origin_type = "world"
127 | # update the camera origins
128 | self.viewer_origin = torch.zeros(3)
129 | # update the camera view
130 | self.update_view_location()
131 |
132 | def update_view_to_env(self):
133 | """Updates the viewer's origin to the origin of the selected environment."""
134 | # set origin type to world
135 | self.cfg.origin_type = "env"
136 | # update the camera origins
137 | self.viewer_origin = self._env.scene.env_origins[self.cfg.env_index]
138 | # update the camera view
139 | self.update_view_location()
140 |
141 | def update_view_to_asset_root(self, asset_name: str):
142 | """Updates the viewer's origin based upon the root of an asset in the scene.
143 |
144 | Args:
145 | asset_name: The name of the asset in the scene. The name should match the name of the
146 | asset in the scene.
147 |
148 | Raises:
149 | ValueError: If the asset is not in the scene.
150 | """
151 | # check if the asset is in the scene
152 | if self.cfg.asset_name != asset_name:
153 | asset_entities = [*self._env.scene.rigid_objects.keys(), *self._env.scene.articulations.keys()]
154 | if asset_name not in asset_entities:
155 | raise ValueError(f"Asset '{asset_name}' is not in the scene. Available entities: {asset_entities}.")
156 | # update the asset name
157 | self.cfg.asset_name = asset_name
158 | # set origin type to asset_root
159 | self.cfg.origin_type = "asset_root"
160 | # update the camera origins
161 | self.viewer_origin = self._env.scene[self.cfg.asset_name].data.root_pos_w[self.cfg.env_index]
162 | # update the camera view
163 | self.update_view_location()
164 |
165 | def update_view_location(self, eye: Sequence[float] | None = None, lookat: Sequence[float] | None = None):
166 | """Updates the camera view pose based on the current viewer origin and the eye and lookat positions.
167 |
168 | Args:
169 | eye: The eye position of the camera. If None, the current eye position is used.
170 | lookat: The lookat position of the camera. If None, the current lookat position is used.
171 | """
172 | # store the camera view pose for later use
173 | if eye is not None:
174 | self.default_cam_eye = np.asarray(eye)
175 | if lookat is not None:
176 | self.default_cam_lookat = np.asarray(lookat)
177 | # set the camera locations
178 | viewer_origin = self.viewer_origin.detach().cpu().numpy()
179 | cam_eye = viewer_origin + self.default_cam_eye
180 | cam_target = viewer_origin + self.default_cam_lookat
181 |
182 | # set the camera view
183 | self._env.sim.set_camera_view(eye=cam_eye, target=cam_target)
184 |
185 | """
186 | Private Functions
187 | """
188 |
189 | def _update_tracking_callback(self, event):
190 | """Updates the camera view at each rendering step."""
191 | # update the camera view if the origin is set to asset_root
192 | # in other cases, the camera view is static and does not need to be updated continuously
193 | if self.cfg.origin_type == "asset_root" and self.cfg.asset_name is not None:
194 | self.update_view_to_asset_root(self.cfg.asset_name)
195 |
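A minimal usage sketch of the controller above, assuming an already-constructed instance named `viewport_controller` and an articulation registered in the scene under the (hypothetical) name "robot":

    # Hypothetical usage: follow the robot's root link in environment 3.
    viewport_controller.set_view_env_index(3)
    viewport_controller.update_view_to_asset_root("robot")   # origin_type becomes "asset_root"
    # offset the camera relative to the tracked origin
    viewport_controller.update_view_location(eye=(2.0, 2.0, 1.5), lookat=(0.0, 0.0, 0.8))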
--------------------------------------------------------------------------------
/humanoidverse/simulator/isaacsim/isaacsim_articulation_cfg.py:
--------------------------------------------------------------------------------
1 |
2 | import omni.isaac.lab.sim as sim_utils
3 | from omni.isaac.lab.actuators import ActuatorNetMLPCfg, DCMotorCfg, ImplicitActuatorCfg
4 | from omni.isaac.lab.assets.articulation import ArticulationCfg
5 | from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
6 |
7 | ARTICULATION_CFG = ArticulationCfg(
8 | spawn=sim_utils.UsdFileCfg(
9 | # usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/H1/h1.usd",
10 | usd_path="humanoidverse/data/robots/h1/h1.usd",
11 | activate_contact_sensors=True,
12 | rigid_props=sim_utils.RigidBodyPropertiesCfg(
13 | disable_gravity=False,
14 | retain_accelerations=False,
15 | linear_damping=0.0,
16 | angular_damping=0.0,
17 | max_linear_velocity=1000.0,
18 | max_angular_velocity=1000.0,
19 | max_depenetration_velocity=1.0,
20 | ),
21 | articulation_props=sim_utils.ArticulationRootPropertiesCfg(
22 | enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=4
23 | ),
24 | ),
25 | init_state=ArticulationCfg.InitialStateCfg(
26 | pos=(0.0, 0.0, 1.05),
27 | joint_pos={
28 | ".*_hip_yaw_joint": 0.0,
29 | ".*_hip_roll_joint": 0.0,
30 | ".*_hip_pitch_joint": -0.28, # -16 degrees
31 | ".*_knee_joint": 0.79, # 45 degrees
32 | ".*_ankle_joint": -0.52, # -30 degrees
33 | "torso_joint": 0.0,
34 | ".*_shoulder_pitch_joint": 0.28,
35 | ".*_shoulder_roll_joint": 0.0,
36 | ".*_shoulder_yaw_joint": 0.0,
37 | ".*_elbow_joint": 0.52,
38 | },
39 | joint_vel={".*": 0.0},
40 | ),
41 | soft_joint_pos_limit_factor=0.9,
42 | actuators={
43 | "legs": ImplicitActuatorCfg(
44 | joint_names_expr=[".*_hip_yaw_joint", ".*_hip_roll_joint", ".*_hip_pitch_joint", ".*_knee_joint", "torso_joint"],
45 | effort_limit=300,
46 | velocity_limit=100.0,
47 | stiffness={
48 | ".*_hip_yaw_joint": 150.0,
49 | ".*_hip_roll_joint": 150.0,
50 | ".*_hip_pitch_joint": 200.0,
51 | ".*_knee_joint": 200.0,
52 | "torso_joint": 200.0,
53 | },
54 | damping={
55 | ".*_hip_yaw_joint": 5.0,
56 | ".*_hip_roll_joint": 5.0,
57 | ".*_hip_pitch_joint": 5.0,
58 | ".*_knee_joint": 5.0,
59 | "torso_joint": 5.0,
60 | },
61 | ),
62 | "feet": ImplicitActuatorCfg(
63 | joint_names_expr=[".*_ankle_joint"],
64 | effort_limit=100,
65 | velocity_limit=100.0,
66 | stiffness={".*_ankle_joint": 20.0},
67 | damping={".*_ankle_joint": 4.0},
68 | ),
69 | "arms": ImplicitActuatorCfg(
70 | joint_names_expr=[".*_shoulder_pitch_joint", ".*_shoulder_roll_joint", ".*_shoulder_yaw_joint", ".*_elbow_joint"],
71 | effort_limit=300,
72 | velocity_limit=100.0,
73 | stiffness={
74 | ".*_shoulder_pitch_joint": 40.0,
75 | ".*_shoulder_roll_joint": 40.0,
76 | ".*_shoulder_yaw_joint": 40.0,
77 | ".*_elbow_joint": 40.0,
78 | },
79 | damping={
80 | ".*_shoulder_pitch_joint": 10.0,
81 | ".*_shoulder_roll_joint": 10.0,
82 | ".*_shoulder_yaw_joint": 10.0,
83 | ".*_elbow_joint": 10.0,
84 | },
85 | ),
86 | },
87 | )
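A brief sketch of how a config like ARTICULATION_CFG is typically consumed, assuming Isaac Lab's configclass `replace()` helper and an illustrative per-environment prim path:

    from omni.isaac.lab.assets import Articulation

    # clone the config with a scene-specific prim path and build the articulation
    robot_cfg = ARTICULATION_CFG.replace(prim_path="/World/envs/env_.*/Robot")
    robot = Articulation(cfg=robot_cfg)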
--------------------------------------------------------------------------------
/humanoidverse/train_agent.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from pathlib import Path
4 |
5 | import hydra
6 | from hydra.core.hydra_config import HydraConfig
7 | from hydra.core.config_store import ConfigStore
8 | from hydra.utils import instantiate
9 | from omegaconf import OmegaConf
10 |
11 | import logging
12 | from loguru import logger
13 |
14 | from utils.config_utils import * # noqa: E402, F403
15 |
16 | @hydra.main(config_path="config", config_name="base", version_base="1.1")
17 | def main(config: OmegaConf):
18 | simulator_type = config.simulator['_target_'].split('.')[-1]
19 | if simulator_type == 'IsaacSim':
20 | from omni.isaac.lab.app import AppLauncher
21 | import argparse
22 | parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
23 | AppLauncher.add_app_launcher_args(parser)
24 |
25 | args_cli, hydra_args = parser.parse_known_args()
26 | sys.argv = [sys.argv[0]] + hydra_args
27 | args_cli.num_envs = config.num_envs
28 | args_cli.seed = config.seed
29 | args_cli.env_spacing = config.env.config.env_spacing # config.env_spacing
30 | args_cli.output_dir = config.output_dir
31 | args_cli.headless = config.headless
32 |
33 | app_launcher = AppLauncher(args_cli)
34 | simulation_app = app_launcher.app
35 |
36 | if simulator_type == 'IsaacGym':
37 | import isaacgym # noqa: F401
38 |
39 |
40 | # have to import torch after isaacgym
41 | import torch # noqa: E402
42 | from utils.common import seeding
43 | import wandb
44 | from humanoidverse.envs.base_task.base_task import BaseTask # noqa: E402
45 | from humanoidverse.agents.base_algo.base_algo import BaseAlgo # noqa: E402
46 | from humanoidverse.utils.helpers import pre_process_config
47 | from humanoidverse.utils.logging import HydraLoggerBridge
48 |
49 | # logging to hydra log file
50 | hydra_log_path = os.path.join(HydraConfig.get().runtime.output_dir, "train.log")
51 | logger.remove()
52 | logger.add(hydra_log_path, level="DEBUG")
53 |
54 | # Get log level from LOGURU_LEVEL environment variable or use INFO as default
55 | console_log_level = os.environ.get("LOGURU_LEVEL", "INFO").upper()
56 | logger.add(sys.stdout, level=console_log_level, colorize=True)
57 |
58 | logging.basicConfig(level=logging.DEBUG)
59 | logging.getLogger().addHandler(HydraLoggerBridge())
60 |
61 | unresolved_conf = OmegaConf.to_container(config, resolve=False)
62 | os.chdir(hydra.utils.get_original_cwd())
63 |
64 | if config.use_wandb:
65 | project_name = f"{config.project_name}"
66 | run_name = f"{config.timestamp}_{config.experiment_name}_{config.log_task_name}_{config.robot.asset.robot_type}"
67 | wandb_dir = Path(config.wandb.wandb_dir)
68 | wandb_dir.mkdir(exist_ok=True, parents=True)
69 | logger.info(f"Saving wandb logs to {wandb_dir}")
70 | wandb.init(project=project_name,
71 | entity=config.wandb.wandb_entity,
72 | name=run_name,
73 | sync_tensorboard=True,
74 | config=unresolved_conf,
75 | dir=wandb_dir)
76 |
77 | if hasattr(config, 'device'):
78 | if config.device is not None:
79 | device = config.device
80 | else:
81 | device = "cuda:0" if torch.cuda.is_available() else "cpu"
82 | else:
83 | device = "cuda:0" if torch.cuda.is_available() else "cpu"
84 |
85 | pre_process_config(config)
86 |
87 | # torch.set_float32_matmul_precision("medium")
88 |
89 | # fabric: Fabric = instantiate(config.fabric)
90 | # fabric.launch()
91 |
92 | # if config.seed is not None:
93 | # rank = fabric.global_rank
94 | # if rank is None:
95 | # rank = 0
96 | # fabric.seed_everything(config.seed + rank)
97 | # seeding(config.seed + rank, torch_deterministic=config.torch_deterministic)
98 | config.env.config.save_rendering_dir = str(Path(config.experiment_dir) / "renderings_training")
99 | env: BaseTask = instantiate(config=config.env, device=device)
100 |
101 |
102 | experiment_save_dir = Path(config.experiment_dir)
103 | experiment_save_dir.mkdir(exist_ok=True, parents=True)
104 |
105 | logger.info(f"Saving config file to {experiment_save_dir}")
106 | with open(experiment_save_dir / "config.yaml", "w") as file:
107 | OmegaConf.save(unresolved_conf, file)
108 |
109 | algo: BaseAlgo = instantiate(device=device, env=env, config=config.algo, log_dir=experiment_save_dir)
110 | algo.setup()
111 |
112 | if config.checkpoint is not None:
113 | algo.load(config.checkpoint)
114 |
115 | # start the training loop
116 | algo.learn()
117 |
118 | if simulator_type == 'IsaacSim':
119 | simulation_app.close()
120 |
121 | if __name__ == "__main__":
122 | main()
123 |
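train_agent.py leans on Hydra's `_target_` mechanism twice: to branch on the simulator backend name and to build the env and algo objects via `instantiate`. A minimal sketch of that mechanism, with a standard-library target chosen purely for illustration:

    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    # _target_ names the class to construct; the remaining keys become constructor kwargs.
    cfg = OmegaConf.create({"_target_": "datetime.timedelta", "days": 2, "hours": 3})
    delta = instantiate(cfg)
    print(delta)  # 2 days, 3:00:00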
--------------------------------------------------------------------------------
/humanoidverse/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/utils/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/utils/average_meters.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn as nn
4 |
5 |
6 | class AverageMeter(nn.Module):
7 | def __init__(self, in_shape, max_size):
8 | super(AverageMeter, self).__init__()
9 | self.max_size = max_size
10 | self.current_size = 0
11 | self.register_buffer("mean", torch.zeros(in_shape, dtype=torch.float32))
12 |
13 | def update(self, values):
14 | size = values.size()[0]
15 | if size == 0:
16 | return
17 | new_mean = torch.mean(values.float(), dim=0)
18 | size = np.clip(size, 0, self.max_size)
19 | old_size = min(self.max_size - size, self.current_size)
20 | size_sum = old_size + size
21 | self.current_size = size_sum
22 | self.mean = (self.mean * old_size + new_mean * size) / size_sum
23 |
24 | def clear(self):
25 | self.current_size = 0
26 | self.mean.fill_(0)
27 |
28 | def __len__(self):
29 | return self.current_size
30 |
31 | def get_mean(self):
32 | return self.mean.squeeze(0).cpu().numpy()
33 |
34 |
35 | class TensorAverageMeter:
36 | def __init__(self):
37 | self.tensors = []
38 |
39 | def add(self, x):
40 | if len(x.shape) == 0:
41 | x = x.unsqueeze(0)
42 | self.tensors.append(x)
43 |
44 | def mean(self):
45 | if len(self.tensors) == 0:
46 | return 0
47 | cat = torch.cat(self.tensors, dim=0)
48 | if cat.numel() == 0:
49 | return 0
50 | else:
51 | return cat.mean()
52 |
53 | def clear(self):
54 | self.tensors = []
55 |
56 | def mean_and_clear(self):
57 | mean = self.mean()
58 | self.clear()
59 | return mean
60 |
61 |
62 | class TensorAverageMeterDict:
63 | def __init__(self):
64 | self.data = {}
65 |
66 | def add(self, data_dict):
67 | for k, v in data_dict.items():
68 | # Originally used a defaultdict, this had lambda
69 | # pickling issues with DDP.
70 | if k not in self.data:
71 | self.data[k] = TensorAverageMeter()
72 | self.data[k].add(v)
73 |
74 | def mean(self):
75 | mean_dict = {k: v.mean() for k, v in self.data.items()}
76 | return mean_dict
77 |
78 | def clear(self):
79 | self.data = {}
80 |
81 | def mean_and_clear(self):
82 | mean = self.mean()
83 | self.clear()
84 | return mean
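A short usage sketch of TensorAverageMeterDict with made-up reward terms, showing the accumulate / mean_and_clear pattern:

    import torch

    meters = TensorAverageMeterDict()
    for _ in range(3):  # e.g. once per rollout step
        meters.add({"rew_tracking": torch.rand(8), "rew_action_rate": torch.rand(8)})
    epoch_means = meters.mean_and_clear()  # dict of scalar tensors; meters are reset afterwards
    print(epoch_means)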
--------------------------------------------------------------------------------
/humanoidverse/utils/common.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2022, NVIDIA Corporation
2 | # All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # 1. Redistributions of source code must retain the above copyright notice, this
8 | # list of conditions and the following disclaimer.
9 | #
10 | # 2. Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # 3. Neither the name of the copyright holder nor the names of its
15 | # contributors may be used to endorse or promote products derived from
16 | # this software without specific prior written permission.
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
29 | import argparse
30 | import os
31 | import random
32 | import sys
33 | from datetime import datetime
34 |
35 | import numpy as np
36 | import torch
37 |
38 |
39 | # if there's overlap between args_list and commandline input, use commandline input
40 | def solve_argv_conflict(args_list):
41 | arguments_to_be_removed = []
42 | arguments_size = []
43 |
44 | for argv in sys.argv[1:]:
45 | if argv.startswith("-"):
46 | size_count = 1
47 | for i, args in enumerate(args_list):
48 | if args == argv:
49 | arguments_to_be_removed.append(args)
50 | for more_args in args_list[i + 1 :]:
51 | if not more_args.startswith("-"):
52 | size_count += 1
53 | else:
54 | break
55 | arguments_size.append(size_count)
56 | break
57 |
58 | for args, size in zip(arguments_to_be_removed, arguments_size):
59 | args_index = args_list.index(args)
60 | for _ in range(size):
61 | args_list.pop(args_index)
62 |
63 |
64 | def print_error(*message):
65 | print("\033[91m", "ERROR ", *message, "\033[0m")
66 | raise RuntimeError
67 |
68 |
69 | def print_ok(*message):
70 | print("\033[92m", *message, "\033[0m")
71 |
72 |
73 | def print_warning(*message):
74 | print("\033[93m", *message, "\033[0m")
75 |
76 |
77 | def print_info(*message):
78 | print("\033[96m", *message, "\033[0m")
79 |
80 |
81 | def get_time_stamp():
82 | now = datetime.now()
83 | year = now.strftime("%Y")
84 | month = now.strftime("%m")
85 | day = now.strftime("%d")
86 | hour = now.strftime("%H")
87 | minute = now.strftime("%M")
88 | second = now.strftime("%S")
89 | return "{}-{}-{}-{}-{}-{}".format(month, day, year, hour, minute, second)
90 |
91 |
92 | def parse_model_args(model_args_path):
93 | fp = open(model_args_path, "r")
94 | model_args = eval(fp.read())
95 | model_args = argparse.Namespace(**model_args)
96 |
97 | return model_args
98 |
99 |
100 | def seeding(seed=0, torch_deterministic=False):
101 | print("Setting seed: {}".format(seed))
102 |
103 | random.seed(seed)
104 | np.random.seed(seed)
105 | torch.manual_seed(seed)
106 | os.environ["PYTHONHASHSEED"] = str(seed)
107 | torch.cuda.manual_seed(seed)
108 | torch.cuda.manual_seed_all(seed)
109 |
110 | if torch_deterministic:
111 | # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
112 | os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
113 | torch.backends.cudnn.benchmark = False
114 | torch.backends.cudnn.deterministic = True
115 | torch.use_deterministic_algorithms(True)
116 | else:
117 | torch.backends.cudnn.benchmark = True
118 | torch.backends.cudnn.deterministic = False
119 |
120 | return seed
121 |
122 |
123 | def distance_l2(root_pos, wp_pos):
124 | return torch.norm(wp_pos - root_pos, dim=0)
125 |
126 |
127 | def value_to_color(value, min_value, max_value):
128 | """
129 | Converts a numerical value to an RGB color.
130 | The color will range from blue (low values) to red (high values).
131 | """
132 | # Clamp value to the range [min_value, max_value]
133 | value = max(min_value, min(value, max_value))
134 |
135 | # Calculate the proportion of the value
136 | red = (value - min_value) / (max_value - min_value)
137 |
138 | # Map the proportion to the red channel for a red gradient
139 | # Blue for minimum value and red for maximum value
140 | blue = 1 - red
141 | green = 0 # Keep green constant for simplicity
142 |
143 | # Return the RGB color
144 | return red, green, blue
145 |
146 | def normalize(x, min_value, max_value, target_min=0.0, target_max=1.0):
147 | """
148 | Normalize a value from a given range to a target range.
149 | """
150 | # Normalize the value to the range [0, 1]
151 | normalized = (x - min_value) / (max_value - min_value)
152 |
153 | # Scale the value to the target range
154 | scaled = target_min + normalized * (target_max - target_min)
155 |
156 | return scaled
157 |
158 | def unnormalize(x, min_value, max_value, target_min=0.0, target_max=1.0):
159 | """
160 | Unnormalize a value from a target range to a given range.
161 | """
162 | # Normalize the value to the range [0, 1]
163 | normalized = (x - target_min) / (target_max - target_min)
164 |
165 | # Scale the value to the target range
166 | scaled = min_value + normalized * (max_value - min_value)
167 |
168 | return scaled
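A quick round-trip check of the normalize / unnormalize pair defined above:

    x = 7.5
    y = normalize(x, min_value=5.0, max_value=10.0)                 # -> 0.5
    assert abs(unnormalize(y, min_value=5.0, max_value=10.0) - x) < 1e-9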
--------------------------------------------------------------------------------
/humanoidverse/utils/config_utils.py:
--------------------------------------------------------------------------------
1 | import math
2 | from loguru import logger
3 | from omegaconf import OmegaConf
4 |
5 | try:
6 | OmegaConf.register_new_resolver("eval", eval)
7 | OmegaConf.register_new_resolver("if", lambda pred, a, b: a if pred else b)
8 | OmegaConf.register_new_resolver("eq", lambda x, y: x.lower() == y.lower())
9 | OmegaConf.register_new_resolver("sqrt", lambda x: math.sqrt(float(x)))
10 | OmegaConf.register_new_resolver("sum", lambda x: sum(x))
11 | OmegaConf.register_new_resolver("ceil", lambda x: math.ceil(x))
12 | OmegaConf.register_new_resolver("int", lambda x: int(x))
13 | OmegaConf.register_new_resolver("len", lambda x: len(x))
14 | OmegaConf.register_new_resolver("sum_list", lambda lst: sum(lst))
15 | except Exception as e:
16 | logger.warning(f"Warning: Some resolvers already registered: {e}")
17 |
18 | # OmegaConf.register_new_resolver("eval", eval)
19 | # OmegaConf.register_new_resolver("if", lambda pred, a, b: a if pred else b)
20 | # OmegaConf.register_new_resolver("eq", lambda x, y: x.lower() == y.lower())
21 | # OmegaConf.register_new_resolver("sqrt", lambda x: math.sqrt(float(x)))
22 | # OmegaConf.register_new_resolver("sum", lambda x: sum(x))
23 | # OmegaConf.register_new_resolver("ceil", lambda x: math.ceil(x))
24 | # OmegaConf.register_new_resolver("int", lambda x: int(x))
25 | # OmegaConf.register_new_resolver("len", lambda x: len(x))
26 | # OmegaConf.register_new_resolver("sum_list", lambda lst: sum(lst))
27 |
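These resolvers are intended to be used from interpolations inside the YAML configs. A small self-contained sketch exercising two of them through OmegaConf (the key names are illustrative, not taken from the repo's configs):

    import math
    from omegaconf import OmegaConf

    # re-register the resolvers used below so the snippet runs on its own
    OmegaConf.register_new_resolver("sum", lambda x: sum(x), replace=True)
    OmegaConf.register_new_resolver("sqrt", lambda x: math.sqrt(float(x)), replace=True)

    cfg = OmegaConf.create({
        "obs_dims": [3, 3, 29, 29],
        "obs_total": "${sum:${obs_dims}}",   # -> 64
        "num_envs": 4096,
        "grid_side": "${sqrt:${num_envs}}",  # -> 64.0
    })
    print(cfg.obs_total, cfg.grid_side)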
--------------------------------------------------------------------------------
/humanoidverse/utils/helpers.py:
--------------------------------------------------------------------------------
1 | import os
2 | import copy
3 | import torch
4 | from torch import nn
5 | import numpy as np
6 | import random
7 |
8 | from typing import Any, List, Dict
9 | from termcolor import colored
10 | from loguru import logger
11 |
12 | def class_to_dict(obj) -> dict:
13 | if not hasattr(obj,"__dict__"):
14 | return obj
15 | result = {}
16 | for key in dir(obj):
17 | if key.startswith("_"):
18 | continue
19 | element = []
20 | val = getattr(obj, key)
21 | if isinstance(val, list):
22 | for item in val:
23 | element.append(class_to_dict(item))
24 | else:
25 | element = class_to_dict(val)
26 | result[key] = element
27 | return result
28 |
29 | def pre_process_config(config) -> None:
30 |
31 | # compute observation_dim
32 | # config.robot.policy_obs_dim = -1
33 | # config.robot.critic_obs_dim = -1
34 |
35 | obs_dim_dict = dict()
36 | _obs_key_list = config.env.config.obs.obs_dict
37 |
38 | assert set(config.env.config.obs.noise_scales.keys()) == set(config.env.config.obs.obs_scales.keys())
39 |
40 | # convert obs_dims to list of dicts
41 | each_dict_obs_dims = {k: v for d in config.env.config.obs.obs_dims for k, v in d.items()}
42 | config.env.config.obs.obs_dims = each_dict_obs_dims
43 | logger.info(f"obs_dims: {each_dict_obs_dims}")
44 | auxiliary_obs_dims = {}
45 | if hasattr(config.env.config.obs, 'obs_auxiliary'):
46 | _aux_obs_key_list = config.env.config.obs.obs_auxiliary
47 | auxiliary_obs_dims = {}
48 | for aux_obs_key, aux_config in _aux_obs_key_list.items():
49 | auxiliary_obs_dims[aux_obs_key] = 0
50 | for _key, _num in aux_config.items():
51 | assert _key in config.env.config.obs.obs_dims.keys()
52 | auxiliary_obs_dims[aux_obs_key] += config.env.config.obs.obs_dims[_key] * _num
53 | logger.info(f"auxiliary_obs_dims: {auxiliary_obs_dims}")
54 | for obs_key, obs_config in _obs_key_list.items():
55 | obs_dim_dict[obs_key] = 0
56 | for key in obs_config:
57 | if key.endswith("_raw"): key = key[:-4]
58 | if key in config.env.config.obs.obs_dims.keys():
59 | obs_dim_dict[obs_key] += config.env.config.obs.obs_dims[key]
60 | logger.info(f"{obs_key}: {key} has dim: {config.env.config.obs.obs_dims[key]}")
61 | elif key in auxiliary_obs_dims.keys():
62 | obs_dim_dict[obs_key] += auxiliary_obs_dims[key]
63 | logger.info(f"{obs_key}: {key} has dim: {auxiliary_obs_dims[key]}")
64 | else:
65 | logger.error(f"{obs_key}: {key} not found in obs_dims")
66 | raise ValueError(f"{obs_key}: {key} not found in obs_dims")
67 | config.robot.algo_obs_dim_dict = obs_dim_dict
68 | logger.info(f"algo_obs_dim_dict: {config.robot.algo_obs_dim_dict}")
69 |
70 | # compute action_dim for ppo
71 | # for agent in config.algo.config.network_dict.keys():
72 | # for network in config.algo.config.network_dict[agent].keys():
73 | # output_dim = config.algo.config.network_dict[agent][network].output_dim
74 | # if output_dim == "action_dim":
75 | # config.algo.config.network_dict[agent][network].output_dim = config.env.config.robot.actions_dim
76 |
77 | # print the config
78 | logger.debug(f"PPO CONFIG")
79 | logger.debug(f"{config.algo.config.module_dict}")
80 | # logger.debug(f"{config.algo.config.network_dict}")
81 |
82 | def parse_observation(cls: Any,
83 | key_list: List,
84 | buf_dict: Dict,
85 | obs_scales: Dict,
86 | noise_scales: Dict,
87 | current_noise_curriculum_value: Any=1.0) -> None:
88 | """ Parse observations for the legged_robot_base class
89 | """
90 |
91 | for obs_key in key_list:
92 | if obs_key.endswith("_raw"):
93 | obs_key = obs_key[:-4]
94 | obs_noise = 0.
95 | else:
96 | obs_noise = noise_scales[obs_key] * current_noise_curriculum_value
97 |
98 | # print(f"obs_key: {obs_key}, obs_noise: {obs_noise}")
99 | actor_obs = getattr(cls, f"_get_obs_{obs_key}")().clone()
100 | obs_scale = obs_scales[obs_key]
101 | # Yuanhang: use rand_like (uniform in [0, 1]) instead of randn_like (standard normal N(0, 1))
102 | # buf_dict[obs_key] = actor_obs * obs_scale + (torch.randn_like(actor_obs)* 2. - 1.) * obs_noise
103 | # print("noise_scales", noise_scales)
104 | # print("obs_noise", obs_noise)
105 | buf_dict[obs_key] = (actor_obs + (torch.rand_like(actor_obs)* 2. - 1.) * obs_noise) * obs_scale
106 |
107 |
108 | def export_policy_as_jit(actor_critic, path):
109 | if hasattr(actor_critic, 'memory_a'):
110 | # assumes LSTM: TODO add GRU
111 | exporter = PolicyExporterLSTM(actor_critic)
112 | exporter.export(path)
113 | else:
114 | os.makedirs(path, exist_ok=True)
115 | path = os.path.join(path, 'policy_1.pt')
116 | model = copy.deepcopy(actor_critic.actor).to('cpu')
117 | traced_script_module = torch.jit.script(model)
118 | traced_script_module.save(path)
119 |
120 | class PolicyExporterLSTM(torch.nn.Module):
121 | def __init__(self, actor_critic):
122 | super().__init__()
123 | self.actor = copy.deepcopy(actor_critic.actor)
124 | self.is_recurrent = actor_critic.is_recurrent
125 | self.memory = copy.deepcopy(actor_critic.memory_a.rnn)
126 | self.memory.cpu()
127 | self.register_buffer(f'hidden_state', torch.zeros(self.memory.num_layers, 1, self.memory.hidden_size))
128 | self.register_buffer(f'cell_state', torch.zeros(self.memory.num_layers, 1, self.memory.hidden_size))
129 |
130 | def forward(self, x):
131 | out, (h, c) = self.memory(x.unsqueeze(0), (self.hidden_state, self.cell_state))
132 | self.hidden_state[:] = h
133 | self.cell_state[:] = c
134 | return self.actor(out.squeeze(0))
135 |
136 | @torch.jit.export
137 | def reset_memory(self):
138 | self.hidden_state[:] = 0.
139 | self.cell_state[:] = 0.
140 |
141 | def export(self, path):
142 | os.makedirs(path, exist_ok=True)
143 | path = os.path.join(path, 'policy_lstm_1.pt')
144 | self.to('cpu')
145 | traced_script_module = torch.jit.script(self)
146 | traced_script_module.save(path)
147 |
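A toy walk-through of the dimension bookkeeping done by pre_process_config: auxiliary observation groups expand to dim * history_length, and each entry in obs_dict sums the dims of its keys. All numbers below are illustrative only:

    # illustrative layout, not the repo's actual observation config
    obs_dims = {"base_ang_vel": 3, "dof_pos": 29, "dof_vel": 29, "actions": 29}
    obs_auxiliary = {"history_actor": {"dof_pos": 4, "dof_vel": 4}}      # 4-step history
    obs_dict = {"actor_obs": ["base_ang_vel", "dof_pos", "actions", "history_actor"]}

    aux_dims = {k: sum(obs_dims[name] * n for name, n in spec.items())
                for k, spec in obs_auxiliary.items()}                    # {'history_actor': 232}
    obs_dim_dict = {group: sum(aux_dims.get(k, obs_dims.get(k, 0)) for k in keys)
                    for group, keys in obs_dict.items()}
    print(obs_dim_dict)                                                  # {'actor_obs': 293}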
--------------------------------------------------------------------------------
/humanoidverse/utils/inference_helpers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import os
4 | import copy
5 |
6 | def export_policy_as_jit(actor_critic, path, exported_policy_name):
7 | os.makedirs(path, exist_ok=True)
8 | path = os.path.join(path, exported_policy_name)
9 | model = copy.deepcopy(actor_critic.actor).to('cpu')
10 | traced_script_module = torch.jit.script(model)
11 | traced_script_module.save(path)
12 |
13 | def export_decouple_policy_and_estimator_history_as_onnx(inference_model, path, exported_policy_name, example_obs_dict):
14 | os.makedirs(path, exist_ok=True)
15 | path = os.path.join(path, exported_policy_name)
16 |
17 | actor = copy.deepcopy(inference_model['actor']).to('cpu')
18 | left_ee_force_estimator = copy.deepcopy(inference_model['left_ee_force_estimator']).to('cpu')
19 | right_ee_force_estimator = copy.deepcopy(inference_model['right_ee_force_estimator']).to('cpu')
20 |
21 | class PPODecoupleForceEstimatorWrapper(nn.Module):
22 | def __init__(self, actor, left_ee_force_estimator, right_ee_force_estimator):
23 | """
24 | model: The original PyTorch model.
25 | input_keys: List of input names as keys for the input dictionary.
26 | """
27 | super(PPODecoupleForceEstimatorWrapper, self).__init__()
28 | self.actor = actor
29 | self.left_ee_force_estimator = left_ee_force_estimator
30 | self.right_ee_force_estimator = right_ee_force_estimator
31 |
32 | def forward(self, inputs):
33 | """
34 | Dynamically creates a dictionary from the input keys and args.
35 | """
36 | actor_obs, estimator_obs, history_estimated_force = inputs
37 | left_ee_force_estimator_output = self.left_ee_force_estimator(estimator_obs)
38 | right_ee_force_estimator_output = self.right_ee_force_estimator(estimator_obs)
39 | input_for_actor = torch.cat([actor_obs,
40 | left_ee_force_estimator_output,
41 | right_ee_force_estimator_output,
42 | history_estimated_force], dim=-1)
43 | return self.actor.act_inference(input_for_actor), left_ee_force_estimator_output, right_ee_force_estimator_output
44 |
45 | wrapper = PPODecoupleForceEstimatorWrapper(actor, left_ee_force_estimator, right_ee_force_estimator)
46 | example_input_list = [example_obs_dict["actor_obs"],
47 | example_obs_dict["estimator_obs"],
48 | example_obs_dict["estimated_force_history"]]
49 | torch.onnx.export(
50 | wrapper,
51 | example_input_list, # Pass x1 and x2 as separate inputs
52 | path,
53 | verbose=True,
54 | input_names=["actor_obs", "estimator_obs", "estimated_force_history"], # Specify the input names
55 | output_names=["action", "left_ee_force_estimator_output", "right_ee_force_estimator_output"], # Name the output
56 | opset_version=13 # Specify the opset version, if needed
57 | )
58 |
59 | def export_decouple_policy_and_estimator_as_onnx(inference_model, path, exported_policy_name, example_obs_dict):
60 | os.makedirs(path, exist_ok=True)
61 | path = os.path.join(path, exported_policy_name)
62 |
63 | actor = copy.deepcopy(inference_model['actor']).to('cpu')
64 | left_ee_force_estimator = copy.deepcopy(inference_model['left_ee_force_estimator']).to('cpu')
65 | right_ee_force_estimator = copy.deepcopy(inference_model['right_ee_force_estimator']).to('cpu')
66 |
67 | class PPODecoupleForceEstimatorWrapper(nn.Module):
68 | def __init__(self, actor, left_ee_force_estimator, right_ee_force_estimator):
69 | """
70 | model: The original PyTorch model.
71 | input_keys: List of input names as keys for the input dictionary.
72 | """
73 | super(PPODecoupleForceEstimatorWrapper, self).__init__()
74 | self.actor = actor
75 | self.left_ee_force_estimator = left_ee_force_estimator
76 | self.right_ee_force_estimator = right_ee_force_estimator
77 |
78 | def forward(self, inputs):
79 | """
80 | Dynamically creates a dictionary from the input keys and args.
81 | """
82 | actor_obs, estimator_obs = inputs
83 | left_ee_force_estimator_output = self.left_ee_force_estimator(estimator_obs)
84 | right_ee_force_estimator_output = self.right_ee_force_estimator(estimator_obs)
85 | input_for_actor = torch.cat([actor_obs,
86 | left_ee_force_estimator_output,
87 | right_ee_force_estimator_output], dim=-1)
88 | return self.actor.act_inference(input_for_actor), left_ee_force_estimator_output, right_ee_force_estimator_output
89 |
90 | wrapper = PPODecoupleForceEstimatorWrapper(actor, left_ee_force_estimator, right_ee_force_estimator)
91 | example_input_list = [example_obs_dict["actor_obs"],
92 | example_obs_dict["estimator_obs"]]
93 | torch.onnx.export(
94 | wrapper,
95 | example_input_list, # Pass x1 and x2 as separate inputs
96 | path,
97 | verbose=True,
98 | input_names=["actor_obs", "estimator_obs"], # Specify the input names
99 | output_names=["action", "left_ee_force_estimator_output", "right_ee_force_estimator_output"], # Name the output
100 | opset_version=13 # Specify the opset version, if needed
101 | )
102 |
103 | def export_multi_agent_decouple_policy_as_onnx(inference_model, path, exported_policy_name, example_obs_dict, body_keys=["lower_body", "upper_body"]):
104 | os.makedirs(path, exist_ok=True)
105 | path = os.path.join(path, exported_policy_name)
106 |
107 | actors = {
108 | k: copy.deepcopy(inference_model['actors'][k]).to('cpu')
109 | for k in body_keys
110 | }
111 |
112 | class PPOMADecoupleWrapper(nn.Module):
113 | def __init__(self, actors, body_keys):
114 | super(PPOMADecoupleWrapper, self).__init__()
115 | self.actors = nn.ModuleDict(actors)
116 | self.body_keys = body_keys
117 |
118 | def forward(self, actor_obs):
119 | actions = [self.actors[k].act_inference(actor_obs) for k in self.body_keys]
120 | return torch.cat(actions, dim=-1)
121 |
122 | wrapper = PPOMADecoupleWrapper(actors, body_keys)
123 | example_input_list = example_obs_dict["actor_obs"]
124 |
125 | torch.onnx.export(
126 | wrapper,
127 | example_input_list,
128 | path,
129 | verbose=True,
130 | input_names=["actor_obs"],
131 | output_names=["action"],
132 | opset_version=13
133 | )
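The export functions above name their ONNX inputs and outputs explicitly ("actor_obs", "action", ...), so an exported policy can be driven with onnxruntime. A loading sketch with a made-up file name and observation size:

    import numpy as np
    import onnxruntime as ort

    obs_dim = 93  # hypothetical; must match the dimension used at export time
    session = ort.InferenceSession("exported/policy.onnx")
    (action,) = session.run(["action"], {"actor_obs": np.zeros((1, obs_dim), dtype=np.float32)})
    print(action.shape)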
--------------------------------------------------------------------------------
/humanoidverse/utils/logging.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from loguru import logger
3 | from contextlib import contextmanager
4 | import sys
5 | import os
6 |
7 | class HydraLoggerBridge(logging.Handler):
8 | def emit(self, record):
9 | # Get corresponding loguru level
10 | try:
11 | level = logger.level(record.levelname).name
12 | except ValueError:
13 | level = record.levelno
14 |
15 | # Find caller from where the logged message originated
16 | frame, depth = logging.currentframe(), 2
17 | while frame and frame.f_code.co_filename == logging.__file__:
18 | frame = frame.f_back
19 | depth += 1
20 |
21 | logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())
22 |
23 | class LoguruStream:
24 | def write(self, message):
25 | if message.strip(): # Only log non-empty messages
26 | logger.info(message.strip()) # log non-empty messages at INFO level
27 |
28 | def flush(self):
29 | pass
30 |
31 | @contextmanager
32 | def capture_stdout_to_loguru():
33 | logger.remove()
34 | logger.add(sys.stdout, level="INFO")
35 | loguru_stream = LoguruStream()
36 | old_stdout = sys.stdout
37 | sys.stdout = loguru_stream
38 | try:
39 | yield
40 | finally:
41 | sys.stdout = old_stdout
42 | logger.remove()
43 | console_log_level = os.environ.get("LOGURU_LEVEL", "INFO").upper()
44 | logger.add(sys.stdout, level=console_log_level, colorize=True)
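A short sketch of capture_stdout_to_loguru: inside the block, bare print output is rerouted through loguru's logger.info:

    with capture_stdout_to_loguru():
        print("this line goes through logger.info(...)")
    print("back to the regular stdout")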
--------------------------------------------------------------------------------
/humanoidverse/utils/math.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: BSD-3-Clause
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # 1. Redistributions of source code must retain the above copyright notice, this
8 | # list of conditions and the following disclaimer.
9 | #
10 | # 2. Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # 3. Neither the name of the copyright holder nor the names of its
15 | # contributors may be used to endorse or promote products derived from
16 | # this software without specific prior written permission.
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 | #
29 | # Copyright (c) 2021 ETH Zurich, Nikita Rudin
30 |
31 | import torch
32 | from torch import Tensor
33 | import numpy as np
34 | from humanoidverse.utils.torch_utils import quat_apply, normalize
35 | from typing import Tuple
36 |
37 | # @ torch.jit.script
38 | def quat_apply_yaw(quat, vec):
39 | quat_yaw = quat.clone().view(-1, 4)
40 | quat_yaw[:, :2] = 0.
41 | quat_yaw = normalize(quat_yaw)
42 | return quat_apply(quat_yaw, vec)
43 |
44 | # @ torch.jit.script
45 | def wrap_to_pi(angles):
46 | angles %= 2*np.pi
47 | angles -= 2*np.pi * (angles > np.pi)
48 | return angles
49 |
50 | # @ torch.jit.script
51 | def torch_rand_sqrt_float(lower, upper, shape, device):
52 | # type: (float, float, Tuple[int, int], str) -> Tensor
53 | r = 2*torch.rand(*shape, device=device) - 1
54 | r = torch.where(r<0., -torch.sqrt(-r), torch.sqrt(r))
55 | r = (r + 1.) / 2.
56 | return (upper - lower) * r + lower
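A quick example of wrap_to_pi, which maps arbitrary angles into (-pi, pi] (note it modifies its argument in place, hence the clone):

    import numpy as np
    import torch

    angles = torch.tensor([0.0, 1.5 * np.pi, -0.5 * np.pi, 3.0 * np.pi])
    print(wrap_to_pi(angles.clone()))  # tensor([ 0.0000, -1.5708, -1.5708,  3.1416])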
--------------------------------------------------------------------------------
/humanoidverse/utils/motion_lib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/utils/motion_lib/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/utils/motion_lib/motion_lib_robot.py:
--------------------------------------------------------------------------------
1 | from humanoidverse.utils.motion_lib.motion_lib_base import MotionLibBase
2 | from humanoidverse.utils.motion_lib.torch_humanoid_batch import Humanoid_Batch
3 | import hydra
4 | import torch
5 | from loguru import logger
6 | from omegaconf import DictConfig
7 | class MotionLibRobot(MotionLibBase):
8 | def __init__(self, motion_lib_cfg, num_envs, device):
9 | super().__init__(motion_lib_cfg = motion_lib_cfg, num_envs = num_envs, device = device)
10 | self.mesh_parsers = Humanoid_Batch(motion_lib_cfg)
11 | # return
12 |
13 | @hydra.main(version_base=None, config_path="../../config", config_name="base")
14 | def main(config: DictConfig):
15 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
16 | num_envs = 512
17 | motionlib = MotionLibRobot(config.robot.motion, num_envs, device)
18 | motions = motionlib.load_motions()
19 | logger.info(f"Loaded {len(motions)} motions")
20 | motion_times = 0
21 | import ipdb; ipdb.set_trace()
22 | for motion in motions:
23 | # logger.info(f"Motion DoF Pos Length: {motion['dof_pos'].shape}")
24 | # logger.info(f"First Frame Pos: {motion['dof_pos'][0, 12:]}")
25 | motion_times += motion['dof_pos'].shape[0] / motion['fps']
26 | logger.info(f"Average Motion Length: {motion_times / len(motions)}")
27 |
28 | if __name__ == "__main__":
29 | main()
--------------------------------------------------------------------------------
/humanoidverse/utils/motion_lib/motion_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LeCAR-Lab/FALCON/a273be3a977e7df2610ab0b71a4250c0e55b0d7c/humanoidverse/utils/motion_lib/motion_utils/__init__.py
--------------------------------------------------------------------------------
/humanoidverse/utils/motion_lib/motion_utils/flags.py:
--------------------------------------------------------------------------------
1 | __all__ = ['flags']
2 |
3 | class Flags(object):
4 | def __init__(self, items):
5 | for key, val in items.items():
6 | setattr(self,key,val)
7 |
8 | flags = Flags({
9 | 'test': False,
10 | 'debug': False,
11 | "real_traj": False,
12 | "im_eval": False,
13 | })
14 |
--------------------------------------------------------------------------------
/humanoidverse/utils/terrain.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numpy.random import choice
3 | from scipy import interpolate
4 |
5 | from isaacgym import terrain_utils
6 | from legged_gym.envs.base.legged_robot_config import LeggedRobotCfg
7 |
8 | class Terrain:
9 | def __init__(self, cfg: LeggedRobotCfg.terrain, num_robots) -> None:
10 |
11 | self.cfg = cfg
12 | self.num_robots = num_robots
13 | self.type = cfg.mesh_type
14 | if self.type in ["none", 'plane']:
15 | return
16 | self.env_length = cfg.terrain_length
17 | self.env_width = cfg.terrain_width
18 | self.proportions = [np.sum(cfg.terrain_proportions[:i+1]) for i in range(len(cfg.terrain_proportions))]
19 |
20 | self.cfg.num_sub_terrains = cfg.num_rows * cfg.num_cols
21 | self.env_origins = np.zeros((cfg.num_rows, cfg.num_cols, 3))
22 |
23 | self.width_per_env_pixels = int(self.env_width / cfg.horizontal_scale)
24 | self.length_per_env_pixels = int(self.env_length / cfg.horizontal_scale)
25 |
26 | self.border = int(cfg.border_size/self.cfg.horizontal_scale)
27 | self.tot_cols = int(cfg.num_cols * self.width_per_env_pixels) + 2 * self.border
28 | self.tot_rows = int(cfg.num_rows * self.length_per_env_pixels) + 2 * self.border
29 |
30 | self.height_field_raw = np.zeros((self.tot_rows , self.tot_cols), dtype=np.int16)
31 | if cfg.curriculum:
32 | self.curiculum()
33 | elif cfg.selected:
34 | self.selected_terrain()
35 | else:
36 | self.randomized_terrain()
37 |
38 | self.heightsamples = self.height_field_raw
39 | if self.type=="trimesh":
40 | self.vertices, self.triangles = terrain_utils.convert_heightfield_to_trimesh( self.height_field_raw,
41 | self.cfg.horizontal_scale,
42 | self.cfg.vertical_scale,
43 | self.cfg.slope_treshold)
44 |
45 | def randomized_terrain(self):
46 | for k in range(self.cfg.num_sub_terrains):
47 | # Env coordinates in the world
48 | (i, j) = np.unravel_index(k, (self.cfg.num_rows, self.cfg.num_cols))
49 |
50 | choice = np.random.uniform(0, 1)
51 | difficulty = np.random.choice([0.5, 0.75, 0.9])
52 | terrain = self.make_terrain(choice, difficulty)
53 | self.add_terrain_to_map(terrain, i, j)
54 |
55 | def curiculum(self):
56 | for j in range(self.cfg.num_cols):
57 | for i in range(self.cfg.num_rows):
58 | difficulty = i / self.cfg.num_rows
59 | choice = j / self.cfg.num_cols + 0.001
60 |
61 | terrain = self.make_terrain(choice, difficulty)
62 | self.add_terrain_to_map(terrain, i, j)
63 |
64 | def selected_terrain(self):
65 | terrain_type = self.cfg.terrain_kwargs.pop('type')
66 | for k in range(self.cfg.num_sub_terrains):
67 | # Env coordinates in the world
68 | (i, j) = np.unravel_index(k, (self.cfg.num_rows, self.cfg.num_cols))
69 |
70 | terrain = terrain_utils.SubTerrain("terrain",
71 | width=self.width_per_env_pixels,
72 | length=self.width_per_env_pixels,
73 | vertical_scale=self.cfg.vertical_scale,
74 | horizontal_scale=self.cfg.horizontal_scale)
75 |
76 | eval(terrain_type)(terrain, **self.cfg.terrain_kwargs.terrain_kwargs)
77 | self.add_terrain_to_map(terrain, i, j)
78 |
79 | def make_terrain(self, choice, difficulty):
80 | terrain = terrain_utils.SubTerrain( "terrain",
81 | width=self.width_per_env_pixels,
82 | length=self.width_per_env_pixels,
83 | vertical_scale=self.cfg.vertical_scale,
84 | horizontal_scale=self.cfg.horizontal_scale)
85 | slope = difficulty * 0.4
86 | step_height = 0.05 + 0.18 * difficulty
87 | discrete_obstacles_height = 0.05 + difficulty * 0.2
88 | stepping_stones_size = 1.5 * (1.05 - difficulty)
89 | stone_distance = 0.05 if difficulty==0 else 0.1
90 | gap_size = 1. * difficulty
91 | pit_depth = 1. * difficulty
92 | if choice < self.proportions[0]:
93 | if choice < self.proportions[0]/ 2:
94 | slope *= -1
95 | terrain_utils.pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.)
96 | elif choice < self.proportions[1]:
97 | terrain_utils.pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.)
98 | terrain_utils.random_uniform_terrain(terrain, min_height=-0.05, max_height=0.05, step=0.005, downsampled_scale=0.2)
99 | elif choice < self.proportions[3]:
--------------------------------------------------------------------------------
/humanoidverse/utils/torch_utils.py:
--------------------------------------------------------------------------------
10 | @torch.jit.script
11 | def torch_rand_float(lower, upper, shape, device):
12 | # type: (float, float, Tuple[int, int], str) -> Tensor
13 | return (upper - lower) * torch.rand(*shape, device=device) + lower
14 |
15 | @torch.jit.script
16 | def copysign(a, b):
17 | # type: (float, Tensor) -> Tensor
18 | a = torch.tensor(a, device=b.device, dtype=torch.float).repeat(b.shape[0])
19 | return torch.abs(a) * torch.sign(b)
20 |
21 |
22 |
23 | def set_seed(seed, torch_deterministic=False):
24 | """ set seed across modules """
25 | if seed == -1 and torch_deterministic:
26 | seed = 42
27 | elif seed == -1:
28 | seed = np.random.randint(0, 10000)
29 | print("Setting seed: {}".format(seed))
30 |
31 | random.seed(seed)
32 | np.random.seed(seed)
33 | torch.manual_seed(seed)
34 | os.environ["PYTHONHASHSEED"] = str(seed)
35 | torch.cuda.manual_seed(seed)
36 | torch.cuda.manual_seed_all(seed)
37 |
38 | if torch_deterministic:
39 | # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
40 | os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
41 | torch.backends.cudnn.benchmark = False
42 | torch.backends.cudnn.deterministic = True
43 | torch.use_deterministic_algorithms(True)
44 | else:
45 | torch.backends.cudnn.benchmark = True
46 | torch.backends.cudnn.deterministic = False
47 |
48 | return seed
49 |
50 |
--------------------------------------------------------------------------------
/isaac_utils/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | setup(
4 | name="isaac_utils",
5 | packages=["isaac_utils"],
6 | version="0.0.1",
7 | description="Unified torch env_utils for IsaacGym and IsaacSim",
8 | author="",
9 | classifiers=[],
10 | )
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import find_packages, setup
2 |
3 | setup(
4 | name='falcon',
5 | version='0.0.1',
6 | license="MIT",
7 | packages=find_packages(),
8 | description='FALCON: Learning Force-Adaptive Humanoid Loco-Manipulation',
9 | url="https://github.com/LeCAR-Lab/FALCON", # Update this with your actual repository URL
10 | python_requires=">=3.8",
11 | install_requires=[
12 | "hydra-core>=1.2.0",
13 | "numpy==1.23.5",
14 | "rich",
15 | "ipdb",
16 | "matplotlib",
17 | "termcolor",
18 | "wandb",
19 | "plotly",
20 | "tqdm",
21 | "loguru",
22 | "meshcat",
23 | "pynput",
24 | "scipy",
25 | "tensorboard",
26 | "onnx",
27 | "onnxruntime",
28 | "opencv-python",
29 | "joblib",
30 | "easydict",
31 | "lxml",
32 | "numpy-stl",
33 | "open3d"
34 | ]
35 | )
--------------------------------------------------------------------------------