├── .bashrc_docker ├── .dockerignore ├── .flake8 ├── .gitignore ├── .gitlab-ci.yml ├── .gitmodules ├── .pydocstyle ├── Dockerfile.build ├── Dockerfile.dev ├── Dockerfile.train ├── MANIFEST.in ├── README.md ├── build_docker.sh ├── build_docker_build.sh ├── build_docker_train.sh ├── clear_builds.sh ├── config.toml ├── doc └── img │ └── example.png ├── docs ├── Makefile ├── make.bat └── source │ ├── _static │ └── .gitkeep │ ├── _templates │ ├── autoapi │ │ ├── index.rst │ │ └── python │ │ │ ├── attribute.rst │ │ │ ├── class.rst │ │ │ ├── data.rst │ │ │ ├── exception.rst │ │ │ ├── function.rst │ │ │ ├── method.rst │ │ │ ├── module.rst │ │ │ ├── package.rst │ │ │ └── property.rst │ ├── custom-class-template.rst │ └── custom-module-template.rst │ ├── benchmark_envs.rst │ ├── conf.py │ ├── define_env.rst │ ├── demos.rst │ ├── human_animations.rst │ ├── img │ ├── hrgym_demo.png │ └── hrgym_demo_failsafe.png │ ├── index.rst │ ├── installation.rst │ ├── safety_shield.rst │ ├── training.rst │ └── wrappers.rst ├── entrypoint.sh ├── environment.yml ├── gitlab_runner.sh ├── gitlab_runner_dind.sh ├── human_robot_gym ├── MUJOCO_LOG.TXT ├── __init__.py ├── callbacks │ ├── custom_wandb_callback.py │ ├── logging_callback.py │ └── model_reset_callback.py ├── controllers │ ├── __init__.py │ └── failsafe_controller │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── config │ │ └── failsafe.json │ │ ├── failsafe_controller │ │ ├── __init__.py │ │ ├── failsafe_controller.py │ │ └── plot_capsule.py │ │ └── files.txt ├── demonstrations │ ├── __init__.py │ └── experts │ │ ├── __init__.py │ │ ├── collaborative_hammering_cart_expert.py │ │ ├── collaborative_lifting_cart_expert.py │ │ ├── expert.py │ │ ├── pick_place_human_cart_expert.py │ │ ├── reach_human_cart_expert.py │ │ └── reach_human_expert.py ├── demos │ ├── __init__.py │ ├── demo_animation_clips.py │ ├── demo_collaborative_hammering_environment.py │ ├── demo_collaborative_lifting_environment.py │ ├── demo_collaborative_stacking_environment.py │ ├── demo_gym_functionality.py │ ├── demo_gym_functionality_Schunk_IK_interactive.py │ ├── demo_gym_functionality_Schunk_fullstop_criterion.py │ ├── demo_gym_functionality_Schunk_pfl_criterion.py │ ├── demo_gym_functionality_Schunk_safety.py │ ├── demo_gym_functionality_iiwa.py │ ├── demo_gym_functionality_jaco.py │ ├── demo_gym_functionality_kinova3.py │ ├── demo_gym_functionality_panda.py │ ├── demo_gym_functionality_panda_new.py │ ├── demo_gym_functionality_sawyer.py │ ├── demo_gym_functionality_ur5e.py │ ├── demo_human_object_inspection_environment.py │ ├── demo_human_robot_handover_environment.py │ ├── demo_pick_place_close_human_environment.py │ ├── demo_pick_place_human_environment.py │ ├── demo_pick_place_pointing_human_environment.py │ ├── demo_reach_human_cart_environment.py │ ├── demo_reach_human_environment.py │ ├── demo_robot_human_handover_environment.py │ ├── profile_gym_functionality_Schunk.py │ ├── profile_gym_functionality_Schunk_IK.py │ └── test_safety_shield.py ├── environments │ ├── __init__.py │ ├── gym_envs │ │ ├── __init__.py │ │ └── make_gym.py │ └── manipulation │ │ ├── __init__.py │ │ ├── collaborative_hammering_cartesian_env.py │ │ ├── collaborative_lifting_cartesian_env.py │ │ ├── collaborative_stacking_cartesian_env.py │ │ ├── human_env.py │ │ ├── human_object_inspection_cartesian_env.py │ │ ├── human_robot_handover_cartesian_env.py │ │ ├── pick_place_close_human_cartesian_env.py │ │ ├── pick_place_human_cartesian_env.py │ │ ├── pick_place_pointing_human_cartesian_env.py │ │ ├── 
reach_human_cartesian_env.py │ │ ├── reach_human_env.py │ │ └── robot_human_handover_cartesian_env.py ├── models │ ├── __init__.py │ ├── assets │ │ ├── arenas │ │ │ └── table_arena.xml │ │ ├── human │ │ │ ├── README.md │ │ │ ├── arm.xml │ │ │ ├── common │ │ │ │ ├── materials.xml │ │ │ │ ├── sky1.png │ │ │ │ └── skybox.xml │ │ │ ├── etc │ │ │ │ └── model.pdf │ │ │ ├── hand.xml │ │ │ ├── human.xml │ │ │ ├── humanoid_smpl_neutral_mesh.xml │ │ │ ├── humanoid_smpl_neutral_mesh_all.xml │ │ │ ├── humanoid_smpl_neutral_mesh_all_step.xml │ │ │ ├── humanoid_smpl_neutral_mesh_all_vis.xml │ │ │ ├── humanoid_smpl_neutral_mesh_vis.xml │ │ │ ├── meshes │ │ │ │ ├── Chest.stl │ │ │ │ ├── Head.stl │ │ │ │ ├── Hips.stl │ │ │ │ ├── L_Ankle.stl │ │ │ │ ├── L_Elbow.stl │ │ │ │ ├── L_Hand.stl │ │ │ │ ├── L_Hip.stl │ │ │ │ ├── L_Knee.stl │ │ │ │ ├── L_Shoulder.stl │ │ │ │ ├── L_Thorax.stl │ │ │ │ ├── L_Toe.stl │ │ │ │ ├── L_Wrist.stl │ │ │ │ ├── LeftArm.stl │ │ │ │ ├── LeftChest.stl │ │ │ │ ├── LeftFoot.stl │ │ │ │ ├── LeftHand.stl │ │ │ │ ├── LeftHand_corrected.stl │ │ │ │ ├── LeftLeg.stl │ │ │ │ ├── LeftShoulder.stl │ │ │ │ ├── LeftToe.stl │ │ │ │ ├── LeftUpLeg.stl │ │ │ │ ├── LeftWrist.stl │ │ │ │ ├── Mouth.stl │ │ │ │ ├── Neck.stl │ │ │ │ ├── Pelvis.stl │ │ │ │ ├── R_Ankle.stl │ │ │ │ ├── R_Elbow.stl │ │ │ │ ├── R_Hand.stl │ │ │ │ ├── R_Hip.stl │ │ │ │ ├── R_Knee.stl │ │ │ │ ├── R_Shoulder.stl │ │ │ │ ├── R_Thorax.stl │ │ │ │ ├── R_Toe.stl │ │ │ │ ├── R_Wrist.stl │ │ │ │ ├── RightArm.stl │ │ │ │ ├── RightChest.stl │ │ │ │ ├── RightFoot.stl │ │ │ │ ├── RightHand.stl │ │ │ │ ├── RightLeg.stl │ │ │ │ ├── RightShoulder.stl │ │ │ │ ├── RightToe.stl │ │ │ │ ├── RightUpLeg.stl │ │ │ │ ├── RightWrist.stl │ │ │ │ ├── Spine.stl │ │ │ │ ├── Spine1.stl │ │ │ │ ├── Spine2.stl │ │ │ │ └── Torso.stl │ │ │ ├── simple_arm.xml │ │ │ └── template │ │ │ │ └── humanoid_template.xml │ │ ├── objects │ │ │ └── nail.xml │ │ ├── robots │ │ │ └── schunk │ │ │ │ ├── meshes │ │ │ │ ├── arm_1_link.stl │ │ │ │ ├── arm_1_link_small.stl │ │ │ │ ├── arm_2_link.stl │ │ │ │ ├── arm_3_link.stl │ │ │ │ ├── arm_3_link_small.stl │ │ │ │ ├── arm_4_link.stl │ │ │ │ ├── arm_4_link_collision.stl │ │ │ │ ├── arm_4_link_low_poly.stl │ │ │ │ ├── arm_5_link.stl │ │ │ │ ├── arm_5_link_small.stl │ │ │ │ ├── arm_6_link.stl │ │ │ │ ├── arm_6_link_low_poly.stl │ │ │ │ └── arm_base_link.stl │ │ │ │ ├── robot.urdf │ │ │ │ ├── robot.xml │ │ │ │ ├── robot_pybullet.urdf │ │ │ │ └── schunk.urdf │ │ └── textures │ │ │ ├── blue-shirt.png │ │ │ ├── blue-wood.png │ │ │ ├── brass-ambra.png │ │ │ ├── bread.png │ │ │ ├── can.png │ │ │ ├── ceramic.png │ │ │ ├── cereal.png │ │ │ ├── clay.png │ │ │ ├── cream-plaster.png │ │ │ ├── dark-wood.png │ │ │ ├── dirt.png │ │ │ ├── glass.png │ │ │ ├── gray-felt.png │ │ │ ├── gray-plaster.png │ │ │ ├── gray-woodgrain.png │ │ │ ├── green-shirt.png │ │ │ ├── green-wood.png │ │ │ ├── jeans.png │ │ │ ├── lemon.png │ │ │ ├── light-wood.png │ │ │ ├── metal.png │ │ │ ├── pink-plaster.png │ │ │ ├── plaster-wall-4k.jpg │ │ │ ├── plywood-4k.jpg │ │ │ ├── red-shirt.png │ │ │ ├── red-wood.png │ │ │ ├── skin.png │ │ │ ├── skin2.jpg │ │ │ ├── skin2.png │ │ │ ├── steel-brushed.png │ │ │ ├── steel-scratched.png │ │ │ ├── white-bricks.png │ │ │ ├── white-plaster.png │ │ │ ├── wood-tiles.png │ │ │ ├── wood-varnished-panels.png │ │ │ └── yellow-plaster.png │ ├── grippers │ │ ├── __init__.py │ │ └── rethink_valid_gripper.py │ ├── objects │ │ ├── __init__.py │ │ ├── human │ │ │ ├── __init__.py │ │ │ ├── c3d_info.txt │ │ │ └── human.py │ │ └── obstacle.py │ 
└── robots │ │ ├── __init__.py │ │ ├── config │ │ ├── iiwa.json │ │ ├── jaco.json │ │ ├── kinova3.json │ │ ├── panda.json │ │ ├── sawyer.json │ │ ├── schunk.json │ │ └── ur5e.json │ │ └── manipulators │ │ ├── __init__.py │ │ ├── panda_robot_zero.py │ │ ├── pinocchio_manipulator_model.py │ │ └── schunk_robot.py ├── robots │ └── __init__.py ├── training │ ├── __init__.py │ ├── config │ │ ├── algorithm │ │ │ ├── ppo.yaml │ │ │ ├── sac.yaml │ │ │ ├── sac_her.yaml │ │ │ └── sb3_default │ │ │ │ ├── ppo.yaml │ │ │ │ └── sac.yaml │ │ ├── collaborative_lifting_cart_expert_data_collection.yaml │ │ ├── consistent_seeding.yaml │ │ ├── environment │ │ │ ├── collaborative_hammering_cart.yaml │ │ │ ├── collaborative_lifting_cart.yaml │ │ │ ├── collaborative_stacking_cart.yaml │ │ │ ├── default │ │ │ │ ├── collaborative_hammering_cart.yaml │ │ │ │ ├── collaborative_lifting_cart.yaml │ │ │ │ ├── collaborative_stacking_cart.yaml │ │ │ │ ├── human_env.yaml │ │ │ │ ├── human_object_inspection_cart.yaml │ │ │ │ ├── human_robot_handover_cart.yaml │ │ │ │ ├── pick_place_human_cart.yaml │ │ │ │ ├── pick_place_pointing_human_cart.yaml │ │ │ │ ├── reach_human.yaml │ │ │ │ ├── reach_human_cart.yaml │ │ │ │ └── robot_human_handover_cart.yaml │ │ │ ├── human_object_inspection_cart.yaml │ │ │ ├── human_robot_handover_cart.yaml │ │ │ ├── pick_place_human_cart.yaml │ │ │ ├── pick_place_pointing_human_cart.yaml │ │ │ ├── reach_human.yaml │ │ │ ├── reach_human_cart.yaml │ │ │ └── robot_human_handover_cart.yaml │ │ ├── eval.yaml │ │ ├── expert │ │ │ ├── collaborative_lifting_cart.yaml │ │ │ ├── default │ │ │ │ ├── collaborative_lifting_cart.yaml │ │ │ │ ├── pick_place_human_cart.yaml │ │ │ │ ├── reach_human.yaml │ │ │ │ └── reach_human_cart.yaml │ │ │ ├── pick_place_human_cart.yaml │ │ │ ├── reach_human.yaml │ │ │ └── reach_human_cart.yaml │ │ ├── human_pick_place_sac_action_imitation.yaml │ │ ├── human_pick_place_sac_state_imitation.yaml │ │ ├── human_reach_ppo_parallel.yaml │ │ ├── human_reach_sac.yaml │ │ ├── human_reach_sac_her.yaml │ │ ├── pick_place_human_cart_expert_data_collection.yaml │ │ ├── reach_human_cart_expert_data_collection.yaml │ │ ├── reach_human_expert_data_collection.yaml │ │ ├── robot │ │ │ ├── iiwa.yaml │ │ │ ├── jaco.yaml │ │ │ ├── kinova3.yaml │ │ │ ├── panda.yaml │ │ │ ├── sawyer.yaml │ │ │ ├── schunk.yaml │ │ │ └── ur5e.yaml │ │ ├── run │ │ │ ├── default_training.yaml │ │ │ ├── expert_imitation_training.yaml │ │ │ ├── her_training.yaml │ │ │ └── parallel_training.yaml │ │ ├── wandb_run │ │ │ └── default_wandb.yaml │ │ └── wrappers │ │ │ ├── action_based_expert_imitation_reward │ │ │ ├── default_cart_action_based_expert_imitation_reward.yaml │ │ │ └── default_joint_action_based_expert_imitation_reward.yaml │ │ │ ├── collision_prevention │ │ │ └── default_collision_prevention.yaml │ │ │ ├── dataset_obs_norm │ │ │ └── default_dataset_obs_norm.yaml │ │ │ ├── default_wrappers.yaml │ │ │ ├── ik_position_delta │ │ │ └── default_ik_position_delta.yaml │ │ │ ├── safe.yaml │ │ │ ├── safe_ik.yaml │ │ │ ├── safe_ik_action_imitation.yaml │ │ │ ├── state_based_expert_imitation_reward │ │ │ ├── collaborative_lifting_cart_state_based_expert_imitation_reward.yaml │ │ │ ├── default_state_based_expert_imitation_reward.yaml │ │ │ ├── pick_place_human_cart_state_based_expert_imitation_reward.yaml │ │ │ └── reach_human_cart_state_based_expert_imitation_reward.yaml │ │ │ └── visualization │ │ │ └── default_visualization.yaml │ ├── config_icra_2024 │ │ └── environment_evaluation │ │ │ ├── dataset_creation │ │ │ ├── 
CL.yaml │ │ │ ├── CS.yaml │ │ │ ├── HRH.yaml │ │ │ ├── PP.yaml │ │ │ ├── R.yaml │ │ │ └── RHH.yaml │ │ │ ├── evaluation │ │ │ ├── CL.yaml │ │ │ ├── CS.yaml │ │ │ ├── HRH.yaml │ │ │ ├── PP.yaml │ │ │ ├── R.yaml │ │ │ └── RHH.yaml │ │ │ └── training │ │ │ ├── CL-AIR.yaml │ │ │ ├── CL-RSI.yaml │ │ │ ├── CL-SAC.yaml │ │ │ ├── CL-SIR.yaml │ │ │ ├── CS-AIR.yaml │ │ │ ├── CS-RSI.yaml │ │ │ ├── CS-SAC.yaml │ │ │ ├── CS-SIR.yaml │ │ │ ├── HRH-AIR.yaml │ │ │ ├── HRH-RSI.yaml │ │ │ ├── HRH-SAC.yaml │ │ │ ├── HRH-SIR.yaml │ │ │ ├── PP-AIR.yaml │ │ │ ├── PP-RSI.yaml │ │ │ ├── PP-SAC.yaml │ │ │ ├── PP-SIR.yaml │ │ │ ├── R-AIR.yaml │ │ │ ├── R-RSI.yaml │ │ │ ├── R-SAC.yaml │ │ │ ├── R-SIR.yaml │ │ │ ├── RHH-AIR.yaml │ │ │ ├── RHH-RSI.yaml │ │ │ ├── RHH-SAC.yaml │ │ │ └── RHH-SIR.yaml │ ├── create_expert_dataset.py │ ├── evaluate_models_to_csv_SB3.py │ ├── icra_2024_animation_overfitting.sh │ ├── icra_2024_environment_evaluation.sh │ ├── icra_2024_run_experiments.sh │ ├── playback_recorded_episode.py │ └── train_SB3.py ├── utils │ ├── __init__.py │ ├── animation_utils.py │ ├── bvh_rotate_palm_up.py │ ├── cart_keyboard_controller.py │ ├── config_utils.py │ ├── convert_bvh.py │ ├── data_pipeline.py │ ├── env_util.py │ ├── env_util_SB3.py │ ├── errors.py │ ├── expert_imitation_reward_utils.py │ ├── mjcf_utils.py │ ├── ou_process.py │ ├── pairing.py │ ├── pinocchio_utils.py │ ├── spatial.py │ ├── training_utils.py │ ├── training_utils_SB3.py │ └── visualization.py └── wrappers │ ├── HER_buffer_add_monkey_patch.py │ ├── __init__.py │ ├── action_based_expert_imitation_reward_wrapper.py │ ├── collision_prevention_wrapper.py │ ├── dataset_collection_wrapper.py │ ├── dataset_wrapper.py │ ├── expert_obs_wrapper.py │ ├── goal_env_wrapper.py │ ├── ik_position_delta_wrapper.py │ ├── normalized_box_env.py │ ├── state_based_expert_imitation_reward_wrapper.py │ ├── time_limit.py │ └── visualization_wrapper.py ├── icra_2024_run_experiments_in_docker.sh ├── notes_docker.txt ├── remove_all_build_files.sh ├── requirements.txt ├── results_ICRA2024 ├── README.md ├── data │ ├── anim_time │ │ ├── animation_time.csv │ │ └── animation_times.csv │ ├── eval │ │ ├── cl │ │ │ └── stats.csv │ │ ├── cl_split_short_long_comp │ │ │ └── stats.csv │ │ ├── cl_splits │ │ │ ├── ablation_summary.csv │ │ │ ├── ep_len_conf.csv │ │ │ ├── ep_len_mean.csv │ │ │ ├── ep_len_total.csv │ │ │ ├── ep_rew_conf.csv │ │ │ ├── ep_rew_mean.csv │ │ │ ├── ep_rew_total.csv │ │ │ ├── ep_rew_total_avg.csv │ │ │ ├── success_conf.csv │ │ │ ├── success_mean.csv │ │ │ ├── success_total.csv │ │ │ └── success_total_avg.csv │ │ ├── cs │ │ │ └── stats.csv │ │ ├── pp │ │ │ └── stats.csv │ │ ├── rhh │ │ │ └── stats.csv │ │ ├── rhh_rsi_res │ │ │ └── stats.csv │ │ └── rhh_rsi_res_long │ │ │ └── stats.csv │ ├── eval_train │ │ ├── cl │ │ │ ├── air │ │ │ │ └── stats.csv │ │ │ ├── sac │ │ │ │ └── stats.csv │ │ │ ├── sac_rsi │ │ │ │ └── stats.csv │ │ │ └── sir │ │ │ │ └── stats.csv │ │ ├── cs │ │ │ ├── air │ │ │ │ └── stats.csv │ │ │ ├── sac │ │ │ │ └── stats.csv │ │ │ ├── sac_rsi │ │ │ │ └── stats.csv │ │ │ └── sir │ │ │ │ └── stats.csv │ │ ├── hrh │ │ │ ├── air │ │ │ │ └── stats.csv │ │ │ ├── air_resetting │ │ │ │ └── stats.csv │ │ │ ├── sac │ │ │ │ └── stats.csv │ │ │ ├── sac_rsi │ │ │ │ └── stats.csv │ │ │ └── sir │ │ │ │ └── stats.csv │ │ ├── pp │ │ │ ├── air │ │ │ │ └── stats.csv │ │ │ ├── sac │ │ │ │ └── stats.csv │ │ │ ├── sac_rsi │ │ │ │ └── stats.csv │ │ │ └── sir │ │ │ │ └── stats.csv │ │ ├── reach │ │ │ ├── air │ │ │ │ └── stats.csv │ │ │ ├── sac │ │ │ │ └── stats.csv │ 
│ │ ├── sac_rsi │ │ │ │ └── stats.csv │ │ │ └── sir │ │ │ │ └── stats.csv │ │ └── rhh │ │ │ ├── air │ │ │ └── stats.csv │ │ │ ├── air_resetting │ │ │ └── stats.csv │ │ │ ├── sac │ │ │ └── stats.csv │ │ │ ├── sac_rsi │ │ │ └── stats.csv │ │ │ └── sir │ │ │ └── stats.csv │ └── train │ │ ├── cl │ │ ├── air │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── expert │ │ │ └── stats.csv │ │ ├── sac │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── sac_rsi │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ └── sir │ │ │ └── stats │ │ │ ├── ep_env_rew_mean.csv │ │ │ ├── ep_len_mean.csv │ │ │ └── n_goal_reached.csv │ │ ├── cs │ │ ├── air │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── expert │ │ │ └── stats.csv │ │ ├── sac │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── sac_rsi │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ └── sir │ │ │ └── stats │ │ │ ├── ep_env_rew_mean.csv │ │ │ ├── ep_len_mean.csv │ │ │ └── n_goal_reached.csv │ │ ├── hrh │ │ ├── air │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── air_resetting │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── expert │ │ │ └── stats.csv │ │ ├── sac │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── sac_rsi │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ └── sir │ │ │ └── stats │ │ │ ├── ep_env_rew_mean.csv │ │ │ ├── ep_len_mean.csv │ │ │ └── n_goal_reached.csv │ │ ├── pp │ │ ├── air │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── air_no_step │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── air_tanh │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── expert │ │ │ └── stats.csv │ │ ├── sac │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── sac_rsi │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ ├── sir │ │ │ └── stats │ │ │ │ ├── ep_env_rew_mean.csv │ │ │ │ ├── ep_len_mean.csv │ │ │ │ └── n_goal_reached.csv │ │ └── sir_tanh │ │ │ └── stats │ │ │ ├── ep_env_rew_mean.csv │ │ │ ├── ep_len_mean.csv │ │ │ └── n_goal_reached.csv │ │ ├── reach │ │ └── expert │ │ │ └── stats.csv │ │ └── rhh │ │ ├── air │ │ └── stats │ │ │ ├── ep_env_rew_mean.csv │ │ │ ├── ep_len_mean.csv │ │ │ └── n_goal_reached.csv │ │ ├── air_resetting │ │ └── stats │ │ │ ├── ep_env_rew_mean.csv │ │ │ ├── ep_len_mean.csv │ │ │ └── n_goal_reached.csv │ │ ├── air_resetting_norsi │ │ └── stats │ │ │ ├── ep_env_rew_mean.csv │ │ │ ├── ep_len_mean.csv │ │ │ └── n_goal_reached.csv │ │ ├── expert │ │ └── stats.csv │ │ ├── sac │ │ └── stats │ │ │ ├── ep_env_rew_mean.csv │ │ │ ├── ep_len_mean.csv │ │ │ └── n_goal_reached.csv │ │ ├── sac_rsi │ │ └── stats │ │ │ ├── ep_env_rew_mean.csv │ │ │ ├── ep_len_mean.csv │ │ │ └── n_goal_reached.csv │ │ └── 
sir │ │ └── stats │ │ ├── ep_env_rew_mean.csv │ │ ├── ep_len_mean.csv │ │ └── n_goal_reached.csv ├── generate_files.py ├── output │ ├── ablation_study.pdf │ ├── ablation_study.tex │ ├── legend1.pdf │ ├── legend1.tex │ ├── reward_cl_train.pdf │ ├── reward_cl_train.tex │ ├── reward_cs_train.pdf │ ├── reward_cs_train.tex │ ├── reward_hrh_train.pdf │ ├── reward_hrh_train.tex │ ├── reward_pp_train.pdf │ ├── reward_pp_train.tex │ ├── reward_reach_train.pdf │ ├── reward_reach_train.tex │ ├── reward_rhh_train.pdf │ ├── reward_rhh_train.tex │ ├── success_cl_train.pdf │ ├── success_cl_train.tex │ ├── success_cs_train.pdf │ ├── success_cs_train.tex │ ├── success_hrh_train.pdf │ ├── success_hrh_train.tex │ ├── success_pp_train.pdf │ ├── success_pp_train.tex │ ├── success_reach_train.pdf │ ├── success_reach_train.tex │ ├── success_rhh_train.pdf │ └── success_rhh_train.tex ├── plotstyle.tex ├── run_ablation_study.sh ├── run_reward_plots_train.sh └── run_success_plots_train.sh ├── run_docker.sh ├── run_docker_build.sh ├── run_docker_train.sh ├── setup.cfg └── setup.py /.dockerignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | devel/ 3 | build/ 4 | /models 5 | runs/ 6 | wandb/ 7 | __pycache__/ 8 | *.py[cod] 9 | *.egg-info/ 10 | *.egg 11 | MUJOCO_LOG.TXT 12 | .gittoken -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = 3 | .git, 4 | __pycache__, 5 | build, 6 | dist, 7 | human_robot_gym/controllers/failsafe_controller/sara-shield 8 | human_robot_gym/models/assets/human/animations/human-robot-animations 9 | max-line-length = 120 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | devel/ 3 | build/ 4 | /models 5 | outputs/ 6 | runs/ 7 | wandb/ 8 | __pycache__/ 9 | *.py[cod] 10 | *.egg-info/ 11 | *.egg 12 | MUJOCO_LOG.TXT 13 | .gittoken 14 | docs/source/autoapi/ 15 | outputs/ 16 | multirun/ 17 | /datasets/ 18 | *.out 19 | videos/ 20 | csv/ 21 | my_models/ 22 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "human_robot_gym/controllers/failsafe_controller/sara-shield"] 2 | path = human_robot_gym/controllers/failsafe_controller/sara-shield 3 | url = https://github.com/JakobThumm/sara-shield.git 4 | branch = main 5 | [submodule "human_robot_gym/models/assets/human/animations/human-robot-animations"] 6 | path = human_robot_gym/models/assets/human/animations/human-robot-animations 7 | url = https://github.com/JakobThumm/human-robot-animations.git 8 | -------------------------------------------------------------------------------- /.pydocstyle: -------------------------------------------------------------------------------- 1 | [pydocstyle] 2 | # Codes: http://www.pydocstyle.org/en/stable/error_codes.html 3 | # select = D101, D102, D103, D105, D106, D107, D201, D202, D204, D207, D208, D209, D210, D211, D214, D215, D300, D301, D417 4 | max-line-length = 120 5 | match-dir = '[^external].*' -------------------------------------------------------------------------------- /Dockerfile.build: -------------------------------------------------------------------------------- 1 | FROM continuumio/miniconda3 2 | 3 | # Avoid warnings by switching 
to noninteractive 4 | ENV DEBIAN_FRONTEND=noninteractive 5 | # setup environment 6 | ENV LANG C.UTF-8 7 | ENV LC_ALL C.UTF-8 8 | ## Install apt packages 9 | RUN apt-get update --fix-missing \ 10 | && apt-get install --no-install-recommends -y \ 11 | tzdata \ 12 | dirmngr \ 13 | gnupg2 \ 14 | psmisc \ 15 | python3.6 \ 16 | python3-pip \ 17 | python-is-python3 \ 18 | mpich \ 19 | python3-tk \ 20 | python3-dev \ 21 | libosmesa6-dev \ 22 | libgl1-mesa-glx \ 23 | libglfw3 \ 24 | apt-utils dialog 2>&1 \ 25 | git \ 26 | iproute2 \ 27 | procps \ 28 | lsb-release \ 29 | nano \ 30 | libopenmpi-dev \ 31 | swig \ 32 | wget \ 33 | ca-certificates \ 34 | curl \ 35 | git \ 36 | bzip2 \ 37 | sudo \ 38 | cmake \ 39 | build-essential \ 40 | tar \ 41 | unzip \ 42 | curl \ 43 | g++ \ 44 | gcc-9 \ 45 | clang \ 46 | libgtest-dev \ 47 | libgmock-dev \ 48 | patchelf \ 49 | ssh \ 50 | curl \ 51 | liboctomap-dev \ 52 | python3-sphinx 53 | 54 | # Create the environment: 55 | COPY environment.yml . 56 | RUN conda env create -f environment.yml 57 | 58 | # Clean up 59 | RUN apt-get autoremove -y \ 60 | && apt-get autoremove \ 61 | && apt-get clean -y \ 62 | && rm -rf /var/lib/apt/lists/* 63 | 64 | # Switch back to dialog for any ad-hoc use of apt-get 65 | ENV DEBIAN_FRONTEND=dialog 66 | 67 | COPY .bashrc_docker /root/.bashrc 68 | RUN echo "conda activate hrgym" >> /root/.bashrc 69 | 70 | # Get Eigen3.4 71 | RUN mkdir /usr/include/eigen3/ 72 | WORKDIR /usr/include/eigen3/ 73 | RUN curl -LJO https://gitlab.com/libeigen/eigen/-/archive/3.4.0/eigen-3.4.0.tar.bz2 74 | RUN tar -xvf eigen-3.4.0.tar.bz2 75 | RUN rm eigen-3.4.0.tar.bz2 76 | RUN echo export CMAKE_PREFIX_PATH="/usr/include/eigen3/eigen-3.4.0" >> /root/.bashrc 77 | RUN echo export EIGEN3_INCLUDE_DIR="/usr/include/eigen3/eigen-3.4.0" >> /root/.bashrc 78 | WORKDIR /usr/include/eigen3/eigen-3.4.0/ 79 | RUN mkdir build 80 | WORKDIR /usr/include/eigen3/eigen-3.4.0/build 81 | RUN cmake .. && make install 82 | WORKDIR / -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include human_robot_gym/models/assets/ * 2 | global-include *.yaml 3 | global-include *.json -------------------------------------------------------------------------------- /build_docker.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # Run this file either with ./build_docker.sh user or ./build_docker.sh root. 3 | # User mode will create a user in docker so that the files you create are not owned by root. 4 | # Root mode creates a "classic" root user in docker. Use that power at your own risk. 5 | user=${1:-user} 6 | echo "Chosen user mode: $user" 7 | if [ "$user" = "root" ] 8 | then 9 | docker build \ 10 | -f Dockerfile.dev \ 11 | --build-arg MODE=root \ 12 | -t human-robot-gym/$USER:v1 . 13 | elif [ "$user" = "user" ] 14 | then 15 | docker build \ 16 | -f Dockerfile.dev \ 17 | --build-arg MODE=user \ 18 | --build-arg USER_UID=$(id -u) \ 19 | --build-arg USER_GID=$(id -g) \ 20 | --build-arg USERNAME=$USER \ 21 | -t human-robot-gym/$USER:v1 . 22 | else 23 | echo "User mode unknown. 
Please choose user, root, or leave it out for the default user" 24 | fi 25 | 26 | 27 | -------------------------------------------------------------------------------- /build_docker_build.sh: -------------------------------------------------------------------------------- 1 | docker build \ 2 | -f Dockerfile.build \ 3 | -t human-robot-gym-build/$USER:v1 . 4 | -------------------------------------------------------------------------------- /build_docker_train.sh: -------------------------------------------------------------------------------- 1 | # Run this file either with `./build_docker_train.sh user` or `./build_docker_train.sh root`. 2 | # User mode will create a user in the Docker image so that the files you create are not owned by root. 3 | # Root mode creates a "classic" root user in docker. 4 | 5 | user=${1:-user} 6 | echo "Chosen mode: $user" 7 | 8 | if [ "$user" = "root" ] 9 | then 10 | DOCKER_BUILDKIT=1 docker build \ 11 | -f Dockerfile.train \ 12 | --build-arg MODE=root \ 13 | -t human-robot-gym-train/root:v2 . 14 | elif [ "$user" = "user" ] 15 | then 16 | DOCKER_BUILDKIT=1 docker build \ 17 | -f Dockerfile.train \ 18 | --build-arg MODE=user \ 19 | --build-arg USER_UID=$(id -u) \ 20 | --build-arg USER_GID=$(id -g) \ 21 | --build-arg USERNAME=$USER \ 22 | -t human-robot-gym-train/$USER:v2 . 23 | else 24 | echo "User mode unknown. Please choose user, root, or leave it out for the default user" 25 | fi 26 | -------------------------------------------------------------------------------- /clear_builds.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # This file clears all build files. 3 | 4 | find ./* -depth -name "build" -type d -exec rm -rf "{}" \; 5 | find ./* -depth -name ".egg-info" -type d -exec rm -rf "{}" \; 6 | find human_robot_gym/* -depth -name "__pycache__" -type d -exec rm -rf "{}" \; -------------------------------------------------------------------------------- /config.toml: -------------------------------------------------------------------------------- 1 | concurrent = 1 2 | check_interval = 0 3 | 4 | [session_server] 5 | session_timeout = 1800 6 | 7 | [[runners]] 8 | name = "hrgym-dind-runner" 9 | url = "https://gitlab.lrz.de" 10 | token = "GR1348941Eb1zzyhSA8rA6xHhhsM5" 11 | executor = "docker" 12 | [runners.custom_build_dir] 13 | enabled = true 14 | [runners.cache] 15 | [runners.cache.s3] 16 | [runners.cache.gcs] 17 | Insecure = false 18 | [runners.docker] 19 | tls_verify = false 20 | image = "docker:stable" 21 | privileged = true 22 | disable_entrypoint_overwrite = false 23 | oom_kill_disable = false 24 | disable_cache = false 25 | volumes = ["/certs/client", "/cache", "/var/run/docker.sock:/var/run/docker.sock"] 26 | shm_size = 0 27 | -------------------------------------------------------------------------------- /doc/img/example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/doc/img/example.png -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 
6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/source/_static/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/docs/source/_static/.gitkeep -------------------------------------------------------------------------------- /docs/source/_templates/autoapi/index.rst: -------------------------------------------------------------------------------- 1 | API Reference 2 | ============= 3 | 4 | This page contains auto-generated API reference documentation [#f1]_. 5 | 6 | .. toctree:: 7 | :titlesonly: 8 | 9 | {% for page in pages %} 10 | {% if page.top_level_object and page.display %} 11 | {{ page.include_path }} 12 | {% endif %} 13 | {% endfor %} 14 | 15 | .. [#f1] Created with `sphinx-autoapi `_ 16 | -------------------------------------------------------------------------------- /docs/source/_templates/autoapi/python/attribute.rst: -------------------------------------------------------------------------------- 1 | {% extends "python/data.rst" %} 2 | -------------------------------------------------------------------------------- /docs/source/_templates/autoapi/python/class.rst: -------------------------------------------------------------------------------- 1 | {% if obj.display %} 2 | .. 
py:{{ obj.type }}:: {{ obj.short_name }}{% if obj.args %}({{ obj.args }}){% endif %} 3 | {% for (args, return_annotation) in obj.overloads %} 4 | {{ " " * (obj.type | length) }} {{ obj.short_name }}{% if args %}({{ args }}){% endif %} 5 | {% endfor %} 6 | 7 | 8 | {% if obj.bases %} 9 | {% if "show-inheritance" in autoapi_options %} 10 | Bases: {% for base in obj.bases %}{{ base|link_objs }}{% if not loop.last %}, {% endif %}{% endfor %} 11 | {% endif %} 12 | 13 | 14 | {% if "show-inheritance-diagram" in autoapi_options and obj.bases != ["object"] %} 15 | .. autoapi-inheritance-diagram:: {{ obj.obj["full_name"] }} 16 | :parts: 1 17 | {% if "private-members" in autoapi_options %} 18 | :private-bases: 19 | {% endif %} 20 | 21 | {% endif %} 22 | {% endif %} 23 | {% if obj.docstring %} 24 | {{ obj.docstring|indent(3) }} 25 | {% endif %} 26 | {% if "inherited-members" in autoapi_options %} 27 | {% set visible_classes = obj.classes|selectattr("display")|list %} 28 | {% else %} 29 | {% set visible_classes = obj.classes|rejectattr("inherited")|selectattr("display")|list %} 30 | {% endif %} 31 | {% for klass in visible_classes %} 32 | {{ klass.render()|indent(3) }} 33 | {% endfor %} 34 | {% if "inherited-members" in autoapi_options %} 35 | {% set visible_properties = obj.properties|selectattr("display")|list %} 36 | {% else %} 37 | {% set visible_properties = obj.properties|rejectattr("inherited")|selectattr("display")|list %} 38 | {% endif %} 39 | {% for property in visible_properties %} 40 | {{ property.render()|indent(3) }} 41 | {% endfor %} 42 | {% if "inherited-members" in autoapi_options %} 43 | {% set visible_attributes = obj.attributes|selectattr("display")|list %} 44 | {% else %} 45 | {% set visible_attributes = obj.attributes|rejectattr("inherited")|selectattr("display")|list %} 46 | {% endif %} 47 | {% for attribute in visible_attributes %} 48 | {{ attribute.render()|indent(3) }} 49 | {% endfor %} 50 | {% if "inherited-members" in autoapi_options %} 51 | {% set visible_methods = obj.methods|selectattr("display")|list %} 52 | {% else %} 53 | {% set visible_methods = obj.methods|rejectattr("inherited")|selectattr("display")|list %} 54 | {% endif %} 55 | {% for method in visible_methods %} 56 | {{ method.render()|indent(3) }} 57 | {% endfor %} 58 | {% endif %} 59 | -------------------------------------------------------------------------------- /docs/source/_templates/autoapi/python/data.rst: -------------------------------------------------------------------------------- 1 | {% if obj.display %} 2 | .. py:{{ obj.type }}:: {{ obj.name }} 3 | {%- if obj.annotation is not none %} 4 | 5 | :type: {%- if obj.annotation %} {{ obj.annotation }}{%- endif %} 6 | 7 | {%- endif %} 8 | 9 | {%- if obj.value is not none %} 10 | 11 | :value: {% if obj.value is string and obj.value.splitlines()|count > 1 -%} 12 | Multiline-String 13 | 14 | .. raw:: html 15 | 16 |
<details><summary>Show Value</summary> 17 | 18 | .. code-block:: python 19 | 20 | """{{ obj.value|indent(width=8,blank=true) }}""" 21 | 22 | .. raw:: html 23 | 24 | </details>
25 | 26 | {%- else -%} 27 | {%- if obj.value is string -%} 28 | {{ "%r" % obj.value|string|truncate(100) }} 29 | {%- else -%} 30 | {{ obj.value|string|truncate(100) }} 31 | {%- endif -%} 32 | {%- endif %} 33 | {%- endif %} 34 | 35 | 36 | {{ obj.docstring|indent(3) }} 37 | {% endif %} 38 | -------------------------------------------------------------------------------- /docs/source/_templates/autoapi/python/exception.rst: -------------------------------------------------------------------------------- 1 | {% extends "python/class.rst" %} 2 | -------------------------------------------------------------------------------- /docs/source/_templates/autoapi/python/function.rst: -------------------------------------------------------------------------------- 1 | {% if obj.display %} 2 | .. py:function:: {{ obj.short_name }}({{ obj.args }}){% if obj.return_annotation is not none %} -> {{ obj.return_annotation }}{% endif %} 3 | 4 | {% for (args, return_annotation) in obj.overloads %} 5 | {{ obj.short_name }}({{ args }}){% if return_annotation is not none %} -> {{ return_annotation }}{% endif %} 6 | 7 | {% endfor %} 8 | {% for property in obj.properties %} 9 | :{{ property }}: 10 | {% endfor %} 11 | 12 | {% if obj.docstring %} 13 | {{ obj.docstring|indent(3) }} 14 | {% endif %} 15 | {% endif %} 16 | -------------------------------------------------------------------------------- /docs/source/_templates/autoapi/python/method.rst: -------------------------------------------------------------------------------- 1 | {%- if obj.display %} 2 | .. py:method:: {{ obj.short_name }}({{ obj.args }}){% if obj.return_annotation is not none %} -> {{ obj.return_annotation }}{% endif %} 3 | 4 | {% for (args, return_annotation) in obj.overloads %} 5 | {{ obj.short_name }}({{ args }}){% if return_annotation is not none %} -> {{ return_annotation }}{% endif %} 6 | 7 | {% endfor %} 8 | {% if obj.properties %} 9 | {% for property in obj.properties %} 10 | :{{ property }}: 11 | {% endfor %} 12 | 13 | {% else %} 14 | 15 | {% endif %} 16 | {% if obj.docstring %} 17 | {{ obj.docstring|indent(3) }} 18 | {% endif %} 19 | {% endif %} 20 | -------------------------------------------------------------------------------- /docs/source/_templates/autoapi/python/module.rst: -------------------------------------------------------------------------------- 1 | {% if not obj.display %} 2 | :orphan: 3 | 4 | {% endif %} 5 | :py:mod:`{{ obj.name }}` 6 | =========={{ "=" * obj.name|length }} 7 | 8 | .. py:module:: {{ obj.name }} 9 | 10 | {% if obj.docstring %} 11 | .. autoapi-nested-parse:: 12 | 13 | {{ obj.docstring|indent(3) }} 14 | 15 | {% endif %} 16 | 17 | {% block subpackages %} 18 | {% set visible_subpackages = obj.subpackages|selectattr("display")|list %} 19 | {% if visible_subpackages %} 20 | Subpackages 21 | ----------- 22 | .. toctree:: 23 | :titlesonly: 24 | :maxdepth: 3 25 | 26 | {% for subpackage in visible_subpackages %} 27 | {{ subpackage.short_name }}/index.rst 28 | {% endfor %} 29 | 30 | 31 | {% endif %} 32 | {% endblock %} 33 | {% block submodules %} 34 | {% set visible_submodules = obj.submodules|selectattr("display")|list %} 35 | {% if visible_submodules %} 36 | Submodules 37 | ---------- 38 | .. 
toctree:: 39 | :titlesonly: 40 | :maxdepth: 1 41 | 42 | {% for submodule in visible_submodules %} 43 | {{ submodule.short_name }}/index.rst 44 | {% endfor %} 45 | 46 | 47 | {% endif %} 48 | {% endblock %} 49 | {% block content %} 50 | {% if obj.all is not none %} 51 | {% set visible_children = obj.children|selectattr("short_name", "in", obj.all)|list %} 52 | {% elif obj.type is equalto("package") %} 53 | {% set visible_children = obj.children|selectattr("display")|list %} 54 | {% else %} 55 | {% set visible_children = obj.children|selectattr("display")|rejectattr("imported")|list %} 56 | {% endif %} 57 | {% if visible_children %} 58 | {{ obj.type|title }} Contents 59 | {{ "-" * obj.type|length }}--------- 60 | 61 | {% set visible_classes = visible_children|selectattr("type", "equalto", "class")|list %} 62 | {% set visible_functions = visible_children|selectattr("type", "equalto", "function")|list %} 63 | {% set visible_attributes = visible_children|selectattr("type", "equalto", "data")|list %} 64 | {% if "show-module-summary" in autoapi_options and (visible_classes or visible_functions) %} 65 | {% block classes scoped %} 66 | {% if visible_classes %} 67 | Classes 68 | ~~~~~~~ 69 | 70 | .. autoapisummary:: 71 | 72 | {% for klass in visible_classes %} 73 | {{ klass.id }} 74 | {% endfor %} 75 | 76 | 77 | {% endif %} 78 | {% endblock %} 79 | 80 | {% block functions scoped %} 81 | {% if visible_functions %} 82 | Functions 83 | ~~~~~~~~~ 84 | 85 | .. autoapisummary:: 86 | 87 | {% for function in visible_functions %} 88 | {{ function.id }} 89 | {% endfor %} 90 | 91 | 92 | {% endif %} 93 | {% endblock %} 94 | 95 | {% block attributes scoped %} 96 | {% if visible_attributes %} 97 | Attributes 98 | ~~~~~~~~~~ 99 | 100 | .. autoapisummary:: 101 | 102 | {% for attribute in visible_attributes %} 103 | {{ attribute.id }} 104 | {% endfor %} 105 | 106 | 107 | {% endif %} 108 | {% endblock %} 109 | {% endif %} 110 | {% for obj_item in visible_children %} 111 | {{ obj_item.render()|indent(0) }} 112 | {% endfor %} 113 | {% endif %} 114 | {% endblock %} 115 | -------------------------------------------------------------------------------- /docs/source/_templates/autoapi/python/package.rst: -------------------------------------------------------------------------------- 1 | {% extends "python/module.rst" %} 2 | -------------------------------------------------------------------------------- /docs/source/_templates/autoapi/python/property.rst: -------------------------------------------------------------------------------- 1 | {%- if obj.display %} 2 | .. py:property:: {{ obj.short_name }} 3 | {% if obj.annotation %} 4 | :type: {{ obj.annotation }} 5 | {% endif %} 6 | {% if obj.properties %} 7 | {% for property in obj.properties %} 8 | :{{ property }}: 9 | {% endfor %} 10 | {% endif %} 11 | 12 | {% if obj.docstring %} 13 | {{ obj.docstring|indent(3) }} 14 | {% endif %} 15 | {% endif %} 16 | -------------------------------------------------------------------------------- /docs/source/_templates/custom-class-template.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autoclass:: {{ objname }} 6 | :members: 7 | :show-inheritance: 8 | :inherited-members: 9 | :special-members: __call__, __add__, __mul__ 10 | 11 | {% block methods %} 12 | {% if methods %} 13 | .. rubric:: {{ _('Methods') }} 14 | 15 | .. 
autosummary:: 16 | :nosignatures: 17 | {% for item in methods %} 18 | {%- if not item.startswith('_') %} 19 | ~{{ name }}.{{ item }} 20 | {%- endif -%} 21 | {%- endfor %} 22 | {% endif %} 23 | {% endblock %} 24 | 25 | {% block attributes %} 26 | {% if attributes %} 27 | .. rubric:: {{ _('Attributes') }} 28 | 29 | .. autosummary:: 30 | {% for item in attributes %} 31 | ~{{ name }}.{{ item }} 32 | {%- endfor %} 33 | {% endif %} 34 | {% endblock %} 35 | -------------------------------------------------------------------------------- /docs/source/_templates/custom-module-template.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. automodule:: {{ fullname }} 4 | 5 | {% block attributes %} 6 | {% if attributes %} 7 | .. rubric:: Module attributes 8 | 9 | .. autosummary:: 10 | :toctree: 11 | {% for item in attributes %} 12 | {{ item }} 13 | {%- endfor %} 14 | {% endif %} 15 | {% endblock %} 16 | 17 | {% block functions %} 18 | {% if functions %} 19 | .. rubric:: {{ _('Functions') }} 20 | 21 | .. autosummary:: 22 | :toctree: 23 | :nosignatures: 24 | {% for item in functions %} 25 | {{ item }} 26 | {%- endfor %} 27 | {% endif %} 28 | {% endblock %} 29 | 30 | {% block classes %} 31 | {% if classes %} 32 | .. rubric:: {{ _('Classes') }} 33 | 34 | .. autosummary:: 35 | :toctree: 36 | :template: custom-class-template.rst 37 | :nosignatures: 38 | {% for item in classes %} 39 | {{ item }} 40 | {%- endfor %} 41 | {% endif %} 42 | {% endblock %} 43 | 44 | {% block exceptions %} 45 | {% if exceptions %} 46 | .. rubric:: {{ _('Exceptions') }} 47 | 48 | .. autosummary:: 49 | :toctree: 50 | {% for item in exceptions %} 51 | {{ item }} 52 | {%- endfor %} 53 | {% endif %} 54 | {% endblock %} 55 | 56 | {% block modules %} 57 | {% if modules %} 58 | .. autosummary:: 59 | :toctree: 60 | :template: custom-module-template.rst 61 | :recursive: 62 | {% for item in modules %} 63 | {{ item }} 64 | {%- endfor %} 65 | {% endif %} 66 | {% endblock %} 67 | -------------------------------------------------------------------------------- /docs/source/img/hrgym_demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/docs/source/img/hrgym_demo.png -------------------------------------------------------------------------------- /docs/source/img/hrgym_demo_failsafe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/docs/source/img/hrgym_demo_failsafe.png -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. human-robot-gym documentation master file, created by 2 | sphinx-quickstart on Tue Apr 11 10:57:11 2023. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to human-robot-gym's documentation! 7 | =========================================== 8 | 9 | **human-robot-gym** is a Python library for training and 10 | evaluating reinforcement learning (RL) agents in human-robot 11 | collaboration scenarios. 12 | 13 | First, install the project with the :doc:`installation` section. 14 | 15 | .. 
toctree:: 16 | :maxdepth: 1 17 | :caption: Contents: 18 | 19 | installation 20 | demos 21 | training 22 | safety_shield 23 | benchmark_envs 24 | define_env 25 | wrappers 26 | human_animations 27 | autoapi/index 28 | 29 | 30 | Indices and tables 31 | ================== 32 | 33 | * :ref:`genindex` 34 | * :ref:`modindex` 35 | * :ref:`search` 36 | -------------------------------------------------------------------------------- /docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | 5 | 6 | Clone the repo with submodules 7 | ------------------------------ 8 | 9 | .. code-block:: bash 10 | 11 | git clone --recurse-submodules git@gitlab.lrz.de:cps-rl/human-robot-gym.git 12 | 13 | Install MuJoCo 14 | -------------- 15 | Steps: 16 | 17 | 1. Download the MuJoCo version 2.1 binaries for `Linux `_ or `OSX `_. 18 | 2. Extract the downloaded ``mujoco210`` directory into ``~/.mujoco/mujoco210``. 19 | 20 | If you want to specify a nonstandard location for the package, 21 | use the env variable ``MUJOCO_PY_MUJOCO_PATH``. 22 | 23 | Under Linux, make sure to install: 24 | 25 | .. code-block:: bash 26 | 27 | sudo apt install -y libosmesa6-dev libgl1-mesa-glx libglfw3 libgtest-dev 28 | 29 | 30 | Setup anaconda environment 31 | -------------------------- 32 | If you haven't done so already, `install anaconda `_. 33 | Add ``conda-forge`` to your channels with 34 | 35 | .. code-block:: bash 36 | 37 | conda config --add channels conda-forge 38 | conda config --set channel_priority strict 39 | 40 | and create the ``hrgym`` conda environment: 41 | 42 | .. code-block:: bash 43 | 44 | conda env create -f environment.yml 45 | conda activate hrgym 46 | 47 | All requirements are installed automatically by conda. 48 | 49 | Install the failsafe controller / safety shield 50 | ----------------------------------------------- 51 | The installation requires ``gcc``, ``c++>=17``, and ``Eigen3`` version 3.4 (`download it here `_). 52 | Set this env variable to the path of your Eigen3 installation, e.g., 53 | 54 | .. code-block:: bash 55 | 56 | export EIGEN3_INCLUDE_DIR="/usr/include/eigen3/eigen-3.4.0" 57 | 58 | Now run 59 | 60 | .. code-block:: bash 61 | 62 | cd human-robot-gym/human_robot_gym/controllers/failsafe_controller/sara-shield 63 | pip install -r requirements.txt 64 | python setup.py install 65 | 66 | Install the human-robot-gym 67 | --------------------------- 68 | 69 | .. code-block:: bash 70 | 71 | cd human-robot-gym 72 | pip install -e . 73 | 74 | Add to your ``~/.bashrc`` 75 | ------------------------- 76 | E.g. with ``nano ~/.bashrc``: 77 | 78 | .. code-block:: bash 79 | 80 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/thummj/.mujoco/mujoco210/bin 81 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/nvidia 82 | export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libGLEW.so 83 | -------------------------------------------------------------------------------- /docs/source/safety_shield.rst: -------------------------------------------------------------------------------- 1 | Safety Shield 2 | ============= 3 | 4 | We guarantee provable safety for human-robot collaboration by means of a safety shield. 5 | The shield functionality is described in-depth in `our ICRA'22 paper `_. 
6 | 7 | -------------------------------------------------------------------------------- /docs/source/wrappers.rst: -------------------------------------------------------------------------------- 1 | Environment wrappers 2 | ==================== 3 | 4 | Environment wrappers add functionality to the environment. 5 | Everything that is optional and not part of the core environment should be defined as an environment wrapper. 6 | 7 | Available wrappers 8 | ------------------ 9 | 10 | In addition to the usual gym wrappers, the following wrappers are available: 11 | - ``CollisionPreventionWrapper``: Prevents the agent from colliding with the static environment and itself. 12 | - ``CartActionBasedExpertImitationRewardWrapper``: Adds a reward based on the distance to the expert trajectory. 13 | - ``ExpertObsWrapper``: Assembles observations for scripted expert policies. 14 | - ``GoalEnvironmentGymWrapper``: Enables training with hindsight experience replay (HER) by adding the goal state to the observation. 15 | - ``IKPositionDeltaWrapper``: Enables using Cartesian position deltas as actions. 16 | - ``TimeLimit``: Limits the number of steps per episode. 17 | - ``VisualizationWrapper``: Enables visualization of the environment. 18 | 19 | 20 | Writing your own wrapper 21 | ------------------------ 22 | 23 | Wrappers usually override the ``step()`` and ``reset()`` methods. 24 | You can take a look at the ``IKPositionDeltaWrapper`` for a full example; a minimal sketch of such a wrapper is given below. -------------------------------------------------------------------------------- /entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash --login 2 | # The --login ensures the bash configuration is loaded, 3 | # enabling Conda. 4 | 5 | # Enable strict mode. 6 | set -euo pipefail 7 | # ... Run whatever commands ... 8 | 9 | # Temporarily disable strict mode and activate conda: 10 | set +euo pipefail 11 | conda activate hrgym 12 | 13 | # Re-enable strict mode: 14 | set -euo pipefail 15 | 16 | # exec the final command: 17 | cd /tmp/human-robot-gym/human_robot_gym/controllers/failsafe_controller/sara-shield && sudo rm -rf build safety_shield/build && sudo -E $CONDA_PREFIX/bin/python setup.py install 18 | cd /tmp/human-robot-gym/ && pip install -e . 
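A minimal sketch of a custom wrapper, complementing the ``Writing your own wrapper`` section of ``docs/source/wrappers.rst`` above. It only illustrates the general pattern of overriding ``step()`` and ``reset()`` with the pre-0.26 ``gym`` API (4-tuple ``step()`` return, as required by ``stable_baselines3==1.5.0`` from ``environment.yml``); the class name ``ActionScaleWrapper`` and its ``scale`` parameter are purely illustrative and not part of human-robot-gym — see ``human_robot_gym/wrappers/ik_position_delta_wrapper.py`` for a real implementation.

.. code-block:: python

    import gym
    import numpy as np


    class ActionScaleWrapper(gym.Wrapper):
        """Illustrative wrapper that scales every action before execution."""

        def __init__(self, env: gym.Env, scale: float = 0.5):
            super().__init__(env)
            self.scale = scale

        def reset(self, **kwargs):
            # Forward the reset call; a wrapper may also post-process
            # the returned observation here.
            return self.env.reset(**kwargs)

        def step(self, action):
            # Modify the action before the wrapped environment executes it.
            obs, reward, done, info = self.env.step(np.asarray(action) * self.scale)
            return obs, reward, done, info

Such wrappers nest like any gym wrapper, e.g. ``env = ActionScaleWrapper(GymWrapper(suite.make("ReachHuman", ...)))``.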
19 | 20 | exec "$@" -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: hrgym 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - pip>=22.0.4 6 | - python=3.8 7 | - setuptools==65.5.0 8 | - wheel=0.38.4 # Fix for Issue #160: https://github.com/openai/gym/issues/3202#issuecomment-1513593788 9 | - pip: 10 | # works for regular pip packages 11 | - bvh==0.3 12 | - stable_baselines3==1.5.0 13 | - robosuite==1.3.2 14 | - mujoco-py==2.1.2.14 15 | - opencv_python==4.5.5.64 16 | - pin 17 | - pybullet==3.2.1 18 | - meshcat==0.3.2 19 | - numpy<=1.23.4 20 | - scipy==1.8.0 21 | - numba<=0.53.1 22 | - wandb==0.13.1 23 | - flake8==5.0.4 24 | - black==22.6.0 25 | - patchelf==0.15.0 26 | - h5py==3.7.0 27 | - tensorboard==2.10.0 28 | - pydocstyle==6.1.1 29 | - mypy==0.971 30 | - pytest==7.1.1 31 | - sphinx_rtd_theme 32 | - sphinx-autoapi 33 | - graphviz 34 | - hydra-core==1.3.2 35 | - hydra-ray-launcher==1.2.0 36 | -------------------------------------------------------------------------------- /gitlab_runner.sh: -------------------------------------------------------------------------------- 1 | docker run -d --name human-robot-gym-gitlab-runner --restart always \ 2 | -v /srv/gitlab-runner/config:/etc/gitlab-runner \ 3 | -v /var/run/docker.sock:/var/run/docker.sock \ 4 | gitlab/gitlab-runner:latest 5 | 6 | docker run --rm -it -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register -n \ 7 | --url https://gitlab.lrz.de \ 8 | --registration-token $GITLABTOKEN \ 9 | --executor docker \ 10 | --description "Gitlab runner" \ 11 | --docker-image "docker:19.03.12" \ 12 | --tag-list "docstring,linting,python,test,unittest" \ 13 | --pre-clone-script "sudo apt update && sudo apt install openssh-client" -------------------------------------------------------------------------------- /gitlab_runner_dind.sh: -------------------------------------------------------------------------------- 1 | docker run -d --name human-robot-gym-gitlab-runner-dind --restart always \ 2 | -v /srv/gitlab-runner/config:/etc/gitlab-runner \ 3 | -v /var/run/docker.sock:/var/run/docker.sock \ 4 | gitlab/gitlab-runner:latest 5 | 6 | docker run --rm -it -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register -n \ 7 | --url https://gitlab.lrz.de \ 8 | --registration-token $GITLABTOKEN \ 9 | --executor docker \ 10 | --description "DinD runner" \ 11 | --docker-image "docker:19.03.12" \ 12 | --docker-privileged \ 13 | --docker-volumes "/certs/client" 14 | -------------------------------------------------------------------------------- /human_robot_gym/MUJOCO_LOG.TXT: -------------------------------------------------------------------------------- 1 | Thu Jan 13 18:23:19 2022 2 | ERROR: Rank-defficient Hessian in HessianDirect 3 | 4 | -------------------------------------------------------------------------------- /human_robot_gym/__init__.py: -------------------------------------------------------------------------------- 1 | """This is the human-robot-gym.""" 2 | # flake8: noqa 3 | import os 4 | from robosuite.environments.base import make 5 | 6 | from human_robot_gym.controllers.failsafe_controller.failsafe_controller import ( 7 | FailsafeController, 8 | ) 9 | 10 | from human_robot_gym.environments.manipulation.human_env import HumanEnv 11 | from human_robot_gym.environments.manipulation.reach_human_env import ReachHuman 12 | 13 | from robosuite.environments import 
ALL_ENVIRONMENTS 14 | from robosuite.controllers import ALL_CONTROLLERS, load_controller_config 15 | from robosuite.robots import ALL_ROBOTS 16 | from robosuite.models.grippers import ALL_GRIPPERS 17 | 18 | __version__ = "0.1.0" 19 | __logo__ = """ 20 | 21 | .,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,. 22 | . . 23 | ............. ,(/,,(##.. 24 | .*,,,,(,,,,,( (#(/*,,,,,/* /,*. 25 | .*,,,,(,,,,,( */###(#*(. /,(. 26 | .*,,,,*...... (/*/( //##*. 27 | .*,,,,, *,,,/ * .* . 28 | .*,,,,, *,,,/ .*. . 29 | .*,,,,, *,,,/ . 30 | .*,,,,(,,,,,( *,,,/ . 31 | .*,,,,(,,,,,(,,,/,,,/ . 32 | .*,,,,(,,,,,(,,,/,,,/ . 33 | ..*****,,,,,*,,,*,,,, . 34 | . . 35 | 36 | """ 37 | 38 | human_robot_gym_root = os.path.dirname(__file__) 39 | -------------------------------------------------------------------------------- /human_robot_gym/controllers/__init__.py: -------------------------------------------------------------------------------- 1 | """Custom robot controllers module.""" 2 | -------------------------------------------------------------------------------- /human_robot_gym/controllers/failsafe_controller/.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | -------------------------------------------------------------------------------- /human_robot_gym/controllers/failsafe_controller/__init__.py: -------------------------------------------------------------------------------- 1 | """The failsafe controller module.""" 2 | -------------------------------------------------------------------------------- /human_robot_gym/controllers/failsafe_controller/config/failsafe.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "JOINT_POSITION", 3 | "input_max": 1, 4 | "input_min": -1, 5 | "output_max": 0.2, 6 | "output_min": -0.2, 7 | "kp": 100, 8 | "damping_ratio": 1, 9 | "impedance_mode": "fixed", 10 | "kp_limits": [0, 300], 11 | "damping_ratio_limits": [0, 10], 12 | "qpos_limits": null, 13 | "interpolation": null, 14 | "ramp_ratio": 0.2 15 | } -------------------------------------------------------------------------------- /human_robot_gym/controllers/failsafe_controller/failsafe_controller/__init__.py: -------------------------------------------------------------------------------- 1 | """This package includes all functionality of the controlling part of the failsafe controller. 2 | 3 | This package uses the python bindings of the safety shield. 4 | """ 5 | import safety_shield_py # noqa: F401 6 | from .failsafe_controller import FailsafeController # noqa: F401 7 | -------------------------------------------------------------------------------- /human_robot_gym/controllers/failsafe_controller/failsafe_controller/plot_capsule.py: -------------------------------------------------------------------------------- 1 | """This file describe a capsule object that can be used for plotting. 2 | 3 | Owner: 4 | Jakob Thumm (JT) 5 | 6 | Contributors: 7 | 8 | Changelog: 9 | 2.5.22 JT Formatted docstrings 10 | """ 11 | import math 12 | from scipy.spatial.transform import Rotation 13 | 14 | import numpy as np 15 | 16 | 17 | class PlotCapsule: 18 | """Capsule with different formats for plotting.""" 19 | 20 | def __init__(self, p1, p2, r): 21 | """Initialize a plot capsule. 22 | 23 | Args: 24 | p1: Point 1 (x, y, z) 25 | p2: Point 2 (x, y, z) 26 | r: Radius 27 | """ 28 | self.update_pos(p1, p2, r) 29 | 30 | def update_pos(self, p1, p2, r): 31 | """Update the position (and possibly radius) of the capsule. 
32 | 33 | Args: 34 | p1 (list): x, y, z coordinates of end point 1 of the cylinder 35 | p2 (list): x, y, z coordinates of end point 1 of the cylinder 36 | r (double): radius of the capsule 37 | """ 38 | self.p1 = p1 39 | self.p2 = p2 40 | self.r = r 41 | # Convert to pos, size, mat format. 42 | p1x = p1[0] 43 | p1y = p1[1] 44 | p1z = p1[2] 45 | p2x = p2[0] 46 | p2y = p2[1] 47 | p2z = p2[2] 48 | v2_x = p2x - p1x 49 | v2_y = p2y - p1y 50 | v2_z = p2z - p1z 51 | norm = math.sqrt(math.pow(v2_x, 2) + math.pow(v2_y, 2) + math.pow(v2_z, 2)) 52 | # POS 53 | self.pos = [(p1x + p2x) / 2, (p1y + p2y) / 2, (p1z + p2z) / 2] 54 | if norm > 1e-6: 55 | # SIZE 56 | self.size = [r, r, norm / 2] 57 | # ORIENTATION 58 | # Rotate z axis vector to direction vector according to 59 | # https://stackoverflow.com/questions/1171849/finding-quaternion-representing-the-rotation-from-one-vector-to-another/1171995#1171995 60 | a_x = -v2_y / norm 61 | a_y = v2_x / norm 62 | a_z = 0 63 | a_w = 1 + v2_z / norm 64 | norm_q = math.sqrt( 65 | math.pow(a_w, 2) 66 | + math.pow(a_x, 2) 67 | + math.pow(a_y, 2) 68 | + math.pow(a_z, 2) 69 | ) 70 | if norm_q > 1e-6: 71 | w = a_w / norm_q 72 | x = a_x / norm_q 73 | y = a_y / norm_q 74 | z = a_z / norm_q 75 | rot = Rotation.from_quat([x, y, z, w]) 76 | else: 77 | rot = Rotation.from_quat([0, 0, 0, 1]) 78 | self.mat = np.array(rot.as_matrix()) 79 | else: 80 | self.size = [r, r, 0.00001] 81 | self.mat = np.eye(3) 82 | -------------------------------------------------------------------------------- /human_robot_gym/controllers/failsafe_controller/files.txt: -------------------------------------------------------------------------------- 1 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/libSaRA.so.1 2 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/libSaRA.so.1.0.0 3 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/libSaRA.so 4 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/libreflexxes_type_iv.so 5 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/libreflexxes_type_iv.so.1 6 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/libreflexxes_type_iv.so.1.0.0 7 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/safety_shield_py.cpython-36m-x86_64-linux-gnu.so 8 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/EGG-INFO/PKG-INFO 9 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/EGG-INFO/not-zip-safe 10 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/EGG-INFO/SOURCES.txt 11 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/EGG-INFO/requires.txt 12 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/EGG-INFO/dependency_links.txt 13 | /home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/EGG-INFO/native_libs.txt 14 | 
/home/jakob/anaconda3/envs/robot-rl/lib/python3.6/site-packages/failsafe_controller-0.0.1-py3.6-linux-x86_64.egg/EGG-INFO/top_level.txt 15 | -------------------------------------------------------------------------------- /human_robot_gym/demonstrations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/demonstrations/__init__.py -------------------------------------------------------------------------------- /human_robot_gym/demonstrations/experts/__init__.py: -------------------------------------------------------------------------------- 1 | from .expert import Expert # noqa: F401 2 | from .reach_human_expert import ReachHumanExpert, ReachHumanExpertObservation # noqa: F401 3 | from .reach_human_cart_expert import ReachHumanCartExpert # noqa: F401 4 | from .pick_place_human_cart_expert import PickPlaceHumanCartExpert, PickPlaceHumanCartExpertObservation # noqa: F401 5 | from .collaborative_lifting_cart_expert import CollaborativeLiftingCartExpert, CollaborativeLiftingCartExpertObservation # noqa: F401 6 | from .collaborative_hammering_cart_expert import CollaborativeHammeringCartExpert, CollaborativeHammeringCartExpertObservation # noqa: F401 7 | 8 | REGISTERED_EXPERTS = { 9 | "ReachHuman": ReachHumanExpert, 10 | "ReachHumanCart": ReachHumanCartExpert, 11 | "PickPlaceHumanCart": PickPlaceHumanCartExpert, 12 | "CollaborativeLiftingCart": CollaborativeLiftingCartExpert, 13 | "CollaborativeHammeringCart": CollaborativeHammeringCartExpert, 14 | } 15 | -------------------------------------------------------------------------------- /human_robot_gym/demonstrations/experts/expert.py: -------------------------------------------------------------------------------- 1 | """This file implements the base class for expert policies. 2 | 3 | Author: 4 | Felix Trost (FT) 5 | 6 | Changelog: 7 | 06.02.23 FT File creation 8 | """ 9 | from gym import Space 10 | import numpy as np 11 | from typing import Any, Dict 12 | 13 | 14 | class Expert: 15 | """Expert policy base class. 16 | 17 | Args: 18 | observation_space (Space): gym space for observations; 19 | should be the same as in the environment 20 | action_space (Space): gym space for actions; 21 | should be the same as in the environment 22 | """ 23 | def __init__( 24 | self, 25 | observation_space: Space, 26 | action_space: Space, 27 | ): 28 | self.observation_space = observation_space 29 | self.action_space = action_space 30 | 31 | def __call__(self, observation: Dict[str, Any]) -> np.ndarray: 32 | """Query an action based on the given observation. 33 | 34 | Args: 35 | observation (Dict[str, Any]): observation from the current state 36 | 37 | Returns: 38 | np.ndarray: action; the base class samples a random action from the action space 39 | """ 40 | return self.action_space.sample() 41 | -------------------------------------------------------------------------------- /human_robot_gym/demos/__init__.py: -------------------------------------------------------------------------------- 1 | """This package includes demos for the human-robot-gym.""" 2 | from robosuite.controllers import controller_factory # noqa: F401 3 | -------------------------------------------------------------------------------- /human_robot_gym/demos/demo_gym_functionality.py: -------------------------------------------------------------------------------- 1 | """This script shows an example of a Panda robot in a human environment.
2 | 3 | For instance, this can be used with our provided training function to train an RL agent in a human environment. 4 | """ 5 | 6 | import robosuite as suite 7 | import time 8 | from robosuite.wrappers import GymWrapper 9 | 10 | import human_robot_gym.environments.manipulation.reach_human_env # noqa: F401 11 | 12 | 13 | if __name__ == "__main__": 14 | # Notice how the environment is wrapped by the GymWrapper 15 | env = GymWrapper( 16 | suite.make( 17 | "ReachHuman", 18 | robots="Panda", # use the Panda robot 19 | robot_base_offset=[0, 0, 0], 20 | use_camera_obs=False, # do not use pixel observations 21 | has_offscreen_renderer=False, # not needed since not using pixel obs 22 | has_renderer=True, # make sure we can render to the screen 23 | render_camera=None, 24 | reward_shaping=True, # use dense rewards 25 | control_freq=20, # control should happen fast enough so that simulation looks smooth 26 | hard_reset=False, 27 | verbose=True, 28 | ) 29 | ) 30 | 31 | for i_episode in range(20): 32 | observation = env.reset() 33 | t1 = time.time() 34 | for t in range(1000): 35 | action = env.action_space.sample() # np.array([0, 1, 0, 0, 0, 0, 0, 0]) 36 | observation, reward, done, info = env.step(action) 37 | if done: 38 | print("Episode finished after {} timesteps".format(t + 1)) 39 | break 40 | print("Episode {}, fps = {}".format(i_episode, (t + 1) / (time.time() - t1))) 41 | -------------------------------------------------------------------------------- /human_robot_gym/demos/profile_gym_functionality_Schunk.py: -------------------------------------------------------------------------------- 1 | """This script shows an example of the Schunk robot being safely controlled in a human environment. 2 | 3 | For instance, this can be used with our provided training function to train a safe RL agent.
4 | """ 5 | import cProfile 6 | import robosuite as suite 7 | import numpy as np # noqa: F401 8 | 9 | from robosuite.wrappers import GymWrapper 10 | from robosuite.controllers import load_controller_config 11 | 12 | from human_robot_gym.utils.mjcf_utils import file_path_completion, merge_configs 13 | import human_robot_gym.environments.manipulation.reach_human_env # noqa: F401 14 | import human_robot_gym.robots # noqa: F401 15 | from human_robot_gym.wrappers.visualization_wrapper import VisualizationWrapper # noqa: F401 16 | from human_robot_gym.wrappers.collision_prevention_wrapper import ( 17 | CollisionPreventionWrapper, 18 | ) 19 | 20 | 21 | def run_episode(env): 22 | env.reset() 23 | for i in range(100): 24 | action = env.action_space.sample() 25 | obs, reward, done, info = env.step(action) 26 | if done: 27 | break 28 | 29 | 30 | if __name__ == "__main__": 31 | # Load the failsafe controller config and merge in the robot-specific limits 32 | controller_config = dict() 33 | controller_config_path = file_path_completion( 34 | "controllers/failsafe_controller/config/failsafe.json" 35 | ) 36 | robot_config_path = file_path_completion("models/robots/config/schunk.json") 37 | controller_config = load_controller_config(custom_fpath=controller_config_path) 38 | robot_config = load_controller_config(custom_fpath=robot_config_path) 39 | controller_config = merge_configs(controller_config, robot_config) 40 | controller_configs = [controller_config] 41 | 42 | env = GymWrapper( 43 | suite.make( 44 | "ReachHuman", 45 | robots="Schunk", # use the Schunk robot 46 | robot_base_offset=[0, 0, 0], 47 | use_camera_obs=False, # do not use pixel observations 48 | has_offscreen_renderer=False, # not needed since not using pixel obs 49 | has_renderer=False, # no on-screen rendering needed for profiling 50 | render_camera=None, 51 | render_collision_mesh=False, 52 | reward_shaping=False, # use sparse rewards 53 | control_freq=10, # control should happen fast enough so that simulation looks smooth 54 | hard_reset=False, 55 | horizon=1000, 56 | controller_configs=controller_configs, 57 | shield_type="SSM", 58 | visualize_failsafe_controller=False, 59 | visualize_pinocchio=False, 60 | base_human_pos_offset=[0.0, 0.0, 0.0], 61 | ) 62 | ) 63 | 64 | env = CollisionPreventionWrapper( 65 | env=env, collision_check_fn=env.check_collision_action, replace_type=0 66 | ) 67 | 68 | cProfile.run("run_episode(env)", sort="cumtime") 69 | -------------------------------------------------------------------------------- /human_robot_gym/demos/test_safety_shield.py: -------------------------------------------------------------------------------- 1 | """This script shows an example of the Schunk robot being safely controlled in a human environment. 2 | 3 | For instance, this can be used with our provided training function to train a safe RL agent.
4 | """ 5 | 6 | from safety_shield_py import Motion 7 | from safety_shield_py import SafetyShield 8 | 9 | from human_robot_gym.utils.mjcf_utils import file_path_completion 10 | 11 | if __name__ == "__main__": 12 | trajectory_parameters_file = file_path_completion( 13 | "controllers/failsafe_controller/sara-shield/safety_shield/config/trajectory_parameters_schunk.yaml") 14 | robot_config_file = file_path_completion( 15 | "controllers/failsafe_controller/sara-shield/safety_shield/config/robot_parameters_schunk.yaml") 16 | mocap_config_file = file_path_completion( 17 | "controllers/failsafe_controller/sara-shield/safety_shield/config/cmu_mocap_no_hand.yaml") 18 | 19 | safety_shield = SafetyShield( 20 | activate_shield=True, 21 | sample_time=0.004, 22 | trajectory_config_file=trajectory_parameters_file, 23 | robot_config_file=robot_config_file, 24 | mocap_config_file=mocap_config_file, 25 | init_x=0.0, 26 | init_y=0.0, 27 | init_z=0.0, 28 | init_roll=0.0, 29 | init_pitch=0.0, 30 | init_yaw=0.0, 31 | init_qpos=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 32 | ) 33 | 34 | human_measurement = [ 35 | [10.0, 10.0, 0.0] for i in range(21) 36 | ] 37 | 38 | command_motion = Motion(0.0, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) 39 | v = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] 40 | t = 0 41 | for ep in range(1000): 42 | for step in range(100000): 43 | t += 0.001 44 | safety_shield.humanMeasurement(human_measurement, t) 45 | t += 0.003 46 | if step % 10000 == 0: 47 | p = [0.2 * t, 0.0, 0.0, 0.0, 0.0, 0.0] 48 | safety_shield.newLongTermTrajectory(p, v) 49 | desired_motion = safety_shield.step(t) 50 | desired_qpos = desired_motion.getAngle() 51 | desired_qvel = desired_motion.getVelocity() 52 | desired_qacc = desired_motion.getAcceleration() 53 | 54 | t = 0 55 | safety_shield.reset(activate_shield=True, 56 | init_x=0.0, 57 | init_y=0.0, 58 | init_z=0.0, 59 | init_roll=0.0, 60 | init_pitch=0.0, 61 | init_yaw=0.0, 62 | init_qpos=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 63 | current_time=t) 64 | -------------------------------------------------------------------------------- /human_robot_gym/environments/__init__.py: -------------------------------------------------------------------------------- 1 | """This package defines all environments and necessary gym wrappers.""" 2 | from robosuite.environments.base import REGISTERED_ENVS, MujocoEnv # noqa: F401 3 | 4 | from human_robot_gym.environments.manipulation.human_env import HumanEnv # noqa: F401 5 | from human_robot_gym.environments.manipulation.reach_human_env import ReachHuman # noqa: F401 6 | from human_robot_gym.environments.manipulation.reach_human_cartesian_env import ReachHumanCart # noqa: F401 7 | from human_robot_gym.environments.manipulation.pick_place_human_cartesian_env import PickPlaceHumanCart # noqa: F401 8 | from human_robot_gym.environments.manipulation.human_object_inspection_cartesian_env import ( # noqa: F401 9 | HumanObjectInspectionCart 10 | ) 11 | from human_robot_gym.environments.manipulation.robot_human_handover_cartesian_env import ( # noqa: F401 12 | RobotHumanHandoverCart 13 | ) 14 | from human_robot_gym.environments.manipulation.human_robot_handover_cartesian_env import ( # noqa: F401 15 | HumanRobotHandoverCart 16 | ) 17 | from human_robot_gym.environments.manipulation.collaborative_lifting_cartesian_env import ( # noqa: F401 18 | CollaborativeLiftingCart 19 | ) 20 | from human_robot_gym.environments.manipulation.collaborative_stacking_cartesian_env import ( # noqa: F401 21 | CollaborativeStackingCart 22 | ) 23 | from
human_robot_gym.environments.manipulation.collaborative_hammering_cartesian_env import ( # noqa: F401 24 | CollaborativeHammeringCart 25 | ) 26 | 27 | ALL_ENVIRONMENTS = REGISTERED_ENVS.keys() 28 | -------------------------------------------------------------------------------- /human_robot_gym/environments/gym_envs/__init__.py: -------------------------------------------------------------------------------- 1 | """This package defines functionality necessary for defining gym environments.""" 2 | 3 | import robosuite.environments # noqa: F401 4 | -------------------------------------------------------------------------------- /human_robot_gym/environments/gym_envs/make_gym.py: -------------------------------------------------------------------------------- 1 | """This file defines how gym environments are made. 2 | 3 | Owner: 4 | Jakob Thumm (JT) 5 | 6 | Contributors: 7 | 8 | Changelog: 9 | 2.5.22 JT Formatted docstrings 10 | """ 11 | 12 | from typing import Union, List 13 | 14 | from robosuite.wrappers.gym_wrapper import GymWrapper 15 | from robosuite.environments.base import make 16 | from gym import Env 17 | from gym.envs.registration import spec 18 | 19 | 20 | def make_gym(env: str, robots: Union[str, List[str]], id: str, **kwargs) -> Env: 21 | """Wrap an environment with a gym wrapper. 22 | 23 | Args: 24 | robots: list of robots in the environment. 25 | id: Gym environment id. 26 | 27 | Returns: 28 | Wrapped gym environment. 29 | """ 30 | gym_env = GymWrapper(env=make(env, robots=robots, **kwargs)) 31 | gym_env.spec = spec(id) 32 | return gym_env 33 | -------------------------------------------------------------------------------- /human_robot_gym/environments/manipulation/__init__.py: -------------------------------------------------------------------------------- 1 | """This package defines the manipulation environments in the human-robot-gym.""" 2 | -------------------------------------------------------------------------------- /human_robot_gym/models/__init__.py: -------------------------------------------------------------------------------- 1 | """This package defines the models loaded by the simulation.""" 2 | 3 | import os 4 | from robosuite.models.world import MujocoWorldBase # noqa: F401 5 | 6 | assets_root = os.path.join(os.path.dirname(__file__), "assets") 7 | -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/README.md: -------------------------------------------------------------------------------- 1 | The human models are taken from https://github.com/KlabCMU/kin-poly. 
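A note on the make_gym helper shown above: it wraps a robosuite environment in a GymWrapper and attaches a gym spec, so the given id must already be registered with gym. The following minimal sketch illustrates the intended call pattern; the gym id "ReachHuman-v0" and the extra keyword arguments are illustrative assumptions rather than values taken from this repository.

from human_robot_gym.environments.gym_envs.make_gym import make_gym

# Hypothetical usage sketch: "ReachHuman" is the robosuite environment name registered above,
# while "ReachHuman-v0" stands in for a gym id that is assumed to be registered elsewhere.
env = make_gym(
    env="ReachHuman",
    robots="Schunk",
    id="ReachHuman-v0",
    use_camera_obs=False,  # additional keyword arguments are forwarded to the environment constructor
    has_renderer=False,
)
observation = env.reset()
observation, reward, done, info = env.step(env.action_space.sample())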
-------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/common/materials.xml: -------------------------------------------------------------------------------- 1 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/common/sky1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/common/sky1.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/common/skybox.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/etc/model.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/etc/model.pdf -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/hand.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/Chest.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/Chest.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/Head.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/Head.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/Hips.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/Hips.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/L_Ankle.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/L_Ankle.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/L_Elbow.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/L_Elbow.stl -------------------------------------------------------------------------------- 
/human_robot_gym/models/assets/human/meshes/L_Hand.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/L_Hand.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/L_Hip.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/L_Hip.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/L_Knee.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/L_Knee.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/L_Shoulder.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/L_Shoulder.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/L_Thorax.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/L_Thorax.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/L_Toe.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/L_Toe.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/L_Wrist.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/L_Wrist.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/LeftArm.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/LeftArm.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/LeftChest.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/LeftChest.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/LeftFoot.stl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/LeftFoot.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/LeftHand.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/LeftHand.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/LeftHand_corrected.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/LeftHand_corrected.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/LeftLeg.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/LeftLeg.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/LeftShoulder.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/LeftShoulder.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/LeftToe.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/LeftToe.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/LeftUpLeg.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/LeftUpLeg.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/LeftWrist.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/LeftWrist.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/Mouth.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/Mouth.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/Neck.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/Neck.stl 
-------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/Pelvis.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/Pelvis.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/R_Ankle.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/R_Ankle.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/R_Elbow.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/R_Elbow.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/R_Hand.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/R_Hand.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/R_Hip.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/R_Hip.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/R_Knee.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/R_Knee.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/R_Shoulder.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/R_Shoulder.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/R_Thorax.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/R_Thorax.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/R_Toe.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/R_Toe.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/R_Wrist.stl: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/R_Wrist.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/RightArm.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/RightArm.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/RightChest.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/RightChest.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/RightFoot.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/RightFoot.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/RightHand.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/RightHand.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/RightLeg.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/RightLeg.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/RightShoulder.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/RightShoulder.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/RightToe.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/RightToe.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/RightUpLeg.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/RightUpLeg.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/RightWrist.stl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/RightWrist.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/Spine.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/Spine.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/Spine1.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/Spine1.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/Spine2.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/Spine2.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/meshes/Torso.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/human/meshes/Torso.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/human/template/humanoid_template.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 27 | -------------------------------------------------------------------------------- /human_robot_gym/models/assets/objects/nail.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_1_link.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_1_link.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_1_link_small.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_1_link_small.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_2_link.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_2_link.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_3_link.stl: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_3_link.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_3_link_small.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_3_link_small.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_4_link.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_4_link.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_4_link_collision.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_4_link_collision.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_4_link_low_poly.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_4_link_low_poly.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_5_link.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_5_link.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_5_link_small.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_5_link_small.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_6_link.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_6_link.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_6_link_low_poly.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_6_link_low_poly.stl 
-------------------------------------------------------------------------------- /human_robot_gym/models/assets/robots/schunk/meshes/arm_base_link.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/robots/schunk/meshes/arm_base_link.stl -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/blue-shirt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/blue-shirt.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/blue-wood.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/blue-wood.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/brass-ambra.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/brass-ambra.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/bread.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/bread.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/can.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/can.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/ceramic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/ceramic.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/cereal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/cereal.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/clay.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/clay.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/cream-plaster.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/cream-plaster.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/dark-wood.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/dark-wood.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/dirt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/dirt.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/glass.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/glass.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/gray-felt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/gray-felt.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/gray-plaster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/gray-plaster.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/gray-woodgrain.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/gray-woodgrain.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/green-shirt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/green-shirt.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/green-wood.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/green-wood.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/jeans.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/jeans.png -------------------------------------------------------------------------------- 
/human_robot_gym/models/assets/textures/lemon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/lemon.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/light-wood.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/light-wood.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/metal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/metal.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/pink-plaster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/pink-plaster.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/plaster-wall-4k.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/plaster-wall-4k.jpg -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/plywood-4k.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/plywood-4k.jpg -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/red-shirt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/red-shirt.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/red-wood.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/red-wood.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/skin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/skin.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/skin2.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/skin2.jpg -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/skin2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/skin2.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/steel-brushed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/steel-brushed.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/steel-scratched.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/steel-scratched.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/white-bricks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/white-bricks.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/white-plaster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/white-plaster.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/wood-tiles.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/wood-tiles.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/wood-varnished-panels.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/wood-varnished-panels.png -------------------------------------------------------------------------------- /human_robot_gym/models/assets/textures/yellow-plaster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/models/assets/textures/yellow-plaster.png -------------------------------------------------------------------------------- /human_robot_gym/models/grippers/__init__.py: -------------------------------------------------------------------------------- 1 | """This package defines all gripper models.""" 2 | # flake8: noqa 3 | from human_robot_gym.models.grippers.rethink_valid_gripper import RethinkValidGripper 4 | from 
robosuite.models.grippers import ALL_GRIPPERS, GRIPPER_MAPPING 5 | 6 | GRIPPER_MAPPING["RethinkValidGripper"] = RethinkValidGripper 7 | 8 | ALL_GRIPPERS = GRIPPER_MAPPING.keys() 9 | -------------------------------------------------------------------------------- /human_robot_gym/models/grippers/rethink_valid_gripper.py: -------------------------------------------------------------------------------- 1 | """This file adds a subclass to the Rethink gripper with a valid init_qpos. 2 | 3 | Supposedly due to a bug in robosuite, the Rethink gripper is initialized with a gripper joint angle 4 | that is outside of the actual joint limits. This causes the gripper to snap to valid joint values 5 | in the first steps after initialization. 6 | 7 | This subclass fixes this issue by setting the init_qpos to a valid joint angle. 8 | 9 | Author: 10 | Felix Trost (FT) 11 | 12 | Changelog: 13 | 27.3.23 FT Created file 14 | """ 15 | import numpy as np 16 | 17 | from robosuite.models.grippers import RethinkGripper 18 | 19 | 20 | class RethinkValidGripper(RethinkGripper): 21 | """Rethink gripper with valid init_qpos.""" 22 | @property 23 | def init_qpos(self) -> np.ndarray: 24 | """Override parent property by choosing a value within the actual joint limits. 25 | We choose the value that corresponds to a fully opened gripper.""" 26 | return self.qpos_range[1] 27 | 28 | @property 29 | def qpos_range(self) -> np.ndarray: 30 | """Range of the gripper joint positions. Values determined empirically. 31 | 32 | Returns: 33 | np.ndarray: Range of the gripper joint positions. 2D array of shape (2, 2): 34 | qpos_range[0, :] = minimal qpos values for each joint 35 | qpos_range[1, :] = maximal qpos values for each joint 36 | """ 37 | return np.array( 38 | [ 39 | [-0.0118366, 0.0118366], 40 | [0.011499, -0.011499], 41 | ] 42 | ) 43 | -------------------------------------------------------------------------------- /human_robot_gym/models/objects/__init__.py: -------------------------------------------------------------------------------- 1 | """This file defines the objects package.""" 2 | -------------------------------------------------------------------------------- /human_robot_gym/models/objects/human/__init__.py: -------------------------------------------------------------------------------- 1 | """This package defines the human model.""" 2 | -------------------------------------------------------------------------------- /human_robot_gym/models/objects/human/c3d_info.txt: -------------------------------------------------------------------------------- 1 | 'Left front waist' 2 | 'Right front waist' 3 | 'Left back waist' 4 | 'Right back waist' 5 | 'Left thigh' 6 | 'Left knee' 7 | 'Left shin' 8 | 'Left ankle' 9 | 'Left heel' 10 | 'Left metatarsal five' 11 | 'Left toe' 12 | 'Right Thigh' 13 | 'Right knee' 14 | 'Right shin offset' 15 | 'Right ankle' 16 | 'Right heel' 17 | 'Right metatarsal five' 18 | 'Right toe' 19 | 'Clavicle' 20 | 'Sternum' 21 | 'Back of neck' 22 | 'Upper back' 23 | 'Right back offset marker' 24 | 'Left front head' 25 | 'Right front head' 26 | 'Left back head' 27 | 'Right back head' 28 | 'Left shoulder' 29 | 'Left upper arm' 30 | 'Left elbow' 31 | 'Left forearm' 32 | 'Left wrist bar thumb side' 33 | 'Left wrist bar pinkie side' 34 | 'Left hand' 35 | 'Right shoulder' 36 | 'Right upper arm' 37 | 'Right elbow' 38 | 'Right forearm' 39 | 'Right wrist bar thumb side' 40 | 'Right wrist bar pinkie side' 41 | 'Right hand' -------------------------------------------------------------------------------- 
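Because rethink_valid_gripper.py registers RethinkValidGripper in robosuite's GRIPPER_MAPPING (see grippers/__init__.py above), the gripper can be requested by name when an environment is created. A minimal sketch, assuming the standard robosuite gripper_types keyword; the remaining keyword values are illustrative only.

import robosuite as suite

import human_robot_gym.models.grippers  # noqa: F401  (importing runs the GRIPPER_MAPPING registration)
import human_robot_gym.environments.manipulation.reach_human_env  # noqa: F401

# Hypothetical usage sketch: request the registered gripper by its mapping key.
env = suite.make(
    "ReachHuman",
    robots="Schunk",
    gripper_types="RethinkValidGripper",
    use_camera_obs=False,
    has_renderer=False,
)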
/human_robot_gym/models/objects/human/human.py: -------------------------------------------------------------------------------- 1 | """This file describes the human model. 2 | 3 | Humans have multiple joint elements, some of which are observable. 4 | The human object is fully defined in assets/human/human.xml. 5 | 6 | Owner: 7 | Jakob Thumm (JT) 8 | 9 | Contributors: 10 | Julian Balletshofer (JB) 11 | Changelog: 12 | 2.5.22 JT Formatted docstrings 13 | 15.7.22 JB added properties for right and left hand and head 14 | """ 15 | 16 | from robosuite.models.objects.xml_objects import MujocoXMLObject 17 | 18 | from human_robot_gym.utils.mjcf_utils import xml_path_completion 19 | 20 | 21 | class HumanObject(MujocoXMLObject): 22 | """Human object that is loaded from an XML file. 23 | 24 | The human can be controlled by setting the x, y, and z components of each human joint. 25 | 26 | Args: 27 | name (str): Name of the human object. 28 | """ 29 | 30 | def __init__(self, name): # noqa: D107 31 | super().__init__( 32 | xml_path_completion("human/human.xml"), 33 | name=name, 34 | joints=None, 35 | obj_type="all", 36 | duplicate_collision_geoms=True, 37 | ) 38 | self._setup_joint_names() 39 | 40 | @property 41 | def left_hand(self): 42 | """Get the joint name of the left hand.""" 43 | return self.naming_prefix + "L_Hand" 44 | 45 | @property 46 | def right_hand(self): 47 | """Get the joint name of the right hand.""" 48 | return self.naming_prefix + "R_Hand" 49 | 50 | @property 51 | def head(self): 52 | """Get the joint name of the head.""" 53 | return self.naming_prefix + "Head" 54 | 55 | def _setup_joint_names(self): 56 | """Define the name of all controllable and observable joints.""" 57 | self.joint_elements = [ 58 | "L_Hip", 59 | "R_Hip", 60 | "Torso", 61 | "L_Knee", 62 | "R_Knee", 63 | "Spine", 64 | "L_Ankle", 65 | "R_Ankle", 66 | "Chest", 67 | "L_Toe", 68 | "R_Toe", 69 | "Neck", 70 | "L_Thorax", 71 | "R_Thorax", 72 | "Head", 73 | "L_Shoulder", 74 | "R_Shoulder", 75 | "L_Elbow", 76 | "R_Elbow", 77 | "L_Wrist", 78 | "R_Wrist", 79 | "L_Hand", 80 | "R_Hand", 81 | ] 82 | self.joint_names = [] 83 | for joint in self.joint_elements: 84 | self.joint_names.append(self.naming_prefix + joint + "_x") 85 | self.joint_names.append(self.naming_prefix + joint + "_y") 86 | self.joint_names.append(self.naming_prefix + joint + "_z") 87 | # Observables: 88 | self.obs_joint_elements = [ 89 | "Torso", 90 | "Chest", 91 | "Head", 92 | "L_Shoulder", 93 | "R_Shoulder", 94 | "L_Elbow", 95 | "R_Elbow", 96 | "L_Hand", 97 | "R_Hand", 98 | ] 99 | -------------------------------------------------------------------------------- /human_robot_gym/models/robots/__init__.py: -------------------------------------------------------------------------------- 1 | """This package defines all robot models.""" 2 | # flake8: noqa 3 | from human_robot_gym.models.robots.manipulators import ( 4 | pinocchio_manipulator_model, 5 | schunk_robot, 6 | panda_robot_zero, 7 | ) 8 | -------------------------------------------------------------------------------- /human_robot_gym/models/robots/config/iiwa.json: -------------------------------------------------------------------------------- 1 | { 2 | "qpos_limits": [[-2.96706, -2.0944, -2.96706, -2.0944, -2.96706, -2.0944, -3.05433], [2.96706, 2.0944, 2.96706, 2.0944, 2.96706, 2.0944, 3.05433]], 3 | "velocity_limits": [[-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]] 4 | } -------------------------------------------------------------------------------- 
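The per-robot limit files such as iiwa.json above only define joint position and velocity bounds; in the demo scripts they are merged into the failsafe controller configuration before being passed to the environment. A short sketch of that pattern, mirroring profile_gym_functionality_Schunk.py shown earlier and using the Schunk limits as an example:

from robosuite.controllers import load_controller_config

from human_robot_gym.utils.mjcf_utils import file_path_completion, merge_configs

# Load the base failsafe controller settings and overlay the robot-specific limits.
controller_config = load_controller_config(
    custom_fpath=file_path_completion("controllers/failsafe_controller/config/failsafe.json")
)
robot_config = load_controller_config(
    custom_fpath=file_path_completion("models/robots/config/schunk.json")
)
controller_configs = [merge_configs(controller_config, robot_config)]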
/human_robot_gym/models/robots/config/jaco.json: -------------------------------------------------------------------------------- 1 | { 2 | "qpos_limits": [[-6.28319, 0.820305, -6.28319, 0.523599, -6.28319, 1.13446, -6.28319], [6.28319, 5.46288, 6.28319, 5.75959, 6.28319, 5.14872, 6.28319]], 3 | "velocity_limits": [[-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]] 4 | } -------------------------------------------------------------------------------- /human_robot_gym/models/robots/config/kinova3.json: -------------------------------------------------------------------------------- 1 | { 2 | "qpos_limits": [[-6.28319, -2.41, -6.28319, -2.66, -6.28319, -2.23, -6.28319], [6.28319, 2.41, 6.28319, 2.66, 6.28319, 2.23, 6.28319]], 3 | "velocity_limits": [[-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]] 4 | } -------------------------------------------------------------------------------- /human_robot_gym/models/robots/config/panda.json: -------------------------------------------------------------------------------- 1 | { 2 | "qpos_limits": [[-2.9, -1.8, -2.9, -3.1, -2.9, -0.01, -2.9], [2.9, 1.8, 2.9, -0.07, 2.9, 3.8, 2.9]], 3 | "velocity_limits": [[-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]] 4 | } -------------------------------------------------------------------------------- /human_robot_gym/models/robots/config/sawyer.json: -------------------------------------------------------------------------------- 1 | { 2 | "qpos_limits": [[-3.0503, -3.8095, -3.0426, -3.0439, -2.9761, -2.9761, -4.7124], [3.0503, 2.2736, 3.0426, 3.0439, 2.9761, 2.9761, 4.7124]], 3 | "velocity_limits": [[-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]] 4 | } -------------------------------------------------------------------------------- /human_robot_gym/models/robots/config/schunk.json: -------------------------------------------------------------------------------- 1 | { 2 | "qpos_limits": [[-2.9, -1.8, -2.60, -2.9, -1.85, -2.9], [2.9, 1.8, 2.60, 2.9, 1.85, 2.9]], 3 | "velocity_limits": [[-1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]] 4 | } -------------------------------------------------------------------------------- /human_robot_gym/models/robots/config/ur5e.json: -------------------------------------------------------------------------------- 1 | { 2 | "qpos_limits": [[-6.28319, -6.28319, -3.14159, -6.28319, -6.28319, -6.28319], [6.28319, 6.28319, 3.14159, 6.28319, 6.28319, 6.28319]], 3 | "velocity_limits": [[-1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]] 4 | } -------------------------------------------------------------------------------- /human_robot_gym/models/robots/manipulators/__init__.py: -------------------------------------------------------------------------------- 1 | """This package defines the manipulator models.""" 2 | from .schunk_robot import Schunk # noqa: F401 3 | from .panda_robot_zero import PandaZero # noqa: F401 4 | -------------------------------------------------------------------------------- /human_robot_gym/models/robots/manipulators/panda_robot_zero.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from robosuite.models.robots.manipulators.manipulator_model import ManipulatorModel 4 | from robosuite.utils.mjcf_utils import xml_path_completion 5 | 6 | 7 | class PandaZero(ManipulatorModel): 8 | """ 9 | Panda is a sensitive single-arm robot designed by 
Franka. 10 | 11 | Args: 12 | idn (int or str): Number or some other unique identification string for this robot instance 13 | """ 14 | 15 | def __init__(self, idn=0): 16 | super().__init__(xml_path_completion("robots/panda/robot.xml"), idn=idn) 17 | 18 | # Set joint damping 19 | self.set_joint_attribute(attrib="damping", values=np.array((0.1, 0.1, 0.1, 0.1, 0.1, 0.01, 0.01))) 20 | 21 | @property 22 | def default_mount(self): 23 | return "RethinkMount" 24 | 25 | @property 26 | def default_gripper(self): 27 | return "PandaGripper" 28 | 29 | @property 30 | def default_controller_config(self): 31 | return "default_panda" 32 | 33 | @property 34 | def init_qpos(self): 35 | return np.array([0, 1, 0, 0, 0, 0, 0], dtype=np.float64) 36 | 37 | @property 38 | def base_xpos_offset(self): 39 | return { 40 | "bins": (-0.5, -0.1, 0), 41 | "empty": (-0.6, 0, 0), 42 | "table": lambda table_length: (-0.16 - table_length / 2, 0, 0), 43 | } 44 | 45 | @property 46 | def top_offset(self): 47 | return np.array((0, 0, 1.0)) 48 | 49 | @property 50 | def _horizontal_radius(self): 51 | return 0.5 52 | 53 | @property 54 | def arm_type(self): 55 | return "single" 56 | -------------------------------------------------------------------------------- /human_robot_gym/models/robots/manipulators/schunk_robot.py: -------------------------------------------------------------------------------- 1 | """This file describes the Schunk model. 2 | 3 | The Schunk robot uses pinocchio to model kinematics and dynamics. 4 | 5 | Owner: 6 | Jakob Thumm (JT) 7 | 8 | Contributors: 9 | Felix Trost (FT) 10 | 11 | Changelog: 12 | 2.5.22 JT Formatted docstrings 13 | 27.3.23 FT Changed default gripper to RethinkValidGripper 14 | """ 15 | 16 | import numpy as np 17 | 18 | from human_robot_gym.models.robots.manipulators.pinocchio_manipulator_model import ( 19 | PinocchioManipulatorModel, 20 | ) 21 | from human_robot_gym.utils.mjcf_utils import xml_path_completion 22 | import human_robot_gym.models.grippers # noqa: F401 23 | 24 | 25 | class Schunk(PinocchioManipulatorModel): 26 | """ 27 | Schunk is a sensitive single-arm robot designed by Schunk. 
28 | 29 | Args: 30 | idn (int or str): Number or some other unique identification string for this robot instance 31 | """ 32 | 33 | def __init__(self, idn=0): # noqa: D107 34 | super().__init__( 35 | fname=xml_path_completion("robots/schunk/robot.xml"), 36 | urdf_file=xml_path_completion("robots/schunk/robot.urdf"), 37 | package_dirs=xml_path_completion("robots/"), 38 | idn=idn, 39 | ) 40 | 41 | # Set joint damping 42 | self.set_joint_attribute( 43 | attrib="damping", 44 | values=np.array((0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001)), 45 | ) 46 | 47 | @property 48 | def default_mount(self): 49 | """Get default mount.""" 50 | return "RethinkMount" 51 | 52 | @property 53 | def default_gripper(self): 54 | """Get default gripper.""" 55 | return "RethinkValidGripper" 56 | 57 | @property 58 | def default_controller_config(self): 59 | """Get default controller config.""" 60 | return "default_panda" 61 | 62 | @property 63 | def init_qpos(self): 64 | """Get the initial joint position.""" 65 | return np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) 66 | 67 | @property 68 | def base_xpos_offset(self): 69 | """Get the base offset.""" 70 | # TODO: Tune these values 71 | return { 72 | "bins": (-0.5, -0.1, 0), 73 | "empty": (-0.6, 0, 0), 74 | "table": lambda table_length: (-0.16 - table_length / 2, 0, 0), 75 | } 76 | 77 | @property 78 | def top_offset(self): 79 | """Get the top offset.""" 80 | # TODO: Tune these values 81 | return np.array((0, 0, 1.0)) 82 | 83 | @property 84 | def _horizontal_radius(self): 85 | """Get the horizontal radius.""" 86 | # TODO: Tune these values 87 | return 0.5 88 | 89 | @property 90 | def arm_type(self): 91 | """Get the arm type.""" 92 | return "single" 93 | -------------------------------------------------------------------------------- /human_robot_gym/robots/__init__.py: -------------------------------------------------------------------------------- 1 | """Defines robot class mapping.""" 2 | import human_robot_gym.models.robots # noqa: F401 3 | from robosuite.robots import ROBOT_CLASS_MAPPING 4 | from robosuite.models.robots.robot_model import REGISTERED_ROBOTS # noqa: F401 5 | from robosuite.robots.single_arm import SingleArm 6 | 7 | # Robot class mappings -- must be maintained manually 8 | ROBOT_CLASS_MAPPING["Schunk"] = SingleArm 9 | ROBOT_CLASS_MAPPING["PandaZero"] = SingleArm 10 | -------------------------------------------------------------------------------- /human_robot_gym/training/__init__.py: -------------------------------------------------------------------------------- 1 | """this package defines the training functions.""" 2 | import human_robot_gym.robots # noqa: F401 3 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/algorithm/ppo.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - sb3_default/ppo@_here_ 3 | - _self_ 4 | 5 | learning_rate: 7e-5 6 | n_steps: 64 7 | batch_size: 64 8 | n_epochs: 20 9 | gamma: 0.99 10 | gae_lambda: 0.9 11 | clip_range: 0.2 12 | clip_range_vf: null 13 | normalize_advantage: true 14 | ent_coef: 0.0 15 | vf_coef: 0.5 16 | max_grad_norm: 0.5 17 | use_sde: false 18 | sde_sample_freq: 4 19 | target_kl: null 20 | policy_kwargs: 21 | net_arch: 22 | - 64 23 | - 64 24 | ortho_init: false 25 | log_std_init: -2.7 26 | full_std: true 27 | use_expln: false 28 | squash_output: true 29 | verbose: 1 -------------------------------------------------------------------------------- 
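Illustration for the algorithm configs: `ppo.yaml` first pulls in the `sb3_default/ppo` values and then overrides selected hyperparameters via `_self_`. The repository composes and consumes these configs through Hydra (see `train_SB3.py` below); purely as a schematic sketch with a placeholder environment and only a subset of the keys, the resolved values map onto a stable-baselines3 constructor roughly as follows:

```python
import gym  # the classic gym API, as used throughout this repository
from stable_baselines3 import PPO

env = gym.make("Pendulum-v1")  # placeholder; the real runs use human-robot-gym environments

model = PPO(
    policy="MlpPolicy",
    env=env,
    learning_rate=7e-5,
    n_steps=64,
    batch_size=64,
    n_epochs=20,
    gamma=0.99,
    gae_lambda=0.9,
    clip_range=0.2,
    policy_kwargs={"net_arch": [64, 64], "log_std_init": -2.7},
    verbose=1,
)
model.learn(total_timesteps=10_000)
```

--------------------------------------------------------------------------------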
/human_robot_gym/training/config/algorithm/sac.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - sb3_default/sac@_here_ 3 | - _self_ 4 | 5 | learning_rate: 5e-4 6 | learning_starts: 1000 7 | batch_size: 128 8 | tau: 0.005 9 | gamma: 0.99 10 | train_freq: 11 | - 100 12 | - step 13 | gradient_steps: -1 14 | action_noise: null 15 | ent_coef: auto_0.2 16 | target_update_interval: 1 17 | target_entropy: auto 18 | use_sde: false 19 | sde_sample_freq: -1 20 | use_sde_at_warmup: false 21 | policy_kwargs: 22 | net_arch: 23 | - 64 24 | - 64 25 | verbose: 1 -------------------------------------------------------------------------------- /human_robot_gym/training/config/algorithm/sac_her.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - sb3_default/sac@_here_ 3 | - _self_ 4 | 5 | policy: MultiInputPolicy 6 | learning_rate: 5e-4 7 | buffer_size: 1_000_000 8 | learning_starts: 1000 9 | batch_size: 128 10 | tau: 5e-3 11 | gamma: 0.99 12 | train_freq: 13 | - 1 14 | - episode 15 | gradient_steps: -1 16 | action_noise: null 17 | replay_buffer_kwargs: 18 | n_sampled_goal: 4 19 | goal_selection_strategy: future 20 | online_sampling: true 21 | ent_coef: auto_0.2 22 | target_update_interval: 1 23 | target_entropy: auto 24 | use_sde: false 25 | sde_sample_freq: -1 26 | use_sde_at_warmup: false 27 | policy_kwargs: 28 | net_arch: 29 | - 64 30 | - 64 31 | - 64 32 | verbose: 1 -------------------------------------------------------------------------------- /human_robot_gym/training/config/algorithm/sb3_default/ppo.yaml: -------------------------------------------------------------------------------- 1 | name: PPO 2 | policy: MlpPolicy 3 | learning_rate: 3e-4 4 | n_steps: 2048 5 | batch_size: 64 6 | n_epochs: 10 7 | gamma: 0.99 8 | gae_lambda: 0.95 9 | clip_range: 0.2 10 | clip_range_vf: null 11 | normalize_advantage: true 12 | ent_coef: 0.0 13 | vf_coef: 0.5 14 | max_grad_norm: 0.5 15 | use_sde: False 16 | sde_sample_freq: -1 17 | target_kl: null 18 | create_eval_env: false 19 | policy_kwargs: null 20 | verbose: 0 21 | seed: null 22 | device: auto 23 | _init_setup_model: true 24 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/algorithm/sb3_default/sac.yaml: -------------------------------------------------------------------------------- 1 | name: SAC 2 | policy: MlpPolicy 3 | learning_rate: 3e-4 4 | buffer_size: 1_000_000 5 | learning_starts: 100 6 | batch_size: 256 7 | tau: 0.005 8 | gamma: 0.99 9 | train_freq: 1 10 | gradient_steps: 1 11 | replay_buffer_kwargs: null 12 | optimize_memory_usage: False 13 | ent_coef: auto 14 | target_update_interval: 1 15 | target_entropy: auto 16 | use_sde: False 17 | sde_sample_freq: -1 18 | use_sde_at_warmup: False 19 | create_eval_env: False 20 | policy_kwargs: null 21 | verbose: 0 22 | seed: null 23 | device: auto 24 | _init_setup_model: True 25 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/collaborative_lifting_cart_expert_data_collection.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - robot: schunk 3 | - environment: collaborative_lifting_cart 4 | - wrappers: safe_ik 5 | - expert: collaborative_lifting_cart 6 | - run: expert_imitation_training 7 | - consistent_seeding 8 | - _self_ 9 | - eval 10 | 11 | environment: 12 | verbose: true 13 | done_at_success: true 14 | run: 15 
| obs_keys: 16 | - board_quat 17 | - dist_eef_to_human_head 18 | - vec_eef_to_human_lh 19 | - vec_eef_to_human_rh 20 | wrappers: 21 | ik_position_delta: 22 | action_limit: 0.1 23 | expert: 24 | signal_to_noise_ratio: 0.98 25 | 26 | 27 | dataset_name: collaborative-lifting-dataset 28 | start_episode_index: 0 29 | n_episodes: 200 30 | n_threads: 1 31 | load_episode_index: null 32 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/consistent_seeding.yaml: -------------------------------------------------------------------------------- 1 | environment: 2 | seed: ${run.seed} 3 | algorithm: 4 | seed: ${run.seed} -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/collaborative_hammering_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/pick_place_human_cart@_here_ 3 | - _self_ 4 | 5 | robot_base_offset: 6 | - 0.0 7 | - 0.0 8 | - 0.0 9 | use_camera_obs: false 10 | use_object_obs: true 11 | collision_reward: 0 12 | has_offscreen_renderer: false 13 | render_camera: null 14 | horizon: 1000 15 | hard_reset: false 16 | done_at_collision: false 17 | done_at_success: false 18 | nail_hammered_in_reward: 0.0 19 | hammer_gripped_reward_bonus: 0.0 -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/collaborative_lifting_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/collaborative_lifting_cart@_here_ 3 | - _self_ 4 | 5 | robot_base_offset: 6 | - 0.0 7 | - 0.0 8 | - 0.0 9 | use_camera_obs: false 10 | use_object_obs: true 11 | collision_reward: 0 12 | has_offscreen_renderer: false 13 | render_camera: null 14 | horizon: 5000 15 | hard_reset: false 16 | done_at_collision: false 17 | done_at_success: true 18 | imbalance_failure_reward: -10 19 | reward_shaping: true 20 | n_animations_sampled_per_100_steps: 0.2 21 | human_animation_freq: 20 22 | human_animation_names: 23 | - CollaborativeLifting/2 24 | - CollaborativeLifting/3 25 | - CollaborativeLifting/4 26 | - CollaborativeLifting/5 27 | - CollaborativeLifting/6 28 | # - CollaborativeLifting/7 29 | # - CollaborativeLifting/8 30 | - CollaborativeLifting/9 31 | - CollaborativeLifting/10 32 | board_full_size: 33 | - 1.0 34 | - 0.4 35 | - 0.03 -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/collaborative_stacking_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/collaborative_stacking_cart@_here_ 3 | - _self_ 4 | 5 | robot_base_offset: 6 | - 0.0 7 | - 0.0 8 | - 0.0 9 | use_camera_obs: false 10 | use_object_obs: true 11 | has_offscreen_renderer: false 12 | render_camera: null 13 | horizon: 3000 14 | hard_reset: false 15 | done_at_collision: false 16 | done_at_success: true 17 | task_reward: 2.0 18 | object_gripped_reward: 0.75 19 | second_cube_at_target_reward: 0.0 20 | fourth_cube_at_target_reward: 1.0 21 | collision_reward: 0.0 22 | human_rand: 23 | - 0.02 # Forward-backward 24 | - 0.1 # Left-right 25 | - 0.1 # Rotational angle -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/default/collaborative_hammering_cart.yaml: 
-------------------------------------------------------------------------------- 1 | defaults: 2 | - human_env 3 | - _self_ 4 | 5 | env_id: CollaborativeHammeringCart 6 | table_full_size: 7 | - 1.5 8 | - 2.0 9 | - 0.05 10 | table_friction: 11 | - 1.0 12 | - 5e-3 13 | - 1e-4 14 | board_full_size: 15 | - 1.0 16 | - 0.4 17 | - 0.03 18 | nail_hammered_in_reward: -1.0 19 | hammer_gripped_reward_bonus: 0.0 20 | goal_tolerance: 0.05 21 | n_nail_placements_sampled_per_100_steps: 1 22 | gripper_controllable: false 23 | human_animation_names: 24 | - CollaborativeHammering/0 25 | - CollaborativeHammering/1 26 | - CollaborativeHammering/2 27 | - CollaborativeHammering/3 28 | - CollaborativeHammering/4 29 | - CollaborativeHammering/5 30 | - CollaborativeHammering/6 31 | - CollaborativeHammering/7 32 | - CollaborativeHammering/8 33 | obstacle_placement_initializer: null 34 | human_animation_freq: 100 -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/default/collaborative_lifting_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - human_env 3 | - _self_ 4 | 5 | env_id: CollaborativeLiftingCart 6 | table_full_size: 7 | - 0.4 8 | - 1.5 9 | - 0.05 10 | table_friction: 11 | - 1.0 12 | - 5e-3 13 | - 1e-4 14 | board_full_size: 15 | - 1.0 16 | - 0.5 17 | - 0.03 18 | board_released_reward: -10 19 | collision_reward: -10 20 | imbalance_failure_reward: -10 21 | min_balance: 0.8 22 | obstacle_placement_initializer: null 23 | horizon: 1000 24 | human_animation_names: 25 | - CollaborativeLifting/2 26 | - CollaborativeLifting/3 27 | - CollaborativeLifting/4 28 | - CollaborativeLifting/5 29 | - CollaborativeLifting/6 30 | - CollaborativeLifting/7 31 | - CollaborativeLifting/8 32 | - CollaborativeLifting/9 33 | - CollaborativeLifting/10 34 | done_at_collision: false 35 | done_at_success: false 36 | human_animation_freq: 100 37 | n_animations_sampled_per_100_steps: 3 -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/default/collaborative_stacking_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - human_env 3 | - _self_ 4 | 5 | env_id: CollaborativeStackingCart 6 | table_full_size: 7 | - 1.2 8 | - 2.0 9 | - 0.05 10 | table_friction: 11 | - 1.0 12 | - 5e-3 13 | - 1e-4 14 | object_full_size: 15 | - 0.045 16 | - 0.045 17 | - 0.045 18 | reward_scale: 1.0 19 | reward_shaping: false 20 | collision_reward: -10 21 | stack_toppled_reward: -10 22 | task_reward: 1 23 | second_cube_at_target_reward: -1 24 | fourth_cube_at_target_reward: -1 25 | object_gripped_reward: 0 26 | goal_dist: 0.025 27 | obstacle_placement_initializer: null 28 | human_animation_names: 29 | - CollaborativeStacking/0 30 | - CollaborativeStacking/1 31 | - CollaborativeStacking/2 32 | - CollaborativeStacking/3 33 | - CollaborativeStacking/4 34 | - CollaborativeStacking/5 35 | - CollaborativeStacking/6 36 | - CollaborativeStacking/7 37 | done_at_collision: false 38 | done_at_success: false 39 | shield_type: SSM -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/default/human_env.yaml: -------------------------------------------------------------------------------- 1 | env_id: HumanEnv 2 | robot_base_offset: null 3 | env_configuration: default 4 | controller_configs: null 5 | gripper_types: default 6 | initialization_noise: 
default 7 | use_camera_obs: true 8 | use_object_obs: true 9 | reward_scale: 1.0 10 | reward_shaping: false 11 | collision_reward: -10 12 | task_reward: 1 13 | done_at_success: false 14 | done_at_collision: false 15 | has_renderer: false 16 | has_offscreen_renderer: true 17 | render_camera: frontview 18 | render_collision_mesh: false 19 | render_visual_mesh: true 20 | render_gpu_device_id: -1 21 | control_freq: 10 22 | horizon: 1000 23 | ignore_done: false 24 | hard_reset: true 25 | camera_names: frontview 26 | camera_heights: 256 27 | camera_widths: 256 28 | camera_depths: false 29 | camera_segmentations: null 30 | renderer: mujoco 31 | renderer_config: null 32 | shield_type: SSM 33 | visualize_failsafe_controller: false 34 | visualize_pinocchio: false 35 | control_sample_time: 0.004 36 | human_animation_names: 37 | - CMU/62_01 38 | - CMU/62_03 39 | - CMU/62_04 40 | - CMU/62_07 41 | - CMU/62_09 42 | - CMU/62_10 43 | - CMU/62_12 44 | - CMU/62_13 45 | - CMU/62_14 46 | - CMU/62_15 47 | - CMU/62_16 48 | - CMU/62_18 49 | - CMU/62_19 50 | - CMU/62_20 51 | base_human_pos_offset: 52 | - 0.0 53 | - 0.0 54 | - 0.0 55 | human_animation_freq: 120 56 | human_rand: 57 | - 0.0 58 | - 0.0 59 | - 0.0 60 | n_animations_sampled_per_100_steps: 5 61 | safe_vel: 0.001 62 | self_collision_safety: 0.01 63 | collision_debounce_delay: 0.01 64 | seed: 0 65 | verbose: false 66 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/default/human_object_inspection_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - pick_place_human_cart 3 | - _self_ 4 | 5 | env_id: HumanObjectInspectionCart 6 | human_animation_names: 7 | - ObjectInspection/0 8 | - ObjectInspection/1 9 | - ObjectInspection/2 10 | - ObjectInspection/3 11 | - ObjectInspection/4 12 | - ObjectInspection/5 13 | - ObjectInspection/6 14 | - ObjectInspection/7 15 | object_at_target_reward: -1 16 | goal_exit_tolerance: 0.02 17 | n_animations_sampled_per_100_steps: 2 18 | n_object_placements_sampled_per_100_steps: 3 19 | object_placement_initializer: null -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/default/human_robot_handover_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - pick_place_human_cart 3 | - _self_ 4 | 5 | env_id: HumanRobotHandoverCart 6 | human_animation_names: 7 | - HumanRobotHandover/0 8 | - HumanRobotHandover/1 9 | - HumanRobotHandover/2 10 | - HumanRobotHandover/3 11 | - HumanRobotHandover/4 12 | - HumanRobotHandover/5 13 | - HumanRobotHandover/6 14 | - HumanRobotHandover/7 15 | - HumanRobotHandover/advanced_0 16 | - HumanRobotHandover/advanced_1 17 | - HumanRobotHandover/advanced_2 18 | human_animation_freq: 100 19 | n_animations_sampled_per_100_steps: 2 20 | shield_type: PFL 21 | n_targets_sampled_per_100_steps: 2 22 | target_placement_initializer: null 23 | object_at_target_reward: -1.0 24 | table_full_size: 25 | - 1.0 26 | - 2.0 27 | - 0.05 28 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/default/pick_place_human_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - human_env 3 | - _self_ 4 | 5 | env_id: PickPlaceHumanCart 6 | table_full_size: 7 | - 1.5 8 | - 2.0 9 | - 0.05 10 | table_friction: 11 | - 1.0 12 | - 5e-3 13 | - 1e-4 14 
| goal_dist: 0.1 15 | object_gripped_reward: -1.0 16 | obstacle_placement_initializer: null 17 | human_animation_names: 18 | - CMU/62_01 19 | - CMU/62_03 20 | - CMU/62_04 21 | - CMU/62_07 22 | - CMU/62_09 23 | - CMU/62_10 24 | - CMU/62_12 25 | - CMU/62_13 26 | - CMU/62_14 27 | - CMU/62_15 28 | - CMU/62_16 29 | - CMU/62_18 30 | - CMU/62_19 -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/default/pick_place_pointing_human_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - pick_place_human_cart 3 | - _self_ 4 | 5 | env_id: PickPlacePointingHumanCart 6 | human_animation_names: 7 | - PickPlacePointingHuman/0 8 | - PickPlacePointingHuman/1 9 | n_object_placements_sampled_per_100_steps: 3 10 | object_placement_initializer: null 11 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/default/reach_human.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - human_env 3 | - _self_ 4 | 5 | env_id: ReachHuman 6 | table_full_size: 7 | - 1.5 8 | - 2.0 9 | - 0.05 10 | table_friction: 11 | - 1.5 12 | - 2.0 13 | - 0.05 14 | goal_dist: 0.1 15 | n_goals_sampled_per_100_steps: 20 16 | object_placement_initializer: null 17 | obstacle_placement_initializer: null 18 | human_animation_names: 19 | - CMU/62_01 20 | - CMU/62_03 21 | - CMU/62_04 22 | - CMU/62_07 23 | - CMU/62_09 24 | - CMU/62_10 25 | - CMU/62_12 26 | - CMU/62_13 27 | - CMU/62_14 28 | - CMU/62_15 29 | - CMU/62_16 30 | - CMU/62_18 31 | - CMU/62_19 -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/default/reach_human_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - reach_human 3 | - _self_ 4 | 5 | env_id: ReachHumanCart 6 | table_friction: 7 | - 1.0 # reach_human: 1.5 8 | - 5e-3 # reach_human: 2.0 9 | - 1e-4 # reach_human: 0.05 10 | goal_dist: 0.05 # reach_human: 0.1 11 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/default/robot_human_handover_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - pick_place_human_cart 3 | - _self_ 4 | 5 | env_id: RobotHumanHandoverCart 6 | human_animation_names: 7 | - RobotHumanHandover/0 8 | - RobotHumanHandover/1 9 | - RobotHumanHandover/2 10 | - RobotHumanHandover/3 11 | - RobotHumanHandover/4 12 | - RobotHumanHandover/5 13 | - RobotHumanHandover/6 14 | - RobotHumanHandover/7 15 | - RobotHumanHandover/8 16 | - RobotHumanHandover/advanced_0 17 | - RobotHumanHandover/advanced_1 18 | - RobotHumanHandover/advanced_2 19 | - RobotHumanHandover/advanced_3 20 | - RobotHumanHandover/advanced_4 21 | - RobotHumanHandover/advanced_5 22 | human_animation_freq: 100 23 | n_animations_sampled_per_100_steps: 2 24 | shield_type: PFL 25 | n_object_placements_sampled_per_100_steps: 3 26 | object_placement_initializer: null 27 | object_in_human_hand_reward: -1.0 28 | object_gripped_reward: -1.0 -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/human_object_inspection_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/human_object_inspection_cart@_here_ 3 | - 
_self_ 4 | 5 | robot_base_offset: 6 | - 0.0 7 | - 0.0 8 | - 0.0 9 | use_camera_obs: false 10 | use_object_obs: true 11 | collision_reward: 0 12 | has_offscreen_renderer: false 13 | render_camera: null 14 | horizon: 1000 15 | hard_reset: false 16 | done_at_collision: false 17 | done_at_success: false 18 | human_rand: 19 | - 0.0 20 | - 0.5 21 | - 0.0 22 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/human_robot_handover_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/human_robot_handover_cart@_here_ 3 | - _self_ 4 | 5 | robot_base_offset: 6 | - 0.0 7 | - 0.0 8 | - 0.0 9 | use_camera_obs: false 10 | use_object_obs: true 11 | has_offscreen_renderer: false 12 | render_camera: null 13 | horizon: 1000 14 | hard_reset: false 15 | done_at_collision: false 16 | done_at_success: true 17 | human_animation_names: 18 | - HumanRobotHandover/0 19 | - HumanRobotHandover/1 20 | - HumanRobotHandover/2 21 | - HumanRobotHandover/3 22 | - HumanRobotHandover/4 23 | - HumanRobotHandover/5 24 | - HumanRobotHandover/6 25 | - HumanRobotHandover/7 26 | human_animation_freq: 90 27 | n_targets_sampled_per_100_steps: 3 28 | object_at_target_reward: 0.0 29 | object_gripped_reward: -0.25 30 | collision_reward: -1.0 31 | human_rand: 32 | - 0.0 33 | - 0.2 34 | - 0.1 -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/pick_place_human_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/pick_place_human_cart@_here_ 3 | - _self_ 4 | 5 | robot_base_offset: 6 | - 0.0 7 | - 0.0 8 | - 0.0 9 | use_camera_obs: false 10 | use_object_obs: true 11 | collision_reward: 0 12 | has_offscreen_renderer: false 13 | render_camera: null 14 | horizon: 1000 15 | hard_reset: false 16 | done_at_collision: false 17 | done_at_success: false 18 | target_placement_initializer: null 19 | n_object_placements_sampled_per_100_steps: 3 20 | n_targets_sampled_per_100_steps: 3 21 | object_placement_initializer: null 22 | object_gripped_reward: -0.25 23 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/pick_place_pointing_human_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/pick_place_pointing_human_cart@_here_ 3 | - _self_ 4 | 5 | robot_base_offset: 6 | - 0.0 7 | - 0.0 8 | - 0.0 9 | use_camera_obs: false 10 | use_object_obs: true 11 | collision_reward: 0 12 | has_offscreen_renderer: false 13 | render_camera: null 14 | horizon: 500 15 | hard_reset: false 16 | done_at_collision: false 17 | done_at_success: false 18 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/reach_human.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/reach_human@_here_ 3 | - _self_ 4 | 5 | robot_base_offset: 6 | - 0.0 7 | - 0.0 8 | - 0.0 9 | table_friction: 10 | - 1.0 11 | - 5e-3 12 | - 1e-4 13 | use_camera_obs: false 14 | use_object_obs: true 15 | collision_reward: 0 16 | has_offscreen_renderer: false 17 | render_camera: null 18 | control_freq: 10 19 | horizon: 100 20 | ignore_done: true 21 | hard_reset: false 22 | camera_names: frontview 23 | safe_vel: 0.01 24 | self_collision_safety: 0.012 25 | 
done_at_success: true 26 | randomize_initial_pos: false -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/reach_human_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/reach_human_cart@_here_ 3 | - _self_ 4 | 5 | robot_base_offset: 6 | - 0.0 7 | - 0.0 8 | - 0.0 9 | table_friction: 10 | - 1.0 11 | - 5e-3 12 | - 1e-4 13 | use_camera_obs: false 14 | use_object_obs: true 15 | collision_reward: 0 16 | has_offscreen_renderer: false 17 | render_camera: null 18 | control_freq: 10 19 | horizon: 100 20 | ignore_done: true 21 | hard_reset: false 22 | camera_names: frontview 23 | safe_vel: 0.01 24 | self_collision_safety: 0.012 25 | done_at_success: true 26 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/environment/robot_human_handover_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/robot_human_handover_cart@_here_ 3 | - _self_ 4 | 5 | robot_base_offset: 6 | - 0.0 7 | - 0.0 8 | - 0.0 9 | use_camera_obs: false 10 | use_object_obs: true 11 | has_offscreen_renderer: false 12 | render_camera: null 13 | horizon: 1000 14 | hard_reset: false 15 | done_at_collision: false 16 | done_at_success: true 17 | human_animation_names: 18 | - RobotHumanHandover/0 19 | - RobotHumanHandover/1 20 | - RobotHumanHandover/2 21 | - RobotHumanHandover/3 22 | - RobotHumanHandover/4 23 | - RobotHumanHandover/5 24 | - RobotHumanHandover/6 25 | - RobotHumanHandover/7 26 | - RobotHumanHandover/8 27 | human_animation_freq: 90 28 | object_in_human_hand_reward: 0.0 29 | object_gripped_reward: -0.25 30 | collision_reward: -1.0 31 | human_rand: 32 | - 0.0 33 | - 0.2 34 | - 0.1 35 | goal_dist: 0.06 -------------------------------------------------------------------------------- /human_robot_gym/training/config/eval.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - override wrappers/visualization: default_visualization 3 | 4 | environment: 5 | has_renderer: true 6 | run: 7 | n_envs: 1 8 | test_only: true 9 | load_step: final 10 | id: null # needs to be overwritten 11 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/expert/collaborative_lifting_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/collaborative_lifting_cart@_here_ 3 | - _self_ 4 | 5 | signal_to_noise_ratio: 0.9 6 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/expert/default/collaborative_lifting_cart.yaml: -------------------------------------------------------------------------------- 1 | id: CollaborativeLiftingCart 2 | signal_to_noise_ratio: 1 3 | board_size: ${environment.board_full_size} 4 | human_grip_offset: 0.1 5 | delta_time: 0.01 6 | seed: null 7 | obs_keys: 8 | - vec_eef_to_human_lh 9 | - vec_eef_to_human_rh 10 | - board_quat 11 | - board_gripped -------------------------------------------------------------------------------- /human_robot_gym/training/config/expert/default/pick_place_human_cart.yaml: -------------------------------------------------------------------------------- 1 | id: PickPlaceHumanCart 2 | signal_to_noise_ratio: 1 3 | hover_dist: 0.2 4 | tan_theta: 0.5 5 | horizontal_epsilon: 0.03 6 | vertical_epsilon: 
0.015 7 | goal_dist: 0.08 8 | gripper_fully_opened_threshold: 0.02 9 | release_when_delivered: true 10 | delta_time: 0.01 11 | seed: null 12 | obs_keys: 13 | - object_gripped 14 | - vec_eef_to_object 15 | - vec_eef_to_target 16 | - robot0_gripper_qpos 17 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/expert/default/reach_human.yaml: -------------------------------------------------------------------------------- 1 | id: ReachHuman 2 | signal_to_noise_ratio: 1 3 | delta_time: 0.01 4 | seed: null 5 | obs_keys: 6 | - goal_difference 7 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/expert/default/reach_human_cart.yaml: -------------------------------------------------------------------------------- 1 | id: ReachHumanCart 2 | signal_to_noise_ratio: 1 3 | delta_time: 0.01 4 | seed: null 5 | obs_keys: 6 | - goal_difference 7 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/expert/pick_place_human_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/pick_place_human_cart@_here_ 3 | - _self_ 4 | 5 | signal_to_noise_ratio: 0.9 6 | horizontal_epsilon: 0.035 7 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/expert/reach_human.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/reach_human@_here_ 3 | - _self_ 4 | 5 | signal_to_noise_ratio: 0.95 -------------------------------------------------------------------------------- /human_robot_gym/training/config/expert/reach_human_cart.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default/reach_human_cart@_here_ 3 | - _self_ 4 | 5 | signal_to_noise_ratio: 0.95 6 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/human_pick_place_sac_action_imitation.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - robot: schunk 3 | - environment: pick_place_human_cart 4 | - wrappers: safe_ik_action_imitation 5 | - algorithm: sac 6 | - expert: pick_place_human_cart 7 | - run: expert_imitation_training 8 | - wandb_run: default_wandb 9 | - _self_ 10 | - consistent_seeding 11 | # - eval 12 | 13 | run: 14 | type: wandb 15 | n_envs: 8 16 | obs_keys: 17 | - object_gripped 18 | - vec_eef_to_object 19 | - vec_eef_to_target 20 | - gripper_aperture 21 | - dist_eef_to_human_head 22 | - dist_eef_to_human_lh 23 | - dist_eef_to_human_rh 24 | log_interval: 25 | - 5000 26 | - step 27 | wrappers: 28 | ik_position_delta: 29 | action_limit: 0.1 30 | action_based_expert_imitation_reward: 31 | alpha: 0.1 32 | beta: 0.5 33 | iota_m: 0.05 34 | iota_g: 0.1 35 | dataset_name: pick-place-dataset 36 | rsi_prob: 1 37 | expert: 38 | seed: ${run.seed} -------------------------------------------------------------------------------- /human_robot_gym/training/config/human_pick_place_sac_state_imitation.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - robot: schunk 3 | - environment: pick_place_human_cart 4 | - wrappers: safe_ik 5 | - algorithm: sac 6 | - expert: pick_place_human_cart 7 | - run: expert_imitation_training 8 | - wandb_run: default_wandb 9 | - _self_ 10 | - 
consistent_seeding 11 | # - eval 12 | - override wrappers/state_based_expert_imitation_reward: pick_place_human_cart_state_based_expert_imitation_reward 13 | - override wrappers/dataset_obs_norm: default_dataset_obs_norm 14 | 15 | environment: 16 | done_at_success: true 17 | object_gripped_reward: -0.25 18 | horizon: 1000 19 | wrappers: 20 | ik_position_delta: 21 | action_limit: 0.1 22 | state_based_expert_imitation_reward: 23 | dataset_name: pick-place-small-obs 24 | alpha: 0.25 25 | beta: 0.7 26 | rsi_prob: 0.5 27 | use_et: false 28 | iota_m: 0.1 29 | iota_g: 0.01 30 | et_dist: 6 31 | dataset_obs_norm: 32 | dataset_name: ${wrappers.state_based_expert_imitation_reward.dataset_name} 33 | squash_factor: null 34 | allow_different_observation_shapes: true # needed because of time observation 35 | algorithm: 36 | policy_kwargs: 37 | net_arch: 38 | - 64 39 | - 64 40 | - 64 41 | verbose: 0 42 | run: 43 | run_type: wandb 44 | n_envs: 8 45 | n_steps: 3_000_000 46 | obs_keys: 47 | - object_gripped 48 | - vec_eef_to_object 49 | - vec_eef_to_target 50 | - gripper_aperture 51 | - dist_eef_to_human_head 52 | - dist_eef_to_human_lh 53 | - dist_eef_to_human_rh 54 | seed: 0 55 | log_info_keys: 56 | - n_goal_reached 57 | - collision 58 | - collision_type 59 | - n_collisions 60 | - n_collisions_static 61 | - n_collisions_robot 62 | - n_collisions_human 63 | - n_collisions_critical 64 | - timeout 65 | - failsafe_interventions 66 | - action_resamples 67 | - im_rew_mean 68 | - env_rew_mean 69 | - full_rew_mean 70 | - ep_im_rew_mean 71 | - ep_env_rew_mean 72 | - ep_full_rew_mean 73 | - g_im_rew_mean 74 | - m_im_rew_mean 75 | - ep_g_im_rew_mean 76 | - ep_m_im_rew_mean 77 | - early_termination 78 | expert: 79 | seed: ${run.seed} -------------------------------------------------------------------------------- /human_robot_gym/training/config/human_reach_ppo_parallel.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - robot: schunk 3 | - environment: reach_human 4 | - wrappers: safe 5 | - run: parallel_training 6 | - algorithm: ppo 7 | - wandb_run: default_wandb 8 | - _self_ 9 | - consistent_seeding 10 | # - eval # uncomment to evaluate with renderer instead of training 11 | 12 | run: 13 | type: tensorboard 14 | obs_keys: 15 | - object-state 16 | - goal_difference 17 | environment: 18 | reward_shaping: true 19 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/human_reach_sac.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - robot: schunk 3 | - environment: reach_human 4 | - wrappers: safe 5 | - run: parallel_training 6 | - algorithm: sac 7 | - wandb_run: default_wandb 8 | - _self_ 9 | - consistent_seeding 10 | # - eval # uncomment to evaluate with renderer instead of training 11 | 12 | run: 13 | type: tensorboard 14 | obs_keys: 15 | - object-state 16 | - goal_difference 17 | environment: 18 | reward_shaping: true 19 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/human_reach_sac_her.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - robot: schunk 3 | - environment: reach_human 4 | - wrappers: safe 5 | - run: her_training 6 | - algorithm: sac_her 7 | - wandb_run: default_wandb 8 | - _self_ 9 | - consistent_seeding 10 | # - eval # uncomment to evaluate with renderer instead of training 11 | 12 | run: 13 | type: wandb 14 
| obs_keys: 15 | - object-state 16 | - goal_difference 17 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/pick_place_human_cart_expert_data_collection.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - robot: schunk 3 | - environment: pick_place_human_cart 4 | - wrappers: safe_ik 5 | - expert: pick_place_human_cart 6 | - run: expert_imitation_training 7 | - consistent_seeding 8 | - _self_ 9 | # - eval 10 | 11 | environment: 12 | verbose: true 13 | done_at_success: true 14 | run: 15 | obs_keys: 16 | - object_gripped 17 | - vec_eef_to_object 18 | - vec_eef_to_target 19 | - gripper_aperture 20 | - dist_eef_to_human_head 21 | - dist_eef_to_human_lh 22 | - dist_eef_to_human_rh 23 | wrappers: 24 | ik_position_delta: 25 | action_limit: 0.1 26 | expert: 27 | signal_to_noise_ratio: 0.98 28 | seed: ${run.seed} 29 | 30 | 31 | dataset_name: pick-place-dataset 32 | start_episode_index: 0 33 | n_episodes: 200 34 | n_threads: 1 35 | load_episode_index: null 36 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/reach_human_cart_expert_data_collection.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - robot: schunk 3 | - environment: reach_human_cart 4 | - wrappers: safe_ik 5 | - expert: reach_human_cart 6 | - run: expert_imitation_training 7 | - consistent_seeding 8 | - _self_ 9 | # - eval 10 | 11 | environment: 12 | verbose: true 13 | done_at_success: false 14 | run: 15 | obs_keys: 16 | - goal_difference 17 | expert: 18 | signal_to_noise_ratio: 0.98 19 | seed: ${run.seed} 20 | 21 | dataset_name: reach-cart-dataset 22 | start_episode_index: 0 23 | n_episodes: 200 24 | n_threads: 1 25 | load_episode_index: null 26 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/reach_human_expert_data_collection.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - robot: schunk 3 | - environment: reach_human 4 | - wrappers: safe 5 | - expert: reach_human 6 | - run: expert_imitation_training 7 | - consistent_seeding 8 | - _self_ 9 | - eval 10 | 11 | environment: 12 | verbose: true 13 | done_at_success: false 14 | run: 15 | obs_keys: 16 | - goal_difference 17 | expert: 18 | signal_to_noise_ratio: 0.98 19 | seed: ${run.seed} 20 | 21 | dataset_name: reach-dataset 22 | start_episode_index: 0 23 | n_episodes: 200 24 | n_threads: 1 25 | load_episode_index: null 26 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/robot/iiwa.yaml: -------------------------------------------------------------------------------- 1 | name: IIWA 2 | controller_config_path: controllers/failsafe_controller/config/failsafe.json 3 | robot_config_path: models/robots/config/iiwa.json 4 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/robot/jaco.yaml: -------------------------------------------------------------------------------- 1 | name: Jaco 2 | controller_config_path: controllers/failsafe_controller/config/failsafe.json 3 | robot_config_path: models/robots/config/jaco.json 4 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/robot/kinova3.yaml: 
-------------------------------------------------------------------------------- 1 | name: Kinova3 2 | controller_config_path: controllers/failsafe_controller/config/failsafe.json 3 | robot_config_path: models/robots/config/kinova3.json 4 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/robot/panda.yaml: -------------------------------------------------------------------------------- 1 | name: Panda 2 | controller_config_path: controllers/failsafe_controller/config/failsafe.json 3 | robot_config_path: models/robots/config/panda.json 4 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/robot/sawyer.yaml: -------------------------------------------------------------------------------- 1 | name: Sawyer 2 | controller_config_path: controllers/failsafe_controller/config/failsafe.json 3 | robot_config_path: models/robots/config/sawyer.json 4 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/robot/schunk.yaml: -------------------------------------------------------------------------------- 1 | name: Schunk 2 | controller_config_path: controllers/failsafe_controller/config/failsafe.json 3 | robot_config_path: models/robots/config/schunk.json 4 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/robot/ur5e.yaml: -------------------------------------------------------------------------------- 1 | name: UR5e 2 | controller_config_path: controllers/failsafe_controller/config/failsafe.json 3 | robot_config_path: models/robots/config/ur5e.json 4 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/run/default_training.yaml: -------------------------------------------------------------------------------- 1 | n_envs: 1 2 | n_steps: 1_000_000 3 | save_freq: 200_000 4 | test_only: false 5 | load_step: null 6 | id: null 7 | type: debug 8 | log_interval: 9 | - 1000 10 | - step 11 | seed: 0 12 | eval_seed: null 13 | start_index: 0 14 | n_test_episodes: 20 15 | env_type: env 16 | obs_keys: null 17 | expert_obs_keys: null 18 | log_info_keys: 19 | - n_goal_reached 20 | - collision 21 | - collision_type 22 | - n_collisions 23 | - n_collisions_static 24 | - n_collisions_robot 25 | - n_collisions_human 26 | - n_collisions_critical 27 | - timeout 28 | - failsafe_interventions 29 | - action_resamples 30 | monitor_dir: null 31 | vec_env_kwargs: null 32 | monitor_kwargs: null 33 | verbose: true 34 | resetting_interval: null 35 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/run/expert_imitation_training.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default_training 3 | - _self_ 4 | 5 | expert_obs_keys: ${expert.obs_keys} 6 | dataset_name: dataset -------------------------------------------------------------------------------- /human_robot_gym/training/config/run/her_training.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default_training 3 | - _self_ 4 | 5 | env_type: goal_env 6 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/run/parallel_training.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 
| - default_training 3 | - _self_ 4 | 5 | n_envs: 10 6 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/wandb_run/default_wandb.yaml: -------------------------------------------------------------------------------- 1 | project: safe_human_robot_rl 2 | entity: null 3 | group: hrgym 4 | name: hrgym_${run.seed} 5 | tags: null -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/action_based_expert_imitation_reward/default_cart_action_based_expert_imitation_reward.yaml: -------------------------------------------------------------------------------- 1 | alpha: 0 2 | beta: 0 3 | iota_m: 0.1 4 | iota_g: 0.25 5 | m_sim_fn: "gaussian" 6 | g_sim_fn: "gaussian" 7 | rsi_prob: null 8 | dataset_name: dataset -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/action_based_expert_imitation_reward/default_joint_action_based_expert_imitation_reward.yaml: -------------------------------------------------------------------------------- 1 | alpha: 0 2 | beta: 0 3 | iota_m: 0.1 4 | iota_g: 0.25 5 | m_sim_fn: "gaussian" 6 | g_sim_fn: "gaussian" 7 | rsi_prob: null 8 | dataset_name: dataset 9 | normalize_joint_actions: false 10 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/collision_prevention/default_collision_prevention.yaml: -------------------------------------------------------------------------------- 1 | replace_type: 0 2 | n_resamples: 20 3 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/dataset_obs_norm/default_dataset_obs_norm.yaml: -------------------------------------------------------------------------------- 1 | dataset_name: ${run.dataset_name} 2 | mean: null 3 | std: null 4 | squash_factor: null 5 | allow_different_observation_shapes: false -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/default_wrappers.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - optional collision_prevention: null 3 | - optional ik_position_delta: null 4 | - optional visualization: null 5 | - optional action_based_expert_imitation_reward: null 6 | - optional state_based_expert_imitation_reward: null 7 | - optional dataset_obs_norm: null 8 | - _self_ 9 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/ik_position_delta/default_ik_position_delta.yaml: -------------------------------------------------------------------------------- 1 | urdf_file: models/assets/robots/schunk/robot_pybullet.urdf 2 | action_limit: 0.15 3 | x_output_max: 1 4 | x_position_limits: null 5 | residual_threshold: 1e-3 6 | max_iter: 50 7 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/safe.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default_wrappers 3 | - override collision_prevention: default_collision_prevention 4 | - _self_ 5 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/safe_ik.yaml: -------------------------------------------------------------------------------- 1 | 
defaults: 2 | - safe 3 | - override ik_position_delta: default_ik_position_delta 4 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/safe_ik_action_imitation.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - safe_ik 3 | - override action_based_expert_imitation_reward: default_cart_action_based_expert_imitation_reward 4 | - _self_ 5 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/state_based_expert_imitation_reward/collaborative_lifting_cart_state_based_expert_imitation_reward.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default_state_based_expert_imitation_reward 3 | 4 | beta: 0 5 | iota: 0.1 6 | sim_fn: "gaussian" 7 | et_dist: 2 -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/state_based_expert_imitation_reward/default_state_based_expert_imitation_reward.yaml: -------------------------------------------------------------------------------- 1 | dataset_name: ${run.dataset_name} 2 | alpha: 0 3 | observe_time: true 4 | rsi_prob: null 5 | use_et: false 6 | verbose: false 7 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/state_based_expert_imitation_reward/pick_place_human_cart_state_based_expert_imitation_reward.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default_state_based_expert_imitation_reward 3 | 4 | beta: 0 5 | iota_m: 0.1 6 | iota_g: 0.05 7 | m_sim_fn: "gaussian" 8 | g_sim_fn: "gaussian" 9 | et_dist: 2 10 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/state_based_expert_imitation_reward/reach_human_cart_state_based_expert_imitation_reward.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default_state_based_expert_imitation_reward 3 | 4 | iota: 0.1 5 | sim_fn: "gaussian" 6 | et_dist: 2 7 | -------------------------------------------------------------------------------- /human_robot_gym/training/config/wrappers/visualization/default_visualization.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/human_robot_gym/training/config/wrappers/visualization/default_visualization.yaml -------------------------------------------------------------------------------- /human_robot_gym/training/icra_2024_run_experiments.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | 3 | # Run environment evaluations 4 | human_robot_gym/training/icra_2024_environment_evaluation.sh R HumanReach 100 1000000 100 8 800 5 | human_robot_gym/training/icra_2024_environment_evaluation.sh PP PickAndPlace 500 3000000 1000 8 8000 6 | human_robot_gym/training/icra_2024_environment_evaluation.sh CL CollaborativeLifting 100 1000000 5000 8 8000 7 | human_robot_gym/training/icra_2024_environment_evaluation.sh RHH RobotHumanHandover 500 3000000 1000 8 8000 8 | human_robot_gym/training/icra_2024_environment_evaluation.sh HRH HumanRobotHandover 500 3000000 1000 8 8000 9 | human_robot_gym/training/icra_2024_environment_evaluation.sh CS CollaborativeStacking 500 3000000 3000 8 8000 10 | 11 | # Run ablation study on overfitting on human animations 12 | human_robot_gym/training/icra_2024_animation_overfitting.sh 13 | -------------------------------------------------------------------------------- /human_robot_gym/training/playback_recorded_episode.py: -------------------------------------------------------------------------------- 1 | """This file contains a script to playback episodes from datasets collected with the `DatasetCollectionWrapper`. 2 | 3 | We use hydra config files. The config files are located in the `human_robot_gym/training/config` folder. 4 | 5 | To specify the config file to use for playback, use the `--config-name` flag: 6 | 7 | ``` 8 | python human_robot_gym/training/playback_recorded_episode.py --config-name pick_place_data_playback 9 | ``` 10 | 11 | For more information about hydra, see https://hydra.cc/docs/intro/ 12 | or refer to the human-robot-gym documentation: https://cps-rl.pages.gitlab.lrz.de/human-robot-gym/docs/training.html 13 | 14 | Author: 15 | Felix Trost (FT) 16 | 17 | Changelog: 18 | 15.06.23 FT File creation 19 | """ 20 | import os 21 | 22 | import numpy as np 23 | 24 | import hydra 25 | from omegaconf import OmegaConf 26 | 27 | import robosuite # noqa: F401 28 | 29 | import human_robot_gym.robots # noqa: F401 30 | from human_robot_gym.utils.training_utils import create_wrapped_env_from_config 31 | from human_robot_gym.utils.mjcf_utils import file_path_completion 32 | from human_robot_gym.utils.config_utils import DataCollectionConfig 33 | 34 | 35 | def load_ep_data(config: DataCollectionConfig): 36 | """Load the xml file and the state dictionary of an episode. 37 | 38 | Args: 39 | config (DataCollectionConfig): Data collection config 40 | """ 41 | load_episode = config.load_episode_index 42 | 43 | if load_episode is None: 44 | print("Select which episode to load:") 45 | load_episode = int(input()) 46 | 47 | episode_dir = "%06i" % load_episode 48 | dataset_path = file_path_completion(f"../datasets/{config.dataset_name}/ep_{episode_dir}") 49 | xml_path = os.path.join(dataset_path, "model.xml") 50 | states_path = os.path.join(dataset_path, "state.npz") 51 | 52 | xml_file = None 53 | 54 | with open(xml_path, "r") as f: 55 | xml_file = f.read() 56 | 57 | dic = np.load(states_path, allow_pickle=True) 58 | 59 | return xml_file, dic 60 | 61 | 62 | def playback_trajectory(config: DataCollectionConfig): 63 | """Playback a trajectory from a dataset. 64 | 65 | Loops the episode by playing it forwards and backwards. 
66 | 67 | Args: 68 | config (DataCollectionConfig): Data collection config 69 | """ 70 | env = create_wrapped_env_from_config(config) 71 | 72 | xml_file, dic = load_ep_data(config) 73 | env.reset() 74 | env.unwrapped.reset_from_xml_string(xml_file) 75 | 76 | while True: 77 | for state in list(dic["states"]) + list(dic["states"][::-1]): 78 | env.unwrapped.set_environment_state(state) 79 | env.unwrapped.render() 80 | 81 | 82 | @hydra.main(version_base=None, config_path="config", config_name=None) 83 | def main(config: DataCollectionConfig): 84 | if config.run.verbose: 85 | print(OmegaConf.to_yaml(cfg=config, resolve=True)) 86 | 87 | playback_trajectory(config=config) 88 | 89 | 90 | if __name__ == '__main__': 91 | main() 92 | -------------------------------------------------------------------------------- /human_robot_gym/training/train_SB3.py: -------------------------------------------------------------------------------- 1 | """This file implements a training script for training stable-baselines3 agents in human-robot-gym environments. 2 | 3 | We use hydra to configure the training. The config files are located in the `human_robot_gym/training/config` folder. 4 | To specify the config file to use for training, use the `--config-name` flag: 5 | 6 | ``` 7 | python human_robot_gym/training/train_SB3.py --config-name human_reach_ppo_parallel 8 | ``` 9 | 10 | For more information about hydra, see https://hydra.cc/docs/intro/ 11 | or refer to the human-robot-gym documentation: https://cps-rl.pages.gitlab.lrz.de/human-robot-gym/docs/training.html 12 | 13 | Author: 14 | Felix Trost (FT) 15 | 16 | Changelog: 17 | 25.04.23 FT File creation 18 | """ 19 | import torch 20 | import hydra 21 | from omegaconf import OmegaConf 22 | 23 | import robosuite # noqa: F401 24 | 25 | from human_robot_gym.utils.config_utils import TrainingConfig 26 | from human_robot_gym.utils.training_utils_SB3 import train_and_evaluate 27 | import human_robot_gym.robots # noqa: F401 28 | 29 | 30 | @hydra.main(version_base=None, config_path="config", config_name=None) 31 | def main(config: TrainingConfig): 32 | torch.set_num_threads(1) 33 | 34 | if config.run.verbose: 35 | print(OmegaConf.to_yaml(cfg=config, resolve=True)) 36 | 37 | try: 38 | train_and_evaluate(config) 39 | except Exception as e: 40 | import traceback 41 | traceback.print_exception(type(e), e, e.__traceback__) 42 | raise e 43 | 44 | 45 | if __name__ == "__main__": 46 | main() 47 | -------------------------------------------------------------------------------- /human_robot_gym/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """This package defines utility functions.""" 2 | # flake8: noqa 3 | from robosuite.utils.errors import ( 4 | robosuiteError, 5 | XMLError, 6 | SimulationError, 7 | RandomizationError, 8 | ) 9 | from human_robot_gym import models 10 | -------------------------------------------------------------------------------- /human_robot_gym/utils/env_util.py: -------------------------------------------------------------------------------- 1 | """This file describes functions for creating the human-robot-gym environments. 
2 | 3 | Contributors: 4 | Felix Trost (FT) 5 | Martin Winter (MW) 6 | 7 | Changelog: 8 | 15.02.23 FT added support for expert observation environments 9 | 16.07.23 MW moved all SB3 specific code to human_robot_gym/utils/env_util_SB3.py 10 | """ 11 | import struct 12 | from typing import Optional, Dict, Any, List 13 | import gym 14 | from human_robot_gym.wrappers.time_limit import TimeLimit 15 | import robosuite 16 | from robosuite.wrappers.gym_wrapper import GymWrapper 17 | from human_robot_gym.wrappers.goal_env_wrapper import GoalEnvironmentGymWrapper 18 | from human_robot_gym.wrappers.expert_obs_wrapper import ExpertObsWrapper 19 | 20 | 21 | def make_robosuite_env( 22 | env_id: str, 23 | env_kwargs: Optional[Dict[str, Any]] = None 24 | ) -> gym.Env: 25 | """Make the robosuite environment.""" 26 | return robosuite.make(env_id, **env_kwargs) 27 | 28 | 29 | def add_time_limit( 30 | env: gym.Env, 31 | max_episode_steps: int = 1000 32 | ) -> gym.Env: 33 | if env.spec is None: 34 | env.spec = struct 35 | if max_episode_steps is not None: 36 | env = TimeLimit(env, max_episode_steps=max_episode_steps) 37 | return env 38 | 39 | 40 | def make_gym_env( 41 | env_id: str, 42 | env_kwargs: Optional[Dict[str, Any]] = None, 43 | obs_keys: Optional[List[str]] = None, 44 | ) -> gym.Env: 45 | """Make the gym environment and add the optional TimeLimit wrapper. 46 | 47 | We add the TimeLimit wrapper here because it would require a second Monitor wrapper later. 48 | """ 49 | env = GymWrapper(make_robosuite_env(env_id, env_kwargs), keys=obs_keys) 50 | env = add_time_limit(env, max_episode_steps=env_kwargs.get("horizon", None)) 51 | return env 52 | 53 | 54 | def make_goal_env( 55 | env_id: str, 56 | env_kwargs: Optional[Dict[str, Any]] = None, 57 | obs_keys: Optional[List[str]] = None, 58 | ) -> gym.Env: 59 | """Make the goal environment and add the optional TimeLimit wrapper. 60 | 61 | We add the TimeLimit wrapper here because it would require a second Monitor wrapper later. 62 | """ 63 | env = GoalEnvironmentGymWrapper(make_robosuite_env(env_id, env_kwargs), keys=obs_keys) 64 | env = add_time_limit(env, max_episode_steps=env_kwargs.get("horizon", None)) 65 | return env 66 | 67 | 68 | def make_expert_obs_env( 69 | env_id: str, 70 | env_kwargs: Optional[Dict[str, Any]] = None, 71 | obs_keys: Optional[List[str]] = None, 72 | expert_obs_keys: Optional[List[str]] = None, 73 | ) -> gym.Env: 74 | """Make the expert obs environment and add the optional TimeLimit wrapper. 75 | 76 | We add the TimeLimit wrapper here because it would require a second Monitor wrapper later. 77 | """ 78 | env = ExpertObsWrapper(make_robosuite_env(env_id, env_kwargs), agent_keys=obs_keys, expert_keys=expert_obs_keys) 79 | env = add_time_limit(env, max_episode_steps=env_kwargs.get("horizon", None)) 80 | return env 81 | -------------------------------------------------------------------------------- /human_robot_gym/utils/errors.py: -------------------------------------------------------------------------------- 1 | """This file describes custom error messages for robotics applications. 2 | 3 | Owner: 4 | Jakob Thumm (JT) 5 | 6 | Contributors: 7 | Jonathan Kuelz (JK) 8 | 9 | Changelog: 10 | 2.5.22 JT Formatted docstrings 11 | """ 12 | import numpy as np 13 | import numpy.testing as np_test 14 | 15 | 16 | class DuplicateValueError(Exception): 17 | """Raise whenever a duplicate value is added to a container (e.g. 
a list, set) that allows unique values only.""" 18 | 19 | pass 20 | 21 | 22 | class InvalidAssemblyError(Exception): 23 | """Raise when an assembly of modules should be created that's not valid.""" 24 | 25 | pass 26 | 27 | 28 | class UniqueValueError(ValueError): 29 | """Raise whenever a value, such as an ID or name should be unique, but isn't.""" 30 | 31 | pass 32 | 33 | 34 | class UnexpectedSpatialShapeError(ValueError): 35 | """Raise whenever a spatial input (point, rotation, transformation) has the wrong input shape.""" 36 | 37 | pass 38 | 39 | 40 | def assert_has_3d_point(p: np.ndarray): 41 | """Check whether the input can be interpreted as a point in cartesian space. 42 | 43 | Raises: 44 | UnexpectedSpatialShapeError 45 | """ 46 | s = p.shape 47 | if s not in [(3,), (1, 3), (3, 1), (4, 4)]: 48 | raise UnexpectedSpatialShapeError( 49 | f"Array of shape {s} cannot be interpreted as 3D point." 50 | ) 51 | 52 | 53 | def assert_is_3d_point(p: np.ndarray): 54 | """Assert on points in cartesian space. 55 | 56 | Raises: 57 | UnexpectedSpatialShapeError 58 | """ 59 | s = p.shape 60 | if s not in [(3,), (3, 1)]: 61 | raise UnexpectedSpatialShapeError( 62 | f"Array of shape {s} is not a point in 3D space." 63 | ) 64 | 65 | 66 | def assert_is_rotation_matrix(R: np.ndarray): 67 | """Check whether a 3x3 matrix is a valid rotation (orthonormal). 68 | 69 | Raises 70 | AssertionError 71 | """ 72 | if R.shape != (3, 3): 73 | raise UnexpectedSpatialShapeError( 74 | f"Rotation matrix must be of shape (3, 3). Given: {R.shape}" 75 | ) 76 | np_test.assert_allclose( 77 | R @ R.T, np.eye(3), err_msg="Rotation matrix is not orthogonal", atol=1e-9 78 | ) 79 | np_test.assert_almost_equal( 80 | np.linalg.det(R), 81 | 1.0, 82 | err_msg="Rotation matrix determinant != 1 (not volume preserving)", 83 | ) 84 | 85 | 86 | def assert_is_homogeneous_transformation(T: np.ndarray): 87 | """Check for shape, valid rotation matrix and last row. 88 | 89 | Raises: 90 | UnexpectedSpatialShapeError, AssertionError 91 | """ 92 | if T.shape != (4, 4): 93 | raise UnexpectedSpatialShapeError( 94 | f"Array of shape {T.shape} cannot be interpreted as homogeneous transformation." 95 | ) 96 | assert_is_rotation_matrix(T[:3, :3]) 97 | np_test.assert_allclose(T[3, :], np.array([0.0, 0.0, 0.0, 1.0]), atol=1e-9) 98 | -------------------------------------------------------------------------------- /human_robot_gym/utils/expert_imitation_reward_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def gaussian_similarity_fn(delta: float, iota: float) -> float: 5 | r"""Similarity function for expert imitation reward training. 6 | 7 | Used in `ActionBasedExpertImitationRewardWrapper` and `StateBasedExpertImitationRewardWrapper` subclasses. 8 | Rescaled Gaussian function that returns a similarity value in $(0, 1]$ based on a non-negative distance metric 9 | $\delta$ between the agent and expert. 10 | 11 | DeepMimic (Peng et al., 2018) uses a similar model for the end-effector similarity reward. 
12 | Link to paper: https://arxiv.org/abs/1804.02717 13 | 14 | Exponential form: 15 | $sim_G(\delta) = exp{-0.5 \cdot (\delta \cdot \frac{\nu}{\iota})^2}$ 16 | 17 | where: 18 | \nu = sqrt{2 \cdot ln(2)} 19 | 20 | Simplifies to: 21 | sim_G(\delta) = 2^{-(\frac{\delta}{\iota})^2} 22 | 23 | Args: 24 | delta (float): distance metric between agent and expert 25 | iota (float): half width at half maximum; 26 | distance after which the reward should be at 0.5 27 | Returns: 28 | float: similarity based on distance 29 | """ 30 | return 2 ** (-(delta / iota) ** 2) 31 | 32 | 33 | def tanh_similarity_fn(delta: float, iota: float) -> float: 34 | r"""Similarity function for expert imitation reward training. 35 | 36 | Used in `ActionBasedExpertImitationRewardWrapper` and `StateBasedExpertImitationRewardWrapper` subclasses. 37 | Rescaled tanh function that returns a similarity value in $(0, 1]$ based on a non-negative distance metric 38 | $\delta$ between the agent and expert. 39 | 40 | Function: $sim_T(\delta) = -tanh(tan(0.5) \cdot \frac{\delta}{\iota}) + 1$ 41 | 42 | Args: 43 | delta (float): distance metric between agent and expert 44 | iota (float): scaling parameter: distance after which the reward should be at 0.5 45 | Returns: 46 | float: similarity based on distance 47 | """ 48 | return -np.tanh(np.tan(0.5) * delta / iota) + 1 49 | 50 | 51 | def similarity_fn(name: str, delta: float, iota: float) -> float: 52 | r"""Calculate similarity from a non-negative distance metric for expert imitation reward training. 53 | 54 | Used in `ActionBasedExpertImitationRewardWrapper` and `StateBasedExpertImitationRewardWrapper` subclasses. 55 | Returns a similarity value in $(0, 1]$. The similarity function to use is specified by `name`. 56 | 57 | Args: 58 | name (str): similarity function name. Can be either `"gaussian"` or `"tanh"`. 59 | delta (float): distance metric between agent and expert 60 | iota (float): scaling parameter: distance after which the reward should be at 0.5 61 | 62 | Returns: 63 | function: similarity value based on distance 64 | 65 | Raises: 66 | ValueError: Unknown similarity function: {name} 67 | """ 68 | if name == 'gaussian': 69 | return gaussian_similarity_fn(delta=delta, iota=iota) 70 | elif name == 'tanh': 71 | return tanh_similarity_fn(delta=delta, iota=iota) 72 | else: 73 | raise ValueError(f'Unknown similarity function: {name}') 74 | -------------------------------------------------------------------------------- /human_robot_gym/utils/pairing.py: -------------------------------------------------------------------------------- 1 | """This file describes the cantor pairing function for hashing two integers into one.""" 2 | 3 | 4 | def cantor_pairing(a: int, b: int) -> int: 5 | """Cantor pairing function. 6 | 7 | hash = (a + b) * (a + b + 1) / 2 + b 8 | 9 | Args: 10 | a (int): first integer 11 | b (int): second integer 12 | 13 | Returns: 14 | int: the cantor pairing of a and b 15 | """ 16 | return (a + b) * (a + b + 1) // 2 + b 17 | -------------------------------------------------------------------------------- /human_robot_gym/utils/pinocchio_utils.py: -------------------------------------------------------------------------------- 1 | """This file describes utility functions for pinocchio. 2 | 3 | Owner: 4 | Jakob Thumm (JT) 5 | 6 | Contributors: 7 | 8 | Changelog: 9 | 2.5.22 JT Formatted docstrings 10 | """ 11 | import numpy as np 12 | 13 | 14 | def q_pin(arr): 15 | """Append a 0.0 to the given array. 16 | 17 | The pinocchio model has an additional fictional joint. 
18 | 19 | Args: 20 | arr (np.array) 21 | 22 | Returns: 23 | np.array 24 | """ 25 | return np.append(arr, 0.0) 26 | -------------------------------------------------------------------------------- /human_robot_gym/wrappers/__init__.py: -------------------------------------------------------------------------------- 1 | """This package describes the gym wrappers and some additional training functionality.""" 2 | # flake8: noqa 3 | from robosuite.wrappers.wrapper import Wrapper 4 | from robosuite.wrappers.data_collection_wrapper import DataCollectionWrapper 5 | from robosuite.wrappers.demo_sampler_wrapper import DemoSamplerWrapper 6 | from robosuite.wrappers.domain_randomization_wrapper import DomainRandomizationWrapper 7 | from robosuite.wrappers.visualization_wrapper import VisualizationWrapper 8 | 9 | try: 10 | from robosuite.wrappers.gym_wrapper import GymWrapper 11 | except: 12 | print("Warning: make sure gym is installed if you want to use the GymWrapper.") 13 | -------------------------------------------------------------------------------- /human_robot_gym/wrappers/normalized_box_env.py: -------------------------------------------------------------------------------- 1 | """This file descibes the normalized box environment wrapper. 2 | 3 | Owner: 4 | Jakob Thumm (JT) 5 | 6 | Contributors: 7 | 8 | Changelog: 9 | 2.5.22 JT Formatted docstrings 10 | """ 11 | import numpy as np 12 | from gym.spaces import Box 13 | import gym.core 14 | 15 | 16 | class NormalizedBoxEnv(gym.core.Wrapper): 17 | """Normalize action to lie in [-1, 1]. 18 | 19 | Optionally normalize observations and scale reward. 20 | 21 | Args: 22 | env: gym environment to wrap 23 | reward_scale: scale the reward 24 | obs_mean: usual mean of the observation 25 | obs_std: standard deviation of the observation 26 | """ 27 | 28 | def __init__( 29 | self, 30 | env, 31 | reward_scale=1.0, 32 | obs_mean=None, 33 | obs_std=None, 34 | ): # noqa: D107 35 | super().__init__(env) 36 | self._should_normalize = not (obs_mean is None and obs_std is None) 37 | if self._should_normalize: 38 | if obs_mean is None: 39 | obs_mean = np.zeros_like(env.observation_space.low) 40 | else: 41 | obs_mean = np.array(obs_mean) 42 | if obs_std is None: 43 | obs_std = np.ones_like(env.observation_space.low) 44 | else: 45 | obs_std = np.array(obs_std) 46 | self._reward_scale = reward_scale 47 | self._obs_mean = obs_mean 48 | self._obs_std = obs_std 49 | ub = np.ones(self.env.action_space.shape) 50 | self.action_space = Box(-1 * ub, ub) 51 | 52 | def estimate_obs_stats(self, obs_batch, override_values=False): 53 | """Estimate the obs mean and standard deviation. 54 | 55 | Args: 56 | obs_batch: Batch of observations 57 | override_values: self._obs_mean and self._obs_std will be overridden. 58 | """ 59 | if self._obs_mean is not None and not override_values: 60 | raise Exception( 61 | "Observation mean and std already set. To " 62 | "override, set override_values to True." 63 | ) 64 | self._obs_mean = np.mean(obs_batch, axis=0) 65 | self._obs_std = np.std(obs_batch, axis=0) 66 | 67 | def _apply_normalize_obs(self, obs): 68 | return (obs - self._obs_mean) / (self._obs_std + 1e-8) 69 | 70 | def step(self, action): 71 | """Step the environment. 72 | 73 | Scale the action and normalize the observations. 
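        An action a in [-1, 1] is rescaled to lb + (a + 1) * 0.5 * (ub - lb) and clipped to the
        bounds of the wrapped environment's action space. If normalization is enabled, the
        observation is shifted by obs_mean and divided by obs_std (plus a small epsilon), and
        the reward is multiplied by reward_scale.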
74 | """ 75 | lb = self.env.action_space.low 76 | ub = self.env.action_space.high 77 | scaled_action = lb + (action + 1.0) * 0.5 * (ub - lb) 78 | scaled_action = np.clip(scaled_action, lb, ub) 79 | 80 | wrapped_step = self.env.step(scaled_action) 81 | next_obs, reward, done, info = wrapped_step 82 | if self._should_normalize: 83 | next_obs = self._apply_normalize_obs(next_obs) 84 | return next_obs, reward * self._reward_scale, done, info 85 | 86 | def __str__(self): 87 | """Return env as string.""" 88 | return "Normalized: %s" % self.env 89 | -------------------------------------------------------------------------------- /human_robot_gym/wrappers/time_limit.py: -------------------------------------------------------------------------------- 1 | """This file defines the time limit wrapper. 2 | 3 | Owner: 4 | Jakob Thumm (JT) 5 | 6 | Contributors: 7 | 8 | Changelog: 9 | 2.5.22 JT Formatted docstrings 10 | """ 11 | from robosuite.wrappers import Wrapper 12 | 13 | 14 | class TimeLimit(Wrapper): 15 | """Wraps a robosuite environment with a time limit functionality. 16 | 17 | Args: 18 | env: Robotsuite environment to wrap. 19 | max_episode_steps: Maximum number of steps before timeout. 20 | """ 21 | 22 | def __init__(self, env, max_episode_steps=None): # noqa: D107 23 | super(TimeLimit, self).__init__(env) 24 | if max_episode_steps is None and self.env.spec is not None: 25 | max_episode_steps = env.spec.max_episode_steps 26 | if self.env.spec is not None: 27 | self.env.spec.max_episode_steps = max_episode_steps 28 | self._max_episode_steps = max_episode_steps 29 | self._elapsed_steps = None 30 | 31 | def step(self, action): 32 | """Step the environment. 33 | 34 | Sets the done flag after the max number of steps. 35 | """ 36 | assert ( 37 | self._elapsed_steps is not None 38 | ), "Cannot call env.step() before calling reset()" 39 | observation, reward, done, info = self.env.step(action) 40 | self._elapsed_steps += 1 41 | if self._elapsed_steps >= self._max_episode_steps: 42 | info["TimeLimit.truncated"] = not done 43 | done = True 44 | return observation, reward, done, info 45 | 46 | def reset(self, **kwargs): 47 | """Reset the environment step counter.""" 48 | self._elapsed_steps = 0 49 | return self.env.reset(**kwargs) 50 | -------------------------------------------------------------------------------- /human_robot_gym/wrappers/visualization_wrapper.py: -------------------------------------------------------------------------------- 1 | """This file defines the a visualization wrapper for a robosuite environment. 2 | 3 | Owner: 4 | Jakob Thumm (JT) 5 | 6 | Contributors: 7 | 8 | Changelog: 9 | 2.5.22 JT Formatted docstrings 10 | """ 11 | from robosuite.wrappers import Wrapper 12 | 13 | 14 | class VisualizationWrapper(Wrapper): 15 | """Wraps a robosuite environment with a visualization functionality. 16 | 17 | Args: 18 | env: Robosuite environment to wrap. 
19 | """ 20 | 21 | def __init__(self, env): # noqa: D107 22 | super(VisualizationWrapper, self).__init__(env) 23 | 24 | def step(self, action): 25 | """Step the environment and render the visualization.""" 26 | observation, reward, done, info = self.env.step(action) 27 | self.unwrapped.render() 28 | return observation, reward, done, info 29 | -------------------------------------------------------------------------------- /icra_2024_run_experiments_in_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ./build_docker_train.sh user 3 | ./run_docker_train.sh user "human_robot_gym/training/icra_2024_run_experiments.sh" 4 | -------------------------------------------------------------------------------- /notes_docker.txt: -------------------------------------------------------------------------------- 1 | python -m pytest works but pytest does not. 2 | Still need to figure out how to activate conda env in Dockerfile. -------------------------------------------------------------------------------- /remove_all_build_files.sh: -------------------------------------------------------------------------------- 1 | find . -name __pycache__ -exec rm -rf {} \; 2 | find . -name build -exec rm -rf {} \; 3 | find . -name dist -exec rm -rf {} \; 4 | find . -name *.egg-info -exec rm -rf {} \; 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | bvh==0.3 2 | gym==0.21 3 | meshcat==0.3.2 4 | mujoco_py==2.1.2.14 5 | numpy<=1.23 6 | opencv_python==4.5.5.64 7 | pin==2.6.14 8 | robosuite==1.3.2 9 | scipy==1.8.0 10 | stable_baselines3==1.5.0 11 | numba>=0.52.0,<=0.53.1 12 | mujoco-py<2.2,>=2.1 13 | wandb>=0.12 14 | flake8>=4.0 15 | black>=22.3 16 | patchelf>=0.14 17 | h5py>=3.6 18 | hpp-fcl==2.2.0 19 | tensorboard>=2.8 20 | pydocstyle>=6.1 21 | mypy>=0.950 22 | envpool==0.6.1 23 | pybullet>=3.2.1 24 | sphinx_rtd_theme 25 | sphinx-autoapi 26 | graphviz -------------------------------------------------------------------------------- /results_ICRA2024/README.md: -------------------------------------------------------------------------------- 1 | # Results ICRA 2024 2 | 3 | Here are the steps to recreate the results from our ICRA 2024 paper. 4 | 5 | ## 1. Training -> Save to tensorboard files 6 | To reproduce the results described in the [human-robot-gym paper](https://arxiv.org/pdf/2310.06208.pdf), execute 7 | 8 | ``` 9 | ./icra_2024_run_experiments_in_docker.sh 10 | ``` 11 | 12 | This will 13 | - Create a docker image, 14 | - Run a container, and 15 | - Run all experiments within the container. 16 | 17 | ## 2. Convert tensorboard files to csv files 18 | This uses the human-robot-gym script `python human_robot_gym/utils/data_pipeline.py`. 19 | 20 | The pipeline consists of the following steps: 21 | 1. Scrape tensorboard data from the runs folder 22 | 2. Average all datapoints in a given time interval (rastering) 23 | 3. Determine statistics (running mean, std and bootstrapped 95% confidence intervals) 24 | 25 | After every step, the data is saved to .csv files. The output folder contains the following subfolders: 26 | - raw: Contains the raw data from tensorboard 27 | - rastered: Contains the rastered data 28 | - stats: Contains the statistics 29 | 30 | Example usage: 31 | 32 | ``` 33 | python human_robot_gym/utils/data_pipeline.py ... -y \ 34 | -i -o -t ... \ 35 | -n -g -w -b 36 | ``` 37 | 38 | The arguments are: 39 | - ... 
: The run ids of the experiments. The run id is the name of the folder containing the 40 | tensorboard files. 41 | - -y (optional): If specified, the script will not ask for confirmation before overwriting existing files. 42 | - -i : Folder in which the tensorboard log files are located. 43 | May also be a remote folder in which case the files will be first copied to a local folder. 44 | - -o : Path to output folder. The csv files will be saved in this folder. 45 | - -t ... : List of metrics to include in the csv file. 46 | If not specified, the following tags will be included: 47 | - "rollout/ep_env_rew_mean": episode return 48 | - "rollout/ep_len_mean": episode length 49 | - "rollout/n_goal_reached": success rate 50 | - -n : relevant for rastering, step in training at which to stop rastering. If not specified, the last step 51 | of the training will be used. 52 | - -g : relevant for rastering, granularity of rastering. Size of the step interval in which 53 | the data is averaged. 54 | - -w : relevant for statistics extraction, window size for moving average filter. 55 | - -b : relevant for statistics extraction, number of bootstrap samples. 56 | 57 | ## 3. Generate the plotting tex files 58 | Run `python generate_files.py` here. 59 | This will generate the plotting files in `output/*.tex`. 60 | You can then use `pdflatex` to run these files. 61 | 62 | ## 4. Run the plotting files 63 | We provide scripts to compile all plotting files using `run_reward_plots_train.sh`, `run_success_plots_train.sh`, and `run_ablation_study.sh`. -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl/stats.csv: -------------------------------------------------------------------------------- 1 | ep_rew_mean,ep_rew_std,ep_rew_025,ep_rew_975,ep_len_mean,ep_len_std,ep_len_025,ep_len_975,success_mean,success_std,success_025,success_975,label 2 | 341.597375695355,155.2106869678659,311.16650948608327,372.2841309160973,419.43,142.83292722618268,390.59,446.4955095086776,0.66,0.4737087712930805,0.55,0.74,AIR 3 | 312.55267208414153,170.92010848417135,278.6432698434189,345.87282277202536,394.87,160.10981575156472,362.6149355932115,425.6676463631119,0.6,0.4898979485566356,0.49,0.68,SIR 4 | 323.5527711795253,172.25136493971493,290.1896218608686,357.5523739897674,404.33,165.0933102823976,371.6911922648757,435.41039630857546,0.56,0.4963869458396343,0.45,0.65,SAC+RSI 5 | 368.7423522492466,139.37961721102508,340.98624135667905,395.1368981134388,440.22,134.15107752083097,413.75,466.33,0.81,0.3923009049186605,0.71,0.87,SAC 6 | 285.2776813030243,180.99426809943688,213.3500910834612,372.0080010562591,371.05,171.2350650421811,300.43992396493053,451.40892693879584,0.45,0.49749371855331,0.2,0.6,Expert 7 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_split_short_long_comp/stats.csv: -------------------------------------------------------------------------------- 1 | ep_rew_mean,ep_rew_std,ep_rew_025,ep_rew_975,ep_len_mean,ep_len_std,ep_len_025,ep_len_975,success_mean,success_std,success_025,success_975,label,color 2 | 339.0041452426114,164.21610964369128,306.99666928878304,370.9274960933169,415.11,153.64965961563337,385.3075631171119,444.94,0.69,0.462493243193887,0.58,0.77,SAC after $1 \cdot 10^6$; Training Set,PLOTOlive 3 | 350.9447079522826,71.21568910662752,335.6634822954912,363.8293799885933,436.48,57.117682025796526,422.9276341673468,445.81,0.65,0.4769696007084727,0.54,0.73,SAC after $1 \cdot 10^6$; Test 
Set,PLOTBluishGreen 4 | 341.597375695355,155.2106869678659,311.16650948608327,372.2841309160973,419.43,142.83292722618268,390.59,446.4955095086776,0.66,0.4737087712930805,0.55,0.74,SAC after $3 \cdot 10^6$; Training Set,PLOTOlive 5 | 310.27411038397463,116.92239934390368,285.0514120367688,330.7725617335603,397.75,101.64471211037,374.42,414.54,0.57,0.4950757517794625,0.46,0.66,SAC after $3 \cdot 10^6$ Steps; Test Set,PLOTBluishGreen 6 | 285.2776813030243,180.99426809943688,213.3500910834612,372.0080010562591,371.05,171.2350650421811,300.43992396493053,451.40892693879584,0.45,0.49749371855331,0.2,0.6,Expert; Training Set,PLOTGray 7 | 407.6003809005022,5.58536671317089,405.29247416535793,410.104610820824,466.7,22.884711053452257,455.2,473.6,1.0,0.0,1.0,1.0,Expert; Test Set,PLOTBlack 8 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_splits/ablation_summary.csv: -------------------------------------------------------------------------------- 1 | metric,trainset_mean,trainset_025,trainset_975,testset_mean,testset_025,testset_975 2 | Normalized reward,1.1146865142379003,0.9767993456759645,1.2405064655746725,0.9785264096865869,0.8460897976265026,1.1132443350512513 3 | Success rate,0.728,0.624,0.8,0.49800000000000005,0.3931787070332903,0.5820000000000001 4 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_splits/ep_len_conf.csv: -------------------------------------------------------------------------------- 1 | Group,trainset,testset,expert_train,expert_test 2 | Split 0,415.11,436.48,371.05,466.7 3 | Split 1,407.29,316.07,431.7,421.85 4 | Split 2,405.31,325.58,432.1,415.2 5 | Split 3,401.22,549.02,358.25,559.65 6 | Split 4,447.28,356.78,419.7,429.8 7 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_splits/ep_len_mean.csv: -------------------------------------------------------------------------------- 1 | Group,trainset,testset,expert_train,expert_test 2 | Split 0,415.11,436.48,371.05,466.7 3 | Split 1,407.29,316.07,431.7,421.85 4 | Split 2,405.31,325.58,432.1,415.2 5 | Split 3,401.22,549.02,358.25,559.65 6 | Split 4,447.28,356.78,419.7,429.8 7 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_splits/ep_len_total.csv: -------------------------------------------------------------------------------- 1 | Group,trainset_mean,trainset_025,trainset_975,testset_mean,testset_025,testset_975,expert_train_mean,expert_train_025,expert_train_975,expert_test_mean,expert_test_025,expert_test_975 2 | Split 0,415.11,385.3075631171119,444.94,436.48,422.9276341673468,445.81,371.05,300.43992396493053,451.40892693879584,466.7,455.2,473.6 3 | Split 1,407.29,386.9503413497701,427.5919713811068,316.07,263.9755400084397,365.5994342043669,431.7,369.4880150058001,465.15,421.85,337.0,506.65 4 | Split 2,405.31,371.3828697623331,437.7783404320745,325.58,306.686587753664,345.3483154053863,432.1,363.9527104728231,498.3982995979908,415.2,374.9941616942531,449.507229807572 5 | Split 3,401.22,373.5572952691332,423.7363326133023,549.02,528.7005011515745,567.1741016907605,358.25,302.15,414.4377228276349,559.65,520.9,597.45 6 | Split 4,447.28,420.50330018230767,472.03,356.78,321.94014673109933,390.90699037355785,419.7,360.2,482.2,429.8,343.6,498.75 7 | -------------------------------------------------------------------------------- 
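Note on the cl_splits files: each *_total.csv holds one row per animation split, and the *_total_avg.csv files further below appear to be their plain column-wise mean over the five splits (for example, the train-set success means 0.69, 0.66, 0.7, 0.76, 0.83 average to the 0.728 reported in success_total_avg.csv). A minimal sketch of that aggregation, assuming pandas is available; only the file names are taken from the repository:

```python
# Hypothetical aggregation sketch: average the per-split rows of success_total.csv
# into a single row, as found in success_total_avg.csv.
import pandas as pd

splits = pd.read_csv("results_ICRA2024/data/eval/cl_splits/success_total.csv")
avg = splits.drop(columns=["Group"]).mean()  # column-wise mean over the 5 splits
avg.to_frame().T.to_csv("success_total_avg.csv", index=False)
```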
/results_ICRA2024/data/eval/cl_splits/ep_rew_conf.csv: -------------------------------------------------------------------------------- 1 | Group,trainset,testset,expert_train,expert_test 2 | Split 0,339.0041452426114,350.9447079522826,285.2776813030243,407.6003809005022 3 | Split 1,328.93993081052264,229.14798150479797,350.0617379412055,339.2916570365429 4 | Split 2,334.4440619707108,241.18765848290613,342.4234110221267,315.77458951547743 5 | Split 3,326.1461188212037,464.542229834944,266.09192994236946,472.1960994973779 6 | Split 4,378.3875152828998,273.6724581548572,333.9652131229639,335.893616835773 7 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_splits/ep_rew_mean.csv: -------------------------------------------------------------------------------- 1 | Group,trainset,testset,expert_train,expert_test 2 | Split 0,339.0041452426114,350.9447079522826,285.2776813030243,407.6003809005022 3 | Split 1,328.93993081052264,229.14798150479797,350.0617379412055,339.2916570365429 4 | Split 2,334.4440619707108,241.18765848290613,342.4234110221267,315.77458951547743 5 | Split 3,326.1461188212037,464.542229834944,266.09192994236946,472.1960994973779 6 | Split 4,378.3875152828998,273.6724581548572,333.9652131229639,335.893616835773 7 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_splits/ep_rew_total.csv: -------------------------------------------------------------------------------- 1 | Group,trainset_mean,trainset_025,trainset_975,testset_mean,testset_025,testset_975,expert_train_mean,expert_train_025,expert_train_975,expert_test_mean,expert_test_025,expert_test_975 2 | Split 0,339.0041452426114,306.99666928878304,370.9274960933169,350.9447079522826,335.6634822954912,363.8293799885933,285.2776813030243,213.3500910834612,372.0080010562591,407.6003809005022,405.29247416535793,410.104610820824 3 | Split 1,328.93993081052264,306.6723245381633,349.5099712377639,229.14798150479797,179.66699438861406,283.991105475145,350.0617379412055,289.7925713789823,386.8049793109303,339.2916570365429,238.73948151260896,441.0280205282267 4 | Split 2,334.4440619707108,298.3064958068641,368.97696286848617,241.18765848290613,222.9935225735188,260.11677829179143,342.4234110221267,272.5960717367543,415.72641191280405,315.77458951547743,262.33642778391527,361.1904468426428 5 | Split 3,326.1461188212037,297.7317329370644,349.3977162319933,464.542229834944,439.53413489312,487.53867155965423,266.09192994236946,205.5485136601262,322.92856110974486,472.1960994973779,408.1769438547782,530.0621292562882 6 | Split 4,378.3875152828998,347.9178426275423,404.3406078575484,273.6724581548572,238.2417602028852,309.88432383360504,333.9652131229639,268.81670544557045,408.6257430710597,335.893616835773,260.8926976263589,398.3181336791223 7 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_splits/ep_rew_total_avg.csv: -------------------------------------------------------------------------------- 1 | trainset_mean,trainset_025,trainset_975,testset_mean,testset_025,testset_975,expert_train_mean,expert_train_025,expert_train_975,expert_test_mean,expert_test_025,expert_test_975 2 | 341.38435442558966,311.5250130396834,368.63055085782173,311.8990071859576,283.21997887072587,341.0720518297578,315.5639946663379,250.0207906609789,381.2187392921596,374.1512687571347,315.08760498860386,428.1406682254208 3 | 
-------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_splits/success_conf.csv: -------------------------------------------------------------------------------- 1 | Group,trainset,testset,expert_train,expert_test 2 | Split 0,0.69,0.65,0.45,1.0 3 | Split 1,0.66,0.24,0.85,0.45 4 | Split 2,0.7,0.43,0.6,0.65 5 | Split 3,0.76,0.63,0.55,0.7 6 | Split 4,0.83,0.54,0.6,0.55 7 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_splits/success_mean.csv: -------------------------------------------------------------------------------- 1 | Group,trainset,testset,expert_train,expert_test 2 | Split 0,0.69,0.65,0.45,1.0 3 | Split 1,0.66,0.24,0.85,0.45 4 | Split 2,0.7,0.43,0.6,0.65 5 | Split 3,0.76,0.63,0.55,0.7 6 | Split 4,0.83,0.54,0.6,0.55 7 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_splits/success_total.csv: -------------------------------------------------------------------------------- 1 | Group,trainset_mean,trainset_025,trainset_975,testset_mean,testset_025,testset_975,expert_train_mean,expert_train_025,expert_train_975,expert_test_mean,expert_test_025,expert_test_975 2 | Split 0,0.69,0.58,0.77,0.65,0.54,0.73,0.45,0.2,0.6,1.0,1.0,1.0 3 | Split 1,0.66,0.55,0.74,0.24,0.15,0.32,0.85,0.55,0.95,0.45,0.2,0.6 4 | Split 2,0.7,0.59,0.77,0.43,0.32,0.52,0.6,0.3,0.75,0.65,0.35,0.8 5 | Split 3,0.76,0.66,0.83,0.63,0.5258935351664513,0.71,0.55,0.25,0.7,0.7,0.4,0.85 6 | Split 4,0.83,0.74,0.89,0.54,0.43,0.63,0.6,0.3,0.75,0.55,0.3,0.7 7 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cl_splits/success_total_avg.csv: -------------------------------------------------------------------------------- 1 | trainset_mean,trainset_025,trainset_975,testset_mean,testset_025,testset_975,expert_train_mean,expert_train_025,expert_train_975,expert_test_mean,expert_test_025,expert_test_975 2 | 0.728,0.624,0.8,0.49800000000000005,0.3931787070332903,0.5820000000000001,0.6100000000000001,0.32,0.75,0.6699999999999999,0.4499999999999999,0.79 3 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/cs/stats.csv: -------------------------------------------------------------------------------- 1 | ep_rew_mean,ep_rew_std,ep_rew_025,ep_rew_975,ep_len_mean,ep_len_std,ep_len_025,ep_len_975,success_mean,success_std,success_025,success_975,label 2 | -3000.0,0.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,0.0,0.0,0.0,0.0,AIR 3 | -3000.0,0.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,0.0,0.0,0.0,0.0,SIR 4 | -3000.0,0.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,0.0,0.0,0.0,0.0,SAC+RSI 5 | -3000.0,0.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,0.0,0.0,0.0,0.0,SAC 6 | -475.1375,806.4464352756171,-1016.610426452473,-211.89319751651635,958.55,1152.1362538779863,533.8,1565.6541202111591,0.4,0.4898979485566356,0.15,0.6,Expert 7 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/pp/stats.csv: -------------------------------------------------------------------------------- 1 | ep_rew_mean,ep_rew_std,ep_rew_025,ep_rew_975,ep_len_mean,ep_len_std,ep_len_025,ep_len_975,success_mean,success_std,success_025,success_975,label 2 | -77.0425,32.54376005857343,-86.95159847450898,-72.535,149.71,92.41290981242824,138.76,188.06216639662708,0.99,0.0994987437106619,0.94,1.0,AIR 3 | 
-203.8525,157.54889358148475,-241.0849704973571,-177.53828158737934,533.42,389.7078182433604,460.2338055992016,614.3872404728451,0.59,0.4918333050943174,0.48,0.6769706222330569,SIR 4 | -204.41,175.8381518613068,-247.7180918274828,-175.49382116764116,537.08,410.13353142604666,455.89439519128535,619.4131043389206,0.57,0.4950757517794625,0.46,0.65,SAC+RSI 5 | -1000.0,0.0,-1000.0,-1000.0,1000.0,0.0,1000,1000,0.0,0.0,0.0,0.0,SAC 6 | -265.1375,334.8389302616259,-448.3371273418204,-149.7,313.6,316.707814870426,205.1549243296084,488.0336165292989,0.85,0.3570714214271425,0.55,0.95,Expert 7 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/rhh/stats.csv: -------------------------------------------------------------------------------- 1 | ep_rew_mean,ep_rew_std,ep_rew_025,ep_rew_975,ep_len_mean,ep_len_std,ep_len_025,ep_len_975,success_mean,success_std,success_025,success_975,label 2 | -483.26,337.2264612245012,-552.6686855543185,-418.8557464166946,897.44,251.69768850746323,839.6133740812843,939.5792141375066,0.16,0.3666060555964672,0.09,0.23,AIR+Res. 3 | -459.7825,369.5687464041704,-535.1077215200276,-390.8389662489942,783.23,346.0580256257611,711.6661270791928,845.1053490213999,0.3,0.4582575694955839,0.21,0.39,AIR 4 | -991.62,83.37994722953475,-1000.0,-974.86,994.15,58.206765070737255,959.0594659491306,1000.0,0.01,0.0994987437106619,0.0,0.03,SIR 5 | -999.85,1.176329035601859,-1000.0,-999.4375,1000.0,0.0,1000.0,1000.0,0.0,0.0,0.0,0.0,SAC+RSI 6 | -876.1075,273.61668729949565,-921.5575,-815.203644908729,991.34,86.1659120534333,948.04,1000.0,0.01,0.0994987437106619,0.0,0.03,SAC 7 | -482.0,405.2180045975253,-664.1194336415008,-309.60046817568417,652.5,390.630579960146,473.6276613887875,810.05,0.45,0.49749371855331,0.2,0.6,Expert 8 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/rhh_rsi_res/stats.csv: -------------------------------------------------------------------------------- 1 | ep_rew_mean,ep_rew_std,ep_rew_025,ep_rew_975,ep_len_mean,ep_len_std,ep_len_025,ep_len_975,success_mean,success_std,success_025,success_975,label 2 | -406.5075,374.0032694626479,-485.4910356488149,-338.3800612196883,701.48,365.984548307712,629.2111767100124,770.6611449852959,0.44,0.4963869458396343,0.34,0.53,AIR+Res. no RSI 3 | -483.26,337.2264612245012,-552.6686855543185,-418.8557464166946,897.44,251.69768850746323,839.6133740812843,939.5792141375066,0.16,0.3666060555964672,0.09,0.23,AIR+Res. 4 | -459.7825,369.5687464041704,-535.1077215200276,-390.8389662489942,783.23,346.0580256257611,711.6661270791928,845.1053490213999,0.3,0.4582575694955839,0.21,0.39,AIR 5 | -482.0,405.2180045975253,-664.1194336415008,-309.60046817568417,652.5,390.630579960146,473.6276613887875,810.05,0.45,0.49749371855331,0.2,0.6,Expert 6 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval/rhh_rsi_res_long/stats.csv: -------------------------------------------------------------------------------- 1 | ep_rew_mean,ep_rew_std,ep_rew_025,ep_rew_975,ep_len_mean,ep_len_std,ep_len_025,ep_len_975,success_mean,success_std,success_025,success_975,label 2 | -461.0535,384.45458316002686,-496.2337983355227,-427.28858064381103,757.086,353.4147515370573,724.9240683973965,786.9359776666859,0.368,0.4822613399392491,0.324,0.408,AIR+Res. 
no RSI 3 | -464.119,333.4725735933916,-493.05209419638817,-435.5559994915992,889.612,262.9006684206033,864.6631003376499,911.4179298882912,0.164,0.3702755730533679,0.132,0.196,AIR+Res. 4 | -491.9665,381.28941829370245,-524.6209486596861,-458.071243626585,802.384,335.97765482841265,771.9415161102248,830.0906454656509,0.272,0.4449898875255481,0.232,0.312,AIR 5 | -481.26,411.4396005490964,-564.8989348460843,-402.6797645422186,659.17,395.6133479800701,580.6275627382886,736.8625620614712,0.43,0.4950757517794625,0.33,0.52,Expert 6 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval_train/cs/sac/stats.csv: -------------------------------------------------------------------------------- 1 | ep_rew_mean,ep_rew_std,ep_rew_025,ep_rew_975,ep_rew_median,ep_rew_lower_quartile,ep_rew_upper_quartile,ep_rew_lower_whisker,ep_rew_upper_whisker,ep_len_mean,ep_len_std,ep_len_025,ep_len_975,ep_len_median,ep_len_lower_quartile,ep_len_upper_quartile,ep_len_lower_whisker,ep_len_upper_whisker,success_mean,success_std,success_025,success_975,success_median,success_lower_quartile,success_upper_quartile,success_lower_whisker,success_upper_whisker,step 2 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0 3 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,200000 4 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,400000 5 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,600000 6 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,800000 7 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1000000 8 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1200000 9 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1400000 10 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1600000 11 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1800000 12 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2000000 13 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2200000 14 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2400000 15 | 
-3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2600000 16 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2800000 17 | -3000.0,0.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,-3000.0,3000.0,0.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,3000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,3000000 18 | -------------------------------------------------------------------------------- /results_ICRA2024/data/eval_train/pp/sac/stats.csv: -------------------------------------------------------------------------------- 1 | ep_rew_mean,ep_rew_std,ep_rew_025,ep_rew_975,ep_rew_median,ep_rew_lower_quartile,ep_rew_upper_quartile,ep_rew_lower_whisker,ep_rew_upper_whisker,ep_len_mean,ep_len_std,ep_len_025,ep_len_975,ep_len_median,ep_len_lower_quartile,ep_len_upper_quartile,ep_len_lower_whisker,ep_len_upper_whisker,success_mean,success_std,success_025,success_975,success_median,success_lower_quartile,success_upper_quartile,success_lower_whisker,success_upper_whisker,step 2 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0 3 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,200000 4 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,400000 5 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,600000 6 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,800000 7 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1000000 8 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1200000 9 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1400000 10 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1600000 11 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1800000 12 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2000000 13 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2200000 14 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2400000 15 | 
-1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2600000 16 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2800000 17 | -1000.0,0.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,-1000.0,1000.0,0.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,1000.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,3000000 18 | -------------------------------------------------------------------------------- /results_ICRA2024/data/train/cl/expert/stats.csv: -------------------------------------------------------------------------------- 1 | success_mean,success_025,success_975,ep_len_mean,ep_len_025,ep_len_975,ep_rew_mean,ep_rew_025,ep_rew_975 2 | 0.5699999999999999,0.46,0.65,410.27,378.1954566361617,444.02,316.5490937069618,281.31005195768574,353.00448443742545 3 | -------------------------------------------------------------------------------- /results_ICRA2024/data/train/cs/expert/stats.csv: -------------------------------------------------------------------------------- 1 | success_mean,success_025,success_975,ep_len_mean,ep_len_std,ep_len_025,ep_len_975,ep_rew_mean,ep_rew_std,ep_rew_025,ep_rew_975,max_stack_height_mean,max_stack_height_std,max_stack_height_025,max_stack_height_975 2 | 0.3,0.26,0.34,1071.62,1197.4556908712739,970.2063829795558,1178.4044197487362,-444.64,792.0476366040114,-519.6345492958884,-379.8548510617162,2.38,0.8170679286326198,2.306,2.446 3 | -------------------------------------------------------------------------------- /results_ICRA2024/data/train/hrh/expert/stats.csv: -------------------------------------------------------------------------------- 1 | success_mean,success_025,success_975,ep_len_mean,ep_len_025,ep_len_975,ep_rew_mean,ep_rew_025,ep_rew_975 2 | 0.664,0.62,0.704,540.746,511.7303022316921,571.6354677611964,-438.593,-473.09069555824817,-407.6482720423289 3 | -------------------------------------------------------------------------------- /results_ICRA2024/data/train/pp/expert/stats.csv: -------------------------------------------------------------------------------- 1 | success_mean,success_025,success_975,ep_len_mean,ep_len_025,ep_len_975,ep_rew_mean,ep_rew_025,ep_rew_975 2 | 0.9439999999999999,0.918,0.96,230.984,213.1797963492099,251.37563033181996,-171.988,-193.95342403159768,-153.2078718170566 3 | -------------------------------------------------------------------------------- /results_ICRA2024/data/train/reach/expert/stats.csv: -------------------------------------------------------------------------------- 1 | 
ep_rew_mean,ep_rew_std,ep_rew_025,ep_rew_975,ep_rew_median,ep_rew_lower_quartile,ep_rew_upper_quartile,ep_rew_lower_whisker,ep_rew_upper_whisker,ep_len_mean,ep_len_std,ep_len_025,ep_len_975,ep_len_median,ep_len_lower_quartile,ep_len_upper_quartile,ep_len_lower_whisker,ep_len_upper_whisker,success_mean,success_std,success_025,success_975,success_median,success_lower_quartile,success_upper_quartile,success_lower_whisker,success_upper_whisker,failsafe_interventions_mean,failsafe_interventions_std,failsafe_interventions_025,failsafe_interventions_975,failsafe_interventions_median,failsafe_interventions_lower_quartile,failsafe_interventions_upper_quartile,failsafe_interventions_lower_whisker,failsafe_interventions_upper_whisker,n_collisions_mean,n_collisions_std,n_collisions_025,n_collisions_975,n_collisions_median,n_collisions_lower_quartile,n_collisions_upper_quartile,n_collisions_lower_whisker,n_collisions_upper_whisker,n_collisions_human_mean,n_collisions_human_std,n_collisions_human_025,n_collisions_human_975,n_collisions_human_median,n_collisions_human_lower_quartile,n_collisions_human_upper_quartile,n_collisions_human_lower_whisker,n_collisions_human_upper_whisker,n_collisions_critical_mean,n_collisions_critical_std,n_collisions_critical_025,n_collisions_critical_975,n_collisions_critical_median,n_collisions_critical_lower_quartile,n_collisions_critical_upper_quartile,n_collisions_critical_lower_whisker,n_collisions_critical_upper_whisker,n_collisions_static_mean,n_collisions_static_std,n_collisions_static_025,n_collisions_static_975,n_collisions_static_median,n_collisions_static_lower_quartile,n_collisions_static_upper_quartile,n_collisions_static_lower_whisker,n_collisions_static_upper_whisker,n_collisions_robot_mean,n_collisions_robot_std,n_collisions_robot_025,n_collisions_robot_975,n_collisions_robot_median,n_collisions_robot_lower_quartile,n_collisions_robot_upper_quartile,n_collisions_robot_lower_whisker,n_collisions_robot_upper_whisker 2 | -6.358770908322185,3.0371599493293386,-7.758327041168735,-5.158573900930511,-5.922929216176271,-7.830750580411404,-4.392460269853473,-11.456574274227023,-0.9423210751265289,56.35,10.178796330452933,52.1,60.65,56.5,49.5,61.75,36,78,1.0,0.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.85,4.283075628809894,0.5,4.35,0.0,0.0,0.0,2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 3 | -------------------------------------------------------------------------------- /results_ICRA2024/data/train/rhh/expert/stats.csv: -------------------------------------------------------------------------------- 1 | success_mean,success_025,success_975,ep_len_mean,ep_len_025,ep_len_975,ep_rew_mean,ep_rew_025,ep_rew_975 2 | 0.63,0.5909134304924014,0.676,494.274,460.2281592195383,529.537972556155,-285.69,-318.2773909220958,-256.11457008800636 3 | -------------------------------------------------------------------------------- /results_ICRA2024/output/ablation_study.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/ablation_study.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/ablation_study.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = ablation_study 2 | % border={ } left and right 
should be same. 3 | \documentclass[border={-0.05cm -0.08cm -0.00cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotAblation{cl} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/legend1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/legend1.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/legend1.tex: -------------------------------------------------------------------------------- 1 | \documentclass[border=0cm]{standalone} 2 | \usepackage{tikz} 3 | \usepackage{pgfplots} 4 | \usepackage{pgfplotstable} 5 | \usepgfplotslibrary{fillbetween} 6 | \usepackage[nohyperlinks, nolist]{acronym} 7 | \definecolorset{RGB}{RPTH}{}{% 8 | Black, 0, 0, 0;% 9 | Blue, 0, 45, 106;% 10 | Red, 227, 27, 35;% 11 | MediumBlue, 0, 92, 171;% 12 | LightBlue, 220, 238, 243;% 13 | Yellow, 255, 195, 37;% 14 | Gray, 80, 91, 88;% 15 | Cyan, 27, 220, 238% 16 | } 17 | \RequirePackage{times} 18 | \newcommand\FramedBox[3]{% 19 | \setlength\fboxsep{0pt} 20 | \setlength{\fboxrule}{0.5pt} 21 | \fbox{\parbox[t][#1][c]{#2}{\centering #3}}} 22 | \begin{document} 23 | \hspace{-3.5mm} 24 | \setlength{\tabcolsep}{5pt} 25 | \FramedBox{6mm}{176mm}{ 26 | \begin{small} 27 | \begin{tabular}{l l l l l l l l l l} 28 | \raisebox{0.45mm}{\colorbox{RPTHGray}{\textcolor{RPTHGray}{--}}\hspace{1.3mm}\colorbox{RPTHGray}{\textcolor{RPTHGray}{--}}} & \footnotesize{Expert} 29 | & \raisebox{0.45mm}{\colorbox{RPTHCyan}{\textcolor{RPTHCyan}{---}}} & \footnotesize{SAC} 30 | & \raisebox{0.45mm}{\colorbox{RPTHYellow}{\textcolor{RPTHYellow}{---}}} & \footnotesize{Reference state initialization} 31 | & \raisebox{0.45mm}{\colorbox{RPTHRed}{\textcolor{RPTHRed}{---}}} & \footnotesize{State-based imitation reward} 32 | & \raisebox{0.45mm}{\colorbox{RPTHBlue}{\textcolor{RPTHBlue}{---}}} & \footnotesize{Action-based imitation reward} 33 | \end{tabular} 34 | \end{small} 35 | } 36 | \end{document} -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_cl_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/reward_cl_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_cl_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = reward_cl_train 2 | % border={ } left and right should be same. 
3 | \documentclass[border={-0.12cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotNormalizedReward{cl}{False}{100}{316.5490937069618}{281.31005195768574}{353.00448443742545} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_cs_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/reward_cs_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_cs_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = reward_cs_train 2 | % border={ } left and right should be same. 3 | \documentclass[border={-0.12cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotNormalizedReward{cs}{False}{-3000}{-444.64}{-519.6345492958884}{-379.8548510617162} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_hrh_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/reward_hrh_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_hrh_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = reward_hrh_train 2 | % border={ } left and right should be same. 3 | \documentclass[border={-0.12cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotNormalizedReward{hrh}{False}{-1000}{-438.593}{-473.09069555824817}{-407.6482720423289} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_pp_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/reward_pp_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_pp_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = reward_pp_train 2 | % border={ } left and right should be same. 
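% As in the other reward_*_train.tex drivers, \plotNormalizedReward (presumably defined in the
% \input{../plotstyle} style file) takes the environment key, a short-plot flag, what appears to be
% the worst-case episode return used for normalization, and the expert mean return with its 95%
% confidence bounds from data/train/pp/expert/stats.csv (-171.988, -193.95, -153.21).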
3 | \documentclass[border={-0.12cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotNormalizedReward{pp}{False}{-1000}{-171.988}{-193.95342403159768}{-153.2078718170566} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_reach_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/reward_reach_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_reach_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = reward_reach_train 2 | % border={ } left and right should be same. 3 | \documentclass[border={-0.05cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotNormalizedRewardShort{reach}{True}{-50}{-6.358770908322185}{-7.758327041168735}{-5.158573900930511} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_rhh_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/reward_rhh_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/reward_rhh_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = reward_rhh_train 2 | % border={ } left and right should be same. 3 | \documentclass[border={-0.12cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotNormalizedReward{rhh}{False}{-1000}{-285.69}{-318.2773909220958}{-256.11457008800636} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/success_cl_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/success_cl_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/success_cl_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = success_cl_train 2 | % border={ } left and right should be same. 3 | \documentclass[border={-0.12cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotSuccess{cl}{False}{0.57}{0.46}{0.65} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/success_cs_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/success_cs_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/success_cs_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = success_cs_train 2 | % border={ } left and right should be same. 
3 | \documentclass[border={-0.12cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotSuccess{cs}{False}{0.3}{0.26}{0.34} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/success_hrh_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/success_hrh_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/success_hrh_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = success_hrh_train 2 | % border={ } left and right should be same. 3 | \documentclass[border={-0.12cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotSuccess{hrh}{False}{0.664}{0.62}{0.704} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/success_pp_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/success_pp_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/success_pp_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = success_pp_train 2 | % border={ } left and right should be same. 3 | \documentclass[border={-0.12cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotSuccess{pp}{False}{0.944}{0.918}{0.96} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/success_reach_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/success_reach_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/success_reach_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = success_reach_train 2 | % border={ } left and right should be same. 3 | \documentclass[border={-0.05cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotSuccessShort{reach}{True}{1.0}{1.0}{1.0} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/output/success_rhh_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TUMcps/human-robot-gym/0b7805a85e3788b0ce534361751bb344fa4a55f8/results_ICRA2024/output/success_rhh_train.pdf -------------------------------------------------------------------------------- /results_ICRA2024/output/success_rhh_train.tex: -------------------------------------------------------------------------------- 1 | % !TeX root = success_rhh_train 2 | % border={ } left and right should be same. 
3 | \documentclass[border={-0.12cm -0.11cm -0.07cm -0.00cm},tikz]{standalone} 4 | \input{../plotstyle} 5 | 6 | \begin{document} 7 | \plotSuccess{rhh}{False}{0.63}{0.5909134304924014}{0.676} 8 | \end{document} 9 | -------------------------------------------------------------------------------- /results_ICRA2024/run_ablation_study.sh: -------------------------------------------------------------------------------- 1 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode ablation_study.tex) 2 | -------------------------------------------------------------------------------- /results_ICRA2024/run_reward_plots_train.sh: -------------------------------------------------------------------------------- 1 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'legend1'.tex) 2 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'reward_reach_train'.tex) 3 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'reward_pp_train'.tex) 4 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'reward_cl_train'.tex) 5 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'reward_rhh_train'.tex) 6 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'reward_hrh_train'.tex) 7 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'reward_cs_train'.tex) 8 | -------------------------------------------------------------------------------- /results_ICRA2024/run_success_plots_train.sh: -------------------------------------------------------------------------------- 1 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'success_reach_train'.tex) 2 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'success_pp_train'.tex) 3 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'success_cl_train'.tex) 4 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'success_rhh_train'.tex) 5 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'success_hrh_train'.tex) 6 | (cd output && pdflatex -synctex=1 -interaction=nonstopmode 'success_cs_train'.tex) 7 | -------------------------------------------------------------------------------- /run_docker_build.sh: -------------------------------------------------------------------------------- 1 | # Currently, installing the pip requirements inside the Docker container only works 2 | # if you are running the container as root. 3 | # Consider further tuning of the Docker build process to allow non-root users 4 | # with: --user=1000 \ 5 | # --volume="$(pwd):/home/$USER/" \ 6 | # --volume="/home/$USER/.mujoco/:/home/$USER/.mujoco/" \ 7 | docker run -it \ 8 | --net=host \ 9 | --privileged \ 10 | --volume="$(pwd):/home/human-robot-gym/" \ 11 | --volume="/home/$USER/.mujoco/:/home/.mujoco/" \ 12 | human-robot-gym-build/$USER:v1 \ 13 | bash 14 | -------------------------------------------------------------------------------- /run_docker_train.sh: -------------------------------------------------------------------------------- 1 | # Run this file either with `./run_docker_train.sh user` or `./run_docker_train.sh root`. 2 | # User mode ensures that the files you create are not owned by root. 3 | # Root mode creates a "classic" root user in docker. 4 | # The /runs, /models, and /wandb folders are mounted 5 | # to store training results outside the docker. 
6 | 7 | user=${1:-user} 8 | bash_command="/bin/bash" 9 | 10 | docker_command="docker run -it" 11 | 12 | if [ -n "$2" ] 13 | then 14 | bash_command="/bin/bash -c ${2}" 15 | docker_command="docker run" 16 | fi 17 | 18 | command="cd human-robot-gym; conda run --no-capture-output -n hrgym pip install -e .; conda run --no-capture-output -n hrgym ${bash_command}" 19 | 20 | echo "Chosen mode: $user" 21 | if [ "$user" = "root" ] 22 | then 23 | ${docker_command} \ 24 | --net=host \ 25 | --volume="$(pwd)/:/root/human-robot-gym/" \ 26 | --shm-size=10.24gb \ 27 | human-robot-gym-train/root:v2 "${command}" 28 | elif [ "$user" = "user" ] 29 | then 30 | ${docker_command} \ 31 | --net=host \ 32 | --volume="$(pwd)/:/home/$USER/human-robot-gym/" \ 33 | --shm-size=10.24gb \ 34 | human-robot-gym-train/$USER:v2 "${command}" 35 | else 36 | echo "User mode unknown. Please choose user, root, or leave out for default user" 37 | fi 38 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = human_robot_gym 3 | author = Jakob Thumm 4 | author_email = jakob.thumm@tum.de 5 | version = 0.1.0 6 | description = human-robot-gym is an extension to the robosuite package 7 |     to train RL algorithms on robots in human environments. 8 | url = TODO 9 | 10 | [options] 11 | install_requires = 12 | bvh 13 | mujoco_py==2.1.2.14 14 | opencv_python 15 | robosuite==1.3.2 16 | stable_baselines3==1.5.0 17 | pin 18 | pybullet 19 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """This file defines the installation procedure of the human-robot-gym. 2 | 3 | Owner: 4 | Jakob Thumm (JT) 5 | 6 | Contributors: 7 | 8 | Changelog: 9 | 2.5.22 JT Formatted docstrings 10 | 11.5.22 JT Moved install requirements to setup.cfg 11 | 11.5.22 JT Removed long description 12 | """ 13 | from setuptools import find_packages, setup 14 | 15 | if __name__ == "__main__": 16 | setup( 17 | packages=[package for package in find_packages() if package.startswith("human_robot_gym")], 18 | python_requires=">=3", 19 | eager_resources=["*"], 20 | include_package_data=True 21 | ) 22 | --------------------------------------------------------------------------------
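For local development outside the Docker workflow, the following is a minimal sketch of how the package defined by setup.cfg and setup.py above could be installed. The `hrgym` environment name is taken from the conda command in run_docker_train.sh; whether environment.yml actually creates an environment under that name is an assumption.

    # create and activate the conda environment (assumed name: hrgym)
    conda env create -f environment.yml
    conda activate hrgym
    # editable install, mirroring the pip install -e . step that run_docker_train.sh runs inside the container
    pip install -e .
    # smoke test: the package should now be importable
    python -c "import human_robot_gym"

This is only an illustrative workflow derived from the scripts above; the Docker scripts remain the supported entry point.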