├── .gitignore ├── LICENSE ├── docs ├── .gitignore └── img │ ├── SPARK_logo.png │ ├── g1_benchmark.gif │ ├── g1_benchmark_sim.gif │ ├── g1_fixed_base.gif │ ├── g1_mobile_base.gif │ ├── g1_right_arm.gif │ ├── g1_right_arm_safe_real.gif │ ├── g1_right_arm_safe_sim.gif │ ├── g1_safe_teleop.gif │ ├── g1_safe_teleop_sim.gif │ ├── g1_sport_mode.gif │ ├── spark_sys_framework.png │ └── spark_system.jpg ├── example ├── run_g1_benchmark.py ├── run_g1_safe_teleop_real.py ├── run_g1_safe_teleop_sim.py ├── run_g1_unsafe_teleop_sim.py └── run_g1_visulaize_safe_area.py ├── install.sh ├── module ├── spark_agent │ ├── setup.py │ └── spark_agent │ │ ├── __init__.py │ │ ├── base │ │ ├── base_agent.py │ │ └── base_agent_config.py │ │ ├── real │ │ ├── __init__.py │ │ └── g1 │ │ │ ├── g1_idl_struct.py │ │ │ ├── g1_real_agent.py │ │ │ └── loco │ │ │ ├── __init__.py │ │ │ ├── g1_loco_api.py │ │ │ └── g1_loco_client.py │ │ └── simulation │ │ ├── mujoco │ │ ├── g1_mujoco_agent.py │ │ ├── motion.pt │ │ └── mujoco_agent.py │ │ └── simulation_agent.py ├── spark_policy │ ├── setup.py │ └── spark_policy │ │ ├── __init__.py │ │ ├── base │ │ └── base_policy.py │ │ ├── feedback │ │ ├── g1_benchmark_pid_policy.py │ │ └── g1_teleop_pid_policy.py │ │ └── model_free │ │ └── user_rl_policy.py ├── spark_robot │ ├── resources │ │ └── g1 │ │ │ ├── g1_29dof_fixed_base.xml │ │ │ ├── g1_29dof_mobile_base.xml │ │ │ ├── g1_29dof_right_arm.xml │ │ │ ├── g1_29dof_whole_body.xml │ │ │ ├── g1_29dof_whole_body_fixed.xml │ │ │ └── meshes │ │ │ ├── head_link.STL │ │ │ ├── left_ankle_pitch_link.STL │ │ │ ├── left_ankle_roll_link.STL │ │ │ ├── left_elbow_link.STL │ │ │ ├── left_hand_index_0_link.STL │ │ │ ├── left_hand_index_1_link.STL │ │ │ ├── left_hand_middle_0_link.STL │ │ │ ├── left_hand_middle_1_link.STL │ │ │ ├── left_hand_palm_link.STL │ │ │ ├── left_hand_thumb_0_link.STL │ │ │ ├── left_hand_thumb_1_link.STL │ │ │ ├── left_hand_thumb_2_link.STL │ │ │ ├── left_hip_pitch_link.STL │ │ │ ├── left_hip_roll_link.STL │ │ │ ├── left_hip_yaw_link.STL │ │ │ ├── left_knee_link.STL │ │ │ ├── left_rubber_hand.STL │ │ │ ├── left_shoulder_pitch_link.STL │ │ │ ├── left_shoulder_roll_link.STL │ │ │ ├── left_shoulder_yaw_link.STL │ │ │ ├── left_wrist_pitch_link.STL │ │ │ ├── left_wrist_roll_link.STL │ │ │ ├── left_wrist_roll_rubber_hand.STL │ │ │ ├── left_wrist_yaw_link.STL │ │ │ ├── logo_link.STL │ │ │ ├── pelvis.STL │ │ │ ├── pelvis_contour_link.STL │ │ │ ├── right_ankle_pitch_link.STL │ │ │ ├── right_ankle_roll_link.STL │ │ │ ├── right_elbow_link.STL │ │ │ ├── right_hand_index_0_link.STL │ │ │ ├── right_hand_index_1_link.STL │ │ │ ├── right_hand_middle_0_link.STL │ │ │ ├── right_hand_middle_1_link.STL │ │ │ ├── right_hand_palm_link.STL │ │ │ ├── right_hand_thumb_0_link.STL │ │ │ ├── right_hand_thumb_1_link.STL │ │ │ ├── right_hand_thumb_2_link.STL │ │ │ ├── right_hip_pitch_link.STL │ │ │ ├── right_hip_roll_link.STL │ │ │ ├── right_hip_yaw_link.STL │ │ │ ├── right_knee_link.STL │ │ │ ├── right_rubber_hand.STL │ │ │ ├── right_shoulder_pitch_link.STL │ │ │ ├── right_shoulder_roll_link.STL │ │ │ ├── right_shoulder_yaw_link.STL │ │ │ ├── right_wrist_pitch_link.STL │ │ │ ├── right_wrist_roll_link.STL │ │ │ ├── right_wrist_roll_rubber_hand.STL │ │ │ ├── right_wrist_yaw_link.STL │ │ │ ├── torso_constraint_L_link.STL │ │ │ ├── torso_constraint_L_rod_link.STL │ │ │ ├── torso_constraint_R_link.STL │ │ │ ├── torso_constraint_R_rod_link.STL │ │ │ ├── torso_link.STL │ │ │ ├── torso_link_rev_1_0.STL │ │ │ ├── waist_constraint_L.STL │ │ │ ├── waist_constraint_R.STL │ 
│ │ ├── waist_roll_link.STL │ │ │ ├── waist_roll_link_rev_1_0.STL │ │ │ ├── waist_support_link.STL │ │ │ ├── waist_yaw_link.STL │ │ │ └── waist_yaw_link_rev_1_0.STL │ ├── setup.py │ └── spark_robot │ │ ├── __init__.py │ │ ├── base │ │ ├── base_robot_config.py │ │ └── base_robot_kinematics.py │ │ └── g1 │ │ ├── config │ │ ├── g1_fixed_base_dynamic_1_config.py │ │ ├── g1_fixed_base_dynamic_2_config.py │ │ ├── g1_mobile_base_dynamic_1_config.py │ │ ├── g1_mobile_base_dynamic_2_config.py │ │ ├── g1_right_arm_dynamic_1_config.py │ │ ├── g1_right_arm_dynamic_2_config.py │ │ ├── g1_sport_mode_dynamic_1_config.py │ │ ├── g1_sport_mode_dynamic_2_config.py │ │ ├── g1_whole_body_dynamic_1_config.py │ │ └── g1_whole_body_dynamic_2_config.py │ │ ├── kinematics │ │ ├── g1_fixed_base_kinematics.py │ │ ├── g1_mobile_base_kinematics.py │ │ ├── g1_right_arm_kinematics.py │ │ └── g1_whole_body_kinematics.py │ │ └── utils │ │ └── weighted_moving_filter.py ├── spark_safe │ ├── setup.py │ └── spark_safe │ │ ├── __init__.py │ │ ├── safe_algo │ │ ├── __init__.py │ │ ├── base │ │ │ ├── base_safe_algo.py │ │ │ └── bypass_safe_algo.py │ │ └── value_based │ │ │ ├── __init__.py │ │ │ ├── base │ │ │ ├── base_safety_index.py │ │ │ ├── basic_collision_safety_index.py │ │ │ ├── collision_safety_index_1.py │ │ │ ├── collision_safety_index_1_approx.py │ │ │ ├── collision_safety_index_2.py │ │ │ ├── collision_safety_index_2_approx.py │ │ │ ├── collision_safety_index_2_nn.py │ │ │ ├── onnx_model │ │ │ │ ├── n_2.onnx │ │ │ │ └── n_2_scalar.onnx │ │ │ └── value_based_safe_algo.py │ │ │ ├── cbf │ │ │ ├── basic_control_barrier_function.py │ │ │ └── relaxed_control_barrier_function.py │ │ │ ├── pfm │ │ │ └── basic_potential_field_method.py │ │ │ ├── sma │ │ │ └── basic_sliding_mode_algorithm.py │ │ │ ├── ssa │ │ │ ├── basic_safe_set_algorithm.py │ │ │ └── relaxed_safe_set_algorithm.py │ │ │ └── sss │ │ │ ├── basic_sublevel_safe_set_algorithm.py │ │ │ └── relaxed_sublevel_safe_set_algorithm.py │ │ └── safe_controller │ │ ├── __init__.py │ │ ├── base │ │ ├── base_safe_controller.py │ │ └── base_safe_controller_config.py │ │ └── g1 │ │ └── g1_basic_safe_controller_config.py ├── spark_task │ ├── setup.py │ └── spark_task │ │ ├── __init__.py │ │ └── task │ │ ├── base │ │ ├── base_task.py │ │ └── base_task_config.py │ │ ├── g1_benchmark │ │ └── g1_benchmark_task.py │ │ └── g1_teleop_sim │ │ └── g1_teleop_sim_task.py └── spark_utils │ ├── setup.py │ └── spark_utils │ ├── __init__.py │ ├── arguments.py │ ├── geometry.py │ ├── helpers.py │ ├── logger.py │ └── math.py ├── pipeline ├── setup.py └── spark_pipeline │ ├── __init__.py │ ├── base │ ├── base_pipeline.py │ └── base_pipeline_config.py │ ├── g1_benchmark │ ├── __init__.py │ ├── evaluation.py │ ├── g1_benchmark_pipeline.py │ ├── g1_benchmark_pipeline_config.py │ └── g1_benchmark_test_case_generator.py │ ├── g1_safe_teleop │ ├── g1_safe_teleop_pipeline.py │ ├── g1_safe_teleop_real_pipeline_config.py │ ├── g1_safe_teleop_sim_pipeline_config.py │ └── g1_unsafe_teleop_sim_pipeline_config.py │ ├── plotter.py │ └── visualization.py ├── readme.md └── wrapper ├── spark_algo ├── setup.py └── spark_algo │ ├── __init__.py │ ├── spark_algo_config.py │ └── spark_algo_wrapper.py └── spark_env ├── setup.py └── spark_env ├── __init__.py ├── spark_env_config.py ├── spark_env_wrapper.py └── user_env_wrapper.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | log/ 3 | data/ 4 | **/*.egg-info/ 5 | .vscode 6 | **/MUJOCO_LOG.TXT 7 | 
**/*_log/ 8 | **/dev 9 | publication/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Intelligent Control Lab and Contributors 4 | (See CONTRIBUTORS.md for the full list of contributors) 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | vid/ -------------------------------------------------------------------------------- /docs/img/SPARK_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/SPARK_logo.png -------------------------------------------------------------------------------- /docs/img/g1_benchmark.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/g1_benchmark.gif -------------------------------------------------------------------------------- /docs/img/g1_benchmark_sim.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/g1_benchmark_sim.gif -------------------------------------------------------------------------------- /docs/img/g1_fixed_base.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/g1_fixed_base.gif -------------------------------------------------------------------------------- /docs/img/g1_mobile_base.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/g1_mobile_base.gif -------------------------------------------------------------------------------- /docs/img/g1_right_arm.gif: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/g1_right_arm.gif -------------------------------------------------------------------------------- /docs/img/g1_right_arm_safe_real.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/g1_right_arm_safe_real.gif -------------------------------------------------------------------------------- /docs/img/g1_right_arm_safe_sim.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/g1_right_arm_safe_sim.gif -------------------------------------------------------------------------------- /docs/img/g1_safe_teleop.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/g1_safe_teleop.gif -------------------------------------------------------------------------------- /docs/img/g1_safe_teleop_sim.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/g1_safe_teleop_sim.gif -------------------------------------------------------------------------------- /docs/img/g1_sport_mode.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/g1_sport_mode.gif -------------------------------------------------------------------------------- /docs/img/spark_sys_framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/spark_sys_framework.png -------------------------------------------------------------------------------- /docs/img/spark_system.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/docs/img/spark_system.jpg -------------------------------------------------------------------------------- /example/run_g1_unsafe_teleop_sim.py: -------------------------------------------------------------------------------- 1 | from spark_pipeline import G1SafeTeleopPipeline, G1UnsafeTeleopSimPipelineConfig 2 | 3 | if __name__ == "__main__": 4 | 5 | cfg = G1UnsafeTeleopSimPipelineConfig() 6 | 7 | # ------------------------- change g1 model if needed ------------------------ # 8 | 9 | # --------------------- configure safe control algorithm --------------------- # 10 | 11 | # --------------------------- other configurations --------------------------- # 12 | cfg.env.task.enable_ros = False # if ROS is needed for receiving task info 13 | cfg.env.agent.obstacle_debug["manual_movement_step_size"] = 0.1 # Tune step size for keyboard controlled obstacles 14 | 15 | # ------------------------------- run pipeline ------------------------------- # 16 | pipeline = G1SafeTeleopPipeline(cfg) 17 | pipeline.run() -------------------------------------------------------------------------------- 
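The placeholder sections in run_g1_unsafe_teleop_sim.py above are left for the user to fill in. Below is a minimal sketch of one way to do so, assuming the teleop pipeline config exposes the same cfg.robot.cfg and cfg.algo.safe_controller fields that the benchmark script further down sets; the particular class names chosen here are illustrative, not required:

from spark_pipeline import G1SafeTeleopPipeline, G1UnsafeTeleopSimPipelineConfig

if __name__ == "__main__":

    cfg = G1UnsafeTeleopSimPipelineConfig()

    # ------------------------- change g1 model if needed ------------------------ #
    # assumed field, mirroring config_pipeline() in run_g1_visulaize_safe_area.py
    cfg.robot.cfg.class_name = "G1FixedBaseDynamic2Config"

    # --------------------- configure safe control algorithm --------------------- #
    # assumed fields, mirroring config_safety_module() in the same script
    cfg.algo.safe_controller.safe_algo.class_name = "RelaxedSafeSetAlgorithm"
    cfg.algo.safe_controller.safety_index.class_name = "SecondOrderCollisionSafetyIndex"

    # --------------------------- other configurations --------------------------- #
    cfg.env.task.enable_ros = False  # if ROS is needed for receiving task info
    cfg.env.agent.obstacle_debug["manual_movement_step_size"] = 0.1  # keyboard-controlled obstacle step size

    # ------------------------------- run pipeline ------------------------------- #
    pipeline = G1SafeTeleopPipeline(cfg)
    pipeline.run()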
/example/run_g1_visulaize_safe_area.py: -------------------------------------------------------------------------------- 1 | from spark_pipeline import G1BenchmarkPipeline as Pipeline 2 | from spark_pipeline import G1BenchmarkPipelineConfig as PipelineConfig 3 | 4 | def config_task_module(cfg: PipelineConfig, **kwargs): 5 | """Configure task-related settings.""" 6 | cfg.env.task.num_obstacle_task = 1 7 | cfg.env.task.max_episode_length = 2000 8 | cfg.env.task.obstacle_direction = [0.0, 1.0, 0.0] 9 | cfg.env.task.obstacle_init = [0.2, -1.0, 0.9] 10 | cfg.env.task.obstacle_velocity = 0.15 11 | cfg.env.task.goal_left_velocity = 0.0 12 | cfg.env.task.goal_right_velocity = 0.0 13 | cfg.env.task.base_goal_enable = False 14 | cfg.env.task.mode = "Velocity" 15 | 16 | return cfg 17 | 18 | def config_agent_module(cfg: PipelineConfig, **kwargs): 19 | """Configure agent-related settings.""" 20 | cfg.env.agent.enable_viewer = True 21 | cfg.env.agent.use_sim_dynamics = False 22 | 23 | return cfg 24 | 25 | def config_policy_module(cfg: PipelineConfig, **kwargs): 26 | """Configure policy-related settings.""" 27 | # Use the default policy configuration 28 | 29 | return cfg 30 | 31 | def config_safety_module(cfg: PipelineConfig, **kwargs): 32 | """Configure safety-related settings.""" 33 | # --------------------- Config Safe Control Algorithm --------------------- # 34 | safe_algo = kwargs.get("safe_algo", "bypass") # Default to 'bypass' if not provided 35 | match safe_algo: 36 | case "bypass": 37 | cfg.algo.safe_controller.safe_algo.class_name = "ByPassSafeControl" 38 | 39 | case "ssa": 40 | cfg.algo.safe_controller.safe_algo.class_name = "BasicSafeSetAlgorithm" 41 | cfg.algo.safe_controller.safe_algo.eta_ssa = 0.1 42 | cfg.algo.safe_controller.safe_algo.control_weight = [ 43 | 1.0, 1.0, 1.0, # waist 44 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # left arm 45 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # right arm 46 | 1.0, 1.0, 1.0, 47 | ] 48 | 49 | case "rssa": 50 | cfg.algo.safe_controller.safe_algo.class_name = "RelaxedSafeSetAlgorithm" 51 | cfg.algo.safe_controller.safe_algo.eta_ssa = 1.0 52 | cfg.algo.safe_controller.safe_algo.control_weight = [ 53 | 1.0, 1.0, 1.0, # waist 54 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # left arm 55 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # right arm 56 | 1.0, 1.0, 1.0 # locomotion 57 | ] 58 | 59 | case "sss": 60 | cfg.algo.safe_controller.safe_algo.class_name = "SublevelSafeSetAlgorithm" 61 | cfg.algo.safe_controller.safe_algo.lambda_sss = 1.0 62 | cfg.algo.safe_controller.safe_algo.slack_weight = 1e3 63 | cfg.algo.safe_controller.safe_algo.control_weight = [ 64 | 1.0, 1.0, 1.0, # waist 65 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # left arm 66 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # right arm 67 | 1.0, 1.0, 1.0 # locomotion 68 | ] 69 | 70 | case "cbf": 71 | cfg.algo.safe_controller.safe_algo.class_name = "ControlBarrierFunction" 72 | cfg.algo.safe_controller.safe_algo.lambda_cbf = 1.0 73 | cfg.algo.safe_controller.safe_algo.slack_weight = 1e3 74 | cfg.algo.safe_controller.safe_algo.control_weight = [ 75 | 1.0, 1.0, 1.0, # waist 76 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # left arm 77 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # right arm 78 | 1.0, 1.0, 1.0 # locomotion 79 | ] 80 | 81 | case "pfm": 82 | cfg.algo.safe_controller.safe_algo.class_name = "PotentialFieldMethod" 83 | cfg.algo.safe_controller.safe_algo.c_pfm = 1.0 84 | 85 | case "sma": 86 | cfg.algo.safe_controller.safe_algo.class_name = "SlidingModeAlgorithm" 87 | cfg.algo.safe_controller.safe_algo.c_sma = 1.0 88 | 89 | if "FixedBase" in 
cfg.robot.cfg.class_name: 90 | cfg.algo.safe_controller.safe_algo.control_weight = cfg.algo.safe_controller.safe_algo.control_weight[:-3] 91 | elif "RightArm" in cfg.robot.cfg.class_name: 92 | cfg.algo.safe_controller.safe_algo.control_weight = cfg.algo.safe_controller.safe_algo.control_weight[3:10] 93 | 94 | # --------------------- Config Safe Control Index --------------------- # 95 | safety_index = kwargs.get("safety_index", "si1") # Default to 'si1' if not provided 96 | match safety_index: 97 | case "si1": 98 | cfg.algo.safe_controller.safety_index.class_name = "FirstOrderCollisionSafetyIndex" 99 | case "si2": 100 | cfg.algo.safe_controller.safety_index.class_name = "SecondOrderCollisionSafetyIndex" 101 | cfg.algo.safe_controller.safety_index.phi_n = 1.0 102 | cfg.algo.safe_controller.safety_index.phi_k = 1.0 103 | case "si2nn": 104 | cfg.algo.safe_controller.safety_index.class_name = "SecondOrderNNCollisionSafetyIndex" 105 | cfg.algo.safe_controller.safety_index.phi_n = 2 106 | cfg.algo.safe_controller.safety_index.phi_k = 1 107 | cfg.algo.safe_controller.safety_index.phi_nn_path = "n_2_scalar.onnx" 108 | 109 | return cfg 110 | 111 | def config_pipeline(cfg: PipelineConfig, **kwargs): 112 | """Configure pipeline settings.""" 113 | cfg.robot.cfg.class_name = kwargs.get("robot_cfg", "G1FixedBaseDynamic2Config") 114 | cfg.max_num_steps = 2000 115 | cfg.enable_logger = True 116 | cfg.enable_plotter = True 117 | cfg.enable_safe_zone_render = True 118 | cfg.metric_selection.dist_goal_base = False 119 | cfg.metric_selection.dist_self = True 120 | cfg.metric_selection.dist_robot_to_env = True 121 | cfg.metric_selection.dist_goal_arm = True 122 | cfg.metric_selection.seed = True 123 | cfg.metric_selection.done = True 124 | 125 | return cfg 126 | 127 | def run(**kwargs): 128 | """Main execution block to run the benchmark pipeline.""" 129 | # List of test cases to choose from 130 | 131 | # Generate initial pipeline configuration 132 | cfg = PipelineConfig() 133 | 134 | # Apply configuration modules in order 135 | cfg = config_pipeline(cfg, **kwargs) 136 | cfg = config_task_module(cfg, **kwargs) 137 | cfg = config_agent_module(cfg, **kwargs) 138 | cfg = config_policy_module(cfg, **kwargs) 139 | cfg = config_safety_module(cfg, **kwargs) 140 | 141 | # Run the pipeline 142 | pipeline = Pipeline(cfg) 143 | pipeline.run(save_path = kwargs.get("save_path", None)) 144 | 145 | return 146 | 147 | if __name__ == "__main__": 148 | save_path = "./log" 149 | run(robot_cfg = "G1FixedBaseDynamic2Config", 150 | safe_algo = "rssa", 151 | safety_index = "si2", 152 | save_path = save_path) 153 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Initialize Conda 4 | CONDA_BASE=$(conda info --base) 5 | source "$CONDA_BASE/etc/profile.d/conda.sh" 6 | 7 | # Function to report errors and exit 8 | function handle_error { 9 | echo "Error: $1" 10 | exit 1 11 | } 12 | 13 | # Default values 14 | INSTALL_ROS=false 15 | ENV_NAME="" 16 | 17 | # Parse arguments 18 | while [[ $# -gt 0 ]]; do 19 | case "$1" in 20 | --ros) 21 | INSTALL_ROS=true 22 | shift 23 | ;; 24 | --name) 25 | shift 26 | # Check if environment name is provided 27 | if [[ -z "$1" || "$1" == "--"* ]]; then 28 | handle_error "You must provide an environment name after --name."
29 | fi 30 | ENV_NAME="$1" 31 | shift 32 | ;; 33 | *) 34 | handle_error "Unknown option: $1" 35 | ;; 36 | esac 37 | done 38 | 39 | # Check if ENV_NAME was provided 40 | if [[ -z "$ENV_NAME" ]]; then 41 | handle_error "You must provide an environment name using --name env_name" 42 | fi 43 | 44 | # Step 1: Create Conda environment 45 | echo "Step 1: Creating Conda environment: $ENV_NAME" 46 | conda create --name $ENV_NAME "python=3.10" -y || handle_error "Failed to create Conda environment $ENV_NAME using 'conda create --name $ENV_NAME python=3.10'" 47 | 48 | # Step 2: Activate the environment 49 | echo "Step 2: Activating Conda environment: $ENV_NAME" 50 | conda activate $ENV_NAME || handle_error "Failed to activate Conda environment $ENV_NAME using 'conda activate $ENV_NAME'" 51 | 52 | # Step 3: Install numpy=2.0 53 | echo "Step 3: Installing numpy=2.0" 54 | conda install numpy=2.0 -y || handle_error "Failed to install numpy=2.0 using 'conda install numpy=2.0'" 55 | 56 | # Step 4: Install Pinocchio 57 | echo "Step 4: Installing Pinocchio" 58 | conda install -c conda-forge pinocchio=3.2.0 -y || handle_error "Failed to install Pinocchio using 'conda install -c conda-forge pinocchio=3.2.0'" 59 | 60 | # Step 5: Install core Python packages 61 | echo "Step 5: Installing core Python packages" 62 | 63 | # Declare the array of core Python packages 64 | core_packages=("scipy" "numba" "casadi" "matplotlib" "mujoco" "pyyaml" "osqp" "tensorboardX" "torch" "scikit-image" "tensorboard" "onnx" "onnx2torch" "ipdb") 65 | 66 | # Iterate through each package in the array 67 | for package in "${core_packages[@]}"; do 68 | echo "Installing $package..." 69 | pip install "$package" || handle_error "Failed to install Python package $package using 'pip install $package'" 70 | done 71 | 72 | # Step 6: Install spark_* modules 73 | echo "Step 6: Installing spark_* modules" 74 | 75 | # Declare the array of spark modules 76 | spark_modules=( 77 | "module/spark_agent" 78 | "module/spark_policy" 79 | "module/spark_robot" 80 | "module/spark_safe" 81 | "module/spark_task" 82 | "module/spark_utils" 83 | "pipeline/" 84 | "wrapper/spark_algo" 85 | "wrapper/spark_env" 86 | ) 87 | 88 | # Iterate through each module in the array 89 | for module in "${spark_modules[@]}"; do 90 | echo "Installing module: $module" 91 | cd "$module" || handle_error "Failed to enter directory $module" 92 | pip install -e . || handle_error "Failed to install module $module using 'pip install -e .'" 93 | cd - || handle_error "Failed to return to parent directory from $module" 94 | done 95 | 96 | echo "All spark_* modules installed successfully!" 97 | 98 | # Step 7: Conditionally install ROS dependencies 99 | if $INSTALL_ROS; then 100 | echo "Step 7: Installing ROS dependencies" 101 | 102 | # Declare the array of ROS packages 103 | ros_packages=("yaml" "rospkg" "catkin_pkg") 104 | 105 | # Iterate through each package in the array 106 | for package in "${ros_packages[@]}"; do 107 | echo "Installing $package..." 108 | conda install conda-forge::"$package" -y || handle_error "Failed to install ROS package $package using 'conda install -c conda-forge $package'" 109 | done 110 | 111 | echo "ROS dependencies installed successfully!" 112 | else 113 | echo "Step 7: Skipping ROS dependencies installation. Use '--ros' if ROS-related utilities are needed." 114 | fi 115 | 116 | echo "Installation process completed successfully!"
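# Usage sketch (assumption: the script is invoked from the repository root; the env name "spark" is only an example):
#   bash install.sh --name spark          # core install: conda env, numpy, pinocchio, pip packages, spark_* modules
#   bash install.sh --name spark --ros    # additionally installs the conda-forge ROS helper packages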
117 | -------------------------------------------------------------------------------- /module/spark_agent/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="spark_agent", 5 | version="0.1", 6 | description="Interface for the real robot and the simulation.", 7 | packages=find_packages(), 8 | include_package_data=True, 9 | install_requires=[ 10 | "numpy", # Add your library dependencies here 11 | ], 12 | python_requires=">=3.8", 13 | ) -------------------------------------------------------------------------------- /module/spark_agent/spark_agent/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | SPARK_AGENT_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) 4 | 5 | from .base.base_agent_config import BaseAgentConfig 6 | from .base.base_agent import BaseAgent 7 | from .simulation.mujoco.mujoco_agent import MujocoAgent 8 | from .simulation.mujoco.g1_mujoco_agent import G1MujocoAgent 9 | from .real.g1.g1_real_agent import G1RealAgent 10 | -------------------------------------------------------------------------------- /module/spark_agent/spark_agent/base/base_agent.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | import numpy as np 3 | from spark_robot import RobotConfig 4 | 5 | class BaseAgent(ABC): 6 | """ 7 | Abstract base class for defining robot agents. 8 | Provides a structure for handling robot control, feedback, and state updates. 9 | """ 10 | 11 | def __init__(self, robot_cfg: RobotConfig) -> None: 12 | """ 13 | Initialize the robot agent with the given configuration. 14 | 15 | Args: 16 | robot_cfg (RobotConfig): Configuration object for the robot. 17 | """ 18 | super().__init__() 19 | self.robot_cfg = robot_cfg 20 | 21 | # Degrees of freedom (DoFs) and control parameters 22 | self.num_dof = len(self.robot_cfg.DoFs) 23 | self.num_control = len(self.robot_cfg.Control) 24 | self.num_obstacle_agent = 0 # Number of obstacle agents (default: 0) 25 | 26 | # Command variables (desired values for DoFs) 27 | self.dof_pos_cmd = None # Position command 28 | self.dof_vel_cmd = None # Velocity command 29 | self.dof_acc_cmd = None # Acceleration command 30 | 31 | # Feedback variables (actual sensor readings) 32 | self.dof_pos_fbk = None # Position feedback 33 | self.dof_vel_fbk = None # Velocity feedback 34 | self.dof_acc_fbk = None # Acceleration feedback 35 | 36 | # Current DoF state (latest computed values) 37 | self.dof_pos = None # Current position 38 | self.dof_vel = None # Current velocity 39 | self.dof_acc = None # Current acceleration 40 | 41 | def reset(self) -> None: 42 | """ 43 | Reset the robot agent's command, feedback, and state variables. 44 | """ 45 | self.dof_pos_cmd = None 46 | self.dof_vel_cmd = None 47 | self.dof_acc_cmd = None 48 | 49 | self.dof_pos_fbk = None 50 | self.dof_vel_fbk = None 51 | self.dof_acc_fbk = None 52 | 53 | self.dof_pos = None 54 | self.dof_vel = None 55 | self.dof_acc = None 56 | 57 | @abstractmethod 58 | def send_control(self, control: np.ndarray) -> None: 59 | """ 60 | Execute the given control input on the robot. 61 | 62 | Args: 63 | control (np.ndarray): Control input array. 
64 | """ 65 | pass 66 | 67 | def _post_control_processing(self, **kwargs) -> None: 68 | """ 69 | Perform any necessary post-control operations, such as: 70 | - Simulation step 71 | - State refresh 72 | - Rendering 73 | 74 | Args: 75 | **kwargs: Additional arguments for processing. 76 | """ 77 | pass 78 | 79 | def step(self, control: np.ndarray, **kwargs) -> None: 80 | """ 81 | Perform a single step of the robot agent by sending control input and executing post-processing. 82 | 83 | Args: 84 | control (np.ndarray): Control input array. 85 | **kwargs: Additional arguments for processing. 86 | """ 87 | self.send_control(control, **kwargs) 88 | self._post_control_processing(**kwargs) 89 | 90 | @abstractmethod 91 | def get_feedback(self) -> None: 92 | """ 93 | Retrieve the current state feedback of the robot agent. 94 | """ 95 | pass 96 | 97 | def compose_cmd_state(self) -> np.ndarray: 98 | """ 99 | Compose the state representation of the robot agent based on the robot configuration. 100 | This method should be implemented in subclasses. 101 | 102 | Returns: 103 | np.ndarray: State representation of the robot agent. 104 | """ 105 | pass -------------------------------------------------------------------------------- /module/spark_agent/spark_agent/base/base_agent_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from enum import IntEnum, Enum 3 | import inspect 4 | 5 | class BaseAgentConfig: 6 | 7 | def __init__(self) -> None: 8 | 9 | super().__init__() 10 | 11 | """ Initializes all member classes recursively. Ignores all names starting with '__' (buit-in methods).""" 12 | self.init_member_classes(self) 13 | 14 | # taken from git@github.com:leggedrobotics/legged_gym.git 15 | @staticmethod 16 | def init_member_classes(obj): 17 | print(f"Initializing {obj.__class__.__name__}") 18 | 19 | # Iterate over all attributes names 20 | for key in dir(obj): 21 | # Disregard builtin attributes 22 | if key == "__class__": 23 | continue 24 | 25 | # Get the corresponding attribute object 26 | var = getattr(obj, key) 27 | 28 | # Check if the attribute is a class but not an Enum 29 | if inspect.isclass(var) and not issubclass(var, Enum): 30 | 31 | # Instantiate the class 32 | i_var = var() 33 | # Set the attribute to the instance instead of the type 34 | setattr(obj, key, i_var) 35 | # Recursively init members of the attribute 36 | BaseAgentConfig.init_member_classes(i_var) -------------------------------------------------------------------------------- /module/spark_agent/spark_agent/real/__init__.py: -------------------------------------------------------------------------------- 1 | from .g1.g1_real_agent import G1RealAgent -------------------------------------------------------------------------------- /module/spark_agent/spark_agent/real/g1/g1_idl_struct.py: -------------------------------------------------------------------------------- 1 | """ 2 | Data structures used to contain Unitree IDL (Interface Definition Language). 3 | These structures are used to communicate with the Unitree SDK. 4 | These data structures are based on buffer data structures, using threading. 
5 | """ 6 | import threading 7 | 8 | 9 | class DataBuffer: 10 | def __init__(self): 11 | self.data = None 12 | self.lock = threading.Lock() 13 | 14 | def GetData(self): 15 | with self.lock: 16 | return self.data 17 | 18 | def SetData(self, data): 19 | with self.lock: 20 | self.data = data 21 | 22 | class MotorState: 23 | def __init__(self): 24 | self.q = None 25 | self.dq = None 26 | self.ddq = None 27 | self.tau_est = None 28 | 29 | class MotorCmd: 30 | def __init__(self): 31 | self.q = None 32 | self.dq = None 33 | self.kp = None 34 | self.kd = None 35 | self.tau = None 36 | 37 | # ---------------------------------------------------------------------------- # 38 | # Low State # 39 | # ---------------------------------------------------------------------------- # 40 | class LowStateDataStruct: 41 | def __init__(self): 42 | # TODO: change this to be more generic in the future for H1 43 | ### IMU State ### 44 | self.imu_rpy = None 45 | self.imu_quaternion = None 46 | self.imu_gyroscope = None 47 | self.imu_accelerometer = None 48 | ### Motor State ### 49 | self.motor_state = [MotorState() for _ in range(35)] 50 | 51 | class G1_29_LowState(LowStateDataStruct): 52 | def __init__(self): 53 | super().__init__() 54 | ### IMU State ### 55 | self.imu_rpy = None 56 | self.imu_quaternion = None 57 | self.imu_gyroscope = None 58 | self.imu_accelerometer = None 59 | ### Motor State ### 60 | self.motor_state = [MotorState() for _ in range(35)] 61 | 62 | class H1_LowState(LowStateDataStruct): 63 | def __init__(self): 64 | super().__init__() 65 | raise NotImplementedError("H1_LowState not implemented.") 66 | 67 | # ---------------------------------------------------------------------------- # 68 | # High State (Sport Mode) # 69 | # ---------------------------------------------------------------------------- # 70 | class HighStateDataStruct: 71 | def __init__(self): 72 | # TODO: change this to be more generic in the future for H1 73 | self.position = None 74 | self.velocity = None 75 | self.yaw_speed = None 76 | 77 | class G1_29_HighState(HighStateDataStruct): 78 | def __init__(self): 79 | super().__init__() 80 | self.position = None 81 | self.velocity = None 82 | self.yaw_speed = None 83 | 84 | class H1_HighState(HighStateDataStruct): 85 | def __init__(self): 86 | super().__init__() 87 | raise NotImplementedError("H1_HighState not implemented.") 88 | 89 | # ---------------------------------------------------------------------------- # 90 | # Command State # 91 | # ---------------------------------------------------------------------------- # 92 | 93 | class CmdDataStruct: 94 | def __init__(self): 95 | # TODO: change this to be more generic in the future for H1 96 | ### Arm SDK Command ### 97 | self.motor_cmd = [MotorCmd() for _ in range(17)] 98 | ### NotUsedJoint ### 99 | self.arm_sdk_notUsedJoint_q = 0.0 100 | 101 | class G1_29_ArmSDK_State(CmdDataStruct): 102 | def __init__(self): 103 | super().__init__() 104 | ### Arm SDK Command ### 105 | self.motor_cmd = [MotorCmd() for _ in range(17)] 106 | ### NotUsedJoint ### 107 | self.arm_sdk_notUsedJoint_q = 0.0 108 | 109 | class H1_ArmSDK_State(CmdDataStruct): 110 | def __init__(self): 111 | super().__init__() 112 | raise NotImplementedError("H1_ArmSDK_State not implemented.") 113 | 114 | class G1_29_LowCmd_State(CmdDataStruct): 115 | def __init__(self): 116 | super().__init__() 117 | ### Low Command ### 118 | self.motor_cmd = [MotorCmd() for _ in range(35)] 119 | 120 | class H1_LowCmd_State(CmdDataStruct): 121 | def __init__(self): 122 | super().__init__() 
123 | raise NotImplementedError("H1_LowCmd_State not implemented.") -------------------------------------------------------------------------------- /module/spark_agent/spark_agent/real/g1/loco/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_agent/spark_agent/real/g1/loco/__init__.py -------------------------------------------------------------------------------- /module/spark_agent/spark_agent/real/g1/loco/g1_loco_api.py: -------------------------------------------------------------------------------- 1 | """ 2 | Developed by ICL (Intelligent Control Lab) at CMU Robotics Institute. 3 | Developer: Kai Yun 4 | """ 5 | 6 | """ 7 | Service Name 8 | """ 9 | LOCO_SERVICE_NAME = "loco" 10 | 11 | """ 12 | Service API version 13 | """ 14 | LOCO_API_VERSION = "1.0.0.0" 15 | 16 | """ 17 | API IDs 18 | """ 19 | ROBOT_API_ID_LOCO_GET_FSM_ID = 7001 20 | ROBOT_API_ID_LOCO_GET_FSM_MODE = 7002 21 | ROBOT_API_ID_LOCO_GET_BALANCE_MODE = 7003 22 | ROBOT_API_ID_LOCO_GET_SWING_HEIGHT = 7004 23 | ROBOT_API_ID_LOCO_GET_STAND_HEIGHT = 7005 24 | ROBOT_API_ID_LOCO_GET_PHASE = 7006 # deprecated 25 | 26 | ROBOT_API_ID_LOCO_SET_FSM_ID = 7101 27 | ROBOT_API_ID_LOCO_SET_BALANCE_MODE = 7102 28 | ROBOT_API_ID_LOCO_SET_SWING_HEIGHT = 7103 29 | ROBOT_API_ID_LOCO_SET_STAND_HEIGHT = 7104 30 | ROBOT_API_ID_LOCO_SET_VELOCITY = 7105 -------------------------------------------------------------------------------- /module/spark_agent/spark_agent/simulation/mujoco/motion.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_agent/spark_agent/simulation/mujoco/motion.pt -------------------------------------------------------------------------------- /module/spark_agent/spark_agent/simulation/simulation_agent.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from spark_agent.base.base_agent import BaseAgent 3 | from abc import ABC, abstractmethod 4 | 5 | class SimulationAgent(BaseAgent): 6 | 7 | def __init__(self, robot_cfg): 8 | super().__init__(robot_cfg) 9 | self.use_sim_dynamics = True 10 | 11 | def send_control(self, command: np.ndarray, **kwargs) -> None: 12 | if self.use_sim_dynamics: 13 | self._send_control_sim_dynamics(command, **kwargs) 14 | else: 15 | self._send_control_modeled_dynamics(command, **kwargs) 16 | 17 | def _send_control_sim_dynamics(self, command: np.ndarray, **kwargs) -> None: 18 | raise NotImplementedError("send_vel_command_sim_dynamics not implemented") 19 | 20 | def _send_control_modeled_dynamics(self, command: np.ndarray, **kwargs) -> None: 21 | raise NotImplementedError("send_vel_command_modeled_dynamics not implemented") 22 | 23 | 24 | -------------------------------------------------------------------------------- /module/spark_policy/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="spark_policy", 5 | version="0.1", 6 | description="Robot policy.", 7 | packages=find_packages(), 8 | include_package_data=True, 9 | install_requires=[ 10 | "numpy", # Add your library dependencies here 11 | ], 12 | python_requires=">=3.8", 13 | ) -------------------------------------------------------------------------------- 
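The SimulationAgent class in simulation_agent.py above routes send_control() to either the simulator's own dynamics or a hand-written model, while BaseAgent.step() follows each control with _post_control_processing(). Below is a minimal sketch of a custom agent built on that interface, assuming a hypothetical single-integrator model; the class name ToyIntegratorAgent, the dt parameter, and the feedback keys are illustrative, not part of spark_agent:

import numpy as np
from spark_agent.simulation.simulation_agent import SimulationAgent

class ToyIntegratorAgent(SimulationAgent):
    def __init__(self, robot_cfg, dt=0.01):
        super().__init__(robot_cfg)
        self.dt = dt
        self.use_sim_dynamics = False  # exercise the modeled-dynamics hook below
        self.dof_pos = np.zeros(self.num_dof)
        self.dof_vel = np.zeros(self.num_dof)

    def _send_control_modeled_dynamics(self, command: np.ndarray, **kwargs) -> None:
        # treat the command as DoF velocities and integrate one step forward
        self.dof_vel = np.asarray(command)[: self.num_dof]
        self.dof_pos = self.dof_pos + self.dt * self.dof_vel

    def _send_control_sim_dynamics(self, command: np.ndarray, **kwargs) -> None:
        # a full agent (e.g. G1MujocoAgent) would forward the command to the simulator here
        raise NotImplementedError

    def get_feedback(self) -> dict:
        # keys chosen to mirror those consumed by the policies shown later in this repository
        return {"dof_pos_fbk": self.dof_pos.copy(), "dof_vel_fbk": self.dof_vel.copy()}

Setting use_sim_dynamics = False selects the modeled-dynamics path, which mirrors the cfg.env.agent.use_sim_dynamics switch set in the benchmark example above.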
/module/spark_policy/spark_policy/__init__.py: -------------------------------------------------------------------------------- 1 | from .base.base_policy import BasePolicy 2 | from .feedback.g1_teleop_pid_policy import G1TeleopPIDPolicy 3 | from .feedback.g1_benchmark_pid_policy import G1BenchmarkPIDPolicy -------------------------------------------------------------------------------- /module/spark_policy/spark_policy/base/base_policy.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from enum import IntEnum, Enum 3 | from spark_agent import BaseAgent 4 | from spark_robot import RobotConfig, RobotKinematics 5 | import inspect 6 | import numpy as np 7 | 8 | class BasePolicy(ABC): 9 | def __init__(self, robot_cfg: RobotConfig, robot_kinematics: RobotKinematics) -> None: 10 | self.robot_cfg : RobotConfig = robot_cfg 11 | self.robot_kinematics : RobotKinematics = robot_kinematics 12 | self.num_dof = len(self.robot_cfg.DoFs) 13 | self.num_control = len(self.robot_cfg.Control) 14 | 15 | @abstractmethod 16 | def act(self, agent_feedback: dict, task_info: dict) -> None: 17 | pass -------------------------------------------------------------------------------- /module/spark_policy/spark_policy/feedback/g1_benchmark_pid_policy.py: -------------------------------------------------------------------------------- 1 | from spark_policy.base.base_policy import BasePolicy 2 | from spark_agent import BaseAgent 3 | from spark_robot import RobotKinematics, RobotConfig 4 | import numpy as np 5 | from scipy.spatial.transform import Rotation as R 6 | 7 | class G1BenchmarkPIDPolicy(BasePolicy): 8 | def __init__(self, robot_cfg: RobotConfig, robot_kinematics: RobotKinematics) -> None: 9 | super().__init__(robot_cfg, robot_kinematics) 10 | 11 | def tracking_pos_with_vel(self, 12 | dof_pos_target, 13 | dof_vel_target, 14 | dof_pos_current, 15 | dof_vel_current): 16 | K_p = 1.0 * np.ones(len(self.robot_cfg.Control)) 17 | K_d = 0.1 * np.ones(len(self.robot_cfg.Control)) 18 | dof_vel_nominal = K_p * (dof_pos_target - dof_pos_current) + K_d * (dof_vel_target - dof_vel_current) 19 | nominal_control = np.linalg.pinv(self.robot_cfg.dynamics_g(dof_pos_current)) @ dof_vel_nominal 20 | return nominal_control 21 | 22 | def tracking_pos_with_acc(self, 23 | dof_pos_target, 24 | dof_vel_target, 25 | dof_pos_current, 26 | dof_vel_current): 27 | 28 | K_p_vel = 1.0 * np.ones(len(self.robot_cfg.DoFs)) 29 | K_d_vel = 0.4 * np.ones(len(self.robot_cfg.DoFs)) 30 | K_p_acc = 10.0 * np.ones(len(self.robot_cfg.DoFs)) 31 | K_d_acc = 5.0 * np.ones(len(self.robot_cfg.DoFs)) 32 | nominal_dof_vel = K_p_vel * (dof_pos_target - dof_pos_current) - K_d_vel * dof_vel_current 33 | # nominal_dof_acc = nominal_dof_vel - dof_vel_current 34 | nominal_dof_acc = K_p_acc * (dof_pos_target - dof_pos_current) - K_d_acc * dof_vel_current 35 | nominal_control = np.linalg.pinv(self.robot_cfg.dynamics_g(np.concatenate((dof_pos_current, dof_vel_current)))) @ np.concatenate((nominal_dof_vel, nominal_dof_acc)) 36 | return nominal_control 37 | 38 | def act(self, agent_feedback: dict, task_info: dict): 39 | info = {} 40 | 41 | dof_pos_cmd = agent_feedback["dof_pos_cmd"] 42 | dof_vel_cmd = agent_feedback["dof_vel_cmd"] 43 | dof_pos_fbk = agent_feedback["dof_pos_fbk"] 44 | dof_vel_fbk = agent_feedback["dof_vel_fbk"] 45 | goal_teleop = task_info["goal_teleop"] 46 | robot_base_frame = agent_feedback["robot_base_frame"] 47 | 48 | dof_pos_current = dof_pos_cmd 49 | for dof in 
self.robot_cfg.DoFs: 50 | if dof.name in ["LinearX", "LinearY", "RotYaw"]: 51 | dof_pos_current[dof] = dof_pos_fbk[dof] 52 | try: 53 | if task_info["arm_goal_enable"]: 54 | dof_pos_target, _ = self.robot_kinematics.inverse_kinematics([np.linalg.inv(robot_base_frame) @ goal_teleop["left"], 55 | np.linalg.inv(robot_base_frame) @ goal_teleop["right"]]) 56 | 57 | if task_info["base_goal_enable"]: 58 | rot = R.from_matrix(goal_teleop["base"][:3, :3]) 59 | euler = rot.as_euler("xyz") 60 | dof_pos_target[self.robot_cfg.DoFs.LinearX] = goal_teleop["base"][0, 3] 61 | dof_pos_target[self.robot_cfg.DoFs.LinearY] = goal_teleop["base"][1, 3] 62 | dof_pos_target[self.robot_cfg.DoFs.RotYaw] = euler[2] 63 | 64 | if "Dynamic1" in self.robot_cfg.__class__.__name__: 65 | dof_vel_target = np.zeros_like(dof_vel_fbk) 66 | dof_pos_current = dof_pos_fbk 67 | dof_vel_current = dof_vel_fbk 68 | control = self.tracking_pos_with_vel(dof_pos_target, dof_vel_target, dof_pos_current, dof_vel_current) 69 | if "Dynamic2" in self.robot_cfg.__class__.__name__: 70 | dof_vel_target = np.zeros_like(dof_vel_fbk) 71 | dof_pos_current = dof_pos_fbk 72 | dof_vel_current = dof_vel_fbk 73 | control = self.tracking_pos_with_acc(dof_pos_target, dof_vel_target, dof_pos_current, dof_vel_current) 74 | 75 | info["ik_success"] = True 76 | 77 | except Exception as e: 78 | print("inverse_kinematics error", e) 79 | 80 | control = np.zeros_like(dof_pos_cmd) 81 | info["ik_success"] = False 82 | raise e 83 | 84 | for control_id in self.robot_cfg.Control: 85 | control[control_id] = np.clip(control[control_id], -self.robot_cfg.ControlLimit[control_id], self.robot_cfg.ControlLimit[control_id]) 86 | 87 | return control, info -------------------------------------------------------------------------------- /module/spark_policy/spark_policy/feedback/g1_teleop_pid_policy.py: -------------------------------------------------------------------------------- 1 | from spark_policy.base.base_policy import BasePolicy 2 | from spark_agent import BaseAgent 3 | from spark_robot import RobotKinematics, RobotConfig 4 | import numpy as np 5 | from scipy.spatial.transform import Rotation as R 6 | 7 | class G1TeleopPIDPolicy(BasePolicy): 8 | def __init__(self, robot_cfg: RobotConfig, robot_kinematics: RobotKinematics) -> None: 9 | super().__init__(robot_cfg, robot_kinematics) 10 | 11 | def tracking_pos_with_vel(self, 12 | dof_pos_target, 13 | dof_vel_target, 14 | dof_pos_current, 15 | dof_vel_current): 16 | #NOTE: Parameters for real robot open loop control with cmd pos and vel 17 | # K_p = 120.0 * np.ones(len(self.robot_cfg.Control)) 18 | # K_d = 0.0 * np.ones(len(self.robot_cfg.Control)) 19 | # NOTE: Parameters for real robot closed loop control with fbk pos and vel 20 | # K_p = 50.0 * np.ones(len(self.robot_cfg.Control)) 21 | # K_d = 5.0 * np.ones(len(self.robot_cfg.Control)) 22 | # NOTE: Parameters for viz robot open loop control with fbk pos and vel 23 | # K_p = 10.0 * np.ones(len(self.robot_cfg.Control)) 24 | # K_d = 0.0 * np.ones(len(self.robot_cfg.Control)) 25 | # NOTE: Parameters for sim robot open loop control with fbk pos and vel 26 | K_p = 1.0 * np.ones(len(self.robot_cfg.Control)) 27 | K_d = 0.0 * np.ones(len(self.robot_cfg.Control)) 28 | dof_vel_nominal = K_p * (dof_pos_target - dof_pos_current) + K_d * (dof_vel_target - dof_vel_current) 29 | nominal_control = np.linalg.pinv(self.robot_cfg.dynamics_g(dof_pos_current)) @ dof_vel_nominal 30 | return nominal_control 31 | 32 | def tracking_pos_with_acc(self, 33 | dof_pos_target, 34 | dof_vel_target, 35 | 
dof_pos_current, 36 | dof_vel_current): 37 | 38 | K_p = 1.0 * np.ones(len(self.robot_cfg.DoFs)) 39 | K_d = 0.0 * np.ones(len(self.robot_cfg.DoFs)) 40 | nominal_dof_vel = np.zeros_like(dof_vel_current) 41 | nominal_dof_vel = K_p * (dof_pos_target - dof_pos_current) + K_d * dof_vel_current 42 | nominal_dof_acc = nominal_dof_vel - dof_vel_current 43 | nominal_control = np.linalg.pinv(self.robot_cfg.dynamics_g(np.concatenate((dof_pos_current, dof_vel_current)))) @ np.concatenate((nominal_dof_vel, nominal_dof_acc)) 44 | 45 | return nominal_control 46 | 47 | def act(self, agent_feedback: dict, task_info: dict): 48 | info = {} 49 | dof_pos_cmd = agent_feedback["dof_pos_cmd"] 50 | dof_vel_cmd = agent_feedback["dof_vel_cmd"] 51 | dof_pos_fbk = agent_feedback["dof_pos_fbk"] 52 | dof_vel_fbk = agent_feedback["dof_vel_fbk"] 53 | robot_base_frame = agent_feedback["robot_base_frame"] 54 | goal_teleop = task_info["goal_teleop"] 55 | try: 56 | dof_pos_target, _ = self.robot_kinematics.inverse_kinematics([np.linalg.inv(robot_base_frame) @ goal_teleop["left"], 57 | np.linalg.inv(robot_base_frame) @ goal_teleop["right"]]) 58 | 59 | if "Dynamic1" in self.robot_cfg.__class__.__name__: 60 | dof_vel_target = np.zeros_like(dof_vel_fbk) 61 | dof_pos_current = dof_pos_cmd 62 | dof_vel_current = dof_vel_cmd 63 | control = self.tracking_pos_with_vel(dof_pos_target, dof_vel_target, dof_pos_current, dof_vel_current) 64 | if "Dynamic2" in self.robot_cfg.__class__.__name__: 65 | dof_vel_target = np.zeros_like(dof_vel_fbk) 66 | dof_pos_current = dof_pos_cmd 67 | dof_vel_current = dof_vel_cmd 68 | control = self.tracking_pos_with_acc(dof_pos_target, dof_vel_target, dof_pos_current, dof_vel_current) 69 | 70 | info["ik_success"] = True 71 | 72 | except Exception as e: 73 | print("inverse_kinematics error", e) 74 | 75 | control = np.zeros_like(dof_pos_fbk) 76 | info["ik_success"] = False 77 | raise e 78 | 79 | for control_id in self.robot_cfg.Control: 80 | control[control_id] = np.clip(control[control_id], -self.robot_cfg.ControlLimit[control_id], self.robot_cfg.ControlLimit[control_id]) 81 | 82 | return control, info -------------------------------------------------------------------------------- /module/spark_policy/spark_policy/model_free/user_rl_policy.py: -------------------------------------------------------------------------------- 1 | from spark_policy.base.base_policy import BasePolicy 2 | from spark_agent import BaseAgent 3 | from spark_robot import RobotKinematics, RobotConfig 4 | import numpy as np 5 | 6 | class UserRLPolicy(BasePolicy): 7 | def __init__(self, **kwargs) -> None: 8 | super().__init__(**kwargs) 9 | 10 | def act(self, agent_feedback: dict, task_info: dict): 11 | ''' 12 | User should implement their own policy here to get the control signal. 
13 | ''' 14 | dof_control = None 15 | info = None 16 | 17 | return dof_control, info -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/head_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/head_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_ankle_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_ankle_pitch_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_ankle_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_ankle_roll_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_elbow_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_elbow_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_hand_index_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_hand_index_0_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_hand_index_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_hand_index_1_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_hand_middle_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_hand_middle_0_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_hand_middle_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_hand_middle_1_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_hand_palm_link.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_hand_palm_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_hand_thumb_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_hand_thumb_0_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_hand_thumb_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_hand_thumb_1_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_hand_thumb_2_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_hand_thumb_2_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_hip_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_hip_pitch_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_hip_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_hip_roll_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_hip_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_hip_yaw_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_knee_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_knee_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_rubber_hand.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_rubber_hand.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_shoulder_pitch_link.STL: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_shoulder_pitch_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_shoulder_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_shoulder_roll_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_shoulder_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_shoulder_yaw_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_wrist_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_wrist_pitch_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_wrist_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_wrist_roll_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_wrist_roll_rubber_hand.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_wrist_roll_rubber_hand.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/left_wrist_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/left_wrist_yaw_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/logo_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/logo_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/pelvis.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/pelvis.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/pelvis_contour_link.STL: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/pelvis_contour_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_ankle_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_ankle_pitch_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_ankle_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_ankle_roll_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_elbow_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_elbow_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_hand_index_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_hand_index_0_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_hand_index_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_hand_index_1_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_hand_middle_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_hand_middle_0_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_hand_middle_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_hand_middle_1_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_hand_palm_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_hand_palm_link.STL -------------------------------------------------------------------------------- 
/module/spark_robot/resources/g1/meshes/right_hand_thumb_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_hand_thumb_0_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_hand_thumb_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_hand_thumb_1_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_hand_thumb_2_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_hand_thumb_2_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_hip_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_hip_pitch_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_hip_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_hip_roll_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_hip_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_hip_yaw_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_knee_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_knee_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_rubber_hand.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_rubber_hand.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_shoulder_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_shoulder_pitch_link.STL 
-------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_shoulder_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_shoulder_roll_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_shoulder_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_shoulder_yaw_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_wrist_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_wrist_pitch_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_wrist_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_wrist_roll_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_wrist_roll_rubber_hand.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_wrist_roll_rubber_hand.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/right_wrist_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/right_wrist_yaw_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/torso_constraint_L_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/torso_constraint_L_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/torso_constraint_L_rod_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/torso_constraint_L_rod_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/torso_constraint_R_link.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/torso_constraint_R_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/torso_constraint_R_rod_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/torso_constraint_R_rod_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/torso_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/torso_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/torso_link_rev_1_0.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/torso_link_rev_1_0.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/waist_constraint_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/waist_constraint_L.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/waist_constraint_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/waist_constraint_R.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/waist_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/waist_roll_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/waist_roll_link_rev_1_0.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/waist_roll_link_rev_1_0.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/waist_support_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/waist_support_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/waist_yaw_link.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/waist_yaw_link.STL -------------------------------------------------------------------------------- /module/spark_robot/resources/g1/meshes/waist_yaw_link_rev_1_0.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_robot/resources/g1/meshes/waist_yaw_link_rev_1_0.STL -------------------------------------------------------------------------------- /module/spark_robot/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="spark_robot", 5 | version="0.1", 6 | description="Robot lib for spark", 7 | packages=find_packages(), 8 | include_package_data=True, 9 | install_requires=[ 10 | "numpy", # Add your library dependencies here 11 | ], 12 | python_requires=">=3.8", 13 | package_data={ 14 | # Include URDF and STL files in the installation 15 | "spark_robot": [ 16 | "resources/g1/*.urdf", 17 | "resources/g1/meshes/*.STL", 18 | ] 19 | }, 20 | ) 21 | -------------------------------------------------------------------------------- /module/spark_robot/spark_robot/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | SPARK_ROBOT_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) 4 | SPARK_ROBOT_RESOURCE_DIR = os.path.join(SPARK_ROBOT_ROOT, "resources") 5 | 6 | from .base.base_robot_config import RobotConfig 7 | from .base.base_robot_kinematics import RobotKinematics 8 | from .g1.kinematics.g1_right_arm_kinematics import G1RightArmKinematics 9 | from .g1.kinematics.g1_fixed_base_kinematics import G1FixedBaseKinematics 10 | from .g1.kinematics.g1_mobile_base_kinematics import G1MobileBaseKinematics 11 | from .g1.kinematics.g1_whole_body_kinematics import G1WholeBodyKinematics 12 | 13 | # Single Integrator 14 | from .g1.config.g1_fixed_base_dynamic_1_config import G1FixedBaseDynamic1Config 15 | from .g1.config.g1_mobile_base_dynamic_1_config import G1MobileBaseDynamic1Config 16 | from .g1.config.g1_right_arm_dynamic_1_config import G1RightArmDynamic1Config 17 | from .g1.config.g1_sport_mode_dynamic_1_config import G1SportModeDynamic1Config 18 | from .g1.config.g1_whole_body_dynamic_1_config import G1WholeBodyDynamic1Config 19 | 20 | # Double Integrator 21 | from .g1.config.g1_fixed_base_dynamic_2_config import G1FixedBaseDynamic2Config 22 | from .g1.config.g1_mobile_base_dynamic_2_config import G1MobileBaseDynamic2Config 23 | from .g1.config.g1_right_arm_dynamic_2_config import G1RightArmDynamic2Config 24 | from .g1.config.g1_sport_mode_dynamic_2_config import G1SportModeDynamic2Config 25 | from .g1.config.g1_whole_body_dynamic_2_config import G1WholeBodyDynamic2Config -------------------------------------------------------------------------------- /module/spark_robot/spark_robot/base/base_robot_kinematics.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | import numpy as np 3 | from spark_robot.base.base_robot_config import RobotConfig 4 | 5 | class RobotKinematics(ABC): 6 | """ 7 | An abstract base class for defining robot kinematics. 
8 | """ 9 | 10 | def __init__(self, robot_cfg: RobotConfig) -> None: 11 | super().__init__() 12 | 13 | self.robot_cfg = robot_cfg 14 | self.num_dof = len(self.robot_cfg.DoFs) 15 | self.num_control = len(self.robot_cfg.Control) 16 | 17 | @abstractmethod 18 | def update_base_frame(self, trans_world2base: np.ndarray, dof: np.ndarray) -> np.ndarray: 19 | """ 20 | Compute the base frame of the robot in the world frame. 21 | Dof may update the base frame. For example, the calcualtion of grad_phi. 22 | 23 | Input: 24 | trans_world2base: [4, 4], SE(3) of robot base frame in world frame 25 | dof: 1D array, current dof values defined by RobotConfig.DoFs 26 | 27 | Output: 28 | T: [4, 4], SE(3) of robot base frame in world frame 29 | 30 | """ 31 | pass 32 | 33 | @abstractmethod 34 | def forward_kinematics(self, dof: np.ndarray) -> np.ndarray: 35 | """ 36 | Compute the forward kinematics for the robot. 37 | 38 | Input: 39 | dof: 1D array, current dof values defined by RobotConfig.DoFs 40 | 41 | Output: 42 | T: [num_frames, 4, 4], SE(3) of RobotConfig.Frames in robot base frame 43 | 44 | """ 45 | pass 46 | 47 | @abstractmethod 48 | def inverse_kinematics(self, T: np.ndarray) -> np.ndarray: 49 | """ 50 | Compute the inverse kinematics for the robot. 51 | 52 | Input: 53 | T: [num_frames, 4, 4], SE(3) of frames to solve for in robot base frame 54 | 55 | Output: 56 | dof: 1D array, dof values defined by RobotConfig.DoFs 57 | info: dict with additional information 58 | 59 | """ 60 | pass 61 | -------------------------------------------------------------------------------- /module/spark_robot/spark_robot/g1/kinematics/g1_mobile_base_kinematics.py: -------------------------------------------------------------------------------- 1 | import casadi 2 | import numpy as np 3 | import pinocchio as pin 4 | from pinocchio import casadi as cpin 5 | import os 6 | 7 | from spark_robot.g1.kinematics.g1_fixed_base_kinematics import G1FixedBaseKinematics 8 | from spark_robot.base.base_robot_config import RobotConfig 9 | from spark_robot import SPARK_ROBOT_RESOURCE_DIR 10 | 11 | class G1MobileBaseKinematics(G1FixedBaseKinematics): 12 | def __init__(self, robot_cfg: RobotConfig, **kwargs) -> None: 13 | super().__init__(robot_cfg, **kwargs) 14 | 15 | def _init_whole_body_kinematics(self): 16 | print("Initializing G1MobileBaseKinematics") 17 | jointComposite = pin.JointModelComposite(3); 18 | jointComposite.addJoint(pin.JointModelPX()); 19 | jointComposite.addJoint(pin.JointModelPY()); 20 | jointComposite.addJoint(pin.JointModelRZ()); 21 | 22 | self.robot = pin.RobotWrapper.BuildFromMJCF( 23 | os.path.join(SPARK_ROBOT_RESOURCE_DIR, self.kinematics_model_path), jointComposite 24 | ) 25 | 26 | self.model = self.robot.model 27 | self.add_extra_frames(self.model) 28 | self.data = pin.Data(self.model) 29 | 30 | self.pin_frame_dict = {} 31 | for frame in self.robot_cfg.Frames: 32 | for j in range(self.model.nframes): 33 | pin_frame = self.model.frames[j] 34 | pin_frame_id = self.model.getFrameId(pin_frame.name) 35 | if frame.name == pin_frame.name: 36 | self.pin_frame_dict[frame.name] = pin_frame_id 37 | print(f"Frame {frame.name} has ID {self.pin_frame_dict[frame.name]}") 38 | 39 | def inverse_kinematics(self, T , current_lr_arm_motor_q = None, current_lr_arm_motor_dq = None): 40 | left_wrist, right_wrist = T[0], T[1] 41 | if current_lr_arm_motor_q is not None: 42 | self.init_data = current_lr_arm_motor_q 43 | self.opti.set_initial(self.var_q, self.init_data) 44 | 45 | self.opti.set_value(self.param_tf_l, left_wrist) 46 | 
self.opti.set_value(self.param_tf_r, right_wrist) 47 | self.opti.set_value(self.var_q_last, self.init_data) # for smooth 48 | 49 | try: 50 | sol = self.opti.solve() 51 | # sol = self.opti.solve_limited() 52 | 53 | sol_q = self.opti.value(self.var_q) 54 | # self.smooth_filter.add_data(sol_q) 55 | # sol_q = self.smooth_filter.filtered_data 56 | 57 | if current_lr_arm_motor_dq is not None: 58 | v = current_lr_arm_motor_dq * 0.0 59 | else: 60 | v = (sol_q - self.init_data) * 0.0 61 | v = np.zeros(self.reduced_fixed_base_model.nv) 62 | self.init_data = sol_q 63 | 64 | sol_tauff = pin.rnea(self.reduced_fixed_base_model, self.reduced_fixed_base_data, sol_q, v, np.zeros(self.reduced_fixed_base_model.nv)) 65 | sol_tauff = np.concatenate([sol_tauff, np.zeros(len(self.robot_cfg.DoFs) - sol_tauff.shape[0])], axis=0) 66 | 67 | info = {"sol_tauff": sol_tauff, "success": True} 68 | 69 | dof = np.zeros(len(self.robot_cfg.DoFs)) 70 | dof[:len(sol_q)] = sol_q 71 | 72 | return dof, info 73 | 74 | except Exception as e: 75 | print(f"ERROR in convergence, dumping debug info: {e}") 76 | 77 | sol_q = self.opti.debug.value(self.var_q) 78 | 79 | if current_lr_arm_motor_dq is not None: 80 | v = current_lr_arm_motor_dq * 0.0 81 | else: 82 | v = (sol_q - self.init_data) * 0.0 83 | 84 | self.init_data = sol_q 85 | 86 | sol_tauff = pin.rnea(self.reduced_fixed_base_model, self.reduced_fixed_base_data, sol_q, v, np.zeros(self.reduced_fixed_base_model.nv)) 87 | # import ipdb; ipdb.set_trace() 88 | sol_tauff = np.concatenate([sol_tauff, np.zeros(len(self.robot_cfg.DoFs) - sol_tauff.shape[0])], axis=0) 89 | 90 | print(f"sol_q:{sol_q} \nmotorstate: \n{current_lr_arm_motor_q} \nleft_pose: \n{left_wrist} \nright_pose: \n{right_wrist}") 91 | 92 | info = {"sol_tauff": sol_tauff * 0.0, "success": False} 93 | 94 | raise e 95 | 96 | def pre_computation(self, dof, ddof = None): 97 | q = np.zeros(self.model.nq) 98 | q[:3] = dof[-3:] 99 | q[3:] = dof[:-3] 100 | pin.forwardKinematics(self.model, self.data, q) 101 | pin.updateFramePlacements(self.model, self.data) 102 | pin.computeJointJacobians(self.model, self.data) 103 | pin.updateGlobalPlacements(self.model, self.data) 104 | if ddof is not None: 105 | dq = np.zeros(self.model.nv) 106 | dq[:3] = ddof[-3:] 107 | dq[3:] = ddof[:-3] 108 | pin.computeJointJacobiansTimeVariation(self.model, self.data, q, dq) 109 | return 110 | 111 | def forward_kinematics(self, dof): 112 | # in robot base frame 113 | self.pre_computation(dof) 114 | frames = self.get_forward_kinematics() 115 | base_id = self.model.getFrameId("robot") 116 | frames = np.linalg.inv(self.data.oMf[base_id].homogeneous) @ frames 117 | return frames 118 | 119 | def get_jacobian(self, frame_id): 120 | pin_frame_id = self.pin_frame_dict[frame_id] 121 | pin_J = pin.getFrameJacobian(self.model, self.data, pin_frame_id, pin.LOCAL_WORLD_ALIGNED) 122 | J = np.zeros_like(pin_J) 123 | J[:, :-3] = pin_J[:, 3:] 124 | J[:, -3:] = pin_J[:, :3] 125 | return J 126 | 127 | def get_jacobian_dot(self, frame_id): 128 | pin_frame_id = self.pin_frame_dict[frame_id] 129 | pin_dJ = pin.getFrameJacobianTimeVariation(self.model, self.data, pin_frame_id, pin.LOCAL_WORLD_ALIGNED) 130 | dJ = np.zeros_like(pin_dJ) 131 | dJ[:, :-3] = pin_dJ[:, 3:] 132 | dJ[:, -3:] = pin_dJ[:, :3] 133 | return dJ 134 | 135 | if __name__ == "__main__": 136 | 137 | arm_ik = G1MobileBaseKinematics() 138 | -------------------------------------------------------------------------------- /module/spark_robot/spark_robot/g1/kinematics/g1_whole_body_kinematics.py:
-------------------------------------------------------------------------------- 1 | import casadi 2 | import numpy as np 3 | import pinocchio as pin 4 | from pinocchio import casadi as cpin 5 | import os 6 | 7 | from spark_robot.g1.kinematics.g1_fixed_base_kinematics import G1FixedBaseKinematics 8 | from spark_robot.base.base_robot_config import RobotConfig 9 | from spark_robot import SPARK_ROBOT_RESOURCE_DIR 10 | 11 | class G1WholeBodyKinematics(G1FixedBaseKinematics): 12 | def __init__(self, robot_cfg: RobotConfig, **kwargs) -> None: 13 | super().__init__(robot_cfg, **kwargs) 14 | 15 | def _init_whole_body_kinematics(self): 16 | print("Initializing G1WholeBodyKinematics") 17 | self.kinematics_model_path = 'g1/g1_29dof_whole_body_fixed.xml' 18 | self.robot = pin.RobotWrapper.BuildFromMJCF( 19 | os.path.join(SPARK_ROBOT_RESOURCE_DIR, self.kinematics_model_path), 20 | pin.JointModelFreeFlyer() 21 | ) 22 | 23 | self.model = self.robot.model 24 | self.add_extra_frames(self.model) 25 | self.data = pin.Data(self.model) 26 | 27 | self.pin_frame_dict = {} 28 | for frame in self.robot_cfg.Frames: 29 | for j in range(self.model.nframes): 30 | pin_frame = self.model.frames[j] 31 | pin_frame_id = self.model.getFrameId(pin_frame.name) 32 | if frame.name == pin_frame.name: 33 | self.pin_frame_dict[frame] = pin_frame_id 34 | print(f"Frame {pin_frame.name} has ID {self.pin_frame_dict[frame]}") 35 | 36 | for j in range(self.model.nframes): 37 | pin_frame = self.model.frames[j] 38 | print(f"Frame {pin_frame.name} has ID {j}") 39 | 40 | # Compute total mass by summing the masses of all bodies 41 | total_mass = sum(inertia.mass for inertia in self.model.inertias) 42 | 43 | print(f"Total robot mass: {total_mass} kg") 44 | 45 | def pre_computation(self, dof_pos, dof_vel = None): 46 | pin_dof_pos = dof_pos.copy() 47 | quat = pin_dof_pos[3:7].copy() 48 | pin_quat = np.array([quat[1], quat[2], quat[3], quat[0]]) 49 | pin_dof_pos[3:7] = pin_quat 50 | pin.forwardKinematics(self.model, self.data, pin_dof_pos) 51 | pin.updateFramePlacements(self.model, self.data) 52 | pin.computeJointJacobians(self.model, self.data) 53 | pin.updateGlobalPlacements(self.model, self.data) 54 | pin.computeCentroidalMap(self.model, self.data, pin_dof_pos) 55 | if dof_vel is not None: 56 | pin.computeJointJacobiansTimeVariation(self.model, self.data, pin_dof_pos, dof_vel) 57 | return 58 | 59 | def forward_kinematics(self, dof): 60 | # in robot base frame 61 | self.pre_computation(dof) 62 | frames = self.get_forward_kinematics() 63 | base_id = self.model.getFrameId("robot") 64 | frames = np.linalg.inv(self.data.oMf[base_id].homogeneous) @ frames 65 | return frames 66 | 67 | def get_dynamics(self, dof_pos, dof_vel, contact_frames): 68 | pin_dof_pos = dof_pos.copy() 69 | quat = pin_dof_pos[3:7].copy() 70 | pin_quat = np.array([quat[1], quat[2], quat[3], quat[0]]) 71 | pin_dof_pos[3:7] = pin_quat 72 | pin.forwardKinematics(self.model, self.data, pin_dof_pos) 73 | pin.updateFramePlacements(self.model, self.data) 74 | pin.computeJointJacobians(self.model, self.data) 75 | pin.updateGlobalPlacements(self.model, self.data) 76 | pin.computeJointJacobiansTimeVariation(self.model, self.data, pin_dof_pos, dof_vel) 77 | M = pin.crba(self.model, self.data, pin_dof_pos) # Compute mass matrix 78 | nle = pin.nonLinearEffects(self.model, self.data, pin_dof_pos, dof_vel) # Compute Coriolis and gravity terms 79 | J_c = np.vstack([self.get_jacobian(frame_name)[:3,:] for frame_name in contact_frames]) 80 | dJ_c = 
np.vstack([self.get_jacobian_dot(frame_name)[:3,:] for frame_name in contact_frames]) 81 | frame_c = np.vstack([self.get_frame_transformation(frame_name)[:3,3] for frame_name in contact_frames]) 82 | 83 | self.compute_centroidal_dynamics(pin_dof_pos, dof_vel, np.ones(self.robot.model.nv) * 0.) 84 | 85 | return M, nle, J_c, dJ_c, frame_c 86 | 87 | def compute_centroidal_dynamics(self, q, v, a): 88 | """ Compute centroidal dynamics. """ 89 | pin.computeCentroidalMap(self.robot.model, self.robot.data, q) 90 | pin.computeCentroidalMomentumTimeVariation(self.robot.model, self.robot.data, q, v, a) 91 | 92 | Ag = self.robot.data.Ag.copy() # Should now be filled 93 | dAg = (self.robot.data.dhg.vector[:, np.newaxis] - Ag @ a[:, np.newaxis]) @ np.linalg.pinv(v[:, np.newaxis]) 94 | # print("dhg: ", self.robot.data.dhg.vector[:3]) 95 | print("hg: ", np.round(self.robot.data.hg.vector[:3], 2)) 96 | 97 | if (Ag @ a + dAg @ v - self.robot.data.dhg.vector).sum() >= 1e-6: 98 | print("Warning: Centroidal dynamics computation failed. Check the input values.") 99 | 100 | pin.computeCentroidalMomentum(self.robot.model, self.robot.data, q, v) 101 | pin.computeCentroidalMap(self.robot.model, self.robot.data, q) 102 | 103 | 104 | return Ag, dAg 105 | 106 | def get_jacobian(self, frame_name): 107 | """ Compute the Jacobian of a given frame in the robot base frame """ 108 | return pin.getFrameJacobian(self.model, self.data, self.model.getFrameId(frame_name), pin.LOCAL_WORLD_ALIGNED) 109 | 110 | def get_jacobian_dot(self, frame_name): 111 | """ Compute the Jacobian of a given frame in the robot base frame """ 112 | return pin.getFrameJacobianTimeVariation(self.model, self.data, self.model.getFrameId(frame_name), pin.LOCAL_WORLD_ALIGNED) 113 | 114 | def get_frame_transformation(self, frame_name): 115 | """ Compute the transformation matrix of a given frame in the robot base frame """ 116 | return self.data.oMf[self.model.getFrameId(frame_name)].homogeneous 117 | 118 | 119 | if __name__ == "__main__": 120 | 121 | arm_ik = G1WholeBodyKinematics() 122 | -------------------------------------------------------------------------------- /module/spark_robot/spark_robot/g1/utils/weighted_moving_filter.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | 5 | class WeightedMovingFilter: 6 | def __init__(self, weights, data_size = 14): 7 | self._window_size = len(weights) 8 | self._weights = np.array(weights) 9 | assert np.isclose(np.sum(self._weights), 1.0), "[WeightedMovingFilter] the sum of weights list must be 1.0!" 
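# Descriptive note: the filter keeps a FIFO of the most recent len(weights) samples and returns their weighted average once the window is full; until then the latest raw sample is passed through unchanged.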
10 | self._data_size = data_size 11 | self._filtered_data = np.zeros(self._data_size) 12 | self._data_queue = [] 13 | 14 | def _apply_filter(self): 15 | if len(self._data_queue) < self._window_size: 16 | return self._data_queue[-1] 17 | 18 | data_array = np.array(self._data_queue) 19 | temp_filtered_data = np.zeros(self._data_size) 20 | for i in range(self._data_size): 21 | temp_filtered_data[i] = np.convolve(data_array[:, i], self._weights, mode='valid')[-1] 22 | 23 | return temp_filtered_data 24 | 25 | def add_data(self, new_data): 26 | assert len(new_data) == self._data_size 27 | 28 | if len(self._data_queue) > 0 and np.array_equal(new_data, self._data_queue[-1]): 29 | return # skip duplicate data 30 | 31 | if len(self._data_queue) >= self._window_size: 32 | self._data_queue.pop(0) 33 | 34 | self._data_queue.append(new_data) 35 | self._filtered_data = self._apply_filter() 36 | 37 | @property 38 | def filtered_data(self): 39 | return self._filtered_data 40 | 41 | 42 | def visualize_filter_comparison(filter_params, steps): 43 | import time 44 | t = np.linspace(0, 4 * np.pi, steps) 45 | original_data = np.array([np.sin(t + i) + np.random.normal(0, 0.2, len(t)) for i in range(35)]).T # sin wave with noise, shape is [len(t), 35] 46 | 47 | plt.figure(figsize=(14, 10)) 48 | 49 | for idx, weights in enumerate(filter_params): 50 | filter = WeightedMovingFilter(weights, 14) 51 | data_2b_filtered = original_data.copy() 52 | filtered_data = [] 53 | 54 | time1 = time.time() 55 | 56 | for i in range(steps): 57 | filter.add_data(data_2b_filtered[i][13:27]) # step i, columns 13 to 26 (total:14) 58 | data_2b_filtered[i][13:27] = filter.filtered_data 59 | filtered_data.append(data_2b_filtered[i]) 60 | 61 | time2 = time.time() 62 | print(f"filter_params:{filter_params[idx]}, time cosume:{time2 - time1}") 63 | 64 | filtered_data = np.array(filtered_data) 65 | 66 | # col0 should not 2b filtered 67 | plt.subplot(len(filter_params), 2, idx * 2 + 1) 68 | plt.plot(filtered_data[:, 0], label=f'Filtered (Window {filter._window_size})') 69 | plt.plot(original_data[:, 0], 'r--', label='Original', alpha=0.5) 70 | plt.title(f'Joint 1 - Should not to be filtered.') 71 | plt.xlabel('Step') 72 | plt.ylabel('Value') 73 | plt.legend() 74 | 75 | # col13 should 2b filtered 76 | plt.subplot(len(filter_params), 2, idx * 2 + 2) 77 | plt.plot(filtered_data[:, 13], label=f'Filtered (Window {filter._window_size})') 78 | plt.plot(original_data[:, 13], 'r--', label='Original', alpha=0.5) 79 | plt.title(f'Joint 13 - Window {filter._window_size}, Weights {weights}') 80 | plt.xlabel('Step') 81 | plt.ylabel('Value') 82 | plt.legend() 83 | 84 | plt.tight_layout() 85 | plt.show() 86 | 87 | 88 | if __name__ == "__main__": 89 | # windows_size and weights 90 | filter_params = [ 91 | (np.array([0.7, 0.2, 0.1])), 92 | (np.array([0.5, 0.3, 0.2])), 93 | (np.array([0.4, 0.3, 0.2, 0.1])), 94 | ] 95 | 96 | visualize_filter_comparison(filter_params, steps = 100) -------------------------------------------------------------------------------- /module/spark_safe/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="spark_safe", 5 | version="0.1", 6 | description="Safe controller for spark", 7 | packages=find_packages(), 8 | include_package_data=True, 9 | install_requires=[ 10 | "numpy", # Add your library dependencies here 11 | ], 12 | python_requires=">=3.8", 13 | ) 14 | 
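As a usage note for the WeightedMovingFilter defined above (outside of its plotting demo), here is a minimal sketch. It assumes the module is importable as spark_robot.g1.utils.weighted_moving_filter, matching the file layout in this repo; the joint-angle stream below is synthetic.

import numpy as np
from spark_robot.g1.utils.weighted_moving_filter import WeightedMovingFilter

# 3-tap weighted moving average over a stream of 14 joint targets (weights must sum to 1.0).
filt = WeightedMovingFilter(weights=np.array([0.5, 0.3, 0.2]), data_size=14)

for _ in range(100):
    q_raw = np.random.uniform(-0.1, 0.1, size=14)  # stand-in for raw IK output
    filt.add_data(q_raw)
    q_smooth = filt.filtered_data  # same shape as the input sample, smoothed over the window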
-------------------------------------------------------------------------------- /module/spark_safe/spark_safe/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | SPARK_SAFE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) 4 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/__init__.py: -------------------------------------------------------------------------------- 1 | from .base.base_safe_algo import BaseSafeAlgorithm 2 | from .base.bypass_safe_algo import ByPassSafeControl 3 | 4 | from .value_based.base.base_safety_index import BaseSafetyIndex 5 | from .value_based.base.value_based_safe_algo import ValueBasedSafeAlgorithm 6 | from .value_based.base.collision_safety_index_1 import FirstOrderCollisionSafetyIndex 7 | from .value_based.base.collision_safety_index_1_approx import FirstOrderCollisionSafetyIndexApprox 8 | from .value_based.base.collision_safety_index_2 import SecondOrderCollisionSafetyIndex 9 | from .value_based.base.collision_safety_index_2_approx import SecondOrderCollisionSafetyIndexApprox 10 | from .value_based.base.collision_safety_index_2_nn import SecondOrderNNCollisionSafetyIndex 11 | 12 | from .value_based.ssa.basic_safe_set_algorithm import BasicSafeSetAlgorithm 13 | from .value_based.ssa.relaxed_safe_set_algorithm import RelaxedSafeSetAlgorithm 14 | from .value_based.cbf.basic_control_barrier_function import BasicControlBarrierFunction 15 | from .value_based.cbf.relaxed_control_barrier_function import RelaxedControlBarrierFunction 16 | from .value_based.sss.basic_sublevel_safe_set_algorithm import BasicSublevelSafeSetAlgorithm 17 | from .value_based.sss.relaxed_sublevel_safe_set_algorithm import RelaxedSublevelSafeSetAlgorithm 18 | from .value_based.sma.basic_sliding_mode_algorithm import BasicSlidingModeAlgorithm 19 | from .value_based.pfm.basic_potential_field_method import BasicPotentialFieldMethod -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/base/base_safe_algo.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from spark_robot import RobotKinematics 3 | import numpy as np 4 | 5 | class BaseSafeAlgorithm(ABC): 6 | def __init__(self, robot_kinematics: RobotKinematics, **kwargs): 7 | super().__init__() 8 | 9 | self.robot_kinematics = robot_kinematics 10 | self.robot_cfg = robot_kinematics.robot_cfg 11 | self.num_dof = len(self.robot_cfg.DoFs) 12 | self.num_control = len(self.robot_cfg.Control) 13 | 14 | @abstractmethod 15 | def safe_control(self, 16 | x : np.ndarray, 17 | u_ref : np.ndarray, 18 | agent_feedback: dict, 19 | task_info : dict, 20 | action_info : dict): 21 | pass 22 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/base/bypass_safe_algo.py: -------------------------------------------------------------------------------- 1 | from .base_safe_algo import BaseSafeAlgorithm 2 | import numpy as np 3 | 4 | class ByPassSafeControl(BaseSafeAlgorithm): 5 | 6 | def __init__(self, **kwargs): 7 | super().__init__(**kwargs) 8 | 9 | def safe_control(self, 10 | x : np.ndarray, 11 | u_ref : np.ndarray, 12 | agent_feedback: dict, 13 | task_info : dict, 14 | action_info : dict 15 | ): 16 | 17 | info = {} 18 | 19 | return u_ref, info 
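To show the shape of the interface that ByPassSafeControl implements, here is a minimal sketch of a custom subclass that only clips the reference control to the robot's limits. The class is illustrative and not part of SPARK; it only assumes the ControlLimit / Control attributes on the robot config that ValueBasedSafeAlgorithm also reads further below.

import numpy as np
from spark_safe.safe_algo.base.base_safe_algo import BaseSafeAlgorithm

class ClipOnlySafeControl(BaseSafeAlgorithm):
    """Illustrative subclass: pass u_ref through, clipped to the robot's control limits."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)  # expects robot_kinematics=..., just like ByPassSafeControl
        self.u_max = np.array([self.robot_cfg.ControlLimit[i] for i in self.robot_cfg.Control])
        self.u_min = -self.u_max

    def safe_control(self, x, u_ref, agent_feedback, task_info, action_info):
        u = np.clip(u_ref, self.u_min, self.u_max)
        info = {"clipped": bool(np.any(u != u_ref))}
        return u, info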
-------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_safe/spark_safe/safe_algo/value_based/__init__.py -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/base/base_safety_index.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from spark_robot import RobotKinematics 3 | import numpy as np 4 | 5 | class BaseSafetyIndex(ABC): 6 | ''' 7 | Base class for safety index for articulated robots and multiple safety constraints. 8 | ''' 9 | 10 | def __init__(self, 11 | robot_kinematics: RobotKinematics 12 | ) -> None: 13 | super().__init__() 14 | 15 | self.robot_kinematics = robot_kinematics 16 | self.robot_cfg = robot_kinematics.robot_cfg 17 | self.num_dof = len(self.robot_cfg.DoFs) 18 | self.num_state = self.robot_cfg.num_state 19 | self.num_control = len(self.robot_cfg.Control) 20 | 21 | # to be computed in derived class 22 | self.num_constraint = None 23 | self.phi_mask = None 24 | 25 | @abstractmethod 26 | def phi(self): 27 | ''' 28 | [num_constraint,] Safety index function. 29 | ''' 30 | pass 31 | 32 | def decode_constraint_info(self) -> dict: 33 | ''' 34 | Recover constraint info from phi vector. 35 | ''' 36 | raise NotImplementedError("decode_constraint_info not implemented") 37 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/base/collision_safety_index_1.py: -------------------------------------------------------------------------------- 1 | from spark_robot import RobotKinematics 2 | import numpy as np 3 | 4 | from spark_safe.safe_algo.value_based.base.basic_collision_safety_index import BasicCollisionSafetyIndex 5 | 6 | class FirstOrderCollisionSafetyIndex(BasicCollisionSafetyIndex): 7 | 8 | def __init__(self, 9 | robot_kinematics : RobotKinematics, 10 | **kwargs 11 | ): 12 | 13 | super().__init__(robot_kinematics, **kwargs) 14 | 15 | assert "Dynamic1" in self.robot_cfg.__class__.__name__, "FirstOrderCollisionSafetyIndex is only supported for robot configuration with Dynamic1 (Velocity Control)" 16 | 17 | # Safety index parameters for the first order safety index 18 | self.n = 1.0 19 | self.k = 0 20 | 21 | # Safety specification in the Cartesian space 22 | self._phi0 = lambda d, normal: self.dmin - np.sum( - d * normal, axis=1) 23 | 24 | # Time derivative of the safety specification in the Cartesian space 25 | self._phi0dot = lambda v, normal: np.sum(v * normal, axis=1) 26 | 27 | # General implementation of first order safety index; normal direction is the distance decreasing direction 28 | self._phi = self._phi0 29 | 30 | # General implementation of the time derivative of the first order safety index in the Cartesian space 31 | self._phi_dot = self._phi0dot 32 | 33 | 34 | self.Cartesian_Lg = lambda normal: normal 35 | self.Cartesian_Lf = lambda normal: np.zeros((normal.shape[0], 1)) 36 | 37 | def phi(self, 38 | x : np.ndarray, 39 | task_info: dict): 40 | ''' 41 | [num_constraint,] Safety index function. 42 | 43 | Args: 44 | x (np.ndarray): [num_state,] robot state. 
45 | ''' 46 | 47 | robot_collision_vol, obstacle_collision_vol = self.get_vol_info(x, task_info) 48 | 49 | d, v, normal, curv = self.compute_pairwise_vol_info(robot_collision_vol, obstacle_collision_vol) 50 | 51 | phi = self._phi(d, normal) 52 | phi0 = self._phi0(d, normal) 53 | phi0dot = self._phi0dot(v, normal) 54 | Cartesian_Lg = self.Cartesian_Lg(normal) 55 | Cartesian_Lf = self.Cartesian_Lf(normal) 56 | dof_pos = self.robot_cfg.decompose_state_to_dof_pos(x) 57 | 58 | jacobian_full = self.jacobian_full(dof_pos) 59 | gx = self.robot_cfg.dynamics_g(x) 60 | 61 | Lg = np.einsum('ij,ijk->ik', Cartesian_Lg, jacobian_full @ gx) 62 | Lf = Cartesian_Lf 63 | 64 | return phi, Lg, Lf, phi0, phi0dot -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/base/collision_safety_index_1_approx.py: -------------------------------------------------------------------------------- 1 | from spark_robot import RobotKinematics 2 | import numpy as np 3 | from spark_utils import compute_pairwise_distance 4 | 5 | from spark_safe.safe_algo.value_based.base.basic_collision_safety_index import BasicCollisionSafetyIndex 6 | 7 | class FirstOrderCollisionSafetyIndexApprox(BasicCollisionSafetyIndex): 8 | 9 | def __init__(self, 10 | robot_kinematics : RobotKinematics, 11 | **kwargs 12 | ): 13 | 14 | super().__init__(robot_kinematics, **kwargs) 15 | 16 | assert "Dynamic1" in self.robot_cfg.__class__.__name__, "FirstOrderCollisionSafetyIndexApprox is only supported for robot configuration with Dynamic1 (Velocity Control)" 17 | 18 | # Safety index parameters for the first order safety index 19 | self.n = 1.0 20 | self.k = 0 21 | 22 | self.eps_phi = 1e-6 # Add small value to avoid division by zero 23 | self.eps_x = 1e-4 # Small value for numerical differentiation 24 | self.eps_dof = 1e-4 # Small value for numerical differentiation of state 25 | 26 | # Safety specification in the Cartesian space 27 | self._phi0 = lambda dist: self.dmin - dist 28 | 29 | # Time derivative of the safety specification in the Cartesian space 30 | self._phi0dot = lambda phi0_forward_dof, phi0_backward_dof: (phi0_forward_dof - phi0_backward_dof) / (2 * self.eps_x) 31 | 32 | # General implementation of second-order safety index 33 | self._phi = lambda phi0, phi0dot: phi0 + self.k * phi0dot 34 | 35 | def construct_vol_list(self, 36 | x : np.ndarray, 37 | task_info: dict, 38 | ): 39 | 40 | trans_world2base = task_info["robot_base_frame"] 41 | obstacle_vol_frames_world = task_info["obstacle"]["frames_world"] 42 | obstacle_vol_geom = task_info["obstacle"]["geom"] 43 | 44 | vol_frame_list_robot = [] 45 | vol_geom_list_robot = [] 46 | x_list = [] 47 | x_list.append(x) 48 | for i in range(self.num_state): 49 | x_forward = x.copy() 50 | x_forward[i] += self.eps_x 51 | x_backward = x.copy() 52 | x_backward[i] -= self.eps_x 53 | x_list.append(x_forward) 54 | x_list.append(x_backward) 55 | 56 | for _x in x_list: 57 | # compute all robot frames 58 | # frames = self.robot_kinematics.forward_kinematics(self.robot_cfg.decompose_state_to_dof_pos(x)) 59 | dof_pos = self.robot_cfg.decompose_state_to_dof_pos(_x) 60 | dof_vel = self.robot_cfg.decompose_state_to_dof_vel(_x) 61 | dof_pos_forward = dof_pos.copy() 62 | dof_pos_forward += dof_vel * self.eps_dof 63 | 64 | dof_pos_backward = dof_pos.copy() 65 | dof_pos_backward -= dof_vel * self.eps_dof 66 | for _dof_pos in [dof_pos, dof_pos_forward, dof_pos_backward]: 67 | self.robot_kinematics.pre_computation(_dof_pos, dof_vel) 68 | frames = 
self.robot_kinematics.forward_kinematics(_dof_pos) 69 | trans_world2base = self.robot_kinematics.update_base_frame(trans_world2base, _dof_pos) 70 | # # get collision vol frames 71 | collision_vol_frames_base = np.stack([frames[fid] for fid in self.collision_vol_frame_ids], axis=0) # [num_collision_vol, 4, 4] 72 | # # transform to world frame 73 | collision_vol_frames_world = trans_world2base @ collision_vol_frames_base 74 | # import ipdb;ipdb.set_trace() 75 | vol_frame_list_robot.append(collision_vol_frames_world) 76 | vol_geom_list_robot.append(self.collision_vol_geom) 77 | B = len(vol_frame_list_robot) 78 | 79 | return np.array(vol_frame_list_robot), np.array(self.collision_vol_geom), np.tile(obstacle_vol_frames_world, (B, 1, 1, 1)), np.array(obstacle_vol_geom) 80 | 81 | 82 | def phi(self, 83 | x : np.ndarray, 84 | task_info: dict, 85 | ): 86 | ''' 87 | Args: 88 | x (np.ndarray): [num_state,] Robot state values. 89 | 90 | Returns: 91 | grad (np.ndarray): [num_constraint, num_state] gradient of safety index function w.r.t. x. 92 | ''' 93 | self.update_obstacle_info(task_info) 94 | 95 | vol_frame_list_robot, vol_geom_list_robot, obstacle_vol_frames_world, obstacle_vol_geom = self.construct_vol_list(x, task_info) 96 | dist_env = compute_pairwise_distance( 97 | frame_list_1 = vol_frame_list_robot, 98 | geom_list_1 = vol_geom_list_robot, 99 | frame_list_2 = obstacle_vol_frames_world, 100 | geom_list_2 = obstacle_vol_geom 101 | ) 102 | # compute pairwise distances for self collision 103 | dist_self = compute_pairwise_distance( 104 | frame_list_1 = vol_frame_list_robot, 105 | geom_list_1 = vol_geom_list_robot, 106 | frame_list_2 = vol_frame_list_robot, 107 | geom_list_2 = vol_geom_list_robot 108 | ) 109 | 110 | dist_env_curr_dof = dist_env[0::3] 111 | dist_env_forward_dof = dist_env[1::3] 112 | dist_env_backward_dof = dist_env[2::3] 113 | dist_self_curr_dof = dist_self[0::3] 114 | dist_self_forward_dof = dist_self[1::3] 115 | dist_self_backward_dof = dist_self[2::3] 116 | 117 | # compute phi 118 | B = dist_self_curr_dof.shape[0] 119 | dist = np.zeros((B, self.num_constraint)) 120 | dist_forward_dof = np.zeros((B, self.num_constraint)) 121 | dist_backward_dof = np.zeros((B, self.num_constraint)) 122 | dist[:, :self.num_constraint_env] = dist_env_curr_dof.reshape(B, -1) 123 | dist_forward_dof[:, :self.num_constraint_env] = dist_env_forward_dof.reshape(B, -1) 124 | dist_backward_dof[:, :self.num_constraint_env] = dist_env_backward_dof.reshape(B, -1) 125 | if self.num_constraint_self > 0: 126 | triu_indices = np.triu_indices(dist_self_curr_dof.shape[1], k=1) 127 | dist[:, -self.num_constraint_self:] = dist_self_curr_dof[:, triu_indices[0], triu_indices[1]] 128 | dist_forward_dof[:, -self.num_constraint_self:] = dist_self_forward_dof[:, triu_indices[0], triu_indices[1]] 129 | dist_backward_dof[:, -self.num_constraint_self:] = dist_self_backward_dof[:, triu_indices[0], triu_indices[1]] 130 | 131 | phi0_dof = self._phi0(dist) 132 | phi0_forward_dof = self._phi0(dist_forward_dof) 133 | phi0_backward_dof = self._phi0(dist_backward_dof) 134 | 135 | phi0dot_dof = self._phi0dot(phi0_forward_dof, phi0_backward_dof) 136 | phi_dof = self._phi(phi0_dof, phi0dot_dof) 137 | 138 | phi = phi_dof[0] 139 | phi0 = phi0_dof[0] 140 | phi0dot = phi0dot_dof[0] 141 | # compute phi gradient 142 | phi_forward_x = phi_dof[1::2] 143 | phi_backward_x = phi_dof[2::2] 144 | grad_phi = (phi_forward_x - phi_backward_x) / (2 * self.eps_dof) 145 | 146 | grad_phi = grad_phi.T 147 | 148 | Lf = grad_phi @ self.robot_cfg.dynamics_f(x) 
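# Descriptive note: Lf and Lg are the Lie derivatives of phi along the drift f(x) and the control matrix g(x); the value-based safe algorithms use them to bound phi_dot = Lf + Lg @ u.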
149 | Lg = grad_phi @ self.robot_cfg.dynamics_g(x) 150 | 151 | return phi, Lg, Lf, phi0, phi0dot 152 | 153 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/base/collision_safety_index_2.py: -------------------------------------------------------------------------------- 1 | from spark_robot import RobotKinematics 2 | import numpy as np 3 | 4 | from spark_safe.safe_algo.value_based.base.basic_collision_safety_index import BasicCollisionSafetyIndex 5 | 6 | class SecondOrderCollisionSafetyIndex(BasicCollisionSafetyIndex): 7 | 8 | def __init__(self, 9 | robot_kinematics : RobotKinematics, 10 | **kwargs 11 | ): 12 | 13 | super().__init__(robot_kinematics, **kwargs) 14 | 15 | assert "Dynamic2" in self.robot_cfg.__class__.__name__, "SecondOrderCollisionSafetyIndex is only supported for robot configuration with Dynamic2 (Acceleration Control)" 16 | 17 | self.n = kwargs["phi_n"] 18 | self.k = kwargs["phi_k"] 19 | eps = 1e-6 # Add small value to avoid division by zero 20 | 21 | # Safety specification in the Cartesian space 22 | self._phi0 = lambda d, normal: self.dmin - np.sum(- d * normal, axis=1) 23 | 24 | # Time derivative of the safety specification in the Cartesian space 25 | self._phi0dot = lambda v, normal: np.sum(v * normal, axis=1) 26 | 27 | # General implementation of first-order safety index 28 | # Normal direction is the distance-decreasing direction 29 | self._phi = lambda d, v, normal: ( 30 | self.dmin**self.n - np.sum(- d * normal, axis=1)**self.n 31 | + self.k * np.sum(v * normal, axis=1) 32 | ) 33 | 34 | # General implementation of the time derivative of the first-order safety index 35 | self._phi_dot = lambda d, v, a, normal, curv: ( 36 | - self.n * (np.sum(- d * normal, axis=1) + eps)**(self.n - 1) * np.sum(v * normal, axis=1) 37 | + self.k * np.sum(a * normal, axis=1) 38 | - self.k * np.sum(v * (curv @ v[..., None]).squeeze(-1), axis=1) 39 | ) 40 | 41 | # Control-related functions in Cartesian space 42 | self.Cartesian_Lg = lambda normal: self.k * normal 43 | 44 | self.Cartesian_Lf = lambda d, v, normal, curv: ( 45 | - self.n * (np.sum(- d * normal, axis=1) + eps)**(self.n - 1) * np.sum(v * normal, axis=1) 46 | - self.k * np.sum(v * (curv @ v[..., None]).squeeze(-1), axis=1) 47 | ) 48 | 49 | def phi(self, 50 | x : np.ndarray, 51 | task_info: dict): 52 | ''' 53 | [num_constraint,] Safety index function. 54 | 55 | Args: 56 | x (np.ndarray): [num_state,] robot state. 
57 | ''' 58 | 59 | robot_collision_vol, obstacle_collision_vol = self.get_vol_info(x, task_info) 60 | d, v, normal, curv = self.compute_pairwise_vol_info(robot_collision_vol, obstacle_collision_vol) 61 | 62 | phi = self._phi(d, v, normal) 63 | phi0 = self._phi0(d, normal) 64 | phi0dot = self._phi0dot(v, normal) 65 | Cartesian_Lg = self.Cartesian_Lg(normal) 66 | Cartesian_Lf = self.Cartesian_Lf(d, v, normal, curv) 67 | 68 | dof_pos = self.robot_cfg.decompose_state_to_dof_pos(x) 69 | dof_vel = self.robot_cfg.decompose_state_to_dof_vel(x) 70 | 71 | jacobian_full = self.jacobian_full(dof_pos) 72 | jacobian_dot_full = self.jacobian_dot_full(dof_pos, dof_vel) 73 | 74 | gx = self.robot_cfg.dynamics_g(x) 75 | fx = self.robot_cfg.dynamics_f(x) 76 | s_matrix = np.hstack([np.zeros((len(x) - self.num_dof, self.num_dof)), np.eye(len(x) - self.num_dof)]) # only take the velocity part 77 | 78 | Lg = np.einsum('ij,ijk->ik', Cartesian_Lg, jacobian_full @ s_matrix @ gx) 79 | Lf = Cartesian_Lf + np.einsum('ij,ijk->ik', Cartesian_Lg, jacobian_dot_full) @ dof_vel + np.einsum('ij,ijk->ik', Cartesian_Lg, jacobian_full) @ s_matrix @ fx 80 | return phi, Lg, Lf, phi0, phi0dot 81 | 82 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/base/collision_safety_index_2_approx.py: -------------------------------------------------------------------------------- 1 | from spark_robot import RobotKinematics 2 | import numpy as np 3 | from spark_utils import compute_pairwise_distance 4 | 5 | from spark_safe.safe_algo.value_based.base.basic_collision_safety_index import BasicCollisionSafetyIndex 6 | 7 | class SecondOrderCollisionSafetyIndexApprox(BasicCollisionSafetyIndex): 8 | 9 | def __init__(self, 10 | robot_kinematics : RobotKinematics, 11 | **kwargs 12 | ): 13 | 14 | super().__init__(robot_kinematics, **kwargs) 15 | 16 | assert "Dynamic2" in self.robot_cfg.__class__.__name__, "SecondOrderCollisionSafetyIndexApprox is only supported for robot configuration with Dynamic2 (Acceleration Control)" 17 | 18 | self.n = kwargs["phi_n"] 19 | self.k = kwargs["phi_k"] 20 | self.eps_phi = 1e-6 # Add small value to avoid division by zero 21 | self.eps_x = 1e-4 # Small value for numerical differentiation 22 | self.eps_dof = 1e-4 # Small value for numerical differentiation of state 23 | 24 | # Safety specification in the Cartesian space 25 | self._phi0 = lambda dist: self.dmin ** self.n - dist ** self.n 26 | 27 | # Time derivative of the safety specification in the Cartesian space 28 | self._phi0dot = lambda phi0_forward_dof, phi0_backward_dof: (phi0_forward_dof - phi0_backward_dof) / (2 * self.eps_x) 29 | 30 | # General implementation of second-order safety index 31 | self._phi = lambda phi0, phi0dot: phi0 + self.k * phi0dot 32 | 33 | def construct_vol_list(self, 34 | x : np.ndarray, 35 | task_info: dict, 36 | ): 37 | 38 | trans_world2base = task_info["robot_base_frame"] 39 | obstacle_vol_frames_world = task_info["obstacle"]["frames_world"] 40 | obstacle_vol_geom = task_info["obstacle"]["geom"] 41 | 42 | vol_frame_list_robot = [] 43 | vol_geom_list_robot = [] 44 | x_list = [] 45 | x_list.append(x) 46 | for i in range(self.num_state): 47 | x_forward = x.copy() 48 | x_forward[i] += self.eps_x 49 | x_backward = x.copy() 50 | x_backward[i] -= self.eps_x 51 | x_list.append(x_forward) 52 | x_list.append(x_backward) 53 | 54 | for _x in x_list: 55 | # compute all robot frames 56 | # frames = 
self.robot_kinematics.forward_kinematics(self.robot_cfg.decompose_state_to_dof_pos(x)) 57 | dof_pos = self.robot_cfg.decompose_state_to_dof_pos(_x) 58 | dof_vel = self.robot_cfg.decompose_state_to_dof_vel(_x) 59 | dof_pos_forward = dof_pos.copy() 60 | dof_pos_forward += dof_vel * self.eps_dof 61 | 62 | dof_pos_backward = dof_pos.copy() 63 | dof_pos_backward -= dof_vel * self.eps_dof 64 | for _dof_pos in [dof_pos, dof_pos_forward, dof_pos_backward]: 65 | self.robot_kinematics.pre_computation(_dof_pos, dof_vel) 66 | frames = self.robot_kinematics.forward_kinematics(_dof_pos) 67 | # trans_world2base = self.robot_kinematics.get_base_frame(_dof_pos) 68 | trans_world2base = self.robot_kinematics.update_base_frame(trans_world2base, _dof_pos) 69 | 70 | # # get collision vol frames 71 | collision_vol_frames_base = np.stack([frames[fid] for fid in self.collision_vol_frame_ids], axis=0) # [num_collision_vol, 4, 4] 72 | # # transform to world frame 73 | collision_vol_frames_world = trans_world2base @ collision_vol_frames_base 74 | # import ipdb;ipdb.set_trace() 75 | vol_frame_list_robot.append(collision_vol_frames_world) 76 | vol_geom_list_robot.append(self.collision_vol_geom) 77 | B = len(vol_frame_list_robot) 78 | 79 | return np.array(vol_frame_list_robot), np.array(self.collision_vol_geom), np.tile(obstacle_vol_frames_world, (B, 1, 1, 1)), np.array(obstacle_vol_geom) 80 | 81 | 82 | def phi(self, 83 | x : np.ndarray, 84 | task_info: dict, 85 | ): 86 | ''' 87 | Args: 88 | x (np.ndarray): [num_state,] Robot state values. 89 | 90 | Returns: 91 | grad (np.ndarray): [num_constraint, num_state] gradient of safety index function w.r.t. x. 92 | ''' 93 | self.update_obstacle_info(task_info) 94 | 95 | vol_frame_list_robot, vol_geom_list_robot, obstacle_vol_frames_world, obstacle_vol_geom = self.construct_vol_list(x, task_info) 96 | dist_env = compute_pairwise_distance( 97 | frame_list_1 = vol_frame_list_robot, 98 | geom_list_1 = vol_geom_list_robot, 99 | frame_list_2 = obstacle_vol_frames_world, 100 | geom_list_2 = obstacle_vol_geom 101 | ) 102 | # compute pairwise distances for self collision 103 | dist_self = compute_pairwise_distance( 104 | frame_list_1 = vol_frame_list_robot, 105 | geom_list_1 = vol_geom_list_robot, 106 | frame_list_2 = vol_frame_list_robot, 107 | geom_list_2 = vol_geom_list_robot 108 | ) 109 | 110 | dist_env_curr_dof = dist_env[0::3] 111 | dist_env_forward_dof = dist_env[1::3] 112 | dist_env_backward_dof = dist_env[2::3] 113 | dist_self_curr_dof = dist_self[0::3] 114 | dist_self_forward_dof = dist_self[1::3] 115 | dist_self_backward_dof = dist_self[2::3] 116 | 117 | # compute phi 118 | B = dist_self_curr_dof.shape[0] 119 | dist = np.zeros((B, self.num_constraint)) 120 | dist_forward_dof = np.zeros((B, self.num_constraint)) 121 | dist_backward_dof = np.zeros((B, self.num_constraint)) 122 | dist[:, :self.num_constraint_env] = dist_env_curr_dof.reshape(B, -1) 123 | dist_forward_dof[:, :self.num_constraint_env] = dist_env_forward_dof.reshape(B, -1) 124 | dist_backward_dof[:, :self.num_constraint_env] = dist_env_backward_dof.reshape(B, -1) 125 | if self.num_constraint_self > 0: 126 | triu_indices = np.triu_indices(dist_self_curr_dof.shape[1], k=1) 127 | dist[:, -self.num_constraint_self:] = dist_self_curr_dof[:, triu_indices[0], triu_indices[1]] 128 | dist_forward_dof[:, -self.num_constraint_self:] = dist_self_forward_dof[:, triu_indices[0], triu_indices[1]] 129 | dist_backward_dof[:, -self.num_constraint_self:] = dist_self_backward_dof[:, triu_indices[0], triu_indices[1]] 130 | 131 | 
phi0_dof = self._phi0(dist) 132 | phi0_forward_dof = self._phi0(dist_forward_dof) 133 | phi0_backward_dof = self._phi0(dist_backward_dof) 134 | 135 | phi0dot_dof = self._phi0dot(phi0_forward_dof, phi0_backward_dof) 136 | phi_dof = self._phi(phi0_dof, phi0dot_dof) 137 | 138 | phi = phi_dof[0] 139 | phi0 = phi0_dof[0] 140 | phi0dot = phi0dot_dof[0] 141 | # compute phi gradient 142 | phi_forward_x = phi_dof[1::2] 143 | phi_backward_x = phi_dof[2::2] 144 | grad_phi = (phi_forward_x - phi_backward_x) / (2 * self.eps_dof) 145 | 146 | grad_phi = grad_phi.T 147 | 148 | Lf = grad_phi @ self.robot_cfg.dynamics_f(x) 149 | Lg = grad_phi @ self.robot_cfg.dynamics_g(x) 150 | 151 | return phi, Lg, Lf, phi0, phi0dot 152 | 153 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/base/collision_safety_index_2_nn.py: -------------------------------------------------------------------------------- 1 | from spark_robot import RobotKinematics 2 | import numpy as np 3 | import torch 4 | import onnx 5 | from onnx2torch import convert 6 | 7 | from spark_safe.safe_algo.value_based.base.basic_collision_safety_index import BasicCollisionSafetyIndex 8 | from spark_safe import SPARK_SAFE_ROOT 9 | import os 10 | 11 | class SecondOrderNNCollisionSafetyIndex(BasicCollisionSafetyIndex): 12 | 13 | def __init__(self, 14 | robot_kinematics : RobotKinematics, 15 | **kwargs 16 | ): 17 | 18 | super().__init__(robot_kinematics, **kwargs) 19 | 20 | assert "Dynamic2" in self.robot_cfg.__class__.__name__, "SecondOrderNNCollisionSafetyIndex is only supported for robot configuration with Dynamic2 (Acceleration Control)" 21 | 22 | 23 | self.n = kwargs["phi_n"] 24 | self.k = kwargs["phi_k"] 25 | 26 | self.phi_nn_path = os.path.join(SPARK_SAFE_ROOT, "spark_safe/safe_algo/value_based/base/onnx_model", kwargs["phi_nn_path"]) 27 | self.phi_nn_onnx = onnx.load(self.phi_nn_path) 28 | onnx.checker.check_model(self.phi_nn_onnx) 29 | print("Model successfully saved and verified in ONNX format.") 30 | self.phi_nn_torch = convert(self.phi_nn_onnx) 31 | self.phi_nn_torch.eval() 32 | 33 | # Safety specification in the Cartesian space 34 | self.phi0 = lambda d, normal: self.dmin - np.sum(- d * normal, axis=1) 35 | 36 | # Time derivative of the safety specification in the Cartesian space 37 | self.phi0dot = lambda v, normal: np.sum(v * normal, axis=1) 38 | 39 | # General implementation of second order safety index; normal direction is the distance decreasing direction 40 | self._phi = lambda d, v, normal, nn_term: self.dmin - np.sum(- d * normal, axis=1) + nn_term + self.k*np.sum(v * normal, axis=1) 41 | 42 | # General implementation of the time derivative of the second order safety index in the Cartesian space 43 | self._phi_dot = lambda d, v, a, normal, curv, nn_term_grad: np.sum(v * normal, axis=1) - np.sum(nn_term_grad[:, :3] * normal, axis=1) * np.sum(v * normal, axis=1) + self.k*np.sum(a * normal, axis=1) - self.k*np.sum(v * (curv@v[..., None]).squeeze(-1), axis=1) 44 | 45 | self.Cartesian_Lg = lambda normal: self.k * normal 46 | self.Cartesian_Lf = lambda d, v, normal, curv, nn_term_grad: np.sum(v * normal, axis=1) - np.sum(nn_term_grad[:, :3] * normal, axis=1) * np.sum(v * normal, axis=1) - self.k*np.sum(v * (curv@v[..., None]).squeeze(-1), axis=1) 47 | 48 | def phi(self, 49 | x : np.ndarray, 50 | task_info: dict): 51 | ''' 52 | [num_constraint,] Safety index function. 53 | 54 | Args: 55 | x (np.ndarray): [num_state,] robot state. 
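            task_info (dict): obstacle / environment information consumed by get_vol_info
                to build the robot-obstacle collision volume pairs.

        Returns (shapes follow the other safety indices in this module):
            phi (np.ndarray): [num_constraint,] safety index values, including the ONNX network term.
            Lg (np.ndarray): [num_constraint, num_control] control-dependent part of phi_dot.
            Lf (np.ndarray): [num_constraint,] drift part of phi_dot.
            phi0 (np.ndarray): [num_constraint,] zeroth-order safety specification.
            phi0dot (np.ndarray): [num_constraint,] time derivative of phi0.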
56 | ''' 57 | 58 | robot_collision_vol, obstacle_collision_vol = self.get_vol_info(x, task_info) 59 | d, v, normal, curv = self.compute_pairwise_vol_info(robot_collision_vol, obstacle_collision_vol) 60 | 61 | torch_input = torch.from_numpy(np.sum(d * normal, axis=1, dtype=np.float32).reshape(-1,1)).requires_grad_(True) 62 | torch_output = self.phi_nn_torch(torch_input) 63 | torch_output.backward(torch.ones_like(torch_output)) 64 | torch_grad = torch_input.grad.numpy() 65 | 66 | phi = self._phi(d, v, normal, torch_output[:,0].detach().numpy().flatten()) 67 | phi0 = self.phi0(d, normal) 68 | phi0dot = self.phi0dot(v, normal) 69 | Cartesian_Lg = self.Cartesian_Lg(normal) 70 | Cartesian_Lf = self.Cartesian_Lf(d, v, normal, curv, torch_grad) 71 | 72 | dof_pos = self.robot_cfg.decompose_state_to_dof_pos(x) 73 | dof_vel = self.robot_cfg.decompose_state_to_dof_vel(x) 74 | 75 | jacobian_full = self.jacobian_full(dof_pos) 76 | jacobian_dot_full = self.jacobian_dot_full(dof_pos, dof_vel) 77 | 78 | gx = self.robot_cfg.dynamics_g(x) 79 | fx = self.robot_cfg.dynamics_f(x) 80 | 81 | s_matrix = np.hstack([np.zeros((self.num_dof, self.num_dof)), np.eye(self.num_dof)]) # only take the velocity part 82 | 83 | Lg = np.einsum('ij,ijk->ik', Cartesian_Lg, jacobian_full @ s_matrix @ gx) 84 | Lf = Cartesian_Lf + np.einsum('ij,ijk->ik', Cartesian_Lg, jacobian_dot_full) @ dof_vel + np.einsum('ij,ijk->ik', Cartesian_Lg, jacobian_full) @ s_matrix @ fx 85 | 86 | return phi, Lg, Lf, phi0, phi0dot 87 | 88 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/base/onnx_model/n_2.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_safe/spark_safe/safe_algo/value_based/base/onnx_model/n_2.onnx -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/base/onnx_model/n_2_scalar.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/module/spark_safe/spark_safe/safe_algo/value_based/base/onnx_model/n_2_scalar.onnx -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/base/value_based_safe_algo.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | import numpy as np 3 | 4 | from spark_safe.safe_algo.base.base_safe_algo import BaseSafeAlgorithm 5 | from spark_safe.safe_algo.value_based.base.base_safety_index import BaseSafetyIndex 6 | from spark_robot import RobotKinematics 7 | 8 | class ValueBasedSafeAlgorithm(BaseSafeAlgorithm): 9 | def __init__(self, safety_index: BaseSafetyIndex, **kwargs): 10 | 11 | super().__init__(**kwargs) 12 | 13 | self.safety_index = safety_index 14 | 15 | self.control_max = np.array([self.robot_cfg.ControlLimit[i] for i in self.robot_cfg.Control]) 16 | self.control_min = np.array([-self.robot_cfg.ControlLimit[i] for i in self.robot_cfg.Control]) 17 | 18 | # @abstractmethod 19 | # def kappa(self, x): 20 | # ''' 21 | # K-class function for value based safe algorithms 22 | 23 | # control constraint: phi_dot <= -k(phi) 24 | 25 | # x: R^n 26 | # k: R^n -> R^n 27 | # ''' 28 | # pass 29 | 30 | @abstractmethod 31 | def 
safe_control(self, 32 | x : np.ndarray, 33 | u_ref : np.ndarray, 34 | agent_feedback: dict, 35 | task_info : dict, 36 | action_info : dict 37 | ) : 38 | 39 | pass 40 | 41 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/cbf/basic_control_barrier_function.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import osqp 3 | from scipy import sparse 4 | 5 | from spark_safe.safe_algo.value_based.base.value_based_safe_algo import ValueBasedSafeAlgorithm 6 | 7 | class BasicControlBarrierFunction(ValueBasedSafeAlgorithm): 8 | 9 | def __init__(self, **kwargs): 10 | super().__init__(**kwargs) 11 | print("Initializing ControlBarrierFunction") 12 | # default values 13 | self.lambda_default = kwargs["lambda_cbf"] 14 | self.control_weight = np.array(kwargs["control_weight"]) 15 | assert self.control_weight.shape == (self.num_control,), f"control_weight shape {self.control_weight.shape} does not match num_control {self.num_control}" 16 | 17 | def lambda_fn(self, phi): 18 | lambda_values = np.where( 19 | (self.safety_index.phi_mask.reshape(-1,1) > 0), 20 | np.ones((self.safety_index.num_constraint, 1)) * self.lambda_default * phi.reshape(-1, 1), 21 | np.ones((self.safety_index.num_constraint, 1)) * -np.inf 22 | ) 23 | return lambda_values.reshape(-1, 1) 24 | 25 | def qp_solver(self, 26 | u_ref, Q_u, 27 | Lg, Lf, 28 | eps_=1.00e-2, abs_=1.00e-2): 29 | """ 30 | The qp solver min||u - u_ref|| s.t. Lg * u + Lf <= 0 31 | 32 | :param u_ref: Reference robot control. 33 | :param Q: Quadratic cost matrix (Hessian). 34 | :param Lg: Gradient of constraints with respect to control input. 35 | :param Lf: Constraint violation (CBF condition). 
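
        :return: (u_sol, violation): the QP solution with violation = zeros(m) when OSQP reports
                 'solved'; otherwise (u_ref, positive part of Lf + Lg @ u_ref).

        Illustrative call (toy sizes only, assuming a 2-dimensional control space; in
        safe_control the Lf argument already carries the +lambda_fn(phi) shift):

            u_ref = np.array([0.5, -0.2])
            Q_u   = np.eye(2)
            Lg    = np.array([[1.0, 0.0]])   # one active constraint row
            Lf    = np.array([[0.3]])
            u_safe, violation = self.qp_solver(u_ref, Q_u, Lg, Lf)
            # minimizes ||u - u_ref||^2_{Q_u} s.t. Lg @ u <= -Lf and control_min <= u <= control_max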
36 | """ 37 | n = u_ref.shape[0] 38 | m = Lf.shape[0] 39 | 40 | # Construct q 41 | 42 | Q = sparse.csc_matrix(Q_u) 43 | q = -(Q_u.T @ u_ref.reshape(-1, 1)).reshape(-1) 44 | C_upper = sparse.csc_matrix(Lg) 45 | C_lower = sparse.eye(n) 46 | Cmat = sparse.vstack([C_upper, C_lower]).tocsc() 47 | 48 | # Construct l and u 49 | l = np.concatenate([-np.inf * np.ones(m), self.control_min.flatten()]) 50 | u = np.concatenate([-Lf.reshape(-1), self.control_max.flatten()]) 51 | 52 | # Setup and solve the problem 53 | prob = osqp.OSQP() 54 | prob.setup(Q, q, Cmat, l, u, alpha=1.0, eps_abs = abs_, eps_rel = eps_, verbose=False) 55 | result = prob.solve() 56 | if result.info.status == 'solved': 57 | u_sol = result.x[:n].flatten() 58 | violation = np.zeros(m) 59 | return u_sol, violation 60 | else: 61 | violation = Lf + Lg @ u_ref.reshape(-1, 1) 62 | violation = violation.reshape(-1) 63 | violation[violation < 0] = 0 64 | return u_ref, violation 65 | 66 | def safe_control(self, 67 | x : np.ndarray, 68 | u_ref : np.ndarray, 69 | agent_feedback: dict, 70 | task_info : dict, 71 | action_info : dict 72 | ): 73 | 74 | phi, Lg, Lf, phi0, phi0dot = self.safety_index.phi(x, task_info) 75 | 76 | Q_u = np.diag(self.control_weight) 77 | 78 | # for QP solver 79 | u_safe, violation = self.qp_solver( 80 | u_ref = u_ref, 81 | Q_u = Q_u, 82 | Lg = Lg, 83 | Lf = Lf.reshape(-1,1) + self.lambda_fn(phi).reshape(-1,1) 84 | ) 85 | 86 | info = { 87 | "trigger_safe": True, 88 | } 89 | 90 | # append decode info 91 | info.update(self.safety_index.decode_constraint_info(phi, 'phi_safe')) 92 | info.update(self.safety_index.decode_constraint_info(phi0, 'phi0')) 93 | info.update(self.safety_index.decode_constraint_info(phi0dot, 'phi0dot')) 94 | info.update(self.safety_index.decode_constraint_info(violation, 'violation')) 95 | 96 | return u_safe, info -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/cbf/relaxed_control_barrier_function.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import osqp 3 | from scipy import sparse 4 | 5 | from spark_safe.safe_algo.value_based.cbf.basic_control_barrier_function import BasicControlBarrierFunction 6 | 7 | class RelaxedControlBarrierFunction(BasicControlBarrierFunction): 8 | ''' 9 | Relaxed-CBF 10 | 11 | CBF with multiple phi_dot <= -lambda * phi constraints and slack variables for each constraint 12 | Slack variables are penalized quadratically in the QP solver with manual weights 13 | ''' 14 | 15 | def __init__(self, **kwargs): 16 | super().__init__(**kwargs) 17 | print("Initializing RelaxedControlBarrierFunction") 18 | # default values 19 | self.lambda_default = kwargs["lambda_cbf"] 20 | self.control_weight = np.array(kwargs["control_weight"]) 21 | self.slack_weight_default = kwargs["slack_weight"] 22 | self.slack_regularization_order = kwargs.get("slack_regularization_order", 2) 23 | 24 | def qp_solver(self, 25 | u_ref, Q_u, 26 | Lg, Lf, 27 | eps_=1.00e-2, abs_=1.00e-2): 28 | """ 29 | The qp solver min||u - u_ref|| s.t. Lg * u + Lf <= 0 30 | 31 | :param u_ref: Reference robot control. 32 | :param Q: Quadratic cost matrix (Hessian). 33 | :param Lg: Gradient of constraints with respect to control input. 34 | :param Lf: Constraint violation (SSA condition). 
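
        What the code below actually assembles, with one slack s_i per constraint
        (constant terms dropped; Q_u is symmetric in the diagonal control_weight case):

            min_{u, s}   1/2 (u - u_ref)^T Q_u (u - u_ref)
                         + slack_weight * sum(s)              (slack_regularization_order == 1)
                         + 1/2 * slack_weight * ||s||^2       (slack_regularization_order == 2)
            s.t.         Lg @ u - s <= -Lf
                         control_min <= u <= control_max,  s >= 0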
35 | """ 36 | n = u_ref.shape[0] 37 | m = Lf.shape[0] 38 | 39 | # Construct q 40 | 41 | Q_u = sparse.csc_matrix(Q_u) 42 | q_u = -(Q_u.T @ u_ref.reshape(-1, 1)).reshape(-1) 43 | if self.slack_regularization_order == 1: 44 | """ 45 | min||u-uref|| + ||s|| 46 | s.t. Lg u + Lf <= 0 47 | s >= 0 48 | """ 49 | Q_s = sparse.csc_matrix( np.zeros((m, m))) 50 | q_s = self.slack_weight_default * np.ones(self.safety_index.num_constraint) 51 | 52 | elif self.slack_regularization_order == 2: 53 | """ 54 | min||u-uref|| + ||s||^2 55 | s.t. Lg u + Lf <= 0 56 | s >= 0 57 | """ 58 | Q_s = sparse.diags(self.slack_weight_default * np.ones(self.safety_index.num_constraint)) 59 | q_s = np.zeros(m) 60 | else: 61 | raise ValueError(f"slack_regularization_order {self.slack_regularization_order} not supported, must be 1 or 2") 62 | 63 | 64 | # Construct Q matrix 65 | Q = sparse.block_diag([Q_u, Q_s]).tocsc() 66 | # Construct q vector 67 | q = np.concatenate([q_u, q_s]) 68 | 69 | # Construct C matrix 70 | C_upper = sparse.hstack([Lg, -sparse.eye(m)]) 71 | C_lower = sparse.eye(m + n) 72 | Cmat = sparse.vstack([C_upper, C_lower]).tocsc() 73 | 74 | # Construct l and u 75 | l = np.concatenate([-np.inf * np.ones(m), self.control_min.flatten(), np.zeros(m)]) 76 | u = np.concatenate([-Lf.reshape(-1), self.control_max.flatten(), np.inf * np.ones(m)]) 77 | 78 | # Setup and solve the problem 79 | prob = osqp.OSQP() 80 | prob.setup(Q, q, Cmat, l, u, alpha=1.0, eps_abs = abs_, eps_rel = eps_, verbose=False) 81 | result = prob.solve() 82 | if result.info.status == 'solved': 83 | # print("Solution found:", u_sol) 84 | u_sol = result.x[:n].flatten() 85 | s_sol = result.x[n:].flatten() 86 | return u_sol, s_sol 87 | else: 88 | print("No Solution") 89 | violation = Lf + Lg @ u_ref.reshape(-1, 1) 90 | violation[violation < 0] = 0 91 | return u_ref, np.asarray(violation).reshape(-1) 92 | 93 | def safe_control(self, 94 | x : np.ndarray, 95 | u_ref : np.ndarray, 96 | agent_feedback: dict, 97 | task_info : dict, 98 | action_info : dict 99 | ): 100 | 101 | phi, Lg, Lf, phi0, phi0dot = self.safety_index.phi(x, task_info) 102 | 103 | Q_u = np.diag(self.control_weight) 104 | 105 | # for QP solver 106 | u_safe, violation = self.qp_solver( 107 | u_ref = u_ref, 108 | Q_u = Q_u, 109 | Lg = Lg, 110 | Lf = Lf.reshape(-1,1) + self.lambda_fn(phi).reshape(-1,1) 111 | ) 112 | 113 | info = { 114 | "trigger_safe": True, 115 | "violation": violation, 116 | "env_collision_mask": self.safety_index.env_collision_mask 117 | } 118 | 119 | # append decode info 120 | info.update(self.safety_index.decode_constraint_info(phi, 'phi_safe')) 121 | info.update(self.safety_index.decode_constraint_info(phi0, 'phi0')) 122 | info.update(self.safety_index.decode_constraint_info(phi0dot, 'phi0dot')) 123 | info.update(self.safety_index.decode_constraint_info(violation, 'violation')) 124 | 125 | return u_safe, info -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/pfm/basic_potential_field_method.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from spark_safe.safe_algo.value_based.base.value_based_safe_algo import ValueBasedSafeAlgorithm 4 | 5 | class BasicPotentialFieldMethod(ValueBasedSafeAlgorithm): 6 | 7 | def __init__(self, safety_index, **kwargs): 8 | super().__init__(safety_index, **kwargs) 9 | print("Initializing BasicPotentialFieldMethod") 10 | # default values 11 | self.c_pfm_default = kwargs["c_pfm"] 12 | 13 | def 
safe_control(self, 14 | x : np.ndarray, 15 | u_ref : np.ndarray, 16 | agent_feedback: dict, 17 | task_info : dict, 18 | action_info : dict 19 | ): 20 | 21 | phi, Lg, Lf, phi0, phi0dot = self.safety_index.phi(x, task_info) 22 | 23 | if (phi[self.safety_index.phi_mask>0].max() > 0): 24 | trigger_safe = True 25 | masked_indices_phi = np.argwhere(self.safety_index.env_collision_mask) # Get the row and column indices of unmasked positions 26 | # Find the index of the maximum value in the masked array 27 | phi_env = phi[:self.safety_index.num_constraint_env].reshape(self.safety_index.num_robot_vol, self.safety_index.num_obstacle_vol) 28 | max_index_flat_env = np.argmax(phi_env[self.safety_index.env_collision_mask]) 29 | 30 | s_matrix = np.zeros((self.safety_index.num_dof, self.safety_index.num_state)) 31 | s_matrix[:, -self.safety_index.num_dof:] = np.eye(self.safety_index.num_dof) 32 | 33 | # Map the flat index back to row and column indices in the original array 34 | max_row_col_env = tuple(masked_indices_phi[max_index_flat_env]) 35 | max_phi_idx = np.ravel_multi_index(max_row_col_env, phi_env.shape) 36 | 37 | dof_pos = self.robot_cfg.decompose_state_to_dof_pos(x) 38 | jacobian_full = self.safety_index.jacobian_full(dof_pos) 39 | 40 | critical_jacobian = jacobian_full[max_phi_idx, :] 41 | Cartesian_Lg = np.einsum('ij,ijk->ik', Lg, np.linalg.pinv(jacobian_full @ s_matrix @ self.robot_cfg.dynamics_g(x))) 42 | 43 | # Calculate safe control in cartesian space for the critical robot frame 44 | u_ref_cartesian = critical_jacobian @ s_matrix @ self.robot_cfg.dynamics_g(x) @ u_ref 45 | u_safe_cartesian = u_ref_cartesian - self.c_pfm_default * Cartesian_Lg[max_phi_idx, :] 46 | 47 | # Convert the cartesian safe control back to the configuration space 48 | u_safe = np.linalg.pinv(critical_jacobian @ s_matrix @ self.robot_cfg.dynamics_g(x)) @ u_safe_cartesian 49 | else: 50 | trigger_safe = False 51 | u_safe = u_ref 52 | 53 | info = { 54 | "trigger_safe": trigger_safe, 55 | } 56 | 57 | # append decode info 58 | info.update(self.safety_index.decode_constraint_info(phi, 'phi_safe')) 59 | info.update(self.safety_index.decode_constraint_info(phi0, 'phi0')) 60 | info.update(self.safety_index.decode_constraint_info(phi0dot, 'phi0dot')) 61 | 62 | return u_safe, info 63 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/sma/basic_sliding_mode_algorithm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import osqp 3 | from scipy import sparse 4 | 5 | from spark_safe.safe_algo.value_based.base.value_based_safe_algo import ValueBasedSafeAlgorithm 6 | 7 | class BasicSlidingModeAlgorithm(ValueBasedSafeAlgorithm): 8 | 9 | def __init__(self, safety_index, **kwargs): 10 | super().__init__(safety_index, **kwargs) 11 | print("Initializing BasicSlidingModeAlgorithm") 12 | # default values 13 | self.c_sma_default = kwargs["c_sma"] 14 | 15 | def safe_control(self, 16 | x : np.ndarray, 17 | u_ref : np.ndarray, 18 | agent_feedback: dict, 19 | task_info : dict, 20 | action_info : dict 21 | ): 22 | 23 | phi, Lg, Lf, phi0, phi0dot = self.safety_index.phi(x, task_info) 24 | 25 | if (phi[self.safety_index.phi_mask>0].max() > 0): 26 | trigger_safe = True 27 | 28 | active_id = np.where((self.safety_index.phi_mask > 0) & (phi >= 0))[0] 29 | 30 | max_phi_index = active_id[np.argmax(phi[active_id])] 31 | Lg_max_phi = Lg[max_phi_index, :].reshape(-1) 32 | u_safe = u_ref - self.c_sma_default* 
Lg_max_phi 33 | 34 | else: 35 | trigger_safe = False 36 | u_safe = u_ref 37 | 38 | for control_id in self.robot_cfg.Control: 39 | u_safe[control_id] = np.clip(u_safe[control_id], -self.robot_cfg.ControlLimit[control_id], self.robot_cfg.ControlLimit[control_id]) 40 | 41 | 42 | info = { 43 | "trigger_safe": trigger_safe, 44 | } 45 | 46 | # append decode info 47 | info.update(self.safety_index.decode_constraint_info(phi, 'phi_safe')) 48 | info.update(self.safety_index.decode_constraint_info(phi0, 'phi0')) 49 | info.update(self.safety_index.decode_constraint_info(phi0dot, 'phi0dot')) 50 | 51 | 52 | return u_safe, info 53 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/ssa/basic_safe_set_algorithm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import osqp 3 | from scipy import sparse 4 | 5 | from spark_safe.safe_algo.value_based.base.value_based_safe_algo import ValueBasedSafeAlgorithm 6 | 7 | class BasicSafeSetAlgorithm(ValueBasedSafeAlgorithm): 8 | 9 | def __init__(self, **kwargs): 10 | super().__init__(**kwargs) 11 | print("Initializing BasicSafeSetAlgorithm") 12 | # default values 13 | self.eta_default = kwargs["eta_ssa"] 14 | self.control_weight = np.array(kwargs["control_weight"]) 15 | assert self.control_weight.shape == (self.num_control,), f"control_weight shape {self.control_weight.shape} does not match num_control {self.num_control}" 16 | 17 | def eta_fn(self, phi): 18 | eta_values = np.where( 19 | (self.safety_index.phi_mask.reshape(-1,1) > 0) & (phi.reshape(-1,1) >= 0), 20 | np.ones((self.safety_index.num_constraint, 1)) * self.eta_default, 21 | np.ones((self.safety_index.num_constraint, 1)) * -np.inf 22 | ) 23 | return eta_values.reshape(-1, 1) 24 | 25 | def qp_solver(self, 26 | u_ref, Q_u, 27 | Lg, Lf, 28 | eps_=1.00e-2, abs_=1.00e-2): 29 | """ 30 | The qp solver min||u - u_ref|| s.t. Lg * u + Lf <= 0 31 | 32 | :param u_ref: Reference robot control. 33 | :param Q: Quadratic cost matrix (Hessian). 34 | :param Lg: Gradient of constraints with respect to control input. 35 | :param Lf: Constraint violation (SSA condition). 
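
        :return: (u_sol, violation): u_sol solves the QP and violation = zeros(m) when OSQP
                 reports 'solved'; otherwise u_ref is returned unchanged and violation is the
                 positive part of Lf + Lg @ u_ref.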
36 | """ 37 | n = u_ref.shape[0] 38 | m = Lf.shape[0] 39 | 40 | # Construct q 41 | Q = sparse.csc_matrix(Q_u) 42 | q = -(Q_u.T @ u_ref.reshape(-1, 1)).reshape(-1) 43 | C_upper = sparse.csc_matrix(Lg) 44 | C_lower = sparse.eye(n) 45 | Cmat = sparse.vstack([C_upper, C_lower]).tocsc() 46 | 47 | # Construct l and u 48 | l = np.concatenate([-np.inf * np.ones(m), self.control_min.flatten()]) 49 | u = np.concatenate([-Lf.reshape(-1), self.control_max.flatten()]) 50 | 51 | # Setup and solve the problem 52 | prob = osqp.OSQP() 53 | prob.setup(Q, q, Cmat, l, u, alpha=1.0, eps_abs = abs_, eps_rel = eps_, verbose=False) 54 | result = prob.solve() 55 | if result.info.status == 'solved': 56 | u_sol = result.x[:n].flatten() 57 | violation = np.zeros(m) 58 | return u_sol, violation 59 | else: 60 | violation = Lf + Lg @ u_ref.reshape(-1, 1) 61 | violation = violation.reshape(-1) 62 | violation[violation < 0] = 0 63 | return u_ref, violation 64 | 65 | def safe_control(self, 66 | x : np.ndarray, 67 | u_ref : np.ndarray, 68 | agent_feedback: dict, 69 | task_info : dict, 70 | action_info : dict 71 | ): 72 | 73 | phi, Lg, Lf, phi0, phi0dot = self.safety_index.phi(x, task_info) 74 | # dof_pos = self.robot_cfg.decompose_state_to_dof_pos(x) 75 | # self.robot_kinematics.pre_computation(dof_pos) 76 | # Use this for cartesian tracking of hands 77 | # jacobian_left = self.robot_kinematics.get_jacobian(self.robot_cfg.Frames.L_ee)[:3, :] 78 | # jacobian_right = self.robot_kinematics.get_jacobian(self.robot_cfg.Frames.R_ee)[:3, :] 79 | # jacobian_full = np.vstack([jacobian_left, jacobian_right]) 80 | # Q_u = jacobian_full.T @ jacobian_full 81 | 82 | Q_u = np.diag(self.control_weight) 83 | if (phi[self.safety_index.phi_mask>0].max() > 0): 84 | trigger_safe = True 85 | # for QP solver 86 | u_safe, violation = self.qp_solver( 87 | u_ref = u_ref, 88 | Q_u = Q_u, 89 | Lg = Lg, 90 | Lf = Lf.reshape(-1,1) + self.eta_fn(phi).reshape(-1,1) 91 | ) 92 | else: 93 | trigger_safe = False 94 | u_safe = u_ref 95 | violation = np.zeros(Lf.shape[0]) 96 | 97 | info = { 98 | "trigger_safe": trigger_safe, 99 | } 100 | 101 | # append decode info 102 | info.update(self.safety_index.decode_constraint_info(phi, 'phi_safe')) 103 | info.update(self.safety_index.decode_constraint_info(phi0, 'phi0')) 104 | info.update(self.safety_index.decode_constraint_info(phi0dot, 'phi0dot')) 105 | info.update(self.safety_index.decode_constraint_info(violation, 'violation')) 106 | 107 | return u_safe, info 108 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/ssa/relaxed_safe_set_algorithm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import osqp 3 | from scipy import sparse 4 | 5 | from spark_safe.safe_algo.value_based.ssa.basic_safe_set_algorithm import BasicSafeSetAlgorithm 6 | 7 | class RelaxedSafeSetAlgorithm(BasicSafeSetAlgorithm): 8 | ''' 9 | Relaxed-SSA 10 | 11 | SSA with multiple phi_dot <= -eta_ssa constraints and slack variables for each constraint 12 | Slack variables are penalized quadratically in the QP solver with manual weights 13 | ''' 14 | 15 | def __init__(self, **kwargs): 16 | super().__init__(**kwargs) 17 | print("Initializing RelaxedSafeSetAlgorithm") 18 | # default values 19 | self.eta_default = kwargs["eta_ssa"] 20 | self.control_weight = np.array(kwargs["control_weight"]) 21 | self.slack_weight_default = kwargs["slack_weight"] 22 | self.slack_regularization_order = 
kwargs.get("slack_regularization_order", 2) 23 | 24 | def qp_solver(self, 25 | u_ref, Q_u, 26 | Lg, Lf, 27 | eps_=1.00e-2, abs_=1.00e-2): 28 | """ 29 | The qp solver min||u - u_ref|| s.t. Lg * u + Lf <= 0 30 | 31 | :param u_ref: Reference robot control. 32 | :param Q: Quadratic cost matrix (Hessian). 33 | :param Lg: Gradient of constraints with respect to control input. 34 | :param Lf: Constraint violation (SSA condition). 35 | """ 36 | n = u_ref.shape[0] 37 | m = Lf.shape[0] 38 | 39 | # Construct q 40 | 41 | Q_u = sparse.csc_matrix(Q_u) 42 | q_u = -(Q_u.T @ u_ref.reshape(-1, 1)).reshape(-1) 43 | if self.slack_regularization_order == 1: 44 | """ 45 | min||u-uref|| + ||s|| 46 | s.t. Lg u + Lf <= 0 47 | s >= 0 48 | """ 49 | Q_s = sparse.csc_matrix( np.zeros((m, m))) 50 | q_s = self.slack_weight_default * np.ones(self.safety_index.num_constraint) 51 | 52 | elif self.slack_regularization_order == 2: 53 | """ 54 | min||u-uref|| + ||s||^2 55 | s.t. Lg u + Lf <= 0 56 | s >= 0 57 | """ 58 | Q_s = sparse.diags(self.slack_weight_default * np.ones(self.safety_index.num_constraint)) 59 | q_s = np.zeros(m) 60 | else: 61 | raise ValueError(f"slack_regularization_order {self.slack_regularization_order} not supported, must be 1 or 2") 62 | 63 | 64 | # Construct Q matrix 65 | Q = sparse.block_diag([Q_u, Q_s]).tocsc() 66 | # Construct q vector 67 | q = np.concatenate([q_u, q_s]) 68 | 69 | # Construct C matrix 70 | C_upper = sparse.hstack([Lg, -sparse.eye(m)]) 71 | C_lower = sparse.eye(m + n) 72 | Cmat = sparse.vstack([C_upper, C_lower]).tocsc() 73 | 74 | # Construct l and u 75 | l = np.concatenate([-np.inf * np.ones(m), self.control_min.flatten(), np.zeros(m)]) 76 | u = np.concatenate([-Lf.reshape(-1), self.control_max.flatten(), np.inf * np.ones(m)]) 77 | 78 | # Setup and solve the problem 79 | prob = osqp.OSQP() 80 | prob.setup(Q, q, Cmat, l, u, alpha=1.0, eps_abs = abs_, eps_rel = eps_, verbose=False) 81 | result = prob.solve() 82 | if result.info.status == 'solved': 83 | # print("Solution found:", u_sol) 84 | u_sol = result.x[:n].flatten() 85 | s_sol = result.x[n:].flatten() 86 | return u_sol, s_sol 87 | else: 88 | print("No Solution") 89 | violation = Lf + Lg @ u_ref.reshape(-1, 1) 90 | violation[violation < 0] = 0 91 | return u_ref, np.asarray(violation).reshape(-1) 92 | 93 | def safe_control(self, 94 | x : np.ndarray, 95 | u_ref : np.ndarray, 96 | agent_feedback: dict, 97 | task_info : dict, 98 | action_info : dict 99 | ): 100 | 101 | phi, Lg, Lf, phi0, phi0dot = self.safety_index.phi(x, task_info) 102 | 103 | # Use this for cartesian tracking of hands 104 | # dof_pos = self.robot_cfg.decompose_state_to_dof_pos(x) 105 | # self.robot_kinematics.pre_computation(dof_pos) 106 | # jacobian_left = self.robot_kinematics.get_jacobian(self.robot_cfg.Frames.L_ee.name)[:3, :] 107 | # jacobian_right = self.robot_kinematics.get_jacobian(self.robot_cfg.Frames.R_ee.name)[:3, :] 108 | # jacobian_full = np.vstack([jacobian_left, jacobian_right]) 109 | # Q_u = jacobian_full.T @ jacobian_full 110 | 111 | Q_u = np.diag(self.control_weight) 112 | 113 | if (phi[self.safety_index.phi_mask>0].max() > 0): 114 | trigger_safe = True 115 | # for QP solver 116 | u_safe, violation = self.qp_solver( 117 | u_ref = u_ref, 118 | Q_u = Q_u, 119 | Lg = Lg, 120 | Lf = Lf.reshape(-1,1) + self.eta_fn(phi).reshape(-1,1) 121 | ) 122 | else: 123 | trigger_safe = False 124 | u_safe = u_ref 125 | violation = np.zeros_like(phi) 126 | 127 | info = { 128 | "trigger_safe": trigger_safe, 129 | "violation": violation, 130 | "env_collision_mask": 
self.safety_index.env_collision_mask 131 | } 132 | 133 | # append decode info 134 | info.update(self.safety_index.decode_constraint_info(phi, 'phi_safe')) 135 | info.update(self.safety_index.decode_constraint_info(phi0, 'phi0')) 136 | info.update(self.safety_index.decode_constraint_info(phi0dot, 'phi0dot')) 137 | info.update(self.safety_index.decode_constraint_info(violation, 'violation')) 138 | 139 | return u_safe, info 140 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/sss/basic_sublevel_safe_set_algorithm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import osqp 3 | from scipy import sparse 4 | 5 | from spark_safe.safe_algo.value_based.base.value_based_safe_algo import ValueBasedSafeAlgorithm 6 | 7 | class BasicSublevelSafeSetAlgorithm(ValueBasedSafeAlgorithm): 8 | 9 | def __init__(self, **kwargs): 10 | super().__init__(**kwargs) 11 | print("Initializing BasicSublevelSafeSetAlgorithm") 12 | # default values 13 | self.lambda_default = kwargs["lambda_sss"] 14 | self.control_weight = np.array(kwargs["control_weight"]) 15 | assert self.control_weight.shape == (self.num_control,), f"control_weight shape {self.control_weight.shape} does not match num_control {self.num_control}" 16 | 17 | def lambda_fn(self, phi): 18 | lambda_values = np.where( 19 | (self.safety_index.phi_mask.reshape(-1,1) > 0) & (phi.reshape(-1,1) >= 0), 20 | np.ones((self.safety_index.num_constraint, 1)) * self.lambda_default * phi.reshape(-1, 1), 21 | np.ones((self.safety_index.num_constraint, 1)) * -np.inf 22 | ) 23 | return lambda_values.reshape(-1, 1) 24 | 25 | def qp_solver(self, 26 | u_ref, Q_u, 27 | Lg, Lf, 28 | eps_=1.00e-2, abs_=1.00e-2): 29 | """ 30 | The qp solver min||u - u_ref|| s.t. Lg * u + Lf <= 0 31 | 32 | :param u_ref: Reference robot control. 33 | :param Q: Quadratic cost matrix (Hessian). 34 | :param Lg: Gradient of constraints with respect to control input. 35 | :param Lf: Constraint violation (SSS condition). 
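
        Note: constraints that are inactive reach this solver with Lf entries of -inf
        (added by lambda_fn above), so the corresponding upper bound -Lf becomes +inf
        and that QP row imposes no restriction.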
36 | """ 37 | n = u_ref.shape[0] 38 | m = Lf.shape[0] 39 | 40 | # Construct q 41 | 42 | Q = sparse.csc_matrix(Q_u) 43 | q = -(Q_u.T @ u_ref.reshape(-1, 1)).reshape(-1) 44 | C_upper = sparse.csc_matrix(Lg) 45 | C_lower = sparse.eye(n) 46 | Cmat = sparse.vstack([C_upper, C_lower]).tocsc() 47 | 48 | # Construct l and u 49 | l = np.concatenate([-np.inf * np.ones(m), self.control_min.flatten()]) 50 | u = np.concatenate([-Lf.reshape(-1), self.control_max.flatten()]) 51 | 52 | # Setup and solve the problem 53 | prob = osqp.OSQP() 54 | prob.setup(Q, q, Cmat, l, u, alpha=1.0, eps_abs = abs_, eps_rel = eps_, verbose=False) 55 | result = prob.solve() 56 | if result.info.status == 'solved': 57 | u_sol = result.x[:n].flatten() 58 | violation = np.zeros(m) 59 | return u_sol, violation 60 | else: 61 | violation = Lf + Lg @ u_ref.reshape(-1, 1) 62 | violation = violation.reshape(-1) 63 | violation[violation < 0] = 0 64 | return u_ref, violation 65 | 66 | def safe_control(self, 67 | x : np.ndarray, 68 | u_ref : np.ndarray, 69 | agent_feedback: dict, 70 | task_info : dict, 71 | action_info : dict 72 | ): 73 | 74 | phi, Lg, Lf, phi0, phi0dot = self.safety_index.phi(x, task_info) 75 | 76 | Q_u = np.diag(self.control_weight) 77 | if (phi[self.safety_index.phi_mask>0].max() > 0): 78 | trigger_safe = True 79 | # for QP solver 80 | u_safe, violation = self.qp_solver( 81 | u_ref = u_ref, 82 | Q_u = Q_u, 83 | Lg = Lg, 84 | Lf = Lf.reshape(-1,1) + self.lambda_fn(phi).reshape(-1,1) 85 | ) 86 | else: 87 | trigger_safe = False 88 | u_safe = u_ref 89 | violation = np.zeros(Lf.shape[0]) 90 | 91 | info = { 92 | "trigger_safe": trigger_safe, 93 | } 94 | 95 | # append decode info 96 | info.update(self.safety_index.decode_constraint_info(phi, 'phi_safe')) 97 | info.update(self.safety_index.decode_constraint_info(phi0, 'phi0')) 98 | info.update(self.safety_index.decode_constraint_info(phi0dot, 'phi0dot')) 99 | info.update(self.safety_index.decode_constraint_info(violation, 'violation')) 100 | 101 | return u_safe, info 102 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_algo/value_based/sss/relaxed_sublevel_safe_set_algorithm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import osqp 3 | from scipy import sparse 4 | 5 | from spark_safe.safe_algo.value_based.sss.basic_sublevel_safe_set_algorithm import BasicSublevelSafeSetAlgorithm 6 | 7 | class RelaxedSublevelSafeSetAlgorithm(BasicSublevelSafeSetAlgorithm): 8 | ''' 9 | Relaxed-SSS 10 | 11 | SSS with multiple phi_dot <= -lambda * phi constraints and slack variables for each constraint 12 | Slack variables are penalized quadratically in the QP solver with manual weights 13 | ''' 14 | 15 | def __init__(self, **kwargs): 16 | super().__init__(**kwargs) 17 | print("Initializing RelaxedSublevelSafeSetAlgorithm") 18 | # default values 19 | self.lambda_default = kwargs["lambda_sss"] 20 | self.control_weight = np.array(kwargs["control_weight"]) 21 | self.slack_weight_default = kwargs["slack_weight"] 22 | self.slack_regularization_order = kwargs.get("slack_regularization_order", 2) 23 | 24 | def qp_solver(self, 25 | u_ref, Q_u, 26 | Lg, Lf, 27 | eps_=1.00e-2, abs_=1.00e-2): 28 | """ 29 | The qp solver min||u - u_ref|| s.t. Lg * u + Lf <= 0 30 | 31 | :param u_ref: Reference robot control. 32 | :param Q: Quadratic cost matrix (Hessian). 33 | :param Lg: Gradient of constraints with respect to control input. 
34 | :param Lf: Constraint violation (SSA condition). 35 | """ 36 | n = u_ref.shape[0] 37 | m = Lf.shape[0] 38 | 39 | # Construct q 40 | 41 | Q_u = sparse.csc_matrix(Q_u) 42 | q_u = -(Q_u.T @ u_ref.reshape(-1, 1)).reshape(-1) 43 | if self.slack_regularization_order == 1: 44 | """ 45 | min||u-uref|| + ||s|| 46 | s.t. Lg u + Lf <= 0 47 | s >= 0 48 | """ 49 | Q_s = sparse.csc_matrix( np.zeros((m, m))) 50 | q_s = self.slack_weight_default * np.ones(self.safety_index.num_constraint) 51 | 52 | elif self.slack_regularization_order == 2: 53 | """ 54 | min||u-uref|| + ||s||^2 55 | s.t. Lg u + Lf <= 0 56 | s >= 0 57 | """ 58 | Q_s = sparse.diags(self.slack_weight_default * np.ones(self.safety_index.num_constraint)) 59 | q_s = np.zeros(m) 60 | else: 61 | raise ValueError(f"slack_regularization_order {self.slack_regularization_order} not supported, must be 1 or 2") 62 | 63 | 64 | # Construct Q matrix 65 | Q = sparse.block_diag([Q_u, Q_s]).tocsc() 66 | # Construct q vector 67 | q = np.concatenate([q_u, q_s]) 68 | 69 | # Construct C matrix 70 | C_upper = sparse.hstack([Lg, -sparse.eye(m)]) 71 | C_lower = sparse.eye(m + n) 72 | Cmat = sparse.vstack([C_upper, C_lower]).tocsc() 73 | 74 | # Construct l and u 75 | l = np.concatenate([-np.inf * np.ones(m), self.control_min.flatten(), np.zeros(m)]) 76 | u = np.concatenate([-Lf.reshape(-1), self.control_max.flatten(), np.inf * np.ones(m)]) 77 | 78 | # Setup and solve the problem 79 | prob = osqp.OSQP() 80 | prob.setup(Q, q, Cmat, l, u, alpha=1.0, eps_abs = abs_, eps_rel = eps_, verbose=False) 81 | result = prob.solve() 82 | if result.info.status == 'solved': 83 | # print("Solution found:", u_sol) 84 | u_sol = result.x[:n].flatten() 85 | s_sol = result.x[n:].flatten() 86 | return u_sol, s_sol 87 | else: 88 | print("No Solution") 89 | violation = Lf + Lg @ u_ref.reshape(-1, 1) 90 | violation[violation < 0] = 0 91 | return u_ref, np.asarray(violation).reshape(-1) 92 | 93 | def safe_control(self, 94 | x : np.ndarray, 95 | u_ref : np.ndarray, 96 | agent_feedback: dict, 97 | task_info : dict, 98 | action_info : dict 99 | ): 100 | 101 | phi, Lg, Lf, phi0, phi0dot = self.safety_index.phi(x, task_info) 102 | 103 | Q_u = np.diag(self.control_weight) 104 | 105 | if (phi[self.safety_index.phi_mask>0].max() > 0): 106 | trigger_safe = True 107 | # for QP solver 108 | u_safe, violation = self.qp_solver( 109 | u_ref = u_ref, 110 | Q_u = Q_u, 111 | Lg = Lg, 112 | Lf = Lf.reshape(-1,1) + self.lambda_fn(phi).reshape(-1,1) 113 | ) 114 | else: 115 | trigger_safe = False 116 | u_safe = u_ref 117 | violation = np.zeros_like(phi) 118 | 119 | info = { 120 | "trigger_safe": trigger_safe, 121 | "violation": violation, 122 | "env_collision_mask": self.safety_index.env_collision_mask 123 | } 124 | 125 | # append decode info 126 | info.update(self.safety_index.decode_constraint_info(phi, 'phi_safe')) 127 | info.update(self.safety_index.decode_constraint_info(phi0, 'phi0')) 128 | info.update(self.safety_index.decode_constraint_info(phi0dot, 'phi0dot')) 129 | info.update(self.safety_index.decode_constraint_info(violation, 'violation')) 130 | 131 | return u_safe, info 132 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_controller/__init__.py: -------------------------------------------------------------------------------- 1 | from .base.base_safe_controller_config import SafeControllerConfig 2 | from .base.base_safe_controller import BaseSafeController 3 | from .g1.g1_basic_safe_controller_config import 
G1BasicSafeControllerConfig 4 | 5 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_controller/base/base_safe_controller.py: -------------------------------------------------------------------------------- 1 | from .base_safe_controller_config import SafeControllerConfig 2 | from spark_robot import RobotConfig, RobotKinematics 3 | from spark_safe.safe_algo import BaseSafetyIndex 4 | from spark_safe.safe_algo import BaseSafeAlgorithm 5 | import numpy as np 6 | from spark_utils import initialize_class 7 | 8 | class BaseSafeController: 9 | 10 | def __init__(self, 11 | cfg : SafeControllerConfig, 12 | robot_cfg: RobotConfig = None, 13 | robot_kinematics: RobotKinematics = None 14 | ) -> None: 15 | 16 | # import spark_safe.safe_controller as safe_controller 17 | # assert(hasattr(safe_controller, class_cfg_name)) 18 | 19 | # self.cfg : SafeControllerConfig = getattr(safe_controller, class_cfg_name)() 20 | self.cfg : SafeControllerConfig = cfg 21 | 22 | if robot_cfg is not None: 23 | self.robot_cfg : RobotConfig = robot_cfg 24 | print("BaseSafeController: using provided robot_cfg") 25 | else: 26 | self.robot_cfg : RobotConfig = initialize_class(self.cfg.robot.cfg) 27 | print("BaseSafeController: initializing robot_cfg") 28 | 29 | if robot_kinematics is not None: 30 | self.robot_kinematics : RobotKinematics = robot_kinematics 31 | print("BaseSafeController: using provided robot_kinematics") 32 | else: 33 | self.robot_kinematics : RobotKinematics = initialize_class(self.cfg.robot.kinematics, robot_cfg=self.robot_cfg) 34 | print("BaseSafeController: initializing robot_kinematics") 35 | 36 | self.safety_index : BaseSafetyIndex = initialize_class(self.cfg.safety_index, robot_kinematics=self.robot_kinematics) 37 | self.safe_algo : BaseSafeAlgorithm = initialize_class(self.cfg.safe_algo, safety_index=self.safety_index, robot_kinematics=self.robot_kinematics) 38 | 39 | def safe_control(self, 40 | x : np.ndarray, 41 | u_ref : np.ndarray, 42 | agent_feedback: dict, 43 | task_info : dict, 44 | action_info : dict 45 | ): 46 | 47 | return self.safe_algo.safe_control(x, u_ref, agent_feedback, task_info, action_info) 48 | -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_controller/base/base_safe_controller_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from enum import IntEnum, Enum 3 | import inspect 4 | 5 | class SafeControllerConfig: 6 | 7 | def __init__(self) -> None: 8 | 9 | super().__init__() 10 | 11 | """ Initializes all member classes recursively. 
Ignores all names starting with '__' (buit-in methods).""" 12 | self.init_member_classes(self) 13 | 14 | # taken from git@github.com:leggedrobotics/legged_gym.git 15 | @staticmethod 16 | def init_member_classes(obj): 17 | 18 | # Iterate over all attributes names 19 | for key in dir(obj): 20 | # Disregard builtin attributes 21 | if key == "__class__": 22 | continue 23 | 24 | # Get the corresponding attribute object 25 | var = getattr(obj, key) 26 | 27 | # Check if the attribute is a class but not an Enum 28 | if inspect.isclass(var) and not issubclass(var, Enum): 29 | 30 | # Instantiate the class 31 | i_var = var() 32 | # Set the attribute to the instance instead of the type 33 | setattr(obj, key, i_var) 34 | # Recursively init members of the attribute 35 | SafeControllerConfig.init_member_classes(i_var) 36 | 37 | class robot: 38 | 39 | class cfg: 40 | class_name = None 41 | 42 | class kinematics: 43 | class_name = None 44 | 45 | class safety_index: 46 | class_name = None 47 | 48 | class safe_algo: 49 | class_name = None -------------------------------------------------------------------------------- /module/spark_safe/spark_safe/safe_controller/g1/g1_basic_safe_controller_config.py: -------------------------------------------------------------------------------- 1 | from spark_safe.safe_controller import SafeControllerConfig 2 | 3 | class G1BasicSafeControllerConfig(SafeControllerConfig): 4 | 5 | class robot( SafeControllerConfig.robot ): 6 | 7 | class cfg( SafeControllerConfig.robot.cfg ): 8 | class_name = "G1WholeBodyConfig" 9 | 10 | class kinematics( SafeControllerConfig.robot.kinematics ): 11 | class_name = "G1UpperBodyKinematics" 12 | 13 | class safety_index( SafeControllerConfig.safety_index ): 14 | class_name = "BasicCollisionSafetyIndex" 15 | min_distance = { 16 | "environment": 0.05, 17 | "self": 0.05 18 | } 19 | 20 | # todo move to robot config 21 | enable_self_collision = True 22 | 23 | class safe_algo( SafeControllerConfig.safe_algo ): 24 | class_name = "SafeSetAlgorithm" 25 | eta_ssa = 1.0 26 | safety_buffer = 0.1 # positive to allow hold state 27 | slack_weight = 1e3 28 | control_weight = [ 29 | 1.0, 1.0, 1.0, 30 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 31 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 32 | 1.0, 1.0, 1.0 33 | ] 34 | 35 | # class_name = "SublevelSafeSetAlgorithm" 36 | # lambda_sss = 1.0 37 | # slack_weight = 1e3 38 | # control_weight = [ 39 | # 1.0, 1.0, 1.0, 40 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 41 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 42 | # 1.0, 1.0, 1.0 43 | # ] 44 | 45 | # class_name = "ControlBarrierFunction" 46 | # lambda_cbf = 1 47 | # slack_weight = 1e3 48 | # control_weight = [ 49 | # 1.0, 1.0, 1.0, 50 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 51 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 52 | # 1.0, 1.0, 1.0 53 | # ] 54 | 55 | # class_name = "PotentialFieldMethod" 56 | # c_pfm = 0.1 57 | 58 | # class_name = "SlidingModeAlgorithm" 59 | # c_sma = 1.0 60 | -------------------------------------------------------------------------------- /module/spark_task/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="spark_task", 5 | version="0.1", 6 | description="Robot task manager.", 7 | packages=find_packages(), 8 | include_package_data=True, 9 | install_requires=[ 10 | "numpy", # Add your library dependencies here 11 | ], 12 | python_requires=">=3.8", 13 | ) -------------------------------------------------------------------------------- 
/module/spark_task/spark_task/__init__.py: -------------------------------------------------------------------------------- 1 | from .task.base.base_task_config import BaseTaskConfig 2 | from .task.base.base_task import BaseTask 3 | from .task.g1_teleop_sim.g1_teleop_sim_task import G1TeleopSimTask 4 | from .task.g1_benchmark.g1_benchmark_task import G1BenchmarkTask -------------------------------------------------------------------------------- /module/spark_task/spark_task/task/base/base_task.py: -------------------------------------------------------------------------------- 1 | from .base_task_config import BaseTaskConfig 2 | from spark_utils import class_to_dict 3 | from spark_agent import BaseAgent 4 | from spark_robot import RobotConfig, RobotKinematics 5 | from abc import ABC, abstractmethod 6 | 7 | def initialize_class(class_cfg, **kwargs): 8 | ''' 9 | Initialize a class from a configuration object. 10 | Also pass any additional keyword arguments to the class constructor 11 | ''' 12 | 13 | class_cfg_dict = class_to_dict(class_cfg) 14 | class_name = class_cfg_dict.pop('class_name', None) 15 | 16 | if class_name is None: 17 | raise ValueError(f'class_name not found in {class_cfg}') 18 | 19 | import spark_agent 20 | for module in [spark_agent]: 21 | 22 | if hasattr(module, class_name): 23 | class_name = getattr(module, class_name) 24 | 25 | return class_name(**{ **class_cfg_dict, **kwargs }) 26 | 27 | class BaseTask(ABC): 28 | 29 | def __init__(self, robot_cfg: RobotKinematics, robot_kinematics: RobotKinematics, agent : BaseAgent): 30 | 31 | self.robot_cfg : RobotConfig = robot_cfg 32 | self.robot_kinematics : RobotKinematics = robot_kinematics 33 | self.agent : BaseAgent = agent 34 | self.num_obstacle_agent = self.agent.num_obstacle_agent 35 | 36 | @abstractmethod 37 | def reset(self, feedback): 38 | ''' 39 | Reset the task environment. 40 | ''' 41 | pass 42 | 43 | @abstractmethod 44 | def step(self, feedback): 45 | ''' 46 | Execute a step in the task environment. 47 | ''' 48 | pass 49 | 50 | @abstractmethod 51 | def get_info(self, feedback) -> dict: 52 | """ 53 | Get the state from the agent and the environment. 54 | Output the task information to the policy in the form of a dictionary. 55 | """ 56 | pass -------------------------------------------------------------------------------- /module/spark_task/spark_task/task/base/base_task_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from enum import IntEnum, Enum 3 | import inspect 4 | 5 | class BaseTaskConfig: 6 | 7 | def __init__(self) -> None: 8 | 9 | super().__init__() 10 | 11 | """ Initializes all member classes recursively. 
Ignores all names starting with '__' (buit-in methods).""" 12 | self.init_member_classes(self) 13 | 14 | # taken from git@github.com:leggedrobotics/legged_gym.git 15 | @staticmethod 16 | def init_member_classes(obj): 17 | print(f"Initializing {obj.__class__.__name__}") 18 | 19 | # Iterate over all attributes names 20 | for key in dir(obj): 21 | # Disregard builtin attributes 22 | if key == "__class__": 23 | continue 24 | 25 | # Get the corresponding attribute object 26 | var = getattr(obj, key) 27 | 28 | # Check if the attribute is a class but not an Enum 29 | if inspect.isclass(var) and not issubclass(var, Enum): 30 | 31 | # Instantiate the class 32 | i_var = var() 33 | # Set the attribute to the instance instead of the type 34 | setattr(obj, key, i_var) 35 | # Recursively init members of the attribute 36 | BaseTaskConfig.init_member_classes(i_var) 37 | 38 | class agent: 39 | class_name = None -------------------------------------------------------------------------------- /module/spark_utils/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="spark_utils", 5 | version="0.1", 6 | description="Utilities.", 7 | packages=find_packages(), 8 | include_package_data=True, 9 | install_requires=[ 10 | "numpy", # Add your library dependencies here 11 | ], 12 | python_requires=">=3.8", 13 | ) 14 | -------------------------------------------------------------------------------- /module/spark_utils/spark_utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .geometry import * 2 | from .helpers import * 3 | from .math import * 4 | from .logger import * 5 | from .arguments import * -------------------------------------------------------------------------------- /module/spark_utils/spark_utils/arguments.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | def parse_args(): 4 | 5 | # Create the parser 6 | parser = argparse.ArgumentParser(description="Spark Args") 7 | 8 | # Add arguments 9 | # parser.add_argument("--name", type=str, help="Your name", required=True) 10 | # parser.add_argument("--age", type=int, help="Your age", required=True) 11 | # parser.add_argument("--verbose", action="store_true", help="Enable verbose output") 12 | parser.add_argument("--safe_algo", type=str, default='ssa', help="ssa/sss/cbf/etc.", required=False) 13 | 14 | # Parse arguments 15 | args = parser.parse_args() 16 | 17 | return args -------------------------------------------------------------------------------- /module/spark_utils/spark_utils/helpers.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | def initialize_class(class_cfg, **kwargs): 4 | ''' 5 | Initialize a class from a configuration object. 
6 | Also pass any additional keyword arguments to the class constructor 7 | ''' 8 | 9 | class_cfg_dict = class_to_dict(class_cfg) 10 | 11 | if 'class_name' not in class_cfg_dict: 12 | raise ValueError(f'class_name not found in {class_cfg}') 13 | 14 | class_name = class_cfg_dict.pop('class_name') 15 | 16 | if class_name is None: 17 | return None 18 | 19 | import spark_task 20 | import spark_policy 21 | import spark_safe.safe_controller as safe_controller 22 | import spark_safe.safe_algo as safe_algo 23 | import spark_agent 24 | import spark_robot 25 | 26 | for module in [spark_policy, safe_controller, spark_agent, spark_robot, spark_task, safe_algo]: 27 | 28 | if hasattr(module, class_name): 29 | class_name = getattr(module, class_name) 30 | 31 | return class_name(**{ **class_cfg_dict, **kwargs }) 32 | 33 | raise ValueError(f'Class {class_name} not found in any of the modules') 34 | 35 | def class_to_dict(obj) -> dict: 36 | if not hasattr(obj,"__dict__"): 37 | return obj 38 | result = {} 39 | for key in dir(obj): 40 | if key.startswith("_"): 41 | continue 42 | element = [] 43 | val = getattr(obj, key) 44 | if isinstance(val, list): 45 | for item in val: 46 | element.append(class_to_dict(item)) 47 | else: 48 | element = class_to_dict(val) 49 | result[key] = element 50 | return result 51 | 52 | def update_class_attributes(obj, attrs: dict): 53 | 54 | for key, value in attrs.items(): 55 | if not hasattr(obj, key): 56 | print(f'Attribute {key} not found in {obj}, adding to {obj}') 57 | setattr(obj, key, value) 58 | 59 | return obj -------------------------------------------------------------------------------- /module/spark_utils/spark_utils/logger.py: -------------------------------------------------------------------------------- 1 | import os 2 | from tensorboardX import SummaryWriter 3 | import numpy as np 4 | 5 | class DataBuffer: 6 | 7 | def __init__(self): 8 | self.data = {} 9 | 10 | def add(self, key, value=None): 11 | if key not in self.data: 12 | self.data[key] = [] 13 | 14 | if value is not None: 15 | self.data[key].append(value) 16 | 17 | def get(self, key): 18 | return self.data[key] 19 | 20 | def clear(self): 21 | self.data = {} 22 | 23 | def save_npz(self, path): 24 | 25 | for key in self.data: 26 | self.data[key] = np.array(self.data[key]) 27 | 28 | np.savez(path, **self.data) 29 | print("Data saved to: ", path) 30 | 31 | class Logger: 32 | def __init__(self, log_dir, n_logged_samples=10, summary_writer=None, counter=0): 33 | self._log_dir = log_dir 34 | print('########################') 35 | print('logging outputs to ', log_dir) 36 | print('########################') 37 | self._n_logged_samples = n_logged_samples 38 | self._summ_writer = SummaryWriter(log_dir, flush_secs=1, max_queue=1) 39 | self.counter = counter 40 | 41 | def log_scalar(self, scalar, name): 42 | self._summ_writer.add_scalar('{}'.format(name), scalar, self.counter) 43 | 44 | def log_scalars(self, scalar_dict, group_name, step, phase): 45 | """Will log all scalars in the same plot.""" 46 | self._summ_writer.add_scalars('{}_{}'.format(group_name, phase), scalar_dict, step) 47 | 48 | def log_image(self, image, name, step): 49 | assert(len(image.shape) == 3) # [C, H, W] 50 | self._summ_writer.add_image('{}'.format(name), image, step) 51 | 52 | def log_video(self, video_frames, name, step, fps=10): 53 | assert len(video_frames.shape) == 5, "Need [N, T, C, H, W] input tensor for video logging!" 
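        # tensorboardX consumes the [N, T, C, H, W] tensor directly; fps only changes the
        # playback speed of the clip rendered in TensorBoard.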
54 | self._summ_writer.add_video('{}'.format(name), video_frames, step, fps=fps) 55 | 56 | def log_paths_as_videos(self, paths, step, max_videos_to_save=2, fps=10, video_title='video'): 57 | 58 | # reshape the rollouts 59 | videos = [np.transpose(p['image_obs'][:, 0], [0, 3, 1, 2]) for p in paths] 60 | 61 | # max rollout length 62 | max_videos_to_save = np.min([max_videos_to_save, len(videos)]) 63 | max_length = videos[0].shape[0] 64 | for i in range(max_videos_to_save): 65 | if videos[i].shape[0]>max_length: 66 | max_length = videos[i].shape[0] 67 | 68 | # pad rollouts to all be same length 69 | for i in range(max_videos_to_save): 70 | if videos[i].shape[0] 0, "Figure logging requires input shape [batch x figures]!" 81 | self._summ_writer.add_figure('{}_{}'.format(name, phase), figure, step) 82 | 83 | def log_figure(self, figure, name, step, phase): 84 | """figure: matplotlib.pyplot figure handle""" 85 | self._summ_writer.add_figure('{}_{}'.format(name, phase), figure, step) 86 | 87 | def log_graph(self, array, name, step, phase): 88 | """figure: matplotlib.pyplot figure handle""" 89 | # need to implement this after 90 | # im = plot_graph(array) 91 | # self._summ_writer.add_image('{}_{}'.format(name, phase), im, step) 92 | raise NotImplementedError 93 | def log_text(self, tag, text_string, global_step=None, walltime=None): 94 | self._summ_writer.add_text(tag, text_string, global_step=self.counter) 95 | 96 | def dump_scalars(self, log_path=None): 97 | log_path = os.path.join(self._log_dir, "scalar_data.json") if log_path is None else log_path 98 | self._summ_writer.export_scalars_to_json(log_path) 99 | 100 | def flush(self): 101 | self._summ_writer.flush() 102 | self.counter += 1 103 | -------------------------------------------------------------------------------- /module/spark_utils/spark_utils/math.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.spatial.transform import Rotation as R 3 | 4 | def quaternion_to_rotation_matrix(q): 5 | """ 6 | Converts a quaternion into a 3x3 rotation matrix. 7 | 8 | Parameters: 9 | q (array-like): Quaternion [w, x, y, z] 10 | 11 | Returns: 12 | np.ndarray: 3x3 rotation matrix 13 | """ 14 | w, x, y, z = q 15 | 16 | R = np.array([ 17 | [1 - 2 * (y**2 + z**2), 2 * (x*y - z*w), 2 * (x*z + y*w)], 18 | [2 * (x*y + z*w), 1 - 2 * (x**2 + z**2), 2 * (y*z - x*w)], 19 | [2 * (x*z - y*w), 2 * (y*z + x*w), 1 - 2 * (x**2 + y**2)] 20 | ]) 21 | return R 22 | 23 | def transformation_to_pos_quat(transform_matrix): 24 | """ 25 | Convert a 4x4 transformation matrix to position and quaternion. 
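    Note: the quaternion is returned in (qx, qy, qz, qw) order, matching
    pos_quat_to_transformation below, while quaternion_to_rotation_matrix above
    expects [w, x, y, z].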
26 | 27 | :param transform_matrix: 4x4 numpy array representing the transformation matrix 28 | :return: position (tuple) and quaternion (tuple) 29 | """ 30 | # Extract position 31 | position = transform_matrix[:3, 3] 32 | 33 | # Extract rotation matrix 34 | rotation_matrix = transform_matrix[:3, :3] 35 | 36 | # Convert rotation matrix to quaternion 37 | qw = np.sqrt(1.0 + rotation_matrix[0, 0] + rotation_matrix[1, 1] + rotation_matrix[2, 2]) / 2.0 38 | if qw > 0.00001: # Check for numerical stability 39 | qx = (rotation_matrix[2, 1] - rotation_matrix[1, 2]) / (4.0 * qw) 40 | qy = (rotation_matrix[0, 2] - rotation_matrix[2, 0]) / (4.0 * qw) 41 | qz = (rotation_matrix[1, 0] - rotation_matrix[0, 1]) / (4.0 * qw) 42 | else: 43 | qx = rotation_matrix[2, 1] + rotation_matrix[1, 2] 44 | qy = rotation_matrix[0, 2] + rotation_matrix[2, 0] 45 | qz = rotation_matrix[1, 0] + rotation_matrix[0, 1] 46 | qw = 0.0 47 | 48 | return np.array(position), np.array([qx, qy, qz, qw]) 49 | 50 | def pos_quat_to_transformation(position, quaternion): 51 | """ 52 | Convert position and quaternion to a 4x4 transformation matrix. 53 | 54 | :param position: Tuple representing (x, y, z) 55 | :param quaternion: Tuple representing (qx, qy, qz, qw) 56 | :return: 4x4 numpy array representing the transformation matrix 57 | """ 58 | qx, qy, qz, qw = quaternion 59 | 60 | # Create rotation matrix from quaternion 61 | r11 = 1 - 2 * (qy**2 + qz**2) 62 | r12 = 2 * (qx * qy - qw * qz) 63 | r13 = 2 * (qx * qz + qw * qy) 64 | r21 = 2 * (qx * qy + qw * qz) 65 | r22 = 1 - 2 * (qx**2 + qz**2) 66 | r23 = 2 * (qy * qz - qw * qx) 67 | r31 = 2 * (qx * qz - qw * qy) 68 | r32 = 2 * (qy * qz + qw * qx) 69 | r33 = 1 - 2 * (qx**2 + qy**2) 70 | 71 | # Create the transformation matrix 72 | transform_matrix = np.array([[r11, r12, r13, position[0]], 73 | [r21, r22, r23, position[1]], 74 | [r31, r32, r33, position[2]], 75 | [0, 0, 0, 1]]) 76 | 77 | return transform_matrix 78 | 79 | def se3_to_transformation(se3): 80 | """ 81 | Convert a pin.SE3 object to a 4x4 transformation matrix. 
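    Only the .rotation and .translation attributes of the SE3 object are read here.
    Note that pinocchio is not imported at the top of this module as shown, so the
    quat2SE3 / rpy2SE3 helpers further below rely on `pin` being available.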
82 | 83 | :param se3: pin.SE3 object representing the transformation 84 | :return: 4x4 numpy array representing the transformation matrix 85 | """ 86 | # Extract rotation matrix (3x3) 87 | rotation = se3.rotation 88 | 89 | # Extract translation vector (3x1) 90 | translation = se3.translation 91 | 92 | # Create the transformation matrix 93 | transformation_matrix = np.eye(4) # 4x4 identity matrix 94 | transformation_matrix[:3, :3] = rotation # Set the rotation 95 | transformation_matrix[:3, 3] = translation.flatten() # Set the translation 96 | 97 | return transformation_matrix 98 | 99 | def rpy2quat(rpy, order='xyz'): 100 | r = R.from_euler(order, rpy, degrees=False) 101 | quat = r.as_quat() 102 | return quat 103 | 104 | def quat2rpy(quat, order='xyz'): 105 | r = R.from_quat(quat) 106 | rpy = r.as_euler(order, degrees=False) 107 | return rpy 108 | 109 | def quat2SE3(quat: np.ndarray, position: np.ndarray): 110 | se3 = pin.SE3( 111 | pin.Quaternion(quat[0], quat[1], quat[2], quat[3]), 112 | position 113 | ) 114 | return se3 115 | 116 | def rpy2SE3(rpy: np.ndarray, position: np.ndarray): 117 | quat = rpy2quat(rpy) 118 | se3 = quat2SE3(quat, position) 119 | return se3 120 | -------------------------------------------------------------------------------- /pipeline/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="spark_pipeline", 5 | version="0.1", 6 | description="Main task runner.", 7 | packages=find_packages(), 8 | include_package_data=True, 9 | install_requires=[ 10 | "numpy", # Add your library dependencies here 11 | ], 12 | python_requires=">=3.8", 13 | ) -------------------------------------------------------------------------------- /pipeline/spark_pipeline/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | SPARK_PIPELINE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) 4 | 5 | from .base.base_pipeline_config import BasePipelineConfig 6 | from .base.base_pipeline import BasePipeline 7 | 8 | from .g1_safe_teleop.g1_safe_teleop_real_pipeline_config import G1SafeTeleopRealPipelineConfig 9 | from .g1_safe_teleop.g1_safe_teleop_sim_pipeline_config import G1SafeTeleopSimPipelineConfig 10 | from .g1_safe_teleop.g1_unsafe_teleop_sim_pipeline_config import G1UnsafeTeleopSimPipelineConfig 11 | from .g1_safe_teleop.g1_safe_teleop_pipeline import G1SafeTeleopPipeline 12 | 13 | from .visualization import render_critical_pairs, render_value_based_debug_info 14 | 15 | from .g1_benchmark.g1_benchmark_pipeline_config import G1BenchmarkPipelineConfig 16 | from .g1_benchmark.g1_benchmark_pipeline import G1BenchmarkPipeline 17 | from .g1_benchmark.g1_benchmark_test_case_generator import generate_g1_benchmark_test_case -------------------------------------------------------------------------------- /pipeline/spark_pipeline/base/base_pipeline.py: -------------------------------------------------------------------------------- 1 | from spark_utils import initialize_class 2 | from abc import ABC, abstractmethod 3 | from .base_pipeline_config import BasePipelineConfig 4 | from spark_robot import RobotConfig, RobotKinematics 5 | from spark_algo import SparkAlgoWrapper 6 | from spark_env import SparkEnvWrapper 7 | import numpy as np 8 | import time 9 | from typing import Generic, TypeVar 10 | 11 | PipelineConfigT = TypeVar("PipelineConfigT", bound="BasePipelineConfig") 12 | 13 | class BasePipeline(ABC, 
Generic[PipelineConfigT]): 14 | 15 | def __init__(self, cfg : PipelineConfigT): 16 | 17 | self.cfg = cfg 18 | self.max_num_steps = self.cfg.max_num_steps 19 | 20 | self.robot_cfg : RobotConfig = initialize_class(self.cfg.robot.cfg) 21 | self.cfg.robot.kinematics.class_name = self.robot_cfg.kinematics_class_name 22 | self.robot_kinematics : RobotKinematics = initialize_class(self.cfg.robot.kinematics, robot_cfg=self.robot_cfg) 23 | self.env : SparkEnvWrapper = SparkEnvWrapper(self.cfg.env, robot_cfg=self.robot_cfg, robot_kinematics=self.robot_kinematics) 24 | self.algo : SparkAlgoWrapper = SparkAlgoWrapper(self.cfg.algo, robot_cfg=self.robot_cfg, robot_kinematics=self.robot_kinematics) 25 | 26 | self.pipeline_step = 0 27 | 28 | @abstractmethod 29 | def post_physics_step(self, agent_feedback, task_info, action_info): 30 | pass 31 | 32 | def run(self): 33 | 34 | # reset environment 35 | agent_feedback, task_info = self.env.reset() 36 | 37 | # initial action 38 | u_safe, action_info = self.algo.act(agent_feedback, task_info) 39 | 40 | while self.pipeline_step < self.max_num_steps or self.max_num_steps < 0: 41 | 42 | # environment step 43 | # s_next = env(s, a) 44 | agent_feedback, task_info = self.env.step(u_safe, action_info) 45 | 46 | # next action 47 | # a_next = algo(s_next) 48 | u_safe, action_info = self.algo.act(agent_feedback, task_info) 49 | # post physics step (e.g., rendering, status publishing) 50 | self.post_physics_step(agent_feedback, task_info, action_info) 51 | 52 | self.pipeline_step += 1 53 | 54 | if task_info["done"]: 55 | break 56 | 57 | # end of pipeline 58 | print("Pipeline done") 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /pipeline/spark_pipeline/base/base_pipeline_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from enum import IntEnum, Enum 3 | import inspect 4 | from spark_algo import SparkAlgoConfig 5 | from spark_env import SparkEnvConfig 6 | from spark_safe.safe_controller import SafeControllerConfig 7 | 8 | class BasePipelineConfig: 9 | 10 | def __init__(self) -> None: 11 | 12 | super().__init__() 13 | 14 | """ Initializes all member classes recursively. Ignores all names starting with '__' (built-in methods).""" 15 | self.init_member_classes(self) 16 | 17 | # taken from git@github.com:leggedrobotics/legged_gym.git 18 | @staticmethod 19 | def init_member_classes(obj): 20 | print(f"Initializing {obj.__class__.__name__}") 21 | 22 | # Iterate over all attribute names 23 | for key in dir(obj): 24 | # Disregard builtin attributes 25 | if key == "__class__": 26 | continue 27 | 28 | # Get the corresponding attribute object 29 | var = getattr(obj, key) 30 | 31 | # Check if the attribute is a class but not an Enum 32 | if inspect.isclass(var) and not issubclass(var, Enum): 33 | 34 | # Instantiate the class 35 | i_var = var() 36 | # Set the attribute to the instance instead of the type 37 | setattr(obj, key, i_var) 38 | # Recursively init members of the attribute 39 | BasePipelineConfig.init_member_classes(i_var) 40 | 41 | max_num_steps = -1 42 | 43 | class robot: 44 | 45 | class cfg: 46 | class_name = None 47 | 48 | class kinematics: 49 | class_name = None 50 | 51 | # todo insert spark_pipeline between base and downstream.
add guard_pipeline 52 | class env(SparkEnvConfig): 53 | 54 | pass 55 | 56 | class algo(SparkAlgoConfig): 57 | 58 | pass 59 | -------------------------------------------------------------------------------- /pipeline/spark_pipeline/g1_benchmark/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligent-control-lab/spark/51b777e6275fe8b2ee8b0b579351f9c57f205a2f/pipeline/spark_pipeline/g1_benchmark/__init__.py -------------------------------------------------------------------------------- /pipeline/spark_pipeline/g1_benchmark/g1_benchmark_pipeline_config.py: -------------------------------------------------------------------------------- 1 | from spark_pipeline.base.base_pipeline_config import BasePipelineConfig 2 | 3 | class G1BenchmarkPipelineConfig(BasePipelineConfig): 4 | 5 | # benchmark config 6 | max_num_steps = 500 7 | max_num_reset = -1 8 | enable_logger = False 9 | enable_plotter = False 10 | enable_safe_zone_render = False 11 | 12 | class metric_selection: 13 | dist_self = False 14 | dist_robot_to_env = False 15 | dist_goal_arm = False 16 | dist_goal_base = False 17 | seed = False 18 | done = False 19 | 20 | violation = True 21 | 22 | class robot( BasePipelineConfig.robot ): 23 | 24 | class cfg( BasePipelineConfig.robot.cfg ): 25 | class_name = "G1MobileBaseDynamic1Config" 26 | 27 | class kinematics( BasePipelineConfig.robot.kinematics ): 28 | pass 29 | 30 | class env( BasePipelineConfig.env ): 31 | 32 | class task( BasePipelineConfig.env.task ): 33 | class_name = "G1BenchmarkTask" 34 | task_name = "G1BenchmarkTask" # e.g., will appear as ros node name 35 | task_config = {} 36 | 37 | class agent( BasePipelineConfig.env.agent ): 38 | class_name = "G1MujocoAgent" 39 | dt = 0.002 40 | control_decimation = 5 41 | enable_viewer = True 42 | # obstacles perceived by agent. 
For mujoco, we create some obstacles movable via keyboard 43 | obstacle_debug = dict( 44 | num_obstacle = 0, 45 | manual_movement_step_size = 0.1 46 | ) 47 | 48 | class algo( BasePipelineConfig.algo ): 49 | 50 | class policy( BasePipelineConfig.algo.policy ): 51 | class_name = "G1BenchmarkPIDPolicy" 52 | 53 | class safe_controller( BasePipelineConfig.algo.safe_controller ): 54 | 55 | # see G1BasicCollisionVelSSAConfig for full spec 56 | # when used in pipeline config, robot spec comes from pipeline config 57 | # robot spec in safe_controller config is ignored 58 | 59 | class safety_index( BasePipelineConfig.algo.safe_controller.safety_index ): 60 | class_name = "FirstOrderCollisionSafetyIndex" 61 | min_distance = { 62 | "environment": 0.1, 63 | "self": 0.01 64 | } 65 | 66 | enable_self_collision = False 67 | 68 | class safe_algo( BasePipelineConfig.algo.safe_controller.safe_algo ): 69 | class_name = "SafeSetAlgorithm" 70 | eta_ssa = 1.0 71 | safety_buffer = 0.1 # positive to allow hold state 72 | use_slack = True 73 | slack_weight = 1e3 74 | slack_regularization_order = 2 75 | control_weight = [ 76 | 1.0, 1.0, 1.0, # waist 77 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # left arm 78 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # right arm 79 | 1.0, 1.0, 1.0 # locomotion 80 | ] 81 | 82 | # class_name = "SublevelSafeSetAlgorithm" 83 | # lambda_sss = 1.0 84 | # slack_weight = 1e3 85 | # control_weight = [ 86 | # 1.0, 1.0, 1.0, 87 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 88 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 89 | # 1.0, 1.0, 1.0 90 | # ] 91 | 92 | # class_name = "ControlBarrierFunction" 93 | # lambda_cbf = 1 94 | # slack_weight = 1e3 95 | # control_weight = [ 96 | # 1.0, 1.0, 1.0, 97 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 98 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 99 | # 1.0, 1.0, 1.0 100 | # ] 101 | 102 | # class_name = "PotentialFieldMethod" 103 | # c_pfm = 0.1 104 | 105 | # class_name = "SlidingModeAlgorithm" 106 | # c_sma = 1.0 107 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /pipeline/spark_pipeline/g1_safe_teleop/g1_safe_teleop_real_pipeline_config.py: -------------------------------------------------------------------------------- 1 | from spark_pipeline.base.base_pipeline_config import BasePipelineConfig 2 | from spark_safe.safe_controller import G1BasicSafeControllerConfig 3 | 4 | class G1SafeTeleopRealPipelineConfig(BasePipelineConfig): 5 | 6 | max_num_steps = -1 7 | enable_logger = False 8 | enable_plotter = False 9 | enable_safe_zone_render = False 10 | 11 | 12 | class metric_selection: 13 | 14 | dof_pos = False 15 | dof_vel = False 16 | goal_pos = False 17 | obstacle_pos = False 18 | 19 | dist_self = False 20 | dist_robot_to_env = False 21 | dist_goal_arm = False 22 | dist_goal_base = False 23 | seed = False 24 | done = False 25 | 26 | violation = True 27 | loop_time = True 28 | trigger_safe_controller = True 29 | 30 | class robot( BasePipelineConfig.robot ): 31 | 32 | class cfg( BasePipelineConfig.robot.cfg ): 33 | class_name = "G1FixedBaseConfig" 34 | 35 | class kinematics( BasePipelineConfig.robot.kinematics ): 36 | pass 37 | 38 | class env( BasePipelineConfig.env ): 39 | 40 | class task( BasePipelineConfig.env.task ): 41 | class_name = "G1TeleopSimTask" 42 | task_name = "G1TeleopSimTask" # e.g., will appear as ros node name 43 | enable_ros = True 44 | 45 | ros_params = dict( 46 | robot_command_topic = "/g1_29dof/arm_position_controller/command", 47 | robot_state_topic = "/g1_29dof/robot_state", 48 | robot_teleop_topic = 
"/g1_29dof/robot_teleop", 49 | obstacle_topic = "/g1_29dof/human_state", 50 | mssa_demo_obstacle_topic = "/g1_29dof/obstacle_state", 51 | ) 52 | max_episode_length = -1 53 | 54 | class agent( BasePipelineConfig.env.agent ): 55 | class_name = "G1RealAgent" 56 | dt = 0.002 57 | control_decimation = 5 58 | unitree_model = "g1" 59 | level = "high" 60 | send_cmd = False 61 | enable_viewer = False #TODO: add visulaization with real agent 62 | 63 | class algo( BasePipelineConfig.algo ): 64 | 65 | class policy( BasePipelineConfig.algo.policy ): 66 | class_name = "G1TeleopPIDPolicy" 67 | 68 | class safe_controller( BasePipelineConfig.algo.safe_controller ): 69 | 70 | class safety_index( BasePipelineConfig.algo.safe_controller.safety_index ): 71 | class_name = "BasicCollisionSafetyIndex" 72 | min_distance = { 73 | "environment": 0.1, 74 | "self": 0.01 75 | } 76 | 77 | enable_self_collision = True 78 | 79 | class safe_algo( BasePipelineConfig.algo.safe_controller.safe_algo ): 80 | # class_name = "ByPassSafeControl" 81 | class_name = "SafeSetAlgorithm" 82 | eta_ssa = 1.0 83 | safety_buffer = 0.1 # positive to allow hold state 84 | slack_weight = 1e3 85 | control_weight = [ 86 | 1.0, 1.0, 1.0, # waist 87 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # left arm 88 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # right arm 89 | 10.0, 10.0, 10.0 # locomotion 90 | ] 91 | 92 | # class_name = "SublevelSafeSetAlgorithm" 93 | # lambda_sss = 1.0 94 | # slack_weight = 1e3 95 | # control_weight = [ 96 | # 1.0, 1.0, 1.0, 97 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 98 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 99 | # 1.0, 1.0, 1.0 100 | # ] 101 | 102 | # class_name = "ControlBarrierFunction" 103 | # lambda_cbf = 1 104 | # slack_weight = 1e3 105 | # control_weight = [ 106 | # 1.0, 1.0, 1.0, 107 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 108 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 109 | # 1.0, 1.0, 1.0 110 | # ] 111 | 112 | # class_name = "PotentialFieldMethod" 113 | # c_pfm = 0.1 114 | 115 | # class_name = "SlidingModeAlgorithm" 116 | # c_sma = 1.0 117 | 118 | 119 | 120 | 121 | 122 | -------------------------------------------------------------------------------- /pipeline/spark_pipeline/g1_safe_teleop/g1_safe_teleop_sim_pipeline_config.py: -------------------------------------------------------------------------------- 1 | from spark_pipeline.base.base_pipeline_config import BasePipelineConfig 2 | 3 | class G1SafeTeleopSimPipelineConfig(BasePipelineConfig): 4 | 5 | max_num_steps = -1 6 | enable_logger = False 7 | enable_plotter = False 8 | enable_safe_zone_render = False 9 | 10 | class metric_selection: 11 | dof_pos = False 12 | dof_vel = False 13 | goal_pos = False 14 | obstacle_pos = False 15 | 16 | dist_self = False 17 | dist_robot_to_env = False 18 | dist_goal_arm = False 19 | dist_goal_base = False 20 | seed = False 21 | done = False 22 | 23 | violation = True 24 | loop_time = True 25 | trigger_safe_controller = True 26 | 27 | class robot( BasePipelineConfig.robot ): 28 | 29 | class cfg( BasePipelineConfig.robot.cfg ): 30 | class_name = "G1SportModeDynamic1Config" 31 | 32 | class kinematics( BasePipelineConfig.robot.kinematics ): 33 | pass 34 | 35 | class env( BasePipelineConfig.env ): 36 | 37 | class task( BasePipelineConfig.env.task ): 38 | class_name = "G1TeleopSimTask" 39 | task_name = "G1TeleopSimTask" # e.g., will appear as ros node name 40 | enable_ros = True 41 | 42 | ros_params = dict( 43 | robot_command_topic = "/g1_29dof/arm_position_controller/command", 44 | robot_state_topic = "/g1_29dof/robot_state", 45 | robot_teleop_topic = 
"/g1_29dof/robot_teleop", 46 | obstacle_topic = "/g1_29dof/human_state", 47 | mssa_demo_obstacle_topic = "/g1_29dof/obstacle_state", 48 | ) 49 | max_episode_length = -1 50 | 51 | class agent( BasePipelineConfig.env.agent ): 52 | class_name = "G1MujocoAgent" 53 | dt = 0.002 54 | control_decimation = 5 55 | enable_viewer = True 56 | # obstacles perceived by agent. For mujoco, we create some obstacles movable via keyboard 57 | obstacle_debug = dict( 58 | num_obstacle = 2, 59 | manual_movement_step_size = 0.01 60 | ) 61 | 62 | class algo( BasePipelineConfig.algo ): 63 | 64 | class policy( BasePipelineConfig.algo.policy ): 65 | class_name = "G1TeleopPIDPolicy" 66 | 67 | class safe_controller( BasePipelineConfig.algo.safe_controller ): 68 | 69 | class safety_index( BasePipelineConfig.algo.safe_controller.safety_index ): 70 | class_name = "FirstOrderCollisionSafetyIndex" 71 | min_distance = { 72 | "environment": 0.1, 73 | "self": 0.01 74 | } 75 | 76 | enable_self_collision = True 77 | 78 | class safe_algo( BasePipelineConfig.algo.safe_controller.safe_algo ): 79 | # class_name = "ByPassSafeControl" 80 | class_name = "SafeSetAlgorithm" 81 | eta_ssa = 1.0 82 | safety_buffer = 0.1 # positive to allow hold state 83 | slack_weight = 1e3 84 | control_weight = [ 85 | 1.0, 1.0, 1.0, # waist 86 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # left arm 87 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # right arm 88 | 10.0, 10.0, 10.0 # locomotion 89 | ] 90 | 91 | # class_name = "SublevelSafeSetAlgorithm" 92 | # lambda_sss = 1.0 93 | # slack_weight = 1e3 94 | # control_weight = [ 95 | # 1.0, 1.0, 1.0, 96 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 97 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 98 | # 1.0, 1.0, 1.0 99 | # ] 100 | 101 | # class_name = "ControlBarrierFunction" 102 | # lambda_cbf = 1 103 | # slack_weight = 1e3 104 | # control_weight = [ 105 | # 1.0, 1.0, 1.0, 106 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 107 | # 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 108 | # 1.0, 1.0, 1.0 109 | # ] 110 | 111 | # class_name = "PotentialFieldMethod" 112 | # c_pfm = 0.1 113 | 114 | # class_name = "SlidingModeAlgorithm" 115 | # c_sma = 1.0 116 | 117 | 118 | 119 | 120 | 121 | -------------------------------------------------------------------------------- /pipeline/spark_pipeline/g1_safe_teleop/g1_unsafe_teleop_sim_pipeline_config.py: -------------------------------------------------------------------------------- 1 | from spark_pipeline.base.base_pipeline_config import BasePipelineConfig 2 | 3 | class G1UnsafeTeleopSimPipelineConfig(BasePipelineConfig): 4 | 5 | max_num_steps = -1 6 | 7 | class robot( BasePipelineConfig.robot ): 8 | 9 | class cfg( BasePipelineConfig.robot.cfg ): 10 | class_name = "G1WholeBodyConfig" 11 | 12 | class kinematics( BasePipelineConfig.robot.kinematics ): 13 | pass 14 | 15 | class env( BasePipelineConfig.env ): 16 | 17 | class task( BasePipelineConfig.env.task ): 18 | class_name = "G1TeleopSimTask" 19 | task_name = "G1TeleopSimTask" # e.g., will appear as ros node name 20 | enable_ros = True 21 | ros_params = dict( 22 | robot_command_topic = "/g1_29dof/arm_position_controller/command", 23 | robot_state_topic = "/g1_29dof/robot_state", 24 | robot_teleop_topic = "/g1_29dof/robot_teleop", 25 | obstacle_topic = "/g1_29dof/human_state", 26 | ) 27 | max_episode_length = -1 28 | 29 | class agent( BasePipelineConfig.env.agent ): 30 | class_name = "G1MujocoAgent" 31 | dt = 0.01 32 | # obstacles perceived by agent. 
For mujoco, we create some obstacles movable via keyboard 33 | obstacle_debug = dict( 34 | num_obstacle = 2, 35 | manual_movement_step_size = 0.1 36 | ) 37 | 38 | class algo( BasePipelineConfig.algo ): 39 | 40 | class policy( BasePipelineConfig.algo.policy ): 41 | class_name = "G1TeleopPIDPolicy" 42 | 43 | class safe_controller( BasePipelineConfig.algo.safe_controller ): 44 | 45 | class safety_index( BasePipelineConfig.algo.safe_controller.safety_index ): 46 | class_name = "BasicCollisionSafetyIndex" 47 | min_distance = { 48 | "environment": 0.1, 49 | "self": 0.01 50 | } 51 | 52 | enable_self_collision = True 53 | 54 | class safe_algo( BasePipelineConfig.algo.safe_controller.safe_algo ): 55 | class_name = "ByPassSafeControl" 56 | 57 | -------------------------------------------------------------------------------- /pipeline/spark_pipeline/plotter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import numpy as np 4 | import matplotlib.pyplot as plt 5 | from tensorboard.backend.event_processing.event_accumulator import EventAccumulator 6 | import argparse 7 | import signal 8 | 9 | 10 | def get_latest_event_file(log_dir): 11 | """Get the latest TensorBoard event file""" 12 | event_files = [f for f in os.listdir(log_dir) if f.startswith("events.out.tfevents")] 13 | if not event_files: 14 | return None 15 | return os.path.join(log_dir, sorted(event_files)[-1]) # Pick latest file 16 | 17 | def read_tensorboard_tags(log_file): 18 | """Read all available scalar tags from a TensorBoardX log file""" 19 | event_acc = EventAccumulator(log_file) 20 | event_acc.Reload() 21 | return event_acc.Tags()["scalars"] 22 | 23 | def read_tensorboard_scalars(log_file, tags): 24 | """Read scalar values from TensorBoardX log file""" 25 | event_acc = EventAccumulator(log_file) 26 | event_acc.Reload() 27 | 28 | scalar_data = {} 29 | for tag in event_acc.Tags()["scalars"]: 30 | if tag in tags or "phi" in tag or "traj" in tag: 31 | events = event_acc.Scalars(tag) 32 | steps = [e.step for e in events] 33 | values = [e.value for e in events] 34 | scalar_data[tag] = (steps, values) 35 | 36 | return scalar_data 37 | 38 | import numpy as np 39 | 40 | class Plotter(): 41 | def __init__(self, ax, tag, window_size): 42 | self.ax = ax 43 | self.tag = tag # Fix: Ensure `tag` is assigned correctly 44 | self.window_size = window_size # Fix: Store window_size 45 | 46 | 47 | def plot(self, data): 48 | if "phi" in self.tag: 49 | self.plot_phi(data) 50 | elif "traj" in self.tag: 51 | self.window_size = 100 52 | self.plot_traj(data) 53 | 54 | def plot_phi(self, data): 55 | if self.tag in data: 56 | steps, values = data[self.tag] 57 | if len(steps) > 0 and len(values) > 0: 58 | self.ax.clear() 59 | self.line, = self.ax.plot([], [], label=self.tag) 60 | 61 | if len(steps) >= self.window_size: # Fix: Use `self.window_size` 62 | xlim_min = steps[-self.window_size] 63 | xlim_max = steps[-1] 64 | else: 65 | xlim_min = 0 66 | xlim_max = self.window_size 67 | 68 | steps = steps[xlim_min: xlim_max] 69 | values = values[xlim_min: xlim_max] 70 | self.line.set_xdata(steps) 71 | self.line.set_ydata(values) 72 | values_min = np.min(values) 73 | values_max = np.max(values) 74 | self.ax.set_ylim(np.minimum(values_min - 0.01, -0.05), np.maximum(values_max + 0.01, 0.05)) 75 | self.ax.set_xlim(xlim_min, xlim_max) 76 | self.ax.hlines(y=0, xmin = xlim_min, xmax = xlim_max, color='black', linestyle='--', linewidth=0.5) 77 | 78 | self.ax.legend() 79 | self.ax.relim() 80 | 
self.ax.autoscale_view() 81 | 82 | def plot_traj(self, data): 83 | tag_split = self.tag.split("_") 84 | name = '_'.join(tag_split[1:]) 85 | phi0_name = "phi0_" + name 86 | phi0dot_name = "phi0dot_" + name 87 | phi_k_name = "phi_k_" + name 88 | if phi0_name in data and phi0dot_name in data and phi_k_name in data: 89 | phi0_steps, phi0_values = data[phi0_name] 90 | phi0dot_steps, phi0dot_values = data[phi0dot_name] 91 | _, phi_k_values = data[phi_k_name] 92 | phi_k = phi_k_values[-1] 93 | if len(phi0_steps) == 0 or len(phi0dot_steps) == 0: 94 | return # Fix: Handle empty data cases 95 | 96 | # Ensure `step` is a valid index 97 | step = min(len(phi0_values), len(phi0dot_values)) 98 | 99 | phi0_values = phi0_values[:step] 100 | phi0dot_values = phi0dot_values[:step] 101 | 102 | if step >= 1: 103 | if len(phi0_values) >= self.window_size: 104 | phi0_values = phi0_values[-self.window_size:] 105 | phi0dot_values = phi0dot_values[-self.window_size:] 106 | 107 | # Fix: Ensure proper axis limits (commented out for now) 108 | # self.ax.set_xlim(-np.abs(phi0_values[-1]) * 1.2, np.abs(phi0_values[-1]) * 1.2) 109 | # self.ax.set_ylim(-np.abs(phi0dot_values[-1]) * 1.2, np.abs(phi0dot_values[-1]) * 1.2) 110 | self.ax.clear() 111 | self.line, = self.ax.plot([], [], label=self.tag) 112 | self.line.set_xdata(phi0_values) 113 | self.line.set_ydata(phi0dot_values) 114 | self.ax.set_xlim(-0.2, 0.2) 115 | self.ax.set_ylim(-0.2, 0.2) 116 | self.ax.vlines(x=0, ymin = -100,ymax = 100, color='black', linestyle='--', linewidth=0.5) 117 | self.ax.hlines(y=0, xmin = -100,xmax = 100, color='black', linestyle='--', linewidth=0.5) 118 | self.ax.scatter(phi0_values[-1], phi0dot_values[-1], color='r', s=10) 119 | 120 | # Define x values 121 | 122 | if phi_k != 0: 123 | x = np.linspace(-10, 10, 100) 124 | self.ax.plot(x, - x/phi_k, color='red', linestyle='--', label="phi = 0") 125 | self.ax.legend() 126 | self.ax.relim() 127 | self.ax.autoscale_view() 128 | 129 | 130 | 131 | # Signal handler for SIGINT (Ctrl+C) 132 | def signal_handler(sig, frame): 133 | global shutdown_flag 134 | print("\nReceived SIGINT, shutting down...") 135 | shutdown_flag = True 136 | 137 | 138 | 139 | if __name__ == "__main__": 140 | parser = argparse.ArgumentParser(description="Plot tags with a directory path") 141 | 142 | # Define the arguments 143 | parser.add_argument('log_path', type=str, help="Directory path for plotting") 144 | parser.add_argument('tags', type=str, nargs='+', help="Tags to plot") 145 | 146 | # Parse the arguments 147 | args = parser.parse_args() 148 | 149 | # Access the directory path and tags 150 | log_path = args.log_path 151 | tags = args.tags 152 | 153 | window_size = 1000 154 | # Set up a flag to handle graceful shutdown 155 | shutdown_flag = False 156 | # Register the signal handler for SIGINT 157 | signal.signal(signal.SIGINT, signal_handler) 158 | 159 | if log_path: 160 | all_tags = read_tensorboard_tags(log_path) 161 | print("Available Tags:", all_tags) 162 | 163 | user_tags = args.tags 164 | print("Selected Tags:", user_tags) 165 | if not user_tags: 166 | print("No valid tags selected. 
Exiting.") 167 | else: 168 | num_plots = len(user_tags) 169 | 170 | rows = int(np.ceil(num_plots / 2)) 171 | cols = int(np.ceil(num_plots / rows)) 172 | # Create subplots based on user-selected tags 173 | fig, axes = plt.subplots(rows, cols, figsize=(2*cols, 2*rows)) 174 | axes = axes.flatten() 175 | plt.ion() # Interactive mode 176 | plt.subplots_adjust(hspace=0.2, wspace=0.2) 177 | fig_manager = plt.get_current_fig_manager() 178 | fig_manager.window.wm_geometry("+3000+300") 179 | 180 | 181 | plotter_list = [] 182 | for i, tag in enumerate(user_tags): 183 | plotter_list.append(Plotter(axes[i], tag, window_size)) 184 | # Real-time update loop 185 | while not shutdown_flag: 186 | scalar_data = read_tensorboard_scalars(log_path, user_tags) 187 | for plotter in plotter_list: 188 | plotter.plot(scalar_data) 189 | plt.draw() 190 | plt.pause(0.001) # Update every second 191 | time.sleep(0.001) 192 | 193 | else: 194 | print("No TensorBoard event file found.") 195 | -------------------------------------------------------------------------------- /wrapper/spark_algo/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="spark_algo", 5 | version="0.1", 6 | description="Algo wrapper.", 7 | packages=find_packages(), 8 | include_package_data=True, 9 | install_requires=[ 10 | "numpy", # Add your library dependencies here 11 | ], 12 | python_requires=">=3.8", 13 | ) -------------------------------------------------------------------------------- /wrapper/spark_algo/spark_algo/__init__.py: -------------------------------------------------------------------------------- 1 | from .spark_algo_config import SparkAlgoConfig 2 | from .spark_algo_wrapper import SparkAlgoWrapper -------------------------------------------------------------------------------- /wrapper/spark_algo/spark_algo/spark_algo_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from enum import IntEnum, Enum 3 | import inspect 4 | from spark_safe.safe_controller import SafeControllerConfig 5 | 6 | class SparkAlgoConfig: 7 | 8 | def __init__(self) -> None: 9 | 10 | super().__init__() 11 | 12 | """ Initializes all member classes recursively. 
Ignores all names starting with '__' (built-in methods).""" 13 | self.init_member_classes(self) 14 | 15 | # taken from git@github.com:leggedrobotics/legged_gym.git 16 | @staticmethod 17 | def init_member_classes(obj): 18 | print(f"Initializing {obj.__class__.__name__}") 19 | 20 | # Iterate over all attribute names 21 | for key in dir(obj): 22 | # Disregard builtin attributes 23 | if key == "__class__": 24 | continue 25 | 26 | # Get the corresponding attribute object 27 | var = getattr(obj, key) 28 | 29 | # Check if the attribute is a class but not an Enum 30 | if inspect.isclass(var) and not issubclass(var, Enum): 31 | 32 | # Instantiate the class 33 | i_var = var() 34 | # Set the attribute to the instance instead of the type 35 | setattr(obj, key, i_var) 36 | # Recursively init members of the attribute 37 | SparkAlgoConfig.init_member_classes(i_var) 38 | 39 | class policy: 40 | class_name = None 41 | 42 | class safe_controller( SafeControllerConfig ): 43 | 44 | pass -------------------------------------------------------------------------------- /wrapper/spark_algo/spark_algo/spark_algo_wrapper.py: -------------------------------------------------------------------------------- 1 | from spark_utils import initialize_class 2 | 3 | from .spark_algo_config import SparkAlgoConfig 4 | from spark_safe.safe_controller import BaseSafeController 5 | from spark_policy import BasePolicy 6 | from spark_robot import RobotConfig, RobotKinematics 7 | import numpy as np 8 | class SparkAlgoWrapper: 9 | 10 | def __init__(self, cfg : SparkAlgoConfig, robot_cfg : RobotConfig, robot_kinematics : RobotKinematics): 11 | self.cfg = cfg 12 | 13 | # self.safe_controller : BaseSafeController = initialize_class( 14 | # self.cfg.safe_controller, robot_cfg=robot_cfg, robot_kinematics=robot_kinematics 15 | # ) 16 | self.robot_cfg = robot_cfg 17 | self.safe_controller : BaseSafeController = BaseSafeController( 18 | cfg = self.cfg.safe_controller, 19 | robot_cfg = robot_cfg, 20 | robot_kinematics = robot_kinematics 21 | ) 22 | 23 | self.policy : BasePolicy = initialize_class(self.cfg.policy, 24 | robot_cfg=robot_cfg, 25 | robot_kinematics=robot_kinematics) 26 | 27 | def act(self, agent_feedback, task_info): 28 | 29 | u_ref, action_info = self.policy.act( 30 | agent_feedback = agent_feedback, 31 | task_info = task_info 32 | ) 33 | # compute safe control 34 | u_safe, safe_control_info = self.safe_controller.safe_control( 35 | x = agent_feedback["state"], 36 | u_ref = u_ref, 37 | agent_feedback = agent_feedback, 38 | task_info = task_info, 39 | action_info = action_info 40 | ) 41 | if np.isnan(u_safe).any() or np.isinf(u_safe).any(): 42 | print("u_safe contains nan or inf", u_safe) 43 | u_safe = np.zeros_like(u_safe) 44 | 45 | # import ipdb; ipdb.set_trace()  # debug-only breakpoint; keep disabled in the control loop 46 | 47 | for control_id in self.robot_cfg.Control: 48 | u_safe[control_id] = np.clip(u_safe[control_id], -self.robot_cfg.ControlLimit[control_id], self.robot_cfg.ControlLimit[control_id]) 49 | 50 | action_info['u_ref'] = u_ref 51 | action_info['u_safe'] = u_safe 52 | return u_safe, {**action_info, **safe_control_info} -------------------------------------------------------------------------------- /wrapper/spark_env/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="spark_env", 5 | version="0.1", 6 | description="Env wrapper.", 7 | packages=find_packages(), 8 | include_package_data=True, 9 | install_requires=[ 10 | "numpy", # Add your library dependencies here
11 | ], 12 | python_requires=">=3.8", 13 | ) -------------------------------------------------------------------------------- /wrapper/spark_env/spark_env/__init__.py: -------------------------------------------------------------------------------- 1 | from .spark_env_config import SparkEnvConfig 2 | from .spark_env_wrapper import SparkEnvWrapper -------------------------------------------------------------------------------- /wrapper/spark_env/spark_env/spark_env_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from enum import IntEnum, Enum 3 | import inspect 4 | 5 | class SparkEnvConfig: 6 | 7 | def __init__(self) -> None: 8 | 9 | super().__init__() 10 | 11 | """ Initializes all member classes recursively. Ignores all names starting with '__' (buit-in methods).""" 12 | self.init_member_classes(self) 13 | 14 | # taken from git@github.com:leggedrobotics/legged_gym.git 15 | @staticmethod 16 | def init_member_classes(obj): 17 | print(f"Initializing {obj.__class__.__name__}") 18 | 19 | # Iterate over all attributes names 20 | for key in dir(obj): 21 | # Disregard builtin attributes 22 | if key == "__class__": 23 | continue 24 | 25 | # Get the corresponding attribute object 26 | var = getattr(obj, key) 27 | 28 | # Check if the attribute is a class but not an Enum 29 | if inspect.isclass(var) and not issubclass(var, Enum): 30 | 31 | # Instantiate the class 32 | i_var = var() 33 | # Set the attribute to the instance instead of the type 34 | setattr(obj, key, i_var) 35 | # Recursively init members of the attribute 36 | SparkEnvConfig.init_member_classes(i_var) 37 | 38 | class task: 39 | class_name = None 40 | 41 | class agent: 42 | class_name = None -------------------------------------------------------------------------------- /wrapper/spark_env/spark_env/spark_env_wrapper.py: -------------------------------------------------------------------------------- 1 | from .spark_env_config import SparkEnvConfig 2 | from spark_robot import RobotConfig, RobotKinematics 3 | from spark_agent import BaseAgent 4 | from spark_task import BaseTask 5 | from spark_utils import initialize_class 6 | 7 | class SparkEnvWrapper: 8 | 9 | def __init__(self, cfg: SparkEnvConfig, robot_cfg: RobotConfig, robot_kinematics: RobotKinematics): 10 | self.cfg = cfg 11 | 12 | self.agent : BaseAgent = initialize_class(self.cfg.agent, robot_cfg=robot_cfg) 13 | 14 | self.task : BaseTask = initialize_class(self.cfg.task, 15 | robot_cfg=robot_cfg, 16 | robot_kinematics=robot_kinematics, 17 | agent=self.agent) 18 | 19 | def reset(self): 20 | 21 | self.agent.reset() 22 | 23 | # get agent feedback 24 | agent_feedback = self.agent.get_feedback() 25 | 26 | self.task.reset(agent_feedback) 27 | 28 | # process agent feedback, receive additional perception, generate goal 29 | task_info = self.task.get_info(agent_feedback) 30 | 31 | return agent_feedback, task_info 32 | 33 | def step(self, u_safe, action_info): 34 | 35 | # execute action 36 | self.agent.step(u_safe, action_info = action_info) 37 | 38 | # get agent feedback 39 | agent_feedback = self.agent.get_feedback() 40 | 41 | # execute task step (process agent feedback, receive additional perception, generate goal) 42 | self.task.step(agent_feedback) 43 | 44 | # get task info 45 | task_info = self.task.get_info(agent_feedback) 46 | 47 | return agent_feedback, task_info -------------------------------------------------------------------------------- /wrapper/spark_env/spark_env/user_env_wrapper.py: 
-------------------------------------------------------------------------------- 1 | from .spark_env_config import SparkEnvConfig 2 | from .spark_env_wrapper import SparkEnvWrapper 3 | from spark_robot import RobotConfig, RobotKinematics 4 | from spark_agent import BaseAgent 5 | from spark_task import BaseTask 6 | from spark_utils import initialize_class 7 | 8 | class UserEnvWrapper(SparkEnvWrapper): 9 | 10 | def __init__(self, cfg: SparkEnvConfig, robot_cfg: RobotConfig, robot_kinematics: RobotKinematics): 11 | self.cfg = cfg 12 | 13 | self.agent : BaseAgent = initialize_class(self.cfg.agent, robot_cfg=robot_cfg) 14 | 15 | self.task : BaseTask = initialize_class(self.cfg.task, 16 | robot_cfg=robot_cfg, 17 | robot_kinematics=robot_kinematics, 18 | agent=self.agent) 19 | 20 | def reset(self): 21 | ''' 22 | The user needs to implement this function to reset the environment 23 | and return the initial agent feedback and task info. 24 | ''' 25 | # get agent feedback 26 | agent_feedback = None 27 | 28 | # process agent feedback, receive additional perception, generate goal 29 | task_info = None 30 | 31 | return agent_feedback, task_info 32 | 33 | def step(self, u_safe, action_info): 34 | ''' 35 | The user needs to implement this function to step the environment 36 | and return the agent feedback and task info. 37 | ''' 38 | 39 | # get agent feedback 40 | agent_feedback = None 41 | 42 | # get task info 43 | task_info = None 44 | 45 | return agent_feedback, task_info 46 | --------------------------------------------------------------------------------
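A minimal usage sketch (not a file from the repository) of how the pieces above are typically wired together; it assumes the names exported in pipeline/spark_pipeline/__init__.py and that G1SafeTeleopPipeline keeps BasePipeline's (cfg) constructor:

from spark_pipeline import G1SafeTeleopSimPipelineConfig, G1SafeTeleopPipeline

# Instantiating the config recursively instantiates all nested member classes
# (robot, env, algo, safe_controller, ...) via init_member_classes.
cfg = G1SafeTeleopSimPipelineConfig()

# The pipeline constructor wires RobotConfig, RobotKinematics, SparkEnvWrapper and
# SparkAlgoWrapper; run() then executes the reset -> act -> step -> post_physics_step
# loop until task_info["done"] is set or max_num_steps is reached.
pipeline = G1SafeTeleopPipeline(cfg)
pipeline.run()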