├── .gitattributes ├── .gitignore ├── .gitmodules ├── .pre-commit-config.yaml ├── LICENSE ├── README.md ├── docs └── README_zh.md ├── pyproject.toml ├── test ├── test_load_dataset.py ├── test_load_h5.py └── test_local_push_to_hub.py └── unitree_lerobot ├── eval_robot ├── assets │ ├── brainco_hand │ │ ├── brainco.yml │ │ ├── brainco_left.urdf │ │ ├── brainco_right.urdf │ │ └── meshes │ │ │ ├── left_base_link.STL │ │ │ ├── left_index_distal_Link.STL │ │ │ ├── left_index_proximal_Link.STL │ │ │ ├── left_index_tip_Link.STL │ │ │ ├── left_middle_distal_Link.STL │ │ │ ├── left_middle_proximal_Link.STL │ │ │ ├── left_middle_tip_Link.STL │ │ │ ├── left_pinky_distal_Link.STL │ │ │ ├── left_pinky_proximal_Link.STL │ │ │ ├── left_pinky_tip_Link.STL │ │ │ ├── left_ring_distal_Link.STL │ │ │ ├── left_ring_proximal_Link.STL │ │ │ ├── left_ring_tip_Link.STL │ │ │ ├── left_thumb_distal_Link.STL │ │ │ ├── left_thumb_metacarpal_Link.STL │ │ │ ├── left_thumb_proximal_Link.STL │ │ │ ├── left_thumb_tip_Link.STL │ │ │ ├── right_base_link.STL │ │ │ ├── right_index_distal_link.STL │ │ │ ├── right_index_proximal_link.STL │ │ │ ├── right_index_tip.STL │ │ │ ├── right_middle_distal_link.STL │ │ │ ├── right_middle_proximal_link.STL │ │ │ ├── right_middle_tip.STL │ │ │ ├── right_pinky_distal_link.STL │ │ │ ├── right_pinky_proximal_link.STL │ │ │ ├── right_pinky_tip.STL │ │ │ ├── right_ring_distal_link.STL │ │ │ ├── right_ring_proximal_link.STL │ │ │ ├── right_ring_tip.STL │ │ │ ├── right_thumb_distal_link.STL │ │ │ ├── right_thumb_metacarpal_link.STL │ │ │ ├── right_thumb_proximal_link.STL │ │ │ └── right_thumb_tip.STL │ ├── g1 │ │ ├── .gitignore │ │ ├── README.md │ │ ├── g1_body23.urdf │ │ ├── g1_body29_hand14.urdf │ │ ├── g1_body29_hand14.xml │ │ └── meshes │ │ │ ├── head_link.STL │ │ │ ├── left_ankle_pitch_link.STL │ │ │ ├── left_ankle_roll_link.STL │ │ │ ├── left_elbow_link.STL │ │ │ ├── left_hand_index_0_link.STL │ │ │ ├── left_hand_index_1_link.STL │ │ │ ├── left_hand_middle_0_link.STL │ 
│ │ ├── left_hand_middle_1_link.STL │ │ │ ├── left_hand_palm_link.STL │ │ │ ├── left_hand_thumb_0_link.STL │ │ │ ├── left_hand_thumb_1_link.STL │ │ │ ├── left_hand_thumb_2_link.STL │ │ │ ├── left_hip_pitch_link.STL │ │ │ ├── left_hip_roll_link.STL │ │ │ ├── left_hip_yaw_link.STL │ │ │ ├── left_knee_link.STL │ │ │ ├── left_rubber_hand.STL │ │ │ ├── left_shoulder_pitch_link.STL │ │ │ ├── left_shoulder_roll_link.STL │ │ │ ├── left_shoulder_yaw_link.STL │ │ │ ├── left_wrist_pitch_link.STL │ │ │ ├── left_wrist_roll_link.STL │ │ │ ├── left_wrist_roll_rubber_hand.STL │ │ │ ├── left_wrist_yaw_link.STL │ │ │ ├── logo_link.STL │ │ │ ├── pelvis.STL │ │ │ ├── pelvis_contour_link.STL │ │ │ ├── right_ankle_pitch_link.STL │ │ │ ├── right_ankle_roll_link.STL │ │ │ ├── right_elbow_link.STL │ │ │ ├── right_hand_index_0_link.STL │ │ │ ├── right_hand_index_1_link.STL │ │ │ ├── right_hand_middle_0_link.STL │ │ │ ├── right_hand_middle_1_link.STL │ │ │ ├── right_hand_palm_link.STL │ │ │ ├── right_hand_thumb_0_link.STL │ │ │ ├── right_hand_thumb_1_link.STL │ │ │ ├── right_hand_thumb_2_link.STL │ │ │ ├── right_hip_pitch_link.STL │ │ │ ├── right_hip_roll_link.STL │ │ │ ├── right_hip_yaw_link.STL │ │ │ ├── right_knee_link.STL │ │ │ ├── right_rubber_hand.STL │ │ │ ├── right_shoulder_pitch_link.STL │ │ │ ├── right_shoulder_roll_link.STL │ │ │ ├── right_shoulder_yaw_link.STL │ │ │ ├── right_wrist_pitch_link.STL │ │ │ ├── right_wrist_roll_link.STL │ │ │ ├── right_wrist_roll_rubber_hand.STL │ │ │ ├── right_wrist_yaw_link.STL │ │ │ ├── torso_constraint_L_link.STL │ │ │ ├── torso_constraint_L_rod_link.STL │ │ │ ├── torso_constraint_R_link.STL │ │ │ ├── torso_constraint_R_rod_link.STL │ │ │ ├── torso_link.STL │ │ │ ├── torso_link_23dof_rev_1_0.STL │ │ │ ├── torso_link_rev_1_0.STL │ │ │ ├── waist_constraint_L.STL │ │ │ ├── waist_constraint_R.STL │ │ │ ├── waist_roll_link.STL │ │ │ ├── waist_roll_link_rev_1_0.STL │ │ │ ├── waist_support_link.STL │ │ │ ├── waist_yaw_link.STL │ │ │ └── 
waist_yaw_link_rev_1_0.STL │ ├── inspire_hand │ │ ├── inspire_hand.yml │ │ ├── inspire_hand_left.urdf │ │ ├── inspire_hand_right.urdf │ │ └── meshes │ │ │ ├── L_hand_base_link.STL │ │ │ ├── Link11_L.STL │ │ │ ├── Link11_R.STL │ │ │ ├── Link12_L.STL │ │ │ ├── Link12_R.STL │ │ │ ├── Link13_L.STL │ │ │ ├── Link13_R.STL │ │ │ ├── Link14_L.STL │ │ │ ├── Link14_R.STL │ │ │ ├── Link15_L.STL │ │ │ ├── Link15_R.STL │ │ │ ├── Link16_L.STL │ │ │ ├── Link16_R.STL │ │ │ ├── Link17_L.STL │ │ │ ├── Link17_R.STL │ │ │ ├── Link18_L.STL │ │ │ ├── Link18_R.STL │ │ │ ├── Link19_L.STL │ │ │ ├── Link19_R.STL │ │ │ ├── Link20_L.STL │ │ │ ├── Link20_R.STL │ │ │ ├── Link21_L.STL │ │ │ ├── Link21_R.STL │ │ │ ├── Link22_L.STL │ │ │ ├── Link22_R.STL │ │ │ └── R_hand_base_link.STL │ └── unitree_hand │ │ ├── meshes │ │ ├── left_hand_index_0_link.STL │ │ ├── left_hand_index_0_link.STL.convex.stl │ │ ├── left_hand_index_1_link.STL │ │ ├── left_hand_index_1_link.STL.convex.stl │ │ ├── left_hand_middle_0_link.STL │ │ ├── left_hand_middle_0_link.STL.convex.stl │ │ ├── left_hand_middle_1_link.STL │ │ ├── left_hand_middle_1_link.STL.convex.stl │ │ ├── left_hand_palm_link.STL │ │ ├── left_hand_palm_link.STL.convex.stl │ │ ├── left_hand_thumb_0_link.STL │ │ ├── left_hand_thumb_0_link.STL.convex.stl │ │ ├── left_hand_thumb_1_link.STL │ │ ├── left_hand_thumb_2_link.STL │ │ ├── left_hand_thumb_2_link.STL.convex.stl │ │ ├── right_hand_index_0_link.STL │ │ ├── right_hand_index_0_link.STL.convex.stl │ │ ├── right_hand_index_1_link.STL │ │ ├── right_hand_index_1_link.STL.convex.stl │ │ ├── right_hand_middle_0_link.STL │ │ ├── right_hand_middle_0_link.STL.convex.stl │ │ ├── right_hand_middle_1_link.STL │ │ ├── right_hand_middle_1_link.STL.convex.stl │ │ ├── right_hand_palm_link.STL │ │ ├── right_hand_palm_link.STL.convex.stl │ │ ├── right_hand_thumb_0_link.STL │ │ ├── right_hand_thumb_0_link.STL.convex.stl │ │ ├── right_hand_thumb_1_link.STL │ │ ├── right_hand_thumb_2_link.STL │ │ └── 
right_hand_thumb_2_link.STL.convex.stl │ │ ├── unitree_dex3.yml │ │ ├── unitree_dex3_left.urdf │ │ └── unitree_dex3_right.urdf ├── eval_g1.py ├── eval_g1_dataset.py ├── eval_g1_sim.py ├── image_server │ ├── image_client.py │ └── image_server.py ├── make_robot.py ├── replay_robot.py ├── robot_control │ ├── robot_arm.py │ ├── robot_arm_ik.py │ ├── robot_hand_brainco.py │ ├── robot_hand_inspire.py │ └── robot_hand_unitree.py └── utils │ ├── episode_writer.py │ ├── rerun_visualizer.py │ ├── sim_savedata_utils.py │ ├── sim_state_topic.py │ ├── utils.py │ └── weighted_moving_filter.py └── utils ├── constants.py ├── convert_lerobot_to_h5.py ├── convert_unitree_json_to_h5.py ├── convert_unitree_json_to_lerobot.py └── sort_and_rename_folders.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # Copyright 2024 The HuggingFace Inc. team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | *.memmap filter=lfs diff=lfs merge=lfs -text 16 | *.stl filter=lfs diff=lfs merge=lfs -text 17 | *.safetensors filter=lfs diff=lfs merge=lfs -text 18 | *.mp4 filter=lfs diff=lfs merge=lfs -text 19 | *.arrow filter=lfs diff=lfs merge=lfs -text 20 | *.json !text !filter !merge !diff 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | .ruff_cache 3 | __pycache__/ 4 | figure.png 5 | unitree_sdk2_python/ 6 | unitree_lerobot.egg-info 7 | 8 | cam_high.jpg 9 | cam_right_wrist.jpg 10 | cam_left_wrist.jpg 11 | data/ 12 | ./data 13 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "unitree_lerobot/lerobot"] 2 | path = unitree_lerobot/lerobot 3 | url = https://github.com/huggingface/lerobot 4 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2024 The HuggingFace Inc. team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | default_language_version: 16 | python: python3.10 17 | 18 | repos: 19 | ##### Meta ##### 20 | - repo: meta 21 | hooks: 22 | - id: check-useless-excludes 23 | - id: check-hooks-apply 24 | 25 | ##### General Code Quality & Formatting ##### 26 | - repo: https://github.com/pre-commit/pre-commit-hooks 27 | rev: v5.0.0 28 | hooks: 29 | - id: check-added-large-files 30 | args: ['--maxkb=1024'] 31 | - id: debug-statements 32 | - id: check-merge-conflict 33 | - id: check-case-conflict 34 | - id: check-yaml 35 | - id: check-toml 36 | - id: end-of-file-fixer 37 | - id: trailing-whitespace 38 | 39 | - repo: https://github.com/astral-sh/ruff-pre-commit 40 | rev: v0.12.4 41 | hooks: 42 | - id: ruff-format 43 | - id: ruff 44 | args: [--fix, --exit-non-zero-on-fix] 45 | 46 | - repo: https://github.com/adhtruong/mirrors-typos 47 | rev: v1.34.0 48 | hooks: 49 | - id: typos 50 | args: [--force-exclude] 51 | 52 | - repo: https://github.com/asottile/pyupgrade 53 | rev: v3.20.0 54 | hooks: 55 | - id: pyupgrade 56 | args: [--py310-plus] 57 | 58 | ##### Markdown Quality ##### 59 | - repo: https://github.com/rbubley/mirrors-prettier 60 | rev: v3.6.2 61 | hooks: 62 | - id: prettier 63 | name: Format Markdown with Prettier 64 | types_or: [markdown, mdx] 65 | args: [--prose-wrap=preserve] 66 | 67 | ##### Security ##### 68 | - repo: https://github.com/gitleaks/gitleaks 69 | rev: v8.27.2 70 | hooks: 71 | - id: gitleaks 72 | 73 | # - repo: https://github.com/woodruffw/zizmor-pre-commit 74 | # rev: v1.11.0 75 | # hooks: 76 | # - id: zizmor 77 | 78 | - repo: https://github.com/PyCQA/bandit 79 | rev: 1.8.6 80 | hooks: 81 | - id: bandit 82 | args: ["-c", "pyproject.toml"] 83 | additional_dependencies: ["bandit[toml]"] 84 | 85 | # TODO(Steven): Uncomment when ready to use 86 | ##### Static Analysis & Typing ##### 87 | # - repo: https://github.com/pre-commit/mirrors-mypy 88 | # rev: v1.16.0 89 | # hooks: 90 | # - id: mypy 91 | # args: [--python-version=3.10] 92 | 93 | ##### Docstring Checks 
##### 94 | # - repo: https://github.com/akaihola/darglint2 95 | # rev: v1.8.2 96 | # hooks: 97 | # - id: darglint2 98 | # args: ["--docstring-style", "google", "-v", "2"] 99 | # exclude: ^tests/.*$ 100 | 101 | # - repo: https://github.com/econchick/interrogate 102 | # rev: 1.7.0 103 | # hooks: 104 | # - id: interrogate 105 | # args: ["-vv", "--config=pyproject.toml"] 106 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright [2024] [HangZhou YuShu TECHNOLOGY CO.,LTD. ("Unitree Robotics")] 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | 15 | --- 16 | 17 | This code builds upon following open-source code-bases. Please visit the URLs to see the respective LICENSES: 18 | 19 | 1. https://github.com/huggingface/lerobot 20 | 2. 
https://github.com/unitreerobotics/unitree_dds_wrapper 21 | 22 | --- 23 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "unitree_lerobot" 7 | version = "0.3.0" 8 | description = "unitree data conversion and real machine inference verification" 9 | readme = "README.md" 10 | requires-python = ">=3.10,<3.11" 11 | license = { text = "Apache-2.0" } 12 | authors = [ 13 | { name = "unitree"} 14 | ] 15 | keywords = ["unitree", "robotics", "lerobot"] 16 | 17 | dependencies = [ 18 | "transformers>=4.45.2", 19 | "tyro>=0.9.10", 20 | "matplotlib>=3.9.0", 21 | "meshcat==0.3.2", 22 | # "unitree_sdk2py @ git+https://github.com/unitreerobotics/unitree_sdk2_python.git@master", 23 | "logging_mp" 24 | ] 25 | 26 | [tool.setuptools] 27 | packages = ["unitree_lerobot"] 28 | 29 | [tool.ruff] 30 | line-length = 120 31 | target-version = "py310" 32 | exclude = ["build", "venv", "__pycache__"] 33 | fix = true 34 | show-fixes = true 35 | 36 | # [tool.ruff.lint] 37 | # select = ["E4", "E7", "E9", "F", "I", "N", "B", "C4", "SIM"] 38 | # ignore = ["N801"] 39 | 40 | [tool.ruff.lint.per-file-ignores] 41 | "constants.py" = ["N815"] 42 | 43 | [tool.bandit] 44 | exclude_dirs = [] 45 | skips = ["B101", "B311", "B404", "B603", "B615"] 46 | -------------------------------------------------------------------------------- /test/test_load_dataset.py: -------------------------------------------------------------------------------- 1 | from lerobot.datasets.lerobot_dataset import LeRobotDataset 2 | import tqdm 3 | 4 | episode_index = 1 5 | dataset = LeRobotDataset(repo_id="unitreerobotics/G1_Dex3_ToastedBread_Dataset") 6 | 7 | from_idx = dataset.meta.episodes["dataset_from_index"][episode_index] 8 | to_idx = 
dataset.meta.episodes["dataset_to_index"][episode_index] 9 | for step_idx in tqdm.tqdm(range(from_idx, to_idx)): 10 | step = dataset[step_idx] 11 | -------------------------------------------------------------------------------- /test/test_load_h5.py: -------------------------------------------------------------------------------- 1 | """ 2 | python test/test_load_h5.py --h5-path $HOME/datasets/episode_0.hdf5 3 | """ 4 | 5 | import tyro 6 | import h5py 7 | import cv2 8 | from pathlib import Path 9 | import numpy as np 10 | 11 | 12 | def read_hdf5(h5_path: Path, print_structure: bool = True, print_data: bool = True): 13 | """ 14 | Read an HDF5 file and print its structure and data. 15 | 16 | Args: 17 | h5_path (Path): Path to the HDF5 file 18 | print_structure (bool): Whether to print the file structure 19 | print_data (bool): Whether to print the data content 20 | """ 21 | try: 22 | with h5py.File(h5_path, "r") as f: 23 | print(f"Successfully opened file: {h5_path}") 24 | h5_name = [] 25 | # Print file structure 26 | if print_structure: 27 | print("\nFile structure:") 28 | 29 | def print_attrs(name, obj): 30 | print(f" {name} (Type: {type(obj)})") 31 | h5_name.append(name) 32 | if isinstance(obj, h5py.Dataset): 33 | print(f" Shape: {obj.shape}, Dtype: {obj.dtype}") 34 | for key, val in obj.attrs.items(): 35 | print(f" Attribute: {key} = {val}") 36 | 37 | f.visititems(print_attrs) 38 | 39 | # Print data content (only partial data to avoid excessive output) 40 | if print_data: 41 | for name in h5_name: 42 | dataset = f[name] 43 | print(f"\nData from {name}:") 44 | 45 | if isinstance(dataset, h5py.Dataset): 46 | # Get dataset shape and type 47 | shape = dataset.shape 48 | dtype = dataset.dtype 49 | print("------------------------------------------------") 50 | print(f"Shape: {shape}, Dtype: {dtype}") 51 | # Print memory usage 52 | print(f"\nTotal dataset size: {dataset.size * dataset.dtype.itemsize / (1024**2):.2f} MB") 53 | 54 | # Special handling for datasets with 
name containing 'observations/images' 55 | if "observations/images" in name: 56 | print("Image Dataset [width, height, channels]:") 57 | print(dataset[0].dtype) 58 | # Print statistics 59 | sample = ( 60 | dataset[0] 61 | if dataset[0].dtype == "uint8" 62 | else cv2.imdecode(np.frombuffer(dataset[0], dtype=np.uint8), cv2.IMREAD_COLOR) 63 | ) 64 | 65 | cv2.imwrite(f"{name.split('/')[-1]}.jpg", sample) 66 | print(f"Sample image shape: {sample.shape}") 67 | print(f"Pixel value range: {sample.min()} - {sample.max()}") 68 | 69 | # Print corner pixels (top-left 4x4 area) 70 | print("\nTop-left 4x4 corner of first image (R,G,B channels):") 71 | print(sample[:4, :4, :]) 72 | 73 | # Print center pixel values 74 | center_y, center_x = sample.shape[0] // 2, sample.shape[1] // 2 75 | print("\nCenter pixel values (R,G,B):") 76 | print(sample[center_y, center_x, :]) 77 | 78 | else: 79 | # Standard data printing for non-image datasets 80 | data = dataset[...] 81 | if data.size > 10: 82 | print("First 10 elements:", data.flatten()[:10], "...") 83 | else: 84 | print(data) 85 | else: 86 | print(f"{name} is not a dataset.") 87 | 88 | except Exception as e: 89 | print(f"Error reading file: {e}") 90 | 91 | 92 | if __name__ == "__main__": 93 | tyro.cli(read_hdf5) 94 | -------------------------------------------------------------------------------- /test/test_local_push_to_hub.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import tyro 4 | from lerobot.datasets.lerobot_dataset import LeRobotDataset 5 | 6 | 7 | def local_push_to_hub( 8 | repo_id: str, 9 | root_path: Path = None, 10 | ): 11 | dataset = LeRobotDataset(repo_id=repo_id, root=root_path) 12 | dataset.push_to_hub(upload_large_folder=True) 13 | 14 | 15 | if __name__ == "__main__": 16 | tyro.cli(local_push_to_hub) 17 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/brainco.yml: 
-------------------------------------------------------------------------------- 1 | left: 2 | type: DexPilot # or vector 3 | urdf_path: brainco_hand/brainco_left.urdf 4 | 5 | # Target refers to the retargeting target, which is the robot hand 6 | target_joint_names: 7 | [ 8 | "left_thumb_metacarpal_joint", 9 | "left_thumb_proximal_joint", 10 | "left_index_proximal_joint", 11 | "left_middle_proximal_joint", 12 | "left_ring_proximal_joint", 13 | "left_pinky_proximal_joint", 14 | ] 15 | 16 | # for DexPilot type 17 | wrist_link_name: "base_link" 18 | finger_tip_link_names: [ "left_thumb_tip", "left_index_tip", "left_middle_tip", "left_ring_tip", "left_pinky_tip" ] 19 | # If you do not know exactly how it is used, please leave it to None for default. 20 | target_link_human_indices_dexpilot: [[ 9, 14, 19, 24, 14, 19, 24, 19, 24, 24, 0, 0, 0, 0, 0], [ 4, 4, 4, 4, 9, 9, 9, 14, 14, 19, 4, 9, 14, 19, 24]] 21 | 22 | # for vector type 23 | target_origin_link_names: ["base_link", "base_link", "base_link", "base_link", "base_link"] 24 | target_task_link_names: [ "left_thumb_tip", "left_index_tip", "left_middle_tip", "left_ring_tip", "left_pinky_tip" ] 25 | target_link_human_indices_vector: [ [ 0, 0, 0, 0, 0 ], [ 4, 9, 14, 19, 24 ] ] 26 | 27 | # Scaling factor for vector retargeting only 28 | # For example, Allegro is 1.6 times larger than normal human hand, then this scaling factor should be 1.6 29 | scaling_factor: 0.90 30 | # A smaller alpha means stronger filtering, i.e. 
more smooth but also larger latency 31 | low_pass_alpha: 0.2 32 | 33 | right: 34 | type: DexPilot # or vector 35 | urdf_path: brainco_hand/brainco_right.urdf 36 | 37 | # Target refers to the retargeting target, which is the robot hand 38 | target_joint_names: 39 | [ 40 | "right_thumb_metacarpal_joint", 41 | "right_thumb_proximal_joint", 42 | "right_index_proximal_joint", 43 | "right_middle_proximal_joint", 44 | "right_ring_proximal_joint", 45 | "right_pinky_proximal_joint", 46 | ] 47 | # for DexPilot type 48 | wrist_link_name: "base_link" 49 | finger_tip_link_names: [ "right_thumb_tip", "right_index_tip", "right_middle_tip", "right_ring_tip", "right_pinky_tip" ] 50 | target_link_human_indices_dexpilot: [[ 9, 14, 19, 24, 14, 19, 24, 19, 24, 24, 0, 0, 0, 0, 0], [ 4, 4, 4, 4, 9, 9, 9, 14, 14, 19, 4, 9, 14, 19, 24]] 51 | 52 | # for vector type 53 | target_origin_link_names: ["base_link", "base_link", "base_link", "base_link", "base_link"] 54 | target_task_link_names: [ "right_thumb_tip", "right_index_tip", "right_middle_tip", "right_ring_tip", "right_pinky_tip" ] 55 | target_link_human_indices_vector: [ [ 0, 0, 0, 0, 0 ], [ 4, 9, 14, 19, 24 ] ] 56 | 57 | # Scaling factor for vector retargeting only 58 | # For example, Allegro is 1.6 times larger than normal human hand, then this scaling factor should be 1.6 59 | scaling_factor: 0.90 60 | # A smaller alpha means stronger filtering, i.e. 
more smooth but also larger latency 61 | low_pass_alpha: 0.2 62 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_base_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_base_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_index_distal_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_index_distal_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_index_proximal_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_index_proximal_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_index_tip_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_index_tip_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_middle_distal_Link.STL: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_middle_distal_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_middle_proximal_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_middle_proximal_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_middle_tip_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_middle_tip_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_pinky_distal_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_pinky_distal_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_pinky_proximal_Link.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_pinky_proximal_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_pinky_tip_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_pinky_tip_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_ring_distal_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_ring_distal_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_ring_proximal_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_ring_proximal_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_ring_tip_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_ring_tip_Link.STL 
-------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_thumb_distal_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_thumb_distal_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_thumb_metacarpal_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_thumb_metacarpal_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_thumb_proximal_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_thumb_proximal_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_thumb_tip_Link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/left_thumb_tip_Link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_base_link.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_base_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_index_distal_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_index_distal_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_index_proximal_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_index_proximal_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_index_tip.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_index_tip.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_middle_distal_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_middle_distal_link.STL 
-------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_middle_proximal_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_middle_proximal_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_middle_tip.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_middle_tip.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_pinky_distal_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_pinky_distal_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_pinky_proximal_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_pinky_proximal_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_pinky_tip.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_pinky_tip.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_ring_distal_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_ring_distal_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_ring_proximal_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_ring_proximal_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_ring_tip.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_ring_tip.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_thumb_distal_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_thumb_distal_link.STL -------------------------------------------------------------------------------- 
/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_thumb_metacarpal_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_thumb_metacarpal_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_thumb_proximal_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_thumb_proximal_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_thumb_tip.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/brainco_hand/meshes/right_thumb_tip.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/.gitignore: -------------------------------------------------------------------------------- 1 | *.gv 2 | *.pdf 3 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/README.md: -------------------------------------------------------------------------------- 1 | # Unitree G1 Description (URDF & MJCF) 2 | 3 | ## Overview 4 | 5 | This package includes a universal humanoid robot description (URDF & MJCF) for the [Unitree G1](https://www.unitree.com/g1/), developed by [Unitree Robotics](https://www.unitree.com/). 
6 | 7 | MJCF/URDF for the G1 robot: 8 | 9 | | MJCF/URDF file name | `mode_machine` | Hip roll reduction ratio | Update status | dof#leg | dof#waist | dof#arm | dof#hand | 10 | | ----------------------------- | :------------: | :----------------------: | ------------- | :-----: | :-------: | :-----: | :------: | 11 | | `g1_23dof` | 1 | 14.5 | Beta | 6\*2 | 1 | 5\*2 | 0 | 12 | | `g1_29dof` | 2 | 14.5 | Beta | 6\*2 | 3 | 7\*2 | 0 | 13 | | `g1_29dof_with_hand` | 2 | 14.5 | Beta | 6\*2 | 3 | 7\*2 | 7\*2 | 14 | | `g1_29dof_lock_waist` | 3 | 14.5 | Beta | 6\*2 | 1 | 7\*2 | 0 | 15 | | `g1_23dof_rev_1_0` | 4 | 22.5 | Up-to-date | 6\*2 | 1 | 5\*2 | 0 | 16 | | `g1_29dof_rev_1_0` | 5 | 22.5 | Up-to-date | 6\*2 | 3 | 7\*2 | 0 | 17 | | `g1_29dof_with_hand_rev_1_0` | 5 | 22.5 | Up-to-date | 6\*2 | 3 | 7\*2 | 7\*2 | 18 | | `g1_29dof_lock_waist_rev_1_0` | 6 | 22.5 | Up-to-date | 6\*2 | 1 | 7\*2 | 0 | 19 | | `g1_dual_arm` | 9 | null | Up-to-date | 0 | 0 | 7\*2 | 0 | 20 | 21 | ## Visualization with [MuJoCo](https://github.com/google-deepmind/mujoco) 22 | 23 | 1. Open MuJoCo Viewer 24 | 25 | ```bash 26 | pip install mujoco 27 | python -m mujoco.viewer 28 | ``` 29 | 30 | 2. Drag and drop the MJCF/URDF model file (`g1_XXX.xml`/`g1_XXX.urdf`) to the MuJoCo Viewer. 
31 | 32 | ## Note for teleoperate 33 | 34 | g1_body29_hand14 is modified from [g1_29dof_with_hand_rev_1_0](https://github.com/unitreerobotics/unitree_ros/blob/master/robots/g1_description/g1_29dof_with_hand_rev_1_0.urdf) 35 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/head_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/head_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_ankle_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_ankle_pitch_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_ankle_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_ankle_roll_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_elbow_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_elbow_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_index_0_link.STL: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_index_0_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_index_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_index_1_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_middle_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_middle_0_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_middle_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_middle_1_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_palm_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_palm_link.STL 
-------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_thumb_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_thumb_0_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_thumb_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_thumb_1_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_thumb_2_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_hand_thumb_2_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_hip_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_hip_pitch_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_hip_roll_link.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_hip_roll_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_hip_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_hip_yaw_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_knee_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_knee_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_rubber_hand.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_rubber_hand.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_shoulder_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_shoulder_pitch_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_shoulder_roll_link.STL: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_shoulder_roll_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_shoulder_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_shoulder_yaw_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_wrist_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_wrist_pitch_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_wrist_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_wrist_roll_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_wrist_roll_rubber_hand.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_wrist_roll_rubber_hand.STL 
-------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/left_wrist_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/left_wrist_yaw_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/logo_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/logo_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/pelvis.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/pelvis.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/pelvis_contour_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/pelvis_contour_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_ankle_pitch_link.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_ankle_pitch_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_ankle_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_ankle_roll_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_elbow_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_elbow_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_index_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_index_0_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_index_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_index_1_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_middle_0_link.STL: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_middle_0_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_middle_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_middle_1_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_palm_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_palm_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_thumb_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_thumb_0_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_thumb_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_thumb_1_link.STL 
-------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_thumb_2_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_hand_thumb_2_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_hip_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_hip_pitch_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_hip_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_hip_roll_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_hip_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_hip_yaw_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_knee_link.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_knee_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_rubber_hand.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_rubber_hand.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_shoulder_pitch_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_shoulder_pitch_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_shoulder_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_shoulder_roll_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_shoulder_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_shoulder_yaw_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_wrist_pitch_link.STL: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_wrist_pitch_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_wrist_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_wrist_roll_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_wrist_roll_rubber_hand.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_wrist_roll_rubber_hand.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/right_wrist_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/right_wrist_yaw_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/torso_constraint_L_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/torso_constraint_L_link.STL 
-------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/torso_constraint_L_rod_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/torso_constraint_L_rod_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/torso_constraint_R_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/torso_constraint_R_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/torso_constraint_R_rod_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/torso_constraint_R_rod_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/torso_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/torso_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/torso_link_23dof_rev_1_0.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/torso_link_23dof_rev_1_0.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/torso_link_rev_1_0.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/torso_link_rev_1_0.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/waist_constraint_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/waist_constraint_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/waist_constraint_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/waist_constraint_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/waist_roll_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/waist_roll_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/waist_roll_link_rev_1_0.STL: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/waist_roll_link_rev_1_0.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/waist_support_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/waist_support_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/waist_yaw_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/waist_yaw_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/g1/meshes/waist_yaw_link_rev_1_0.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/g1/meshes/waist_yaw_link_rev_1_0.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/inspire_hand.yml: -------------------------------------------------------------------------------- 1 | left: 2 | type: DexPilot # or vector 3 | urdf_path: inspire_hand/inspire_hand_left.urdf 4 | 5 | # Target refers to the retargeting target, which is the robot hand 6 | target_joint_names: 7 | [ 8 | 'L_thumb_proximal_yaw_joint', 9 | 'L_thumb_proximal_pitch_joint', 10 | 
'L_index_proximal_joint', 11 | 'L_middle_proximal_joint', 12 | 'L_ring_proximal_joint', 13 | 'L_pinky_proximal_joint' 14 | ] 15 | 16 | # for DexPilot type 17 | wrist_link_name: "L_hand_base_link" 18 | finger_tip_link_names: [ "L_thumb_tip", "L_index_tip", "L_middle_tip", "L_ring_tip", "L_pinky_tip" ] 19 | # If you do not know exactly how it is used, please leave it to None for default. 20 | target_link_human_indices_dexpilot: [[ 9, 14, 19, 24, 14, 19, 24, 19, 24, 24, 0, 0, 0, 0, 0], [ 4, 4, 4, 4, 9, 9, 9, 14, 14, 19, 4, 9, 14, 19, 24]] 21 | 22 | # for vector type 23 | target_origin_link_names: [ "L_hand_base_link", "L_hand_base_link", "L_hand_base_link", "L_hand_base_link", "L_hand_base_link"] 24 | target_task_link_names: [ "L_thumb_tip", "L_index_tip", "L_middle_tip", "L_ring_tip", "L_pinky_tip" ] 25 | target_link_human_indices_vector: [ [ 0, 0, 0, 0, 0 ], [ 4, 9, 14, 19, 24 ] ] 26 | 27 | # Scaling factor for vector retargeting only 28 | # For example, Allegro is 1.6 times larger than normal human hand, then this scaling factor should be 1.6 29 | scaling_factor: 1.20 30 | # A smaller alpha means stronger filtering, i.e. 
more smooth but also larger latency 31 | low_pass_alpha: 0.2 32 | 33 | right: 34 | type: DexPilot # or vector 35 | urdf_path: inspire_hand/inspire_hand_right.urdf 36 | 37 | # Target refers to the retargeting target, which is the robot hand 38 | target_joint_names: 39 | [ 40 | 'R_thumb_proximal_yaw_joint', 41 | 'R_thumb_proximal_pitch_joint', 42 | 'R_index_proximal_joint', 43 | 'R_middle_proximal_joint', 44 | 'R_ring_proximal_joint', 45 | 'R_pinky_proximal_joint' 46 | ] 47 | 48 | # for DexPilot type 49 | wrist_link_name: "R_hand_base_link" 50 | finger_tip_link_names: [ "R_thumb_tip", "R_index_tip", "R_middle_tip", "R_ring_tip", "R_pinky_tip" ] 51 | # If you do not know exactly how it is used, please leave it to None for 52 | target_link_human_indices_dexpilot: [[ 9, 14, 19, 24, 14, 19, 24, 19, 24, 24, 0, 0, 0, 0, 0], [ 4, 4, 4, 4, 9, 9, 9, 14, 14, 19, 4, 9, 14, 19, 24]] 53 | 54 | target_origin_link_names: [ "R_hand_base_link", "R_hand_base_link", "R_hand_base_link", "R_hand_base_link", "R_hand_base_link"] 55 | target_task_link_names: [ "R_thumb_tip", "R_index_tip", "R_middle_tip", "R_ring_tip", "R_pinky_tip" ] 56 | target_link_human_indices_vector: [ [ 0, 0, 0, 0, 0 ], [ 4, 9, 14, 19, 24 ] ] 57 | 58 | # Scaling factor for vector retargeting only 59 | # For example, Allegro is 1.6 times larger than normal human hand, then this scaling factor should be 1.6 60 | scaling_factor: 1.20 61 | # A smaller alpha means stronger filtering, i.e. 
more smooth but also larger latency 62 | low_pass_alpha: 0.2 63 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/L_hand_base_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/L_hand_base_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link11_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link11_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link11_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link11_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link12_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link12_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link12_R.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link12_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link13_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link13_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link13_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link13_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link14_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link14_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link14_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link14_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link15_L.STL: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link15_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link15_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link15_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link16_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link16_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link16_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link16_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link17_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link17_L.STL -------------------------------------------------------------------------------- 
/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link17_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link17_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link18_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link18_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link18_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link18_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link19_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link19_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link19_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link19_R.STL 
-------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link20_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link20_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link20_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link20_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link21_L.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link21_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link21_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link21_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link22_L.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link22_L.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link22_R.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/Link22_R.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/inspire_hand/meshes/R_hand_base_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/inspire_hand/meshes/R_hand_base_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_index_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_index_0_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_index_0_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:18edc9c9e5d331c4688e0228933c38b0fb32b3e6996fc29d8b34536a4a96723f 3 | size 11539 4 | -------------------------------------------------------------------------------- 
/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_index_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_index_1_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_index_1_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:9882cf848b85cad2658908f5561d8d74fd00a334d20cc5b98600db4039589005 3 | size 19085 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_middle_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_middle_0_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_middle_0_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:18edc9c9e5d331c4688e0228933c38b0fb32b3e6996fc29d8b34536a4a96723f 3 | size 11539 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_middle_1_link.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_middle_1_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_middle_1_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:9882cf848b85cad2658908f5561d8d74fd00a334d20cc5b98600db4039589005 3 | size 19085 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_palm_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_palm_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_palm_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:cfba746337c55b9061e3f6f42410600b97bab54ddb1eecc30f4777015da38769 3 | size 42037 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_thumb_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_thumb_0_link.STL -------------------------------------------------------------------------------- 
/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_thumb_0_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:2562ae9942cd3882029c9d428553e5bef8ab22c98f3390b405066e2ff8df0372 3 | size 5096 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_thumb_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_thumb_1_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_thumb_2_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_thumb_2_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/left_hand_thumb_2_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:ba776cd2a9942d0f2b2e43e6c7499249f64c6d0e4308e975f0e1914bd63205bc 3 | size 19245 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_index_0_link.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_index_0_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_index_0_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:59cfcffc3e576588593e5837c314ed792f12ab1c7f79b343d5947a5fcd1d3315 3 | size 11509 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_index_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_index_1_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_index_1_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:933d1ffe36331935e3938920c6dbf46699f414dd907a49ba01808e7fa26f83ef 3 | size 19091 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_middle_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_middle_0_link.STL -------------------------------------------------------------------------------- 
/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_middle_0_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:59cfcffc3e576588593e5837c314ed792f12ab1c7f79b343d5947a5fcd1d3315 3 | size 11509 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_middle_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_middle_1_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_middle_1_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:933d1ffe36331935e3938920c6dbf46699f414dd907a49ba01808e7fa26f83ef 3 | size 19091 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_palm_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_palm_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_palm_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:2fe93d1076c7f821bcc1074ee58bbbb0f4e763e2a8f3ddbed467d3691a3d05eb 3 | size 42237 4 | 
-------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_thumb_0_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_thumb_0_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_thumb_0_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:569f4deb0f8f517b378c316019227d14154a127e9fc3d4d4e90fc22bf83246b2 3 | size 5024 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_thumb_1_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_thumb_1_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_thumb_2_link.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unitreerobotics/unitree_IL_lerobot/66ca89d6fffd919c001e7af06f116dca78e01cf1/unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_thumb_2_link.STL -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/meshes/right_hand_thumb_2_link.STL.convex.stl: -------------------------------------------------------------------------------- 1 | version 
https://git-lfs.github.com/spec/v1 2 | oid sha256:c0d5204702b9811b633b4a023cd505ddb36be11aed004c93c514dc1fd842711b 3 | size 19111 4 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/assets/unitree_hand/unitree_dex3.yml: -------------------------------------------------------------------------------- 1 | left: 2 | type: DexPilot # or vector 3 | urdf_path: unitree_hand/unitree_dex3_left.urdf 4 | 5 | # Target refers to the retargeting target, which is the robot hand 6 | target_joint_names: 7 | [ 8 | "left_hand_thumb_0_joint", 9 | "left_hand_thumb_1_joint", 10 | "left_hand_thumb_2_joint", 11 | "left_hand_middle_0_joint", 12 | "left_hand_middle_1_joint", 13 | "left_hand_index_0_joint", 14 | "left_hand_index_1_joint", 15 | ] 16 | 17 | # for DexPilot type 18 | wrist_link_name: "base_link" 19 | finger_tip_link_names: [ "thumb_tip", "index_tip", "middle_tip"] 20 | # If you do not know exactly how it is used, please leave it to None for default. 21 | target_link_human_indices_dexpilot: [[ 9, 14, 14, 0, 0, 0], [ 4, 4, 9, 4, 9, 14]] 22 | 23 | # for vector type 24 | target_origin_link_names: ["base_link_thumb","base_link_index","base_link_middle"] 25 | target_task_link_names: ["thumb_tip","index_tip","middle_tip"] 26 | target_link_human_indices_vector: [[0, 0, 0], [4, 9, 14]] 27 | 28 | # Scaling factor for vector retargeting only 29 | # For example, Allegro is 1.6 times larger than normal human hand, then this scaling factor should be 1.6 30 | scaling_factor: 1.0 31 | # A smaller alpha means stronger filtering, i.e. 
more smooth but also larger latency 32 | low_pass_alpha: 0.2 33 | 34 | right: 35 | type: DexPilot # or vector 36 | urdf_path: unitree_hand/unitree_dex3_right.urdf 37 | 38 | # Target refers to the retargeting target, which is the robot hand 39 | target_joint_names: 40 | [ 41 | "right_hand_thumb_0_joint", 42 | "right_hand_thumb_1_joint", 43 | "right_hand_thumb_2_joint", 44 | "right_hand_index_0_joint", 45 | "right_hand_index_1_joint", 46 | "right_hand_middle_0_joint", 47 | "right_hand_middle_1_joint", 48 | ] 49 | # for DexPilot type 50 | wrist_link_name: "base_link" 51 | finger_tip_link_names: [ "thumb_tip", "index_tip", "middle_tip"] 52 | target_link_human_indices_dexpilot: [[ 9, 14, 14, 0, 0, 0], [ 4, 4, 9, 4, 9, 14]] 53 | 54 | # for vector type 55 | target_origin_link_names: ["base_link_thumb","base_link_index","base_link_middle"] 56 | target_task_link_names: ["thumb_tip", "index_tip", "middle_tip"] 57 | target_link_human_indices_vector: [[0, 0, 0], [4, 9, 14]] 58 | 59 | # Scaling factor for vector retargeting only 60 | # For example, Allegro is 1.6 times larger than normal human hand, then this scaling factor should be 1.6 61 | scaling_factor: 1.0 62 | # A smaller alpha means stronger filtering, i.e. 
more smooth but also larger latency 63 | low_pass_alpha: 0.2 64 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/eval_g1.py: -------------------------------------------------------------------------------- 1 | """' 2 | Refer to: lerobot/lerobot/scripts/eval.py 3 | lerobot/lerobot/scripts/econtrol_robot.py 4 | lerobot/robot_devices/control_utils.py 5 | """ 6 | 7 | import time 8 | import torch 9 | import logging 10 | 11 | import numpy as np 12 | from pprint import pformat 13 | from dataclasses import asdict 14 | from torch import nn 15 | from contextlib import nullcontext 16 | from typing import Any 17 | from lerobot.policies.factory import make_policy, make_pre_post_processors 18 | from lerobot.utils.utils import ( 19 | get_safe_torch_device, 20 | init_logging, 21 | ) 22 | from lerobot.configs import parser 23 | from lerobot.datasets.lerobot_dataset import LeRobotDataset 24 | from lerobot.policies.pretrained import PreTrainedPolicy 25 | from multiprocessing.sharedctypes import SynchronizedArray 26 | from lerobot.processor.rename_processor import rename_stats 27 | from lerobot.processor import ( 28 | PolicyAction, 29 | PolicyProcessorPipeline, 30 | ) 31 | from unitree_lerobot.eval_robot.make_robot import ( 32 | setup_image_client, 33 | setup_robot_interface, 34 | process_images_and_observations, 35 | ) 36 | from unitree_lerobot.eval_robot.utils.utils import ( 37 | cleanup_resources, 38 | predict_action, 39 | to_list, 40 | to_scalar, 41 | EvalRealConfig, 42 | ) 43 | from unitree_lerobot.eval_robot.utils.rerun_visualizer import RerunLogger, visualization_data 44 | 45 | import logging_mp 46 | 47 | logging_mp.basic_config(level=logging_mp.INFO) 48 | logger_mp = logging_mp.get_logger(__name__) 49 | 50 | 51 | def eval_policy( 52 | cfg: EvalRealConfig, 53 | dataset: LeRobotDataset, 54 | policy: PreTrainedPolicy | None = None, 55 | preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]] | None = 
None, 56 | postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction] | None = None, 57 | ): 58 | assert isinstance(policy, nn.Module), "Policy must be a PyTorch nn module." 59 | 60 | logger_mp.info(f"Arguments: {cfg}") 61 | 62 | if cfg.visualization: 63 | rerun_logger = RerunLogger() 64 | 65 | # Reset policy and processor if they are provided 66 | if policy is not None and preprocessor is not None and postprocessor is not None: 67 | policy.reset() 68 | preprocessor.reset() 69 | postprocessor.reset() 70 | 71 | image_info = None 72 | try: 73 | # --- Setup Phase --- 74 | image_info = setup_image_client(cfg) 75 | robot_interface = setup_robot_interface(cfg) 76 | 77 | # Unpack interfaces for convenience 78 | arm_ctrl, arm_ik, ee_shared_mem, arm_dof, ee_dof = ( 79 | robot_interface[key] for key in ["arm_ctrl", "arm_ik", "ee_shared_mem", "arm_dof", "ee_dof"] 80 | ) 81 | tv_img_array, wrist_img_array, tv_img_shape, wrist_img_shape, is_binocular, has_wrist_cam = ( 82 | image_info[key] 83 | for key in [ 84 | "tv_img_array", 85 | "wrist_img_array", 86 | "tv_img_shape", 87 | "wrist_img_shape", 88 | "is_binocular", 89 | "has_wrist_cam", 90 | ] 91 | ) 92 | 93 | # Get initial pose from the first step of the dataset 94 | from_idx = dataset.meta.episodes["dataset_from_index"][0] 95 | step = dataset[from_idx] 96 | init_arm_pose = step["observation.state"][:arm_dof].cpu().numpy() 97 | 98 | user_input = input("Enter 's' to initialize the robot and start the evaluation: ") 99 | idx = 0 100 | print(f"user_input: {user_input}") 101 | full_state = None 102 | if user_input.lower() == "s": 103 | # "The initial positions of the robot's arm and fingers take the initial positions during data recording." 
104 | logger_mp.info("Initializing robot to starting pose...") 105 | tau = robot_interface["arm_ik"].solve_tau(init_arm_pose) 106 | robot_interface["arm_ctrl"].ctrl_dual_arm(init_arm_pose, tau) 107 | time.sleep(1.0) # Give time for the robot to move 108 | # --- Run Main Loop --- 109 | logger_mp.info(f"Starting evaluation loop at {cfg.frequency} Hz.") 110 | while True: 111 | loop_start_time = time.perf_counter() 112 | # 1. Get Observations 113 | observation, current_arm_q = process_images_and_observations( 114 | tv_img_array, wrist_img_array, tv_img_shape, wrist_img_shape, is_binocular, has_wrist_cam, arm_ctrl 115 | ) 116 | left_ee_state = right_ee_state = np.array([]) 117 | if cfg.ee: 118 | with ee_shared_mem["lock"]: 119 | full_state = np.array(ee_shared_mem["state"][:]) 120 | left_ee_state = full_state[:ee_dof] 121 | right_ee_state = full_state[ee_dof:] 122 | state_tensor = torch.from_numpy( 123 | np.concatenate((current_arm_q, left_ee_state, right_ee_state), axis=0) 124 | ).float() 125 | observation["observation.state"] = state_tensor 126 | # 2. Get Action from Policy 127 | action = predict_action( 128 | observation, 129 | policy, 130 | get_safe_torch_device(policy.config.device), 131 | preprocessor, 132 | postprocessor, 133 | policy.config.use_amp, 134 | step["task"], 135 | use_dataset=cfg.use_dataset, 136 | robot_type=None, 137 | ) 138 | action_np = action.cpu().numpy() 139 | # 3. 
Execute Action 140 | arm_action = action_np[:arm_dof] 141 | tau = arm_ik.solve_tau(arm_action) 142 | arm_ctrl.ctrl_dual_arm(arm_action, tau) 143 | 144 | if cfg.ee: 145 | ee_action_start_idx = arm_dof 146 | left_ee_action = action_np[ee_action_start_idx : ee_action_start_idx + ee_dof] 147 | right_ee_action = action_np[ee_action_start_idx + ee_dof : ee_action_start_idx + 2 * ee_dof] 148 | # logger_mp.info(f"EE Action: left {left_ee_action}, right {right_ee_action}") 149 | 150 | if isinstance(ee_shared_mem["left"], SynchronizedArray): 151 | ee_shared_mem["left"][:] = to_list(left_ee_action) 152 | ee_shared_mem["right"][:] = to_list(right_ee_action) 153 | elif hasattr(ee_shared_mem["left"], "value") and hasattr(ee_shared_mem["right"], "value"): 154 | ee_shared_mem["left"].value = to_scalar(left_ee_action) 155 | ee_shared_mem["right"].value = to_scalar(right_ee_action) 156 | 157 | if cfg.visualization: 158 | visualization_data(idx, observation, state_tensor.numpy(), action_np, rerun_logger) 159 | idx += 1 160 | # Maintain frequency 161 | time.sleep(max(0, (1.0 / cfg.frequency) - (time.perf_counter() - loop_start_time))) 162 | except Exception as e: 163 | logger_mp.info(f"An error occurred: {e}") 164 | finally: 165 | if image_info: 166 | cleanup_resources(image_info) 167 | 168 | 169 | @parser.wrap() 170 | def eval_main(cfg: EvalRealConfig): 171 | logging.info(pformat(asdict(cfg))) 172 | 173 | # Check device is available 174 | device = get_safe_torch_device(cfg.policy.device, log=True) 175 | 176 | torch.backends.cudnn.benchmark = True 177 | torch.backends.cuda.matmul.allow_tf32 = True 178 | 179 | logging.info("Making policy.") 180 | 181 | dataset = LeRobotDataset(repo_id=cfg.repo_id) 182 | 183 | policy = make_policy(cfg=cfg.policy, ds_meta=dataset.meta) 184 | policy.eval() 185 | 186 | preprocessor, postprocessor = make_pre_post_processors( 187 | policy_cfg=cfg.policy, 188 | pretrained_path=cfg.policy.pretrained_path, 189 | dataset_stats=rename_stats(dataset.meta.stats, 
cfg.rename_map), 190 | preprocessor_overrides={ 191 | "device_processor": {"device": cfg.policy.device}, 192 | "rename_observations_processor": {"rename_map": cfg.rename_map}, 193 | }, 194 | ) 195 | 196 | with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.policy.use_amp else nullcontext(): 197 | eval_policy(cfg, dataset, policy, preprocessor, postprocessor) 198 | 199 | logging.info("End of eval") 200 | 201 | 202 | if __name__ == "__main__": 203 | init_logging() 204 | eval_main() 205 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/eval_g1_dataset.py: -------------------------------------------------------------------------------- 1 | """' 2 | Refer to: lerobot/lerobot/scripts/eval.py 3 | lerobot/lerobot/scripts/econtrol_robot.py 4 | lerobot/robot_devices/control_utils.py 5 | """ 6 | 7 | import torch 8 | import tqdm 9 | import logging 10 | import time 11 | import numpy as np 12 | import matplotlib.pyplot as plt 13 | from pprint import pformat 14 | from typing import Any 15 | from dataclasses import asdict 16 | from torch import nn 17 | from contextlib import nullcontext 18 | from lerobot.policies.factory import make_policy, make_pre_post_processors 19 | from lerobot.utils.utils import ( 20 | get_safe_torch_device, 21 | init_logging, 22 | ) 23 | from lerobot.configs import parser 24 | from lerobot.datasets.lerobot_dataset import LeRobotDataset 25 | from lerobot.policies.pretrained import PreTrainedPolicy 26 | from multiprocessing.sharedctypes import SynchronizedArray 27 | from lerobot.processor.rename_processor import rename_stats 28 | from lerobot.processor import ( 29 | PolicyAction, 30 | PolicyProcessorPipeline, 31 | ) 32 | 33 | from unitree_lerobot.eval_robot.utils.utils import ( 34 | extract_observation, 35 | predict_action, 36 | to_list, 37 | to_scalar, 38 | EvalRealConfig, 39 | ) 40 | from unitree_lerobot.eval_robot.utils.rerun_visualizer import RerunLogger, visualization_data 41 | 

import logging_mp

logging_mp.basic_config(level=logging_mp.INFO)
logger_mp = logging_mp.get_logger(__name__)


def eval_policy(
    cfg: EvalRealConfig,
    dataset: LeRobotDataset,
    policy: PreTrainedPolicy | None = None,
    preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]] | None = None,
    postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction] | None = None,
):
    """Replay the first dataset episode through `policy` (open-loop).

    For every step of episode 0, feed the recorded observation to the policy,
    record predicted vs. ground-truth actions, optionally forward actions to
    the real robot (`cfg.send_real_robot`), and finally save a per-dimension
    comparison plot to ``figure.png``.
    """
    assert isinstance(policy, nn.Module), "Policy must be a PyTorch nn module."

    logger_mp.info(f"Arguments: {cfg}")

    if cfg.visualization:
        rerun_logger = RerunLogger()

    # Reset policy and processor if they are provided
    if policy is not None and preprocessor is not None and postprocessor is not None:
        policy.reset()
        preprocessor.reset()
        postprocessor.reset()

    # init pose dataset.meta.episodes["dataset_from_index"][episode_index]
    from_idx = dataset.meta.episodes["dataset_from_index"][0]
    step = dataset[from_idx]
    to_idx = dataset.meta.episodes["dataset_to_index"][0]

    ground_truth_actions = []
    predicted_actions = []

    if cfg.send_real_robot:
        from unitree_lerobot.eval_robot.make_robot import setup_robot_interface

        robot_interface = setup_robot_interface(cfg)
        arm_ctrl, arm_ik, ee_shared_mem, arm_dof, ee_dof = (
            robot_interface[key] for key in ["arm_ctrl", "arm_ik", "ee_shared_mem", "arm_dof", "ee_dof"]
        )
        init_arm_pose = step["observation.state"][:arm_dof].cpu().numpy()

    # ===============init robot=====================
    user_input = input("Please enter the start signal (enter 's' to start the subsequent program):")
    if user_input.lower() == "s":
        if cfg.send_real_robot:
            # Initialize robot to starting pose
            logger_mp.info("Initializing robot to starting pose...")
            tau = robot_interface["arm_ik"].solve_tau(init_arm_pose)
            robot_interface["arm_ctrl"].ctrl_dual_arm(init_arm_pose, tau)

            time.sleep(1)

        # NOTE(review): replay + plotting kept inside the 's' branch so the
        # plot code never runs on empty arrays — confirm against upstream.
        for step_idx in tqdm.tqdm(range(from_idx, to_idx)):
            loop_start_time = time.perf_counter()

            step = dataset[step_idx]
            observation = extract_observation(step)

            action = predict_action(
                observation,
                policy,
                get_safe_torch_device(policy.config.device),
                preprocessor,
                postprocessor,
                policy.config.use_amp,
                step["task"],
                use_dataset=True,
                robot_type=None,
            )
            action_np = action.cpu().numpy()

            ground_truth_actions.append(step["action"].numpy())
            predicted_actions.append(action_np)

            if cfg.send_real_robot:
                # Execute Action
                arm_action = action_np[:arm_dof]
                tau = arm_ik.solve_tau(arm_action)
                arm_ctrl.ctrl_dual_arm(arm_action, tau)
                # logger_mp.info(f"Arm Action: {arm_action}")

                if cfg.ee:
                    ee_action_start_idx = arm_dof
                    left_ee_action = action_np[ee_action_start_idx : ee_action_start_idx + ee_dof]
                    right_ee_action = action_np[ee_action_start_idx + ee_dof : ee_action_start_idx + 2 * ee_dof]
                    # logger_mp.info(f"EE Action: left {left_ee_action}, right {right_ee_action}")

                    if isinstance(ee_shared_mem["left"], SynchronizedArray):
                        ee_shared_mem["left"][:] = to_list(left_ee_action)
                        ee_shared_mem["right"][:] = to_list(right_ee_action)
                    elif hasattr(ee_shared_mem["left"], "value") and hasattr(ee_shared_mem["right"], "value"):
                        ee_shared_mem["left"].value = to_scalar(left_ee_action)
                        ee_shared_mem["right"].value = to_scalar(right_ee_action)

            if cfg.visualization:
                visualization_data(step_idx, observation, observation["observation.state"], action_np, rerun_logger)

            # Maintain frequency
            time.sleep(max(0, (1.0 / cfg.frequency) - (time.perf_counter() - loop_start_time)))

        ground_truth_actions = np.array(ground_truth_actions)
        predicted_actions = np.array(predicted_actions)

        # Get the number of timesteps and action dimensions
        n_timesteps, n_dims = ground_truth_actions.shape

        # Create a figure with subplots for each action dimension
        fig, axes = plt.subplots(n_dims, 1, figsize=(12, 4 * n_dims), sharex=True)
        fig.suptitle("Ground Truth vs Predicted Actions")

        # Plot each dimension
        for i in range(n_dims):
            ax = axes[i] if n_dims > 1 else axes

            ax.plot(ground_truth_actions[:, i], label="Ground Truth", color="blue")
            ax.plot(predicted_actions[:, i], label="Predicted", color="red", linestyle="--")
            ax.set_ylabel(f"Dim {i + 1}")
            ax.legend()

        # Set common x-label. BUGFIX: `axes` is a bare Axes (not an array)
        # when n_dims == 1, so `axes[-1]` raised TypeError in that case.
        (axes[-1] if n_dims > 1 else axes).set_xlabel("Timestep")

        plt.tight_layout()
        # plt.show()

        time.sleep(1)
        plt.savefig("figure.png")


@parser.wrap()
def eval_main(cfg: EvalRealConfig):
    """Entry point: build dataset, policy and processors, then replay-evaluate."""
    logging.info(pformat(asdict(cfg)))

    # Check device is available
    device = get_safe_torch_device(cfg.policy.device, log=True)

    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True

    logging.info("Making policy.")

    dataset = LeRobotDataset(repo_id=cfg.repo_id)

    policy = make_policy(cfg=cfg.policy, ds_meta=dataset.meta)
    policy.eval()

    preprocessor, postprocessor = make_pre_post_processors(
        policy_cfg=cfg.policy,
        pretrained_path=cfg.policy.pretrained_path,
        dataset_stats=rename_stats(dataset.meta.stats, cfg.rename_map),
        preprocessor_overrides={
            "device_processor": {"device": cfg.policy.device},
            "rename_observations_processor": {"rename_map": cfg.rename_map},
        },
    )

    with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.policy.use_amp else nullcontext():
        eval_policy(cfg, dataset, policy, preprocessor, postprocessor)
logging.info("End of eval") 205 | 206 | 207 | if __name__ == "__main__": 208 | init_logging() 209 | eval_main() 210 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/eval_g1_sim.py: -------------------------------------------------------------------------------- 1 | """' 2 | Refer to: lerobot/lerobot/scripts/eval.py 3 | lerobot/lerobot/scripts/econtrol_robot.py 4 | lerobot/robot_devices/control_utils.py 5 | """ 6 | 7 | import time 8 | import torch 9 | import logging 10 | 11 | import numpy as np 12 | from pprint import pformat 13 | from dataclasses import asdict 14 | from torch import nn 15 | from contextlib import nullcontext 16 | from typing import Any 17 | 18 | from lerobot.policies.factory import make_policy, make_pre_post_processors 19 | from lerobot.utils.utils import ( 20 | get_safe_torch_device, 21 | init_logging, 22 | ) 23 | from lerobot.configs import parser 24 | from lerobot.datasets.lerobot_dataset import LeRobotDataset 25 | from lerobot.policies.pretrained import PreTrainedPolicy 26 | from multiprocessing.sharedctypes import SynchronizedArray 27 | from lerobot.processor.rename_processor import rename_stats 28 | from lerobot.processor import ( 29 | PolicyAction, 30 | PolicyProcessorPipeline, 31 | ) 32 | from unitree_lerobot.eval_robot.make_robot import ( 33 | setup_image_client, 34 | setup_robot_interface, 35 | process_images_and_observations, 36 | ) 37 | from unitree_lerobot.eval_robot.utils.utils import ( 38 | cleanup_resources, 39 | predict_action, 40 | to_list, 41 | to_scalar, 42 | ) 43 | from unitree_lerobot.eval_robot.utils.sim_savedata_utils import ( 44 | EvalRealConfig, 45 | process_data_add, 46 | is_success, 47 | ) 48 | from unitree_lerobot.eval_robot.utils.rerun_visualizer import RerunLogger, visualization_data 49 | 50 | import logging_mp 51 | 52 | logging_mp.basic_config(level=logging_mp.INFO) 53 | logger_mp = logging_mp.get_logger(__name__) 54 | 55 | 56 | def eval_policy( 57 | cfg: 
EvalRealConfig, 58 | dataset: LeRobotDataset, 59 | policy: PreTrainedPolicy | None = None, 60 | preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]] | None = None, 61 | postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction] | None = None, 62 | ): 63 | assert isinstance(policy, nn.Module), "Policy must be a PyTorch nn module." 64 | 65 | logger_mp.info(f"Arguments: {cfg}") 66 | 67 | if cfg.visualization: 68 | rerun_logger = RerunLogger() 69 | 70 | # Reset policy and processor if they are provided 71 | if policy is not None and preprocessor is not None and postprocessor is not None: 72 | policy.reset() 73 | preprocessor.reset() 74 | postprocessor.reset() 75 | 76 | image_info = None 77 | try: 78 | # --- Setup Phase --- 79 | image_info = setup_image_client(cfg) 80 | robot_interface = setup_robot_interface(cfg) 81 | 82 | # Unpack interfaces for convenience 83 | ( 84 | arm_ctrl, 85 | arm_ik, 86 | ee_shared_mem, 87 | arm_dof, 88 | ee_dof, 89 | sim_state_subscriber, 90 | sim_reward_subscriber, 91 | episode_writer, 92 | reset_pose_publisher, 93 | ) = ( 94 | robot_interface[key] 95 | for key in [ 96 | "arm_ctrl", 97 | "arm_ik", 98 | "ee_shared_mem", 99 | "arm_dof", 100 | "ee_dof", 101 | "sim_state_subscriber", 102 | "sim_reward_subscriber", 103 | "episode_writer", 104 | "reset_pose_publisher", 105 | ] 106 | ) 107 | tv_img_array, wrist_img_array, tv_img_shape, wrist_img_shape, is_binocular, has_wrist_cam = ( 108 | image_info[key] 109 | for key in [ 110 | "tv_img_array", 111 | "wrist_img_array", 112 | "tv_img_shape", 113 | "wrist_img_shape", 114 | "is_binocular", 115 | "has_wrist_cam", 116 | ] 117 | ) 118 | 119 | # Get initial pose from the first step of the dataset 120 | from_idx = dataset.meta.episodes["dataset_from_index"][0] 121 | step = dataset[from_idx] 122 | init_arm_pose = step["observation.state"][:arm_dof].cpu().numpy() 123 | 124 | user_input = input("Enter 's' to initialize the robot and start the evaluation: ") 125 | idx = 0 126 | 
print(f"user_input: {user_input}") 127 | full_state = None 128 | 129 | reward_stats = { 130 | "reward_sum": 0.0, 131 | "episode_num": 0.0, 132 | } 133 | 134 | if user_input.lower() == "s": 135 | # "The initial positions of the robot's arm and fingers take the initial positions during data recording." 136 | logger_mp.info("Initializing robot to starting pose...") 137 | tau = robot_interface["arm_ik"].solve_tau(init_arm_pose) 138 | robot_interface["arm_ctrl"].ctrl_dual_arm(init_arm_pose, tau) 139 | time.sleep(1.0) # Give time for the robot to move 140 | 141 | # --- Run Main Loop --- 142 | logger_mp.info(f"Starting evaluation loop at {cfg.frequency} Hz.") 143 | while True: 144 | if cfg.save_data: 145 | if reward_stats["episode_num"] == 0: 146 | episode_writer.create_episode() 147 | loop_start_time = time.perf_counter() 148 | 149 | # 1. Get Observations 150 | observation, current_arm_q = process_images_and_observations( 151 | tv_img_array, wrist_img_array, tv_img_shape, wrist_img_shape, is_binocular, has_wrist_cam, arm_ctrl 152 | ) 153 | left_ee_state = right_ee_state = np.array([]) 154 | if cfg.ee: 155 | with ee_shared_mem["lock"]: 156 | full_state = np.array(ee_shared_mem["state"][:]) 157 | left_ee_state = full_state[:ee_dof] 158 | right_ee_state = full_state[ee_dof:] 159 | state_tensor = torch.from_numpy( 160 | np.concatenate((current_arm_q, left_ee_state, right_ee_state), axis=0) 161 | ).float() 162 | observation["observation.state"] = state_tensor 163 | # 2. Get Action from Policy 164 | action = predict_action( 165 | observation, 166 | policy, 167 | get_safe_torch_device(policy.config.device), 168 | preprocessor, 169 | postprocessor, 170 | policy.config.use_amp, 171 | step["task"], 172 | use_dataset=cfg.use_dataset, 173 | robot_type=None, 174 | ) 175 | action_np = action.cpu().numpy() 176 | # 3. 
Execute Action 177 | arm_action = action_np[:arm_dof] 178 | tau = arm_ik.solve_tau(arm_action) 179 | arm_ctrl.ctrl_dual_arm(arm_action, tau) 180 | 181 | if cfg.ee: 182 | ee_action_start_idx = arm_dof 183 | left_ee_action = action_np[ee_action_start_idx : ee_action_start_idx + ee_dof] 184 | right_ee_action = action_np[ee_action_start_idx + ee_dof : ee_action_start_idx + 2 * ee_dof] 185 | # logger_mp.info(f"EE Action: left {left_ee_action}, right {right_ee_action}") 186 | 187 | if isinstance(ee_shared_mem["left"], SynchronizedArray): 188 | ee_shared_mem["left"][:] = to_list(left_ee_action) 189 | ee_shared_mem["right"][:] = to_list(right_ee_action) 190 | elif hasattr(ee_shared_mem["left"], "value") and hasattr(ee_shared_mem["right"], "value"): 191 | ee_shared_mem["left"].value = to_scalar(left_ee_action) 192 | ee_shared_mem["right"].value = to_scalar(right_ee_action) 193 | # save data 194 | if cfg.save_data: 195 | process_data_add(episode_writer, observation, current_arm_q, full_state, action, arm_dof, ee_dof) 196 | 197 | is_success( 198 | sim_reward_subscriber, 199 | episode_writer, 200 | reset_pose_publisher, 201 | policy, 202 | cfg, 203 | reward_stats, 204 | init_arm_pose, 205 | robot_interface, 206 | ) 207 | 208 | if cfg.visualization: 209 | visualization_data(idx, observation, state_tensor.numpy(), action_np, rerun_logger) 210 | idx += 1 211 | reward_stats["episode_num"] = reward_stats["episode_num"] + 1 212 | # Maintain frequency 213 | time.sleep(max(0, (1.0 / cfg.frequency) - (time.perf_counter() - loop_start_time))) 214 | 215 | except Exception as e: 216 | logger_mp.info(f"An error occurred: {e}") 217 | finally: 218 | if image_info: 219 | cleanup_resources(image_info) 220 | # Clean up sim state subscriber if it exists 221 | if "sim_state_subscriber" in locals() and sim_state_subscriber: 222 | sim_state_subscriber.stop_subscribe() 223 | logger_mp.info("SimStateSubscriber cleaned up") 224 | if "sim_reward_subscriber" in locals() and sim_reward_subscriber: 225 | 
sim_reward_subscriber.stop_subscribe() 226 | logger_mp.info("SimRewardSubscriber cleaned up") 227 | 228 | 229 | @parser.wrap() 230 | def eval_main(cfg: EvalRealConfig): 231 | logging.info(pformat(asdict(cfg))) 232 | 233 | # Check device is available 234 | device = get_safe_torch_device(cfg.policy.device, log=True) 235 | 236 | torch.backends.cudnn.benchmark = True 237 | torch.backends.cuda.matmul.allow_tf32 = True 238 | 239 | logging.info("Making policy.") 240 | 241 | dataset = LeRobotDataset(repo_id=cfg.repo_id) 242 | 243 | policy = make_policy(cfg=cfg.policy, ds_meta=dataset.meta) 244 | policy.eval() 245 | 246 | preprocessor, postprocessor = make_pre_post_processors( 247 | policy_cfg=cfg.policy, 248 | pretrained_path=cfg.policy.pretrained_path, 249 | dataset_stats=rename_stats(dataset.meta.stats, cfg.rename_map), 250 | preprocessor_overrides={ 251 | "device_processor": {"device": cfg.policy.device}, 252 | "rename_observations_processor": {"rename_map": cfg.rename_map}, 253 | }, 254 | ) 255 | 256 | with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.policy.use_amp else nullcontext(): 257 | eval_policy(cfg, dataset, policy, preprocessor, postprocessor) 258 | 259 | logging.info("End of eval") 260 | 261 | 262 | if __name__ == "__main__": 263 | init_logging() 264 | eval_main() 265 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/image_server/image_client.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import zmq 3 | import numpy as np 4 | import time 5 | import struct 6 | from collections import deque 7 | from multiprocessing import shared_memory 8 | 9 | 10 | class ImageClient: 11 | def __init__( 12 | self, 13 | tv_img_shape=None, 14 | tv_img_shm_name=None, 15 | wrist_img_shape=None, 16 | wrist_img_shm_name=None, 17 | image_show=False, 18 | server_address="192.168.123.164", 19 | port=5555, 20 | Unit_Test=False, 21 | ): 22 | """ 23 | 
tv_img_shape: User's expected head camera resolution shape (H, W, C). It should match the output of the image service terminal. 24 | tv_img_shm_name: Shared memory is used to easily transfer images across processes to the Vuer. 25 | wrist_img_shape: User's expected wrist camera resolution shape (H, W, C). It should maintain the same shape as tv_img_shape. 26 | wrist_img_shm_name: Shared memory is used to easily transfer images. 27 | image_show: Whether to display received images in real time. 28 | server_address: The ip address to execute the image server script. 29 | port: The port number to bind to. It should be the same as the image server. 30 | Unit_Test: When both server and client are True, it can be used to test the image transfer latency, \ 31 | network jitter, frame loss rate and other information. 32 | """ 33 | self.running = True 34 | self._image_show = image_show 35 | self._server_address = server_address 36 | self._port = port 37 | 38 | self.tv_img_shape = tv_img_shape 39 | self.wrist_img_shape = wrist_img_shape 40 | 41 | self.tv_enable_shm = False 42 | if self.tv_img_shape is not None and tv_img_shm_name is not None: 43 | self.tv_image_shm = shared_memory.SharedMemory(name=tv_img_shm_name) 44 | self.tv_img_array = np.ndarray(tv_img_shape, dtype=np.uint8, buffer=self.tv_image_shm.buf) 45 | self.tv_enable_shm = True 46 | 47 | self.wrist_enable_shm = False 48 | if self.wrist_img_shape is not None and wrist_img_shm_name is not None: 49 | self.wrist_image_shm = shared_memory.SharedMemory(name=wrist_img_shm_name) 50 | self.wrist_img_array = np.ndarray(wrist_img_shape, dtype=np.uint8, buffer=self.wrist_image_shm.buf) 51 | self.wrist_enable_shm = True 52 | 53 | # Performance evaluation parameters 54 | self._enable_performance_eval = Unit_Test 55 | if self._enable_performance_eval: 56 | self._init_performance_metrics() 57 | 58 | def _init_performance_metrics(self): 59 | self._frame_count = 0 # Total frames received 60 | self._last_frame_id = -1 # Last received 
frame ID 61 | 62 | # Real-time FPS calculation using a time window 63 | self._time_window = 1.0 # Time window size (in seconds) 64 | self._frame_times = deque() # Timestamps of frames received within the time window 65 | 66 | # Data transmission quality metrics 67 | self._latencies = deque() # Latencies of frames within the time window 68 | self._lost_frames = 0 # Total lost frames 69 | self._total_frames = 0 # Expected total frames based on frame IDs 70 | 71 | def _update_performance_metrics(self, timestamp, frame_id, receive_time): 72 | # Update latency 73 | latency = receive_time - timestamp 74 | self._latencies.append(latency) 75 | 76 | # Remove latencies outside the time window 77 | while self._latencies and self._frame_times and self._latencies[0] < receive_time - self._time_window: 78 | self._latencies.popleft() 79 | 80 | # Update frame times 81 | self._frame_times.append(receive_time) 82 | # Remove timestamps outside the time window 83 | while self._frame_times and self._frame_times[0] < receive_time - self._time_window: 84 | self._frame_times.popleft() 85 | 86 | # Update frame counts for lost frame calculation 87 | expected_frame_id = self._last_frame_id + 1 if self._last_frame_id != -1 else frame_id 88 | if frame_id != expected_frame_id: 89 | lost = frame_id - expected_frame_id 90 | if lost < 0: 91 | print(f"[Image Client] Received out-of-order frame ID: {frame_id}") 92 | else: 93 | self._lost_frames += lost 94 | print( 95 | f"[Image Client] Detected lost frames: {lost}, Expected frame ID: {expected_frame_id}, Received frame ID: {frame_id}" 96 | ) 97 | self._last_frame_id = frame_id 98 | self._total_frames = frame_id + 1 99 | 100 | self._frame_count += 1 101 | 102 | def _print_performance_metrics(self, receive_time): 103 | if self._frame_count % 30 == 0: 104 | # Calculate real-time FPS 105 | real_time_fps = len(self._frame_times) / self._time_window if self._time_window > 0 else 0 106 | 107 | # Calculate latency metrics 108 | if self._latencies: 109 | 
avg_latency = sum(self._latencies) / len(self._latencies) 110 | max_latency = max(self._latencies) 111 | min_latency = min(self._latencies) 112 | jitter = max_latency - min_latency 113 | else: 114 | avg_latency = max_latency = min_latency = jitter = 0 115 | 116 | # Calculate lost frame rate 117 | lost_frame_rate = (self._lost_frames / self._total_frames) * 100 if self._total_frames > 0 else 0 118 | 119 | print( 120 | f"[Image Client] Real-time FPS: {real_time_fps:.2f}, Avg Latency: {avg_latency * 1000:.2f} ms, Max Latency: {max_latency * 1000:.2f} ms, \ 121 | Min Latency: {min_latency * 1000:.2f} ms, Jitter: {jitter * 1000:.2f} ms, Lost Frame Rate: {lost_frame_rate:.2f}%" 122 | ) 123 | 124 | def _close(self): 125 | self._socket.close() 126 | self._context.term() 127 | if self._image_show: 128 | cv2.destroyAllWindows() 129 | print("Image client has been closed.") 130 | 131 | def receive_process(self): 132 | # Set up ZeroMQ context and socket 133 | self._context = zmq.Context() 134 | self._socket = self._context.socket(zmq.SUB) 135 | self._socket.connect(f"tcp://{self._server_address}:{self._port}") 136 | self._socket.setsockopt_string(zmq.SUBSCRIBE, "") 137 | 138 | print("\nImage client has started, waiting to receive data...") 139 | try: 140 | while self.running: 141 | # Receive message 142 | message = self._socket.recv() 143 | receive_time = time.time() 144 | 145 | if self._enable_performance_eval: 146 | header_size = struct.calcsize("dI") 147 | try: 148 | # Attempt to extract header and image data 149 | header = message[:header_size] 150 | jpg_bytes = message[header_size:] 151 | timestamp, frame_id = struct.unpack("dI", header) 152 | except struct.error as e: 153 | print(f"[Image Client] Error unpacking header: {e}, discarding message.") 154 | continue 155 | else: 156 | # No header, entire message is image data 157 | jpg_bytes = message 158 | # Decode image 159 | np_img = np.frombuffer(jpg_bytes, dtype=np.uint8) 160 | current_image = cv2.imdecode(np_img, 
cv2.IMREAD_COLOR) 161 | if current_image is None: 162 | print("[Image Client] Failed to decode image.") 163 | continue 164 | 165 | if self.tv_enable_shm: 166 | np.copyto(self.tv_img_array, np.array(current_image[:, : self.tv_img_shape[1]])) 167 | 168 | if self.wrist_enable_shm: 169 | np.copyto(self.wrist_img_array, np.array(current_image[:, -self.wrist_img_shape[1] :])) 170 | 171 | if self._image_show: 172 | height, width = current_image.shape[:2] 173 | resized_image = cv2.resize(current_image, (width // 2, height // 2)) 174 | cv2.imshow("Image Client Stream", resized_image) 175 | if cv2.waitKey(1) & 0xFF == ord("q"): 176 | self.running = False 177 | 178 | if self._enable_performance_eval: 179 | self._update_performance_metrics(timestamp, frame_id, receive_time) 180 | self._print_performance_metrics(receive_time) 181 | 182 | except KeyboardInterrupt: 183 | print("Image client interrupted by user.") 184 | except Exception as e: 185 | print(f"[Image Client] An error occurred while receiving data: {e}") 186 | finally: 187 | self._close() 188 | 189 | 190 | if __name__ == "__main__": 191 | # example1 192 | # tv_img_shape = (480, 1280, 3) 193 | # img_shm = shared_memory.SharedMemory(create=True, size=np.prod(tv_img_shape) * np.uint8().itemsize) 194 | # img_array = np.ndarray(tv_img_shape, dtype=np.uint8, buffer=img_shm.buf) 195 | # img_client = ImageClient(tv_img_shape = tv_img_shape, tv_img_shm_name = img_shm.name) 196 | # img_client.receive_process() 197 | 198 | # example2 199 | # Initialize the client with performance evaluation enabled 200 | # client = ImageClient(image_show = True, server_address='127.0.0.1', Unit_Test=True) # local test 201 | client = ImageClient(image_show=True, server_address="192.168.123.164", Unit_Test=False) # deployment test 202 | client.receive_process() 203 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/make_robot.py: 
-------------------------------------------------------------------------------- 1 | from multiprocessing import shared_memory, Value, Array, Lock 2 | from typing import Any 3 | import numpy as np 4 | import argparse 5 | import threading 6 | import torch 7 | from unitree_lerobot.eval_robot.image_server.image_client import ImageClient 8 | from unitree_lerobot.eval_robot.robot_control.robot_arm import ( 9 | G1_29_ArmController, 10 | G1_23_ArmController, 11 | ) 12 | from unitree_lerobot.eval_robot.robot_control.robot_arm_ik import G1_29_ArmIK, G1_23_ArmIK 13 | from unitree_lerobot.eval_robot.robot_control.robot_hand_unitree import ( 14 | Dex3_1_Controller, 15 | Dex1_1_Gripper_Controller, 16 | ) 17 | 18 | from unitree_lerobot.eval_robot.utils.episode_writer import EpisodeWriter 19 | 20 | from unitree_lerobot.eval_robot.robot_control.robot_hand_inspire import Inspire_Controller 21 | from unitree_lerobot.eval_robot.robot_control.robot_hand_brainco import Brainco_Controller 22 | 23 | 24 | from unitree_sdk2py.core.channel import ChannelPublisher 25 | from unitree_sdk2py.idl.std_msgs.msg.dds_ import String_ 26 | 27 | import logging_mp 28 | 29 | logging_mp.basic_config(level=logging_mp.INFO) 30 | logger_mp = logging_mp.get_logger(__name__) 31 | 32 | # Configuration for robot arms 33 | ARM_CONFIG = { 34 | "G1_29": {"controller": G1_29_ArmController, "ik_solver": G1_29_ArmIK, "dof": 14}, 35 | "G1_23": {"controller": G1_23_ArmController, "ik_solver": G1_23_ArmIK, "dof": 14}, 36 | # Add other arms here 37 | } 38 | 39 | # Configuration for end-effectors 40 | EE_CONFIG: dict[str, dict[str, Any]] = { 41 | "dex3": { 42 | "controller": Dex3_1_Controller, 43 | "dof": 7, 44 | "shared_mem_type": "Array", 45 | "shared_mem_size": 7, 46 | # "out_len": 14, 47 | }, 48 | "dex1": { 49 | "controller": Dex1_1_Gripper_Controller, 50 | "dof": 1, 51 | "shared_mem_type": "Value", 52 | # "out_len": 2, 53 | }, 54 | "inspire1": { 55 | "controller": Inspire_Controller, 56 | "dof": 6, 57 | 
"shared_mem_type": "Array", 58 | "shared_mem_size": 6, 59 | # "out_len": 12, 60 | }, 61 | "brainco": { 62 | "controller": Brainco_Controller, 63 | "dof": 6, 64 | "shared_mem_type": "Array", 65 | "shared_mem_size": 6, 66 | # "out_len": 12, 67 | }, 68 | } 69 | 70 | 71 | def setup_image_client(args: argparse.Namespace) -> dict[str, Any]: 72 | """Initializes and starts the image client and shared memory.""" 73 | # image client: img_config should be the same as the configuration in image_server.py (of Robot's development computing unit) 74 | if getattr(args, "sim", False): 75 | img_config = { 76 | "fps": 30, 77 | "head_camera_type": "opencv", 78 | "head_camera_image_shape": [480, 640], # Head camera resolution 79 | "head_camera_id_numbers": [0], 80 | "wrist_camera_type": "opencv", 81 | "wrist_camera_image_shape": [480, 640], # Wrist camera resolution 82 | "wrist_camera_id_numbers": [2, 4], 83 | } 84 | else: 85 | img_config = { 86 | "fps": 30, 87 | "head_camera_type": "opencv", 88 | "head_camera_image_shape": [480, 1280], # Head camera resolution 89 | "head_camera_id_numbers": [0], 90 | "wrist_camera_type": "opencv", 91 | "wrist_camera_image_shape": [480, 640], # Wrist camera resolution 92 | "wrist_camera_id_numbers": [2, 4], 93 | } 94 | 95 | ASPECT_RATIO_THRESHOLD = 2.0 # If the aspect ratio exceeds this value, it is considered binocular 96 | if len(img_config["head_camera_id_numbers"]) > 1 or ( 97 | img_config["head_camera_image_shape"][1] / img_config["head_camera_image_shape"][0] > ASPECT_RATIO_THRESHOLD 98 | ): 99 | BINOCULAR = True 100 | else: 101 | BINOCULAR = False 102 | if "wrist_camera_type" in img_config: 103 | WRIST = True 104 | else: 105 | WRIST = False 106 | 107 | if BINOCULAR and not ( 108 | img_config["head_camera_image_shape"][1] / img_config["head_camera_image_shape"][0] > ASPECT_RATIO_THRESHOLD 109 | ): 110 | tv_img_shape = (img_config["head_camera_image_shape"][0], img_config["head_camera_image_shape"][1] * 2, 3) 111 | else: 112 | tv_img_shape = 
(img_config["head_camera_image_shape"][0], img_config["head_camera_image_shape"][1], 3) 113 | 114 | tv_img_shm = shared_memory.SharedMemory(create=True, size=np.prod(tv_img_shape) * np.uint8().itemsize) 115 | tv_img_array = np.ndarray(tv_img_shape, dtype=np.uint8, buffer=tv_img_shm.buf) 116 | 117 | if WRIST and getattr(args, "sim", False): 118 | wrist_img_shape = (img_config["wrist_camera_image_shape"][0], img_config["wrist_camera_image_shape"][1] * 2, 3) 119 | wrist_img_shm = shared_memory.SharedMemory(create=True, size=np.prod(wrist_img_shape) * np.uint8().itemsize) 120 | wrist_img_array = np.ndarray(wrist_img_shape, dtype=np.uint8, buffer=wrist_img_shm.buf) 121 | img_client = ImageClient( 122 | tv_img_shape=tv_img_shape, 123 | tv_img_shm_name=tv_img_shm.name, 124 | wrist_img_shape=wrist_img_shape, 125 | wrist_img_shm_name=wrist_img_shm.name, 126 | server_address="127.0.0.1", 127 | ) 128 | elif WRIST and not getattr(args, "sim", False): 129 | wrist_img_shape = (img_config["wrist_camera_image_shape"][0], img_config["wrist_camera_image_shape"][1] * 2, 3) 130 | wrist_img_shm = shared_memory.SharedMemory(create=True, size=np.prod(wrist_img_shape) * np.uint8().itemsize) 131 | wrist_img_array = np.ndarray(wrist_img_shape, dtype=np.uint8, buffer=wrist_img_shm.buf) 132 | img_client = ImageClient( 133 | tv_img_shape=tv_img_shape, 134 | tv_img_shm_name=tv_img_shm.name, 135 | wrist_img_shape=wrist_img_shape, 136 | wrist_img_shm_name=wrist_img_shm.name, 137 | ) 138 | else: 139 | img_client = ImageClient(tv_img_shape=tv_img_shape, tv_img_shm_name=tv_img_shm.name) 140 | 141 | has_wrist_cam = "wrist_camera_type" in img_config 142 | 143 | image_receive_thread = threading.Thread(target=img_client.receive_process, daemon=True) 144 | image_receive_thread.daemon = True 145 | image_receive_thread.start() 146 | 147 | return { 148 | "tv_img_array": tv_img_array, 149 | "wrist_img_array": wrist_img_array, 150 | "tv_img_shape": tv_img_shape, 151 | "wrist_img_shape": wrist_img_shape, 152 | 
"is_binocular": BINOCULAR, 153 | "has_wrist_cam": has_wrist_cam, 154 | "shm_resources": [tv_img_shm, wrist_img_shm], 155 | } 156 | 157 | 158 | def _resolve_out_len(spec: dict[str, Any]) -> int: 159 | return int(spec.get("out_len", 2 * int(spec["dof"]))) 160 | 161 | 162 | def setup_robot_interface(args: argparse.Namespace) -> dict[str, Any]: 163 | """ 164 | Initializes robot controllers and IK solvers based on configuration. 165 | """ 166 | # ---------- Arm ---------- 167 | arm_spec = ARM_CONFIG[args.arm] 168 | arm_ik = arm_spec["ik_solver"]() 169 | is_sim = getattr(args, "sim", False) 170 | arm_ctrl = arm_spec["controller"](motion_mode=args.motion, simulation_mode=is_sim) 171 | 172 | # ---------- End Effector (optional) ---------- 173 | ee_ctrl, ee_shared_mem, ee_dof = None, {}, 0 174 | 175 | if ee_key := getattr(args, "ee", "").lower(): 176 | if ee_key not in EE_CONFIG: 177 | raise ValueError(f"Unknown end-effector '{args.ee}'. Available: {list(EE_CONFIG.keys())}") 178 | 179 | spec = EE_CONFIG[ee_key] 180 | mem_type, out_len, ee_dof = spec["shared_mem_type"].lower(), _resolve_out_len(spec), spec["dof"] 181 | data_lock = Lock() 182 | 183 | left_in, right_in = ( 184 | (Array("d", spec["shared_mem_size"], lock=True), Array("d", spec["shared_mem_size"], lock=True)) 185 | if mem_type == "array" 186 | else (Value("d", 0.0, lock=True), Value("d", 0.0, lock=True)) 187 | ) 188 | 189 | state_arr, action_arr = Array("d", out_len, lock=False), Array("d", out_len, lock=False) 190 | 191 | ee_ctrl = spec["controller"](left_in, right_in, data_lock, state_arr, action_arr, simulation_mode=is_sim) 192 | 193 | ee_shared_mem = { 194 | "left": left_in, 195 | "right": right_in, 196 | "state": state_arr, 197 | "action": action_arr, 198 | "lock": data_lock, 199 | } 200 | 201 | # ---------- Simulation helpers (optional) ---------- 202 | episode_writer = None 203 | if is_sim: 204 | reset_pose_publisher = ChannelPublisher("rt/reset_pose/cmd", String_) 205 | reset_pose_publisher.Init() 206 | 
from unitree_lerobot.eval_robot.utils.sim_state_topic import ( 207 | start_sim_state_subscribe, 208 | start_sim_reward_subscribe, 209 | ) 210 | 211 | sim_state_subscriber = start_sim_state_subscribe() 212 | sim_reward_subscriber = start_sim_reward_subscribe() 213 | if getattr(args, "save_data", False) and getattr(args, "task_dir", None): 214 | episode_writer = EpisodeWriter(args.task_dir, frequency=30, image_size=[640, 480]) 215 | return { 216 | "arm_ctrl": arm_ctrl, 217 | "arm_ik": arm_ik, 218 | "ee_ctrl": ee_ctrl, 219 | "ee_shared_mem": ee_shared_mem, 220 | "arm_dof": int(arm_spec["dof"]), 221 | "ee_dof": ee_dof, 222 | "sim_state_subscriber": sim_state_subscriber, 223 | "sim_reward_subscriber": sim_reward_subscriber, 224 | "episode_writer": episode_writer, 225 | "reset_pose_publisher": reset_pose_publisher, 226 | } 227 | return { 228 | "arm_ctrl": arm_ctrl, 229 | "arm_ik": arm_ik, 230 | "ee_ctrl": ee_ctrl, 231 | "ee_shared_mem": ee_shared_mem, 232 | "arm_dof": int(arm_spec["dof"]), 233 | "ee_dof": ee_dof, 234 | } 235 | 236 | 237 | def process_images_and_observations( 238 | tv_img_array, wrist_img_array, tv_img_shape, wrist_img_shape, is_binocular, has_wrist_cam, arm_ctrl 239 | ): 240 | """Processes images and generates observations.""" 241 | current_tv_image = tv_img_array.copy() 242 | current_wrist_image = wrist_img_array.copy() if has_wrist_cam else None 243 | 244 | left_top_cam = current_tv_image[:, : tv_img_shape[1] // 2] if is_binocular else current_tv_image 245 | right_top_cam = current_tv_image[:, tv_img_shape[1] // 2 :] if is_binocular else None 246 | 247 | left_wrist_cam = right_wrist_cam = None 248 | if has_wrist_cam and current_wrist_image is not None: 249 | left_wrist_cam = current_wrist_image[:, : wrist_img_shape[1] // 2] 250 | right_wrist_cam = current_wrist_image[:, wrist_img_shape[1] // 2 :] 251 | observation = { 252 | "observation.images.cam_left_high": torch.from_numpy(left_top_cam), 253 | "observation.images.cam_right_high": 
torch.from_numpy(right_top_cam) if is_binocular else None, 254 | "observation.images.cam_left_wrist": torch.from_numpy(left_wrist_cam) if has_wrist_cam else None, 255 | "observation.images.cam_right_wrist": torch.from_numpy(right_wrist_cam) if has_wrist_cam else None, 256 | } 257 | current_arm_q = arm_ctrl.get_current_dual_arm_q() 258 | 259 | return observation, current_arm_q 260 | 261 | 262 | def publish_reset_category(category: int, publisher): # Scene Reset signal 263 | msg = String_(data=str(category)) 264 | publisher.Write(msg) 265 | logger_mp.info(f"published reset category: {category}") 266 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/replay_robot.py: -------------------------------------------------------------------------------- 1 | """' 2 | Refer to: lerobot/lerobot/scripts/eval.py 3 | lerobot/lerobot/scripts/econtrol_robot.py 4 | lerobot/robot_devices/control_utils.py 5 | """ 6 | 7 | import time 8 | import numpy as np 9 | 10 | from multiprocessing.sharedctypes import SynchronizedArray 11 | from lerobot.configs import parser 12 | from lerobot.datasets.lerobot_dataset import LeRobotDataset 13 | from unitree_lerobot.eval_robot.make_robot import ( 14 | setup_image_client, 15 | setup_robot_interface, 16 | process_images_and_observations, 17 | ) 18 | from unitree_lerobot.eval_robot.utils.utils import cleanup_resources, EvalRealConfig 19 | 20 | from unitree_lerobot.eval_robot.utils.rerun_visualizer import RerunLogger, visualization_data 21 | from unitree_lerobot.eval_robot.utils.utils import to_list, to_scalar 22 | 23 | import logging_mp 24 | 25 | logging_mp.basic_config(level=logging_mp.INFO) 26 | logger_mp = logging_mp.get_logger(__name__) 27 | 28 | 29 | @parser.wrap() 30 | def replay_main(cfg: EvalRealConfig): 31 | logger_mp.info(f"Arguments: {cfg}") 32 | 33 | if cfg.visualization: 34 | rerun_logger = RerunLogger() 35 | 36 | image_info = setup_image_client(cfg) 37 | robot_interface = 
setup_robot_interface(cfg) 38 | 39 | """The main control and evaluation loop.""" 40 | # Unpack interfaces for convenience 41 | arm_ctrl, arm_ik, ee_shared_mem, arm_dof, ee_dof = ( 42 | robot_interface[key] for key in ["arm_ctrl", "arm_ik", "ee_shared_mem", "arm_dof", "ee_dof"] 43 | ) 44 | tv_img_array, wrist_img_array, tv_img_shape, wrist_img_shape, is_binocular, has_wrist_cam = ( 45 | image_info[key] 46 | for key in [ 47 | "tv_img_array", 48 | "wrist_img_array", 49 | "tv_img_shape", 50 | "wrist_img_shape", 51 | "is_binocular", 52 | "has_wrist_cam", 53 | ] 54 | ) 55 | 56 | logger_mp.info(f"Starting evaluation loop at {cfg.frequency} Hz.") 57 | 58 | dataset = LeRobotDataset(repo_id=cfg.repo_id, root=cfg.root, episodes=[cfg.episodes]) 59 | actions = dataset.hf_dataset.select_columns("action") 60 | 61 | # init pose 62 | from_idx = dataset.meta.episodes["dataset_from_index"][0] 63 | step = dataset[from_idx] 64 | init_left_arm_pose = step["observation.state"][:14].cpu().numpy() 65 | 66 | user_input = input("Please enter the start signal (enter 's' to start the subsequent program):") 67 | if user_input.lower() == "s": 68 | # "The initial positions of the robot's arm and fingers take the initial positions during data recording." 
69 | logger_mp.info("Initializing robot to starting pose...") 70 | tau = arm_ik.solve_tau(init_left_arm_pose) 71 | arm_ctrl.ctrl_dual_arm(init_left_arm_pose, tau) 72 | time.sleep(1) 73 | for idx in range(dataset.num_frames): 74 | loop_start_time = time.perf_counter() 75 | 76 | left_ee_state = right_ee_state = np.array([]) 77 | action_np = actions[idx]["action"].numpy() 78 | 79 | # exec action 80 | arm_action = action_np[:arm_dof] 81 | tau = arm_ik.solve_tau(arm_action) 82 | arm_ctrl.ctrl_dual_arm(arm_action, tau) 83 | logger_mp.info(f"arm_action {arm_action}, tau {tau}") 84 | 85 | if cfg.ee: 86 | ee_action_start_idx = arm_dof 87 | left_ee_action = action_np[ee_action_start_idx : ee_action_start_idx + ee_dof] 88 | right_ee_action = action_np[ee_action_start_idx + ee_dof : ee_action_start_idx + 2 * ee_dof] 89 | logger_mp.info(f"EE Action: left {left_ee_action}, right {right_ee_action}") 90 | 91 | with ee_shared_mem["lock"]: 92 | full_state = np.array(ee_shared_mem["state"][:]) 93 | left_ee_state = full_state[:ee_dof] 94 | right_ee_state = full_state[ee_dof:] 95 | 96 | if isinstance(ee_shared_mem["left"], SynchronizedArray): 97 | ee_shared_mem["left"][:] = to_list(left_ee_action) 98 | ee_shared_mem["right"][:] = to_list(right_ee_action) 99 | elif hasattr(ee_shared_mem["left"], "value") and hasattr(ee_shared_mem["right"], "value"): 100 | ee_shared_mem["left"].value = to_scalar(left_ee_action) 101 | ee_shared_mem["right"].value = to_scalar(right_ee_action) 102 | 103 | if cfg.visualization: 104 | observation, current_arm_q = process_images_and_observations( 105 | tv_img_array, wrist_img_array, tv_img_shape, wrist_img_shape, is_binocular, has_wrist_cam, arm_ctrl 106 | ) 107 | state = np.concatenate((current_arm_q, left_ee_state, right_ee_state)) 108 | 109 | visualization_data(idx, observation, state, action_np, rerun_logger) 110 | 111 | # Maintain frequency 112 | time.sleep(max(0, (1.0 / cfg.frequency) - (time.perf_counter() - loop_start_time))) 113 | 114 | 
cleanup_resources(image_info) 115 | 116 | 117 | if __name__ == "__main__": 118 | replay_main() 119 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/robot_control/robot_hand_brainco.py: -------------------------------------------------------------------------------- 1 | from unitree_sdk2py.core.channel import ChannelPublisher, ChannelSubscriber, ChannelFactoryInitialize # dds 2 | from unitree_sdk2py.idl.unitree_go.msg.dds_ import MotorCmds_, MotorStates_ # idl 3 | from unitree_sdk2py.idl.default import unitree_go_msg_dds__MotorCmd_ 4 | 5 | import numpy as np 6 | from enum import IntEnum 7 | import threading 8 | import time 9 | from multiprocessing import Process, Array 10 | 11 | import logging_mp 12 | 13 | logger_mp = logging_mp.get_logger(__name__) 14 | 15 | brainco_Num_Motors = 6 16 | kTopicbraincoLeftCommand = "rt/brainco/left/cmd" 17 | kTopicbraincoLeftState = "rt/brainco/left/state" 18 | kTopicbraincoRightCommand = "rt/brainco/right/cmd" 19 | kTopicbraincoRightState = "rt/brainco/right/state" 20 | 21 | 22 | class Brainco_Controller: 23 | def __init__( 24 | self, 25 | left_hand_array, 26 | right_hand_array, 27 | dual_hand_data_lock=None, 28 | dual_hand_state_array=None, 29 | dual_hand_action_array=None, 30 | fps=100.0, 31 | Unit_Test=False, 32 | simulation_mode=False, 33 | ): 34 | logger_mp.info("Initialize Brainco_Controller...") 35 | self.fps = fps 36 | self.hand_sub_ready = False 37 | self.Unit_Test = Unit_Test 38 | self.simulation_mode = simulation_mode 39 | 40 | if self.simulation_mode: 41 | ChannelFactoryInitialize(1) 42 | else: 43 | ChannelFactoryInitialize(0) 44 | 45 | # initialize handcmd publisher and handstate subscriber 46 | self.LeftHandCmb_publisher = ChannelPublisher(kTopicbraincoLeftCommand, MotorCmds_) 47 | self.LeftHandCmb_publisher.Init() 48 | self.RightHandCmb_publisher = ChannelPublisher(kTopicbraincoRightCommand, MotorCmds_) 49 | self.RightHandCmb_publisher.Init() 50 | 51 | 
self.LeftHandState_subscriber = ChannelSubscriber(kTopicbraincoLeftState, MotorStates_) 52 | self.LeftHandState_subscriber.Init() 53 | self.RightHandState_subscriber = ChannelSubscriber(kTopicbraincoRightState, MotorStates_) 54 | self.RightHandState_subscriber.Init() 55 | 56 | # Shared Arrays for hand states 57 | self.left_hand_state_array = Array("d", brainco_Num_Motors, lock=True) 58 | self.right_hand_state_array = Array("d", brainco_Num_Motors, lock=True) 59 | 60 | # initialize subscribe thread 61 | self.subscribe_state_thread = threading.Thread(target=self._subscribe_hand_state) 62 | self.subscribe_state_thread.daemon = True 63 | self.subscribe_state_thread.start() 64 | 65 | while not self.hand_sub_ready: 66 | time.sleep(0.1) 67 | logger_mp.warning("[brainco_Controller] Waiting to subscribe dds...") 68 | logger_mp.info("[brainco_Controller] Subscribe dds ok.") 69 | 70 | hand_control_process = Process( 71 | target=self.control_process, 72 | args=( 73 | left_hand_array, 74 | right_hand_array, 75 | self.left_hand_state_array, 76 | self.right_hand_state_array, 77 | dual_hand_data_lock, 78 | dual_hand_state_array, 79 | dual_hand_action_array, 80 | ), 81 | ) 82 | hand_control_process.daemon = True 83 | hand_control_process.start() 84 | 85 | logger_mp.info("Initialize brainco_Controller OK!\n") 86 | 87 | def _subscribe_hand_state(self): 88 | while True: 89 | left_hand_msg = self.LeftHandState_subscriber.Read() 90 | right_hand_msg = self.RightHandState_subscriber.Read() 91 | self.hand_sub_ready = True 92 | if left_hand_msg is not None and right_hand_msg is not None: 93 | # Update left hand state 94 | for idx, id in enumerate(Brainco_Left_Hand_JointIndex): 95 | self.left_hand_state_array[idx] = left_hand_msg.states[id].q 96 | # Update right hand state 97 | for idx, id in enumerate(Brainco_Right_Hand_JointIndex): 98 | self.right_hand_state_array[idx] = right_hand_msg.states[id].q 99 | time.sleep(0.002) 100 | 101 | def ctrl_dual_hand(self, left_q_target, right_q_target): 
102 | """ 103 | Set current left, right hand motor state target q 104 | """ 105 | for idx, id in enumerate(Brainco_Left_Hand_JointIndex): 106 | self.left_hand_msg.cmds[id].q = left_q_target[idx] 107 | for idx, id in enumerate(Brainco_Right_Hand_JointIndex): 108 | self.right_hand_msg.cmds[id].q = right_q_target[idx] 109 | 110 | self.LeftHandCmb_publisher.Write(self.left_hand_msg) 111 | self.RightHandCmb_publisher.Write(self.right_hand_msg) 112 | # logger_mp.debug("hand ctrl publish ok.") 113 | 114 | def control_process( 115 | self, 116 | left_hand_array, 117 | right_hand_array, 118 | left_hand_state_array, 119 | right_hand_state_array, 120 | dual_hand_data_lock=None, 121 | dual_hand_state_array=None, 122 | dual_hand_action_array=None, 123 | ): 124 | self.running = True 125 | 126 | left_q_target = np.full(brainco_Num_Motors, 0) 127 | right_q_target = np.full(brainco_Num_Motors, 0) 128 | 129 | # initialize brainco hand's cmd msg 130 | self.left_hand_msg = MotorCmds_() 131 | self.left_hand_msg.cmds = [unitree_go_msg_dds__MotorCmd_() for _ in range(len(Brainco_Left_Hand_JointIndex))] 132 | self.right_hand_msg = MotorCmds_() 133 | self.right_hand_msg.cmds = [unitree_go_msg_dds__MotorCmd_() for _ in range(len(Brainco_Right_Hand_JointIndex))] 134 | 135 | for idx, id in enumerate(Brainco_Left_Hand_JointIndex): 136 | self.left_hand_msg.cmds[id].q = 0.0 137 | self.left_hand_msg.cmds[id].dq = 1.0 138 | for idx, id in enumerate(Brainco_Right_Hand_JointIndex): 139 | self.right_hand_msg.cmds[id].q = 0.0 140 | self.right_hand_msg.cmds[id].dq = 1.0 141 | 142 | try: 143 | while self.running: 144 | start_time = time.time() 145 | # get dual hand state 146 | with left_hand_array.get_lock(): 147 | left_hand_mat = np.array(left_hand_array[:]).copy() 148 | with right_hand_array.get_lock(): 149 | right_hand_mat = np.array(right_hand_array[:]).copy() 150 | 151 | # Read left and right q_state from shared arrays 152 | state_data = np.concatenate((np.array(left_hand_state_array[:]), 
np.array(right_hand_state_array[:]))) 153 | 154 | action_data = np.concatenate((left_hand_mat, right_hand_mat)) 155 | if dual_hand_data_lock is not None: 156 | with dual_hand_data_lock: 157 | dual_hand_state_array[:] = state_data 158 | dual_hand_action_array[:] = action_data 159 | 160 | if dual_hand_state_array and dual_hand_action_array: 161 | with dual_hand_data_lock: 162 | left_q_target = left_hand_mat 163 | right_q_target = right_hand_mat 164 | 165 | self.ctrl_dual_hand(left_q_target, right_q_target) 166 | current_time = time.time() 167 | time_elapsed = current_time - start_time 168 | sleep_time = max(0, (1 / self.fps) - time_elapsed) 169 | time.sleep(sleep_time) 170 | finally: 171 | logger_mp.info("brainco_Controller has been closed.") 172 | 173 | 174 | # according to the official documentation, https://www.brainco-hz.com/docs/revolimb-hand/product/parameters.html 175 | # the motor sequence is as shown in the table below 176 | # ┌──────┬───────┬────────────┬────────┬────────┬────────┬────────┐ 177 | # │ Id │ 0 │ 1 │ 2 │ 3 │ 4 │ 5 │ 178 | # ├──────┼───────┼────────────┼────────┼────────┼────────┼────────┤ 179 | # │Joint │ thumb │ thumb-aux | index │ middle │ ring │ pinky │ 180 | # └──────┴───────┴────────────┴────────┴────────┴────────┴────────┘ 181 | class Brainco_Right_Hand_JointIndex(IntEnum): 182 | kRightHandThumb = 0 183 | kRightHandThumbAux = 1 184 | kRightHandIndex = 2 185 | kRightHandMiddle = 3 186 | kRightHandRing = 4 187 | kRightHandPinky = 5 188 | 189 | 190 | class Brainco_Left_Hand_JointIndex(IntEnum): 191 | kLeftHandThumb = 0 192 | kLeftHandThumbAux = 1 193 | kLeftHandIndex = 2 194 | kLeftHandMiddle = 3 195 | kLeftHandRing = 4 196 | kLeftHandPinky = 5 197 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/robot_control/robot_hand_inspire.py: -------------------------------------------------------------------------------- 1 | from unitree_sdk2py.core.channel import ChannelPublisher, 
ChannelSubscriber, ChannelFactoryInitialize # dds 2 | from unitree_sdk2py.idl.unitree_go.msg.dds_ import MotorCmds_, MotorStates_ # idl 3 | from unitree_sdk2py.idl.default import unitree_go_msg_dds__MotorCmd_ 4 | 5 | import numpy as np 6 | from enum import IntEnum 7 | import threading 8 | import time 9 | from multiprocessing import Process, Array 10 | 11 | import logging_mp 12 | 13 | logger_mp = logging_mp.get_logger(__name__) 14 | 15 | Inspire_Num_Motors = 6 16 | kTopicInspireCommand = "rt/inspire/cmd" 17 | kTopicInspireState = "rt/inspire/state" 18 | 19 | 20 | class Inspire_Controller: 21 | def __init__( 22 | self, 23 | left_hand_array, 24 | right_hand_array, 25 | dual_hand_data_lock=None, 26 | dual_hand_state_array=None, 27 | dual_hand_action_array=None, 28 | fps=100.0, 29 | Unit_Test=False, 30 | simulation_mode=False, 31 | ): 32 | logger_mp.info("Initialize Inspire_Controller...") 33 | self.fps = fps 34 | self.Unit_Test = Unit_Test 35 | self.simulation_mode = simulation_mode 36 | 37 | if self.simulation_mode: 38 | ChannelFactoryInitialize(1) 39 | else: 40 | ChannelFactoryInitialize(0) 41 | 42 | # initialize handcmd publisher and handstate subscriber 43 | self.HandCmb_publisher = ChannelPublisher(kTopicInspireCommand, MotorCmds_) 44 | self.HandCmb_publisher.Init() 45 | 46 | self.HandState_subscriber = ChannelSubscriber(kTopicInspireState, MotorStates_) 47 | self.HandState_subscriber.Init() 48 | 49 | # Shared Arrays for hand states 50 | self.left_hand_state_array = Array("d", Inspire_Num_Motors, lock=True) 51 | self.right_hand_state_array = Array("d", Inspire_Num_Motors, lock=True) 52 | 53 | # initialize subscribe thread 54 | self.subscribe_state_thread = threading.Thread(target=self._subscribe_hand_state) 55 | self.subscribe_state_thread.daemon = True 56 | self.subscribe_state_thread.start() 57 | 58 | while True: 59 | if any(self.right_hand_state_array): # any(self.left_hand_state_array) and 60 | break 61 | time.sleep(0.01) 62 | 
logger_mp.warning("[Inspire_Controller] Waiting to subscribe dds...") 63 | logger_mp.info("[Inspire_Controller] Subscribe dds ok.") 64 | 65 | hand_control_process = Process( 66 | target=self.control_process, 67 | args=( 68 | left_hand_array, 69 | right_hand_array, 70 | self.left_hand_state_array, 71 | self.right_hand_state_array, 72 | dual_hand_data_lock, 73 | dual_hand_state_array, 74 | dual_hand_action_array, 75 | ), 76 | ) 77 | hand_control_process.daemon = True 78 | hand_control_process.start() 79 | 80 | logger_mp.info("Initialize Inspire_Controller OK!\n") 81 | 82 | def _subscribe_hand_state(self): 83 | while True: 84 | hand_msg = self.HandState_subscriber.Read() 85 | if hand_msg is not None: 86 | for idx, id in enumerate(Inspire_Left_Hand_JointIndex): 87 | self.left_hand_state_array[idx] = hand_msg.states[id].q 88 | for idx, id in enumerate(Inspire_Right_Hand_JointIndex): 89 | self.right_hand_state_array[idx] = hand_msg.states[id].q 90 | time.sleep(0.002) 91 | 92 | def ctrl_dual_hand(self, left_q_target, right_q_target): 93 | """ 94 | Set current left, right hand motor state target q 95 | """ 96 | for idx, id in enumerate(Inspire_Left_Hand_JointIndex): 97 | self.hand_msg.cmds[id].q = left_q_target[idx] 98 | for idx, id in enumerate(Inspire_Right_Hand_JointIndex): 99 | self.hand_msg.cmds[id].q = right_q_target[idx] 100 | 101 | self.HandCmb_publisher.Write(self.hand_msg) 102 | # logger_mp.debug("hand ctrl publish ok.") 103 | 104 | def control_process( 105 | self, 106 | left_hand_array, 107 | right_hand_array, 108 | left_hand_state_array, 109 | right_hand_state_array, 110 | dual_hand_data_lock=None, 111 | dual_hand_state_array=None, 112 | dual_hand_action_array=None, 113 | ): 114 | self.running = True 115 | 116 | left_q_target = np.full(Inspire_Num_Motors, 1.0) 117 | right_q_target = np.full(Inspire_Num_Motors, 1.0) 118 | 119 | # initialize inspire hand's cmd msg 120 | self.hand_msg = MotorCmds_() 121 | self.hand_msg.cmds = [ 122 | 
unitree_go_msg_dds__MotorCmd_() 123 | for _ in range(len(Inspire_Right_Hand_JointIndex) + len(Inspire_Left_Hand_JointIndex)) 124 | ] 125 | 126 | for idx, id in enumerate(Inspire_Left_Hand_JointIndex): 127 | self.hand_msg.cmds[id].q = 1.0 128 | for idx, id in enumerate(Inspire_Right_Hand_JointIndex): 129 | self.hand_msg.cmds[id].q = 1.0 130 | 131 | try: 132 | while self.running: 133 | start_time = time.time() 134 | 135 | # get dual hand state 136 | with left_hand_array.get_lock(): 137 | left_hand_mat = np.array(left_hand_array[:]).copy() 138 | with right_hand_array.get_lock(): 139 | right_hand_mat = np.array(right_hand_array[:]).copy() 140 | 141 | # Read left and right q_state from shared arrays 142 | state_data = np.concatenate((np.array(left_hand_state_array[:]), np.array(right_hand_state_array[:]))) 143 | 144 | action_data = np.concatenate((left_hand_mat, right_hand_mat)) 145 | if dual_hand_data_lock is not None: 146 | with dual_hand_data_lock: 147 | dual_hand_state_array[:] = state_data 148 | dual_hand_action_array[:] = action_data 149 | 150 | if dual_hand_state_array and dual_hand_action_array: 151 | with dual_hand_data_lock: 152 | left_q_target = left_hand_mat 153 | right_q_target = right_hand_mat 154 | 155 | self.ctrl_dual_hand(left_q_target, right_q_target) 156 | current_time = time.time() 157 | time_elapsed = current_time - start_time 158 | sleep_time = max(0, (1 / self.fps) - time_elapsed) 159 | time.sleep(sleep_time) 160 | finally: 161 | logger_mp.info("Inspire_Controller has been closed.") 162 | 163 | 164 | # Update hand state, according to the official documentation, https://support.unitree.com/home/en/G1_developer/inspire_dfx_dexterous_hand 165 | # the state sequence is as shown in the table below 166 | # ┌──────┬───────┬──────┬────────┬────────┬────────────┬────────────────┬───────┬──────┬────────┬────────┬────────────┬────────────────┐ 167 | # │ Id │ 0 │ 1 │ 2 │ 3 │ 4 │ 5 │ 6 │ 7 │ 8 │ 9 │ 10 │ 11 │ 168 | # 
├──────┼───────┼──────┼────────┼────────┼────────────┼────────────────┼───────┼──────┼────────┼────────┼────────────┼────────────────┤ 169 | # │ │ Right Hand │ Left Hand │ 170 | # │Joint │ pinky │ ring │ middle │ index │ thumb-bend │ thumb-rotation │ pinky │ ring │ middle │ index │ thumb-bend │ thumb-rotation │ 171 | # └──────┴───────┴──────┴────────┴────────┴────────────┴────────────────┴───────┴──────┴────────┴────────┴────────────┴────────────────┘ 172 | class Inspire_Right_Hand_JointIndex(IntEnum): 173 | kRightHandPinky = 0 174 | kRightHandRing = 1 175 | kRightHandMiddle = 2 176 | kRightHandIndex = 3 177 | kRightHandThumbBend = 4 178 | kRightHandThumbRotation = 5 179 | 180 | 181 | class Inspire_Left_Hand_JointIndex(IntEnum): 182 | kLeftHandPinky = 6 183 | kLeftHandRing = 7 184 | kLeftHandMiddle = 8 185 | kLeftHandIndex = 9 186 | kLeftHandThumbBend = 10 187 | kLeftHandThumbRotation = 11 188 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/utils/episode_writer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import json 4 | import datetime 5 | import numpy as np 6 | import time 7 | 8 | from queue import Queue, Empty 9 | from threading import Thread 10 | import logging_mp 11 | 12 | logger_mp = logging_mp.get_logger(__name__) 13 | 14 | 15 | class EpisodeWriter: 16 | def __init__(self, task_dir, frequency=30, image_size=[640, 480]): 17 | """ 18 | image_size: [width, height] 19 | """ 20 | logger_mp.info("==> EpisodeWriter initializing...\n") 21 | self.task_dir = task_dir 22 | self.frequency = frequency 23 | self.image_size = image_size 24 | 25 | self.data = {} 26 | self.episode_data = [] 27 | self.item_id = -1 28 | self.episode_id = -1 29 | if os.path.exists(self.task_dir): 30 | episode_dirs = [episode_dir for episode_dir in os.listdir(self.task_dir) if "episode_" in episode_dir] 31 | episode_last = sorted(episode_dirs)[-1] if 
len(episode_dirs) > 0 else None 32 | self.episode_id = 0 if episode_last is None else int(episode_last.split("_")[-1]) 33 | logger_mp.info(f"==> task_dir directory already exist, now self.episode_id is:{self.episode_id}\n") 34 | else: 35 | os.makedirs(self.task_dir) 36 | logger_mp.info("==> episode directory does not exist, now create one.\n") 37 | self.data_info() 38 | self.text_desc() 39 | self.result = None 40 | self.is_available = True # Indicates whether the class is available for new operations 41 | # Initialize the queue and worker thread 42 | self.item_data_queue = Queue(-1) 43 | self.stop_worker = False 44 | self.need_save = False # Flag to indicate when save_episode is triggered 45 | self.worker_thread = Thread(target=self.process_queue) 46 | self.worker_thread.start() 47 | 48 | logger_mp.info("==> EpisodeWriter initialized successfully.\n") 49 | 50 | def data_info(self, version="1.0.0", date=None, author=None): 51 | self.info = { 52 | "version": "1.0.0" if version is None else version, 53 | "date": datetime.date.today().strftime("%Y-%m-%d") if date is None else date, 54 | "author": "unitree" if author is None else author, 55 | "image": {"width": self.image_size[0], "height": self.image_size[1], "fps": self.frequency}, 56 | "depth": {"width": self.image_size[0], "height": self.image_size[1], "fps": self.frequency}, 57 | "audio": {"sample_rate": 16000, "channels": 1, "format": "PCM", "bits": 16}, # PCM_S16 58 | "joint_names": { 59 | "left_arm": [ 60 | "kLeftShoulderPitch", 61 | "kLeftShoulderRoll", 62 | "kLeftShoulderYaw", 63 | "kLeftElbow", 64 | "kLeftWristRoll", 65 | "kLeftWristPitch", 66 | "kLeftWristyaw", 67 | ], 68 | "left_ee": [], 69 | "right_arm": [], 70 | "right_ee": [], 71 | "body": [], 72 | }, 73 | "tactile_names": { 74 | "left_ee": [], 75 | "right_ee": [], 76 | }, 77 | "sim_state": "", 78 | } 79 | 80 | def text_desc(self): 81 | self.text = { 82 | "goal": "Place the wooden blocks into the yellow frame, stacking them from bottom to top in the 
order: red, yellow, green.", 83 | "desc": "Using the gripper, first place the red wooden block into the yellow frame. Next, stack the yellow wooden block on top of the red one, and finally place the green wooden block on top of the yellow block.", 84 | "steps": "", 85 | } 86 | 87 | def create_episode(self): 88 | """ 89 | Create a new episode. 90 | Returns: 91 | bool: True if the episode is successfully created, False otherwise. 92 | Note: 93 | Once successfully created, this function will only be available again after save_episode complete its save task. 94 | """ 95 | if not self.is_available: 96 | logger_mp.info( 97 | "==> The class is currently unavailable for new operations. Please wait until ongoing tasks are completed." 98 | ) 99 | return False # Return False if the class is unavailable 100 | 101 | # Reset episode-related data and create necessary directories 102 | self.item_id = -1 103 | self.episode_data = [] 104 | self.episode_id = self.episode_id + 1 105 | 106 | self.episode_dir = os.path.join(self.task_dir, f"episode_{str(self.episode_id).zfill(4)}") 107 | self.color_dir = os.path.join(self.episode_dir, "colors") 108 | self.depth_dir = os.path.join(self.episode_dir, "depths") 109 | self.audio_dir = os.path.join(self.episode_dir, "audios") 110 | self.json_path = os.path.join(self.episode_dir, "data.json") 111 | os.makedirs(self.episode_dir, exist_ok=True) 112 | os.makedirs(self.color_dir, exist_ok=True) 113 | os.makedirs(self.depth_dir, exist_ok=True) 114 | os.makedirs(self.audio_dir, exist_ok=True) 115 | 116 | self.is_available = False # After the episode is created, the class is marked as unavailable until the episode is successfully saved 117 | logger_mp.info(f"==> New episode created: {self.episode_dir}") 118 | return True # Return True if the episode is successfully created 119 | 120 | def add_item(self, colors, depths=None, states=None, actions=None, tactiles=None, audios=None, sim_state=None): 121 | # Increment the item ID 122 | self.item_id += 1 
123 | # Create the item data dictionary 124 | item_data = { 125 | "idx": self.item_id, 126 | "colors": colors, 127 | "depths": depths, 128 | "states": states, 129 | "actions": actions, 130 | "tactiles": tactiles, 131 | "audios": audios, 132 | "sim_state": sim_state, 133 | } 134 | # Enqueue the item data 135 | self.item_data_queue.put(item_data) 136 | 137 | def process_queue(self): 138 | while not self.stop_worker or not self.item_data_queue.empty(): 139 | # Process items in the queue 140 | try: 141 | item_data = self.item_data_queue.get(timeout=1) 142 | try: 143 | self._process_item_data(item_data) 144 | except Exception as e: 145 | logger_mp.info(f"Error processing item_data (idx={item_data['idx']}): {e}") 146 | self.item_data_queue.task_done() 147 | except Empty: 148 | pass 149 | 150 | # Check if save_episode was triggered 151 | if self.need_save and self.item_data_queue.empty(): 152 | self._save_episode() 153 | 154 | def _process_item_data(self, item_data): 155 | idx = item_data["idx"] 156 | colors = item_data.get("colors", {}) 157 | depths = item_data.get("depths", {}) 158 | audios = item_data.get("audios", {}) 159 | 160 | # Save images 161 | if colors: 162 | for idx_color, (color_key, color) in enumerate(colors.items()): 163 | color_name = f"{str(idx).zfill(6)}_{color_key}.jpg" 164 | if not cv2.imwrite(os.path.join(self.color_dir, color_name), color): 165 | logger_mp.info("Failed to save color image.") 166 | item_data["colors"][color_key] = os.path.join("colors", color_name) 167 | 168 | # Save depths 169 | if depths: 170 | for idx_depth, (depth_key, depth) in enumerate(depths.items()): 171 | depth_name = f"{str(idx).zfill(6)}_{depth_key}.jpg" 172 | if not cv2.imwrite(os.path.join(self.depth_dir, depth_name), depth): 173 | logger_mp.info("Failed to save depth image.") 174 | item_data["depths"][depth_key] = os.path.join("depths", depth_name) 175 | 176 | # Save audios 177 | if audios: 178 | for mic, audio in audios.items(): 179 | audio_name = 
f"audio_{str(idx).zfill(6)}_{mic}.npy" 180 | np.save(os.path.join(self.audio_dir, audio_name), audio.astype(np.int16)) 181 | item_data["audios"][mic] = os.path.join("audios", audio_name) 182 | 183 | # Update episode data 184 | self.episode_data.append(item_data) 185 | 186 | def save_episode(self, result): 187 | """ 188 | Trigger the save operation. This sets the save flag, and the process_queue thread will handle it. 189 | """ 190 | self.need_save = True # Set the save flag 191 | self.result = result 192 | logger_mp.info("==> Episode saved start...") 193 | 194 | def _save_episode(self): 195 | """ 196 | Save the episode data to a JSON file. 197 | """ 198 | self.data["info"] = self.info 199 | self.data["text"] = self.text 200 | self.data["data"] = self.episode_data 201 | self.data["result"] = self.result 202 | 203 | with open(self.json_path, "w", encoding="utf-8") as jsonf: 204 | jsonf.write(json.dumps(self.data, indent=4, ensure_ascii=False)) 205 | self.need_save = False # Reset the save flag 206 | self.is_available = True # Mark the class as available after saving 207 | logger_mp.info(f"==> Episode saved successfully to {self.json_path} with result: {self.result}") 208 | 209 | def close(self): 210 | """ 211 | Stop the worker thread and ensure all tasks are completed. 212 | """ 213 | self.item_data_queue.join() 214 | if not self.is_available: # If self.is_available is False, it means there is still data not saved. 
def __init__(
    self,
    prefix: str = "",
    memory_limit: str = "200MB",
    idxrangeboundary: int | None = 300,
):
    """Initialize the Rerun logger and spawn the viewer.

    Args:
        prefix: Entity-path prefix prepended to every logged item.
        memory_limit: Memory budget handed to the spawned Rerun viewer.
        idxrangeboundary: Width (in frames) of the sliding time window used
            by the time-series views; a falsy value skips blueprint setup.
    """
    # Use a descriptive name for the Rerun recording
    rr.init(f"Dataset_Log_{datetime.now().strftime('%Y%m%d_%H%M%S')}")
    rr.spawn(memory_limit=memory_limit)

    self.prefix = prefix
    self.blueprint_sent = False  # blueprint is built lazily on the first data packet
    self.idxrangeboundary = idxrangeboundary

    # --- Internal cache for discovered keys (filled by _initialize_from_data) ---
    self._image_keys: tuple[str, ...] = ()
    self._state_key: str = ""
    self._action_key: str = ""
    self._index_key: str = "index"
    self._task_key: str = "task"
    self._episode_index_key: str = "episode_index"

    self.current_episode = -1  # sentinel: no episode seen yet
Auto-configuring...") 43 | 44 | image_keys = [] 45 | for key, value in step_data.items(): 46 | if key.startswith("observation.images.") and isinstance(value, torch.Tensor) and value.ndim > 2: 47 | image_keys.append(key) 48 | elif key == "observation.state": 49 | self._state_key = key 50 | elif key == "action": 51 | self._action_key = key 52 | 53 | self._image_keys = tuple(sorted(image_keys)) 54 | 55 | if "index" in step_data: 56 | self._index_key = "index" 57 | elif "frame_index" in step_data: 58 | self._index_key = "frame_index" 59 | 60 | print(f" - Using '{self._index_key}' for time sequence.") 61 | print(f" - Detected State Key: '{self._state_key}'") 62 | print(f" - Detected Action Key: '{self._action_key}'") 63 | print(f" - Detected Image Keys: {self._image_keys}") 64 | if self.idxrangeboundary: 65 | self.setup_blueprint() 66 | 67 | def setup_blueprint(self): 68 | """Sets up and sends the Rerun blueprint based on detected components.""" 69 | views = [] 70 | 71 | for key in self._image_keys: 72 | clean_name = key.replace("observation.images.", "") 73 | entity_path = f"{self.prefix}images/{clean_name}" 74 | views.append(rrb.Spatial2DView(origin=entity_path, name=clean_name)) 75 | 76 | if self._state_key: 77 | entity_path = f"{self.prefix}state" 78 | views.append( 79 | rrb.TimeSeriesView( 80 | origin=entity_path, 81 | name="Observation State", 82 | time_ranges=[ 83 | rrb.VisibleTimeRange( 84 | "frame", 85 | start=rrb.TimeRangeBoundary.cursor_relative(seq=-self.idxrangeboundary), 86 | end=rrb.TimeRangeBoundary.cursor_relative(), 87 | ) 88 | ], 89 | plot_legend=rrb.PlotLegend(visible=True), 90 | ) 91 | ) 92 | 93 | if self._action_key: 94 | entity_path = f"{self.prefix}action" 95 | views.append( 96 | rrb.TimeSeriesView( 97 | origin=entity_path, 98 | name="Action", 99 | time_ranges=[ 100 | rrb.VisibleTimeRange( 101 | "frame", 102 | start=rrb.TimeRangeBoundary.cursor_relative(seq=-self.idxrangeboundary), 103 | end=rrb.TimeRangeBoundary.cursor_relative(), 104 | ) 105 | 
def log_step(self, step_data: dict[str, Any]):
    """Log one dataset step: time cursor, episode banner, images, scalar series."""
    # Lazily auto-configure views/keys from the very first packet.
    if not self.blueprint_sent:
        self._initialize_from_data(step_data)

    # Advance the Rerun time cursor to this step's sequence index.
    if self._index_key in step_data:
        rr.set_time_sequence("frame", step_data[self._index_key].item())

    # Emit a text marker whenever the episode index changes.
    new_episode = step_data.get(self._episode_index_key, torch.tensor(-1)).item()
    if new_episode != self.current_episode:
        self.current_episode = new_episode
        task_name = step_data.get(self._task_key, "Unknown Task")
        rr.log(
            f"{self.prefix}info/task",
            rr.TextLog(f"Starting Episode {self.current_episode}: {task_name}", level=rr.TextLogLevel.INFO),
        )

    # Images: CHW tensors are permuted to HWC before logging.
    for image_key in self._image_keys:
        if image_key not in step_data:
            continue
        frame = step_data[image_key]
        if frame.ndim <= 2:
            continue
        short_name = image_key.replace("observation.images.", "")
        if frame.shape[0] in (1, 3, 4):
            frame = frame.permute(1, 2, 0)
        rr.log(f"{self.prefix}images/{short_name}", rr.Image(frame))

    # State and action vectors: one scalar series per joint.
    for series_key, series_name in ((self._state_key, "state"), (self._action_key, "action")):
        if series_key in step_data:
            base_path = f"{self.prefix}{series_name}"
            for joint_idx, joint_val in enumerate(step_data[series_key]):
                rr.log(f"{base_path}/joint_{joint_idx}", rr.Scalar(joint_val.item()))
action, online_logger): 158 | item_data: dict[str, Any] = { 159 | "index": torch.tensor(idx), 160 | "observation.state": state, 161 | "action": action, 162 | } 163 | for k, v in observation.items(): 164 | if k not in ("index", "observation.state", "action"): 165 | item_data[k] = v 166 | online_logger.log_step(item_data) 167 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/utils/sim_savedata_utils.py: -------------------------------------------------------------------------------- 1 | # for simulation 2 | import torch 3 | import numpy as np 4 | import logging_mp 5 | from unitree_lerobot.eval_robot.utils.utils import ( 6 | reset_policy, 7 | ) 8 | from unitree_lerobot.eval_robot.make_robot import ( 9 | publish_reset_category, 10 | ) 11 | from dataclasses import dataclass 12 | from lerobot.configs import parser 13 | from lerobot.configs.policies import PreTrainedConfig 14 | import time 15 | 16 | logging_mp.basic_config(level=logging_mp.INFO) 17 | logger_mp = logging_mp.get_logger(__name__) 18 | 19 | 20 | def process_data_add(episode_writer, observation_image, current_arm_q, ee_state, action, arm_dof, ee_dof): 21 | if episode_writer is None: 22 | return 23 | if ( 24 | observation_image is not None 25 | and current_arm_q is not None 26 | and ee_state is not None 27 | and action is not None 28 | and arm_dof is not None 29 | and ee_dof is not None 30 | ): 31 | # Convert tensors to numpy arrays for JSON serialization 32 | if torch.is_tensor(current_arm_q): 33 | current_arm_q = current_arm_q.detach().cpu().numpy() 34 | if torch.is_tensor(ee_state): 35 | ee_state = ee_state.detach().cpu().numpy() 36 | if torch.is_tensor(action): 37 | action = action.detach().cpu().numpy() 38 | colors = {} 39 | i = 0 40 | for key, value in observation_image.items(): 41 | if "images" in key: 42 | if value is not None: 43 | # Convert PyTorch tensor to numpy array for OpenCV compatibility 44 | if torch.is_tensor(value): 45 | # Convert 
def process_data_save(episode_writer, result):
    """Forward the episode result to the writer's save routine, if a writer exists.

    Args:
        episode_writer: Episode writer instance, or None (no-op).
        result: Outcome label handed to save_episode (e.g. "success", "fail").
    """
    if episode_writer is not None:
        episode_writer.save_episode(result)
| episodes: int = 0 177 | frequency: float = 30.0 178 | 179 | # Basic control parameters 180 | arm: str = "G1_29" # G1_29, G1_23 181 | ee: str = "dex3" # dex3, dex1, inspire1, brainco 182 | 183 | # Mode flags 184 | motion: bool = False 185 | headless: bool = False 186 | sim: bool = True 187 | visualization: bool = False 188 | send_real_robot: bool = False 189 | use_dataset: bool = False 190 | save_data: bool = False 191 | task_dir: str = "./data" 192 | max_episodes: int = 1200 193 | 194 | def __post_init__(self): 195 | # HACK: We parse again the cli args here to get the pretrained path if there was one. 196 | policy_path = parser.get_path_arg("policy") 197 | if policy_path: 198 | cli_overrides = parser.get_cli_overrides("policy") 199 | self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) 200 | self.policy.pretrained_path = policy_path 201 | else: 202 | logger_mp.warning( 203 | "No pretrained path was provided, evaluated policy will be built from scratch (random weights)." 
def extract_observation(step: dict):
    """Pull the model-relevant entries out of a dataset step.

    Keeps every "observation.images.*" entry — transposing HWC ndarrays with
    1 or 3 channels into CHW layout — plus "observation.state". All other
    keys are dropped.
    """
    picked = {}
    for name, payload in step.items():
        is_image = name.startswith("observation.images.")
        if not is_image and name != "observation.state":
            continue
        if (
            is_image
            and isinstance(payload, np.ndarray)
            and payload.ndim == 3
            and payload.shape[-1] in (1, 3)
        ):
            payload = np.transpose(payload, (2, 0, 1))
        picked[name] = payload
    return picked
def to_list(x):
    """Return *x* flattened into a plain Python list.

    Tensors and ndarrays are raveled element-wise; lists/tuples are copied;
    any other value becomes a one-element list.
    """
    if isinstance(x, (list, tuple)):
        return list(x)
    if torch is not None and isinstance(x, torch.Tensor):
        return x.detach().cpu().ravel().tolist()
    if isinstance(x, np.ndarray):
        return x.ravel().tolist()
    return [x]


def to_scalar(x):
    """Return the first element of *x* coerced to a Python float.

    Tensors/ndarrays contribute their first raveled element; sequences their
    first item; scalars are converted directly.
    """
    first = x
    if torch is not None and isinstance(x, torch.Tensor):
        first = x.detach().cpu().ravel()[0].item()
    elif isinstance(x, np.ndarray):
        first = x.ravel()[0]
    elif isinstance(x, (list, tuple)):
        first = x[0]
    return float(first)
| 115 | @dataclass 116 | class EvalRealConfig: 117 | repo_id: str 118 | policy: PreTrainedConfig | None = None 119 | 120 | root: str = "" 121 | episodes: int = 0 122 | frequency: float = 30.0 123 | 124 | # Basic control parameters 125 | arm: str = "G1_29" # G1_29, G1_23 126 | ee: str = "dex3" # dex3, dex1, inspire1, brainco 127 | 128 | # Mode flags 129 | motion: bool = False 130 | headless: bool = False 131 | visualization: bool = False 132 | send_real_robot: bool = False 133 | use_dataset: bool = False 134 | 135 | rename_map: dict[str, str] = field(default_factory=dict) 136 | 137 | def __post_init__(self): 138 | # HACK: We parse again the cli args here to get the pretrained path if there was one. 139 | policy_path = parser.get_path_arg("policy") 140 | if policy_path: 141 | cli_overrides = parser.get_cli_overrides("policy") 142 | self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) 143 | self.policy.pretrained_path = policy_path 144 | else: 145 | logging.warning( 146 | "No pretrained path was provided, evaluated policy will be built from scratch (random weights)." 147 | ) 148 | 149 | @classmethod 150 | def __get_path_fields__(cls) -> list[str]: 151 | """This enables the parser to load config from the policy using `--policy.path=local/dir`""" 152 | return ["policy"] 153 | -------------------------------------------------------------------------------- /unitree_lerobot/eval_robot/utils/weighted_moving_filter.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | 5 | class WeightedMovingFilter: 6 | def __init__(self, weights, data_size=14): 7 | self._window_size = len(weights) 8 | self._weights = np.array(weights) 9 | # assert np.isclose(np.sum(self._weights), 1.0), "[WeightedMovingFilter] the sum of weights list must be 1.0!" 
10 | self._data_size = data_size 11 | self._filtered_data = np.zeros(self._data_size) 12 | self._data_queue = [] 13 | 14 | def _apply_filter(self): 15 | if len(self._data_queue) < self._window_size: 16 | return self._data_queue[-1] 17 | 18 | data_array = np.array(self._data_queue) 19 | temp_filtered_data = np.zeros(self._data_size) 20 | for i in range(self._data_size): 21 | temp_filtered_data[i] = np.convolve(data_array[:, i], self._weights, mode="valid")[-1] 22 | 23 | return temp_filtered_data 24 | 25 | def add_data(self, new_data): 26 | assert len(new_data) == self._data_size 27 | 28 | if len(self._data_queue) > 0 and np.array_equal(new_data, self._data_queue[-1]): 29 | return # skip duplicate data 30 | 31 | if len(self._data_queue) >= self._window_size: 32 | self._data_queue.pop(0) 33 | 34 | self._data_queue.append(new_data) 35 | self._filtered_data = self._apply_filter() 36 | 37 | @property 38 | def filtered_data(self): 39 | return self._filtered_data 40 | 41 | 42 | def visualize_filter_comparison(filter_params, steps): 43 | import time 44 | 45 | t = np.linspace(0, 4 * np.pi, steps) 46 | original_data = np.array( 47 | [np.sin(t + i) + np.random.normal(0, 0.2, len(t)) for i in range(35)] 48 | ).T # sin wave with noise, shape is [len(t), 35] 49 | 50 | plt.figure(figsize=(14, 10)) 51 | 52 | for idx, weights in enumerate(filter_params): 53 | filter = WeightedMovingFilter(weights, 14) 54 | data_2b_filtered = original_data.copy() 55 | filtered_data = [] 56 | 57 | time1 = time.time() 58 | 59 | for i in range(steps): 60 | filter.add_data(data_2b_filtered[i][13:27]) # step i, columns 13 to 26 (total:14) 61 | data_2b_filtered[i][13:27] = filter.filtered_data 62 | filtered_data.append(data_2b_filtered[i]) 63 | 64 | time2 = time.time() 65 | print(f"filter_params:{filter_params[idx]}, time cosume:{time2 - time1}") 66 | 67 | filtered_data = np.array(filtered_data) 68 | 69 | # col0 should not 2b filtered 70 | plt.subplot(len(filter_params), 2, idx * 2 + 1) 71 | 
plt.plot(filtered_data[:, 0], label=f"Filtered (Window {filter._window_size})") 72 | plt.plot(original_data[:, 0], "r--", label="Original", alpha=0.5) 73 | plt.title("Joint 1 - Should not to be filtered.") 74 | plt.xlabel("Step") 75 | plt.ylabel("Value") 76 | plt.legend() 77 | 78 | # col13 should 2b filtered 79 | plt.subplot(len(filter_params), 2, idx * 2 + 2) 80 | plt.plot(filtered_data[:, 13], label=f"Filtered (Window {filter._window_size})") 81 | plt.plot(original_data[:, 13], "r--", label="Original", alpha=0.5) 82 | plt.title(f"Joint 13 - Window {filter._window_size}, Weights {weights}") 83 | plt.xlabel("Step") 84 | plt.ylabel("Value") 85 | plt.legend() 86 | 87 | plt.tight_layout() 88 | plt.show() 89 | 90 | 91 | if __name__ == "__main__": 92 | # windows_size and weights 93 | filter_params = [ 94 | (np.array([0.7, 0.2, 0.1])), 95 | (np.array([0.5, 0.3, 0.2])), 96 | (np.array([0.4, 0.3, 0.2, 0.1])), 97 | ] 98 | 99 | visualize_filter_comparison(filter_params, steps=100) 100 | -------------------------------------------------------------------------------- /unitree_lerobot/utils/convert_lerobot_to_h5.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script lerobot to h5. 
def __init__(self, repo_id: str, root: str | None = None, image_dtype: str = "to_unit8") -> None:
    """Wrap a LeRobotDataset for episode-by-episode extraction.

    Args:
        repo_id: Hugging Face Hub repo id of the dataset.
        root: Optional local root directory for the dataset; passed through
            to LeRobotDataset.
        image_dtype: Image output mode used by process_episode — "to_unit8"
            keeps raw uint8 frames, "to_bytes" JPEG-encodes them.
            NOTE(review): "to_unit8" (sic) is the spelling the comparison in
            process_episode expects; callers must match it exactly.
    """
    self.image_dtype = image_dtype
    self.dataset = LeRobotDataset(repo_id=repo_id, root=root)
57 | cam_height, cam_width = next(iter(image_dict.values())).shape[:2] 58 | episode["state"].append(step["observation.state"]) 59 | episode["action"].append(step["action"]) 60 | 61 | episode["cameras"] = cameras 62 | episode["task"] = step["task"] 63 | episode["episode_length"] = to_idx - from_idx 64 | 65 | # Data configuration for later use 66 | episode["data_cfg"] = { 67 | "camera_names": list(image_dict.keys()), 68 | "cam_height": cam_height, 69 | "cam_width": cam_width, 70 | "state_dim": np.squeeze(step["observation.state"].numpy().shape), 71 | "action_dim": np.squeeze(step["action"].numpy().shape), 72 | } 73 | episode["episode_index"] = episode_index 74 | 75 | return episode 76 | 77 | 78 | class H5Writer: 79 | def __init__(self, output_dir: Path) -> None: 80 | self.output_dir = output_dir 81 | os.makedirs(output_dir, exist_ok=True) 82 | 83 | def write_to_h5(self, episode: dict) -> None: 84 | """Write episode data to HDF5 file.""" 85 | 86 | episode_length = episode["episode_length"] 87 | episode_index = episode["episode_index"] 88 | state = episode["state"] 89 | action = episode["action"] 90 | qvel = np.zeros_like(episode["state"]) 91 | cameras = episode["cameras"] 92 | task = episode["task"] 93 | data_cfg = episode["data_cfg"] 94 | 95 | # Prepare data dictionary 96 | data_dict = { 97 | "/observations/qpos": [state], 98 | "/observations/qvel": [qvel], 99 | "/action": [action], 100 | **{f"/observations/images/{k}": [v] for k, v in cameras.items()}, 101 | } 102 | 103 | h5_path = os.path.join(self.output_dir, f"episode_{episode_index}.hdf5") 104 | 105 | with h5py.File(h5_path, "w", rdcc_nbytes=1024**2 * 2, libver="latest") as root: 106 | # Set attributes 107 | root.attrs["sim"] = False 108 | 109 | # Create datasets 110 | obs = root.create_group("observations") 111 | image = obs.create_group("images") 112 | 113 | # Write camera images 114 | for cam_name, images in cameras.items(): 115 | data_dtype = images[0].dtype 116 | shape = ( 117 | (episode_length, 
data_cfg["cam_height"], data_cfg["cam_width"], 3) 118 | if data_dtype == "uint8" 119 | else (episode_length,) 120 | ) 121 | chunks = (1, data_cfg["cam_height"], data_cfg["cam_width"], 3) if data_dtype == "uint8" else (1,) 122 | image.create_dataset(cam_name, shape=shape, dtype=data_dtype, chunks=chunks, compression="gzip") 123 | # root[f'/observations/images/{cam_name}'][...] = images 124 | 125 | # Write state and action data 126 | obs.create_dataset("qpos", (episode_length, data_cfg["state_dim"]), dtype="float32", compression="gzip") 127 | obs.create_dataset("qvel", (episode_length, data_cfg["state_dim"]), dtype="float32", compression="gzip") 128 | root.create_dataset("action", (episode_length, data_cfg["action_dim"]), dtype="float32", compression="gzip") 129 | 130 | # Write metadata 131 | root.create_dataset("is_edited", (1,), dtype="uint8") 132 | substep_reasonings = root.create_dataset( 133 | "substep_reasonings", (episode_length,), dtype=h5py.string_dtype(encoding="utf-8"), compression="gzip" 134 | ) 135 | root.create_dataset("language_raw", data=task) 136 | substep_reasonings[:] = [task] * episode_length 137 | 138 | # Write additional data 139 | for name, array in data_dict.items(): 140 | root[name][...] 
= array 141 | 142 | 143 | def lerobot_to_h5(repo_id: str, output_dir: Path, root: str = None) -> None: 144 | """Main function to process and write LeRobot data to HDF5 format.""" 145 | 146 | # Initialize data processor and H5 writer 147 | data_processor = LeRobotDataProcessor( 148 | repo_id, root, image_dtype="to_unit8" 149 | ) # image_dtype Options: "to_unit8", "to_bytes" 150 | h5_writer = H5Writer(output_dir) 151 | 152 | # Process each episode 153 | for episode_index in tqdm( 154 | range(data_processor.dataset.num_episodes), desc="Episodes", position=0, dynamic_ncols=True 155 | ): 156 | episode = data_processor.process_episode(episode_index) 157 | h5_writer.write_to_h5(episode) 158 | 159 | 160 | if __name__ == "__main__": 161 | tyro.cli(lerobot_to_h5) 162 | -------------------------------------------------------------------------------- /unitree_lerobot/utils/convert_unitree_json_to_h5.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script Json to h5. 3 | 4 | # --data_dirs Corresponds to the directory of your JSON dataset 5 | # --output_dir Save path to h5 file 6 | # --robot_type The type of the robot used in the dataset (e.g., Unitree_Z1_Single, Unitree_Z1_Dual, Unitree_G1_Dex1, Unitree_G1_Dex3, Unitree_G1_Brainco, Unitree_G1_Inspire) 7 | 8 | python unitree_lerobot/utils/convert_unitree_json_to_h5.py \ 9 | --data_dirs $HOME/datasets/json \ 10 | --output_dir $HOME/datasets/h5 \ 11 | --robot_type Unitree_G1_Dex3 12 | """ 13 | 14 | import os 15 | import tyro 16 | import json 17 | import h5py 18 | import cv2 19 | import tqdm 20 | import glob 21 | import numpy as np 22 | from pathlib import Path 23 | from collections import defaultdict 24 | from unitree_lerobot.utils.constants import ROBOT_CONFIGS 25 | 26 | 27 | class JsonDataset: 28 | def __init__(self, data_dirs: Path, robot_type: str) -> None: 29 | """ 30 | Initialize the dataset for loading and processing HDF5 files containing robot manipulation data. 
31 | 32 | Args: 33 | data_dirs: Path to directory containing training data 34 | """ 35 | assert data_dirs is not None, "Data directory cannot be None" 36 | assert robot_type is not None, "Robot type cannot be None" 37 | self.data_dirs = data_dirs 38 | self.json_file = "data.json" 39 | 40 | # Initialize paths and cache 41 | self._init_paths() 42 | self._init_cache() 43 | self.json_state_data_name = ROBOT_CONFIGS[robot_type].json_state_data_name 44 | self.json_action_data_name = ROBOT_CONFIGS[robot_type].json_action_data_name 45 | self.camera_to_image_key = ROBOT_CONFIGS[robot_type].camera_to_image_key 46 | 47 | def _init_paths(self) -> None: 48 | """Initialize episode and task paths.""" 49 | 50 | self.episode_paths = [] 51 | self.task_paths = [] 52 | 53 | for task_path in glob.glob(os.path.join(self.data_dirs, "*")): 54 | if os.path.isdir(task_path): 55 | episode_paths = glob.glob(os.path.join(task_path, "*")) 56 | if episode_paths: 57 | self.task_paths.append(task_path) 58 | self.episode_paths.extend(episode_paths) 59 | 60 | self.episode_paths = sorted(self.episode_paths) 61 | self.episode_ids = list(range(len(self.episode_paths))) 62 | 63 | def __len__(self) -> int: 64 | """Return the number of episodes in the dataset.""" 65 | return len(self.episode_paths) 66 | 67 | def _init_cache(self) -> list: 68 | """Initialize data cache if enabled.""" 69 | 70 | self.episodes_data_cached = [] 71 | for episode_path in tqdm.tqdm(self.episode_paths, desc="Loading Cache Json"): 72 | json_path = os.path.join(episode_path, self.json_file) 73 | with open(json_path, encoding="utf-8") as jsonf: 74 | self.episodes_data_cached.append(json.load(jsonf)) 75 | 76 | print(f"==> Cached {len(self.episodes_data_cached)} episodes") 77 | 78 | return self.episodes_data_cached 79 | 80 | def _extract_data(self, episode_data: dict, key: str, parts: list[str]) -> np.ndarray: 81 | """ 82 | Extract data from episode dictionary for specified parts. 
83 | 84 | Args: 85 | episode_data: Dictionary containing episode data 86 | key: Data key to extract ('states' or 'actions') 87 | parts: List of parts to include ('left_arm', 'right_arm') 88 | 89 | Returns: 90 | Concatenated numpy array of the requested data 91 | """ 92 | result = [] 93 | for sample_data in episode_data["data"]: 94 | data_array = np.array([], dtype=np.float32) 95 | for part in parts: 96 | if part in sample_data[key] and sample_data[key][part] is not None: 97 | qpos = np.array(sample_data[key][part]["qpos"], dtype=np.float32) 98 | data_array = np.concatenate([data_array, qpos]) 99 | result.append(data_array) 100 | return np.array(result) 101 | 102 | def _parse_images(self, episode_path: str, episode_data) -> dict[str, list[np.ndarray]]: 103 | """Load and stack images for a given camera key.""" 104 | 105 | images = defaultdict(list) 106 | 107 | keys = episode_data["data"][0]["colors"].keys() 108 | cameras = [key for key in keys if "depth" not in key] 109 | 110 | for camera in cameras: 111 | image_key = self.camera_to_image_key.get(camera) 112 | 113 | for sample_data in episode_data["data"]: 114 | image_path = os.path.join(episode_path, sample_data["colors"].get(camera)) 115 | if not os.path.exists(image_path): 116 | raise FileNotFoundError(f"Image path does not exist: {image_path}") 117 | 118 | image = cv2.imread(image_path) 119 | if image is None: 120 | raise RuntimeError(f"Failed to read image: {image_path}") 121 | 122 | image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) 123 | images[image_key].append(image_rgb) 124 | 125 | return images 126 | 127 | def get_item( 128 | self, 129 | index: int | None = None, 130 | ) -> dict: 131 | """Get a training sample from the dataset.""" 132 | 133 | file_path = np.random.choice(self.episode_paths) if index is None else self.episode_paths[index] 134 | episode_data = self.episodes_data_cached[index] 135 | 136 | # Load state and action data 137 | action = self._extract_data(episode_data, "actions", 
self.json_action_data_name) 138 | state = self._extract_data(episode_data, "states", self.json_state_data_name) 139 | episode_length = len(state) 140 | state_dim = state.shape[1] if len(state.shape) == 2 else state.shape[0] 141 | action_dim = action.shape[1] if len(action.shape) == 2 else state.shape[0] 142 | 143 | # Load task description 144 | task = episode_data.get("text", {}).get("goal", "") 145 | 146 | # Load camera images 147 | cameras = self._parse_images(file_path, episode_data) 148 | 149 | # Extract camera configuration 150 | cam_height, cam_width = next(img for imgs in cameras.values() if imgs for img in imgs).shape[:2] 151 | data_cfg = { 152 | "camera_names": list(cameras.keys()), 153 | "cam_height": cam_height, 154 | "cam_width": cam_width, 155 | "state_dim": state_dim, 156 | "action_dim": action_dim, 157 | } 158 | 159 | return { 160 | "episode_index": index, 161 | "episode_length": episode_length, 162 | "state": state, 163 | "action": action, 164 | "cameras": cameras, 165 | "task": task, 166 | "data_cfg": data_cfg, 167 | } 168 | 169 | 170 | class H5Writer: 171 | def __init__(self, output_dir: Path) -> None: 172 | self.output_dir = output_dir 173 | os.makedirs(output_dir, exist_ok=True) 174 | 175 | def write_to_h5(self, episode: dict) -> None: 176 | """Write episode data to HDF5 file.""" 177 | 178 | episode_length = episode["episode_length"] 179 | episode_index = episode["episode_index"] 180 | state = episode["state"] 181 | action = episode["action"] 182 | qvel = np.zeros_like(episode["state"]) 183 | cameras = episode["cameras"] 184 | task = episode["task"] 185 | data_cfg = episode["data_cfg"] 186 | 187 | # Prepare data dictionary 188 | data_dict = { 189 | "/observations/qpos": [state], 190 | "/observations/qvel": [qvel], 191 | "/action": [action], 192 | **{f"/observations/images/{k}": [v] for k, v in cameras.items()}, 193 | } 194 | 195 | h5_path = os.path.join(self.output_dir, f"episode_{episode_index}.hdf5") 196 | 197 | with h5py.File(h5_path, "w", 
rdcc_nbytes=1024**2 * 2, libver="latest") as root: 198 | # Set attributes 199 | root.attrs["sim"] = False 200 | 201 | # Create datasets 202 | obs = root.create_group("observations") 203 | image = obs.create_group("images") 204 | 205 | # Write camera images 206 | for cam_name, images in cameras.items(): 207 | image.create_dataset( 208 | cam_name, 209 | shape=(episode_length, data_cfg["cam_height"], data_cfg["cam_width"], 3), 210 | dtype="uint8", 211 | chunks=(1, data_cfg["cam_height"], data_cfg["cam_width"], 3), 212 | compression="gzip", 213 | ) 214 | # root[f'/observations/images/{cam_name}'][...] = images 215 | 216 | # Write state and action data 217 | obs.create_dataset("qpos", (episode_length, data_cfg["state_dim"]), dtype="float32", compression="gzip") 218 | obs.create_dataset("qvel", (episode_length, data_cfg["state_dim"]), dtype="float32", compression="gzip") 219 | root.create_dataset("action", (episode_length, data_cfg["action_dim"]), dtype="float32", compression="gzip") 220 | 221 | # Write metadata 222 | root.create_dataset("is_edited", (1,), dtype="uint8") 223 | substep_reasonings = root.create_dataset( 224 | "substep_reasonings", (episode_length,), dtype=h5py.string_dtype(encoding="utf-8"), compression="gzip" 225 | ) 226 | root.create_dataset("language_raw", data=task) 227 | substep_reasonings[:] = [task] * episode_length 228 | 229 | # Write additional data 230 | for name, array in data_dict.items(): 231 | root[name][...] 
= array 232 | 233 | 234 | def json_to_h5( 235 | data_dirs: Path, 236 | output_dir: Path, 237 | robot_type: str, 238 | ) -> None: 239 | """Convert JSON episode data to HDF5 format.""" 240 | dataset = JsonDataset(data_dirs, robot_type) 241 | h5_writer = H5Writer(output_dir) 242 | 243 | for i in tqdm.tqdm(range(len(dataset))): 244 | episode = dataset.get_item(i) 245 | h5_writer.write_to_h5(episode) 246 | 247 | 248 | if __name__ == "__main__": 249 | tyro.cli(json_to_h5) 250 | -------------------------------------------------------------------------------- /unitree_lerobot/utils/convert_unitree_json_to_lerobot.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script Json to Lerobot. 3 | 4 | # --raw-dir Corresponds to the directory of your JSON dataset 5 | # --repo-id Your unique repo ID on Hugging Face Hub 6 | # --robot_type The type of the robot used in the dataset (e.g., Unitree_Z1_Single, Unitree_Z1_Dual, Unitree_G1_Dex1, Unitree_G1_Dex3, Unitree_G1_Brainco, Unitree_G1_Inspire) 7 | # --push_to_hub Whether or not to upload the dataset to Hugging Face Hub (true or false) 8 | 9 | python unitree_lerobot/utils/convert_unitree_json_to_lerobot.py \ 10 | --raw-dir $HOME/datasets/g1_grabcube_double_hand \ 11 | --repo-id your_name/g1_grabcube_double_hand \ 12 | --robot_type Unitree_G1_Dex3 \ 13 | --push_to_hub 14 | """ 15 | 16 | import os 17 | import cv2 18 | import tqdm 19 | import tyro 20 | import json 21 | import glob 22 | import dataclasses 23 | import shutil 24 | import numpy as np 25 | from pathlib import Path 26 | from collections import defaultdict 27 | from typing import Literal 28 | 29 | from lerobot.utils.constants import HF_LEROBOT_HOME 30 | from lerobot.datasets.lerobot_dataset import LeRobotDataset 31 | 32 | from unitree_lerobot.utils.constants import ROBOT_CONFIGS 33 | 34 | 35 | @dataclasses.dataclass(frozen=True) 36 | class DatasetConfig: 37 | use_videos: bool = True 38 | tolerance_s: float = 0.0001 39 | 
image_writer_processes: int = 10 40 | image_writer_threads: int = 5 41 | video_backend: str | None = None 42 | 43 | 44 | DEFAULT_DATASET_CONFIG = DatasetConfig() 45 | 46 | 47 | class JsonDataset: 48 | def __init__(self, data_dirs: Path, robot_type: str) -> None: 49 | """ 50 | Initialize the dataset for loading and processing HDF5 files containing robot manipulation data. 51 | 52 | Args: 53 | data_dirs: Path to directory containing training data 54 | """ 55 | assert data_dirs is not None, "Data directory cannot be None" 56 | assert robot_type is not None, "Robot type cannot be None" 57 | self.data_dirs = data_dirs 58 | self.json_file = "data.json" 59 | 60 | # Initialize paths and cache 61 | self._init_paths() 62 | self._init_cache() 63 | self.json_state_data_name = ROBOT_CONFIGS[robot_type].json_state_data_name 64 | self.json_action_data_name = ROBOT_CONFIGS[robot_type].json_action_data_name 65 | self.camera_to_image_key = ROBOT_CONFIGS[robot_type].camera_to_image_key 66 | 67 | def _init_paths(self) -> None: 68 | """Initialize episode and task paths.""" 69 | 70 | self.episode_paths = [] 71 | self.task_paths = [] 72 | 73 | for task_path in glob.glob(os.path.join(self.data_dirs, "*")): 74 | if os.path.isdir(task_path): 75 | episode_paths = glob.glob(os.path.join(task_path, "*")) 76 | if episode_paths: 77 | self.task_paths.append(task_path) 78 | self.episode_paths.extend(episode_paths) 79 | 80 | self.episode_paths = sorted(self.episode_paths) 81 | self.episode_ids = list(range(len(self.episode_paths))) 82 | 83 | def __len__(self) -> int: 84 | """Return the number of episodes in the dataset.""" 85 | return len(self.episode_paths) 86 | 87 | def _init_cache(self) -> list: 88 | """Initialize data cache if enabled.""" 89 | 90 | self.episodes_data_cached = [] 91 | for episode_path in tqdm.tqdm(self.episode_paths, desc="Loading Cache Json"): 92 | json_path = os.path.join(episode_path, self.json_file) 93 | with open(json_path, encoding="utf-8") as jsonf: 94 | 
self.episodes_data_cached.append(json.load(jsonf)) 95 | 96 | print(f"==> Cached {len(self.episodes_data_cached)} episodes") 97 | 98 | return self.episodes_data_cached 99 | 100 | def _extract_data(self, episode_data: dict, key: str, parts: list[str]) -> np.ndarray: 101 | """ 102 | Extract data from episode dictionary for specified parts. 103 | 104 | Args: 105 | episode_data: Dictionary containing episode data 106 | key: Data key to extract ('states' or 'actions') 107 | parts: List of parts to include ('left_arm', 'right_arm') 108 | 109 | Returns: 110 | Concatenated numpy array of the requested data 111 | """ 112 | result = [] 113 | for sample_data in episode_data["data"]: 114 | data_array = np.array([], dtype=np.float32) 115 | for part in parts: 116 | key_parts = part.split(".") 117 | qpos = None 118 | for key_part in key_parts: 119 | if qpos is None and key_part in sample_data[key] and sample_data[key][key_part] is not None: 120 | qpos = sample_data[key][key_part] 121 | else: 122 | if qpos is None: 123 | raise ValueError(f"qpos is None for part: {part}") 124 | qpos = qpos[key_part] 125 | if qpos is None: 126 | raise ValueError(f"qpos is None for part: {part}") 127 | if isinstance(qpos, list): 128 | qpos = np.array(qpos, dtype=np.float32).flatten() 129 | else: 130 | qpos = np.array([qpos], dtype=np.float32).flatten() 131 | data_array = np.concatenate([data_array, qpos]) 132 | result.append(data_array) 133 | return np.array(result) 134 | 135 | def _parse_images(self, episode_path: str, episode_data) -> dict[str, list[np.ndarray]]: 136 | """Load and stack images for a given camera key.""" 137 | 138 | images = defaultdict(list) 139 | 140 | keys = episode_data["data"][0]["colors"].keys() 141 | cameras = [key for key in keys if "depth" not in key] 142 | 143 | for camera in cameras: 144 | image_key = self.camera_to_image_key.get(camera) 145 | if image_key is None: 146 | continue 147 | 148 | for sample_data in episode_data["data"]: 149 | relative_path = 
sample_data["colors"].get(camera) 150 | if not relative_path: 151 | continue 152 | 153 | image_path = os.path.join(episode_path, relative_path) 154 | if not os.path.exists(image_path): 155 | raise FileNotFoundError(f"Image path does not exist: {image_path}") 156 | 157 | image = cv2.imread(image_path) 158 | if image is None: 159 | raise RuntimeError(f"Failed to read image: {image_path}") 160 | 161 | image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) 162 | images[image_key].append(image_rgb) 163 | 164 | return images 165 | 166 | def get_item( 167 | self, 168 | index: int | None = None, 169 | ) -> dict: 170 | """Get a training sample from the dataset.""" 171 | 172 | file_path = np.random.choice(self.episode_paths) if index is None else self.episode_paths[index] 173 | episode_data = self.episodes_data_cached[index] 174 | 175 | # Load state and action data 176 | action = self._extract_data(episode_data, "actions", self.json_action_data_name) 177 | state = self._extract_data(episode_data, "states", self.json_state_data_name) 178 | episode_length = len(state) 179 | state_dim = state.shape[1] if len(state.shape) == 2 else state.shape[0] 180 | action_dim = action.shape[1] if len(action.shape) == 2 else state.shape[0] 181 | 182 | # Load task description 183 | task = episode_data.get("text", {}).get("goal", "") 184 | 185 | # Load camera images 186 | cameras = self._parse_images(file_path, episode_data) 187 | 188 | # Extract camera configuration 189 | cam_height, cam_width = next(img for imgs in cameras.values() if imgs for img in imgs).shape[:2] 190 | data_cfg = { 191 | "camera_names": list(cameras.keys()), 192 | "cam_height": cam_height, 193 | "cam_width": cam_width, 194 | "state_dim": state_dim, 195 | "action_dim": action_dim, 196 | } 197 | 198 | return { 199 | "episode_index": index, 200 | "episode_length": episode_length, 201 | "state": state, 202 | "action": action, 203 | "cameras": cameras, 204 | "task": task, 205 | "data_cfg": data_cfg, 206 | } 207 | 208 | 209 | def 
create_empty_dataset( 210 | repo_id: str, 211 | robot_type: str, 212 | mode: Literal["video", "image"] = "video", 213 | *, 214 | has_velocity: bool = False, 215 | has_effort: bool = False, 216 | dataset_config: DatasetConfig = DEFAULT_DATASET_CONFIG, 217 | ) -> LeRobotDataset: 218 | motors = ROBOT_CONFIGS[robot_type].motors 219 | cameras = ROBOT_CONFIGS[robot_type].cameras 220 | 221 | features = { 222 | "observation.state": { 223 | "dtype": "float32", 224 | "shape": (len(motors),), 225 | "names": [ 226 | motors, 227 | ], 228 | }, 229 | "action": { 230 | "dtype": "float32", 231 | "shape": (len(motors),), 232 | "names": [ 233 | motors, 234 | ], 235 | }, 236 | } 237 | 238 | if has_velocity: 239 | features["observation.velocity"] = { 240 | "dtype": "float32", 241 | "shape": (len(motors),), 242 | "names": [ 243 | motors, 244 | ], 245 | } 246 | 247 | if has_effort: 248 | features["observation.effort"] = { 249 | "dtype": "float32", 250 | "shape": (len(motors),), 251 | "names": [ 252 | motors, 253 | ], 254 | } 255 | 256 | for cam in cameras: 257 | features[f"observation.images.{cam}"] = { 258 | "dtype": mode, 259 | "shape": (480, 640, 3), 260 | "names": [ 261 | "height", 262 | "width", 263 | "channel", 264 | ], 265 | } 266 | 267 | if Path(HF_LEROBOT_HOME / repo_id).exists(): 268 | shutil.rmtree(HF_LEROBOT_HOME / repo_id) 269 | 270 | return LeRobotDataset.create( 271 | repo_id=repo_id, 272 | fps=30, 273 | robot_type=robot_type, 274 | features=features, 275 | use_videos=dataset_config.use_videos, 276 | tolerance_s=dataset_config.tolerance_s, 277 | image_writer_processes=dataset_config.image_writer_processes, 278 | image_writer_threads=dataset_config.image_writer_threads, 279 | video_backend=dataset_config.video_backend, 280 | ) 281 | 282 | 283 | def populate_dataset( 284 | dataset: LeRobotDataset, 285 | raw_dir: Path, 286 | robot_type: str, 287 | ) -> LeRobotDataset: 288 | json_dataset = JsonDataset(raw_dir, robot_type) 289 | for i in tqdm.tqdm(range(len(json_dataset))): 290 
| episode = json_dataset.get_item(i) 291 | 292 | state = episode["state"] 293 | action = episode["action"] 294 | cameras = episode["cameras"] 295 | task = episode["task"] 296 | episode_length = episode["episode_length"] 297 | 298 | num_frames = episode_length 299 | for i in range(num_frames): 300 | frame = { 301 | "observation.state": state[i], 302 | "action": action[i], 303 | } 304 | 305 | for camera, img_array in cameras.items(): 306 | frame[f"observation.images.{camera}"] = img_array[i] 307 | 308 | frame["task"] = task 309 | 310 | dataset.add_frame(frame) 311 | dataset.save_episode() 312 | 313 | return dataset 314 | 315 | 316 | def json_to_lerobot( 317 | raw_dir: Path, 318 | repo_id: str, 319 | robot_type: str, # e.g., Unitree_Z1_Single, Unitree_Z1_Dual, Unitree_G1_Dex1, Unitree_G1_Dex3, Unitree_G1_Brainco, Unitree_G1_Inspire 320 | *, 321 | push_to_hub: bool = False, 322 | mode: Literal["video", "image"] = "video", 323 | dataset_config: DatasetConfig = DEFAULT_DATASET_CONFIG, 324 | ): 325 | if (HF_LEROBOT_HOME / repo_id).exists(): 326 | shutil.rmtree(HF_LEROBOT_HOME / repo_id) 327 | 328 | dataset = create_empty_dataset( 329 | repo_id, 330 | robot_type=robot_type, 331 | mode=mode, 332 | has_effort=False, 333 | has_velocity=False, 334 | dataset_config=dataset_config, 335 | ) 336 | dataset = populate_dataset( 337 | dataset, 338 | raw_dir, 339 | robot_type=robot_type, 340 | ) 341 | 342 | if push_to_hub: 343 | dataset.push_to_hub(upload_large_folder=True) 344 | 345 | 346 | def local_push_to_hub( 347 | repo_id: str, 348 | root_path: Path, 349 | ): 350 | dataset = LeRobotDataset(repo_id=repo_id, root=root_path) 351 | dataset.push_to_hub(upload_large_folder=True) 352 | 353 | 354 | if __name__ == "__main__": 355 | tyro.cli(json_to_lerobot) 356 | -------------------------------------------------------------------------------- /unitree_lerobot/utils/sort_and_rename_folders.py: -------------------------------------------------------------------------------- 1 | """ 2 | 
Script to convert Unitree json data to the LeRobot dataset v2.0 format. 3 | 4 | python unitree_lerobot/utils/sort_and_rename_folders.py --data_dir $HOME/datasets/g1_grabcube_double_hand 5 | """ 6 | 7 | import os 8 | import tyro 9 | import uuid 10 | from pathlib import Path 11 | 12 | 13 | def sort_and_rename_folders(data_dir: Path) -> None: 14 | # Get the list of folders sorted by name 15 | folders = sorted([f for f in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, f))]) 16 | 17 | temp_mapping = {} 18 | 19 | # First, rename all folders to unique temporary names 20 | for folder in folders: 21 | temp_name = str(uuid.uuid4()) 22 | original_path = os.path.join(data_dir, folder) 23 | temp_path = os.path.join(data_dir, temp_name) 24 | os.rename(original_path, temp_path) 25 | temp_mapping[temp_name] = folder 26 | 27 | # Then, rename them to the final target names 28 | start_number = 0 29 | for temp_name, original_folder in temp_mapping.items(): 30 | new_folder_name = f"episode_{start_number:04d}" 31 | temp_path = os.path.join(data_dir, temp_name) 32 | new_path = os.path.join(data_dir, new_folder_name) 33 | os.rename(temp_path, new_path) 34 | start_number += 1 35 | 36 | print("The folders have been successfully renamed.") 37 | 38 | 39 | if __name__ == "__main__": 40 | tyro.cli(sort_and_rename_folders) 41 | --------------------------------------------------------------------------------