├── data └── __init__.py ├── policy ├── __init__.py ├── RDT │ ├── __init__.py │ ├── scripts │ │ ├── __init__.py │ │ ├── read_yaml.py │ │ └── encode_lang.py │ ├── data │ │ ├── .gitignore │ │ ├── empty_lang_embed.pt │ │ └── filelock.py │ ├── configs │ │ ├── finetune_datasets.json │ │ ├── finetune_sample_weights.json │ │ ├── zero2.json │ │ ├── calvin_rel_traj_location_bounds_task_ABC_D.json │ │ ├── pretrain_datasets.json │ │ └── pretrain_sample_weights.json │ ├── generate.sh │ ├── assets │ │ └── head.png │ ├── .gitignore │ ├── requirements.txt │ ├── train │ │ └── image_corrupt.py │ ├── model_config │ │ └── _generate_model_config.py │ └── pretrain.sh ├── ACT │ ├── detr │ │ ├── .gitignore │ │ ├── util │ │ │ └── __init__.py │ │ ├── setup.py │ │ ├── models │ │ │ └── __init__.py │ │ └── README.md │ ├── requirements.txt │ ├── deploy_policy.yml │ ├── README_CN.md │ └── README.md ├── DP │ ├── diffusion_policy │ │ ├── __init__.py │ │ ├── model │ │ │ ├── bet │ │ │ │ ├── libraries │ │ │ │ │ └── mingpt │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── LICENSE │ │ │ │ │ │ └── utils.py │ │ │ │ └── action_ae │ │ │ │ │ └── __init__.py │ │ │ ├── common │ │ │ │ ├── module_attr_mixin.py │ │ │ │ ├── shape_util.py │ │ │ │ ├── dict_of_tensor_mixin.py │ │ │ │ └── lr_scheduler.py │ │ │ ├── diffusion │ │ │ │ ├── positional_embedding.py │ │ │ │ └── conv1d_components.py │ │ │ └── vision │ │ │ │ └── model_getter.py │ │ ├── common │ │ │ ├── precise_sleep.py │ │ │ ├── env_util.py │ │ │ ├── nested_dict_util.py │ │ │ ├── pymunk_util.py │ │ │ ├── robomimic_config_util.py │ │ │ └── checkpoint_util.py │ │ ├── policy │ │ │ └── base_image_policy.py │ │ ├── shared_memory │ │ │ └── shared_memory_util.py │ │ ├── config │ │ │ └── task │ │ │ │ ├── default_task_14.yaml │ │ │ │ └── default_task_16.yaml │ │ └── dataset │ │ │ └── base_dataset.py │ ├── .gitignore │ ├── __init__.py │ ├── process_data.sh │ ├── README_CN.md │ ├── dp_model.py │ └── README.md ├── openvla-oft │ ├── prismatic │ │ ├── py.typed │ │ ├── extern │ 
│ │ ├── __init__.py │ │ │ └── hf │ │ │ │ └── __init__.py │ │ ├── models │ │ │ ├── backbones │ │ │ │ ├── __init__.py │ │ │ │ ├── llm │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── prompting │ │ │ │ │ │ └── __init__.py │ │ │ │ │ └── phi.py │ │ │ │ └── vision │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── dinov2_vit.py │ │ │ │ │ ├── in1k_vit.py │ │ │ │ │ ├── siglip_vit.py │ │ │ │ │ └── clip_vit.py │ │ │ ├── vlas │ │ │ │ └── __init__.py │ │ │ ├── vlms │ │ │ │ └── __init__.py │ │ │ ├── __init__.py │ │ │ └── projectors.py │ │ ├── overwatch │ │ │ └── __init__.py │ │ ├── vla │ │ │ ├── __init__.py │ │ │ └── materialize.py │ │ ├── util │ │ │ ├── __init__.py │ │ │ └── nn_utils.py │ │ ├── __init__.py │ │ ├── training │ │ │ ├── __init__.py │ │ │ ├── strategies │ │ │ │ └── __init__.py │ │ │ └── train_utils.py │ │ ├── preprocessing │ │ │ └── __init__.py │ │ └── conf │ │ │ └── __init__.py │ ├── rlds_dataset_builder │ │ ├── LIBERO_10 │ │ │ ├── __init__.py │ │ │ ├── CITATIONS.bib │ │ │ └── README.md │ │ ├── LIBERO_Goal │ │ │ ├── __init__.py │ │ │ ├── CITATIONS.bib │ │ │ └── README.md │ │ ├── LIBERO_Object │ │ │ ├── __init__.py │ │ │ ├── CITATIONS.bib │ │ │ └── README.md │ │ ├── LIBERO_Spatial │ │ │ ├── __init__.py │ │ │ ├── CITATIONS.bib │ │ │ └── README.md │ │ ├── example_dataset │ │ │ ├── __init__.py │ │ │ ├── CITATIONS.bib │ │ │ ├── README.md │ │ │ └── create_example_data.py │ │ ├── ALOHA_Real_Task_Sample │ │ │ ├── __init__.py │ │ │ ├── CITATIONS.bib │ │ │ └── README.md │ │ ├── .gitignore │ │ ├── setup.py │ │ └── LICENSE │ ├── experiments │ │ └── robot │ │ │ ├── libero │ │ │ ├── libero_requirements.txt │ │ │ └── sample_libero_spatial_observation.pkl │ │ │ └── aloha │ │ │ └── requirements_aloha.txt │ ├── .pre-commit-config.yaml │ ├── SETUP.md │ ├── finetune.sh │ └── LICENSE ├── test_policy │ └── deploy_policy.yml ├── openpi │ ├── examples │ │ ├── simple_client │ │ │ ├── requirements.in │ │ │ ├── requirements.txt │ │ │ └── Dockerfile │ │ ├── aloha_sim │ │ │ └── requirements.in │ │ ├── aloha_real 
│ │ │ └── requirements.in │ │ └── libero │ │ │ └── compose.yml │ ├── src │ │ └── openpi │ │ │ ├── models_pytorch │ │ │ └── transformers_replace │ │ │ │ └── models │ │ │ │ └── siglip │ │ │ │ └── check.py │ │ │ ├── policies │ │ │ └── policy_test.py │ │ │ ├── shared │ │ │ ├── normalize_test.py │ │ │ └── download_test.py │ │ │ └── models │ │ │ └── pi0_test.py │ ├── .gitignore │ ├── finetune.sh │ ├── packages │ │ └── openpi-client │ │ │ ├── pyproject.toml │ │ │ └── src │ │ │ └── openpi_client │ │ │ └── action_chunk_broker.py │ ├── .github │ │ └── workflows │ │ │ └── test.yml │ ├── scripts │ │ ├── train_test.py │ │ └── docker │ │ │ ├── compose.yml │ │ │ └── serve_policy.Dockerfile │ ├── README.md │ └── docs │ │ └── docker.md ├── weights │ └── README.md └── README.md ├── sensor ├── __init__.py ├── touch_sensor.py ├── teleoperation_sensor.py ├── vision_sensor.py ├── TactileGloveRos2_sensor.py ├── sensor.py ├── TactileGloveRos_sensor.py ├── TestVision_sensor.py └── _Pika_sensor.py ├── utils ├── __init__.py ├── ros_subscriber.py ├── task_functions.py └── ros2_subscriber.py ├── controller ├── __init__.py ├── URUrx_controller.py ├── mobile_controller.py ├── dexhand_controller.py ├── TestMobile_controller.py └── controller.py ├── my_robot ├── __init__.py ├── README.md └── _realsense_only.py ├── planner ├── __init__.py ├── requirements.txt └── README.md ├── scripts ├── __init__.py ├── test_robot.py ├── test_sensor.py ├── _download_drAlohaSDK.sh ├── visual_hdf5_rerun.sh └── test_controller.py ├── third_party ├── __init__.py ├── Realman_IK │ ├── qp-tools │ │ ├── doc │ │ │ ├── customdoxygen.css │ │ │ ├── images │ │ │ │ ├── p1.png │ │ │ │ ├── p2.png │ │ │ │ ├── hound.png │ │ │ │ ├── light.png │ │ │ │ ├── panther.png │ │ │ │ ├── a_matrix.png │ │ │ │ ├── nameCard.png │ │ │ │ ├── a1_profile.jpeg │ │ │ │ └── a_matrix_sparse.png │ │ │ ├── Documentation.pdf │ │ │ ├── footer.html │ │ │ └── header.html │ │ ├── python │ │ │ ├── qpSWIFT.egg-info │ │ │ │ ├── dependency_links.txt │ │ │ │ ├── 
requires.txt │ │ │ │ ├── top_level.txt │ │ │ │ ├── PKG-INFO │ │ │ │ └── SOURCES.txt │ │ │ ├── Matrix.mat │ │ │ ├── dist │ │ │ │ ├── qpSWIFT-1.0.0-py3.11-win-amd64.egg │ │ │ │ └── qpSWIFT-1.0.0-py3.12-win-amd64.egg │ │ │ ├── test.py │ │ │ ├── demoqp.py │ │ │ ├── README.txt │ │ │ ├── basic_test.py │ │ │ └── setup.py │ │ ├── .gitignore │ │ ├── matlab │ │ │ ├── Matrix.mat │ │ │ ├── qpSWIFT.mexmaci64 │ │ │ ├── prototype code │ │ │ │ ├── my_Trans_multiply_diag.m │ │ │ │ ├── my_inverse_diag_matrix.m │ │ │ │ ├── findsteplength.m │ │ │ │ ├── test1.m │ │ │ │ ├── qpSWIFT_init.m │ │ │ │ └── qpSWIFT_checkinputs.m │ │ │ ├── basic_test.m │ │ │ ├── demoqp.m │ │ │ └── README.txt │ │ ├── simulink │ │ │ ├── demoqp.slx │ │ │ ├── demoqp_e.slx │ │ │ ├── qpSWIFT_sfunc.mexa64 │ │ │ ├── qpSWIFT_sfunc.mexw64 │ │ │ ├── qpSWIFT_sfunc_e.mexa64 │ │ │ ├── qpSWIFT_sfunc_e.mexw64 │ │ │ └── README.txt │ │ ├── include │ │ │ ├── CMakeLists.txt │ │ │ ├── qpSWIFT.h │ │ │ └── timer.h │ │ ├── src │ │ │ ├── CMakeLists.txt │ │ │ ├── License(LDL).txt │ │ │ ├── amd_defaults.c │ │ │ ├── timer.c │ │ │ ├── amd_control.c │ │ │ └── License(AMD).txt │ │ ├── demo │ │ │ ├── Matrices.h │ │ │ └── runqp.c │ │ ├── tests │ │ │ └── CMakeLists.txt │ │ └── CITATION.cff │ └── lib │ │ ├── librman_algorithm.so │ │ └── librman_algorithm.dll ├── _download_drAlohaSDK.py └── README.md ├── config ├── RoboTwin_setting.yml ├── robot_1_move_mobile_2.yml ├── robot_2_move_mobile_2.yml ├── robot_1_move_mobile_1.yml ├── robot_2_move_mobile_1.yml ├── robot_1_model_infer.yml └── robot_2_model_infer.yml ├── imgs ├── Wechat.jpg └── myWechat.jpg ├── requirements.txt ├── task_instructions ├── test.json └── README.md ├── eval_offline.sh ├── deploy.sh ├── tools ├── test_press.py ├── realsense_serial.py └── ros_test.py ├── .gitignore └── example ├── task ├── serial_task.py └── parallel_task.py └── collect ├── collect.py └── collect_mp_robot.py /data/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /policy/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /sensor/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /controller/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /my_robot/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /planner/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/RDT/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts/test_robot.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts/test_sensor.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /third_party/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/ACT/detr/.gitignore: -------------------------------------------------------------------------------- 1 | !models -------------------------------------------------------------------------------- /policy/RDT/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /my_robot/README.md: -------------------------------------------------------------------------------- 1 | # 将你组装起来的controller放在这里方便调用 -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/extern/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/DP/.gitignore: -------------------------------------------------------------------------------- 1 | data/* 2 | checkpoints/* 3 | -------------------------------------------------------------------------------- /policy/DP/__init__.py: -------------------------------------------------------------------------------- 1 | from .dp_model import * 2 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/extern/hf/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /policy/test_policy/deploy_policy.yml: -------------------------------------------------------------------------------- 1 | test_info_2: 2 -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/customdoxygen.css: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /config/RoboTwin_setting.yml: -------------------------------------------------------------------------------- 1 | left_arm_dim: 6 2 | right_arm_dim: 6 -------------------------------------------------------------------------------- /policy/RDT/data/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore data files 2 | datasets 3 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_10/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_Goal/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/model/bet/libraries/mingpt/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/RDT/configs/finetune_datasets.json: 
-------------------------------------------------------------------------------- 1 | [ 2 | "agilex" 3 | ] -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_Object/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_Spatial/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/example_dataset/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /policy/RDT/configs/finetune_sample_weights.json: -------------------------------------------------------------------------------- 1 | { 2 | "agilex": 100 3 | } -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/ALOHA_Real_Task_Sample/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/qpSWIFT.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/qpSWIFT.egg-info/requires.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.6 2 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/qpSWIFT.egg-info/top_level.txt: 
-------------------------------------------------------------------------------- 1 | qpSWIFT 2 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/vlas/__init__.py: -------------------------------------------------------------------------------- 1 | from .openvla import OpenVLA 2 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/vlms/__init__.py: -------------------------------------------------------------------------------- 1 | from .prismatic import PrismaticVLM 2 | -------------------------------------------------------------------------------- /imgs/Wechat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/imgs/Wechat.jpg -------------------------------------------------------------------------------- /imgs/myWechat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/imgs/myWechat.jpg -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/overwatch/__init__.py: -------------------------------------------------------------------------------- 1 | from .overwatch import initialize_overwatch 2 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/.gitignore: -------------------------------------------------------------------------------- 1 | */data 2 | wandb 3 | __pycache__ 4 | .idea 5 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/vla/__init__.py: -------------------------------------------------------------------------------- 1 | from .materialize import get_vla_dataset_and_collator 2 | 
-------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | scipy 2 | numpy 3 | h5py 4 | pyrealsense2 5 | opencv-python 6 | keyboard 7 | pyyaml -------------------------------------------------------------------------------- /policy/ACT/detr/util/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | -------------------------------------------------------------------------------- /policy/RDT/generate.sh: -------------------------------------------------------------------------------- 1 | model_name=${1} 2 | 3 | python ./model_config/_generate_model_config.py $model_name -------------------------------------------------------------------------------- /policy/openpi/examples/simple_client/requirements.in: -------------------------------------------------------------------------------- 1 | numpy>=1.22.4,<2.0.0 2 | rich 3 | tqdm 4 | tyro 5 | polars -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_10/CITATIONS.bib: -------------------------------------------------------------------------------- 1 | // TODO(example_dataset): BibTeX citation 2 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_Goal/CITATIONS.bib: -------------------------------------------------------------------------------- 1 | // TODO(example_dataset): BibTeX citation 2 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_Object/CITATIONS.bib: -------------------------------------------------------------------------------- 1 | // TODO(example_dataset): BibTeX citation 2 | 
-------------------------------------------------------------------------------- /policy/RDT/assets/head.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/policy/RDT/assets/head.png -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/util/__init__.py: -------------------------------------------------------------------------------- 1 | from .torch_utils import check_bloat16_supported, set_global_seed 2 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_Spatial/CITATIONS.bib: -------------------------------------------------------------------------------- 1 | // TODO(example_dataset): BibTeX citation 2 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/example_dataset/CITATIONS.bib: -------------------------------------------------------------------------------- 1 | // TODO(example_dataset): BibTeX citation 2 | -------------------------------------------------------------------------------- /scripts/_download_drAlohaSDK.sh: -------------------------------------------------------------------------------- 1 | cd third_party 2 | 3 | python _download_drAlohaSDK.py 4 | unzip dr.zip 5 | rm dr.zip -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/ALOHA_Real_Task_Sample/CITATIONS.bib: -------------------------------------------------------------------------------- 1 | // TODO(example_dataset): BibTeX citation 2 | -------------------------------------------------------------------------------- /policy/RDT/.gitignore: -------------------------------------------------------------------------------- 1 | processed_data/ 2 | training_data/ 3 | checkpoints/ 4 | model_config/*.yml 5 | wandb/* 6 | 
!models/ 7 | !data/ -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/__init__.py: -------------------------------------------------------------------------------- 1 | from .models import available_model_names, available_models, get_model_description, load 2 | -------------------------------------------------------------------------------- /policy/RDT/data/empty_lang_embed.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/policy/RDT/data/empty_lang_embed.pt -------------------------------------------------------------------------------- /task_instructions/test.json: -------------------------------------------------------------------------------- 1 | { 2 | "instructions": [ 3 | "test 1.", 4 | "test 2.", 5 | "test 3." 6 | ] 7 | } -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/training/__init__.py: -------------------------------------------------------------------------------- 1 | from .materialize import get_train_strategy 2 | from .metrics import Metrics, VLAMetrics 3 | -------------------------------------------------------------------------------- /policy/openvla-oft/experiments/robot/libero/libero_requirements.txt: -------------------------------------------------------------------------------- 1 | imageio[ffmpeg] 2 | robosuite==1.4.1 3 | bddl 4 | easydict 5 | cloudpickle 6 | gym 7 | -------------------------------------------------------------------------------- /third_party/Realman_IK/lib/librman_algorithm.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/lib/librman_algorithm.so -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/.gitignore: 
-------------------------------------------------------------------------------- 1 | build/ 2 | bin/ 3 | matlab/*.mexa64 4 | matlab/*.mexw64 5 | python/build 6 | .vscode 7 | doc/html 8 | simulink/slprj -------------------------------------------------------------------------------- /third_party/Realman_IK/lib/librman_algorithm.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/lib/librman_algorithm.dll -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/images/p1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/doc/images/p1.png -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/images/p2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/doc/images/p2.png -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/matlab/Matrix.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/matlab/Matrix.mat -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/Matrix.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/python/Matrix.mat -------------------------------------------------------------------------------- /policy/openpi/examples/aloha_sim/requirements.in: 
-------------------------------------------------------------------------------- 1 | gym-aloha 2 | imageio 3 | matplotlib 4 | msgpack 5 | numpy>=1.22.4,<2.0.0 6 | typing-extensions 7 | tyro 8 | websockets -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | from .download import convert_to_jpg, download_extract 2 | from .materialize import get_dataset_and_collator 3 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup(name="", packages=[""]) 4 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/images/hound.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/doc/images/hound.png -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/images/light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/doc/images/light.png -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/simulink/demoqp.slx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/simulink/demoqp.slx -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/Documentation.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/doc/Documentation.pdf -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/images/panther.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/doc/images/panther.png -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/simulink/demoqp_e.slx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/simulink/demoqp_e.slx -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/training/strategies/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_strategy import TrainingStrategy 2 | from .ddp import DDPStrategy 3 | from .fsdp import FSDPStrategy 4 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/images/a_matrix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/doc/images/a_matrix.png -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/images/nameCard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/doc/images/nameCard.png -------------------------------------------------------------------------------- 
/third_party/Realman_IK/qp-tools/matlab/qpSWIFT.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/matlab/qpSWIFT.mexmaci64 -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/images/a1_profile.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/doc/images/a1_profile.jpeg -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/images/a_matrix_sparse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/doc/images/a_matrix_sparse.png -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/simulink/qpSWIFT_sfunc.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/simulink/qpSWIFT_sfunc.mexa64 -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/simulink/qpSWIFT_sfunc.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/simulink/qpSWIFT_sfunc.mexw64 -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/simulink/qpSWIFT_sfunc_e.mexa64: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/simulink/qpSWIFT_sfunc_e.mexa64 -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/simulink/qpSWIFT_sfunc_e.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/simulink/qpSWIFT_sfunc_e.mexw64 -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/conf/__init__.py: -------------------------------------------------------------------------------- 1 | from .datasets import DatasetConfig, DatasetRegistry 2 | from .models import ModelConfig, ModelRegistry 3 | from .vla import VLAConfig, VLARegistry 4 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/backbones/llm/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_llm import LLMBackbone 2 | from .llama2 import LLaMa2LLMBackbone 3 | from .mistral import MistralLLMBackbone 4 | from .phi import PhiLLMBackbone 5 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/matlab/prototype code/my_Trans_multiply_diag.m: -------------------------------------------------------------------------------- 1 | function WTW = my_Trans_multiply_diag(W) 2 | [n,m] = size(W); 3 | WTW = zeros(n,m); 4 | for ii=1:n 5 | WTW(ii,ii) = W(ii,ii)^2; 6 | end -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/dist/qpSWIFT-1.0.0-py3.11-win-amd64.egg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/python/dist/qpSWIFT-1.0.0-py3.11-win-amd64.egg -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/dist/qpSWIFT-1.0.0-py3.12-win-amd64.egg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/third_party/Realman_IK/qp-tools/python/dist/qpSWIFT-1.0.0-py3.12-win-amd64.egg -------------------------------------------------------------------------------- /policy/openpi/src/openpi/models_pytorch/transformers_replace/models/siglip/check.py: -------------------------------------------------------------------------------- 1 | import transformers 2 | 3 | def check_whether_transformers_replace_is_installed_correctly(): 4 | return transformers.__version__ == "4.53.2" -------------------------------------------------------------------------------- /policy/openvla-oft/experiments/robot/libero/sample_libero_spatial_observation.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tian-Nian/control_your_robot/HEAD/policy/openvla-oft/experiments/robot/libero/sample_libero_spatial_observation.pkl -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/matlab/prototype code/my_inverse_diag_matrix.m: -------------------------------------------------------------------------------- 1 | function invW = my_inverse_diag_matrix(W) 2 | [n,m] = size(W); 3 | invW = zeros(n,m); 4 | for ii=1:n 5 | invW(ii,ii) = 1/W(ii,ii); 6 | end -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/qpSWIFT.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: qpSWIFT 3 | Version: 
1.0.0 4 | Summary: Python interface for qpSWIFT 5 | Author: Abhishek Pandala 6 | Requires-Dist: numpy>=1.6 7 | -------------------------------------------------------------------------------- /eval_offline.sh: -------------------------------------------------------------------------------- 1 | python example/deploy/offline_eval.py \ 2 | --model_name "test_policy"\ 3 | --model_class "TestModel"\ 4 | --model_path "test/path/"\ 5 | --task_name "test"\ 6 | --data_path "save/test_robot/"\ 7 | --episode_num 3\ -------------------------------------------------------------------------------- /policy/ACT/requirements.txt: -------------------------------------------------------------------------------- 1 | torchvision 2 | torch==2.4.1 3 | pyquaternion 4 | pyyaml 5 | rospkg 6 | pexpect 7 | mujoco==2.3.7 8 | dm_control==1.0.14 9 | opencv-python 10 | matplotlib 11 | einops 12 | packaging 13 | h5py 14 | ipython -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .load import available_model_names, available_models, get_model_description, load, load_vla 2 | from .materialize import get_llm_backbone_and_tokenizer, get_vision_backbone_and_transform, get_vlm 3 | -------------------------------------------------------------------------------- /policy/openpi/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore all .pyc files 2 | *.pyc 3 | 4 | # Ignore __pycache__ directory 5 | __pycache__/ 6 | 7 | # Ignore other Python cache files 8 | *.pyo 9 | *.pyd 10 | 11 | # datasets 12 | datasets/ 13 | save/ 14 | checkpoints/ 15 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/matlab/prototype code/findsteplength.m: -------------------------------------------------------------------------------- 1 | function 
alpha = findsteplength(s,Delta_s) 2 | 3 | Test_s = s./Delta_s; 4 | Test_s(s./Delta_s>0) = -1000; 5 | [~,idx] = max(Test_s); 6 | alpha = -Test_s(idx); 7 | 8 | 9 | end -------------------------------------------------------------------------------- /task_instructions/README.md: -------------------------------------------------------------------------------- 1 | ### every task instruction will be loaded from the file 2 | example: 3 | `task_1.json` 4 | ``` 5 | ====== task_1.json ===== 6 | { 7 | instruction: 8 | ["instruction 1"], 9 | ["instruction 2"], 10 | .... 11 | } 12 | ``` 13 | -------------------------------------------------------------------------------- /policy/ACT/detr/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from setuptools import find_packages 3 | 4 | setup( 5 | name="detr", 6 | version="0.0.0", 7 | packages=find_packages(), 8 | license="MIT License", 9 | long_description=open("README.md").read(), 10 | ) 11 | -------------------------------------------------------------------------------- /planner/requirements.txt: -------------------------------------------------------------------------------- 1 | torch==2.4.1 2 | torchvision 3 | transforms3d==0.4.2 4 | sapien==3.0.0b1 5 | scipy==1.10.1 6 | mplib==0.2.1 7 | gymnasium==0.29.1 8 | trimesh==4.4.3 9 | open3d==0.18.0 10 | imageio==2.34.2 11 | pydantic 12 | zarr 13 | openai 14 | huggingface_hub==0.25.0 15 | h5py 16 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_10/README.md: -------------------------------------------------------------------------------- 1 | TODO(example_dataset): Markdown description of your dataset. 2 | Description is **formatted** as markdown. 3 | 4 | It should also contain any processing which has been applied (if any), 5 | (e.g. 
corrupted example skipped, images cropped,...): 6 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_Goal/README.md: -------------------------------------------------------------------------------- 1 | TODO(example_dataset): Markdown description of your dataset. 2 | Description is **formatted** as markdown. 3 | 4 | It should also contain any processing which has been applied (if any), 5 | (e.g. corrupted example skipped, images cropped,...): 6 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_Object/README.md: -------------------------------------------------------------------------------- 1 | TODO(example_dataset): Markdown description of your dataset. 2 | Description is **formatted** as markdown. 3 | 4 | It should also contain any processing which has been applied (if any), 5 | (e.g. corrupted example skipped, images cropped,...): 6 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LIBERO_Spatial/README.md: -------------------------------------------------------------------------------- 1 | TODO(example_dataset): Markdown description of your dataset. 2 | Description is **formatted** as markdown. 3 | 4 | It should also contain any processing which has been applied (if any), 5 | (e.g. corrupted example skipped, images cropped,...): 6 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/example_dataset/README.md: -------------------------------------------------------------------------------- 1 | TODO(example_dataset): Markdown description of your dataset. 2 | Description is **formatted** as markdown. 3 | 4 | It should also contain any processing which has been applied (if any), 5 | (e.g. 
corrupted example skipped, images cropped,...): 6 | -------------------------------------------------------------------------------- /policy/openpi/examples/aloha_real/requirements.in: -------------------------------------------------------------------------------- 1 | Pillow 2 | dm_control 3 | einops 4 | h5py 5 | matplotlib 6 | modern_robotics 7 | msgpack 8 | numpy>=1.22.4,<2.0.0 9 | opencv-python 10 | packaging 11 | pexpect 12 | pyquaternion 13 | pyrealsense2 14 | pyyaml 15 | requests 16 | rospkg 17 | tyro 18 | websockets 19 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/ALOHA_Real_Task_Sample/README.md: -------------------------------------------------------------------------------- 1 | TODO(example_dataset): Markdown description of your dataset. 2 | Description is **formatted** as markdown. 3 | 4 | It should also contain any processing which has been applied (if any), 5 | (e.g. corrupted example skipped, images cropped,...): 6 | -------------------------------------------------------------------------------- /third_party/_download_drAlohaSDK.py: -------------------------------------------------------------------------------- 1 | from huggingface_hub import snapshot_download 2 | 3 | snapshot_download( 4 | repo_id="WadeKe/drAloha", 5 | allow_patterns=["dr.zip"], 6 | local_dir=".", 7 | repo_type="dataset", 8 | resume_download=True, 9 | local_dir_use_symlinks="auto" 10 | ) 11 | -------------------------------------------------------------------------------- /policy/openpi/finetune.sh: -------------------------------------------------------------------------------- 1 | train_config_name=$1 2 | model_name=$2 3 | gpu_use=$3 4 | 5 | uv run scripts/compute_norm_stats.py --config-name $train_config_name 6 | 7 | export CUDA_VISIBLE_DEVICES=$gpu_use 8 | 9 | XLA_PYTHON_CLIENT_MEM_FRACTION=0.9 uv run scripts/train.py $train_config_name --exp-name=$model_name --overwrite 
-------------------------------------------------------------------------------- /policy/DP/process_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Data processing script - Convert HDF5 data to zarr format for Diffusion Policy training 4 | 5 | # Get arguments 6 | SOURCE_DIR="$1" 7 | OUTPUT_DIR="$2" 8 | NUM_EPISODES="$3" 9 | 10 | # Execute Python script 11 | python3 scripts/process_data.py "$SOURCE_DIR" "$OUTPUT_DIR" "$NUM_EPISODES" 12 | -------------------------------------------------------------------------------- /policy/weights/README.md: -------------------------------------------------------------------------------- 1 | # download model 2 | 3 | ```bash 4 | mkdir RDT && cd RDT 5 | huggingface-cli download google/t5-v1_1-xxl --local-dir t5-v1_1-xxl 6 | huggingface-cli download google/siglip-so400m-patch14-384 --local-dir siglip-so400m-patch14-384 7 | huggingface-cli download robotics-diffusion-transformer/rdt-1b --local-dir rdt-1b 8 | ``` 9 | -------------------------------------------------------------------------------- /policy/ACT/detr/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | from .detr_vae import build as build_vae 3 | from .detr_vae import build_cnnmlp as build_cnnmlp 4 | 5 | 6 | def build_ACT_model(args): 7 | return build_vae(args) 8 | 9 | 10 | def build_CNNMLP_model(args): 11 | return build_cnnmlp(args) 12 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/backbones/llm/prompting/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_prompter import PromptBuilder, PurePromptBuilder 2 | from .llama2_chat_prompter import LLaMa2ChatPromptBuilder 3 | from .mistral_instruct_prompter import MistralInstructPromptBuilder 4 | from .phi_prompter import PhiPromptBuilder 5 | from .vicuna_v15_prompter import VicunaV15ChatPromptBuilder 6 | -------------------------------------------------------------------------------- /deploy.sh: -------------------------------------------------------------------------------- 1 | python example/deploy/deploy.py \ 2 | --base_model_name "test_policy"\ 3 | --base_model_class "TestModel"\ 4 | --base_model_path "path/to/ckpt"\ 5 | --base_task_name "test"\ 6 | --base_robot_name "test_robot"\ 7 | --base_robot_class "TestRobot"\ 8 | --robotwin \ 9 | --overrides \ 10 | --test_info_1 1 11 | # --video "cam_head"\ -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/backbones/vision/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_vision import ImageTransform, VisionBackbone 2 | from .clip_vit import CLIPViTBackbone 3 | from .dinoclip_vit import DinoCLIPViTBackbone 4 | from .dinosiglip_vit import DinoSigLIPViTBackbone 5 | from .dinov2_vit import DinoV2ViTBackbone 6 | from .in1k_vit import IN1KViTBackbone 7 | from .siglip_vit import SigLIPViTBackbone 8 | -------------------------------------------------------------------------------- /third_party/README.md: 
-------------------------------------------------------------------------------- 1 | # 此文件夹用于放置一些机械臂配置的python文件,如无法pip install安装环境,请将配置库放在这里 2 | 3 | 1. dr: 4 | 是大然aloha机械臂的控制底层代码, 无需编译, 使用提供的默认x64编译后文件, 如果是不同架构系统, 请与厂家联系获得支持. 5 | 2. curobo: 6 | 提供了IK / planner, 需要编译使用. 7 | ```bash 8 | git clone https://github.com/NVlabs/curobo.git 9 | cd curobo 10 | pip install -e . --no-build-isolation 11 | ``` 12 | 3. oculus_reader 13 | 用于控制VR遥操设备QuestVR, 需要编译使用. 14 | -------------------------------------------------------------------------------- /policy/RDT/configs/zero2.json: -------------------------------------------------------------------------------- 1 | { 2 | "bf16": { 3 | "enabled": "auto" 4 | }, 5 | "train_micro_batch_size_per_gpu": "auto", 6 | "train_batch_size": "auto", 7 | "gradient_accumulation_steps": "auto", 8 | "zero_optimization": { 9 | "stage": 2, 10 | "overlap_comm": true, 11 | "contiguous_gradients": true, 12 | "sub_group_size": 1e9 13 | } 14 | } -------------------------------------------------------------------------------- /policy/openvla-oft/experiments/robot/aloha/requirements_aloha.txt: -------------------------------------------------------------------------------- 1 | numpy<2 2 | draccus 3 | torchvision 4 | torch 5 | pyquaternion 6 | pyyaml 7 | rospkg 8 | pexpect 9 | mujoco==2.3.7 10 | dm_control==1.0.14 11 | opencv-python 12 | matplotlib 13 | einops 14 | packaging 15 | h5py 16 | traitlets 17 | ipdb 18 | IPython 19 | modern_robotics 20 | Pillow 21 | termcolor 22 | imageio[ffmpeg] 23 | uvicorn 24 | fastapi 25 | requests 26 | json_numpy 27 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/model/common/module_attr_mixin.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class ModuleAttrMixin(nn.Module): 5 | 6 | def __init__(self): 7 | super().__init__() 8 | self._dummy_variable = nn.Parameter() 9 | 10 | @property 11 | def 
device(self): 12 | return next(iter(self.parameters())).device 13 | 14 | @property 15 | def dtype(self): 16 | return next(iter(self.parameters())).dtype 17 | -------------------------------------------------------------------------------- /policy/ACT/detr/README.md: -------------------------------------------------------------------------------- 1 | This part of the codebase is modified from DETR https://github.com/facebookresearch/detr under APACHE 2.0. 2 | 3 | @article{Carion2020EndtoEndOD, 4 | title={End-to-End Object Detection with Transformers}, 5 | author={Nicolas Carion and Francisco Massa and Gabriel Synnaeve and Nicolas Usunier and Alexander Kirillov and Sergey Zagoruyko}, 6 | journal={ArXiv}, 7 | year={2020}, 8 | volume={abs/2005.12872} 9 | } -------------------------------------------------------------------------------- /policy/README.md: -------------------------------------------------------------------------------- 1 | ### policy part 2 | This part integrates several outstanding open-source projects. After making modifications and adaptations for this project, they were added here—with respect and acknowledgment. 
3 | 4 | [RDT official](https://github.com/thu-ml/RoboticsDiffusionTransformer) 5 | [openpi official](https://github.com/Physical-Intelligence/openpi) 6 | [openVLA-oft official](https://github.com/YY-GX/openvla-oft) 7 | [rlds datasetbuiler](https://github.com/moojink/rlds_dataset_builder) -------------------------------------------------------------------------------- /policy/RDT/requirements.txt: -------------------------------------------------------------------------------- 1 | packaging==24.0 2 | wandb==0.17.0 3 | deepspeed==0.14.2 4 | accelerate==0.30.1 5 | diffusers==0.27.2 6 | timm==1.0.3 7 | transformers==4.41.0 8 | sentencepiece==0.2.0 9 | h5py==3.11.0 10 | opencv-python==4.9.0.80 11 | imgaug==0.4.0 12 | pytz>=2020.1 13 | 14 | # requirements_data.txt 15 | tfds-nightly==4.9.4.dev202402070044 16 | gsutil==5.27 17 | tensorflow==2.15.0.post1 18 | pillow==10.2.0 19 | pyyaml==6.0.1 20 | opencv-python==4.9.0.80 21 | tensorflow-graphics==2021.12.3 22 | imageio==2.34.0 23 | imageio-ffmpeg==0.4.9 24 | -------------------------------------------------------------------------------- /policy/openpi/packages/openpi-client/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "openpi-client" 3 | version = "0.1.0" 4 | requires-python = ">=3.7" 5 | dependencies = [ 6 | "dm-tree>=0.1.8", 7 | "msgpack>=1.0.5", 8 | "numpy>=1.22.4,<2.0.0", 9 | "pillow>=9.0.0", 10 | "tree>=0.2.4", 11 | "websockets>=11.0", 12 | ] 13 | 14 | [build-system] 15 | requires = ["hatchling"] 16 | build-backend = "hatchling.build" 17 | 18 | [tool.uv] 19 | dev-dependencies = ["pytest>=8.3.4"] 20 | 21 | [tool.ruff] 22 | line-length = 120 23 | target-version = "py37" 24 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/include/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set( 2 | qpSWIFT_headers 3 | 
"${CMAKE_CURRENT_SOURCE_DIR}/amd.h" 4 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_internal.h" 5 | "${CMAKE_CURRENT_SOURCE_DIR}/SuiteSparse_config.h" 6 | "${CMAKE_CURRENT_SOURCE_DIR}/ldl.h" 7 | "${CMAKE_CURRENT_SOURCE_DIR}/GlobalOptions.h" 8 | "${CMAKE_CURRENT_SOURCE_DIR}/timer.h" 9 | "${CMAKE_CURRENT_SOURCE_DIR}/Auxilary.h" 10 | "${CMAKE_CURRENT_SOURCE_DIR}/qpSWIFT.h" 11 | ) 12 | 13 | 14 | set( 15 | qpSWIFT_headers 16 | "${qpSWIFT_headers}" 17 | PARENT_SCOPE 18 | ) -------------------------------------------------------------------------------- /tools/test_press.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import termios 3 | import tty 4 | import select 5 | import time 6 | 7 | def is_enter_pressed(): 8 | return select.select([sys.stdin], [], [], 0)[0] and sys.stdin.read(1) == '\n' 9 | 10 | def is_space_pressed(): 11 | return select.select([sys.stdin], [], [], 0)[0] and sys.stdin.read(1) == ' ' 12 | 13 | # 示例主循环 14 | while True: 15 | if is_enter_pressed(): 16 | print("Enter pressed") 17 | 18 | elif is_space_pressed(): 19 | print("Space pressed") 20 | break 21 | else: 22 | time.sleep(1 / 10) 23 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import qpSWIFT 3 | 4 | opts = {"MAXITER": 10, "VERBOSE": 1} 5 | 6 | P = np.array([[65.0, -22, -16], 7 | [-22.0, 14, 7], 8 | [-16, 7, 5]]) 9 | 10 | c = np.array([3.0, 2.0, 3.0]) 11 | 12 | 13 | G = np.array([[1.0, 2.0, 1.0], 14 | [2.0, 0.0, 1.0], 15 | [-1.0, 2.0, -1.0]]) 16 | 17 | h = np.array([3.0, 2.0, -2.0]) 18 | 19 | A = np.array([[1.0, 1.0, 1.0]]) 20 | 21 | b = np.array([1.0]) 22 | 23 | k = qpSWIFT.run(c, h, P, G, A, b, opts) 24 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Ignore all .pyc files 2 | *.pyc 3 | 4 | # Ignore __pycache__ directory 5 | __pycache__/ 6 | 7 | # Ignore other Python cache files 8 | *.pyo 9 | *.pyd 10 | 11 | # datasets 12 | datasets/ 13 | save/ 14 | 15 | # example/teleop 16 | 17 | # third_party 18 | third_party/3D-ViTac_Tactile_Hardware 19 | third_party/dr/ 20 | 21 | execution_logs/ 22 | output/ 23 | data/check_data.json 24 | policy/weights 25 | realsense_captures/ 26 | piper_scripts/ 27 | third_party/test.py 28 | policy/ACT/act_ckpt 29 | policy/ACT/detr/detr.egg-info 30 | controller/URUrx_controller.py 31 | processed_data/* 32 | test/* 33 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/model/diffusion/positional_embedding.py: -------------------------------------------------------------------------------- 1 | import math 2 | import torch 3 | import torch.nn as nn 4 | 5 | 6 | class SinusoidalPosEmb(nn.Module): 7 | 8 | def __init__(self, dim): 9 | super().__init__() 10 | self.dim = dim 11 | 12 | def forward(self, x): 13 | device = x.device 14 | half_dim = self.dim // 2 15 | emb = math.log(10000) / (half_dim - 1) 16 | emb = torch.exp(torch.arange(half_dim, device=device) * -emb) 17 | emb = x[:, None] * emb[None, :] 18 | emb = torch.cat((emb.sin(), emb.cos()), dim=-1) 19 | return emb 20 | -------------------------------------------------------------------------------- /policy/RDT/scripts/read_yaml.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import yaml 3 | 4 | def read_yaml_value(file_path, key): 5 | with open(file_path, 'r') as file: 6 | data = yaml.safe_load(file) 7 | value = data.get(key) 8 | if value is not None: 9 | print(value) 10 | else: 11 | print(f"Key '{key}' not found in {file_path}") 12 | 13 | if __name__ == "__main__": 14 | if len(sys.argv) != 3: 15 | print("Usage: python read_yaml.py ") 16 | 
sys.exit(1) 17 | 18 | file_path = sys.argv[1] 19 | key = sys.argv[2] 20 | read_yaml_value(file_path, key) -------------------------------------------------------------------------------- /config/robot_1_move_mobile_2.yml: -------------------------------------------------------------------------------- 1 | name: "robot_1_move_mobile_2" 2 | info: 3 | is_block: false 4 | robot: 5 | class: 6 | class_path: "my_robot.test_robot" 7 | class_name: "TestRobot" 8 | args: null 9 | run: 10 | function: 11 | function_path: "utils.task_functions" 12 | function_name: "move_mobile_to" 13 | args: 14 | target: [0.35, 0.35, 0., 0., 0., 1.] 15 | success: 16 | function: 17 | function_path: "utils.task_functions" 18 | function_name: "success" 19 | args: 20 | threshold: 0.8 21 | fail: 22 | function: 23 | function_path: "utils.task_functions" 24 | function_name: "success" 25 | args: 26 | threshold: 0.1 27 | -------------------------------------------------------------------------------- /config/robot_2_move_mobile_2.yml: -------------------------------------------------------------------------------- 1 | name: "robot_2_move_mobile_2" 2 | info: 3 | is_block: false 4 | robot: 5 | class: 6 | class_path: "my_robot.test_robot" 7 | class_name: "TestRobot" 8 | args: null 9 | run: 10 | function: 11 | function_path: "utils.task_functions" 12 | function_name: "move_mobile_to" 13 | args: 14 | target: [0.35, 0.35, 0., 0., 0., -1.] 
15 | success: 16 | function: 17 | function_path: "utils.task_functions" 18 | function_name: "success" 19 | args: 20 | threshold: 0.8 21 | fail: 22 | function: 23 | function_path: "utils.task_functions" 24 | function_name: "success" 25 | args: 26 | threshold: 0.1 27 | -------------------------------------------------------------------------------- /sensor/touch_sensor.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | from sensor.sensor import Sensor 5 | 6 | class TouchSensor(Sensor): 7 | def __init__(self): 8 | super().__init__() 9 | self.name = "touch_sensor" 10 | self.type = "touch_sensor" 11 | self.collect_info = None 12 | 13 | def get_information(self): 14 | touch_info = {} 15 | touch = self.get_touch() 16 | if "force" in self.collect_info: 17 | touch_info["force"] = touch["force"] 18 | if "torque" in self.collect_info: 19 | touch_info["torque"] = touch["torque"] 20 | 21 | return touch_info 22 | 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /config/robot_1_move_mobile_1.yml: -------------------------------------------------------------------------------- 1 | name: "robot_1_move_mobile_1" 2 | info: 3 | is_block: false 4 | robot: 5 | class: 6 | class_path: "my_robot.test_robot" 7 | class_name: "TestRobot" 8 | args: null 9 | run: 10 | function: 11 | function_path: "utils.task_functions" 12 | function_name: "move_mobile_to" 13 | args: 14 | target: [0.25, 0.25, 0., 0., 0., 1.] 
15 | success: 16 | function: 17 | function_path: "utils.task_functions" 18 | function_name: "success" 19 | args: 20 | threshold: 0.8 21 | # 第一步不能失败 22 | # fail: 23 | # function: 24 | # function_path: "utils.task_functions" 25 | # function_name: "success" 26 | # args: 27 | # threshold: 0.5 28 | -------------------------------------------------------------------------------- /config/robot_2_move_mobile_1.yml: -------------------------------------------------------------------------------- 1 | name: "robot_2_move_mobile_1" 2 | info: 3 | is_block: false 4 | robot: 5 | class: 6 | class_path: "my_robot.test_robot" 7 | class_name: "TestRobot" 8 | args: null 9 | run: 10 | function: 11 | function_path: "utils.task_functions" 12 | function_name: "move_mobile_to" 13 | args: 14 | target: [0.25, 0.25, 0., 0., 0., -1.] 15 | success: 16 | function: 17 | function_path: "utils.task_functions" 18 | function_name: "success" 19 | args: 20 | threshold: 0.8 21 | # 第一步不能失败 22 | # fail: 23 | # function: 24 | # function_path: "utils.task_functions" 25 | # function_name: "success" 26 | # args: 27 | # threshold: 0.5 28 | -------------------------------------------------------------------------------- /controller/URUrx_controller.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | from utils.data_handler import debug_print 5 | from controller.arm_controller import ArmController 6 | 7 | import urx 8 | """ 9 | UR base code from: 10 | https://github.com/SintefManufacturing/python-urx?utm_source=chatgpt.com 11 | """ 12 | 13 | class URUrxController(ArmController): 14 | def __init__(self, name): 15 | self.name = name 16 | self.controller_type = "user_controller" 17 | self.controller = None 18 | 19 | def set_up(self): 20 | self.controller = urx.Robot("192.168.0.100") 21 | self.controller .set_tcp((0, 0, 0.1, 0, 0, 0)) 22 | self.controller .set_payload(2, (0, 0, 0.1)) 23 | 
-------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/matlab/prototype code/test1.m: -------------------------------------------------------------------------------- 1 | clc; 2 | clear all; 3 | close all; 4 | 5 | 6 | P = [1.2204 1.1123 -3.8935; 7 | 1.1123 3.5821 -3.3333; 8 | -3.8935 -3.3333 19.6174]; 9 | 10 | 11 | 12 | c = [2.7694;-1.3499;3.0349]; 13 | 14 | A = [0.7254 -0.0631 0.7147]; 15 | 16 | b = -0.2050; 17 | 18 | G = [-0.1241 1.4090 0.6715; 19 | 1.4897 1.4172 -1.2075]; 20 | 21 | h = [0.7172;1.6302]; 22 | 23 | % [x,s,fval,jj,x_sol,s_sol,z_sol] = customQP_PredCorr_full(P,c,G,h,A,b); 24 | 25 | [x,s,fval,jj,x_sol,s_sol,z_sol] = qpSWIFT_matlab(P,c,G,h,A,b); 26 | 27 | % options.MAXITER = 3; 28 | 29 | % [sol,basic_info,adv_info] = qpSWIFT(sparse(P),c,sparse(A),b,sparse(G),h,options); 30 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/model/common/shape_util.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Tuple, Callable 2 | import torch 3 | import torch.nn as nn 4 | 5 | 6 | def get_module_device(m: nn.Module): 7 | device = torch.device("cpu") 8 | try: 9 | param = next(iter(m.parameters())) 10 | device = param.device 11 | except StopIteration: 12 | pass 13 | return device 14 | 15 | 16 | @torch.no_grad() 17 | def get_output_shape(input_shape: Tuple[int], net: Callable[[torch.Tensor], torch.Tensor]): 18 | device = get_module_device(net) 19 | test_input = torch.zeros((1, ) + tuple(input_shape), device=device) 20 | test_output = net(test_input) 21 | output_shape = tuple(test_output.shape[1:]) 22 | return output_shape 23 | -------------------------------------------------------------------------------- /policy/RDT/data/filelock.py: -------------------------------------------------------------------------------- 1 | import fcntl 2 | 3 | 4 | class FileLock: 5 | """ 6 | A file lock 
class. 7 | """ 8 | def __init__(self, filename): 9 | self.filename = filename 10 | self.handle = None 11 | 12 | def acquire_read_lock(self): 13 | self.handle = open(self.filename + '.lock', 'r') 14 | fcntl.flock(self.handle, fcntl.LOCK_SH | fcntl.LOCK_NB) 15 | 16 | def acquire_write_lock(self): 17 | self.handle = open(self.filename + '.lock', 'w') 18 | fcntl.flock(self.handle, fcntl.LOCK_EX | fcntl.LOCK_NB) 19 | 20 | def release_lock(self): 21 | if self.handle is not None: 22 | fcntl.flock(self.handle, fcntl.LOCK_UN) 23 | self.handle.close() 24 | self.handle = None 25 | -------------------------------------------------------------------------------- /policy/ACT/deploy_policy.yml: -------------------------------------------------------------------------------- 1 | # Basic experiment configuration 2 | task_name: null 3 | policy_name: ACT 4 | task_config: null 5 | ckpt_setting: null 6 | seed: 0 7 | instruction_type: unseen 8 | 9 | # ACT-specific arguments 10 | action_dim: 14 11 | kl_weight: 10.0 12 | chunk_size: 50 13 | hidden_dim: 512 14 | dim_feedforward: 3200 15 | temporal_agg: false 16 | device: "cuda:0" 17 | 18 | # DETR parser args 19 | ckpt_dir: null 20 | policy_class: ACT 21 | num_epochs: 2000 22 | 23 | # Model training params 24 | position_embedding: sine 25 | lr_backbone: 0.00001 26 | weight_decay: 0.0001 27 | lr: 0.00001 28 | masks: false 29 | dilation: false 30 | backbone: resnet18 31 | nheads: 8 32 | enc_layers: 4 33 | dec_layers: 7 34 | pre_norm: false 35 | dropout: 0.1 36 | camera_names: 37 | - cam_head 38 | - cam_wrist 39 | -------------------------------------------------------------------------------- /scripts/visual_hdf5_rerun.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # HDF5数据可视化工具启动脚本 3 | 4 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 5 | 6 | # 检查参数 7 | if [ $# -eq 0 ]; then 8 | echo "用法: $0 [选项]" 9 | echo "示例: $0 data.hdf5 --save output.rrd" 10 | exit 1 11 | fi 12 | 13 | 
# 检查文件 14 | if [ ! -f "$1" ] && [ ! -d "$1" ]; then 15 | echo "错误: 路径不存在: $1" 16 | exit 1 17 | fi 18 | 19 | # 配置渲染后端 (Vulkan优先) 20 | if command -v vulkaninfo &> /dev/null && vulkaninfo --summary 2>&1 | grep -q "NVIDIA\|AMD\|Intel" 2>/dev/null; then 21 | export WGPU_BACKEND=vulkan 22 | export VK_ICD_FILENAMES=/usr/share/vulkan/icd.d/nvidia_icd.json 23 | else 24 | export WGPU_BACKEND=gl 25 | export MESA_GL_VERSION_OVERRIDE=4.5 26 | fi 27 | 28 | # 运行可视化脚本 29 | python "$SCRIPT_DIR/visual_hdf5_rerun.py" "$@" 30 | -------------------------------------------------------------------------------- /policy/openvla-oft/.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # See https://pre-commit.com for more information 2 | # See https://pre-commit.com/hooks.html for more hooks 3 | exclude: ".git" 4 | 5 | repos: 6 | - repo: https://github.com/astral-sh/ruff-pre-commit 7 | rev: v0.2.2 8 | hooks: 9 | - id: ruff 10 | args: [ --fix, --exit-non-zero-on-fix ] 11 | 12 | - repo: https://github.com/psf/black 13 | rev: 24.2.0 14 | hooks: 15 | - id: black 16 | 17 | - repo: https://github.com/pre-commit/pre-commit-hooks 18 | rev: v4.5.0 19 | hooks: 20 | - id: check-added-large-files 21 | - id: check-ast 22 | - id: check-case-conflict 23 | - id: check-merge-conflict 24 | - id: check-toml 25 | - id: check-yaml 26 | - id: end-of-file-fixer 27 | - id: trailing-whitespace 28 | -------------------------------------------------------------------------------- /policy/openpi/.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: 3 | pull_request: 4 | branches: 5 | - "*" 6 | 7 | jobs: 8 | run_tests: 9 | name: Run Tests 10 | runs-on: openpi-verylarge 11 | env: 12 | GIT_LFS_SKIP_SMUDGE: true 13 | steps: 14 | - uses: actions/checkout@v4 15 | 16 | - name: Install FFmpeg dependencies 17 | run: | 18 | sudo apt-get update 19 | sudo apt-get install -y ffmpeg libavcodec-dev 
libavformat-dev libavutil-dev 20 | 21 | - name: Install uv 22 | uses: astral-sh/setup-uv@v5 23 | 24 | - name: Set up Python 25 | run: uv python install 26 | 27 | - name: Install the project 28 | run: uv sync --all-extras --dev 29 | 30 | - name: Run tests 31 | run: uv run pytest --strict-markers -m "not manual" 32 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/backbones/vision/dinov2_vit.py: -------------------------------------------------------------------------------- 1 | """ 2 | dinov2_vit.py 3 | """ 4 | 5 | from prismatic.models.backbones.vision.base_vision import TimmViTBackbone 6 | 7 | # Registry =>> Supported DINOv2 Vision Backbones (from TIMM) =>> Note:: Using DINOv2 w/ Registers! 8 | # => Reference: https://arxiv.org/abs/2309.16588 9 | DINOv2_VISION_BACKBONES = {"dinov2-vit-l": "vit_large_patch14_reg4_dinov2.lvd142m"} 10 | 11 | 12 | class DinoV2ViTBackbone(TimmViTBackbone): 13 | def __init__(self, vision_backbone_id: str, image_resize_strategy: str, default_image_size: int = 224) -> None: 14 | super().__init__( 15 | vision_backbone_id, 16 | DINOv2_VISION_BACKBONES[vision_backbone_id], 17 | image_resize_strategy, 18 | default_image_size=default_image_size, 19 | ) 20 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/backbones/vision/in1k_vit.py: -------------------------------------------------------------------------------- 1 | """ 2 | in1k_vit.py 3 | 4 | Vision Transformers trained / finetuned on ImageNet (ImageNet-21K =>> ImageNet-1K) 5 | """ 6 | 7 | from prismatic.models.backbones.vision.base_vision import TimmViTBackbone 8 | 9 | # Registry =>> Supported Vision Backbones (from TIMM) 10 | IN1K_VISION_BACKBONES = { 11 | "in1k-vit-l": "vit_large_patch16_224.augreg_in21k_ft_in1k", 12 | } 13 | 14 | 15 | class IN1KViTBackbone(TimmViTBackbone): 16 | def __init__(self, vision_backbone_id: str, image_resize_strategy: str, 
default_image_size: int = 224) -> None: 17 | super().__init__( 18 | vision_backbone_id, 19 | IN1K_VISION_BACKBONES[vision_backbone_id], 20 | image_resize_strategy, 21 | default_image_size=default_image_size, 22 | ) 23 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/footer.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 12 | 13 | 14 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/matlab/prototype code/qpSWIFT_init.m: -------------------------------------------------------------------------------- 1 | function [x0,s0,y0,z0,Phi] = qpSWIFT_init(n,m,p,P,c,G,h,A,b) 2 | 3 | if p == 0 4 | Phi = [P G'; G -eye(m,m)]; 5 | X = Phi\[-c;h]; 6 | x0 = X(1:n); 7 | y0 = []; 8 | else 9 | Phi = [P A' G'; A zeros(p,p) zeros(p,m);G zeros(m,p) -eye(m,m)]; 10 | X = Phi\[-c;b;h]; 11 | x0 = X(1:n); 12 | y0 = X(n+[1:p]); 13 | end 14 | bz = h; 15 | z = (G*x0)-bz; 16 | alpha_p = -min(-z); 17 | 18 | if alpha_p<0 19 | s0 = -z; 20 | else 21 | s0 = -z+(1+alpha_p); 22 | end 23 | 24 | alpha_d = -min(z); 25 | if alpha_d<0 26 | z0 = z; 27 | else 28 | z0 = z+(1+alpha_d); 29 | end 30 | 31 | 32 | end -------------------------------------------------------------------------------- /config/robot_1_model_infer.yml: -------------------------------------------------------------------------------- 1 | name: "robot_1_model_infer" 2 | info: 3 | is_block: false 4 | robot: 5 | class: 6 | class_path: "my_robot.test_robot" 7 | class_name: "TestRobot" 8 | args: null 9 | run: 10 | function: 11 | function_path: "utils.task_functions" 12 | function_name: "infer_once" 13 | args: null 14 | success: 15 | function: 16 | function_path: "utils.task_functions" 17 | function_name: "success" 18 | args: 19 | threshold: 0.8 20 | extras: 21 | model: 22 | class: 23 | class_path: "policy.test_policy.inference_model" 24 | 
class_name: "TestModel" 25 | args: 26 | model_path: "model/path/" 27 | task_name: "test" 28 | release: true 29 | fail: 30 | function: 31 | function_path: "utils.task_functions" 32 | function_name: "success" 33 | args: 34 | threshold: 0.5 35 | 36 | -------------------------------------------------------------------------------- /config/robot_2_model_infer.yml: -------------------------------------------------------------------------------- 1 | name: "robot_2_model_infer" 2 | info: 3 | is_block: false 4 | robot: 5 | class: 6 | class_path: "my_robot.test_robot_2" 7 | class_name: "TestRobot" 8 | args: null 9 | run: 10 | function: 11 | function_path: "utils.task_functions" 12 | function_name: "infer_once" 13 | args: null 14 | success: 15 | function: 16 | function_path: "utils.task_functions" 17 | function_name: "success" 18 | args: 19 | threshold: 0.8 20 | extras: 21 | model: 22 | class: 23 | class_path: "policy.test_policy.inference_model" 24 | class_name: "TestModel" 25 | args: 26 | model_path: "model/path/" 27 | task_name: "test" 28 | release: true 29 | fail: 30 | function: 31 | function_path: "utils.task_functions" 32 | function_name: "success" 33 | args: 34 | threshold: 0.5 35 | 36 | -------------------------------------------------------------------------------- /example/task/serial_task.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | from utils.task import YmlTask, Tasks, ShareSpace 5 | from my_robot.test_robot import TestRobot 6 | import numpy as np 7 | import os 8 | 9 | if __name__ == "__main__": 10 | # os.environ["INFO_LEVEL"] = "DEBUG" # DEBUG , INFO, ERROR 11 | robot = TestRobot() 12 | robot.set_up() 13 | sp = ShareSpace() 14 | my_task = Tasks.build_top({ 15 | "type": "Serial", 16 | "subtasks": [ 17 | YmlTask("./config/robot_1_move_mobile_1.yml", share_space=sp, robot=robot), 18 | YmlTask("./config/robot_1_model_infer.yml",share_space=sp, robot=robot), 19 | 
YmlTask("./config/robot_1_move_mobile_2.yml", share_space=sp, robot=robot), 20 | ], 21 | }) 22 | while not my_task.is_success(): 23 | my_task.run() 24 | my_task.update() -------------------------------------------------------------------------------- /scripts/test_controller.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append('./') 3 | 4 | # change to your controller and test_controller 5 | # from controller.Piper_controller import PiperController 6 | from controller.TestArm_controller import TestArmController 7 | from utils.data_handler import debug_print 8 | 9 | if __name__ == "__main__": 10 | import os 11 | os.environ["INFO_LEVEL"] = "DEBUG" 12 | 13 | controller = TestArmController("play_arm") 14 | debug_print("TestArmController","TestArmController initialized", "INFO") 15 | controller.set_up() 16 | debug_print("TestArmController","TestArmController moved", "INFO") 17 | 18 | test_controller = TestArmController("test_arm",DoFs=6,INFO="DEBUG") 19 | 20 | test_controller.set_collect_info(["joint","qpos","gripper"]) 21 | 22 | test_controller.set_up() 23 | 24 | test_controller.get() 25 | 26 | 27 | -------------------------------------------------------------------------------- /policy/openpi/scripts/train_test.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | import os 3 | import pathlib 4 | 5 | import pytest 6 | 7 | os.environ["JAX_PLATFORMS"] = "cpu" 8 | 9 | from openpi.training import config as _config 10 | 11 | from . 
import train 12 | 13 | 14 | @pytest.mark.parametrize("config_name", ["debug"]) 15 | def test_train(tmp_path: pathlib.Path, config_name: str): 16 | config = dataclasses.replace( 17 | _config._CONFIGS_DICT[config_name], # noqa: SLF001 18 | batch_size=2, 19 | checkpoint_base_dir=str(tmp_path / "checkpoint"), 20 | exp_name="test", 21 | overwrite=False, 22 | resume=False, 23 | num_train_steps=2, 24 | log_interval=1, 25 | ) 26 | train.main(config) 27 | 28 | # test resuming 29 | config = dataclasses.replace(config, resume=True, num_train_steps=4) 30 | train.main(config) 31 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/common/precise_sleep.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | def precise_sleep(dt: float, slack_time: float = 0.001, time_func=time.monotonic): 5 | """ 6 | Use hybrid of time.sleep and spinning to minimize jitter. 7 | Sleep dt - slack_time seconds first, then spin for the rest. 
8 | """ 9 | t_start = time_func() 10 | if dt > slack_time: 11 | time.sleep(dt - slack_time) 12 | t_end = t_start + dt 13 | while time_func() < t_end: 14 | pass 15 | return 16 | 17 | 18 | def precise_wait(t_end: float, slack_time: float = 0.001, time_func=time.monotonic): 19 | t_start = time_func() 20 | t_wait = t_end - t_start 21 | if t_wait > 0: 22 | t_sleep = t_wait - slack_time 23 | if t_sleep > 0: 24 | time.sleep(t_sleep) 25 | while time_func() < t_end: 26 | pass 27 | return 28 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/common/env_util.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | 4 | 5 | def render_env_video(env, states, actions=None): 6 | observations = states 7 | imgs = list() 8 | for i in range(len(observations)): 9 | state = observations[i] 10 | env.set_state(state) 11 | if i == 0: 12 | env.set_state(state) 13 | img = env.render() 14 | # draw action 15 | if actions is not None: 16 | action = actions[i] 17 | coord = (action / 512 * 96).astype(np.int32) 18 | cv2.drawMarker( 19 | img, 20 | coord, 21 | color=(255, 0, 0), 22 | markerType=cv2.MARKER_CROSS, 23 | markerSize=8, 24 | thickness=1, 25 | ) 26 | imgs.append(img) 27 | imgs = np.array(imgs) 28 | return imgs 29 | -------------------------------------------------------------------------------- /policy/openvla-oft/SETUP.md: -------------------------------------------------------------------------------- 1 | # Setup Instructions 2 | 3 | ## Set Up Conda Environment 4 | 5 | ```bash 6 | # Create and activate conda environment 7 | conda create -n openvla-oft python=3.10 -y 8 | conda activate openvla-oft 9 | 10 | # Install PyTorch 11 | # Use a command specific to your machine: https://pytorch.org/get-started/locally/ 12 | pip3 install torch torchvision torchaudio 13 | 14 | # Clone openvla-oft repo and pip install to download dependencies 15 | git clone 
https://github.com/moojink/openvla-oft.git 16 | cd openvla-oft 17 | pip install -e . 18 | 19 | # Install Flash Attention 2 for training (https://github.com/Dao-AILab/flash-attention) 20 | # =>> If you run into difficulty, try `pip cache remove flash_attn` first 21 | pip install packaging ninja 22 | ninja --version; echo $? # Verify Ninja --> should return exit code "0" 23 | pip install "flash-attn==2.5.5" --no-build-isolation 24 | ``` -------------------------------------------------------------------------------- /tools/realsense_serial.py: -------------------------------------------------------------------------------- 1 | import pyrealsense2 as rs 2 | 3 | def find_connected_realsense_devices(): 4 | # 创建上下文对象 5 | ctx = rs.context() 6 | 7 | # 获取所有连接的设备 8 | devices = ctx.query_devices() 9 | 10 | if len(devices) == 0: 11 | print("No RealSense device detected.") 12 | return 13 | 14 | print(f"Detected {len(devices)} RealSense device(s).") 15 | 16 | for i, dev in enumerate(devices): 17 | serial_number = dev.get_info(rs.camera_info.serial_number) 18 | name = dev.get_info(rs.camera_info.name) 19 | physical_port = dev.get_info(rs.camera_info.physical_port) 20 | 21 | print(f"\n device {i + 1}:") 22 | print(f" name: {name}") 23 | print(f" serial: {serial_number}") 24 | print(f" port: {physical_port}") 25 | 26 | if __name__ == "__main__": 27 | find_connected_realsense_devices() -------------------------------------------------------------------------------- /sensor/teleoperation_sensor.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | import numpy as np 5 | 6 | from sensor.sensor import Sensor 7 | from typing import Dict, Any 8 | 9 | class TeleoperationSensor(Sensor): 10 | def __init__(self): 11 | super().__init__() 12 | self.name = "teleoperation_sensor" 13 | self.sensor = None 14 | 15 | def get_information(self): 16 | sensor_info = {} 17 | state = self.get_state() 18 | if "end_pose" in 
self.collect_info: 19 | sensor_info["end_pose"] = state["end_pose"] 20 | if "velocity" in self.collect_info: 21 | sensor_info["velocity"] = state["velocity"] 22 | if "gripper" in self.collect_info: 23 | sensor_info["gripper"] = state["gripper"] 24 | if "extra" in self.collect_info: 25 | sensor_info["extra"] = state["extra"] 26 | return sensor_info 27 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/matlab/basic_test.m: -------------------------------------------------------------------------------- 1 | clc; 2 | clear; 3 | close all; 4 | 5 | 6 | load('Matrix.mat'); 7 | 8 | sigma_d = 0.0; 9 | Phi = [P A' G'; A zeros(p ,m + p);G zeros(m,p) -eye(m,m)]; 10 | 11 | Permut = amd(Phi); 12 | 13 | 14 | opts.MAXITER = 25; 15 | opts.ABSTOL = 1e-6; 16 | opts.RELTOL = 1e-6; 17 | opts.PERMUT = Permut; 18 | opts.VERBOSE = 0; 19 | 20 | 21 | 22 | for i = 1:1e3 23 | [X,basic_info,adv_info] = qpSWIFT(sparse(P),c,sparse(A),b,sparse(G),h); 24 | 25 | [X1,basic_info1,adv_info1] = qpSWIFT(sparse(P),c,sparse(A),b,sparse(G),h,opts); 26 | 27 | end 28 | 29 | 30 | 31 | Phi = [P G';G -eye(m,m)]; 32 | Permut = amd(Phi); 33 | opts.PERMUT = Permut; 34 | 35 | 36 | for i = 1:1e3 37 | [X2,basic_info2,adv_info2] = qpSWIFT(sparse(P),c,sparse(G),h); 38 | 39 | [X3,basic_info3,adv_info3] = qpSWIFT(sparse(P),c,sparse(G),h,opts); 40 | end 41 | 42 | fprintf("Basic Test Passed\n"); -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/demoqp.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import qpSWIFT 3 | 4 | 5 | ### Solver Options 6 | ### For information about Solver options please refer to qpSWIFT 7 | ### documentation 8 | 9 | opts = {'MAXITER':30,'VERBOSE':1,'OUTPUT':2} 10 | 11 | ### Cost Function 12 | 13 | P = np.array([[5.0,1.0,0.0], 14 | [1.0, 2.0, 1.0], 15 | [0.0, 1.0, 4.0]]) 16 | 17 | c = 
np.array([1.0,2.0,1.0]) 18 | 19 | ### Inequality Constraints 20 | G = np.array([[-4.0,-4.0,0.0], 21 | [0.0,0.0,-1.0]]) 22 | 23 | h = np.array([-1.0,-1.0]) 24 | 25 | ### Equality Constraints 26 | A = np.array([[1.0, -2.0, 1.0]]) 27 | 28 | b = np.array([3.0]) 29 | 30 | ### Equality Constrained QP 31 | reseq = qpSWIFT.run(c,h,P,G,A,b,opts) 32 | 33 | ### Inequality Constrained QP 34 | res = qpSWIFT.run(c,h,P,G,opts=opts) 35 | 36 | 37 | ### Solution 38 | print(res['sol']) 39 | 40 | 41 | print(reseq['sol']) 42 | -------------------------------------------------------------------------------- /policy/openvla-oft/finetune.sh: -------------------------------------------------------------------------------- 1 | torchrun --standalone --nnodes 1 --nproc-per-node X vla-scripts/finetune.py \ 2 | --vla_path openvla/openvla-7b \ 3 | --data_root_dir /PATH/TO/RLDS/DATASETS/DIR/ \ 4 | --dataset_name aloha1_put_X_into_pot_300_demos \ 5 | --run_root_dir ./checkpoints/ \ 6 | --use_l1_regression True \ 7 | --use_diffusion False \ 8 | --use_film True \ 9 | --num_images_in_input 3 \ 10 | --use_proprio True \ 11 | --batch_size 4 \ 12 | --learning_rate 5e-4 \ 13 | --num_steps_before_decay 50000 \ 14 | --max_steps 100005 \ 15 | --use_val_set True \ 16 | --val_freq 10000 \ 17 | --save_freq 10000 \ 18 | --save_latest_checkpoint_only False \ 19 | --image_aug True \ 20 | --lora_rank 32 \ 21 | --wandb_entity "YOUR_WANDB_ENTITY" \ 22 | --wandb_project "YOUR_WANDB_PROJECT" \ 23 | --run_id_note parallel_dec--25_acts_chunk--continuous_acts--L1_regression--3rd_person_img--left_right_wrist_imgs--proprio_state--film -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/common/nested_dict_util.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | 4 | def nested_dict_map(f, x): 5 | """ 6 | Map f over all leaf of nested dict x 7 | """ 8 | 9 | if not isinstance(x, dict): 10 | return f(x) 11 | y 
= dict() 12 | for key, value in x.items(): 13 | y[key] = nested_dict_map(f, value) 14 | return y 15 | 16 | 17 | def nested_dict_reduce(f, x): 18 | """ 19 | Map f over all values of nested dict x, and reduce to a single value 20 | """ 21 | if not isinstance(x, dict): 22 | return x 23 | 24 | reduced_values = list() 25 | for value in x.values(): 26 | reduced_values.append(nested_dict_reduce(f, value)) 27 | y = functools.reduce(f, reduced_values) 28 | return y 29 | 30 | 31 | def nested_dict_check(f, x): 32 | bool_dict = nested_dict_map(f, x) 33 | result = nested_dict_reduce(lambda x, y: x and y, bool_dict) 34 | return result 35 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/README.txt: -------------------------------------------------------------------------------- 1 | ------------------------------------------------------------------------------------ 2 | Compilation -> python3 setup.py install || requires admin privileages, numpy python3-develop packages 3 | -> pip import qpSWIFT (Under Progress) 4 | ------------------------------------------------------------------------------------ 5 | Usage -> Instructions on how to use the module are given in qpSWIFT_help.py 6 | ------------------------------------------------------------------------------------ 7 | Demo -> Demo QP is given in demoqp.py 8 | ------------------------------------------------------------------------------------ 9 | 10 | 11 | ------------------------------------------------------------------------------------ 12 | Note: Make sure you have compatible C compiler, numpy and distutils for your system 13 | ------------------------------------------------------------------------------------ 14 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/policy/base_image_policy.py: -------------------------------------------------------------------------------- 1 | from typing import 
Dict 2 | import torch 3 | import torch.nn as nn 4 | from diffusion_policy.model.common.module_attr_mixin import ModuleAttrMixin 5 | from diffusion_policy.model.common.normalizer import LinearNormalizer 6 | 7 | 8 | class BaseImagePolicy(ModuleAttrMixin): 9 | # init accepts keyword argument shape_meta, see config/task/*_image.yaml 10 | 11 | def predict_action(self, obs_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: 12 | """ 13 | obs_dict: 14 | str: B,To,* 15 | return: B,Ta,Da 16 | """ 17 | raise NotImplementedError() 18 | 19 | # reset state for stateful policies 20 | def reset(self): 21 | pass 22 | 23 | # ========== training =========== 24 | # no standard training interface except setting normalizer 25 | def set_normalizer(self, normalizer: LinearNormalizer): 26 | raise NotImplementedError() 27 | -------------------------------------------------------------------------------- /policy/openpi/scripts/docker/compose.yml: -------------------------------------------------------------------------------- 1 | # Run with: 2 | # docker compose -f scripts/docker/compose.yml up --build 3 | services: 4 | openpi_server: 5 | image: openpi_server 6 | build: 7 | context: ../.. 8 | dockerfile: scripts/docker/serve_policy.Dockerfile 9 | init: true 10 | tty: true 11 | network_mode: host 12 | # Populate configured openpi data home to /openpi_assets inside the container. 13 | # Populate aws credential inside the container. 14 | volumes: 15 | - $PWD:/app 16 | - ${OPENPI_DATA_HOME:-~/.cache/openpi}:/openpi_assets 17 | environment: 18 | - SERVER_ARGS 19 | - OPENPI_DATA_HOME=/openpi_assets 20 | - IS_DOCKER=true 21 | 22 | # Comment out this block if not running on a machine with GPUs. 
23 | deploy: 24 | resources: 25 | reservations: 26 | devices: 27 | - driver: nvidia 28 | count: 1 29 | capabilities: [gpu] 30 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/basic_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import sys 4 | import qpSWIFT 5 | import scipy.io as sio 6 | 7 | data = sio.loadmat('Matrix.mat') 8 | 9 | P = np.array(data['P']) 10 | c = np.reshape(np.array(data['c']),(data['c'].size)) 11 | A = np.array(data['A']) 12 | b = np.reshape(np.array(data['b']),(data['b'].size)) 13 | G = np.array(data['G'],dtype=float) 14 | h = np.reshape(np.array(data['h']),(data['h'].size)) 15 | 16 | num = 1000 17 | 18 | for i in range(0,num): 19 | reseq = qpSWIFT.run(c,h,P,G,A,b) 20 | sys.stdout.write('\r') 21 | sys.stdout.write("Progress: %d %%" % ((i*1.0/num)*0.5*100)) 22 | sys.stdout.flush() 23 | 24 | 25 | for i in range(0,num): 26 | reseq = qpSWIFT.run(c,h,P,G) 27 | sys.stdout.write('\r') 28 | sys.stdout.write("Progress: %d %%" % ((i*1.0/num)*0.5*100 + 50)) 29 | sys.stdout.flush() 30 | 31 | sys.stdout.write('\r') 32 | print("Basic Test Passed") 33 | -------------------------------------------------------------------------------- /policy/openpi/examples/simple_client/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile examples/simple_client/requirements.in -o examples/simple_client/requirements.txt --python-version 3.11.9 3 | docstring-parser==0.16 4 | # via tyro 5 | markdown-it-py==3.0.0 6 | # via rich 7 | mdurl==0.1.2 8 | # via markdown-it-py 9 | numpy==1.26.4 10 | # via -r examples/simple_client/requirements.in 11 | polars==1.30.0 12 | # via -r examples/simple_client/requirements.in 13 | pygments==2.19.1 14 | # via rich 15 | rich==14.0.0 16 | # via 17 | # -r 
examples/simple_client/requirements.in 18 | # tyro 19 | shtab==1.7.2 20 | # via tyro 21 | tqdm==4.67.1 22 | # via -r examples/simple_client/requirements.in 23 | typeguard==4.4.2 24 | # via tyro 25 | typing-extensions==4.13.2 26 | # via 27 | # typeguard 28 | # tyro 29 | tyro==0.9.22 30 | # via -r examples/simple_client/requirements.in 31 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/matlab/demoqp.m: -------------------------------------------------------------------------------- 1 | %%% Sample Quadratic Program for qpSWIFT 2 | 3 | clc; 4 | clear all; 5 | close all; 6 | 7 | 8 | %%%% Solver Options 9 | %%% For information about Solver options please refer to qpSWIFT 10 | %%% documentation or type help qpSWIFT in the matlab command prompt 11 | opts.VERBOSE = 1; 12 | 13 | 14 | %%%% Cost Function 15 | P = [5 1 0;1 2 1;0 1 4]; 16 | c = [1;2;1]; 17 | 18 | 19 | %%%% Equality Constraints 20 | A = [1 -2 1]; 21 | b = 3; 22 | 23 | 24 | %%%% Inequality Constraints 25 | G = [-4 -4 0;0 0 -1]; 26 | h = [-1;-1]; 27 | 28 | 29 | 30 | %%% Equality Constrained Quadratic Program 31 | fprintf("-----Equality Constrained Quadratic Program-----\n\n"); 32 | [soleq,basic_infoeq,adv_infoeq] = qpSWIFT(sparse(P),c,sparse(A),b,sparse(G),h,opts); 33 | 34 | 35 | %%% Inequality Constrained Quadratic Program 36 | fprintf("-----Inequality Constrained Quadratic Program-----\n\n"); 37 | [sol,basic_info,adv_info] = qpSWIFT(sparse(P),c,sparse(G),h,opts); 38 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set( 2 | qpSWIFT_src 3 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_1.c" 4 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_aat.c" 5 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_defaults.c" 6 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_global.c" 7 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_order.c" 8 | 
"${CMAKE_CURRENT_SOURCE_DIR}/amd_post_tree.c" 9 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_valid.c" 10 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_2.c" 11 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_control.c" 12 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_dump.c" 13 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_info.c" 14 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_postorder.c" 15 | "${CMAKE_CURRENT_SOURCE_DIR}/amd_preprocess.c" 16 | "${CMAKE_CURRENT_SOURCE_DIR}/ldl.c" 17 | "${CMAKE_CURRENT_SOURCE_DIR}/timer.c" 18 | "${CMAKE_CURRENT_SOURCE_DIR}/Auxilary.c" 19 | "${CMAKE_CURRENT_SOURCE_DIR}/qpSWIFT.c" 20 | ) 21 | 22 | 23 | set( 24 | qpSWIFT_src 25 | "${qpSWIFT_src}" 26 | PARENT_SCOPE 27 | ) -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/matlab/README.txt: -------------------------------------------------------------------------------- 1 | ------------------------------------------------------------------------------- 2 | Compilation -> Type Swift_make('qpSWIFT') in your matlab command window 3 | -> Add the mex file to your working directory to use qpSWIFT 4 | ------------------------------------------------------------------------------- 5 | Usage -> Instructions on how to use the mex-file are given in qpSWIFT.m 6 | or 7 | -> Type help qpSWIFT in your matlab command window 8 | ------------------------------------------------------------------------------- 9 | Demo -> Demo QP is given in demoqp.m 10 | ------------------------------------------------------------------------------- 11 | 12 | 13 | ------------------------------------------------------------------------------- 14 | Note: Make sure you have compatible C compiler available for your matlab version 15 | ------------------------------------------------------------------------------- -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/src/License(LDL).txt: -------------------------------------------------------------------------------- 
1 | LDL Copyright (c) 2005-2013 by Timothy A. Davis. 2 | LDL is also available under other licenses; contact the author for details. 3 | http://www.suitesparse.com 4 | 5 | -------------------------------------------------------------------------------- 6 | 7 | LDL is free software; you can redistribute it and/or 8 | modify it under the terms of the GNU Lesser General Public 9 | License as published by the Free Software Foundation; either 10 | version 2.1 of the License, or (at your option) any later version. 11 | 12 | LDL is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 | Lesser General Public License for more details. 16 | 17 | You should have received a copy of the GNU Lesser General Public 18 | License along with this Module; if not, write to the Free Software 19 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 20 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/qpSWIFT.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | README.txt 2 | pyqpSWIFT.c 3 | setup.py 4 | ../src/Auxilary.c 5 | ../src/amd_1.c 6 | ../src/amd_2.c 7 | ../src/amd_aat.c 8 | ../src/amd_control.c 9 | ../src/amd_defaults.c 10 | ../src/amd_dump.c 11 | ../src/amd_global.c 12 | ../src/amd_info.c 13 | ../src/amd_order.c 14 | ../src/amd_post_tree.c 15 | ../src/amd_postorder.c 16 | ../src/amd_preprocess.c 17 | ../src/amd_valid.c 18 | ../src/ldl.c 19 | ../src/qpSWIFT.c 20 | ../src/timer.c 21 | ../src/Auxilary.c 22 | ../src/amd_1.c 23 | ../src/amd_2.c 24 | ../src/amd_aat.c 25 | ../src/amd_control.c 26 | ../src/amd_defaults.c 27 | ../src/amd_dump.c 28 | ../src/amd_global.c 29 | ../src/amd_info.c 30 | ../src/amd_order.c 31 | ../src/amd_post_tree.c 32 | ../src/amd_postorder.c 33 | ../src/amd_preprocess.c 34 | 
../src/amd_valid.c 35 | ../src/ldl.c 36 | ../src/qpSWIFT.c 37 | ../src/timer.c 38 | qpSWIFT.egg-info/PKG-INFO 39 | qpSWIFT.egg-info/SOURCES.txt 40 | qpSWIFT.egg-info/dependency_links.txt 41 | qpSWIFT.egg-info/requires.txt 42 | qpSWIFT.egg-info/top_level.txt -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/backbones/vision/siglip_vit.py: -------------------------------------------------------------------------------- 1 | """ 2 | siglip_vit.py 3 | """ 4 | 5 | from prismatic.models.backbones.vision.base_vision import TimmViTBackbone 6 | 7 | # Registry =>> Supported SigLIP Vision Backbones (from TIMM) =>> Note:: Using SigLIP w/ Patch = 14 (but SO400M Arch) 8 | SIGLIP_VISION_BACKBONES = { 9 | "siglip-vit-b16-224px": "vit_base_patch16_siglip_224", 10 | "siglip-vit-b16-256px": "vit_base_patch16_siglip_256", 11 | "siglip-vit-b16-384px": "vit_base_patch16_siglip_384", 12 | "siglip-vit-so400m": "vit_so400m_patch14_siglip_224", 13 | "siglip-vit-so400m-384px": "vit_so400m_patch14_siglip_384", 14 | } 15 | 16 | 17 | class SigLIPViTBackbone(TimmViTBackbone): 18 | def __init__(self, vision_backbone_id: str, image_resize_strategy: str, default_image_size: int = 224) -> None: 19 | super().__init__( 20 | vision_backbone_id, 21 | SIGLIP_VISION_BACKBONES[vision_backbone_id], 22 | image_resize_strategy, 23 | default_image_size=default_image_size, 24 | ) 25 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/include/qpSWIFT.h: -------------------------------------------------------------------------------- 1 | #ifndef __QP_SWIFT_H__ 2 | #define __QP_SWIFT_H__ 3 | 4 | #ifdef __cplusplus 5 | extern "C" 6 | { 7 | #endif 8 | #include "Auxilary.h" 9 | 10 | 11 | /* Main Solver Functions */ 12 | 13 | /* QP Setup Function sparse version */ 14 | QP *QP_SETUP(qp_int n, qp_int m, qp_int p, qp_int *Pjc, qp_int *Pir, qp_real *Ppr, qp_int *Ajc, qp_int *Air, qp_real *Apr, 
qp_int *Gjc, qp_int *Gir, qp_real *Gpr, qp_real *c, qp_real *h, qp_real *b, qp_real sigma_d, qp_int *Permut); 15 | 16 | /* QP Setup Function dense version */ 17 | QP *QP_SETUP_dense(qp_int n, qp_int m, qp_int p, qp_real *Ppr, qp_real *Apr, qp_real *Gpr, qp_real *c, qp_real *h, qp_real *b, qp_int *Permut, int ordering); 18 | 19 | /* QP Solve Function */ 20 | qp_int QP_SOLVE(QP *myQP); 21 | 22 | /* QP Clean Function sparse version */ 23 | void QP_CLEANUP(QP *myQP); 24 | 25 | /* QP Clean Function dense version */ 26 | void QP_CLEANUP_dense(QP *myQP); 27 | 28 | 29 | #ifdef __cplusplus 30 | } 31 | #endif 32 | 33 | #endif 34 | 35 | /*! @file */ -------------------------------------------------------------------------------- /tools/ros_test.py: -------------------------------------------------------------------------------- 1 | import rospy 2 | from geometry_msgs.msg import Twist 3 | 4 | def move_robot(): 5 | # init ROS node 6 | rospy.init_node('robot_controller', anonymous=True) 7 | 8 | # make publish /cmd_vel topic 9 | pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10) 10 | 11 | move_cmd = Twist() 12 | 13 | # Set linear velocity and angular velocity. 14 | move_cmd.linear.x = 0.5 # Linear velocity 0.5 m/s, indicating forward movement. 15 | move_cmd.angular.z = 0.0 # Angular velocity 0.2 rad/s, indicating rotation. 16 | 17 | # set publish freq 18 | rate = rospy.Rate(10) # 10Hz 19 | 20 | rospy.loginfo("Robot moving...") 21 | for _ in range(50): 22 | pub.publish(move_cmd) 23 | rate.sleep() # Control publish frequency is set to 10 Hz. 
24 | 25 | # stop 26 | rospy.loginfo("Robot stopped.") 27 | move_cmd.linear.x = 0.0 28 | move_cmd.angular.z = 0.0 29 | pub.publish(move_cmd) 30 | 31 | if __name__ == "__main__": 32 | try: 33 | move_robot() 34 | except rospy.ROSInterruptException: 35 | pass -------------------------------------------------------------------------------- /sensor/vision_sensor.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | from sensor.sensor import Sensor 5 | 6 | class VisionSensor(Sensor): 7 | def __init__(self, encode_rgb=False): 8 | super().__init__() 9 | self.name = "vision_sensor" 10 | self.type = "vision_sensor" 11 | self.collect_info = None 12 | self.encode_rgb = encode_rgb 13 | 14 | def get_information(self): 15 | image_info = {} 16 | image = self.get_image() 17 | if "color" in self.collect_info: 18 | if getattr(self, "encode_rgb", False): 19 | import cv2 20 | success, encoded_image = cv2.imencode('.jpg', image["color"]) 21 | jpeg_data = encoded_image.tobytes() 22 | image["color"] = jpeg_data 23 | image_info["color"] = image["color"] 24 | if "depth" in self.collect_info: 25 | image_info["depth"] = image["depth"] 26 | if "point_cloud" in self.collect_info: 27 | image_info["point_cloud"] = image["point_cloud"] 28 | 29 | return image_info 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/model/bet/libraries/mingpt/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) Copyright (c) 2020 Andrej Karpathy 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit 
persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | 9 | -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Karl Pertsch 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /policy/openvla-oft/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Moo Jin Kim, Chelsea Finn, Percy Liang. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/python/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup, Extension 2 | import numpy as np 3 | 4 | _qpSWIFT = Extension("qpSWIFT", 5 | sources= [ 6 | "pyqpSWIFT.c", 7 | "../src/amd_1.c", 8 | "../src/amd_2.c", 9 | "../src/amd_aat.c", 10 | "../src/amd_control.c", 11 | "../src/amd_defaults.c", 12 | "../src/amd_dump.c", 13 | "../src/amd_global.c", 14 | "../src/amd_info.c", 15 | "../src/amd_order.c", 16 | "../src/amd_post_tree.c", 17 | "../src/amd_postorder.c", 18 | "../src/amd_preprocess.c", 19 | "../src/amd_valid.c", 20 | "../src/ldl.c", 21 | "../src/timer.c", 22 | "../src/Auxilary.c", 23 | "../src/qpSWIFT.c" 24 | ], 25 | include_dirs=["../include/", 26 | np.get_include(), 27 | ], 28 | # extra_compile_args=["-O3" 29 | # ] 30 | ) 31 | 32 | def main(): 33 | setup( 34 | name="qpSWIFT", 35 | version="1.0.0", 36 | description="Python interface for qpSWIFT", 37 | author="Abhishek Pandala", 38 | setup_requires=["numpy >= 1.6"], 39 | install_requires=["numpy >= 1.6"], 40 | ext_modules=[_qpSWIFT] 41 | ) 42 | 43 | if __name__ == "__main__": 44 | main() 45 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/shared_memory/shared_memory_util.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | from dataclasses import dataclass 3 | import numpy as np 4 | from multiprocessing.managers import SharedMemoryManager 5 | from atomics import atomicview, MemoryOrder, UINT 6 | 7 | 8 | @dataclass 9 | class ArraySpec: 10 | name: str 11 | shape: Tuple[int] 12 | dtype: np.dtype 13 | 14 | 15 | class SharedAtomicCounter: 16 | 17 | def __init__(self, shm_manager: SharedMemoryManager, size: int = 8): # 64bit int 18 | shm = shm_manager.SharedMemory(size=size) 19 | self.shm = shm 20 | 
self.size = size 21 | self.store(0) # initialize 22 | 23 | @property 24 | def buf(self): 25 | return self.shm.buf[:self.size] 26 | 27 | def load(self) -> int: 28 | with atomicview(buffer=self.buf, atype=UINT) as a: 29 | value = a.load(order=MemoryOrder.ACQUIRE) 30 | return value 31 | 32 | def store(self, value: int): 33 | with atomicview(buffer=self.buf, atype=UINT) as a: 34 | a.store(value, order=MemoryOrder.RELEASE) 35 | 36 | def add(self, value: int): 37 | with atomicview(buffer=self.buf, atype=UINT) as a: 38 | a.add(value, order=MemoryOrder.ACQ_REL) 39 | -------------------------------------------------------------------------------- /policy/DP/README_CN.md: -------------------------------------------------------------------------------- 1 | [![中文](https://img.shields.io/badge/中文-简体-blue)](./README_CN.md) 2 | [![English](https://img.shields.io/badge/English-English-green)](./README.md) 3 | 4 | # DP (Diffusion Policy) 模型部署指南 5 | 6 | ## 快速开始 7 | 8 | ### 1. 环境配置 9 | 10 | 安装 DP 部署所需的依赖环境: 11 | 12 | ```bash 13 | cd policy/DP/ 14 | pip install -e . 15 | ``` 16 | 17 | ### 2. 数据准备与训练 18 | 19 | #### 数据转换 20 | 21 | 将采集的数据转换为 DP 模型所需的 zarr 格式: 22 | 23 | ```bash 24 | cd policy/DP/ 25 | python scripts/process_data.py 26 | # 例子:python process_data.py data/test_data/ processed_data/test_data-100.zarr/ 100 27 | ``` 28 | 29 | ### 3. 真机部署 30 | 31 | 32 | 1. 将训练好的 checkpoint 复制到以下目录: 33 | ``` 34 | control_your_robot/policy/DP/checkpoints/ 35 | ``` 36 | 37 | 2. 
修改部署脚本 `example/deploy/piper_single_on_DP.py`: 38 | 39 | ```python 40 | # 在第 316 行左右修改模型路径 41 | model = MYDP(model_path="policy/DP/checkpoints/feed_test_30-100-0/300.ckpt", task_name="feed_test_30", INFO="DEBUG") 42 | ``` 43 | 44 | **参数说明:** 45 | - 第一个参数: Policy 模型的文件夹地址 46 | - 第二个参数: 对应的任务名称 47 | 48 | #### 3.3 执行部署 49 | 50 | 运行部署脚本启动真机执行: 51 | 52 | ```bash 53 | python example/deploy/piper_single_on_DP.py 54 | ``` 55 | 56 | ## 注意事项 57 | 58 | - 确保机械臂已正确使能并连接 59 | - 检查模型路径是否正确 60 | - 部署前建议先在测试环境中验证模型效果 -------------------------------------------------------------------------------- /policy/openvla-oft/rlds_dataset_builder/example_dataset/create_example_data.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tqdm 3 | import os 4 | 5 | N_TRAIN_EPISODES = 100 6 | N_VAL_EPISODES = 100 7 | 8 | EPISODE_LENGTH = 10 9 | 10 | 11 | def create_fake_episode(path): 12 | episode = [] 13 | for step in range(EPISODE_LENGTH): 14 | episode.append({ 15 | 'image': np.asarray(np.random.rand(64, 64, 3) * 255, dtype=np.uint8), 16 | 'wrist_image': np.asarray(np.random.rand(64, 64, 3) * 255, dtype=np.uint8), 17 | 'state': np.asarray(np.random.rand(10), dtype=np.float32), 18 | 'action': np.asarray(np.random.rand(10), dtype=np.float32), 19 | 'language_instruction': 'dummy instruction', 20 | }) 21 | np.save(path, episode) 22 | 23 | 24 | # create fake episodes for train and validation 25 | print("Generating train examples...") 26 | os.makedirs('data/train', exist_ok=True) 27 | for i in tqdm.tqdm(range(N_TRAIN_EPISODES)): 28 | create_fake_episode(f'data/train/episode_{i}.npy') 29 | 30 | print("Generating val examples...") 31 | os.makedirs('data/val', exist_ok=True) 32 | for i in tqdm.tqdm(range(N_VAL_EPISODES)): 33 | create_fake_episode(f'data/val/episode_{i}.npy') 34 | 35 | print('Successfully created example data!') 36 | -------------------------------------------------------------------------------- 
/policy/RDT/configs/calvin_rel_traj_location_bounds_task_ABC_D.json: -------------------------------------------------------------------------------- 1 | { 2 | "A": [ 3 | [ 4 | -0.2691913843154907, 5 | -0.21995729207992554, 6 | -0.182277649641037 7 | ], 8 | [ 9 | 0.35127854347229004, 10 | 0.2769763469696045, 11 | 0.17159393429756165 12 | ] 13 | ], 14 | "B": [ 15 | [ 16 | -0.2576896846294403, 17 | -0.22244493663311005, 18 | -0.20557966828346252 19 | ], 20 | [ 21 | 0.32854634523391724, 22 | 0.2922680974006653, 23 | 0.17373555898666382 24 | ] 25 | ], 26 | "C": [ 27 | [ 28 | -0.29205888509750366, 29 | -0.24688798189163208, 30 | -0.17577645182609558 31 | ], 32 | [ 33 | 0.25053921341896057, 34 | 0.3277084231376648, 35 | 0.16431939601898193 36 | ] 37 | ], 38 | "D": [ 39 | [ 40 | -0.25131964683532715, 41 | -0.15233077108860016, 42 | -0.13294968008995056 43 | ], 44 | [ 45 | 0.19209328293800354, 46 | 0.19344553351402283, 47 | 0.1370421051979065 48 | ] 49 | ] 50 | } -------------------------------------------------------------------------------- /policy/openpi/src/openpi/policies/policy_test.py: -------------------------------------------------------------------------------- 1 | from openpi_client import action_chunk_broker 2 | import pytest 3 | 4 | from openpi.policies import aloha_policy 5 | from openpi.policies import policy_config as _policy_config 6 | from openpi.training import config as _config 7 | 8 | 9 | @pytest.mark.manual 10 | def test_infer(): 11 | config = _config.get_config("pi0_aloha_sim") 12 | policy = _policy_config.create_trained_policy(config, "gs://openpi-assets/checkpoints/pi0_aloha_sim") 13 | 14 | example = aloha_policy.make_aloha_example() 15 | result = policy.infer(example) 16 | 17 | assert result["actions"].shape == (config.model.action_horizon, 14) 18 | 19 | 20 | @pytest.mark.manual 21 | def test_broker(): 22 | config = _config.get_config("pi0_aloha_sim") 23 | policy = _policy_config.create_trained_policy(config, 
"gs://openpi-assets/checkpoints/pi0_aloha_sim") 24 | 25 | broker = action_chunk_broker.ActionChunkBroker( 26 | policy, 27 | # Only execute the first half of the chunk. 28 | action_horizon=config.model.action_horizon // 2, 29 | ) 30 | 31 | example = aloha_policy.make_aloha_example() 32 | for _ in range(config.model.action_horizon): 33 | outputs = broker.infer(example) 34 | assert outputs["actions"].shape == (14,) 35 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/config/task/default_task_14.yaml: -------------------------------------------------------------------------------- 1 | name: task_config 2 | 3 | image_shape: &image_shape [3, -1, -1] 4 | shape_meta: &shape_meta 5 | # acceptable types: rgb, low_dim 6 | obs: 7 | head_cam: 8 | shape: *image_shape 9 | type: rgb 10 | # front_cam: 11 | # shape: *image_shape 12 | # type: rgb 13 | # left_cam: 14 | # shape: *image_shape 15 | # type: rgb 16 | # right_cam: 17 | # shape: *image_shape 18 | # type: rgb 19 | agent_pos: 20 | shape: [14] 21 | type: low_dim 22 | action: 23 | shape: [14] 24 | 25 | env_runner: 26 | _target_: diffusion_policy.env_runner.pusht_image_runner.PushTImageRunner 27 | n_train: 6 28 | n_train_vis: 2 29 | train_start_seed: 0 30 | n_test: 50 31 | n_test_vis: 4 32 | legacy_test: True 33 | test_start_seed: 100000 34 | max_steps: 300 35 | n_obs_steps: ${n_obs_steps} 36 | n_action_steps: ${n_action_steps} 37 | fps: 10 38 | past_action: ${past_action_visible} 39 | n_envs: null 40 | 41 | dataset: 42 | _target_: diffusion_policy.dataset.robot_image_dataset.RobotImageDataset 43 | zarr_path: data/useless.zarr 44 | batch_size: ${dataloader.batch_size} 45 | horizon: ${horizon} 46 | pad_before: ${eval:'${n_obs_steps}-1'} 47 | pad_after: ${eval:'${n_action_steps}-1'} 48 | seed: 42 49 | val_ratio: 0.02 50 | max_train_episodes: null 51 | -------------------------------------------------------------------------------- 
/policy/DP/diffusion_policy/config/task/default_task_16.yaml: -------------------------------------------------------------------------------- 1 | name: task_config 2 | 3 | image_shape: &image_shape [3, -1, -1] 4 | shape_meta: &shape_meta 5 | # acceptable types: rgb, low_dim 6 | obs: 7 | head_cam: 8 | shape: *image_shape 9 | type: rgb 10 | # front_cam: 11 | # shape: *image_shape 12 | # type: rgb 13 | # left_cam: 14 | # shape: *image_shape 15 | # type: rgb 16 | # right_cam: 17 | # shape: *image_shape 18 | # type: rgb 19 | agent_pos: 20 | shape: [16] 21 | type: low_dim 22 | action: 23 | shape: [16] 24 | 25 | env_runner: 26 | _target_: diffusion_policy.env_runner.pusht_image_runner.PushTImageRunner 27 | n_train: 6 28 | n_train_vis: 2 29 | train_start_seed: 0 30 | n_test: 50 31 | n_test_vis: 4 32 | legacy_test: True 33 | test_start_seed: 100000 34 | max_steps: 300 35 | n_obs_steps: ${n_obs_steps} 36 | n_action_steps: ${n_action_steps} 37 | fps: 10 38 | past_action: ${past_action_visible} 39 | n_envs: null 40 | 41 | dataset: 42 | _target_: diffusion_policy.dataset.robot_image_dataset.RobotImageDataset 43 | zarr_path: data/useless.zarr 44 | batch_size: ${dataloader.batch_size} 45 | horizon: ${horizon} 46 | pad_before: ${eval:'${n_obs_steps}-1'} 47 | pad_after: ${eval:'${n_action_steps}-1'} 48 | seed: 42 49 | val_ratio: 0.02 50 | max_train_episodes: null 51 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/demo/Matrices.h: -------------------------------------------------------------------------------- 1 | #ifndef __MATRICES_H__ 2 | #define __MATRICES_H__ 3 | #include "GlobalOptions.h" 4 | 5 | /* Using the default value of 0.0 */ 6 | qp_real sigma_d = 0.0; 7 | 8 | /* Cost Function P Matrix in CCS format */ 9 | qp_real Ppr[7] = {5.0, 1.0, 1.0, 2.0, 1.0, 1.0, 4.0}; 10 | qp_int Pir[7] = {0, 1, 0, 1, 2, 1, 2}; 11 | qp_int Pjc[4] = {0, 2, 5, 7}; 12 | 13 | /* Cost Function c vector */ 14 | qp_real c[3] = {1.0, 
2.0, 1.0}; 15 | 16 | /* Equality Constraint A Matrix in CCS format */ 17 | qp_real Apr[3] = {1.0, -2.0, 1.0}; 18 | qp_int Air[3] = {0, 0, 0}; 19 | qp_int Ajc[4] = {0, 1, 2, 3}; 20 | 21 | /* Equality Constraints b vector */ 22 | qp_real b[1] = {3.0}; 23 | 24 | /* Inequality Constraint G Matrix in CCS format */ 25 | qp_real Gpr[3] = {-4.0, -4.0, -1.0}; 26 | qp_int Gir[3] = {0, 0, 1}; 27 | qp_int Gjc[4] = {0, 1, 2, 3}; 28 | 29 | /* Inequality Constraint h vector */ 30 | qp_real h[2] = {-1.0, -1.0}; 31 | 32 | /* Data for QP*/ 33 | qp_int n = 3; /* Number of decision Variables */ 34 | qp_int m = 2; /* Number of inequality constraints */ 35 | qp_int p = 1; /* Number of equality constraints */ 36 | 37 | /* Permutation Vector optional */ 38 | qp_int Permut[6] = {5, 2, 3, 1, 4, 0}; 39 | 40 | qp_int Permutineq[5] = {4, 2, 0, 3, 1}; 41 | 42 | #endif 43 | 44 | /*! @file */ -------------------------------------------------------------------------------- /controller/mobile_controller.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | from controller.controller import Controller 5 | from typing import Dict, Any 6 | import numpy as np 7 | 8 | class MobileController(Controller): 9 | def __init__(self): 10 | super().__init__() 11 | self.controller_type = "robotic_mobile" 12 | self.controller = None 13 | ''' 14 | 对于底盘移动,不进行is_delta的判断, 直接进行移动 15 | ''' 16 | def move_controller(self, move_data:Dict[str, Any], is_delta=False): 17 | # moving by setting velocity for every joint 18 | if "move_velocity" in move_data.keys(): 19 | self.set_move_velocity(np.array(move_data["move_velocity"])) 20 | # moving by set position 21 | if "move_to" in move_data.keys(): 22 | self.set_move_to(np.array(move_data["move_to"])) 23 | 24 | def get_information(self): 25 | mobile_info = {} 26 | if "move_velocity" in self.collect_info: 27 | mobile_info["move_velocity"] = self.get_move_velocity() 28 | if "position" in 
self.collect_info: 29 | mobile_info["position"] = self.get_position() 30 | return mobile_info 31 | 32 | def __repr__(self): 33 | if self.controller is not None: 34 | return f"{self.name}: \n \ 35 | controller: {self.controller}" 36 | else: 37 | return super().__repr__() 38 | -------------------------------------------------------------------------------- /policy/openpi/examples/simple_client/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile for the simple client. 2 | 3 | # Build the container: 4 | # docker build . -t simple_client -f examples/simple_client/Dockerfile 5 | 6 | # Run the container: 7 | # docker run --rm -it --network=host -v .:/app simple_client /bin/bash 8 | 9 | FROM python:3.7-slim 10 | COPY --from=ghcr.io/astral-sh/uv:0.5.1 /uv /uvx /bin/ 11 | 12 | WORKDIR /app 13 | 14 | # Copy from the cache instead of linking since it's a mounted volume 15 | ENV UV_LINK_MODE=copy 16 | 17 | # Write the virtual environment outside of the project directory so it doesn't 18 | # leak out of the container when we mount the application code. 19 | ENV UV_PROJECT_ENVIRONMENT=/.venv 20 | 21 | # Copy the requirements files so we can install dependencies. 22 | # The rest of the project is mounted as a volume, so we don't need to rebuild on changes. 23 | # This strategy is best for development-style usage. 24 | COPY ./examples/simple_client/requirements.txt /tmp/requirements.txt 25 | COPY ./packages/openpi-client/pyproject.toml /tmp/openpi-client/pyproject.toml 26 | 27 | # Install python dependencies. 
28 | RUN uv venv --python 3.11.9 $UV_PROJECT_ENVIRONMENT 29 | RUN uv pip sync /tmp/requirements.txt /tmp/openpi-client/pyproject.toml 30 | ENV PYTHONPATH=/app:/app/src:/app/packages/openpi-client/src 31 | 32 | CMD /bin/bash -c "source /.venv/bin/activate && python examples/simple_client/main.py $SERVER_ARGS" 33 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/backbones/vision/clip_vit.py: -------------------------------------------------------------------------------- 1 | """ 2 | clip_vit.py 3 | """ 4 | 5 | from prismatic.models.backbones.vision.base_vision import TimmViTBackbone 6 | 7 | # Registry =>> Supported CLIP Vision Backbones (from TIMM) 8 | CLIP_VISION_BACKBONES = { 9 | "clip-vit-b": "vit_base_patch16_clip_224.openai", 10 | "clip-vit-l": "vit_large_patch14_clip_224.openai", 11 | "clip-vit-l-336px": "vit_large_patch14_clip_336.openai", 12 | } 13 | 14 | 15 | # [IMPORTANT] By Default, TIMM initialized OpenAI CLIP models with the standard GELU activation from PyTorch. 
16 | # HOWEVER =>> Original OpenAI models were trained with the quick_gelu *approximation* -- while it's 17 | # a decent approximation, the resulting features are *worse*; this was a super tricky bug 18 | # to identify, but luckily there's an easy fix (`override_act_layer`) 19 | class CLIPViTBackbone(TimmViTBackbone): 20 | def __init__(self, vision_backbone_id: str, image_resize_strategy: str, default_image_size: int = 224) -> None: 21 | super().__init__( 22 | vision_backbone_id, 23 | CLIP_VISION_BACKBONES[vision_backbone_id], 24 | image_resize_strategy, 25 | default_image_size=default_image_size, 26 | override_act_layer="quick_gelu" if CLIP_VISION_BACKBONES[vision_backbone_id].endswith(".openai") else None, 27 | ) 28 | -------------------------------------------------------------------------------- /sensor/TactileGloveRos2_sensor.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | from sensor.touch_sensor import TouchSensor 5 | from utils.ros2_subscriber import ROS2Subscriber 6 | from utils.data_handler import is_enter_pressed 7 | 8 | import rclpy 9 | from rclpy.node import Node 10 | 11 | from std_msgs.msg import UInt8MultiArray 12 | import serial 13 | import threading 14 | import time 15 | 16 | class TactileGloveRosSensor(TouchSensor): 17 | def __init__(self, name): 18 | super().__init__() 19 | self.name = name 20 | self.controller_type = "user_controller" 21 | self.controller = None 22 | 23 | def set_up(self, topic_name): 24 | subscriber = ROS2Subscriber( 25 | node_name='"hand_tactile_publisher"', 26 | topic_name=topic_name, 27 | msg_type=UInt8MultiArray, 28 | ) 29 | 30 | self.controller = { "subscriber":subscriber} 31 | 32 | def get_touch(self): 33 | msg = self.controller["subscriber"].get_latest_data() 34 | print(msg) 35 | return{"force":msg.data} 36 | 37 | 38 | if __name__ == "__main__": 39 | touch_sensor = TactileGloveRosSensor("left_hand") 40 | touch_sensor.set_up("left_hand") 
41 | 42 | touch_sensor.set_collect_info(["force"]) 43 | 44 | while True: 45 | print(touch_sensor.get()) 46 | if is_enter_pressed(): 47 | break 48 | time.sleep(0.01) -------------------------------------------------------------------------------- /policy/ACT/README_CN.md: -------------------------------------------------------------------------------- 1 | [![中文](https://img.shields.io/badge/中文-简体-blue)](./README_CN.md) 2 | [![English](https://img.shields.io/badge/English-English-green)](./README.md) 3 | 4 | # ACT (Action Chunking Transformer) 模型部署指南 5 | 6 | ## 快速开始 7 | 8 | ### 1. 环境配置 9 | 10 | 安装 ACT 部署所需的依赖环境: 11 | 12 | ```bash 13 | cd policy/ACT/ 14 | pip install -r requirements.txt 15 | ``` 16 | 17 | ### 2. 数据准备与训练 18 | 19 | #### 数据转换 20 | 21 | 将采集的数据转换为 ACT 模型所需的 HDF5 格式: 22 | 23 | ```bash 24 | python scripts/convert2act_hdf5.py 25 | ``` 26 | 27 | **示例:** 28 | ```bash 29 | # 将 save/test/ 目录下的数据转换为 ACT 格式 30 | python scripts/convert2act_hdf5.py ./save/test/ ~/RoboTwin/policy/ACT/data/ 31 | 32 | # 转换 pick_place_cup 任务数据 33 | python scripts/convert2act_hdf5.py save/pick_place_cup /path/to/output 34 | ``` 35 | 36 | ### 3. 真机部署 37 | 38 | 39 | 1. 将训练好的 checkpoint 复制到以下目录: 40 | ``` 41 | control_your_robot/policy/ACT/actckpt/ 42 | ``` 43 | 44 | 2. 
修改部署脚本 `example/deploy/piper_single_on_ACT.py`: 45 | 46 | ```python 47 | # 在第 120 行左右修改模型路径 48 | model = MYACT("/path/your/policy/ACT/act_ckpt/act-pick_place_cup/100", "act-pick_place_cup") 49 | ``` 50 | 51 | **参数说明:** 52 | - 第一个参数: Policy 模型的文件夹地址 53 | - 第二个参数: 对应的任务名称 54 | 55 | #### 3.3 执行部署 56 | 57 | 运行部署脚本启动真机执行: 58 | 59 | ```bash 60 | python example/deploy/piper_single_on_ACT.py 61 | ``` 62 | 63 | ## 注意事项 64 | 65 | - 确保机械臂已正确使能并连接 66 | - 检查模型路径是否正确 67 | - 部署前建议先在测试环境中验证模型效果 68 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/src/amd_defaults.c: -------------------------------------------------------------------------------- 1 | /* ========================================================================= */ 2 | /* === AMD_defaults ======================================================== */ 3 | /* ========================================================================= */ 4 | 5 | /* ------------------------------------------------------------------------- */ 6 | /* AMD, Copyright (c) Timothy A. Davis, */ 7 | /* Patrick R. Amestoy, and Iain S. Duff. See ../README.txt for License. */ 8 | /* email: DrTimothyAldenDavis@gmail.com */ 9 | /* ------------------------------------------------------------------------- */ 10 | 11 | /* User-callable. Sets default control parameters for AMD. See amd.h 12 | * for details. 
13 | */ 14 | 15 | #include "amd_internal.h" 16 | 17 | /* ========================================================================= */ 18 | /* === AMD defaults ======================================================== */ 19 | /* ========================================================================= */ 20 | 21 | GLOBAL void AMD_defaults 22 | ( 23 | double Control [ ] 24 | ) 25 | { 26 | Int i ; 27 | 28 | if (Control != (double *) NULL) 29 | { 30 | for (i = 0 ; i < AMD_CONTROL ; i++) 31 | { 32 | Control [i] = 0 ; 33 | } 34 | Control [AMD_DENSE] = AMD_DEFAULT_DENSE ; 35 | Control [AMD_AGGRESSIVE] = AMD_DEFAULT_AGGRESSIVE ; 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/tests/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | find_package(GTest) 2 | find_package(Eigen3) 3 | 4 | if(${GTest_FOUND} AND ${Eigen3_FOUND}) 5 | # add_executable(tests) 6 | # target_sources(tests PRIVATE ${PROJECT_SOURCE_DIR}/tests/test.cpp ${PROJECT_SOURCE_DIR}/tests/parser.cpp) 7 | # target_include_directories(tests PRIVATE ${GTEST_INCLUDE_DIRS} ${PROJECT_SOURCE_DIR}/tests/include) 8 | # target_link_libraries(tests PRIVATE ${GTEST_LIBRARIES} qpSWIFT-static m pthread) 9 | 10 | add_executable(stdaln) 11 | target_sources(stdaln PRIVATE ${PROJECT_SOURCE_DIR}/tests/stdaln.cpp) 12 | target_include_directories(stdaln PRIVATE ${GTEST_INCLUDE_DIRS} ${PROJECT_SOURCE_DIR}/tests/include) 13 | target_link_libraries(stdaln PRIVATE ${GTEST_LIBRARIES} qpSWIFT-static m pthread) 14 | 15 | add_executable(stdalnT) 16 | target_sources(stdalnT PRIVATE ${PROJECT_SOURCE_DIR}/tests/stdalnT.cpp) 17 | target_include_directories(stdalnT PRIVATE ${GTEST_INCLUDE_DIRS} ${PROJECT_SOURCE_DIR}/tests/include) 18 | target_link_libraries(stdalnT PRIVATE ${GTEST_LIBRARIES} qpSWIFT-static m pthread) 19 | 20 | # add_executable(stdalnTT) 21 | # target_sources(stdalnTT PRIVATE 
${PROJECT_SOURCE_DIR}/tests/stdalnTT.cpp) 22 | # target_include_directories(stdalnTT PRIVATE ${GTEST_INCLUDE_DIRS} ${PROJECT_SOURCE_DIR}/tests/include) 23 | # target_link_libraries(stdalnTT PRIVATE ${GTEST_LIBRARIES} qpSWIFT-static m pthread) 24 | 25 | 26 | endif() -------------------------------------------------------------------------------- /policy/RDT/train/image_corrupt.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | warnings.simplefilter(action='ignore', category=FutureWarning) 3 | 4 | import numpy as np 5 | np.bool = np.bool_ 6 | import imgaug.augmenters as iaa 7 | from PIL import Image 8 | 9 | 10 | # Define our sequence of augmentation steps that will be applied to every image. 11 | seq = iaa.Sequential( 12 | [ 13 | # Execute one of the following noise augmentations 14 | iaa.OneOf([ 15 | iaa.AdditiveGaussianNoise( 16 | loc=0, scale=(0.0, 0.05*255), per_channel=0.5 17 | ), 18 | iaa.AdditiveLaplaceNoise(scale=(0.0, 0.05*255), per_channel=0.5), 19 | iaa.AdditivePoissonNoise(lam=(0.0, 0.05*255), per_channel=0.5) 20 | ]), 21 | 22 | # Execute one or none of the following blur augmentations 23 | iaa.SomeOf((0, 1), [ 24 | iaa.OneOf([ 25 | iaa.GaussianBlur((0, 3.0)), 26 | iaa.AverageBlur(k=(2, 7)), 27 | iaa.MedianBlur(k=(3, 11)), 28 | ]), 29 | iaa.MotionBlur(k=(3, 36)), 30 | ]), 31 | ], 32 | # do all of the above augmentations in random order 33 | random_order=True 34 | ) 35 | 36 | 37 | def image_corrupt(image: Image): 38 | image_arr = np.array(image) 39 | image_arr = image_arr[None, ...] 
40 | 41 | image_arr = seq(images=image_arr) 42 | 43 | image = Image.fromarray(image_arr[0]) 44 | return image 45 | -------------------------------------------------------------------------------- /sensor/sensor.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | import time 4 | 5 | from utils.data_handler import debug_print 6 | 7 | class Sensor: 8 | def __init__(self, timestamp=True): 9 | self.name = "sensor" 10 | self.type = "sensor" 11 | self.timestamp = timestamp 12 | 13 | def set_collect_info(self, collect_info): 14 | self.collect_info = collect_info 15 | if self.timestamp: 16 | self.collect_info.append("timestamp") 17 | 18 | def get(self): 19 | if self.collect_info is None: 20 | debug_print({self.name},f"collect_info is not set, if only collecting controller data, forget this warning", "WARNING") 21 | return None 22 | info = self.get_information() 23 | 24 | if self.timestamp: 25 | info["timestamp"] = time.time_ns() 26 | 27 | for collect_info in self.collect_info: 28 | if info[collect_info] is None: 29 | debug_print(f"{self.name}", f"{collect_info} information is None", "ERROR") 30 | 31 | # 由于sensor数据比较高维, 所以不输出, 只调试信息是否为None 32 | # debug_print(f"{self.name}", f"get data:\n{info} ", "DEBUG") 33 | return {collect_info: info[collect_info] for collect_info in self.collect_info} 34 | 35 | def __repr__(self): 36 | return f"Base Sensor, can't be used directly \n \ 37 | name: {self.name} \n \ 38 | type: {self.type}" 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/model/diffusion/conv1d_components.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | # from einops.layers.torch import Rearrange 6 | 7 | 8 | class Downsample1d(nn.Module): 9 | 10 | def __init__(self, dim): 11 | super().__init__() 12 | 
self.conv = nn.Conv1d(dim, dim, 3, 2, 1) 13 | 14 | def forward(self, x): 15 | return self.conv(x) 16 | 17 | 18 | class Upsample1d(nn.Module): 19 | 20 | def __init__(self, dim): 21 | super().__init__() 22 | self.conv = nn.ConvTranspose1d(dim, dim, 4, 2, 1) 23 | 24 | def forward(self, x): 25 | return self.conv(x) 26 | 27 | 28 | class Conv1dBlock(nn.Module): 29 | """ 30 | Conv1d --> GroupNorm --> Mish 31 | """ 32 | 33 | def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): 34 | super().__init__() 35 | 36 | self.block = nn.Sequential( 37 | nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), 38 | # Rearrange('batch channels horizon -> batch channels 1 horizon'), 39 | nn.GroupNorm(n_groups, out_channels), 40 | # Rearrange('batch channels 1 horizon -> batch channels horizon'), 41 | nn.Mish(), 42 | ) 43 | 44 | def forward(self, x): 45 | return self.block(x) 46 | 47 | 48 | def test(): 49 | cb = Conv1dBlock(256, 128, kernel_size=3) 50 | x = torch.zeros((1, 256, 16)) 51 | o = cb(x) 52 | -------------------------------------------------------------------------------- /policy/openpi/examples/libero/compose.yml: -------------------------------------------------------------------------------- 1 | # Run with: 2 | # docker compose -f examples/libero/compose.yml up --build 3 | services: 4 | runtime: 5 | image: libero 6 | depends_on: 7 | - openpi_server 8 | build: 9 | context: ../.. 10 | dockerfile: examples/libero/Dockerfile 11 | init: true 12 | tty: true 13 | network_mode: host 14 | privileged: true 15 | volumes: 16 | - $PWD:/app 17 | - ../../data:/data 18 | - /tmp/.X11-unix:/tmp/.X11-unix:ro 19 | environment: 20 | - CLIENT_ARGS 21 | - DISPLAY=$DISPLAY 22 | - MUJOCO_GL=${MUJOCO_GL:-egl} 23 | deploy: 24 | resources: 25 | reservations: 26 | devices: 27 | - driver: nvidia 28 | count: 1 29 | capabilities: [gpu] 30 | 31 | openpi_server: 32 | image: openpi_server 33 | build: 34 | context: ../.. 
35 | dockerfile: scripts/docker/serve_policy.Dockerfile 36 | init: true 37 | tty: true 38 | network_mode: host 39 | volumes: 40 | - $PWD:/app 41 | - ${OPENPI_DATA_HOME:-~/.cache/openpi}:/openpi_assets 42 | environment: 43 | - SERVER_ARGS 44 | - OPENPI_DATA_HOME=/openpi_assets 45 | - IS_DOCKER=true 46 | 47 | # Comment out this block if not running on a machine with GPUs. 48 | deploy: 49 | resources: 50 | reservations: 51 | devices: 52 | - driver: nvidia 53 | count: 1 54 | capabilities: [gpu] 55 | -------------------------------------------------------------------------------- /example/task/parallel_task.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | from utils.task import YmlTask, Tasks, ShareSpace 5 | from my_robot.test_robot import TestRobot 6 | import numpy as np 7 | import os 8 | 9 | 10 | 11 | if __name__ == "__main__": 12 | # os.environ["INFO_LEVEL"] = "DEBUG" # DEBUG , INFO, ERROR 13 | robot_1 = TestRobot() 14 | robot_2 = TestRobot() 15 | 16 | robot_1.set_up() 17 | robot_2.set_up() 18 | 19 | sp = ShareSpace() 20 | 21 | my_task = Tasks.build_top({ 22 | "type": "Serial", 23 | "subtasks": [ 24 | {"type": "Parallel", 25 | "subtasks": [ 26 | YmlTask("./config/robot_1_move_mobile_1.yml", share_space=sp, robot=robot_1), 27 | YmlTask("./config/robot_2_move_mobile_1.yml", share_space=sp, robot=robot_2), 28 | ]}, 29 | {"type": "Parallel", 30 | "subtasks": [ 31 | YmlTask("./config/robot_1_model_infer.yml", share_space=sp, robot=robot_1), 32 | YmlTask("./config/robot_2_model_infer.yml", share_space=sp, robot=robot_2), 33 | ]}, 34 | {"type": "Parallel", 35 | "subtasks": [ 36 | YmlTask("./config/robot_1_move_mobile_2.yml", share_space=sp, robot=robot_1), 37 | YmlTask("./config/robot_2_move_mobile_2.yml", share_space=sp, robot=robot_2), 38 | ]}, 39 | ], 40 | }) 41 | while not my_task.is_success(): 42 | my_task.run() 43 | my_task.update() 
class TactileGloveRosSensor(TouchSensor):
    """Tactile-glove touch sensor backed by a ROS UInt8MultiArray topic.

    Wraps a ROSSubscriber so `get_touch()` can return the latest force
    reading as a numpy uint8 array.
    """

    def __init__(self, name):
        super().__init__()
        self.name = name
        self.controller_type = "user_controller"
        self.controller = None  # populated by set_up()

    def set_up(self, topic_name):
        """Subscribe to `topic_name` publishing std_msgs/UInt8MultiArray."""
        subscriber = ROSSubscriber(
            topic_name=topic_name,
            msg_type=UInt8MultiArray,
        )
        self.controller = {"subscriber": subscriber}

    def get_touch(self):
        """Return {"force": uint8 ndarray} for the latest message, or None
        if nothing has been received yet.

        Fix: the original fell off the end of the function when no message
        was available, returning None implicitly; made that explicit.
        """
        msg = self.controller["subscriber"].get_latest_data()
        if msg is None:
            return None
        force = np.frombuffer(msg.data, dtype=np.uint8)
        return {"force": force}
6 | ### 下载URDF 7 | agliex cobomagic机械臂URDF: 8 | 通过网盘分享的文件:仿真机械臂 9 | 链接: https://pan.baidu.com/s/1Mfrs3spVTeRWUHf_pyHZjQ?pwd=yq7m 10 | 提取码: yq7m 11 | 更多URDF请访问RoboTwin2.0, 提供了许多精修的URDF: 12 | https://github.com/RoboTwin-Platform/RoboTwin.git 13 | 提供的dual_piper_sim_robot.py就是里面的piper URDF. 14 | 15 | 需要修改`curobo_left.yml`和`curobo_right.yml`中`collision_spheres`和`urdf_path`,要求为绝对路径. 16 | 17 | 环境配置: 18 | ```bash 19 | # 安装sapien基础环境 20 | pip install - r requirements.txt 21 | 22 | # 安装curobo 23 | cd ../third_party 24 | git clone https://github.com/NVlabs/curobo.git 25 | cd curobo 26 | pip install -e . --no-build-isolation 27 | cd ../.. 28 | ``` 29 | 30 | ### 快速上手 31 | 需要下载对应的URDF文件, 然后设置代码里面的索引路径.cobomagic示例还要额外修改`.yml`文件的路径索引. 32 | ```bash 33 | # cobomagic通用控制 34 | python planner/cobomagic_sim_robot.py 35 | # piper双臂通用控制 36 | python planner/dual_piper_sim_robot.py 37 | ``` 38 | ### 已经实现 39 | | 日期 | 更新内容 | 状态 | 40 | |------------|----------------------------------|----------| 41 | | 2025.6.23 | 🤖仿真环境中的双单臂组合双臂示例 | ✅ 已发布 | 42 | | 2025.5.22 | 🤖仿真环境中的通用IK示例(4090 0.004s/step) | ✅ 已发布 | 43 | | 2025.5.22 | 🤖仿真环境中的通用planner示例(4090 0.15s/step) | ✅ 已发布 | 44 | | 2025.5.22 | 💻通用planner接入 | ✅ 已发布 | 45 | | 2025.5.22 | 🏙️接入D435仿真摄像头设置 | ✅ 已发布 | 46 | 47 | ### 正在路上 48 | - [ ] 📷多种camera设置支持 49 | - [ ] 📖URDF, sapien使用简单教学与示例 50 | 51 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/dataset/base_dataset.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | 3 | import torch 4 | import torch.nn 5 | from diffusion_policy.model.common.normalizer import LinearNormalizer 6 | 7 | 8 | class BaseLowdimDataset(torch.utils.data.Dataset): 9 | 10 | def get_validation_dataset(self) -> "BaseLowdimDataset": 11 | # return an empty dataset by default 12 | return BaseLowdimDataset() 13 | 14 | def get_normalizer(self, **kwargs) -> LinearNormalizer: 15 | raise NotImplementedError() 16 | 17 | 
class BaseImageDataset(torch.utils.data.Dataset):
    """Abstract base class for image-observation datasets.

    Subclasses implement normalization, action access and item retrieval;
    the base class behaves as an empty dataset.
    """

    def get_validation_dataset(self) -> "BaseImageDataset":
        # return an empty dataset by default
        # Fix: annotation previously said "BaseLowdimDataset" (copy-paste
        # error from the sibling class); the method returns a BaseImageDataset.
        return BaseImageDataset()

    def get_normalizer(self, **kwargs) -> LinearNormalizer:
        """Return a LinearNormalizer fitted to this dataset (subclass hook)."""
        raise NotImplementedError()

    def get_all_actions(self) -> torch.Tensor:
        """Return every action in the dataset as one tensor (subclass hook)."""
        raise NotImplementedError()

    def __len__(self) -> int:
        # Base class is an empty dataset.
        return 0

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        """
        output:
            obs:
                key: T, *
            action: T, Da
        """
        raise NotImplementedError()
For Posix machines */ 45 | 46 | void tic(qp_timer* t) 47 | { 48 | clock_gettime(CLOCK_MONOTONIC, &t->tic); 49 | } 50 | 51 | 52 | 53 | double toc(qp_timer* t) 54 | { 55 | struct timespec temp; 56 | 57 | clock_gettime(CLOCK_MONOTONIC, &t->toc); 58 | 59 | if ((t->toc.tv_nsec - t->tic.tv_nsec)<0) { 60 | temp.tv_sec = t->toc.tv_sec - t->tic.tv_sec - 1; 61 | temp.tv_nsec = 1000000000 + t->toc.tv_nsec - t->tic.tv_nsec; 62 | } 63 | else { 64 | temp.tv_sec = t->toc.tv_sec - t->tic.tv_sec; 65 | temp.tv_nsec = t->toc.tv_nsec - t->tic.tv_nsec; 66 | } 67 | return (qp_real)temp.tv_sec + (qp_real)temp.tv_nsec / 1000000000; 68 | } 69 | 70 | #endif 71 | /*! @file */ -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/common/pymunk_util.py: -------------------------------------------------------------------------------- 1 | import pygame 2 | import pymunk 3 | import pymunk.pygame_util 4 | import numpy as np 5 | 6 | COLLTYPE_DEFAULT = 0 7 | COLLTYPE_MOUSE = 1 8 | COLLTYPE_BALL = 2 9 | 10 | 11 | def get_body_type(static=False): 12 | body_type = pymunk.Body.DYNAMIC 13 | if static: 14 | body_type = pymunk.Body.STATIC 15 | return body_type 16 | 17 | 18 | def create_rectangle(space, pos_x, pos_y, width, height, density=3, static=False): 19 | body = pymunk.Body(body_type=get_body_type(static)) 20 | body.position = (pos_x, pos_y) 21 | shape = pymunk.Poly.create_box(body, (width, height)) 22 | shape.density = density 23 | space.add(body, shape) 24 | return body, shape 25 | 26 | 27 | def create_rectangle_bb(space, left, bottom, right, top, **kwargs): 28 | pos_x = (left + right) / 2 29 | pos_y = (top + bottom) / 2 30 | height = top - bottom 31 | width = right - left 32 | return create_rectangle(space, pos_x, pos_y, width, height, **kwargs) 33 | 34 | 35 | def create_circle(space, pos_x, pos_y, radius, density=3, static=False): 36 | body = pymunk.Body(body_type=get_body_type(static)) 37 | body.position = (pos_x, pos_y) 38 | shape = 
def get_body_state(body):
    """Pack a body's planar kinematic state into a length-6 float32 vector.

    Layout: [pos_x, pos_y, angle, vel_x, vel_y, angular_velocity].
    """
    pos_x, pos_y = body.position
    vel_x, vel_y = body.velocity
    return np.array(
        [pos_x, pos_y, body.angle, vel_x, vel_y, body.angular_velocity],
        dtype=np.float32,
    )
class ActionChunkBroker(_base_policy.BasePolicy):
    """Wraps a policy to return action chunks one-at-a-time.

    Assumes that the first dimension of all action fields is the chunk size.

    A new inference call to the inner policy is only made when the current
    list of chunks is exhausted.
    """

    def __init__(self, policy: _base_policy.BasePolicy, action_horizon: int):
        self._policy = policy
        self._action_horizon = action_horizon
        self._cur_step: int = 0
        self._last_results: Dict[str, np.ndarray] | None = None

    @override
    def infer(self, obs: Dict) -> Dict:  # noqa: UP006
        # Run the wrapped policy only when no cached chunk remains.
        if self._last_results is None:
            self._last_results = self._policy.infer(obs)
            self._cur_step = 0

        step = self._cur_step

        def take_step(leaf):
            # Arrays are chunked along dim 0; anything else passes through unchanged.
            return leaf[step, ...] if isinstance(leaf, np.ndarray) else leaf

        results = tree.map_structure(take_step, self._last_results)

        self._cur_step += 1
        if self._cur_step >= self._action_horizon:
            # Chunk exhausted: force a fresh inference on the next call.
            self._last_results = None

        return results

    @override
    def reset(self) -> None:
        """Reset the wrapped policy and discard any cached chunk."""
        self._policy.reset()
        self._last_results = None
        self._cur_step = 0
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generate finetune config.')
    parser.add_argument('model_name', type=str, help='The name of the task (e.g., block_hammer_beat)')
    args = parser.parse_args()
    model_name = args.model_name

    # Paths derived from the task/model name (typo fixed: fintune -> finetune).
    finetune_data_path = os.path.join("training_data/", f"{model_name}")
    checkpoint_path = os.path.join("checkpoints/", f"{model_name}")

    # Default finetuning hyper-parameters written to the per-model YAML.
    data = {
        'model': model_name,
        'data_path': finetune_data_path,
        'checkpoint_path': checkpoint_path,
        'pretrained_model_name_or_path': "../weights/RDT/rdt-1b",
        'cuda_visible_device': '...',  # args.gpu_use,
        'train_batch_size': 32,
        'sample_batch_size': 64,
        'max_train_steps': 20000,
        'checkpointing_period': 2500,
        'sample_period': 100,
        'checkpoints_total_limit': 40,
        'learning_rate': 1e-4,
        'dataloader_num_workers': 8,
        'state_noise_snr': 40,
        'gradient_accumulation_steps': 1
    }
    task_config_path = os.path.join("model_config/", f"{model_name}.yml")

    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    time_comment = f"# Generated on {current_time}\n"

    # Fix: ensure the output directory exists before opening the config file
    # (the original only created the data dir, and did so with a racy
    # exists()-then-makedirs check).
    os.makedirs(os.path.dirname(task_config_path), exist_ok=True)
    with open(task_config_path, 'w') as f:
        f.write(time_comment)
        yaml.dump(data, f, default_flow_style=False, sort_keys=False)

    os.makedirs(finetune_data_path, exist_ok=True)
def test_normalize_update():
    """RunningStats fed one row at a time must match numpy's batch mean/std."""
    arr = np.arange(12).reshape(4, 3)  # 4 vectors of length 3

    stats = normalize.RunningStats()
    for i in range(len(arr)):
        stats.update(arr[i : i + 1])  # Update with one vector at a time
    results = stats.get_statistics()

    # Streaming statistics must equal the one-shot per-column statistics.
    assert np.allclose(results.mean, np.mean(arr, axis=0))
    assert np.allclose(results.std, np.std(arr, axis=0))
def get_robomimic_config(algo_name="bc_rnn", hdf5_type="low_dim", task_name="square", dataset_type="ph"):
    """Build a robomimic config mirroring the paper-config generation pipeline.

    Args:
        algo_name: robomimic algorithm name; "bc_rnn" is mapped to the "bc"
            config factory below.
        hdf5_type: observation modality selector; any of the low_dim variants
            picks the low-dim defaults, everything else the image defaults.
        task_name: robomimic task (e.g. "square").
        dataset_type: dataset flavor (e.g. "ph").

    Returns:
        The fully-modified robomimic config object.
    """
    # Dummy path: the generators require a dataset dir but it is never read here.
    base_dataset_dir = "/tmp/null"
    filter_key = None

    # decide whether to use low-dim or image training defaults
    modifier_for_obs = modify_config_for_default_image_exp
    if hdf5_type in ["low_dim", "low_dim_sparse", "low_dim_dense"]:
        modifier_for_obs = modify_config_for_default_low_dim_exp

    # "bc_rnn" shares the "bc" base config; the RNN part is added by the
    # dataset-specific modifier looked up below.
    algo_config_name = "bc" if algo_name == "bc_rnn" else algo_name
    config = config_factory(algo_name=algo_config_name)
    # turn into default config for observation modalities (e.g.: low-dim or rgb)
    config = modifier_for_obs(config)
    # add in config based on the dataset
    config = modify_config_for_dataset(
        config=config,
        task_name=task_name,
        dataset_type=dataset_type,
        hdf5_type=hdf5_type,
        base_dataset_dir=base_dataset_dir,
        filter_key=filter_key,
    )
    # add in algo hypers based on dataset; resolved dynamically, e.g.
    # modify_bc_rnn_config_for_dataset for algo_name="bc_rnn".
    algo_config_modifier = getattr(gpc, f"modify_{algo_name}_config_for_dataset")
    config = algo_config_modifier(
        config=config,
        task_name=task_name,
        dataset_type=dataset_type,
        hdf5_type=hdf5_type,
    )
    return config
if __name__ == "__main__":
    import os
    os.environ["INFO_LEVEL"] = "DEBUG"  # DEBUG, INFO, ERROR

    robot = TestRobot()
    robot.set_up()

    start_episode = 0
    num_episode = 5

    for episode_id in range(start_episode, start_episode + num_episode):
        robot.reset()
        debug_print("main", "Press Enter to start...", "INFO")
        # Wait until the robot reports ready AND the operator presses Enter.
        while not robot.is_start() or not is_enter_pressed():
            time.sleep(1 / robot.condition["save_freq"])

        debug_print("main", "Press Enter to finish...", "INFO")

        total_interval = 0.0
        collect_num = 0
        while True:
            last_time = time.monotonic()

            data = robot.get()
            robot.collect(data)

            if is_enter_pressed():
                robot.finish(episode_id)
                break

            collect_num += 1
            # Busy-wait (1 ms granularity) until the save period has elapsed,
            # then record the actual interval for the running average.
            while True:
                now = time.monotonic()
                if now - last_time > 1 / robot.condition["save_freq"]:
                    total_interval += now - last_time
                    break
                time.sleep(0.001)

        extra_info = {}
        # Fix: guard against ZeroDivisionError when Enter is pressed before
        # a single full collection cycle completed (collect_num == 0).
        if collect_num > 0:
            extra_info["avg_time_interval"] = total_interval / collect_num
        robot.collection.add_extra_condition_info(extra_info)
class DexHandController(Controller):
    """Controller wrapper for a dexterous robotic hand.

    NOTE(review): relies on get_joint/get_action/get_velocity/get_force,
    set_joint/set_action and get_state being provided by subclasses or the
    Controller base — confirm against concrete hand drivers.
    """

    def __init__(self):
        super().__init__()
        self.controller_type = "robotic_hand"
        self.is_set_up = False
        self.controller = None

    def get_information(self):
        """Collect the readings named in self.collect_info into a dict."""
        hand_info = {}
        for key in ("joint", "action", "velocity", "force"):
            if key in self.collect_info:
                hand_info[key] = getattr(self, f"get_{key}")()
        return hand_info

    def move(self, move_data: Dict[str, Any], is_delta=False):
        """Command the hand with absolute targets, or deltas on current state."""
        if is_delta:
            current = self.get_state()
            for key, delta in move_data.items():
                if key == "joint":
                    self.set_joint(current["joint"] + delta)
                elif key == "action":
                    self.set_action(current["action"] + delta)
        else:
            for key, target in move_data.items():
                if key == "joint":
                    self.set_joint(target)
                elif key == "action":
                    self.set_action(target)

    def __repr__(self):
        if self.controller is None:
            return super().__repr__()
        return f"{self.name}: \n \
            controller: {self.controller}"
For Windows */ 10 | #if (defined _WIN32 || defined _WIN64 || defined _WINDLL ) 11 | 12 | /* Use Windows QueryPerformanceCounter for timing */ 13 | #include 14 | /*! \struct */ 15 | /*! Timer structure to store time information */ 16 | typedef struct qp_timer{ 17 | LARGE_INTEGER tic; /*!< tic time */ 18 | LARGE_INTEGER toc; /*!< tic time */ 19 | LARGE_INTEGER freq; /*!< cpu frequency */ 20 | } qp_timer; 21 | 22 | /*! For macOS */ 23 | #elif (defined __APPLE__) 24 | /*! \struct */ 25 | /*! Timer structure to store time information */ 26 | #include 27 | typedef struct qp_timer{ 28 | uint64_t tic; /*!< tic time */ 29 | uint64_t toc; /*!< toc time */ 30 | mach_timebase_info_data_t tinfo; /*!< time base info */ 31 | } qp_timer; 32 | 33 | 34 | 35 | #else 36 | 37 | /*! For POSIX machines */ 38 | /*! \struct */ 39 | /*! Timer structure to store time information */ 40 | #include 41 | #include 42 | 43 | typedef struct qp_timer{ 44 | struct timespec tic; /*!< tic time */ 45 | struct timespec toc; /*!< toc time */ 46 | } qp_timer; 47 | 48 | #endif 49 | 50 | /*! 51 | * @brief timer tic functions, similar to matlab tic, starts recording time from the instant the function is invoked 52 | * 53 | * 54 | * @param[in] t qp_timer structure 55 | * 56 | */ 57 | void tic(qp_timer* t); 58 | 59 | /*! 60 | * @brief timer toc functions, similar to matlab toc, returns the recorded time 61 | * 62 | * 63 | * @param[in] t qp_timer structure 64 | * @param[out] diff recorded time 65 | * 66 | */ 67 | qp_real toc(qp_timer* t); 68 | #endif 69 | /* END IFDEF __TIMER_H__ */ 70 | 71 | /*! 
@file */ -------------------------------------------------------------------------------- /policy/openpi/src/openpi/models/pi0_test.py: -------------------------------------------------------------------------------- 1 | import flax.nnx as nnx 2 | import jax 3 | 4 | import openpi.models.pi0_config as _pi0_config 5 | 6 | 7 | def _get_frozen_state(config: _pi0_config.Pi0Config) -> nnx.State: 8 | abstract_model = nnx.eval_shape(config.create, jax.random.key(0)) 9 | 10 | freeze_filter = config.get_freeze_filter() 11 | return nnx.state(abstract_model, nnx.All(nnx.Param, freeze_filter)).flat_state() 12 | 13 | 14 | def test_pi0_full_finetune(): 15 | config = _pi0_config.Pi0Config() 16 | state = _get_frozen_state(config) 17 | assert len(state) == 0 18 | 19 | 20 | def test_pi0_gemma_lora(): 21 | config = _pi0_config.Pi0Config(paligemma_variant="gemma_2b_lora") 22 | state = _get_frozen_state(config) 23 | assert len(state) == 9 24 | assert all("lora" not in p for p in state) 25 | assert all("llm" in p for p in state) 26 | assert all("_1" not in p for p in state) 27 | 28 | 29 | def test_pi0_action_expert_lora(): 30 | config = _pi0_config.Pi0Config(action_expert_variant="gemma_300m_lora") 31 | state = _get_frozen_state(config) 32 | # excluding embedder, rest of the params should be same as gemma_lora. 33 | assert len(state) == 8 34 | assert all("lora" not in p for p in state) 35 | assert all("llm" in p for p in state) 36 | # all frozen params should have _1 in their path since it's the action expert. 37 | assert all(any("_1" in p for p in path) for path in state) 38 | 39 | 40 | def test_pi0_all_lora(): 41 | config = _pi0_config.Pi0Config(paligemma_variant="gemma_2b_lora", action_expert_variant="gemma_300m_lora") 42 | state = _get_frozen_state(config) 43 | # sum of gemma_lora and action_expert_lora's frozen params. 
44 | assert len(state) == 17 45 | assert all("lora" not in p for p in state) 46 | assert all("llm" in p for p in state) 47 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/simulink/README.txt: -------------------------------------------------------------------------------- 1 | ------------------------------------------------------------------------------- 2 | Compilation -> Type Swift_make('qpSWIFT_sfunc_e') in your matlab command window 3 | to compile qpSWIFT to handle quadratic programs with inequality 4 | and equality constraints. The number of output variables of 5 | s-function is set to 3. To change this, modify the pre-processor 6 | definition NV (line 13) to required variables in the s-function qpSWIFT_sfunc_e.c 7 | -> Type Swift_make('qpSWIFT_sfunc') in your matlab command window 8 | to compile qpSWIFT to handle quadratic programs with only 9 | inequality constraints. To change this modify the pre-processor 10 | definition NV (line 13) to required variables in the s-function qpSWIFT_sfunc.c 11 | -> Add the corresponding mex file to your working directory to use qpSWIFT 12 | ------------------------------------------------------------------------------- 13 | Usage -> Instructions on using the qpSWIFT_sfunc.c s-function can be found in the 14 | inputData function of demoqp.slx 15 | -> Instructions on using the qpSWIFT_sfunc_e.c s-function can be found in the 16 | inputData_e function of demoqp_e.slx 17 | ------------------------------------------------------------------------------- 18 | Demo -> Demo QP is given in demoqp.slx and demoqp_e.slx 19 | ------------------------------------------------------------------------------- 20 | 21 | 22 | ------------------------------------------------------------------------------- 23 | Note: Make sure you have compatible C compiler available for your matlab version 24 | -------------------------------------------------------------------------------
-------------------------------------------------------------------------------- /utils/ros_subscriber.py: -------------------------------------------------------------------------------- 1 | import rospy 2 | import threading 3 | from typing import Callable, Optional 4 | 5 | class ROSSubscriber: 6 | def __init__(self, topic_name, msg_type,call: Optional[Callable] = None): 7 | """ 8 | Initialize ROS subscriber 9 | :param topic_name: Name of the topic to subscribe to 10 | :param msg_type: Type of the message 11 | """ 12 | self.topic_name = topic_name 13 | self.msg_type = msg_type 14 | self.latest_msg = None 15 | self.lock = threading.Lock() 16 | self.user_call = call 17 | 18 | self.subscriber = rospy.Subscriber(self.topic_name, self.msg_type, self.callback) 19 | 20 | def callback(self, msg): 21 | """ 22 | Subscriber callback function to receive messages and update the latest data. 23 | :param msg: The received message 24 | """ 25 | with self.lock: 26 | self.latest_msg = msg 27 | if self.user_call: 28 | self.user_call(self.latest_msg) 29 | 30 | def get_latest_data(self): 31 | with self.lock: 32 | return self.latest_msg 33 | 34 | 35 | if __name__=="__main__": 36 | import time 37 | ''' 38 | 示例: 39 | from tracer_msgs.msg import TracerRsStatus 40 | 41 | ros_test = ROSSubscriber('/tracer_rs_status', TracerRsStatus) 42 | # 初始化 ROS 节点 43 | rospy.init_node('ros_subscriber_node', anonymous=True) 44 | for i in range(100): 45 | print(ros_test.get_latest_data()) 46 | time.sleep(0.1) 47 | 48 | 示例: 49 | from geometry_msgs.msg import PoseStamped 50 | ros_test = ROSSubscriber('/pika_pose_l', PoseStamped) 51 | rospy.init_node('ros_subscriber_node', anonymous=True) 52 | 53 | for i in range(100): 54 | print(ros_test.get_latest_data()) 55 | time.sleep(0.1) 56 | ''' 57 | -------------------------------------------------------------------------------- /utils/task_functions.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 
sys.path.append("./") 3 | 4 | from utils.task import YmlTask, Tasks 5 | 6 | import numpy as np 7 | import time 8 | def success(task, threshold): 9 | if np.random.random() > threshold: 10 | return False 11 | return True 12 | def move_mobile_to(task, target): 13 | move_data = { 14 | "mobile":{ 15 | "test_mobile": { 16 | "move_to": target, 17 | } 18 | } 19 | } 20 | task.robot.move(move_data) 21 | time.sleep(0.1) 22 | 23 | def infer_once(task): 24 | def input_transform(data): 25 | state = np.concatenate([ 26 | np.array(data[0]["left_arm"]["joint"]).reshape(-1), 27 | np.array(data[0]["left_arm"]["gripper"]).reshape(-1), 28 | np.array(data[0]["right_arm"]["joint"]).reshape(-1), 29 | np.array(data[0]["right_arm"]["gripper"]).reshape(-1) 30 | ]) 31 | 32 | img_arr = data[1]["cam_head"]["color"], data[1]["cam_right_wrist"]["color"], data[1]["cam_left_wrist"]["color"] 33 | return img_arr, state 34 | 35 | def output_transform(data): 36 | move_data = { 37 | "arm":{ 38 | "left_arm":{ 39 | "joint":data[:6], 40 | "gripper":data[6] 41 | }, 42 | "right_arm":{ 43 | "joint":data[7:13], 44 | "gripper":data[13] 45 | } 46 | } 47 | } 48 | return move_data 49 | 50 | img_arr, state = input_transform(task.robot.get()) 51 | task.extras["model"].update_observation_window(img_arr, state) 52 | actions = task.extras["model"].get_action() 53 | for action in actions: 54 | move_data = output_transform(action) 55 | 56 | task.robot.move(move_data) 57 | time.sleep(0.1) 58 | 59 | 60 | -------------------------------------------------------------------------------- /policy/RDT/configs/pretrain_datasets.json: -------------------------------------------------------------------------------- 1 | [ 2 | "fractal20220817_data", 3 | "jaco_play", 4 | "taco_play", 5 | "berkeley_cable_routing", 6 | "viola", 7 | "berkeley_autolab_ur5", 8 | "toto", 9 | "nyu_door_opening_surprising_effectiveness", 10 | "columbia_cairlab_pusht_real", 11 | "stanford_kuka_multimodal_dataset_converted_externally_to_rlds", 12 | 
"austin_buds_dataset_converted_externally_to_rlds", 13 | "kuka", 14 | "utokyo_xarm_bimanual_converted_externally_to_rlds", 15 | "stanford_hydra_dataset_converted_externally_to_rlds", 16 | "maniskill_dataset_converted_externally_to_rlds", 17 | "ucsd_kitchen_dataset_converted_externally_to_rlds", 18 | "ucsd_pick_and_place_dataset_converted_externally_to_rlds", 19 | "austin_sailor_dataset_converted_externally_to_rlds", 20 | "austin_sirius_dataset_converted_externally_to_rlds", 21 | "bc_z", 22 | "utokyo_pr2_opening_fridge_converted_externally_to_rlds", 23 | "utokyo_pr2_tabletop_manipulation_converted_externally_to_rlds", 24 | "utokyo_xarm_pick_and_place_converted_externally_to_rlds", 25 | "berkeley_mvp_converted_externally_to_rlds", 26 | "berkeley_rpt_converted_externally_to_rlds", 27 | "kaist_nonprehensile_converted_externally_to_rlds", 28 | "tokyo_u_lsmo_converted_externally_to_rlds", 29 | "dlr_sara_grid_clamp_converted_externally_to_rlds", 30 | "stanford_robocook_converted_externally_to_rlds", 31 | "imperialcollege_sawyer_wrist_cam", 32 | "iamlab_cmu_pickup_insert_converted_externally_to_rlds", 33 | "utaustin_mutex", 34 | "berkeley_fanuc_manipulation", 35 | "cmu_play_fusion", 36 | "language_table", 37 | "furniture_bench_dataset_converted_externally_to_rlds", 38 | "droid", 39 | "fmb", 40 | "dobbe", 41 | "qut_dexterous_manpulation", 42 | "aloha_mobile", 43 | "aloha_static", 44 | "roboset", 45 | "rh20t", 46 | "calvin", 47 | "bridgev2" 48 | ] -------------------------------------------------------------------------------- /policy/ACT/README.md: -------------------------------------------------------------------------------- 1 | [![中文](https://img.shields.io/badge/中文-简体-blue)](./README_CN.md) 2 | [![English](https://img.shields.io/badge/English-English-green)](./README_EN.md) 3 | 4 | # ACT (Action Chunking Transformer) Deployment Guide 5 | 6 | ## Quick Start 7 | 8 | ### 1. 
Environment Setup 9 | 10 | Install the dependencies required for ACT deployment: 11 | 12 | ```bash 13 | cd policy/ACT/ 14 | pip install -r requirements.txt 15 | ``` 16 | 17 | ### 2. Data Preparation and Training 18 | 19 | #### Data Conversion 20 | 21 | Convert the collected data to HDF5 format required by ACT model: 22 | 23 | ```bash 24 | python scripts/convert2act_hdf5.py 25 | ``` 26 | 27 | **Examples:** 28 | ```bash 29 | # Convert data from save/test/ directory to ACT format 30 | python scripts/convert2act_hdf5.py ./save/test/ ~/RoboTwin/policy/ACT/data/ 31 | 32 | # Convert pick_place_cup task data 33 | python scripts/convert2act_hdf5.py save/pick_place_cup /path/to/output 34 | ``` 35 | 36 | ### 3. Real Robot Deployment 37 | 38 | 1. Copy the trained checkpoint to the following directory: 39 | ``` 40 | control_your_robot/policy/ACT/actckpt/ 41 | ``` 42 | 43 | 2. Modify the deployment script `example/deploy/piper_single_on_ACT.py`: 44 | 45 | ```python 46 | # Modify the model path around line 120 47 | model = MYACT("/path/your/policy/ACT/act_ckpt/act-pick_place_cup/100", "act-pick_place_cup") 48 | ``` 49 | 50 | **Parameter Description:** 51 | - First parameter: Policy model folder path 52 | - Second parameter: Corresponding task name 53 | 54 | #### 3.3 Execute Deployment 55 | 56 | Run the deployment script to start real robot execution: 57 | 58 | ```bash 59 | python example/deploy/piper_single_on_ACT.py 60 | ``` 61 | 62 | ## Notes 63 | 64 | - Ensure the robotic arm is properly enabled and connected 65 | - Check if the model path is correct 66 | - It is recommended to verify the model performance in a test environment before deployment 67 | 68 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/model/bet/libraries/mingpt/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | from torch.nn import 
functional as F 5 | 6 | 7 | def set_seed(seed): 8 | random.seed(seed) 9 | np.random.seed(seed) 10 | torch.manual_seed(seed) 11 | torch.cuda.manual_seed_all(seed) 12 | 13 | 14 | def top_k_logits(logits, k): 15 | v, ix = torch.topk(logits, k) 16 | out = logits.clone() 17 | out[out < v[:, [-1]]] = -float("Inf") 18 | return out 19 | 20 | 21 | @torch.no_grad() 22 | def sample(model, x, steps, temperature=1.0, sample=False, top_k=None): 23 | """ 24 | take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in 25 | the sequence, feeding the predictions back into the model each time. Clearly the sampling 26 | has quadratic complexity unlike an RNN that is only linear, and has a finite context window 27 | of block_size, unlike an RNN that has an infinite context window. 28 | """ 29 | block_size = model.get_block_size() 30 | model.eval() 31 | for k in range(steps): 32 | x_cond = (x if x.size(1) <= block_size else x[:, -block_size:]) # crop context if needed 33 | logits, _ = model(x_cond) 34 | # pluck the logits at the final step and scale by temperature 35 | logits = logits[:, -1, :] / temperature 36 | # optionally crop probabilities to only the top k options 37 | if top_k is not None: 38 | logits = top_k_logits(logits, top_k) 39 | # apply softmax to convert to probabilities 40 | probs = F.softmax(logits, dim=-1) 41 | # sample from the distribution or take the most likely 42 | if sample: 43 | ix = torch.multinomial(probs, num_samples=1) 44 | else: 45 | _, ix = torch.topk(probs, k=1, dim=-1) 46 | # append to the sequence and continue 47 | x = torch.cat((x, ix), dim=1) 48 | 49 | return x 50 | -------------------------------------------------------------------------------- /policy/RDT/scripts/encode_lang.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | import yaml 5 | 6 | from models.multimodal_encoder.t5_encoder import T5Embedder 7 | 8 | 9 | GPU = 0 10 | MODEL_PATH = 
"google/t5-v1_1-xxl" 11 | CONFIG_PATH = "configs/base.yaml" 12 | SAVE_DIR = "outs/" 13 | 14 | # Modify this to your task name and instruction 15 | TASK_NAME = "handover_pan" 16 | INSTRUCTION = "Pick up the black marker on the right and put it into the packaging box on the left." 17 | 18 | # Note: if your GPU VRAM is less than 24GB, 19 | # it is recommended to enable offloading by specifying an offload directory. 20 | OFFLOAD_DIR = None # Specify your offload directory here, ensuring the directory exists. 21 | 22 | def main(): 23 | with open(CONFIG_PATH, "r") as fp: 24 | config = yaml.safe_load(fp) 25 | 26 | device = torch.device(f"cuda:{GPU}") 27 | text_embedder = T5Embedder( 28 | from_pretrained=MODEL_PATH, 29 | model_max_length=config["dataset"]["tokenizer_max_length"], 30 | device=device, 31 | use_offload_folder=OFFLOAD_DIR 32 | ) 33 | tokenizer, text_encoder = text_embedder.tokenizer, text_embedder.model 34 | 35 | tokens = tokenizer( 36 | INSTRUCTION, return_tensors="pt", 37 | padding="longest", 38 | truncation=True 39 | )["input_ids"].to(device) 40 | 41 | tokens = tokens.view(1, -1) 42 | with torch.no_grad(): 43 | pred = text_encoder(tokens).last_hidden_state.detach().cpu() 44 | 45 | save_path = os.path.join(SAVE_DIR, f"{TASK_NAME}.pt") 46 | # We save the embeddings in a dictionary format 47 | torch.save({ 48 | "name": TASK_NAME, 49 | "instruction": INSTRUCTION, 50 | "embeddings": pred 51 | }, save_path 52 | ) 53 | 54 | print(f'\"{INSTRUCTION}\" from \"{TASK_NAME}\" is encoded by \"{MODEL_PATH}\" into shape {pred.shape} and saved to \"{save_path}\"') 55 | 56 | 57 | if __name__ == "__main__": 58 | main() 59 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/common/checkpoint_util.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Dict 2 | import os 3 | 4 | 5 | class TopKCheckpointManager: 6 | 7 | def __init__( 8 | self, 9 | save_dir, 10 
| monitor_key: str, 11 | mode="min", 12 | k=1, 13 | format_str="epoch={epoch:03d}-train_loss={train_loss:.3f}.ckpt", 14 | ): 15 | assert mode in ["max", "min"] 16 | assert k >= 0 17 | 18 | self.save_dir = save_dir 19 | self.monitor_key = monitor_key 20 | self.mode = mode 21 | self.k = k 22 | self.format_str = format_str 23 | self.path_value_map = dict() 24 | 25 | def get_ckpt_path(self, data: Dict[str, float]) -> Optional[str]: 26 | if self.k == 0: 27 | return None 28 | 29 | value = data[self.monitor_key] 30 | ckpt_path = os.path.join(self.save_dir, self.format_str.format(**data)) 31 | 32 | if len(self.path_value_map) < self.k: 33 | # under-capacity 34 | self.path_value_map[ckpt_path] = value 35 | return ckpt_path 36 | 37 | # at capacity 38 | sorted_map = sorted(self.path_value_map.items(), key=lambda x: x[1]) 39 | min_path, min_value = sorted_map[0] 40 | max_path, max_value = sorted_map[-1] 41 | 42 | delete_path = None 43 | if self.mode == "max": 44 | if value > min_value: 45 | delete_path = min_path 46 | else: 47 | if value < max_value: 48 | delete_path = max_path 49 | 50 | if delete_path is None: 51 | return None 52 | else: 53 | del self.path_value_map[delete_path] 54 | self.path_value_map[ckpt_path] = value 55 | 56 | if not os.path.exists(self.save_dir): 57 | os.mkdir(self.save_dir) 58 | 59 | if os.path.exists(delete_path): 60 | os.remove(delete_path) 61 | return ckpt_path 62 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/matlab/prototype code/qpSWIFT_checkinputs.m: -------------------------------------------------------------------------------- 1 | function [n,m,p,P,c,G,h,A,b] = qpSWIFT_checkinputs(P,c,G,h,A,b) 2 | 3 | if ~issymmetric(P) 4 | disp('Cost Function Quadratic term is not symmetric'); 5 | disp('Working on symmetric part of Cost function;P = (P + PT)/2'); 6 | P = (P + P')./2; 7 | end 8 | 9 | [p1,p2] = size(P); 10 | [g1,g2] = size(G); 11 | [a1,a2] = size(A); 12 | 13 | [c1,c2] = 
size(c); 14 | [b1,b2] = size(b); 15 | [h1,h2] = size(h); 16 | 17 | if (p1 ~= p2) 18 | disp('P is not a square matrix'); 19 | end 20 | 21 | n = p1; 22 | m = g1; 23 | p = a1; 24 | 25 | if (g2 ~= n) 26 | disp('Number of Columns of G are not consistent'); 27 | end 28 | 29 | if (~isempty(A)) 30 | if (a2 ~= n) 31 | disp('Number of Columns of A are not consistent'); 32 | end 33 | end 34 | 35 | if (p1 ~= length(c)) 36 | disp('Dimensions of P and c are not consistent'); 37 | end 38 | 39 | if (g1 ~= length(h)) 40 | disp('Dimensions of G and h are not consistent'); 41 | end 42 | 43 | if (~isempty(A) || ~isempty(A)) 44 | if (a1 ~= length(b)) 45 | disp('Dimensions of A and b are not consistent'); 46 | end 47 | end 48 | 49 | 50 | if (c2 ~= 1) 51 | disp('c is not a column vector'); 52 | disp('Making it a column vector'); 53 | c = c'; 54 | end 55 | 56 | if (h2 ~= 1) 57 | disp('h is not a column vector'); 58 | disp('Making it a column vector'); 59 | h = h'; 60 | end 61 | 62 | if (~isempty(b)) 63 | if (b2 ~= 1) 64 | disp('b is not a column vector'); 65 | disp('Making it a column vector'); 66 | b = b'; 67 | end 68 | end 69 | 70 | 71 | end -------------------------------------------------------------------------------- /controller/TestMobile_controller.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | from controller.mobile_controller import MobileController 5 | 6 | import numpy as np 7 | import time 8 | 9 | from utils.data_handler import debug_print 10 | 11 | class TestMobileController(MobileController): 12 | def __init__(self, name, INFO="DEBUG"): 13 | super().__init__() 14 | self.name = name 15 | self.INFO = INFO 16 | 17 | def set_up(self): 18 | self.position = np.random.rand(6) 19 | self.position[2] = 0. 20 | self.position[3] = 0. 21 | self.position[4] = 0. 22 | 23 | self.velocity = np.random.rand(6) 24 | self.velocity[2] = 0. 25 | self.velocity[3] = 0. 26 | self.velocity[4] = 0. 
27 | debug_print(self.name, f"setup success",self.INFO) 28 | 29 | def set_move_velocity(self, velocity): 30 | self.velocity = velocity 31 | self.position += self.velocity * 0.1 32 | 33 | def set_move_to(self, position): 34 | self.position = position 35 | 36 | def get_position(self): 37 | return self.position 38 | 39 | def get_move_velocity(self): 40 | return self.velocity 41 | 42 | def __del__(self): 43 | try: 44 | if hasattr(self, 'controller'): 45 | # Add any necessary cleanup for the arm controller 46 | pass 47 | except: 48 | pass 49 | 50 | if __name__ == "__main__": 51 | import os 52 | os.environ["INFO_LEVEL"] = "DEBUG" # DEBUG , INFO, ERROR 53 | 54 | controller = TestMobileController("test_mobile") 55 | controller.set_up() 56 | controller.set_collect_info(["move_velocity", "position"]) 57 | 58 | for i in range(10): 59 | time.sleep(0.1) 60 | controller.move({"move_velocity": [0.01, 0.01, 0., 0., 0., 0.]}) 61 | 62 | print(controller.get()) 63 | 64 | controller.move({"move_to": [5., 5., 0., 0., 0., 1.]}) 65 | print(controller.get()) -------------------------------------------------------------------------------- /sensor/TestVision_sensor.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | import numpy as np 5 | import time 6 | from sensor.vision_sensor import VisionSensor 7 | 8 | from utils.data_handler import debug_print 9 | 10 | class TestVisonSensor(VisionSensor): 11 | def __init__(self, name,INFO="DEBUG"): 12 | super().__init__() 13 | self.name = name 14 | self.INFO = INFO 15 | 16 | def set_up(self, input=None, is_depth = False, encode_rgb=True): 17 | debug_print(self.name, f"setup success, is_depth={is_depth}",self.INFO) 18 | self.is_depth = is_depth 19 | self.encode_rgb = encode_rgb 20 | 21 | def get_image(self): 22 | image = {} 23 | height = 480 24 | width = 640 25 | if "color" in self.collect_info: 26 | image["color"] = np.random.randint(0, 256, size=(height, width, 3), 
dtype=np.uint8) 27 | 28 | if "depth" in self.collect_info: 29 | if not self.is_depth: 30 | debug_print(self.name,f"should use set_up(is_depth=True) to enable collecting depth image","ERROR") 31 | raise ValueError 32 | image["depth"] = np.random.randint(0, 256, size=(height, width, 3), dtype=np.uint8) 33 | debug_print(self.name,f"get image success",self.INFO) 34 | return image 35 | 36 | def cleanup(self): 37 | try: 38 | if hasattr(self, 'pipeline'): 39 | self.pipeline.stop() 40 | except Exception as e: 41 | print(f"Error during cleanup: {str(e)}") 42 | 43 | def __del__(self): 44 | self.cleanup() 45 | 46 | if __name__ == "__main__": 47 | import os 48 | os.environ["INFO_LEVEL"] = "DEBUG" 49 | 50 | cam = TestVisonSensor("test", INFO="DEBUG") 51 | cam.set_up() 52 | cam.set_collect_info(["color"]) 53 | cam_list = [] 54 | for i in range(10): 55 | debug_print("TestVison_sensor",f"step:{i}", "INFO") 56 | data = cam.get() 57 | time.sleep(0.1) 58 | -------------------------------------------------------------------------------- /controller/controller.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | from typing import List 5 | import numpy as np 6 | import time 7 | 8 | from utils.data_handler import debug_print 9 | 10 | class Controller: 11 | def __init__(self, timestamp=True): 12 | self.name = "controller" 13 | self.controller_type = "base_controller" 14 | # self.is_set_up = False 15 | self.timestamp = timestamp 16 | 17 | def set_collect_info(self, collect_info:List[str]): 18 | self.collect_info = collect_info 19 | if self.timestamp: 20 | self.collect_info.append("timestamp") 21 | 22 | # get controller infomation 23 | def get(self): 24 | if self.collect_info is None: 25 | raise ValueError(f"{self.name}: collect_info is not set") 26 | info = self.get_information() 27 | 28 | if self.timestamp: 29 | info["timestamp"] = time.time_ns() 30 | 31 | for collect_info in self.collect_info: 32 | if 
info[collect_info] is None: 33 | debug_print(f"{self.name}", f"{collect_info} information is None", "ERROR") 34 | 35 | debug_print(f"{self.name}", f"get data:\n{info} ", "DEBUG") 36 | return {collect_info: info[collect_info] for collect_info in self.collect_info} 37 | 38 | def move(self, move_data, is_delta=False): 39 | debug_print(f"{self.name}", f"get move data:\n{move_data} ", "DEBUG") 40 | try: 41 | self.move_controller(move_data, is_delta) 42 | except Exception as e: 43 | debug_print(self.name, f"move error: {e}", "WARNING") 44 | 45 | # init controller 46 | def set_up(self): 47 | raise NotImplementedError("This method should be implemented by the subclass") 48 | 49 | # print controller 50 | def __repr__(self): 51 | return f"Base Controller, can't be used directly \n \ 52 | name: {self.name} \n \ 53 | controller_type: {self.controller_type}" 54 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/demo/runqp.c: -------------------------------------------------------------------------------- 1 | #include "qpSWIFT.h" 2 | #include "Matrices.h" 3 | 4 | int main(int argc, char *argv[]) 5 | { 6 | 7 | QP *myQP; 8 | myQP = QP_SETUP(n, m, p, Pjc, Pir, Ppr, Ajc, Air, Apr, Gjc, Gir, Gpr, c, h, b, sigma_d, Permut); 9 | /*For only inequality constrained QP set the pointers of A matrix and b vectro to zero and p = 0 and appropraiatley sized Permut matrix*/ 10 | /*myQP = QP_SETUP(n, m, 0 , Pjc, Pir, Ppr, NULL, NULL, NULL, Gjc, Gir, Gpr, c, h, NULL, sigma_d, NULL); */ 11 | 12 | qp_int ExitCode = QP_SOLVE(myQP); 13 | 14 | if (myQP != NULL) 15 | printf("Setup Time : %f ms\n", myQP->stats->tsetup * 1000.0); 16 | if (ExitCode == QP_OPTIMAL) 17 | { 18 | printf("Solve Time : %f ms\n", (myQP->stats->tsolve + myQP->stats->tsetup) * 1000.0); 19 | printf("KKT_Solve Time : %f ms\n", myQP->stats->kkt_time * 1000.0); 20 | printf("LDL Time : %f ms\n", myQP->stats->ldl_numeric * 1000.0); 21 | printf("Diff : %f ms\n", 
(myQP->stats->kkt_time - myQP->stats->ldl_numeric) * 1000.0); 22 | printf("Iterations : %ld\n", myQP->stats->IterationCount); 23 | printf("Optimal Solution Found\n"); 24 | } 25 | if (ExitCode == QP_MAXIT) 26 | { 27 | printf("Solve Time : %f ms\n", myQP->stats->tsolve * 1000.0); 28 | printf("KKT_Solve Time : %f ms\n", myQP->stats->kkt_time * 1000.0); 29 | printf("LDL Time : %f ms\n", myQP->stats->ldl_numeric * 1000.0); 30 | printf("Diff : %f ms\n", (myQP->stats->kkt_time - myQP->stats->ldl_numeric) * 1000.0); 31 | printf("Iterations : %ld\n", myQP->stats->IterationCount); 32 | printf("Maximum Iterations reached\n"); 33 | } 34 | 35 | if (ExitCode == QP_FATAL) 36 | { 37 | printf("Unknown Error Detected\n"); 38 | } 39 | 40 | if (ExitCode == QP_KKTFAIL) 41 | { 42 | printf("LDL Factorization fail\n"); 43 | } 44 | 45 | printf("Solution\n"); 46 | 47 | for (int i = 0; i < n; ++i) 48 | printf("x[%d]:%lf\n", i, myQP->x[i]); 49 | 50 | QP_CLEANUP(myQP); 51 | 52 | return 0; 53 | } 54 | 55 | /*! @file */ -------------------------------------------------------------------------------- /policy/openpi/README.md: -------------------------------------------------------------------------------- 1 | [![中文](https://img.shields.io/badge/中文-简体-blue)](./README.md) 2 | [![English](https://img.shields.io/badge/English-English-green)](./README_EN.md) 3 | 4 | ## 如何使用openpi进行训练 5 | 1. 采集数据 6 | 可以使用提供的api进行数据采集, 使用CollectAny进行数据存储, 或者用自己的采集的数据. 7 | 8 | 2. 转化数据格式 9 | 首先转化为通用hdf5格式, 方便转为lerobotdataset: 10 | ``` bash 11 | python scripts/convert2hdf5.py input_path output_path 12 | # example: python scripts/convert2hdf5.py ../../save/task_1/ processed_data/task_1/ 13 | ``` 14 | 然后将对应任务的instructions.json移动到对应任务的文件夹中, json文件格式参考`task_instructions/*.json`. 
15 | 16 | 最后根据需求, 转化为lerobotdataset格式, 支持多个任务同时转化, 或者只转化指定任务: 17 | ``` bash 18 | python scripts/convert2lerobot.py --raw_dir data_dir --repo_id your_repo_id # --is_multi 19 | # 如果你是单数据集转化 20 | python scripts/convert2lerobot.py --raw_dir processed_data/task_1/ --repo_id my_task_1 21 | # 如果你输多数据集转化 22 | python scripts/convert2lerobot.py --raw_dir processed_data/ --repo_id union_task --is_multi 23 | ``` 24 | 25 | 3. 选择你的config 26 | 在`src/openpi/training/config.py`中, 选择你要使用的训练形式:单臂/双臂, base/fast, full/lora 27 | 修改repo_id为你转化数据时使用的repo_id 28 | 修改一些你想设置的参数如: 29 | `batch_size`: 训练的总batch size.越大需要的显存越高, 建议开32, 效果不错 30 | `num_train_steps`: 30000步基本都能收敛, 不放心可以开高点 31 | `fsdp_devices`: 如果你单卡显存不够, 你可以开多卡, 注意!fsdp是将单个模型平均分到多卡上,不是每张卡一个完整模型 32 | 33 | ### 注意!!! 34 | 请认真校对你机械臂所需要的action维度, 修改对应policy的output,将输出维度修改对齐你的机械臂维度 35 | 如你是7+1的机械臂, 那么单臂使用的libero的output是[:8], 双臂使用的aloha是[:16] 36 | 37 | 4. 运行`finetune.sh`开启训练 38 | `your_train_config_name`是`config.py`的`_CONFIGS`中的配置信息 39 | `your_model_name`可以随便起, 会决定wandb的模型名称与保存的模型名称, 40 | `gpu_id`是你要使用的gpu对应的id,单卡填0 41 | ```bash 42 | # 如bash finetune.sh pi0_single_base_full my_model 0,1,2 43 | bash finetune.sh your_train_config_name your_model_name gpu_id 44 | ``` 45 | 46 | ## 如何使用openpi进行推理 47 | 在`inference_model.py`中给出了单臂和双臂的部署封装类, 请结合自己的机器人进行数据对齐. 需要修改两个部分: 48 | 1. `train_config_name`要设置为你训练时使用的`train_config` 49 | 2. 要在`src/openpi/training/config.py`对应的`train_config`中将`repo_id`改为你模型训练时使用的数据集的`repo_id` 50 | -------------------------------------------------------------------------------- /policy/openpi/docs/docker.md: -------------------------------------------------------------------------------- 1 | ### Docker Setup 2 | 3 | All of the examples in this repo provide instructions for being run normally, and also using Docker. 
Although not required, the Docker option is recommended as this will simplify software installation, produce a more stable environment, and also allow you to avoid installing ROS and cluttering your machine, for examples which depend on ROS. 4 | 5 | - Basic Docker installation instructions are [here](https://docs.docker.com/engine/install/). 6 | - Docker must be installed in [rootless mode](https://docs.docker.com/engine/security/rootless/). 7 | - To use your GPU you must also install the [NVIDIA container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html). 8 | - The version of docker installed with `snap` is incompatible with the NVIDIA container toolkit, preventing it from accessing `libnvidia-ml.so` ([issue](https://github.com/NVIDIA/nvidia-container-toolkit/issues/154)). The snap version can be uninstalled with `sudo snap remove docker`. 9 | - Docker Desktop is also incompatible with the NVIDIA runtime ([issue](https://github.com/NVIDIA/nvidia-container-toolkit/issues/229)). Docker Desktop can be uninstalled with `sudo apt remove docker-desktop`. 10 | 11 | 12 | If starting from scratch and your host machine is Ubuntu 22.04, you can use accomplish all of the above with the convenience scripts `scripts/docker/install_docker_ubuntu22.sh` and `scripts/docker/install_nvidia_container_toolkit.sh`. 13 | 14 | Build the Docker image and start the container with the following command: 15 | ```bash 16 | docker compose -f scripts/docker/compose.yml up --build 17 | ``` 18 | 19 | To build and run the Docker image for a specific example, use the following command: 20 | ```bash 21 | docker compose -f examples//compose.yml up --build 22 | ``` 23 | where `` is the name of the example you want to run. 24 | 25 | During the first run of any example, Docker will build the images. Go grab a coffee while this happens. Subsequent runs will be faster since the images are cached. 
-------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/src/amd_control.c: -------------------------------------------------------------------------------- 1 | /* ========================================================================= */ 2 | /* === AMD_control ========================================================= */ 3 | /* ========================================================================= */ 4 | 5 | /* ------------------------------------------------------------------------- */ 6 | /* AMD, Copyright (c) Timothy A. Davis, */ 7 | /* Patrick R. Amestoy, and Iain S. Duff. See ../README.txt for License. */ 8 | /* email: DrTimothyAldenDavis@gmail.com */ 9 | /* ------------------------------------------------------------------------- */ 10 | 11 | /* User-callable. Prints the control parameters for AMD. See amd.h 12 | * for details. If the Control array is not present, the defaults are 13 | * printed instead. 14 | */ 15 | 16 | #include "amd_internal.h" 17 | 18 | GLOBAL void AMD_control 19 | ( 20 | double Control [ ] 21 | ) 22 | { 23 | double alpha ; 24 | Int aggressive ; 25 | 26 | if (Control != (double *) NULL) 27 | { 28 | alpha = Control [AMD_DENSE] ; 29 | aggressive = Control [AMD_AGGRESSIVE] != 0 ; 30 | } 31 | else 32 | { 33 | alpha = AMD_DEFAULT_DENSE ; 34 | aggressive = AMD_DEFAULT_AGGRESSIVE ; 35 | } 36 | 37 | PRINTF (("\nAMD version %d.%d.%d, %s: approximate minimum degree ordering\n" 38 | " dense row parameter: %g\n", AMD_MAIN_VERSION, AMD_SUB_VERSION, 39 | AMD_SUBSUB_VERSION, AMD_DATE, alpha)) ; 40 | 41 | if (alpha < 0) 42 | { 43 | PRINTF ((" no rows treated as dense\n")) ; 44 | } 45 | else 46 | { 47 | PRINTF (( 48 | " (rows with more than max (%g * sqrt (n), 16) entries are\n" 49 | " considered \"dense\", and placed last in output permutation)\n", 50 | alpha)) ; 51 | } 52 | 53 | if (aggressive) 54 | { 55 | PRINTF ((" aggressive absorption: yes\n")) ; 56 | } 57 | else 58 | { 59 | PRINTF ((" 
aggressive absorption: no\n")) ; 60 | } 61 | 62 | PRINTF ((" size of AMD integer: %d\n\n", sizeof (Int))) ; 63 | } 64 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/src/License(AMD).txt: -------------------------------------------------------------------------------- 1 | AMD, Copyright (c), 1996-2015, Timothy A. Davis, 2 | Patrick R. Amestoy, and Iain S. Duff. All Rights Reserved. 3 | 4 | Availability: 5 | 6 | http://www.suitesparse.com 7 | 8 | ------------------------------------------------------------------------------- 9 | AMD License: BSD 3-clause: 10 | ------------------------------------------------------------------------------- 11 | 12 | Redistribution and use in source and binary forms, with or without 13 | modification, are permitted provided that the following conditions are met: 14 | * Redistributions of source code must retain the above copyright 15 | notice, this list of conditions and the following disclaimer. 16 | * Redistributions in binary form must reproduce the above copyright 17 | notice, this list of conditions and the following disclaimer in the 18 | documentation and/or other materials provided with the distribution. 19 | * Neither the name of the organizations to which the authors are 20 | affiliated, nor the names of its contributors may be used to endorse 21 | or promote products derived from this software without specific prior 22 | written permission. 23 | 24 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 | ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY 28 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 29 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 30 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 31 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 | OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 34 | DAMAGE. 35 | 36 | 37 | -------------------------------------------------------------------------------- /policy/openpi/scripts/docker/serve_policy.Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile for serving a PI policy. 2 | # Based on UV's instructions: https://docs.astral.sh/uv/guides/integration/docker/#developing-in-a-container 3 | 4 | # Build the container: 5 | # docker build . -t openpi_server -f scripts/docker/serve_policy.Dockerfile 6 | 7 | # Run the container: 8 | # docker run --rm -it --network=host -v .:/app --gpus=all openpi_server /bin/bash 9 | 10 | FROM nvidia/cuda:12.2.2-cudnn8-runtime-ubuntu22.04@sha256:2d913b09e6be8387e1a10976933642c73c840c0b735f0bf3c28d97fc9bc422e0 11 | COPY --from=ghcr.io/astral-sh/uv:0.5.1 /uv /uvx /bin/ 12 | 13 | WORKDIR /app 14 | 15 | # Needed because LeRobot uses git-lfs. 16 | RUN apt-get update && apt-get install -y git git-lfs linux-headers-generic build-essential clang 17 | 18 | # Copy from the cache instead of linking since it's a mounted volume 19 | ENV UV_LINK_MODE=copy 20 | 21 | # Write the virtual environment outside of the project directory so it doesn't 22 | # leak out of the container when we mount the application code. 
23 | ENV UV_PROJECT_ENVIRONMENT=/.venv 24 | 25 | # Install the project's dependencies using the lockfile and settings 26 | RUN uv venv --python 3.11.9 $UV_PROJECT_ENVIRONMENT 27 | RUN --mount=type=cache,target=/root/.cache/uv \ 28 | --mount=type=bind,source=uv.lock,target=uv.lock \ 29 | --mount=type=bind,source=pyproject.toml,target=pyproject.toml \ 30 | --mount=type=bind,source=packages/openpi-client/pyproject.toml,target=packages/openpi-client/pyproject.toml \ 31 | --mount=type=bind,source=packages/openpi-client/src,target=packages/openpi-client/src \ 32 | GIT_LFS_SKIP_SMUDGE=1 uv sync --frozen --no-install-project --no-dev 33 | 34 | # Copy transformers_replace files while preserving directory structure 35 | COPY src/openpi/models_pytorch/transformers_replace/ /tmp/transformers_replace/ 36 | RUN /.venv/bin/python -c "import transformers; print(transformers.__file__)" | xargs dirname | xargs -I{} cp -r /tmp/transformers_replace/* {} && rm -rf /tmp/transformers_replace 37 | 38 | CMD /bin/bash -c "uv run scripts/serve_policy.py $SERVER_ARGS" 39 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/projectors.py: -------------------------------------------------------------------------------- 1 | """Implementation of additional projectors for additional inputs to the VLA models.""" 2 | import torch 3 | import torch.nn as nn 4 | 5 | 6 | class ProprioProjector(nn.Module): 7 | """ 8 | Projects proprio state inputs into the LLM's embedding space. 
9 | """ 10 | def __init__(self, llm_dim: int, proprio_dim: int) -> None: 11 | super().__init__() 12 | self.llm_dim = llm_dim 13 | self.proprio_dim = proprio_dim 14 | 15 | self.fc1 = nn.Linear(self.proprio_dim, self.llm_dim, bias=True) 16 | self.fc2 = nn.Linear(self.llm_dim, self.llm_dim, bias=True) 17 | self.act_fn1 = nn.GELU() 18 | 19 | def forward(self, proprio: torch.Tensor = None) -> torch.Tensor: 20 | # proprio: (bsz, proprio_dim) 21 | projected_features = self.fc1(proprio) 22 | projected_features = self.act_fn1(projected_features) 23 | projected_features = self.fc2(projected_features) 24 | return projected_features 25 | 26 | 27 | class NoisyActionProjector(nn.Module): 28 | """ 29 | [Diffusion] Projects noisy action inputs into the LLM's embedding space. 30 | 31 | Note that since each action is tokenized into 7 tokens in OpenVLA (rather 32 | than having 1 token per action), each noisy action token will have dimension 1 33 | instead of 7. 34 | """ 35 | def __init__(self, llm_dim: int) -> None: 36 | super().__init__() 37 | self.llm_dim = llm_dim 38 | self.action_token_dim = 1 39 | 40 | self.fc1 = nn.Linear(self.action_token_dim, self.llm_dim, bias=True) 41 | self.fc2 = nn.Linear(self.llm_dim, self.llm_dim, bias=True) 42 | self.act_fn1 = nn.GELU() 43 | 44 | def forward(self, noisy_actions: torch.Tensor = None) -> torch.Tensor: 45 | # noisy_actions: (bsz, num_action_tokens=chunk_len*action_dim, 1) 46 | projected_features = self.fc1(noisy_actions) 47 | projected_features = self.act_fn1(projected_features) 48 | projected_features = self.fc2(projected_features) 49 | return projected_features 50 | -------------------------------------------------------------------------------- /utils/ros2_subscriber.py: -------------------------------------------------------------------------------- 1 | import rclpy 2 | from rclpy.node import Node 3 | from threading import Lock 4 | from typing import Callable, Optional 5 | 6 | 7 | class ROS2Subscriber(Node): 8 | def __init__(self, 
node_name: str, topic_name: str, msg_type, call: Optional[Callable] = None): 9 | """ 10 | ROS2 Subscriber 封装类 11 | :param node_name: 节点名称 12 | :param topic_name: 订阅的话题名 13 | :param msg_type: 消息类型 14 | :param call: 可选的回调函数 15 | """ 16 | super().__init__(node_name) 17 | self.topic_name = topic_name 18 | self.msg_type = msg_type 19 | self.latest_msg = None 20 | self.lock = Lock() 21 | self.user_call = call 22 | 23 | self.subscription = self.create_subscription( 24 | msg_type, 25 | topic_name, 26 | self.callback, 27 | 10 # QoS depth 28 | ) 29 | 30 | def callback(self, msg): 31 | with self.lock: 32 | self.latest_msg = msg 33 | if self.user_call: 34 | self.user_call(msg) 35 | 36 | def get_latest_data(self): 37 | with self.lock: 38 | return self.latest_msg 39 | 40 | import time 41 | from bunker_msgs.msg import BunkerRCState # 替换为你使用的消息类型 42 | 43 | def custom_callback(msg): 44 | print(f"Received: SWA={msg.swa}, SWC={msg.swc}") 45 | 46 | def main(): 47 | rclpy.init() 48 | 49 | # 创建节点和订阅器对象 50 | subscriber_node = ROS2Subscriber( 51 | node_name='rc_state_listener', 52 | topic_name='/bunker_rc_state', 53 | msg_type=BunkerRCState, 54 | call=custom_callback # 可选 55 | ) 56 | 57 | try: 58 | while rclpy.ok(): 59 | rclpy.spin_once(subscriber_node, timeout_sec=0.1) 60 | msg = subscriber_node.get_latest_data() 61 | if msg: 62 | print(msg) 63 | time.sleep(0.1) 64 | except KeyboardInterrupt: 65 | pass 66 | finally: 67 | subscriber_node.destroy_node() 68 | rclpy.shutdown() 69 | 70 | if __name__ == '__main__': 71 | main() 72 | 73 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/model/bet/action_ae/__init__.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.utils.data import DataLoader 4 | import abc 5 | 6 | from typing import Optional, Union 7 | 8 | import diffusion_policy.model.bet.utils as utils 9 | 10 | 11 | class 
AbstractActionAE(utils.SaveModule, abc.ABC): 12 | 13 | @abc.abstractmethod 14 | def fit_model( 15 | self, 16 | input_dataloader: DataLoader, 17 | eval_dataloader: DataLoader, 18 | obs_encoding_net: Optional[nn.Module] = None, 19 | ) -> None: 20 | pass 21 | 22 | @abc.abstractmethod 23 | def encode_into_latent( 24 | self, 25 | input_action: torch.Tensor, 26 | input_rep: Optional[torch.Tensor], 27 | ) -> torch.Tensor: 28 | """ 29 | Given the input action, discretize it. 30 | 31 | Inputs: 32 | input_action (shape: ... x action_dim): The input action to discretize. This can be in a batch, 33 | and it is generally assumed that the last dimension is the action dimension. 34 | 35 | Outputs: 36 | discretized_action (shape: ... x num_tokens): The discretized action. 37 | """ 38 | raise NotImplementedError 39 | 40 | @abc.abstractmethod 41 | def decode_actions( 42 | self, 43 | latent_action_batch: Optional[torch.Tensor], 44 | input_rep_batch: Optional[torch.Tensor] = None, 45 | ) -> torch.Tensor: 46 | """ 47 | Given a discretized action, convert it to a continuous action. 48 | 49 | Inputs: 50 | latent_action_batch (shape: ... x num_tokens): The discretized action 51 | generated by the discretizer. 52 | 53 | Outputs: 54 | continuous_action (shape: ... x action_dim): The continuous action. 55 | """ 56 | raise NotImplementedError 57 | 58 | @property 59 | @abc.abstractmethod 60 | def num_latents(self) -> Union[int, float]: 61 | """ 62 | Number of possible latents for this generator, useful for state priors that use softmax. 
63 | """ 64 | return float("inf") 65 | -------------------------------------------------------------------------------- /policy/RDT/configs/pretrain_sample_weights.json: -------------------------------------------------------------------------------- 1 | { 2 | "fractal20220817_data": 271, 3 | "taco_play": 60, 4 | "jaco_play": 33, 5 | "berkeley_cable_routing": 8, 6 | "nyu_door_opening_surprising_effectiveness": 10, 7 | "viola": 12, 8 | "berkeley_autolab_ur5": 32, 9 | "toto": 32, 10 | "kuka": 50, 11 | "language_table": 100, 12 | "columbia_cairlab_pusht_real": 12, 13 | "stanford_kuka_multimodal_dataset_converted_externally_to_rlds": 55, 14 | "stanford_hydra_dataset_converted_externally_to_rlds": 24, 15 | "austin_buds_dataset_converted_externally_to_rlds": 7, 16 | "maniskill_dataset_converted_externally_to_rlds": 174, 17 | "furniture_bench_dataset_converted_externally_to_rlds": 71, 18 | "ucsd_kitchen_dataset_converted_externally_to_rlds": 12, 19 | "ucsd_pick_and_place_dataset_converted_externally_to_rlds": 37, 20 | "austin_sailor_dataset_converted_externally_to_rlds": 15, 21 | "austin_sirius_dataset_converted_externally_to_rlds": 24, 22 | "bc_z": 208, 23 | "utokyo_pr2_opening_fridge_converted_externally_to_rlds": 9, 24 | "utokyo_pr2_tabletop_manipulation_converted_externally_to_rlds": 15, 25 | "utokyo_xarm_pick_and_place_converted_externally_to_rlds": 10, 26 | "utokyo_xarm_bimanual_converted_externally_to_rlds": 1, 27 | "berkeley_mvp_converted_externally_to_rlds": 22, 28 | "berkeley_rpt_converted_externally_to_rlds": 30, 29 | "kaist_nonprehensile_converted_externally_to_rlds": 14, 30 | "tokyo_u_lsmo_converted_externally_to_rlds": 7, 31 | "dlr_sara_grid_clamp_converted_externally_to_rlds": 1, 32 | "stanford_robocook_converted_externally_to_rlds": 50, 33 | "imperialcollege_sawyer_wrist_cam": 13, 34 | "iamlab_cmu_pickup_insert_converted_externally_to_rlds": 25, 35 | "utaustin_mutex": 39, 36 | "berkeley_fanuc_manipulation": 20, 37 | "cmu_play_fusion": 24, 38 | "droid": 
303, 39 | "fmb": 42, 40 | "dobbe": 36, 41 | "qut_dexterous_manpulation": 14, 42 | "aloha_mobile": 150, 43 | "aloha_static": 150, 44 | "roboset": 135, 45 | "rh20t": 331, 46 | "calvin": 100, 47 | "bridgev2": 224 48 | } -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/training/train_utils.py: -------------------------------------------------------------------------------- 1 | """Utils for training/fine-tuning scripts.""" 2 | 3 | import torch 4 | 5 | from prismatic.vla.constants import ACTION_DIM, ACTION_TOKEN_BEGIN_IDX, IGNORE_INDEX 6 | 7 | 8 | def get_current_action_mask(token_ids): 9 | # Create a tensor marking positions of IGNORE_INDEX 10 | newline_positions = token_ids != IGNORE_INDEX 11 | 12 | # Calculate cumulative sum to identify regions between newlines 13 | cumsum = torch.cumsum(newline_positions, dim=1) 14 | 15 | # Create the mask 16 | mask = (1 <= cumsum) & (cumsum <= ACTION_DIM) 17 | 18 | # Extract the action part only 19 | action_tokens_only_mask = token_ids > ACTION_TOKEN_BEGIN_IDX 20 | mask = action_tokens_only_mask * mask 21 | 22 | return mask 23 | 24 | 25 | def get_next_actions_mask(token_ids): 26 | # Create a tensor marking positions of IGNORE_INDEX 27 | newline_positions = token_ids != IGNORE_INDEX 28 | 29 | # Calculate cumulative sum to identify regions between newlines 30 | cumsum = torch.cumsum(newline_positions, dim=1) 31 | 32 | # Create the mask 33 | mask = cumsum > ACTION_DIM 34 | 35 | # Extract the action part only 36 | action_tokens_only_mask = token_ids > ACTION_TOKEN_BEGIN_IDX 37 | mask = action_tokens_only_mask * mask 38 | 39 | return mask 40 | 41 | 42 | def compute_token_accuracy(predicted_token_ids, ground_truth_token_ids, mask): 43 | correct_preds = (predicted_token_ids == ground_truth_token_ids) & mask 44 | accuracy = correct_preds.sum().float() / mask.sum().float() 45 | return accuracy 46 | 47 | 48 | def compute_actions_l1_loss(action_tokenizer, predicted_token_ids, 
ground_truth_token_ids, mask): 49 | pred_continuous_actions = torch.tensor( 50 | action_tokenizer.decode_token_ids_to_actions(predicted_token_ids[mask].cpu().numpy()) 51 | ) 52 | true_continuous_actions = torch.tensor( 53 | action_tokenizer.decode_token_ids_to_actions(ground_truth_token_ids[mask].cpu().numpy()) 54 | ) 55 | l1_loss = torch.nn.functional.l1_loss(pred_continuous_actions, true_continuous_actions) 56 | return l1_loss 57 | -------------------------------------------------------------------------------- /sensor/_Pika_sensor.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | import numpy as np 5 | 6 | from sensor.teleoperation_sensor import TeleoperationSensor 7 | from utils.data_handler import matrix_to_xyz_rpy, compute_local_delta_pose, debug_print, apply_local_delta_pose, compute_rotate_matrix 8 | 9 | from scipy.spatial.transform import Rotation as R 10 | from typing import Callable, Optional 11 | 12 | from pika import sense 13 | 14 | ''' 15 | QuestVR base code from: 16 | https://github.com/rail-berkeley/oculus_reader.git 17 | ''' 18 | 19 | class PikaSensor(TeleoperationSensor): 20 | def __init__(self,name): 21 | super().__init__() 22 | self.name = name 23 | 24 | def set_up(self, tty, device_name): 25 | ''' 26 | device_name:跟插入顺序有关 27 | 无线连接: WM0, WM1 28 | 有线连接: T20, T21 29 | ''' 30 | self.sensor = sense(tty) 31 | self.prev_qpos = None 32 | self.device_name = device_name 33 | 34 | def get_state(self): 35 | qpos = self.sensor.get_pose(self.device_name) 36 | # gripper = self.sensor.get_encoder_data()['rad'] / np.pi 37 | if self.prev_qpos is None: 38 | self.prev_qpos = qpos 39 | qpos = np.array([0,0,0,0,0,0]) 40 | else: 41 | qpos = compute_local_delta_pose(self.prev_qpos, qpos) 42 | 43 | qpos = compute_rotate_matrix(qpos) 44 | return { 45 | "end_pose":qpos, 46 | # "extra": gripper, 47 | } 48 | 49 | def reset(self, buttons): 50 | debug_print(f"{self.name}", "reset success!", 
"INFO") 51 | return 52 | 53 | if __name__ == "__main__": 54 | import time 55 | teleop = PikaSensor("left_pika") 56 | 57 | teleop.set_up() 58 | 59 | teleop.set_collect_info(["end_pose","extra"]) 60 | 61 | while True: 62 | pose, buttons = teleop.get_state()["end_pose"] 63 | left_pose = pose[:6] 64 | right_pose = pose[-6:] 65 | 66 | teleop.reset(buttons) 67 | 68 | print("left_pose:\n", left_pose) 69 | print("right_pose:\n", right_pose) 70 | print("buttons:\n", buttons) 71 | time.sleep(0.1) -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/util/nn_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | nn_utils.py 3 | 4 | Utility functions and PyTorch submodule definitions. 5 | """ 6 | 7 | import torch 8 | import torch.nn as nn 9 | 10 | 11 | # === Definitions for Various Projection Modules, with Signature :: [..., in_dim] --> [..., out_dim] === 12 | class LinearProjector(nn.Module): 13 | def __init__(self, vision_dim: int, llm_dim: int) -> None: 14 | super().__init__() 15 | self.projector = nn.Linear(vision_dim, llm_dim, bias=True) 16 | 17 | def forward(self, img_patches: torch.Tensor) -> torch.Tensor: 18 | return self.projector(img_patches) 19 | 20 | 21 | class MLPProjector(nn.Module): 22 | def __init__(self, vision_dim: int, llm_dim: int, mlp_type: str = "gelu-mlp") -> None: 23 | super().__init__() 24 | if mlp_type == "gelu-mlp": 25 | self.projector = nn.Sequential( 26 | nn.Linear(vision_dim, llm_dim, bias=True), 27 | nn.GELU(), 28 | nn.Linear(llm_dim, llm_dim, bias=True), 29 | ) 30 | else: 31 | raise ValueError(f"Projector with `{mlp_type = }` is not supported!") 32 | 33 | def forward(self, img_patches: torch.Tensor) -> torch.Tensor: 34 | return self.projector(img_patches) 35 | 36 | 37 | class FusedMLPProjector(nn.Module): 38 | def __init__(self, fused_vision_dim: int, llm_dim: int, mlp_type: str = "fused-gelu-mlp") -> None: 39 | super().__init__() 40 | 
self.initial_projection_dim = fused_vision_dim * 4 41 | if mlp_type == "fused-gelu-mlp": 42 | self.projector = nn.Sequential( 43 | nn.Linear(fused_vision_dim, self.initial_projection_dim, bias=True), 44 | nn.GELU(), 45 | nn.Linear(self.initial_projection_dim, llm_dim, bias=True), 46 | nn.GELU(), 47 | nn.Linear(llm_dim, llm_dim, bias=True), 48 | ) 49 | else: 50 | raise ValueError(f"Fused Projector with `{mlp_type = }` is not supported!") 51 | 52 | def forward(self, fused_img_patches: torch.Tensor) -> torch.Tensor: 53 | return self.projector(fused_img_patches) 54 | -------------------------------------------------------------------------------- /example/collect/collect_mp_robot.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | import time 4 | import select 5 | 6 | from multiprocessing import Process, Event, Barrier 7 | 8 | from my_robot.test_robot import TestRobot, condition 9 | 10 | from utils.time_scheduler import TimeScheduler 11 | from utils.robot_worker import RobotWorker 12 | from utils.data_handler import is_enter_pressed 13 | 14 | from data.collect_any import CollectAny 15 | 16 | 17 | if __name__ == "__main__": 18 | import os 19 | os.environ["INFO_LEVEL"] = "DEBUG" # DEBUG , INFO, ERROR 20 | 21 | start_episode = 0 22 | num_episode = 10 23 | avg_collect_time = 0 24 | 25 | for episode_id in range(start_episode, start_episode + num_episode): 26 | is_start = False 27 | 28 | # 重置进程 29 | # time_lock = Event() 30 | time_lock= Barrier(1+1) 31 | start_event = Event() 32 | finish_event = Event() 33 | robot_process = Process(target=RobotWorker, args=(TestRobot, episode_id, time_lock, start_event, finish_event, "robot_worker")) 34 | time_scheduler = TimeScheduler(work_barrier=time_lock, time_freq=10) # 可以给多个进程同时上锁 35 | 36 | robot_process.start() 37 | while not is_start: 38 | time.sleep(0.01) 39 | if is_enter_pressed(): 40 | is_start = True 41 | start_event.set() 42 | else: 43 | time.sleep(1) 
44 | 45 | time_scheduler.start() 46 | while is_start: 47 | time.sleep(0.001) 48 | if is_enter_pressed(): 49 | finish_event.set() 50 | time_scheduler.stop() 51 | is_start = False 52 | 53 | # 销毁多进程 54 | if robot_process.is_alive(): 55 | robot_process.join() 56 | robot_process.close() 57 | 58 | 59 | # 仅用于添加额外信息 60 | collection = CollectAny(condition=condition,start_episode=0) 61 | avg_collect_time = time_scheduler.real_time_average_time_interval 62 | extra_info = {} 63 | extra_info["avg_time_interval"] = avg_collect_time 64 | collection.add_extra_condition_info(extra_info) -------------------------------------------------------------------------------- /policy/DP/README.md: -------------------------------------------------------------------------------- 1 | [![中文](https://img.shields.io/badge/中文-简体-blue)](./README_CN.md) 2 | [![English](https://img.shields.io/badge/English-English-green)](./README.md) 3 | 4 | # DP (Diffusion Policy) Deployment Guide 5 | 6 | ## Quick Start 7 | 8 | ### 1. Environment Setup 9 | 10 | Install the dependencies required for DP deployment: 11 | 12 | ```bash 13 | cd policy/DP/ 14 | pip install -e . 15 | ``` 16 | 17 | **Optional Dependencies:** 18 | 19 | ```bash 20 | # Install with training dependencies (wandb, tensorboard, etc.) 21 | pip install -e .[training] 22 | 23 | # Install with simulation dependencies 24 | pip install -e .[simulation] 25 | 26 | # Install with all optional dependencies 27 | pip install -e .[all] 28 | ``` 29 | 30 | ### 2. Data Preparation and Training 31 | 32 | #### Data Conversion 33 | 34 | Convert the collected data to zarr format required by DP model: 35 | 36 | ```bash 37 | cd policy/DP/ 38 | python scripts/process_data.py 39 | ``` 40 | 41 | **Example:** 42 | ```bash 43 | # Convert 100 episodes from data/test_data/ to zarr format 44 | python process_data.py data/test_data/ processed_data/test_data-100.zarr/ 100 45 | ``` 46 | 47 | ### 3. Real Robot Deployment 48 | 49 | 1. 
Copy the trained checkpoint to the following directory: 50 | ``` 51 | control_your_robot/policy/DP/checkpoints/ 52 | ``` 53 | 54 | 2. Modify the deployment script `example/deploy/piper_single_on_DP.py`: 55 | 56 | ```python 57 | # Modify the model path around line 316 58 | model = MYDP(model_path="policy/DP/checkpoints/feed_test_30-100-0/300.ckpt", task_name="feed_test_30", INFO="DEBUG") 59 | ``` 60 | 61 | **Parameter Description:** 62 | - `model_path`: Path to the policy model checkpoint 63 | - `task_name`: Corresponding task name 64 | - `INFO`: Log level (DEBUG/INFO/ERROR) 65 | 66 | #### 3.3 Execute Deployment 67 | 68 | Run the deployment script to start real robot execution: 69 | 70 | ```bash 71 | python example/deploy/piper_single_on_DP.py 72 | ``` 73 | 74 | ## Notes 75 | 76 | - Ensure the robotic arm is properly enabled and connected 77 | - Check if the model path is correct 78 | - It is recommended to verify the model performance in a test environment before deployment 79 | 80 | 81 | -------------------------------------------------------------------------------- /third_party/Realman_IK/qp-tools/doc/header.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | $projectname: $title 9 | $title 10 | 11 | 12 | 13 | $treeview 14 | $search 15 | $mathjax 16 | 17 | $extrastylesheet 18 | 19 | 20 |
21 | 22 | 23 |
24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 37 | 38 | 39 | 40 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 |
32 |
$projectname 33 |  $projectnumber 34 |
35 |
$projectbrief
36 |
41 |
$projectbrief
42 |
$searchbox
53 |
54 | 55 | 56 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/models/backbones/llm/phi.py: -------------------------------------------------------------------------------- 1 | """ 2 | phi.py 3 | 4 | Class definition for all LLMs derived from PhiForCausalLM. 5 | """ 6 | 7 | from typing import Optional, Type 8 | 9 | import torch 10 | from torch import nn as nn 11 | from transformers import PhiForCausalLM 12 | from transformers.models.phi.modeling_phi import PhiDecoderLayer 13 | 14 | from prismatic.models.backbones.llm.base_llm import HFCausalLLMBackbone 15 | from prismatic.models.backbones.llm.prompting import PhiPromptBuilder, PromptBuilder 16 | 17 | # Registry ==> Support Phi Models (from HF Transformers) 18 | # fmt: off 19 | PHI_MODELS = { 20 | # === Phi-2 === 21 | "phi-2-3b": { 22 | "llm_family": "phi", "llm_cls": PhiForCausalLM, "hf_hub_path": "microsoft/phi-2" 23 | } 24 | } 25 | # fmt: on 26 | 27 | 28 | class PhiLLMBackbone(HFCausalLLMBackbone): 29 | def __init__( 30 | self, 31 | llm_backbone_id: str, 32 | llm_max_length: int = 2048, 33 | hf_token: Optional[str] = None, 34 | inference_mode: bool = False, 35 | use_flash_attention_2: bool = True, 36 | ) -> None: 37 | super().__init__( 38 | llm_backbone_id, 39 | llm_max_length=llm_max_length, 40 | hf_token=hf_token, 41 | inference_mode=inference_mode, 42 | use_flash_attention_2=use_flash_attention_2, 43 | **PHI_MODELS[llm_backbone_id], 44 | ) 45 | 46 | # [Special Case] Phi PAD Token Handling --> for clarity, we add an extra token (and resize) 47 | self.tokenizer.add_special_tokens({"pad_token": "<|pad|>"}) 48 | self.llm.config.pad_token_id = self.tokenizer.pad_token_id 49 | self.llm.resize_token_embeddings(len(self.tokenizer), pad_to_multiple_of=64) 50 | 51 | @property 52 | def prompt_builder_fn(self) -> Type[PromptBuilder]: 53 | if self.identifier.startswith("phi-2"): 54 | return PhiPromptBuilder 55 | 56 | raise ValueError(f"No PromptBuilder defined 
for LLM Backbone `{self.identifier}`") 57 | 58 | @property 59 | def transformer_layer_cls(self) -> Type[nn.Module]: 60 | return PhiDecoderLayer 61 | 62 | @property 63 | def half_precision_dtype(self) -> torch.dtype: 64 | return torch.bfloat16 65 | -------------------------------------------------------------------------------- /policy/DP/diffusion_policy/model/common/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | from diffusers.optimization import ( 2 | Union, 3 | SchedulerType, 4 | Optional, 5 | Optimizer, 6 | TYPE_TO_SCHEDULER_FUNCTION, 7 | ) 8 | 9 | 10 | def get_scheduler( 11 | name: Union[str, SchedulerType], 12 | optimizer: Optimizer, 13 | num_warmup_steps: Optional[int] = None, 14 | num_training_steps: Optional[int] = None, 15 | **kwargs, 16 | ): 17 | """ 18 | Added kwargs vs diffuser's original implementation 19 | 20 | Unified API to get any scheduler from its name. 21 | 22 | Args: 23 | name (`str` or `SchedulerType`): 24 | The name of the scheduler to use. 25 | optimizer (`torch.optim.Optimizer`): 26 | The optimizer that will be used during training. 27 | num_warmup_steps (`int`, *optional*): 28 | The number of warmup steps to do. This is not required by all schedulers (hence the argument being 29 | optional), the function will raise an error if it's unset and the scheduler type requires it. 30 | num_training_steps (`int``, *optional*): 31 | The number of training steps to do. This is not required by all schedulers (hence the argument being 32 | optional), the function will raise an error if it's unset and the scheduler type requires it. 
33 | """ 34 | name = SchedulerType(name) 35 | schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] 36 | if name == SchedulerType.CONSTANT: 37 | return schedule_func(optimizer, **kwargs) 38 | 39 | # All other schedulers require `num_warmup_steps` 40 | if num_warmup_steps is None: 41 | raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") 42 | 43 | if name == SchedulerType.CONSTANT_WITH_WARMUP: 44 | return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, **kwargs) 45 | 46 | # All other schedulers require `num_training_steps` 47 | if num_training_steps is None: 48 | raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") 49 | 50 | return schedule_func( 51 | optimizer, 52 | num_warmup_steps=num_warmup_steps, 53 | num_training_steps=num_training_steps, 54 | **kwargs, 55 | ) 56 | -------------------------------------------------------------------------------- /my_robot/_realsense_only.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("./") 3 | 4 | import numpy as np 5 | 6 | from my_robot.base_robot import Robot 7 | 8 | from controller.Piper_controller import PiperController 9 | from sensor.Realsense_sensor import RealsenseSensor 10 | 11 | from data.collect_any import CollectAny 12 | 13 | # 组装你的控制器 14 | CAMERA_SERIALS = { 15 | # 'head': '419522072373', # Replace with actual serial number 16 | 'wrist': '419522071856', # Replace with actual serial number 17 | } 18 | 19 | # Define start position (in degrees) 20 | START_POSITION_ANGLE_LEFT_ARM = [ 21 | 0, # Joint 1 22 | 0, # Joint 2 23 | 0, # Joint 3 24 | 0, # Joint 4 25 | 0, # Joint 5 26 | 0, # Joint 6 27 | ] 28 | 29 | # Define start position (in degrees) 30 | START_POSITION_ANGLE_RIGHT_ARM = [ 31 | 0, # Joint 1 32 | 0, # Joint 2 33 | 0, # Joint 3 34 | 0, # Joint 4 35 | 0, # Joint 5 36 | 0, # Joint 6 37 | ] 38 | 39 | condition = { 40 | "robot":"piper_single", 41 | "save_path": 
"./datasets/", # 保存路径 42 | "task_name": "test", # 任务名称 43 | "save_format": "hdf5", # 保存格式 44 | "save_interval": 10, # 保存频率 45 | } 46 | 47 | 48 | class Camera(Robot): 49 | def __init__(self, start_episode=0): 50 | super().__init__(start_episode) 51 | 52 | self.condition = condition 53 | self.sensors = { 54 | "image":{ 55 | # "cam_head": RealsenseSensor("cam_head"), 56 | "cam_wrist": RealsenseSensor("cam_wrist"), 57 | } 58 | } 59 | # ============== 初始化相关 ============== 60 | 61 | def reset(self): 62 | return 63 | 64 | def set_up(self): 65 | # self.sensors["image"]["cam_head"].set_up(CAMERA_SERIALS["head"]) 66 | self.sensors["image"]["cam_wrist"].set_up(CAMERA_SERIALS["wrist"]) 67 | 68 | self.set_collect_type({"image": ["color"] 69 | }) 70 | 71 | print("set up success!") 72 | 73 | if __name__=="__main__": 74 | import time 75 | robot = Camera() 76 | robot.set_up() 77 | # 采集测试 78 | data_list = [] 79 | for i in range(100): 80 | print(i) 81 | data = robot.get() 82 | robot.collect(data) 83 | time.sleep(0.1) 84 | robot.finish() 85 | -------------------------------------------------------------------------------- /policy/openvla-oft/prismatic/vla/materialize.py: -------------------------------------------------------------------------------- 1 | """ 2 | materialize.py 3 | 4 | Factory class for initializing Open-X RLDS-backed datasets, given specified data mixture parameters; provides and 5 | exports individual functions for clear control flow. 
6 | """ 7 | 8 | from pathlib import Path 9 | from typing import Tuple, Type 10 | 11 | from torch.utils.data import Dataset 12 | from transformers import PreTrainedTokenizerBase 13 | 14 | from prismatic.models.backbones.llm.prompting import PromptBuilder 15 | from prismatic.models.backbones.vision import ImageTransform 16 | from prismatic.util.data_utils import PaddedCollatorForActionPrediction 17 | from prismatic.vla.action_tokenizer import ActionTokenizer 18 | from prismatic.vla.datasets import EpisodicRLDSDataset, RLDSBatchTransform, RLDSDataset 19 | 20 | 21 | def get_vla_dataset_and_collator( 22 | data_root_dir: Path, 23 | data_mix: str, 24 | image_transform: ImageTransform, 25 | tokenizer: PreTrainedTokenizerBase, 26 | prompt_builder_fn: Type[PromptBuilder], 27 | default_image_resolution: Tuple[int, int, int], 28 | padding_side: str = "right", 29 | predict_stop_token: bool = True, 30 | shuffle_buffer_size: int = 100_000, 31 | train: bool = True, 32 | episodic: bool = False, 33 | image_aug: bool = False, 34 | ) -> Tuple[Dataset, ActionTokenizer, PaddedCollatorForActionPrediction]: 35 | """Initialize RLDS Dataset (wraps TFDS), ActionTokenizer, and initialize transform/collation functions.""" 36 | action_tokenizer = ActionTokenizer(tokenizer) 37 | batch_transform = RLDSBatchTransform( 38 | action_tokenizer, tokenizer, image_transform, prompt_builder_fn, predict_stop_token=predict_stop_token 39 | ) 40 | collator = PaddedCollatorForActionPrediction( 41 | tokenizer.model_max_length, tokenizer.pad_token_id, padding_side=padding_side 42 | ) 43 | 44 | # Build RLDS Iterable Dataset 45 | cls = RLDSDataset if not episodic else EpisodicRLDSDataset 46 | dataset = cls( 47 | data_root_dir, 48 | data_mix, 49 | batch_transform, 50 | resize_resolution=default_image_resolution[1:], 51 | shuffle_buffer_size=shuffle_buffer_size, 52 | train=train, 53 | image_aug=image_aug, 54 | ) 55 | 56 | return dataset, action_tokenizer, collator 57 | 
--------------------------------------------------------------------------------