├── .gitignore ├── LICENSE ├── README.md ├── humanoid_teleoperation ├── .gitignore ├── README.md ├── collect_data.sh ├── convert_data.sh ├── scripts │ ├── action_util.py │ ├── collect_data.py │ ├── convert_demos.py │ ├── example.cpython-311-x86_64-linux-gnu.so │ ├── multi_realsense.py │ ├── rotation_util.py │ ├── sensor_offset.json │ ├── silverscreen_multicam.py │ └── stream.py ├── start_robo.sh └── teleop-zenoh │ ├── README.md │ ├── arm_control │ ├── FK │ │ ├── __init__.py │ │ ├── configs │ │ │ └── urdfs │ │ │ │ ├── LArm.urdf │ │ │ │ └── RArm.urdf │ │ ├── fkpytest.py │ │ └── libGR1_ArmFk.so │ ├── __init__.py │ ├── libkdl_parser.so │ ├── liborocos-kdl.so │ ├── liborocos-kdl.so.1.5 │ ├── liborocos-kdl.so.1.5.1 │ ├── liburdfdom_model.so │ └── liburdfdom_model.so.1.0 │ ├── communication.py │ ├── config_GR1_T2.yaml │ ├── example.cpython-311-x86_64-linux-gnu.so │ ├── filter.py │ ├── gesture.py │ ├── gripper.py │ ├── libs │ ├── robot_rcs-0.4.0.11-cp311-cp311-manylinux_2_30_x86_64.whl │ └── robot_rcs_gr-1.9.1.10-cp311-cp311-manylinux_2_30_x86_64.whl │ ├── pico_streamer.py │ ├── retarget.py │ ├── run_server.py │ ├── sensor_offset.json │ ├── set_env.py │ ├── urdf │ ├── GR1T1 │ │ ├── meshes │ │ │ ├── base.STL │ │ │ ├── head_pitch.STL │ │ │ ├── head_roll.STL │ │ │ ├── head_yaw.STL │ │ │ ├── l_foot_pitch.STL │ │ │ ├── l_foot_roll.STL │ │ │ ├── l_hand_pitch.STL │ │ │ ├── l_hand_roll.STL │ │ │ ├── l_hand_yaw.STL │ │ │ ├── l_lower_arm_pitch.STL │ │ │ ├── l_shank_pitch.STL │ │ │ ├── l_thigh_pitch.STL │ │ │ ├── l_thigh_roll.STL │ │ │ ├── l_thigh_yaw.STL │ │ │ ├── l_upper_arm_pitch.STL │ │ │ ├── l_upper_arm_roll.STL │ │ │ ├── l_upper_arm_yaw.STL │ │ │ ├── r_foot_pitch.STL │ │ │ ├── r_foot_roll.STL │ │ │ ├── r_hand_pitch.STL │ │ │ ├── r_hand_roll.STL │ │ │ ├── r_hand_yaw.STL │ │ │ ├── r_lower_arm_pitch.STL │ │ │ ├── r_shank_pitch.STL │ │ │ ├── r_thigh_pitch.STL │ │ │ ├── r_thigh_roll.STL │ │ │ ├── r_thigh_yaw.STL │ │ │ ├── r_upper_arm_pitch.STL │ │ │ ├── r_upper_arm_roll.STL │ │ │ ├── r_upper_arm_yaw.STL │ │ │ ├── waist_pitch.STL │ │ │ ├── waist_roll.STL │ │ │ └── waist_yaw.STL │ │ └── urdf │ │ │ ├── GR1T1_simple.urdf │ │ │ └── robot.urdf │ └── GR1T2 │ │ ├── meshes │ │ ├── Larm1.STL │ │ ├── Larm2.STL │ │ ├── Larm3.STL │ │ ├── Larm4.STL │ │ ├── Larm5.STL │ │ ├── Larm6.STL │ │ ├── Larm7.STL │ │ ├── Lleg1.STL │ │ ├── Lleg2.STL │ │ ├── Lleg3.STL │ │ ├── Lleg4.STL │ │ ├── Lleg5.STL │ │ ├── Lleg6.STL │ │ ├── Rarm1.STL │ │ ├── Rarm2.STL │ │ ├── Rarm3.STL │ │ ├── Rarm4.STL │ │ ├── Rarm5.STL │ │ ├── Rarm6.STL │ │ ├── Rarm7.STL │ │ ├── Rleg1.STL │ │ ├── Rleg2.STL │ │ ├── Rleg3.STL │ │ ├── Rleg4.STL │ │ ├── Rleg5.STL │ │ ├── Rleg6.STL │ │ ├── base.STL │ │ ├── head1.STL │ │ ├── head2.STL │ │ ├── head3.STL │ │ ├── torso.STL │ │ ├── waist1.STL │ │ ├── waist2.STL │ │ └── waist3.STL │ │ └── urdf │ │ ├── GR1T2_simple.urdf │ │ └── robot.urdf │ └── utils.py └── vision_pro_app ├── .DS_Store ├── .gitignore ├── CITATION.cff ├── README.md ├── Tracking Streamer.xcodeproj ├── project.pbxproj ├── project.xcworkspace │ ├── contents.xcworkspacedata │ ├── xcshareddata │ │ ├── IDEWorkspaceChecks.plist │ │ └── swiftpm │ │ │ └── Package.resolved │ └── xcuserdata │ │ ├── fftai.xcuserdatad │ │ └── UserInterfaceState.xcuserstate │ │ ├── yhpark.xcuserdatad │ │ └── UserInterfaceState.xcuserstate │ │ └── zeyanjie.xcuserdatad │ │ └── UserInterfaceState.xcuserstate ├── xcshareddata │ └── xcschemes │ │ └── VisionProTeleop.xcscheme └── xcuserdata │ ├── account2309.xcuserdatad │ └── xcschemes │ │ └── xcschememanagement.plist │ └── yhpark.xcuserdatad 
│ └── xcschemes │ └── xcschememanagement.plist ├── Tracking Streamer ├── .DS_Store ├── App.swift ├── ContentView.swift ├── Supporting files │ ├── .DS_Store │ ├── Archive │ │ ├── DebugView.md │ │ └── HandToGround.md │ ├── Assets.xcassets │ │ ├── .DS_Store │ │ ├── AccentColor.colorset │ │ │ └── Contents.json │ │ ├── AppIcon.solidimagestack │ │ │ ├── Back.solidimagestacklayer │ │ │ │ ├── Content.imageset │ │ │ │ │ ├── Background.png │ │ │ │ │ └── Contents.json │ │ │ │ └── Contents.json │ │ │ ├── Contents.json │ │ │ ├── Front.solidimagestacklayer │ │ │ │ ├── Content.imageset │ │ │ │ │ ├── Contents.json │ │ │ │ │ └── lab_logo_inverted.png │ │ │ │ └── Contents.json │ │ │ └── Middle.solidimagestacklayer │ │ │ │ ├── Content.imageset │ │ │ │ └── Contents.json │ │ │ │ └── Contents.json │ │ ├── Contents.json │ │ ├── Image.imageset │ │ │ └── Contents.json │ │ ├── graph2.imageset │ │ │ ├── Contents.json │ │ │ └── diagram_visionpro.png │ │ └── lab_logo_inverted.imageset │ │ │ ├── Contents.json │ │ │ └── lab_logo_inverted.png │ ├── Info.plist │ ├── InfoPlist.xcstrings │ ├── Localizable.xcstrings │ ├── README assets │ │ ├── appstore_badge.svg │ │ ├── icon.png │ │ ├── screenshot1280w.jpg │ │ ├── screenshot1280w2.jpg │ │ └── screenshot1280w3.jpg │ ├── sound1.m4a │ └── sound2.m4a ├── VisionProTeleop.entitlements ├── 🌐RealityView.swift ├── 📏Unit.swift ├── 🛠️Menu │ ├── 🛠️MenuTop.swift │ ├── 🛠️Panel.swift │ └── 🛠️SettingPanel.swift ├── 🥽AppModel.swift ├── 🧑HeadTrackingComponent&System.swift ├── 🧩Model.swift └── 🧩Name.swift ├── assets ├── axis_convention.png ├── change_ip.png ├── coord_system.png ├── hand_skeleton_convention.png ├── home_button.png ├── main.png ├── short_paper.pdf ├── short_paper_new.pdf └── visionpro_main.png ├── avp_stream ├── .DS_Store ├── __init__.py ├── assets │ ├── huge_axis.urdf │ ├── normal_axis.urdf │ └── small_axis.urdf ├── grpc_msg │ ├── __init__.py │ ├── handtracking.grpc.swift │ ├── handtracking.pb.swift │ ├── handtracking.proto │ ├── handtracking_pb2.py │ └── handtracking_pb2_grpc.py ├── isaac_env.py ├── streamer.py └── utils │ ├── __init__.py │ ├── grpc_utils.py │ ├── isaac_utils.py │ ├── se3_utils.py │ └── trn_constants.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | # *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 110 | .pdm.toml 111 | .pdm-python 112 | .pdm-build/ 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
162 | #.idea/ 163 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Yanjie Ze 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # [Generalizable Humanoid Manipulation with 3D Diffusion Policies](https://humanoid-manipulation.github.io/) 2 | 3 | Our project is **fully open-sourced**. We split it into two repos: [Learning & Deployment of iDP3](https://github.com/YanjieZe/Improved-3D-Diffusion-Policy) and [Humanoid Teleoperation](https://github.com/YanjieZe/Humanoid-Teleoperation). This repo is for humanoid teleoperation. 4 | 5 | 6 | https://github.com/user-attachments/assets/9c013594-2181-47f7-a190-bb754c1fd934 7 | 8 | 9 | # Humanoid Teleoperation 10 | 11 | This repo is for humanoid teleoperation using Apple Vision Pro. 12 | 13 | (2024.01.29) Check [this repo](https://github.com/FFTAI/teleoperation) for a more mature version! 14 | 15 | Hardware requirements: 16 | - [Apple Vision Pro](https://www.apple.com/apple-vision-pro/). 17 | - Humanoid robot. We use [Fourier GR1](https://www.fourierintelligence.com/gr1) as the robot platform, with [Inspire Hands](https://inspire-robots.store/collections/the-dexterous-hands) as the end-effector. 18 | - RealSense camera, to stream the robot vision back to Apple Vision Pro. [RealSense L515](https://www.intelrealsense.com/lidar-camera-l515/) is used in our project. 19 | 20 | This repo contains code for two parts: 21 | - [The Vision Pro APP](vision_pro_app/README.md) is installed on Apple Vision Pro. 22 | - [The teleoperation code](humanoid_teleoperation/README.md) is installed on the onboard computer of Fourier GR1. 23 | 24 | The training data is collected and then converted to the training format to train [iDP3](https://github.com/YanjieZe/Improved-3D-Diffusion-Policy). 25 | 26 | 27 | We provide a raw data example in [Google Drive](https://drive.google.com/file/d/1JOaOYugZDtkrz3aYpQq3w8zQPdy4AudD/view?usp=sharing). You can explore the data and use our provided script to convert it to the training format. We have also provided a training data example in [Google Drive](https://drive.google.com/file/d/1c-rDOe1CcJM8iUuT1ecXKjDYAn-afy2e/view?usp=sharing).
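To take a quick look at a raw demo before converting it, here is a minimal sketch (the dataset names are the ones read by `scripts/convert_demos.py`; the file name is a placeholder for one episode from the raw data example):

```python
import h5py

# Placeholder path: one episode from the raw data example.
with h5py.File("scripts/demo_dir/raw_data_example/demo_0.h5", "r") as f:
    # Per-frame streams consumed by the conversion script.
    for key in ["color", "depth", "cloud", "action", "env_qpos_proprioception"]:
        if key in f:
            print(key, f[key].shape, f[key].dtype)
```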
28 | 29 | 30 | ## Installation 31 | 32 | For APP installation, please see [Vision Pro APP README](vision_pro_app/README.md). 33 | 34 | For teleoperation installation on the robot, please see [Teleoperation INSTALL](humanoid_teleoperation/README.md). 35 | 36 | ## Usage 37 | 38 | 39 | 1. In a terminal on the robot, start the robot server: 40 | 41 | cd humanoid_teleoperation 42 | bash start_robo.sh 43 | 44 | 2. In another terminal on the robot, start the teleop: 45 | 46 | cd humanoid_teleoperation 47 | bash collect_data.sh 48 | 49 | 3. In Apple Vision Pro, open the app and enter. 50 | - You should see the vision stream now. Snap your left hand to start the teleop. 51 | - To end the teleop, snap your left hand again. 52 | 53 | 4. The data is saved to `scripts/demo_dir` by default. 54 | 55 | 56 | 5. Convert the data to the training format using our script. Please specify the data paths in the script first. For example, we set the paths in `humanoid_teleoperation/convert_data.sh` as follows: 57 | 58 | # the path to our provided raw data 59 | demo_path=/home/ze/projects/Humanoid-Teleoperation/humanoid_teleoperation/scripts/demo_dir/raw_data_example 60 | # the path to save the converted data 61 | save_path=/home/ze/projects/Humanoid-Teleoperation/humanoid_teleoperation/scripts/demo_dir/training_data_example 62 | 63 | Then, run the script in the terminal: 64 | 65 | cd humanoid_teleoperation 66 | bash convert_data.sh 67 | 68 | You will now have the converted training data (see the layout sketch at the end of this README). To use the training data, see [our iDP3 repo](https://github.com/YanjieZe/Improved-3D-Diffusion-Policy). 69 | 70 | ## Use it for your own robot 71 | 72 | We also provide instructions for adapting the teleoperation to your own robot. 73 | 74 | By providing a basic configuration and forward kinematics, you can teleoperate your humanoid robot's arms and hands. 75 | 76 | Please see [Humanoid Arm Retarget](https://github.com/Hao-Starrr/humanoid-arm-retarget) for more details. 77 | 78 | ## BibTeX 79 | 80 | Please consider citing our paper if you find this repo useful: 81 | ``` 82 | @article{ze2024humanoid_manipulation, 83 | title = {Generalizable Humanoid Manipulation with 3D Diffusion Policies}, 84 | author = {Yanjie Ze and Zixuan Chen and Wenhao Wang and Tianyi Chen and Xialin He and Ying Yuan and Xue Bin Peng and Jiajun Wu}, 85 | year = {2024}, 86 | journal = {arXiv preprint arXiv:2410.10803} 87 | } 88 | ``` 89 | 90 | ## Acknowledgement 91 | 92 | We thank the authors of the following repos for their great work: [3D Diffusion Policy](https://github.com/YanjieZe/3D-Diffusion-Policy), [Diffusion Policy](https://github.com/columbia-ai-robotics/diffusion_policy), [VisionProTeleop](https://github.com/Improbable-AI/VisionProTeleop), [Open-TeleVision](https://github.com/OpenTeleVision/TeleVision).
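## Converted data layout

For reference, a minimal sketch of how to inspect the converted data (the group and dataset names are the ones written by `scripts/convert_demos.py`; the path is a placeholder for the output of `convert_data.sh`):

```python
import zarr

# Placeholder path: the output directory of convert_data.sh.
root = zarr.open("scripts/demo_dir/training_data_example", mode="r")

print(root.tree())                     # data/{img, state, point_cloud, action}, meta/episode_ends
print(root["data/point_cloud"].shape)  # (T, 10000, C): clouds are subsampled to 10000 points
print(root["meta/episode_ends"][:])    # cumulative frame count at the end of each episode
```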
93 | -------------------------------------------------------------------------------- /humanoid_teleoperation/.gitignore: -------------------------------------------------------------------------------- 1 | **/.DS_Store 2 | .vscode 3 | **/build 4 | **/__pycache__ 5 | **/*.pyc 6 | **/test.py 7 | **/*.log 8 | **/log/ 9 | **/*.egg-info 10 | **/*build 11 | scripts/test_state_est.py 12 | scripts/inputdata.txt 13 | scripts/test_joystick.py 14 | 15 | demo_dir/ 16 | checkpoint_dir/ 17 | **/models-gr1/ 18 | scripts/actions/ 19 | 20 | **/sensor_offset.txt 21 | 22 | **/.idea/ 23 | 24 | **/pybind11/ 25 | 26 | *.npz 27 | *.png 28 | 29 | checkpoints/ 30 | deploy_dir/ 31 | 32 | ckpt_base/ 33 | ablation_data/ -------------------------------------------------------------------------------- /humanoid_teleoperation/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Installation for Teleoperation on Humanoid Robot 3 | 4 | **Install the RealSense driver for the L515**. https://github.com/IntelRealSense/librealsense/releases/tag/v2.54.2 5 | 6 | 7 | **Teleop with Vision Pro.** To use Vision Pro teleop, first install our app on the Vision Pro, then install the following packages: 8 | 9 | # We assume you have not installed the iDP3 env yet. You can install the teleop code first, and then install iDP3. 10 | conda remove -n idp3 --all 11 | conda create -n idp3 python=3.11 12 | conda activate idp3 13 | 14 | pip install avp_stream sapien==3.0.b0 tyro anytree trimesh pytransform3d nlopt 15 | pip install cmeel-urdfdom==3.1.1.1 pin==2.7.0 16 | pip install pyrealsense2==2.54.2.5684 17 | pip install quaternionic tqdm 18 | pip install vuer==0.0.32rc7 19 | 20 | cd teleop-zenoh 21 | cd libs 22 | pip install robot_rcs-0.4.0.11-cp311-cp311-manylinux_2_30_x86_64.whl 23 | pip install robot_rcs_gr-1.9.1.10-cp311-cp311-manylinux_2_30_x86_64.whl 24 | 25 | sudo apt-get update -y 26 | # libtinyxml.so.2.6.2 27 | sudo apt install libtinyxml2.6.2v5 28 | # libtinyxml2.so.6 29 | sudo apt-get install -y libtinyxml2-6 30 | sudo apt install libconsole-bridge0.4 31 | 32 | pip install rerun-sdk termcolor h5py flask plotly 33 | 34 | 35 | # Setup Local Vision Stream 36 | 37 | Apple does not allow WebXR on non-https connections. To test the application locally, we need to create a self-signed certificate and install it on the client. You need an Ubuntu machine and a router. Connect the VisionPro and the Ubuntu machine to the same router. 38 | 39 | 1. install mkcert: https://github.com/FiloSottile/mkcert 40 | ``` 41 | sudo apt install libnss3-tools 42 | curl -JLO "https://dl.filippo.io/mkcert/latest?for=linux/amd64" 43 | chmod +x mkcert-v*-linux-amd64 44 | sudo cp mkcert-v*-linux-amd64 /usr/local/bin/mkcert 45 | ``` 46 | 2. check the local IP address: 47 | 48 | ``` 49 | ifconfig | grep inet 50 | ``` 51 | 52 | Suppose the local IP address of the Ubuntu machine is `192.168.31.157`. 53 | 54 | 3. create the certificate: 55 | 56 | ``` 57 | mkcert -install && mkcert -cert-file cert.pem -key-file key.pem 192.168.31.157 localhost 127.0.0.1 58 | ``` 59 | 60 | 4. open the firewall on the server: 61 | 62 | ``` 63 | sudo iptables -A INPUT -p tcp --dport 8012 -j ACCEPT 64 | sudo iptables-save 65 | sudo iptables -L 66 | ``` 67 | 68 | or this can be done with `ufw`: 69 | 70 | ``` 71 | sudo ufw allow 8012 72 | ``` 73 | 74 | 5. point the Vuer app in the teleop code at the certificate and key: 75 | 76 | ```python 77 | self.app = Vuer(host='0.0.0.0', cert="./cert.pem", key="./key.pem") 78 | ``` 79 | 80 | 6. 
install the CA certificate on the VisionPro 81 | 82 | ``` 83 | mkcert -CAROOT 84 | ``` 85 | 86 | Copy the rootCA.pem via AirDrop to the VisionPro and install it. 87 | 88 | Settings > General > About > Certificate Trust Settings. Under "Enable full trust for root certificates", turn on trust for the certificate. 89 | 90 | Settings > Apps > Safari > Advanced > Feature Flags > Enable WebXR Related Features 91 | 92 | 7. open Safari on the VisionPro and go to `https://192.168.31.157:8012?ws=wss://192.168.31.157:8012` 93 | 94 | 8. Click `Enter VR` and `Allow` to start the VR session. 95 | 96 | 97 | # Connect Vision Pro and Your Mac for Development 98 | 99 | To get the device working in Xcode, the following steps should get you started: 100 | 101 | Vision Pro 102 | 103 | Navigate to Settings > General > Remote Devices and remove any computer listed. 104 | 105 | Xcode 106 | 107 | Navigate to Window > Devices and Simulators. 108 | Find the Vision Pro in the list on the left-hand side of this window. 109 | Select it and hit Pair. 110 | 111 | **Note: remember to turn off the VPN on your Mac and make sure the AVP and your Mac are on the same network.** 112 | 113 | 114 | 115 | 116 | # Error Catching 117 | 118 | GLIBC_2.33 not found: 119 | 120 | [solution](https://blog.csdn.net/huazhang_001/article/details/128828999) 121 | 122 | 123 | AttributeError: type object 'Callable' has no attribute '_abc_registry': 124 | 125 | pip uninstall typing 126 | -------------------------------------------------------------------------------- /humanoid_teleoperation/collect_data.sh: -------------------------------------------------------------------------------- 1 | cd scripts 2 | 3 | python collect_data.py -------------------------------------------------------------------------------- /humanoid_teleoperation/convert_data.sh: -------------------------------------------------------------------------------- 1 | # bash convert_data.sh 2 | 3 | 4 | save_img=1 5 | save_depth=0 6 | 7 | 8 | demo_path=/home/ze/projects/Humanoid-Teleoperation/humanoid_teleoperation/scripts/demo_dir/raw_data_example 9 | save_path=/home/ze/projects/Humanoid-Teleoperation/humanoid_teleoperation/scripts/demo_dir/training_data_example 10 | 11 | cd scripts 12 | python convert_demos.py --demo_dir ${demo_path} \ 13 | --save_dir ${save_path} \ 14 | --save_img ${save_img} \ 15 | --save_depth ${save_depth} \ 16 | -------------------------------------------------------------------------------- /humanoid_teleoperation/scripts/action_util.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import rotation_util 3 | import torch 4 | 5 | init_arm_pos = np.array([[ 0.27605826, 0.25689336, -0.12734995], 6 | [ 0.29785562, -0.27283502, -0.10588663]]) 7 | init_arm_quat = np.array([[-0.50385825, 0.04293381, 0.86230873, 0.02659343], 8 | [-0.51277079, 0.03748328, 0.85770428, 0.00211863]]) 9 | init_q14d = np.array([-0.09729076, 0.04804406, 0.03029635, -1.7516746, 0.06275351, 0., 10 | 0., -0.16716751, -0.06796489, -0.12827234, -1.75219428, -0.01397494, 11 | 0., 0., ]) 12 | 13 | def joint32_to_joint25(joint): 14 | # q_upper_body = [0.0, waist_pitch, 0.0, head_pitch, 0.0, head_yaw] 15 | # used joint: waist 1 + head 2 + arm 5*2 + hand 6*2 = 25 16 | # full joint: waist 3 + head 3 + arm 7*2 + hand 6*2 = 32 17 | new_joint = np.zeros(1+2+5*2+6*2) 18 | # waist 19 | new_joint[0] = joint[1] 20 | # head 21 | new_joint[1] = joint[3] 22 | new_joint[2] = joint[5] 23 | # arm 24 | new_joint[3:3+5] = joint[6:6+5] 25 | 
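# each 7-dof arm contributes only its first 5 joints here; the two distal wrist joints are dropped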
new_joint[3+5:3+5+5] = joint[6+5+2:6+5+2+5] 26 | # hand 27 | new_joint[3+5+5:3+5+5+12] = joint[6+5+2+5+2:6+5+2+5+2+12] 28 | return new_joint 29 | 30 | def joint25_to_joint32(new_joint): 31 | joint = np.zeros(32) 32 | # waist 33 | joint[1] = new_joint[0] 34 | # head 35 | joint[3] = new_joint[1] 36 | joint[5] = new_joint[2] 37 | # arm 38 | joint[6:6+5] = new_joint[3:3+5] 39 | joint[6+5+2:6+5+2+5] = new_joint[3+5:3+5+5] 40 | # hand 41 | joint[6+5+2+5+2:6+5+2+5+2+12] = new_joint[3+5+5:3+5+5+12] 42 | 43 | return joint 44 | 45 | def extract_eef_action(eef_action): 46 | body_action = [0, eef_action[0], 0, eef_action[1], 0, eef_action[2]] 47 | arm_pos = eef_action[3:9].reshape(2, 3) 48 | arm_rot_6d = eef_action[9:21].reshape(2, 6) 49 | # arm_rot_quat = rotation_util.rotation_6d_to_quaternion(torch.from_numpy(arm_rot_6d)) 50 | # arm_rot_quat = arm_rot_quat.numpy() 51 | hand_action = eef_action[21:21+12] 52 | return body_action, arm_pos, arm_rot_6d, hand_action 53 | 54 | def extract_abs_eef(delta_pos, delta_rot_6d, abs_pos, abs_quat): 55 | new_pos = delta_pos + abs_pos 56 | abs_rot_6d = rotation_util.quaternion_to_rotation_6d(torch.from_numpy(abs_quat)).numpy() 57 | new_rot_6d = abs_rot_6d + delta_rot_6d 58 | new_quat = rotation_util.rotation_6d_to_quaternion(torch.from_numpy(new_rot_6d)).numpy() 59 | return new_pos, new_quat, new_rot_6d 60 | 61 | 62 | -------------------------------------------------------------------------------- /humanoid_teleoperation/scripts/collect_data.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import numpy as np 4 | 5 | from silverscreen_multicam import Silverscreen 6 | from termcolor import cprint 7 | 8 | async def ainput(prompt: str) -> str: 9 | return await asyncio.to_thread(input, prompt) 10 | 11 | 12 | async def main(sscr): 13 | obs_fps = 50 14 | 15 | # await ainput("enter") 16 | cprint("Start main loop", "green") 17 | 18 | while True: 19 | await asyncio.sleep(1 / obs_fps) 20 | 21 | 22 | if __name__ == "__main__": 23 | sscr = Silverscreen(fps=50, use_cert=True) 24 | # Silverscreen owns the Vuer app; run() blocks and serves the teleop session. 25 | sscr.app.run() -------------------------------------------------------------------------------- /humanoid_teleoperation/scripts/convert_demos.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import pickle 4 | import numpy as np 5 | import random 6 | import time 7 | from termcolor import colored 8 | import h5py 9 | import zarr 10 | from termcolor import cprint 11 | from tqdm import tqdm 12 | 13 | 14 | 15 | 16 | def convert_dataset(args): 17 | demo_dir = args.demo_dir 18 | save_dir = args.save_dir 19 | 20 | save_img = args.save_img 21 | save_depth = args.save_depth 22 | 23 | # create dir to save demonstrations 24 | if os.path.exists(save_dir): 25 | cprint('Data already exists at {}'.format(save_dir), 'red') 26 | cprint("If you want to overwrite, delete the existing directory first.", "red") 27 | cprint("Do you want to overwrite? 
(y/n)", "red") 28 | # user_input = input() 29 | user_input = 'y' 30 | if user_input == 'y': 31 | cprint('Overwriting {}'.format(save_dir), 'red') 32 | os.system('rm -rf {}'.format(save_dir)) 33 | else: 34 | cprint('Exiting', 'red') 35 | return 36 | os.makedirs(save_dir, exist_ok=True) 37 | 38 | demo_files = [f for f in os.listdir(demo_dir) if f.endswith(".h5")] 39 | demo_files = sorted(demo_files) 40 | 41 | 42 | total_count = 0 43 | color_arrays = [] 44 | depth_arrays = [] 45 | cloud_arrays = [] 46 | state_arrays = [] 47 | action_arrays = [] 48 | episode_ends_arrays = [] 49 | 50 | 51 | for demo_file in demo_files: 52 | # load file (h5) 53 | file_name = os.path.join(demo_dir, demo_file) 54 | print("process:", file_name) 55 | 56 | with h5py.File(file_name, "r") as data: 57 | 58 | 59 | if save_img: 60 | color_array = data["color"][:] 61 | 62 | if save_depth: 63 | depth_array = data["depth"][:] 64 | 65 | cloud_array = data["cloud"][:] 66 | action_array = data["action"][:] 67 | proprioception_array = data["env_qpos_proprioception"][:] 68 | 69 | 70 | # to list 71 | length = len(cloud_array) 72 | if save_img: 73 | color_array = [color_array[i] for i in range(length)] 74 | 75 | if save_depth: 76 | depth_array = [depth_array[i] for i in range(length)] 77 | 78 | 79 | new_cloud_array = [] 80 | for i in range(length): 81 | old_cloud = cloud_array[i] 82 | if old_cloud.shape[0] > 10000: 83 | # Randomly sample points 84 | selected_idx = np.random.choice(old_cloud.shape[0], 10000, replace=True) 85 | new_cloud = old_cloud[selected_idx] 86 | else: 87 | new_cloud = old_cloud 88 | 89 | new_cloud_array.append(new_cloud) 90 | 91 | cloud_array = new_cloud_array 92 | 93 | proprioception_array = [proprioception_array[i] for i in range(length)] 94 | action_array = [action_array[i] for i in range(length)] 95 | 96 | 97 | total_count += len(action_array) 98 | cloud_arrays.extend(cloud_array) 99 | 100 | if save_img: 101 | color_arrays.extend(color_array) 102 | 103 | if save_depth: 104 | depth_arrays.extend(depth_array) 105 | 106 | state_arrays.extend(proprioception_array) 107 | action_arrays.extend(action_array) 108 | episode_ends_arrays.append(total_count) 109 | 110 | ############################### 111 | # save data 112 | ############################### 113 | # create zarr file 114 | zarr_root = zarr.group(save_dir) 115 | zarr_data = zarr_root.create_group('data') 116 | zarr_meta = zarr_root.create_group('meta') 117 | # save img, state, action arrays into data, and episode ends arrays into meta 118 | if save_img: 119 | color_arrays = np.stack(color_arrays, axis=0) 120 | if color_arrays.shape[1] == 3: # make channel last 121 | color_arrays = np.transpose(color_arrays, (0,2,3,1)) 122 | 123 | if save_depth: 124 | depth_arrays = np.stack(depth_arrays, axis=0) 125 | 126 | 127 | state_arrays = np.stack(state_arrays, axis=0) 128 | cloud_arrays = np.stack(cloud_arrays, axis=0) 129 | 130 | 131 | action_arrays = np.stack(action_arrays, axis=0) 132 | episode_ends_arrays = np.array(episode_ends_arrays) 133 | 134 | compressor = zarr.Blosc(cname='zstd', clevel=3, shuffle=1) 135 | 136 | single_size = 500 137 | state_chunk_size = (single_size, state_arrays.shape[1]) 138 | point_cloud_chunk_size = (single_size, cloud_arrays.shape[1], cloud_arrays.shape[2]) 139 | action_chunk_size = (single_size, action_arrays.shape[1]) 140 | if save_img: 141 | img_chunk_size = (single_size, color_arrays.shape[1], color_arrays.shape[2], color_arrays.shape[3]) 142 | zarr_data.create_dataset('img', data=color_arrays, chunks=img_chunk_size, 
dtype='uint8', overwrite=True, compressor=compressor) 143 | 144 | if save_depth: 145 | depth_chunk_size = (single_size, depth_arrays.shape[1], depth_arrays.shape[2]) 146 | zarr_data.create_dataset('depth', data=depth_arrays, chunks=depth_chunk_size, dtype='float32', overwrite=True, compressor=compressor) 147 | 148 | 149 | zarr_data.create_dataset('state', data=state_arrays, chunks=state_chunk_size, dtype='float32', overwrite=True, compressor=compressor) 150 | zarr_data.create_dataset('point_cloud', data=cloud_arrays, chunks=point_cloud_chunk_size, dtype='float32', overwrite=True, compressor=compressor) 151 | 152 | zarr_data.create_dataset('action', data=action_arrays, chunks=action_chunk_size, dtype='float32', overwrite=True, compressor=compressor) 153 | zarr_meta.create_dataset('episode_ends', data=episode_ends_arrays, dtype='int64', overwrite=True, compressor=compressor) 154 | 155 | # print shape 156 | if save_img: 157 | cprint(f'color shape: {color_arrays.shape}, range: [{np.min(color_arrays)}, {np.max(color_arrays)}]', 'green') 158 | if save_depth: 159 | cprint(f'depth shape: {depth_arrays.shape}, range: [{np.min(depth_arrays)}, {np.max(depth_arrays)}]', 'green') 160 | cprint(f'cloud shape: {cloud_arrays.shape}, range: [{np.min(cloud_arrays)}, {np.max(cloud_arrays)}]', 'green') 161 | cprint(f'state shape: {state_arrays.shape}, range: [{np.min(state_arrays)}, {np.max(state_arrays)}]', 'green') 162 | cprint(f'action shape: {action_arrays.shape}, range: [{np.min(action_arrays)}, {np.max(action_arrays)}]', 'green') 163 | cprint(f'Saved zarr file to {save_dir}', 'green') 164 | 165 | # count file size 166 | total_size = 0 167 | for root, dirs, files in os.walk(save_dir): 168 | for file in files: 169 | total_size += os.path.getsize(os.path.join(root, file)) 170 | cprint(f"Total size: {total_size/1e6} MB", "green") 171 | 172 | if __name__ == "__main__": 173 | parser = argparse.ArgumentParser() 174 | parser.add_argument("--demo_dir", type=str) 175 | parser.add_argument("--save_dir", type=str) 176 | parser.add_argument("--save_img", type=int) 177 | parser.add_argument("--save_depth", type=int) 178 | 179 | 180 | args = parser.parse_args() 181 | 182 | convert_dataset(args) 183 | -------------------------------------------------------------------------------- /humanoid_teleoperation/scripts/example.cpython-311-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/scripts/example.cpython-311-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /humanoid_teleoperation/scripts/rotation_util.py: -------------------------------------------------------------------------------- 1 | import os 2 | import yaml 3 | import serial 4 | import torch 5 | import numpy as np 6 | import torch.nn.functional as F 7 | 8 | 9 | def yamlToDict(file_path): 10 | with open(os.path.join(os.getcwd(), file_path), 'r') as f: 11 | cfg_dict = yaml.load(f, Loader=yaml.SafeLoader) 12 | 13 | return cfg_dict 14 | 15 | 16 | def quatToEuler(quat): 17 | eulerVec = np.zeros(3) 18 | qw, qx, qy, qz = quat[0], quat[1], quat[2], quat[3] 19 | # roll (x-axis rotation) 20 | sinr_cosp = 2 * (qw * qx + qy * qz) 21 | cosr_cosp = 1 - 2 * (qx * qx + qy * qy) 22 | eulerVec[0] = np.arctan2(sinr_cosp, cosr_cosp) 23 | 24 | # pitch (y-axis rotation) 25 | sinp = 2 * (qw * qy - qz * qx) 26 | if np.abs(sinp) >= 1: 27 | eulerVec[1] = 
np.copysign(np.pi / 2, sinp) # use 90 degrees if out of range 28 | else: 29 | eulerVec[1] = np.arcsin(sinp) 30 | 31 | # yaw (z-axis rotation) 32 | siny_cosp = 2 * (qw * qz + qx * qy) 33 | cosy_cosp = 1 - 2 * (qy * qy + qz * qz) 34 | eulerVec[2] = np.arctan2(siny_cosp, cosy_cosp) 35 | 36 | return eulerVec 37 | 38 | def eulerToQuat(euler): 39 | roll, pitch, yaw = euler 40 | cy = np.cos(yaw * 0.5) 41 | sy = np.sin(yaw * 0.5) 42 | cp = np.cos(pitch * 0.5) 43 | sp = np.sin(pitch * 0.5) 44 | cr = np.cos(roll * 0.5) 45 | sr = np.sin(roll * 0.5) 46 | 47 | w = cr * cp * cy + sr * sp * sy 48 | x = sr * cp * cy - cr * sp * sy 49 | y = cr * sp * cy + sr * cp * sy 50 | z = cr * cp * sy - sr * sp * cy 51 | 52 | return np.array([x, y, z, w]) 53 | 54 | def quaternion_multiply(q1, q2): 55 | """ 56 | Multiplies two quaternions. 57 | 58 | Parameters: 59 | q1, q2 -- numpy arrays representing quaternions [x, y, z, w] 60 | """ 61 | x1, y1, z1, w1 = q1 62 | x2, y2, z2, w2 = q2 63 | 64 | w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2 65 | x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2 66 | y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2 67 | z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2 68 | 69 | return np.array([x, y, z, w]) 70 | 71 | def apply_quaternion_to_vector(q, v): 72 | """ 73 | Applies the rotation represented by quaternion q to vector v. 74 | 75 | Parameters: 76 | q -- numpy array representing a quaternion [x, y, z, w] 77 | v -- numpy array representing a 3D vector [a, b, c] 78 | """ 79 | q_vector = np.append(v, 0.0) # Convert vector to quaternion 80 | q_conjugate = np.array([-q[0], -q[1], -q[2], q[3]]) 81 | 82 | qv = quaternion_multiply(q, q_vector) 83 | qvq = quaternion_multiply(qv, q_conjugate) 84 | 85 | return qvq[:3] # Return only the vector part 86 | 87 | 88 | def configure_serial(port, baudrate): 89 | ser = serial.Serial() 90 | ser.port = port 91 | ser.baudrate = int(baudrate) 92 | ser.bytesize = serial.EIGHTBITS 93 | ser.parity = serial.PARITY_NONE 94 | ser.stopbits = serial.STOPBITS_ONE 95 | return ser 96 | 97 | 98 | def quat_rotate_inverse_np(q, v): 99 | # Extract the scalar part (q_w) and vector part (q_vec) from the quaternion 100 | q_w = q[-1] 101 | q_vec = q[:3] 102 | 103 | # Compute part 'a': Scaled vector based on quaternion's scalar component 104 | a = v * (2.0 * q_w ** 2 - 1.0) 105 | 106 | # Compute part 'b': Cross product of q_vec and v, scaled by quaternion's scalar component 107 | b = np.cross(q_vec, v) * q_w * 2.0 108 | 109 | # Compute part 'c': Dot product of q_vec and v, scaled by q_vec 110 | dot_product = np.dot(q_vec, v) 111 | c = q_vec * (dot_product * 2.0) 112 | 113 | return a - b + c 114 | 115 | 116 | ### from pytorch3d 117 | 118 | def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor: 119 | """ 120 | Convert rotations given as quaternions to rotation matrices. 121 | 122 | Args: 123 | quaternions: quaternions with real part first, 124 | as tensor of shape (..., 4). 125 | 126 | Returns: 127 | Rotation matrices as tensor of shape (..., 3, 3). 128 | """ 129 | r, i, j, k = torch.unbind(quaternions, -1) 130 | # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. 
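# dividing by the squared norm normalizes on the fly, so non-unit quaternions are handled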
131 | two_s = 2.0 / (quaternions * quaternions).sum(-1) 132 | 133 | o = torch.stack( 134 | ( 135 | 1 - two_s * (j * j + k * k), 136 | two_s * (i * j - k * r), 137 | two_s * (i * k + j * r), 138 | two_s * (i * j + k * r), 139 | 1 - two_s * (i * i + k * k), 140 | two_s * (j * k - i * r), 141 | two_s * (i * k - j * r), 142 | two_s * (j * k + i * r), 143 | 1 - two_s * (i * i + j * j), 144 | ), 145 | -1, 146 | ) 147 | return o.reshape(quaternions.shape[:-1] + (3, 3)) 148 | 149 | def standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor: 150 | """ 151 | Convert a unit quaternion to a standard form: one in which the real 152 | part is non negative. 153 | 154 | Args: 155 | quaternions: Quaternions with real part first, 156 | as tensor of shape (..., 4). 157 | 158 | Returns: 159 | Standardized quaternions as tensor of shape (..., 4). 160 | """ 161 | return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions) 162 | 163 | def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor: 164 | """ 165 | Returns torch.sqrt(torch.max(0, x)) 166 | but with a zero subgradient where x is 0. 167 | """ 168 | ret = torch.zeros_like(x) 169 | positive_mask = x > 0 170 | if torch.is_grad_enabled(): 171 | ret[positive_mask] = torch.sqrt(x[positive_mask]) 172 | else: 173 | ret = torch.where(positive_mask, torch.sqrt(x), ret) 174 | return ret 175 | 176 | 177 | def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor: 178 | """ 179 | Convert rotations given as rotation matrices to quaternions. 180 | 181 | Args: 182 | matrix: Rotation matrices as tensor of shape (..., 3, 3). 183 | 184 | Returns: 185 | quaternions with real part first, as tensor of shape (..., 4). 186 | """ 187 | if matrix.size(-1) != 3 or matrix.size(-2) != 3: 188 | raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.") 189 | 190 | batch_dim = matrix.shape[:-2] 191 | m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind( 192 | matrix.reshape(batch_dim + (9,)), dim=-1 193 | ) 194 | 195 | q_abs = _sqrt_positive_part( 196 | torch.stack( 197 | [ 198 | 1.0 + m00 + m11 + m22, 199 | 1.0 + m00 - m11 - m22, 200 | 1.0 - m00 + m11 - m22, 201 | 1.0 - m00 - m11 + m22, 202 | ], 203 | dim=-1, 204 | ) 205 | ) 206 | 207 | # we produce the desired quaternion multiplied by each of r, i, j, k 208 | quat_by_rijk = torch.stack( 209 | [ 210 | # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and 211 | # `int`. 212 | torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1), 213 | # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and 214 | # `int`. 215 | torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1), 216 | # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and 217 | # `int`. 218 | torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1), 219 | # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and 220 | # `int`. 221 | torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1), 222 | ], 223 | dim=-2, 224 | ) 225 | 226 | # We floor here at 0.1 but the exact level is not important; if q_abs is small, 227 | # the candidate won't be picked. 
228 | flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device) 229 | quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr)) 230 | 231 | # if not for numerical problems, quat_candidates[i] should be same (up to a sign), 232 | # forall i; we pick the best-conditioned one (with the largest denominator) 233 | out = quat_candidates[ 234 | F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, : 235 | ].reshape(batch_dim + (4,)) 236 | return standardize_quaternion(out) 237 | 238 | def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor: 239 | """ 240 | Converts 6D rotation representation by Zhou et al. [1] to rotation matrix 241 | using Gram--Schmidt orthogonalization per Section B of [1]. 242 | Args: 243 | d6: 6D rotation representation, of size (*, 6) 244 | 245 | Returns: 246 | batch of rotation matrices of size (*, 3, 3) 247 | 248 | [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. 249 | On the Continuity of Rotation Representations in Neural Networks. 250 | IEEE Conference on Computer Vision and Pattern Recognition, 2019. 251 | Retrieved from http://arxiv.org/abs/1812.07035 252 | """ 253 | 254 | a1, a2 = d6[..., :3], d6[..., 3:] 255 | b1 = F.normalize(a1, dim=-1) 256 | b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1 257 | b2 = F.normalize(b2, dim=-1) 258 | b3 = torch.cross(b1, b2, dim=-1) 259 | return torch.stack((b1, b2, b3), dim=-2) 260 | 261 | 262 | def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor: 263 | """ 264 | Converts rotation matrices to 6D rotation representation by Zhou et al. [1] 265 | by dropping the last row. Note that 6D representation is not unique. 266 | Args: 267 | matrix: batch of rotation matrices of size (*, 3, 3) 268 | 269 | Returns: 270 | 6D rotation representation, of size (*, 6) 271 | 272 | [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. 273 | On the Continuity of Rotation Representations in Neural Networks. 274 | IEEE Conference on Computer Vision and Pattern Recognition, 2019. 
275 | Retrieved from http://arxiv.org/abs/1812.07035 276 | """ 277 | batch_dim = matrix.size()[:-2] 278 | return matrix[..., :2, :].clone().reshape(batch_dim + (6,)) 279 | 280 | 281 | def quaternion_to_rotation_6d(quaternions): 282 | rot_mat = quaternion_to_matrix(quaternions) 283 | rot_6d = matrix_to_rotation_6d(rot_mat) 284 | return rot_6d 285 | 286 | def rotation_6d_to_quaternion(d6): 287 | rot_mat = rotation_6d_to_matrix(d6) 288 | quat = matrix_to_quaternion(rot_mat) 289 | return quat -------------------------------------------------------------------------------- /humanoid_teleoperation/scripts/sensor_offset.json: -------------------------------------------------------------------------------- 1 | { 2 | "192.168.137.170": 227.3510742, 3 | "192.168.137.171": 110.4345703, 4 | "192.168.137.172": 293.1152344, 5 | "192.168.137.173": 108.3691406, 6 | "192.168.137.174": 264.4189453, 7 | "192.168.137.175": 31.33300781, 8 | "192.168.137.150": 191.6894531, 9 | "192.168.137.151": 328.3813477, 10 | "192.168.137.152": 156.5991211, 11 | "192.168.137.153": 110.3466797, 12 | "192.168.137.154": 185.1635742, 13 | "192.168.137.155": 47.94433594, 14 | "192.168.137.190": 276.1743164, 15 | "192.168.137.191": 251.5649414, 16 | "192.168.137.192": 343.6523438 17 | } -------------------------------------------------------------------------------- /humanoid_teleoperation/start_robo.sh: -------------------------------------------------------------------------------- 1 | cd teleop-zenoh 2 | 3 | python run_server.py config_GR1_T2.yaml --verbose -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/README.md: -------------------------------------------------------------------------------- 1 | ## Usage 2 | 3 | 1. Install RBDL 4 | 5 | 2. `cd teleop-zenoh` 6 | 7 | 3. run `python set_env.py`, copy the output, and run it in your shell 8 | 9 | 4. In the same shell, run: 10 | 11 | ```bash 12 | python run_server.py config_GR1_T2.yaml 13 | ``` 14 | 15 | 5. In another shell, run `python teleop.py` 16 | 17 | 6. Enjoy. 
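As context for step 5: the teleop loop is gated by a finger-snap gesture detected from the Vision Pro hand stream. A minimal sketch of that gating, using `SnapMonitor` from `gesture.py` and the `avp_stream` streamer (the IP address is a placeholder, and the loop body is illustrative only):

```python
from avp_stream import VisionProStreamer
from gesture import SnapMonitor

stream = VisionProStreamer(ip="192.168.31.27", record=True)  # placeholder IP
monitor = SnapMonitor()  # left/right snap detectors

teleop_active = False
while True:
    r = stream.latest  # dict with 'left_fingers' / 'right_fingers' finger poses
    if r is None:
        continue
    monitor.update(r)
    if monitor.left.snap_detected:  # a left-hand snap toggles teleop on/off
        teleop_active = not teleop_active
        print("teleop:", "on" if teleop_active else "off")
```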
-------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/arm_control/FK/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/arm_control/FK/__init__.py -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/arm_control/FK/fkpytest.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | from ctypes import POINTER, Structure, c_double, c_void_p 3 | import os 4 | 5 | class PositionQuaternion(Structure): 6 | _fields_ = [ 7 | ("pos", c_double * 3), # x, y, z 8 | ("quat", c_double * 4) # w, x, y, z 9 | ] 10 | 11 | 12 | class FKResult(Structure): 13 | _fields_ = [ 14 | ("left_fk", PositionQuaternion), 15 | ("right_fk", PositionQuaternion), 16 | ("left_fk_elb", PositionQuaternion), 17 | ("right_fk_elb", PositionQuaternion) 18 | ] 19 | 20 | 21 | class GR1_ArmFk: 22 | def __init__(self): 23 | so_path = os.path.dirname(__file__) + "/libGR1_ArmFk.so" 24 | 25 | self.lib = ctypes.CDLL(so_path) 26 | 27 | self.lib.GR1_ArmFk_new.restype = c_void_p 28 | self.lib.GR1_ArmFk_delete.argtypes = [c_void_p] 29 | self.lib.GR1_ArmFk_calculateFK.argtypes = [ 30 | c_void_p, POINTER(c_double), POINTER(FKResult)] 31 | self.obj = self.lib.GR1_ArmFk_new() 32 | 33 | def __del__(self): 34 | self.lib.GR1_ArmFk_delete(self.obj) 35 | 36 | def calculateFK(self, motor_positions): 37 | if len(motor_positions) != 14: 38 | raise ValueError("motor_positions must be of length 14") 39 | 40 | motor_positions_array = (c_double * 14)(*motor_positions) 41 | result = FKResult() 42 | 43 | self.lib.GR1_ArmFk_calculateFK( 44 | self.obj, motor_positions_array, ctypes.byref(result)) 45 | 46 | return result 47 | 48 | 49 | if __name__ == "__main__": 50 | arm_fk = GR1_ArmFk() 51 | 52 | motor_positions = [0.0] * 14 53 | result = arm_fk.calculateFK(motor_positions) 54 | 55 | print( 56 | f"Left FK Position: {result.left_fk.pos[:]}, Quaternion: {result.left_fk.quat[:]}") 57 | print( 58 | f"Right FK Position: {result.right_fk.pos[:]}, Quaternion: {result.right_fk.quat[:]}") 59 | print( 60 | f"Left Elbow FK Position: {result.left_fk_elb.pos[:]}, Quaternion: {result.left_fk_elb.quat[:]}") 61 | print( 62 | f"Right Elbow FK Position: {result.right_fk_elb.pos[:]}, Quaternion: {result.right_fk_elb.quat[:]}") 63 | -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/arm_control/FK/libGR1_ArmFk.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/arm_control/FK/libGR1_ArmFk.so -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/arm_control/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/arm_control/__init__.py -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/arm_control/libkdl_parser.so: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/arm_control/libkdl_parser.so -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/arm_control/liborocos-kdl.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/arm_control/liborocos-kdl.so -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/arm_control/liborocos-kdl.so.1.5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/arm_control/liborocos-kdl.so.1.5 -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/arm_control/liborocos-kdl.so.1.5.1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/arm_control/liborocos-kdl.so.1.5.1 -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/arm_control/liburdfdom_model.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/arm_control/liburdfdom_model.so -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/arm_control/liburdfdom_model.so.1.0: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/arm_control/liburdfdom_model.so.1.0 -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/communication.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import json 3 | import time 4 | import struct 5 | from typing import Literal 6 | from robot_rcs_gr.sdk import ControlGroup, RobotClient 7 | import numpy as np 8 | 9 | from rocs_client import Human 10 | 11 | from avp_stream import VisionProStreamer 12 | from pico_streamer import PicoStreamer 13 | # Logger().state = Logger().STATE_OFF 14 | 15 | 16 | class UDPServer: 17 | def __init__(self, server_ip, server_port): 18 | # set the UDP server address and port 19 | self.server = (server_ip, server_port) 20 | self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 21 | self.socket.settimeout(0.1) 22 | 23 | def send(self, control_cmd): 24 | self.socket.sendto( 25 | bytes(json.dumps(control_cmd), "utf-8"), self.server) 26 | 27 | def send_raw(self, control_cmd: bytes): 28 | self.socket.sendto( 29 | control_cmd, self.server) 30 | 31 | 32 | class UpperBodyCommunication: 33 | def __init__(self, freq=120): 34 | self.client = RobotClient(freq) 35 | time.sleep(0.5) 36 | self.client.set_enable(True) 37 | time.sleep(0.5) 38 | 
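# apply the default PD gains before any joint targets are streamed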
self.set_gains() 39 | 40 | def set_gains(self): 41 | kps = np.array([ 42 | # left leg 43 | 0.875, 0.426, 0.875, 0.875, 0.416, 0.416, 44 | # right leg 45 | 0.875, 0.426, 0.875, 0.875, 0.416, 0.416, 46 | # waist 47 | 0.25, 0.25, 0.25, 48 | # head 49 | 0.25, 0.25, 0.2, 50 | # left arm 51 | 0.2, 0.2, 0.2, 0.2, 0.2, 0.35, 0.35, 52 | # right arm 53 | 0.2, 0.2, 0.2, 0.2, 0.2, 0.35, 0.35, 54 | ]) 55 | kds = np.array([ 56 | # left leg 57 | 0.023, 0.017, 0.365, 0.365, 0.007, 0.007, 58 | # right leg 59 | 0.023, 0.017, 0.365, 0.365, 0.007, 0.007, 60 | # waist 61 | 0.14, 0.14, 0.14, 62 | # head 63 | 0.04, 0.04, 0.005, 64 | # left arm 65 | 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 66 | # right arm 67 | 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 68 | ]) 69 | 70 | self.client.set_gains(kps, kds) 71 | 72 | # TODO 73 | # def init_set_pos(self, q_arms): 74 | # self.client.move_joints( 75 | # ControlGroup.UPPER, np.degrees(q_arms), 2.0, blocking=True) 76 | 77 | def init_set_pos(self, q_arm): 78 | assert q_arm.shape[0] == 14 79 | lower_body = [0.0] * 12 80 | waist_head = [0.0] * 6 81 | joint_target_position = waist_head + q_arm.tolist() 82 | joint_measured_position = self.get_pos() 83 | traj_len = 20 84 | for i in range(traj_len): 85 | cur_target = joint_measured_position * (1 - i / traj_len) + np.array(joint_target_position) * (i / traj_len) 86 | self.set_pos(cur_target) 87 | time.sleep(0.05) 88 | 89 | def set_pos(self, q_total): 90 | # waist 3, head 3, arm 14 91 | assert q_total.shape[0] == 20 92 | q_total = np.degrees(q_total) 93 | lower_body = [0.0] * 12 94 | joint_target_position = lower_body + q_total.tolist() 95 | self.client._publish("joint", joint_target_position) 96 | 97 | def get_pos(self): 98 | joint_measured_position = self.client.joint_positions[12:32].copy() 99 | joint_measured_position = np.radians(joint_measured_position) 100 | return joint_measured_position 101 | 102 | 103 | class VRCommunication: 104 | def __init__(self, record=False, latency=0): 105 | AVP_IP = "192.168.31.27" 106 | print(f"Vision Pro init {AVP_IP}") 107 | self.avp_ip = AVP_IP 108 | self.s = VisionProStreamer(ip=self.avp_ip, record=record) 109 | self.latency = latency 110 | 111 | if record == False and latency > 0: 112 | raise NotImplementedError 113 | 114 | 115 | def get_data(self): 116 | if self.latency == 0: 117 | return self.s.latest 118 | else: 119 | if len(self.s.recording) < self.latency: 120 | return self.s.recording[0] 121 | else: 122 | return self.s.recording[-(self.latency+1)] 123 | 124 | 125 | class HandCommunication: 126 | 127 | def __init__(self, stupid=False): 128 | if stupid: 129 | self._left_hand_server_ip = '192.168.137.39' 130 | self._right_hand_server_ip = '192.168.137.19' 131 | else: 132 | self._left_hand_server_ip = '192.168.137.19' 133 | self._right_hand_server_ip = '192.168.137.39' 134 | self._server_port = 2333 135 | self.left_hand_udp_server = UDPServer( 136 | self._left_hand_server_ip, self._server_port) 137 | self.right_hand_udp_server = UDPServer( 138 | self._right_hand_server_ip, self._server_port) 139 | self.servers = { 140 | 'left': self.left_hand_udp_server, 141 | 'right': self.right_hand_udp_server 142 | } 143 | 144 | def _angle_set(self, id, angles): 145 | send_data = bytearray() 146 | send_data.append(0xEB) # packet header 147 | send_data.append(0x90) # packet header 148 | send_data.append(id) # dexterous hand ID 149 | send_data.append(0x0F) # length of this frame's data section: 12 + 3 150 | send_data.append(0x12) # write-register command flag 151 | send_data.append(0xCE) # low byte of the register start address 152 | send_data.append(0x05) # high byte of the register start address 153 | 154 | # Append val1 to val6 as 
little-endian 155 | for angle in angles: 156 | angle = int(angle) 157 | send_data.append(angle & 0xFF) 158 | send_data.append((angle >> 8) & 0xFF) 159 | 160 | # Calculate checksum 161 | checksum = sum(send_data[2:19]) & 0xFF 162 | send_data.append(checksum) 163 | 164 | return send_data 165 | 166 | def get_angle(self, side: str, id: int): 167 | send_data = bytearray() 168 | send_data.append(0xEB) # packet header 169 | send_data.append(0x90) # packet header 170 | send_data.append(int(id)) 171 | send_data.append(0x04) 172 | send_data.append(0x11) # kCmd_Handg3_Read 173 | send_data.append(0x0a) 174 | send_data.append(0x06) 175 | send_data.append(0x0c) 176 | 177 | checksum = sum(send_data[2:8]) & 0xFF 178 | send_data.append(checksum) 179 | 180 | server = self.servers[side] 181 | 182 | server.send_raw(send_data) 183 | try: 184 | data, addr = server.socket.recvfrom(1024) 185 | received_checksum = data[19] 186 | calculated_checksum = sum(data[2:19]) & 0xFF 187 | 188 | if received_checksum != calculated_checksum: 189 | raise ValueError("Checksum mismatch") 190 | 191 | pos = [ 192 | data[7] | (data[8] << 8), 193 | data[9] | (data[10] << 8), 194 | data[11] | (data[12] << 8), 195 | data[13] | (data[14] << 8), 196 | data[15] | (data[16] << 8), 197 | data[17] | (data[18] << 8) 198 | ] 199 | 200 | return pos 201 | 202 | except Exception as e: 203 | print(e) 204 | return None 205 | 206 | def send_hand_cmd(self, left_hand_angles, right_hand_angles): 207 | # NOTE: de-normalize from [0, 1] back to device units (0-1000) 208 | left_hand_angles = left_hand_angles.copy() 209 | right_hand_angles = right_hand_angles.copy() 210 | left_hand_angles *= 1000. 211 | right_hand_angles *= 1000. 212 | id = 1 213 | left_cmd = self._angle_set(id, left_hand_angles) 214 | self.left_hand_udp_server.send_raw(left_cmd) 215 | try: 216 | _, _ = self.left_hand_udp_server.socket.recvfrom(1024) 217 | except: 218 | pass 219 | 220 | right_cmd = self._angle_set(id, right_hand_angles) 221 | self.right_hand_udp_server.send_raw(right_cmd) 222 | try: 223 | _, _ = self.right_hand_udp_server.socket.recvfrom(1024) 224 | except: 225 | pass 226 | 227 | def get_qpos(self): 228 | left_hand_joint = np.array(self.get_angle('left', 1)) 229 | right_hand_joint = np.array(self.get_angle('right', 1)) 230 | # left_hand_joint = right_hand_joint 231 | # print(left_hand_joint,right_hand_joint) 232 | hand_joint = np.concatenate((left_hand_joint, right_hand_joint)) / 1000. 
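# raw device readings are 0-1000; the division above normalizes them to [0, 1]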
233 | return hand_joint 234 | 235 | -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/config_GR1_T2.yaml: -------------------------------------------------------------------------------- 1 | device_connected: true 2 | cpu: "X64" 3 | system: "LINUX" 4 | robot: 5 | name: "GR1" 6 | mechanism: "T2" 7 | control_period: 0.01 8 | input_and_calculate_period: 0 9 | 10 | # right now, the state estimator is not very accurate; we suggest disabling it 11 | # unless you know what you are doing 12 | state_estimator: 13 | enable: false 14 | path: "/state_estimator/model_gr1_t2.json" 15 | 16 | comm: 17 | enable: false 18 | use_raw: false 19 | use_json: false 20 | use_dds: false 21 | use_sim: false 22 | 23 | operator: 24 | use_joystick: false 25 | joystick_connected: false 26 | joystick_type: "XBOX" # supports "XBOX", "PS4", "PS5" 27 | use_keyboard: false 28 | keyboard_connected: false 29 | 30 | ota: 31 | enable: false 32 | 33 | hardware: 34 | use_can: false 35 | use_ethernet: false 36 | use_etherbus: false 37 | use_fi_fse: true 38 | use_fi_fsa: true 39 | 40 | sensor_usb_imu: 41 | usb: [ 42 | "/dev/ttyUSB0" 43 | ] 44 | comm_enable: [ 45 | false 46 | ] 47 | comm_frequency: [ 48 | 500 49 | ] 50 | 51 | sensor_abs_encoder: 52 | type: "FIFSEV1" 53 | ip: [ 54 | # left leg 55 | "192.168.137.170", "192.168.137.171", "192.168.137.172", "192.168.137.173", "192.168.137.174", "192.168.137.175", 56 | # right leg 57 | "192.168.137.150", "192.168.137.151", "192.168.137.152", "192.168.137.153", "192.168.137.154", "192.168.137.155", 58 | # waist 59 | "192.168.137.190", "192.168.137.191", "192.168.137.192", 60 | ] 61 | data_path: "sensor_offset.json" 62 | 63 | fi_fse: 64 | version: "v1" 65 | 66 | fi_fsa: 67 | version: "v1" 68 | debug: false 69 | blocking: false 70 | timeout: 0.01 71 | port_ctrl: 2333 72 | port_comm: 2334 73 | port_fast: 2335 74 | network: "192.168.137.255" 75 | concurrency: true 76 | 77 | actuator: 78 | type: "FIFSAV1" 79 | use_dic: false 80 | names: [ 81 | "l_hip_roll", "l_hip_yaw", "l_hip_pitch", "l_knee_pitch", "l_ankle_pitch", "l_ankle_roll", 82 | "r_hip_roll", "r_hip_yaw", "r_hip_pitch", "r_knee_pitch", "r_ankle_pitch", "r_ankle_roll", 83 | "joint_waist_yaw", "joint_waist_pitch", "joint_waist_roll", 84 | "joint_head_pitch", "joint_head_roll", "joint_head_yaw", 85 | "l_shoulder_pitch", "l_shoulder_roll", "l_shoulder_yaw", "l_elbow_pitch", "l_wrist_yaw", "l_wrist_roll", "l_wrist_pitch", 86 | "r_shoulder_pitch", "r_shoulder_roll", "r_shoulder_yaw", "r_elbow_pitch", "r_wrist_yaw", "r_wrist_roll", "r_wrist_pitch", 87 | ] 88 | ip: [ 89 | # left leg 90 | "192.168.137.70", "192.168.137.71", "192.168.137.72", "192.168.137.73", "192.168.137.74", "192.168.137.75", 91 | # right leg 92 | "192.168.137.50", "192.168.137.51", "192.168.137.52", "192.168.137.53", "192.168.137.54", "192.168.137.55", 93 | # waist 94 | "192.168.137.90", "192.168.137.91", "192.168.137.92", 95 | # head 96 | "192.168.137.93", "192.168.137.94", "192.168.137.95", 97 | # left arm 98 | "192.168.137.10" , "192.168.137.11" , "192.168.137.12" , "192.168.137.13" , "192.168.137.14" , "192.168.137.15" , "192.168.137.16", 99 | # right arm 100 | "192.168.137.30" , "192.168.137.31" , "192.168.137.32" , "192.168.137.33" , "192.168.137.34" , "192.168.137.35" , "192.168.137.36", 101 | ] 102 | comm_enable: [ 103 | # left leg 104 | false, false, false, false, false, false, 105 | # right leg 106 | false, false, false, false, false, false, 107 | # waist 108 | true, true, true, 109 | # head 110 | 
true, true, true, 111 | # left arm 112 | true, true, true, true, true, true, true, 113 | # right arm 114 | true, true, true, true, true, true, true, 115 | ] 116 | comm_use_fast: [ 117 | # left leg 118 | true, true, true, true, true, true, 119 | # right leg 120 | true, true, true, true, true, true, 121 | # waist 122 | true, true, true, 123 | # head 124 | true, true, true, 125 | # left arm 126 | true, true, true, true, true, true, true, 127 | # right arm 128 | true, true, true, true, true, true, true, 129 | ] 130 | -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/example.cpython-311-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/example.cpython-311-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/filter.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: WANG Wenhao 3 | Date: 2024-06-29 4 | Version: 1.0.0 5 | copyright (c) 2024 All Rights Reserved 6 | """ 7 | 8 | import math 9 | import numpy as np 10 | 11 | 12 | def smoothing_factor(t_e, cutoff): 13 | r = 2 * math.pi * cutoff * t_e 14 | return r / (r + 1) 15 | 16 | 17 | def exponential_smoothing(a, x, x_prev): 18 | return a * x + (1 - a) * x_prev 19 | 20 | 21 | class OneEuroFilter: 22 | def __init__(self, t0, x0, dx0=None, min_cutoff=1.0, beta=0.0, d_cutoff=1.0): 23 | """Initialize the one euro filter for a 14-dimensional numpy array.""" 24 | self.min_cutoff = float(min_cutoff) 25 | self.beta = float(beta) 26 | self.d_cutoff = float(d_cutoff) 27 | 28 | # Ensure x0 is a numpy array 29 | self.x_prev = np.array(x0, dtype=float) 30 | 31 | # If dx0 is not provided, initialize it as zeros with the same shape as x0 32 | if dx0 is None: 33 | self.dx_prev = np.zeros_like(self.x_prev) 34 | else: 35 | self.dx_prev = np.array(dx0, dtype=float) 36 | 37 | self.t_prev = float(t0) 38 | 39 | def __call__(self, t, x): 40 | """Compute the filtered signal for a 14-dimensional numpy array.""" 41 | t_e = t - self.t_prev 42 | 43 | # The filtered derivative of the signal 44 | a_d = smoothing_factor(t_e, self.d_cutoff) 45 | dx = (x - self.x_prev) / t_e 46 | dx_hat = exponential_smoothing(a_d, dx, self.dx_prev) 47 | 48 | # The filtered signal 49 | cutoff = self.min_cutoff + self.beta * np.abs(dx_hat) 50 | a = smoothing_factor(t_e, cutoff) 51 | x_hat = exponential_smoothing(a, x, self.x_prev) 52 | 53 | # Memorize the previous values 54 | self.x_prev = x_hat 55 | self.dx_prev = dx_hat 56 | self.t_prev = t 57 | 58 | return x_hat 59 | 60 | 61 | class AverageFilter: 62 | def __init__(self, window_size, dimensions): 63 | self.window_size = window_size 64 | self.dimensions = dimensions 65 | self.values = np.zeros((0, dimensions)) 66 | self.current_sum = np.zeros(dimensions) 67 | 68 | def add_value(self, value): 69 | if len(value) != self.dimensions: 70 | raise ValueError( 71 | "Input value must have the same number of dimensions as specified" 72 | ) 73 | 74 | # Append the new value to the values array 75 | self.values = np.vstack((self.values, value)) 76 | self.current_sum += value 77 | 78 | # If the length exceeds the window size, subtract the oldest value and remove it 79 | if self.values.shape[0] > self.window_size: 80 | self.current_sum -= self.values[0] 81 | 
self.values = self.values[1:] 82 | 83 | def get_average(self): 84 | if self.values.shape[0] == 0: 85 | return np.zeros(self.dimensions) 86 | return self.current_sum / self.values.shape[0] 87 | -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/gesture.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: WANG Wenhao 3 | Date: 2024-06-29 4 | Version: 1.0.0 5 | copyright (c) 2024 All Rights Reserved 6 | """ 7 | 8 | from scipy.spatial.transform import Rotation 9 | import numpy as np 10 | import time 11 | 12 | 13 | class SnapMonitorOneSide: 14 | def __init__(self, chirality=None): 15 | self.hand_side = chirality 16 | 17 | self.snap_detected = False 18 | self.last_snap_time = 0 # time of the last detected finger snap 19 | self.prev_distance13 = None 20 | self.prev_distance03 = None 21 | self.prev_movement1 = None 22 | self.last_update_time = 0 # time of the last update 23 | 24 | # detection thresholds 25 | self.cooldown = 1.1 # cooldown time, in seconds 26 | # indices 0 1 2 3 4 5 refer to the wrist, thumb, index, middle, ring and little finger 27 | # "13" denotes the relative speed between the thumb tip and the middle-finger tip 28 | self.threshold_speed13 = 0.20 # tune as needed 29 | self.threshold_speed03 = 0.20 # tune as needed 30 | self.threshold_speed1 = 0.30 # tune as needed 31 | 32 | def update(self, fingers): 33 | current_time = time.time() 34 | time_interval = current_time - self.last_update_time 35 | if time_interval < 0.1: 36 | self.snap_detected = False 37 | return 38 | 39 | self.last_update_time = current_time 40 | 41 | thumbTipPos = fingers[4, :3, 3] 42 | indexTipPos = fingers[9, :3, 3] 43 | middleTipPos = fingers[14, :3, 3] 44 | distance13 = np.linalg.norm(thumbTipPos - middleTipPos) 45 | distance03 = np.linalg.norm(middleTipPos) 46 | if self.hand_side == 'left': 47 | movement1 = thumbTipPos[0] - thumbTipPos[1] - thumbTipPos[2] 48 | if self.hand_side == 'right': 49 | movement1 = -thumbTipPos[0] + thumbTipPos[1] + thumbTipPos[2] 50 | 51 | # initialize the previous values 52 | if self.prev_distance13 is None: 53 | self.prev_distance13 = distance13 54 | self.prev_distance03 = distance03 55 | self.prev_movement1 = movement1 56 | return 57 | 58 | # compute speeds 59 | speed13 = (distance13 - self.prev_distance13)/time_interval 60 | speed03 = (self.prev_distance03 - distance03)/time_interval 61 | speed1 = (movement1 - self.prev_movement1)/time_interval 62 | distance23 = np.linalg.norm(indexTipPos - middleTipPos) 63 | 64 | # decide whether a finger snap happened 65 | # distance23 > 0.02 66 | if (speed13 > self.threshold_speed13 and 67 | speed03 > self.threshold_speed03 and 68 | speed1 > self.threshold_speed1): 69 | if current_time - self.last_snap_time > self.cooldown: 70 | self.snap_detected = True 71 | self.last_snap_time = current_time 72 | else: 73 | self.snap_detected = False 74 | 75 | # update the previous values 76 | self.prev_distance13 = distance13 77 | self.prev_distance03 = distance03 78 | self.prev_movement1 = movement1 79 | 80 | 81 | # call: snap_monitor = SnapMonitor() 82 | # flag: snap_monitor.left.snap_detected or snap_monitor.right.snap_detected 83 | class SnapMonitor: 84 | def __init__(self): 85 | self.left = SnapMonitorOneSide(chirality='left') 86 | self.right = SnapMonitorOneSide(chirality='right') 87 | 88 | def update(self, r): 89 | self.left.update(r['left_fingers']) 90 | self.right.update(r['right_fingers']) 91 | 92 | 93 | # NOTE: not yet debugged 94 | class SteeringMonitor: 95 | def __init__(self): 96 | pass 97 | # may need a nonlinear mapping, tuned to the operator's body shape 98 | 99 | def euler2control(self, roll, pitch, yaw): 100 | max_velocity_x = 1.0 101 | max_velocity_y = 1.0 102 | 103 | # Normalize the angles (assuming roll and pitch are in degrees) 104 | norm_roll
= roll / 90.0 # Normalize to range [-1, 1] 105 | norm_pitch = pitch / 90.0 # Normalize to range [-1, 1] 106 | norm_yaw = yaw / 90.0 # Normalize to range [-1, 1] 107 | 108 | # Calculate velocity components 109 | vx = max_velocity_x * -norm_pitch # Forward/backward movement 110 | vy = max_velocity_y * -norm_roll # Side-to-side movement 111 | 112 | heading_angle = norm_yaw 113 | 114 | return (vx, vy), heading_angle 115 | 116 | def control_from_wrist(self, wrist, chirality=None): 117 | R = wrist[:3, :3] 118 | 119 | if chirality == 'left': 120 | R_default = np.array([ 121 | [0, 0, -1], 122 | [1, 0, 0], 123 | [0, -1, 0] 124 | ]) 125 | elif chirality == 'right': 126 | R_default = np.array([ 127 | [0, 0, -1], 128 | [-1, 0, 0], 129 | [0, 1, 0] 130 | ]) 131 | 132 | R = R @ R_default.T 133 | R = Rotation.from_matrix(R) 134 | roll, pitch, yaw = R.as_euler('yxz', degrees=True) 135 | 136 | v, heading = self.euler2control(roll, pitch, yaw) 137 | 138 | return v, heading 139 | -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/gripper.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Fourier Intelligence 2 | # Author: Yuxiang Gao 3 | from __future__ import annotations 4 | 5 | import json 6 | import socket 7 | import struct 8 | from dataclasses import dataclass, field 9 | from enum import IntEnum, StrEnum 10 | import time 11 | import traceback 12 | from typing import Literal, TypeAlias 13 | 14 | RequestMethodType: TypeAlias = Literal["GET", "SET"] 15 | 16 | 17 | def pascal_to_snake(name: str) -> str: 18 | return "".join(["_" + i.lower() if i.isupper() else i for i in name]).lstrip("_") 19 | 20 | 21 | def is_pascal_case(name: str) -> bool: 22 | return name[0].isupper() and name[1].islower() and "_" not in name 23 | 24 | 25 | def is_snake_case(name: str) -> bool: 26 | return name.islower() and "_" in name 27 | 28 | 29 | class FourierActuatorUDPPort(IntEnum): 30 | """Fourier UDP ports""" 31 | 32 | COMM = 2333 33 | CTRL = 2334 34 | FAST = 2335 35 | 36 | 37 | class FourierEncoderUDPPort(IntEnum): 38 | """Fourier UDP ports""" 39 | 40 | COMM = 2334 41 | CTRL = 2333 42 | FAST = 2335 43 | 44 | 45 | FourierUDPPort: TypeAlias = FourierEncoderUDPPort | FourierActuatorUDPPort 46 | 47 | 48 | @dataclass 49 | class FIProtocol: 50 | """Helper class for building requests to FSA servers 51 | 52 | Args: 53 | method (RequestMethodType): GET or SET 54 | path (str): request target path. This key will be renamed to `reqTarget` during serialization. 55 | prop (str | None): Optional prop arg. This key will be renamed to `property` during serialization. (This may be moved into `data`, given its limited usage.) 56 | data (dict | None): Additional data for the request. During serialization, the content will be attached to the parent object.
57 | """ 58 | 59 | method: RequestMethodType 60 | path: str = field(default_factory=str) 61 | prop: str = field(default_factory=str) 62 | data: dict = field(default_factory=dict) 63 | 64 | def serialize_model(self): 65 | res = {"method": self.method, "reqTarget": self.path, "property": ""} 66 | # if self.prop is not None: 67 | # res["property"] = self.prop 68 | if self.data is not None: 69 | res.update(self.data) 70 | return res 71 | 72 | def encode(self): 73 | return json.dumps(self.serialize_model()).encode() 74 | 75 | 76 | class FIFastIdentifier(IntEnum): 77 | # WATCHDOG = 0xFF 78 | ENABLE = 0x01 79 | DISABLE = 0x02 80 | CLEAR_FAULT = 0x03 81 | MODE_POSITION = 0x04 82 | MODE_VELOCITY = 0x05 83 | MODE_TORQUE = 0x06 84 | MODE_CURRENT = 0x07 85 | MODE_PD = 0x09 86 | SET_POSITION = 0x0A # ">Bfff" 87 | SET_VELOCITY = 0x0B # ">Bff" 88 | SET_TORQUE = 0x0C # ">Bf" 89 | SET_CURRENT = 0x0D # ">Bf" 90 | SET_PD = 0x0E # ">Bf" 91 | GET_PVC = 0x1A # ">Bfff" 92 | # GET_PVCT = 0x1D # ">Bffff" 93 | GET_ERROR = 0x1B # ">Bi" 94 | 95 | 96 | @dataclass 97 | class FIFastProtocol: 98 | ident: FIFastIdentifier 99 | payload: list[int | float] = field(default_factory=list) 100 | timestamp: float | None = None 101 | 102 | def encode(self): 103 | format_ = ">B" 104 | for i in self.payload: 105 | if isinstance(i, int): 106 | format_ += "i" 107 | elif isinstance(i, float): 108 | format_ += "f" 109 | else: 110 | raise ValueError(f"Invalid type {type(i)} for payload") 111 | return struct.pack(format_, self.ident, *self.payload) 112 | 113 | @classmethod 114 | def from_bytes(cls, data: bytes, ts: float | None = None) -> FIFastProtocol: 115 | try: 116 | ident = FIFastIdentifier(data[0]) 117 | except ValueError as err: 118 | raise ValueError(f"Invalid identifier {data[0]} for payload") from err 119 | 120 | format_ = ">B" 121 | if ident == FIFastIdentifier.GET_PVC: 122 | format_ += "fff" 123 | # elif ident == FIFastIdentifier.GET_PVCT: 124 | # format_ += "ffff" 125 | elif ident == FIFastIdentifier.GET_ERROR: 126 | format_ += "i" 127 | else: 128 | raise ValueError(f"Invalid identifier {ident} for payload") 129 | 130 | payload = struct.unpack(format_, data[: 1 + (len(format_) - 2) * 4]) 131 | # for i in payload: 132 | # if not isinstance(i, int | float): 133 | # raise ValueError(f"Invalid type {type(i)} for payload") 134 | return cls(ident=ident, payload=list(payload[1:]), timestamp=ts) 135 | 136 | 137 | 138 | class Gripper: 139 | def __init__(self, side: Literal["left", "right"]): 140 | self.side = side 141 | self.enabled = False 142 | self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 143 | self.sock.settimeout(1/30) 144 | match self.side: 145 | case "left": 146 | self.addr = ("192.168.137.17", FourierActuatorUDPPort.FAST) 147 | case "right": 148 | self.addr = ("192.168.137.37", FourierActuatorUDPPort.FAST) 149 | case _: 150 | raise ValueError(f"Invalid side {side}") 151 | 152 | 153 | def init(self): 154 | self.sock.sendto( 155 | FIFastProtocol(FIFastIdentifier.ENABLE).encode(), self.addr) 156 | self.enabled = True 157 | self.close() 158 | time.sleep(1.0) 159 | self.reboot() 160 | self.enabled = False 161 | time.sleep(1) 162 | print("gripper init done.") 163 | 164 | def disable(self): 165 | self.sock.sendto( 166 | FIFastProtocol(FIFastIdentifier.DISABLE).encode(), self.addr) 167 | self.enabled = False 168 | 169 | def read(self): 170 | self.sock.sendto( 171 | FIFastProtocol(FIFastIdentifier.GET_PVC).encode(), self.addr) 172 | try: 173 | data, _ = self.sock.recvfrom(1024) 174 | decoded_data = 
FIFastProtocol.from_bytes(data) 175 | position, velocity, current = decoded_data.payload 176 | return position, velocity, current 177 | except TimeoutError: # no reply within the socket timeout (1/30 s) 178 | return None 179 | 180 | except Exception as ex: 181 | traceback.print_exc() 182 | 183 | def open(self, pos=50.0): 184 | if not self.enabled: 185 | self.sock.sendto( 186 | FIFastProtocol(FIFastIdentifier.ENABLE).encode(), self.addr) 187 | self.enabled = True 188 | self.sock.sendto( 189 | FIFastProtocol(FIFastIdentifier.MODE_POSITION).encode(), self.addr) 190 | self.sock.sendto( 191 | FIFastProtocol(FIFastIdentifier.SET_POSITION, [float(pos), 0.0, 0.0]).encode(), self.addr) 192 | 193 | def close(self, torque=-1.0): 194 | if not self.enabled: 195 | self.sock.sendto( 196 | FIFastProtocol(FIFastIdentifier.ENABLE).encode(), self.addr) 197 | self.enabled = True 198 | self.sock.sendto( 199 | FIFastProtocol(FIFastIdentifier.MODE_TORQUE).encode(), self.addr) 200 | self.sock.sendto( 201 | FIFastProtocol(FIFastIdentifier.SET_TORQUE, [float(torque)]).encode(), self.addr) 202 | 203 | 204 | def reboot(self): 205 | data = FIProtocol(method="SET", path="/reboot") 206 | self.sock.sendto(data.encode(), (self.addr[0], FourierActuatorUDPPort.CTRL)) -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/libs/robot_rcs-0.4.0.11-cp311-cp311-manylinux_2_30_x86_64.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/libs/robot_rcs-0.4.0.11-cp311-cp311-manylinux_2_30_x86_64.whl -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/libs/robot_rcs_gr-1.9.1.10-cp311-cp311-manylinux_2_30_x86_64.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/libs/robot_rcs_gr-1.9.1.10-cp311-cp311-manylinux_2_30_x86_64.whl -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/pico_streamer.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: WANG Wenhao 3 | Date: 2024-06-29 4 | Version: 1.0.0 5 | copyright (c) 2024 All Rights Reserved 6 | """ 7 | 8 | import socket 9 | import json 10 | from threading import Thread 11 | import numpy as np 12 | import quaternionic 13 | 14 | 15 | YUP2ZUP = np.array([[1, 0, 0, 0], 16 | [0, 0, 1, 0], 17 | [0, 1, 0, 1.70], 18 | [0, 0, 0, 1]], dtype=np.float64) 19 | 20 | T_leftWrist_pico = np.array([ 21 | [-1, 0, 0, 0], 22 | [0, -1, 0, 0], 23 | [0, 0, -1, 0], 24 | [0, 0, 0, 1] 25 | ]) 26 | T_leftElbow_pico = np.array([ 27 | [1, 0, 0, 0], 28 | [0, -1, 0, 0], 29 | [0, 0, 1, 0], 30 | [0, 0, 0, 1] 31 | ]) 32 | T_rightWrist_pico = np.array([ 33 | [-1, 0, 0, 0], 34 | [0, 1, 0, 0], 35 | [0, 0, 1, 0], 36 | [0, 0, 0, 1] 37 | ]) 38 | T_rightElbow_pico = np.array([ 39 | [1, 0, 0, 0], 40 | [0, 1, 0, 0], 41 | [0, 0, -1, 0], 42 | [0, 0, 0, 1] 43 | ]) 44 | 45 | 46 | def msg2transform(msg): 47 | T = np.eye(4) 48 | T[0:3, 3] = np.array(msg[0:3]) 49 | T[0:3, 0:3] = quaternionic.array(msg[3:7]).to_rotation_matrix 50 | return T 51 | 52 | 53 | class PicoStreamer: 54 | 55 | def __init__(self, ip, record=False): 56 | 57 | # local IP address to bind the UDP listener to 58 | self.ip
= ip 59 | self.record = record 60 | self.recording = [] 61 | self.latest = None 62 | self.axis_transform = YUP2ZUP 63 | self.start_streaming() 64 | 65 | def start_streaming(self): 66 | 67 | self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 68 | self.sock.bind((self.ip, 8005)) 69 | stream_thread = Thread(target=self.stream) 70 | stream_thread.start() 71 | while self.latest is None: 72 | pass 73 | print(' == DATA IS FLOWING IN! ==') 74 | print('Ready to start streaming.') 75 | 76 | def stream(self): 77 | while True: 78 | try: 79 | print("\nWaiting to receive message...") 80 | data, address = self.sock.recvfrom(4096) 81 | data_str = data.decode() 82 | t = json.loads(data_str) 83 | r = { 84 | 'left_wrist': self.axis_transform @ msg2transform(t['left_wrist']) @ T_leftWrist_pico, 85 | 'left_elbow': self.axis_transform @ msg2transform(t['left_elbow']) @ T_leftElbow_pico, 86 | 'right_wrist': self.axis_transform @ msg2transform(t['right_wrist']) @ T_rightWrist_pico, 87 | 'right_elbow': self.axis_transform @ msg2transform(t['right_elbow']) @ T_rightElbow_pico, 88 | } 89 | # json.loads(data_str)['left_wrist'] = (x y z) (w x y z) 90 | r['left_forearmWrist'] = np.eye(4) 91 | r['left_forearmWrist'][0:3, 0:3] = r["left_elbow"][0:3, 0:3] 92 | r['left_forearmWrist'][0:3, 3] = r['left_wrist'][0:3, 3] 93 | r['right_forearmWrist'] = np.eye(4) 94 | r['right_forearmWrist'][0:3, 0:3] = r["right_elbow"][0:3, 0:3] 95 | r['right_forearmWrist'][0:3, 3] = r['right_wrist'][0:3, 3] 96 | print(r) 97 | if self.record: 98 | self.recording.append(r) 99 | self.latest = r 100 | 101 | except Exception as e: 102 | print(f"An error occurred: {e}") 103 | pass 104 | 105 | def get_latest(self): 106 | return self.latest 107 | 108 | def get_recording(self): 109 | return self.recording 110 | 111 | 112 | if __name__ == "__main__": 113 | 114 | streamer = PicoStreamer(ip='0.0.0.0') 115 | while True: 116 | latest = streamer.get_latest() 117 | print(latest) 118 | -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/run_server.py: -------------------------------------------------------------------------------- 1 | import typer 2 | from robot_rcs_gr.sdk.server import RobotServer 3 | 4 | 5 | def main( 6 | config: str, 7 | freq: int = typer.Option(500, help="Main loop frequency in hz. 
Defaults to 500 Hz."), 8 | debug_interval: int = typer.Option(0, help="Debug loop print interval"), 9 | verbose: bool = typer.Option(False, help="Print internal debug info"), 10 | ): 11 | if not verbose: 12 | from robot_rcs.logger.fi_logger import Logger 13 | 14 | Logger().state = Logger().STATE_OFF 15 | 16 | robot = RobotServer(config, freq=freq, debug_print_interval=debug_interval, visualize=False) 17 | robot.spin() 18 | 19 | 20 | if __name__ == "__main__": 21 | typer.run(main) 22 | -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/sensor_offset.json: -------------------------------------------------------------------------------- 1 | { 2 | "192.168.137.170": 93.62548828, 3 | "192.168.137.171": 226.9116211, 4 | "192.168.137.172": 357.8686523, 5 | "192.168.137.173": 145.8984375, 6 | "192.168.137.174": 162.1362305, 7 | "192.168.137.175": 286.4794922, 8 | "192.168.137.150": 247.8955078, 9 | "192.168.137.151": 24.34570313, 10 | "192.168.137.152": 235.8544922, 11 | "192.168.137.153": 238.9746094, 12 | "192.168.137.154": 108.4570313, 13 | "192.168.137.155": 105.7324219, 14 | "192.168.137.190": 302.5195313, 15 | "192.168.137.191": 78.15673828, 16 | "192.168.137.192": 265.0341797 17 | } -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/set_env.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | ld_library_path = ( 4 | os.path.dirname(__file__) 5 | + "/arm_control:" 6 | + os.path.dirname(__file__) 7 | + "/arm_control/FK:" 8 | + "/lib/x86_64-linux-gnu" + ":" 9 | + "/usr/local/lib" 10 | # + os.environ["LD_LIBRARY_PATH"] 11 | + ":$LD_LIBRARY_PATH" 12 | ) 13 | 14 | print(f"export LD_LIBRARY_PATH={ld_library_path}") -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/base.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/base.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/head_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/head_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/head_roll.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/head_roll.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/head_yaw.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/head_yaw.STL --------------------------------------------------------------------------------
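Stepping back from the mesh listing for a moment: the `sensor_offset.json` file above is what `config_GR1_T2.yaml` references through `sensor_abs_encoder.data_path`, so each key is the IP of one absolute encoder (left leg, right leg, waist) and each value is that encoder's stored zero offset. A minimal sketch of how such a table might be consumed; the degree unit, the wrap into [0, 360), and the `raw_readings` dict are assumptions for illustration, not part of the repository:

```python
import json

# Load the calibration table: encoder IP -> zero offset
# (assumed to be in degrees; the values range up to ~358, but the
# unit is not stated anywhere in this repository).
with open("sensor_offset.json") as f:
    offsets = json.load(f)

# Hypothetical raw absolute-encoder readings, keyed by the same IPs.
raw_readings = {"192.168.137.170": 100.0, "192.168.137.171": 230.0}

# Subtract each encoder's stored offset and wrap into [0, 360).
calibrated = {ip: (raw - offsets[ip]) % 360.0 for ip, raw in raw_readings.items()}
print(calibrated)
```

Relatedly, `run_server.py` above is a typer CLI with `config` as its only positional argument, so the expected invocation would be along the lines of `python run_server.py config_GR1_T2.yaml --freq 500`.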
/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_foot_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_foot_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_foot_roll.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_foot_roll.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_hand_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_hand_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_hand_roll.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_hand_roll.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_hand_yaw.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_hand_yaw.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_lower_arm_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_lower_arm_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_shank_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_shank_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_thigh_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_thigh_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_thigh_roll.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_thigh_roll.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_thigh_yaw.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_thigh_yaw.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_upper_arm_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_upper_arm_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_upper_arm_roll.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_upper_arm_roll.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_upper_arm_yaw.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/l_upper_arm_yaw.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_foot_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_foot_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_foot_roll.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_foot_roll.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_hand_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_hand_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_hand_roll.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_hand_roll.STL 
-------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_hand_yaw.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_hand_yaw.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_lower_arm_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_lower_arm_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_shank_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_shank_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_thigh_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_thigh_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_thigh_roll.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_thigh_roll.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_thigh_yaw.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_thigh_yaw.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_upper_arm_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_upper_arm_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_upper_arm_roll.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_upper_arm_roll.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_upper_arm_yaw.STL: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/r_upper_arm_yaw.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/waist_pitch.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/waist_pitch.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/waist_roll.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/waist_roll.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/waist_yaw.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T1/meshes/waist_yaw.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm1.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm1.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm2.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm2.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm3.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm3.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm4.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm4.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm5.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm5.STL 
-------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm6.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm6.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm7.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Larm7.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg1.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg1.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg2.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg2.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg3.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg3.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg4.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg4.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg5.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg5.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg6.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Lleg6.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm1.STL: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm1.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm2.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm2.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm3.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm3.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm4.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm4.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm5.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm5.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm6.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm6.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm7.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rarm7.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg1.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg1.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg2.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg2.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg3.STL: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg3.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg4.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg4.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg5.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg5.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg6.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/Rleg6.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/base.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/base.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/head1.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/head1.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/head2.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/head2.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/head3.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/head3.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/torso.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/torso.STL -------------------------------------------------------------------------------- 
/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/waist1.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/waist1.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/waist2.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/waist2.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/waist3.STL: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/humanoid_teleoperation/teleop-zenoh/urdf/GR1T2/meshes/waist3.STL -------------------------------------------------------------------------------- /humanoid_teleoperation/teleop-zenoh/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import time 3 | import rerun as rr 4 | import quaternionic 5 | from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor 6 | 7 | 8 | def log_hands(r): 9 | left_fingers = r["left_wrist"][0] @ r["left_fingers"] 10 | right_fingers = r["right_wrist"][0] @ r["right_fingers"] 11 | 12 | start = time.perf_counter() 13 | ex = ThreadPoolExecutor(max_workers=16) 14 | # ex.submit( 15 | # rr.log, 16 | # "hand_tracking/left_hand", 17 | # rr.Points3D( 18 | # left_fingers[:, 0:3, 3] 19 | # ), 20 | # ) 21 | # ex.submit( 22 | # rr.log, 23 | # "hand_tracking/right_hand", 24 | # rr.Points3D( 25 | # right_fingers[:, 0:3, 3] 26 | # ), 27 | # ) 28 | 29 | left_finger_quats = [] 30 | 31 | for i in range(left_fingers.shape[0]): 32 | left_finger_quats.append( 33 | quaternionic.array.from_rotation_matrix(left_fingers[i][0:3, 0:3]) 34 | ) 35 | 36 | ex.submit( 37 | rr.log, 38 | "hand_tracking/left_hand", 39 | rr.Boxes3D( 40 | centers=left_fingers[:, 0:3, 3], 41 | half_sizes=np.ones((left_fingers.shape[0], 3)) * 0.001, 42 | rotations=left_finger_quats, 43 | ), 44 | ) 45 | 46 | right_finger_quats = [] 47 | 48 | for i in range(right_fingers.shape[0]): 49 | right_finger_quats.append( 50 | quaternionic.array.from_rotation_matrix(right_fingers[i][0:3, 0:3]) 51 | ) 52 | 53 | ex.submit( 54 | rr.log, 55 | "hand_tracking/right_hand", 56 | rr.Boxes3D( 57 | centers=right_fingers[:, 0:3, 3], 58 | half_sizes=np.ones((right_fingers.shape[0], 3)) * 0.001, 59 | rotations=right_finger_quats, 60 | ), 61 | ) 62 | # ex.submit( 63 | # rr.log, 64 | # "hand_tracking/left_hand_root", 65 | # rr.Transform3D( 66 | # translation=r['left_wrist'][0][0:3, 3], 67 | # mat3x3=r['left_wrist'][0][0:3, 0:3] 68 | # ), 69 | # ) 70 | # for i in range(left_fingers.shape[0]): 71 | # ex.submit( 72 | # rr.log, 73 | # f"hand_tracking/left_finger_{i}", 74 | # rr.Transform3D( 75 | # translation=left_fingers[i][0:3, 3], 76 | # mat3x3=left_fingers[i][0:3, 0:3] 77 | # ), 78 | # ) 79 | 80 | # ex.submit( 81 | # rr.log, 82 | # "hand_tracking/right_hand_root", 83 | # rr.Transform3D( 84 | # translation=r['right_wrist'][0][0:3, 3], 85 | # mat3x3=r['right_wrist'][0][0:3, 0:3] 86 | # ), 87 | # ) 88 | # for i in 
range(right_fingers.shape[0]): 89 | # ex.submit( 90 | # rr.log, 91 | # f"hand_tracking/right_finger_{i}", 92 | # rr.Transform3D( 93 | # translation=right_fingers[i][0:3, 3], 94 | # mat3x3=right_fingers[i][0:3, 0:3] 95 | # ), 96 | # ) 97 | ex.shutdown(wait=False) 98 | 99 | print(f"log time: {time.perf_counter() - start:.5f}") 100 | 101 | 102 | def calculate_angle_between_vectors(v1, v2): 103 | assert type(v1).__module__ == np.__name__ 104 | assert type(v2).__module__ == np.__name__ 105 | assert v1.shape == (3,) 106 | assert v2.shape == (3,) 107 | 108 | cos_theta = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)) 109 | cos_theta = np.clip(cos_theta, -1.0, 1.0) 110 | angle_radians = np.arccos(cos_theta) 111 | return angle_radians 112 | 113 | 114 | # counterclockwise angle from vector_a to vector_b 115 | def calculate_counterclockwise_angle(v1, v2): 116 | assert type(v1).__module__ == np.__name__ 117 | assert type(v2).__module__ == np.__name__ 118 | assert v1.shape == (3,) 119 | assert v2.shape == (3,) 120 | 121 | dot_product = np.dot(v1, v2) 122 | cross_product = np.cross(v1, v2) 123 | magnitude_cross = np.linalg.norm(cross_product) 124 | angle_radians = np.arctan2(magnitude_cross, dot_product) 125 | if cross_product[2] < 0: 126 | angle_radians = -angle_radians 127 | 128 | angle_degrees = np.degrees(angle_radians) 129 | 130 | return angle_degrees 131 | -------------------------------------------------------------------------------- /vision_pro_app/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/.DS_Store -------------------------------------------------------------------------------- /vision_pro_app/.gitignore: -------------------------------------------------------------------------------- 1 | /*.egg-info 2 | /dist 3 | *.pyc -------------------------------------------------------------------------------- /vision_pro_app/CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 3 | authors: 4 | - family-names: "Park" 5 | given-names: "Younghyo" 6 | orcid: "https://orcid.org/0000-0000-0000-0000" 7 | title: "Teleoperation System using Apple Vision Pro" 8 | version: 0.1.0 9 | url: "https://github.com/Improbable-AI/VisionProTeleop" -------------------------------------------------------------------------------- /vision_pro_app/README.md: -------------------------------------------------------------------------------- 1 | # Apple Vision Pro APP for Humanoid Manipulation 2 | 3 | This app is part of the project [Generalizable Humanoid Manipulation with Improved 3D Diffusion Policy](https://humanoid-manipulation.github.io). 4 | 5 | 6 | 7 | # Install on your Vision Pro 8 | 9 | ## Prerequisites 10 | 11 | - Mac with Xcode 15 [[Install here]](https://apps.apple.com/us/app/xcode/id497799835?mt=12/) 12 | - Apple Vision Pro [[Buy Here]](https://www.apple.com/apple-vision-pro/) 13 | - Apple Developer Program Account [[Register Here]](https://developer.apple.com/) 14 | - Vision Pro Developer Strap [[Buy Here]](https://developer.apple.com/visionos/developer-strap/) 15 | 16 | 17 | ## Installing the App 18 | 19 | 20 | ### Step 1. Open Xcode Project 21 | 22 | After you have `git clone`d the project, double-click `Tracking Streamer.xcodeproj`. This will automatically open up Xcode, where you can install the app.
23 | 24 | 25 | ### Step 2. Register your developer account 26 | 27 | ![](https://share.cleanshot.com/r2fj3GXg+) 28 | 29 | On the left side bar, click on `VisionProTeleop`. Under Targets - Signing & Capabilities - Team, click `Add an Account` and follow the steps to add your Apple Developer Account. Then, change the `Bundle Identifier` to a unique name of your own. 30 | 31 | ### Step 3. Connect Apple Vision Pro to your Mac 32 | 33 | Using the Vision Pro [Developer Strap](https://developer.apple.com/visionos/developer-strap/), connect your Apple Vision Pro to your Mac. When you first connect, your Mac will take some time to configure things. When things are ready, you will see Apple Vision Pro appear as one of the available installation destinations. 34 | 35 | ![](https://share.cleanshot.com/knZJsbZB+) 36 | 37 | Before starting to install, change the IP address to the IP of your computer/robot, the one connected to the camera. For example, my robot's IP is "192.168.31.157", so I need to change every "192.168.3.26" to "192.168.31.157". 38 | 39 | ![change_ip](assets/change_ip.png) 40 | 41 | Then, click the `play` button to start installing. If everything goes well, you will see "Build Succeeded". 42 | 43 | ### Step 4. Enjoy the App! 44 | 45 | Once you've successfully installed the App, you can disconnect the Vision Pro from your Mac, and use it as a standalone device/app without the Mac. You will see an App Icon on your home screen. Click on the icon to run the app and start streaming. 46 | 47 | 48 | 49 | 50 | 51 | # Local Vision Streaming 52 | 53 | Apple does not allow WebXR on non-https connections. To test the application locally, we need to create a self-signed certificate and install it on the client. You need an Ubuntu machine and a router. Connect the Vision Pro and the Ubuntu machine to the same router. 54 | 1. Install mkcert: https://github.com/FiloSottile/mkcert 55 | 2. Check the local IP address: 56 | 57 | ``` 58 | ifconfig | grep inet 59 | ``` 60 | Suppose the local IP address of the Ubuntu machine is `192.168.31.157`. 61 | 62 | 3. Create the certificate: 63 | 64 | ``` 65 | mkcert -install && mkcert -cert-file cert.pem -key-file key.pem 192.168.31.157 localhost 127.0.0.1 66 | ``` 67 | 68 | 4. Open the firewall on the server: 69 | ``` 70 | sudo iptables -A INPUT -p tcp --dport 8012 -j ACCEPT 71 | sudo iptables-save 72 | sudo iptables -L 73 | ``` 74 | or, equivalently, with `ufw`: 75 | ``` 76 | sudo ufw allow 8012 77 | ``` 78 | Note: You should put `cert.pem` and `key.pem` in the folder of the real robot (`real_robot/scripts`). A Python usage would look like: 79 | ```python 80 | self.app = Vuer(host='0.0.0.0', cert="./cert.pem", key="./key.pem") 81 | ``` 82 | 83 | 5. Install the CA certificate on the Vision Pro: 84 | ``` 85 | mkcert -CAROOT 86 | ``` 87 | Copy the rootCA.pem to the Vision Pro via AirDrop and install it. 88 | 89 | Settings > General > About > Certificate Trust Settings. Under "Enable full trust for root certificates", turn on trust for the certificate. 90 | 91 | Settings > Apps > Safari > Advanced > Feature Flags > Enable All WebXR Related Features 92 | 93 | 6. Open Safari on the Vision Pro and go to `https://192.168.31.157:8012?ws=wss://192.168.31.157:8012` 94 | 95 | 7. Click `Enter VR` and `Allow` to start the VR session. 96 | 97 | 98 | 99 | # Usage 100 | Before using the app, you should make sure the program is running on the robot side. Please refer to the real robot part of HUMANI. 101 | 102 | 1. Enter the app.
99 | # Usage 100 | Before using the app, make sure the program is running on the robot side. Please refer to the real-robot part of HUMANI. 101 | 102 | 1. Enter the app. Ensure your robot (together with the camera) is on the same local network as your AVP. 103 | 104 | 2. Remember to press the home button to adjust the coordinate system. 105 | 3. Snap your left hand to start the teleoperation. 106 | 4. End the teleoperation by snapping your left hand again. 107 | 108 | 109 | 110 | ![home_button](assets/home_button.png) 111 | 112 | 113 | 114 | # Acknowledgment 115 | 116 | The app is mainly built upon [VisionProTeleop](https://github.com/Improbable-AI/VisionProTeleop) and [TeleVision](https://github.com/OpenTeleVision/TeleVision). If you have further questions, please contact Wenhao Wang at wwenhao@seas.upenn.edu. 117 | 118 | # License 119 | 120 | [VisionProTeleop](https://github.com/Improbable-AI/VisionProTeleop) is under the MIT license. [TeleVision](https://github.com/OpenTeleVision/TeleVision) is under the Apache License 2.0. This project is under the MIT license. 121 | 122 | 123 | 124 | 125 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer.xcodeproj/project.xcworkspace/contents.xcworkspacedata: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <Workspace 3 |    version = "1.0"> 4 |    <FileRef 5 |       location = "self:"> 6 |    </FileRef> 7 | </Workspace> 8 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> 3 | <plist version="1.0"> 4 | <dict> 5 | 	<key>IDEDidComputeMac32BitWarning</key> 6 | 	<true/> 7 | </dict> 8 | </plist> 9 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved: -------------------------------------------------------------------------------- 1 | { 2 | "originHash" : "b0d8975aa19f1bb7106ae78f24de3911e84189892afc3b6fe6db19bcec9e631c", 3 | "pins" : [ 4 | { 5 | "identity" : "grpc-swift", 6 | "kind" : "remoteSourceControl", 7 | "location" : "https://github.com/grpc/grpc-swift.git", 8 | "state" : { 9 | "revision" : "5d0cf1c7b2e641e3a8961089e7e4672f4fe8abac", 10 | "version" : "1.21.1" 11 | } 12 | }, 13 | { 14 | "identity" : "swift-atomics", 15 | "kind" : "remoteSourceControl", 16 | "location" : "https://github.com/apple/swift-atomics.git", 17 | "state" : { 18 | "revision" : "cd142fd2f64be2100422d658e7411e39489da985", 19 | "version" : "1.2.0" 20 | } 21 | }, 22 | { 23 | "identity" : "swift-collections", 24 | "kind" : "remoteSourceControl", 25 | "location" : "https://github.com/apple/swift-collections.git", 26 | "state" : { 27 | "revision" : "94cf62b3ba8d4bed62680a282d4c25f9c63c2efb", 28 | "version" : "1.1.0" 29 | } 30 | }, 31 | { 32 | "identity" : "swift-http-types", 33 | "kind" : "remoteSourceControl", 34 | "location" : "https://github.com/apple/swift-http-types", 35 | "state" : { 36 | "revision" : "12358d55a3824bd5fed310b999ea8cf83a9a1a65", 37 | "version" : "1.0.3" 38 | } 39 | }, 40 | { 41 | "identity" : "swift-log", 42 | "kind" : "remoteSourceControl", 43 | "location" : "https://github.com/apple/swift-log.git", 44 | "state" : { 45 | "revision" : "e97a6fcb1ab07462881ac165fdbb37f067e205d5", 46 | "version" : "1.5.4" 47 | } 48 | }, 49 | { 50 | "identity" : "swift-nio", 51 | "kind" : "remoteSourceControl", 52 | "location" : "https://github.com/apple/swift-nio.git", 53 | "state" : { 54 | "revision" : "635b2589494c97e48c62514bc8b37ced762e0a62", 55 | "version" : "2.63.0" 56 | } 57 | }, 58 | { 59 | "identity" : "swift-nio-extras", 60 | "kind" :
"remoteSourceControl", 61 | "location" : "https://github.com/apple/swift-nio-extras.git", 62 | "state" : { 63 | "revision" : "363da63c1966405764f380c627409b2f9d9e710b", 64 | "version" : "1.21.0" 65 | } 66 | }, 67 | { 68 | "identity" : "swift-nio-http2", 69 | "kind" : "remoteSourceControl", 70 | "location" : "https://github.com/apple/swift-nio-http2.git", 71 | "state" : { 72 | "revision" : "0904bf0feb5122b7e5c3f15db7df0eabe623dd87", 73 | "version" : "1.30.0" 74 | } 75 | }, 76 | { 77 | "identity" : "swift-nio-ssl", 78 | "kind" : "remoteSourceControl", 79 | "location" : "https://github.com/apple/swift-nio-ssl.git", 80 | "state" : { 81 | "revision" : "7c381eb6083542b124a6c18fae742f55001dc2b5", 82 | "version" : "2.26.0" 83 | } 84 | }, 85 | { 86 | "identity" : "swift-nio-transport-services", 87 | "kind" : "remoteSourceControl", 88 | "location" : "https://github.com/apple/swift-nio-transport-services.git", 89 | "state" : { 90 | "revision" : "6cbe0ed2b394f21ab0d46b9f0c50c6be964968ce", 91 | "version" : "1.20.1" 92 | } 93 | }, 94 | { 95 | "identity" : "swift-protobuf", 96 | "kind" : "remoteSourceControl", 97 | "location" : "https://github.com/apple/swift-protobuf.git", 98 | "state" : { 99 | "revision" : "65e8f29b2d63c4e38e736b25c27b83e012159be8", 100 | "version" : "1.25.2" 101 | } 102 | }, 103 | { 104 | "identity" : "swift-system", 105 | "kind" : "remoteSourceControl", 106 | "location" : "https://github.com/apple/swift-system.git", 107 | "state" : { 108 | "revision" : "025bcb1165deab2e20d4eaba79967ce73013f496", 109 | "version" : "1.2.1" 110 | } 111 | } 112 | ], 113 | "version" : 3 114 | } 115 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer.xcodeproj/project.xcworkspace/xcuserdata/fftai.xcuserdatad/UserInterfaceState.xcuserstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer.xcodeproj/project.xcworkspace/xcuserdata/fftai.xcuserdatad/UserInterfaceState.xcuserstate -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer.xcodeproj/project.xcworkspace/xcuserdata/yhpark.xcuserdatad/UserInterfaceState.xcuserstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer.xcodeproj/project.xcworkspace/xcuserdata/yhpark.xcuserdatad/UserInterfaceState.xcuserstate -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer.xcodeproj/project.xcworkspace/xcuserdata/zeyanjie.xcuserdatad/UserInterfaceState.xcuserstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer.xcodeproj/project.xcworkspace/xcuserdata/zeyanjie.xcuserdatad/UserInterfaceState.xcuserstate -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer.xcodeproj/xcshareddata/xcschemes/VisionProTeleop.xcscheme: -------------------------------------------------------------------------------- 1 | 2 | 5 | 9 | 10 | 16 | 22 | 23 | 24 | 25 | 26 | 32 | 33 | 43 | 45 | 51 | 52 | 53 | 54 | 60 | 62 | 68 | 69 | 70 | 71 | 73 | 74 
| 77 | 78 | 79 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer.xcodeproj/xcuserdata/account2309.xcuserdatad/xcschemes/xcschememanagement.plist: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> 3 | <plist version="1.0"> 4 | <dict> 5 | 	<key>SchemeUserState</key> 6 | 	<dict> 7 | 		<key>HandsWidth.xcscheme_^#shared#^_</key> 8 | 		<dict> 9 | 			<key>orderHint</key> 10 | 			<integer>0</integer> 11 | 		</dict> 12 | 	</dict> 13 | 	<key>SuppressBuildableAutocreation</key> 14 | 	<dict> 15 | 		<key>165ADB462B4B71B0008A756F</key> 16 | 		<dict> 17 | 			<key>primary</key> 18 | 			<true/> 19 | 		</dict> 20 | 	</dict> 21 | </dict> 22 | </plist> 23 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer.xcodeproj/xcuserdata/yhpark.xcuserdatad/xcschemes/xcschememanagement.plist: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> 3 | <plist version="1.0"> 4 | <dict> 5 | 	<key>SchemeUserState</key> 6 | 	<dict> 7 | 		<key>VisionProTeleop.xcscheme_^#shared#^_</key> 8 | 		<dict> 9 | 			<key>orderHint</key> 10 | 			<integer>0</integer> 11 | 		</dict> 12 | 	</dict> 13 | 	<key>SuppressBuildableAutocreation</key> 14 | 	<dict> 15 | 		<key>165ADB462B4B71B0008A756F</key> 16 | 		<dict> 17 | 			<key>primary</key> 18 | 			<true/> 19 | 		</dict> 20 | 	</dict> 21 | </dict> 22 | </plist> 23 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/.DS_Store -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/App.swift: -------------------------------------------------------------------------------- 1 | import SwiftUI 2 | 3 | @main 4 | struct VisionProTeleopApp: App { 5 | var body: some Scene { 6 | WindowGroup { 7 | ContentView() 8 | } 9 | .windowResizability(.contentSize) 10 | ImmersiveSpace(id: "immersiveSpace") { 11 | 🌐RealityView(model: 🥽AppModel()) 12 | } 13 | 14 | } 15 | init() { 16 | 🧑HeadTrackingComponent.registerComponent() 17 | 🧑HeadTrackingSystem.registerSystem() 18 | } 19 | } 20 | 21 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/ContentView.swift: -------------------------------------------------------------------------------- 1 | import SwiftUI 2 | import CoreLocation 3 | import UIKit 4 | import SystemConfiguration.CaptiveNetwork 5 | 6 | 7 | struct ContentView: View { 8 | @Environment(\.openImmersiveSpace) var openImmersiveSpace 9 | @Environment(\.dismissWindow) var dismissWindow 10 | var body: some View { 11 | VStack(spacing: 32) { 12 | HStack(spacing: 28) { 13 | Image(.graph2) 14 | .resizable() 15 | .aspectRatio(contentMode: .fit) 16 | .frame(width: 1200) 17 | .clipShape(.rect(cornerRadius: 24)) 18 | } 19 | Text("You're on IP address [\(getIPAddress())]") 20 | .font(.largeTitle.weight(.medium)) 21 | 22 | Button { 23 | Task { 24 | await self.openImmersiveSpace(id: "immersiveSpace") 25 | self.dismissWindow() 26 | } 27 | } label: { 28 | Text("Start") 29 | .font(.largeTitle) 30 | .padding(.vertical, 12) 31 | .padding(.horizontal, 4) 32 | } 33 | 34 | } 35 | .padding(32) 36 | } 37 | } 38 | 39 | func getIPAddress() -> String { 40 | var address: String? 41 | var ifaddr: UnsafeMutablePointer<ifaddrs>?
= nil 42 | if getifaddrs(&ifaddr) == 0 { 43 | var ptr = ifaddr 44 | while ptr != nil { 45 | defer { ptr = ptr?.pointee.ifa_next } 46 | 47 | guard let interface = ptr?.pointee else { break } // break (not return) so ifaddr is still freed below 48 | let addrFamily = interface.ifa_addr.pointee.sa_family 49 | if addrFamily == UInt8(AF_INET) || addrFamily == UInt8(AF_INET6) { 50 | 51 | // wifi = ["en0"] 52 | // wired = ["en2", "en3", "en4"] 53 | // cellular = ["pdp_ip0","pdp_ip1","pdp_ip2","pdp_ip3"] 54 | 55 | let name: String = String(cString: (interface.ifa_name)) 56 | if name == "en0" || name == "en2" || name == "en3" || name == "en4" || name == "pdp_ip0" || name == "pdp_ip1" || name == "pdp_ip2" || name == "pdp_ip3" { 57 | var hostname = [CChar](repeating: 0, count: Int(NI_MAXHOST)) 58 | getnameinfo(interface.ifa_addr, socklen_t((interface.ifa_addr.pointee.sa_len)), &hostname, socklen_t(hostname.count), nil, socklen_t(0), NI_NUMERICHOST) 59 | address = String(cString: hostname) 60 | } 61 | } 62 | } 63 | freeifaddrs(ifaddr) 64 | } 65 | return address ?? "" 66 | } 67 | 68 | 69 | func getWiFiName() -> String? { 70 | var ssid: String? 71 | 72 | if let interfaces = CNCopySupportedInterfaces() as NSArray? { 73 | for interface in interfaces { 74 | if let interfaceInfo = CNCopyCurrentNetworkInfo(interface as! CFString) as NSDictionary? { 75 | ssid = interfaceInfo[kCNNetworkInfoKeySSID as String] as? String 76 | break 77 | } 78 | } 79 | } 80 | 81 | return ssid 82 | } -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting files/.DS_Store -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Archive/DebugView.md: -------------------------------------------------------------------------------- 1 | # 👆DebugView 2 | 3 | import SwiftUI 4 | import RealityKit 5 | import ARKit 6 | 7 | struct 👆DebugView: View { 8 | @EnvironmentObject var model: 📱AppModel 9 | @State private var rootEntity: Entity? 10 | @State private var text: String = "placeholder" 11 | var body: some View { 12 | RealityView { content, _ in 13 | let rootEntity = Entity() 14 | rootEntity.name = "ROOT" 15 | content.add(rootEntity) 16 | self.rootEntity = rootEntity 17 | let entity = Entity() 18 | entity.name = "POINTER" 19 | entity.components.set(📍HeadAnchorComponent()) 20 | entity.components.set(InputTargetComponent()) 21 | entity.components.set(CollisionComponent(shapes: [.generateConvex(from: .generateSphere(radius: 0.1))])) 22 | entity.components.set(ModelComponent(mesh: .generateSphere(radius: 0.02), 23 | materials: [SimpleMaterial(color: .white, isMetallic: false)])) 24 | rootEntity.addChild(entity) 25 | do { 26 | let entity = Entity() 27 | entity.name = "LINE" 28 | entity.components.set(OpacityComponent(opacity: 0.9)) 29 | rootEntity.addChild(entity) 30 | } 31 | } update: { content, attachments in 32 | let entity = attachments.entity(for: "resultLabel")!
33 | entity.components.set(📍HeadAnchorComponent()) 34 | entity.name = "resultLabel" 35 | rootEntity?.addChild(entity) 36 | if let p1 = rootEntity?.findEntity(named: "1")?.position, 37 | let p2 = rootEntity?.findEntity(named: "2")?.position { 38 | entity.position = (p1 + p2) / 2 39 | } 40 | } attachments: { 41 | Attachment(id: "resultLabel") { 42 | Text(self.text) 43 | .font(.system(size: 54).bold()) 44 | .padding(24) 45 | .glassBackgroundEffect() 46 | } 47 | } 48 | .onTapGesture { 49 | self.setPoints() 50 | self.setText() 51 | self.setLine() 52 | } 53 | } 54 | } 55 | 56 | fileprivate extension 👆DebugView { 57 | func setPoints() { 58 | guard let pointer = rootEntity?.findEntity(named: "POINTER") else { return } 59 | if rootEntity?.findEntity(named: "1") == nil { 60 | let entity = Entity() 61 | entity.name = "1" 62 | entity.position = pointer.position 63 | entity.components.set(ModelComponent(mesh: .generateSphere(radius: 0.025), 64 | materials: [SimpleMaterial(color: .red, isMetallic: false)])) 65 | rootEntity?.addChild(entity) 66 | } else { 67 | if let entity2 = rootEntity?.findEntity(named: "2") { 68 | rootEntity?.removeChild(entity2) 69 | } 70 | let entity = Entity() 71 | entity.name = "2" 72 | entity.position = pointer.position 73 | entity.components.set(ModelComponent(mesh: .generateSphere(radius: 0.025), 74 | materials: [SimpleMaterial(color: .green, isMetallic: false)])) 75 | rootEntity?.addChild(entity) 76 | } 77 | } 78 | func setText() { 79 | guard let p1 = rootEntity?.findEntity(named: "1")?.position, 80 | let p2 = rootEntity?.findEntity(named: "2")?.position else { 81 | return 82 | } 83 | let lengthFormatter = LengthFormatter() 84 | lengthFormatter.numberFormatter.maximumFractionDigits = 2 85 | self.text = lengthFormatter.string(fromValue: .init(distance(p1, p2)), unit: .meter) 86 | } 87 | func setLine() { 88 | guard let p1 = rootEntity?.findEntity(named: "1")?.position, 89 | let p2 = rootEntity?.findEntity(named: "2")?.position else { 90 | return 91 | } 92 | if let entity = rootEntity?.findEntity(named: "LINE") { 93 | entity.position = (p1 + p2) / 2 94 | entity.components.set(ModelComponent(mesh: .generateBox(width: 0.01, 95 | height: 0.01, 96 | depth: distance(p1, p2), 97 | cornerRadius: 0.005), 98 | materials: [SimpleMaterial(color: .white, isMetallic: false)])) 99 | entity.look(at: p1, 100 | from: entity.position, 101 | relativeTo: nil) 102 | let occlusionEntity = Entity() 103 | occlusionEntity.components.set(ModelComponent(mesh: .generateSphere(radius: 0.08), 104 | materials: [OcclusionMaterial()])) 105 | entity.addChild(occlusionEntity) 106 | } 107 | } 108 | } 109 | 110 | ## system 111 | 112 | func update(context: SceneUpdateContext) { 113 | guard let deviceAnchor = self.provider.queryDeviceAnchor(atTimestamp: CACurrentMediaTime()) else { 114 | return 115 | } 116 | for entity in context.entities(matching: .init(where: .has(📍HeadAnchorComponent.self)), 117 | updatingSystemWhen: .rendering) { 118 | if entity.name == "resultLabel" { 119 | entity.look(at: Transform(matrix: deviceAnchor.originFromAnchorTransform).translation, 120 | from: entity.position(relativeTo: nil), 121 | relativeTo: nil, 122 | forward: .positiveZ) 123 | } 124 | #if DEBUG 125 | if entity.name == "POINTER" { 126 | entity.transform = Transform(matrix: deviceAnchor.originFromAnchorTransform) 127 | entity.setPosition([0, 0, -1], relativeTo: entity) 128 | } 129 | #endif 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking
Streamer/Supporting files/Archive/HandToGround.md: -------------------------------------------------------------------------------- 1 | # Hand to ground Mode 2 | 3 | ``` 4 | import SwiftUI 5 | 6 | enum 🪄Mode: String { 7 | case handToHand, handToGround 8 | } 9 | 10 | extension 🪄Mode: CaseIterable, Identifiable { 11 | var id: Self { self } 12 | var localizedTitle: LocalizedStringResource { 13 | switch self { 14 | case .handToHand: "Hand to hand" 15 | case .handToGround: "Hand to ground" 16 | } 17 | } 18 | } 19 | ``` 20 | 21 | ``` 22 | @AppStorage("mode") var mode: 🪄Mode = .handToHand 23 | ``` 24 | 25 | ``` 26 | Section { 27 | Picker("Mode", selection: self.$model.mode) { 28 | ForEach(🪄Mode.allCases) { 29 | Text($0.localizedTitle) 30 | } 31 | } 32 | } 33 | ``` 34 | 35 | ``` 36 | let heightLineEntity = Entity() 37 | let groundPointEntity: Entity = { 38 | let radius: Float = 0.03 39 | let value = ModelEntity(mesh: .generateSphere(radius: radius), 40 | materials: [SimpleMaterial(color: .yellow, isMetallic: false)]) 41 | let occlusion = ModelEntity(mesh: .generateCylinder(height: radius, radius: radius), 42 | materials: [OcclusionMaterial()]) 43 | occlusion.position.y -= radius / 2 44 | value.addChild(occlusion) 45 | return value 46 | }() 47 | ``` 48 | 49 | ``` 50 | guard let rightPosition = self.indexTipEntities[.right]?.position else { 51 | assertionFailure(); return 52 | } 53 | self.heightLineEntity.position = (self.groundPointEntity.position + rightPosition) / 2 54 | self.heightLineEntity.components.set( 55 | ModelComponent(mesh: .generateBox(width: 0.01, 56 | height: 0.01, 57 | depth: distance(self.groundPointEntity.position, rightPosition), 58 | cornerRadius: 0.005), 59 | materials: [SimpleMaterial(color: .white, isMetallic: false)]) 60 | ) 61 | self.heightLineEntity.look(at: self.groundPointEntity.position, 62 | from: self.heightLineEntity.position, 63 | relativeTo: nil) 64 | self.heightLineEntity.addChild(ModelEntity(mesh: .generateSphere(radius: 0.08), 65 | materials: [OcclusionMaterial()])) 66 | ``` 67 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/.DS_Store -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/AccentColor.colorset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "colors" : [ 3 | { 4 | "idiom" : "universal" 5 | }, 6 | { 7 | "appearances" : [ 8 | { 9 | "appearance" : "luminosity", 10 | "value" : "dark" 11 | } 12 | ], 13 | "idiom" : "universal" 14 | } 15 | ], 16 | "info" : { 17 | "author" : "xcode", 18 | "version" : 1 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/AppIcon.solidimagestack/Back.solidimagestacklayer/Content.imageset/Background.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting 
files/Assets.xcassets/AppIcon.solidimagestack/Back.solidimagestacklayer/Content.imageset/Background.png -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/AppIcon.solidimagestack/Back.solidimagestacklayer/Content.imageset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "images" : [ 3 | { 4 | "filename" : "Background.png", 5 | "idiom" : "vision", 6 | "scale" : "2x" 7 | } 8 | ], 9 | "info" : { 10 | "author" : "xcode", 11 | "version" : 1 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/AppIcon.solidimagestack/Back.solidimagestacklayer/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "info" : { 3 | "author" : "xcode", 4 | "version" : 1 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/AppIcon.solidimagestack/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "info" : { 3 | "author" : "xcode", 4 | "version" : 1 5 | }, 6 | "layers" : [ 7 | { 8 | "filename" : "Front.solidimagestacklayer" 9 | }, 10 | { 11 | "filename" : "Middle.solidimagestacklayer" 12 | }, 13 | { 14 | "filename" : "Back.solidimagestacklayer" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/AppIcon.solidimagestack/Front.solidimagestacklayer/Content.imageset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "images" : [ 3 | { 4 | "filename" : "lab_logo_inverted.png", 5 | "idiom" : "vision", 6 | "scale" : "2x" 7 | } 8 | ], 9 | "info" : { 10 | "author" : "xcode", 11 | "version" : 1 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/AppIcon.solidimagestack/Front.solidimagestacklayer/Content.imageset/lab_logo_inverted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/AppIcon.solidimagestack/Front.solidimagestacklayer/Content.imageset/lab_logo_inverted.png -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/AppIcon.solidimagestack/Front.solidimagestacklayer/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "info" : { 3 | "author" : "xcode", 4 | "version" : 1 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/AppIcon.solidimagestack/Middle.solidimagestacklayer/Content.imageset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "images" : [ 3 | { 4 | "idiom" : "vision", 5 | "scale" : "2x" 6 | } 7 | ], 8 | "info" : { 9 | "author" : "xcode", 10 | "version" : 1 11 | } 12 | } 13 | 
-------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/AppIcon.solidimagestack/Middle.solidimagestacklayer/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "info" : { 3 | "author" : "xcode", 4 | "version" : 1 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "info" : { 3 | "author" : "xcode", 4 | "version" : 1 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/Image.imageset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "images" : [ 3 | { 4 | "idiom" : "universal", 5 | "scale" : "1x" 6 | }, 7 | { 8 | "idiom" : "universal", 9 | "scale" : "2x" 10 | }, 11 | { 12 | "idiom" : "universal", 13 | "scale" : "3x" 14 | }, 15 | { 16 | "idiom" : "vision", 17 | "scale" : "2x" 18 | } 19 | ], 20 | "info" : { 21 | "author" : "xcode", 22 | "version" : 1 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/graph2.imageset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "images" : [ 3 | { 4 | "filename" : "diagram_visionpro.png", 5 | "idiom" : "universal" 6 | } 7 | ], 8 | "info" : { 9 | "author" : "xcode", 10 | "version" : 1 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/graph2.imageset/diagram_visionpro.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/graph2.imageset/diagram_visionpro.png -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/lab_logo_inverted.imageset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "images" : [ 3 | { 4 | "filename" : "lab_logo_inverted.png", 5 | "idiom" : "universal", 6 | "scale" : "1x" 7 | }, 8 | { 9 | "idiom" : "universal", 10 | "scale" : "2x" 11 | }, 12 | { 13 | "idiom" : "universal", 14 | "scale" : "3x" 15 | } 16 | ], 17 | "info" : { 18 | "author" : "xcode", 19 | "version" : 1 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/lab_logo_inverted.imageset/lab_logo_inverted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting files/Assets.xcassets/lab_logo_inverted.imageset/lab_logo_inverted.png -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Info.plist: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | ITSAppUsesNonExemptEncryption 6 | 7 | NSHandsTrackingUsageDescription 8 | This app tracks your hand to teleoperate a robot. 9 | NSWorldSensingUsageDescription 10 | This app senses your surrounding world to reconstruct a simulation. 11 | UIApplicationSceneManifest 12 | 13 | UIApplicationPreferredDefaultSceneSessionRole 14 | UIWindowSceneSessionRoleApplication 15 | UIApplicationSupportsMultipleScenes 16 | 17 | UISceneConfigurations 18 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/InfoPlist.xcstrings: -------------------------------------------------------------------------------- 1 | { 2 | "sourceLanguage" : "en", 3 | "strings" : { 4 | "CFBundleDisplayName" : { 5 | "comment" : "Bundle display name", 6 | "extractionState" : "extracted_with_value", 7 | "localizations" : { 8 | "en" : { 9 | "stringUnit" : { 10 | "state" : "new", 11 | "value" : "ZYJ Teleop" 12 | } 13 | } 14 | } 15 | }, 16 | "CFBundleName" : { 17 | "comment" : "Bundle name", 18 | "extractionState" : "extracted_with_value", 19 | "localizations" : { 20 | "en" : { 21 | "stringUnit" : { 22 | "state" : "new", 23 | "value" : "Tracking Streamer" 24 | } 25 | }, 26 | "ja" : { 27 | "stringUnit" : { 28 | "state" : "needs_review", 29 | "value" : "手がメジャー" 30 | } 31 | } 32 | } 33 | } 34 | }, 35 | "version" : "1.0" 36 | } -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/Localizable.xcstrings: -------------------------------------------------------------------------------- 1 | { 2 | "sourceLanguage" : "en", 3 | "strings" : { 4 | "About" : { 5 | "extractionState" : "stale", 6 | "localizations" : { 7 | "ja" : { 8 | "stringUnit" : { 9 | "state" : "translated", 10 | "value" : "アプリについて" 11 | } 12 | } 13 | } 14 | }, 15 | "Exit" : { 16 | "localizations" : { 17 | "ja" : { 18 | "stringUnit" : { 19 | "state" : "translated", 20 | "value" : "終了" 21 | } 22 | } 23 | } 24 | }, 25 | "Fix / Unfix a pointer by indirect tap." : { 26 | "extractionState" : "stale", 27 | "localizations" : { 28 | "ja" : { 29 | "stringUnit" : { 30 | "state" : "translated", 31 | "value" : "間接タップでポインターを固定できます。" 32 | } 33 | } 34 | } 35 | }, 36 | "Hand tracking authorization:" : { 37 | "extractionState" : "stale", 38 | "localizations" : { 39 | "ja" : { 40 | "stringUnit" : { 41 | "state" : "translated", 42 | "value" : "ハンドトラッキング許可状況:" 43 | } 44 | } 45 | } 46 | }, 47 | "HandsWidth" : { 48 | "extractionState" : "stale", 49 | "localizations" : { 50 | "ja" : { 51 | "stringUnit" : { 52 | "state" : "needs_review", 53 | "value" : "手がメジャー" 54 | } 55 | } 56 | } 57 | }, 58 | "Measurement of the distance between the fingers."
: { 59 | "extractionState" : "stale", 60 | "localizations" : { 61 | "ja" : { 62 | "stringUnit" : { 63 | "state" : "translated", 64 | "value" : "指と指の間の距離を測ります。" 65 | } 66 | } 67 | } 68 | }, 69 | "Setting" : { 70 | "extractionState" : "stale", 71 | "localizations" : { 72 | "ja" : { 73 | "stringUnit" : { 74 | "state" : "translated", 75 | "value" : "設定" 76 | } 77 | } 78 | } 79 | }, 80 | "Start" : { 81 | "localizations" : { 82 | "ja" : { 83 | "stringUnit" : { 84 | "state" : "translated", 85 | "value" : "開始" 86 | } 87 | } 88 | } 89 | }, 90 | "Unit" : { 91 | "localizations" : { 92 | "ja" : { 93 | "stringUnit" : { 94 | "state" : "translated", 95 | "value" : "単位" 96 | } 97 | } 98 | } 99 | }, 100 | "You're on IP address [%@]" : { 101 | 102 | } 103 | }, 104 | "version" : "1.0" 105 | } -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/README assets/appstore_badge.svg: -------------------------------------------------------------------------------- 1 | 2 | Download_on_the_App_Store_Badge_US-UK_RGB_blk_4SVG_092917 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/README assets/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting files/README assets/icon.png -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/README assets/screenshot1280w.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting files/README assets/screenshot1280w.jpg -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/README assets/screenshot1280w2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting files/README assets/screenshot1280w2.jpg -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/README assets/screenshot1280w3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting files/README assets/screenshot1280w3.jpg -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/Supporting files/sound1.m4a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting files/sound1.m4a -------------------------------------------------------------------------------- /vision_pro_app/Tracking 
Streamer/Supporting files/sound2.m4a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/Tracking Streamer/Supporting files/sound2.m4a -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/VisionProTeleop.entitlements: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | com.apple.developer.networking.networkextension 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/🌐RealityView.swift: -------------------------------------------------------------------------------- 1 | import WebKit 2 | import SwiftUI 3 | import RealityKit 4 | import ARKit 5 | 6 | struct 🌐RealityView: View { 7 | var model: 🥽AppModel 8 | var body: some View { 9 | 10 | RealityView { content, attachments in 11 | 12 | // Create and configure the x-axis entity 13 | let xAxisMesh = MeshResource.generateBox(size: [0.5, 0.01, 0.01]) 14 | let xAxisMaterial = SimpleMaterial(color: .red, isMetallic: true) 15 | let xAxisEntity = ModelEntity(mesh: xAxisMesh, materials: [xAxisMaterial]) 16 | xAxisEntity.position = [0.25, 0, 0] // shift it to the right of the origin 17 | // Create and configure the y-axis entity 18 | let yAxisMesh = MeshResource.generateBox(size: [0.01, 0.01, 0.5]) 19 | let yAxisMaterial = SimpleMaterial(color: .green, isMetallic: true) 20 | let yAxisEntity = ModelEntity(mesh: yAxisMesh, materials: [yAxisMaterial]) 21 | yAxisEntity.position = [0, 0, -0.25] // shift it in front of the origin 22 | // Create and configure the z-axis entity 23 | let zAxisMesh = MeshResource.generateBox(size: [0.01, 0.5, 0.01]) 24 | let zAxisMaterial = SimpleMaterial(color: .blue, isMetallic: true) 25 | let zAxisEntity = ModelEntity(mesh: zAxisMesh, materials: [zAxisMaterial]) 26 | zAxisEntity.position = [0, 0.25, 0] // shift it above the origin 27 | // Add the axis entities to the scene content 28 | content.add(xAxisEntity) 29 | content.add(yAxisEntity) 30 | content.add(zAxisEntity) 31 | 32 | 33 | let resultLabelEntity = attachments.entity(for: Self.attachmentID)! 34 | resultLabelEntity.components.set(🧑HeadTrackingComponent()) 35 | resultLabelEntity.name = 🧩Name.resultLabel 36 | 37 | // Fetch and configure the webViewEntity 38 | let webViewEntity = attachments.entity(for: Self.webViewAttachmentID)! 39 | webViewEntity.position = [0, 1.0, -1.5] // place it in front of the viewer 40 | 41 | // content.add(webViewEntity) 42 | content.add(resultLabelEntity) 43 | 44 | } attachments: { 45 | Attachment(id: Self.attachmentID) { 46 | WebView(url: URL(string: "https://192.168.31.157:8012/?ws=wss://192.168.31.157:8012")!) 47 | .frame(width: 1500, height: 1200) 48 | } 49 | Attachment(id: Self.webViewAttachmentID) { 50 | // WebView(url: URL(string: "https://192.168.31.157:8012/?ws=wss://192.168.31.157:8012")!) 51 | // .frame(width: 1500, height: 1200) 52 | } 53 | } 54 | .gesture( 55 | TapGesture() 56 | .targetedToAnyEntity() 57 | ) 58 | .task { self.model.run() } 59 | .task { await self.model.processDeviceAnchorUpdates() } 60 | .task { self.model.startserver() } 61 | .task(priority: .low) { await self.model.processReconstructionUpdates() } 62 | // These tasks do the core work of streaming data out; the RealityView content itself is incidental 63 | 64 | } 65 | static let attachmentID: String = "resultLabel" 66 | static let webViewAttachmentID: String = "webViewAttachment" 67 | 68 | } 69 | 70 | struct WebView: UIViewRepresentable { 71 | var url: URL 72 | 73 | func makeUIView(context: Context) -> some UIView { 74 | let configuration = WKWebViewConfiguration() 75 | configuration.allowsPictureInPictureMediaPlayback = true 76 | configuration.allowsInlinePredictions = true 77 | configuration.allowsInlineMediaPlayback = true 78 | configuration.allowsAirPlayForMediaPlayback = true 79 | 80 | let webView = WKWebView(frame: .zero, configuration: configuration) 81 | webView.load(URLRequest(url: url)) 82 | 83 | return webView 84 | } 85 | 86 | func updateUIView(_ uiView: UIViewType, context: Context) { 87 | 88 | } 89 | } 90 | 91 | 92 |
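The `.task` modifiers above are where the app does its real work: they start tracking and stream poses over the network, with the message schema given by `avp_stream/grpc_msg/handtracking.proto`. On the robot/host side, the stream is consumed through this repo's `avp_stream` package. A minimal consumer sketch (keys and shapes follow the upstream VisionProTeleop API; the IP is a placeholder for your Vision Pro's address):

```python
from avp_stream import VisionProStreamer

avp_ip = "192.168.31.158"  # placeholder: your Vision Pro's IP on the same LAN
s = VisionProStreamer(ip=avp_ip, record=True)

while True:
    r = s.latest
    # Each entry is a numpy array of 4x4 homogeneous transforms, e.g.
    # r['head']: (1, 4, 4), r['right_wrist']: (1, 4, 4), r['right_fingers']: (25, 4, 4)
    print(r['head'], r['right_wrist'], r['right_fingers'])
```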
-------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/📏Unit.swift: -------------------------------------------------------------------------------- 1 | import Foundation 2 | 3 | enum 📏Unit: String { 4 | case centiMeters, meters, inches, feet, yards 5 | } 6 | 7 | extension 📏Unit: CaseIterable, Identifiable { 8 | var id: Self { self } 9 | var value: UnitLength { 10 | switch self { 11 | case .centiMeters: .centimeters 12 | case .meters: .meters 13 | case .inches: .inches 14 | case .feet: .feet 15 | case .yards: .yards 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/🛠️Menu/🛠️MenuTop.swift: -------------------------------------------------------------------------------- 1 | import SwiftUI 2 | 3 | struct 🛠️MenuTop: View { 4 | // @EnvironmentObject var model: 🥽AppModel 5 | @Environment(\.dismissImmersiveSpace) var dismissImmersiveSpace 6 | var body: some View { 7 | VStack(spacing: 24) { 8 | HStack(spacing: 28) { 9 | Button { 10 | Task { await self.dismissImmersiveSpace() } 11 | } label: { 12 | HStack(spacing: 8) { 13 | Image(systemName: "escape") 14 | .imageScale(.small) 15 | Text("Exit") 16 | } 17 | .font(.title.weight(.regular)) 18 | .padding(.vertical, 12) 19 | .padding(.horizontal, 20) 20 | } 21 | .buttonStyle(.plain) 22 | .glassBackgroundEffect()} 23 | } 24 | } 25 | } 26 | 27 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/🛠️Menu/🛠️Panel.swift: -------------------------------------------------------------------------------- 1 | enum 🛠️Panel { 2 | case setting, about 3 | } 4 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/🛠️Menu/🛠️SettingPanel.swift: -------------------------------------------------------------------------------- 1 | import SwiftUI 2 | 3 | struct 🛠️SettingPanel: View { 4 | // @EnvironmentObject var model: 🥽AppModel 5 | var body: some View { 6 | VStack(spacing: 24) { 7 | HStack { 8 | Spacer() 9 | Text("Unit") 10 | .font(.largeTitle.weight(.semibold)) 11 | Spacer() 12 | } 13 | .frame(height: 60) 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking
Streamer/🧑HeadTrackingComponent&System.swift: -------------------------------------------------------------------------------- 1 | import RealityKit 2 | import ARKit 3 | import SwiftUI 4 | 5 | struct 🧑HeadTrackingComponent: Component, Codable { 6 | init() {} 7 | } 8 | 9 | struct 🧑HeadTrackingSystem: System { 10 | private static let query = EntityQuery(where: .has(🧑HeadTrackingComponent.self)) 11 | 12 | private let session = ARKitSession() 13 | private let provider = WorldTrackingProvider() 14 | 15 | init(scene: RealityKit.Scene) { 16 | self.setUpSession() 17 | } 18 | 19 | private func setUpSession() { 20 | Task { 21 | do { 22 | try await self.session.run([self.provider]) 23 | } catch { 24 | assertionFailure() 25 | } 26 | } 27 | } 28 | 29 | func update(context: SceneUpdateContext) { 30 | let entities = context.scene.performQuery(Self.query).map { $0 } 31 | 32 | guard !entities.isEmpty, 33 | let deviceAnchor = self.provider.queryDeviceAnchor(atTimestamp: CACurrentMediaTime()) else { return } 34 | 35 | let cameraTransform = Transform(matrix: deviceAnchor.originFromAnchorTransform) 36 | 37 | // Compute a position 1 m in front of the camera 38 | let forwardVector = cameraTransform.matrix.columns.2 // z-axis direction vector 39 | let forwardOffset = simd_make_float3(forwardVector.x, forwardVector.y, forwardVector.z) * -1.0 // negate so it points forward 40 | let targetPosition = cameraTransform.translation + forwardOffset // position 1 m ahead 41 | 42 | 43 | for entity in entities { 44 | // Set the entity's new position 45 | entity.setPosition(targetPosition, relativeTo: nil) 46 | 47 | // Orient the entity to face the camera position 48 | entity.look(at: cameraTransform.translation, 49 | from: targetPosition, 50 | relativeTo: nil, 51 | forward: .positiveZ) 52 | 53 | // entity.look(at: cameraTransform.translation, 54 | // from: entity.position(relativeTo: nil), 55 | // relativeTo: nil, 56 | // forward: .positiveZ) 57 | } 58 | } 59 | } 60 | 61 | 62 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/🧩Model.swift: -------------------------------------------------------------------------------- 1 | import RealityKit 2 | 3 | enum 🧩Model { 4 | static func fingerTip(_ selected: Bool = false) -> ModelComponent { 5 | .init(mesh: .generateSphere(radius: 0.005), 6 | materials: [SimpleMaterial(color: selected ? 
.red : .blue, 7 | isMetallic: true)]) 8 | } 9 | static func line(_ length: Float) -> ModelComponent { 10 | ModelComponent(mesh: .generateBox(width: 0.01, 11 | height: 0.01, 12 | depth: length, 13 | cornerRadius: 0.005), 14 | materials: [SimpleMaterial(color: .white, isMetallic: true)]) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /vision_pro_app/Tracking Streamer/🧩Name.swift: -------------------------------------------------------------------------------- 1 | enum 🧩Name { 2 | static let fingerLeft = "fingerLeft" 3 | static let fingerRight = "fingerRight" 4 | static let line = "line" 5 | static let resultLabel = "resultLabel" 6 | } 7 | -------------------------------------------------------------------------------- /vision_pro_app/assets/axis_convention.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/assets/axis_convention.png -------------------------------------------------------------------------------- /vision_pro_app/assets/change_ip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/assets/change_ip.png -------------------------------------------------------------------------------- /vision_pro_app/assets/coord_system.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/assets/coord_system.png -------------------------------------------------------------------------------- /vision_pro_app/assets/hand_skeleton_convention.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/assets/hand_skeleton_convention.png -------------------------------------------------------------------------------- /vision_pro_app/assets/home_button.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/assets/home_button.png -------------------------------------------------------------------------------- /vision_pro_app/assets/main.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/assets/main.png -------------------------------------------------------------------------------- /vision_pro_app/assets/short_paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/assets/short_paper.pdf -------------------------------------------------------------------------------- /vision_pro_app/assets/short_paper_new.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/assets/short_paper_new.pdf 
-------------------------------------------------------------------------------- /vision_pro_app/assets/visionpro_main.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/assets/visionpro_main.png -------------------------------------------------------------------------------- /vision_pro_app/avp_stream/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/avp_stream/.DS_Store -------------------------------------------------------------------------------- /vision_pro_app/avp_stream/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from avp_stream.streamer import VisionProStreamer 3 | 4 | -------------------------------------------------------------------------------- /vision_pro_app/avp_stream/assets/huge_axis.urdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /vision_pro_app/avp_stream/assets/normal_axis.urdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /vision_pro_app/avp_stream/assets/small_axis.urdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /vision_pro_app/avp_stream/grpc_msg/__init__.py: -------------------------------------------------------------------------------- 1 | import avp_stream.grpc_msg.handtracking_pb2 as handtracking_pb2 2 | import avp_stream.grpc_msg.handtracking_pb2_grpc as handtracking_pb2_grpc -------------------------------------------------------------------------------- /vision_pro_app/avp_stream/grpc_msg/handtracking.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package handtracking; 4 | 5 | // Represents a 4x4 transformation matrix for a joint 6 | message Matrix4x4 { 7 | float m00 = 1; 8 | float m01 = 2; 9 | float m02 = 3; 10 | float m03 = 4; 11 | float m10 = 5; 12 | float m11 = 6; 13 | float m12 = 7; 14 | float m13 = 8; 15 | float m20 = 9; 16 | float m21 = 10; 17 | float m22 = 11; 18 | float m23 = 12; 19 | float m30 = 13; 20 | float m31 = 14; 21 | float m32 = 15; 22 | float m33 = 16; 23 | } 24 | 25 | // The skeleton of a hand, comprising multiple 4x4 matrices (one per joint) 26 | message Skeleton { 27 | repeated Matrix4x4 jointMatrices = 1; // Array of 4x4 matrices, expecting 24 per hand based on your structure 28 | } 29 | 30 | // The hand tracking information, including the full 4x4 matrix for the wrist and the skeleton 31 | message Hand { 32 | Matrix4x4 wristMatrix = 1; // 
4x4 matrix for the wrist position and orientation 33 | Skeleton skeleton = 2; // The hand's skeleton 34 | } 35 | 36 | // The overall hand update message, including data for both hands 37 | message HandUpdate { 38 | Hand left_hand = 1; 39 | Hand right_hand = 2; 40 | Matrix4x4 Head = 3; 41 | } 42 | 43 | // The hand tracking service definition. 44 | service HandTrackingService { 45 | rpc StreamHandUpdates(HandUpdate) returns (stream HandUpdate) {} 46 | } 47 | 48 | // Acknowledgement message for hand updates 49 | message HandUpdateAck { 50 | string message = 1; 51 | } 52 | -------------------------------------------------------------------------------- /vision_pro_app/avp_stream/grpc_msg/handtracking_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 3 | # source: handtracking.proto 4 | # Protobuf Python Version: 4.25.0 5 | """Generated protocol buffer code.""" 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import descriptor_pool as _descriptor_pool 8 | from google.protobuf import symbol_database as _symbol_database 9 | from google.protobuf.internal import builder as _builder 10 | # @@protoc_insertion_point(imports) 11 | 12 | _sym_db = _symbol_database.Default() 13 | 14 | 15 | 16 | 17 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12handtracking.proto\x12\x0chandtracking\"\xdb\x01\n\tMatrix4x4\x12\x0b\n\x03m00\x18\x01 \x01(\x02\x12\x0b\n\x03m01\x18\x02 \x01(\x02\x12\x0b\n\x03m02\x18\x03 \x01(\x02\x12\x0b\n\x03m03\x18\x04 \x01(\x02\x12\x0b\n\x03m10\x18\x05 \x01(\x02\x12\x0b\n\x03m11\x18\x06 \x01(\x02\x12\x0b\n\x03m12\x18\x07 \x01(\x02\x12\x0b\n\x03m13\x18\x08 \x01(\x02\x12\x0b\n\x03m20\x18\t \x01(\x02\x12\x0b\n\x03m21\x18\n \x01(\x02\x12\x0b\n\x03m22\x18\x0b \x01(\x02\x12\x0b\n\x03m23\x18\x0c \x01(\x02\x12\x0b\n\x03m30\x18\r \x01(\x02\x12\x0b\n\x03m31\x18\x0e \x01(\x02\x12\x0b\n\x03m32\x18\x0f \x01(\x02\x12\x0b\n\x03m33\x18\x10 \x01(\x02\":\n\x08Skeleton\x12.\n\rjointMatrices\x18\x01 \x03(\x0b\x32\x17.handtracking.Matrix4x4\"^\n\x04Hand\x12,\n\x0bwristMatrix\x18\x01 \x01(\x0b\x32\x17.handtracking.Matrix4x4\x12(\n\x08skeleton\x18\x02 \x01(\x0b\x32\x16.handtracking.Skeleton\"\x82\x01\n\nHandUpdate\x12%\n\tleft_hand\x18\x01 \x01(\x0b\x32\x12.handtracking.Hand\x12&\n\nright_hand\x18\x02 \x01(\x0b\x32\x12.handtracking.Hand\x12%\n\x04Head\x18\x03 \x01(\x0b\x32\x17.handtracking.Matrix4x4\" \n\rHandUpdateAck\x12\x0f\n\x07message\x18\x01 \x01(\t2b\n\x13HandTrackingService\x12K\n\x11StreamHandUpdates\x12\x18.handtracking.HandUpdate\x1a\x18.handtracking.HandUpdate\"\x00\x30\x01\x62\x06proto3') 18 | 19 | _globals = globals() 20 | _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 21 | _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'handtracking_pb2', _globals) 22 | if _descriptor._USE_C_DESCRIPTORS == False: 23 | DESCRIPTOR._options = None 24 | _globals['_MATRIX4X4']._serialized_start=37 25 | _globals['_MATRIX4X4']._serialized_end=256 26 | _globals['_SKELETON']._serialized_start=258 27 | _globals['_SKELETON']._serialized_end=316 28 | _globals['_HAND']._serialized_start=318 29 | _globals['_HAND']._serialized_end=412 30 | _globals['_HANDUPDATE']._serialized_start=415 31 | _globals['_HANDUPDATE']._serialized_end=545 32 | _globals['_HANDUPDATEACK']._serialized_start=547 33 | _globals['_HANDUPDATEACK']._serialized_end=579 34 | _globals['_HANDTRACKINGSERVICE']._serialized_start=581 35 | 
_globals['_HANDTRACKINGSERVICE']._serialized_end=679 36 | # @@protoc_insertion_point(module_scope) 37 | -------------------------------------------------------------------------------- /vision_pro_app/avp_stream/grpc_msg/handtracking_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | """Client and server classes corresponding to protobuf-defined services.""" 3 | import grpc 4 | 5 | import avp_stream.grpc_msg.handtracking_pb2 as handtracking__pb2 6 | 7 | 8 | class HandTrackingServiceStub(object): 9 | """The hand tracking service definition. 10 | """ 11 | 12 | def __init__(self, channel): 13 | """Constructor. 14 | 15 | Args: 16 | channel: A grpc.Channel. 17 | """ 18 | self.StreamHandUpdates = channel.unary_stream( 19 | '/handtracking.HandTrackingService/StreamHandUpdates', 20 | request_serializer=handtracking__pb2.HandUpdate.SerializeToString, 21 | response_deserializer=handtracking__pb2.HandUpdate.FromString, 22 | ) 23 | 24 | 25 | class HandTrackingServiceServicer(object): 26 | """The hand tracking service definition. 27 | """ 28 | 29 | def StreamHandUpdates(self, request, context): 30 | """Missing associated documentation comment in .proto file.""" 31 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 32 | context.set_details('Method not implemented!') 33 | raise NotImplementedError('Method not implemented!') 34 | 35 | 36 | def add_HandTrackingServiceServicer_to_server(servicer, server): 37 | rpc_method_handlers = { 38 | 'StreamHandUpdates': grpc.unary_stream_rpc_method_handler( 39 | servicer.StreamHandUpdates, 40 | request_deserializer=handtracking__pb2.HandUpdate.FromString, 41 | response_serializer=handtracking__pb2.HandUpdate.SerializeToString, 42 | ), 43 | } 44 | generic_handler = grpc.method_handlers_generic_handler( 45 | 'handtracking.HandTrackingService', rpc_method_handlers) 46 | server.add_generic_rpc_handlers((generic_handler,)) 47 | 48 | 49 | # This class is part of an EXPERIMENTAL API. 50 | class HandTrackingService(object): 51 | """The hand tracking service definition. 
52 | """ 53 | 54 | @staticmethod 55 | def StreamHandUpdates(request, 56 | target, 57 | options=(), 58 | channel_credentials=None, 59 | call_credentials=None, 60 | insecure=False, 61 | compression=None, 62 | wait_for_ready=None, 63 | timeout=None, 64 | metadata=None): 65 | return grpc.experimental.unary_stream(request, target, '/handtracking.HandTrackingService/StreamHandUpdates', 66 | handtracking__pb2.HandUpdate.SerializeToString, 67 | handtracking__pb2.HandUpdate.FromString, 68 | options, channel_credentials, 69 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 70 | -------------------------------------------------------------------------------- /vision_pro_app/avp_stream/isaac_env.py: -------------------------------------------------------------------------------- 1 | import isaacgym 2 | import torch 3 | from isaacgym import gymapi 4 | from isaacgym import gymutil 5 | from isaacgym import gymtorch 6 | 7 | import numpy as np 8 | import torch 9 | import time 10 | from pathlib import Path 11 | 12 | from avp_stream import VisionProStreamer 13 | from avp_stream.utils.isaac_utils import * 14 | from avp_stream.utils.se3_utils import * 15 | from avp_stream.utils.trn_constants import * 16 | from copy import deepcopy 17 | from typing import * 18 | 19 | CUR_PATH = Path(__file__).parent.resolve() 20 | 21 | class IsaacVisualizerEnv: 22 | 23 | def __init__(self, args): 24 | 25 | self.args = args 26 | 27 | # acquire gym interface 28 | self.gym = gymapi.acquire_gym() 29 | 30 | # set torch device 31 | self.device = 'cpu' # i'll just fix this to CUDA 32 | 33 | # configure sim 34 | self.sim_params = default_sim_params(use_gpu = True if self.device == 'cuda:0' else False) 35 | 36 | # create sim 37 | self.sim = self.gym.create_sim(0, 0, gymapi.SIM_PHYSX, self.sim_params) 38 | if self.sim is None: 39 | raise Exception("Failed to create sim") 40 | 41 | # load assets 42 | self.num_envs = 1 43 | 44 | # create viewer 45 | self.viewer = self.gym.create_viewer(self.sim, gymapi.CameraProperties()) 46 | if self.viewer is None: 47 | raise Exception("Failed to create viewer") 48 | 49 | # create env 50 | self._load_asset() 51 | self.create_env() 52 | 53 | # setup viewer camera 54 | middle_env = self.num_envs // 2 55 | setup_viewer_camera(self.gym, self.envs[middle_env], self.viewer) 56 | 57 | # ==== prepare tensors ===== 58 | # from now on, we will use the tensor API that can run on CPU or GPU 59 | self.gym.prepare_sim(self.sim) 60 | self.initialize_tensors() 61 | 62 | 63 | 64 | def _load_asset(self): 65 | 66 | self.axis = load_axis(self.gym, self.sim, self.device, 'normal', f'{CUR_PATH}/assets') 67 | self.small_axis = load_axis(self.gym, self.sim, self.device, 'small', f'{CUR_PATH}/assets') 68 | self.huge_axis = load_axis(self.gym, self.sim, self.device, 'huge', f'{CUR_PATH}/assets') 69 | 70 | asset_options = gymapi.AssetOptions() 71 | asset_options.disable_gravity = True 72 | asset_options.fix_base_link = True 73 | self.sphere = self.gym.create_sphere(self.sim, 0.008, asset_options) 74 | 75 | 76 | def create_env(self): 77 | spacing = 1.0 78 | env_lower = gymapi.Vec3(-spacing, -spacing, 0.0) 79 | env_upper = gymapi.Vec3(spacing, spacing, spacing) 80 | 81 | plane_params = gymapi.PlaneParams() 82 | plane_params.normal = gymapi.Vec3(0, 0, 1) 83 | self.gym.add_ground(self.sim, plane_params) 84 | 85 | # create env 86 | self.envs = [] 87 | self.robot_actor_idxs_over_sim = [] 88 | self.env_side_actor_idxs_over_sim = [] 89 | 90 | for env_idx in range(self.num_envs): 91 | env = 
92 |             self.envs.append(env)
93 | 
94 |             self.head_axis = self.gym.create_actor(env, self.axis, gymapi.Transform(), 'head', 0)
95 | 
96 |             self.right_wrist_axis = self.gym.create_actor(env, self.axis, gymapi.Transform(), 'right_wrist', 1)
97 |             self.left_wrist_axis = self.gym.create_actor(env, self.axis, gymapi.Transform(), 'left_wrist', 2)
98 | 
99 | 
100 |             # SPHERE (one small sphere per tracked hand joint; joints 0, 4, 9, 14, 19, 24 are colored yellow)
101 |             for i in range(25):
102 | 
103 |                 finger_1 = self.gym.create_actor(env, self.sphere, gymapi.Transform(), f'right_finger_{i}', 3 + i )
104 |                 if i in [0, 4, 9, 14, 19, 24]:
105 |                     self.gym.set_rigid_body_color(env, finger_1, 0, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 1, 0))
106 |                 else:
107 |                     self.gym.set_rigid_body_color(env, finger_1, 0, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 1, 1))
108 | 
109 |             for i in range(25):
110 |                 finger_2 = self.gym.create_actor(env, self.sphere, gymapi.Transform(), f'left_finger_{i}', 28 + i )
111 | 
112 |                 if i in [0, 4, 9, 14, 19, 24]:
113 |                     self.gym.set_rigid_body_color(env, finger_2, 0, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 1, 0))
114 |                 else:
115 |                     self.gym.set_rigid_body_color(env, finger_2, 0, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(1, 1, 1))
116 | 
117 |             # SMALL AXIS (a small coordinate frame per joint, stacked after the spheres)
118 |             for i in range(25):
119 |                 finger_1 = self.gym.create_actor(env, self.small_axis, gymapi.Transform(), f'right_finger_{i}', 53 + i )
120 | 
121 |             for i in range(25):
122 |                 finger_2 = self.gym.create_actor(env, self.small_axis, gymapi.Transform(), f'left_finger_{i}', 78 + i )
123 | 
124 |             self.env_axis = self.gym.create_actor(env, self.huge_axis, gymapi.Transform(), 'env_axis', 103 )
125 | 
126 | 
127 |     def initialize_tensors(self):
128 | 
129 |         refresh_tensors(self.gym, self.sim)
130 | 
131 |         # get rigid body state tensor
132 |         _rb_states = self.gym.acquire_rigid_body_state_tensor(self.sim)
133 |         self.rb_states = gymtorch.wrap_tensor(_rb_states).view(self.num_envs, -1, 13)
134 | 
135 |         # get actor root state tensor
136 |         _root_states = self.gym.acquire_actor_root_state_tensor(self.sim)
137 |         root_states = gymtorch.wrap_tensor(_root_states).view(self.num_envs, -1, 13)
138 |         self.root_state = root_states
139 | 
140 |         self.gym.simulate(self.sim)
141 |         self.gym.fetch_results(self.sim, True)
142 |         self.gym.step_graphics(self.sim)
143 |         self.gym.draw_viewer(self.viewer, self.sim, False)
144 |         self.gym.sync_frame_time(self.sim)
145 | 
146 |     # will be overloaded
147 |     def step(self, transformation: Dict[str, torch.Tensor], sync_frame_time = False):
148 | 
149 |         self.simulate()
150 | 
151 |         new_root_state = self.modify_root_state(transformation)
152 |         env_side_actor_idxs = torch.arange(0, 103, dtype = torch.int32)
153 |         self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(new_root_state), gymtorch.unwrap_tensor(env_side_actor_idxs), len(env_side_actor_idxs))
154 | 
155 |         # update viewer
156 |         self.render(sync_frame_time)
157 | 
158 |     def move_camera(self):
159 | 
160 |         head_xyz = self.visionos_head[:, :3, 3]
161 |         head_ydir = self.visionos_head[:, :3, 1]
162 | 
163 |         cam_pos = head_xyz - head_ydir * 0.5
164 |         cam_target = head_xyz + head_ydir * 0.5
165 |         cam_target[..., -1] -= 0.2
166 | 
167 |         cam_pos = gymapi.Vec3(*cam_pos[0])
168 |         cam_target = gymapi.Vec3(*cam_target[0])
169 | 
170 |         self.gym.viewer_camera_look_at(self.viewer, self.envs[0], cam_pos, cam_target)
171 | 
172 |     def simulate(self):
173 |         # step the physics
174 |         self.gym.simulate(self.sim)
175 | 
176 |         # refresh tensors
177 |         refresh_tensors(self.gym, self.sim)
178 | 
179 | 
180 |     def render(self, sync_frame_time = True):
181 | 
182 |         # update viewer
183 |         if self.args.follow:
184 |             self.move_camera()
185 |         self.gym.step_graphics(self.sim)
186 |         self.gym.draw_viewer(self.viewer, self.sim, False)
187 |         if sync_frame_time:
188 |             self.gym.sync_frame_time(self.sim)
189 | 
190 |     def modify_root_state(self, transformations):
191 | 
192 |         # actor root-state layout per env: 0 = head, 1/2 = wrists, 3:28 / 28:53 = finger spheres, 53:78 / 78:103 = small finger axes, 103 = env axis
193 | 
194 |         self.visionos_head = transformations['head']
195 | 
196 |         self.sim_right_wrist = transformations['right_wrist'] #@ VISIONOS_RIGHT_HAND_TO_LEAP
197 |         self.sim_left_wrist = transformations['left_wrist'] # @ VISIONOS_LEFT_HAND_TO_LEAP
198 | 
199 |         sim_right_fingers = torch.cat([self.sim_right_wrist @ finger for finger in transformations['right_fingers']], dim = 0)
200 |         sim_left_fingers = torch.cat([self.sim_left_wrist @ finger for finger in transformations['left_fingers']], dim = 0)
201 | 
202 |         self.sim_right_fingers = sim_right_fingers
203 |         self.sim_left_fingers = sim_left_fingers
204 | 
205 |         new_root_state = deepcopy(self.root_state)
206 |         new_root_state[:, 0, :7] = mat2posquat(self.visionos_head)
207 |         new_root_state[:, 1, :7] = mat2posquat(self.sim_right_wrist)
208 |         new_root_state[:, 2, :7] = mat2posquat(self.sim_left_wrist)
209 |         new_root_state[:, 3:28, :7] = mat2posquat(self.sim_right_fingers)
210 |         new_root_state[:, 28:53, :7] = mat2posquat(self.sim_left_fingers)
211 |         new_root_state[:, 53:78, :7] = mat2posquat(self.sim_right_fingers)
212 |         new_root_state[:, 78:103, :7] = mat2posquat(self.sim_left_fingers)
213 |         # new_root_state[:, 103, :7] = mat2posquat(transformed_wrist_right)
214 |         new_root_state = new_root_state.view(-1, 13)
215 | 
216 |         return new_root_state
217 | 
218 | 
219 | def np2tensor(data: Dict[str, np.ndarray], device) -> Dict[str, torch.Tensor]:
220 |     for key in data.keys():
221 |         data[key] = torch.tensor(data[key], dtype = torch.float32, device = device)
222 |     return data
223 | 
224 | 
225 | if __name__ == "__main__":
226 | 
227 |     import argparse
228 |     import os
229 | 
230 |     parser = argparse.ArgumentParser()
231 |     parser.add_argument('--ip', type = str, required = True)
232 |     parser.add_argument('--record', action = 'store_true')
233 |     parser.add_argument('--follow', action = 'store_true', help = "The viewpoint follows the user's head")
234 |     args = parser.parse_args()
235 | 
236 |     s = VisionProStreamer(args.ip, args.record)
237 | 
238 |     env = IsaacVisualizerEnv(args)
239 |     while True:
240 |         t0 = time.time()
241 |         latest = s.latest
242 |         env.step(np2tensor(latest, env.device))
243 |         print(time.time() - t0)
244 | 
245 | 
246 | 
--------------------------------------------------------------------------------
/vision_pro_app/avp_stream/streamer.py:
--------------------------------------------------------------------------------
1 | import grpc
2 | from avp_stream.grpc_msg import *
3 | from threading import Thread
4 | from avp_stream.utils.grpc_utils import *
5 | import time
6 | import numpy as np
7 | 
8 | 
9 | YUP2ZUP = np.array([[[1, 0, 0, 0],
10 |                     [0, 0, -1, 0],
11 |                     [0, 1, 0, 0],
12 |                     [0, 0, 0, 1]]], dtype = np.float64)
13 | 
14 | 
15 | class VisionProStreamer:
16 | 
17 |     def __init__(self, ip, record = True):
18 | 
19 |         # Vision Pro IP
20 |         self.ip = ip
21 |         self.record = record
22 |         self.recording = []
23 |         self.latest = None
24 |         self.axis_transform = YUP2ZUP
25 |         self.start_streaming()
26 | 
27 |     def start_streaming(self):
28 | 
29 |         stream_thread = Thread(target = self.stream)
30 |         stream_thread.start()
31 |         while self.latest is None:
32 |             time.sleep(0.01)  # wait for the first packet without busy-spinning
33 |         print(' == DATA IS FLOWING IN! ==')
34 |         print('Ready to start streaming.')
35 | 
36 | 
37 |     def stream(self):
38 | 
39 |         request = handtracking_pb2.HandUpdate()
40 |         try:
41 |             with grpc.insecure_channel(f"{self.ip}:12345") as channel:
42 |                 stub = handtracking_pb2_grpc.HandTrackingServiceStub(channel)
43 |                 responses = stub.StreamHandUpdates(request)
44 |                 for response in responses:
45 |                     transformations = {
46 |                         "left_wrist": self.axis_transform @ process_matrix(response.left_hand.wristMatrix),
47 |                         "right_wrist": self.axis_transform @ process_matrix(response.right_hand.wristMatrix),
48 |                         "left_fingers": process_matrices(response.left_hand.skeleton.jointMatrices),
49 |                         "right_fingers": process_matrices(response.right_hand.skeleton.jointMatrices),
50 |                         "head": rotate_head(self.axis_transform @ process_matrix(response.Head)),
51 |                         "left_pinch_distance": get_pinch_distance(response.left_hand.skeleton.jointMatrices),
52 |                         "right_pinch_distance": get_pinch_distance(response.right_hand.skeleton.jointMatrices),
53 |                         # "rgb": response.rgb, # TODO: should figure out how to get the rgb image from vision pro
54 |                     }
55 |                     transformations["right_wrist_roll"] = get_wrist_roll(transformations["right_wrist"])
56 |                     transformations["left_wrist_roll"] = get_wrist_roll(transformations["left_wrist"])
57 |                     if self.record:
58 |                         self.recording.append(transformations)
59 |                     self.latest = transformations
60 | 
61 |         except Exception as e:
62 |             print(f"An error occurred: {e}")
63 |             # the streaming thread exits here; callers keep whatever data was already received
64 | 
65 |     def get_latest(self):
66 |         return self.latest
67 | 
68 |     def get_recording(self):
69 |         return self.recording
70 | 
71 | 
72 | if __name__ == "__main__":
73 | 
74 |     streamer = VisionProStreamer(ip = '10.29.230.57')
75 |     while True:
76 | 
77 |         latest = streamer.get_latest()
78 |         print(latest)
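
A minimal usage sketch for the streamer above (the IP below is a placeholder for whatever address the Tracking Streamer app shows on the Vision Pro; the key shapes follow the dictionary built in stream()):

    from avp_stream import VisionProStreamer

    streamer = VisionProStreamer(ip = '10.0.0.2', record = False)   # hypothetical IP
    data = streamer.get_latest()
    print(data['head'].shape)            # (1, 4, 4) homogeneous transform in the Z-up frame
    print(data['right_fingers'].shape)   # (25, 4, 4), one transform per tracked joint
    print(data['right_pinch_distance'])  # float: thumb-tip to index-tip distance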
--------------------------------------------------------------------------------
/vision_pro_app/avp_stream/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YanjieZe/Humanoid-Teleoperation/8221c58e0005de7efcd071f2373a888751727811/vision_pro_app/avp_stream/utils/__init__.py
--------------------------------------------------------------------------------
/vision_pro_app/avp_stream/utils/grpc_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from typing import *
3 | 
4 | def process_matrix(message):
5 |     m = np.array([[[message.m00, message.m01, message.m02, message.m03],
6 |                    [message.m10, message.m11, message.m12, message.m13],
7 |                    [message.m20, message.m21, message.m22, message.m23],
8 |                    [0, 0, 0, 1]]])
9 |     return m
10 | 
11 | def process_matrices(skeleton, matrix = np.eye(4)):
12 |     return np.concatenate([matrix @ process_matrix(joint) for joint in skeleton], axis = 0)
13 | 
14 | 
15 | def rotate_head(R, degrees=-90):
16 |     # Convert degrees to radians
17 |     theta = np.radians(degrees)
18 |     # Create the rotation matrix for rotating around the x-axis
19 |     R_x = np.array([[
20 |         [1, 0, 0, 0],
21 |         [0, np.cos(theta), -np.sin(theta), 0],
22 |         [0, np.sin(theta), np.cos(theta), 0],
23 |         [0, 0, 0, 1]
24 |     ]])
25 |     R_rotated = R @ R_x
26 |     return R_rotated
27 | 
28 | 
29 | def get_pinch_distance(finger_messages):
30 |     fingers = process_matrices(finger_messages)
31 |     thumb = fingers[4, :3, 3]
32 |     index = fingers[9, :3, 3]
33 | 
34 |     return np.linalg.norm(thumb - index)
35 | 
36 | def get_wrist_roll(mat):
37 |     """
38 |     returns the wrist roll angle (rotation about the hand's x-axis) in radians
39 |     """
40 |     R = mat[0, :3, :3]
41 | 
42 |     # Calculate angles for rotation around z and y axis to align the first column with [1, 0, 0]
43 |     # Angle to rotate around z-axis to align the projection on the XY plane
44 |     theta_z = np.arctan2(R[1, 0], R[0, 0])  # arctan2(y, x)
45 | 
46 |     # Rotate R around the z-axis by -theta_z to align its x-axis on the XY plane
47 |     Rz = np.array([
48 |         [np.cos(-theta_z), -np.sin(-theta_z), 0],
49 |         [np.sin(-theta_z), np.cos(-theta_z), 0],
50 |         [0, 0, 1]
51 |     ])
52 |     R_after_z = Rz @ R
53 | 
54 |     # Angle to rotate around y-axis to align the x-axis with the global x-axis
55 |     theta_y = np.arctan2(R_after_z[0, 2], R_after_z[0, 0])  # arctan2(z, x)
56 | 
57 |     # Since the goal is to align the x-axis, the rotation around the x-axis might not be necessary
58 |     # unless there are specific orientations required for the y and z axes after the alignment.
59 | 
60 |     # Calculated angles in degrees (unused; kept for easier debugging)
61 |     theta_z_deg = np.degrees(theta_z)
62 |     theta_y_deg = np.degrees(theta_y)
63 | 
64 |     Ry = np.array([
65 |         [np.cos(-theta_y), 0, np.sin(-theta_y)],
66 |         [0, 1, 0],
67 |         [-np.sin(-theta_y), 0, np.cos(-theta_y)]
68 |     ])
69 |     R_after_y = Ry @ R_after_z
70 | 
71 |     # Angle to rotate around x-axis to align the y-axis and z-axis properly with the global y-axis and z-axis
72 |     theta_x = np.arctan2(R_after_y[1, 2], R_after_y[1, 1])  # arctan2(z, y) of the second row
73 | 
74 |     # Calculated angle (converted to degrees for easier interpretation)
75 |     # theta_x_deg = np.degrees(theta_x)
76 | 
77 |     return theta_x
78 | 
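
A quick sanity check for get_wrist_roll (a sketch, not part of the library): for a pure roll of angle phi about the x-axis, theta_z and theta_y above vanish and the function returns -phi, i.e. the roll magnitude under this implementation's sign convention:

    import numpy as np
    from avp_stream.utils.grpc_utils import get_wrist_roll

    phi = np.radians(30.0)
    mat = np.array([[[1, 0, 0, 0],
                     [0, np.cos(phi), -np.sin(phi), 0],
                     [0, np.sin(phi), np.cos(phi), 0],
                     [0, 0, 0, 1]]])
    print(np.degrees(get_wrist_roll(mat)))  # approximately -30.0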
--------------------------------------------------------------------------------
/vision_pro_app/avp_stream/utils/isaac_utils.py:
--------------------------------------------------------------------------------
1 | import isaacgym
2 | import torch
3 | from isaacgym import gymapi
4 | import numpy as np
5 | import argparse
6 | import torch.nn.functional as F
7 | from typing import *
8 | import yaml
9 | import os
10 | 
11 | asset_loaded = False
12 | 
13 | def load_cfg(cfg_path: str) -> Dict:
14 |     with open(cfg_path, 'r') as f: cfg = yaml.load(f, Loader=yaml.FullLoader)  # context manager closes the file promptly
15 |     return cfg
16 | 
17 | def default_sim_params(use_gpu, up_axis = 'Z', hz = 60.0, substeps = 4, num_position_iterations = 8, num_velocity_iterations = 2):
18 |     sim_params = gymapi.SimParams()
19 |     sim_params.up_axis = gymapi.UP_AXIS_Y if up_axis == 'Y' else gymapi.UP_AXIS_Z
20 |     sim_params.gravity = gymapi.Vec3(0.0, -9.8, 0.0) if up_axis == 'Y' else gymapi.Vec3(0.0, 0.0, -9.8)
21 |     sim_params.dt = 1.0 / hz
22 |     sim_params.substeps = substeps
23 |     sim_params.use_gpu_pipeline = use_gpu
24 |     sim_params.physx.solver_type = 1
25 |     sim_params.physx.num_position_iterations = num_position_iterations
26 |     sim_params.physx.num_velocity_iterations = num_velocity_iterations
27 |     sim_params.physx.rest_offset = 0.0
28 |     sim_params.physx.contact_offset = 0.001
29 |     sim_params.physx.friction_offset_threshold = 0.001
30 |     sim_params.physx.friction_correlation_distance = 0.0005
31 |     sim_params.physx.num_threads = 4
32 |     sim_params.physx.use_gpu = use_gpu
33 |     return sim_params
34 | 
35 | 
36 | def load_axis(gym, sim, device, size, asset_root = './assets'):
37 | 
38 |     robot_asset_file = "{}_axis.urdf".format(size)
39 |     asset_options = gymapi.AssetOptions()
40 |     asset_options.armature = 0.01
41 |     asset_options.fix_base_link = True
42 |     asset_options.disable_gravity = True
43 |     robot_asset = gym.load_asset(sim, asset_root, robot_asset_file, asset_options)
44 | 
45 |     return robot_asset
46 | 
47 | 
48 | 
49 | def load_left_leap_hand_asset(gym, sim, device, asset_root = '../bidex_sim/assets'):
50 | 
51 |     robot_asset_file = "robots/hands/allegro_hand/allegro_hand_left.urdf"  # note: despite the function name, this points at the Allegro hand URDF
52 |     asset_options = gymapi.AssetOptions()
53 |     asset_options.armature = 0.01
54 |     asset_options.fix_base_link = False
55 |     asset_options.disable_gravity = True
56 |     asset_options.flip_visual_attachments = False
57 |     asset_options.use_mesh_materials = True
58 |     asset_options.vhacd_enabled = True
59 |     asset_options.vhacd_params = gymapi.VhacdParams()
60 |     asset_options.vhacd_params.resolution = 1000000
61 |     robot_asset = gym.load_asset(sim, asset_root, robot_asset_file, asset_options)
62 | 
63 |     return robot_asset
64 | 
65 | def load_ur3e_asset(gym, sim, device, asset_root = '../bidex_sim/assets', hand = None, chirality = None, control = {'arm': 'POS', 'hand': 'POS'}):
66 | 
67 |     if hand is None:
68 |         robot_asset_file = "robots/ur_description/urdf/ur3e.urdf"
69 |     else:
70 |         robot_asset_file = "robots/ur_description/urdf/ur3e_{}_{}.urdf".format(hand, chirality)
71 | 
72 |     asset_options = gymapi.AssetOptions()
73 |     asset_options.armature = 0.01
74 |     asset_options.fix_base_link = True
75 |     asset_options.disable_gravity = True
76 |     asset_options.flip_visual_attachments = False
77 |     asset_options.use_mesh_materials = True
78 |     asset_options.vhacd_enabled = True
79 |     asset_options.vhacd_params = gymapi.VhacdParams()
80 |     if control['arm'] == 'POS':
81 |         asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
82 |     elif control['arm'] == 'EFFORT':
83 |         asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT
84 |     asset_options.vhacd_params.resolution = 1000000
85 |     robot_asset = gym.load_asset(sim, asset_root, robot_asset_file, asset_options)
86 |     robot_dof_props = gym.get_asset_dof_properties(robot_asset)
87 |     for i in range(6, 22):  # DOFs 6..21 (the mounted hand joints) are driven in position mode
88 |         robot_dof_props['driveMode'][i] = gymapi.DOF_MODE_POS
89 |         robot_dof_props['stiffness'][i] = 10000
90 |         robot_dof_props['damping'][i] = 500
91 |     return robot_asset, robot_dof_props
92 | 
93 | 
94 | def refresh_tensors(gym, sim):
95 |     # refresh tensors
96 |     gym.refresh_rigid_body_state_tensor(sim)
97 |     gym.refresh_actor_root_state_tensor(sim)
98 |     gym.refresh_dof_state_tensor(sim)
99 |     gym.refresh_jacobian_tensors(sim)
100 |     gym.refresh_mass_matrix_tensors(sim)
101 | 
102 | def setup_viewer_camera(gym, env, viewer):
103 |     cam_pos = gymapi.Vec3(0.0, -0.05, 1.55)
104 |     cam_target = gymapi.Vec3(0.0, 0.5, 0.4)
105 |     middle_env = env
106 |     gym.viewer_camera_look_at(viewer, middle_env, cam_pos, cam_target)
107 | 
108 | def adjust_viewer_camera(gym, env, viewer, cam_pos, cam_target):
109 |     gym.viewer_camera_look_at(viewer, env, cam_pos, cam_target)
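
A minimal sketch of how the helpers above compose into a simulation, mirroring the setup in isaac_env.py (CPU PhysX assumed; load_axis expects an assets folder containing normal_axis.urdf):

    import isaacgym
    from isaacgym import gymapi
    from avp_stream.utils.isaac_utils import default_sim_params, load_axis

    gym = gymapi.acquire_gym()
    sim = gym.create_sim(0, 0, gymapi.SIM_PHYSX, default_sim_params(use_gpu = False))
    axis_asset = load_axis(gym, sim, 'cpu', 'normal', asset_root = './assets')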
--------------------------------------------------------------------------------
/vision_pro_app/avp_stream/utils/se3_utils.py:
--------------------------------------------------------------------------------
1 | 
2 | import torch
3 | import torch.nn.functional as F
4 | import numpy as np
5 | from typing import *
6 | 
7 | def standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor:
8 |     """
9 |     Convert a unit quaternion to a standard form: one in which the real
10 |     part is non-negative.
11 | 
12 |     Args:
13 |         quaternions: Quaternions with real part first,
14 |             as tensor of shape (..., 4).
15 | 
16 |     Returns:
17 |         Standardized quaternions as tensor of shape (..., 4).
18 |     """
19 |     return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions)
20 | 
21 | 
22 | def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
23 |     """
24 |     Returns torch.sqrt(torch.max(0, x))
25 |     but with a zero subgradient where x is 0.
26 |     """
27 |     ret = torch.zeros_like(x)
28 |     positive_mask = x > 0
29 |     ret[positive_mask] = torch.sqrt(x[positive_mask])
30 |     return ret
31 | 
32 | 
33 | def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
34 |     """
35 |     Convert rotations given as rotation matrices to quaternions.
36 | 
37 |     Args:
38 |         matrix: Rotation matrices as tensor of shape (..., 3, 3).
39 | 
40 |     Returns:
41 |         quaternions with real part first, as tensor of shape (..., 4).
42 |     """
43 |     if matrix.size(-1) != 3 or matrix.size(-2) != 3:
44 |         raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
45 | 
46 |     batch_dim = matrix.shape[:-2]
47 |     m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(
48 |         matrix.reshape(batch_dim + (9,)), dim=-1
49 |     )
50 | 
51 |     q_abs = _sqrt_positive_part(
52 |         torch.stack(
53 |             [
54 |                 1.0 + m00 + m11 + m22,
55 |                 1.0 + m00 - m11 - m22,
56 |                 1.0 - m00 + m11 - m22,
57 |                 1.0 - m00 - m11 + m22,
58 |             ],
59 |             dim=-1,
60 |         )
61 |     )
62 | 
63 |     # we produce the desired quaternion multiplied by each of r, i, j, k
64 |     quat_by_rijk = torch.stack(
65 |         [
66 |             # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
67 |             #  `int`.
68 |             torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
69 |             # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
70 |             #  `int`.
71 |             torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
72 |             # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
73 |             #  `int`.
74 |             torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
75 |             # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
76 |             #  `int`.
77 |             torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
78 |         ],
79 |         dim=-2,
80 |     )
81 | 
82 |     # We floor here at 0.1 but the exact level is not important; if q_abs is small,
83 |     # the candidate won't be picked.
84 |     flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
85 |     quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))
86 | 
87 |     # if not for numerical problems, quat_candidates[i] should be same (up to a sign),
88 |     # forall i; we pick the best-conditioned one (with the largest denominator)
89 |     out = quat_candidates[
90 |         F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :
91 |     ].reshape(batch_dim + (4,))
92 |     return standardize_quaternion(out)
93 | 
94 | def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
95 |     """
96 |     Convert rotations given as quaternions to rotation matrices.
97 | 
98 |     Args:
99 |         quaternions: quaternions with real part first,
100 |             as tensor of shape (..., 4).
101 | 
102 |     Returns:
103 |         Rotation matrices as tensor of shape (..., 3, 3).
104 |     """
105 |     r, i, j, k = torch.unbind(quaternions, -1)
106 |     # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
107 |     two_s = 2.0 / (quaternions * quaternions).sum(-1)
108 | 
109 |     o = torch.stack(
110 |         (
111 |             1 - two_s * (j * j + k * k),
112 |             two_s * (i * j - k * r),
113 |             two_s * (i * k + j * r),
114 |             two_s * (i * j + k * r),
115 |             1 - two_s * (i * i + k * k),
116 |             two_s * (j * k - i * r),
117 |             two_s * (i * k - j * r),
118 |             two_s * (j * k + i * r),
119 |             1 - two_s * (i * i + j * j),
120 |         ),
121 |         -1,
122 |     )
123 |     return o.reshape(quaternions.shape[:-1] + (3, 3))
124 | 
125 | 
126 | 
127 | def isaac_mat2quat(mat: torch.Tensor) -> torch.Tensor:
128 |     """
129 |     returns the quaternion in xyzw format (following the isaacgym convention)
130 |     """
131 |     wxyz = matrix_to_quaternion(mat)
132 |     xyzw = torch.cat([wxyz[:, 1:], wxyz[:, :1]], dim=-1)
133 |     return xyzw
134 | 
135 | def mat2posquat(mat: torch.Tensor) -> torch.Tensor:
136 |     pos = mat[..., :3, 3]
137 |     quat = isaac_mat2quat(mat[..., :3, :3])
138 |     return torch.cat([pos, quat], dim=-1)
139 | 
140 | def posquat2mat(posquat: torch.Tensor) -> torch.Tensor:
141 |     """
142 |     convert pos + quaternion in xyzw format to matrix
143 |     """
144 |     batch = posquat.shape[0]
145 |     pos = posquat[..., :3]
146 |     quat_xyzw = posquat[..., 3:]
147 |     quat_wxyz = torch.cat([quat_xyzw[:, 3:], quat_xyzw[:, :3]], dim=-1)
148 |     rot_mat = quaternion_to_matrix(quat_wxyz)
149 | 
150 |     result = torch.eye(4, device=pos.device, dtype=pos.dtype).unsqueeze(0).repeat(batch, 1, 1)  # repeat, not expand: an expanded view shares memory across the batch, so in-place writes fail for batch > 1
151 |     result[..., :3, :3] = rot_mat
152 |     result[..., :3, 3] = pos
153 | 
154 |     return result
155 | 
156 | 
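
A quick round-trip check for the pose converters above (a sketch; the quaternion sign may differ, but the reconstructed matrix matches):

    import torch
    from avp_stream.utils.se3_utils import mat2posquat, posquat2mat

    T = torch.eye(4).unsqueeze(0)                # (1, 4, 4) pose
    T[:, :3, 3] = torch.tensor([0.1, 0.2, 0.3])  # pure translation
    pq = mat2posquat(T)                          # (1, 7): xyz position + xyzw quaternion
    assert torch.allclose(posquat2mat(pq), T, atol = 1e-6)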
--------------------------------------------------------------------------------
/vision_pro_app/avp_stream/utils/trn_constants.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from avp_stream.utils.se3_utils import *
3 | 
4 | 
5 | VISIONOS_TO_ISAAC = torch.tensor([[1, 0, 0, 0],
6 |                                   [0, 0, -1, 0],
7 |                                   [0, 1, 0, 0],
8 |                                   [0, 0, 0, 1]], dtype = torch.float32).unsqueeze(0)
9 | 
10 | FRAME_TRANSFORMATION = torch.tensor([[-1, 0, 0],
11 |                                      [0, 0, 1],
12 |                                      [0, 1, 0]], dtype = torch.float32).unsqueeze(0)
13 | FRAME_QUAT = isaac_mat2quat(FRAME_TRANSFORMATION)
14 | 
15 | FRAME_44 = torch.tensor([[-1, 0, 0, 0.7],
16 |                          [0, 0, 1, 0.9],
17 |                          [0, 1, 0, 1.6],
18 |                          [0, 0, 0, 1]], dtype = torch.float32).unsqueeze(0)
19 | 
20 | 
21 | ROTATE_90DEG_AROUND_X = torch.tensor([[1, 0, 0, 0],
22 |                                       [0, 0, -1, 0],
23 |                                       [0, 1, 0, 0],
24 |                                       [0, 0, 0, 1]], dtype = torch.float32).unsqueeze(0)
25 | 
26 | ROTATE_90DEG_AROUND_Y = torch.tensor([[0, 0, 1, 0],
27 |                                       [0, 1, 0, 0],
28 |                                       [-1, 0, 0, 0],
29 |                                       [0, 0, 0, 1]], dtype = torch.float32).unsqueeze(0)
30 | 
31 | ROTATE_NEG_90DEG_AROUND_Y = torch.tensor([[0, 0, -1, 0],
32 |                                           [0, 1, 0, 0],
33 |                                           [1, 0, 0, 0],
34 |                                           [0, 0, 0, 1]], dtype = torch.float32).unsqueeze(0)
35 | 
36 | ROTATE_180DEG_AROUND_Z = torch.tensor([[-1, 0, 0, 0],
37 |                                        [0, -1, 0, 0],
38 |                                        [0, 0, 1, 0],
39 |                                        [0, 0, 0, 1]], dtype=torch.float32).unsqueeze(0)
40 | 
41 | 
42 | VISIONOS_RIGHT_HAND_TO_LEAP = ROTATE_90DEG_AROUND_X @ ROTATE_NEG_90DEG_AROUND_Y
43 | 
44 | VISIONOS_LEFT_HAND_TO_LEAP = ROTATE_90DEG_AROUND_X @ ROTATE_90DEG_AROUND_Y
45 | 
46 | VISIONOS_RIGHT_FINGERS_TO_LEAP = ROTATE_90DEG_AROUND_Y
47 | 
--------------------------------------------------------------------------------
/vision_pro_app/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 | 
3 | setup(
4 |     name='avp_stream',
5 |     version='1.0',
6 |     description='This Python package streams diverse tracking data from the Apple Vision Pro (AVP) to any device that can communicate over gRPC.',
7 |     author='Younghyo Park',
8 |     author_email='younghyo@mit.edu',
9 |     packages=find_packages(),
10 |     install_requires=[
11 |         'numpy', 'grpcio', 'grpcio-tools', 'matplotlib'
12 |     ],
13 |     extras_require={
14 |     },
15 | )
--------------------------------------------------------------------------------