├── .gitignore
├── README.md
├── demos
│   ├── IsaacSim
│   │   ├── README.md
│   │   ├── env_demo.py
│   │   ├── gallery
│   │   │   ├── .thumbs
│   │   │   │   └── 256x256
│   │   │   │       ├── abc.usda.png
│   │   │   │       ├── bcd.usda.png
│   │   │   │       └── gallery.usd.png
│   │   │   ├── gallery.py
│   │   │   └── gallery.usd
│   │   ├── lift_soft_bodies
│   │   │   ├── basket.obj
│   │   │   └── lift_soft_bodies.py
│   │   ├── lift_soft_cloth
│   │   │   ├── cloth.jpg
│   │   │   ├── jacket.usd
│   │   │   └── lift_soft_cloth.py
│   │   ├── lift_teddy_bear.py
│   │   ├── meta_quest_3_ik_demo.py
│   │   └── spawn_demo.py
│   ├── SimulationFramework
│   │   ├── hand_over.py
│   │   ├── pick_and_place.py
│   │   ├── pick_and_place_with_meta_quest.py
│   │   ├── pick_ycb_object.py
│   │   └── ycb_object_pick_and_place.py
│   ├── fancy_gym
│   │   ├── BoxPushingDense.py
│   │   └── TableTennis.py
│   ├── genesis
│   │   ├── anymal_c_example.py
│   │   ├── drone_example.py
│   │   └── franka_example.py
│   ├── libero
│   │   ├── libero_example.py
│   │   └── libero_robot_control.py
│   ├── metaworld
│   │   └── metaworld_example.py
│   ├── mink
│   │   └── aloha_example.py
│   ├── mujoco
│   │   ├── mujoco_menagerie.py
│   │   └── table_tennis_two_players
│   │       ├── assets
│   │       │   ├── bats.xml
│   │       │   ├── include_free_ball.xml
│   │       │   ├── include_table.xml
│   │       │   ├── include_target_ball.xml
│   │       │   └── table_tennis_env.xml
│   │       └── table_tennis.py
│   ├── point_cloud
│   │   └── point_cloud_loader.py
│   └── robocasa
│       └── robocasa_example.py
├── scripts
│   └── change_name.py
├── setup.py
├── simpub
│   ├── __init__.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── log.py
│   │   ├── net_manager.py
│   │   ├── simpub_server.py
│   │   └── utils.py
│   ├── parser
│   │   ├── coppelia_sim.py
│   │   ├── gs.py
│   │   ├── isaacsim.py
│   │   ├── mesh_utils.py
│   │   └── mj.py
│   ├── sim
│   │   ├── __init__.py
│   │   ├── coppelia_sim_publisher.py
│   │   ├── fancy_gym.py
│   │   ├── genesis_publisher.py
│   │   ├── isaacsim_publisher.py
│   │   ├── mj_publisher.py
│   │   ├── robocasa_publisher.py
│   │   └── sf_publisher.py
│   ├── simdata.py
│   └── xr_device
│       ├── IsaacLab_se3_SimPubHandTracking.py
│       ├── QRAlignment.py
│       ├── __init__.py
│       ├── meta_quest3.py
│       └── xr_device.py
└── test
    ├── mj_parser_test.py
    ├── test_net_manager.py
    └── test_stop_node.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | scenes/
7 | models copy/
8 | test.py
9 | # *.xml
10 | # C extensions
11 | *.so
12 | *.json
13 |
14 | # Distribution / packaging
15 | .Python
16 | build/
17 | develop-eggs/
18 | dist/
19 | downloads/
20 | eggs/
21 | .eggs/
22 | lib/
23 | lib64/
24 | parts/
25 | sdist/
26 | var/
27 | wheels/
28 | share/python-wheels/
29 | *.egg-info/
30 | .installed.cfg
31 | *.egg
32 | MANIFEST
33 |
34 | # PyInstaller
35 | # Usually these files are written by a python script from a template
36 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
37 | *.manifest
38 | *.spec
39 |
40 | # Installer logs
41 | pip-log.txt
42 | pip-delete-this-directory.txt
43 |
44 | # Unit test / coverage reports
45 | htmlcov/
46 | .tox/
47 | .nox/
48 | .coverage
49 | .coverage.*
50 | .cache
51 | nosetests.xml
52 | coverage.xml
53 | *.cover
54 | *.py,cover
55 | .hypothesis/
56 | .pytest_cache/
57 | cover/
58 |
59 | # Translations
60 | *.mo
61 | *.pot
62 |
63 | # Django stuff:
64 | *.log
65 | local_settings.py
66 | db.sqlite3
67 | db.sqlite3-journal
68 |
69 | # Flask stuff:
70 | instance/
71 | .webassets-cache
72 |
73 | # Scrapy stuff:
74 | .scrapy
75 |
76 | # Sphinx documentation
77 | docs/_build/
78 |
79 | # PyBuilder
80 | .pybuilder/
81 | target/
82 |
83 | # Jupyter Notebook
84 | .ipynb_checkpoints
85 |
86 | # IPython
87 | profile_default/
88 | ipython_config.py
89 |
90 | # pyenv
91 | # For a library or package, you might want to ignore these files since the code is
92 | # intended to run in multiple environments; otherwise, check them in:
93 | # .python-version
94 |
95 | # pipenv
96 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
97 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
98 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
99 | # install all needed dependencies.
100 | #Pipfile.lock
101 |
102 | # poetry
103 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
104 | # This is especially recommended for binary packages to ensure reproducibility, and is more
105 | # commonly ignored for libraries.
106 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
107 | #poetry.lock
108 |
109 | # pdm
110 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
111 | #pdm.lock
112 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
113 | # in version control.
114 | # https://pdm.fming.dev/#use-with-ide
115 | .pdm.toml
116 |
117 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
118 | __pypackages__/
119 |
120 | # Celery stuff
121 | celerybeat-schedule
122 | celerybeat.pid
123 |
124 | # SageMath parsed files
125 | *.sage.py
126 |
127 | # Environments
128 | .env
129 | .venv
130 | env/
131 | venv/
132 | ENV/
133 | env.bak/
134 | venv.bak/
135 |
136 | # Spyder project settings
137 | .spyderproject
138 | .spyproject
139 |
140 | # Rope project settings
141 | .ropeproject
142 |
143 | # mkdocs documentation
144 | /site
145 |
146 | # mypy
147 | .mypy_cache/
148 | .dmypy.json
149 | dmypy.json
150 |
151 | # Pyre type checker
152 | .pyre/
153 |
154 | # pytype static type analyzer
155 | .pytype/
156 |
157 | # Cython debug symbols
158 | cython_debug/
159 |
160 | # PyCharm
161 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
162 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
163 | # and can be added to the global gitignore or merged into this file. For a more nuclear
164 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
165 | #.idea/
166 |
167 | # Mujoco
168 | MUJOCO_LOG.TXT
169 |
170 | # vscode
171 | .vscode/
172 |
173 | # texture cache for isaacsim
174 | .textures
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SimPublisher
2 |
3 | This repo is a versatile tool designed to seamlessly render objects from simulations or sensors onto Mixed Reality (MR) and Augmented Reality (AR) headsets.
4 |
5 | ## Isaac Sim
6 | Please check [this](./demos/IsaacSim/README.md) for information on how to set up SimPub with Isaac Sim.
7 |
8 | ## Table of Contents
9 | - [Introduction](#Introduction)
10 | - [Features](#Features)
11 | - [Installation](#Installation)
12 | - [Usage](#Usage)
13 |
14 | ## Introduction
15 |
16 | Are you looking to integrate your Human-Robot Interaction (HRI) application with MR/VR/AR headsets effortlessly?
17 | This repository is perfect for you.
18 |
19 | We provide a ROS-style interface that allows you to easily use Python code to project simulation scenes onto MR headsets. Additionally, you can use input data from the headset to control virtual robots and objects within the simulation environment or even real robots.
20 |
21 | This repo uses ZMQ to communicate with the MR application and provides automatic device discovery and reconnection.
22 |
23 | We also offer a [Unity Application](https://github.com/intuitive-robots/IRXR-Unity) that is easy to deploy for projecting and updating simulation scenes in the MR headset.
24 |
25 | ## Features
26 |
27 | ### Easy Connection to Headset
28 |
29 | - **Automatic Discovery**: SimPub searches for all devices in the subnet and connects to them fully automatically.
30 | - **Reconnection**: The headset automatically reconnects to the PC if you restart the Python script.
31 | - **Remote Logger**: Headset logs, including FPS and latency, are sent back to the simulation PC.
32 |
33 | ### Supported Simulation Environment
34 |
35 | - [Mujoco](https://mujoco.readthedocs.io/en/stable/overview.html)
36 | - [SimulationFramework](https://github.com/ALRhub/SimulationFrameworkPublic)
37 | - [FancyGym](https://github.com/ALRhub/fancy_gym)
38 | - [IsaacLab](https://github.com/isaac-sim/IsaacLab)
39 |
40 | ### Supported Headset
41 |
42 | - Meta Quest 3
43 | - HoloLens 2
44 |
45 | ## Installation
46 |
47 | 1. Install the dependencies
48 | ```bash
49 | pip install zmq trimesh
50 | ```
51 |
52 | 2. Install this repo
53 | ```bash
54 | cd $the_path_to_this_project
55 | pip install -e .
56 | ```
57 |
58 | ## Usage
59 |
60 | 1. Deploy the Unity application to your headset with the device name.
61 | Please refer to the [website](https://github.com/intuitive-robots/IRXR-Unity).
62 |
63 | 2. Connect your simulation PC and headset to the same subnet.
64 | For example, if your simulation PC address is `192.168.0.152`,
65 | the headset address should share the same prefix, such as `192.168.0.142` or `192.168.0.73`.
66 | We recommend using a single WiFi router for PC-headset communication to ensure optimal connectivity.
67 | Additionally, using a wired cable to connect your PC can significantly reduce latency.
68 |
69 | 3. Run the usage examples under the folder `/demos/` (a minimal publisher snippet is shown below), then put on the headset, start the Unity application, and enjoy!
70 |
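For reference, attaching a publisher from your Python script looks like the snippet below (taken from the Isaac Sim demo; the other supported simulators have analogous publisher classes under `simpub/sim`, e.g. `mj_publisher.py` for MuJoCo):

```python
from simpub.sim.isaacsim_publisher import IsaacSimPublisher

# call this once the scene has finished initializing (see demos/IsaacSim/README.md),
# and replace the host address with your simulation PC's IP in the shared subnet
publisher = IsaacSimPublisher(host="192.168.0.134", stage=sim.stage)
```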
71 |
--------------------------------------------------------------------------------
/demos/IsaacSim/README.md:
--------------------------------------------------------------------------------
1 | # Isaac Lab Setup Guide
2 |
3 | ## Quick Start
4 |
5 | Please follow the [readme](../../README.md) in the root folder for installing simpub.
6 |
7 | You can install Isaac Sim and Isaac Lab by following [this page](https://isaac-sim.github.io/IsaacLab/source/setup/installation/binaries_installation.html). You can also install Isaac Sim with pip, but I have not tested that.
8 |
9 | **IsaacSim Installation.** Please follow [this link](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html#installation-using-pip) to install IsaacSim, especially the cached extension dependencies.
10 |
11 | **VSCode Extension.** If you encounter IsaacSim freezing, please try disabling the VSCode extension.
12 |
13 | Now you can activate your Isaac Lab conda environment, and run some demos. For example, run this in the root folder:
14 | ```
15 | python demos/IsaacSim/spawn_demo.py
16 | ```
17 |
18 | - [spawn_demo.py](./spawn_demo.py) is an example of spawning primitives manually.
19 | - [env_demo.py](./env_demo.py) is an example of using environments.
20 | - [meta_quest_3_ik_demo.py](./meta_quest_3_ik_demo.py) is an interactive scene where you can control a robot arm with the Meta Quest 3 joysticks. (It might not work properly...)
21 |
22 | To use simpub with Isaac Sim in your code, simply add this after the scene has finished initializing (please check the demos):
23 | ```
24 | from simpub.sim.isaacsim_publisher import IsaacSimPublisher
25 |
26 | # use only one of these
27 | publisher = IsaacSimPublisher(host="192.168.0.134", stage=sim.stage) # for InteractiveScene
28 | publisher = IsaacSimPublisher(host="192.168.0.134", stage=env.sim.stage) # for environments
29 | ```
30 |
31 | ## Tips
32 |
33 | The file [demos/IsaacSim/script.py](./script.py) provides some information on how to retrieve materials from USD prims, but it is not complete yet. You still have to figure out how to retrieve the albedo color and texture from the Material object. You might find [this](https://openusd.org/dev/api/class_usd_shade_material_binding_a_p_i.html) helpful (or not).
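As a starting point, here is a rough, untested sketch of resolving the bound material with `UsdShade`; the shader input name is an assumption and depends on the shader actually used (e.g. `diffuseColor` for UsdPreviewSurface, `diffuse_color_constant` for OmniPBR/MDL):

```python
from pxr import UsdShade

def get_base_color(prim):
    # resolve the material bound to this prim
    material, _ = UsdShade.MaterialBindingAPI(prim).ComputeBoundMaterial()
    if not material:
        return None
    # get the surface shader and read one of its inputs
    shader, _, _ = material.ComputeSurfaceSource()
    if not shader:
        return None
    color_input = shader.GetInput("diffuseColor")  # assumed input name, shader-dependent
    return color_input.Get() if color_input else None
```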
34 |
35 | Currently simpub doesn't support normal indexing, so each vertex has only one normal, which causes the rendering artifacts. Per-vertex normals are smoothed, so you can't render flat surfaces or sharp edges. To solve this, triangle faces in meshes should index not only vertices but also normals, meaning a vertex can have different normals on different triangles.
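For illustration, here is a mesh-side workaround (not something simpub does today): duplicating vertices per triangle makes flat per-face normals possible without normal indexing, at the cost of extra memory:

```python
import numpy as np

def unweld_mesh(vertices: np.ndarray, faces: np.ndarray):
    """Give every triangle its own three vertices and a flat per-face normal."""
    flat_vertices = vertices[faces].reshape(-1, 3)             # (3 * n_faces, 3)
    flat_faces = np.arange(len(flat_vertices)).reshape(-1, 3)  # re-index the triangles
    edge1 = flat_vertices[1::3] - flat_vertices[0::3]
    edge2 = flat_vertices[2::3] - flat_vertices[0::3]
    face_normals = np.cross(edge1, edge2)
    face_normals /= np.linalg.norm(face_normals, axis=1, keepdims=True)
    flat_normals = np.repeat(face_normals, 3, axis=0)          # one copy per duplicated vertex
    return flat_vertices, flat_faces, flat_normals
```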
36 |
37 | Check [this](https://docs.omniverse.nvidia.com/kit/docs/usdrt/latest/docs/usd_fabric_usdrt.html) for the differences between the USD, usdrt, and Fabric APIs. Basically, in Python, you should use USD for CPU simulation and usdrt for GPU simulation.
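For example, an untested sketch based on the linked docs: a usdrt stage is attached to an already-opened USD stage rather than opened from a file:

```python
import omni.usd
from usdrt import Usd as RtUsd

# attach a usdrt stage to the stage currently opened by the USD context
stage_id = omni.usd.get_context().get_stage_id()
rt_stage = RtUsd.Stage.Attach(stage_id)
```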
38 |
39 | In Omniverse, there are prim prototypes that can be instantiated. Currently, IsaacSimPublisher doesn't handle this properly: it creates a separate mesh object and stores the same geometry repeatedly for every instance of a prototype. For large scenes with complex geometries, this might be a problem.
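One possible mitigation, sketched here only to illustrate the idea (this is not what IsaacSimPublisher does today, and `parse_prototype` is a hypothetical stand-in for the actual geometry parsing), is to cache parsed geometry per prototype path and let all instances reference the cached entry:

```python
mesh_cache: dict = {}

def get_or_parse_mesh(prototype_path: str, parse_prototype):
    # parse each prototype's geometry only once; instances share the cached result
    if prototype_path not in mesh_cache:
        mesh_cache[prototype_path] = parse_prototype(prototype_path)
    return mesh_cache[prototype_path]
```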
40 |
41 | Currently, a usdrt stage is created for both CPU and GPU simulation. This works but might lead to unexpected behavior. Maybe add a new parameter to the publisher indicating where the simulation is running and use only the USD stage for CPU simulation.
42 |
--------------------------------------------------------------------------------
/demos/IsaacSim/env_demo.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022-2024, The Isaac Lab Project Developers.
2 | # All rights reserved.
3 | #
4 | # SPDX-License-Identifier: BSD-3-Clause
5 |
6 | import argparse
7 | import numpy as np
8 | import torch
9 | import carb  # needed for carb.log_warn below
10 | from scipy.spatial.transform import Rotation
11 |
12 | from isaaclab.app import AppLauncher
13 | import gymnasium as gym
14 |
15 | # add argparse arguments
16 | parser = argparse.ArgumentParser(
17 | description="This script demonstrates different single-arm manipulators."
18 | )
19 | # append AppLauncher cli args
20 | AppLauncher.add_app_launcher_args(parser)
21 | # parse the arguments
22 | args_cli = parser.parse_args()
23 |
24 | # launch omniverse app
25 | app_launcher = AppLauncher(args_cli)
26 | simulation_app = app_launcher.app
27 |
28 | # isaac sim modules are only available after launching it (?)
29 |
30 | from pxr import Usd
31 |
32 | import isaacsim.core.utils.prims as prim_utils
33 |
34 | import isaaclab.sim as sim_utils
35 | from isaaclab.assets import Articulation, RigidObjectCfg
36 | from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg
37 | from isaaclab.sim.spawners.shapes.shapes_cfg import CapsuleCfg
38 | from isaaclab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg
39 | from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR
40 | # from omni.isaac.lab.controllers.differential_ik_cfg import DifferentialIKControllerCfg
41 | # from omni.isaac.lab.envs.mdp.actions.actions_cfg import (
42 | # DifferentialInverseKinematicsActionCfg,
43 | # )
44 |
45 | from isaaclab.devices import Se3Gamepad, Se3Keyboard, Se3SpaceMouse
46 |
47 | import isaaclab_tasks # noqa: F401
48 | from isaaclab_tasks.utils import parse_env_cfg
49 |
50 | from simpub.sim.isaacsim_publisher import IsaacSimPublisher
51 | # from simpub.xr_device.meta_quest3 import MetaQuest3
52 |
53 | print(ISAACLAB_NUCLEUS_DIR)
54 |
55 |
56 | def pre_process_actions(
57 | task: str, delta_pose: torch.Tensor, gripper_command: bool
58 | ) -> torch.Tensor:
59 | """Pre-process actions for the environment."""
60 | # compute actions based on environment
61 | if "Reach" in task:
62 | # note: reach is the only one that uses a different action space
63 | # compute actions
64 | return delta_pose
65 | else:
66 | # resolve gripper command
67 | gripper_vel = torch.zeros(delta_pose.shape[0], 1, device=delta_pose.device)
68 | gripper_vel[:] = -1.0 if gripper_command else 1.0
69 | # compute actions
70 | return torch.concat([delta_pose, gripper_vel], dim=1)
71 |
72 |
73 | def main():
74 | """Running keyboard teleoperation with Isaac Lab manipulation environment."""
75 | task = "Isaac-Lift-Cube-Franka-IK-Rel-v0"
76 |
77 | # parse configuration
78 | # why using undefined parameter (use_gpu) doesn't raise error?
79 | env_cfg = parse_env_cfg(
80 | task_name=task,
81 | num_envs=1,
82 | use_fabric=True,
83 | )
84 |
85 | # add some primitives to the env
86 | env_cfg.scene.cube2 = RigidObjectCfg(
87 | prim_path="{ENV_REGEX_NS}/Cube2",
88 | init_state=RigidObjectCfg.InitialStateCfg(
89 | pos=[0.7, 0, 0.155], rot=[1, 0, 0, 0]
90 | ),
91 | spawn=UsdFileCfg(
92 | usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
93 | scale=(0.8, 0.8, 0.8),
94 | rigid_props=RigidBodyPropertiesCfg(
95 | solver_position_iteration_count=16,
96 | solver_velocity_iteration_count=1,
97 | max_angular_velocity=1000.0,
98 | max_linear_velocity=1000.0,
99 | max_depenetration_velocity=5.0,
100 | disable_gravity=False,
101 | ),
102 | ),
103 | )
104 |
105 | env_cfg.scene.cube3 = RigidObjectCfg(
106 | prim_path="{ENV_REGEX_NS}/Capsule1",
107 | init_state=RigidObjectCfg.InitialStateCfg(
108 | pos=[0.6, 0, 0.055], rot=[1, 0, 0, 0]
109 | ),
110 | spawn=CapsuleCfg(
111 | radius=0.03,
112 | height=0.1,
113 | axis="Y",
114 | rigid_props=sim_utils.RigidBodyPropertiesCfg(),
115 | mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
116 | collision_props=sim_utils.CollisionPropertiesCfg(),
117 | visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.1, 0.1, 1.0)),
118 | ),
119 | )
120 |
121 | # # you should probably set use_relative_mode to False when using meta quest 3 input
122 | # # but currently it does not work (seems to be a problem of coordinate system alignment)
123 | # env_cfg.actions.arm_action = DifferentialInverseKinematicsActionCfg(
124 | # asset_name="robot",
125 | # joint_names=["panda_joint.*"],
126 | # body_name="panda_hand",
127 | # controller=DifferentialIKControllerCfg(
128 | # command_type="pose", use_relative_mode=False, ik_method="dls"
129 | # ),
130 | # scale=1.0,
131 | # body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(
132 | # pos=[0.0, 0.0, 0.107]
133 | # ),
134 | # )
135 |
136 | # modify configuration
137 | env_cfg.terminations.time_out = None
138 |
139 | # create environment
140 | env = gym.make(task, cfg=env_cfg).unwrapped
141 | # print(env.sim)
142 |
143 | # check environment name (for reach , we don't allow the gripper)
144 | if "Reach" in task:
145 | carb.log_warn(
146 | f"The environment '{task}' does not support gripper control. The device command will be ignored."
147 | )
148 |
149 | # create controller
150 | sensitivity = 10
151 | teleop_interface = Se3Keyboard(
152 | pos_sensitivity=0.005 * sensitivity,
153 | rot_sensitivity=0.005 * sensitivity,
154 | )
155 |
156 | # add teleoperation key for env reset
157 | teleop_interface.add_callback("L", env.reset)
158 | # print helper for keyboard
159 | print(teleop_interface)
160 |
161 | # reset environment
162 | env.reset()
163 | teleop_interface.reset()
164 |
165 | print("simulation context:", env.sim)
166 | print("stage:", env.sim.stage)
167 |
168 | if env.sim is not None and env.sim.stage is not None:
169 | print("parsing usd stage...")
170 | publisher = IsaacSimPublisher(host="192.168.0.134", stage=env.sim.stage)
171 | # publisher = IsaacSimPublisher(host="127.0.0.1", stage=env.sim.stage)
172 |
173 | # meta_quest3 = MetaQuest3("ALRMetaQuest3")
174 |
175 | # simulate environment
176 | while simulation_app.is_running():
177 | # run everything in inference mode
178 | with torch.inference_mode():
179 | # # get meta quest 3 input (does not work...)
180 | # input_data = meta_quest3.get_input_data()
181 | # if input_data is None:
182 | # continue
183 |
184 | # input_pos = input_data["right"]["pos"]
185 | # input_pos[2] -= 0.1
186 | # input_rot = input_data["right"]["rot"]
187 | # input_rot = Rotation.from_quat(input_rot).as_quat()
188 | # input_gripper = input_data["right"]["index_trigger"]
189 |
190 | # input_pos = [0.4, 0.0, 0.2]
191 | # input_rot = Rotation.from_euler("XYZ", [0, 180, 0], degrees=True).as_quat()
192 |
193 | # get keyboard command
194 | delta_pose, gripper_command = teleop_interface.advance()
195 |
196 | # # get command from meta quest 3
197 | # delta_pose = np.array(
198 | # [
199 | # input_pos[0],
200 | # input_pos[1],
201 | # input_pos[2],
202 | # input_rot[3],
203 | # input_rot[0],
204 | # input_rot[1],
205 | # input_rot[2],
206 | # ]
207 | # )
208 | # print(delta_pose)
209 | # gripper_command = input_gripper
210 |
211 | delta_pose = delta_pose.astype("float32")
212 | # convert to torch
213 | delta_pose = torch.tensor(delta_pose, device=env.unwrapped.device).repeat(
214 | env.unwrapped.num_envs, 1
215 | )
216 | # pre-process actions
217 | actions = pre_process_actions(task, delta_pose, gripper_command)
218 | # apply actions
219 | env.step(actions)
220 |
221 | # close the simulator
222 | env.close()
223 |
224 |
225 | if __name__ == "__main__":
226 | main()
227 | simulation_app.close()
228 |
--------------------------------------------------------------------------------
/demos/IsaacSim/gallery/.thumbs/256x256/abc.usda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intuitive-robots/SimPublisher/6f0348f678038ca6738100cfa1ab757989dde006/demos/IsaacSim/gallery/.thumbs/256x256/abc.usda.png
--------------------------------------------------------------------------------
/demos/IsaacSim/gallery/.thumbs/256x256/bcd.usda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intuitive-robots/SimPublisher/6f0348f678038ca6738100cfa1ab757989dde006/demos/IsaacSim/gallery/.thumbs/256x256/bcd.usda.png
--------------------------------------------------------------------------------
/demos/IsaacSim/gallery/.thumbs/256x256/gallery.usd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intuitive-robots/SimPublisher/6f0348f678038ca6738100cfa1ab757989dde006/demos/IsaacSim/gallery/.thumbs/256x256/gallery.usd.png
--------------------------------------------------------------------------------
/demos/IsaacSim/gallery/gallery.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022-2024, The Isaac Lab Project Developers.
2 | # All rights reserved.
3 | #
4 | # SPDX-License-Identifier: BSD-3-Clause
5 |
6 | """
7 | This script demonstrates different legged robots.
8 |
9 | .. code-block:: bash
10 |
11 | # Usage
12 |     python demos/IsaacSim/gallery/gallery.py
13 |
14 | """
15 |
16 | """Launch Isaac Sim Simulator first."""
17 |
18 | import argparse
19 | import math
20 | import time
21 | from pathlib import Path
22 |
23 | from isaaclab.app import AppLauncher
24 |
25 | # add argparse arguments
26 | parser = argparse.ArgumentParser(description="This script demonstrates different legged robots.")
27 | # append AppLauncher cli args
28 | AppLauncher.add_app_launcher_args(parser)
29 | # parse the arguments
30 | args_cli = parser.parse_args()
31 |
32 | # launch omniverse app
33 | app_launcher = AppLauncher(args_cli)
34 | simulation_app = app_launcher.app
35 |
36 | """Rest everything follows."""
37 |
38 | import numpy as np
39 | import isaacsim.core.utils.prims as prim_utils
40 | import isaaclab.sim as sim_utils
41 | import torch
42 | from isaacsim.core.utils.rotations import euler_angles_to_quat
43 | from isaaclab.assets import Articulation, ArticulationCfg, AssetBaseCfg
44 | from isaaclab.sim import UsdFileCfg
45 | from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR
46 | from isaaclab_assets import (
47 | FRANKA_PANDA_CFG,
48 | G1_MINIMAL_CFG,
49 | H1_CFG,
50 | H1_MINIMAL_CFG,
51 | KINOVA_GEN3_N7_CFG,
52 | SAWYER_CFG,
53 | UR10_CFG,
54 | )
55 | from isaaclab_assets.robots.anymal import ANYMAL_B_CFG, ANYMAL_C_CFG, ANYMAL_D_CFG
56 | from isaaclab_assets.robots.spot import SPOT_CFG
57 | from isaaclab_assets.robots.unitree import (
58 | UNITREE_A1_CFG,
59 | UNITREE_GO1_CFG,
60 | UNITREE_GO2_CFG,
61 | )
62 |
63 | from simpub.sim.isaacsim_publisher import IsaacSimPublisher
64 |
65 |
66 | def design_scene():
67 | scene_entities = {}
68 |
69 | scene_asset_cfg = AssetBaseCfg(
70 | prim_path="/World/scene",
71 | init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.0, 0.0], rot=[1.0, 0.0, 0.0, 0.0]),
72 | spawn=UsdFileCfg(
73 | usd_path=f"{Path(__file__).parent.as_posix()}/gallery.usd",
74 | ),
75 | )
76 | scene_asset_cfg.spawn.func(scene_asset_cfg.prim_path, scene_asset_cfg.spawn)
77 |
78 | anymal_b_cfg = ANYMAL_B_CFG
79 | anymal_b_cfg.prim_path = "/World/scene/anymal_b"
80 | anymal_b_cfg.spawn = None
81 | scene_entities["anymal_b"] = Articulation(cfg=anymal_b_cfg)
82 |
83 | anymal_d_cfg = ANYMAL_D_CFG
84 | anymal_d_cfg.prim_path = "/World/scene/anymal_d"
85 | anymal_d_cfg.spawn = None
86 | scene_entities["anymal_d"] = Articulation(cfg=anymal_d_cfg)
87 |
88 | go2_cfg = UNITREE_GO2_CFG
89 | go2_cfg.prim_path = "/World/scene/go2"
90 | go2_cfg.spawn = None
91 | scene_entities["go2"] = Articulation(cfg=go2_cfg)
92 |
93 | a1_cfg = UNITREE_A1_CFG
94 | a1_cfg.prim_path = "/World/scene/a1"
95 | a1_cfg.spawn = None
96 | scene_entities["a1"] = Articulation(cfg=a1_cfg)
97 |
98 | franka_cfg = FRANKA_PANDA_CFG
99 | franka_cfg.prim_path = "/World/scene/franka_panda"
100 | franka_cfg.spawn = None
101 | scene_entities["franka"] = Articulation(cfg=franka_cfg)
102 |
103 | ur10_cfg = UR10_CFG
104 | ur10_cfg.prim_path = "/World/scene/ur10"
105 | ur10_cfg.spawn = None
106 | scene_entities["ur10"] = Articulation(cfg=ur10_cfg)
107 |
108 | gen3n7_cfg = KINOVA_GEN3_N7_CFG
109 | gen3n7_cfg.prim_path = "/World/scene/gen3n7"
110 | gen3n7_cfg.spawn = None
111 | scene_entities["gen3n7"] = Articulation(cfg=gen3n7_cfg)
112 |
113 | sawyer_cfg = SAWYER_CFG
114 | sawyer_cfg.prim_path = "/World/scene/sawyer"
115 | sawyer_cfg.spawn = None
116 | scene_entities["sawyer"] = Articulation(cfg=sawyer_cfg)
117 |
118 | h1_cfg = H1_CFG
119 | h1_cfg.prim_path = "/World/scene/h1"
120 | h1_cfg.spawn = None
121 | scene_entities["h1"] = Articulation(cfg=h1_cfg)
122 |
123 | # return the scene information
124 | return scene_entities
125 |
126 |
127 | def run_simulator(
128 | sim: sim_utils.SimulationContext,
129 | entities: dict[str, Articulation],
130 | ):
131 | """Runs the simulation loop."""
132 | # Define simulation stepping
133 | sim_dt = sim.get_physics_dt()
134 | sim_time = 0.0
135 | count = 0
136 |
137 | for robot in entities.values():
138 | robot.data.default_root_state = robot.data.root_state_w.clone()
139 |
140 | elbow_indices, _ = entities["h1"].find_joints(".*_elbow")
141 | default_joint_pos = entities["h1"].data.default_joint_pos.clone()
142 | default_joint_pos[0, elbow_indices] = -0.2
143 | entities["h1"].data.default_joint_pos = default_joint_pos
144 |
145 | # Simulate physics
146 | while simulation_app.is_running():
147 | # reset
148 | if count % 2000 == 0:
149 | # reset counters
150 | sim_time = 0.0
151 | count = 0
152 | # reset robots
153 | for robot in entities.values():
154 | # root state
155 | root_state = robot.data.default_root_state.clone()
156 | robot.write_root_state_to_sim(root_state)
157 | # joint state
158 | joint_pos, joint_vel = (
159 | robot.data.default_joint_pos.clone(),
160 | robot.data.default_joint_vel.clone(),
161 | )
162 | robot.write_joint_state_to_sim(joint_pos, joint_vel)
163 | # reset the internal state
164 | robot.reset()
165 | print("[INFO]: Resetting robots state...")
166 | # apply default actions to the quadrupedal robots
167 | for robot in entities.values():
168 | # generate random joint positions
169 | joint_pos_target = robot.data.default_joint_pos # + torch.randn_like(robot.data.joint_pos) * 0.1
170 | # apply action to the robot
171 | robot.set_joint_position_target(joint_pos_target)
172 | # write data to sim
173 | robot.write_data_to_sim()
174 | # perform step
175 | sim.step()
176 | # update sim-time
177 | sim_time += sim_dt
178 | count += 1
179 | # update buffers
180 | for robot in entities.values():
181 | robot.update(sim_dt)
182 |
183 |
184 | def main():
185 | """Main function."""
186 |
187 | # Initialize the simulation context
188 | sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01))
189 | # Set main camera
190 | sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
191 | # design scene
192 | scene_entities = design_scene()
193 | # Play the simulator
194 | sim.reset()
195 |
196 | _ = IsaacSimPublisher(
197 | host="127.0.0.1",
198 | stage=sim.stage,
199 | ignored_prim_paths=[
200 | "/World/scene/GroundPlane",
201 | "/World/scene/Cube_04",
202 | "/World/scene/Cube_05",
203 | "/World/scene/Cube_06",
204 | ],
205 | texture_cache_dir=f"{Path(__file__).parent.as_posix()}/.textures",
206 | )
207 |
208 | # Now we are ready!
209 | print("[INFO]: Setup complete...")
210 | # Run the simulator
211 | run_simulator(sim, scene_entities)
212 |
213 |
214 | if __name__ == "__main__":
215 | # run the main function
216 | main()
217 | # close sim app
218 | simulation_app.close()
219 |
--------------------------------------------------------------------------------
/demos/IsaacSim/gallery/gallery.usd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intuitive-robots/SimPublisher/6f0348f678038ca6738100cfa1ab757989dde006/demos/IsaacSim/gallery/gallery.usd
--------------------------------------------------------------------------------
/demos/IsaacSim/lift_soft_bodies/lift_soft_bodies.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022-2024, The Isaac Lab Project Developers.
2 | # All rights reserved.
3 | #
4 | # SPDX-License-Identifier: BSD-3-Clause
5 |
6 | """
7 | Script to demonstrate lifting a deformable object with a robotic arm.
8 |
9 | Two Franka arms follow a fixed sequence of end-effector poses to lift a soft basket.
10 |
11 | .. code-block:: bash
12 |
13 |     # Run from the repository root (assuming the setup described in demos/IsaacSim/README.md)
14 |     python demos/IsaacSim/lift_soft_bodies/lift_soft_bodies.py
15 |
16 | """
17 |
18 | """Launch Omniverse Toolkit first."""
19 |
20 | import argparse
21 | import copy
22 | import math
23 | from pathlib import Path
24 |
25 | import numpy as np
26 | from isaaclab.app import AppLauncher
27 |
28 | # add argparse arguments
29 | parser = argparse.ArgumentParser(description="Pick and lift a teddy bear with a robotic arm.")
30 | parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.")
31 | # append AppLauncher cli args
32 | AppLauncher.add_app_launcher_args(parser)
33 | # parse the arguments
34 | args_cli = parser.parse_args()
35 |
36 | # launch omniverse app
37 | app_launcher = AppLauncher(headless=args_cli.headless)
38 | simulation_app = app_launcher.app
39 |
40 | """Rest everything else."""
41 |
42 | from collections.abc import Callable
43 | from dataclasses import MISSING
44 |
45 | import gymnasium as gym
46 | import isaacsim.core.utils.prims as prim_utils
47 | import isaaclab.sim as sim_utils
48 | import torch
49 | import trimesh
50 | from isaaclab.assets import DeformableObjectCfg
51 | from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg
52 | from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
53 | from isaaclab.sim.spawners.meshes import meshes
54 | from isaaclab.sim.spawners.meshes.meshes_cfg import MeshCfg
55 | from isaaclab.sim.utils import clone
56 | from isaaclab.utils import configclass
57 | from isaaclab_tasks.manager_based.manipulation.lift import mdp
58 | from isaaclab_tasks.manager_based.manipulation.lift.config.franka.ik_abs_env_cfg import FrankaTeddyBearLiftEnvCfg
59 | from isaaclab_tasks.manager_based.manipulation.lift.lift_env_cfg import LiftEnvCfg
60 | from isaaclab_tasks.utils.parse_cfg import parse_env_cfg
61 |
62 | from simpub.sim.isaacsim_publisher import IsaacSimPublisher
63 |
64 |
65 | @clone
66 | def _spawn_func(
67 | prim_path: str,
68 | cfg: "MeshTrimeshCfg",
69 | translation: tuple[float, float, float] | None = None,
70 | orientation: tuple[float, float, float, float] | None = None,
71 | ):
72 | mesh_obj: trimesh.Trimesh = trimesh.load(cfg.mesh)
73 | mesh_obj.fix_normals()
74 | # mesh_obj.apply_transform(trimesh.transformations.scale_matrix(0.75))
75 | mesh_obj.apply_transform(trimesh.transformations.euler_matrix(math.pi * 0.5, 0, math.pi * 0.5))
76 | meshes._spawn_mesh_geom_from_mesh(
77 | prim_path=prim_path,
78 | cfg=cfg,
79 | mesh=mesh_obj,
80 | translation=translation,
81 | orientation=orientation,
82 | )
83 | return prim_utils.get_prim_at_path(prim_path)
84 |
85 |
86 | @configclass
87 | class MeshTrimeshCfg(MeshCfg):
88 | """Configuration parameters for a sphere mesh prim with deformable properties.
89 |
90 | See :meth:`spawn_mesh_sphere` for more information.
91 | """
92 |
93 | func: Callable = _spawn_func
94 |
95 | mesh: str = MISSING
96 |
97 |
98 | class FrankaSoftBasketLiftEnvCfg(FrankaTeddyBearLiftEnvCfg):
99 | def __post_init__(self):
100 | super().__post_init__()
101 |
102 | self.episode_length_s = 20.0
103 |
104 | self.events.reset_object_position = None
105 |
106 | self.scene.table = None
107 | self.scene.plane.init_state.pos = [0, 0, 0]
108 |
109 | self.scene.robot_2 = copy.deepcopy(self.scene.robot)
110 | self.scene.robot_2.prim_path = "{ENV_REGEX_NS}/Robot2"
111 | self.scene.robot_2.init_state.pos = [1.4, 0, 0]
112 | self.scene.robot_2.init_state.rot = [0, 0, 0, 1]
113 |
114 | self.actions.arm_action_2 = DifferentialInverseKinematicsActionCfg(
115 | asset_name="robot_2",
116 | joint_names=["panda_joint.*"],
117 | body_name="panda_hand",
118 | controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"),
119 | body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
120 | )
121 | self.actions.gripper_action_2 = mdp.BinaryJointPositionActionCfg(
122 | asset_name="robot_2",
123 | joint_names=["panda_finger.*"],
124 | open_command_expr={"panda_finger_.*": 0.04},
125 | close_command_expr={"panda_finger_.*": 0.0},
126 | )
127 |
128 | self.scene.object = DeformableObjectCfg(
129 | prim_path="{ENV_REGEX_NS}/Object",
130 | init_state=DeformableObjectCfg.InitialStateCfg(pos=(0.7, 0, 0.5), rot=(0.707, 0, 0, 0.707)),
131 | spawn=MeshTrimeshCfg(
132 | mesh=f"{Path(__file__).parent.as_posix()}/basket.obj",
133 | deformable_props=sim_utils.DeformableBodyPropertiesCfg(rest_offset=0.0, contact_offset=0.001),
134 | visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.1, 0.0)),
135 | physics_material=sim_utils.DeformableBodyMaterialCfg(
136 | poissons_ratio=0.4, youngs_modulus=5e2, dynamic_friction=100.0
137 | ),
138 | mass_props=sim_utils.MassPropertiesCfg(mass=0.1),
139 | ),
140 | )
141 |
142 | self.scene.prop_0 = DeformableObjectCfg(
143 | prim_path="{ENV_REGEX_NS}/Prop0",
144 | spawn=sim_utils.MeshCuboidCfg(
145 | size=(0.4, 0.4, 0.2),
146 | deformable_props=sim_utils.DeformableBodyPropertiesCfg(rest_offset=0.0, contact_offset=0.001),
147 | visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.5, 0.5)),
148 | physics_material=sim_utils.DeformableBodyMaterialCfg(poissons_ratio=0.4, youngs_modulus=1e8),
149 | ),
150 | init_state=DeformableObjectCfg.InitialStateCfg(pos=(0.7, 0, 0.2)),
151 | debug_vis=True,
152 | )
153 |
154 | self.scene.prop_1 = DeformableObjectCfg(
155 | prim_path="{ENV_REGEX_NS}/Prop1",
156 | spawn=sim_utils.MeshSphereCfg(
157 | radius=0.1,
158 | deformable_props=sim_utils.DeformableBodyPropertiesCfg(rest_offset=0.0, contact_offset=0.001),
159 | visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.2, 0.5, 0.0)),
160 | physics_material=sim_utils.DeformableBodyMaterialCfg(poissons_ratio=0.4, youngs_modulus=1e2),
161 | mass_props=sim_utils.MassPropertiesCfg(mass=0.01),
162 | ),
163 | init_state=DeformableObjectCfg.InitialStateCfg(pos=(0.7, -0.1, 1.0)),
164 | debug_vis=True,
165 | )
166 |
167 | self.scene.prop_2 = DeformableObjectCfg(
168 | prim_path="{ENV_REGEX_NS}/Prop2",
169 | spawn=sim_utils.MeshSphereCfg(
170 | radius=0.1,
171 | deformable_props=sim_utils.DeformableBodyPropertiesCfg(rest_offset=0.0, contact_offset=0.001),
172 | visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.1, 0.3)),
173 | physics_material=sim_utils.DeformableBodyMaterialCfg(poissons_ratio=0.4, youngs_modulus=1e2),
174 | mass_props=sim_utils.MassPropertiesCfg(mass=0.01),
175 | ),
176 | init_state=DeformableObjectCfg.InitialStateCfg(pos=(0.7, -0.1, 1.2)),
177 | debug_vis=True,
178 | )
179 |
180 | self.scene.prop_3 = DeformableObjectCfg(
181 | prim_path="{ENV_REGEX_NS}/Prop3",
182 | spawn=sim_utils.MeshSphereCfg(
183 | radius=0.1,
184 | deformable_props=sim_utils.DeformableBodyPropertiesCfg(rest_offset=0.0, contact_offset=0.001),
185 | visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.7, 0.3)),
186 | physics_material=sim_utils.DeformableBodyMaterialCfg(poissons_ratio=0.4, youngs_modulus=1e2),
187 | mass_props=sim_utils.MassPropertiesCfg(mass=0.01),
188 | ),
189 | init_state=DeformableObjectCfg.InitialStateCfg(pos=(0.7, -0.1, 1.4)),
190 | debug_vis=True,
191 | )
192 |
193 |
194 | gym.register(
195 | id="Isaac-Lift-Soft-Basket-Franka-IK-Abs-v0",
196 | entry_point="isaaclab.envs:ManagerBasedRLEnv",
197 | kwargs={
198 | "env_cfg_entry_point": f"{__name__}:FrankaSoftBasketLiftEnvCfg",
199 | },
200 | disable_env_checker=True,
201 | )
202 |
203 |
204 | def main():
205 | # parse configuration
206 | env_cfg: LiftEnvCfg = parse_env_cfg(
207 | "Isaac-Lift-Soft-Basket-Franka-IK-Abs-v0",
208 | device=args_cli.device,
209 | num_envs=args_cli.num_envs,
210 | )
211 |
212 | env_cfg.viewer.eye = (2.1, 1.0, 1.3)
213 |
214 | # create environment
215 | env = gym.make("Isaac-Lift-Soft-Basket-Franka-IK-Abs-v0", cfg=env_cfg).unwrapped
216 | # reset environment at start
217 | env.reset()
218 |
219 | # start simpub server
220 | if env.sim is not None and env.sim.stage is not None:
221 | print("parsing usd stage...")
222 | # publisher = IsaacSimPublisher(host="127.0.0.1", stage=env.unwrapped.sim.stage)
223 | publisher = IsaacSimPublisher(host="127.0.0.1", stage=env.sim.stage)
224 |
225 | # create action buffers (position + quaternion)
226 | actions = torch.zeros(env.action_space.shape, device=env.device)
227 |
228 | # fmt: off
229 | ee_poses = torch.tensor(
230 | [
231 | [0.40, 0.03, 0.62, 0.5609855, 0.4304593, 0.4304593, 0.5609855, 1.0] * 2,
232 | [0.53, 0.03, 0.62, 0.5609855, 0.4304593, 0.4304593, 0.5609855, 1.0] * 2,
233 | [0.53, 0.03, 0.62, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0] * 2,
234 | [0.53, 0.03, 0.90, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0] * 2,
235 | [0.53, 0.03, 0.70, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0] * 2,
236 | [0.53, 0.03, 0.90, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0] * 2,
237 | [0.53, 0.03, 0.70, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0] * 2,
238 | [0.53, 0.03, 0.90, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0] * 2,
239 | [0.53, 0.03, 0.70, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0] * 2,
240 | [0.53, 0.03, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0] * 2,
241 |
242 | [0.53, 0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0, 0.53, -0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0],
243 | [0.53, -0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0, 0.53, 0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0],
244 | [0.53, 0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0, 0.53, -0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0],
245 | [0.53, -0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0, 0.53, 0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0],
246 | [0.53, 0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0, 0.53, -0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0],
247 | [0.53, -0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0, 0.53, 0.15, 1.00, 0.5609855, 0.4304593, 0.4304593, 0.5609855, -1.0],
248 | ]
249 | )
250 | # fmt: on
251 | actions = ee_poses[0, :].reshape(1, -1)
252 |
253 | time_step = env_cfg.sim.dt * env_cfg.decimation
254 | time_elapsed = 0
255 | i_action = 0
256 |
257 | while simulation_app.is_running():
258 | # run everything in inference mode
259 | with torch.inference_mode():
260 | # step environment
261 | dones = env.step(actions)[-2]
262 |
263 | if time_elapsed >= 1.0:
264 | i_action = np.clip(i_action + 1, 0, len(ee_poses) - 1)
265 | actions = ee_poses[i_action, :].reshape(1, -1)
266 | time_elapsed = 0.0
267 |
268 | if torch.any(dones):
269 | i_action = 0
270 | actions = ee_poses[i_action, :].reshape(1, -1)
271 | time_elapsed = 0.0
272 |
273 | time_elapsed += time_step
274 |
275 | # close the environment
276 | env.close()
277 |
278 |
279 | if __name__ == "__main__":
280 | # run the main function
281 | main()
282 | # close sim app
283 | simulation_app.close()
284 |
--------------------------------------------------------------------------------
/demos/IsaacSim/lift_soft_cloth/cloth.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intuitive-robots/SimPublisher/6f0348f678038ca6738100cfa1ab757989dde006/demos/IsaacSim/lift_soft_cloth/cloth.jpg
--------------------------------------------------------------------------------
/demos/IsaacSim/lift_soft_cloth/jacket.usd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intuitive-robots/SimPublisher/6f0348f678038ca6738100cfa1ab757989dde006/demos/IsaacSim/lift_soft_cloth/jacket.usd
--------------------------------------------------------------------------------
/demos/IsaacSim/lift_soft_cloth/lift_soft_cloth.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022-2024, The Isaac Lab Project Developers.
2 | # All rights reserved.
3 | #
4 | # SPDX-License-Identifier: BSD-3-Clause
5 |
6 | """
7 | Script to demonstrate lifting a deformable object with a robotic arm.
8 |
9 | A Franka arm follows a fixed sequence of end-effector poses to pick up a soft jacket.
10 |
11 | .. code-block:: bash
12 |
13 |     # Run from the repository root (assuming the setup described in demos/IsaacSim/README.md)
14 |     python demos/IsaacSim/lift_soft_cloth/lift_soft_cloth.py
15 |
16 | """
17 |
18 | """Launch Omniverse Toolkit first."""
19 |
20 | import argparse
21 | import copy
22 | import math
23 | from pathlib import Path
24 |
25 | import numpy as np
26 | from isaaclab.app import AppLauncher
27 |
28 | # add argparse arguments
29 | parser = argparse.ArgumentParser(description="Pick and lift a teddy bear with a robotic arm.")
30 | parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.")
31 | # append AppLauncher cli args
32 | AppLauncher.add_app_launcher_args(parser)
33 | # parse the arguments
34 | args_cli = parser.parse_args()
35 |
36 | # launch omniverse app
37 | app_launcher = AppLauncher(headless=args_cli.headless)
38 | simulation_app = app_launcher.app
39 |
40 | """Rest everything else."""
41 |
42 | from collections.abc import Callable
43 | from dataclasses import MISSING
44 |
45 | import gymnasium as gym
46 | import isaacsim.core.utils.prims as prim_utils
47 | import isaaclab.sim as sim_utils
48 | import torch
49 | import trimesh
50 | from isaaclab.assets import DeformableObjectCfg, RigidObjectCfg
51 | from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg
52 | from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
53 | from isaaclab.sim import UsdFileCfg
54 | from isaaclab.sim.spawners.meshes import meshes
55 | from isaaclab.sim.spawners.meshes.meshes_cfg import MeshCfg
56 | from isaaclab.sim.utils import clone
57 | from isaaclab.utils import configclass
58 | from isaaclab_tasks.manager_based.manipulation.lift import mdp
59 | from isaaclab_tasks.manager_based.manipulation.lift.config.franka.ik_abs_env_cfg import FrankaTeddyBearLiftEnvCfg
60 | from isaaclab_tasks.manager_based.manipulation.lift.lift_env_cfg import LiftEnvCfg
61 | from isaaclab_tasks.utils.parse_cfg import parse_env_cfg
62 |
63 | from simpub.sim.isaacsim_publisher import IsaacSimPublisher
64 |
65 |
66 | class FrankaSoftBasketLiftEnvCfg(FrankaTeddyBearLiftEnvCfg):
67 | def __post_init__(self):
68 | super().__post_init__()
69 |
70 | self.episode_length_s = 20.0
71 |
72 | self.events.reset_object_position = None
73 |
74 | self.scene.table = None
75 | self.scene.plane.init_state.pos = [0, 0, 0]
76 |
77 | self.scene.object = DeformableObjectCfg(
78 | prim_path="{ENV_REGEX_NS}/Object",
79 | init_state=DeformableObjectCfg.InitialStateCfg(pos=(0.85, 0.1, 0.5), rot=(1, 0, 0, 0)),
80 | spawn=UsdFileCfg(
81 | usd_path=f"{Path(__file__).parent.as_posix()}/jacket.usd",
82 | scale=(0.75, 0.75, 0.75),
83 | # deformable_props=sim_utils.DeformableBodyPropertiesCfg(rest_offset=0.0, contact_offset=0.001),
84 | # physics_material=sim_utils.DeformableBodyMaterialCfg(poissons_ratio=0.4, youngs_modulus=1e5),
85 | ),
86 | )
87 |
88 | self.scene.prop_0 = RigidObjectCfg(
89 | prim_path="{ENV_REGEX_NS}/Prop0",
90 | spawn=sim_utils.MeshCuboidCfg(
91 | size=(1.5, 0.6, 0.2),
92 | rigid_props=sim_utils.RigidBodyPropertiesCfg(),
93 | visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.5, 0.5)),
94 | physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=20.0, dynamic_friction=20.0),
95 | collision_props=sim_utils.CollisionPropertiesCfg(),
96 | mass_props=sim_utils.MassPropertiesCfg(mass=50.0),
97 | ),
98 | init_state=RigidObjectCfg.InitialStateCfg(pos=(1.0, 0.1, 0.2)),
99 | debug_vis=True,
100 | )
101 |
102 |
103 | gym.register(
104 | id="Isaac-Lift-Soft-Basket-Franka-IK-Abs-v0",
105 | entry_point="isaaclab.envs:ManagerBasedRLEnv",
106 | kwargs={
107 | "env_cfg_entry_point": f"{__name__}:FrankaSoftBasketLiftEnvCfg",
108 | },
109 | disable_env_checker=True,
110 | )
111 |
112 |
113 | def main():
114 | # parse configuration
115 | env_cfg: LiftEnvCfg = parse_env_cfg(
116 | "Isaac-Lift-Soft-Basket-Franka-IK-Abs-v0",
117 | device=args_cli.device,
118 | num_envs=args_cli.num_envs,
119 | )
120 |
121 | env_cfg.viewer.eye = (2.1, 1.0, 1.3)
122 |
123 | # create environment
124 | env = gym.make("Isaac-Lift-Soft-Basket-Franka-IK-Abs-v0", cfg=env_cfg).unwrapped
125 | # reset environment at start
126 | env.reset()
127 |
128 | # start simpub server
129 | if env.sim is not None and env.sim.stage is not None:
130 | print("parsing usd stage...")
131 | # publisher = IsaacSimPublisher(host="127.0.0.1", stage=env.unwrapped.sim.stage)
132 | publisher = IsaacSimPublisher(host="127.0.0.1", stage=env.sim.stage)
133 |
134 | # create action buffers (position + quaternion)
135 | actions = torch.zeros(env.unwrapped.action_space.shape, device=env.unwrapped.device)
136 |
137 | ee_poses = torch.tensor(
138 | [
139 | [0.2, 0.03, 0.82, 0.5609855, 0.4304593, 0.4304593, 0.5609855, 1.0],
140 | [0.45, 0.025, 0.35, 0, 0, 1, 0, 1.0],
141 | [0.45, 0.025, 0.35, 0, -0.258819, 0.9659258, 0, 1.0],
142 | [0.45, 0.025, 0.21, 0, -0.258819, 0.9659258, 0, 1.0],
143 | [0.45, 0.025, 0.21, 0, -0.258819, 0.9659258, 0, -1.0],
144 | [0.45, 0.025, 0.21, 0, -0.258819, 0.9659258, 0, -1.0],
145 | [0.6, -0.15, 0.21, 0, -0.258819, 0.9659258, 0, -1.0],
146 | ]
147 | )
148 | actions = ee_poses[0, :].reshape(1, -1)
149 |
150 | time_step = env_cfg.sim.dt * env_cfg.decimation
151 | time_elapsed = 0
152 | i_action = 0
153 |
154 | while simulation_app.is_running():
155 | # run everything in inference mode
156 | with torch.inference_mode():
157 | # step environment
158 | dones = env.step(actions)[-2]
159 |
160 | if time_elapsed >= 1.0:
161 | i_action = np.clip(i_action + 1, 0, len(ee_poses) - 1)
162 | actions = ee_poses[i_action, :].reshape(1, -1)
163 | time_elapsed = 0.0
164 |
165 | if torch.any(dones):
166 | i_action = 0
167 | actions = ee_poses[i_action, :].reshape(1, -1)
168 | time_elapsed = 0.0
169 |
170 | time_elapsed += time_step
171 |
172 | # close the environment
173 | env.close()
174 |
175 |
176 | if __name__ == "__main__":
177 | # run the main function
178 | main()
179 | # close sim app
180 | simulation_app.close()
181 |
--------------------------------------------------------------------------------
/demos/IsaacSim/meta_quest_3_ik_demo.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022-2024, The Isaac Lab Project Developers.
2 | # All rights reserved.
3 | #
4 | # SPDX-License-Identifier: BSD-3-Clause
5 |
6 | import argparse
7 | import numpy as np
8 | import torch
9 | from scipy.spatial.transform import Rotation
10 | import math
11 |
12 | from isaaclab.app import AppLauncher
13 | import gymnasium as gym
14 |
15 | # add argparse arguments
16 | parser = argparse.ArgumentParser(
17 | description="This script demonstrates different single-arm manipulators."
18 | )
19 | # append AppLauncher cli args
20 | AppLauncher.add_app_launcher_args(parser)
21 | # parse the arguments
22 | args_cli = parser.parse_args()
23 |
24 | # launch omniverse app
25 | app_launcher = AppLauncher(args_cli)
26 | simulation_app = app_launcher.app
27 |
28 | # isaac sim modules are only available after launching it (?)
29 |
30 | from pxr import Usd
31 |
32 | import carb
33 |
34 | import isaacsim.core.utils.prims as prim_utils
35 |
36 | import isaaclab.sim as sim_utils
37 | from isaaclab.assets import Articulation, RigidObjectCfg
38 | from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg
39 | from isaaclab.sim.spawners.shapes.shapes_cfg import CapsuleCfg
40 | from isaaclab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg
41 | from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR
42 | from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg
43 | from isaaclab.envs.mdp.actions.actions_cfg import (
44 | DifferentialInverseKinematicsActionCfg,
45 | )
46 | from isaaclab.utils.math import quat_from_euler_xyz, euler_xyz_from_quat, quat_mul
47 |
48 | from isaaclab.devices import Se3Gamepad, Se3Keyboard, Se3SpaceMouse
49 |
50 | import isaaclab_tasks # noqa: F401
51 | from isaaclab_tasks.utils import parse_env_cfg
52 |
53 | from simpub.sim.isaacsim_publisher import IsaacSimPublisher
54 | from simpub.xr_device.meta_quest3 import MetaQuest3
55 |
56 | print(ISAACLAB_NUCLEUS_DIR)
57 |
58 |
59 | def pre_process_actions(
60 | task: str, delta_pose: torch.Tensor, gripper_command: bool
61 | ) -> torch.Tensor:
62 | """Pre-process actions for the environment."""
63 | # compute actions based on environment
64 | if "Reach" in task:
65 | # note: reach is the only one that uses a different action space
66 | # compute actions
67 | return delta_pose
68 | else:
69 | # resolve gripper command
70 | gripper_vel = torch.zeros(delta_pose.shape[0], 1, device=delta_pose.device)
71 | gripper_vel[:] = -1.0 if gripper_command else 1.0
72 | # compute actions
73 | return torch.concat([delta_pose, gripper_vel], dim=1)
74 |
75 |
76 | def main():
77 | """Running keyboard teleoperation with Isaac Lab manipulation environment."""
78 | task = "Isaac-Lift-Cube-Franka-IK-Rel-v0"
79 |
80 | # parse configuration
81 | # why using undefined parameter (use_gpu) doesn't raise error?
82 | env_cfg = parse_env_cfg(
83 | task_name=task,
84 | num_envs=1,
85 | use_fabric=True,
86 | )
87 |
88 | # add some primitives to the env
89 | env_cfg.scene.cube2 = RigidObjectCfg(
90 | prim_path="{ENV_REGEX_NS}/Cube2",
91 | init_state=RigidObjectCfg.InitialStateCfg(
92 | pos=[0.7, 0, 0.155], rot=[1, 0, 0, 0]
93 | ),
94 | spawn=UsdFileCfg(
95 | usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
96 | scale=(0.8, 0.8, 0.8),
97 | rigid_props=RigidBodyPropertiesCfg(
98 | solver_position_iteration_count=16,
99 | solver_velocity_iteration_count=1,
100 | max_angular_velocity=1000.0,
101 | max_linear_velocity=1000.0,
102 | max_depenetration_velocity=5.0,
103 | disable_gravity=False,
104 | ),
105 | ),
106 | )
107 |
108 | env_cfg.scene.cube3 = RigidObjectCfg(
109 | prim_path="{ENV_REGEX_NS}/Cube3",
110 | init_state=RigidObjectCfg.InitialStateCfg(
111 | pos=[0.4, 0, 0.155], rot=[1, 0, 0, 0]
112 | ),
113 | spawn=UsdFileCfg(
114 | usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
115 | scale=(0.8, 0.8, 0.8),
116 | rigid_props=RigidBodyPropertiesCfg(
117 | solver_position_iteration_count=16,
118 | solver_velocity_iteration_count=1,
119 | max_angular_velocity=1000.0,
120 | max_linear_velocity=1000.0,
121 | max_depenetration_velocity=5.0,
122 | disable_gravity=False,
123 | ),
124 | ),
125 | )
126 |
127 | env_cfg.scene.cube4 = RigidObjectCfg(
128 | prim_path="{ENV_REGEX_NS}/Cube4",
129 | init_state=RigidObjectCfg.InitialStateCfg(
130 | pos=[0.4, 0, 0.155], rot=[1, 0, 0, 0]
131 | ),
132 | spawn=sim_utils.CuboidCfg(
133 | size=(0.05, 0.1, 0.2),
134 | rigid_props=sim_utils.RigidBodyPropertiesCfg(),
135 | mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
136 | collision_props=sim_utils.CollisionPropertiesCfg(),
137 | visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 1.0)),
138 | ),
139 | )
140 |
141 | env_cfg.scene.capsule1 = RigidObjectCfg(
142 | prim_path="{ENV_REGEX_NS}/Capsule1",
143 | init_state=RigidObjectCfg.InitialStateCfg(
144 | pos=[0.6, 0.2, 0.055], rot=[1, 0, 0, 0]
145 | ),
146 | spawn=CapsuleCfg(
147 | radius=0.03,
148 | height=0.1,
149 | axis="Y",
150 | rigid_props=sim_utils.RigidBodyPropertiesCfg(),
151 | mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
152 | collision_props=sim_utils.CollisionPropertiesCfg(),
153 | visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.1, 0.1, 1.0)),
154 | ),
155 | )
156 |
157 | # you should probably set use_relative_mode to False when using meta quest 3 input
158 | # but currently it does not work (seems to be a problem of coordinate system alignment)
159 | env_cfg.actions.arm_action = DifferentialInverseKinematicsActionCfg(
160 | asset_name="robot",
161 | joint_names=["panda_joint.*"],
162 | body_name="panda_hand",
163 | controller=DifferentialIKControllerCfg(
164 | command_type="pose", use_relative_mode=False, ik_method="dls"
165 | ),
166 | scale=1.0,
167 | body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(
168 | pos=[0.0, 0.0, 0.107]
169 | ),
170 | )
171 |
172 | def update_z(pos: tuple[float, float, float]):
173 | return (pos[0], pos[1], pos[2] + 1.05)
174 |
175 | # move objects above ground (z=0)
176 | env_cfg.scene.table.init_state.pos = update_z(env_cfg.scene.table.init_state.pos)
177 | env_cfg.scene.plane.init_state.pos = update_z(env_cfg.scene.plane.init_state.pos)
178 | env_cfg.scene.robot.init_state.pos = update_z(env_cfg.scene.robot.init_state.pos)
179 | env_cfg.scene.light.init_state.pos = update_z(env_cfg.scene.light.init_state.pos)
180 | env_cfg.scene.object.init_state.pos = update_z(env_cfg.scene.object.init_state.pos)
181 | env_cfg.scene.cube2.init_state.pos = update_z(env_cfg.scene.cube2.init_state.pos)
182 | env_cfg.scene.cube3.init_state.pos = update_z(env_cfg.scene.cube3.init_state.pos)
183 | env_cfg.scene.cube4.init_state.pos = update_z(env_cfg.scene.cube4.init_state.pos)
184 | env_cfg.scene.capsule1.init_state.pos = update_z(
185 | env_cfg.scene.capsule1.init_state.pos
186 | )
187 |
188 | # modify configuration
189 | env_cfg.terminations.time_out = None
190 |
191 | # create environment
192 | env = gym.make(task, cfg=env_cfg).unwrapped
193 | # print(env.sim)
194 |
195 | # check environment name (for reach , we don't allow the gripper)
196 | if "Reach" in task:
197 | carb.log_warn(
198 | f"The environment '{task}' does not support gripper control. The device command will be ignored."
199 | )
200 |
201 | # create controller
202 | sensitivity = 10
203 | teleop_interface = Se3Keyboard(
204 | pos_sensitivity=0.005 * sensitivity,
205 | rot_sensitivity=0.005 * sensitivity,
206 | )
207 |
208 | # add teleoperation key for env reset
209 | teleop_interface.add_callback("L", env.reset)
210 | # print helper for keyboard
211 | print(teleop_interface)
212 |
213 | # reset environment
214 | env.reset()
215 | teleop_interface.reset()
216 |
217 | print("simulation context:", env.sim)
218 | print("stage:", env.sim.stage)
219 |
220 | if env.sim is not None and env.sim.stage is not None:
221 | print("parsing usd stage...")
222 | publisher = IsaacSimPublisher(host="192.168.0.134", stage=env.sim.stage)
223 | # publisher = IsaacSimPublisher(host="127.0.0.1", stage=env.sim.stage)
224 |
225 | meta_quest3 = MetaQuest3("ALRMetaQuest3")
226 |
227 | # DifferentialIKController takes position relative to the robot base (what's that anyway..?)
228 | input_pos = [0.3, 0.3, 0.7]
229 | input_rot = quat_from_euler_xyz(
230 | torch.tensor([-math.pi]), torch.tensor([0]), torch.tensor([0])
231 | )[0].numpy()
232 | print("downward rot:", input_rot)
233 | input_gripper = False
234 |
235 | # simulate environment
236 | while simulation_app.is_running():
237 | # run everything in inference mode
238 | with torch.inference_mode():
239 | # get meta quest 3 input (does not work...)
240 | input_data = meta_quest3.get_input_data()
241 | if input_data is not None:
242 | if input_data["right"]["hand_trigger"]:
243 | env.reset()
244 | else:
245 | input_pos = input_data["right"]["pos"]
246 | input_rot = input_data["right"]["rot"]
247 | # print(input_rot)
248 | input_gripper = input_data["right"]["index_trigger"]
249 |
250 | # the input rotation should be converted into the robot's local frame
251 | # instead of the yaw-only workaround below (see the sketch at the end of this file)
252 | euler = euler_xyz_from_quat(torch.tensor([input_rot]))
253 | rot_z = euler[0].numpy()[0]
254 | quat_1 = quat_from_euler_xyz(
255 | torch.tensor([-math.pi]),
256 | torch.tensor([0]),
257 | torch.tensor([0]),
258 | )
259 | quat_2 = quat_from_euler_xyz(
260 | torch.tensor([0]),
261 | torch.tensor([0]),
262 | torch.tensor([rot_z]),
263 | )
264 | input_rot = quat_mul(quat_2, quat_1)[0].numpy()
265 |
266 | # the position should also be converted into the robot's local frame
267 | input_pos[2] -= 1.05
268 | # input_rot = quat_from_euler_xyz(
269 | # torch.tensor([-math.pi]), torch.tensor([0]), torch.tensor([0])
270 | # )[0].numpy()
271 |
272 | # get keyboard command
273 | delta_pose, gripper_command = teleop_interface.advance()
274 |
275 | # get command from meta quest 3
276 | delta_pose = np.array(
277 | [
278 | input_pos[0],
279 | input_pos[1],
280 | input_pos[2],
281 | input_rot[0],
282 | input_rot[1],
283 | input_rot[2],
284 | input_rot[3],
285 | ]
286 | )
287 | # print(delta_pose)
288 | gripper_command = input_gripper
289 |
290 | delta_pose = delta_pose.astype("float32")
291 | # convert to torch
292 | delta_pose = torch.tensor(delta_pose, device=env.unwrapped.device).repeat(
293 | env.unwrapped.num_envs, 1
294 | )
295 | # pre-process actions
296 | actions = pre_process_actions(task, delta_pose, gripper_command)
297 | # apply actions
298 | env.step(actions)
299 |
300 | # close the simulator
301 | env.close()
302 |
303 |
304 | if __name__ == "__main__":
305 | main()
306 | simulation_app.close()
307 |
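
Illustrative sketch (not part of the original demo): the comments above note that the Meta Quest 3 pose should be expressed in the robot's base frame rather than patched with the yaw-only workaround. Assuming the base pose is known as base_pos and base_quat (w, x, y, z), the conversion could look like this; world_to_base and the base_* names are placeholders, not part of any API used in this file.

import numpy as np
from scipy.spatial.transform import Rotation as R

def world_to_base(pos_w, quat_w, base_pos, base_quat):
    """Express a world-frame pose in the robot base frame (quaternions in w, x, y, z)."""
    r_base = R.from_quat([base_quat[1], base_quat[2], base_quat[3], base_quat[0]])
    r_world = R.from_quat([quat_w[1], quat_w[2], quat_w[3], quat_w[0]])
    pos_b = r_base.inv().apply(np.asarray(pos_w) - np.asarray(base_pos))
    quat_b = (r_base.inv() * r_world).as_quat()  # scipy returns (x, y, z, w)
    return pos_b, np.array([quat_b[3], quat_b[0], quat_b[1], quat_b[2]])  # back to (w, x, y, z)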
--------------------------------------------------------------------------------
/demos/SimulationFramework/hand_over.py:
--------------------------------------------------------------------------------
1 | from alr_sim.sims.SimFactory import SimRepository
2 | from alr_sim.sims.mj_beta.mj_utils.mj_scene_object import YCBMujocoObject
3 | import os
4 | import argparse
5 | import zmq
6 | import threading
7 | import json
8 | import traceback
9 |
10 | from simpub.sim.sf_publisher import SFPublisher
11 | from alr_sim.controllers.Controller import JointPDController
12 |
13 |
14 | class RealRobotVTController(JointPDController):
15 | def __init__(self, real_robot_ip):
16 | super().__init__()
17 | self.sub_socket = zmq.Context().socket(zmq.SUB)
18 | self.sub_socket.connect(f"tcp://{real_robot_ip}:5555")
19 | self.sub_socket.setsockopt_string(zmq.SUBSCRIBE, "")
20 | self.data = None
21 | self.sub_task = threading.Thread(target=self.subscribe_task)
22 | self.sub_task.start()
23 |
24 | def subscribe_task(self):
25 | try:
26 | while True:
27 | msg = self.sub_socket.recv_string()
28 | self.data = json.loads(msg)
29 | except Exception:
30 | traceback.print_exc()
31 |
32 | def getControl(self, robot):
33 | if self.data is None:
34 | return super().getControl(robot)
35 | self.setSetPoint(
36 | desired_pos=self.data['q'], desired_vel=self.data['dq']
37 | )
38 | if self.data['gripper_width'][0] < 0.9 * self.data['gripper_width'][1]:
39 | robot.set_gripper_cmd_type = 2 # Move
40 | else:
41 | robot.set_gripper_cmd_type = 1 # Grasp
42 | robot.set_gripper_width = self.goal_gripper_width_fct()
43 | return super().getControl(robot)
44 |
45 |
46 | if __name__ == "__main__":
47 |
48 | # parse command-line arguments
49 | parser = argparse.ArgumentParser()
50 | parser.add_argument("--folder", type=str)
51 | parser.add_argument("--host", type=str, default="127.0.0.1")
52 | args = parser.parse_args()
53 |
54 | ycb_base_folder = os.path.join(args.folder, "SF-ObjectDataset/YCB")
55 |
56 | hammer = YCBMujocoObject(
57 | ycb_base_folder=ycb_base_folder,
58 | object_id="048_hammer",
59 | object_name="hammer",
60 | pos=[0.4, 0.0, 0.1],
61 | quat=[0, 0, 0, 1],
62 | static=False,
63 | alpha=1.0,
64 | visual_only=False,
65 | )
66 | object_list = [hammer]
67 |
68 | # Setup the scene
69 | sim_factory = SimRepository.get_factory("mj_beta")
70 |
71 | # Use a small dt to reduce jittering of the gripper in the more demanding physics simulation
72 | scene = sim_factory.create_scene(object_list=object_list, dt=0.001)
73 | robot1 = sim_factory.create_robot(
74 | scene,
75 | dt=0.001,
76 | base_position=[0.0, 0.5, 0.0]
77 | )
78 | robot2 = sim_factory.create_robot(
79 | scene,
80 | dt=0.001,
81 | base_position=[0.0, -0.5, 0.0]
82 | )
83 | controller1 = RealRobotVTController("")
84 | controller2 = RealRobotVTController("")
85 | scene.start()
86 | controller1.executeController(robot1, maxDuration=1000, block=False)
87 | controller2.executeController(robot2, maxDuration=1000, block=False)
88 | publisher = SFPublisher(scene, args.host)
89 |
90 | try:
91 | while True:
92 | scene.next_step()
93 | except KeyboardInterrupt:
94 | publisher.shutdown()
95 | print("Simulation finished")
96 |
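
Illustrative sketch (not part of the original demo): RealRobotVTController above subscribes to a JSON state stream on port 5555. A minimal publisher for the real-robot side, assuming the same field names (q, dq, gripper_width), could look like the following; all values are placeholders and the real message layout may differ.

import json
import time
import zmq

context = zmq.Context()
pub_socket = context.socket(zmq.PUB)
pub_socket.bind("tcp://*:5555")

while True:
    state = {
        "q": [0.0] * 7,                 # joint positions of the real robot
        "dq": [0.0] * 7,                # joint velocities of the real robot
        "gripper_width": [0.08, 0.08],  # assumed: [current width, reference width]
    }
    pub_socket.send_string(json.dumps(state))
    time.sleep(0.001)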
--------------------------------------------------------------------------------
/demos/SimulationFramework/pick_and_place.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from alr_sim.sims.SimFactory import SimRepository
4 | from alr_sim.sims.universal_sim.PrimitiveObjects import Box
5 | from simpub.sim.sf_publisher import SFPublisher
6 |
7 | if __name__ == "__main__":
8 |
9 | parser = argparse.ArgumentParser()
10 | parser.add_argument("--host", type=str, default="127.0.0.1")
11 | args = parser.parse_args()
12 | host = args.host
13 |
14 | box1 = Box(
15 | name="box1",
16 | init_pos=[0.5, -0.2, 0.5],
17 | init_quat=[0, 1, 0, 0],
18 | rgba=[0.1, 0.25, 0.3, 1],
19 | )
20 | box2 = Box(
21 | name="box2",
22 | init_pos=[0.6, -0.1, 0.5],
23 | init_quat=[0, 1, 0, 0],
24 | rgba=[0.2, 0.3, 0.7, 1],
25 | )
26 | box3 = Box(
27 | name="box3",
28 | init_pos=[0.4, -0.1, 0.5],
29 | init_quat=[0, 1, 0, 0],
30 | rgba=[1, 0, 0, 1],
31 | )
32 | box4 = Box(
33 | name="box4",
34 | init_pos=[0.6, -0.0, 0.5],
35 | init_quat=[0, 1, 0, 0],
36 | rgba=[1, 0, 0, 1],
37 | )
38 | box5 = Box(
39 | name="box5",
40 | init_pos=[0.6, 0.1, 0.5],
41 | init_quat=[0, 1, 0, 0],
42 | rgba=[1, 1, 1, 1],
43 | )
44 | box6 = Box(
45 | name="box6",
46 | init_pos=[0.6, 0.2, 0.5],
47 | init_quat=[0, 1, 0, 0],
48 | rgba=[1, 0, 0, 1],
49 | )
50 |
51 | table = Box(
52 | name="table0",
53 | init_pos=[0.5, 0.0, 0.2],
54 | init_quat=[0, 1, 0, 0],
55 | size=[0.25, 0.35, 0.2],
56 | static=True,
57 | )
58 |
59 | object_list = [box1, box2, box3, box4, box5, box6, table]
60 |
61 | # Setup the scene
62 | sim_factory = SimRepository.get_factory("mj_beta")
63 |
64 | scene = sim_factory.create_scene(object_list=object_list)
65 | robot = sim_factory.create_robot(scene)
66 |
67 | scene.start()
68 |
69 | assert host is not None, "Please specify the host"
70 | publisher = SFPublisher(
71 | scene,
72 | host, no_tracked_objects=["table_plane", "table0"],
73 | )
74 |
75 | duration = 2
76 |
77 | robot.set_desired_gripper_width(0.0)
78 |
79 | home_position = robot.current_c_pos.copy()
80 | home_orientation = robot.current_c_quat.copy()
81 |
82 | # execute the pick and place movements
83 | robot.gotoCartPositionAndQuat(
84 | [0.5, -0.2, 0.6 - 0.1], [0, 1, 0, 0], duration=duration
85 | )
86 | robot.set_desired_gripper_width(0.04)
87 | robot.smooth_spline = False
88 | robot.gotoCartPositionAndQuat(
89 | [0.5, -0.2, 0.52 - 0.1], [0, 1, 0, 0], duration=duration
90 | )
91 | robot.close_fingers(duration=0.3)
92 | robot.gotoCartPositionAndQuat(
93 | home_position, home_orientation, duration=duration
94 | )
95 | robot.gotoCartPositionAndQuat(
96 | [0.5, 0.2, 0.6 - 0.1], [0, 1, 0, 0], duration=duration
97 | )
98 | robot.set_desired_gripper_width(0.04)
99 | robot.gotoCartPositionAndQuat(
100 | [0.6, -0.1, 0.6 - 0.1], [0, 1, 0, 0], duration=duration
101 | )
102 | robot.gotoCartPositionAndQuat(
103 | [0.6, -0.1, 0.52 - 0.1], [0, 1, 0, 0], duration=duration
104 | )
105 | robot.close_fingers(duration=0.3)
106 | robot.gotoCartPositionAndQuat(
107 | home_position, home_orientation, duration=duration
108 | )
109 | robot.gotoCartPositionAndQuat(
110 | [0.5, 0.2, 0.6 - 0.1], [0, 1, 0, 0], duration=duration
111 | )
112 | robot.set_desired_gripper_width(0.04)
113 | robot.gotoCartPositionAndQuat(
114 | [0.4, -0.1, 0.6 - 0.1], [0, 1, 0, 0], duration=duration
115 | )
116 | robot.gotoCartPositionAndQuat(
117 | [0.4, -0.1, 0.52 - 0.1], [0, 1, 0, 0], duration=duration
118 | )
119 | robot.close_fingers(duration=0.3)
120 | robot.gotoCartPositionAndQuat(
121 | home_position, home_orientation, duration=duration
122 | )
123 | robot.gotoCartPositionAndQuat(
124 | [0.5, 0.2, 0.65 - 0.1], [0, 1, 0, 0], duration=duration
125 | )
126 | robot.set_desired_gripper_width(0.04)
127 | robot.gotoCartPositionAndQuat(
128 | home_position, home_orientation, duration=duration
129 | )
130 |
131 | while True:
132 | scene.next_step()
133 |
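
A hedged variant of the final loop (not in the original file), mirroring the shutdown handling used in hand_over.py so the publisher is closed cleanly on Ctrl+C:

try:
    while True:
        scene.next_step()
except KeyboardInterrupt:
    publisher.shutdown()
    print("Simulation finished")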
--------------------------------------------------------------------------------
/demos/SimulationFramework/pick_and_place_with_meta_quest.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import numpy as np
3 |
4 | from alr_sim.sims.SimFactory import SimRepository
5 | from alr_sim.sims.universal_sim.PrimitiveObjects import Box
6 | from alr_sim.controllers.IKControllers import CartPosQuatImpedenceController
7 | from alr_sim.sims.mj_beta import MjRobot
8 |
9 | from simpub.sim.sf_publisher import SFPublisher
10 | from simpub.xr_device import MetaQuest3
11 |
12 |
13 | class MetaQuest3Controller(CartPosQuatImpedenceController):
14 |
15 | def __init__(self, device):
16 | super().__init__()
17 | self.device: MetaQuest3 = device
18 |
19 | def getControl(self, robot: MjRobot):
20 | input_data = self.device.get_input_data()
21 | if input_data is not None:
22 | hand = input_data["right"]
23 | desired_pos = hand["pos"]
24 | desired_quat = hand["rot"]
25 | desired_pos_local = robot._localize_cart_pos(desired_pos)
26 | desired_quat_local = robot._localize_cart_quat(desired_quat)
27 | if hand["index_trigger"]:
28 | robot.close_fingers(duration=0.0)
29 | else:
30 | robot.open_fingers()
31 | self.setSetPoint(
32 | np.hstack((desired_pos_local, desired_quat_local))
33 | )
34 | return super().getControl(robot)
35 |
36 |
37 | if __name__ == "__main__":
38 |
39 | parser = argparse.ArgumentParser()
40 | parser.add_argument("--host", type=str, default="127.0.0.1")
41 | args = parser.parse_args()
42 |
43 | box1 = Box(
44 | name="box1",
45 | init_pos=[0.5, -0.2, 0.5],
46 | init_quat=[0, 1, 0, 0],
47 | rgba=[0.1, 0.25, 0.3, 1],
48 | )
49 | box2 = Box(
50 | name="box2",
51 | init_pos=[0.6, -0.1, 0.5],
52 | init_quat=[0, 1, 0, 0],
53 | rgba=[0.2, 0.3, 0.7, 1],
54 | )
55 | box3 = Box(
56 | name="box3",
57 | init_pos=[0.4, -0.1, 0.5],
58 | init_quat=[0, 1, 0, 0],
59 | rgba=[1, 0, 0, 1],
60 | )
61 | box4 = Box(
62 | name="box4",
63 | init_pos=[0.6, -0.0, 0.5],
64 | init_quat=[0, 1, 0, 0],
65 | rgba=[1, 0, 0, 1],
66 | )
67 | box5 = Box(
68 | name="box5",
69 | init_pos=[0.6, 0.1, 0.5],
70 | init_quat=[0, 1, 0, 0],
71 | rgba=[1, 1, 1, 1],
72 | )
73 | box6 = Box(
74 | name="box6",
75 | init_pos=[0.6, 0.2, 0.5],
76 | init_quat=[0, 1, 0, 0],
77 | rgba=[1, 0, 0, 1],
78 | )
79 |
80 | table = Box(
81 | name="table0",
82 | init_pos=[0.5, 0.0, 0.2],
83 | init_quat=[0, 1, 0, 0],
84 | size=[0.25, 0.35, 0.2],
85 | static=True,
86 | )
87 |
88 | object_list = [box1, box2, box3, box4, box5, box6, table]
89 |
90 | # Setup the scene
91 | sim_factory = SimRepository.get_factory("mj_beta")
92 |
93 | scene = sim_factory.create_scene(object_list=object_list)
94 | robot = sim_factory.create_robot(scene)
95 |
96 | scene.start()
97 |
98 | publisher = SFPublisher(
99 | scene, args.host, no_tracked_objects=["table_plane", "table0"]
100 | )
101 | meta_quest3 = MetaQuest3("IRLMQ3-1")
102 | robot_controller = MetaQuest3Controller(meta_quest3)
103 | robot_controller.executeController(robot, maxDuration=1000, block=False)
104 |
105 | while True:
106 | scene.next_step()
107 |
--------------------------------------------------------------------------------
/demos/SimulationFramework/pick_ycb_object.py:
--------------------------------------------------------------------------------
1 | from alr_sim.core.logger import RobotPlotFlags
2 | from alr_sim.sims.SimFactory import SimRepository
3 | from alr_sim.sims.mj_beta.mj_utils.mj_scene_object import YCBMujocoObject
4 | import os
5 | import argparse
6 |
7 |
8 | from simpub.sim.sf_publisher import SFPublisher
9 |
10 | if __name__ == "__main__":
11 |
12 | # parse command-line arguments
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument("--folder", type=str)
15 | parser.add_argument("--host", type=str, default="127.0.0.1")
16 | parser.add_argument("--object_id", type=str, default="013_apple")
17 | args = parser.parse_args()
18 |
19 | ycb_base_folder = os.path.join(args.folder, "SF-ObjectDataset/YCB")
20 | clamp = YCBMujocoObject(
21 | ycb_base_folder=ycb_base_folder,
22 | object_id="051_large_clamp",
23 | object_name="clamp",
24 | pos=[0.4, 0, 0.1],
25 | quat=[0, 0, 0, 1],
26 | static=False,
27 | alpha=1.0,
28 | visual_only=False,
29 | )
30 |
31 | lemon = YCBMujocoObject(
32 | ycb_base_folder=ycb_base_folder,
33 | object_id="014_lemon",
34 | object_name="lemon",
35 | pos=[0.4, 0.2, 0.1],
36 | quat=[0, 0, 0, 1],
37 | static=False,
38 | alpha=1.0,
39 | visual_only=False,
40 | )
41 |
42 | mug = YCBMujocoObject(
43 | ycb_base_folder=ycb_base_folder,
44 | object_id="025_mug",
45 | object_name="mug",
46 | pos=[0.2, 0.1, 0.1],
47 | quat=[0, 0, 0, 1],
48 | static=False,
49 | alpha=1.0,
50 | visual_only=False,
51 | )
52 |
53 | hammer = YCBMujocoObject(
54 | ycb_base_folder=ycb_base_folder,
55 | object_id="048_hammer",
56 | object_name="hammer",
57 | pos=[0.3, -0.2, 0.1],
58 | quat=[0, 0, 0, 1],
59 | static=False,
60 | alpha=1.0,
61 | visual_only=False,
62 | )
63 |
64 | object_list = [clamp, lemon, mug, hammer]
65 |
66 | # Setup the scene
67 | sim_factory = SimRepository.get_factory("mj_beta")
68 |
69 | # Set dt to 0.0005 to reduce jittering of the gripper in the more demanding physics simulation
70 | scene = sim_factory.create_scene(object_list=object_list, dt=0.0005)
71 | robot = sim_factory.create_robot(scene, dt=0.0005)
72 | scene.start()
73 |
74 | publisher = SFPublisher(scene, args.host)
75 |
76 | robot.set_desired_gripper_width(0.4)  # set the desired gripper width at the beginning
77 |
78 | # execute the pick and place movements
79 | robot.gotoCartPositionAndQuat(
80 | [0.4, 0, 0.1], [0, 0.7071068, -0.7071068, 0], duration=8
81 | )
82 | robot.gotoCartPositionAndQuat(
83 | [0.4, 0, -0.01], [0, 0.7071068, -0.7071068, 0], duration=2
84 | )
85 | robot.close_fingers()
86 | robot.gotoCartPositionAndQuat(
87 | [0.4, 0, 0.1], [0, 0.7071068, -0.7071068, 0], duration=8
88 | )
89 | robot.gotoCartPositionAndQuat(
90 | [0.8, 0, 0.1], [0, 0.7071068, -0.7071068, 0], duration=4
91 | )
92 |
93 | robot.open_fingers()
94 |
95 | while True:
96 | scene.next_step()
97 |
--------------------------------------------------------------------------------
/demos/SimulationFramework/ycb_object_pick_and_place.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from alr_sim.sims.SimFactory import SimRepository
3 | from alr_sim.sims.mj_beta.mj_utils.mj_scene_object import YCBMujocoObject
4 | from simpub.sim.sf_publisher import SFPublisher
5 |
6 | if __name__ == "__main__":
7 |
8 | parser = argparse.ArgumentParser()
9 | parser.add_argument("--host", type=str, default="127.0.0.1")
10 | args = parser.parse_args()
11 | host = args.host
12 |
13 | ycb_base_folder = "/home/xinkai/project/SF-ObjectDataset/YCB"
14 | clamp = YCBMujocoObject(
15 | ycb_base_folder=ycb_base_folder,
16 | object_id="051_large_clamp",
17 | object_name="clamp",
18 | pos=[0.4, 0, 0.1],
19 | quat=[0, 0, 0, 1],
20 | static=False,
21 | alpha=1.0,
22 | visual_only=False,
23 | )
24 |
25 | lemon = YCBMujocoObject(
26 | ycb_base_folder=ycb_base_folder,
27 | object_id="014_lemon",
28 | object_name="lemon",
29 | pos=[0.4, 0.2, 0.1],
30 | quat=[0, 0, 0, 1],
31 | static=False,
32 | alpha=1.0,
33 | visual_only=False,
34 | )
35 |
36 | mug = YCBMujocoObject(
37 | ycb_base_folder=ycb_base_folder,
38 | object_id="025_mug",
39 | object_name="mug",
40 | pos=[0.2, 0.1, 0.1],
41 | quat=[0, 0, 0, 1],
42 | static=False,
43 | alpha=1.0,
44 | visual_only=False,
45 | )
46 |
47 | hammer = YCBMujocoObject(
48 | ycb_base_folder=ycb_base_folder,
49 | object_id="048_hammer",
50 | object_name="hammer",
51 | pos=[0.3, -0.2, 0.1],
52 | quat=[0, 0, 0, 1],
53 | static=False,
54 | alpha=1.0,
55 | visual_only=False,
56 | )
57 |
58 | object_list = [clamp, lemon, mug, hammer]
59 |
60 | # Setup the scene
61 | sim_factory = SimRepository.get_factory("mj_beta")
62 | scene = sim_factory.create_scene(object_list=object_list, dt=0.0005)
63 | robot = sim_factory.create_robot(scene, dt=0.0005)
64 | scene.start()
65 |
66 | assert host is not None, "Please specify the host"
67 | publisher = SFPublisher(
68 | scene, host, no_tracked_objects=["table_plane", "table0"]
69 | )
70 |
71 | robot.set_desired_gripper_width(0.4)
72 |
73 | # execute the pick and place movements
74 | robot.gotoCartPositionAndQuat(
75 | [0.4, 0, 0.1], [0, 0.7071068, -0.7071068, 0], duration=8
76 | )
77 | robot.gotoCartPositionAndQuat(
78 | [0.4, 0, -0.01], [0, 0.7071068, -0.7071068, 0], duration=2
79 | )
80 | robot.close_fingers()
81 | robot.gotoCartPositionAndQuat(
82 | [0.4, 0, 0.1], [0, 0.7071068, -0.7071068, 0], duration=8
83 | )
84 | robot.gotoCartPositionAndQuat(
85 | [0.8, 0, 0.1], [0, 0.7071068, -0.7071068, 0], duration=4
86 | )
87 |
88 | robot.open_fingers()
89 |
90 | while True:
91 | scene.next_step()
92 |
--------------------------------------------------------------------------------
/demos/fancy_gym/BoxPushingDense.py:
--------------------------------------------------------------------------------
1 | import fancy_gym
2 | import time
3 |
4 | from simpub.sim.fancy_gym import FancyGymPublisher
5 |
6 | env_name = "BoxPushingDense-v0"
7 |
8 | env = fancy_gym.make(env_name, seed=1)
9 | obs = env.reset()
10 |
11 | publisher = FancyGymPublisher(env, "127.0.0.1")
12 |
13 | while True:
14 | action = env.action_space.sample()
15 | obs, reward, done, info = env.step(action)
16 | env.render()
17 | time.sleep(0.01)
18 | if done:
19 | obs = env.reset()
20 |
--------------------------------------------------------------------------------
/demos/fancy_gym/TableTennis.py:
--------------------------------------------------------------------------------
1 | import fancy_gym
2 | import time
3 |
4 | from simpub.sim.fancy_gym import FancyGymPublisher
5 |
6 | env_name = "TableTennis2D-v0"
7 |
8 | env = fancy_gym.make(env_name, seed=1)
9 | obs = env.reset()
10 |
11 | publisher = FancyGymPublisher(env, "192.168.0.134")
12 |
13 | while True:
14 | action = env.action_space.sample()
15 | obs, reward, done, info = env.step(action)
16 | env.render()
17 | time.sleep(0.01)
18 | if done:
19 | obs = env.reset()
20 |
--------------------------------------------------------------------------------
/demos/genesis/anymal_c_example.py:
--------------------------------------------------------------------------------
1 |
2 | import genesis as gs
3 | import numpy as np
4 | from simpub.sim.genesis_publisher import GenesisPublisher
5 |
6 | ########################## init ##########################
7 | gs.init(backend=gs.gpu)
8 |
9 | ########################## create a scene ##########################
10 | scene = gs.Scene(
11 | viewer_options=gs.options.ViewerOptions(
12 | camera_pos=(3, -1, 1.5),
13 | camera_lookat=(0.0, 0.0, 0.5),
14 | camera_fov=30,
15 | max_FPS=60,
16 | ),
17 | sim_options=gs.options.SimOptions(
18 | dt=0.01,
19 | ),
20 | show_viewer=True,
21 | )
22 |
23 | ########################## entities ##########################
24 | plane = scene.add_entity(
25 | gs.morphs.Plane(),
26 | )
27 | cube = scene.add_entity(
28 | gs.morphs.Box(
29 | size=(0.04, 0.04, 0.04),
30 | pos=(0.65, 0.0, 0.02),
31 | )
32 | )
33 | franka = scene.add_entity(
34 | gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
35 | )
36 | publisher = GenesisPublisher(scene)
37 | ########################## build ##########################
38 | scene.build()
39 |
40 | motors_dof = np.arange(7)
41 | fingers_dof = np.arange(7, 9)
42 |
43 | # set control gains
44 | franka.set_dofs_kp(
45 | np.array([4500, 4500, 3500, 3500, 2000, 2000, 2000, 100, 100]),
46 | )
47 | franka.set_dofs_kv(
48 | np.array([450, 450, 350, 350, 200, 200, 200, 10, 10]),
49 | )
50 | franka.set_dofs_force_range(
51 | np.array([-87, -87, -87, -87, -12, -12, -12, -100, -100]),
52 | np.array([87, 87, 87, 87, 12, 12, 12, 100, 100]),
53 | )
54 |
55 | end_effector = franka.get_link("hand")
56 |
57 | # move to pre-grasp pose
58 | qpos = franka.inverse_kinematics(
59 | link=end_effector,
60 | pos=np.array([0.65, 0.0, 0.25]),
61 | quat=np.array([0, 1, 0, 0]),
62 | )
63 | # gripper open pos
64 | qpos[-2:] = 0.04
65 | path = franka.plan_path(
66 | qpos_goal=qpos,
67 | num_waypoints=200, # 2s duration
68 | )
69 | # execute the planned path
70 | for waypoint in path:
71 | franka.control_dofs_position(waypoint)
72 | scene.step()
73 |
74 | # allow robot to reach the last waypoint
75 | for i in range(100):
76 | scene.step()
77 |
78 | # reach
79 | qpos = franka.inverse_kinematics(
80 | link=end_effector,
81 | pos=np.array([0.65, 0.0, 0.130]),
82 | quat=np.array([0, 1, 0, 0]),
83 | )
84 | print(qpos)
85 | franka.control_dofs_position(qpos[:-2], motors_dof)
86 | for i in range(100):
87 | scene.step()
88 |
89 | # grasp
90 | franka.control_dofs_position(qpos[:-2], motors_dof)
91 | franka.control_dofs_force(np.array([-0.5, -0.5]), fingers_dof)
92 |
93 | for i in range(100):
94 | scene.step()
95 |
96 | # lift
97 | qpos = franka.inverse_kinematics(
98 | link=end_effector,
99 | pos=np.array([0.65, 0.0, 0.28]),
100 | quat=np.array([0, 1, 0, 0]),
101 | )
102 | print(qpos)
103 | franka.control_dofs_position(qpos[:-2], motors_dof)
104 | for i in range(200000):
105 | scene.step()
106 | publisher.spin()
107 |
--------------------------------------------------------------------------------
/demos/genesis/drone_example.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import numpy as np
3 |
4 | import genesis as gs
5 |
6 | from simpub.sim.genesis_publisher import GenesisPublisher
7 |
8 | def main():
9 | parser = argparse.ArgumentParser()
10 | parser.add_argument("-v", "--vis", action="store_true", default=True)
11 | parser.add_argument("-m", "--mac", action="store_true", default=False)
12 | args = parser.parse_args()
13 |
14 | ########################## init ##########################
15 | gs.init(backend=gs.cpu)
16 |
17 | ########################## create a scene ##########################
18 | viewer_options = gs.options.ViewerOptions(
19 | camera_pos=(2.5, 0.0, 1.5),
20 | camera_lookat=(0.0, 0.0, 0.5),
21 | camera_fov=30,
22 | max_FPS=60,
23 | )
24 |
25 | scene = gs.Scene(
26 | sim_options=gs.options.SimOptions(
27 | dt=0.01,
28 | ),
29 | viewer_options=viewer_options,
30 | show_viewer=args.vis,
31 | )
32 |
33 | ########################## entities ##########################
34 | plane = scene.add_entity(
35 | gs.morphs.Plane(),
36 | )
37 |
38 | drones = []
39 | distance = 0.2
40 | square_size = 5
41 | for i in range(square_size):
42 | for j in range(square_size):
43 | drone = scene.add_entity(
44 | morph=gs.morphs.Drone(
45 | file="urdf/drones/cf2x.urdf",
46 | pos=(i * distance, j * distance, 0.02),
47 | ),
48 | )
49 | drones.append(drone)
50 |
51 | ########################## build ##########################
52 | publisher = GenesisPublisher(scene, host="192.168.0.134")
53 | scene.build()
54 | if args.mac:
55 | gs.tools.run_in_another_thread(fn=run_sim, args=(scene, drones, args.vis))
56 | if args.vis:
57 | scene.viewer.start()
58 | else:
59 | run_sim(scene, drones, args.vis)
60 | publisher.spin()
61 |
62 |
63 | def run_sim(scene, drones, enable_vis):
64 | traj = np.array(
65 | [
66 | [1.0, 1.0, 0.98824805, 1.0],
67 | [0.67815816, 1.0, 1.0, 1.0],
68 | [1.0, 0.87905186, 0.8319297, 1.0],
69 | [1.0, 0.85295373, 0.94554883, 1.0],
70 | [1.0, 1.0, 0.9663153, 1.0],
71 | [1.0, 1.0, 1.0, 1.0],
72 | [1.0, 1.0, 1.0, 1.0],
73 | [1.0, 1.0, 1.0, 1.0],
74 | [1.0, 1.0, 1.0, 1.0],
75 | [1.0, 1.0, 1.0, 1.0],
76 | [1.0, 1.0, 1.0, 1.0],
77 | [1.0, 1.0, 1.0, 1.0],
78 | [1.0, 1.0, 1.0, 1.0],
79 | [1.0, 1.0, 1.0, 1.0],
80 | [1.0, 1.0, 1.0, 1.0],
81 | [1.0, 1.0, 1.0, 1.0],
82 | [1.0, 1.0, 1.0, 1.0],
83 | [1.0, 1.0, 1.0, 1.0],
84 | [1.0, 1.0, 1.0, 1.0],
85 | [1.0, 1.0, 1.0, 1.0],
86 | [1.0, 1.0, 1.0, 1.0],
87 | [1.0, 1.0, 1.0, 1.0],
88 | [1.0, 1.0, 1.0, 1.0],
89 | [1.0, 1.0, 1.0, 1.0],
90 | [1.0, 1.0, 1.0, 1.0],
91 | [1.0, 1.0, 1.0, 1.0],
92 | [1.0, 1.0, 1.0, 1.0],
93 | [1.0, 1.0, 1.0, 1.0],
94 | [1.0, 1.0, 1.0, 1.0],
95 | [1.0, 1.0, 1.0, 1.0],
96 | [1.0, 1.0, 1.0, 1.0],
97 | [1.0, 1.0, 1.0, 1.0],
98 | [1.0, 1.0, 1.0, 1.0],
99 | [1.0, 1.0, 1.0, 1.0],
100 | [1.0, 1.0, 1.0, 1.0],
101 | [1.0, 1.0, 1.0, 1.0],
102 | [1.0, 1.0, 1.0, 1.0],
103 | [1.0, 0.9954323, 1.0, 1.0],
104 | [1.0, 0.9974212, 1.0, 1.0],
105 | [1.0, 0.99529535, 1.0, 1.0],
106 | [1.0, 0.9965133, 1.0, 1.0],
107 | [1.0, 0.99495167, 1.0, 1.0],
108 | [1.0, 0.99533206, 1.0, 1.0],
109 | [1.0, 0.9941533, 1.0, 1.0],
110 | [1.0, 0.9937679, 1.0, 1.0],
111 | [1.0, 0.9926078, 1.0, 1.0],
112 | [1.0, 0.99150425, 1.0, 1.0],
113 | [1.0, 0.9899133, 1.0, 1.0],
114 | [1.0, 0.9879518, 1.0, 1.0],
115 | [1.0, 0.985401, 1.0, 0.93023926],
116 | [1.0, 0.9757207, 1.0, 0.8165948],
117 | [1.0, 0.94503635, 1.0, 0.47085124],
118 | [1.0, 0.9973584, 1.0, 0.92234856],
119 | [1.0, 0.97841257, 1.0, 0.9260282],
120 | [1.0, 0.9749091, 1.0, 0.8766382],
121 | [1.0, 0.9661152, 1.0, 0.8508391],
122 | [1.0, 0.9588664, 1.0, 0.8184431],
123 | [1.0, 0.9513355, 1.0, 0.78656846],
124 | [1.0, 0.9438352, 1.0, 0.7555151],
125 | [1.0, 0.9364986, 1.0, 0.7264303],
126 | [1.0, 0.92944163, 1.0, 0.70034355],
127 | [1.0, 0.9227477, 1.0, 0.6780561],
128 | [1.0, 0.91646177, 1.0, 0.6601221],
129 | [1.0, 0.91059643, 1.0, 0.6468646],
130 | [1.0, 0.90513545, 1.0, 0.6384181],
131 | [1.0, 0.9000414, 1.0, 0.63476765],
132 | [1.0, 0.8952549, 1.0, 0.6357777],
133 | [1.0, 0.89069635, 1.0, 0.64121217],
134 | [1.0, 0.8862596, 1.0, 0.6507421],
135 | [1.0, 0.8818036, 1.0, 0.66393715],
136 | [1.0, 0.8771375, 1.0, 0.68025357],
137 | [1.0, 0.8720022, 1.0, 0.6990145],
138 | [1.0, 0.8660441, 1.0, 0.719399],
139 | [1.0, 0.85878307, 1.0, 0.7404561],
140 | [1.0, 0.84957653, 1.0, 0.7611621],
141 | [1.0, 0.83757895, 1.0, 0.78056324],
142 | [1.0, 0.8216941, 1.0, 0.7980228],
143 | [1.0, 0.8005113, 1.0, 0.8135467],
144 | [0.99108833, 0.77218705, 1.0, 0.828064],
145 | [0.97618765, 0.7336579, 1.0, 0.8425417],
146 | [0.9570234, 0.6814261, 1.0, 0.8581086],
147 | [0.92717046, 0.6107281, 1.0, 0.8740242],
148 | [0.87264377, 0.51361734, 1.0, 0.8831364],
149 | [0.7667494, 0.37425363, 1.0, 0.86565703],
150 | [0.58684665, 0.15105894, 1.0, 0.792903],
151 | [0.40309954, -0.22192897, 1.0, 0.6778493],
152 | [0.30307913, -0.6645406, 0.32032692, 0.5561814],
153 | [0.1432502, -1.0, -0.7834326, 0.30731094],
154 | [-0.05076139, -1.0, -1.0, -0.18850122],
155 | [-0.2028995, -1.0, -1.0, -0.47833002],
156 | [-0.33243275, -1.0, -1.0, -0.63186795],
157 | [-0.43252927, -1.0, -0.93684345, -0.7109936],
158 | [-0.50198543, -1.0, -0.8966909, -0.7451998],
159 | [-0.55477273, -1.0, -0.87718576, -0.7572431],
160 | [-0.59963316, -1.0, -0.8707306, -0.7596289],
161 | [-0.641077, -1.0, -0.8736891, -0.7593429],
162 | [-0.68137753, -1.0, -0.8838504, -0.76042706],
163 | [-0.72137207, -1.0, -0.8991675, -0.7649133],
164 | [-0.76085263, -1.0, -0.9172805, -0.77330756],
165 | [-0.7989401, -1.0, -0.93559915, -0.78509957],
166 | [-0.8345167, -1.0, -0.95170647, -0.799334],
167 | [-0.86661166, -1.0, -0.963775, -0.8150858],
168 | [-0.8946256, -1.0, -0.97078484, -0.8317086],
169 | [-0.91837806, -1.0, -0.97249895, -0.84885603],
170 | [-0.9380378, -1.0, -0.96929866, -0.8663847],
171 | [-0.9540071, -1.0, -0.96197397, -0.8842322],
172 | [-0.9668097, -1.0, -0.9515317, -0.90233094],
173 | [-0.97700363, -1.0, -0.9390431, -0.9205734],
174 | [-0.98512334, -1.0, -0.92553586, -0.93881696],
175 | [-0.99164504, -1.0, -0.91191846, -0.9569116],
176 | [-0.99697274, -1.0, -0.8989406, -0.97473806],
177 | [-1.0, -1.0, -0.8871773, -0.99223274],
178 | [-1.0, -1.0, -0.87696224, -1.0],
179 | [-1.0, -1.0, -0.86855894, -1.0],
180 | [-1.0, -1.0, -0.8622101, -1.0],
181 | [-1.0, -1.0, -0.85795015, -1.0],
182 | [-1.0, -1.0, -0.8557587, -1.0],
183 | [-1.0, -1.0, -0.8555704, -1.0],
184 | [-1.0, -1.0, -0.8572821, -1.0],
185 | [-1.0, -1.0, -0.860755, -1.0],
186 | [-1.0, -1.0, -0.8658133, -1.0],
187 | [-1.0, -1.0, -0.87224096, -1.0],
188 | [-1.0, -1.0, -0.87977535, -1.0],
189 | [-1.0, -1.0, -0.8881058, -1.0],
190 | [-1.0, -1.0, -0.89687437, -1.0],
191 | [-1.0, -1.0, -0.9056818, -1.0],
192 | [-1.0, -1.0, -0.91409653, -1.0],
193 | [-1.0, -1.0, -0.9216669, -1.0],
194 | [-1.0, -1.0, -0.9279278, -1.0],
195 | [-1.0, -1.0, -0.93239695, -1.0],
196 | [-0.9961943, -1.0, -0.9345514, -1.0],
197 | [-0.9834586, -1.0, -0.93362767, -1.0],
198 | [-0.9671113, -1.0, -0.9284645, -1.0],
199 | [-0.94588476, -1.0, -0.91775376, -1.0],
200 | [-0.91785616, -1.0, -0.8995549, -1.0],
201 | [-0.88016766, -1.0, -0.87093955, -1.0],
202 | [-0.8287441, -1.0, -0.82749766, -1.0],
203 | [-0.7582472, -1.0, -0.76269644, -1.0],
204 | [-0.66290134, -1.0, -0.66715723, -1.0],
205 | [-0.5392508, -1.0, -0.5280629, -1.0],
206 | [-0.39078623, -1.0, -0.3290425, -1.0],
207 | [-0.2295668, -1.0, -0.05206226, -1.0],
208 | [-0.06826158, -1.0, 0.30915332, -1.0],
209 | [0.08895309, -1.0, 0.7070197, -1.0],
210 | [0.2400503, -1.0, 1.0, -1.0],
211 | [0.3742329, -1.0, 1.0, -1.0],
212 | [0.48094982, -1.0, 1.0, -1.0],
213 | [0.56609666, -1.0, 1.0, -1.0],
214 | [0.63677347, -0.88508135, 1.0, -1.0],
215 | [0.7058708, -0.694147, 1.0, -1.0],
216 | [0.7992784, -0.5113944, 1.0, -1.0],
217 | [0.9422653, -0.2919022, 1.0, -1.0],
218 | ],
219 | dtype=np.float32,
220 | )
221 |
222 | for i in range(len(traj)):
223 | # 14468.43 rpm is the hover RPM; each normalized command scales it within +/-5%
224 | for drone in drones:
225 | drone.set_propellels_rpm((1 + 0.05 * traj[i]) * 14468.429183500699)
226 | scene.step()
227 |
228 | # while True:
229 | # drone.set_propellels_rpm((1 + 0.05 * np.array([1.0, 1.0, 1.0, 1.0])) * 14468.429183500699)
230 | # scene.step()
231 |
232 | # if enable_vis:
233 | # scene.viewer.stop()
234 |
235 |
236 |
237 |
238 | if __name__ == "__main__":
239 | main()
--------------------------------------------------------------------------------
/demos/genesis/franka_example.py:
--------------------------------------------------------------------------------
1 | import genesis as gs
2 | import numpy as np
3 |
4 | from simpub.sim.genesis_publisher import GenesisPublisher
5 |
6 | if __name__ == "__main__":
7 | gs.init(backend=gs.gpu)
8 | viewer_options = gs.options.ViewerOptions(
9 | camera_pos=(0, -3.5, 2.5),
10 | camera_lookat=(0.0, 0.0, 0.5),
11 | camera_fov=40,
12 | max_FPS=60,
13 | )
14 |
15 | scene = gs.Scene(
16 | viewer_options=viewer_options,
17 | sim_options=gs.options.SimOptions(
18 | dt=0.01,
19 | ),
20 | show_viewer=False,
21 | )
22 | # plane = scene.add_entity(
23 | # gs.morphs.Plane(),
24 | # )
25 | franka = scene.add_entity(
26 | gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
27 | )
28 |
29 | publisher = GenesisPublisher(scene)
30 | scene.build()
31 |
32 | jnt_names = [
33 | "joint1",
34 | "joint2",
35 | "joint3",
36 | "joint4",
37 | "joint5",
38 | "joint6",
39 | "joint7",
40 | "finger_joint1",
41 | "finger_joint2",
42 | ]
43 | dofs_idx = [franka.get_joint(name).dof_idx_local for name in jnt_names]
44 |
45 | # Optional: set control gains
46 | franka.set_dofs_kp(
47 | np.array([4500, 4500, 3500, 3500, 2000, 2000, 2000, 100, 100]),
48 | dofs_idx,
49 | )
50 | franka.set_dofs_kv(
51 | np.array([450, 450, 350, 350, 200, 200, 200, 10, 10]),
52 | dofs_idx,
53 | )
54 | franka.set_dofs_force_range(
55 | np.array([-87, -87, -87, -87, -12, -12, -12, -100, -100]),
56 | np.array([87, 87, 87, 87, 12, 12, 12, 100, 100]),
57 | dofs_idx,
58 | )
59 | # Hard reset
60 | for i in range(150):
61 | if i < 50:
62 | franka.set_dofs_position(np.array([1, 1, 0, 0, 0, 0, 0, 0.04, 0.04]), dofs_idx)
63 | elif i < 100:
64 | franka.set_dofs_position(np.array([-1, 0.8, 1, -2, 1, 0.5, -0.5, 0.04, 0.04]), dofs_idx)
65 | else:
66 | franka.set_dofs_position(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]), dofs_idx)
67 |
68 | scene.step()
69 |
70 | # PD control
71 | for i in range(12500):
72 | if i == 0:
73 | franka.control_dofs_position(
74 | np.array([1, 1, 0, 0, 0, 0, 0, 0.04, 0.04]),
75 | dofs_idx,
76 | )
77 | elif i == 250:
78 | franka.control_dofs_position(
79 | np.array([-1, 0.8, 1, -2, 1, 0.5, -0.5, 0.04, 0.04]),
80 | dofs_idx,
81 | )
82 | elif i == 500:
83 | franka.control_dofs_position(
84 | np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]),
85 | dofs_idx,
86 | )
87 | elif i == 750:
88 | # control first dof with velocity, and the rest with position
89 | franka.control_dofs_position(
90 | np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])[1:],
91 | dofs_idx[1:],
92 | )
93 | franka.control_dofs_velocity(
94 | np.array([1.0, 0, 0, 0, 0, 0, 0, 0, 0])[:1],
95 | dofs_idx[:1],
96 | )
97 | elif i == 1000:
98 | franka.control_dofs_force(
99 | np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]),
100 | dofs_idx,
101 | )
102 | # This is the internal control force computed based on the given control command
103 | # If using force control, it's the same as the given control command
104 | print("control force:", franka.get_dofs_control_force(dofs_idx))
105 |
106 | scene.step()
107 | publisher.spin()
--------------------------------------------------------------------------------
/demos/libero/libero_example.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from robosuite.environments.base import MujocoEnv as RobosuiteEnv
4 | import xml.etree.ElementTree as ET
5 | import os
6 |
7 | from simpub.xr_device.meta_quest3 import MetaQuest3 as MetaQuest3Sim
8 | from simpub.sim.mj_publisher import MujocoPublisher
9 |
10 | import libero
11 | from robosuite import load_controller_config
12 | import libero.libero.envs.bddl_utils as BDDLUtils
13 | from libero.libero.envs import TASK_MAPPING
14 | from robosuite.robots import ROBOT_CLASS_MAPPING
15 |
16 |
17 | class RobosuitePublisher(MujocoPublisher):
18 |
19 | def __init__(self, env: RobosuiteEnv, host):
20 | super().__init__(
21 | env.sim.model._model,
22 | env.sim.data._data,
23 | host,
24 | visible_geoms_groups=[1, 2, 3, 4],
25 | no_rendered_objects=["world"],
26 | )
27 |
28 |
29 | def select_file_from_txt(bddl_dataset_name: str, index: int = 0) -> str:
30 |
31 | bddl_base_path = os.path.join(libero.__path__[0], "libero", "bddl_files", bddl_dataset_name)
32 | task_info_path = os.path.join(bddl_base_path, "tasks_info.txt")
33 | with open(task_info_path, "r") as file:
34 | bddl_paths = [line.strip() for line in file.readlines()]
35 |
36 | if index < 0 or index >= len(bddl_paths):
37 | raise IndexError("Index is out of range for the available bddl files")
38 | return os.path.join(libero.__path__[0], bddl_paths[index])
39 |
40 | if __name__ == "__main__":
41 | # Get controller config
42 | controller_config = load_controller_config(default_controller="JOINT_POSITION")
43 | # Create argument configuration
44 | config = {
45 | "robots": ["Panda"],
46 | "controller_configs": controller_config,
47 | }
48 |
49 | bddl_file = select_file_from_txt("libero_spatial", index=0)
50 | problem_info = BDDLUtils.get_problem_info(bddl_file)
51 | # Create environment
52 | problem_name = problem_info["problem_name"]
53 | domain_name = problem_info["domain_name"]
54 | language_instruction = problem_info["language_instruction"]
55 | if "TwoArm" in problem_name:
56 | config["env_configuration"] = "single-arm-opposed"
57 | print(language_instruction)
58 | env = TASK_MAPPING[problem_name](
59 | bddl_file_name=bddl_file,
60 | **config,
61 | has_renderer=True,
62 | has_offscreen_renderer=False,
63 | render_camera="agentview",
64 | ignore_done=True,
65 | use_camera_obs=False,
66 | reward_shaping=True,
67 | control_freq=20,
68 | )
69 |
70 | # reset the environment
71 | env.reset()
72 | publisher = RobosuitePublisher(env, "127.0.0.1")
73 | while True:
74 | action = np.random.randn(env.robots[0].dof) # sample random action
75 | obs, reward, done, info = env.step(action) # take action in the environment
76 | env.render() # render on display
77 |
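
A brief usage note (not part of the original file): select_file_from_txt reads libero/bddl_files/<dataset>/tasks_info.txt, which is assumed to list one bddl path per line relative to the libero package root, and returns the entry at the given index. For example (dataset name and index are placeholders):

bddl_file = select_file_from_txt("libero_spatial", index=3)
print("Selected task:", bddl_file)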
--------------------------------------------------------------------------------
/demos/libero/libero_robot_control.py:
--------------------------------------------------------------------------------
1 | """
2 | Modified from the robosuite demonstration-collection example scripts.
3 |
4 | Teleoperate a LIBERO task either with a Meta Quest 3 controller (OSC_POSE)
5 | or by mirroring a real robot's published joint state (JOINT_POSITION).
6 |
7 | The simulation scene is streamed to the connected XR device via RobosuitePublisher.
8 |
9 | """
10 |
11 | import argparse
12 | import numpy as np
13 | import os
14 | import json
15 | from scipy.spatial.transform import Rotation as R
16 | import zmq
17 | import threading
18 | from termcolor import colored
19 |
20 | from robosuite import load_controller_config
21 | from robosuite.wrappers import VisualizationWrapper
22 | import libero.libero.envs.bddl_utils as BDDLUtils
23 | from libero.libero.envs import TASK_MAPPING
24 | from libero_example import select_file_from_txt, RobosuitePublisher
25 |
26 | from simpub.xr_device.meta_quest3 import MetaQuest3
27 |
28 |
29 | class MQ3CartController:
30 |
31 | def __init__(self, meta_quest3: MetaQuest3):
32 | self.meta_quest3 = meta_quest3
33 | self.last_state = None
34 |
35 | def get_action(self, obs):
36 | input_data = self.meta_quest3.get_input_data()
37 | action = np.zeros(7)
38 | if input_data is None:
39 | return action
40 | hand = input_data["right"]
41 | if self.last_state is not None and hand["hand_trigger"]:
42 | desired_pos, desired_quat = hand["pos"], hand["rot"]
43 | last_pos, last_quat = self.last_state
44 | action[0:3] = (np.array(desired_pos) - np.array(last_pos)) * 100
45 | d_rot = R.from_quat(desired_quat) * R.from_quat(last_quat).inv()
46 | action[3:6] = d_rot.as_euler("xyz") * 5
47 | if hand["index_trigger"]:
48 | action[-1] = 10
49 | else:
50 | action[-1] = -10
51 | # print(action)
52 | if not hand["hand_trigger"]:
53 | self.last_state = None
54 | else:
55 | self.last_state = (hand["pos"], hand["rot"])
56 | return action
57 |
58 |
59 | class RealRobotJointPDController:
60 |
61 | def __init__(self, real_robot_ip):
62 | self.pgain = np.array(
63 | [1000.0, 1000.0, 1000.0, 1000.0, 200.0, 200.0, 300.0]
64 | )
65 | self.dgain = np.array([50.0, 50.0, 50.0, 50.0, 6.0, 5.0, 6.0])
66 |
67 | self.sub_socket = zmq.Context().socket(zmq.SUB)
68 | self.sub_socket.connect(f"tcp://{real_robot_ip}:5555")
69 | self.sub_socket.setsockopt_string(zmq.SUBSCRIBE, "")
70 | self.data = None
71 | self.sub_task = threading.Thread(target=self.subscribe_task)
72 | self.sub_task.start()
73 |
74 | def subscribe_task(self):
75 | try:
76 | print('Start to sub')
77 | while True:
78 | msg = self.sub_socket.recv_string()
79 | # print(msg)
80 | self.data = json.loads(msg)
81 | except Exception as e:
82 | print(e)
83 |
84 | def get_action(self, obs):
85 | """
86 | Calculates a joint-space PD action for the simulated robot based on
87 | - the joint state (q, dq) published by the real robot
88 | - the current simulated joint positions and velocities
89 |
90 | :param obs: observation containing robot0_joint_pos and robot0_joint_vel
91 | :return: action of shape (8,): 7 joint commands plus one gripper command
92 | """
93 | joint_pos, joint_vel = obs['robot0_joint_pos'], obs['robot0_joint_vel']
94 | if self.data is None:
95 | return np.zeros(8)
96 | qd_d = self.data['q'] - joint_pos
97 | vd_d = self.data['dq'] - joint_vel
98 | action = np.zeros(8)
99 | action[0:7] = self.pgain * qd_d + self.dgain * vd_d # original
100 | if self.data['gripper_width'][0] < 0.9 * self.data['gripper_width'][1]:
101 | action[-1] = 10
102 | else:
103 | action[-1] = -10
104 | return action
105 |
106 |
107 | if __name__ == "__main__":
108 | # Arguments
109 | parser = argparse.ArgumentParser()
110 | parser.add_argument("--host", type=str, default="127.0.0.1")
111 | parser.add_argument("--device", type=str, default="meta_quest3")
112 | parser.add_argument("--task-id", type=int, default=19)
113 | parser.add_argument("--datasets", type=str, default="libero_90")
114 | parser.add_argument("--vendor-id", type=int, default=1133)
115 | parser.add_argument("--product-id", type=int, default=50726)
116 |
117 | args = parser.parse_args()
118 |
119 | if args.device == "real_robot":
120 | controller_method = "JOINT_POSITION"
121 | else:
122 | controller_method = "OSC_POSE"
123 | controller_config = load_controller_config(
124 | default_controller=controller_method
125 | )
126 |
127 | # Create argument configuration
128 | config = {
129 | "robots": ["Panda"],
130 | "controller_configs": controller_config,
131 | }
132 | bddl_file = select_file_from_txt(args.datasets, args.task_id)
133 | print("Selected task: ", bddl_file)
134 | assert os.path.exists(bddl_file)
135 | problem_info = BDDLUtils.get_problem_info(bddl_file)
136 |
137 | # Create environment
138 | problem_name = problem_info["problem_name"]
139 | domain_name = problem_info["domain_name"]
140 | language_instruction = problem_info["language_instruction"]
141 | text = colored(language_instruction, "red", attrs=["bold"])
142 | print("Goal of the following task: ", text)
143 |
144 | if "TwoArm" in problem_name:
145 | config["env_configuration"] = "single-arm-opposed"
146 | env = TASK_MAPPING[problem_name](
147 | bddl_file_name=bddl_file,
148 | **config,
149 | has_renderer=True,
150 | has_offscreen_renderer=False,
151 | render_camera="agentview",
152 | ignore_done=True,
153 | use_camera_obs=False,
154 | reward_shaping=True,
155 | control_freq=20,
156 | )
157 |
158 | # Wrap this with visualization wrapper
159 | env = VisualizationWrapper(env)
160 | obs = env.reset()
161 | publisher = RobosuitePublisher(env, args.host)
162 | # initialize device
163 | if args.device == "meta_quest3":
164 | virtual_controller = MQ3CartController(
165 | MetaQuest3(device_name="IRLMQ3-1")
166 | )
167 | elif args.device == "real_robot":
168 | virtual_controller = RealRobotJointPDController('141.3.53.152')
169 | else:
170 | raise Exception(
171 | "Invalid device choice: choose either 'keyboard' or 'spacemouse'."
172 | )
173 | while True:
174 | obs, _, _, _ = env.step(virtual_controller.get_action(obs))
175 | env.render()
176 |
--------------------------------------------------------------------------------
/demos/metaworld/metaworld_example.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import random
3 | import numpy as np
4 | from scipy.spatial.transform import Rotation as R
5 | import metaworld
6 | from simpub.sim.mj_publisher import MujocoPublisher
7 | from simpub.xr_device.meta_quest3 import MetaQuest3
8 |
9 |
10 | class MQ3CartController:
11 |
12 | def __init__(self, meta_quest3: MetaQuest3):
13 | self.meta_quest3 = meta_quest3
14 | self.last_state = None
15 |
16 | def get_action(self):
17 | input_data = self.meta_quest3.get_input_data()
18 | action = np.zeros(4)
19 | if input_data is None:
20 | return action
21 | hand = input_data["right"]
22 | if self.last_state is not None and hand["hand_trigger"]:
23 | desired_pos = hand["pos"]
24 | last_pos, last_quat = self.last_state
25 | action[0:3] = (np.array(desired_pos) - np.array(last_pos)) * 100
26 | # d_rot = R.from_quat(desired_quat) * R.from_quat(last_quat).inv()
27 | # action[3:6] = d_rot.as_euler("xyz") * 5
28 | if hand["index_trigger"]:
29 | action[-1] = 10
30 | else:
31 | action[-1] = -10
32 | if not hand["hand_trigger"]:
33 | self.last_state = None
34 | else:
35 | self.last_state = (hand["pos"], hand["rot"])
36 | return action
37 |
38 | def stop(self):
39 | pass
40 |
41 |
42 | def main():
43 | # Arguments
44 | parser = argparse.ArgumentParser()
45 | parser.add_argument("--host", type=str, default="127.0.0.1")
46 | parser.add_argument("--env_name", type=str, default="basketball-v2")
47 | args = parser.parse_args()
48 |
49 | # Initialize MetaWorld Benchmark
50 | ml1 = metaworld.ML1(args.env_name)
51 |
52 | # # Create Environment
53 | if args.env_name not in ml1.train_classes:
54 | raise ValueError(
55 | f"Environment '{args.env_name}' is not available."
56 | f"Available environments: {list(ml1.train_classes.keys())}")
57 |
58 | env = ml1.train_classes[args.env_name]()
59 | task = random.choice(ml1.train_tasks)
60 | env.set_task(task)
61 |
62 | # Initialize MujocoPublisher
63 | MujocoPublisher(
64 | env.model,
65 | env.data,
66 | host=args.host,
67 | visible_geoms_groups=list(range(3))
68 | )
69 | controller = MQ3CartController(MetaQuest3("IRLMQ3-1"))
70 |
71 | # Main Loop
72 | env.reset()
73 | env.max_path_length = 1000000000
74 | while True:
75 | action = controller.get_action()
76 | env.step(action)
77 | continue
78 |
79 |
80 | if __name__ == "__main__":
81 | main()
82 |
--------------------------------------------------------------------------------
/demos/mink/aloha_example.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import Optional, Sequence
3 |
4 | import mujoco
5 | import mujoco.viewer
6 | import numpy as np
7 | from loop_rate_limiters import RateLimiter
8 |
9 | import mink
10 |
11 | from simpub.sim.mj_publisher import MujocoPublisher
12 | from simpub.xr_device.meta_quest3 import MetaQuest3
13 |
14 | from scipy.spatial.transform import Rotation as R
15 |
16 | # Function to apply Z-axis rotation to a quaternion
17 | def apply_z_rotation(quat, z_angle = np.pi / 2):
18 | """
19 | Apply a rotation around the Z-axis to a given quaternion.
20 |
21 | Args:
22 | quat: The original quaternion (x, y, z, w).
23 | z_angle: The rotation angle around the Z-axis in radians.
24 |
25 | Returns:
26 | A new quaternion after applying the Z-axis rotation.
27 | """
28 | # Convert the input quaternion to a rotation object
29 | rotation = R.from_quat(quat)
30 |
31 | # Create a rotation around the Z-axis
32 | z_rotation = R.from_euler('z', z_angle)
33 |
34 | # Combine the rotations
35 | new_rotation = rotation * z_rotation # Order matters: z_rotation is applied first
36 |
37 | # Convert back to quaternion
38 | return new_rotation.as_quat()
39 |
40 |
41 |
42 | _HERE = Path(__file__).parent
43 | _XML = _HERE / "aloha" / "scene.xml"
44 |
45 | # Single arm joint names.
46 | _JOINT_NAMES = [
47 | "waist",
48 | "shoulder",
49 | "elbow",
50 | "forearm_roll",
51 | "wrist_angle",
52 | "wrist_rotate",
53 | ]
54 |
55 | # Single arm velocity limits, taken from:
56 | # https://github.com/Interbotix/interbotix_ros_manipulators/blob/main/interbotix_ros_xsarms/interbotix_xsarm_descriptions/urdf/vx300s.urdf.xacro
57 | _VELOCITY_LIMITS = {k: np.pi for k in _JOINT_NAMES}
58 |
59 |
60 | def compensate_gravity(
61 | model: mujoco.MjModel,
62 | data: mujoco.MjData,
63 | subtree_ids: Sequence[int],
64 | qfrc_applied: Optional[np.ndarray] = None,
65 | ) -> None:
66 | """Compute forces to counteract gravity for the given subtrees.
67 |
68 | Args:
69 | model: Mujoco model.
70 | data: Mujoco data.
71 | subtree_ids: List of subtree ids. A subtree is defined as the kinematic tree
72 | starting at the body and including all its descendants. Gravity
73 | compensation forces will be applied to all bodies in the subtree.
74 | qfrc_applied: Optional array to store the computed forces. If not provided,
75 | the applied forces in `data` are used.
76 | """
77 | qfrc_applied = data.qfrc_applied if qfrc_applied is None else qfrc_applied
78 | qfrc_applied[:] = 0.0 # Don't accumulate from previous calls.
79 | jac = np.empty((3, model.nv))
80 | for subtree_id in subtree_ids:
81 | total_mass = model.body_subtreemass[subtree_id]
82 | mujoco.mj_jacSubtreeCom(model, data, jac, subtree_id)
83 | qfrc_applied[:] -= model.opt.gravity * total_mass @ jac
84 |
85 |
86 | if __name__ == "__main__":
87 | model = mujoco.MjModel.from_xml_path(str(_XML))
88 | data = mujoco.MjData(model)
89 |
90 | publisher = MujocoPublisher(model, data, host="192.168.0.134")
91 | mq3 = MetaQuest3("IRLMQ3-1")
92 |
93 | # Bodies for which to apply gravity compensation.
94 | left_subtree_id = model.body("left/base_link").id
95 | right_subtree_id = model.body("right/base_link").id
96 |
97 | # Get the dof and actuator ids for the joints we wish to control.
98 | joint_names: list[str] = []
99 | velocity_limits: dict[str, float] = {}
100 | for prefix in ["left", "right"]:
101 | for n in _JOINT_NAMES:
102 | name = f"{prefix}/{n}"
103 | joint_names.append(name)
104 | velocity_limits[name] = _VELOCITY_LIMITS[n]
105 | dof_ids = np.array([model.joint(name).id for name in joint_names])
106 | actuator_ids = np.array([model.actuator(name).id for name in joint_names])
107 |
108 | configuration = mink.Configuration(model)
109 |
110 | tasks = [
111 | l_ee_task := mink.FrameTask(
112 | frame_name="left/gripper",
113 | frame_type="site",
114 | position_cost=1.0,
115 | orientation_cost=1.0,
116 | lm_damping=1.0,
117 | ),
118 | r_ee_task := mink.FrameTask(
119 | frame_name="right/gripper",
120 | frame_type="site",
121 | position_cost=1.0,
122 | orientation_cost=1.0,
123 | lm_damping=1.0,
124 | ),
125 | posture_task := mink.PostureTask(model, cost=1e-4),
126 | ]
127 |
128 | # Enable collision avoidance between the following geoms.
129 | l_wrist_geoms = mink.get_subtree_geom_ids(model, model.body("left/wrist_link").id)
130 | r_wrist_geoms = mink.get_subtree_geom_ids(model, model.body("right/wrist_link").id)
131 | l_geoms = mink.get_subtree_geom_ids(model, model.body("left/upper_arm_link").id)
132 | r_geoms = mink.get_subtree_geom_ids(model, model.body("right/upper_arm_link").id)
133 | frame_geoms = mink.get_body_geom_ids(model, model.body("metal_frame").id)
134 | collision_pairs = [
135 | (l_wrist_geoms, r_wrist_geoms),
136 | (l_geoms + r_geoms, frame_geoms + ["table"]),
137 | ]
138 | collision_avoidance_limit = mink.CollisionAvoidanceLimit(
139 | model=model,
140 | geom_pairs=collision_pairs, # type: ignore
141 | minimum_distance_from_collisions=0.05,
142 | collision_detection_distance=0.1,
143 | )
144 |
145 | limits = [
146 | mink.ConfigurationLimit(model=model),
147 | mink.VelocityLimit(model, velocity_limits),
148 | collision_avoidance_limit,
149 | ]
150 |
151 | l_mid = model.body("left/target").mocapid[0]
152 | r_mid = model.body("right/target").mocapid[0]
153 | solver = "quadprog"
154 | pos_threshold = 5e-3
155 | ori_threshold = 5e-3
156 | max_iters = 5
157 |
158 | left_gripper_actuator = model.actuator("left/gripper").id
159 | right_gripper_actuator = model.actuator("right/gripper").id
160 |
161 |
162 |
163 | with mujoco.viewer.launch_passive(
164 | model=model, data=data, show_left_ui=False, show_right_ui=False
165 | ) as viewer:
166 | mujoco.mjv_defaultFreeCamera(model, viewer.cam)
167 |
168 | # Initialize to the home keyframe.
169 | mujoco.mj_resetDataKeyframe(model, data, model.key("neutral_pose").id)
170 | configuration.update(data.qpos)
171 | mujoco.mj_forward(model, data)
172 | posture_task.set_target_from_configuration(configuration)
173 |
174 | # Initialize mocap targets at the end-effector site.
175 | mink.move_mocap_to_frame(model, data, "left/target", "left/gripper", "site")
176 | mink.move_mocap_to_frame(model, data, "right/target", "right/gripper", "site")
177 |
178 | rate = RateLimiter(frequency=200.0, warn=False)
179 | while viewer.is_running():
180 | # Update task targets.
181 | l_ee_task.set_target(mink.SE3.from_mocap_name(model, data, "left/target"))
182 | r_ee_task.set_target(mink.SE3.from_mocap_name(model, data, "right/target"))
183 |
184 | # Update posture task target.
185 | input_data = mq3.get_input_data()
186 | if input_data is not None:
187 | left_hand = input_data["left"]
188 | right_hand = input_data["right"]
189 | if left_hand["hand_trigger"]:
190 | pos = np.array(input_data["left"]["pos"])
191 | pos[0] = pos[0] + 0.1
192 | data.mocap_pos[model.body("left/target").mocapid[0]] = pos
193 | rot = input_data["left"]["rot"]
194 | rot = apply_z_rotation(rot, z_angle = - np.pi / 2)
195 | data.mocap_quat[model.body("left/target").mocapid[0]] = np.array([rot[3], rot[0], rot[1], rot[2]])
196 | if left_hand["index_trigger"]:
197 | data.ctrl[left_gripper_actuator] = 0.002
198 | else:
199 | data.ctrl[left_gripper_actuator] = 0.037
200 | if right_hand["hand_trigger"]:
201 | pos = np.array(input_data["right"]["pos"])
202 | pos[0] = pos[0] - 0.1
203 | data.mocap_pos[model.body("right/target").mocapid[0]] = pos
204 | rot = input_data["right"]["rot"]
205 | rot = apply_z_rotation(rot, z_angle = np.pi / 2)
206 | data.mocap_quat[model.body("right/target").mocapid[0]] = np.array([rot[3], rot[0], rot[1], rot[2]])
207 | if right_hand["index_trigger"]:
208 | data.ctrl[right_gripper_actuator] = 0.002
209 | else:
210 | data.ctrl[right_gripper_actuator] = 0.037
211 |
212 | # Compute velocity and integrate into the next configuration.
213 | for i in range(max_iters):
214 | vel = mink.solve_ik(
215 | configuration,
216 | tasks,
217 | rate.dt,
218 | solver,
219 | limits=limits,
220 | damping=1e-5,
221 | )
222 | configuration.integrate_inplace(vel, rate.dt)
223 |
224 | l_err = l_ee_task.compute_error(configuration)
225 | l_pos_achieved = np.linalg.norm(l_err[:3]) <= pos_threshold
226 | l_ori_achieved = np.linalg.norm(l_err[3:]) <= ori_threshold
227 | r_err = r_ee_task.compute_error(configuration)
228 | r_pos_achieved = np.linalg.norm(r_err[:3]) <= pos_threshold
229 | r_ori_achieved = np.linalg.norm(r_err[3:]) <= ori_threshold
230 | if (
231 | l_pos_achieved
232 | and l_ori_achieved
233 | and r_pos_achieved
234 | and r_ori_achieved
235 | ):
236 | break
237 |
238 | data.ctrl[actuator_ids] = configuration.q[dof_ids]
239 | compensate_gravity(model, data, [left_subtree_id, right_subtree_id])
240 | mujoco.mj_step(model, data)
241 |
242 | # Visualize at fixed FPS.
243 | viewer.sync()
244 | # rate.sleep()
245 |
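
Illustrative sketch (not part of the original demo): the mocap_quat assignments above reorder a scipy-style (x, y, z, w) quaternion into MuJoCo's (w, x, y, z) convention. A small helper makes that intent explicit; xyzw_to_wxyz is a hypothetical name, not a mink or MuJoCo API.

def xyzw_to_wxyz(quat_xyzw):
    """Convert an (x, y, z, w) quaternion to MuJoCo's (w, x, y, z) ordering."""
    x, y, z, w = quat_xyzw
    return np.array([w, x, y, z])

# e.g. data.mocap_quat[l_mid] = xyzw_to_wxyz(apply_z_rotation(rot, z_angle=-np.pi / 2))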
--------------------------------------------------------------------------------
/demos/mujoco/mujoco_menagerie.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import mujoco
3 | import os
4 | import time
5 |
6 | from simpub.sim.mj_publisher import MujocoPublisher
7 |
8 |
9 | if __name__ == '__main__':
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument("--path", type=str)
12 | # parser.add_argument("--name", type=str, default="unitree_h1")
13 | # parser.add_argument("--name", type=str, default="unitree_g1")
14 | parser.add_argument("--name", type=str, default="boston_dynamics_spot")
15 | parser.add_argument("--host", type=str, default="127.0.0.1")
16 | args = parser.parse_args()
17 | xml_path = os.path.join(
18 | args.path, "mujoco_menagerie", args.name, "scene_arm.xml"
19 | )
20 | model = mujoco.MjModel.from_xml_path(xml_path)
21 | data = mujoco.MjData(model)
22 | publisher = MujocoPublisher(
23 | model,
24 | data,
25 | args.host,
26 | visible_geoms_groups=list(range(1, 3))
27 | )
28 | mujoco.mj_step(model, data)
29 | while True:
30 | time.sleep(0.1)
31 |
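
A hedged variant (not in the original demo): the loop above only sleeps after a single mj_step, so the published scene stays static. If a running simulation is desired, the model can be stepped inside the loop, e.g.:

while True:
    mujoco.mj_step(model, data)
    time.sleep(model.opt.timestep)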
--------------------------------------------------------------------------------
/demos/mujoco/table_tennis_two_players/assets/bats.xml:
--------------------------------------------------------------------------------
[XML markup not preserved in this extract]
--------------------------------------------------------------------------------
/demos/mujoco/table_tennis_two_players/assets/include_free_ball.xml:
--------------------------------------------------------------------------------
[XML markup not preserved in this extract]
--------------------------------------------------------------------------------
/demos/mujoco/table_tennis_two_players/assets/include_table.xml:
--------------------------------------------------------------------------------
[XML markup not preserved in this extract]
--------------------------------------------------------------------------------
/demos/mujoco/table_tennis_two_players/assets/include_target_ball.xml:
--------------------------------------------------------------------------------
[XML markup not preserved in this extract]
--------------------------------------------------------------------------------
/demos/mujoco/table_tennis_two_players/assets/table_tennis_env.xml:
--------------------------------------------------------------------------------
[XML markup not preserved in this extract]
--------------------------------------------------------------------------------
/demos/mujoco/table_tennis_two_players/table_tennis.py:
--------------------------------------------------------------------------------
1 | import mujoco
2 | from mujoco import mj_name2id, mjtObj # type: ignore
3 | import numpy as np
4 | import time
5 | import os
6 | import argparse
7 | from scipy.spatial.transform import Rotation
8 |
9 | from simpub.sim.mj_publisher import MujocoPublisher
10 | from simpub.xr_device.meta_quest3 import MetaQuest3
11 |
12 |
13 | def check_episode_and_reset(mj_model, mj_data):
14 |     target_ball_id = mj_name2id(mj_model, mjtObj.mjOBJ_BODY, "target_ball")
15 | object_position = mj_data.xpos[target_ball_id]
16 | if abs(object_position[0]) > 2 or abs(object_position[1]) > 2:
17 | reset_ball(mj_model, mj_data)
18 |
19 |
20 | def reset_ball(mj_model, mj_data):
21 | print("Resetting ball")
22 |     target_ball_id = mj_name2id(mj_model, mjtObj.mjOBJ_BODY, "target_ball")
23 | body_jnt_addr = mj_model.body_jntadr[target_ball_id]
24 | qposadr = mj_model.jnt_qposadr[body_jnt_addr]
25 | mj_data.qpos[qposadr:qposadr + 3] = np.array([0, 0, -0.3])
26 | mj_data.qvel[qposadr:qposadr + 3] = np.array([1.5, 0, 0])
27 |
28 |
29 | def update_bat(mj_model, mj_data, player: MetaQuest3, bat_name, hand="right"):
30 | """
31 | Update a bat in MuJoCo using velocity control to track XR controller position and orientation.
32 |
33 | Args:
34 | mj_model: MuJoCo model
35 | mj_data: MuJoCo data
36 | player: MetaQuest3 object with controller data
37 | bat_name: Name of the bat body in the MuJoCo model
38 | hand: Which hand controller to use ("right" or "left")
39 | """
40 | # Control parameters - tune these for responsiveness vs. stability
41 | pos_gain = 100.0 # Position tracking gain
42 | rot_gain = 0.5 # Rotation tracking gain
43 | max_vel = 100.0 # Maximum linear velocity
44 | max_angvel = 40.0 # Maximum angular velocity
45 | # Get bat body ID
46 | bat_id = mj_name2id(mj_model, mjtObj.mjOBJ_BODY, bat_name)
47 | if bat_id < 0:
48 | print(f"Warning: Could not find body named '{bat_name}'")
49 | return
50 | # Find the joint that controls this bat
51 | bat_joint_id = None
52 | for j in range(mj_model.njnt):
53 | if mj_model.jnt_bodyid[j] == bat_id:
54 | bat_joint_id = j
55 | break
56 | if bat_joint_id is None:
57 | print(f"Warning: Could not find joint for '{bat_name}'")
58 | return
59 | bat_dofadr = mj_model.jnt_dofadr[bat_joint_id]
60 | # Get player input
61 | player_input = player.get_input_data()
62 | if player_input is None or hand not in player_input:
63 | mj_data.qvel[bat_dofadr:bat_dofadr+3] = np.array([0, 0, 0])
64 | # Angular velocity - next 3 DOFs
65 | mj_data.qvel[bat_dofadr+3:bat_dofadr+6] = np.array([0, 0, 0])
66 | return
67 | # Get target position and orientation from controller
68 | target_pos = np.array(player_input[hand]["pos"])
69 | quat = player_input[hand]["rot"]
70 | # Convert to MuJoCo quaternion format (w, x, y, z)
71 | target_quat = np.array([quat[3], quat[0], quat[1], quat[2]])
72 | # Get current object position and orientation directly from MuJoCo
73 | current_pos = mj_data.xpos[bat_id].copy()
74 | current_quat = mj_data.xquat[bat_id].copy()
75 | # Calculate position error and desired velocity
76 | pos_error = target_pos - current_pos
77 | desired_vel = pos_gain * pos_error
78 | # Limit maximum velocity
79 | vel_norm = np.linalg.norm(desired_vel)
80 | if vel_norm > max_vel:
81 | desired_vel = desired_vel * (max_vel / vel_norm)
82 | desired_angular_vel = rot_gain * get_rot_vec_error(target_quat, current_quat)
83 | # Limit maximum angular velocity
84 | ang_vel_norm = np.linalg.norm(desired_angular_vel)
85 | if ang_vel_norm > max_angvel:
86 | desired_angular_vel = desired_angular_vel * (max_angvel / ang_vel_norm)
87 | mj_data.qvel[bat_dofadr:bat_dofadr+3] = desired_vel
88 | mj_data.qvel[bat_dofadr+3:bat_dofadr+6] = desired_angular_vel
89 |
90 |
91 | def get_rot_vec_error(target_quat, current_quat):
92 | """
93 | Calculate the rotation vector error between target and current quaternions.
94 |
95 | Args:
96 | target_quat: Target quaternion in [w, x, y, z] format
97 | current_quat: Current quaternion in [w, x, y, z] format
98 | """
99 | # Convert from [w, x, y, z] to SciPy's [x, y, z, w] format
100 | target_scipy_quat = np.array([target_quat[1], target_quat[2], target_quat[3], target_quat[0]])
101 | current_scipy_quat = np.array([current_quat[1], current_quat[2], current_quat[3], current_quat[0]])
102 |
103 | # Create Rotation objects
104 | target_rot = Rotation.from_quat(target_scipy_quat)
105 | current_rot = Rotation.from_quat(current_scipy_quat)
106 | rot_error = current_rot.inv() * target_rot
107 | error = rot_error.as_rotvec(degrees=True)
108 | # error = target_rot.as_rotvec(degrees=True) - current_rot.as_rotvec(degrees=True)
109 | # for i in range(3):
110 | # if error[i] > 180:
111 | # error[i] -= 360
112 | # if error[i] < -180:
113 | # error[i] += 360
114 | return error
115 |
116 |
117 | if __name__ == '__main__':
118 | parser = argparse.ArgumentParser()
119 | parser.add_argument("--host", type=str, default="127.0.0.1")
120 | args = parser.parse_args()
121 | xml_path = os.path.join(
122 | os.path.dirname(os.path.abspath(__file__)),
123 | "assets/table_tennis_env.xml"
124 | )
125 | model = mujoco.MjModel.from_xml_path(xml_path)
126 | data = mujoco.MjData(model)
127 | last_time = time.time()
128 | publisher = MujocoPublisher(model, data, args.host)
129 | player1 = MetaQuest3("IRLMQ3-2")
130 | # player1 = MetaQuest3("UnityNode")
131 | # # player2 = MetaQuest3("ALR2")
132 | # while not player1.connected:
133 | # time.sleep(0.01)
134 | count = 0
135 | reset_ball(model, data)
136 | while True:
137 | try:
138 | mujoco.mj_step(model, data)
139 | interval = time.time() - last_time
140 | if interval < 0.002:
141 | time.sleep(0.002 - interval)
142 | last_time = time.time()
143 | update_bat(model, data, player1, "bat1")
144 | # if count % 10 == 0:
145 |             check_episode_and_reset(model, data)
146 | # count += 1
147 | except KeyboardInterrupt:
148 | break
149 | publisher.shutdown()
150 |
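From the indexing in update_bat above, MetaQuest3.get_input_data() is assumed to return per-hand poses roughly shaped as below (field names inferred from this script, not from the MetaQuest3 class itself); the rotation is a quaternion in (x, y, z, w) order, which the script converts to MuJoCo's (w, x, y, z):

example_input = {
    "left":  {"pos": [0.1, 1.2, 0.4], "rot": [0.0, 0.0, 0.0, 1.0]},
    "right": {"pos": [0.3, 1.1, 0.5], "rot": [0.0, 0.0, 0.0, 1.0]},
}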
--------------------------------------------------------------------------------
/demos/point_cloud/point_cloud_loader.py:
--------------------------------------------------------------------------------
1 | from simpub.core.net_manager import init_node
2 | from simpub.xr_device.xr_device import XRDevice
3 | from simpub.core.net_manager import Publisher
4 | import numpy as np
5 |
6 |
7 | def generate_random_point_cloud(num_points=10000):
8 | xyz = np.random.uniform(-1, 1, (num_points, 3)).astype(np.float32)
9 | rgb = np.random.uniform(0, 1, (num_points, 3)).astype(np.float32)
10 | size = (np.ones((num_points, 1)) * 0.01).astype(np.float32)
11 | cloud = np.hstack((xyz, rgb, size))
12 | return cloud.astype(np.float32).tobytes()
13 |
14 |
15 | if __name__ == "__main__":
16 |
17 | net_manager = init_node("192.168.0.134", "PointCloud")
18 | net_manager.start_node_broadcast()
19 | unity_editor = XRDevice("UnityNode")
20 | publisher = Publisher("PointCloud")
21 | try:
22 | while True:
23 | pointcloud_bytes = generate_random_point_cloud()
24 | publisher.publish_bytes(pointcloud_bytes)
25 | except KeyboardInterrupt:
26 | pass
27 |
28 | # data = simpublisher.generate_point_cloud_data(rgb_image, depth_image)
29 | # simpublisher.join()
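
Each point is packed as seven float32 values (x, y, z, r, g, b, size), so a subscriber can recover the array with a single reshape; a minimal decoding sketch:

import numpy as np

points = np.frombuffer(pointcloud_bytes, dtype=np.float32).reshape(-1, 7)
xyz, rgb, size = points[:, :3], points[:, 3:6], points[:, 6]

Note that the loop above publishes as fast as it can; inserting a short time.sleep would throttle the stream if needed.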
--------------------------------------------------------------------------------
/demos/robocasa/robocasa_example.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import numpy as np
4 | import robosuite
5 | from robosuite.controllers import load_composite_controller_config
6 | from robosuite.wrappers import VisualizationWrapper
7 | from termcolor import colored
8 | import robocasa.models.scenes.scene_registry
10 |
11 | from simpub.sim.robocasa_publisher import RobocasaPublisher
12 |
13 | if __name__ == "__main__":
14 | # Arguments
15 | parser = argparse.ArgumentParser()
16 | parser.add_argument("--host", type=str, default="192.168.0.134")
17 | parser.add_argument("--layout", type=int, default=5)
18 | parser.add_argument("--style", type=int, default=5)
19 | parser.add_argument("--robot", type=str, default="PandaOmron")
20 | args = parser.parse_args()
21 |
22 | # Create argument configuration
23 | config = {
24 | "env_name": "PnPCounterToCab",
25 | "robots": "PandaOmron",
26 | "controller_configs": load_composite_controller_config(robot=args.robot),
27 | "translucent_robot": False,
28 | }
29 | print(colored("Initializing environment...", "yellow"))
30 | env = robosuite.make(
31 | **config,
32 | has_renderer=True,
33 | has_offscreen_renderer=False,
34 | render_camera=None,
35 | ignore_done=True,
36 | use_camera_obs=False,
37 | control_freq=20,
38 | renderer="mjviewer",
39 | )
40 |
41 | # Grab reference to controller config and convert it to json-encoded string
42 |
43 | env.layout_and_style_ids = [[args.layout, args.style]]
44 | env.reset()
45 | env.render()
46 |
47 | publisher = RobocasaPublisher(env, args.host)
48 |
49 | while True:
50 | obs, _, _, _ = env.step(np.zeros(env.action_dim))
51 | env.render()
--------------------------------------------------------------------------------
/scripts/change_name.py:
--------------------------------------------------------------------------------
1 | from simpub.core.simpub_server import init_net_manager
2 | from simpub.xr_device.xr_device import XRDevice
3 | from simpub.core.log import logger
4 |
5 | import time
6 | import argparse
7 |
8 | if __name__ == "__main__":
9 |
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument("--host", type=str, default="127.0.0.1")
12 | parser.add_argument("--old", type=str, default="UnityClient")
13 | parser.add_argument("--new", type=str, default="UnityClient")
14 | args = parser.parse_args()
15 | net_manager = init_net_manager(args.host)
16 | net_manager.start()
17 | xr_device = XRDevice(args.old)
18 | while not xr_device.connected:
19 | time.sleep(0.01)
20 | result = xr_device.request("ChangeHostName", args.new)
21 | logger.info(result)
22 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | setup(
4 | name='simpub',
5 | version='1.3.1',
6 |     install_requires=["pyzmq", "numpy", "scipy", "colorama", "opencv-python", "trimesh"],
7 | include_package_data=True,
8 | packages=['simpub', 'simpub.parser', 'simpub.sim']
9 | )
10 |
--------------------------------------------------------------------------------
/simpub/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/simpub/core/__init__.py:
--------------------------------------------------------------------------------
1 | from .net_manager import init_node
2 |
--------------------------------------------------------------------------------
/simpub/core/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from colorama import init, Fore
3 |
4 | init(autoreset=True)
5 |
6 | # Define a new log level
7 | REMOTE_LOG_LEVEL_NUM = 25
8 | logging.addLevelName(REMOTE_LOG_LEVEL_NUM, "REMOTELOG")
9 |
10 |
11 | class CustomLogger(logging.Logger):
12 | def remote_log(self, message, *args, **kws):
13 | if self.isEnabledFor(REMOTE_LOG_LEVEL_NUM):
14 | self._log(REMOTE_LOG_LEVEL_NUM, message, args, **kws)
15 |
16 |
17 | class CustomFormatter(logging.Formatter):
18 | """Custom log formatter that adds colors"""
19 | FORMAT = "[%(asctime)s] [%(levelname)s] %(message)s"
20 |
21 | FORMATS = {
22 | logging.DEBUG: Fore.YELLOW + FORMAT + Fore.RESET,
23 | logging.INFO: Fore.BLUE + FORMAT + Fore.RESET,
24 | logging.WARNING: Fore.RED + FORMAT + Fore.RESET,
25 | logging.ERROR: Fore.MAGENTA + FORMAT + Fore.RESET,
26 | logging.CRITICAL: Fore.CYAN + FORMAT + Fore.RESET,
27 | # Add custom level format
28 | REMOTE_LOG_LEVEL_NUM: Fore.GREEN + FORMAT + Fore.RESET,
29 | }
30 |
31 | def format(self, record):
32 | log_fmt = self.FORMATS.get(record.levelno, self.FORMAT)
33 | formatter = logging.Formatter(log_fmt)
34 | return formatter.format(record)
35 |
36 |
37 | def get_logger():
38 | """Create and return a custom logger"""
39 | # logger = logging.getLogger("SimPublisher")
40 | logger = CustomLogger("SimPublisher")
41 | logger.setLevel(logging.DEBUG)
42 |
43 | # Create console handler and set level
44 | ch = logging.StreamHandler()
45 | ch.setLevel(logging.DEBUG)
46 |
47 | # Create custom formatter and add to handler
48 | formatter = CustomFormatter()
49 | ch.setFormatter(formatter)
50 |
51 | # Add handler to logger
52 | if not logger.handlers:
53 | logger.addHandler(ch)
54 |
55 | return logger
56 |
57 |
58 | # Get the logger
59 | logger = get_logger()
60 |
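Typical usage is to import the module-level logger; remote_log emits at the custom REMOTELOG level (25), which the formatter colors green:

from simpub.core.log import logger

logger.info("scene published")                  # blue
logger.warning("client heartbeat missed")       # red
logger.remote_log("message from an XR client")  # green (REMOTELOG, level 25)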
--------------------------------------------------------------------------------
/simpub/core/simpub_server.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import abc
3 | from typing import Dict, List, Optional, Set
4 | from asyncio import sleep as asyncio_sleep
5 | import traceback
6 |
7 | from ..simdata import SimScene
8 | from .net_manager import NodeManager, Streamer, StrBytesService, init_node
9 | from .log import logger
10 | from .utils import send_request, HashIdentifier
11 |
12 |
13 | class ServerBase(abc.ABC):
14 |
15 | def __init__(self, ip_addr: str, node_name: str):
16 | self.ip_addr: str = ip_addr
17 | self.net_manager = init_node(ip_addr, node_name)
18 | self.initialize()
19 | self.net_manager.start_node_broadcast()
20 |
21 | def spin(self):
22 | self.net_manager.spin()
23 |
24 | @abc.abstractmethod
25 | def initialize(self):
26 | raise NotImplementedError
27 |
28 | def shutdown(self):
29 | self.net_manager.stop_node()
30 |
31 |
32 | class MsgServer(ServerBase):
33 |
34 | def initialize(self):
35 | pass
36 |
37 |
38 | class SimPublisher(ServerBase):
39 |
40 | def __init__(
41 | self,
42 | sim_scene: SimScene,
43 | ip_addr: str = "127.0.0.1",
44 | no_rendered_objects: Optional[List[str]] = None,
45 | no_tracked_objects: Optional[List[str]] = None,
46 | fps: int = 30,
47 | ) -> None:
48 | self.sim_scene = sim_scene
49 | self.fps = fps
50 | if no_rendered_objects is None:
51 | self.no_rendered_objects = []
52 | else:
53 | self.no_rendered_objects = no_rendered_objects
54 | if no_tracked_objects is None:
55 | self.no_tracked_objects = []
56 | else:
57 | self.no_tracked_objects = no_tracked_objects
58 | super().__init__(ip_addr, "SimPublisher")
59 |
60 | def initialize(self) -> None:
61 | self.scene_update_streamer = Streamer(
62 | topic_name="SceneUpdate",
63 | update_func=self.get_update,
64 | fps=self.fps,
65 | start_streaming=True)
66 | self.asset_service = StrBytesService("Asset", self._on_asset_request)
67 | self.xr_device_set: Set[HashIdentifier] = set()
68 | self.net_manager.submit_task(self.search_xr_device, self.net_manager)
69 |
70 | async def search_xr_device(self, node: NodeManager):
71 | while node.running:
72 | for xr_info in node.nodes_info_manager.nodes_info.values():
73 | if xr_info["nodeID"] in self.xr_device_set:
74 | continue
75 | if "LoadSimScene" not in xr_info["serviceList"]:
76 | continue
77 | try:
78 | self.xr_device_set.add(xr_info["nodeID"])
79 | scene_string = f"LoadSimScene|{self.sim_scene.to_string()}"
80 | ip, port = xr_info["addr"]["ip"], xr_info["servicePort"]
81 | self.net_manager.submit_task(
82 | send_request,
83 | scene_string,
84 | f"tcp://{ip}:{port}",
85 | self.net_manager.zmq_context
86 | )
87 | logger.info(f"The Scene is sent to {xr_info['name']}")
88 | except Exception as e:
89 | logger.error(f"Error when sending scene to xr device: {e}")
90 | traceback.print_exc()
91 | await asyncio_sleep(0.5)
92 |
93 | def _on_asset_request(self, req: str) -> bytes:
94 | return self.sim_scene.raw_data[req]
95 |
96 | @abc.abstractmethod
97 | def get_update(self) -> Dict:
98 | raise NotImplementedError
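
Concrete publishers (e.g. MujocoPublisher) only need to supply get_update, which returns seven floats per tracked object: position (x, y, z) followed by a quaternion (x, y, z, w), both in Unity's convention. A minimal hypothetical subclass (class and object names are illustrative, not part of the library):

class StaticPublisher(SimPublisher):
    def get_update(self) -> Dict[str, List[float]]:
        # one tracked object fixed at the origin with identity rotation
        return {"demo_object": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]}

# publisher = StaticPublisher(sim_scene, "127.0.0.1")  # requires a populated SimScene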
--------------------------------------------------------------------------------
/simpub/core/utils.py:
--------------------------------------------------------------------------------
1 | import zmq
2 | import zmq.asyncio
3 | import struct
4 | import socket
5 | from typing import List, TypedDict
6 | import enum
7 | from traceback import print_exc
8 |
9 | from .log import logger
10 |
11 | IPAddress = str
12 | Port = int
13 | TopicName = str
14 | ServiceName = str
15 | AsyncSocket = zmq.asyncio.Socket
16 | HashIdentifier = str
17 |
18 | BROADCAST_INTERVAL = 0.5
19 | HEARTBEAT_INTERVAL = 0.2
20 | DISCOVERY_PORT = int(7720)
21 |
22 |
23 | class NodeAddress(TypedDict):
24 | ip: IPAddress
25 | port: Port
26 |
27 |
28 | def create_address(ip: IPAddress, port: Port) -> NodeAddress:
29 | return {"ip": ip, "port": port}
30 |
31 |
32 | class MSG(enum.Enum):
33 | SERVICE_ERROR = b'\x10'
34 | SERVICE_TIMEOUT = b'\x11'
35 |
36 |
37 | class NodeInfo(TypedDict):
38 | name: str
39 | nodeID: str # hash code since bytes is not JSON serializable
40 | addr: NodeAddress
41 | type: str
42 | servicePort: int
43 | topicPort: int
44 | serviceList: List[ServiceName]
45 | topicList: List[TopicName]
46 |
47 |
48 | async def send_request(
49 | msg: str,
50 | addr: str,
51 | context: zmq.asyncio.Context
52 | ) -> str:
53 | req_socket = context.socket(zmq.REQ)
54 | req_socket.connect(addr)
55 | try:
56 | await req_socket.send_string(msg)
57 | except Exception as e:
58 | logger.error(
59 | f"Error when sending message from send_message function in "
60 | f"simpub.core.utils: {e}"
61 | )
62 | result = await req_socket.recv_string()
63 | req_socket.close()
64 | return result
65 |
66 |
67 | # def calculate_broadcast_addr(ip_addr: IPAddress) -> IPAddress:
68 | # ip_bin = struct.unpack("!I", socket.inet_aton(ip_addr))[0]
69 | # netmask_bin = struct.unpack("!I", socket.inet_aton("255.255.255.0"))[0]
70 | # broadcast_bin = ip_bin | ~netmask_bin & 0xFFFFFFFF
71 | # return socket.inet_ntoa(struct.pack("!I", broadcast_bin))
72 |
73 |
74 | def create_udp_socket() -> socket.socket:
75 | return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
76 |
77 |
78 | def get_zmq_socket_port(socket: zmq.asyncio.Socket) -> int:
79 | endpoint: bytes = socket.getsockopt(zmq.LAST_ENDPOINT) # type: ignore
80 | return int(endpoint.decode().split(":")[-1])
81 |
82 |
83 | def split_byte(bytes_msg: bytes) -> List[bytes]:
84 | return bytes_msg.split(b"|", 1)
85 |
86 |
87 | def split_byte_to_str(bytes_msg: bytes) -> List[str]:
88 | return [item.decode() for item in split_byte(bytes_msg)]
89 |
90 |
91 | def split_str(str_msg: str) -> List[str]:
92 | return str_msg.split("|", 1)
93 |
94 |
95 | # def search_for_master_node(
96 | # local_ip: Optional[IPAddress] = None,
97 | # search_time: int = 5,
98 | # time_out: float = 0.1
99 | # ) -> Optional[Tuple[IPAddress, str]]:
100 | # if local_ip is not None:
101 | # broadcast_ip = calculate_broadcast_addr(local_ip)
102 | # else:
103 | # broadcast_ip = "255.255.255.255"
104 | # with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as _socket:
105 | # # wait for response
106 | # _socket.bind(("0.0.0.0", 0))
107 | # _socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
108 | # for _ in range(search_time):
109 | # _socket.sendto(
110 | # EchoHeader.PING.value, (broadcast_ip, DISCOVERY_PORT)
111 | # )
112 | # _socket.settimeout(time_out)
113 | # try:
114 | # data, addr = _socket.recvfrom(1024)
115 | # logger.info(f"Find a master node at {addr[0]}:{addr[1]}")
116 | # return addr[0], data.decode()
117 | # except socket.timeout:
118 | # continue
119 | # except KeyboardInterrupt:
120 | # break
121 | # except Exception as e:
122 | # logger.error(f"Error when searching for master node: {e}")
123 | # print_exc()
124 | # logger.info("No master node found, start as master node")
125 | # return None
126 |
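send_request is the low-level client used for node-to-node service calls; requests are plain strings of the form "ServiceName|payload" (as in the LoadSimScene call in simpub_server.py). A hedged usage sketch; the address and payload below are illustrative:

import asyncio
import zmq.asyncio

from simpub.core.utils import send_request

async def main() -> None:
    ctx = zmq.asyncio.Context()
    reply = await send_request("ChangeHostName|NewName", "tcp://127.0.0.1:7721", ctx)
    print(reply)

asyncio.run(main())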
--------------------------------------------------------------------------------
/simpub/parser/gs.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple, List, Union, Dict
2 | import genesis as gs
3 | from genesis.engine.entities import RigidEntity
4 | from genesis.engine.entities.rigid_entity import RigidLink, RigidGeom
5 | import numpy as np
6 |
7 |
8 | from ..simdata import SimScene, SimObject, SimTransform, SimVisual, VisualType, SimMaterial, SimMesh
9 |
10 |
11 | def gs2unity_pos(pos: List[float]) -> List[float]:
12 | return [-pos[1], pos[2], pos[0]]
13 |
14 |
15 | def gs2unity_quat(quat: List[float]) -> List[float]:
16 | return [quat[2], -quat[3], -quat[1], quat[0]]
17 |
18 |
19 | class GenesisSceneParser:
20 | def __init__(self, gs_scene: gs.Scene):
21 | self.gs_scene = gs_scene
22 | self.sim_scene, self.update_dict = self.parse_gs_scene(gs_scene, ["plane_baselink"])
23 |
24 | def parse_gs_scene(self, gs_scene: gs.Scene, no_rendered_objects) -> Tuple[SimScene, Dict[str, Union[RigidEntity, RigidLink]]]:
25 | sim_scene = SimScene()
26 | update_dict: Dict[str, Union[RigidEntity, RigidLink]] = {}
27 | body_hierarchy = {}
28 | sim_scene.root = SimObject(name="genesis_scene")
29 | for gs_entity in gs_scene.entities:
30 | sim_object, idx = self.process_rigid_entity(gs_entity, sim_scene)
31 | body_hierarchy[idx] = {
32 | "parent_id": -1,
33 | "sim_object": sim_object,
34 | }
35 | update_dict[sim_object.name] = gs_entity
36 | for link in gs_entity.links:
37 | sim_object, parent_id, idx = self.process_link(
38 | link, sim_scene
39 | )
40 | # update the body hierarchy dictionary
41 | body_hierarchy[idx] = {
42 | "parent_id": parent_id,
43 | "sim_object": sim_object,
44 | }
45 | update_dict[sim_object.name] = link
46 | for body_info in body_hierarchy.values():
47 | parent_id = body_info["parent_id"]
48 | if parent_id == -1:
49 | sim_scene.root.children.append(body_info["sim_object"])
50 | if parent_id in body_hierarchy:
51 | parent_info = body_hierarchy[parent_id]
52 | parent_object: SimObject = parent_info["sim_object"]
53 | parent_object.children.append(body_info["sim_object"])
54 | return sim_scene, update_dict
55 |
56 | def process_rigid_entity(
57 | self, gs_rigid_entity: RigidEntity, sim_scene: SimScene
58 | ) -> Tuple[SimObject, int]:
59 | sim_object = SimObject(name=str(gs_rigid_entity.uid))
60 | pos: List[float] = [0.0, 0.0, 0.0]
61 | rot: List[float] = [0.0, 0.0, 0.0, 1.0]
62 | if self.gs_scene.is_built:
63 |             pos = gs_rigid_entity.get_pos().tolist()
64 |             rot = gs_rigid_entity.get_quat().tolist()
65 |             assert isinstance(pos, list)
66 |             assert isinstance(rot, list)
67 | sim_object.trans = SimTransform(
68 | gs2unity_pos(pos),
69 | gs2unity_quat(rot),
70 | )
71 | return sim_object, gs_rigid_entity.base_link_idx
72 |
73 | def process_link(
74 | self, gs_link: RigidLink, sim_scene: SimScene
75 | ) -> Tuple[SimObject, int, int]:
76 | sim_object = SimObject(name=gs_link.name + f"{np.random.rand()}")
77 | sim_object.trans = SimTransform(
78 | gs2unity_pos(gs_link.pos),
79 | gs2unity_quat(gs_link.quat),
80 | )
81 | if gs_link.name in ["plane_baselink"]:
82 | return sim_object, gs_link.parent_idx, gs_link.idx
83 | for gs_geom in gs_link.vgeoms:
84 | sim_visual = self.process_vgeom(gs_geom, sim_scene)
85 | sim_object.visuals.append(sim_visual)
86 | return sim_object, gs_link.parent_idx, gs_link.idx
87 |
88 | def process_vgeom(
89 | self, gs_vgeom: RigidGeom, sim_scene: SimScene
90 | ) -> SimVisual:
91 | sim_visual = SimVisual(
92 | str(gs_vgeom.uid),
93 | type=VisualType.MESH,
94 | trans=SimTransform(
95 | gs2unity_pos(gs_vgeom.init_pos),
96 | gs2unity_quat(gs_vgeom.init_quat),
97 | ),
98 | )
99 | sim_visual.mesh = self.process_mesh(gs_vgeom, sim_scene)
100 | # sim_visual.material = SimMaterial(
101 | # color=[1.0, 1.0, 1.0, 1.0],
102 | # )
103 | sim_visual.material = self.parse_material(gs_vgeom)
104 | return sim_visual
105 |
106 | def process_mesh(
107 | self, gs_vgeom: RigidGeom, sim_scene: SimScene
108 | ) -> SimMesh:
109 | gs_trimesh_obj = gs_vgeom.get_trimesh()
110 | return SimMesh.create_mesh(
111 | sim_scene,
112 | gs_trimesh_obj.vertices,
113 | gs_trimesh_obj.faces,
114 | )
115 |
116 | # TODO: Implement the material and texture from trimesh
117 | def parse_material(self, gs_vgeom: RigidGeom):
118 | gs_trimesh_obj = gs_vgeom.get_trimesh()
119 | if isinstance(gs_trimesh_obj.visual, gs.ext.trimesh.visual.color.ColorVisuals):
120 | return SimMaterial(color=list(gs_trimesh_obj.visual.face_colors[0] / 255.0))
121 | else:
122 | return SimMaterial(color=[0.0, 0.0, 0.0, 1.0])
123 |
124 | # def parse_mesh(self, gs_mesh: gs.Mesh):
125 | # pass
126 |
127 |
--------------------------------------------------------------------------------
/simpub/parser/mesh_utils.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 |
3 | import numpy as np
4 | import numpy.typing as npt
5 |
6 |
7 | @dataclass
8 | class Mesh:
9 | vertex_buf: npt.NDArray = np.array([])
10 | # normal_buf: npt.NDArray = np.array([])
11 | index_buf: npt.NDArray = np.array([])
12 | uv_buf: npt.NDArray | None = None
13 |
14 | def __post_init__(self):
15 | if self.uv_buf is not None and len(self.uv_buf) == 0:
16 | self.uv_buf = None
17 |
18 | assert type(self.vertex_buf) in {np.ndarray, list}
19 | # assert type(self.normal_buf) in {np.ndarray, list}
20 | assert type(self.index_buf) in {np.ndarray, list}
21 | assert self.uv_buf is None or type(self.uv_buf) in {np.ndarray, list}
22 |
23 | self.vertex_buf = np.array(self.vertex_buf)
24 | # self.normal_buf = np.array(self.normal_buf)
25 | self.index_buf = np.array(self.index_buf)
26 | if self.uv_buf is not None:
27 | self.uv_buf = np.array(self.uv_buf)
28 |
29 | assert self.vertex_buf.shape[1] == 3
30 | # assert self.normal_buf.shape[1] == 3
31 | assert self.index_buf.shape[1] in {3, 4}
32 | if self.uv_buf is not None:
33 | assert self.uv_buf.shape[1] == 2
34 |
35 | # assert self.vertex_buf.shape[0] <= self.normal_buf.shape[0]
36 |
37 | # we must have one normal for each index (a vertex on a face)
38 | # assert self.normal_buf.shape[0] == self.index_buf.shape[0] * self.index_buf.shape[1]
39 |
40 | # the same for uv
41 | # if self.uv_buf is not None:
42 | # assert self.normal_buf.shape[0] == self.uv_buf.shape[0]
43 |
44 |
45 | def split_mesh_faces(input_mesh: Mesh) -> Mesh:
46 | # TODO: FIX!
47 |     # Don't use until it is fixed; right now it creates vertices that are not supposed to exist
48 |
49 | # # no need to process if each vertex has only one normal
50 | # if input_mesh.vertex_buf.shape[0] == input_mesh.normal_buf.shape[0]:
51 | # return input_mesh
52 |
53 | vertex_buf = []
54 | index_buf = []
55 |
56 | for tri in input_mesh.index_buf:
57 | if len(tri) == 3:
58 | index_buf.append(
59 | [
60 | len(vertex_buf) + 0,
61 | len(vertex_buf) + 1,
62 | len(vertex_buf) + 2,
63 | ]
64 | )
65 | else:
66 | assert len(tri) == 4
67 | index_buf.append(
68 | [
69 | len(vertex_buf) + 0,
70 | len(vertex_buf) + 1,
71 | len(vertex_buf) + 2,
72 | len(vertex_buf) + 3,
73 | ]
74 | )
75 |
76 | vertex_buf.append(input_mesh.vertex_buf[tri[0]])
77 | vertex_buf.append(input_mesh.vertex_buf[tri[1]])
78 | vertex_buf.append(input_mesh.vertex_buf[tri[2]])
79 | if len(tri) == 4:
80 | vertex_buf.append(input_mesh.vertex_buf[tri[3]])
81 |
82 | # uv_buf.append(input_mesh.uv_buf[tri[0]])
83 | # uv_buf.append(input_mesh.uv_buf[tri[1]])
84 | # uv_buf.append(input_mesh.uv_buf[tri[2]])
85 | # if len(tri) == 4:
86 | # uv_buf.append(input_mesh.uv_buf[tri[3]])
87 |
88 | # assert len(vertex_buf) == input_mesh.normal_buf.shape[0]
89 | assert len(vertex_buf) == len(index_buf) * len(index_buf[0])
90 |
91 | if input_mesh.uv_buf is not None:
92 | assert len(vertex_buf) == input_mesh.uv_buf.shape[0]
93 |
94 | return Mesh(
95 | vertex_buf=vertex_buf,
96 | # normal_buf=input_mesh.normal_buf,
97 | index_buf=index_buf,
98 | uv_buf=input_mesh.uv_buf,
99 | )
100 |
--------------------------------------------------------------------------------
/simpub/parser/mj.py:
--------------------------------------------------------------------------------
1 | import mujoco
2 | import numpy as np
3 | from typing import List, Dict, Callable
4 |
5 | from ..simdata import SimObject, SimScene, SimTransform, SimVisual
6 | from ..simdata import SimMaterial, SimTexture, SimMesh
7 | from ..simdata import VisualType
8 | from ..core.log import logger
9 |
10 |
11 | def plane2unity_scale(scale: List[float]) -> List[float]:
12 | return list(map(abs, [scale[0] * 2, 0.001, scale[1] * 2]))
13 |
14 |
15 | def box2unity_scale(scale: List[float]) -> List[float]:
16 | return [abs(scale[i]) * 2 for i in [1, 2, 0]]
17 |
18 |
19 | def sphere2unity_scale(scale: List[float]) -> List[float]:
20 | return [abs(scale[0]) * 2] * 3
21 |
22 |
23 | def cylinder2unity_scale(scale: List[float]) -> List[float]:
24 |     if len(scale) in (2, 3):
25 |         return list(map(abs, [scale[0] * 2, scale[1], scale[0] * 2]))
28 | elif len(scale) == 1:
29 | return list(map(abs, [scale[0] * 2, scale[0] * 2, scale[0] * 2]))
30 | raise ValueError("Only support scale with one, two or three components.")
31 |
32 |
33 | def capsule2unity_scale(scale: List[float]) -> List[float]:
34 |     if len(scale) in (2, 3):
35 |         return list(map(abs, [scale[0], scale[1], scale[0]]))
36 |     elif len(scale) == 1:
37 |         return list(map(abs, [scale[0] * 2, scale[0] * 2, scale[0] * 2]))
40 | raise ValueError("Only support scale with one, two or three components.")
41 |
42 |
43 | ScaleMap: Dict[str, Callable] = {
44 | VisualType.PLANE: lambda x: plane2unity_scale(x),
45 | VisualType.CUBE: lambda x: box2unity_scale(x),
46 | VisualType.SPHERE: lambda x: sphere2unity_scale(x),
47 | VisualType.CYLINDER: lambda x: cylinder2unity_scale(x),
48 | VisualType.CAPSULE: lambda x: capsule2unity_scale(x),
49 | VisualType.NONE: lambda x: capsule2unity_scale(x),
50 | }
51 |
52 |
53 | def scale2unity(scale: List[float], visual_type: str) -> List[float]:
54 | if visual_type in ScaleMap:
55 | return ScaleMap[visual_type](scale)
56 | else:
57 | return [1, 1, 1]
58 |
59 |
60 | MJModelGeomTypeMap = {
61 | mujoco.mjtGeom.mjGEOM_SPHERE: VisualType.SPHERE,
62 | mujoco.mjtGeom.mjGEOM_CAPSULE: VisualType.CAPSULE,
63 | mujoco.mjtGeom.mjGEOM_ELLIPSOID: VisualType.CAPSULE,
64 | mujoco.mjtGeom.mjGEOM_CYLINDER: VisualType.CYLINDER,
65 | mujoco.mjtGeom.mjGEOM_BOX: VisualType.CUBE,
66 | mujoco.mjtGeom.mjGEOM_MESH: VisualType.MESH,
67 | mujoco.mjtGeom.mjGEOM_PLANE: VisualType.PLANE,
68 | }
69 |
70 |
71 | def mj2unity_pos(pos: List[float]) -> List[float]:
72 | return [-pos[1], pos[2], pos[0]]
73 |
74 |
75 | def mj2unity_quat(quat: List[float]) -> List[float]:
76 | return [quat[2], -quat[3], -quat[1], quat[0]]
77 |
78 |
79 | class MjModelParser:
80 | def __init__(self, mj_model, visible_geoms_groups, no_rendered_objects=None):
81 | if no_rendered_objects is None:
82 | self.no_rendered_objects = []
83 | else:
84 | self.no_rendered_objects = no_rendered_objects
85 | self.visible_geoms_groups = visible_geoms_groups
86 | self.parse_model(mj_model)
87 | self.sim_scene.process_sim_obj(self.sim_scene.root)
88 |
89 | def parse(self):
90 | return self.sim_scene
91 |
92 | def parse_model(self, mj_model):
93 | sim_scene = SimScene()
94 | self.sim_scene = sim_scene
95 | # create a dictionary to store the body hierarchy
96 | body_hierarchy = {}
97 | for body_id in range(mj_model.nbody):
98 | sim_object, parent_id = self.process_body(
99 | mj_model, body_id
100 | )
101 | if sim_object is None:
102 | continue
103 | # update the body hierarchy dictionary
104 | body_hierarchy[body_id] = {
105 | "parent_id": parent_id,
106 | "sim_object": sim_object,
107 | }
108 | # create a tree structure from the body hierarchy
109 | for body_id, body_info in body_hierarchy.items():
110 | parent_id = body_info["parent_id"]
111 | if parent_id == -1:
112 | continue
113 | if parent_id in body_hierarchy:
114 | parent_info = body_hierarchy[parent_id]
115 | parent_object: SimObject = parent_info["sim_object"]
116 | parent_object.children.append(body_info["sim_object"])
117 | return sim_scene
118 |
119 | def process_body(self, mj_model, body_id: int):
120 | body_name = mujoco.mj_id2name(
121 | mj_model, mujoco.mjtObj.mjOBJ_BODY, body_id
122 | )
123 | # if body_name in self.no_rendered_objects:
124 | # return None, -1
125 | parent_id = mj_model.body_parentid[body_id]
126 | sim_object = SimObject(name=body_name)
127 | if parent_id == body_id:
128 | self.sim_scene.root = sim_object
129 | parent_id = -1
130 | # update the transform information
131 | trans = sim_object.trans
132 | trans.pos = mj2unity_pos(mj_model.body_pos[body_id].tolist())
133 | trans.rot = mj2unity_quat(mj_model.body_quat[body_id].tolist())
134 | # if the body does not have any geom, return the object
135 | if mj_model.body_geomadr[body_id] == -1 or body_name in self.no_rendered_objects:
136 | return sim_object, parent_id
137 | # process the geoms attached to the body
138 | num_geoms = mj_model.body_geomnum[body_id]
139 | for geom_id in range(
140 | mj_model.body_geomadr[body_id],
141 | mj_model.body_geomadr[body_id] + num_geoms
142 | ):
143 | geom_group = int(mj_model.geom_group[geom_id])
144 | # check if the geom participates in rendering
145 | if geom_group not in self.visible_geoms_groups:
146 | continue
147 |
148 | sim_visual = self.process_geoms(mj_model, geom_id)
149 | sim_object.visuals.append(sim_visual)
150 | return sim_object, parent_id
151 |
152 | def process_geoms(self, mj_model, geom_id: int):
153 | geom_name = mujoco.mj_id2name(
154 | mj_model, mujoco.mjtObj.mjOBJ_GEOM, geom_id
155 | )
156 | geom_type = mj_model.geom_type[geom_id]
157 | geom_pos = mj2unity_pos(mj_model.geom_pos[geom_id].tolist())
158 | geom_quat = mj2unity_quat(mj_model.geom_quat[geom_id].tolist())
159 | visual_type = MJModelGeomTypeMap[geom_type]
160 | geom_scale = scale2unity(
161 | mj_model.geom_size[geom_id].tolist(), visual_type
162 | )
163 | trans = SimTransform(
164 | pos=geom_pos, rot=geom_quat, scale=geom_scale
165 | )
166 | geom_color = mj_model.geom_rgba[geom_id].tolist()
167 | sim_visual = SimVisual(
168 | name=geom_name,
169 | type=visual_type,
170 | trans=trans,
171 | )
172 | # attach mesh id if geom type is mesh
173 | if geom_type == mujoco.mjtGeom.mjGEOM_MESH:
174 | mesh_id = mj_model.geom_dataid[geom_id]
175 | sim_visual.mesh = self.process_mesh(mj_model, mesh_id)
176 | # attach material id if geom has an associated material
177 | mat_id = mj_model.geom_matid[geom_id]
178 | if mat_id != -1:
179 | sim_visual.material = self.process_material(
180 | mj_model, mat_id
181 | )
182 | else:
183 | sim_visual.material = SimMaterial(geom_color)
184 | return sim_visual
185 |
186 | def process_mesh(self, mj_model, mesh_id: int):
187 | # vertices
188 | start_vert = mj_model.mesh_vertadr[mesh_id]
189 | num_verts = mj_model.mesh_vertnum[mesh_id]
190 | vertices = mj_model.mesh_vert[start_vert:start_vert + num_verts]
191 | vertices = vertices.astype(np.float32)
192 | # faces
193 | start_face = mj_model.mesh_faceadr[mesh_id]
194 | num_faces = mj_model.mesh_facenum[mesh_id]
195 | faces = mj_model.mesh_face[start_face:start_face + num_faces]
196 | faces = faces.astype(np.int32)
197 | # normals
198 | if hasattr(mj_model, "mesh_normaladr"):
199 | start_norm = mj_model.mesh_normaladr[mesh_id]
200 | num_norm = mj_model.mesh_normalnum[mesh_id]
201 | else:
202 | start_norm = start_vert
203 | num_norm = num_verts
204 | norms = None
205 | if num_norm == num_verts:
206 | norms = mj_model.mesh_normal[start_norm:start_norm + num_norm]
207 | norms = norms.astype(np.float32)
208 | # uv coordinates
209 | start_uv = mj_model.mesh_texcoordadr[mesh_id]
210 | mesh_texcoord = None
211 | faces_uv = None
212 | if start_uv != -1:
213 | num_texcoord = mj_model.mesh_texcoordnum[mesh_id]
214 | mesh_texcoord = mj_model.mesh_texcoord[
215 | start_uv:start_uv + num_texcoord
216 | ]
217 | faces_uv = mj_model.mesh_facetexcoord[
218 | start_face:start_face + num_faces
219 | ]
220 | mesh = SimMesh.create_mesh(
221 | scene=self.sim_scene,
222 | vertices=vertices,
223 | faces=faces,
224 | vertex_normals=norms,
225 | mesh_texcoord=mesh_texcoord,
226 | faces_uv=faces_uv,
227 | )
228 | return mesh
229 |
230 | def process_material(self, mj_model, mat_id: int):
231 | # build material information
232 | mat_color = mj_model.mat_rgba[mat_id]
233 | mat_emissionColor = mj_model.mat_emission[mat_id] * mat_color
234 | mat_color = mat_color.tolist()
235 | mat_emissionColor = mat_emissionColor.tolist()
236 | mat_specular = float(mj_model.mat_specular[mat_id])
237 | mat_shininess = float(mj_model.mat_shininess[mat_id])
238 | mat_reflectance = float(mj_model.mat_reflectance[mat_id])
239 | tex_id = mj_model.mat_texid[mat_id]
240 | mat_texture = None
241 | # support the 2.x version of mujoco
242 | if isinstance(tex_id, np.integer):
243 | tex_id = int(tex_id)
244 |         # for MuJoCo 3.x, where mat_texid is an array indexed by texture role (mjTEXROLE_RGB)
245 | # The second element of the texture array is base color (albedo)
246 | # TODO: Check after which mujoco version the tex id array is supported
247 | elif isinstance(tex_id, np.ndarray):
248 | tex_id = int(tex_id[1])
249 | else:
250 | logger.warning(
251 | f"Texture id is of type {type(tex_id)},"
252 | "which is not supported."
253 | )
254 | if tex_id != -1:
255 | mat_texture = self.process_texture(mj_model, tex_id)
256 | mat_texture.textureScale = mj_model.mat_texrepeat[mat_id].tolist()
257 | material = SimMaterial(
258 | color=mat_color,
259 | emissionColor=mat_emissionColor,
260 | specular=mat_specular,
261 | shininess=mat_shininess,
262 | reflectance=mat_reflectance,
263 | texture=mat_texture,
264 | )
265 | return material
266 |
267 | def process_texture(self, mj_model, tex_id: int):
268 | # build texture information
269 | # TODO: Support for more texture types
270 | # get the texture data
271 | tex_height = mj_model.tex_height[tex_id].item()
272 | tex_width = mj_model.tex_width[tex_id].item()
273 |         # only textures with 3 channels are supported
274 | if hasattr(mj_model, "tex_nchannel"):
275 | tex_nchannel = mj_model.tex_nchannel[tex_id]
276 | else:
277 | tex_nchannel = 3
278 | assert tex_nchannel == 3, "Only support texture with 3 channels."
279 | start_tex = mj_model.tex_adr[tex_id]
280 | num_tex_data = tex_height * tex_width * tex_nchannel
281 | if hasattr(mj_model, "tex_data"):
282 | tex_data: np.ndarray = mj_model.tex_data[
283 | start_tex:start_tex + num_tex_data
284 | ]
285 | else:
286 | tex_data: np.ndarray = mj_model.tex_rgb[
287 | start_tex:start_tex + num_tex_data
288 | ]
289 | return SimTexture.create_texture(
290 | tex_data, tex_height, tex_width, self.sim_scene
291 | )
292 |
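The helpers at the top of this file map MuJoCo's right-handed, z-up frame with (w, x, y, z) quaternions to Unity's left-handed, y-up frame with (x, y, z, w) quaternions; for example:

from simpub.parser.mj import mj2unity_pos, mj2unity_quat

mj2unity_pos([1.0, 2.0, 3.0])        # -> [-2.0, 3.0, 1.0]
mj2unity_quat([1.0, 0.0, 0.0, 0.0])  # identity (w, x, y, z) -> [0.0, -0.0, -0.0, 1.0]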
--------------------------------------------------------------------------------
/simpub/sim/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intuitive-robots/SimPublisher/6f0348f678038ca6738100cfa1ab757989dde006/simpub/sim/__init__.py
--------------------------------------------------------------------------------
/simpub/sim/coppelia_sim_publisher.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Tuple, Optional
2 |
3 | import numpy as np
4 |
5 | from ..parser.coppelia_sim import CoppeliasSimParser
6 | from ..core.simpub_server import SimPublisher
7 | from ..simdata import SimObject
8 |
18 |
19 | class CoppeliaSimPublisher(SimPublisher):
20 |
21 | def __init__(
22 | self, sim,
23 | host: str = "127.0.0.1",
24 | no_rendered_objects: Optional[List[str]] = None,
25 | no_tracked_objects: Optional[List[str]] = None,
26 | visual_layer_list: Optional[List[int]] = None,
27 | ) -> None:
28 | # self.mj_model = mj_model
29 | # self.mj_data = mj_data
30 | self.sim = sim
31 | self.parser = CoppeliasSimParser(sim, visual_layer_list)
32 | sim_scene = self.parser.parse()
33 |
34 | self.tracked_obj_trans: Dict[str, Tuple[np.ndarray, np.ndarray]] = {}
35 | super().__init__(
36 | sim_scene,
37 | host,
38 | no_rendered_objects,
39 | no_tracked_objects,
40 | )
41 | # self.set_update_objects(self.sim_scene.root)
42 |
43 | def set_update_objects(self, obj: Optional[SimObject]):
44 | if obj is None:
45 | return
46 | if obj.name in self.no_tracked_objects:
47 | return
48 | body_id = str(obj.name)
49 | pos = self.sim.getObjectPosition(body_id, self.sim.handle_world)
50 | rot = self.sim.getObjectQuaternion(body_id, self.sim.handle_world)
51 |
52 | trans: Tuple[np.ndarray, np.ndarray] = (pos, rot)
53 | self.tracked_obj_trans[obj.name] = trans
54 | for child in obj.children:
55 | self.set_update_objects(child)
56 |
57 | def get_update(self) -> Dict[str, List[float]]:
58 | state = {}
59 | for name, trans in self.tracked_obj_trans.items():
60 | pos, rot = trans
61 | state[name] = [
62 | -pos[1], pos[2], pos[0], rot[2], -rot[3], -rot[1], rot[0]
63 | ]
64 | return state
65 |
--------------------------------------------------------------------------------
/simpub/sim/fancy_gym.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | from gym.envs.mujoco.mujoco_env import MujocoEnv
3 |
4 | from .mj_publisher import MujocoPublisher
5 |
6 |
7 | class FancyGymPublisher(MujocoPublisher):
8 |
9 | def __init__(
10 | self,
11 | mj_env: MujocoEnv,
12 | host: str = "127.0.0.1",
13 | no_rendered_objects: List[str] = None,
14 | no_tracked_objects: List[str] = None,
15 | ) -> None:
16 |
17 | super().__init__(
18 | mj_env.model,
19 | mj_env.data,
20 | host,
21 | no_rendered_objects,
22 | no_tracked_objects,
23 | )
24 |
--------------------------------------------------------------------------------
/simpub/sim/genesis_publisher.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Dict, Tuple, Set
2 | import numpy as np
3 | import taichi as ti
4 |
5 | from genesis.engine.scene import Scene as GSScene
6 | from genesis.engine.entities import RigidEntity
7 | from genesis.engine.entities.rigid_entity import RigidLink, RigidGeom
8 |
9 | from ..parser.gs import GenesisSceneParser, gs2unity_pos, gs2unity_quat
10 | from ..core.simpub_server import SimPublisher
11 | from ..core.net_manager import NodeManager, Streamer, StrBytesService
12 | from ..core.utils import send_request, HashIdentifier
13 |
14 | @ti.data_oriented
15 | class GenesisPublisher(SimPublisher):
16 |
17 | def __init__(
18 | self,
19 | gs_scene: GSScene,
20 | host: str = "127.0.0.1",
21 | no_rendered_objects: Optional[List[str]] = None,
22 | no_tracked_objects: Optional[List[str]] = None,
23 | ) -> None:
24 | self.parser = GenesisSceneParser(gs_scene)
25 | sim_scene = self.parser.sim_scene
26 | self.update_dict = self.parser.update_dict
27 | self.tracked_obj_trans: Dict[str, Tuple[np.ndarray, np.ndarray]] = {}
28 | super().__init__(
29 | sim_scene,
30 | host,
31 | no_rendered_objects,
32 | no_tracked_objects,
33 | fps=60,
34 | )
35 |
36 | def initialize(self) -> None:
37 | self.scene_update_streamer = Streamer(
38 | # topic_name="RelatedUpdate",
39 | topic_name="SceneUpdate",
40 | update_func=self.get_update,
41 | fps=self.fps,
42 | start_streaming=True)
43 | self.asset_service = StrBytesService("Asset", self._on_asset_request)
44 | self.xr_device_set: Set[HashIdentifier] = set()
45 | self.net_manager.submit_task(self.search_xr_device, self.net_manager)
46 |
47 | def get_update(self) -> Dict[str, List[float]]:
48 |         # TODO: Fix the problem with the taichi kernel issue
49 | # return {}
50 | if not self.parser.gs_scene.is_built:
51 | return {}
52 | state = {}
53 | try:
54 | for name, item in self.update_dict.items():
55 | if name in self.no_tracked_objects:
56 | continue
57 | if isinstance(item, RigidEntity):
58 | pos = gs2unity_pos(item.get_pos().tolist())
59 | quat = gs2unity_quat(item.get_quat().tolist())
60 | elif isinstance(item, RigidLink):
61 | pos = gs2unity_pos(item.get_pos().tolist())
62 | quat = gs2unity_quat(item.get_quat().tolist())
63 | else:
64 | continue
65 | state[name] = [
66 | pos[0], pos[1], pos[2], quat[0], quat[1], quat[2], quat[3]
67 | ]
68 | except Exception:
69 | return {}
70 | return state
71 |
--------------------------------------------------------------------------------
/simpub/sim/isaacsim_publisher.py:
--------------------------------------------------------------------------------
1 | import io
2 |
3 | import numpy as np
4 | from pxr import Usd
5 | from usdrt import Rt
6 | from usdrt import UsdGeom as RtGeom
7 |
8 | from ..core.net_component import ByteStreamer
9 | from ..core.simpub_server import SimPublisher
10 | from ..parser.isaacsim import IsaacSimStageParser
11 |
12 |
13 | class IsaacSimPublisher(SimPublisher):
14 | def __init__(
15 | self,
16 | host: str,
17 | stage: Usd.Stage,
18 | ignored_prim_paths: list[str] = [],
19 | texture_cache_dir: str = None,
20 | ) -> None:
21 | self.parser = IsaacSimStageParser(
22 | stage=stage,
23 | ignored_prim_paths=ignored_prim_paths,
24 | texture_cache_dir=texture_cache_dir,
25 | )
26 | self.sim_scene = self.parser.parse_scene()
27 |
28 | # set usdrt stage and tracked prims
29 | self.rt_stage = self.parser.get_usdrt_stage()
30 | self.tracked_prims, self.tracked_deform_prims = self.parser.get_tracked_prims()
31 |
32 | self.sim_scene.process_sim_obj(self.sim_scene.root)
33 |
34 | super().__init__(self.sim_scene, host)
35 |
36 | # add deformable update streamer
37 | self.deform_update_streamer = ByteStreamer("DeformUpdate", self.get_deform_update, start_streaming=True)
38 |
39 | def get_update(self) -> dict[str, list[float]]:
40 | state = {}
41 | for tracked_prim in self.tracked_prims:
42 | prim_name = tracked_prim["name"]
43 | prim_path = tracked_prim["prim_path"]
44 | # get prim with usdrt api (necessary for getting updates from physics simulation)
45 | rt_prim = self.rt_stage.GetPrimAtPath(prim_path)
46 | rt_prim = Rt.Xformable(rt_prim)
47 | pos = rt_prim.GetWorldPositionAttr().Get()
48 | rot = rt_prim.GetWorldOrientationAttr().Get()
49 | # convert pos and rot to unity coord system
50 | state[prim_name] = [
51 | pos[1],
52 | pos[2],
53 | -pos[0],
54 | -rot.GetImaginary()[1],
55 | -rot.GetImaginary()[2],
56 | rot.GetImaginary()[0],
57 | rot.GetReal(),
58 | ]
59 | return state
60 |
61 | def get_deform_update(self) -> bytes:
62 | ###################################################
63 | ## Write binary data to send as update
64 | ## Structure:
65 | ##
66 | ## L: Length of update string containing all deformable meshes [ 4 bytes ]
67 |         ## S: Update string, semicolon-separated list of mesh names in this update [ ? bytes ]
68 |         ## N: Number of float32 vertex values (3 per vertex) for each mesh [ num_meshes x 4 bytes ]
69 |         ## V: Vertices for each mesh [ ? bytes for each mesh ]
70 | ##
71 | ## | L | S ... S | N ... N | V ... V |
72 | #####################################################
73 |
74 | state = {}
75 | mesh_vert_len = np.ndarray(len(self.tracked_deform_prims), dtype=np.uint32) # N
76 | update_list = "" # S
77 |
78 | for idx, tracked_prim in enumerate(self.tracked_deform_prims):
79 | mesh_name = tracked_prim["name"]
80 | prim_path = tracked_prim["prim_path"]
81 |
82 | vertices = np.asarray(
83 | self.rt_stage.GetPrimAtPath(prim_path).GetAttribute(RtGeom.Tokens.points).Get(),
84 | dtype=np.float32,
85 | )
86 | vertices = vertices[:, [1, 2, 0]]
87 | vertices[:, 2] = -vertices[:, 2]
88 | vertices = vertices.flatten()
89 | state[mesh_name] = np.ascontiguousarray(vertices)
90 |
91 | mesh_vert_len[idx] = vertices.size
92 | update_list += f"{mesh_name};"
93 |
94 | update_list = str.encode(update_list)
95 | bin_buffer = io.BytesIO()
96 |
97 | bin_buffer.write(len(update_list).to_bytes(4, "little")) # L
98 | bin_buffer.write(update_list) # S
99 |
100 | bin_buffer.write(mesh_vert_len) # N
101 |
102 | for hash in state:
103 | bin_buffer.write(state[hash]) # V
104 |
105 | return bin_buffer.getvalue()
106 |
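A receiver can unpack the DeformUpdate payload by walking the same layout; a hedged decoding sketch (it assumes the native little-endian byte order used when the buffer is written above):

import struct

import numpy as np

def decode_deform_update(payload: bytes) -> dict:
    # layout: | L | S ... S | N ... N | V ... V |
    (str_len,) = struct.unpack_from("<I", payload, 0)                     # L
    names = [n for n in payload[4:4 + str_len].decode().split(";") if n]  # S
    offset = 4 + str_len
    counts = np.frombuffer(payload, dtype=np.uint32, count=len(names), offset=offset)  # N
    offset += 4 * len(names)
    meshes = {}
    for name, n_floats in zip(names, counts):
        verts = np.frombuffer(payload, dtype=np.float32, count=int(n_floats), offset=offset)  # V
        meshes[name] = verts.reshape(-1, 3)
        offset += 4 * int(n_floats)
    return meshes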
--------------------------------------------------------------------------------
/simpub/sim/mj_publisher.py:
--------------------------------------------------------------------------------
1 | from mujoco import mj_name2id, mjtObj
2 | from typing import List, Dict, Tuple, Optional
3 | import numpy as np
4 |
5 | from ..core.simpub_server import SimPublisher
6 | from ..parser.mj import MjModelParser
7 | from ..simdata import SimObject
8 |
9 |
10 | class MujocoPublisher(SimPublisher):
11 |
12 | def __init__(
13 | self,
14 | mj_model,
15 | mj_data,
16 | host: str = "127.0.0.1",
17 | no_rendered_objects: Optional[List[str]] = None,
18 | no_tracked_objects: Optional[List[str]] = None,
19 | visible_geoms_groups: Optional[List[int]] = None,
20 | ) -> None:
21 | self.mj_model = mj_model
22 | self.mj_data = mj_data
23 | # default setting for visible geoms groups
24 | if visible_geoms_groups is None:
25 | visible_geoms_groups = list(range(5))
26 | self.parser = MjModelParser(mj_model, visible_geoms_groups, no_rendered_objects)
27 | sim_scene = self.parser.parse()
28 | self.tracked_obj_trans: Dict[str, Tuple[np.ndarray, np.ndarray]] = {}
29 | super().__init__(
30 | sim_scene,
31 | host,
32 | no_rendered_objects,
33 | no_tracked_objects,
34 | )
35 | self.set_update_objects(self.sim_scene.root)
36 |
37 | def set_update_objects(self, obj: Optional[SimObject]):
38 | if obj is None:
39 | return
40 | if obj.name in self.no_tracked_objects:
41 | return
42 | body_id = mj_name2id(self.mj_model, mjtObj.mjOBJ_BODY, obj.name)
43 | pos = self.mj_data.xpos[body_id]
44 | rot = self.mj_data.xquat[body_id]
45 | trans: Tuple[np.ndarray, np.ndarray] = (pos, rot)
46 | self.tracked_obj_trans[obj.name] = trans
47 | for child in obj.children:
48 | self.set_update_objects(child)
49 |
50 | def get_update(self) -> Dict[str, List[float]]:
51 | state = {}
52 | for name, trans in self.tracked_obj_trans.items():
53 | pos, rot = trans
54 | state[name] = [
55 | -pos[1], pos[2], pos[0], rot[2], -rot[3], -rot[1], rot[0]
56 | ]
57 | return state
58 |
--------------------------------------------------------------------------------
/simpub/sim/robocasa_publisher.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from robosuite.environments.base import MujocoEnv
4 |
5 | from .mj_publisher import MujocoPublisher
6 |
7 |
8 | class RobocasaPublisher(MujocoPublisher):
9 |
10 | def __init__(
11 | self,
12 | env: MujocoEnv,
13 | host: str = "127.0.0.1",
14 | no_rendered_objects: Optional[List[str]] = None,
15 | no_tracked_objects: Optional[List[str]] = None,
16 | ) -> None:
17 | super().__init__(
18 | env.sim.model._model,
19 | env.sim.data._data,
20 | host,
21 | no_rendered_objects,
22 | no_tracked_objects,
23 | visible_geoms_groups=list(range(1, 5)),
24 | )
25 |
--------------------------------------------------------------------------------
/simpub/sim/sf_publisher.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from alr_sim.sims.mj_beta import MjScene
4 |
5 | from .mj_publisher import MujocoPublisher
6 |
7 |
8 | class SFPublisher(MujocoPublisher):
9 |
10 | def __init__(
11 | self,
12 | sf_mj_sim: MjScene,
13 | host: str = "127.0.0.1",
14 | no_rendered_objects: Optional[List[str]] = None,
15 | no_tracked_objects: Optional[List[str]] = None,
16 | ) -> None:
17 | super().__init__(
18 | sf_mj_sim.model,
19 | sf_mj_sim.data,
20 | host,
21 | no_rendered_objects,
22 | no_tracked_objects,
23 | visible_geoms_groups=list(range(0, 3)),
24 | )
25 |
--------------------------------------------------------------------------------
/simpub/simdata.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from dataclasses import dataclass, field, fields, asdict
3 | from typing import Optional, Tuple, List, Dict
4 | from enum import Enum
5 | import numpy as np
6 | import random
7 | import json
8 | import trimesh
9 | import io
10 | from hashlib import md5
11 | import cv2
12 | import abc
13 |
14 |
15 | class VisualType(str, Enum):
16 | CUBE = "CUBE"
17 | SPHERE = "SPHERE"
18 | CAPSULE = "CAPSULE"
19 | CYLINDER = "CYLINDER"
20 | PLANE = "PLANE"
21 | QUAD = "QUAD"
22 | MESH = "MESH"
23 | NONE = "NONE"
24 |
25 |
26 | @dataclass
27 | class SimData:
28 |
29 | def to_dict(self):
30 | return {
31 | f.name: getattr(self, f.name)
32 | for f in fields(self)
33 | if not f.metadata.get("exclude", False)
34 | }
35 |
36 |
37 | @dataclass
38 | class SimMaterial(SimData):
39 | # All the color values are within the 0 - 1.0 range
40 | color: List[float]
41 | emissionColor: Optional[List[float]] = None
42 | specular: float = 0.5
43 | shininess: float = 0.5
44 | reflectance: float = 0.0
45 | texture: Optional[SimTexture] = None
46 |
47 |
48 | @dataclass
49 | class SimAsset(SimData):
50 | hash: str
51 | # scene: SimScene = field(init=True, metadata={"exclude": True})
52 |
53 | # def __post_init__(self):
54 | # raw_data = self.generate_raw_data()
55 | # self.hash = self.generate_hash(raw_data)
56 |
57 | @abc.abstractmethod
58 | def generate_raw_data(self) -> bytes:
59 | raise NotImplementedError
60 |
61 | @staticmethod
62 | def generate_hash(data: bytes) -> str:
63 | return md5(data).hexdigest()
64 |
65 | @staticmethod
66 | def write_to_buffer(
67 | bin_buffer: io.BytesIO,
68 | data: np.ndarray,
69 | ) -> Tuple[int, int]:
70 | # change all float nparray to float32 and all int nparray to int32
71 | if data.dtype == np.float64:
72 | data = data.astype(np.float32)
73 | elif data.dtype == np.int64:
74 | data = data.astype(np.int32)
75 | byte_data = data.tobytes()
76 | layout = bin_buffer.tell(), len(byte_data)
77 | bin_buffer.write(byte_data)
78 | return layout
79 |
80 | def update_raw_data(
81 | self,
82 | raw_data: Dict[str, bytes],
83 | new_data: bytes
84 | ) -> None:
85 | if self.hash in raw_data:
86 | raw_data.pop(self.hash)
87 | self.hash = md5(new_data).hexdigest()
88 | raw_data[self.hash] = new_data
89 |
90 |
91 | @dataclass
92 | class SimMesh(SimAsset):
93 | # (offset: bytes, count: int)
94 | verticesLayout: Tuple[int, int]
95 | indicesLayout: Tuple[int, int]
96 | normalsLayout: Tuple[int, int]
97 | uvLayout: Optional[Tuple[int, int]] = None
98 |
99 | @staticmethod
100 | def create_mesh(
101 | scene: SimScene,
102 | vertices: np.ndarray,
103 | faces: np.ndarray,
104 | vertex_normals: Optional[np.ndarray] = None,
105 | face_normals: Optional[np.ndarray] = None,
106 | mesh_texcoord: Optional[np.ndarray] = None,
107 | vertex_uvs: Optional[np.ndarray] = None,
108 | faces_uv: Optional[np.ndarray] = None,
109 | ) -> SimMesh:
110 | uvs = None
111 | if vertex_uvs is not None:
112 | assert vertex_uvs.shape[0] == vertices.shape[0], (
113 | f"Number of vertex uvs ({vertex_uvs.shape[0]}) must be equal "
114 | f"to number of vertices ({vertices.shape[0]})"
115 | )
116 | uvs = vertex_uvs.flatten()
117 | elif faces_uv is not None and mesh_texcoord is None:
118 | vertices, faces = SimMesh.generate_vertex_by_faces(vertices, faces)
119 | uvs = faces_uv.flatten()
120 | elif mesh_texcoord is not None and faces_uv is not None:
121 | if mesh_texcoord.shape[0] == vertices.shape[0]:
122 | uvs = mesh_texcoord
123 | else:
124 | vertices, faces, uvs, vertex_normals = (
125 | SimMesh.generate_vertex_uv_from_face_uv(
126 | vertices,
127 | faces_uv,
128 | faces,
129 | mesh_texcoord,
130 | )
131 | )
132 | assert uvs.shape[0] == vertices.shape[0], (
133 | f"Number of mesh texcoords ({mesh_texcoord.shape[0]}) must be "
134 | f"equal to number of vertices ({vertices.shape[0]})"
135 | )
136 | uvs = uvs.flatten()
137 | # create trimesh object
138 | trimesh_obj = trimesh.Trimesh(
139 | vertices=vertices,
140 | faces=faces,
141 | vertex_normals=vertex_normals,
142 | face_normals=face_normals,
143 | process=False,
144 | )
145 | trimesh_obj.fix_normals()
146 | vertices = trimesh_obj.vertices
147 | indices = trimesh_obj.faces
148 | normals = trimesh_obj.vertex_normals
149 | # Vertices
150 | vertices = vertices.astype(np.float32)
151 | num_vertices = vertices.shape[0]
152 | vertices = vertices[:, [1, 2, 0]]
153 | vertices[:, 0] = -vertices[:, 0]
154 | vertices = vertices.flatten()
155 | # Indices / faces
156 | indices = indices.astype(np.int32)
157 | indices = indices[:, [2, 1, 0]]
158 | indices = indices.flatten()
159 | # Normals
160 | normals = normals.astype(np.float32)
161 | normals = normals[:, [1, 2, 0]]
162 | normals[:, 0] = -normals[:, 0]
163 | normals = normals.flatten()
164 | assert normals.size == num_vertices * 3, (
165 | f"Number of vertex normals ({normals.shape[0]}) must be equal "
166 | f"to number of vertices ({num_vertices})"
167 | )
168 | assert np.max(indices) < num_vertices, (
169 | f"Index value exceeds number of vertices: {np.max(indices)} >= "
170 | f"{num_vertices}"
171 | )
172 | assert indices.size % 3 == 0, (
173 | f"Number of indices ({indices.size}) must be a multiple of 3"
174 | )
175 | # write to buffer
176 | bin_buffer = io.BytesIO()
177 | vertices_layout = SimMesh.write_to_buffer(bin_buffer, vertices)
178 | indices_layout = SimMesh.write_to_buffer(bin_buffer, indices)
179 | normals_layout = SimMesh.write_to_buffer(bin_buffer, normals)
180 | # UVs
181 | uv_layout = (0, 0)
182 | if uvs is not None:
183 | assert uvs.size == num_vertices * 2, (
184 |                 f"Number of vertex uvs ({uvs.size // 2}) must be equal to "
185 | f"number of vertices ({num_vertices})"
186 | )
187 | uv_layout = SimMesh.write_to_buffer(bin_buffer, uvs)
188 | bin_data = bin_buffer.getvalue()
189 | hash = SimMesh.generate_hash(bin_data)
190 | scene.raw_data[hash] = bin_data
191 | return SimMesh(
192 | hash=hash,
193 | verticesLayout=vertices_layout,
194 | indicesLayout=indices_layout,
195 | normalsLayout=normals_layout,
196 | uvLayout=uv_layout,
197 | )
198 |
199 | @staticmethod
200 | def generate_vertex_by_faces(
201 | vertices: np.ndarray,
202 | faces: np.ndarray,
203 | ):
204 | new_vertices = []
205 | for face in faces:
206 | new_vertices.append(vertices[face])
207 | new_vertices = np.concatenate(new_vertices)
208 | new_faces = np.arange(new_vertices.shape[0]).reshape(-1, 3)
209 | return new_vertices, new_faces
210 |
211 | @staticmethod
212 | def generate_vertex_uv_from_face_uv(
213 | vertices: np.ndarray,
214 | face_texcoord: np.ndarray,
215 | faces: np.ndarray,
216 | mesh_texcoord: np.ndarray,
217 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, None]:
218 | new_vertices = []
219 | new_faces = []
220 | vertex_texcoord = []
221 |         for face, uv_indices in zip(faces, face_texcoord):
222 |             new_vertices.append(vertices[face])
223 |             vertex_texcoord.append(mesh_texcoord[uv_indices])
224 | for item in new_vertices:
225 | assert item.shape == (3, 3), f"Shape of item is {item.shape}"
226 | new_vertices = np.concatenate(new_vertices)
227 | vertex_texcoord = np.concatenate(vertex_texcoord)
228 | new_faces = np.arange(new_vertices.shape[0]).reshape(-1, 3)
229 | return new_vertices, new_faces, vertex_texcoord, None
230 |
231 |
232 | @dataclass
233 | class SimTexture(SimAsset):
234 | width: int
235 | height: int
236 | # TODO: new texture type
237 | textureType: str
238 | textureScale: Tuple[int, int]
239 |
240 | @staticmethod
241 | def compress_image(
242 | image: np.ndarray,
243 | height: int,
244 | width: int,
245 | max_texture_size_kb: int = 5000,
246 | min_scale: float = 0.5,
247 | ) -> Tuple[np.ndarray, int, int]:
248 | # compress the texture data
249 | max_texture_size = max_texture_size_kb * 1024
250 | # Compute scale factor based on texture size
251 | scale = np.sqrt(image.nbytes / max_texture_size)
252 | # Adjust scale factor for small textures
253 | if scale < 1: # Texture is already under the limit
254 | scale = 1 # No resizing needed
255 | elif scale < 1 + min_scale: # Gradual scaling for small images
256 | scale = 1 + (scale - 1) * min_scale
257 | else: # Normal scaling for larger textures
258 | scale = int(scale) + 1
259 | new_width = int(width // scale)
260 | new_height = int(height // scale)
261 | # Reshape and resize the texture data
262 | compressed_image = cv2.resize(
263 | image.reshape(height, width, -1),
264 | (new_width, new_height),
265 | interpolation=cv2.INTER_LINEAR,
266 | # interpolation=cv2.INTER_AREA if scale > 2 else cv2.INTER_LINEAR,
267 | )
268 | return compressed_image, new_height, new_width
269 |
270 | @staticmethod
271 | def create_texture(
272 | image: np.ndarray,
273 | height: int,
274 | width: int,
275 | scene: SimScene,
276 |         texture_scale: Tuple[int, int] = (1, 1),
277 | texture_type: str = "2D",
278 | ) -> SimTexture:
279 | image = image.astype(np.uint8)
280 | image, height, width = SimTexture.compress_image(image, height, width)
281 | image_byte = image.astype(np.uint8).tobytes()
282 | hash = SimTexture.generate_hash(image_byte)
283 | scene.raw_data[hash] = image_byte
284 | return SimTexture(
285 | hash=hash,
286 | width=image.shape[1],
287 | height=image.shape[0],
288 | textureType=texture_type,
289 | textureScale=texture_scale,
290 | )
291 |
292 |
293 | @dataclass
294 | class SimTransform(SimData):
295 | pos: List[float] = field(default_factory=lambda: [0, 0, 0])
296 | rot: List[float] = field(default_factory=lambda: [0, 0, 0, 1])
297 | scale: List[float] = field(default_factory=lambda: [1, 1, 1])
298 |
299 | def __add__(self, other: SimTransform):
300 | pos = np.array(self.pos) + np.array(other.pos)
301 | rot = np.array(self.rot) + np.array(other.rot)
302 | scale = np.array(self.scale) * np.array(other.scale)
303 | return SimTransform(
304 | pos=pos.tolist(),
305 | rot=rot.tolist(),
306 | scale=scale.tolist(),
307 | )
308 |
309 |
310 | @dataclass
311 | class SimVisual(SimData):
312 | name: str
313 | type: VisualType
314 | trans: SimTransform
315 | material: Optional[SimMaterial] = None
316 | mesh: Optional[SimMesh] = None
317 | # TODO: easily set up transparency
318 | # def setup_transparency(self):
319 | # if self.material is not None:
320 | # self.material = self
321 |
322 |
323 | @dataclass
324 | class SimObject(SimData):
325 | name: str
326 | trans: SimTransform = field(default_factory=SimTransform)
327 | visuals: List[SimVisual] = field(default_factory=list)
328 | children: List[SimObject] = field(default_factory=list)
329 |
330 |
331 | class SimScene:
332 | def __init__(self) -> None:
333 | self.root: Optional[SimObject] = None
334 | self.id: str = str(random.randint(int(1e9), int(1e10 - 1)))
335 | self.raw_data: Dict[str, bytes] = dict()
336 |
337 | def to_string(self) -> str:
338 | if self.root is None:
339 | raise ValueError("Root object is not set")
340 | dict_data = {
341 | "root": asdict(self.root),
342 | "id": self.id,
343 | }
344 | return json.dumps(dict_data)
345 |
346 | def process_sim_obj(self, sim_obj: SimObject) -> None:
347 | # for visual in sim_obj.visuals:
348 | # if visual.mesh is not None:
349 | # visual.mesh.generate_normals(self.raw_data)
350 | for child in sim_obj.children:
351 | self.process_sim_obj(child)
352 |
--------------------------------------------------------------------------------
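A minimal usage sketch for the asset classes above, assuming they are importable from simpub.simdata (as the repository layout suggests) and that numpy and trimesh are installed; the cube extents and printed fields are placeholders:

    import numpy as np
    import trimesh

    from simpub.simdata import SimMesh, SimScene

    # Create a scene and register a cube mesh; create_mesh writes the vertex,
    # index, and normal buffers into scene.raw_data, keyed by their MD5 hash.
    scene = SimScene()
    cube = trimesh.creation.box(extents=(0.1, 0.1, 0.1))
    mesh_asset = SimMesh.create_mesh(
        scene=scene,
        vertices=np.asarray(cube.vertices),
        faces=np.asarray(cube.faces),
    )
    print(mesh_asset.hash, mesh_asset.verticesLayout, mesh_asset.indicesLayout)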
/simpub/xr_device/IsaacLab_se3_SimPubHandTracking.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Callable
2 |
3 | import numpy as np
4 | from isaaclab.devices import DeviceBase
5 | from isaacsim.core.utils.rotations import quat_to_euler_angles
6 | from simpub.xr_device.meta_quest3 import MetaQuest3
7 |
8 |
9 | class Se3SimPubHandTracking(DeviceBase):
10 | def __init__(self, device_name="ALRMetaQuest3", hand="right"):
11 | self.meta_quest3 = MetaQuest3(device_name)
12 | assert hand == "right" or hand == "left", (
13 | "hand={} invalid. Only right and left are supported".format(hand)
14 | )
15 | self.hand = hand
16 | self._close_gripper = False
17 | self._tracking_active = False
18 | self._lock_rot = False
19 |
20 | self.add_callback("B", self._toggle_gripper_state)
21 | self.add_callback("right:index_trigger@press", self._toggle_tracking)
22 | self.add_callback("right:index_trigger@release", self._toggle_tracking)
23 | self.add_callback("left:index_trigger@press", self._toggle_lock_rot)
24 | self.add_callback("left:index_trigger@release", self._toggle_lock_rot)
25 |
26 | def reset(self):
27 | """Reset the internals."""
28 | self._close_gripper = False
29 | return
30 |
31 | def add_callback(
32 | self,
33 | key: str,
34 | func: Callable,
35 | ):
36 |         """Bind a function to a controller button or trigger.
37 | 
38 |         Args:
39 |             key: The input to bind, e.g. "B" or "left:index_trigger@release", given as
40 |                 "<hand>:<button>@<press|release>"; hand defaults to "right" and press type to "press".
41 |             func: The function to call when the input fires; it should not take any arguments.
42 |         """
43 | # get hand
44 | if ":" in key:
45 | hand, action = key.split(":")
46 | else:
47 | hand = "right"
48 | action = key
49 | # get button press type
50 | if "@" in action:
51 | key, press_type = action.split("@")
52 | else:
53 | key = action
54 | press_type = "press"
55 | # register callback
56 | if key in ["A", "B", "X", "Y"]:
57 | self.meta_quest3.register_button_press_event(key, func)
58 | elif key in ["hand_trigger", "index_trigger"]:
59 | if press_type == "press":
60 | self.meta_quest3.register_trigger_press_event(key, hand, func)
61 | else:
62 | self.meta_quest3.register_trigger_release_event(key, hand, func)
63 | return
64 |
65 | def _get_input_data(self):
66 | input_data = self.meta_quest3.get_input_data()
67 |
68 | if input_data is not None:
69 |             # The controller data arrives in Unity coordinates, with components
70 |             # ordered as pos: (z, -x, y) / rot: (-z, x, -y, w).
71 |             # Convert positions to Isaac Sim coordinates:
72 |             # x := -z, y := x, z := y
73 | input_data["left"]["pos"] = [
74 | -input_data["left"]["pos"][0],
75 | -input_data["left"]["pos"][1],
76 | input_data["left"]["pos"][2],
77 | ]
78 |             # Convert rotations to Isaac Sim quaternion order (w, x, y, z):
79 |             # w := w, x := z, y := -x, z := -y
80 | input_data["left"]["rot"] = [
81 | input_data["left"]["rot"][3],
82 | -input_data["left"]["rot"][0],
83 | -input_data["left"]["rot"][1],
84 | input_data["left"]["rot"][2],
85 | ]
86 |
87 | # same for right hand
88 | input_data["right"]["pos"] = [
89 | -input_data["right"]["pos"][0],
90 | -input_data["right"]["pos"][1],
91 | input_data["right"]["pos"][2],
92 | ]
93 | input_data["right"]["rot"] = [
94 | input_data["right"]["rot"][3],
95 | -input_data["right"]["rot"][0],
96 | -input_data["right"]["rot"][1],
97 | input_data["right"]["rot"][2],
98 | ]
99 |
100 | return input_data
101 |
102 | def _toggle_gripper_state(self):
103 | self._close_gripper = not self._close_gripper
104 |
105 | def _toggle_tracking(self):
106 | self._tracking_active = not self._tracking_active
107 |
108 | def _toggle_lock_rot(self):
109 | self._lock_rot = not self._lock_rot
110 |
111 |
112 | class Se3SimPubHandTrackingRel(Se3SimPubHandTracking):
113 | def __init__(
114 | self,
115 | device_name="ALRMetaQuest3",
116 | hand="right",
117 | # factor of 10 corresponds to 1:1 tracking
118 | delta_pos_scale_factor=10,
119 | delta_rot_scale_factor=10,
120 | ):
121 | super().__init__(device_name, hand)
122 | self._pos = np.zeros(3)
123 | self._rot = np.zeros(3)
124 | self._delta_pos_scale_factor = delta_pos_scale_factor
125 | self._delta_rot_scale_factor = delta_rot_scale_factor
126 |
127 | def advance(self) -> tuple[np.ndarray, bool]:
128 |         """Provide the current controller state.
129 |         Returns:
130 |             The scaled delta pose (position and Euler rotation) and the gripper-close flag.
131 |         """
132 | input_data = self._get_input_data()
133 |
134 | d_rot = np.zeros(3)
135 | d_pos = np.zeros(3)
136 |
137 | if input_data is not None:
138 | cur_rot = quat_to_euler_angles(np.asarray(input_data[self.hand]["rot"]))
139 | if self._tracking_active:
140 | # calculate position difference
141 | d_pos = np.asarray(input_data[self.hand]["pos"]) - self._pos
142 | d_pos *= self._delta_pos_scale_factor
143 | # calculate rotation difference
144 | if not self._lock_rot:
145 | d_rot = cur_rot - self._rot
146 | d_rot *= self._delta_rot_scale_factor
147 |
148 | # store values needed to calculate offset next update cycle
149 | self._pos = input_data[self.hand]["pos"]
150 | self._rot = cur_rot
151 |
152 | return np.concatenate([d_pos, d_rot]), self._close_gripper
153 |
--------------------------------------------------------------------------------
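A hedged sketch of wiring the relative-tracking device above into an Isaac Lab teleoperation loop. It assumes a SimPub node has been started (see simpub.core.init_node and test/test_net_manager.py); the IP address, node name, and callback are placeholders:

    from simpub.core import init_node
    from simpub.xr_device.IsaacLab_se3_SimPubHandTracking import Se3SimPubHandTrackingRel

    # Start a SimPub node first so the headset can connect (placeholder address).
    node = init_node("192.168.0.134", "isaaclab_teleop_node")
    node.start_node_broadcast()

    device = Se3SimPubHandTrackingRel(device_name="ALRMetaQuest3", hand="right")
    # Keys follow the "<hand>:<button>@<press|release>" format parsed by add_callback.
    device.add_callback("left:hand_trigger@press", lambda: print("left grip pressed"))

    # Inside the teleoperation loop:
    delta_pose, close_gripper = device.advance()  # 6-D delta (pos + Euler) and gripper flag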
/simpub/xr_device/QRAlignment.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intuitive-robots/SimPublisher/6f0348f678038ca6738100cfa1ab757989dde006/simpub/xr_device/QRAlignment.py
--------------------------------------------------------------------------------
/simpub/xr_device/__init__.py:
--------------------------------------------------------------------------------
1 | from .xr_device import XRDevice
2 | from .meta_quest3 import MetaQuest3
3 |
--------------------------------------------------------------------------------
/simpub/xr_device/meta_quest3.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, TypedDict, Callable, Dict, List
2 | import json
3 | from asyncio import sleep as async_sleep
4 |
5 | from simpub.core.net_manager import Publisher
6 | from .xr_device import XRDevice
7 |
8 |
9 | class MetaQuest3Hand(TypedDict):
10 | pos: List[float]
11 | rot: List[float]
12 | index_trigger: bool
13 | hand_trigger: bool
14 |
15 |
16 | class MetaQuest3InputData(TypedDict):
17 | left: MetaQuest3Hand
18 | right: MetaQuest3Hand
19 | A: bool
20 | B: bool
21 | X: bool
22 | Y: bool
23 |
24 |
25 | # TODO: Vibration Data Structure
26 | class Vibration(TypedDict):
27 | hand: str
28 | amplitude: float
29 |
30 |
31 | class MetaQuest3(XRDevice):
32 | def __init__(
33 | self,
34 | device_name: str,
35 | ) -> None:
36 | super().__init__(device_name)
37 | self.last_input_data: Optional[MetaQuest3InputData] = None
38 | self.input_data: Optional[MetaQuest3InputData] = None
39 | self.input_subscriber = self.register_topic_callback(
40 | f"{device_name}/InputData", self.update
41 | )
42 | self.start_vib_pub = Publisher(f"{device_name}/StartVibration")
43 | self.stop_vib_pub = Publisher(f"{device_name}/StopVibration")
44 | self.button_press_event: Dict[str, List[Callable]] = {
45 | "A": [],
46 | "B": [],
47 | "X": [],
48 | "Y": [],
49 | }
50 | self.left_trigger_press_event: Dict[str, List[Callable]] = {
51 | "hand_trigger": [],
52 | "index_trigger": [],
53 | }
54 | self.left_trigger_release_event: Dict[str, List[Callable]] = {
55 | "hand_trigger": [],
56 | "index_trigger": [],
57 | }
58 | self.right_trigger_press_event: Dict[str, List[Callable]] = {
59 | "hand_trigger": [],
60 | "index_trigger": [],
61 | }
62 | self.right_trigger_release_event: Dict[str, List[Callable]] = {
63 | "hand_trigger": [],
64 | "index_trigger": [],
65 | }
66 | self.on_vibration = {"left": False, "right": False}
67 |
68 | def update(self, data: str):
69 | self.last_input_data = self.input_data
70 | self.input_data = json.loads(data)
71 | if self.last_input_data is None:
72 | return
73 | if self.input_data is None:
74 | return
75 | for button, callbacks in self.button_press_event.items():
76 | if self.input_data[button] and not self.last_input_data[button]:
77 | [callback() for callback in callbacks]
78 | left_hand = self.input_data["left"]
79 | last_left_hand = self.last_input_data["left"]
80 | for trigger, callbacks in self.left_trigger_press_event.items():
81 | if left_hand[trigger] and not last_left_hand[trigger]:
82 | [callback() for callback in callbacks]
83 | for trigger, callbacks in self.left_trigger_release_event.items():
84 | if not left_hand[trigger] and last_left_hand[trigger]:
85 | [callback() for callback in callbacks]
86 | right_hand = self.input_data["right"]
87 | last_right_hand = self.last_input_data["right"]
88 | for trigger, callbacks in self.right_trigger_press_event.items():
89 | if right_hand[trigger] and not last_right_hand[trigger]:
90 | [callback() for callback in callbacks]
91 | for trigger, callbacks in self.right_trigger_release_event.items():
92 | if not right_hand[trigger] and last_right_hand[trigger]:
93 | [callback() for callback in callbacks]
94 |
95 | def register_button_press_event(self, button: str, callback: Callable):
96 | # button should be one of A, B, X, Y
97 | self.button_press_event[button].append(callback)
98 |
99 | def register_trigger_press_event(
100 | self, trigger: str, hand: str, callback: Callable
101 | ):
102 | # hand should be one of left or right
103 | # trigger should be one of hand_trigger or index_trigger
104 | if hand == "left":
105 | self.left_trigger_press_event[trigger].append(callback)
106 | elif hand == "right":
107 | self.right_trigger_press_event[trigger].append(callback)
108 | else:
109 | raise ValueError("Invalid hand")
110 |
111 | def register_trigger_release_event(
112 | self, trigger: str, hand: str, callback: Callable
113 | ):
114 |         # hand should be one of left or right
115 |         # trigger should be one of hand_trigger or index_trigger
116 | if hand == "left":
117 | self.left_trigger_release_event[trigger].append(callback)
118 | elif hand == "right":
119 | self.right_trigger_release_event[trigger].append(callback)
120 | else:
121 | raise ValueError("Invalid hand")
122 |
123 | def get_input_data(self) -> Optional[MetaQuest3InputData]:
124 | return self.input_data
125 |
126 | # TODO: Vibration Data Structure
127 | def start_vibration(self, hand: str = "right", duration=0.5):
128 | if not self.on_vibration[hand]:
129 | self.on_vibration[hand] = True
130 | self.manager.submit_task(
131 | self.start_vibration_async, hand, duration,
132 | )
133 |
134 | async def start_vibration_async(self, hand: str = "right", duration=0.5):
135 | self.start_vib_pub.publish_string(hand)
136 | if duration > 1.5:
137 |             while duration > 0:
138 | await async_sleep(1.5)
139 | self.start_vib_pub.publish_string(hand)
140 | duration -= 1.5
141 | else:
142 | await async_sleep(duration)
143 | self.stop_vibration(hand)
144 |
145 | def stop_vibration(self, hand: str = "right"):
146 | if self.on_vibration[hand]:
147 | self.on_vibration[hand] = False
148 | self.stop_vib_pub.publish_string(hand)
149 |
--------------------------------------------------------------------------------
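A short sketch of the intended MetaQuest3 workflow: initialize a node as in test/test_net_manager.py, register callbacks, then poll get_input_data(). The IP address, node name, and device name are placeholders:

    import time

    from simpub.core import init_node
    from simpub.xr_device.meta_quest3 import MetaQuest3

    node = init_node("192.168.0.134", "mq3_demo_node")
    node.start_node_broadcast()

    quest = MetaQuest3("IRLMQ3-1")  # must match the headset's device name
    quest.register_button_press_event("A", lambda: quest.start_vibration("right", 0.5))

    while True:
        data = quest.get_input_data()  # None until the first InputData message arrives
        if data is not None:
            print(data["right"]["pos"], data["right"]["index_trigger"])
        time.sleep(0.1)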
/simpub/xr_device/xr_device.py:
--------------------------------------------------------------------------------
1 | import json
2 | import zmq
3 | import zmq.asyncio
4 | from typing import Optional, Dict, Callable
5 | from asyncio import sleep as async_sleep
6 |
7 | from ..core.log import logger
8 | from ..core.net_manager import NodeManager, NodeInfo
9 | from ..core.net_manager import TopicName
10 | # from ..core.net_component import Subscriber
11 | from ..core.utils import AsyncSocket
12 |
13 |
14 | class InputData:
15 | def __init__(self, json_str: str) -> None:
16 | self.json_str = json_str
17 | self.data = json.loads(json_str)
18 |
19 |
20 | class XRDevice:
21 | type = "XRDevice"
22 |
23 | def __init__(
24 | self,
25 | device_name: str = "UnityClient",
26 | ) -> None:
27 | if NodeManager.manager is None:
28 | raise Exception("NodeManager is not initialized")
29 | self.manager: NodeManager = NodeManager.manager
30 | self.connected = False
31 | self.device_name = device_name
32 | self.device_info: Optional[NodeInfo] = None
33 | self.req_socket: AsyncSocket = self.manager.create_socket(zmq.REQ)
34 | self.sub_socket: AsyncSocket = self.manager.create_socket(zmq.SUB)
35 | # subscriber
36 | self.sub_topic_callback: Dict[TopicName, Callable] = {}
37 | self.register_topic_callback(f"{device_name}/Log", self.print_log)
38 | self.manager.submit_task(self.wait_for_connection)
39 |
40 | async def wait_for_connection(self):
41 | logger.info(f"Waiting for connection to {self.device_name}")
42 | while not self.connected:
43 | device_info = self.manager.nodes_info_manager.get_node_info(
44 | self.device_name
45 | )
46 | if device_info is not None:
47 | self.device_info = device_info
48 | self.connected = True
49 | logger.info(f"Connected to {self.device_name}")
50 | break
51 | await async_sleep(0.01)
52 | if self.device_info is None:
53 | return
54 | self.connect_to_client(self.device_info)
55 |
56 | def connect_to_client(self, info: NodeInfo):
57 | self.req_socket.connect(
58 | f"tcp://{info['addr']['ip']}:{info['servicePort']}"
59 | )
60 | self.sub_socket.connect(
61 | f"tcp://{info['addr']['ip']}:{info['topicPort']}"
62 | )
63 | self.sub_socket.setsockopt_string(zmq.SUBSCRIBE, "")
64 | self.manager.submit_task(self.subscribe_loop)
65 |
66 | def register_topic_callback(self, topic: TopicName, callback: Callable):
67 | self.sub_topic_callback[topic] = callback
68 |
69 | def request(self, service: str, req: str) -> str:
70 | future = self.manager.submit_task(
71 | self.request_async, service, req
72 | )
73 | if future is None:
74 | logger.error("Future is None")
75 | return ""
76 | try:
77 | result = future.result()
78 | return result
79 | except Exception as e:
80 | logger.error(f"Error occurred when waiting for a response: {e}")
81 | return ""
82 |
83 | async def request_async(self, service: str, req: str) -> str:
84 | if self.device_info is None:
85 | logger.error(f"Device {self.device_name} is not connected")
86 | return ""
87 | if service not in self.device_info["serviceList"]:
88 | logger.error(f"\"{service}\" Service is not available")
89 | return ""
90 | await self.req_socket.send_string(f"{service}:{req}")
91 | return await self.req_socket.recv_string()
92 |
93 | async def subscribe_loop(self):
94 | try:
95 | while self.connected:
96 | message = await self.sub_socket.recv_string()
97 | topic, msg = message.split("|", 1)
98 | if topic in self.sub_topic_callback:
99 | self.sub_topic_callback[topic](msg)
100 | await async_sleep(0.01)
101 | except Exception as e:
102 | logger.error(
103 | f"{e} from subscribe loop in device {self.device_name}"
104 | )
105 |
106 | def print_log(self, log: str):
107 | logger.remote_log(f"{self.type} Log: {log}")
108 |
109 | def get_input_data(self) -> InputData:
110 | raise NotImplementedError
111 |
--------------------------------------------------------------------------------
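A minimal sketch of how a new client type could extend XRDevice, following the pattern used by MetaQuest3; it assumes init_node() has already been called, and the topic name and JSON payload are assumptions:

    import json

    from simpub.xr_device.xr_device import XRDevice


    class GenericTracker(XRDevice):
        type = "GenericTracker"

        def __init__(self, device_name: str = "GenericTrackerClient") -> None:
            super().__init__(device_name)
            self.latest_pose = None
            # Subscribe to a device-specific topic; callbacks receive the raw string payload.
            self.register_topic_callback(f"{device_name}/Pose", self.on_pose)

        def on_pose(self, msg: str) -> None:
            self.latest_pose = json.loads(msg)

        def get_input_data(self):
            return self.latest_pose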
/test/mj_parser_test.py:
--------------------------------------------------------------------------------
1 | import simpub.parser.mj as mj_parser
2 |
3 | import argparse
4 |
5 | from alr_sim.sims.SimFactory import SimRepository
6 | from alr_sim.sims.universal_sim.PrimitiveObjects import Box
7 | from simpub.sim.sf_publisher import SFPublisher
8 | from simpub.xr_device import XRDevice
9 |
10 | if __name__ == "__main__":
11 |
12 | parser = argparse.ArgumentParser()
13 | parser.add_argument("--host", type=str, default="127.0.0.1")
14 | parser.add_argument("--device", type=str, default="UnityClient")
15 | args = parser.parse_args()
16 | host = args.host
17 |
18 | box1 = Box(
19 | name="box1",
20 | init_pos=[0.5, -0.2, 0.5],
21 | init_quat=[0, 1, 0, 0],
22 | rgba=[0.1, 0.25, 0.3, 1],
23 | )
24 | box2 = Box(
25 | name="box2",
26 | init_pos=[0.6, -0.1, 0.5],
27 | init_quat=[0, 1, 0, 0],
28 | rgba=[0.2, 0.3, 0.7, 1],
29 | )
30 | box3 = Box(
31 | name="box3",
32 | init_pos=[0.4, -0.1, 0.5],
33 | init_quat=[0, 1, 0, 0],
34 | rgba=[1, 0, 0, 1],
35 | )
36 | box4 = Box(
37 | name="box4",
38 | init_pos=[0.6, -0.0, 0.5],
39 | init_quat=[0, 1, 0, 0],
40 | rgba=[1, 0, 0, 1],
41 | )
42 | box5 = Box(
43 | name="box5",
44 | init_pos=[0.6, 0.1, 0.5],
45 | init_quat=[0, 1, 0, 0],
46 | rgba=[1, 1, 1, 1],
47 | )
48 | box6 = Box(
49 | name="box6",
50 | init_pos=[0.6, 0.2, 0.5],
51 | init_quat=[0, 1, 0, 0],
52 | rgba=[1, 0, 0, 1],
53 | )
54 |
55 | table = Box(
56 | name="table0",
57 | init_pos=[0.5, 0.0, 0.2],
58 | init_quat=[0, 1, 0, 0],
59 | size=[0.25, 0.35, 0.2],
60 | static=True,
61 | )
62 |
63 | object_list = [box1, box2, box3, box4, box5, box6, table]
64 |
65 | # Setup the scene
66 | sim_factory = SimRepository.get_factory("mj_beta")
67 |
68 | scene = sim_factory.create_scene(object_list=object_list)
69 | robot = sim_factory.create_robot(scene)
70 |
71 | scene.start()
72 |
73 | # assert host is not None, "Please specify the host"
74 | publisher = SFPublisher(
75 | scene, host, no_tracked_objects=["table_plane", "table0"]
76 | )
77 |
78 | duration = 2
79 |
80 | robot.set_desired_gripper_width(0.0)
81 |
82 | home_position = robot.current_c_pos.copy()
83 | home_orientation = robot.current_c_quat.copy()
84 |
85 | # execute the pick and place movements
86 | robot.gotoCartPositionAndQuat(
87 | [0.5, -0.2, 0.6 - 0.1], [0, 1, 0, 0], duration=duration
88 | )
89 | robot.set_desired_gripper_width(0.04)
90 | robot.smooth_spline = False
91 | robot.gotoCartPositionAndQuat(
92 | [0.5, -0.2, 0.52 - 0.1], [0, 1, 0, 0], duration=duration
93 | )
94 | robot.close_fingers(duration=0.3)
95 | robot.gotoCartPositionAndQuat(
96 | home_position, home_orientation, duration=duration
97 | )
98 | robot.gotoCartPositionAndQuat(
99 | [0.5, 0.2, 0.6 - 0.1], [0, 1, 0, 0], duration=duration
100 | )
101 | robot.set_desired_gripper_width(0.04)
102 | robot.gotoCartPositionAndQuat(
103 | [0.6, -0.1, 0.6 - 0.1], [0, 1, 0, 0], duration=duration
104 | )
105 | robot.gotoCartPositionAndQuat(
106 | [0.6, -0.1, 0.52 - 0.1], [0, 1, 0, 0], duration=duration
107 | )
108 | robot.close_fingers(duration=0.3)
109 | robot.gotoCartPositionAndQuat(
110 | home_position, home_orientation, duration=duration
111 | )
112 | robot.gotoCartPositionAndQuat(
113 | [0.5, 0.2, 0.6 - 0.1], [0, 1, 0, 0], duration=duration
114 | )
115 | robot.set_desired_gripper_width(0.04)
116 | robot.gotoCartPositionAndQuat(
117 | [0.4, -0.1, 0.6 - 0.1], [0, 1, 0, 0], duration=duration
118 | )
119 | robot.gotoCartPositionAndQuat(
120 | [0.4, -0.1, 0.52 - 0.1], [0, 1, 0, 0], duration=duration
121 | )
122 | robot.close_fingers(duration=0.3)
123 | robot.gotoCartPositionAndQuat(
124 | home_position, home_orientation, duration=duration
125 | )
126 | robot.gotoCartPositionAndQuat(
127 | [0.5, 0.2, 0.65 - 0.1], [0, 1, 0, 0], duration=duration
128 | )
129 | robot.set_desired_gripper_width(0.04)
130 | robot.gotoCartPositionAndQuat(
131 | home_position, home_orientation, duration=duration
132 | )
133 |
134 | while True:
135 | scene.next_step()
136 |
137 |
--------------------------------------------------------------------------------
/test/test_net_manager.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | from simpub.core import init_node
4 | from simpub.xr_device.meta_quest3 import MetaQuest3
5 |
6 | manager = init_node("192.168.0.134", f"test_node_{random.randint(0, 1000)}")
7 | manager.start_node_broadcast()
8 | meta = MetaQuest3("IRLMQ3-1")
9 | manager.spin()
10 |
--------------------------------------------------------------------------------
/test/test_stop_node.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import mujoco
3 | import os
4 | import time
5 |
6 | from simpub.sim.mj_publisher import MujocoPublisher
7 |
8 |
9 | if __name__ == '__main__':
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument("--path", type=str, default="/home/xinkai/repository")
12 | # parser.add_argument("--name", type=str, default="unitree_h1")
13 | # parser.add_argument("--name", type=str, default="unitree_g1")
14 | parser.add_argument("--name", type=str, default="boston_dynamics_spot")
15 | parser.add_argument("--host", type=str, default="127.0.0.1")
16 | args = parser.parse_args()
17 | xml_path = os.path.join(
18 | args.path, "mujoco_menagerie", args.name, "scene_arm.xml"
19 | )
20 | model = mujoco.MjModel.from_xml_path(xml_path)
21 | data = mujoco.MjData(model)
22 | publisher = MujocoPublisher(
23 | model,
24 | data,
25 | args.host,
26 | visible_geoms_groups=list(range(1, 3))
27 | )
28 | for _ in range(100):
29 | mujoco.mj_step(model, data)
30 | time.sleep(0.01)
31 | publisher.shutdown()
32 |
--------------------------------------------------------------------------------