├── .gitignore ├── LICENSE ├── README.md ├── demo.py ├── environments.md ├── examples └── demo_env.py ├── images ├── env_hammer_place.png ├── env_kitchen.png └── env_tool_use.png ├── requirements.txt ├── robosuite_task_zoo ├── __init__.py ├── environments │ ├── __init__.py │ └── manipulation │ │ ├── __init__.py │ │ ├── hammer_place.py │ │ ├── kitchen.py │ │ ├── multitask_kitchen.py │ │ └── tool_use.py └── models │ ├── __init__.py │ ├── hammer_place │ ├── __init__.py │ ├── cabinet.xml │ ├── cabinet.xml~ │ └── cabinet_object.py │ ├── kitchen │ ├── __init__.py │ ├── burnerplate.stl │ ├── button.xml │ ├── button.xml~ │ ├── button_object.py │ ├── cabinet.xml │ ├── cabinet_object.py │ ├── pot_object.py │ ├── serving_region.py │ ├── serving_region.xml │ ├── stove.xml │ └── stove_object.py │ ├── multitask_kitchen │ ├── __init__.py │ ├── burnerplate.stl │ ├── button.xml │ ├── button_object.py │ ├── cabinet.xml │ ├── cabinet_object.py │ ├── pot_object.py │ ├── serving_region.py │ ├── serving_region.xml │ ├── stove.xml │ └── stove_object.py │ └── tool_use │ ├── __init__.py │ ├── lshape_tool.py │ └── pot_object.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | 103 | # mac 104 | .DS_Store 105 | 106 | # mujoco-key 107 | mjkey.txt 108 | 109 | .mujocomanip_temp_model.xml 110 | 111 | *.jpg 112 | .idea 113 | 114 | .pytest_cache/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 ARISE Initiative 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # robosuite Task Zoo 2 | 3 | This repository hosts a collection of robosuite tasks designed by 4 | community users for their own research. It is intended to standardize 5 | experimental validation of simulated tasks and expedite the 6 | development of new environments. 7 | 8 | To use this repository, you need to first install robosuite. For more 9 | information, please refer to [robosuite](https://github.com/ARISE-Initiative/robosuite). Then git clone the repository and then do: 10 | 11 | ``` shell 12 | pip install -e . 
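# Optional: sanity-check the installation by rendering a random-action rollout
# (this assumes robosuite and MuJoCo are already set up and a local display is available)
python demo.py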
13 | ``` 14 | 15 | For details of the environments, please refer to [environments.md](./environments.md) 16 | -------------------------------------------------------------------------------- /demo.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import robosuite 4 | import robosuite_task_zoo 5 | 6 | robots = "Panda" 7 | env = robosuite_task_zoo.environments.manipulation.ToolUseEnv( # also available: HammerPlaceEnv, KitchenEnv, MultitaskKitchenDomain 8 | robots, 9 | has_renderer=True, 10 | has_offscreen_renderer=False, 11 | use_camera_obs=False, 12 | control_freq=20, 13 | ) 14 | 15 | # reset env 16 | env.reset() 17 | env.viewer.set_camera(camera_id=0) 18 | 19 | # get action limits 20 | low, high = env.action_spec 21 | 22 | for i in range(1000): 23 | action = np.random.uniform(low, high) 24 | obs, reward, done, _ = env.step(action) 25 | env.render() -------------------------------------------------------------------------------- /environments.md: -------------------------------------------------------------------------------- 1 | # Environments 2 | 3 | Environments are based on robosuite. For more information about 4 | robosuite, including how to create an environment, please refer to 5 | the [robosuite docs](https://robosuite.ai/docs). 6 | 7 | We describe the tasks included in this repository. 8 | 9 | ## Task Descriptions 10 | 11 | We provide a brief description of each environment included in this repo. The current focus is on manipulation environments. 12 | 13 | ### ToolUse 14 | ![env_tool_use](./images/env_tool_use.png) 15 | - **Description**: A tabletop domain where the robot uses an L-shaped tool to fetch an object 16 | and place it into a pot. 17 | - **Robot-type**: Single-Arm 18 | - **Task-type**: Single-Task 19 | - **Has rewards**: Yes 20 | - **Has demonstrations**: Yes 21 | - **No. of Demonstrations**: 100 22 | - **Robot**: Panda 23 | - **Data**: [Link](https://utexas.box.com/shared/static/om0pegpm0hdi12clydau36d3vy0yz516.zip) 24 | - **Action Space**: OSC_POSITION (3) + Gripper (1) 25 | - **Observations**: Workspace images, Eye-in-Hand images, Proprioception 26 | - **Reference**: [Bottom-Up Skill Discovery from Unsegmented Demonstrations for Long-Horizon Robot Manipulation](http://arxiv.org/abs/2109.13841) 27 | 28 | ### HammerPlace 29 | ![env_hammer_place](./images/env_hammer_place.png) 30 | - **Description**: A simple tabletop domain to place a hammer into the cabinet 31 | and close the cabinet. 32 | - **Robot-type**: Single-Arm 33 | - **Task-type**: Single-Task 34 | - **Has rewards**: Yes 35 | - **Has demonstrations**: Yes 36 | - **No. of Demonstrations**: 100 37 | - **Robot**: Panda 38 | - **Data**: [Link](https://utexas.box.com/shared/static/om0pegpm0hdi12clydau36d3vy0yz516.zip) 39 | - **Action Space**: OSC_POSITION (3) + Gripper (1) 40 | - **Observations**: Workspace images, Eye-in-Hand images, Proprioception 41 | - **Reference**: [Bottom-Up Skill Discovery from Unsegmented Demonstrations for Long-Horizon Robot Manipulation](http://arxiv.org/abs/2109.13841) 42 | 43 | ### Kitchen 44 | ![env_kitchen](./images/env_kitchen.png) 45 | - **Description**: A tabletop kitchen domain where the robot cooks an ingredient in a pot on the stove 46 | and places the pot on the serving region. 47 | - **Robot-type**: Single-Arm 48 | - **Task-type**: Single-Task 49 | - **Has rewards**: Yes 50 | - **Has demonstrations**: Yes 51 | - **No.
of Demonstrations**: 100 52 | - **Robot**: Panda 53 | - **Data**: [Link](https://utexas.box.com/shared/static/om0pegpm0hdi12clydau36d3vy0yz516.zip) 54 | - **Action Space**: OSC_POSITION (3) + Gripper (1) 55 | - **Observations**: Workspace images, Eye-in-Hand images, Proprioception 56 | - **Reference**: [Bottom-Up Skill Discovery from Unsegmented Demonstrations for Long-Horizon Robot Manipulation](http://arxiv.org/abs/2109.13841) 57 | 58 | 59 | ### MultitaskKitchenDomain 60 | - **Description**: A multi-task tabletop kitchen domain with three tasks that share the same scene, 61 | each with several initial-configuration variants. 62 | - **Robot-type**: Single-Arm 63 | - **Task-type**: Multi-Task 64 | - **No. of Tasks**: 3 65 | - **Has rewards**: Yes 66 | - **Has demonstrations**: Yes 67 | - **No. of Demonstrations in total**: 360 68 | - **No. of Demonstrations per task**: 120 69 | - **Robot**: Panda 70 | - **Data**: [Link](https://utexas.box.com/shared/static/om0pegpm0hdi12clydau36d3vy0yz516.zip) 71 | - **Action Space**: OSC_POSITION (3) + Gripper (1) 72 | - **Observations**: Workspace images, Eye-in-Hand images, Proprioception 73 | - **Reference**: [Bottom-Up Skill Discovery from Unsegmented Demonstrations for Long-Horizon Robot Manipulation](http://arxiv.org/abs/2109.13841) 74 | 75 | 76 | -------------------------------------------------------------------------------- /examples/demo_env.py: -------------------------------------------------------------------------------- 1 | import robosuite as suite 2 | from robosuite.devices import SpaceMouse 3 | from robosuite.utils.input_utils import input2action 4 | 5 | import robosuite_task_zoo 6 | 7 | import robosuite.utils.macros as macros 8 | macros.IMAGE_CONVENTION = "opencv" 9 | 10 | from robosuite.environments.base import REGISTERED_ENVS, MujocoEnv 11 | 12 | # device = SpaceMouse(9583, 50734, pos_sensitivity=1.0, rot_sensitivity=0.1) 13 | # device.start_control() 14 | 15 | options = {} 16 | options["robots"] = ["Panda"] 17 | 18 | options["controller_configs"] = suite.load_controller_config(default_controller="OSC_POSITION") 19 | 20 | options["env_name"] = "MultitaskKitchenDomain" 21 | 22 | 23 | env = suite.make(**options, 24 | has_renderer=True, 25 | has_offscreen_renderer=False, 26 | ignore_done=True, 27 | use_camera_obs=False, 28 | horizon=100, 29 | control_freq=20, 30 | task_id=0) 31 | 32 | 33 | env.reset() 34 | for _ in range(100): 35 | env.step([1.]
* 4) 36 | env.render() 37 | -------------------------------------------------------------------------------- /images/env_hammer_place.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ARISE-Initiative/robosuite-task-zoo/74eab7f88214c21ca1ae8617c2b2f8d19718a9ed/images/env_hammer_place.png -------------------------------------------------------------------------------- /images/env_kitchen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ARISE-Initiative/robosuite-task-zoo/74eab7f88214c21ca1ae8617c2b2f8d19718a9ed/images/env_kitchen.png -------------------------------------------------------------------------------- /images/env_tool_use.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ARISE-Initiative/robosuite-task-zoo/74eab7f88214c21ca1ae8617c2b2f8d19718a9ed/images/env_tool_use.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ARISE-Initiative/robosuite-task-zoo/74eab7f88214c21ca1ae8617c2b2f8d19718a9ed/requirements.txt -------------------------------------------------------------------------------- /robosuite_task_zoo/__init__.py: -------------------------------------------------------------------------------- 1 | import robosuite_task_zoo.environments.manipulation 2 | 3 | 4 | __version__ = "0.1.0" 5 | -------------------------------------------------------------------------------- /robosuite_task_zoo/environments/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ARISE-Initiative/robosuite-task-zoo/74eab7f88214c21ca1ae8617c2b2f8d19718a9ed/robosuite_task_zoo/environments/__init__.py -------------------------------------------------------------------------------- /robosuite_task_zoo/environments/manipulation/__init__.py: -------------------------------------------------------------------------------- 1 | from .tool_use import ToolUseEnv 2 | from .hammer_place import HammerPlaceEnv 3 | from .kitchen import KitchenEnv 4 | from .multitask_kitchen import MultitaskKitchenDomain 5 | -------------------------------------------------------------------------------- /robosuite_task_zoo/environments/manipulation/hammer_place.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | import numpy as np 3 | from copy import deepcopy 4 | from robosuite.environments.manipulation.single_arm_env import SingleArmEnv 5 | 6 | from robosuite.models.arenas import TableArena 7 | from robosuite.models.objects import CylinderObject, BoxObject 8 | 9 | from robosuite.models.objects import HammerObject 10 | from robosuite.models.tasks import ManipulationTask 11 | from robosuite.utils.placement_samplers import UniformRandomSampler, SequentialCompositeSampler 12 | from robosuite.utils.observables import Observable, sensor 13 | from robosuite.utils.mjcf_utils import CustomMaterial, array_to_string, find_elements, add_material 14 | from robosuite.utils.buffers import RingBuffer 15 | import robosuite.utils.transform_utils as T 16 | 17 | from robosuite_task_zoo.models.hammer_place import CabinetObject 18 | 19 | class HammerPlaceEnv(SingleArmEnv): 20 | def __init__( 21 | self, 22 | robots, 23 | 
env_configuration="default", 24 | controller_configs=None, 25 | gripper_types="default", 26 | initialization_noise="default", 27 | use_latch=False, 28 | use_camera_obs=True, 29 | use_object_obs=True, 30 | reward_scale=1.0, 31 | reward_shaping=False, 32 | placement_initializer=None, 33 | has_renderer=False, 34 | has_offscreen_renderer=True, 35 | render_camera="frontview", 36 | render_collision_mesh=False, 37 | render_visual_mesh=True, 38 | render_gpu_device_id=-1, 39 | control_freq=20, 40 | horizon=1000, 41 | ignore_done=False, 42 | hard_reset=True, 43 | camera_names="agentview", 44 | camera_heights=256, 45 | camera_widths=256, 46 | camera_depths=False, 47 | contact_threshold=2.0 48 | ): 49 | # settings for table top (hardcoded since it's not an essential part of the environment) 50 | self.table_full_size = (0.8, 0.8, 0.05) 51 | self.table_offset = (-0.2, 0, 0.90) 52 | 53 | # reward configuration 54 | self.reward_scale = reward_scale 55 | self.reward_shaping = reward_shaping 56 | 57 | # whether to use ground-truth object states 58 | self.use_object_obs = use_object_obs 59 | 60 | # object placement initializer 61 | self.placement_initializer = placement_initializer 62 | 63 | # ee resets 64 | self.ee_force_bias = np.zeros(3) 65 | self.ee_torque_bias = np.zeros(3) 66 | 67 | # Thresholds 68 | self.contact_threshold = contact_threshold 69 | 70 | # History observations 71 | self._history_force_torque = None 72 | self._recent_force_torque = None 73 | 74 | self.objects = [] 75 | 76 | super().__init__( 77 | robots=robots, 78 | env_configuration=env_configuration, 79 | controller_configs=controller_configs, 80 | mount_types="default", 81 | gripper_types=gripper_types, 82 | initialization_noise=initialization_noise, 83 | use_camera_obs=use_camera_obs, 84 | has_renderer=has_renderer, 85 | has_offscreen_renderer=has_offscreen_renderer, 86 | render_camera=render_camera, 87 | render_collision_mesh=render_collision_mesh, 88 | render_visual_mesh=render_visual_mesh, 89 | render_gpu_device_id=render_gpu_device_id, 90 | control_freq=control_freq, 91 | horizon=horizon, 92 | ignore_done=ignore_done, 93 | hard_reset=hard_reset, 94 | camera_names=camera_names, 95 | camera_heights=camera_heights, 96 | camera_widths=camera_widths, 97 | camera_depths=camera_depths, 98 | ) 99 | 100 | def reward(self, action=None): 101 | """ 102 | Reward function for the task. 103 | 104 | Sparse un-normalized reward: 105 | 106 | - a discrete reward of 1.0 is provided if the hammer is placed inside the cabinet and the cabinet is closed 107 | 108 | Note that reward shaping is not implemented for this environment; even if reward_shaping is 109 | enabled, only the sparse completion reward above is returned. 110 | 111 | 112 | 113 | 114 | Note that a successfully completed task will return 1.0 regardless of whether the environment 115 | is using sparse or shaped rewards. 116 | 117 | Note that the final reward is normalized and scaled by reward_scale / 1.0 as 118 | well so that the max score is equal to reward_scale 119 | 120 | Args: 121 | action (np.array): [NOT USED] 122 | 123 | Returns: 124 | float: reward value 125 | """ 126 | reward = 0.
127 | 128 | # sparse completion reward 129 | if self._check_success(): 130 | reward = 1.0 131 | 132 | # Scale reward if requested 133 | if self.reward_scale is not None: 134 | reward *= self.reward_scale / 1.0 135 | 136 | return reward 137 | 138 | def _load_model(self): 139 | """ 140 | Loads an xml model, puts it in self.model 141 | """ 142 | super()._load_model() 143 | 144 | # Adjust base pose accordingly 145 | xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0]) 146 | self.robots[0].robot_model.set_base_xpos(xpos) 147 | 148 | # load model for table top workspace 149 | mujoco_arena = TableArena( 150 | table_full_size=self.table_full_size, 151 | table_offset=self.table_offset, 152 | table_friction=(0.6, 0.005, 0.0001) 153 | ) 154 | 155 | # Arena always gets set to zero origin 156 | mujoco_arena.set_origin([0, 0, 0]) 157 | 158 | # Modify default agentview camera 159 | mujoco_arena.set_camera( 160 | camera_name="agentview", 161 | pos=[0.5386131746834771, -4.392035683362857e-09, 1.4903500240372423], 162 | quat=[0.6380177736282349, 0.3048497438430786, 0.30484986305236816, 0.6380177736282349] 163 | ) 164 | 165 | mujoco_arena.set_camera( 166 | camera_name="sideview", 167 | pos=[0.5586131746834771, 0.3, 1.2903500240372423], 168 | quat=[0.4144233167171478, 0.3100920617580414, 0.49641484022140503, 0.6968992352485657] 169 | ) 170 | 171 | 172 | bread = CustomMaterial( 173 | texture="Bread", 174 | tex_name="bread", 175 | mat_name="MatBread", 176 | tex_attrib={"type": "cube"}, 177 | mat_attrib={"texrepeat": "3 3", "specular": "0.4","shininess": "0.1"} 178 | ) 179 | 180 | darkwood = CustomMaterial( 181 | texture="WoodDark", 182 | tex_name="darkwood", 183 | mat_name="MatDarkWood", 184 | tex_attrib={"type": "cube"}, 185 | mat_attrib={"texrepeat": "3 3", "specular": "0.4","shininess": "0.1"} 186 | ) 187 | 188 | lightwood = CustomMaterial( 189 | texture="WoodLight", 190 | tex_name="lightwood", 191 | mat_name="MatLightWood", 192 | tex_attrib={"type": "cube"}, 193 | mat_attrib={"texrepeat": "3 3", "specular": "0.4","shininess": "0.1"} 194 | ) 195 | 196 | metal = CustomMaterial( 197 | texture="Metal", 198 | tex_name="metal", 199 | mat_name="MatMetal", 200 | tex_attrib={"type": "cube"}, 201 | mat_attrib={"specular": "1", "shininess": "0.3", "rgba": "0.9 0.9 0.9 1"} 202 | ) 203 | 204 | tex_attrib = { 205 | "type": "cube" 206 | } 207 | 208 | mat_attrib = { 209 | "texrepeat": "1 1", 210 | "specular": "0.4", 211 | "shininess": "0.1" 212 | } 213 | 214 | greenwood = CustomMaterial( 215 | texture="WoodGreen", 216 | tex_name="greenwood", 217 | mat_name="greenwood_mat", 218 | tex_attrib=tex_attrib, 219 | mat_attrib=mat_attrib, 220 | ) 221 | redwood = CustomMaterial( 222 | texture="WoodRed", 223 | tex_name="redwood", 224 | mat_name="MatRedWood", 225 | tex_attrib=tex_attrib, 226 | mat_attrib=mat_attrib, 227 | ) 228 | 229 | bluewood = CustomMaterial( 230 | texture="WoodBlue", 231 | tex_name="bluewood", 232 | mat_name="handle1_mat", 233 | tex_attrib={"type": "cube"}, 234 | mat_attrib={"texrepeat": "1 1", "specular": "0.4", "shininess": "0.1"}, 235 | ) 236 | 237 | ceramic = CustomMaterial( 238 | texture="Ceramic", 239 | tex_name="ceramic", 240 | mat_name="MatCeramic", 241 | tex_attrib=tex_attrib, 242 | mat_attrib=mat_attrib, 243 | ) 244 | 245 | ingredient_size = [0.03, 0.018, 0.025] 246 | 247 | self.sorting_object = HammerObject(name="hammer", 248 | handle_length=(0.045, 0.05), 249 | handle_radius=(0.012, 0.012), 250 | head_density_ratio=1.0 251 | ) 252 | 253 | self.cabinet_object = 
CabinetObject( 254 | name="CabinetObject") 255 | cabinet_object = self.cabinet_object.get_obj(); cabinet_object.set("pos", array_to_string((0.2, 0.30, 0.03))); mujoco_arena.table_body.append(cabinet_object) 256 | 257 | for obj_body in [ 258 | self.cabinet_object, 259 | ]: 260 | for material in [lightwood, darkwood, metal, redwood, ceramic]: 261 | tex_element, mat_element, _, used = add_material(root=obj_body.worldbody, 262 | naming_prefix=obj_body.naming_prefix, 263 | custom_material=deepcopy(material)) 264 | obj_body.asset.append(tex_element) 265 | obj_body.asset.append(mat_element) 266 | 267 | ingredient_size = [0.015, 0.025, 0.02] 268 | 269 | self.placement_initializer = SequentialCompositeSampler(name="ObjectSampler") 270 | 271 | self.placement_initializer.append_sampler( 272 | sampler = UniformRandomSampler( 273 | name="ObjectSampler-pot", 274 | mujoco_objects=self.sorting_object, 275 | x_range=[0.10, 0.18], 276 | y_range=[-0.20, -0.13], 277 | rotation=(-0.1, 0.1), 278 | rotation_axis='z', 279 | ensure_object_boundary_in_range=False, 280 | ensure_valid_placement=True, 281 | reference_pos=self.table_offset, 282 | z_offset=0.02, 283 | )) 284 | 285 | mujoco_objects = [ 286 | self.sorting_object, 287 | ] 288 | 289 | # task includes arena, robot, and objects of interest 290 | self.model = ManipulationTask( 291 | mujoco_arena=mujoco_arena, 292 | mujoco_robots=[robot.robot_model for robot in self.robots], 293 | mujoco_objects=mujoco_objects, 294 | ) 295 | self.objects = [ 296 | self.sorting_object, 297 | self.cabinet_object, 298 | ] 299 | self.model.merge_assets(self.sorting_object) 300 | self.model.merge_assets(self.cabinet_object) 301 | 302 | 303 | def _setup_references(self): 304 | """ 305 | Sets up references to important components. A reference is typically an 306 | index or a list of indices that point to the corresponding elements 307 | in a flatten array, which is how MuJoCo stores physical simulation data. 308 | """ 309 | super()._setup_references() 310 | 311 | # Additional object references from this env 312 | self.object_body_ids = dict() 313 | 314 | self.cabinet_qpos_addrs = self.sim.model.get_joint_qpos_addr(self.cabinet_object.joints[0]) 315 | 316 | self.sorting_object_id = self.sim.model.body_name2id(self.sorting_object.root_body) 317 | self.cabinet_object_id = self.sim.model.body_name2id(self.cabinet_object.root_body) 318 | 319 | self.obj_body_id = {} 320 | for obj in self.objects: 321 | self.obj_body_id[obj.name] = self.sim.model.body_name2id(obj.root_body) 322 | 323 | def _setup_observables(self): 324 | """ 325 | Sets up observables to be used for this environment. 
Creates object-based observables if enabled 326 | 327 | Returns: 328 | OrderedDict: Dictionary mapping observable names to its corresponding Observable object 329 | """ 330 | observables = super()._setup_observables() 331 | 332 | observables["robot0_joint_pos"]._active = True 333 | 334 | # low-level object information 335 | if self.use_object_obs: 336 | # Get robot prefix and define observables modality 337 | pf = self.robots[0].robot_model.naming_prefix 338 | modality = "object" 339 | sensors = [] 340 | names = [s.__name__ for s in sensors] 341 | 342 | # Create observables 343 | for name, s in zip(names, sensors): 344 | observables[name] = Observable( 345 | name=name, 346 | sensor=s, 347 | sampling_rate=self.control_freq, 348 | ) 349 | 350 | pf = self.robots[0].robot_model.naming_prefix 351 | modality = f"{pf}proprio" 352 | 353 | @sensor(modality="object") 354 | def world_pose_in_gripper(obs_cache): 355 | return T.pose_inv(T.pose2mat((obs_cache[f"{pf}eef_pos"], obs_cache[f"{pf}eef_quat"]))) if\ 356 | f"{pf}eef_pos" in obs_cache and f"{pf}eef_quat" in obs_cache else np.eye(4) 357 | 358 | sensors.append(world_pose_in_gripper) 359 | names.append("world_pose_in_gripper") 360 | 361 | for (i, obj) in enumerate(self.objects): 362 | obj_sensors, obj_sensor_names = self._create_obj_sensors(obj_name=obj.name, modality="object") 363 | 364 | sensors += obj_sensors 365 | names += obj_sensor_names 366 | 367 | @sensor(modality=modality) 368 | def gripper_contact(obs_cache): 369 | return self._has_gripper_contact 370 | 371 | @sensor(modality=modality) 372 | def force_norm(obs_cache): 373 | return np.linalg.norm(self.robots[0].ee_force - self.ee_force_bias) 374 | 375 | sensors += [gripper_contact, force_norm] 376 | names += [f"{pf}contact", f"{pf}eef_force_norm"] 377 | 378 | for name, s in zip(names, sensors): 379 | if name == "world_pose_in_gripper": 380 | observables[name] = Observable( 381 | name=name, 382 | sensor=s, 383 | sampling_rate=self.control_freq, 384 | enabled=True, 385 | active=False, 386 | ) 387 | else: 388 | observables[name] = Observable( 389 | name=name, 390 | sensor=s, 391 | sampling_rate=self.control_freq 392 | ) 393 | 394 | return observables 395 | 396 | def _create_obj_sensors(self, obj_name, modality="object"): 397 | """ 398 | Helper function to create sensors for a given object. This is abstracted in a separate function call so that we 399 | don't have local function naming collisions during the _setup_observables() call. 
400 | 401 | Args: 402 | obj_name (str): Name of object to create sensors for 403 | modality (str): Modality to assign to all sensors 404 | 405 | Returns: 406 | 2-tuple: 407 | sensors (list): Array of sensors for the given obj 408 | names (list): array of corresponding observable names 409 | """ 410 | pf = self.robots[0].robot_model.naming_prefix 411 | 412 | @sensor(modality=modality) 413 | def obj_pos(obs_cache): 414 | return np.array(self.sim.data.body_xpos[self.obj_body_id[obj_name]]) 415 | 416 | @sensor(modality=modality) 417 | def obj_quat(obs_cache): 418 | return T.convert_quat(self.sim.data.body_xquat[self.obj_body_id[obj_name]], to="xyzw") 419 | 420 | @sensor(modality=modality) 421 | def obj_to_eef_pos(obs_cache): 422 | # Immediately return default value if cache is empty 423 | if any([name not in obs_cache for name in 424 | [f"{obj_name}_pos", f"{obj_name}_quat", "world_pose_in_gripper"]]): 425 | return np.zeros(3) 426 | obj_pose = T.pose2mat((obs_cache[f"{obj_name}_pos"], obs_cache[f"{obj_name}_quat"])) 427 | rel_pose = T.pose_in_A_to_pose_in_B(obj_pose, obs_cache["world_pose_in_gripper"]) 428 | rel_pos, rel_quat = T.mat2pose(rel_pose) 429 | obs_cache[f"{obj_name}_to_{pf}eef_quat"] = rel_quat 430 | return rel_pos 431 | 432 | @sensor(modality=modality) 433 | def obj_to_eef_quat(obs_cache): 434 | return obs_cache[f"{obj_name}_to_{pf}eef_quat"] if \ 435 | f"{obj_name}_to_{pf}eef_quat" in obs_cache else np.zeros(4) 436 | 437 | sensors = [obj_pos, obj_quat, obj_to_eef_pos, obj_to_eef_quat] 438 | names = [f"{obj_name}_pos", f"{obj_name}_quat", f"{obj_name}_to_{pf}eef_pos", f"{obj_name}_to_{pf}eef_quat"] 439 | 440 | return sensors, names 441 | 442 | 443 | def _reset_internal(self): 444 | """ 445 | Resets simulation internal configurations. 446 | """ 447 | super()._reset_internal() 448 | 449 | # Reset all object positions using initializer sampler if we're not directly loading from an xml 450 | if not self.deterministic_reset: 451 | 452 | # Sample from the placement initializer for all objects 453 | object_placements = self.placement_initializer.sample() 454 | 455 | for obj_pos, obj_quat, obj in object_placements.values(): 456 | self.sim.data.set_joint_qpos(obj.joints[0], np.concatenate([np.array(obj_pos), np.array(obj_quat)])) 457 | 458 | self.ee_force_bias = np.zeros(3) 459 | self.ee_torque_bias = np.zeros(3) 460 | self._history_force_torque = RingBuffer(dim=6, length=16) 461 | self._recent_force_torque = [] 462 | 463 | def _check_success(self): 464 | """ 465 | Check if drawer has been opened. 466 | 467 | Returns: 468 | bool: True if drawer has been opened 469 | """ 470 | object_pos = self.sim.data.body_xpos[self.sorting_object_id] 471 | object_in_drawer = 1.0 > object_pos[2] > 0.94 and object_pos[1] > 0.22 472 | 473 | cabinet_closed = self.sim.data.qpos[self.cabinet_qpos_addrs] > -0.01 474 | 475 | return object_in_drawer and cabinet_closed 476 | 477 | def visualize(self, vis_settings): 478 | """ 479 | In addition to super call, visualize gripper site proportional to the distance to the drawer handle. 480 | 481 | Args: 482 | vis_settings (dict): Visualization keywords mapped to T/F, determining whether that specific 483 | component should be visualized. Should have "grippers" keyword as well as any other relevant 484 | options specified. 
485 | """ 486 | # Run superclass method first 487 | super().visualize(vis_settings=vis_settings) 488 | 489 | def step(self, action): 490 | if self.action_dim == 4: 491 | action = np.array(action) 492 | action = np.concatenate((action[:3], action[-1:]), axis=-1) 493 | 494 | self._recent_force_torque = [] 495 | obs, reward, done, info = super().step(action) 496 | info["history_ft"] = np.clip(np.copy(self._history_force_torque.buf), a_min=None, a_max=2) 497 | info["recent_ft"] = np.array(self._recent_force_torque) 498 | done = self._check_success() 499 | return obs, reward, done, info 500 | 501 | 502 | def _pre_action(self, action, policy_step=False): 503 | super()._pre_action(action, policy_step=policy_step) 504 | 505 | self._history_force_torque.push(np.hstack((self.robots[0].ee_force - self.ee_force_bias, self.robots[0].ee_torque - self.ee_torque_bias))) 506 | self._recent_force_torque.append(np.hstack((self.robots[0].ee_force - self.ee_force_bias, self.robots[0].ee_torque - self.ee_torque_bias))) 507 | 508 | def _post_action(self, action): 509 | reward, done, info = super()._post_action(action) 510 | 511 | # print("Cabinet joint: " , self.sim.data.qpos[self.cabinet_qpos_addrs]) 512 | if np.linalg.norm(self.ee_force_bias) == 0: 513 | self.ee_force_bias = self.robots[0].ee_force 514 | self.ee_torque_bias = self.robots[0].ee_torque 515 | 516 | return reward, done, info 517 | 518 | @property 519 | def _has_gripper_contact(self): 520 | """ 521 | Determines whether the gripper is making contact with an object, as defined by the eef force surpassing 522 | a certain threshold defined by self.contact_threshold 523 | 524 | Returns: 525 | bool: True if the contact force surpasses the given threshold magnitude 526 | """ 527 | 528 | return np.linalg.norm(self.robots[0].ee_force - self.ee_force_bias) > self.contact_threshold 529 | 530 | def get_state_vector(self, obs): 531 | return np.concatenate([obs["robot0_gripper_qpos"], 532 | obs["robot0_eef_pos"], 533 | obs["robot0_eef_quat"]]) 534 | -------------------------------------------------------------------------------- /robosuite_task_zoo/environments/manipulation/kitchen.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | import numpy as np 3 | from copy import deepcopy 4 | from robosuite.environments.manipulation.single_arm_env import SingleArmEnv 5 | 6 | import robosuite.utils.transform_utils as T 7 | from robosuite.models.arenas import TableArena 8 | from robosuite.models.objects import CylinderObject, BoxObject 9 | 10 | from robosuite.models.tasks import ManipulationTask 11 | from robosuite.utils.placement_samplers import UniformRandomSampler, SequentialCompositeSampler 12 | from robosuite.utils.observables import Observable, sensor 13 | from robosuite.utils.mjcf_utils import CustomMaterial, array_to_string, find_elements, add_material 14 | from robosuite.utils.buffers import RingBuffer 15 | 16 | from robosuite_task_zoo.models.kitchen import PotObject, StoveObject, ButtonObject, ServingRegionObject 18 | class KitchenEnv(SingleArmEnv): 19 | """ 20 | Kitchen Env: The task is to place the pot on the stove, cook the ingredient in it, and place the pot on the serving region.
21 | """ 22 | def __init__( 23 | self, 24 | robots, 25 | env_configuration="default", 26 | controller_configs=None, 27 | gripper_types="default", 28 | initialization_noise="default", 29 | use_latch=False, 30 | use_camera_obs=True, 31 | use_object_obs=True, 32 | reward_scale=1.0, 33 | reward_shaping=False, 34 | placement_initializer=None, 35 | has_renderer=False, 36 | has_offscreen_renderer=True, 37 | render_camera="frontview", 38 | render_collision_mesh=False, 39 | render_visual_mesh=True, 40 | render_gpu_device_id=-1, 41 | control_freq=20, 42 | horizon=1000, 43 | ignore_done=False, 44 | hard_reset=True, 45 | camera_names="agentview", 46 | camera_heights=256, 47 | camera_widths=256, 48 | camera_depths=False, 49 | contact_threshold=2.0 50 | ): 51 | # settings for table top (hardcoded since it's not an essential part of the environment) 52 | self.table_full_size = (1.0, 0.8, 0.05) 53 | self.table_offset = (-0.2, 0, 0.90) 54 | 55 | # reward configuration 56 | self.reward_scale = reward_scale 57 | self.reward_shaping = reward_shaping 58 | 59 | # whether to use ground-truth object states 60 | self.use_object_obs = use_object_obs 61 | 62 | # object placement initializer 63 | self.placement_initializer = placement_initializer 64 | 65 | # ee resets 66 | self.ee_force_bias = np.zeros(3) 67 | self.ee_torque_bias = np.zeros(3) 68 | 69 | # Thresholds 70 | self.contact_threshold = contact_threshold 71 | 72 | # History observations 73 | self._history_force_torque = None 74 | self._recent_force_torque = None 75 | 76 | self.stoves = {} 77 | self.buttons = {} 78 | self.buttons_on = {} 79 | self.button_qpos_addrs = {} 80 | self.num_stoves = 0 81 | self.has_stove_turned_on = False 82 | 83 | self.objects = [] 84 | 85 | super().__init__( 86 | robots=robots, 87 | env_configuration=env_configuration, 88 | controller_configs=controller_configs, 89 | mount_types="default", 90 | gripper_types=gripper_types, 91 | initialization_noise=initialization_noise, 92 | use_camera_obs=use_camera_obs, 93 | has_renderer=has_renderer, 94 | has_offscreen_renderer=has_offscreen_renderer, 95 | render_camera=render_camera, 96 | render_collision_mesh=render_collision_mesh, 97 | render_visual_mesh=render_visual_mesh, 98 | render_gpu_device_id=render_gpu_device_id, 99 | control_freq=control_freq, 100 | horizon=horizon, 101 | ignore_done=ignore_done, 102 | hard_reset=hard_reset, 103 | camera_names=camera_names, 104 | camera_heights=camera_heights, 105 | camera_widths=camera_widths, 106 | camera_depths=camera_depths, 107 | ) 108 | 109 | def reward(self, action=None): 110 | """ 111 | Reward function for the task. 112 | 113 | Sparse un-normalized reward: 114 | 115 | - a discrete reward of 1.0 is provided if the ingredient is in the pot, the pot has been placed on the serving region, and the stove has been turned on and then off 116 | 117 | Note that reward shaping is not implemented for this environment; even if reward_shaping is 118 | enabled, only the sparse completion reward above is returned. 119 | 120 | 121 | 122 | 123 | Note that a successfully completed task will return 1.0 regardless of whether the environment 124 | is using sparse or shaped rewards. 125 | 126 | Note that the final reward is normalized and scaled by reward_scale / 1.0 as 127 | well so that the max score is equal to reward_scale 128 | 129 | Args: 130 | action (np.array): [NOT USED] 131 | 132 | Returns: 133 | float: reward value 134 | """ 135 | reward = 0.
136 | 137 | # sparse completion reward 138 | if self._check_success(): 139 | reward = 1.0 140 | 141 | # Scale reward if requested 142 | if self.reward_scale is not None: 143 | reward *= self.reward_scale / 1.0 144 | 145 | return reward 146 | 147 | def _load_model(self): 148 | """ 149 | Loads an xml model, puts it in self.model 150 | """ 151 | super()._load_model() 152 | 153 | # Adjust base pose accordingly 154 | xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0]) 155 | self.robots[0].robot_model.set_base_xpos(xpos) 156 | 157 | # load model for table top workspace 158 | mujoco_arena = TableArena( 159 | table_full_size=self.table_full_size, 160 | table_offset=self.table_offset, 161 | table_friction=(0.6, 0.005, 0.0001) 162 | ) 163 | 164 | # Arena always gets set to zero origin 165 | mujoco_arena.set_origin([0, 0, 0]) 166 | 167 | # Modify default agentview camera 168 | mujoco_arena.set_camera( 169 | camera_name="agentview", 170 | pos=[0.5386131746834771, -4.392035683362857e-09, 1.4903500240372423], 171 | quat=[0.6380177736282349, 0.3048497438430786, 0.30484986305236816, 0.6380177736282349] 172 | ) 173 | 174 | mujoco_arena.set_camera( 175 | camera_name="sideview", 176 | pos=[0.5586131746834771, 0.3, 1.2903500240372423], 177 | quat=[0.4144233167171478, 0.3100920617580414, 178 | 0.49641484022140503, 0.6968992352485657] 179 | ) 180 | 181 | 182 | bread = CustomMaterial( 183 | texture="Bread", 184 | tex_name="bread", 185 | mat_name="MatBread", 186 | tex_attrib={"type": "cube"}, 187 | mat_attrib={"texrepeat": "3 3", "specular": "0.4","shininess": "0.1"} 188 | ) 189 | darkwood = CustomMaterial( 190 | texture="WoodDark", 191 | tex_name="darkwood", 192 | mat_name="MatDarkWood", 193 | tex_attrib={"type": "cube"}, 194 | mat_attrib={"texrepeat": "3 3", "specular": "0.4","shininess": "0.1"} 195 | ) 196 | 197 | metal = CustomMaterial( 198 | texture="Metal", 199 | tex_name="metal", 200 | mat_name="MatMetal", 201 | tex_attrib={"type": "cube"}, 202 | mat_attrib={"specular": "1", "shininess": "0.3", "rgba": "0.9 0.9 0.9 1"} 203 | ) 204 | 205 | tex_attrib = { 206 | "type": "cube" 207 | } 208 | 209 | mat_attrib = { 210 | "texrepeat": "1 1", 211 | "specular": "0.4", 212 | "shininess": "0.1" 213 | } 214 | 215 | greenwood = CustomMaterial( 216 | texture="WoodGreen", 217 | tex_name="greenwood", 218 | mat_name="greenwood_mat", 219 | tex_attrib=tex_attrib, 220 | mat_attrib=mat_attrib, 221 | ) 222 | redwood = CustomMaterial( 223 | texture="WoodRed", 224 | tex_name="redwood", 225 | mat_name="MatRedWood", 226 | tex_attrib=tex_attrib, 227 | mat_attrib=mat_attrib, 228 | ) 229 | 230 | bluewood = CustomMaterial( 231 | texture="WoodBlue", 232 | tex_name="bluewood", 233 | mat_name="handle1_mat", 234 | tex_attrib={"type": "cube"}, 235 | mat_attrib={"texrepeat": "1 1", "specular": "0.4", "shininess": "0.1"}, 236 | ) 237 | 238 | self.stove_object_1 = StoveObject( 239 | name="Stove1", 240 | joints=None, 241 | ) 242 | stove_body = self.stove_object_1.get_obj(); stove_body.set("pos", array_to_string((0.23, 0.095, 0.02))); mujoco_arena.table_body.append(stove_body) 243 | 244 | self.button_object_1 = ButtonObject( 245 | name="Button1", 246 | ) 247 | 248 | button_body = self.button_object_1.get_obj(); button_body.set("quat", array_to_string((0., 0., 0., 1.))); button_body.set("pos", array_to_string((0.06, 0.10, 0.02))); mujoco_arena.table_body.append(button_body) 249 | 250 | self.serving_region = ServingRegionObject( 251 | name="ServingRegionRed" 252 | ) 253 | serving_region_object = 
self.serving_region.get_obj(); serving_region_object.set("pos", array_to_string((0.345, -0.15, 0.003))); mujoco_arena.table_body.append(serving_region_object) 254 | 255 | self.pot_object = PotObject( 256 | name="PotObject", 257 | ) 258 | 259 | for obj_body in [ 260 | self.button_object_1, 261 | self.stove_object_1, 262 | self.serving_region, 263 | ]: 264 | for material in [darkwood, metal, redwood]: 265 | tex_element, mat_element, _, used = add_material(root=obj_body.worldbody, 266 | naming_prefix=obj_body.naming_prefix, 267 | custom_material=deepcopy(material)) 268 | obj_body.asset.append(tex_element) 269 | obj_body.asset.append(mat_element) 270 | 271 | ingredient_size = [0.015, 0.025, 0.02] 272 | 273 | self.bread_ingredient = BoxObject( 274 | name="cube_bread", 275 | size_min=ingredient_size, 276 | size_max=ingredient_size, 277 | rgba=[1, 0, 0, 1], 278 | material=bread, 279 | density=500., 280 | ) 281 | 282 | self.placement_initializer = SequentialCompositeSampler(name="ObjectSampler") 283 | 284 | # Create placement initializer 285 | 286 | self.placement_initializer.append_sampler( 287 | sampler = UniformRandomSampler( 288 | name="ObjectSampler-bread", 289 | mujoco_objects=self.bread_ingredient, 290 | x_range=[0.05, 0.08], 291 | y_range=[-0.18, -0.12], 292 | rotation=(-np.pi / 2., -np.pi / 2.), 293 | rotation_axis='z', 294 | ensure_object_boundary_in_range=False, 295 | ensure_valid_placement=True, 296 | reference_pos=self.table_offset, 297 | z_offset=0.01, 298 | )) 299 | 300 | self.placement_initializer.append_sampler( 301 | sampler = UniformRandomSampler( 302 | name="ObjectSampler-pot", 303 | mujoco_objects=self.pot_object, 304 | x_range=[0.175, 0.18], 305 | y_range=[-0.15, -0.13], 306 | rotation=(-0.1, 0.1), 307 | rotation_axis='z', 308 | ensure_object_boundary_in_range=False, 309 | ensure_valid_placement=True, 310 | reference_pos=self.table_offset, 311 | z_offset=0.02, 312 | )) 313 | 314 | 315 | mujoco_objects = [self.bread_ingredient, 316 | self.pot_object, 317 | ] 318 | 319 | # task includes arena, robot, and objects of interest 320 | self.model = ManipulationTask( 321 | mujoco_arena=mujoco_arena, 322 | mujoco_robots=[robot.robot_model for robot in self.robots], 323 | mujoco_objects=mujoco_objects, 324 | ) 325 | self.stoves = {1: self.stove_object_1, 326 | # 2: self.stove_object_2 327 | } 328 | 329 | self.num_stoves = len(self.stoves.keys()) 330 | 331 | self.buttons = {1: self.button_object_1, 332 | # 2: self.button_object_2, 333 | } 334 | self.buttons_on = {1: False, 335 | # 2: False 336 | } 337 | 338 | self.objects = [ 339 | self.stove_object_1, 340 | self.bread_ingredient, 341 | self.pot_object, 342 | self.serving_region 343 | ] 344 | 345 | self.model.merge_assets(self.button_object_1) 346 | self.model.merge_assets(self.stove_object_1) 347 | self.model.merge_assets(self.serving_region) 348 | 349 | 350 | def _setup_references(self): 351 | """ 352 | Sets up references to important components. A reference is typically an 353 | index or a list of indices that point to the corresponding elements 354 | in a flatten array, which is how MuJoCo stores physical simulation data. 
355 | """ 356 | super()._setup_references() 357 | 358 | # Additional object references from this env 359 | self.object_body_ids = dict() 360 | self.object_body_ids["stove_1"] = self.sim.model.body_name2id(self.stove_object_1.root_body) 361 | # self.object_body_ids["stove_2"] = self.sim.model.body_name2id(self.stove_object_2.root_body) 362 | 363 | self.pot_object_id = self.sim.model.body_name2id(self.pot_object.root_body) 364 | self.button_qpos_addrs.update({1: self.sim.model.get_joint_qpos_addr(self.button_object_1.joints[0])}) 365 | # self.button_qpos_addrs.update({2: self.sim.model.get_joint_qpos_addr(self.button_object_2.joints[0])}) 366 | 367 | self.serving_region_id = self.sim.model.body_name2id(self.serving_region.root_body) 368 | 369 | self.sim.data.set_joint_qpos(self.button_object_1.joints[0], np.array([-0.4])) 370 | 371 | self.obj_body_id = {} 372 | for obj in self.objects: 373 | self.obj_body_id[obj.name] = self.sim.model.body_name2id(obj.root_body) 374 | 375 | def _setup_observables(self): 376 | """ 377 | Sets up observables to be used for this environment. Creates object-based observables if enabled 378 | 379 | Returns: 380 | OrderedDict: Dictionary mapping observable names to its corresponding Observable object 381 | """ 382 | observables = super()._setup_observables() 383 | 384 | observables["robot0_joint_pos"]._active = True 385 | 386 | # low-level object information 387 | if self.use_object_obs: 388 | # Get robot prefix and define observables modality 389 | pf = self.robots[0].robot_model.naming_prefix 390 | modality = "object" 391 | sensors = [] 392 | names = [s.__name__ for s in sensors] 393 | 394 | # Also append handle qpos if we're using a locked drawer version with rotatable handle 395 | 396 | # Create observables 397 | for name, s in zip(names, sensors): 398 | observables[name] = Observable( 399 | name=name, 400 | sensor=s, 401 | sampling_rate=self.control_freq, 402 | ) 403 | 404 | pf = self.robots[0].robot_model.naming_prefix 405 | modality = f"{pf}proprio" 406 | 407 | @sensor(modality="object") 408 | def world_pose_in_gripper(obs_cache): 409 | return T.pose_inv(T.pose2mat((obs_cache[f"{pf}eef_pos"], obs_cache[f"{pf}eef_quat"]))) if\ 410 | f"{pf}eef_pos" in obs_cache and f"{pf}eef_quat" in obs_cache else np.eye(4) 411 | 412 | sensors.append(world_pose_in_gripper) 413 | names.append("world_pose_in_gripper") 414 | 415 | for (i, obj) in enumerate(self.objects): 416 | obj_sensors, obj_sensor_names = self._create_obj_sensors(obj_name=obj.name, modality="object") 417 | 418 | sensors += obj_sensors 419 | names += obj_sensor_names 420 | 421 | @sensor(modality=modality) 422 | def gripper_contact(obs_cache): 423 | return self._has_gripper_contact 424 | 425 | @sensor(modality=modality) 426 | def force_norm(obs_cache): 427 | return np.linalg.norm(self.robots[0].ee_force - self.ee_force_bias) 428 | 429 | sensors += [gripper_contact, force_norm] 430 | names += [f"{pf}contact", f"{pf}eef_force_norm"] 431 | 432 | for name, s in zip(names, sensors): 433 | if name == "world_pose_in_gripper": 434 | observables[name] = Observable( 435 | name=name, 436 | sensor=s, 437 | sampling_rate=self.control_freq, 438 | enabled=True, 439 | active=False, 440 | ) 441 | else: 442 | observables[name] = Observable( 443 | name=name, 444 | sensor=s, 445 | sampling_rate=self.control_freq 446 | ) 447 | 448 | return observables 449 | 450 | def _create_obj_sensors(self, obj_name, modality="object"): 451 | """ 452 | Helper function to create sensors for a given object. 
This is abstracted in a separate function call so that we 453 | don't have local function naming collisions during the _setup_observables() call. 454 | 455 | Args: 456 | obj_name (str): Name of object to create sensors for 457 | modality (str): Modality to assign to all sensors 458 | 459 | Returns: 460 | 2-tuple: 461 | sensors (list): Array of sensors for the given obj 462 | names (list): array of corresponding observable names 463 | """ 464 | pf = self.robots[0].robot_model.naming_prefix 465 | 466 | @sensor(modality=modality) 467 | def obj_pos(obs_cache): 468 | return np.array(self.sim.data.body_xpos[self.obj_body_id[obj_name]]) 469 | 470 | @sensor(modality=modality) 471 | def obj_quat(obs_cache): 472 | return T.convert_quat(self.sim.data.body_xquat[self.obj_body_id[obj_name]], to="xyzw") 473 | 474 | @sensor(modality=modality) 475 | def obj_to_eef_pos(obs_cache): 476 | # Immediately return default value if cache is empty 477 | if any([name not in obs_cache for name in 478 | [f"{obj_name}_pos", f"{obj_name}_quat", "world_pose_in_gripper"]]): 479 | return np.zeros(3) 480 | obj_pose = T.pose2mat((obs_cache[f"{obj_name}_pos"], obs_cache[f"{obj_name}_quat"])) 481 | rel_pose = T.pose_in_A_to_pose_in_B(obj_pose, obs_cache["world_pose_in_gripper"]) 482 | rel_pos, rel_quat = T.mat2pose(rel_pose) 483 | obs_cache[f"{obj_name}_to_{pf}eef_quat"] = rel_quat 484 | return rel_pos 485 | 486 | @sensor(modality=modality) 487 | def obj_to_eef_quat(obs_cache): 488 | return obs_cache[f"{obj_name}_to_{pf}eef_quat"] if \ 489 | f"{obj_name}_to_{pf}eef_quat" in obs_cache else np.zeros(4) 490 | 491 | sensors = [obj_pos, obj_quat, obj_to_eef_pos, obj_to_eef_quat] 492 | names = [f"{obj_name}_pos", f"{obj_name}_quat", f"{obj_name}_to_{pf}eef_pos", f"{obj_name}_to_{pf}eef_quat"] 493 | 494 | return sensors, names 495 | 496 | 497 | def _reset_internal(self): 498 | """ 499 | Resets simulation internal configurations. 500 | """ 501 | super()._reset_internal() 502 | self.has_stove_turned_on = False 503 | # Reset all object positions using initializer sampler if we're not directly loading from an xml 504 | if not self.deterministic_reset: 505 | 506 | # Sample from the placement initializer for all objects 507 | object_placements = self.placement_initializer.sample() 508 | 509 | for obj_pos, obj_quat, obj in object_placements.values(): 510 | self.sim.data.set_joint_qpos(obj.joints[0], np.concatenate([np.array(obj_pos), np.array(obj_quat)])) 511 | 512 | self.ee_force_bias = np.zeros(3) 513 | self.ee_torque_bias = np.zeros(3) 514 | self._history_force_torque = RingBuffer(dim=6, length=16) 515 | self._recent_force_torque = [] 516 | 517 | def _check_success(self): 518 | """ 519 | Check if drawer has been opened. 
520 | 521 | Returns: 522 | bool: True if drawer has been opened 523 | """ 524 | pot_pos = self.sim.data.body_xpos[self.pot_object_id] 525 | serving_region_pos = self.sim.data.body_xpos[self.serving_region_id] 526 | dist_serving_pot = serving_region_pos - pot_pos 527 | pot_in_serving_region = np.abs(dist_serving_pot[0]) < 0.05 and np.abs(dist_serving_pot[1]) < 0.10 and np.abs(dist_serving_pot[2]) < 0.05 528 | stove_turned_off = not self.buttons_on[1] 529 | if not stove_turned_off: 530 | self.has_stove_turned_on = True 531 | object_in_pot = self.check_contact(self.bread_ingredient, self.pot_object) 532 | return pot_in_serving_region and stove_turned_off and object_in_pot and self.has_stove_turned_on 533 | 534 | def visualize(self, vis_settings): 535 | """ 536 | In addition to super call, visualize gripper site proportional to the distance to the drawer handle. 537 | 538 | Args: 539 | vis_settings (dict): Visualization keywords mapped to T/F, determining whether that specific 540 | component should be visualized. Should have "grippers" keyword as well as any other relevant 541 | options specified. 542 | """ 543 | # Run superclass method first 544 | super().visualize(vis_settings=vis_settings) 545 | 546 | def step(self, action): 547 | if self.action_dim == 4: 548 | action = np.array(action) 549 | action = np.concatenate((action[:3], action[-1:]), axis=-1) 550 | 551 | self._recent_force_torque = [] 552 | obs, reward, done, info = super().step(action) 553 | info["history_ft"] = np.clip(np.copy(self._history_force_torque.buf), a_min=None, a_max=2) 554 | info["recent_ft"] = np.array(self._recent_force_torque) 555 | done = self._check_success() 556 | return obs, reward, done, info 557 | 558 | 559 | def _pre_action(self, action, policy_step=False): 560 | super()._pre_action(action, policy_step=policy_step) 561 | 562 | self._history_force_torque.push(np.hstack((self.robots[0].ee_force - self.ee_force_bias, self.robots[0].ee_torque - self.ee_torque_bias))) 563 | self._recent_force_torque.append(np.hstack((self.robots[0].ee_force - self.ee_force_bias, self.robots[0].ee_torque - self.ee_torque_bias))) 564 | 565 | def _post_action(self, action): 566 | reward, done, info = super()._post_action(action) 567 | 568 | # Check if stove is turned on or not 569 | self._post_process() 570 | 571 | if np.linalg.norm(self.ee_force_bias) == 0: 572 | self.ee_force_bias = self.robots[0].ee_force 573 | self.ee_torque_bias = self.robots[0].ee_torque 574 | 575 | return reward, done, info 576 | 577 | def _post_process(self): 578 | stoves_on = {1: False, 579 | 2: False} 580 | 581 | for i in range(1, self.num_stoves+1): 582 | if self.buttons_on[i]: 583 | if self.sim.data.qpos[self.button_qpos_addrs[i]] < 0.0: 584 | self.buttons_on[i] = False 585 | else: 586 | if self.sim.data.qpos[self.button_qpos_addrs[i]] >= 0.0: 587 | self.buttons_on[i] = True 588 | 589 | for stove_num, stove_status in self.buttons_on.items(): 590 | self.stoves[stove_num].set_sites_visibility(sim=self.sim, visible=stove_status) 591 | 592 | @property 593 | def _has_gripper_contact(self): 594 | """ 595 | Determines whether the gripper is making contact with an object, as defined by the eef force surprassing 596 | a certain threshold defined by self.contact_threshold 597 | 598 | Returns: 599 | bool: True if contact is surpasses given threshold magnitude 600 | """ 601 | # return np.linalg.norm(self.robots[0].ee_force - self.ee_force_bias) > self.contact_threshold 602 | 603 | return np.linalg.norm(self.robots[0].ee_force - self.ee_force_bias) > 
self.contact_threshold 604 | 605 | 606 | def get_state_vector(self, obs): 607 | return np.concatenate([obs["robot0_gripper_qpos"], 608 | obs["robot0_eef_pos"], 609 | obs["robot0_eef_quat"]]) 610 | -------------------------------------------------------------------------------- /robosuite_task_zoo/environments/manipulation/multitask_kitchen.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | import numpy as np 3 | from copy import deepcopy 4 | from robosuite.environments.manipulation.single_arm_env import SingleArmEnv 5 | 6 | from robosuite.models.arenas import TableArena 7 | from robosuite.models.objects import CylinderObject, BoxObject 8 | from robosuite.models.tasks import ManipulationTask 9 | from robosuite.utils.placement_samplers import UniformRandomSampler, SequentialCompositeSampler 10 | from robosuite.utils.observables import Observable, sensor 11 | from robosuite.utils.mjcf_utils import CustomMaterial, array_to_string, find_elements, add_material 12 | from robosuite.utils.buffers import RingBuffer 13 | import robosuite.utils.transform_utils as T 14 | 15 | from robosuite_task_zoo.models.multitask_kitchen import PotObject, StoveObject, ButtonObject, CabinetObject, ServingRegionObject 16 | 17 | class MultitaskKitchenDomain(SingleArmEnv): 18 | """ 19 | MultitaskKitchenDomain: A multi-task environment with different varaints (different initial configurations). 20 | """ 21 | def __init__( 22 | self, 23 | robots, 24 | env_configuration="default", 25 | controller_configs=None, 26 | gripper_types="default", 27 | initialization_noise="default", 28 | use_latch=False, 29 | use_camera_obs=True, 30 | use_object_obs=True, 31 | reward_scale=1.0, 32 | reward_shaping=False, 33 | placement_initializer=None, 34 | has_renderer=False, 35 | has_offscreen_renderer=True, 36 | render_camera="frontview", 37 | render_collision_mesh=False, 38 | render_visual_mesh=True, 39 | render_gpu_device_id=-1, 40 | control_freq=20, 41 | horizon=1000, 42 | ignore_done=False, 43 | hard_reset=True, 44 | camera_names="agentview", 45 | camera_heights=256, 46 | camera_widths=256, 47 | camera_depths=False, 48 | contact_threshold=2.0, 49 | task_id=0 50 | ): 51 | # settings for table top (hardcoded since it's not an essential part of the environment) 52 | self.table_full_size = (1.0, 0.8, 0.05) 53 | self.table_offset = (-0.2, 0, 0.90) 54 | 55 | # reward configuration 56 | self.reward_scale = reward_scale 57 | self.reward_shaping = reward_shaping 58 | 59 | # whether to use ground-truth object states 60 | self.use_object_obs = use_object_obs 61 | 62 | # object placement initializer 63 | self.placement_initializer = placement_initializer 64 | 65 | # ee resets 66 | self.ee_force_bias = np.zeros(3) 67 | self.ee_torque_bias = np.zeros(3) 68 | 69 | # Thresholds 70 | self.contact_threshold = contact_threshold 71 | 72 | # History observations 73 | self._history_force_torque = None 74 | self._recent_force_torque = None 75 | 76 | self.stoves = {} 77 | self.buttons = {} 78 | self.buttons_on = {} 79 | self.button_qpos_addrs = {} 80 | self.num_stoves = 0 81 | self.has_stove_turned_on = False 82 | 83 | self.objects = [] 84 | 85 | self.task_id = task_id 86 | 87 | self.task_id_mapping = {0: "Task 1, Variant 1", 88 | 1: "Task 3, Variant 1", 89 | 2: "Task 2, Variant 1", 90 | 3: "Task 2, Variant 2", 91 | 4: "Task 1, Variant 2", 92 | 5: "Task 2, Variant 3", 93 | 6: "Task 1, Variant 3", 94 | 7: "Task 3, Variant 2", 95 | 8: "Task 3, variant 3"} 96 | print("The current task 
info: ", self.task_id_mapping[self.task_id]) 97 | self.max_tasks = 9 98 | 99 | self.steps = 0 100 | 101 | super().__init__( 102 | robots=robots, 103 | env_configuration=env_configuration, 104 | controller_configs=controller_configs, 105 | mount_types="default", 106 | gripper_types=gripper_types, 107 | initialization_noise=initialization_noise, 108 | use_camera_obs=use_camera_obs, 109 | has_renderer=has_renderer, 110 | has_offscreen_renderer=has_offscreen_renderer, 111 | render_camera=render_camera, 112 | render_collision_mesh=render_collision_mesh, 113 | render_visual_mesh=render_visual_mesh, 114 | render_gpu_device_id=render_gpu_device_id, 115 | control_freq=control_freq, 116 | horizon=horizon, 117 | ignore_done=ignore_done, 118 | hard_reset=hard_reset, 119 | camera_names=camera_names, 120 | camera_heights=camera_heights, 121 | camera_widths=camera_widths, 122 | camera_depths=camera_depths, 123 | ) 124 | 125 | def task_info(self, task_id=None): 126 | """Return the task and variant description (as referred to in the paper)""" 127 | if task_id is None: 128 | return self.task_id_mapping[self.task_id] 129 | else: 130 | return self.task_id_mapping[task_id] 131 | 132 | def update_task(self, task_id): 133 | self.task_id = task_id 134 | assert(self.task_id < self.max_tasks) 135 | def reward(self, action=None): 136 | """ 137 | Reward function for the task. 138 | 139 | Sparse un-normalized reward: 140 | 141 | - a discrete reward of 1.0 is provided when the current task variant is completed (see _check_success) 142 | 143 | Note that reward shaping is not implemented for this environment; even if reward_shaping is 144 | enabled, only the sparse completion reward above is returned. 145 | 146 | 147 | 148 | 149 | Note that a successfully completed task will return 1.0 regardless of whether the environment 150 | is using sparse or shaped rewards. 151 | 152 | Note that the final reward is normalized and scaled by reward_scale / 1.0 as 153 | well so that the max score is equal to reward_scale 154 | 155 | Args: 156 | action (np.array): [NOT USED] 157 | 158 | Returns: 159 | float: reward value 160 | """ 161 | reward = 0.
162 | 163 | # sparse completion reward 164 | if self._check_success(): 165 | reward = 1.0 166 | 167 | # Scale reward if requested 168 | if self.reward_scale is not None: 169 | reward *= self.reward_scale / 1.0 170 | 171 | return reward 172 | 173 | def _load_model(self): 174 | """ 175 | Loads an xml model, puts it in self.model 176 | """ 177 | super()._load_model() 178 | 179 | # Adjust base pose accordingly 180 | xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0]) 181 | self.robots[0].robot_model.set_base_xpos(xpos) 182 | 183 | # load model for table top workspace 184 | mujoco_arena = TableArena( 185 | table_full_size=self.table_full_size, 186 | table_offset=self.table_offset, 187 | table_friction=(0.6, 0.005, 0.0001) 188 | ) 189 | 190 | # Arena always gets set to zero origin 191 | mujoco_arena.set_origin([0, 0, 0]) 192 | 193 | # Modify default agentview camera 194 | mujoco_arena.set_camera( 195 | camera_name="agentview", 196 | pos=[0.5386131746834771, -4.392035683362857e-09, 1.4903500240372423], 197 | quat=[0.6380177736282349, 0.3048497438430786, 0.30484986305236816, 0.6380177736282349] 198 | ) 199 | 200 | mujoco_arena.set_camera( 201 | camera_name="sideview", 202 | pos=[0.5586131746834771, 0.3, 1.2903500240372423], 203 | quat=[0.4144233167171478, 0.3100920617580414, 204 | 0.49641484022140503, 0.6968992352485657] 205 | ) 206 | 207 | 208 | bread = CustomMaterial( 209 | texture="Bread", 210 | tex_name="bread", 211 | mat_name="MatBread", 212 | tex_attrib={"type": "cube"}, 213 | mat_attrib={"texrepeat": "3 3", "specular": "0.4","shininess": "0.1"} 214 | ) 215 | darkwood = CustomMaterial( 216 | texture="WoodDark", 217 | tex_name="darkwood", 218 | mat_name="MatDarkWood", 219 | tex_attrib={"type": "cube"}, 220 | mat_attrib={"texrepeat": "3 3", "specular": "0.4","shininess": "0.1"} 221 | ) 222 | 223 | metal = CustomMaterial( 224 | texture="Metal", 225 | tex_name="metal", 226 | mat_name="MatMetal", 227 | tex_attrib={"type": "cube"}, 228 | mat_attrib={"specular": "1", "shininess": "0.3", "rgba": "0.9 0.9 0.9 1"} 229 | ) 230 | 231 | ceramic = CustomMaterial( 232 | texture="Ceramic", 233 | tex_name="ceramic", 234 | mat_name="MatCeramic", 235 | tex_attrib={"type": "cube"}, 236 | mat_attrib={"specular": "1", "shininess": "0.3", "rgba": "0.9 0.9 0.9 1"} 237 | ) 238 | 239 | tex_attrib = { 240 | "type": "cube" 241 | } 242 | 243 | mat_attrib = { 244 | "texrepeat": "1 1", 245 | "specular": "0.4", 246 | "shininess": "0.1" 247 | } 248 | 249 | greenwood = CustomMaterial( 250 | texture="WoodGreen", 251 | tex_name="greenwood", 252 | mat_name="greenwood_mat", 253 | tex_attrib=tex_attrib, 254 | mat_attrib=mat_attrib, 255 | ) 256 | redwood = CustomMaterial( 257 | texture="WoodRed", 258 | tex_name="redwood", 259 | mat_name="MatRedWood", 260 | tex_attrib=tex_attrib, 261 | mat_attrib=mat_attrib, 262 | ) 263 | 264 | bluewood = CustomMaterial( 265 | texture="WoodBlue", 266 | tex_name="bluewood", 267 | mat_name="MatBlueWood", 268 | # tex_attrib=tex_attrib, 269 | # mat_attrib=mat_attrib, 270 | tex_attrib={"type": "cube"}, 271 | mat_attrib={"texrepeat": "1 1", "specular": "0.4", "shininess": "0.1"}, 272 | ) 273 | 274 | self.stove_object_1 = StoveObject( 275 | name="Stove1", 276 | joints=None, 277 | ) 278 | stove_body = self.stove_object_1.get_obj(); stove_body.set("pos", array_to_string((0.23, 0.18, 0.02))); mujoco_arena.table_body.append(stove_body) 279 | 280 | self.button_object_1 = ButtonObject( 281 | name="Button1", 282 | ) 283 | 284 | button_body = self.button_object_1.get_obj(); 
button_body.set("quat", array_to_string((0., 0., 0., 1.))); button_body.set("pos", array_to_string((0.06, 0.20, 0.02))); mujoco_arena.table_body.append(button_body) 285 | 286 | self.serving_region = ServingRegionObject( 287 | name="ServingRegionRed" 288 | ) 289 | serving_region_object = self.serving_region.get_obj(); serving_region_object.set("pos", array_to_string((0.35, -0.02, 0.003))); mujoco_arena.table_body.append(serving_region_object) 290 | 291 | 292 | self.pot_object = PotObject( 293 | name="PotObject", 294 | ) 295 | 296 | 297 | self.cabinet_object = CabinetObject( 298 | name="CabinetObject") 299 | cabinet_object = self.cabinet_object.get_obj(); cabinet_object.set("quat", array_to_string((0., 0., 0., 1.))); cabinet_object.set("pos", array_to_string((0.15, -0.30, 0.03))); mujoco_arena.table_body.append(cabinet_object) 300 | 301 | bread_ingredient_size = [0.015, 0.023, 0.02] 302 | veg_ingredient_size = [0.015, 0.027, 0.02] 303 | cake_ingredient_size = [0.015, 0.023, 0.02] 304 | orange_ingredient_size = [0.015, 0.027, 0.02] 305 | 306 | self.bread_ingredient = BoxObject( 307 | name="cube_bread", 308 | size_min=bread_ingredient_size, # [0.015, 0.015, 0.015], 309 | size_max=bread_ingredient_size, # [0.018, 0.018, 0.018]) 310 | rgba=[1, 0, 0, 1], 311 | material=bread, 312 | density=500., 313 | ) 314 | 315 | 316 | for obj_body in [ 317 | self.button_object_1, 318 | self.stove_object_1, 319 | self.serving_region, 320 | # self.cleaning_region, 321 | self.cabinet_object, 322 | ]: 323 | for material in [darkwood, metal, redwood, ceramic, bluewood]: 324 | tex_element, mat_element, _, used = add_material(root=obj_body.worldbody, 325 | naming_prefix=obj_body.naming_prefix, 326 | custom_material=deepcopy(material)) 327 | obj_body.asset.append(tex_element) 328 | obj_body.asset.append(mat_element) 329 | 330 | 331 | self.placement_initializer = SequentialCompositeSampler(name="ObjectSampler") 332 | 333 | # Create placement initializer 334 | 335 | valid_placement = False 336 | 337 | if self.task_id == 1: 338 | self.placement_initializer.append_sampler( 339 | sampler = UniformRandomSampler( 340 | name="ObjectSampler-bread", 341 | mujoco_objects=self.bread_ingredient, 342 | x_range=[0.22, 0.24], 343 | y_range=[-0.01, 0.01], 344 | rotation=(-np.pi / 2., -np.pi / 2.), 345 | rotation_axis='z', 346 | ensure_object_boundary_in_range=False, 347 | ensure_valid_placement=valid_placement, 348 | reference_pos=self.table_offset, 349 | z_offset=0.30, 350 | )) 351 | 352 | else: 353 | self.placement_initializer.append_sampler( 354 | sampler = UniformRandomSampler( 355 | name="ObjectSampler-bread", 356 | mujoco_objects=self.bread_ingredient, 357 | x_range=[-0.05, -0.01], 358 | y_range=[0.04, 0.08], 359 | rotation=(-np.pi / 2., -np.pi / 2.), 360 | rotation_axis='z', 361 | ensure_object_boundary_in_range=False, 362 | ensure_valid_placement=valid_placement, 363 | reference_pos=self.table_offset, 364 | z_offset=0.01, 365 | )) 366 | 367 | if self.task_id not in []: 368 | self.placement_initializer.append_sampler( 369 | sampler = UniformRandomSampler( 370 | name="ObjectSampler-pot", 371 | mujoco_objects=self.pot_object, 372 | x_range=[0.21, 0.25], 373 | y_range=[-0.02, 0.02], 374 | rotation=(-0.1, 0.1), 375 | rotation_axis='z', 376 | ensure_object_boundary_in_range=False, 377 | ensure_valid_placement=valid_placement, 378 | reference_pos=self.table_offset, 379 | z_offset=0.0, 380 | )) 381 | else: 382 | self.placement_initializer.append_sampler( 383 | sampler = UniformRandomSampler( 384 | name="ObjectSampler-pot", 385 | 
mujoco_objects=self.pot_object, 386 | x_range=[0.24, 0.25], 387 | y_range=[0.15, 0.16], 388 | rotation=(-0.1, 0.1), 389 | rotation_axis='z', 390 | ensure_object_boundary_in_range=False, 391 | ensure_valid_placement=valid_placement, 392 | reference_pos=self.table_offset, 393 | z_offset=0.05, 394 | )) 395 | 396 | mujoco_objects = [self.bread_ingredient, 397 | # self.hammer_object, 398 | self.pot_object, 399 | ] 400 | 401 | # task includes arena, robot, and objects of interest 402 | self.model = ManipulationTask( 403 | mujoco_arena=mujoco_arena, 404 | mujoco_robots=[robot.robot_model for robot in self.robots], 405 | mujoco_objects=mujoco_objects, 406 | ) 407 | self.stoves = {1: self.stove_object_1, 408 | } 409 | 410 | self.num_stoves = len(self.stoves.keys()) 411 | 412 | self.buttons = {1: self.button_object_1, 413 | } 414 | self.buttons_on = {1: False, 415 | } 416 | 417 | self.objects = [ 418 | self.stove_object_1, 419 | self.bread_ingredient, 420 | self.pot_object, 421 | self.serving_region, 422 | self.cabinet_object, 423 | ] 424 | 425 | self.model.merge_assets(self.button_object_1) 426 | self.model.merge_assets(self.stove_object_1) 427 | self.model.merge_assets(self.serving_region) 428 | self.model.merge_assets(self.cabinet_object) 429 | # self.model.merge_assets(self.hammer_object) 430 | 431 | def _setup_references(self): 432 | """ 433 | Sets up references to important components. A reference is typically an 434 | index or a list of indices that point to the corresponding elements 435 | in a flatten array, which is how MuJoCo stores physical simulation data. 436 | """ 437 | super()._setup_references() 438 | 439 | # Additional object references from this env 440 | self.object_body_ids = dict() 441 | self.object_body_ids["stove_1"] = self.sim.model.body_name2id(self.stove_object_1.root_body) 442 | 443 | self.pot_object_id = self.sim.model.body_name2id(self.pot_object.root_body) 444 | self.button_qpos_addrs.update({1: self.sim.model.get_joint_qpos_addr(self.button_object_1.joints[0])}) 445 | self.serving_region_id = self.sim.model.body_name2id(self.serving_region.root_body) 446 | self.cabinet_qpos_addrs = self.sim.model.get_joint_qpos_addr(self.cabinet_object.joints[0]) 447 | 448 | if self.task_id in [0, 3, 4, 5]: 449 | self.sim.data.set_joint_qpos(self.button_object_1.joints[0], np.array([np.random.uniform(0.1, 0.3)])) 450 | else: 451 | self.sim.data.set_joint_qpos(self.button_object_1.joints[0], np.array([-0.4])) 452 | 453 | self.obj_body_id = {} 454 | for obj in self.objects: 455 | self.obj_body_id[obj.name] = self.sim.model.body_name2id(obj.root_body) 456 | 457 | def _setup_observables(self): 458 | """ 459 | Sets up observables to be used for this environment. 
Creates object-based observables if enabled 460 | 461 | Returns: 462 | OrderedDict: Dictionary mapping observable names to its corresponding Observable object 463 | """ 464 | observables = super()._setup_observables() 465 | 466 | observables["robot0_joint_pos"]._active = True 467 | 468 | # low-level object information 469 | if self.use_object_obs: 470 | # Get robot prefix and define observables modality 471 | pf = self.robots[0].robot_model.naming_prefix 472 | modality = "object" 473 | sensors = [] 474 | names = [s.__name__ for s in sensors] 475 | 476 | # Also append handle qpos if we're using a locked drawer version with rotatable handle 477 | 478 | # Create observables 479 | for name, s in zip(names, sensors): 480 | observables[name] = Observable( 481 | name=name, 482 | sensor=s, 483 | sampling_rate=self.control_freq, 484 | ) 485 | 486 | pf = self.robots[0].robot_model.naming_prefix 487 | modality = f"{pf}proprio" 488 | 489 | @sensor(modality="object") 490 | def world_pose_in_gripper(obs_cache): 491 | return T.pose_inv(T.pose2mat((obs_cache[f"{pf}eef_pos"], obs_cache[f"{pf}eef_quat"]))) if\ 492 | f"{pf}eef_pos" in obs_cache and f"{pf}eef_quat" in obs_cache else np.eye(4) 493 | 494 | sensors.append(world_pose_in_gripper) 495 | names.append("world_pose_in_gripper") 496 | 497 | for (i, obj) in enumerate(self.objects): 498 | obj_sensors, obj_sensor_names = self._create_obj_sensors(obj_name=obj.name, modality="object") 499 | 500 | sensors += obj_sensors 501 | names += obj_sensor_names 502 | 503 | @sensor(modality=modality) 504 | def gripper_contact(obs_cache): 505 | return self._has_gripper_contact 506 | 507 | @sensor(modality=modality) 508 | def force_norm(obs_cache): 509 | return np.linalg.norm(self.robots[0].ee_force - self.ee_force_bias) 510 | 511 | sensors += [gripper_contact, force_norm] 512 | names += [f"{pf}contact", f"{pf}eef_force_norm"] 513 | 514 | for name, s in zip(names, sensors): 515 | if name == "world_pose_in_gripper": 516 | observables[name] = Observable( 517 | name=name, 518 | sensor=s, 519 | sampling_rate=self.control_freq, 520 | enabled=True, 521 | active=False, 522 | ) 523 | else: 524 | observables[name] = Observable( 525 | name=name, 526 | sensor=s, 527 | sampling_rate=self.control_freq 528 | ) 529 | 530 | return observables 531 | 532 | def _create_obj_sensors(self, obj_name, modality="object"): 533 | """ 534 | Helper function to create sensors for a given object. This is abstracted in a separate function call so that we 535 | don't have local function naming collisions during the _setup_observables() call. 
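For an object named obj_name, the created observables are obj_name_pos, obj_name_quat,
obj_name_to_<pf>eef_pos, and obj_name_to_<pf>eef_quat, where <pf> is the robot naming
prefix (e.g. "robot0_").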
536 | 537 | Args: 538 | obj_name (str): Name of object to create sensors for 539 | modality (str): Modality to assign to all sensors 540 | 541 | Returns: 542 | 2-tuple: 543 | sensors (list): Array of sensors for the given obj 544 | names (list): array of corresponding observable names 545 | """ 546 | pf = self.robots[0].robot_model.naming_prefix 547 | 548 | @sensor(modality=modality) 549 | def obj_pos(obs_cache): 550 | return np.array(self.sim.data.body_xpos[self.obj_body_id[obj_name]]) 551 | 552 | @sensor(modality=modality) 553 | def obj_quat(obs_cache): 554 | return T.convert_quat(self.sim.data.body_xquat[self.obj_body_id[obj_name]], to="xyzw") 555 | 556 | @sensor(modality=modality) 557 | def obj_to_eef_pos(obs_cache): 558 | # Immediately return default value if cache is empty 559 | if any([name not in obs_cache for name in 560 | [f"{obj_name}_pos", f"{obj_name}_quat", "world_pose_in_gripper"]]): 561 | return np.zeros(3) 562 | obj_pose = T.pose2mat((obs_cache[f"{obj_name}_pos"], obs_cache[f"{obj_name}_quat"])) 563 | rel_pose = T.pose_in_A_to_pose_in_B(obj_pose, obs_cache["world_pose_in_gripper"]) 564 | rel_pos, rel_quat = T.mat2pose(rel_pose) 565 | obs_cache[f"{obj_name}_to_{pf}eef_quat"] = rel_quat 566 | return rel_pos 567 | 568 | @sensor(modality=modality) 569 | def obj_to_eef_quat(obs_cache): 570 | return obs_cache[f"{obj_name}_to_{pf}eef_quat"] if \ 571 | f"{obj_name}_to_{pf}eef_quat" in obs_cache else np.zeros(4) 572 | 573 | sensors = [obj_pos, obj_quat, obj_to_eef_pos, obj_to_eef_quat] 574 | names = [f"{obj_name}_pos", f"{obj_name}_quat", f"{obj_name}_to_{pf}eef_pos", f"{obj_name}_to_{pf}eef_quat"] 575 | 576 | return sensors, names 577 | 578 | 579 | def _reset_internal(self): 580 | """ 581 | Resets simulation internal configurations. 582 | """ 583 | super()._reset_internal() 584 | 585 | # Reset all object positions using initializer sampler if we're not directly loading from an xml 586 | if not self.deterministic_reset: 587 | 588 | # Sample from the placement initializer for all objects 589 | object_placements = self.placement_initializer.sample() 590 | 591 | for obj_pos, obj_quat, obj in object_placements.values(): 592 | self.sim.data.set_joint_qpos(obj.joints[0], np.concatenate([np.array(obj_pos), np.array(obj_quat)])) 593 | 594 | self.ee_force_bias = np.zeros(3) 595 | self.ee_torque_bias = np.zeros(3) 596 | self._history_force_torque = RingBuffer(dim=6, length=16) 597 | self._recent_force_torque = [] 598 | 599 | self.steps = 0 600 | 601 | def _check_success(self): 602 | """ 603 | Check if drawer has been opened. 
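For this multi-task environment, success is evaluated per task variant as a combination of
sub-goals: cabinet closed, stove turned on or off, the bread ingredient placed in the pot,
and the pot placed on the stove or in the serving region (see the task_id branches below).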
604 | 605 | Returns: 606 | bool: True if drawer has been opened 607 | """ 608 | pot_pos = self.sim.data.body_xpos[self.pot_object_id] 609 | serving_region_pos = self.sim.data.body_xpos[self.serving_region_id] 610 | dist_serving_pot = serving_region_pos - pot_pos 611 | pot_in_serving_region = np.abs(dist_serving_pot[0]) < 0.05 and np.abs(dist_serving_pot[1]) < 0.10 and np.abs(dist_serving_pot[2]) < 0.05 612 | stove_turned_off = not self.buttons_on[1] 613 | 614 | stove_range = 0.08 615 | pot_on_stove = 0.05 - stove_range < pot_pos[0] < 0.05 + stove_range and 0.20 - stove_range < pot_pos[1] < 0.20 + stove_range and 0.90 < pot_pos[2] < 0.925 616 | 617 | if not stove_turned_off: 618 | self.has_stove_turned_on = True 619 | object_in_pot = self.check_contact(self.bread_ingredient, self.pot_object) 620 | 621 | cabinet_closed = self.sim.data.qpos[self.cabinet_qpos_addrs] > -0.01 622 | 623 | if self.task_id == 0: 624 | # task 1 625 | # (stove already turned on) close the cabinet, put object in the pot, place pot on the stove 626 | return cabinet_closed and object_in_pot and pot_on_stove 627 | elif self.task_id == 1: 628 | # task 2 629 | # close the cabinet, push object to the region 630 | return cabinet_closed and stove_turned_off and object_in_pot and pot_in_serving_region 631 | elif self.task_id == 2: 632 | # task 3 633 | # turn on the button, put object in the pot, push to the region 634 | return not stove_turned_off and object_in_pot and pot_in_serving_region 635 | elif self.task_id == 3: 636 | # task 3 637 | # (stove already turned on) object in the pot, push the pot 638 | return object_in_pot and pot_in_serving_region 639 | elif self.task_id == 4: 640 | # task 1 641 | # (stove already turned on) put object in the pot, place it on the stove 642 | return object_in_pot and pot_on_stove 643 | elif self.task_id == 7: 644 | # task 2 645 | # (cabinet already closed) 646 | return cabinet_closed and stove_turned_off and object_in_pot and pot_in_serving_region 647 | # Test 648 | elif self.task_id == 5: 649 | # (stove already turned on) Close the cabinet, put object in the pot, push the pot 650 | return cabinet_closed and object_in_pot and pot_in_serving_region 651 | elif self.task_id == 6: 652 | # Turn on the button, put object in the pot, place pot on the stove 653 | return not stove_turned_off and object_in_pot and pot_on_stove 654 | elif self.task_id == 8: 655 | # task 2 656 | return cabinet_closed and stove_turned_off and object_in_pot and pot_in_serving_region 657 | 658 | return False 659 | 660 | def visualize(self, vis_settings): 661 | """ 662 | In addition to super call, visualize gripper site proportional to the distance to the drawer handle. 663 | 664 | Args: 665 | vis_settings (dict): Visualization keywords mapped to T/F, determining whether that specific 666 | component should be visualized. Should have "grippers" keyword as well as any other relevant 667 | options specified. 
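In this environment the method currently reduces to the superclass visualization; no
additional task-specific sites are drawn.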
668 | """ 669 | # Run superclass method first 670 | super().visualize(vis_settings=vis_settings) 671 | 672 | def step(self, action): 673 | if self.action_dim == 4: 674 | action = np.array(action) 675 | action = np.concatenate((action[:3], action[-1:]), axis=-1) 676 | 677 | self._recent_force_torque = [] 678 | obs, reward, done, info = super().step(action) 679 | 680 | self.steps += 1 681 | if self.task_id in [0, 1, 5, 8] and self.steps < 5: 682 | for _ in range(2): 683 | super().step(action) 684 | self.steps += 1 685 | 686 | for _ in range(3): 687 | self.sim.data.set_joint_qpos(self.cabinet_object.joints[0], np.array([-0.10])) 688 | super().step(action) 689 | self.steps += 1 690 | 691 | 692 | info["history_ft"] = np.clip(np.copy(self._history_force_torque.buf), a_min=None, a_max=2) 693 | info["recent_ft"] = np.array(self._recent_force_torque) 694 | done = self._check_success() 695 | return obs, reward, done, info 696 | 697 | def _pre_action(self, action, policy_step=False): 698 | super()._pre_action(action, policy_step=policy_step) 699 | 700 | self._history_force_torque.push(np.hstack((self.robots[0].ee_force - self.ee_force_bias, self.robots[0].ee_torque - self.ee_torque_bias))) 701 | self._recent_force_torque.append(np.hstack((self.robots[0].ee_force - self.ee_force_bias, self.robots[0].ee_torque - self.ee_torque_bias))) 702 | 703 | 704 | def _post_action(self, action): 705 | 706 | reward, done, info = super()._post_action(action) 707 | 708 | # # Check if stove is turned on or not 709 | self._post_process() 710 | 711 | if np.linalg.norm(self.ee_force_bias) == 0: 712 | self.ee_force_bias = self.robots[0].ee_force 713 | self.ee_torque_bias = self.robots[0].ee_torque 714 | 715 | return reward, done, info 716 | 717 | def _post_process(self): 718 | stoves_on = {1: False, 719 | 2: False} 720 | 721 | for i in range(1, self.num_stoves+1): 722 | if self.buttons_on[i]: 723 | if self.sim.data.qpos[self.button_qpos_addrs[i]] < 0.0: 724 | self.buttons_on[i] = False 725 | else: 726 | if self.sim.data.qpos[self.button_qpos_addrs[i]] >= 0.0: 727 | self.buttons_on[i] = True 728 | 729 | for stove_num, stove_status in self.buttons_on.items(): 730 | self.stoves[stove_num].set_sites_visibility(sim=self.sim, visible=stove_status) 731 | 732 | 733 | @property 734 | def _has_gripper_contact(self): 735 | """ 736 | Determines whether the gripper is making contact with an object, as defined by the eef force surprassing 737 | a certain threshold defined by self.contact_threshold 738 | 739 | Returns: 740 | bool: True if contact is surpasses given threshold magnitude 741 | """ 742 | # return np.linalg.norm(self.robots[0].ee_force - self.ee_force_bias) > self.contact_threshold 743 | 744 | return np.linalg.norm(self.robots[0].ee_force - self.ee_force_bias) > self.contact_threshold 745 | 746 | def get_state_vector(self, obs): 747 | return np.concatenate([obs["robot0_gripper_qpos"], 748 | obs["robot0_eef_pos"], 749 | obs["robot0_eef_quat"]]) 750 | 751 | 752 | -------------------------------------------------------------------------------- /robosuite_task_zoo/environments/manipulation/tool_use.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | import numpy as np 3 | from copy import deepcopy 4 | from robosuite.environments.manipulation.single_arm_env import SingleArmEnv 5 | 6 | from robosuite.models.arenas import TableArena 7 | from robosuite.models.objects import CylinderObject, BoxObject 8 | from robosuite.models.tasks import ManipulationTask 9 
| from robosuite.utils.placement_samplers import UniformRandomSampler, SequentialCompositeSampler 10 | from robosuite.utils.observables import Observable, sensor 11 | from robosuite.utils.mjcf_utils import CustomMaterial, array_to_string, find_elements, add_material 12 | from robosuite.utils.buffers import RingBuffer 13 | import robosuite.utils.transform_utils as T 14 | 15 | from robosuite_task_zoo.models.tool_use import LShapeTool, PotObject 16 | 17 | class ToolUseEnvBase(SingleArmEnv): 18 | """ 19 | Tool Use Env: The task is to use the L-shaped tool to fetch the cube and place it into the pot. 20 | """ 21 | def __init__( 22 | self, 23 | robots, 24 | env_configuration="default", 25 | controller_configs=None, 26 | gripper_types="default", 27 | initialization_noise="default", 28 | use_latch=False, 29 | use_camera_obs=True, 30 | use_object_obs=True, 31 | reward_scale=1.0, 32 | reward_shaping=False, 33 | placement_initializer=None, 34 | has_renderer=False, 35 | has_offscreen_renderer=True, 36 | render_camera="frontview", 37 | render_collision_mesh=False, 38 | render_visual_mesh=True, 39 | render_gpu_device_id=-1, 40 | control_freq=20, 41 | horizon=1000, 42 | ignore_done=False, 43 | hard_reset=True, 44 | camera_names="agentview", 45 | camera_heights=256, 46 | camera_widths=256, 47 | camera_depths=False, 48 | contact_threshold=2.0, 49 | cube_x_range=[0.29, 0.30], 50 | cube_y_range=[-0.14, -0.12], 51 | cube_rotation_range=(-np.pi / 2, -np.pi / 2), 52 | tool_x_range=[0.07, 0.07], 53 | tool_y_range=[-0.05, -0.05], 54 | tool_rotation_range=(0., 0.), 55 | 56 | ): 57 | # settings for table top (hardcoded since it's not an essential part of the environment) 58 | self.table_full_size = (0.8, 0.8, 0.05) 59 | self.table_offset = (-0.2, 0, 0.90) 60 | 61 | # reward configuration 62 | self.reward_scale = reward_scale 63 | self.reward_shaping = reward_shaping 64 | 65 | # whether to use ground-truth object states 66 | self.use_object_obs = use_object_obs 67 | 68 | # object placement initializer 69 | self.placement_initializer = placement_initializer 70 | 71 | # ee resets 72 | self.ee_force_bias = np.zeros(3) 73 | self.ee_torque_bias = np.zeros(3) 74 | 75 | # Thresholds 76 | self.contact_threshold = contact_threshold 77 | 78 | # History observations 79 | self._history_force_torque = None 80 | self._recent_force_torque = None 81 | 82 | self.objects = [] 83 | 84 | self.cube_x_range = cube_x_range 85 | self.cube_y_range = cube_y_range 86 | self.cube_rotation_range = cube_rotation_range 87 | 88 | self.tool_x_range = tool_x_range 89 | self.tool_y_range = tool_y_range 90 | self.tool_rotation_range = tool_rotation_range 91 | 92 | 93 | super().__init__( 94 | robots=robots, 95 | env_configuration=env_configuration, 96 | controller_configs=controller_configs, 97 | mount_types="default", 98 | gripper_types=gripper_types, 99 | initialization_noise=initialization_noise, 100 | use_camera_obs=use_camera_obs, 101 | has_renderer=has_renderer, 102 | has_offscreen_renderer=has_offscreen_renderer, 103 | render_camera=render_camera, 104 | render_collision_mesh=render_collision_mesh, 105 | render_visual_mesh=render_visual_mesh, 106 | render_gpu_device_id=render_gpu_device_id, 107 | control_freq=control_freq, 108 | horizon=horizon, 109 | ignore_done=ignore_done, 110 | hard_reset=hard_reset, 111 | camera_names=camera_names, 112 | camera_heights=camera_heights, 113 | camera_widths=camera_widths, 114 | camera_depths=camera_depths, 115 | ) 116 | 117 | def reward(self, action=None): 118 | """
119 | Reward function for the task. 120 | 121 | Sparse un-normalized reward: 122 | 123 | - a discrete reward of 1.0 is provided if the drawer is opened 124 | 125 | Un-normalized summed components if using reward shaping: 126 | 127 | - Reaching: in [0, 0.25], proportional to the distance between drawer handle and robot arm 128 | - Rotating: in [0, 0.25], proportional to angle rotated by drawer handled 129 | - Note that this component is only relevant if the environment is using the locked drawer version 130 | 131 | Note that a successfully completed task (drawer opened) will return 1.0 irregardless of whether the environment 132 | is using sparse or shaped rewards 133 | 134 | Note that the final reward is normalized and scaled by reward_scale / 1.0 as 135 | well so that the max score is equal to reward_scale 136 | 137 | Args: 138 | action (np.array): [NOT USED] 139 | 140 | Returns: 141 | float: reward value 142 | """ 143 | reward = 0. 144 | 145 | # sparse completion reward 146 | if self._check_success(): 147 | reward = 1.0 148 | 149 | # Scale reward if requested 150 | if self.reward_scale is not None: 151 | reward *= self.reward_scale / 1.0 152 | 153 | return reward 154 | 155 | def _load_model(self): 156 | """ 157 | Loads an xml model, puts it in self.model 158 | """ 159 | super()._load_model() 160 | 161 | # Adjust base pose accordingly 162 | xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0]) 163 | self.robots[0].robot_model.set_base_xpos(xpos) 164 | 165 | # load model for table top workspace 166 | mujoco_arena = TableArena( 167 | table_full_size=self.table_full_size, 168 | table_offset=self.table_offset, 169 | table_friction=(0.6, 0.005, 0.0001) 170 | ) 171 | 172 | # Arena always gets set to zero origin 173 | mujoco_arena.set_origin([0, 0, 0]) 174 | 175 | # Modify default agentview camera 176 | mujoco_arena.set_camera( 177 | camera_name="agentview", 178 | pos=[0.5386131746834771, -4.392035683362857e-09, 1.4903500240372423], 179 | quat=[0.6380177736282349, 0.3048497438430786, 0.30484986305236816, 0.6380177736282349] 180 | ) 181 | 182 | mujoco_arena.set_camera( 183 | camera_name="sideview", 184 | pos=[0.5586131746834771, 0.3, 1.2903500240372423], 185 | quat=[0.4144233167171478, 0.3100920617580414, 186 | 0.49641484022140503, 0.6968992352485657] 187 | ) 188 | 189 | 190 | # initialize objects of interest 191 | darkwood = CustomMaterial( 192 | texture="WoodDark", 193 | tex_name="darkwood", 194 | mat_name="MatDarkWood", 195 | tex_attrib={"type": "cube"}, 196 | mat_attrib={"texrepeat": "3 3", "specular": "0.4","shininess": "0.1"} 197 | ) 198 | 199 | metal = CustomMaterial( 200 | texture="Metal", 201 | tex_name="metal", 202 | mat_name="MatMetal", 203 | tex_attrib={"type": "cube"}, 204 | mat_attrib={"specular": "1", "shininess": "0.3", "rgba": "0.9 0.9 0.9 1"} 205 | ) 206 | 207 | tex_attrib = { 208 | "type": "cube" 209 | } 210 | 211 | mat_attrib = { 212 | "texrepeat": "1 1", 213 | "specular": "0.4", 214 | "shininess": "0.1" 215 | } 216 | 217 | greenwood = CustomMaterial( 218 | texture="WoodGreen", 219 | tex_name="greenwood", 220 | mat_name="greenwood_mat", 221 | tex_attrib=tex_attrib, 222 | mat_attrib=mat_attrib, 223 | ) 224 | redwood = CustomMaterial( 225 | texture="WoodRed", 226 | tex_name="redwood", 227 | mat_name="MatRedWood", 228 | tex_attrib=tex_attrib, 229 | mat_attrib=mat_attrib, 230 | ) 231 | 232 | bluewood = CustomMaterial( 233 | texture="WoodBlue", 234 | tex_name="bluewood", 235 | mat_name="handle1_mat", 236 | tex_attrib={"type": "cube"}, 237 | 
mat_attrib={"texrepeat": "1 1", "specular": "0.4", "shininess": "0.1"}, 238 | ) 239 | 240 | ceramic = CustomMaterial( 241 | texture="Ceramic", 242 | tex_name="ceramic", 243 | mat_name="MatCeramic", 244 | tex_attrib=tex_attrib, 245 | mat_attrib=mat_attrib, 246 | ) 247 | 248 | ingredient_size = [0.02, 0.025, 0.02] 249 | self.cube = BoxObject( 250 | name="cube_bread", 251 | size_min=ingredient_size, 252 | size_max=ingredient_size, 253 | rgba=[1, 0, 0, 1], 254 | material=bluewood, 255 | density=500., 256 | ) 257 | 258 | self.lshape_tool = LShapeTool( 259 | name="LShapeTool", 260 | ) 261 | self.pot_object = PotObject( 262 | name="PotObject", 263 | ) 264 | pot_object = self.pot_object.get_obj(); pot_object.set("pos", array_to_string((0.0, 0.18, self.table_offset[2] + 0.05))) 265 | 266 | 267 | self.placement_initializer = SequentialCompositeSampler(name="ObjectSampler") 268 | 269 | # Create placement initializer 270 | self.placement_initializer.append_sampler( 271 | sampler = UniformRandomSampler( 272 | name="ObjectSampler-cube", 273 | mujoco_objects=self.cube, 274 | x_range=self.cube_x_range, 275 | y_range=self.cube_y_range, 276 | rotation=self.cube_rotation_range, 277 | rotation_axis='z', 278 | ensure_object_boundary_in_range=False, 279 | ensure_valid_placement=True, 280 | reference_pos=self.table_offset, 281 | z_offset=0.01, 282 | )) 283 | 284 | self.placement_initializer.append_sampler( 285 | sampler = UniformRandomSampler( 286 | name="ObjectSampler-lshape", 287 | mujoco_objects=self.lshape_tool, 288 | x_range=self.tool_x_range, 289 | y_range=self.tool_y_range, 290 | rotation=self.tool_rotation_range, 291 | rotation_axis='z', 292 | ensure_object_boundary_in_range=False, 293 | ensure_valid_placement=True, 294 | reference_pos=self.table_offset, 295 | z_offset=0.02, 296 | )) 297 | 298 | mujoco_objects = [ 299 | self.pot_object, 300 | self.cube, 301 | self.lshape_tool 302 | ] 303 | 304 | # task includes arena, robot, and objects of interest 305 | self.model = ManipulationTask( 306 | mujoco_arena=mujoco_arena, 307 | mujoco_robots=[robot.robot_model for robot in self.robots], 308 | mujoco_objects=mujoco_objects, 309 | ) 310 | self.objects = [ 311 | self.pot_object, 312 | self.cube, 313 | self.lshape_tool 314 | ] 315 | 316 | def _setup_references(self): 317 | """ 318 | Sets up references to important components. A reference is typically an 319 | index or a list of indices that point to the corresponding elements 320 | in a flatten array, which is how MuJoCo stores physical simulation data. 321 | """ 322 | super()._setup_references() 323 | 324 | # Additional object references from this env 325 | self.object_body_ids = dict() 326 | 327 | self.pot_object_id = self.sim.model.body_name2id(self.pot_object.root_body) 328 | self.lshape_tool_id = self.sim.model.body_name2id(self.lshape_tool.root_body) 329 | self.cube_id = self.sim.model.body_name2id(self.cube.root_body) 330 | 331 | self.obj_body_id = {} 332 | for obj in self.objects: 333 | self.obj_body_id[obj.name] = self.sim.model.body_name2id(obj.root_body) 334 | 335 | def _setup_observables(self): 336 | """ 337 | Sets up observables to be used for this environment. 
Creates object-based observables if enabled 338 | 339 | Returns: 340 | OrderedDict: Dictionary mapping observable names to its corresponding Observable object 341 | """ 342 | observables = super()._setup_observables() 343 | 344 | observables["robot0_joint_pos"]._active = True 345 | 346 | # low-level object information 347 | if self.use_object_obs: 348 | # Get robot prefix and define observables modality 349 | pf = self.robots[0].robot_model.naming_prefix 350 | modality = "object" 351 | sensors = [] 352 | names = [s.__name__ for s in sensors] 353 | 354 | # Also append handle qpos if we're using a locked drawer version with rotatable handle 355 | 356 | # Create observables 357 | for name, s in zip(names, sensors): 358 | observables[name] = Observable( 359 | name=name, 360 | sensor=s, 361 | sampling_rate=self.control_freq, 362 | ) 363 | 364 | pf = self.robots[0].robot_model.naming_prefix 365 | modality = f"{pf}proprio" 366 | 367 | @sensor(modality="object") 368 | def world_pose_in_gripper(obs_cache): 369 | return T.pose_inv(T.pose2mat((obs_cache[f"{pf}eef_pos"], obs_cache[f"{pf}eef_quat"]))) if\ 370 | f"{pf}eef_pos" in obs_cache and f"{pf}eef_quat" in obs_cache else np.eye(4) 371 | 372 | sensors.append(world_pose_in_gripper) 373 | names.append("world_pose_in_gripper") 374 | 375 | @sensor(modality=modality) 376 | def gripper_contact(obs_cache): 377 | return self._has_gripper_contact 378 | 379 | @sensor(modality=modality) 380 | def force_norm(obs_cache): 381 | return np.linalg.norm(self.robots[0].ee_force - self.ee_force_bias) 382 | 383 | sensors += [gripper_contact, force_norm] 384 | names += [f"{pf}contact", f"{pf}eef_force_norm"] 385 | 386 | for name, s in zip(names, sensors): 387 | if name == "world_pose_in_gripper": 388 | observables[name] = Observable( 389 | name=name, 390 | sensor=s, 391 | sampling_rate=self.control_freq, 392 | enabled=True, 393 | active=False, 394 | ) 395 | else: 396 | observables[name] = Observable( 397 | name=name, 398 | sensor=s, 399 | sampling_rate=self.control_freq 400 | ) 401 | 402 | return observables 403 | 404 | def _create_obj_sensors(self, obj_name, modality="object"): 405 | """ 406 | Helper function to create sensors for a given object. This is abstracted in a separate function call so that we 407 | don't have local function naming collisions during the _setup_observables() call. 
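Note that, unlike MultitaskKitchenDomain, the _setup_observables implementation in this
environment does not call this helper, so these per-object observables are not registered
by default.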
408 | 409 | Args: 410 | obj_name (str): Name of object to create sensors for 411 | modality (str): Modality to assign to all sensors 412 | 413 | Returns: 414 | 2-tuple: 415 | sensors (list): List of sensors for the given object 416 | names (list): List of corresponding observable names 417 | """ 418 | pf = self.robots[0].robot_model.naming_prefix 419 | 420 | @sensor(modality=modality) 421 | def obj_pos(obs_cache): 422 | return np.array(self.sim.data.body_xpos[self.obj_body_id[obj_name]]) 423 | 424 | @sensor(modality=modality) 425 | def obj_quat(obs_cache): 426 | return T.convert_quat(self.sim.data.body_xquat[self.obj_body_id[obj_name]], to="xyzw") 427 | 428 | @sensor(modality=modality) 429 | def obj_to_eef_pos(obs_cache): 430 | # Immediately return default value if cache is empty 431 | if any([name not in obs_cache for name in 432 | [f"{obj_name}_pos", f"{obj_name}_quat", "world_pose_in_gripper"]]): 433 | return np.zeros(3) 434 | obj_pose = T.pose2mat((obs_cache[f"{obj_name}_pos"], obs_cache[f"{obj_name}_quat"])) 435 | rel_pose = T.pose_in_A_to_pose_in_B(obj_pose, obs_cache["world_pose_in_gripper"]) 436 | rel_pos, rel_quat = T.mat2pose(rel_pose) 437 | obs_cache[f"{obj_name}_to_{pf}eef_quat"] = rel_quat 438 | return rel_pos 439 | 440 | @sensor(modality=modality) 441 | def obj_to_eef_quat(obs_cache): 442 | return obs_cache[f"{obj_name}_to_{pf}eef_quat"] if \ 443 | f"{obj_name}_to_{pf}eef_quat" in obs_cache else np.zeros(4) 444 | 445 | sensors = [obj_pos, obj_quat, obj_to_eef_pos, obj_to_eef_quat] 446 | names = [f"{obj_name}_pos", f"{obj_name}_quat", f"{obj_name}_to_{pf}eef_pos", f"{obj_name}_to_{pf}eef_quat"] 447 | 448 | return sensors, names 449 | 450 | 451 | def _reset_internal(self): 452 | """ 453 | Resets simulation internal configurations. 454 | """ 455 | super()._reset_internal() 456 | 457 | # Reset all object positions using initializer sampler if we're not directly loading from an xml 458 | if not self.deterministic_reset: 459 | 460 | # Sample from the placement initializer for all objects 461 | object_placements = self.placement_initializer.sample() 462 | 463 | for obj_pos, obj_quat, obj in object_placements.values(): 464 | self.sim.data.set_joint_qpos(obj.joints[0], np.concatenate([np.array(obj_pos), np.array(obj_quat)])) 465 | 466 | self.ee_force_bias = np.zeros(3) 467 | self.ee_torque_bias = np.zeros(3) 468 | self._history_force_torque = RingBuffer(dim=6, length=16) 469 | self._recent_force_torque = [] 470 | 471 | def _check_success(self): 472 | """ 473 | Check if the cube has been placed into the pot. 474 | 475 | Returns: 476 | bool: True if the cube is resting inside the pot 477 | """ 478 | 479 | pot_pos = self.sim.data.body_xpos[self.pot_object_id] 480 | cube_pos = self.sim.data.body_xpos[self.cube_id] 481 | object_in_pot = self.check_contact(self.cube, self.pot_object) and np.linalg.norm(pot_pos[:2] - cube_pos[:2]) < 0.06 and np.abs(pot_pos[2] - cube_pos[2]) < 0.05 482 | 483 | return object_in_pot 484 | 485 | def visualize(self, vis_settings): 486 | """ 487 | Run the superclass visualization; no additional task-specific markers are drawn for this environment. 488 | 489 | Args: 490 | vis_settings (dict): Visualization keywords mapped to T/F, determining whether that specific 491 | component should be visualized. Should have "grippers" keyword as well as any other relevant 492 | options specified.
493 | """ 494 | # Run superclass method first 495 | super().visualize(vis_settings=vis_settings) 496 | 497 | def step(self, action): 498 | if self.action_dim == 4: 499 | action = np.array(action) 500 | action = np.concatenate((action[:3], action[-1:]), axis=-1) 501 | 502 | self._recent_force_torque = [] 503 | obs, reward, done, info = super().step(action) 504 | info["history_ft"] = np.clip(np.copy(self._history_force_torque.buf), a_min=None, a_max=2) 505 | info["recent_ft"] = np.array(self._recent_force_torque) 506 | done = self._check_success() 507 | return obs, reward, done, info 508 | 509 | 510 | def _pre_action(self, action, policy_step=False): 511 | super()._pre_action(action, policy_step=policy_step) 512 | 513 | self._history_force_torque.push(np.hstack((self.robots[0].ee_force - self.ee_force_bias, self.robots[0].ee_torque - self.ee_torque_bias))) 514 | self._recent_force_torque.append(np.hstack((self.robots[0].ee_force - self.ee_force_bias, self.robots[0].ee_torque - self.ee_torque_bias))) 515 | 516 | def _post_action(self, action): 517 | reward, done, info = super()._post_action(action) 518 | 519 | if np.linalg.norm(self.ee_force_bias) == 0: 520 | self.ee_force_bias = self.robots[0].ee_force 521 | self.ee_torque_bias = self.robots[0].ee_torque 522 | 523 | return reward, done, info 524 | 525 | @property 526 | def _has_gripper_contact(self): 527 | """ 528 | Determines whether the gripper is making contact with an object, as defined by the eef force surprassing 529 | a certain threshold defined by self.contact_threshold 530 | 531 | Returns: 532 | bool: True if contact is surpasses given threshold magnitude 533 | """ 534 | return np.linalg.norm(self.robots[0].ee_force - self.ee_force_bias) > self.contact_threshold 535 | 536 | 537 | def get_state_vector(self, obs): 538 | return np.concatenate([obs["robot0_gripper_qpos"], 539 | obs["robot0_eef_pos"], 540 | obs["robot0_eef_quat"]]) 541 | 542 | def _post_process(self): 543 | pass 544 | 545 | class ToolUseEnv(ToolUseEnvBase): 546 | """Hardest varaint""" 547 | def __init__(self, *args, **kwargs): 548 | kwargs["cube_x_range"] = [0.29, 0.32] 549 | kwargs["cube_y_range"] = [-0.25, -0.10] 550 | kwargs["tool_x_range"] = [0.06, 0.08] 551 | kwargs["tool_y_range"] = [-0.23, -0.03] 552 | 553 | super().__init__(*args, **kwargs) 554 | 555 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ARISE-Initiative/robosuite-task-zoo/74eab7f88214c21ca1ae8617c2b2f8d19718a9ed/robosuite_task_zoo/models/__init__.py -------------------------------------------------------------------------------- /robosuite_task_zoo/models/hammer_place/__init__.py: -------------------------------------------------------------------------------- 1 | from .cabinet_object import CabinetObject 2 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/hammer_place/cabinet.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/hammer_place/cabinet.xml~: -------------------------------------------------------------------------------- 1 | 2 
| 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/hammer_place/cabinet_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject, CompositeObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict, RED, GREEN, BLUE 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | 8 | import pathlib 9 | absolute_path = pathlib.Path(__file__).parent.absolute() 10 | 11 | 12 | class CabinetObject(MujocoXMLObject): 13 | def __init__( 14 | self, 15 | name, 16 | joints=None): 17 | 18 | super().__init__(str(absolute_path) + "/" + "cabinet.xml", 19 | name=name, joints=None, obj_type="all", duplicate_collision_geoms=True) 20 | 21 | @property 22 | def bottom_offset(self): 23 | return np.array([0, 0, -2 * self.height]) 24 | 25 | @property 26 | def top_offset(self): 27 | return np.array([0, 0, 2 * self.height]) 28 | 29 | @property 30 | def horizontal_radius(self): 31 | return self.length * np.sqrt(2) 32 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/kitchen/__init__.py: -------------------------------------------------------------------------------- 1 | from .cabinet_object import CabinetObject 2 | from .button_object import ButtonObject 3 | from .serving_region import ServingRegionObject 4 | from .stove_object import StoveObject 5 | from .pot_object import PotObject 6 | 7 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/kitchen/burnerplate.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ARISE-Initiative/robosuite-task-zoo/74eab7f88214c21ca1ae8617c2b2f8d19718a9ed/robosuite_task_zoo/models/kitchen/burnerplate.stl -------------------------------------------------------------------------------- /robosuite_task_zoo/models/kitchen/button.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 14 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/kitchen/button.xml~: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 17 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/kitchen/button_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject, CompositeObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | import pathlib 8 | absolute_path = 
pathlib.Path(__file__).parent.absolute() 9 | 10 | class ButtonObject(MujocoXMLObject): 11 | def __init__(self, name, friction=None, damping=None): 12 | super().__init__(str(absolute_path) + "/" + "button.xml", 13 | name=name, joints=None, obj_type="all", duplicate_collision_geoms=True) 14 | 15 | # Set relevant body names 16 | self.hinge_joint = self.naming_prefix + "hinge" 17 | 18 | self.friction = friction 19 | self.damping = damping 20 | 21 | def _set_friction(self, friction): 22 | """ 23 | Helper function to override the drawer friction directly in the XML 24 | 25 | Args: 26 | friction (3-tuple of float): friction parameters to override the ones specified in the XML 27 | """ 28 | hinge = find_elements(root=self.worldbody, tags="joint", attribs={"name": self.hinge_joint}, return_first=True) 29 | hinge.set("frictionloss", array_to_string(np.array([friction]))) 30 | 31 | def _set_damping(self, damping): 32 | """ 33 | Helper function to override the drawer friction directly in the XML 34 | 35 | Args: 36 | damping (float): damping parameter to override the ones specified in the XML 37 | """ 38 | hinge = find_elements(root=self.worldbody, tags="joint", attribs={"name": self.hinge_joint}, return_first=True) 39 | hinge.set("damping", array_to_string(np.array([damping]))) 40 | 41 | @property 42 | def important_sites(self): 43 | """ 44 | Returns: 45 | dict: In addition to any default sites for this object, also provides the following entries 46 | 47 | :`'handle'`: Name of drawer handle location site 48 | """ 49 | # Get dict from super call and add to it 50 | dic = super().important_sites 51 | dic.update({ 52 | "handle": self.naming_prefix + "handle" 53 | }) 54 | return dic 55 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/kitchen/cabinet.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/kitchen/cabinet_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject, CompositeObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict, RED, GREEN, BLUE 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | 8 | import pathlib 9 | absolute_path = pathlib.Path(__file__).parent.absolute() 10 | 11 | 12 | class CabinetObject(MujocoXMLObject): 13 | def __init__( 14 | self, 15 | name, 16 | joints=None): 17 | 18 | super().__init__(str(absolute_path) + "/" + "cabinet.xml", 19 | name=name, joints=None, obj_type="all", duplicate_collision_geoms=True) 20 | 21 | @property 22 | def bottom_offset(self): 23 | return np.array([0, 0, -2 * self.height]) 24 | 25 | @property 26 | def top_offset(self): 27 | return np.array([0, 0, 2 * self.height]) 28 | 29 | @property 30 | def horizontal_radius(self): 31 | return self.length * np.sqrt(2) 32 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/kitchen/pot_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import 
MujocoXMLObject, CompositeObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict, RED, GREEN, BLUE 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | 8 | 9 | class PotObject(CompositeObject): 10 | def __init__( 11 | self, 12 | name, 13 | tunnel_size=(0.1, 0.015, 0.06), 14 | density=1000, 15 | use_texture=True): 16 | 17 | self._name = name 18 | self.length = tunnel_size[1] 19 | self.height = tunnel_size[2] 20 | 21 | self.use_texture = use_texture 22 | 23 | base_args = { 24 | "total_size": self.length / 2.0, 25 | "name": self.name, 26 | "locations_relative_to_center": True, 27 | "obj_types": "all", 28 | } 29 | site_attrs = [] 30 | obj_args = {} 31 | 32 | # Long sides 33 | flat_size = [tunnel_size[0] - 0.04, tunnel_size[1], 0.005] 34 | side_size = [0.02, tunnel_size[1], tunnel_size[2] + 0.005] 35 | 36 | r = np.pi / 2 37 | 38 | geom_mat = "steel_scratched_mat" 39 | pot_width = 0.0 40 | pot_length = 0.06 41 | 42 | edge_width = 0.007 43 | geom_frictions = (0.005, 0.005, 0.0001) 44 | 45 | solref = (0.02, 1.) 46 | 47 | add_to_dict( 48 | dic=obj_args, 49 | geom_types="box", 50 | geom_locations=(0., 0., 0.0025), 51 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 52 | geom_sizes=np.array([pot_length - 0.01 - 0.007, pot_length - 0.007, 0.005]), 53 | geom_names=f"body_0", 54 | geom_rgbas=None, 55 | geom_materials=geom_mat, 56 | geom_frictions=(0.0005, 0.005, 0.0001), 57 | # geom_frictions=(0.0, 0.0, 0.0), 58 | solref=solref, 59 | ) 60 | 61 | pot_height = 0.035 62 | add_to_dict( 63 | dic=obj_args, 64 | geom_types="box", 65 | geom_locations=(pot_width, -pot_length, pot_height - 0.0025), 66 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 67 | geom_sizes=np.array([pot_length + 0.007 - 0.01, edge_width, pot_height]), 68 | geom_names=f"body_1", 69 | geom_rgbas=None, 70 | geom_materials=geom_mat, 71 | geom_frictions=geom_frictions, 72 | solref=solref, 73 | density=density) 74 | 75 | add_to_dict( 76 | dic=obj_args, 77 | geom_types="box", 78 | geom_locations=(pot_width, pot_length, pot_height - 0.0025), 79 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 80 | geom_sizes=np.array([pot_length + 0.007 - 0.01, edge_width, pot_height]), 81 | geom_names=f"body_2", 82 | geom_rgbas=None, 83 | geom_materials=geom_mat, 84 | geom_frictions=geom_frictions, 85 | solref=solref, 86 | density=density) 87 | 88 | add_to_dict( 89 | dic=obj_args, 90 | geom_types="box", 91 | geom_locations=(pot_length - 0.01, pot_width, pot_height - 0.0025), 92 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 93 | geom_sizes=np.array([0.007, pot_length - 0.007, pot_height]), 94 | geom_names=f"body_3", 95 | geom_rgbas=None, 96 | geom_materials=geom_mat, 97 | geom_frictions=geom_frictions, 98 | solref=solref, 99 | density=density) 100 | 101 | add_to_dict( 102 | dic=obj_args, 103 | geom_types="box", 104 | geom_locations=(-pot_length + 0.01, pot_width, pot_height - 0.0025), 105 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 106 | geom_sizes=np.array([0.007, pot_length - 0.007, pot_height]), 107 | geom_names=f"body_4", 108 | geom_rgbas=None, 109 | geom_materials=geom_mat, 110 | geom_frictions=geom_frictions, 111 | solref=solref, 112 | density=density) 113 | 114 | handle_radius = 0.01 115 | handle_width = 0.055 116 | handle_length = edge_width * 2 117 | handle_friction = 
1.0 118 | 119 | for (direction, y) in zip(['left', 'right'], [pot_length , -pot_length]): 120 | add_to_dict( 121 | dic=obj_args, 122 | geom_types="box", 123 | geom_locations=(0.0, y, 2 * pot_height + 0.007), 124 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 125 | geom_sizes=np.array([handle_width / 2, handle_length / 2, handle_radius]), 126 | geom_names=f"handle_{direction}_1", 127 | geom_rgbas=None, 128 | geom_materials=geom_mat, 129 | # geom_frictions=(handle_friction, 0.005, 0.0001), 130 | geom_frictions=geom_frictions, 131 | solref=solref, 132 | density=density) 133 | 134 | 135 | bottom_site = self.get_site_attrib_template() 136 | top_site = self.get_site_attrib_template() 137 | horizontal_site = self.get_site_attrib_template() 138 | 139 | bottom_site.update({ 140 | "name": "bottom", 141 | "pos": array_to_string(np.array([0., 0., -2 * self.height])), 142 | "size": "0.005", 143 | "rgba": "0 0 0 0" 144 | }) 145 | 146 | top_site.update({ 147 | "name": "top", 148 | "pos": array_to_string(np.array([0., 0., 2 * self.height])), 149 | "size": "0.005", 150 | "rgba": "0 0 0 0" 151 | }) 152 | 153 | bottom_site.update({ 154 | "name": "bottom", 155 | "pos": array_to_string(np.array([0., 0., -2 * self.height])), 156 | "size": "0.005", 157 | "rgba": "0 0 0 0" 158 | }) 159 | 160 | obj_args.update(base_args) 161 | 162 | obj_args["sites"] = site_attrs 163 | obj_args["joints"] = [{"type": "free", "damping":"0.0005"}] 164 | 165 | super().__init__(**obj_args) 166 | 167 | 168 | tex_attrib = { 169 | "type": "cube", 170 | } 171 | mat_attrib = { 172 | "texrepeat": "3 3", 173 | "specular": "0.4", 174 | "shininess": "0.1", 175 | } 176 | steel_scratched_material = CustomMaterial( 177 | texture="SteelScratched", 178 | tex_name="steel_scratched_tex", 179 | mat_name="steel_scratched_mat", 180 | tex_attrib=tex_attrib, 181 | mat_attrib=mat_attrib, 182 | ) 183 | 184 | self.append_material(steel_scratched_material) 185 | 186 | @property 187 | def bottom_offset(self): 188 | return np.array([0, 0, -2 * self.height]) 189 | 190 | @property 191 | def top_offset(self): 192 | return np.array([0, 0, 2 * self.height]) 193 | 194 | @property 195 | def horizontal_radius(self): 196 | return self.length * np.sqrt(2) 197 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/kitchen/serving_region.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict, RED, GREEN, BLUE 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | 8 | import pathlib 9 | absolute_path = pathlib.Path(__file__).parent.absolute() 10 | 11 | 12 | class ServingRegionObject(MujocoXMLObject): 13 | def __init__( 14 | self, 15 | name, 16 | joints=None): 17 | 18 | super().__init__(str(absolute_path) + "/" + "serving_region.xml", 19 | name=name, joints=None, obj_type="all", duplicate_collision_geoms=True) 20 | 21 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/kitchen/serving_region.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- 
/robosuite_task_zoo/models/kitchen/stove.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 17 | 18 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/kitchen/stove_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject, CompositeObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict, RED, GREEN, BLUE 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | 8 | import pathlib 9 | absolute_path = pathlib.Path(__file__).parent.absolute() 10 | 11 | 12 | class StoveObject(MujocoXMLObject): 13 | def __init__( 14 | self, 15 | name, 16 | joints=None): 17 | 18 | super().__init__(str(absolute_path) + "/" + "stove.xml", 19 | name=name, joints=None, obj_type="all", duplicate_collision_geoms=True) 20 | 21 | @property 22 | def bottom_offset(self): 23 | return np.array([0, 0, -2 * self.height]) 24 | 25 | @property 26 | def top_offset(self): 27 | return np.array([0, 0, 2 * self.height]) 28 | 29 | @property 30 | def horizontal_radius(self): 31 | return self.length * np.sqrt(2) 32 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/multitask_kitchen/__init__.py: -------------------------------------------------------------------------------- 1 | from .cabinet_object import CabinetObject 2 | from .button_object import ButtonObject 3 | from .serving_region import ServingRegionObject 4 | from .stove_object import StoveObject 5 | from .pot_object import PotObject 6 | 7 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/multitask_kitchen/burnerplate.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ARISE-Initiative/robosuite-task-zoo/74eab7f88214c21ca1ae8617c2b2f8d19718a9ed/robosuite_task_zoo/models/multitask_kitchen/burnerplate.stl -------------------------------------------------------------------------------- /robosuite_task_zoo/models/multitask_kitchen/button.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 14 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/multitask_kitchen/button_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject, CompositeObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | import pathlib 8 | absolute_path = pathlib.Path(__file__).parent.absolute() 9 | 10 | class ButtonObject(MujocoXMLObject): 11 | def __init__(self, name, friction=None, damping=None): 12 | # if lock: 13 | # xml_path = "objects/drawer_lock.xml" 14 | super().__init__(str(absolute_path) + "/" + "button.xml", 15 | name=name, joints=None, obj_type="all", duplicate_collision_geoms=True) 16 | 17 | # Set relevant 
body names 18 | self.hinge_joint = self.naming_prefix + "hinge" 19 | 20 | self.friction = friction 21 | self.damping = damping 22 | # if self.friction is not None: 23 | # self._set_friction(self.friction) 24 | # if self.damping is not None: 25 | # self._set_damping(self.damping) 26 | 27 | def _set_friction(self, friction): 28 | """ 29 | Helper function to override the button hinge friction directly in the XML 30 | 31 | Args: 32 | friction (3-tuple of float): friction parameters to override the ones specified in the XML 33 | """ 34 | hinge = find_elements(root=self.worldbody, tags="joint", attribs={"name": self.hinge_joint}, return_first=True) 35 | hinge.set("frictionloss", array_to_string(np.array([friction]))) 36 | 37 | def _set_damping(self, damping): 38 | """ 39 | Helper function to override the button hinge damping directly in the XML 40 | 41 | Args: 42 | damping (float): damping parameter to override the one specified in the XML 43 | """ 44 | hinge = find_elements(root=self.worldbody, tags="joint", attribs={"name": self.hinge_joint}, return_first=True) 45 | hinge.set("damping", array_to_string(np.array([damping]))) 46 | 47 | @property 48 | def important_sites(self): 49 | """ 50 | Returns: 51 | dict: In addition to any default sites for this object, also provides the following entries 52 | 53 | :`'handle'`: Name of the button handle location site 54 | """ 55 | # Get dict from super call and add to it 56 | dic = super().important_sites 57 | dic.update({ 58 | "handle": self.naming_prefix + "handle" 59 | }) 60 | return dic 61 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/multitask_kitchen/cabinet.xml: -------------------------------------------------------------------------------- [XML content not preserved in this export] -------------------------------------------------------------------------------- /robosuite_task_zoo/models/multitask_kitchen/cabinet_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject, CompositeObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict, RED, GREEN, BLUE 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | 8 | import pathlib 9 | absolute_path = pathlib.Path(__file__).parent.absolute() 10 | 11 | 12 | class CabinetObject(MujocoXMLObject): 13 | def __init__( 14 | self, 15 | name, 16 | joints=None): 17 | 18 | super().__init__(str(absolute_path) + "/" + "cabinet.xml", 19 | name=name, joints=None, obj_type="all", duplicate_collision_geoms=True) 20 | 21 | @property 22 | def bottom_offset(self): 23 | return np.array([0, 0, -2 * self.height]) 24 | 25 | @property 26 | def top_offset(self): 27 | return np.array([0, 0, 2 * self.height]) 28 | 29 | @property 30 | def horizontal_radius(self): 31 | return self.length * np.sqrt(2) 32 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/multitask_kitchen/pot_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject, CompositeObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string,
find_elements, CustomMaterial, add_to_dict, RED, GREEN, BLUE 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | 8 | 9 | class PotObject(CompositeObject): 10 | def __init__( 11 | self, 12 | name, 13 | tunnel_size=(0.1, 0.015, 0.06), 14 | density=1000, 15 | use_texture=True): 16 | 17 | self._name = name 18 | self.length = tunnel_size[1] 19 | self.height = tunnel_size[2] 20 | 21 | self.use_texture = use_texture 22 | 23 | base_args = { 24 | "total_size": self.length / 2.0, 25 | "name": self.name, 26 | "locations_relative_to_center": True, 27 | "obj_types": "all", 28 | } 29 | site_attrs = [] 30 | obj_args = {} 31 | 32 | # Long sides 33 | flat_size = [tunnel_size[0] - 0.04, tunnel_size[1], 0.005] 34 | side_size = [0.02, tunnel_size[1], tunnel_size[2] + 0.005] 35 | 36 | r = np.pi / 2 37 | 38 | geom_mat = "steel_scratched_mat" 39 | pot_width = 0.0 40 | pot_length = 0.06 41 | 42 | edge_width = 0.007 43 | geom_frictions = (0.005, 0.005, 0.0001) 44 | 45 | solref = (0.02, 1.) 46 | 47 | add_to_dict( 48 | dic=obj_args, 49 | geom_types="box", 50 | geom_locations=(0., 0., 0.0025), 51 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 52 | geom_sizes=np.array([pot_length - 0.01 - 0.007, pot_length - 0.007, 0.005]), 53 | geom_names=f"body_0", 54 | geom_rgbas=None, 55 | geom_materials=geom_mat, 56 | geom_frictions=(0.0005, 0.005, 0.0001), 57 | # geom_frictions=(0.0, 0.0, 0.0), 58 | solref=solref, 59 | ) 60 | 61 | pot_height = 0.035 62 | add_to_dict( 63 | dic=obj_args, 64 | geom_types="box", 65 | geom_locations=(pot_width, -pot_length, pot_height - 0.0025), 66 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 67 | geom_sizes=np.array([pot_length + 0.007 - 0.01, edge_width, pot_height]), 68 | geom_names=f"body_1", 69 | geom_rgbas=None, 70 | geom_materials=geom_mat, 71 | geom_frictions=geom_frictions, 72 | solref=solref, 73 | density=density) 74 | 75 | add_to_dict( 76 | dic=obj_args, 77 | geom_types="box", 78 | geom_locations=(pot_width, pot_length, pot_height - 0.0025), 79 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 80 | geom_sizes=np.array([pot_length + 0.007 - 0.01, edge_width, pot_height]), 81 | geom_names=f"body_2", 82 | geom_rgbas=None, 83 | geom_materials=geom_mat, 84 | geom_frictions=geom_frictions, 85 | solref=solref, 86 | density=density) 87 | 88 | add_to_dict( 89 | dic=obj_args, 90 | geom_types="box", 91 | geom_locations=(pot_length - 0.01, pot_width, pot_height - 0.0025), 92 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 93 | geom_sizes=np.array([0.007, pot_length - 0.007, pot_height]), 94 | geom_names=f"body_3", 95 | geom_rgbas=None, 96 | geom_materials=geom_mat, 97 | geom_frictions=geom_frictions, 98 | solref=solref, 99 | density=density) 100 | 101 | add_to_dict( 102 | dic=obj_args, 103 | geom_types="box", 104 | geom_locations=(-pot_length + 0.01, pot_width, pot_height - 0.0025), 105 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 106 | geom_sizes=np.array([0.007, pot_length - 0.007, pot_height]), 107 | geom_names=f"body_4", 108 | geom_rgbas=None, 109 | geom_materials=geom_mat, 110 | geom_frictions=geom_frictions, 111 | solref=solref, 112 | density=density) 113 | 114 | handle_radius = 0.01 115 | handle_width = 0.055 116 | handle_length = edge_width * 2 117 | handle_friction = 1.0 118 | 119 | for (direction, y) in zip(['left', 'right'], [pot_length , -pot_length]): 120 | add_to_dict( 121 | 
dic=obj_args, 122 | geom_types="box", 123 | geom_locations=(0.0, y, 2 * pot_height + 0.007), 124 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 125 | geom_sizes=np.array([handle_width / 2, handle_length / 2, handle_radius]), 126 | geom_names=f"handle_{direction}_1", 127 | geom_rgbas=None, 128 | geom_materials=geom_mat, 129 | geom_frictions=geom_frictions, 130 | solref=solref, 131 | density=density) 132 | 133 | bottom_site = self.get_site_attrib_template() 134 | top_site = self.get_site_attrib_template() 135 | horizontal_site = self.get_site_attrib_template() 136 | 137 | bottom_site.update({ 138 | "name": "bottom", 139 | "pos": array_to_string(np.array([0., 0., -2 * self.height])), 140 | "size": "0.005", 141 | "rgba": "0 0 0 0" 142 | }) 143 | 144 | top_site.update({ 145 | "name": "top", 146 | "pos": array_to_string(np.array([0., 0., 2 * self.height])), 147 | "size": "0.005", 148 | "rgba": "0 0 0 0" 149 | }) 150 | 151 | bottom_site.update({ 152 | "name": "bottom", 153 | "pos": array_to_string(np.array([0., 0., -2 * self.height])), 154 | "size": "0.005", 155 | "rgba": "0 0 0 0" 156 | }) 157 | 158 | obj_args.update(base_args) 159 | 160 | obj_args["sites"] = site_attrs 161 | obj_args["joints"] = [{"type": "free", "damping":"0.0005"}] 162 | 163 | super().__init__(**obj_args) 164 | 165 | 166 | tex_attrib = { 167 | "type": "cube", 168 | } 169 | mat_attrib = { 170 | "texrepeat": "3 3", 171 | "specular": "0.4", 172 | "shininess": "0.1", 173 | } 174 | steel_scratched_material = CustomMaterial( 175 | texture="SteelScratched", 176 | tex_name="steel_scratched_tex", 177 | mat_name="steel_scratched_mat", 178 | tex_attrib=tex_attrib, 179 | mat_attrib=mat_attrib, 180 | ) 181 | 182 | self.append_material(steel_scratched_material) 183 | 184 | @property 185 | def bottom_offset(self): 186 | return np.array([0, 0, -2 * self.height]) 187 | 188 | @property 189 | def top_offset(self): 190 | return np.array([0, 0, 2 * self.height]) 191 | 192 | @property 193 | def horizontal_radius(self): 194 | return self.length * np.sqrt(2) 195 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/multitask_kitchen/serving_region.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict, RED, GREEN, BLUE 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | 8 | import pathlib 9 | absolute_path = pathlib.Path(__file__).parent.absolute() 10 | 11 | 12 | class ServingRegionObject(MujocoXMLObject): 13 | def __init__( 14 | self, 15 | name, 16 | joints=None): 17 | 18 | super().__init__(str(absolute_path) + "/" + "serving_region.xml", 19 | name=name, joints=None, obj_type="all", duplicate_collision_geoms=True) 20 | 21 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/multitask_kitchen/serving_region.xml: -------------------------------------------------------------------------------- [XML content not preserved in this export] -------------------------------------------------------------------------------- /robosuite_task_zoo/models/multitask_kitchen/stove.xml: -------------------------------------------------------------------------------- [XML content not preserved in this export] -------------------------------------------------------------------------------- /robosuite_task_zoo/models/multitask_kitchen/stove_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject, CompositeObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict, RED, GREEN, BLUE 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | 8 | import pathlib 9 | absolute_path = pathlib.Path(__file__).parent.absolute() 10 | 11 | 12 | class StoveObject(MujocoXMLObject): 13 | def __init__( 14 | self, 15 | name, 16 | joints=None): 17 | 18 | super().__init__(str(absolute_path) + "/" + "stove.xml", 19 | name=name, joints=None, obj_type="all", duplicate_collision_geoms=True) 20 | 21 | @property 22 | def bottom_offset(self): 23 | return np.array([0, 0, -2 * self.height]) 24 | 25 | @property 26 | def top_offset(self): 27 | return np.array([0, 0, 2 * self.height]) 28 | 29 | @property 30 | def horizontal_radius(self): 31 | return self.length * np.sqrt(2) 32 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/tool_use/__init__.py: -------------------------------------------------------------------------------- 1 | from .lshape_tool import LShapeTool 2 | from .pot_object import PotObject 3 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/tool_use/lshape_tool.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject, CompositeObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict, RED, GREEN, BLUE 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | 8 | class LShapeTool(CompositeObject): 9 | def __init__( 10 | self, 11 | name, 12 | tunnel_size=(0.1, 0.015, 0.06), 13 | density=100, 14 | use_texture=True): 15 | 16 | self._name = name 17 | 18 | self.length = 0 19 | self.height = 0 20 | 21 | self.use_texture = use_texture 22 | 23 | base_args = { 24 | "total_size": self.length / 2.0, 25 | "name": self.name, 26 | "locations_relative_to_center": True, 27 | "obj_types": "all", 28 | } 29 | site_attrs = [] 30 | obj_args = {} 31 | 32 | # Long sides 33 | flat_size = [tunnel_size[0] - 0.04, tunnel_size[1], 0.005] 34 | side_size = [0.02, tunnel_size[1], tunnel_size[2] + 0.005] 35 | 36 | r = np.pi / 2 37 | 38 | geom_mat = "MatRedWood" 39 | pot_width = 0.0 40 | pot_length = 0.06 41 | 42 | edge_width = 0.007 43 | geom_frictions = (0.005, 0.005, 0.0001) 44 | 45 | solref = (0.02, 1.)
46 | 47 | add_to_dict( 48 | dic=obj_args, 49 | geom_types="box", 50 | geom_locations=(0., 0., 0.0), 51 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 52 | geom_sizes=np.array([0.11, 0.011, 0.02]), 53 | geom_names=f"body_0", 54 | geom_rgbas=None, 55 | geom_materials=geom_mat, 56 | geom_frictions=(0.0005, 0.005, 0.0001), 57 | # geom_frictions=(0.0, 0.0, 0.0), 58 | solref=solref, 59 | ) 60 | 61 | add_to_dict( 62 | dic=obj_args, 63 | geom_types="box", 64 | geom_locations=(0.11, 0.05, 0.0), 65 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 66 | geom_sizes=np.array([0.006, 0.06, 0.02]), 67 | geom_names=f"body_1", 68 | geom_rgbas=None, 69 | geom_materials=geom_mat, 70 | geom_frictions=geom_frictions, 71 | solref=solref, 72 | density=density * 0.1) 73 | 74 | bottom_site = self.get_site_attrib_template() 75 | top_site = self.get_site_attrib_template() 76 | horizontal_site = self.get_site_attrib_template() 77 | 78 | bottom_site.update({ 79 | "name": "bottom", 80 | "pos": array_to_string(np.array([0., 0., -2 * self.height])), 81 | "size": "0.005", 82 | "rgba": "0 0 0 0" 83 | }) 84 | 85 | top_site.update({ 86 | "name": "top", 87 | "pos": array_to_string(np.array([0., 0., 2 * self.height])), 88 | "size": "0.005", 89 | "rgba": "0 0 0 0" 90 | }) 91 | 92 | bottom_site.update({ 93 | "name": "bottom", 94 | "pos": array_to_string(np.array([0., 0., -2 * self.height])), 95 | "size": "0.005", 96 | "rgba": "0 0 0 0" 97 | }) 98 | 99 | obj_args.update(base_args) 100 | 101 | obj_args["sites"] = site_attrs 102 | obj_args["joints"] = [{"type": "free", "damping":"0.0005"}] 103 | 104 | super().__init__(**obj_args) 105 | 106 | 107 | tex_attrib = { 108 | "type": "cube", 109 | } 110 | mat_attrib = { 111 | "texrepeat": "3 3", 112 | "specular": "0.4", 113 | "shininess": "0.1", 114 | } 115 | steel_scratched_material = CustomMaterial( 116 | texture="WoodRed", 117 | tex_name="red_wood", 118 | mat_name="MatRedWood", 119 | tex_attrib=tex_attrib, 120 | mat_attrib=mat_attrib, 121 | ) 122 | 123 | self.append_material(steel_scratched_material) 124 | 125 | @property 126 | def bottom_offset(self): 127 | return np.array([0, 0, -2 * self.height]) 128 | 129 | @property 130 | def top_offset(self): 131 | return np.array([0, 0, 2 * self.height]) 132 | 133 | @property 134 | def horizontal_radius(self): 135 | return self.length * np.sqrt(2) 136 | -------------------------------------------------------------------------------- /robosuite_task_zoo/models/tool_use/pot_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from robosuite.models.objects import MujocoXMLObject, CompositeObject 3 | from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, find_elements, CustomMaterial, add_to_dict, RED, GREEN, BLUE 4 | from robosuite.models.objects import BoxObject 5 | import robosuite.utils.transform_utils as T 6 | 7 | 8 | 9 | class PotObject(CompositeObject): 10 | def __init__( 11 | self, 12 | name, 13 | tunnel_size=(0.1, 0.015, 0.06), 14 | density=1000, 15 | use_texture=True): 16 | 17 | self._name = name 18 | self.length = tunnel_size[1] 19 | self.height = tunnel_size[2] 20 | 21 | self.use_texture = use_texture 22 | 23 | base_args = { 24 | "total_size": self.length / 2.0, 25 | "name": self.name, 26 | "locations_relative_to_center": True, 27 | "obj_types": "all", 28 | } 29 | site_attrs = [] 30 | obj_args = {} 31 | 32 | # Long sides 33 | flat_size = [tunnel_size[0] - 0.04, tunnel_size[1], 0.005] 34 | 
side_size = [0.02, tunnel_size[1], tunnel_size[2] + 0.005] 35 | 36 | r = np.pi / 2 37 | 38 | geom_mat = "steel_scratched_mat" 39 | pot_width = 0.0 40 | pot_length = 0.06 41 | 42 | edge_width = 0.007 43 | geom_frictions = (0.005, 0.005, 0.0001) 44 | 45 | solref = (0.02, 1.) 46 | 47 | add_to_dict( 48 | dic=obj_args, 49 | geom_types="box", 50 | geom_locations=(0., 0., 0.0025), 51 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 52 | geom_sizes=np.array([pot_length - 0.01 - 0.007, pot_length - 0.007, 0.005]), 53 | geom_names=f"body_0", 54 | geom_rgbas=None, 55 | geom_materials=geom_mat, 56 | geom_frictions=(0.0005, 0.005, 0.0001), 57 | # geom_frictions=(0.0, 0.0, 0.0), 58 | solref=solref, 59 | ) 60 | 61 | pot_height = 0.035 62 | add_to_dict( 63 | dic=obj_args, 64 | geom_types="box", 65 | geom_locations=(pot_width, -pot_length, pot_height - 0.0025), 66 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 67 | geom_sizes=np.array([pot_length + 0.007 - 0.01, edge_width, pot_height]), 68 | geom_names=f"body_1", 69 | geom_rgbas=None, 70 | geom_materials=geom_mat, 71 | geom_frictions=geom_frictions, 72 | solref=solref, 73 | density=density) 74 | 75 | add_to_dict( 76 | dic=obj_args, 77 | geom_types="box", 78 | geom_locations=(pot_width, pot_length, pot_height - 0.0025), 79 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 80 | geom_sizes=np.array([pot_length + 0.007 - 0.01, edge_width, pot_height]), 81 | geom_names=f"body_2", 82 | geom_rgbas=None, 83 | geom_materials=geom_mat, 84 | geom_frictions=geom_frictions, 85 | solref=solref, 86 | density=density) 87 | 88 | add_to_dict( 89 | dic=obj_args, 90 | geom_types="box", 91 | geom_locations=(pot_length - 0.01, pot_width, pot_height - 0.0025), 92 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 93 | geom_sizes=np.array([0.007, pot_length - 0.007, pot_height]), 94 | geom_names=f"body_3", 95 | geom_rgbas=None, 96 | geom_materials=geom_mat, 97 | geom_frictions=geom_frictions, 98 | solref=solref, 99 | density=density) 100 | 101 | add_to_dict( 102 | dic=obj_args, 103 | geom_types="box", 104 | geom_locations=(-pot_length + 0.01, pot_width, pot_height - 0.0025), 105 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 106 | geom_sizes=np.array([0.007, pot_length - 0.007, pot_height]), 107 | geom_names=f"body_4", 108 | geom_rgbas=None, 109 | geom_materials=geom_mat, 110 | geom_frictions=geom_frictions, 111 | solref=solref, 112 | density=density) 113 | 114 | handle_radius = 0.01 115 | handle_width = 0.055 116 | handle_length = edge_width * 2 117 | handle_friction = 1.0 118 | 119 | for (direction, y) in zip(['left', 'right'], [pot_length , -pot_length]): 120 | add_to_dict( 121 | dic=obj_args, 122 | geom_types="box", 123 | geom_locations=(0.0, y, 2 * pot_height + 0.007), 124 | geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, 0])), to="wxyz"), 125 | geom_sizes=np.array([handle_width / 2, handle_length / 2, handle_radius]), 126 | geom_names=f"handle_{direction}_1", 127 | geom_rgbas=None, 128 | geom_materials=geom_mat, 129 | # geom_frictions=(handle_friction, 0.005, 0.0001), 130 | geom_frictions=geom_frictions, 131 | solref=solref, 132 | density=density) 133 | 134 | bottom_site = self.get_site_attrib_template() 135 | top_site = self.get_site_attrib_template() 136 | horizontal_site = self.get_site_attrib_template() 137 | 138 | bottom_site.update({ 139 | "name": "bottom", 140 | "pos": array_to_string(np.array([0., 0., -2 * 
self.height])), 141 | "size": "0.005", 142 | "rgba": "0 0 0 0" 143 | }) 144 | 145 | top_site.update({ 146 | "name": "top", 147 | "pos": array_to_string(np.array([0., 0., 2 * self.height])), 148 | "size": "0.005", 149 | "rgba": "0 0 0 0" 150 | }) 151 | 152 | bottom_site.update({ 153 | "name": "bottom", 154 | "pos": array_to_string(np.array([0., 0., -2 * self.height])), 155 | "size": "0.005", 156 | "rgba": "0 0 0 0" 157 | }) 158 | 159 | obj_args.update(base_args) 160 | 161 | obj_args["sites"] = site_attrs 162 | obj_args["joints"] = [{"type": "free", "damping":"0.0005"}] 163 | 164 | super().__init__(**obj_args) 165 | 166 | 167 | tex_attrib = { 168 | "type": "cube", 169 | } 170 | mat_attrib = { 171 | "texrepeat": "3 3", 172 | "specular": "0.4", 173 | "shininess": "0.1", 174 | } 175 | steel_scratched_material = CustomMaterial( 176 | texture="SteelScratched", 177 | tex_name="steel_scratched_tex", 178 | mat_name="steel_scratched_mat", 179 | tex_attrib=tex_attrib, 180 | mat_attrib=mat_attrib, 181 | ) 182 | 183 | self.append_material(steel_scratched_material) 184 | 185 | @property 186 | def bottom_offset(self): 187 | return np.array([0, 0, -2 * self.height]) 188 | 189 | @property 190 | def top_offset(self): 191 | return np.array([0, 0, 2 * self.height]) 192 | 193 | @property 194 | def horizontal_radius(self): 195 | return self.length * np.sqrt(2) 196 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | # read the contents of your README file 4 | from os import path 5 | this_directory = path.abspath(path.dirname(__file__)) 6 | with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: 7 | lines = f.readlines() 8 | long_description = ''.join(lines) 9 | 10 | print([ 11 | package for package in find_packages() if package.startswith("robosuite") 12 | ]) 13 | 14 | setup( 15 | name="robosuite_task_zoo", 16 | packages=[ 17 | package for package in find_packages() if package.startswith("robosuite") 18 | ], 19 | install_requires=[ 20 | "numpy>=1.13.3", 21 | "numba>=0.49.1", 22 | "scipy>=1.2.3", 23 | "mujoco-py>=2.0.2.9", 24 | ], 25 | eager_resources=['*'], 26 | include_package_data=True, 27 | python_requires='>=3', 28 | description="robosuite task zoo", 29 | author="Yuke Zhu", 30 | url="https://github.com/ARISE-Initiative/robosuite-task-zoo", 31 | author_email="yukez@cs.utexas.edu", 32 | version="0.1", 33 | long_description=long_description, 34 | long_description_content_type='text/markdown' 35 | ) 36 | --------------------------------------------------------------------------------
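The model classes collected above are plain robosuite `MujocoXMLObject` / `CompositeObject` subclasses, so they can be constructed on their own for a quick sanity check before being wired into an environment. Below is a minimal sketch that is not part of the repository; it assumes robosuite 1.x and this package are installed (`pip install -e .`), the object names passed to the constructors are arbitrary, and `naming_prefix` / `get_obj()` are the standard robosuite `MujocoObject` accessors.

``` python
# Minimal usage sketch (illustrative only, not repository code): build two of the
# tool_use models standalone and inspect the MJCF they generate.
from robosuite_task_zoo.models.tool_use import LShapeTool, PotObject

pot = PotObject(name="pot")        # CompositeObject assembled from box geoms
tool = LShapeTool(name="lshape")   # two-box L-shaped pushing tool

# Every robosuite MujocoObject exposes a naming prefix (prepended to all geom,
# site, and joint names) and its generated XML root element.
print(pot.naming_prefix, pot.horizontal_radius)
print(tool.naming_prefix, len(tool.get_obj()))  # number of child XML elements
```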