├── .gitignore
├── LICENSE
├── README.md
├── package.xml
├── requirements.txt
├── scripts
│   ├── convonet_setup.py
│   ├── generate_data_s2m.sh
│   ├── generate_data_seg.py
│   └── generate_data_seg_test.py
├── setup.py
└── src
    └── s2u
        ├── perception.py
        ├── simulation.py
        └── utils
            ├── __init__.py
            ├── axis2transform.py
            ├── btsim.py
            ├── implicit.py
            ├── io.py
            ├── libmesh
            │   ├── .gitignore
            │   ├── __init__.py
            │   ├── inside_mesh.py
            │   └── triangle_hash.pyx
            ├── mesh2sdf.py
            ├── misc.py
            ├── parallel_exec.py
            ├── saver.py
            ├── transform.py
            ├── urdfEditor.py
            └── visual.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | /data
132 | /notebooks
133 | .DS_Store
134 | .vscode
135 | outputs
136 | *.swp
137 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 UT Robot Perception and Learning Lab
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # [Ditto](https://ut-austin-rpl.github.io/Ditto/) data generation
2 |
3 | This repo contains the code for data generation of *Ditto: Building Digital Twins of Articulated Objects from Interaction*. Given the URDF file of an articulated object, we spawn it in a PyBullet simulation and interact with it by directly manipulating its joint state. We collect multi-view depth observations of the object before and after the interaction, as well as ground-truth occupancy and segmentation data.
4 |
5 | ## Installation
6 |
7 | 1. `pip install -r requirements.txt`
8 | 2. `pip install -e .`
9 | 3. `python scripts/convonet_setup.py build_ext --inplace`
10 |
11 | ## URDF preparation
12 |
13 | Download Shape2Motion\_urdfs.zip from [here](https://utexas.box.com/s/dx8kanv7zs7rdmu7kehc804vsp2gw25z), unzip it, and put it under `data/urdfs`. If you use this data, remember to cite the [Shape2Motion](https://arxiv.org/abs/1903.03911) paper.
14 |
15 | ## Generate data
16 |
17 | You can either run `scripts/generate_data_s2m.sh` directly or run the commands in that script one at a time; an example is shown below.
18 |
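19 | For example, the first command in that script generates 1,000 faucet training scenes across 50 worker processes:
20 |
21 | ```
22 | python scripts/generate_data_seg.py --object-set Shape2Motion/faucet/train data/Shape2Motion/faucet_train_1K --num-scenes 1000 --pos-rot 0 --global-scaling 0.6 --num-proc 50 --sample-method mix --dense-photo
23 | ```
24 |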
25 | ## Citation
26 |
27 | If you find this repo useful, please cite
28 |
29 | ```
30 | @inproceedings{jiang2022ditto,
31 | title={Ditto: Building Digital Twins of Articulated Objects from Interaction},
32 | author={Jiang, Zhenyu and Hsu, Cheng-Chun and Zhu, Yuke},
33 | booktitle={arXiv preprint arXiv:2202.08227},
34 | year={2022}
35 | }
36 | ```
37 |
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package format="2">
3 |   <name>s2u</name>
4 |   <version>0.1.0</version>
5 |   <description>Ditto data generation</description>
6 |
7 |   <maintainer>Zhenyu Jiang</maintainer>
8 |
9 |   <license>BSD</license>
10 |   <url>https://github.com/UT-Austin-RPL/Ditto_data_gen</url>
11 |
12 |   <buildtool_depend>catkin</buildtool_depend>
13 |
14 | </package>
15 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | scipy
3 | matplotlib
4 | open3d
5 | pybullet==2.7.9
6 | tqdm
7 | catkin_pkg
8 | trimesh
9 | urdfpy
10 | cython
11 | treelib
12 |
--------------------------------------------------------------------------------
/scripts/convonet_setup.py:
--------------------------------------------------------------------------------
1 | try:
2 | from setuptools import setup
3 | except ImportError:
4 | from distutils.core import setup
5 | from distutils.extension import Extension
6 | from Cython.Build import cythonize
7 | from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
8 | import numpy
9 |
10 |
11 | # Get the numpy include directory.
12 | numpy_include_dir = numpy.get_include()
13 |
14 | # Extensions
15 | # triangle hash (efficient mesh intersection)
16 | triangle_hash_module = Extension(
17 | 'src.s2u.utils.libmesh.triangle_hash',
18 | sources=[
19 | 'src/s2u/utils/libmesh/triangle_hash.pyx'
20 | ],
21 | libraries=['m'], # Unix-like specific
22 | include_dirs=[numpy_include_dir]
23 | )
24 | # Gather all extension modules
25 | ext_modules = [
26 | triangle_hash_module,
27 | ]
28 |
29 | setup(
30 | ext_modules=cythonize(ext_modules),
31 | cmdclass={
32 | 'build_ext': BuildExtension
33 | }
34 | )
35 |
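36 | # Note: run this script from the repo root, e.g.
37 | # `python scripts/convonet_setup.py build_ext --inplace` (see README),
38 | # since the extension sources above are referenced relative to the repo root.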
--------------------------------------------------------------------------------
/scripts/generate_data_s2m.sh:
--------------------------------------------------------------------------------
1 | python scripts/generate_data_seg.py --object-set Shape2Motion/faucet/train data/Shape2Motion/faucet_train_1K --num-scenes 1000 --pos-rot 0 --global-scaling 0.6 --num-proc 50 --sample-method mix --dense-photo
2 | python scripts/generate_data_seg.py --object-set Shape2Motion/faucet/val data/Shape2Motion/faucet_val_50 --num-scenes 50 --pos-rot 0 --global-scaling 0.6 --num-proc 25 --sample-method uniform --dense-photo --canonical
3 | python scripts/generate_data_seg_test.py --object-set Shape2Motion/faucet/test data/Shape2Motion/faucet_test_standard --pos-rot 0 --global-scaling 0.6 --dense-photo --mp
4 |
5 | python scripts/generate_data_seg.py --object-set Shape2Motion/oven/train data/Shape2Motion/oven_train_1K --num-scenes 1000 --pos-rot 1 --global-scaling 0.5 --num-proc 50 --sample-method mix --dense-photo
6 | python scripts/generate_data_seg.py --object-set Shape2Motion/oven/test data/Shape2Motion/oven_test_50 --num-scenes 50 --pos-rot 1 --global-scaling 0.5 --num-proc 25 --sample-method uniform --dense-photo --canonical
7 | python scripts/generate_data_seg_test.py --object-set Shape2Motion/oven/test data/Shape2Motion/oven_test_standard --pos-rot 1 --global-scaling 0.5 --dense-photo --mp
8 |
9 | python scripts/generate_data_seg.py --object-set Shape2Motion/cabinet/train data/Shape2Motion/cabinet_train_1K --num-scenes 1000 --pos-rot 1 --global-scaling 0.8 --num-proc 50 --sample-method mix --dense-photo
10 | python scripts/generate_data_seg.py --object-set Shape2Motion/cabinet/test data/Shape2Motion/cabinet_test_50 --num-scenes 50 --pos-rot 1 --global-scaling 0.8 --num-proc 25 --sample-method uniform --dense-photo --canonical
11 | python scripts/generate_data_seg_test.py --object-set Shape2Motion/cabinet/test data/Shape2Motion/cabinet_test_standard --pos-rot 1 --global-scaling 0.8 --dense-photo --mp
12 |
13 | python scripts/generate_data_seg.py --object-set Shape2Motion/laptop/train data/Shape2Motion/laptop_train_1K --num-scenes 1000 --pos-rot 0 --global-scaling 0.8 --num-proc 50 --sample-method mix --dense-photo
14 | python scripts/generate_data_seg.py --object-set Shape2Motion/laptop/test data/Shape2Motion/laptop_test_50 --num-scenes 50 --pos-rot 0 --global-scaling 0.8 --num-proc 25 --sample-method uniform --dense-photo --canonical
15 | python scripts/generate_data_seg_test.py --object-set Shape2Motion/laptop/test data/Shape2Motion/laptop_test_standard --pos-rot 0 --global-scaling 0.8 --dense-photo --mp
16 |
--------------------------------------------------------------------------------
/scripts/generate_data_seg.py:
--------------------------------------------------------------------------------
1 | import trimesh
2 | import argparse
3 | from pathlib import Path
4 |
5 | import numpy as np
6 | from tqdm import tqdm
7 | import multiprocessing as mp
8 |
11 |
12 | from s2u.simulation import ArticulatedObjectManipulationSim
13 | from s2u.utils.axis2transform import axis2transformation
14 | from s2u.utils.saver import get_mesh_pose_dict_from_world
15 | from s2u.utils.visual import as_mesh
16 | from s2u.utils.implicit import sample_iou_points_occ
17 | from s2u.utils.io import write_data
18 |
19 |
20 | def binary_occ(occ_list, idx):
21 | occ_fore = occ_list.pop(idx)
22 | occ_back = np.zeros_like(occ_fore)
23 | for o in occ_list:
24 | occ_back += o
25 | return occ_fore, occ_back
26 |
27 |
28 | def sample_occ(sim, num_point, method, var=0.005):
29 | result_dict = get_mesh_pose_dict_from_world(sim.world, False)
30 | obj_name = str(sim.object_urdfs[sim.object_idx])
31 | obj_name = '/'.join(obj_name.split('/')[-4:-1])
32 | mesh_dict = {}
33 | whole_scene = trimesh.Scene()
34 | for k, v in result_dict.items():
35 | scene = trimesh.Scene()
36 | for mesh_path, scale, pose in v:
37 | mesh = trimesh.load(mesh_path)
38 | mesh.apply_scale(scale)
39 | mesh.apply_transform(pose)
40 | scene.add_geometry(mesh)
41 | whole_scene.add_geometry(mesh)
42 | mesh_dict[k] = as_mesh(scene)
43 | points_occ, occ_list = sample_iou_points_occ(mesh_dict.values(),
44 | whole_scene.bounds,
45 | num_point,
46 | method,
47 | var=var)
48 | return points_occ, occ_list
49 |
50 | def sample_occ_binary(sim, mobile_links, num_point, method, var=0.005):
51 | result_dict = get_mesh_pose_dict_from_world(sim.world, False)
52 | new_dict = {'0_0': [], '0_1': []}
53 | obj_name = str(sim.object_urdfs[sim.object_idx])
54 | obj_name = '/'.join(obj_name.split('/')[-4:-1])
55 | whole_scene = trimesh.Scene()
56 | static_scene = trimesh.Scene()
57 | mobile_scene = trimesh.Scene()
58 | for k, v in result_dict.items():
59 | body_uid, link_index = k.split('_')
60 | link_index = int(link_index)
61 | if link_index in mobile_links:
62 | new_dict['0_1'] += v
63 | else:
64 | new_dict['0_0'] += v
65 | for mesh_path, scale, pose in v:
66 | if mesh_path.startswith('#'): # primitive
67 | mesh = trimesh.creation.box(extents=scale, transform=pose)
68 | else:
69 | mesh = trimesh.load(mesh_path)
70 | mesh.apply_scale(scale)
71 | mesh.apply_transform(pose)
72 | if link_index in mobile_links:
73 | mobile_scene.add_geometry(mesh)
74 | else:
75 | static_scene.add_geometry(mesh)
76 | whole_scene.add_geometry(mesh)
77 | static_mesh = as_mesh(static_scene)
78 | mobile_mesh = as_mesh(mobile_scene)
79 | points_occ, occ_list = sample_iou_points_occ((static_mesh, mobile_mesh),
80 | whole_scene.bounds,
81 | num_point,
82 | method,
83 | var=var)
84 | return points_occ, occ_list, new_dict
85 |
86 |
87 | def main(args, rank):
88 |
89 | np.random.seed()
90 | seed = np.random.randint(0, 1000) + rank
91 | np.random.seed(seed)
92 | sim = ArticulatedObjectManipulationSim(args.object_set,
93 | size=0.3,
94 | gui=args.sim_gui,
95 | global_scaling=args.global_scaling,
96 | dense_photo=args.dense_photo)
97 | scenes_per_worker = args.num_scenes // args.num_proc
98 | pbar = tqdm(total=scenes_per_worker, disable=rank != 0)
99 |
100 | if rank == 0:
101 | print(f'Number of objects: {len(sim.object_urdfs)}')
102 |
103 | for _ in range(scenes_per_worker):
104 |
105 | sim.reset(canonical=args.canonical)
106 | object_path = str(sim.object_urdfs[sim.object_idx])
107 |
108 |
109 | result = collect_observations(
110 | sim, args)
111 |
112 | result['object_path'] = object_path
113 |
114 | write_data(args.root, result)
115 |
116 | pbar.update()
117 |
118 | pbar.close()
119 | print('Process %d finished!' % rank)
120 |
121 |
122 | def get_limit(v, args):
123 | joint_type = v[2]
124 | # specify revolute angle range for shape2motion
125 | if joint_type == 0 and not args.is_syn:
126 | if args.pos_rot:
127 | lower_limit = 0
128 | range_lim = np.pi / 2
129 | higher_limit = np.pi / 2
130 | else:
131 | lower_limit = - np.pi / 4
132 | range_lim = np.pi / 2
133 | higher_limit = np.pi / 4
134 | else:
135 | lower_limit = v[8]
136 | higher_limit = v[9]
137 | range_lim = higher_limit - lower_limit
138 | return lower_limit, higher_limit, range_lim
139 |
140 | def collect_observations(sim, args):
141 | if args.is_syn:
142 | joint_info = sim.get_joint_info_w_sub()
143 | else:
144 | joint_info = sim.get_joint_info()
145 | all_joints = list(joint_info.keys())
146 | joint_index = all_joints.pop(np.random.randint(len(all_joints)))
147 |
148 | if args.rand_state:
149 | for x in all_joints:
150 | v = joint_info[x]
151 | if args.is_syn:
152 | v = v[0]
153 | lower_limit, higher_limit, range_lim = get_limit(v, args)
154 | start_state = np.random.uniform(lower_limit, higher_limit)
155 | sim.set_joint_state(x, start_state)
156 |
157 | v = joint_info[joint_index]
158 | if args.is_syn:
159 | v = v[0]
160 | axis, moment = sim.get_joint_screw(joint_index)
161 | joint_type = v[2]
162 | # not set
163 | lower_limit, higher_limit, range_lim = get_limit(v, args)
164 |
165 | move_range = np.random.uniform(range_lim * args.range_scale, range_lim)
166 | start_state = np.random.uniform(lower_limit, higher_limit - move_range)
167 | end_state = start_state + move_range
168 | if np.random.uniform(0, 1) > 0.5:
169 | start_state, end_state = end_state, start_state
170 |
171 | sim.set_joint_state(joint_index, start_state)
172 |
173 | if args.is_syn:
174 | _, start_pc, start_seg_label, start_mesh_pose_dict = sim.acquire_segmented_pc(6, joint_info[joint_index][1])
175 | start_p_occ, start_occ_list, start_mesh_pose_dict = sample_occ_binary(sim, joint_info[joint_index][1], args.num_point_occ, args.sample_method, args.occ_var)
176 | else:
177 | _, start_pc, start_seg_label, start_mesh_pose_dict = sim.acquire_segmented_pc(6, [joint_index])
178 | start_p_occ, start_occ_list = sample_occ(sim, args.num_point_occ, args.sample_method, args.occ_var)
179 | # canonicalize start pc
180 | axis, moment = sim.get_joint_screw(joint_index)
181 | state_change = end_state - start_state
182 | if joint_type == 0:
183 | transformation = axis2transformation(axis, np.cross(axis, moment), state_change)
184 | else:
185 | transformation = np.eye(4)
186 | transformation[:3, 3] = axis * state_change
187 |
188 | mobile_start_pc = start_pc[start_seg_label].copy()
189 | rotated = transformation[:3, :3].dot(mobile_start_pc.T) + transformation[:3, [3]]
190 | rotated = rotated.T
191 | canonical_start_pc = start_pc.copy()
192 | canonical_start_pc[start_seg_label] = rotated
193 |
194 | sim.set_joint_state(joint_index, end_state)
195 |
196 | if args.is_syn:
197 | _, end_pc, end_seg_label, end_mesh_pose_dict = sim.acquire_segmented_pc(6, joint_info[joint_index][1])
198 | end_p_occ, end_occ_list, end_mesh_pose_dict = sample_occ_binary(sim, joint_info[joint_index][1], args.num_point_occ, args.sample_method, args.occ_var)
199 | else:
200 | _, end_pc, end_seg_label, end_mesh_pose_dict = sim.acquire_segmented_pc(6, [joint_index])
201 | end_p_occ, end_occ_list = sample_occ(sim, args.num_point_occ, args.sample_method, args.occ_var)
202 | # canonicalize end pc
203 | axis, moment = sim.get_joint_screw(joint_index)
204 | state_change = start_state - end_state
205 | if joint_type == 0:
206 | transformation = axis2transformation(axis, np.cross(axis, moment), state_change)
207 | else:
208 | transformation = np.eye(4)
209 | transformation[:3, 3] = axis * state_change
210 | mobile_end_pc = end_pc[end_seg_label].copy()
211 | rotated = transformation[:3, :3].dot(mobile_end_pc.T) + transformation[:3, [3]]
212 | rotated = rotated.T
213 | canonical_end_pc = end_pc.copy()
214 | canonical_end_pc[end_seg_label] = rotated
215 |
216 | result = {
217 | 'pc_start': start_pc,
218 | 'pc_start_end': canonical_start_pc,
219 | 'pc_seg_start': start_seg_label,
220 | 'pc_end': end_pc,
221 | 'pc_end_start': canonical_end_pc,
222 | 'pc_seg_end': end_seg_label,
223 | 'state_start': start_state,
224 | 'state_end': end_state,
225 | 'screw_axis': axis,
226 | 'screw_moment': moment,
227 | 'joint_type': joint_type,
228 | 'joint_index': 1 if args.is_syn else joint_index,
229 | 'start_p_occ': start_p_occ,
230 | 'start_occ_list': start_occ_list,
231 | 'end_p_occ': end_p_occ,
232 | 'end_occ_list': end_occ_list,
233 | 'start_mesh_pose_dict': start_mesh_pose_dict,
234 | 'end_mesh_pose_dict': end_mesh_pose_dict
235 | }
236 |
237 | return result
238 |
239 |
240 | if __name__ == "__main__":
241 | parser = argparse.ArgumentParser()
242 | parser.add_argument("root", type=Path)
243 | parser.add_argument("--object-set", type=str)
244 | parser.add_argument("--num-scenes", type=int, default=10000)
245 | parser.add_argument("--num-proc", type=int, default=1)
246 | parser.add_argument("--sim-gui", action="store_true")
247 | parser.add_argument("--range-scale", type=float, default=0.3)
248 | parser.add_argument("--num-point-occ", type=int, default=100000)
249 | parser.add_argument("--occ-var", type=float, default=0.005)
250 | parser.add_argument("--pos-rot", type=int, required=True)
251 | parser.add_argument("--canonical", action="store_true")
252 | parser.add_argument("--sample-method", type=str, default='mix')
253 | parser.add_argument("--rand-state", action="store_true", help='set static joints at random state')
254 | parser.add_argument("--global-scaling", type=float, default=0.5)
255 | parser.add_argument("--dense-photo", action="store_true")
256 |
257 |
258 | args = parser.parse_args()
259 | if 'syn' in args.object_set:
260 | args.is_syn = True
261 | else:
262 | args.is_syn = False
263 | (args.root / "scenes").mkdir(parents=True)
264 | if args.num_proc > 1:
265 | #print(args.num_proc)
266 | pool = mp.get_context("spawn").Pool(processes=args.num_proc)
267 | for i in range(args.num_proc):
268 | pool.apply_async(func=main, args=(args, i))
269 | pool.close()
270 | pool.join()
271 | else:
272 | main(args, 0)
273 |
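274 | # Note on canonicalization (see collect_observations): for a revolute joint
275 | # (joint_type == 0) the mobile part is rotated about the joint's screw axis,
276 | # through the point cross(axis, moment), by the state change; for a prismatic
277 | # joint the transform is a pure translation by axis * state_change.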
--------------------------------------------------------------------------------
/scripts/generate_data_seg_test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | import trimesh
4 | import argparse
5 | from pathlib import Path
6 |
7 | import numpy as np
8 | from tqdm import tqdm
9 | import multiprocessing as mp
10 |
13 |
14 | from s2u.simulation import ArticulatedObjectManipulationSim
15 | from s2u.utils.axis2transform import axis2transformation
16 | from s2u.utils.saver import get_mesh_pose_dict_from_world
17 | from s2u.utils.visual import as_mesh
18 | from s2u.utils.implicit import sample_iou_points_occ
19 | from s2u.utils.io import write_data
20 |
21 |
22 |
23 | def binary_occ(occ_list, idx):
24 | occ_fore = occ_list.pop(idx)
25 | occ_back = np.zeros_like(occ_fore)
26 | for o in occ_list:
27 | occ_back += o
28 | return occ_fore, occ_back
29 |
30 |
31 | def sample_occ(sim, num_point, method, var=0.005):
32 | result_dict = get_mesh_pose_dict_from_world(sim.world, False)
33 | obj_name = str(sim.object_urdfs[sim.object_idx])
34 | obj_name = '/'.join(obj_name.split('/')[-4:-1])
35 | mesh_dict = {}
36 | whole_scene = trimesh.Scene()
37 | for k, v in result_dict.items():
38 | scene = trimesh.Scene()
39 | for mesh_path, scale, pose in v:
40 | mesh = trimesh.load(mesh_path)
41 | mesh.apply_scale(scale)
42 | mesh.apply_transform(pose)
43 | scene.add_geometry(mesh)
44 | whole_scene.add_geometry(mesh)
45 | mesh_dict[k] = as_mesh(scene)
46 | points_occ, occ_list = sample_iou_points_occ(mesh_dict.values(),
47 | whole_scene.bounds,
48 | num_point,
49 | method,
50 | var=var)
51 | return points_occ, occ_list
52 |
53 | def sample_occ_binary(sim, mobile_links, num_point, method, var=0.005):
54 | result_dict = get_mesh_pose_dict_from_world(sim.world, False)
55 | new_dict = {'0_0': [], '0_1': []}
56 | obj_name = str(sim.object_urdfs[sim.object_idx])
57 | obj_name = '/'.join(obj_name.split('/')[-4:-1])
58 | whole_scene = trimesh.Scene()
59 | static_scene = trimesh.Scene()
60 | mobile_scene = trimesh.Scene()
61 | for k, v in result_dict.items():
62 | body_uid, link_index = k.split('_')
63 | link_index = int(link_index)
64 | if link_index in mobile_links:
65 | new_dict['0_1'] += v
66 | else:
67 | new_dict['0_0'] += v
68 | for mesh_path, scale, pose in v:
69 | if mesh_path.startswith('#'): # primitive
70 | mesh = trimesh.creation.box(extents=scale, transform=pose)
71 | else:
72 | mesh = trimesh.load(mesh_path)
73 | mesh.apply_scale(scale)
74 | mesh.apply_transform(pose)
75 | if link_index in mobile_links:
76 | mobile_scene.add_geometry(mesh)
77 | else:
78 | static_scene.add_geometry(mesh)
79 | whole_scene.add_geometry(mesh)
80 | static_mesh = as_mesh(static_scene)
81 | mobile_mesh = as_mesh(mobile_scene)
82 | points_occ, occ_list = sample_iou_points_occ((static_mesh, mobile_mesh),
83 | whole_scene.bounds,
84 | num_point,
85 | method,
86 | var=var)
87 | return points_occ, occ_list, new_dict
88 |
89 |
90 | def main(args, obj_idx_list):
91 | sim = ArticulatedObjectManipulationSim(args.object_set,
92 | size=0.3,
93 | gui=args.sim_gui,
94 | global_scaling=args.global_scaling,
95 | dense_photo=args.dense_photo)
96 | for idx in obj_idx_list:
97 |
98 | sim.reset(idx, canonical=True)
99 | object_path = str(sim.object_urdfs[sim.object_idx])
100 |
101 |
102 | results = collect_observations(
103 | sim, args)
104 |
105 | for result in results:
106 | result['object_path'] = object_path
107 | write_data(args.root, result)
108 | print(f'Object {idx} finished! Write {len(results)} items!')
109 |
110 |
111 | def get_limit(v, args):
112 | joint_type = v[2]
113 | # specify revolute angle range for shape2motion
114 | if joint_type == 0 and not args.is_syn:
115 | if args.pos_rot:
116 | lower_limit = 0
117 | range_lim = np.pi / 2
118 | higher_limit = np.pi / 2
119 | else:
120 | lower_limit = - np.pi / 4
121 | range_lim = np.pi / 2
122 | higher_limit = np.pi / 4
123 | else:
124 | lower_limit = v[8]
125 | higher_limit = v[9]
126 | range_lim = higher_limit - lower_limit
127 | return lower_limit, higher_limit, range_lim
128 |
129 | def collect_observations(sim, args):
130 | if args.is_syn:
131 | joint_info = sim.get_joint_info_w_sub()
132 | else:
133 | joint_info = sim.get_joint_info()
134 | all_joints = list(joint_info.keys())
135 | results = []
136 | for joint_index in all_joints:
137 | for tmp_index in all_joints:
138 | if tmp_index != joint_index:
139 | if args.rand_state:
140 | v = joint_info[tmp_index][0] if args.is_syn else joint_info[tmp_index]
141 | lower_limit, higher_limit, range_lim = get_limit(v, args)
142 | start_state = np.random.uniform(lower_limit, higher_limit)
143 | sim.set_joint_state(tmp_index, start_state)
144 | else:
145 | sim.set_joint_state(tmp_index, 0)
146 |
147 | v = joint_info[joint_index]
148 | if args.is_syn:
149 | v = v[0]
150 | axis, moment = sim.get_joint_screw(joint_index)
151 | joint_type = v[2]
152 | # not set
153 | lower_limit, higher_limit, range_lim = get_limit(v, args)
154 |
155 | start_state = higher_limit - range_lim / 4
156 | end_state = lower_limit + range_lim / 4
157 |
158 | if np.random.uniform(0, 1) > 0.5:
159 | start_state, end_state = end_state, start_state
160 |
161 | sim.set_joint_state(joint_index, start_state)
162 | if args.is_syn:
163 | _, start_pc, start_seg_label, start_mesh_pose_dict = sim.acquire_segmented_pc(6, joint_info[joint_index][1])
164 | start_p_occ, start_occ_list, start_mesh_pose_dict = sample_occ_binary(sim, joint_info[joint_index][1], args.num_point_occ, args.sample_method, args.occ_var)
165 | else:
166 | _, start_pc, start_seg_label, start_mesh_pose_dict = sim.acquire_segmented_pc(6, [joint_index])
167 | start_p_occ, start_occ_list = sample_occ(sim, args.num_point_occ, args.sample_method, args.occ_var)
168 | # canonicalize start pc
169 | axis, moment = sim.get_joint_screw(joint_index)
170 | state_change = end_state - start_state
171 | if joint_type == 0:
172 | transformation = axis2transformation(axis, np.cross(axis, moment), state_change)
173 | else:
174 | transformation = np.eye(4)
175 | transformation[:3, 3] = axis * state_change
176 |
177 | mobile_start_pc = start_pc[start_seg_label].copy()
178 | rotated = transformation[:3, :3].dot(mobile_start_pc.T) + transformation[:3, [3]]
179 | rotated = rotated.T
180 | canonical_start_pc = start_pc.copy()
181 | canonical_start_pc[start_seg_label] = rotated
182 |
183 | sim.set_joint_state(joint_index, end_state)
184 | if args.is_syn:
185 | _, end_pc, end_seg_label, end_mesh_pose_dict = sim.acquire_segmented_pc(6, joint_info[joint_index][1])
186 | end_p_occ, end_occ_list, end_mesh_pose_dict = sample_occ_binary(sim, joint_info[joint_index][1], args.num_point_occ, args.sample_method, args.occ_var)
187 | else:
188 | _, end_pc, end_seg_label, end_mesh_pose_dict = sim.acquire_segmented_pc(6, [joint_index])
189 | end_p_occ, end_occ_list = sample_occ(sim, args.num_point_occ, args.sample_method, args.occ_var)
190 | # canonicalize end pc
191 | axis, moment = sim.get_joint_screw(joint_index)
192 | state_change = start_state - end_state
193 | if joint_type == 0:
194 | transformation = axis2transformation(axis, np.cross(axis, moment), state_change)
195 | else:
196 | transformation = np.eye(4)
197 | transformation[:3, 3] = axis * state_change
198 | mobile_end_pc = end_pc[end_seg_label].copy()
199 | rotated = transformation[:3, :3].dot(mobile_end_pc.T) + transformation[:3, [3]]
200 | rotated = rotated.T
201 | canonical_end_pc = end_pc.copy()
202 | canonical_end_pc[end_seg_label] = rotated
203 |
204 | result = {
205 | 'pc_start': start_pc,
206 | 'pc_start_end': canonical_start_pc,
207 | 'pc_seg_start': start_seg_label,
208 | 'pc_end': end_pc,
209 | 'pc_end_start': canonical_end_pc,
210 | 'pc_seg_end': end_seg_label,
211 | 'state_start': start_state,
212 | 'state_end': end_state,
213 | 'screw_axis': axis,
214 | 'screw_moment': moment,
215 | 'joint_type': joint_type,
216 | 'joint_index': 1 if args.is_syn else joint_index,
217 | 'start_p_occ': start_p_occ,
218 | 'start_occ_list': start_occ_list,
219 | 'end_p_occ': end_p_occ,
220 | 'end_occ_list': end_occ_list,
221 | 'start_mesh_pose_dict': start_mesh_pose_dict,
222 | 'end_mesh_pose_dict': end_mesh_pose_dict
223 | }
224 | results.append(result)
225 |
226 | return results
227 |
228 |
229 | if __name__ == "__main__":
230 | parser = argparse.ArgumentParser()
231 | parser.add_argument("root", type=Path)
232 | parser.add_argument("--object-set", type=str)
233 | parser.add_argument("--sim-gui", action="store_true")
234 | parser.add_argument("--num-point-occ", type=int, default=100000)
235 | parser.add_argument("--occ-var", type=float, default=0.005)
236 | parser.add_argument("--sample-method", type=str, default='uniform')
237 | parser.add_argument("--rand-state", action="store_true", help='set static joints at random state')
238 | parser.add_argument("--global-scaling", type=float, default=0.5)
239 | parser.add_argument("--dense-photo", action="store_true")
240 | parser.add_argument("--mp", action="store_true", help='mulitprocess')
241 | parser.add_argument("--pos-rot", type=int, required=True)
242 |
243 |
244 | args = parser.parse_args()
245 | if 'syn' in args.object_set:
246 | args.is_syn = True
247 | else:
248 | args.is_syn = False
249 |
250 | num_proc = len(glob.glob(os.path.join('data/urdfs', args.object_set, '*', '*.urdf')))
251 | print(f'Number of objects: {num_proc}')
252 | (args.root / "scenes").mkdir(parents=True)
253 |
254 | if args.mp:
255 | pool = mp.get_context("spawn").Pool(processes=num_proc)
256 | for i in range(num_proc):
257 | pool.apply_async(func=main, args=(args, [i]))
258 | pool.close()
259 | pool.join()
260 | else:
261 | main(args, range(num_proc))
262 |
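263 | # Note: with --mp, one spawned worker handles exactly one object index;
264 | # without it, a single process iterates over all object indices sequentially.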
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD!
2 |
3 | from distutils.core import setup
4 |
5 | from catkin_pkg.python_setup import generate_distutils_setup
6 |
7 | # Fetch values from package.xml.
8 | setup_args = generate_distutils_setup(
9 | packages=["s2u", "s2u.utils",], package_dir={"": "src"},
10 | )
11 |
12 | setup(**setup_args)
13 |
--------------------------------------------------------------------------------
/src/s2u/perception.py:
--------------------------------------------------------------------------------
1 | from math import cos, sin
2 | import time
3 |
4 | import numpy as np
5 | import open3d as o3d
6 |
7 | from s2u.utils.transform import Transform
8 |
9 |
10 | class CameraIntrinsic(object):
11 | """Intrinsic parameters of a pinhole camera model.
12 |
13 | Attributes:
14 | width (int): The width in pixels of the camera.
15 | height (int): The height in pixels of the camera.
16 | K: The intrinsic camera matrix.
17 | """
18 |
19 | def __init__(self, width, height, fx, fy, cx, cy):
20 | self.width = width
21 | self.height = height
22 | self.K = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
23 |
24 | @property
25 | def fx(self):
26 | return self.K[0, 0]
27 |
28 | @property
29 | def fy(self):
30 | return self.K[1, 1]
31 |
32 | @property
33 | def cx(self):
34 | return self.K[0, 2]
35 |
36 | @property
37 | def cy(self):
38 | return self.K[1, 2]
39 |
40 | def to_dict(self):
41 | """Serialize intrinsic parameters to a dict object."""
42 | data = {
43 | "width": self.width,
44 | "height": self.height,
45 | "K": self.K.flatten().tolist(),
46 | }
47 | return data
48 |
49 | @classmethod
50 | def from_dict(cls, data):
51 | """Deserialize intrinisic parameters from a dict object."""
52 | intrinsic = cls(
53 | width=data["width"],
54 | height=data["height"],
55 | fx=data["K"][0],
56 | fy=data["K"][4],
57 | cx=data["K"][2],
58 | cy=data["K"][5],
59 | )
60 | return intrinsic
61 |
62 |
63 | class TSDFVolume(object):
64 | """Integration of multiple depth images using a TSDF."""
65 |
66 | def __init__(self, size, resolution, color_type=o3d.pipelines.integration.TSDFVolumeColorType.NoColor):
67 | self.size = size
68 | self.resolution = resolution
69 | self.voxel_size = self.size / self.resolution
70 | self.sdf_trunc = 4 * self.voxel_size
71 |
72 | self._volume = o3d.pipelines.integration.UniformTSDFVolume(
73 | length=self.size,
74 | resolution=self.resolution,
75 | sdf_trunc=self.sdf_trunc,
76 | color_type=color_type,
77 | )
78 |
79 | def integrate(self, depth_img, intrinsic, extrinsic):
80 | """
81 | Args:
82 | depth_img: The depth image.
83 | intrinsic: The intrinsic parameters of a pinhole camera model.
84 | extrinsic: The transform from the TSDF to camera coordinates, T_eye_task.
85 | """
86 | rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
87 | o3d.geometry.Image(np.empty_like(depth_img)),
88 | o3d.geometry.Image(depth_img),
89 | depth_scale=1.0,
90 | depth_trunc=2.0,
91 | convert_rgb_to_intensity=False,
92 | )
93 | intrinsic = o3d.camera.PinholeCameraIntrinsic(
94 | width=intrinsic.width,
95 | height=intrinsic.height,
96 | fx=intrinsic.fx,
97 | fy=intrinsic.fy,
98 | cx=intrinsic.cx,
99 | cy=intrinsic.cy,
100 | )
101 | extrinsic = extrinsic.as_matrix()
102 | self._volume.integrate(rgbd, intrinsic, extrinsic)
103 |
104 | def integrate_rgb(self, depth_img, rgb, intrinsic, extrinsic):
105 | """
106 | Args:
107 | depth_img: The depth image.
108 | intrinsic: The intrinsic parameters of a pinhole camera model.
109 | extrinsic: The transform from the TSDF to camera coordinates, T_eye_task.
110 | """
111 | rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
112 | o3d.geometry.Image(rgb),
113 | o3d.geometry.Image(depth_img),
114 | depth_scale=1.0,
115 | depth_trunc=2.0,
116 | convert_rgb_to_intensity=False,
117 | )
118 | intrinsic = o3d.camera.PinholeCameraIntrinsic(
119 | width=intrinsic.width,
120 | height=intrinsic.height,
121 | fx=intrinsic.fx,
122 | fy=intrinsic.fy,
123 | cx=intrinsic.cx,
124 | cy=intrinsic.cy,
125 | )
126 | extrinsic = extrinsic.as_matrix()
127 | self._volume.integrate(rgbd, intrinsic, extrinsic)
128 |
129 | def get_grid(self):
130 | # TODO(mbreyer) very slow (~35 ms / 50 ms of the whole pipeline)
131 | shape = (1, self.resolution, self.resolution, self.resolution)
132 | tsdf_grid = np.zeros(shape, dtype=np.float32)
133 | voxels = self._volume.extract_voxel_grid().get_voxels()
134 | for voxel in voxels:
135 | i, j, k = voxel.grid_index
136 | tsdf_grid[0, i, j, k] = voxel.color[0]
137 | return tsdf_grid
138 |
139 | def get_cloud(self):
140 | return self._volume.extract_point_cloud()
141 |
142 |
143 | def create_tsdf(size, resolution, depth_imgs, intrinsic, extrinsics):
144 | tsdf = TSDFVolume(size, resolution)
145 | for i in range(depth_imgs.shape[0]):
146 | extrinsic = Transform.from_list(extrinsics[i])
147 | tsdf.integrate(depth_imgs[i], intrinsic, extrinsic)
148 | return tsdf
149 |
150 |
151 | def camera_on_sphere(origin, radius, theta, phi):
152 | eye = np.r_[
153 | radius * sin(theta) * cos(phi),
154 | radius * sin(theta) * sin(phi),
155 | radius * cos(theta),
156 | ]
157 | target = np.array([0.0, 0.0, 0.0])
158 | up = np.array([0.0, 0.0, 1.0]) # this breaks when looking straight down
159 | return Transform.look_at(eye, target, up) * origin.inverse()
160 |
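161 | # Usage sketch (assumes depth_imgs is an (n, H, W) float array and extrinsics
162 | # holds n poses in the list form accepted by Transform.from_list):
163 | #   tsdf = create_tsdf(0.3, 120, depth_imgs, intrinsic, extrinsics)
164 | #   cloud = tsdf.get_cloud()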
--------------------------------------------------------------------------------
/src/s2u/simulation.py:
--------------------------------------------------------------------------------
1 | import json
2 | import trimesh
3 | from collections import OrderedDict
4 | from pathlib import Path
5 | import numpy as np
6 | from treelib import Node, Tree
7 | import pybullet
8 |
9 | from s2u.utils import btsim, workspace_lines
10 | from s2u.perception import *
11 | from s2u.utils.transform import Transform, Rotation, get_transform
12 | from s2u.utils.saver import get_mesh_pose_dict_from_world
13 | from s2u.utils.visual import as_mesh
14 |
15 |
16 | class ArticulatedObjectManipulationSim(object):
17 | def __init__(self, object_set, size=0.3, global_scaling=0.5, gui=True, seed=None, add_noise=False, name_list=None, dense_photo=False, urdf_root=None):
18 |
19 | self.size = size
20 | if urdf_root is None:
21 | self.urdf_root = Path('data/urdfs')
22 | else:
23 | self.urdf_root = Path(urdf_root)
24 | self.object_set = object_set
25 | self.discover_objects(name_list)
26 |
27 | self.gui = gui
28 | self.global_scaling = global_scaling
29 | self.dense_photo = dense_photo
30 |
31 | self.rng = np.random.RandomState(seed) if seed else np.random
32 | self.world = btsim.BtWorld(self.gui)
33 | #intrinsic = CameraIntrinsic(640, 480, 540.0, 540.0, 320.0, 240.0)
34 | intrinsic = CameraIntrinsic(1280, 960, 1080.0, 1080.0, 640.0, 480.0)
35 | self.camera = self.world.add_camera(intrinsic, 0.1, 2.0)
36 |
37 | def save_state(self):
38 | self._snapshot_id = self.world.save_state()
39 |
40 | def restore_state(self):
41 | self.world.restore_state(self._snapshot_id)
42 |
43 | def discover_objects(self, name_list=None):
44 | root = self.urdf_root / self.object_set
45 | self.object_urdfs = []
46 | self.object_bbox = []
47 | if name_list is None:
48 | for f in root.iterdir():
49 | if not f.is_dir():
50 | continue
51 | for f_tmp in f.iterdir():
52 | if f_tmp.suffix == ".urdf":
53 | self.object_urdfs.append(f_tmp)
54 | if f_tmp.name == "bounding_box.json":
55 | with open(f_tmp, 'r') as fp:
56 | d_tmp = json.load(fp)
57 | self.object_bbox.append(np.array((d_tmp['min'], d_tmp['max'])))
58 | else:
59 | for name in name_list:
60 | f = root / name
61 | if not f.is_dir():
62 | continue
63 | for f_tmp in f.iterdir():
64 | if f_tmp.suffix == ".urdf":
65 | self.object_urdfs.append(f_tmp)
66 | if f_tmp.name == "bounding_box.json":
67 | with open(f_tmp, 'r') as fp:
68 | d_tmp = json.load(fp)
69 | self.object_bbox.append(np.array((d_tmp['min'], d_tmp['max'])))
70 |
71 |
72 | def reset(self, index=None, canonical=False):
73 | self.world.reset()
74 | self.world.set_gravity([0.0, 0.0, -9.81])
75 | self.draw_workspace()
76 |
77 | if self.gui:
78 | self.world.p.resetDebugVisualizerCamera(
79 | cameraDistance=self.size * 2,
80 | cameraYaw=-60,
81 | cameraPitch=-45,
82 | cameraTargetPosition=[0, 0, 0],
83 | )
84 |
85 | table_height = 0.05
86 | # self.place_table(table_height)
87 | self.generate_scene(table_height, index, canonical)
88 |
89 | def draw_workspace(self):
90 | points = workspace_lines(self.size)
91 | color = [0.5, 0.5, 0.5]
92 | for i in range(0, len(points), 2):
93 | self.world.p.addUserDebugLine(
94 | lineFromXYZ=points[i], lineToXYZ=points[i + 1], lineColorRGB=color
95 | )
96 |
97 | def place_table(self, height):
98 | urdf = self.urdf_root / "setup" / "plane.urdf"
99 | pose = Transform(Rotation.identity(), [self.size / 2, self.size / 2, height])
100 | self.world.load_urdf(urdf, pose, scale=self.size)
101 |
102 | def generate_scene(self, table_height, index=None, canonical=False):
103 | # drop objects
104 | if index is None:
105 | idx = self.rng.choice(np.arange(len(self.object_urdfs)), size=1)[0]
106 | else:
107 | idx = index
108 |
109 | self.object_idx = idx
110 | urdf = self.object_urdfs[idx]
111 | bbox = self.object_bbox[idx]
112 | obj_scale = bbox[1] - bbox[0]
113 | obj_scale = np.sqrt((obj_scale ** 2).sum())
114 | # import pdb; pdb.set_trace()
115 | if canonical:
116 | scale = self.global_scaling * self.size / obj_scale
117 | new_bbox = bbox * scale
118 | angle = 0
119 | rotation = Rotation.identity()
120 | xy = np.ones(2) * self.size / 2.0
121 | z = self.size / 2 - new_bbox[:, 2].mean()
122 | pose = Transform(rotation, np.r_[xy, z])
123 | else:
124 | scale = self.global_scaling * self.rng.uniform(0.8, 1.0) * self.size / obj_scale
125 | new_bbox = bbox * scale
126 |
127 | angle = self.rng.uniform(0.0, 2.0 * np.pi)
128 | rotation = Rotation.from_rotvec(angle * np.r_[0.0, 0.0, 1.0])
129 | #xy = self.rng.uniform(1.0 / 3.0 * self.size, 2.0 / 3.0 * self.size, 2)
130 | xy = np.ones(2) * self.size / 2.0
131 | z = self.size / 2 - new_bbox[:, 2].mean()
132 | pose = Transform(rotation, np.r_[xy, z])
133 |
134 | self.object = self.world.load_urdf(urdf, pose, scale=scale, useFixedBase=True)
135 | # self.wait_for_objects_to_rest(timeout=1.0)
136 |
137 | def get_joint_info(self):
138 | num_joints = self.world.p.getNumJoints(self.object.uid)
139 | joint_info = OrderedDict()
140 | for joint_idx in range(num_joints):
141 | v = self.world.p.getJointInfo(self.object.uid, joint_idx)
142 | if v[2] in [0, 1]: # revolute or prismatic
143 | joint_info[joint_idx] = v
144 | return joint_info
145 |
146 | def get_joint_info_w_sub(self):
147 | tree = Tree()
148 | tree.create_node('base', -1)
149 | joint_info = OrderedDict()
150 | num_joints = self.world.p.getNumJoints(self.object.uid)
151 | mobile_roots = []
152 | for joint_idx in range(num_joints):
153 | v = self.world.p.getJointInfo(self.object.uid, joint_idx)
154 | parent = v[-1]
155 | joint_name = v[1].decode('UTF-8')
156 | tree.create_node(joint_name, v[0], parent=parent, data=v)
157 | if v[2] in [0, 1]:
158 | mobile_roots.append(v[0])
159 | for node in mobile_roots:
160 | sub_nodes = tuple(tree.expand_tree(node))
161 | joint_info[node] = (tree.get_node(node).data, sub_nodes)
162 | return joint_info
163 |
164 | def set_joint_state(self, joint_index, target_value):
165 | self.world.p.resetJointState(self.object.uid, joint_index, target_value)
166 |
167 | def get_joint_screw(self, joint_index):
168 | joint_info_dict = self.get_joint_info()
169 | v = joint_info_dict[joint_index]
170 | joint_type = v[2]
171 | joint_axis = v[-4]
172 | joint_pos_parent = v[-3]
173 | joint_ori_parent = v[-2]
174 | parent_index = v[-1]
175 | if parent_index == -1: # baselink
176 | parent_link_state = self.world.p.getBasePositionAndOrientation(self.object.uid)
177 | else:
178 | parent_link_state = self.world.p.getLinkState(self.object.uid, parent_index)
179 | parent_link_trans = get_transform(parent_link_state[0], parent_link_state[1])
180 | relative_trans = get_transform(joint_pos_parent, joint_ori_parent)
181 | axis_trans = parent_link_trans * relative_trans
182 | axis_global = axis_trans.rotation.as_matrix().dot(joint_axis)
183 | axis_global /= np.sqrt(np.sum(axis_global ** 2))
184 | point_on_axis = axis_trans.translation
185 | moment = np.cross(point_on_axis, axis_global)
186 | return axis_global, moment
187 |
188 | def get_joint_state(self, joint_index):
189 | return self.world.p.getJointState(self.object.uid, joint_index)
190 |
191 | def acquire_tsdf(self, n, N=None):
192 | """Render synthetic depth images from n viewpoints and integrate into a TSDF.
193 | If N is None, the n viewpoints are equally distributed on a circular trajectory.
194 | If N is given, the first n viewpoints on a circular trajectory consisting of N points are rendered.
195 | """
196 |
197 | mesh_pose_dict = get_mesh_pose_dict_from_world(self.world, False)
198 | # tsdf = TSDFVolume(self.size, 40)
199 | high_res_tsdf = TSDFVolume(self.size, 120)
200 |
201 | origin = Transform(Rotation.identity(), np.r_[self.size / 2, self.size / 2, self.size / 2])
202 | r = 1.2 * self.size
203 | extrinsics = []
204 |
205 | if self.dense_photo:
206 |
207 | theta = np.pi / 8.0
208 | N = N if N else n
209 | phi_list = 2.0 * np.pi * np.arange(n) / N
210 | extrinsics += [camera_on_sphere(origin, r, theta, phi) for phi in phi_list]
211 |
212 | theta = np.pi / 4.0
213 | N = N if N else n
214 | phi_list = 2.0 * np.pi * np.arange(n) / N + 2.0 * np.pi / (N * 3)
215 | extrinsics += [camera_on_sphere(origin, r, theta, phi) for phi in phi_list]
216 |
217 | theta = np.pi / 2.0
218 | N = N if N else n
219 | phi_list = 2.0 * np.pi * np.arange(n) / N + 4.0 * np.pi / (N * 3)
220 | extrinsics += [camera_on_sphere(origin, r, theta, phi) for phi in phi_list]
221 | else:
222 | theta = np.pi / 4.0
223 | N = N if N else n
224 | phi_list = 2.0 * np.pi * np.arange(n) / N
225 | extrinsics += [camera_on_sphere(origin, r, theta, phi) for phi in phi_list]
226 |
227 | timing = 0.0
228 | depth_imgs = []
229 | for extrinsic in extrinsics:
230 | depth_img = self.camera.render(extrinsic)[1]
231 | high_res_tsdf.integrate(depth_img, self.camera.intrinsic, extrinsic)
232 | depth_imgs.append(depth_img)
233 | pc = high_res_tsdf.get_cloud()
234 | pc = np.asarray(pc.points)
235 |
236 | return depth_imgs, pc, mesh_pose_dict
237 |
238 |
239 | def acquire_segmented_pc(self, n, mobile_links, N=None):
240 | """Render synthetic depth images from n viewpoints and integrate into a TSDF.
241 | If N is None, the n viewpoints are equally distributed on a circular trajectory.
242 | If N is given, the first n viewpoints on a circular trajectory consisting of N points are rendered.
243 | """
244 |
245 | mesh_pose_dict = get_mesh_pose_dict_from_world(self.world, False)
246 | # tsdf = TSDFVolume(self.size, 40)
247 | high_res_tsdf = TSDFVolume(self.size, 120, color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)
248 |
249 | origin = Transform(Rotation.identity(), np.r_[self.size / 2, self.size / 2, self.size / 2])
250 | r = 1.2 * self.size
251 | extrinsics = []
252 |
253 | if self.dense_photo:
254 |
255 | theta = np.pi / 8.0
256 | N = N if N else n
257 | phi_list = 2.0 * np.pi * np.arange(n) / N
258 | extrinsics += [camera_on_sphere(origin, r, theta, phi) for phi in phi_list]
259 |
260 | theta = np.pi / 4.0
261 | N = N if N else n
262 | phi_list = 2.0 * np.pi * np.arange(n) / N + 2.0 * np.pi / (N * 3)
263 | extrinsics += [camera_on_sphere(origin, r, theta, phi) for phi in phi_list]
264 |
265 | theta = np.pi / 2.0
266 | N = N if N else n
267 | phi_list = 2.0 * np.pi * np.arange(n) / N + 4.0 * np.pi / (N * 3)
268 | extrinsics += [camera_on_sphere(origin, r, theta, phi) for phi in phi_list]
269 | else:
270 | theta = np.pi / 4.0
271 | N = N if N else n
272 | phi_list = 2.0 * np.pi * np.arange(n) / N
273 | extrinsics += [camera_on_sphere(origin, r, theta, phi) for phi in phi_list]
274 |
275 | timing = 0.0
276 | depth_imgs = []
277 | for extrinsic in extrinsics:
278 | rgb_img, depth_img, (seg_uid, seg_link) = self.camera.render(extrinsic, flags=pybullet.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX)
279 | seg_uid += 1
280 | seg_link_mobile = np.zeros_like(seg_link)
281 | for l in mobile_links:
282 | seg_link_mobile += seg_link == l
283 | seg_rgb = np.logical_and(seg_uid.astype(bool), seg_link_mobile)
284 | seg_rgb = (seg_rgb.astype(np.float32) * 255).astype(np.uint8)
285 | seg_rgb = np.stack([seg_rgb] * 3, axis=-1)
286 | # import pdb; pdb.set_trace()
287 | high_res_tsdf.integrate_rgb(depth_img, seg_rgb, self.camera.intrinsic, extrinsic)
288 | depth_imgs.append(depth_img)
289 | tmp = high_res_tsdf._volume.extract_point_cloud()
290 | pc = np.asarray(tmp.points)
291 | colors = np.asarray(tmp.colors)
292 | colors = np.mean(colors, axis=1)
293 | seg_mask = colors > 0.5
294 | return depth_imgs, pc, seg_mask, mesh_pose_dict
295 |
296 | def acquire_link_pc(self, link, num_points=2048):
297 | result_dict = get_mesh_pose_dict_from_world(self.world, False)
298 | scene = trimesh.Scene()
299 | for mesh_path, scale, pose in result_dict[f'0_{link}']:
300 | if mesh_path.startswith('#'): # primitive
301 | mesh = trimesh.creation.box(extents=scale, transform=pose)
302 | else:
303 | mesh = trimesh.load(mesh_path)
304 | mesh.apply_scale(scale)
305 | mesh.apply_transform(pose)
306 | scene.add_geometry(mesh)
307 | scene_mesh = as_mesh(scene)
308 | pc, _ = trimesh.sample.sample_surface(scene_mesh, num_points)
309 | return pc
310 |
311 | def acquire_full_pc(self, num_points=2048):
312 | result_dict = get_mesh_pose_dict_from_world(self.world, False)
313 | scene = trimesh.Scene()
314 | for k, v in result_dict.items():
315 | for mesh_path, scale, pose in v:
316 | if mesh_path.startswith('#'): # primitive
317 | mesh = trimesh.creation.box(extents=scale, transform=pose)
318 | else:
319 | mesh = trimesh.load(mesh_path)
320 | mesh.apply_scale(scale)
321 | mesh.apply_transform(pose)
322 | scene.add_geometry(mesh)
323 | scene_mesh = as_mesh(scene)
324 | pc, _ = trimesh.sample.sample_surface(scene_mesh, num_points)
325 | return pc, result_dict
326 |
327 | def wait_for_objects_to_rest(self, timeout=2.0, tol=0.01):
328 | timeout = self.world.sim_time + timeout
329 | objects_resting = False
330 | while not objects_resting and self.world.sim_time < timeout:
331 | # simulate a quarter of a second
332 | for _ in range(60):
333 | self.world.step()
334 | # check whether all objects are resting
335 | objects_resting = True
336 | for _, body in self.world.bodies.items():
337 | if np.linalg.norm(body.get_velocity()) > tol:
338 | objects_resting = False
339 | break
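340 |
341 | # Usage sketch (assumes URDFs under data/urdfs/<object_set>, e.g. the
342 | # Shape2Motion sets used in scripts/generate_data_s2m.sh):
343 | #   sim = ArticulatedObjectManipulationSim('Shape2Motion/faucet/train', gui=False)
344 | #   sim.reset()
345 | #   joint_index = next(iter(sim.get_joint_info()))
346 | #   axis, moment = sim.get_joint_screw(joint_index)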
--------------------------------------------------------------------------------
/src/s2u/utils/__init__.py:
--------------------------------------------------------------------------------
1 | def workspace_lines(size):
2 | return [
3 | [0.0, 0.0, 0.0],
4 | [size, 0.0, 0.0],
5 | [size, 0.0, 0.0],
6 | [size, size, 0.0],
7 | [size, size, 0.0],
8 | [0.0, size, 0.0],
9 | [0.0, size, 0.0],
10 | [0.0, 0.0, 0.0],
11 | [0.0, 0.0, size],
12 | [size, 0.0, size],
13 | [size, 0.0, size],
14 | [size, size, size],
15 | [size, size, size],
16 | [0.0, size, size],
17 | [0.0, size, size],
18 | [0.0, 0.0, size],
19 | [0.0, 0.0, 0.0],
20 | [0.0, 0.0, size],
21 | [size, 0.0, 0.0],
22 | [size, 0.0, size],
23 | [size, size, 0.0],
24 | [size, size, size],
25 | [0.0, size, 0.0],
26 | [0.0, size, size],
27 | ]
28 |
29 |
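30 | # The 24 points above are consumed pairwise by draw_workspace in
31 | # s2u/simulation.py, producing the 12 edges of a cube with side length `size`.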
--------------------------------------------------------------------------------
/src/s2u/utils/axis2transform.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def skew(vector: np.ndarray) -> np.ndarray:
4 | # vector: 3
5 | result = np.zeros((3, 3))
6 | result[0, 1] = -vector[2]
7 | result[0, 2] = vector[1]
8 | result[1, 0] = vector[2]
9 | result[1, 2] = -vector[0]
10 | result[2, 0] = -vector[1]
11 | result[2, 1] = vector[0]
12 | return result
13 |
14 | def rotation_matrix_from_axis(axis: np.ndarray, theta: float) -> np.ndarray:
15 | R = np.eye(3) * np.cos(theta)
16 | R += skew(axis) * np.sin(theta)
17 | R += (1 - np.cos(theta)) * np.outer(axis, axis)
18 | return R
19 |
20 |
21 | def axis2transformation(axis, center, theta):
22 | rot = rotation_matrix_from_axis(axis, theta)
23 | translation = - rot.dot(center) + center
24 | trans_mat = np.eye(4)
25 | trans_mat[:3, :3] = rot
26 | trans_mat[:3, 3] = translation
27 | return trans_mat
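28 |
29 | # rotation_matrix_from_axis implements Rodrigues' rotation formula,
30 | # R = I cos(theta) + [axis]_x sin(theta) + (1 - cos(theta)) axis axis^T, and
31 | # axis2transformation lifts it to a 4x4 transform that rotates about the axis
32 | # through the point `center`. Example sketch: rotating (1, 0, 0) about the
33 | # z-axis through the origin by pi/2 gives approximately (0, 1, 0):
34 | #   T = axis2transformation(np.array([0., 0., 1.]), np.zeros(3), np.pi / 2)
35 | #   T[:3, :3] @ np.array([1., 0., 0.]) + T[:3, 3]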
--------------------------------------------------------------------------------
/src/s2u/utils/btsim.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import pickle
4 | import numpy as np
5 | import pybullet
6 | from pybullet_utils import bullet_client
7 |
8 | from s2u.utils.transform import Rotation, Transform
9 | from s2u.utils.saver import get_mesh_pose_dict_from_world
10 |
11 | #assert pybullet.isNumpyEnabled(), "Pybullet needs to be built with NumPy"
12 |
13 |
14 | class BtWorld(object):
15 | """Interface to a PyBullet physics server.
16 |
17 | Attributes:
18 | dt: Time step of the physics simulation.
19 | rtf: Real time factor. If negative, the simulation is run as fast as possible.
20 | sim_time: Virtual time elapsed since the last simulation reset.
21 | """
22 |
23 | def __init__(self, gui=True, save_dir=None, save_freq=8):
24 | connection_mode = pybullet.GUI if gui else pybullet.DIRECT
25 | self.p = bullet_client.BulletClient(connection_mode)
26 |
27 | self.gui = gui
28 | self.dt = 1.0 / 240.0
29 | self.solver_iterations = 150
30 | self.save_dir = save_dir
31 | self.save_freq = save_freq
32 | self.sim_step = 0
33 |
34 | self.reset()
35 |
36 | def set_gravity(self, gravity):
37 | self.p.setGravity(*gravity)
38 |
39 | def load_urdf(self, urdf_path, pose, scale=1.0, useFixedBase=False):
40 | body = Body.from_urdf(self.p, urdf_path, pose, scale, useFixedBase)
41 | self.bodies[body.uid] = body
42 | return body
43 |
44 | def load_mjcf(self, mjcf_path):
45 | body_list = Body.from_mjcf(self.p, mjcf_path)
46 | for body in body_list:
47 | self.bodies[body.uid] = body
48 | return body_list
49 |
50 | def remove_body(self, body):
51 | self.p.removeBody(body.uid)
52 | del self.bodies[body.uid]
53 |
54 | def add_constraint(self, *argv, **kwargs):
55 | """See `Constraint` below."""
56 | constraint = Constraint(self.p, *argv, **kwargs)
57 | return constraint
58 |
59 | def add_camera(self, intrinsic, near, far):
60 | camera = Camera(self.p, intrinsic, near, far)
61 | return camera
62 |
63 | def get_contacts(self, bodyA):
64 | points = self.p.getContactPoints(bodyA.uid)
65 | contacts = []
66 | for point in points:
67 | contact = Contact(
68 | bodyA=self.bodies[point[1]],
69 | bodyB=self.bodies[point[2]],
70 | point=point[5],
71 | normal=point[7],
72 | depth=point[8],
73 | force=point[9],
74 | )
75 | contacts.append(contact)
76 | return contacts
77 |
78 | def reset(self):
79 | self.p.resetSimulation()
80 | self.p.setPhysicsEngineParameter(
81 | fixedTimeStep=self.dt, numSolverIterations=self.solver_iterations
82 | )
83 | self.bodies = {}
84 | self.sim_time = 0.0
85 |
86 | def step(self):
87 | self.p.stepSimulation()
88 |
89 |
90 | if self.gui:
91 | time.sleep(self.dt)
92 | if self.save_dir:
93 | if self.sim_step % self.save_freq == 0:
94 | mesh_pose_dict = get_mesh_pose_dict_from_world(self, self.p._client)
95 | with open(os.path.join(self.save_dir, f'{self.sim_step:08d}.pkl'), 'wb') as f:
96 | pickle.dump(mesh_pose_dict, f)
97 |
98 | self.sim_time += self.dt
99 | self.sim_step += 1
100 |
101 | def save_state(self):
102 | return self.p.saveState()
103 |
104 | def restore_state(self, state_uid):
105 | self.p.restoreState(stateId=state_uid)
106 |
107 | def close(self):
108 | self.p.disconnect()
109 |
110 |
111 | class Body(object):
112 | """Interface to a multibody simulated in PyBullet.
113 |
114 | Attributes:
115 | uid: The unique id of the body within the physics server.
116 | name: The name of the body.
117 | joints: A dict mapping joint names to Joint objects.
118 | links: A dict mapping link names to Link objects.
119 | """
120 |
121 | def __init__(self, physics_client, body_uid, scale):
122 | self.p = physics_client
123 | self.uid = body_uid
124 | self.scale = scale
125 | self.name = self.p.getBodyInfo(self.uid)[1].decode("utf-8")
126 | self.joints, self.links = {}, {}
127 | for i in range(self.p.getNumJoints(self.uid)):
128 | joint_info = self.p.getJointInfo(self.uid, i)
129 | joint_name = joint_info[1].decode("utf8")
130 | self.joints[joint_name] = Joint(self.p, self.uid, i)
131 | link_name = joint_info[12].decode("utf8")
132 | self.links[link_name] = Link(self.p, self.uid, i)
133 |
134 | @classmethod
135 | def from_urdf(cls, physics_client, urdf_path, pose, scale, useFixedBase):
136 | body_uid = physics_client.loadURDF(
137 | str(urdf_path),
138 | pose.translation,
139 | pose.rotation.as_quat(),
140 | globalScaling=scale,
141 | useFixedBase=useFixedBase,
142 | )
143 | return cls(physics_client, body_uid, scale)
144 |
145 | @classmethod
146 | def from_mjcf(cls, physics_client, mjcf_path):
147 | body_uids = physics_client.loadMJCF(
148 | str(mjcf_path),
149 | )
150 | return [cls(physics_client, body_uid, 1.0) for body_uid in body_uids]
151 |
152 | def get_pose(self):
153 | pos, ori = self.p.getBasePositionAndOrientation(self.uid)
154 | return Transform(Rotation.from_quat(ori), np.asarray(pos))
155 |
156 | def set_pose(self, pose):
157 | self.p.resetBasePositionAndOrientation(
158 | self.uid, pose.translation, pose.rotation.as_quat()
159 | )
160 |
161 | def get_velocity(self):
162 | linear, angular = self.p.getBaseVelocity(self.uid)
163 | return linear, angular
164 |
165 |
166 | class Link(object):
167 | """Interface to a link simulated in Pybullet.
168 |
169 | Attributes:
170 | link_index: The index of the joint.
171 | """
172 |
173 | def __init__(self, physics_client, body_uid, link_index):
174 | self.p = physics_client
175 | self.body_uid = body_uid
176 | self.link_index = link_index
177 |
178 | def get_pose(self):
179 | link_state = self.p.getLinkState(self.body_uid, self.link_index)
180 | pos, ori = link_state[0], link_state[1]
181 | return Transform(Rotation.from_quat(ori), pos)
182 |
183 |
184 | class Joint(object):
185 | """Interface to a joint simulated in PyBullet.
186 |
187 | Attributes:
188 | joint_index: The index of the joint.
189 | lower_limit: Lower position limit of the joint.
190 | upper_limit: Upper position limit of the joint.
191 | effort: The maximum joint effort.
192 | """
193 |
194 | def __init__(self, physics_client, body_uid, joint_index):
195 | self.p = physics_client
196 | self.body_uid = body_uid
197 | self.joint_index = joint_index
198 |
199 | joint_info = self.p.getJointInfo(body_uid, joint_index)
200 | self.lower_limit = joint_info[8]
201 | self.upper_limit = joint_info[9]
202 | self.effort = joint_info[10]
203 |
204 | def get_position(self):
205 | joint_state = self.p.getJointState(self.body_uid, self.joint_index)
206 | return joint_state[0]
207 |
208 | def set_position(self, position, kinematics=False):
209 | if kinematics:
210 | self.p.resetJointState(self.body_uid, self.joint_index, position)
211 | self.p.setJointMotorControl2(
212 | self.body_uid,
213 | self.joint_index,
214 | pybullet.POSITION_CONTROL,
215 | targetPosition=position,
216 | force=self.effort,
217 | )
218 |
219 |
220 | class Constraint(object):
221 | """Interface to a constraint in PyBullet.
222 |
223 | Attributes:
224 | uid: The unique id of the constraint within the physics server.
225 | """
226 |
227 | def __init__(
228 | self,
229 | physics_client,
230 | parent,
231 | parent_link,
232 | child,
233 | child_link,
234 | joint_type,
235 | joint_axis,
236 | parent_frame,
237 | child_frame,
238 | ):
239 | """
240 | Create a new constraint between links of bodies.
241 |
242 | Args:
243 | parent:
244 | parent_link: None for the base.
245 | child: None for a fixed frame in world coordinates.
246 |
247 | """
248 | self.p = physics_client
249 | parent_body_uid = parent.uid
250 | parent_link_index = parent_link.link_index if parent_link else -1
251 | child_body_uid = child.uid if child else -1
252 | child_link_index = child_link.link_index if child_link else -1
253 |
254 | self.uid = self.p.createConstraint(
255 | parentBodyUniqueId=parent_body_uid,
256 | parentLinkIndex=parent_link_index,
257 | childBodyUniqueId=child_body_uid,
258 | childLinkIndex=child_link_index,
259 | jointType=joint_type,
260 | jointAxis=joint_axis,
261 | parentFramePosition=parent_frame.translation,
262 | parentFrameOrientation=parent_frame.rotation.as_quat(),
263 | childFramePosition=child_frame.translation,
264 | childFrameOrientation=child_frame.rotation.as_quat(),
265 | )
266 |
267 | def change(self, **kwargs):
268 | self.p.changeConstraint(self.uid, **kwargs)
269 |
270 |
271 | class Contact(object):
272 | """Contact point between two multibodies.
273 |
274 | Attributes:
275 | point: Contact point.
276 | normal: Normal vector from ... to ...
277 | depth: Penetration depth
278 | force: Contact force acting on body ...
279 | """
280 |
281 | def __init__(self, bodyA, bodyB, point, normal, depth, force):
282 | self.bodyA = bodyA
283 | self.bodyB = bodyB
284 | self.point = point
285 | self.normal = normal
286 | self.depth = depth
287 | self.force = force
288 |
289 |
290 | class Camera(object):
291 | """Virtual RGB-D camera based on the PyBullet camera interface.
292 |
293 | Attributes:
294 | intrinsic: The camera intrinsic parameters.
295 | """
296 |
297 | def __init__(
298 | self,
299 | physics_client,
300 | intrinsic,
301 | near,
302 | far):
303 | self.intrinsic = intrinsic
304 | self.near = near
305 | self.far = far
306 | self.proj_matrix = _build_projection_matrix(intrinsic, near, far)
307 | self.p = physics_client
308 |
309 | def render(self, extrinsic, flags=pybullet.ER_NO_SEGMENTATION_MASK):
310 | """Render synthetic RGB and depth images.
311 |
312 | Args:
313 | extrinsic: Extrinsic parameters, T_cam_ref.
314 | """
315 | # Construct OpenGL compatible view and projection matrices.
316 | gl_view_matrix = extrinsic.as_matrix()
317 | gl_view_matrix[2, :] *= -1 # flip the Z axis
318 | gl_view_matrix = gl_view_matrix.flatten(order="F")
319 | gl_proj_matrix = self.proj_matrix.flatten(order="F")
320 |
321 | result = self.p.getCameraImage(
322 | width=self.intrinsic.width,
323 | height=self.intrinsic.height,
324 | viewMatrix=gl_view_matrix,
325 | projectionMatrix=gl_proj_matrix,
326 | renderer=pybullet.ER_TINY_RENDERER,
327 | flags=flags
328 | )
329 |
330 | rgb, z_buffer = result[2][:, :, :3], result[3]
331 | depth = (
332 | 1.0 * self.far * self.near / (self.far - (self.far - self.near) * z_buffer)
333 | )
334 |
335 | if flags == pybullet.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX:
336 | seg = result[4]
337 | seg_shape = seg.shape
338 | flat_seg = seg.reshape(-1)
339 | non_neg_seg_idx = flat_seg >= 0
340 |             obj_uid = -np.ones_like(flat_seg)
341 |             link_index = -2 * np.ones_like(flat_seg)
342 | obj_uid[non_neg_seg_idx] = flat_seg[non_neg_seg_idx] & ((1 << 24) - 1)
343 | link_index[non_neg_seg_idx] = (flat_seg[non_neg_seg_idx] >> 24) - 1
344 | obj_uid = obj_uid.reshape(seg_shape)
345 | link_index = link_index.reshape(seg_shape)
346 | return rgb, depth, (obj_uid, link_index)
347 | else:
348 | return rgb, depth
349 |
350 |
351 | def _build_projection_matrix(intrinsic, near, far):
352 | perspective = np.array(
353 | [
354 | [intrinsic.fx, 0.0, -intrinsic.cx, 0.0],
355 | [0.0, intrinsic.fy, -intrinsic.cy, 0.0],
356 | [0.0, 0.0, near + far, near * far],
357 | [0.0, 0.0, -1.0, 0.0],
358 | ]
359 | )
360 | ortho = _gl_ortho(0.0, intrinsic.width, intrinsic.height, 0.0, near, far)
361 | return np.matmul(ortho, perspective)
362 |
363 |
364 | def _gl_ortho(left, right, bottom, top, near, far):
365 | ortho = np.diag(
366 | [2.0 / (right - left), 2.0 / (top - bottom), -2.0 / (far - near), 1.0]
367 | )
368 | ortho[0, 3] = -(right + left) / (right - left)
369 | ortho[1, 3] = -(top + bottom) / (top - bottom)
370 | ortho[2, 3] = -(far + near) / (far - near)
371 | return ortho
372 |
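# --- Illustrative sketch (not part of the original file) ---
# Camera.render() above decodes PyBullet's combined segmentation value: the low
# 24 bits of each pixel hold the body unique id and the higher bits hold
# (link index + 1). A standalone check of that bit layout on a made-up value:
#
#     import numpy as np
#     encoded = np.array([((3 + 1) << 24) | 7])  # hypothetical pixel: body uid 7, link 3
#     obj_uid = encoded & ((1 << 24) - 1)        # -> array([7])
#     link_index = (encoded >> 24) - 1           # -> array([3])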
--------------------------------------------------------------------------------
/src/s2u/utils/implicit.py:
--------------------------------------------------------------------------------
1 | import trimesh
2 | import numpy as np
3 | from s2u.utils.libmesh import check_mesh_contains
4 | from s2u.utils.visual import as_mesh
5 |
6 | def sample_iou_points_uni(bounds, num_point, padding=0.1, whole_space=False, size=0.3):
7 | points = np.random.rand(num_point, 3).astype(np.float32)
8 | scale = (bounds[1] - bounds[0]).max() * (1 + padding)
9 | center = (bounds[1] + bounds[0]) / 2
10 | if whole_space:
11 | points *= size + 2 * padding
12 | points -= padding
13 | else:
14 |         points = (points - 0.5) * scale + center
15 | # occ = check_mesh_contains(mesh, points)
16 | return points.astype(np.float16)
17 |
18 | def sample_iou_points_surf(mesh, num_point, var=0.01):
19 | surf_points = mesh.sample(num_point)
20 | variation = np.random.randn(num_point, 3) * var
21 | points = surf_points + variation
22 | # occ = check_mesh_contains(mesh, points)
23 | return points.astype(np.float16)
24 |
25 | def sample_iou_points_uni_surf(
26 | mesh_list,
27 | bounds,
28 | num_point,
29 | padding=0.1,
30 | var=0.01
31 | ):
32 | num_point_uniform = num_point // 4
33 | num_point_surface = num_point - num_point_uniform
34 | points_uniform = np.random.rand(num_point_uniform, 3).astype(np.float32) - 0.5
35 | scale = (bounds[1] - bounds[0]).max() * (1 + padding)
36 | center = (bounds[1] + bounds[0]) / 2
37 | points_uniform = points_uniform * scale + center
38 | num_point_surface_per_mesh = num_point_surface // (len(mesh_list) * 2)
39 | points_surface = []
40 | for mesh in mesh_list:
41 | for var_local in [var * scale, var * scale * 0.1]:
42 | points_surface_mesh = mesh.sample(num_point_surface_per_mesh)
43 | # add variation
44 | variation = np.random.randn(num_point_surface_per_mesh, 3) * var_local
45 | points_surface_mesh = points_surface_mesh + variation
46 | points_surface.append(points_surface_mesh)
47 |
48 | points_surface.append(points_uniform)
49 |
50 | points = np.concatenate(points_surface, axis=0)
51 | return points.astype(np.float16)
52 |
53 | def sample_iou_points_occ(mesh_list, bounds, num_point, method, padding=0.1, var=0.01):
54 | if method == 'uniform':
55 | points = sample_iou_points_uni(bounds, num_point, padding)
56 | elif method == 'surface':
57 | full_mesh = as_mesh(trimesh.Scene(mesh_list))
58 | points = sample_iou_points_surf(full_mesh, num_point, var)
59 | elif method == 'mix':
60 | points = sample_iou_points_uni_surf(mesh_list, bounds, num_point, padding, var)
61 | occ_list = []
62 | for mesh in mesh_list:
63 | occ = check_mesh_contains(mesh, points)
64 | occ_list.append(occ)
65 | return points.astype(np.float16), occ_list
66 |
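# --- Illustrative usage sketch (not part of the original file) ---
# sample_iou_points_occ() draws query points around a list of meshes and labels
# them with per-mesh occupancy via the compiled libmesh extension. A minimal,
# hypothetical example on two trimesh boxes with the 'uniform' strategy:
#
#     box_a = trimesh.creation.box(extents=(0.2, 0.2, 0.2))
#     box_b = trimesh.creation.box(extents=(0.1, 0.1, 0.1))
#     bounds = trimesh.util.concatenate([box_a, box_b]).bounds
#     points, occ_list = sample_iou_points_occ([box_a, box_b], bounds, 1024, 'uniform')
#     # points: (1024, 3) float16; occ_list: one boolean mask per input mesh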
--------------------------------------------------------------------------------
/src/s2u/utils/io.py:
--------------------------------------------------------------------------------
1 | import json
2 | import uuid
3 |
4 | import numpy as np
5 |
6 | def write_data(root, data_dict):
7 | scene_id = uuid.uuid4().hex
8 | path = root / "scenes" / (scene_id + ".npz")
9 | assert not path.exists()
10 | np.savez_compressed(path, **data_dict)
11 | return scene_id
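# --- Illustrative usage sketch (not part of the original file) ---
# write_data() expects `root` to be a pathlib.Path whose "scenes" subdirectory
# already exists; each call stores one scene's arrays as a compressed .npz named
# by a fresh UUID. Hypothetical example:
#
#     from pathlib import Path
#     root = Path('/tmp/s2u_data')
#     (root / 'scenes').mkdir(parents=True, exist_ok=True)
#     scene_id = write_data(root, {'pc': np.zeros((2048, 3), dtype=np.float16)})
#     loaded = np.load(root / 'scenes' / (scene_id + '.npz'))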
--------------------------------------------------------------------------------
/src/s2u/utils/libmesh/.gitignore:
--------------------------------------------------------------------------------
1 | triangle_hash.cpp
2 | build
3 |
--------------------------------------------------------------------------------
/src/s2u/utils/libmesh/__init__.py:
--------------------------------------------------------------------------------
1 | from .inside_mesh import (
2 | check_mesh_contains, MeshIntersector, TriangleIntersector2d
3 | )
4 |
5 |
6 | __all__ = [
7 |     'check_mesh_contains', 'MeshIntersector', 'TriangleIntersector2d'
8 | ]
9 |
--------------------------------------------------------------------------------
/src/s2u/utils/libmesh/inside_mesh.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .triangle_hash import TriangleHash as _TriangleHash
3 |
4 |
5 | def check_mesh_contains(mesh, points, hash_resolution=512):
6 | intersector = MeshIntersector(mesh, hash_resolution)
7 | contains = intersector.query(points)
8 | return contains
9 |
10 |
11 | class MeshIntersector:
12 | def __init__(self, mesh, resolution=512):
13 | triangles = mesh.vertices[mesh.faces].astype(np.float64)
14 | n_tri = triangles.shape[0]
15 |
16 | self.resolution = resolution
17 | self.bbox_min = triangles.reshape(3 * n_tri, 3).min(axis=0)
18 | self.bbox_max = triangles.reshape(3 * n_tri, 3).max(axis=0)
19 |         # Translate and scale it to [0.5, self.resolution - 0.5]^3
20 | self.scale = (resolution - 1) / (self.bbox_max - self.bbox_min)
21 | self.translate = 0.5 - self.scale * self.bbox_min
22 |
23 | self._triangles = triangles = self.rescale(triangles)
24 | # assert(np.allclose(triangles.reshape(-1, 3).min(0), 0.5))
25 | # assert(np.allclose(triangles.reshape(-1, 3).max(0), resolution - 0.5))
26 |
27 | triangles2d = triangles[:, :, :2]
28 | self._tri_intersector2d = TriangleIntersector2d(
29 | triangles2d, resolution)
30 |
31 | def query(self, points):
32 | # Rescale points
33 | points = self.rescale(points)
34 |
35 | # placeholder result with no hits we'll fill in later
36 |         contains = np.zeros(len(points), dtype=bool)  # np.bool was removed in NumPy >= 1.24
37 |
38 | # cull points outside of the axis aligned bounding box
39 | # this avoids running ray tests unless points are close
40 | inside_aabb = np.all(
41 | (0 <= points) & (points <= self.resolution), axis=1)
42 | if not inside_aabb.any():
43 | return contains
44 |
45 | # Only consider points inside bounding box
46 | mask = inside_aabb
47 | points = points[mask]
48 |
49 | # Compute intersection depth and check order
50 | points_indices, tri_indices = self._tri_intersector2d.query(points[:, :2])
51 |
52 | triangles_intersect = self._triangles[tri_indices]
53 | points_intersect = points[points_indices]
54 |
55 | depth_intersect, abs_n_2 = self.compute_intersection_depth(
56 | points_intersect, triangles_intersect)
57 |
58 | # Count number of intersections in both directions
59 | smaller_depth = depth_intersect >= points_intersect[:, 2] * abs_n_2
60 | bigger_depth = depth_intersect < points_intersect[:, 2] * abs_n_2
61 | points_indices_0 = points_indices[smaller_depth]
62 | points_indices_1 = points_indices[bigger_depth]
63 |
64 | nintersect0 = np.bincount(points_indices_0, minlength=points.shape[0])
65 | nintersect1 = np.bincount(points_indices_1, minlength=points.shape[0])
66 |
67 | # Check if point contained in mesh
68 | contains1 = (np.mod(nintersect0, 2) == 1)
69 | contains2 = (np.mod(nintersect1, 2) == 1)
70 | if (contains1 != contains2).any():
71 | print('Warning: contains1 != contains2 for some points.')
72 | contains[mask] = (contains1 & contains2)
73 | return contains
74 |
75 | def compute_intersection_depth(self, points, triangles):
76 | t1 = triangles[:, 0, :]
77 | t2 = triangles[:, 1, :]
78 | t3 = triangles[:, 2, :]
79 |
80 | v1 = t3 - t1
81 | v2 = t2 - t1
82 | # v1 = v1 / np.linalg.norm(v1, axis=-1, keepdims=True)
83 | # v2 = v2 / np.linalg.norm(v2, axis=-1, keepdims=True)
84 |
85 | normals = np.cross(v1, v2)
86 | alpha = np.sum(normals[:, :2] * (t1[:, :2] - points[:, :2]), axis=1)
87 |
88 | n_2 = normals[:, 2]
89 | t1_2 = t1[:, 2]
90 | s_n_2 = np.sign(n_2)
91 | abs_n_2 = np.abs(n_2)
92 |
93 | mask = (abs_n_2 != 0)
94 |
95 | depth_intersect = np.full(points.shape[0], np.nan)
96 | depth_intersect[mask] = \
97 | t1_2[mask] * abs_n_2[mask] + alpha[mask] * s_n_2[mask]
98 |
99 | # Test the depth:
100 | # TODO: remove and put into tests
101 | # points_new = np.concatenate([points[:, :2], depth_intersect[:, None]], axis=1)
102 | # alpha = (normals * t1).sum(-1)
103 | # mask = (depth_intersect == depth_intersect)
104 | # assert(np.allclose((points_new[mask] * normals[mask]).sum(-1),
105 | # alpha[mask]))
106 | return depth_intersect, abs_n_2
107 |
108 | def rescale(self, array):
109 | array = self.scale * array + self.translate
110 | return array
111 |
112 |
113 | class TriangleIntersector2d:
114 | def __init__(self, triangles, resolution=128):
115 | self.triangles = triangles
116 | self.tri_hash = _TriangleHash(triangles, resolution)
117 |
118 | def query(self, points):
119 | point_indices, tri_indices = self.tri_hash.query(points)
120 | point_indices = np.array(point_indices, dtype=np.int64)
121 | tri_indices = np.array(tri_indices, dtype=np.int64)
122 | points = points[point_indices]
123 | triangles = self.triangles[tri_indices]
124 | mask = self.check_triangles(points, triangles)
125 | point_indices = point_indices[mask]
126 | tri_indices = tri_indices[mask]
127 | return point_indices, tri_indices
128 |
129 | def check_triangles(self, points, triangles):
130 |         contains = np.zeros(points.shape[0], dtype=bool)  # np.bool was removed in NumPy >= 1.24
131 | A = triangles[:, :2] - triangles[:, 2:]
132 | A = A.transpose([0, 2, 1])
133 | y = points - triangles[:, 2]
134 |
135 | detA = A[:, 0, 0] * A[:, 1, 1] - A[:, 0, 1] * A[:, 1, 0]
136 |
137 | mask = (np.abs(detA) != 0.)
138 | A = A[mask]
139 | y = y[mask]
140 | detA = detA[mask]
141 |
142 | s_detA = np.sign(detA)
143 | abs_detA = np.abs(detA)
144 |
145 | u = (A[:, 1, 1] * y[:, 0] - A[:, 0, 1] * y[:, 1]) * s_detA
146 | v = (-A[:, 1, 0] * y[:, 0] + A[:, 0, 0] * y[:, 1]) * s_detA
147 |
148 | sum_uv = u + v
149 | contains[mask] = (
150 | (0 < u) & (u < abs_detA) & (0 < v) & (v < abs_detA)
151 | & (0 < sum_uv) & (sum_uv < abs_detA)
152 | )
153 | return contains
154 |
155 |
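# --- Illustrative usage sketch (not part of the original file) ---
# check_mesh_contains() hashes triangles into a 2D grid, then decides
# inside/outside by the parity of ray-triangle intersections above and below
# each query point. Assuming the Cython triangle_hash extension is built, a
# unit-cube sanity check (any watertight mesh works):
#
#     import trimesh
#     cube = trimesh.creation.box(extents=(1.0, 1.0, 1.0))  # centered at the origin
#     queries = np.array([[0.0, 0.0, 0.0],   # inside
#                         [0.9, 0.9, 0.9]])  # outside
#     print(check_mesh_contains(cube, queries))  # expected: [ True False]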
--------------------------------------------------------------------------------
/src/s2u/utils/libmesh/triangle_hash.pyx:
--------------------------------------------------------------------------------
1 |
2 | # distutils: language=c++
3 | import numpy as np
4 | cimport numpy as np
5 | cimport cython
6 | from libcpp.vector cimport vector
7 | from libc.math cimport floor, ceil
8 |
9 | cdef class TriangleHash:
10 | cdef vector[vector[int]] spatial_hash
11 | cdef int resolution
12 |
13 | def __cinit__(self, double[:, :, :] triangles, int resolution):
14 | self.spatial_hash.resize(resolution * resolution)
15 | self.resolution = resolution
16 | self._build_hash(triangles)
17 |
18 | @cython.boundscheck(False) # Deactivate bounds checking
19 | @cython.wraparound(False) # Deactivate negative indexing.
20 | cdef int _build_hash(self, double[:, :, :] triangles):
21 | assert(triangles.shape[1] == 3)
22 | assert(triangles.shape[2] == 2)
23 |
24 | cdef int n_tri = triangles.shape[0]
25 | cdef int bbox_min[2]
26 | cdef int bbox_max[2]
27 |
28 | cdef int i_tri, j, x, y
29 | cdef int spatial_idx
30 |
31 | for i_tri in range(n_tri):
32 | # Compute bounding box
33 | for j in range(2):
34 | bbox_min[j] = min(
35 | triangles[i_tri, 0, j], triangles[i_tri, 1, j], triangles[i_tri, 2, j]
36 | )
37 | bbox_max[j] = max(
38 | triangles[i_tri, 0, j], triangles[i_tri, 1, j], triangles[i_tri, 2, j]
39 | )
40 | bbox_min[j] = min(max(bbox_min[j], 0), self.resolution - 1)
41 | bbox_max[j] = min(max(bbox_max[j], 0), self.resolution - 1)
42 |
43 | # Find all voxels where bounding box intersects
44 | for x in range(bbox_min[0], bbox_max[0] + 1):
45 | for y in range(bbox_min[1], bbox_max[1] + 1):
46 | spatial_idx = self.resolution * x + y
47 | self.spatial_hash[spatial_idx].push_back(i_tri)
48 |
49 | @cython.boundscheck(False) # Deactivate bounds checking
50 | @cython.wraparound(False) # Deactivate negative indexing.
51 | cpdef query(self, double[:, :] points):
52 | assert(points.shape[1] == 2)
53 | cdef int n_points = points.shape[0]
54 |
55 | cdef vector[int] points_indices
56 | cdef vector[int] tri_indices
57 | # cdef int[:] points_indices_np
58 | # cdef int[:] tri_indices_np
59 |
60 |         cdef int i_point, i_tri, k, x, y
61 | cdef int spatial_idx
62 |
63 | for i_point in range(n_points):
64 | x = int(points[i_point, 0])
65 | y = int(points[i_point, 1])
66 | if not (0 <= x < self.resolution and 0 <= y < self.resolution):
67 | continue
68 |
69 | spatial_idx = self.resolution * x + y
70 | for i_tri in self.spatial_hash[spatial_idx]:
71 | points_indices.push_back(i_point)
72 | tri_indices.push_back(i_tri)
73 |
74 | points_indices_np = np.zeros(points_indices.size(), dtype=np.int32)
75 | tri_indices_np = np.zeros(tri_indices.size(), dtype=np.int32)
76 |
77 | cdef int[:] points_indices_view = points_indices_np
78 | cdef int[:] tri_indices_view = tri_indices_np
79 |
80 | for k in range(points_indices.size()):
81 | points_indices_view[k] = points_indices[k]
82 |
83 | for k in range(tri_indices.size()):
84 | tri_indices_view[k] = tri_indices[k]
85 |
86 | return points_indices_np, tri_indices_np
87 |
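# --- Illustrative build sketch (not part of the original file) ---
# This .pyx module must be compiled before `from .triangle_hash import
# TriangleHash` works. A minimal, generic Cython build script is sketched below;
# the repository's own setup scripts may differ:
#
#     from setuptools import setup
#     from Cython.Build import cythonize
#     import numpy
#
#     setup(
#         ext_modules=cythonize('src/s2u/utils/libmesh/triangle_hash.pyx'),
#         include_dirs=[numpy.get_include()],
#     )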
--------------------------------------------------------------------------------
/src/s2u/utils/mesh2sdf.py:
--------------------------------------------------------------------------------
1 | import mesh_to_sdf
2 | import numpy as np
3 | import trimesh
4 | from mesh_to_sdf import get_surface_point_cloud, mesh_to_sdf
5 | from mesh_to_sdf.utils import sample_uniform_points_in_unit_sphere
6 |
7 | def scale_to_unit_sphere(mesh):
8 | if isinstance(mesh, trimesh.Scene):
9 | mesh = mesh.dump().sum()
10 |
11 | center = mesh.bounding_box.centroid
12 | vertices = mesh.vertices - mesh.bounding_box.centroid
13 | distances = np.linalg.norm(vertices, axis=1)
14 | scale = np.max(distances)
15 | vertices /= scale
16 |
17 |
18 | return trimesh.Trimesh(vertices=vertices, faces=mesh.faces), center, scale
19 |
20 | def sample_sdf_near_surface(mesh,
21 | number_of_points=500000,
22 | surface_point_method='scan',
23 | sign_method='normal',
24 | scan_count=100,
25 | scan_resolution=400,
26 | sample_point_count=10000000,
27 | normal_sample_count=11,
28 | min_size=0,
29 | return_gradients=False,
30 | uniform=False):
31 | mesh, center, scale = scale_to_unit_sphere(mesh)
32 |
33 | if surface_point_method == 'sample' and sign_method == 'depth':
34 | print(
35 | "Incompatible methods for sampling points and determining sign, using sign_method='normal' instead."
36 | )
37 | sign_method = 'normal'
38 | if uniform:
39 | point = sample_uniform_points_in_unit_sphere(number_of_points)
40 | sdf = mesh_to_sdf(mesh, point)
41 | else:
42 | surface_point_cloud = get_surface_point_cloud(
43 | mesh,
44 | surface_point_method,
45 | 1,
46 | scan_count,
47 | scan_resolution,
48 | sample_point_count,
49 | calculate_normals=sign_method == 'normal' or return_gradients)
50 |
51 | point, sdf = surface_point_cloud.sample_sdf_near_surface(
52 | number_of_points, surface_point_method == 'scan', sign_method,
53 | normal_sample_count, min_size, return_gradients)
54 | point = point * scale + center
55 | return point, sdf
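# --- Illustrative usage sketch (not part of the original file) ---
# sample_sdf_near_surface() rescales the mesh into the unit sphere, samples SDF
# values with the mesh_to_sdf package, and maps the sample points (but note:
# not the SDF magnitudes, which stay in unit-sphere units) back to the mesh's
# original frame. Hypothetical example on a trimesh primitive:
#
#     mesh = trimesh.creation.icosphere(radius=0.2)
#     points, sdf = sample_sdf_near_surface(mesh, number_of_points=5000)
#     inside_ratio = (sdf < 0).mean()  # fraction of samples inside the surface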
--------------------------------------------------------------------------------
/src/s2u/utils/misc.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import trimesh
3 | from sklearn.neighbors import KDTree
4 | from scipy.spatial import KDTree as KDTree_scipy
5 | from s2u.utils.visual import as_mesh
6 |
7 | def sample_point_cloud(pc, num_point):
8 | num_point_all = pc.shape[0]
9 | idxs = np.random.choice(np.arange(num_point_all), size=(num_point,), replace=num_point > num_point_all)
10 | return pc[idxs]
11 |
12 | def collect_density_per_object(anchors, points, values, gridnum=35):
13 | pos_neighbors = np.zeros((anchors.shape[0], 1))
14 | neg_neighbors = np.zeros((anchors.shape[0], 1))
15 |
16 | step = 1.0 / gridnum
17 | tree = KDTree(points)
18 | inds = tree.query_radius(anchors, r=step / 2.0)
19 | for p_id in range(anchors.shape[0]):
20 | nlist = inds[p_id]
21 | if (len(nlist) > 0):
22 | vs = values[nlist]
23 | posnum = np.sum(vs < 0)
24 | negnum = np.sum(vs > 0)
25 | else:
26 | posnum = 0
27 | negnum = 0
28 | pos_neighbors[p_id, 0] = posnum
29 | neg_neighbors[p_id, 0] = negnum
30 |
31 | return pos_neighbors, neg_neighbors
32 |
33 |
34 | def estimate_density(points, values):
35 | # compute the inner/outer density
36 | # occ -0.5 to {-0.5, 0.5}
37 | bound_max, bound_min = np.max(points, 0), np.min(points, 0)
38 | center = (bound_max + bound_min) / 2
39 | scale = (bound_max - bound_min).max()
40 | normed_points = (points - center) / scale
41 |
42 | if values.min() < 0:
43 | tmp_val = values
44 | else:
45 | tmp_val = values - 0.5
46 | pos_neighbors, neg_neighbors = collect_density_per_object(normed_points,
47 | normed_points,
48 | tmp_val,
49 | gridnum=20)
50 | densities = np.zeros((points.shape[0], 1))
51 | for i in range(points.shape[0]):
52 | v = values[i]
53 | if (v < 0):
54 | densities[i] = 1 / (pos_neighbors[i] + 0.01)
55 | else:
56 | densities[i] = 1 / (neg_neighbors[i] + 0.01)
57 |
58 | return densities
59 |
60 | def norm_mesh(mesh: trimesh.Trimesh) -> trimesh.Trimesh:
61 | bounds = mesh.bounds
62 | center = bounds.mean(0)
63 | scale = (bounds[1] - bounds[0]).max()
64 | transform_mat = np.eye(4)
65 | transform_mat[:3, 3] = -center
66 | transform_mat[:3] /= scale
67 | mesh.apply_transform(transform_mat)
68 | return mesh
69 |
70 | def get_labeled_surface_pc(mesh_pose_dict, mobile_links, num_point=2048):
71 | mesh_dict = {'s': trimesh.Scene(), 'm': trimesh.Scene()}
72 | for k, v in mesh_pose_dict.items():
73 | for mesh_path, scale, pose in v:
74 | if mesh_path.startswith('#'): # primitive
75 | mesh = trimesh.creation.box(extents=scale, transform=pose)
76 | else:
77 | mesh = trimesh.load(mesh_path)
78 | mesh.apply_scale(scale)
79 | mesh.apply_transform(pose)
80 | if int(k.split('_')[1]) in mobile_links:
81 | mesh_dict['m'].add_geometry(mesh)
82 | else:
83 | mesh_dict['s'].add_geometry(mesh)
84 | sample_points = []
85 | labels = []
86 | sample_points.append(as_mesh(mesh_dict['s']).sample(num_point))
87 | labels.append(np.zeros(num_point))
88 | sample_points.append(as_mesh(mesh_dict['m']).sample(num_point))
89 | labels.append(np.ones(num_point))
90 | sample_points = np.concatenate(sample_points, 0)
91 | labels = np.concatenate(labels, 0)
92 | return sample_points, labels
93 |
94 | def segment_pc(pc, mesh_pose_dict, mobile_links):
95 | surface_pc, label = get_labeled_surface_pc(mesh_pose_dict, mobile_links)
96 | tree = KDTree_scipy(surface_pc)
97 | _, nearest_neighbor_idxs = tree.query(pc)
98 | seg_label = label[nearest_neighbor_idxs].astype(bool)
99 | return seg_label
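# --- Illustrative usage sketch (not part of the original file) ---
# estimate_density() normalizes the point set, then weights every point by the
# inverse count of same-sign neighbors within a small radius, so samples in
# densely covered regions of their own class get down-weighted. Hypothetical
# example with signed, SDF-like values:
#
#     pts = np.random.rand(500, 3).astype(np.float32)
#     vals = np.random.randn(500).astype(np.float32)  # negative = inside
#     weights = estimate_density(pts, vals)           # shape (500, 1)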
--------------------------------------------------------------------------------
/src/s2u/utils/parallel_exec.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | ##########
4 | #
5 | # Parallel execution
6 | # Minhyuk Sung
7 | # Jan. 2016
8 | #
9 | ##########
10 |
11 | import functools
12 | import getpass
13 | import multiprocessing as mp
14 | import subprocess
15 | import time
16 |
17 |
18 | def exec_cmd(name, cmd):
19 | #print(cmd)
20 | #print('Job [' + name + '] Started.')
21 | subprocess.call(cmd, shell=True)
22 | print('Job [' + name + '] Finished.')
23 |
24 |
25 | def log_result(result):
26 | g_results.append(result)
27 | num_completed_jobs = len(g_results)
28 | num_remaining_jobs = g_num_total_jobs - num_completed_jobs
29 | elapsed_time = time.time() - g_starting_time
30 |
31 | msg = 'Batch: ' + g_batch_name + '. '
32 | msg = msg + 'Elapsed time: ' + \
33 | time.strftime("%H:%M:%S", time.gmtime(elapsed_time)) + '. '
34 | msg = msg + 'Waiting for ' + str(num_remaining_jobs) + \
35 | ' / ' + str(g_num_total_jobs) + ' jobs...'
36 | print(msg)
37 |
38 |
39 | def run_parallel(batch_name, cmd_list):
40 | global g_results
41 | global g_starting_time
42 | global g_num_total_jobs
43 | global g_batch_name
44 |
45 | g_results = []
46 | g_starting_time = time.time()
47 | g_num_total_jobs = len(cmd_list)
48 | g_batch_name = batch_name
49 | num_processors = int(0.8 * mp.cpu_count())
50 |     if g_num_total_jobs == 0: return
51 |
52 | msg = 'Batch (' + g_batch_name + ') Starting...'
53 | print(msg)
54 | print(' - Job count = ' + str(g_num_total_jobs))
55 | print(' - Processor count = ' + str(num_processors))
56 |
57 | pool = mp.Pool(processes=num_processors)
58 | for args in cmd_list:
59 | pool.apply_async(func=exec_cmd, args=args, callback=log_result)
60 | pool.close()
61 | pool.join()
62 |
63 | msg = 'Batch (' + g_batch_name + ') Completed.'
64 | msg = msg + ' Current time: ' + \
65 | time.strftime("%H:%M:%S", time.localtime(time.time())) + '. '
66 | print(msg)
67 |
68 |
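# --- Illustrative usage sketch (not part of the original file) ---
# run_parallel() expects each element of cmd_list to be an argument tuple for
# exec_cmd, i.e. (job_name, shell_command). Hypothetical example:
#
#     if __name__ == '__main__':
#         cmds = [('job%02d' % i, 'echo job %d' % i) for i in range(4)]
#         run_parallel('demo', cmds)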
--------------------------------------------------------------------------------
/src/s2u/utils/saver.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import pybullet
3 | import numpy as np
4 | from s2u.utils.transform import Rotation, Transform
5 | from s2u.utils.visual import as_mesh
6 |
7 | def get_mesh_pose_dict_from_world(world,
8 | exclude_plane=True):
9 | mesh_pose_dict = {}
10 | for uid in world.bodies.keys():
11 | _, name = world.p.getBodyInfo(uid)
12 | name = name.decode('utf8')
13 | if name == 'plane' and exclude_plane:
14 | continue
15 | body = world.bodies[uid]
16 | for visual in world.p.getVisualShapeData(uid):
17 | object_name, mesh_path, scale, pose = get_mesh_pose(
18 | visual, world.p._client)
19 | mesh_path = mesh_path.decode('utf8')
20 | if object_name not in mesh_pose_dict.keys():
21 | mesh_pose_dict[object_name] = []
22 | if mesh_path == '': # primitive
23 | linkIndex = visual[1]
24 | mesh_path = f'#{uid}_{linkIndex}'
25 | mesh_pose_dict[object_name].append((mesh_path, scale, pose.as_matrix()))
26 | return mesh_pose_dict
27 |
28 | def get_mesh_pose(visual, physicsClientId):
29 | objectUniqueId = visual[0]
30 | linkIndex = visual[1]
31 | visualGeometryType = visual[2]
32 | scale = visual[3]
33 | meshAssetFileName = visual[4]
34 | localVisualFramePosition = visual[5]
35 | localVisualFrameOrientation = visual[6]
36 | rgbaColor = visual[7]
37 |
38 | visual_offset = Transform(Rotation.from_quat(localVisualFrameOrientation),
39 | np.array(localVisualFramePosition))
40 | if linkIndex != -1:
41 | linkState = get_link_pose((objectUniqueId, linkIndex), physicsClientId)
42 | linkOffsetState = get_link_local_offset((objectUniqueId, linkIndex), physicsClientId)
43 | linkOffsetState.translation = np.array([0, 0, 0])
44 | linkOffsetState = linkOffsetState * visual_offset
45 | else:
46 | linkState = get_body_pose(objectUniqueId, physicsClientId)
47 | linkOffsetState = visual_offset
48 | # transform = linkState
49 | transform = linkState * linkOffsetState
50 |
51 | # Name to use for visii components
52 | object_name = str(objectUniqueId) + "_" + str(linkIndex)
53 | return object_name, meshAssetFileName, scale, transform
54 |
55 | def get_link_local_offset(link_uid, physicsClientId=0):
56 | """Get the local offset of the link.
57 | Args:
58 | link_uid: A tuple of the body Unique ID and the link index.
59 | Returns:
60 | An instance of Pose.
61 | """
62 | body_uid, link_ind = link_uid
63 | _, _, position, quaternion, _, _ = pybullet.getLinkState(
64 | bodyUniqueId=body_uid, linkIndex=link_ind, physicsClientId=physicsClientId)
65 | ori = Rotation.from_quat(quaternion)
66 | return Transform(ori, np.array(position))
67 |
68 | def get_link_pose(link_uid, physicsClientId):
69 | """Get the pose of the link.
70 | Args:
71 | link_uid: A tuple of the body Unique ID and the link index.
72 | Returns:
73 | An instance of Pose.
74 | """
75 | body_uid, link_ind = link_uid
76 | _, _, _, _, position, quaternion = pybullet.getLinkState(
77 | bodyUniqueId=body_uid, linkIndex=link_ind,
78 | physicsClientId=physicsClientId)
79 | ori = Rotation.from_quat(quaternion)
80 | return Transform(ori, np.array(position))
81 |
82 | def get_link_center_pose(link_uid, physicsClientId):
83 | """Get the pose of the link center of mass.
84 | Args:
85 | link_uid: A tuple of the body Unique ID and the link index.
86 | Returns:
87 | An instance of Pose.
88 | """
89 | body_uid, link_ind = link_uid
90 | position, quaternion, _, _, _, _ = pybullet.getLinkState(
91 | bodyUniqueId=body_uid, linkIndex=link_ind,
92 | physicsClientId=physicsClientId)
93 | ori = Rotation.from_quat(quaternion)
94 | return Transform(ori, np.array(position))
95 |
96 | def get_body_pose(body_uid, physicsClientId):
97 | """Get the pose of the body.
98 | The pose of the body is defined as the pose of the base of the body.
99 | Args:
100 | body_uid: The body Unique ID.
101 | Returns:
102 | An instance of Pose.
103 | """
104 | position, quaternion = pybullet.getBasePositionAndOrientation(
105 | bodyUniqueId=body_uid, physicsClientId=physicsClientId)
106 |
107 | tmp = pybullet.getDynamicsInfo(body_uid, -1, physicsClientId)
108 | local_inertial_pos, local_inertial_ori = tmp[3], tmp[4]
109 | local_transform = Transform(Rotation.from_quat(local_inertial_ori),
110 | np.array(local_inertial_pos))
111 |
112 | base_frame_transform = Transform(
113 | Rotation.from_quat(quaternion),
114 | np.array(position)) * local_transform.inverse()
115 |
116 | return base_frame_transform
117 |
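# --- Illustrative note (not part of the original file) ---
# get_body_pose() returns the pose of the URDF base *frame*, not of the center
# of mass: PyBullet reports world_T_com, so the local inertial offset is divided
# out via world_T_base = world_T_com * (base_T_com)^-1. A hypothetical check:
#
#     world_T_com = Transform(Rotation.identity(), np.array([1.0, 0.0, 0.0]))
#     base_T_com = Transform(Rotation.identity(), np.array([0.1, 0.0, 0.0]))
#     world_T_base = world_T_com * base_T_com.inverse()
#     # world_T_base.translation -> array([0.9, 0., 0.])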
--------------------------------------------------------------------------------
/src/s2u/utils/transform.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.spatial.transform
3 |
4 |
5 | class Rotation(scipy.spatial.transform.Rotation):
6 | @classmethod
7 | def identity(cls):
8 | return cls.from_quat([0.0, 0.0, 0.0, 1.0])
9 |
10 |
11 | class Transform(object):
12 | """Rigid spatial transform between coordinate systems in 3D space.
13 |
14 | Attributes:
15 | rotation (scipy.spatial.transform.Rotation)
16 | translation (np.ndarray)
17 | """
18 |
19 | def __init__(self, rotation, translation):
20 | assert isinstance(rotation, scipy.spatial.transform.Rotation)
21 | assert isinstance(translation, (np.ndarray, list))
22 |
23 | self.rotation = rotation
24 | self.translation = np.asarray(translation, np.double)
25 |
26 | def as_matrix(self):
27 | """Represent as a 4x4 matrix."""
28 | return np.vstack(
29 | (np.c_[self.rotation.as_matrix(), self.translation], [0.0, 0.0, 0.0, 1.0])
30 | )
31 |
32 | def to_dict(self):
33 | """Serialize Transform object into a dictionary."""
34 | return {
35 | "rotation": self.rotation.as_quat().tolist(),
36 | "translation": self.translation.tolist(),
37 | }
38 |
39 | def to_list(self):
40 | return np.r_[self.rotation.as_quat(), self.translation]
41 |
42 | def __mul__(self, other):
43 | """Compose this transform with another."""
44 | rotation = self.rotation * other.rotation
45 | translation = self.rotation.apply(other.translation) + self.translation
46 | return self.__class__(rotation, translation)
47 |
48 | def transform_point(self, point):
49 | return self.rotation.apply(point) + self.translation
50 |
51 | def transform_vector(self, vector):
52 | return self.rotation.apply(vector)
53 |
54 | def inverse(self):
55 | """Compute the inverse of this transform."""
56 | rotation = self.rotation.inv()
57 | translation = -rotation.apply(self.translation)
58 | return self.__class__(rotation, translation)
59 |
60 | @classmethod
61 | def from_matrix(cls, m):
62 | """Initialize from a 4x4 matrix."""
63 | rotation = Rotation.from_matrix(m[:3, :3])
64 | translation = m[:3, 3]
65 | return cls(rotation, translation)
66 |
67 | @classmethod
68 | def from_dict(cls, dictionary):
69 | rotation = Rotation.from_quat(dictionary["rotation"])
70 | translation = np.asarray(dictionary["translation"])
71 | return cls(rotation, translation)
72 |
73 | @classmethod
74 |     def from_list(cls, lst):
75 |         rotation = Rotation.from_quat(lst[:4])
76 |         translation = lst[4:]
77 |         return cls(rotation, translation)
78 |
79 | @classmethod
80 | def identity(cls):
81 | """Initialize with the identity transformation."""
82 | rotation = Rotation.from_quat([0.0, 0.0, 0.0, 1.0])
83 | translation = np.array([0.0, 0.0, 0.0])
84 | return cls(rotation, translation)
85 |
86 | @classmethod
87 | def look_at(cls, eye, center, up):
88 | """Initialize with a LookAt matrix.
89 |
90 | Returns:
91 | T_eye_ref, the transform from camera to the reference frame, w.r.t.
92 | which the input arguments were defined.
93 | """
94 | eye = np.asarray(eye)
95 | center = np.asarray(center)
96 |
97 | forward = center - eye
98 | forward /= np.linalg.norm(forward)
99 |
100 | right = np.cross(forward, up)
101 | right /= np.linalg.norm(right)
102 |
103 |         # recompute up so the camera frame is orthonormal (right and forward are unit vectors)
104 |         up = np.cross(right, forward)
105 |
106 | m = np.eye(4, 4)
107 | m[:3, 0] = right
108 | m[:3, 1] = -up
109 | m[:3, 2] = forward
110 | m[:3, 3] = eye
111 |
112 | return cls.from_matrix(m).inverse()
113 |
114 | def skew(vector):
115 | return np.array([[0, -vector[2], vector[1]], [vector[2], 0, -vector[0]],
116 | [-vector[1], vector[0], 0]])
117 |
118 | def axis2transform(axis, up=np.array([0.0, 0.0, 1.0])):
119 |     v = np.cross(up, axis)
120 |     s = np.sqrt(np.sum(v**2))
121 |     c = np.dot(axis, up)
122 |     if s == 0:  # parallel (c > 0): identity; anti-parallel (c < 0): 180-degree flip about an axis perpendicular to up
123 |         perp = np.cross(up, np.eye(3)[np.argmin(np.abs(up))])
124 |         return Transform(Rotation.identity() if c > 0 else Rotation.from_rotvec(np.pi * perp / np.linalg.norm(perp)), np.zeros(3))
125 |     R = np.eye(3) + skew(v) + skew(v).dot(skew(v)) / (1 + c)
126 |     return Transform(Rotation.from_matrix(R), np.zeros(3))
127 |
128 | def get_transform(pos, quat):
129 | return Transform(Rotation.from_quat(np.array(quat)), np.array(pos))
130 |
131 | def rotation_matrix_from_axis(axis, theta):
132 | R = np.eye(3) * np.cos(theta)
133 | R += skew(axis) * np.sin(theta)
134 | R += (1 - np.cos(theta)) * np.outer(axis, axis)
135 | return R
136 |
137 | def transform_from_screw(l, m, theta, t):
138 | p = np.cross(l, m)
139 | rotation = rotation_matrix_from_axis(l, theta)
140 | translation = -rotation.dot(p) + p + t * l
141 | rotation = Rotation.from_matrix(rotation)
142 | transform = Transform(rotation, translation)
143 | return transform
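# --- Illustrative usage sketch (not part of the original file) ---
# transform_from_screw() builds the rigid motion of a screw joint: unit axis
# direction l, moment m = p x l for a point p on the axis, rotation angle theta,
# and translation t along the axis. Hypothetical example, a 90-degree swing
# about a vertical hinge through (1, 0, 0):
#
#     l = np.array([0.0, 0.0, 1.0])       # hinge axis direction
#     p_axis = np.array([1.0, 0.0, 0.0])  # a point on the hinge (hypothetical name)
#     m = np.cross(p_axis, l)
#     T = transform_from_screw(l, m, np.pi / 2, 0.0)
#     T.transform_point(np.array([0.0, 0.0, 0.0]))  # -> approx. [1., -1., 0.]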
--------------------------------------------------------------------------------
/src/s2u/utils/urdfEditor.py:
--------------------------------------------------------------------------------
1 | # modified from
2 | # https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/gym/pybullet_utils/urdfEditor.py
3 |
4 | import pybullet as p
5 | import time
6 |
7 |
8 | class UrdfInertial(object):
9 |
10 | def __init__(self):
11 | self.mass = 1
12 | self.inertia_xxyyzz = [7, 8, 9]
13 | self.origin_rpy = [1, 2, 3]
14 | self.origin_xyz = [4, 5, 6]
15 |
16 |
17 | class UrdfContact(object):
18 |
19 | def __init__(self):
20 | self.lateral_friction = 1
21 | self.rolling_friction = 0
22 | self.spinning_friction = 0
23 |
24 |
25 | class UrdfLink(object):
26 |
27 | def __init__(self):
28 | self.link_name = "dummy"
29 | self.urdf_inertial = UrdfInertial()
30 | self.urdf_visual_shapes = []
31 | self.urdf_collision_shapes = []
32 |
33 |
34 | class UrdfVisual(object):
35 |
36 | def __init__(self):
37 | self.origin_rpy = [1, 2, 3]
38 | self.origin_xyz = [4, 5, 6]
39 | self.geom_type = p.GEOM_BOX
40 | self.geom_radius = 1
41 | self.geom_extents = [7, 8, 9]
42 | self.geom_length = 10
43 | self.geom_meshfilename = "meshfile"
44 | self.geom_meshscale = [1, 1, 1]
45 | self.material_rgba = [1, 0, 0, 1]
46 | self.material_name = ""
47 |
48 |
49 | class UrdfCollision(object):
50 |
51 | def __init__(self):
52 | self.origin_rpy = [1, 2, 3]
53 | self.origin_xyz = [4, 5, 6]
54 | self.geom_type = p.GEOM_BOX
55 | self.geom_radius = 1
56 | self.geom_length = 2
57 | self.geom_extents = [7, 8, 9]
58 | self.geom_meshfilename = "meshfile"
59 | self.geom_meshscale = [1, 1, 1]
60 |
61 |
62 | class UrdfJoint(object):
63 |
64 | def __init__(self):
65 | self.link = UrdfLink()
66 | self.joint_name = "joint_dummy"
67 | self.joint_type = p.JOINT_REVOLUTE
68 | self.joint_lower_limit = 0
69 | self.joint_upper_limit = -1
70 | self.parent_name = "parentName"
71 | self.child_name = "childName"
72 | self.joint_origin_xyz = [1, 2, 3]
73 | self.joint_origin_rpy = [1, 2, 3]
74 | self.joint_axis_xyz = [1, 2, 3]
75 |
76 |
77 | class UrdfEditor(object):
78 |
79 | def __init__(self):
80 | self.initialize()
81 |
82 | def initialize(self):
83 | self.urdfLinks = []
84 | self.urdfJoints = []
85 | self.robotName = ""
86 | self.linkNameToIndex = {}
87 | self.jointTypeToName={p.JOINT_FIXED: "JOINT_FIXED" ,\
88 | p.JOINT_REVOLUTE: "JOINT_REVOLUTE",\
89 | p.JOINT_PRISMATIC: "JOINT_PRISMATIC" }
90 |
91 | def convertLinkFromMultiBody(self, bodyUid, linkIndex, urdfLink, physicsClientId):
92 | dyn = p.getDynamicsInfo(bodyUid, linkIndex, physicsClientId=physicsClientId)
93 | urdfLink.urdf_inertial.mass = dyn[0]
94 | urdfLink.urdf_inertial.inertia_xxyyzz = dyn[2]
95 | #todo
96 | urdfLink.urdf_inertial.origin_xyz = dyn[3]
97 | rpy = p.getEulerFromQuaternion(dyn[4])
98 | urdfLink.urdf_inertial.origin_rpy = rpy
99 |
100 | visualShapes = p.getVisualShapeData(bodyUid, physicsClientId=physicsClientId)
101 | matIndex = 0
102 | for v in visualShapes:
103 | if (v[1] == linkIndex):
104 | urdfVisual = UrdfVisual()
105 | urdfVisual.geom_type = v[2]
106 | if (v[2] == p.GEOM_BOX):
107 | urdfVisual.geom_extents = v[3]
108 | if (v[2] == p.GEOM_SPHERE):
109 | urdfVisual.geom_radius = v[3][0]
110 | if (v[2] == p.GEOM_MESH):
111 | urdfVisual.geom_meshfilename = v[4].decode("utf-8")
112 | urdfVisual.geom_meshscale = v[3]
113 | if (v[2] == p.GEOM_CYLINDER):
114 | urdfVisual.geom_length = v[3][0]
115 | urdfVisual.geom_radius = v[3][1]
116 | if (v[2] == p.GEOM_CAPSULE):
117 | urdfVisual.geom_length = v[3][0]
118 | urdfVisual.geom_radius = v[3][1]
119 | urdfVisual.origin_xyz = v[5]
120 | urdfVisual.origin_rpy = p.getEulerFromQuaternion(v[6])
121 | urdfVisual.material_rgba = v[7]
122 | name = 'mat_{}_{}'.format(linkIndex, matIndex)
123 | urdfVisual.material_name = name
124 | urdfLink.urdf_visual_shapes.append(urdfVisual)
125 | matIndex = matIndex + 1
126 |
127 | collisionShapes = p.getCollisionShapeData(bodyUid, linkIndex, physicsClientId=physicsClientId)
128 | for v in collisionShapes:
129 | urdfCollision = UrdfCollision()
130 | urdfCollision.geom_type = v[2]
131 | if (v[2] == p.GEOM_BOX):
132 | urdfCollision.geom_extents = v[3]
133 | if (v[2] == p.GEOM_SPHERE):
134 | urdfCollision.geom_radius = v[3][0]
135 | if (v[2] == p.GEOM_MESH):
136 | urdfCollision.geom_meshfilename = v[4].decode("utf-8")
137 | urdfCollision.geom_meshscale = v[3]
138 | if (v[2] == p.GEOM_CYLINDER):
139 | urdfCollision.geom_length = v[3][0]
140 | urdfCollision.geom_radius = v[3][1]
141 | if (v[2] == p.GEOM_CAPSULE):
142 | urdfCollision.geom_length = v[3][0]
143 | urdfCollision.geom_radius = v[3][1]
144 | pos,orn = p.multiplyTransforms(dyn[3],dyn[4],\
145 | v[5], v[6])
146 | urdfCollision.origin_xyz = pos
147 | urdfCollision.origin_rpy = p.getEulerFromQuaternion(orn)
148 | urdfLink.urdf_collision_shapes.append(urdfCollision)
149 |
150 | def initializeFromBulletBody(self, bodyUid, physicsClientId):
151 | self.initialize()
152 |
153 | #always create a base link
154 | baseLink = UrdfLink()
155 | baseLinkIndex = -1
156 | self.convertLinkFromMultiBody(bodyUid, baseLinkIndex, baseLink, physicsClientId)
157 | baseLink.link_name = p.getBodyInfo(bodyUid, physicsClientId=physicsClientId)[0].decode("utf-8")
158 | self.linkNameToIndex[baseLink.link_name] = len(self.urdfLinks)
159 | self.urdfLinks.append(baseLink)
160 |
161 | #optionally create child links and joints
162 | for j in range(p.getNumJoints(bodyUid, physicsClientId=physicsClientId)):
163 | jointInfo = p.getJointInfo(bodyUid, j, physicsClientId=physicsClientId)
164 | urdfLink = UrdfLink()
165 | self.convertLinkFromMultiBody(bodyUid, j, urdfLink, physicsClientId)
166 | urdfLink.link_name = jointInfo[12].decode("utf-8")
167 | self.linkNameToIndex[urdfLink.link_name] = len(self.urdfLinks)
168 | self.urdfLinks.append(urdfLink)
169 |
170 | urdfJoint = UrdfJoint()
171 | urdfJoint.link = urdfLink
172 | urdfJoint.joint_name = jointInfo[1].decode("utf-8")
173 | urdfJoint.joint_type = jointInfo[2]
174 | urdfJoint.joint_lower_limit = jointInfo[8]
175 | urdfJoint.joint_upper_limit = jointInfo[9]
176 | urdfJoint.joint_axis_xyz = jointInfo[13]
177 | orgParentIndex = jointInfo[16]
178 | if (orgParentIndex < 0):
179 | urdfJoint.parent_name = baseLink.link_name
180 | else:
181 | parentJointInfo = p.getJointInfo(bodyUid, orgParentIndex, physicsClientId=physicsClientId)
182 | urdfJoint.parent_name = parentJointInfo[12].decode("utf-8")
183 | urdfJoint.child_name = urdfLink.link_name
184 |
185 | #todo, compensate for inertia/link frame offset
186 |
187 | dynChild = p.getDynamicsInfo(bodyUid, j, physicsClientId=physicsClientId)
188 | childInertiaPos = dynChild[3]
189 | childInertiaOrn = dynChild[4]
190 | parentCom2JointPos = jointInfo[14]
191 | parentCom2JointOrn = jointInfo[15]
192 | tmpPos, tmpOrn = p.multiplyTransforms(childInertiaPos, childInertiaOrn, parentCom2JointPos,
193 | parentCom2JointOrn)
194 | tmpPosInv, tmpOrnInv = p.invertTransform(tmpPos, tmpOrn)
195 | dynParent = p.getDynamicsInfo(bodyUid, orgParentIndex, physicsClientId=physicsClientId)
196 | parentInertiaPos = dynParent[3]
197 | parentInertiaOrn = dynParent[4]
198 |
199 | pos, orn = p.multiplyTransforms(parentInertiaPos, parentInertiaOrn, tmpPosInv, tmpOrnInv)
200 | pos, orn_unused = p.multiplyTransforms(parentInertiaPos, parentInertiaOrn,
201 | parentCom2JointPos, [0, 0, 0, 1])
202 |
203 | urdfJoint.joint_origin_xyz = pos
204 | urdfJoint.joint_origin_rpy = p.getEulerFromQuaternion(orn)
205 |
206 | self.urdfJoints.append(urdfJoint)
207 |
208 |   def writeInertial(self, file, urdfInertial, precision=5):
209 |     file.write("\t\t<inertial>\n")
210 |     str = '\t\t\t<origin rpy=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\" xyz=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\"/>\n'.format(\
211 |       urdfInertial.origin_rpy[0],urdfInertial.origin_rpy[1],urdfInertial.origin_rpy[2],\
212 |       urdfInertial.origin_xyz[0],urdfInertial.origin_xyz[1],urdfInertial.origin_xyz[2], prec=precision)
213 |     file.write(str)
214 |     str = '\t\t\t<mass value=\"{:.{prec}f}\"/>\n'.format(urdfInertial.mass, prec=precision)
215 |     file.write(str)
216 |     str = '\t\t\t<inertia ixx=\"{:.{prec}f}\" ixy=\"0\" ixz=\"0\" iyy=\"{:.{prec}f}\" iyz=\"0\" izz=\"{:.{prec}f}\"/>\n'.format(\
217 |       urdfInertial.inertia_xxyyzz[0],\
218 |       urdfInertial.inertia_xxyyzz[1],\
219 |       urdfInertial.inertia_xxyyzz[2],prec=precision)
220 |     file.write(str)
221 |     file.write("\t\t</inertial>\n")
222 | 
223 |   def writeVisualShape(self, file, urdfVisual, precision=5):
224 |     #we don't support loading capsule types from visuals, so auto-convert from collision shape
225 |     if urdfVisual.geom_type == p.GEOM_CAPSULE:
226 |       return
227 | 
228 |     file.write("\t\t<visual>\n")
229 |     str = '\t\t\t<origin rpy=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\" xyz=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\"/>\n'.format(\
230 |       urdfVisual.origin_rpy[0],urdfVisual.origin_rpy[1],urdfVisual.origin_rpy[2],
231 |       urdfVisual.origin_xyz[0],urdfVisual.origin_xyz[1],urdfVisual.origin_xyz[2], prec=precision)
232 |     file.write(str)
233 |     file.write("\t\t\t<geometry>\n")
234 |     if urdfVisual.geom_type == p.GEOM_BOX:
235 |       str = '\t\t\t\t<box size=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\"/>\n'.format(urdfVisual.geom_extents[0],\
236 |         urdfVisual.geom_extents[1],urdfVisual.geom_extents[2], prec=precision)
237 |       file.write(str)
238 |     if urdfVisual.geom_type == p.GEOM_SPHERE:
239 |       str = '\t\t\t\t<sphere radius=\"{:.{prec}f}\"/>\n'.format(urdfVisual.geom_radius,\
240 |         prec=precision)
241 |       file.write(str)
242 |     if urdfVisual.geom_type == p.GEOM_MESH:
243 | 
244 |       str = '\t\t\t\t<mesh filename=\"{}\" scale=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\"/>\n'.format(\
245 |         urdfVisual.geom_meshfilename,urdfVisual.geom_meshscale[0],urdfVisual.geom_meshscale[1],urdfVisual.geom_meshscale[2],\
246 |         prec=precision)
247 |       file.write(str)
248 |     if urdfVisual.geom_type == p.GEOM_CYLINDER:
249 |       str = '\t\t\t\t<cylinder length=\"{:.{prec}f}\" radius=\"{:.{prec}f}\"/>\n'.format(\
250 |         urdfVisual.geom_length, urdfVisual.geom_radius, prec=precision)
251 |       file.write(str)
252 |     if urdfVisual.geom_type == p.GEOM_CAPSULE:
253 |       str = '\t\t\t\t<capsule length=\"{:.{prec}f}\" radius=\"{:.{prec}f}\"/>\n'.format(\
254 |         urdfVisual.geom_length, urdfVisual.geom_radius, prec=precision)
255 |       file.write(str)
256 | 
257 |     file.write("\t\t\t</geometry>\n")
258 |     str = '\t\t\t<material name=\"{}\">\n'.format(urdfVisual.material_name)
259 |     file.write(str)
260 |     str = '\t\t\t\t<color rgba=\"{:.{prec}f} {:.{prec}f} {:.{prec}f} {:.{prec}f}\"/>\n'.format(urdfVisual.material_rgba[0],\
261 |       urdfVisual.material_rgba[1],urdfVisual.material_rgba[2],urdfVisual.material_rgba[3],prec=precision)
262 |     file.write(str)
263 |     file.write("\t\t\t</material>\n")
264 |     file.write("\t\t</visual>\n")
265 | 
266 |   def writeCollisionShape(self, file, urdfCollision, precision=5):
267 |     file.write("\t\t<collision>\n")
268 |     str = '\t\t\t<origin rpy=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\" xyz=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\"/>\n'.format(\
269 |       urdfCollision.origin_rpy[0],urdfCollision.origin_rpy[1],urdfCollision.origin_rpy[2],
270 |       urdfCollision.origin_xyz[0],urdfCollision.origin_xyz[1],urdfCollision.origin_xyz[2], prec=precision)
271 |     file.write(str)
272 |     file.write("\t\t\t<geometry>\n")
273 |     if urdfCollision.geom_type == p.GEOM_BOX:
274 |       str = '\t\t\t\t<box size=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\"/>\n'.format(urdfCollision.geom_extents[0],\
275 |         urdfCollision.geom_extents[1],urdfCollision.geom_extents[2], prec=precision)
276 |       file.write(str)
277 |     if urdfCollision.geom_type == p.GEOM_SPHERE:
278 |       str = '\t\t\t\t<sphere radius=\"{:.{prec}f}\"/>\n'.format(urdfCollision.geom_radius,\
279 |         prec=precision)
280 |       file.write(str)
281 |     if urdfCollision.geom_type == p.GEOM_MESH:
282 |       str = '\t\t\t\t<mesh filename=\"{}\" scale=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\"/>\n'.format(urdfCollision.geom_meshfilename,\
283 |         urdfCollision.geom_meshscale[0],urdfCollision.geom_meshscale[1],urdfCollision.geom_meshscale[2],prec=precision)
284 |       file.write(str)
285 |     if urdfCollision.geom_type == p.GEOM_CYLINDER:
286 |       str = '\t\t\t\t<cylinder length=\"{:.{prec}f}\" radius=\"{:.{prec}f}\"/>\n'.format(\
287 |         urdfCollision.geom_length, urdfCollision.geom_radius, prec=precision)
288 |       file.write(str)
289 |     if urdfCollision.geom_type == p.GEOM_CAPSULE:
290 |       str = '\t\t\t\t<capsule length=\"{:.{prec}f}\" radius=\"{:.{prec}f}\"/>\n'.format(\
291 |         urdfCollision.geom_length, urdfCollision.geom_radius, prec=precision)
292 |       file.write(str)
293 |     file.write("\t\t\t</geometry>\n")
294 |     file.write("\t\t</collision>\n")
295 | 
296 |   def writeLink(self, file, urdfLink, saveVisuals):
297 |     file.write("\t<link name=\"")
298 |     file.write(urdfLink.link_name)
299 |     file.write("\">\n")
300 | 
301 |     self.writeInertial(file, urdfLink.urdf_inertial)
302 |     hasCapsules = False
303 |     for v in urdfLink.urdf_visual_shapes:
304 |       if (v.geom_type == p.GEOM_CAPSULE):
305 |         hasCapsules = True
306 |     if (saveVisuals and not hasCapsules):
307 |       for v in urdfLink.urdf_visual_shapes:
308 |         self.writeVisualShape(file, v)
309 |     for c in urdfLink.urdf_collision_shapes:
310 |       self.writeCollisionShape(file, c)
311 |     file.write("\t</link>\n")
312 | 
313 |   def writeJoint(self, file, urdfJoint, precision=5):
314 |     jointTypeStr = "invalid"
315 |     if urdfJoint.joint_type == p.JOINT_REVOLUTE:
316 |       if urdfJoint.joint_upper_limit < urdfJoint.joint_lower_limit:
317 |         jointTypeStr = "continuous"
318 |       else:
319 |         jointTypeStr = "revolute"
320 |     if urdfJoint.joint_type == p.JOINT_FIXED:
321 |       jointTypeStr = "fixed"
322 |     if urdfJoint.joint_type == p.JOINT_PRISMATIC:
323 |       jointTypeStr = "prismatic"
324 |     str = '\t<joint name=\"{}\" type=\"{}\">\n'.format(urdfJoint.joint_name, jointTypeStr)
325 |     file.write(str)
326 |     str = '\t\t<parent link=\"{}\"/>\n'.format(urdfJoint.parent_name)
327 |     file.write(str)
328 |     str = '\t\t<child link=\"{}\"/>\n'.format(urdfJoint.child_name)
329 |     file.write(str)
330 | 
331 |     if urdfJoint.joint_type == p.JOINT_PRISMATIC or urdfJoint.joint_type == p.JOINT_REVOLUTE:
332 |       lowerLimit = urdfJoint.joint_lower_limit
333 |       upperLimit = urdfJoint.joint_upper_limit
334 |       str = '\t\t<limit effort=\"1000.0\" lower=\"{:.{prec}f}\" upper=\"{:.{prec}f}\" velocity=\"0.5\"/>\n'.format(
335 |         lowerLimit, upperLimit, prec=precision)
336 |       file.write(str)
337 | 
338 |     file.write("\t\t<dynamics damping=\"1.0\" friction=\"0.0001\"/>\n")
339 |     str = '\t\t<origin xyz=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\"/>\n'.format(urdfJoint.joint_origin_xyz[0],\
340 |       urdfJoint.joint_origin_xyz[1],urdfJoint.joint_origin_xyz[2], prec=precision)
341 |     str = '\t\t<origin rpy=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\" xyz=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\"/>\n'.format(urdfJoint.joint_origin_rpy[0],\
342 |       urdfJoint.joint_origin_rpy[1],urdfJoint.joint_origin_rpy[2],\
343 |       urdfJoint.joint_origin_xyz[0],\
344 |       urdfJoint.joint_origin_xyz[1],urdfJoint.joint_origin_xyz[2], prec=precision)
345 | 
346 |     file.write(str)
347 |     str = '\t\t<axis xyz=\"{:.{prec}f} {:.{prec}f} {:.{prec}f}\"/>\n'.format(urdfJoint.joint_axis_xyz[0],\
348 |       urdfJoint.joint_axis_xyz[1],urdfJoint.joint_axis_xyz[2], prec=precision)
349 |     file.write(str)
350 |     file.write("\t</joint>\n")
351 | 
352 |   def saveUrdf(self, fileName, saveVisuals=True):
353 |     file = open(fileName, "w")
354 |     file.write("<?xml version=\"1.0\"?>\n")
355 |     file.write("<robot name=\"")
356 |     file.write(self.robotName)
357 |     file.write("\">\n")
358 | 
359 |     for link in self.urdfLinks:
360 |       self.writeLink(file, link, saveVisuals)
361 | 
362 |     for joint in self.urdfJoints:
363 |       self.writeJoint(file, joint)
364 | 
365 |     file.write("</robot>\n")
366 |     file.close()
367 |
368 |
369 | #def addLink(...)
370 |
371 | def joinUrdf(self,
372 | childEditor,
373 | parentLinkIndex=0,
374 | jointPivotXYZInParent=[0, 0, 0],
375 | jointPivotRPYInParent=[0, 0, 0],
376 | jointPivotXYZInChild=[0, 0, 0],
377 | jointPivotRPYInChild=[0, 0, 0],
378 | parentPhysicsClientId=0,
379 | childPhysicsClientId=0):
380 |
381 | childLinkIndex = len(self.urdfLinks)
382 | insertJointIndex = len(self.urdfJoints)
383 |
384 | #combine all links, and add a joint
385 |
386 | for link in childEditor.urdfLinks:
387 | self.linkNameToIndex[link.link_name] = len(self.urdfLinks)
388 | self.urdfLinks.append(link)
389 | for joint in childEditor.urdfJoints:
390 | self.urdfJoints.append(joint)
391 | #add a new joint between a particular
392 |
393 | jointPivotQuatInChild = p.getQuaternionFromEuler(jointPivotRPYInChild)
394 | invJointPivotXYZInChild, invJointPivotQuatInChild = p.invertTransform(
395 | jointPivotXYZInChild, jointPivotQuatInChild)
396 |
397 | #apply this invJointPivot***InChild to all inertial, visual and collision element in the child link
398 | #inertial
399 | pos, orn = p.multiplyTransforms(self.urdfLinks[childLinkIndex].urdf_inertial.origin_xyz,
400 | p.getQuaternionFromEuler(
401 | self.urdfLinks[childLinkIndex].urdf_inertial.origin_rpy),
402 | invJointPivotXYZInChild,
403 | invJointPivotQuatInChild,
404 | physicsClientId=parentPhysicsClientId)
405 | self.urdfLinks[childLinkIndex].urdf_inertial.origin_xyz = pos
406 | self.urdfLinks[childLinkIndex].urdf_inertial.origin_rpy = p.getEulerFromQuaternion(orn)
407 | #all visual
408 | for v in self.urdfLinks[childLinkIndex].urdf_visual_shapes:
409 | pos, orn = p.multiplyTransforms(v.origin_xyz, p.getQuaternionFromEuler(v.origin_rpy),
410 | invJointPivotXYZInChild, invJointPivotQuatInChild)
411 | v.origin_xyz = pos
412 | v.origin_rpy = p.getEulerFromQuaternion(orn)
413 | #all collision
414 | for c in self.urdfLinks[childLinkIndex].urdf_collision_shapes:
415 | pos, orn = p.multiplyTransforms(c.origin_xyz, p.getQuaternionFromEuler(c.origin_rpy),
416 | invJointPivotXYZInChild, invJointPivotQuatInChild)
417 | c.origin_xyz = pos
418 | c.origin_rpy = p.getEulerFromQuaternion(orn)
419 |
420 | childLink = self.urdfLinks[childLinkIndex]
421 | parentLink = self.urdfLinks[parentLinkIndex]
422 |
423 | joint = UrdfJoint()
424 | joint.link = childLink
425 | joint.joint_name = "joint_dummy1"
426 | joint.joint_type = p.JOINT_REVOLUTE
427 | joint.joint_lower_limit = 0
428 | joint.joint_upper_limit = -1
429 | joint.parent_name = parentLink.link_name
430 | joint.child_name = childLink.link_name
431 | joint.joint_origin_xyz = jointPivotXYZInParent
432 | joint.joint_origin_rpy = jointPivotRPYInParent
433 | joint.joint_axis_xyz = [0, 0, 1]
434 |
435 | #the following commented line would crash PyBullet, it messes up the joint indexing/ordering
436 | #self.urdfJoints.append(joint)
437 |
438 | #so make sure to insert the joint in the right place, to links/joints match
439 | self.urdfJoints.insert(insertJointIndex, joint)
440 | return joint
441 |
442 | def createMultiBody(self,
443 | basePosition=[0, 0, 0],
444 | baseOrientation=[0, 0, 0, 1],
445 | physicsClientId=0):
446 | #assume link[0] is base
447 | if (len(self.urdfLinks) == 0):
448 | return -1
449 |
450 | #for i in range (len(self.urdfLinks)):
451 | # print("link", i, "=",self.urdfLinks[i].link_name)
452 |
453 | base = self.urdfLinks[0]
454 |
455 | #v.tmp_collision_shape_ids=[]
456 | baseMass = base.urdf_inertial.mass
457 | baseCollisionShapeIndex = -1
458 | baseShapeTypeArray = []
459 | baseRadiusArray = []
460 | baseHalfExtentsArray = []
461 | lengthsArray = []
462 | fileNameArray = []
463 | meshScaleArray = []
464 | basePositionsArray = []
465 | baseOrientationsArray = []
466 |
467 | for v in base.urdf_collision_shapes:
468 | shapeType = v.geom_type
469 | baseShapeTypeArray.append(shapeType)
470 | baseHalfExtentsArray.append(
471 | [0.5 * v.geom_extents[0], 0.5 * v.geom_extents[1], 0.5 * v.geom_extents[2]])
472 | baseRadiusArray.append(v.geom_radius)
473 | lengthsArray.append(v.geom_length)
474 | fileNameArray.append(v.geom_meshfilename)
475 | meshScaleArray.append(v.geom_meshscale)
476 | basePositionsArray.append(v.origin_xyz)
477 | orn = p.getQuaternionFromEuler(v.origin_rpy)
478 | baseOrientationsArray.append(orn)
479 |
480 | if (len(baseShapeTypeArray)):
481 | #print("fileNameArray=",fileNameArray)
482 | baseCollisionShapeIndex = p.createCollisionShapeArray(
483 | shapeTypes=baseShapeTypeArray,
484 | radii=baseRadiusArray,
485 | halfExtents=baseHalfExtentsArray,
486 | lengths=lengthsArray,
487 | fileNames=fileNameArray,
488 | meshScales=meshScaleArray,
489 | collisionFramePositions=basePositionsArray,
490 | collisionFrameOrientations=baseOrientationsArray,
491 | physicsClientId=physicsClientId)
492 |
493 | urdfVisuals = base.urdf_visual_shapes
494 |
495 | shapeTypes = [v.geom_type for v in urdfVisuals]
496 | halfExtents = [[ext * 0.5 for ext in v.geom_extents] for v in urdfVisuals]
497 | radii = [v.geom_radius for v in urdfVisuals]
498 | lengths = [v.geom_length for v in urdfVisuals]
499 | fileNames = [v.geom_meshfilename for v in urdfVisuals]
500 | meshScales = [v.geom_meshscale for v in urdfVisuals]
501 | rgbaColors = [v.material_rgba for v in urdfVisuals]
502 | visualFramePositions = [v.origin_xyz for v in urdfVisuals]
503 | visualFrameOrientations = [p.getQuaternionFromEuler(v.origin_rpy) for v in urdfVisuals]
504 | baseVisualShapeIndex = -1
505 |
506 | if (len(shapeTypes)):
507 | #print("len(shapeTypes)=",len(shapeTypes))
508 | #print("len(halfExtents)=",len(halfExtents))
509 | #print("len(radii)=",len(radii))
510 | #print("len(lengths)=",len(lengths))
511 | #print("len(fileNames)=",len(fileNames))
512 | #print("len(meshScales)=",len(meshScales))
513 | #print("len(rgbaColors)=",len(rgbaColors))
514 | #print("len(visualFramePositions)=",len(visualFramePositions))
515 | #print("len(visualFrameOrientations)=",len(visualFrameOrientations))
516 |
517 | baseVisualShapeIndex = p.createVisualShapeArray(
518 | shapeTypes=shapeTypes,
519 | halfExtents=halfExtents,
520 | radii=radii,
521 | lengths=lengths,
522 | fileNames=fileNames,
523 | meshScales=meshScales,
524 | rgbaColors=rgbaColors,
525 | visualFramePositions=visualFramePositions,
526 | visualFrameOrientations=visualFrameOrientations,
527 | physicsClientId=physicsClientId)
528 |
529 | linkMasses = []
530 | linkCollisionShapeIndices = []
531 | linkVisualShapeIndices = []
532 | linkPositions = []
533 | linkOrientations = []
534 |
535 | linkInertialFramePositions = []
536 | linkInertialFrameOrientations = []
537 | linkParentIndices = []
538 | linkJointTypes = []
539 | linkJointAxis = []
540 |
541 | for joint in self.urdfJoints:
542 | link = joint.link
543 | linkMass = link.urdf_inertial.mass
544 | linkCollisionShapeIndex = -1
545 | linkVisualShapeIndex = -1
546 | linkPosition = [0, 0, 0]
547 | linkOrientation = [0, 0, 0]
548 | linkInertialFramePosition = [0, 0, 0]
549 | linkInertialFrameOrientation = [0, 0, 0]
550 | linkParentIndex = self.linkNameToIndex[joint.parent_name]
551 | linkJointType = joint.joint_type
552 | linkJointAx = joint.joint_axis_xyz
553 | linkShapeTypeArray = []
554 | linkRadiusArray = []
555 | linkHalfExtentsArray = []
556 | lengthsArray = []
557 | fileNameArray = []
558 | linkMeshScaleArray = []
559 | linkPositionsArray = []
560 | linkOrientationsArray = []
561 |
562 | for v in link.urdf_collision_shapes:
563 | shapeType = v.geom_type
564 | linkShapeTypeArray.append(shapeType)
565 | linkHalfExtentsArray.append(
566 | [0.5 * v.geom_extents[0], 0.5 * v.geom_extents[1], 0.5 * v.geom_extents[2]])
567 | linkRadiusArray.append(v.geom_radius)
568 | lengthsArray.append(v.geom_length)
569 | fileNameArray.append(v.geom_meshfilename)
570 | linkMeshScaleArray.append(v.geom_meshscale)
571 | linkPositionsArray.append(v.origin_xyz)
572 | linkOrientationsArray.append(p.getQuaternionFromEuler(v.origin_rpy))
573 |
574 | if (len(linkShapeTypeArray)):
575 | linkCollisionShapeIndex = p.createCollisionShapeArray(
576 | shapeTypes=linkShapeTypeArray,
577 | radii=linkRadiusArray,
578 | halfExtents=linkHalfExtentsArray,
579 | lengths=lengthsArray,
580 | fileNames=fileNameArray,
581 | meshScales=linkMeshScaleArray,
582 | collisionFramePositions=linkPositionsArray,
583 | collisionFrameOrientations=linkOrientationsArray,
584 | physicsClientId=physicsClientId)
585 |
586 | urdfVisuals = link.urdf_visual_shapes
587 | linkVisualShapeIndex = -1
588 | shapeTypes = [v.geom_type for v in urdfVisuals]
589 | halfExtents = [[ext * 0.5 for ext in v.geom_extents] for v in urdfVisuals]
590 | radii = [v.geom_radius for v in urdfVisuals]
591 | lengths = [v.geom_length for v in urdfVisuals]
592 | fileNames = [v.geom_meshfilename for v in urdfVisuals]
593 | meshScales = [v.geom_meshscale for v in urdfVisuals]
594 | rgbaColors = [v.material_rgba for v in urdfVisuals]
595 | visualFramePositions = [v.origin_xyz for v in urdfVisuals]
596 | visualFrameOrientations = [p.getQuaternionFromEuler(v.origin_rpy) for v in urdfVisuals]
597 |
598 | if (len(shapeTypes)):
599 | #print("fileNames=", fileNames)  # debug trace, silenced like the other prints above
600 |
601 | linkVisualShapeIndex = p.createVisualShapeArray(
602 | shapeTypes=shapeTypes,
603 | halfExtents=halfExtents,
604 | radii=radii,
605 | lengths=lengths,
606 | fileNames=fileNames,
607 | meshScales=meshScales,
608 | rgbaColors=rgbaColors,
609 | visualFramePositions=visualFramePositions,
610 | visualFrameOrientations=visualFrameOrientations,
611 | physicsClientId=physicsClientId)
612 |
613 | linkMasses.append(linkMass)
614 | linkCollisionShapeIndices.append(linkCollisionShapeIndex)
615 | linkVisualShapeIndices.append(linkVisualShapeIndex)
616 | linkPositions.append(joint.joint_origin_xyz)
617 | linkOrientations.append(p.getQuaternionFromEuler(joint.joint_origin_rpy))
618 | linkInertialFramePositions.append(link.urdf_inertial.origin_xyz)
619 | linkInertialFrameOrientations.append(p.getQuaternionFromEuler(link.urdf_inertial.origin_rpy))
620 | linkParentIndices.append(linkParentIndex)
621 | linkJointTypes.append(joint.joint_type)
622 | linkJointAxis.append(joint.joint_axis_xyz)
623 | obUid = p.createMultiBody(baseMass,
624 | baseCollisionShapeIndex=baseCollisionShapeIndex,
625 | baseVisualShapeIndex=baseVisualShapeIndex,
626 | basePosition=basePosition,
627 | baseOrientation=baseOrientation,
628 | baseInertialFramePosition=base.urdf_inertial.origin_xyz,
629 | baseInertialFrameOrientation=p.getQuaternionFromEuler(base.urdf_inertial.origin_rpy),
630 | linkMasses=linkMasses,
631 | linkCollisionShapeIndices=linkCollisionShapeIndices,
632 | linkVisualShapeIndices=linkVisualShapeIndices,
633 | linkPositions=linkPositions,
634 | linkOrientations=linkOrientations,
635 | linkInertialFramePositions=linkInertialFramePositions,
636 | linkInertialFrameOrientations=linkInertialFrameOrientations,
637 | linkParentIndices=linkParentIndices,
638 | linkJointTypes=linkJointTypes,
639 | linkJointAxis=linkJointAxis,
640 | physicsClientId=physicsClientId)
641 | return obUid
642 |
643 | def __del__(self):
644 | pass
645 |
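# --- Editor's note (not part of the original file): a minimal, hedged usage
# sketch for the createMultiBody() routine above. It assumes this module
# mirrors pybullet_utils.urdfEditor, i.e. the enclosing class is UrdfEditor
# with an initializeFromBulletBody() loader; adjust names if the s2u copy
# differs.
#
#   import pybullet as p
#   import pybullet_data
#   from s2u.utils.urdfEditor import UrdfEditor
#
#   cid = p.connect(p.DIRECT)
#   p.setAdditionalSearchPath(pybullet_data.getDataPath())
#   body = p.loadURDF("cube.urdf")
#   editor = UrdfEditor()
#   editor.initializeFromBulletBody(body, physicsClientId=cid)
#   clone = editor.createMultiBody(basePosition=[0, 0, 1], physicsClientId=cid)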
--------------------------------------------------------------------------------
/src/s2u/utils/visual.py:
--------------------------------------------------------------------------------
1 | import os
2 | # os.environ["PYOPENGL_PLATFORM"] = "egl"  # uncomment for headless (EGL) rendering
3 | import subprocess
4 | import trimesh
5 | try:
6 |     import pyrender
7 | except ImportError:  # pyrender is optional; only the render helpers need it
8 |     pass
9 | import numpy as np
10 | from PIL import Image
11 | import matplotlib.pyplot as plt
12 | from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the '3d' projection)
13 | from scipy.spatial.transform import Rotation as R
14 |
15 |
16 | from s2u.utils.transform import Transform, Rotation
17 |
18 | def vector_to_rotation(vector):
19 | z = np.array(vector)
20 | z = z / np.linalg.norm(z)
21 | x = np.array([1, 0, 0])
22 | x = x - z*(x.dot(z)/z.dot(z))
23 | x = x / np.linalg.norm(x)
24 | y = np.cross(z, x)
25 | return np.c_[x, y, z]
26 |
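# Editor's note: vector_to_rotation builds an orthonormal frame whose third
# column is the normalized input, via one Gram-Schmidt step against [1, 0, 0].
# A quick sanity check (illustrative):
#   Rm = vector_to_rotation([0, 0, 2])
#   assert np.allclose(Rm, np.eye(3))  # +z input -> identity frame
# Caveat: the projection degenerates if the input is parallel to [1, 0, 0].
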
27 | #########
28 | # Render
29 | #########
30 | def get_camera_pose(radius, center=np.zeros(3), ax=0, ay=0, az=0):
31 | rotation = R.from_euler('xyz', (ax, ay, az)).as_matrix()
32 | vec = np.array([0, 0, radius])
33 | translation = rotation.dot(vec) + center
34 | camera_pose = np.zeros((4, 4))
35 | camera_pose[3, 3] = 1
36 | camera_pose[:3, :3] = rotation
37 | camera_pose[:3, 3] = translation
38 | return camera_pose
39 |
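# Editor's note: with zero angles the pose places the eye at
# center + [0, 0, radius] with identity rotation, which in the pyrender
# convention (a camera looks down its own -z axis) points it at `center`:
#   pose = get_camera_pose(2.0)              # eye at (0, 0, 2)
#   pose = get_camera_pose(2.0, ax=np.pi/4)  # orbit 45 degrees about x
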
40 | def render_mesh(mesh, camera, light, camera_pose, light_pose, renderer):
41 | r_scene = pyrender.Scene()
42 | o_mesh = pyrender.Mesh.from_trimesh(mesh, smooth=False)
43 | r_scene.add(o_mesh)
44 | r_scene.add(camera, name='camera', pose=camera_pose)
45 | r_scene.add(light, name='light', pose=light_pose)
46 | color_img, _ = renderer.render(r_scene)
47 | return Image.fromarray(color_img)
48 |
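# A hedged usage sketch (editor's addition; 'example.obj' is a hypothetical
# path). PerspectiveCamera, DirectionalLight, and OffscreenRenderer are
# standard pyrender classes:
#   mesh = trimesh.load('example.obj', force='mesh')
#   camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0)
#   light = pyrender.DirectionalLight(intensity=3.0)
#   renderer = pyrender.OffscreenRenderer(640, 480)
#   pose = get_camera_pose(radius=1.0, ax=np.pi / 6)
#   img = render_mesh(mesh, camera, light, pose, pose, renderer)  # PIL.Image
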
49 | #########
50 | # Plot
51 | #########
52 |
53 | def plot_3d_point_cloud(x,
54 | y,
55 | z,
56 | show=True,
57 | show_axis=True,
58 | in_u_sphere=False,
59 | marker='.',
60 | s=8,
61 | alpha=.8,
62 | figsize=(5, 5),
63 | elev=10,
64 | azim=240,
65 | axis=None,
66 | title=None,
67 | lim=None,
68 | *args,
69 | **kwargs):
70 |
71 | if axis is None:
72 | fig = plt.figure(figsize=figsize)
73 | ax = fig.add_subplot(111, projection='3d')
74 | else:
75 |     ax = axis
76 |     fig = axis.figure  # the parent Figure, so the return value stays a Figure
77 |
78 | if title is not None:
79 | plt.title(title)
80 |
81 | sc = ax.scatter(x, y, z, *args, marker=marker, s=s, alpha=alpha, **kwargs)
82 | ax.view_init(elev=elev, azim=azim)
83 |
84 | if lim:
85 | ax.set_xlim3d(*lim[0])
86 | ax.set_ylim3d(*lim[1])
87 | ax.set_zlim3d(*lim[2])
88 | elif in_u_sphere:
89 | ax.set_xlim3d(-0.5, 0.5)
90 | ax.set_ylim3d(-0.5, 0.5)
91 | ax.set_zlim3d(-0.5, 0.5)
92 | else:
93 | lim = (min(np.min(x), np.min(y),
94 | np.min(z)), max(np.max(x), np.max(y), np.max(z)))
95 | ax.set_xlim(1.3 * lim[0], 1.3 * lim[1])
96 | ax.set_ylim(1.3 * lim[0], 1.3 * lim[1])
97 | ax.set_zlim(1.3 * lim[0], 1.3 * lim[1])
98 | plt.tight_layout()
99 |
100 | if not show_axis:
101 | ax.set_axis_off()  # turn off the axes we drew on, not merely the current one
102 |
103 | if show:
104 | plt.show()
105 |
106 | return fig
107 |
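# Example (editor's addition): scatter a random cloud inside the unit cube
# centered at the origin.
#   pc = np.random.rand(500, 3) - 0.5  # hypothetical (N, 3) points
#   plot_3d_point_cloud(*pc.T, in_u_sphere=True, title='demo')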
108 |
109 | def plot_3d_point_cloud_dict(name_dict, lim, size=2):
110 | num_plots = len(name_dict)
111 | fig = plt.figure(figsize=(size * num_plots, size))
112 | ax = {}
113 | for i, (k, v) in enumerate(name_dict.items()):
114 | ax[k] = fig.add_subplot(1, num_plots, i + 1, projection='3d')
115 | plot_3d_point_cloud(v[0], v[1], v[2], axis=ax[k], show=False, lim=lim)
116 | ax[k].set_title(k)
117 | plt.tight_layout()
118 | return fig
119 |
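# Example (editor's addition): side-by-side clouds with shared axis limits;
# pc_start and pc_end are hypothetical (N, 3) arrays.
#   lim = [(-0.5, 0.5)] * 3
#   plot_3d_point_cloud_dict({'start': pc_start.T, 'end': pc_end.T}, lim)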
120 |
121 | def visualize_pc_screw(pc_start, pc_end, screw_axis, screw_moment):
122 | bound_max = np.maximum(pc_start.max(0), pc_end.max(0))
123 | bound_min = np.minimum(pc_start.min(0), pc_end.min(0))  # was np.maximum, which shrank the box
124 |
125 | screw_point = np.cross(screw_axis, screw_moment)
126 | t_min = (bound_min - screw_point) / screw_axis
127 | t_max = (bound_max - screw_point) / screw_axis
128 | axis_index = np.argmin(np.abs(t_max - t_min))
129 | start_point = screw_point + screw_axis * t_min[axis_index]
130 | end_point = screw_point + screw_axis * t_max[axis_index]
131 | points = np.stack((start_point, end_point), axis=1)
132 |
133 | lim = [(bound_min.min(), bound_max.max())] * 3
134 |
135 | fig = plt.figure()
136 | ax_start = fig.add_subplot(121, projection='3d')
137 | ax_start.plot(*points, color='red')
138 | plot_3d_point_cloud(*pc_start.T, lim=lim, axis=ax_start)
139 |
140 | ax_end = fig.add_subplot(122, projection='3d')
141 | ax_end.plot(*points, color='red')
142 | plot_3d_point_cloud(*pc_end.T, lim=lim, axis=ax_end)
143 |
144 | return fig
145 |
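# Editor's note on the math above: for a line in Pluecker coordinates with
# unit direction l (screw_axis) and moment m = p x l (screw_moment), the
# point on the line closest to the origin is l x m, which is what
# `screw_point` computes. The t_min/t_max step then clips the drawn segment
# to the clouds' bounding box; it implicitly assumes screw_axis has no zero
# components (a zero component makes the division non-finite). Hypothetical
# call:
#   moment = np.cross(point_on_axis, axis)  # axis a unit 3-vector
#   fig = visualize_pc_screw(pc_start, pc_end, axis, moment)
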
146 | #########
147 | # Mesh
148 | #########
149 |
150 | def as_mesh(scene_or_mesh):
151 | """
152 | Convert a possible scene to a mesh.
153 | The returned mesh has only vertex and face data.
154 | """
155 | if isinstance(scene_or_mesh, trimesh.Scene):
156 | if len(scene_or_mesh.geometry) == 0:
157 | mesh = None # empty scene
158 | else:
159 | # we lose texture information here
160 | mesh = trimesh.util.concatenate(
161 | tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
162 | for g in scene_or_mesh.geometry.values()))
163 | else:
164 | assert isinstance(scene_or_mesh, trimesh.Trimesh)
165 | mesh = scene_or_mesh
166 | return mesh
167 |
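# Example (editor's addition): trimesh returns a Scene for multi-geometry
# formats such as .glb and a Trimesh for single-geometry files; as_mesh
# normalizes both ('model.glb' is a hypothetical path):
#   mesh = as_mesh(trimesh.load('model.glb'))
#   print(mesh.vertices.shape, mesh.faces.shape)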
--------------------------------------------------------------------------------