├── srv
│   ├── Seed.srv
│   └── Reset.srv
├── requirements.txt
├── msg
│   └── AABBox.msg
├── cfg
│   ├── hw
│   │   ├── scene01.yaml
│   │   ├── scene02.yaml
│   │   ├── scene03.yaml
│   │   ├── scene04.yaml
│   │   ├── T_base_tag.txt
│   │   └── apriltag.yaml
│   ├── sim
│   │   └── mustard.yaml
│   ├── active_grasp.yaml
│   └── active_grasp.rviz
├── src
│   └── active_grasp
│       ├── __init__.py
│       ├── timer.py
│       ├── bbox.py
│       ├── baselines.py
│       ├── rviz.py
│       ├── nbv.py
│       ├── policy.py
│       ├── simulation.py
│       └── controller.py
├── test
│   ├── test_sim_scene.py
│   └── test_clustering.py
├── setup.py
├── scripts
│   ├── calibrate_roi.py
│   ├── run.py
│   ├── hw_node.py
│   ├── print_stats.ipynb
│   └── bt_sim_node.py
├── CMakeLists.txt
├── launch
│   ├── apriltag.launch
│   ├── env.launch
│   └── hw.launch
├── package.xml
├── LICENSE
├── README.md
└── .gitignore
/srv/Seed.srv:
--------------------------------------------------------------------------------
1 | uint32 seed
2 | ---
3 |
--------------------------------------------------------------------------------
/srv/Reset.srv:
--------------------------------------------------------------------------------
1 | ---
2 | active_grasp/AABBox bbox
3 |
--------------------------------------------------------------------------------
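Together, the two service definitions form the environment interface used throughout the package: `Seed` carries a `uint32 seed` request with an empty response, and `Reset` has an empty request and returns the target's bounding box as an `active_grasp/AABBox`. A minimal client sketch, mirroring how `scripts/run.py` and `src/active_grasp/controller.py` call these services (assumes a built catkin workspace and a running environment node advertising `seed` and `reset`):

```
#!/usr/bin/env python3
# Minimal sketch of calling the seed/reset services offered by the sim or hw node.
import rospy

from active_grasp.bbox import from_bbox_msg
from active_grasp.srv import Seed, Reset, ResetRequest

rospy.init_node("reset_client_example")
rospy.wait_for_service("seed")
rospy.ServiceProxy("seed", Seed)(1)  # seed the environment's random number generator
res = rospy.ServiceProxy("reset", Reset)(ResetRequest())
bbox = from_bbox_msg(res.bbox)  # AABBox with .min, .max, .center, .size
rospy.loginfo(f"Target bounding box: {bbox.min} - {bbox.max}")
```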
/requirements.txt:
--------------------------------------------------------------------------------
1 | numba
2 | pybullet==2.7.9
3 | trimesh
4 |
--------------------------------------------------------------------------------
/msg/AABBox.msg:
--------------------------------------------------------------------------------
1 | geometry_msgs/Point min
2 | geometry_msgs/Point max
3 |
--------------------------------------------------------------------------------
/cfg/hw/scene01.yaml:
--------------------------------------------------------------------------------
1 | target:
2 | min: [0.18, 0.04, 0.0]
3 | max: [0.28, 0.24, 0.09]
4 | q0: [0.013, -1.38, -0.021, -2.15, -0.05, 1.27, 0.75]
5 |
--------------------------------------------------------------------------------
/cfg/hw/scene02.yaml:
--------------------------------------------------------------------------------
1 | target:
2 | min: [0.18, 0.04, 0.0]
3 | max: [0.28, 0.24, 0.09]
4 | q0: [0.56, -1.40, 0.70, -2.17, -0.14, 1.26, 1.64]
5 |
--------------------------------------------------------------------------------
/cfg/hw/scene03.yaml:
--------------------------------------------------------------------------------
1 | target:
2 | min: [0.08, 0.13, 0.0]
3 | max: [0.24, 0.23, 0.14]
4 | q0: [-0.083, -1.172, -0.067, -1.847, -0.029, 1.128, 0.597]
5 |
--------------------------------------------------------------------------------
/cfg/sim/mustard.yaml:
--------------------------------------------------------------------------------
1 | center: [0.5, 0.2, 0.25]
2 | q: [0.0, -1.39, 0.0, -2.36, 0.0, 1.57, 0.79]
3 | objects:
4 | - object_id: ycb/006_mustard_bottle
5 | xyz: [0.0, 0.0, 0.0]
6 | rpy: [0, 0, -50]
7 | scale: 0.8
8 |
--------------------------------------------------------------------------------
/cfg/hw/scene04.yaml:
--------------------------------------------------------------------------------
1 | target:
2 | min: [0.1, 0.02, 0.0]
3 | max: [0.28, 0.24, 0.08]
4 | q0: [-0.2038460316093344, -1.5706782408931799, 0.08710045849053434, -1.9481923636386265, 0.11544798021816888, 1.1516802040659084, 0.897635449346578]
5 |
--------------------------------------------------------------------------------
/src/active_grasp/__init__.py:
--------------------------------------------------------------------------------
1 | from .policy import register
2 | from .baselines import *
3 | from .nbv import NextBestView
4 |
5 | register("initial-view", InitialView)
6 | register("top-view", TopView)
7 | register("top-trajectory", TopTrajectory)
8 | register("fixed-trajectory", FixedTrajectory)
9 | register("nbv", NextBestView)
10 |
--------------------------------------------------------------------------------
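The module doubles as a small plugin registry: `register` (defined in `policy.py`) maps a string id to a policy class, and `scripts/run.py` exposes every registered id as a command-line choice. A hedged sketch of hooking in an additional policy; `MyPolicy` and the id `my-policy` are hypothetical names, not part of the package:

```
# Hypothetical example: registering a custom policy alongside the built-in ones.
from active_grasp.policy import MultiViewPolicy, register


class MyPolicy(MultiViewPolicy):
    def update(self, img, x, q):
        self.integrate(img, x, q)  # fuse the current depth image into the TSDF
        self.done = True  # stop after the first integrated view


register("my-policy", MyPolicy)  # now selectable via `python3 scripts/run.py my-policy`
```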
/test/test_sim_scene.py:
--------------------------------------------------------------------------------
1 | from active_grasp.simulation import Simulation
2 |
3 |
4 | def main():
5 | gui = True
6 | scene_id = "random"
7 | vgn_path = "../vgn/assets/models/vgn_conv.pth"
8 | sim = Simulation(gui, scene_id, vgn_path)
9 | while True:
10 | sim.reset()
11 |
12 |
13 | if __name__ == "__main__":
14 | main()
15 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD!
2 |
3 | from distutils.core import setup
4 |
5 | from catkin_pkg.python_setup import generate_distutils_setup
6 |
7 | # Fetch values from package.xml.
8 | setup_args = generate_distutils_setup(
9 | packages=["active_grasp"],
10 | package_dir={"": "src"},
11 | )
12 |
13 | setup(**setup_args)
14 |
--------------------------------------------------------------------------------
/scripts/calibrate_roi.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import numpy as np
4 | import rospy
5 |
6 | from robot_helpers.ros import tf
7 |
8 |
9 | def main():
10 | rospy.init_node("calibrate_roi")
11 | tf.init()
12 | T_base_roi = tf.lookup("panda_link0", "tag_0")
13 | np.savetxt("cfg/hw/T_base_tag.txt", T_base_roi.as_matrix())
14 |
15 |
16 | if __name__ == "__main__":
17 | main()
18 |
--------------------------------------------------------------------------------
/cfg/hw/T_base_tag.txt:
--------------------------------------------------------------------------------
1 | 9.988132416105248712e-01 -4.436147888855850735e-02 -2.010391937646777843e-02 3.888990423782812678e-01
2 | 4.445135767602544780e-02 9.990033571245667821e-01 4.045893548226569235e-03 -1.957674877513267131e-01
3 | 1.990440112722792476e-02 -4.934738561008635525e-03 9.997897094744979674e-01 1.942989171479588273e-01
4 | 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
5 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.1)
2 | project(active_grasp)
3 |
4 | find_package(catkin REQUIRED COMPONENTS
5 | rospy
6 | geometry_msgs
7 | std_msgs
8 | message_generation
9 | )
10 |
11 | catkin_python_setup()
12 |
13 | add_message_files(
14 | FILES
15 | AABBox.msg
16 | )
17 |
18 | add_service_files(
19 | FILES
20 | Reset.srv
21 | Seed.srv
22 | )
23 |
24 | generate_messages(
25 | DEPENDENCIES
26 | geometry_msgs
27 | std_msgs
28 | )
29 |
--------------------------------------------------------------------------------
/cfg/hw/apriltag.yaml:
--------------------------------------------------------------------------------
1 | # AprilTag 3 code parameters
2 | tag_family: 'tag36h11' # options: tagStandard52h13, tagStandard41h12, tag36h11, tag25h9, tag16h5, tagCustom48h12, tagCircle21h7, tagCircle49h12
3 | tag_threads: 2 # default: 2
4 | tag_decimate: 1.0 # default: 1.0
5 | tag_blur: 0.0 # default: 0.0
6 | tag_refine_edges: 1 # default: 1
7 | tag_debug: 0 # default: 0
8 | publish_tf: true # default: false
9 |
10 | # Definitions of tags to detect
11 | standalone_tags: [{id: 0, size: 0.091}]
12 | tag_bundles: []
13 |
--------------------------------------------------------------------------------
/launch/apriltag.launch:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package format="2">
3 |   <name>active_grasp</name>
4 |   <version>0.1.0</version>
5 |   <description>TODO</description>
6 |   <maintainer>Michel Breyer</maintainer>
7 |   <license>TODO</license>
8 |
9 |
10 |   <buildtool_depend>catkin</buildtool_depend>
11 |
12 |   <build_depend>message_generation</build_depend>
13 |
14 |   <exec_depend>message_runtime</exec_depend>
15 |
16 |   <depend>panda_moveit_config</depend>
17 |   <depend>geometry_msgs</depend>
18 |   <depend>robot_helpers</depend>
19 |   <depend>rospy</depend>
20 |   <depend>std_msgs</depend>
21 |   <depend>trac_ik</depend>
22 |   <depend>vgn</depend>
23 |
24 | </package>
--------------------------------------------------------------------------------
/src/active_grasp/timer.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 |
4 | class Timer:
5 | timers = dict()
6 |
7 | def __init__(self, name):
8 | self.name = name
9 | self.timers.setdefault(name, 0)
10 |
11 | def __enter__(self):
12 | self.start()
13 | return self
14 |
15 | def __exit__(self, *exc_info):
16 | self.stop()
17 |
18 | @classmethod
19 | def reset(cls):
20 | cls.timers = dict()
21 |
22 | def start(self):
23 | self.tic = time.perf_counter()
24 |
25 | def stop(self):
26 | elapsed_time = time.perf_counter() - self.tic
27 | self.timers[self.name] += elapsed_time
28 | # with open(f"{self.name}.txt", "a") as f:
29 | # f.write(f"{elapsed_time}\n")
30 |
--------------------------------------------------------------------------------
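`Timer` accumulates elapsed wall-clock time per name in the class-level `timers` dict; `controller.py` wraps the search and grasp phases in it and later copies the totals into the per-run log. A small usage sketch (the `time.sleep` stands in for real work):

```
import time

from active_grasp.timer import Timer

Timer.reset()
with Timer("search_time"):  # accumulates into Timer.timers["search_time"]
    time.sleep(0.1)
with Timer("search_time"):  # repeated use adds to the same entry
    time.sleep(0.1)
print(Timer.timers)  # e.g. {'search_time': 0.2...}
```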
/test/test_clustering.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | import open3d as o3d
4 |
5 |
6 | def main():
7 | cloud_file = "1636465097.pcd"
8 |
9 | # eps, min_points = 0.02, 10
10 | eps, min_points = 0.01, 8
11 |
12 | cloud = o3d.io.read_point_cloud(cloud_file)
13 |
14 | labels = np.array(cloud.cluster_dbscan(eps=eps, min_points=min_points))
15 | max_label = labels.max()
16 | print(f"point cloud has {max_label + 1} clusters")
17 | colors = plt.get_cmap("tab20")(labels / (max_label if max_label > 0 else 1))
18 | colors[labels < 0] = 0
19 | cloud.colors = o3d.utility.Vector3dVector(colors[:, :3])
20 | o3d.visualization.draw_geometries([cloud])
21 |
22 |
23 | if __name__ == "__main__":
24 | main()
25 |
--------------------------------------------------------------------------------
/cfg/active_grasp.yaml:
--------------------------------------------------------------------------------
1 | bt_sim:
2 | gui: False
3 | gripper_force: 10
4 | scene: random
5 |
6 | hw:
7 | roi_calib_file: $(find active_grasp)/cfg/hw/T_base_tag.txt
8 | scene_file: $(find active_grasp)/cfg/hw/scene01.yaml
9 |
10 | grasp_controller:
11 | base_frame_id: panda_link0
12 | ee_grasp_offset: [0.0, 0.0, -0.383, 0.924, 0.0, 0.0, 0.065] # offset to panda_link8
13 | control_rate: 30
14 | linear_vel: 0.05
15 | camera:
16 | frame_id: camera_depth_optical_frame
17 | info_topic: /camera/depth/camera_info
18 | depth_topic: /camera/depth/image_rect_raw
19 | min_z_dist: 0.3
20 |
21 | cartesian_velocity_controller:
22 | topic: /cartesian_velocity_controller/set_command
23 |
24 | vgn:
25 | model: $(find vgn)/assets/models/vgn_conv.pth
26 | finger_depth: 0.05
27 | qual_threshold: 0.9
28 |
29 | policy:
30 | rate: 4
31 | window_size: 12
32 |
33 | nbv_grasp:
34 | max_views: 80
35 | min_gain: 10
36 | downsample: 10 # 10/20 for sim/hw respectively
37 |
--------------------------------------------------------------------------------
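Note how the blocks map onto parameter names in the code: the `grasp_controller` block is loaded as the private parameters of the node named `grasp_controller` (which is how `policy.py` and `controller.py` read `~base_frame_id` or `~camera/min_z_dist`), while `vgn`, `policy`, and `nbv_grasp` are read with relative names. The `$(find ...)` entries are roslaunch substitution args, so the file is meant to be loaded through a launch file rather than a plain `rosparam load`. A short sketch of the corresponding reads, assuming the parameters are already on the server:

```
import rospy

rospy.init_node("grasp_controller")  # run.py uses this node name

# Private (~) names resolve under /grasp_controller/..., matching the block above.
base_frame = rospy.get_param("~base_frame_id")
min_z_dist = rospy.get_param("~camera/min_z_dist")

# The remaining blocks are read with relative names.
qual_threshold = rospy.get_param("vgn/qual_threshold")
max_views = rospy.get_param("nbv_grasp/max_views")
```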
/src/active_grasp/bbox.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | import numpy as np
3 |
4 | import active_grasp.msg
5 | from robot_helpers.ros.conversions import to_point_msg, from_point_msg
6 |
7 |
8 | class AABBox:
9 | def __init__(self, bbox_min, bbox_max):
10 | self.min = np.asarray(bbox_min)
11 | self.max = np.asarray(bbox_max)
12 | self.center = 0.5 * (self.min + self.max)
13 | self.size = self.max - self.min
14 |
15 | @property
16 | def corners(self):
17 | return list(itertools.product(*np.vstack((self.min, self.max)).T))
18 |
19 | def is_inside(self, p):
20 | return np.all(p > self.min) and np.all(p < self.max)
21 |
22 |
23 | def from_bbox_msg(msg):
24 | aabb_min = from_point_msg(msg.min)
25 | aabb_max = from_point_msg(msg.max)
26 | return AABBox(aabb_min, aabb_max)
27 |
28 |
29 | def to_bbox_msg(bbox):
30 | msg = active_grasp.msg.AABBox()
31 | msg.min = to_point_msg(bbox.min)
32 | msg.max = to_point_msg(bbox.max)
33 | return msg
34 |
--------------------------------------------------------------------------------
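`AABBox` keeps the two extreme corners as numpy arrays and derives `center` and `size`; `corners` enumerates all eight vertices and `is_inside` tests with strict inequalities. A quick usage sketch with arbitrary example coordinates, including the round trip through the ROS message used by the `Reset` service:

```
import numpy as np

from active_grasp.bbox import AABBox, from_bbox_msg, to_bbox_msg

box = AABBox([0.0, 0.0, 0.0], [0.1, 0.2, 0.3])
assert len(box.corners) == 8
assert box.is_inside(np.r_[0.05, 0.10, 0.15])

msg = to_bbox_msg(box)  # active_grasp/AABBox message
box2 = from_bbox_msg(msg)
assert np.allclose(box.min, box2.min) and np.allclose(box.max, box2.max)
```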
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Michel Breyer, Autonomous Systems Lab, ETH Zurich
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/src/active_grasp/baselines.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from .policy import SingleViewPolicy, MultiViewPolicy, compute_error
4 |
5 |
6 | class InitialView(SingleViewPolicy):
7 | def update(self, img, x, q):
8 | self.x_d = x
9 | super().update(img, x, q)
10 |
11 |
12 | class TopView(SingleViewPolicy):
13 | def activate(self, bbox, view_sphere):
14 | super().activate(bbox, view_sphere)
15 | self.x_d = self.view_sphere.get_view(0.0, 0.0)
16 | self.done = False if self.solve_cam_ik(self.q0, self.x_d) else True
17 |
18 |
19 | class TopTrajectory(MultiViewPolicy):
20 | def activate(self, bbox, view_sphere):
21 | super().activate(bbox, view_sphere)
22 | self.x_d = self.view_sphere.get_view(0.0, 0.0)
23 | self.done = False if self.solve_cam_ik(self.q0, self.x_d) else True
24 |
25 | def update(self, img, x, q):
26 | self.integrate(img, x, q)
27 | linear, _ = compute_error(self.x_d, x)
28 | if np.linalg.norm(linear) < 0.02:
29 | self.done = True
30 |
31 |
32 | class FixedTrajectory(MultiViewPolicy):
33 | def activate(self, bbox, view_sphere):
34 | pass
35 |
36 | def update(self, img, x, q):
37 | pass
38 |
--------------------------------------------------------------------------------
/launch/env.launch:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Closed-Loop Next-Best-View Planning for Target-Driven Grasping
2 |
3 | This repository contains the implementation of our IROS 2022 submission, _"Closed-Loop Next-Best-View Planning for Target-Driven Grasping"_. [[Paper](http://arxiv.org/abs/2207.10543)][[Video](https://youtu.be/67W_VbSsAMQ)]
4 |
5 | ## Setup
6 |
7 | The experiments were conducted with a Franka Emika Panda arm and a RealSense D435 attached to the wrist of the robot. The code was developed and tested on Ubuntu 20.04 with ROS Noetic. It depends on the following external packages:
8 |
9 | - [MoveIt](https://github.com/ros-planning/panda_moveit_config)
10 | - [robot_helpers](https://github.com/mbreyer/robot_helpers)
11 | - [TRAC-IK](http://wiki.ros.org/trac_ik)
12 | - [VGN](https://github.com/ethz-asl/vgn/tree/devel)
13 | - franka_ros and realsense2_camera (only required for hardware experiments)
14 |
15 | Additional Python dependencies can be installed with
16 |
17 | ```
18 | pip install -r requirements.txt
19 | ```
20 |
21 | Run `catkin build active_grasp` to build the package.
22 |
23 | Finally, download the [assets folder](https://drive.google.com/file/d/1xJF9Cd82ybCH3nCdXtQRktTr4swDcNFD/view) and extract it inside the repository.
24 |
25 | ## Experiments
26 |
27 | Start a roscore.
28 |
29 | ```
30 | roscore
31 | ```
32 |
33 | To run simulation experiments:
34 |
35 | ```
36 | roslaunch active_grasp env.launch sim:=true
37 | python3 scripts/run.py nbv
38 | ```
39 |
40 | To run real-world experiments:
41 |
42 | ```
43 | roslaunch active_grasp hw.launch
44 | roslaunch active_grasp env.launch sim:=false
45 | python3 scripts/run.py nbv --wait-for-input
46 | ```
47 |
--------------------------------------------------------------------------------
/launch/hw.launch:
--------------------------------------------------------------------------------
25 | [franka_state_controller/joint_states, franka_gripper/joint_states]
--------------------------------------------------------------------------------
/scripts/run.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import argparse
4 | from datetime import datetime
5 | import pandas as pd
6 | from pathlib import Path
7 | import rospy
8 | from tqdm import tqdm
9 |
10 | from active_grasp.controller import *
11 | from active_grasp.policy import make, registry
12 | from active_grasp.srv import Seed
13 | from robot_helpers.ros import tf
14 |
15 |
16 | def main():
17 | rospy.init_node("grasp_controller")
18 | tf.init()
19 |
20 | parser = create_parser()
21 | args = parser.parse_args()
22 |
23 | policy = make(args.policy)
24 | controller = GraspController(policy)
25 | logger = Logger(args)
26 |
27 | seed_simulation(args.seed)
28 |     rospy.sleep(1.0)  # Prevents a rare race condition
29 |
30 | for _ in tqdm(range(args.runs), disable=args.wait_for_input):
31 | if args.wait_for_input:
32 | controller.gripper.move(0.08)
33 | controller.switch_to_joint_trajectory_control()
34 | controller.moveit.goto("ready", velocity_scaling=0.4)
35 | i = input("Run policy? [y/n] ")
36 | if i != "y":
37 | exit()
38 | rospy.loginfo("Running policy ...")
39 | info = controller.run()
40 | logger.log_run(info)
41 |
42 |
43 | def create_parser():
44 | parser = argparse.ArgumentParser()
45 | parser.add_argument("policy", type=str, choices=registry.keys())
46 | parser.add_argument("--runs", type=int, default=10)
47 | parser.add_argument("--wait-for-input", action="store_true")
48 | parser.add_argument("--logdir", type=Path, default="logs")
49 | parser.add_argument("--seed", type=int, default=1)
50 | return parser
51 |
52 |
53 | class Logger:
54 | def __init__(self, args):
55 | args.logdir.mkdir(parents=True, exist_ok=True)
56 | stamp = datetime.now().strftime("%y%m%d-%H%M%S")
57 | name = "{}_policy={},seed={}.csv".format(
58 | stamp,
59 | args.policy,
60 | args.seed,
61 | )
62 | self.path = args.logdir / name
63 |
64 | def log_run(self, info):
65 | df = pd.DataFrame.from_records([info])
66 | df.to_csv(self.path, mode="a", header=not self.path.exists(), index=False)
67 |
68 |
69 | def seed_simulation(seed):
70 | rospy.ServiceProxy("seed", Seed)(seed)
71 | rospy.sleep(1.0)
72 |
73 |
74 | if __name__ == "__main__":
75 | main()
76 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | .vscode/
132 | assets/
133 | logs/
134 |
--------------------------------------------------------------------------------
/scripts/hw_node.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from controller_manager_msgs.srv import *
4 | import geometry_msgs.msg
5 | import numpy as np
6 | import rospy
7 |
8 | from active_grasp.bbox import AABBox, to_bbox_msg
9 | from active_grasp.rviz import Visualizer
10 | from active_grasp.srv import *
11 | from robot_helpers.io import load_yaml
12 | from robot_helpers.ros.conversions import to_pose_msg
13 | from robot_helpers.ros.moveit import MoveItClient
14 | from robot_helpers.ros.panda import PandaGripperClient
15 | from robot_helpers.spatial import Transform
16 |
17 |
18 | class HwNode:
19 | def __init__(self):
20 | self.load_parameters()
21 | self.init_robot_connection()
22 | self.init_visualizer()
23 | self.advertise_services()
24 | rospy.spin()
25 |
26 | def load_parameters(self):
27 | self.cfg = rospy.get_param("hw")
28 | self.T_base_roi = Transform.from_matrix(np.loadtxt(self.cfg["roi_calib_file"]))
29 |
30 | def init_robot_connection(self):
31 | self.gripper = PandaGripperClient()
32 | self.switch_controller = rospy.ServiceProxy(
33 | "controller_manager/switch_controller", SwitchController
34 | )
35 | self.moveit = MoveItClient("panda_arm")
36 | rospy.Timer(rospy.Duration(1), self.publish_table_co)
37 |
38 | def init_visualizer(self):
39 | self.vis = Visualizer()
40 | rospy.Timer(rospy.Duration(1), self.draw_bbox)
41 |
42 | def advertise_services(self):
43 | rospy.Service("seed", Seed, self.seed)
44 | rospy.Service("reset", Reset, self.reset)
45 |
46 | def seed(self, req):
47 | self.rng = np.random.default_rng(req.seed)
48 | rospy.loginfo(f"Seeded the rng with {req.seed}.")
49 | return SeedResponse()
50 |
51 | def reset(self, req):
52 | q0, bbox = self.load_config()
53 |
54 | # Move to the initial configuration
55 | self.switch_to_joint_trajectory_controller()
56 |         q0 = np.asarray(q0) + self.rng.uniform(-0.069, 0.069, 7)  # elementwise noise; q0 is a plain list from the YAML file
57 | self.moveit.goto(q0, velocity_scaling=0.4)
58 | self.gripper.move(0.08)
59 |
60 | return ResetResponse(to_bbox_msg(bbox))
61 |
62 | def load_config(self):
63 | scene_config = load_yaml(self.cfg["scene_file"])
64 | q0 = scene_config["q0"]
65 | bbox_min = self.T_base_roi.apply(scene_config["target"]["min"])
66 | bbox_max = self.T_base_roi.apply(scene_config["target"]["max"])
67 | bbox = AABBox(bbox_min, bbox_max)
68 | return q0, bbox
69 |
70 | def switch_to_joint_trajectory_controller(self):
71 | req = SwitchControllerRequest()
72 | req.start_controllers = ["position_joint_trajectory_controller"]
73 | req.stop_controllers = ["cartesian_velocity_controller"]
74 | req.strictness = 1
75 | self.switch_controller(req)
76 |
77 | def draw_bbox(self, event):
78 | _, bbox = self.load_config()
79 | self.vis.bbox("panda_link0", bbox)
80 |
81 | def publish_table_co(self, event):
82 | msg = geometry_msgs.msg.PoseStamped()
83 | msg.header.frame_id = "panda_link0"
84 | msg.pose = to_pose_msg(self.T_base_roi * Transform.t_[0.15, 0.15, 0.005])
85 | self.moveit.scene.add_box("table", msg, size=(0.8, 0.8, 0.01))
86 |
87 |
88 | def main():
89 | rospy.init_node("hw")
90 | HwNode()
91 |
92 |
93 | if __name__ == "__main__":
94 | main()
95 |
--------------------------------------------------------------------------------
/scripts/print_stats.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import pandas as pd"
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "Generate table entry of a single experiment."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "metadata": {},
23 | "outputs": [],
24 | "source": [
25 | "logfile = \"\"\n",
26 | "\n",
27 | "df = pd.read_csv(logfile)\n",
28 | "df = df[df.result != \"no_motion_plan_found\"]\n",
29 | "\n",
30 | "n_attempts = len(df.index)\n",
31 | "n_succeeded = (df.result == \"succeeded\").sum()\n",
32 | "n_failed = (df.result == \"failed\").sum()\n",
33 | "n_aborted = (df.result == \"aborted\").sum()\n",
34 | "\n",
35 | "views_mean = df.view_count.mean()\n",
36 | "views_std = df.view_count.std()\n",
37 | "\n",
38 | "search_time_mean = df.search_time.mean()\n",
39 | "search_time_std = df.search_time.std()\n",
40 | "\n",
41 | "total_time_mean = (df.search_time + df.grasp_time).mean()\n",
42 | "total_time_std = (df.search_time + df.grasp_time).std()\n",
43 | "\n",
44 | "print(f\"${(n_succeeded / n_attempts) * 100:.0f}$ & ${(n_failed / n_attempts) * 100:.0f}$ & ${(n_aborted / n_attempts) * 100:.0f}$ & ${views_mean:.0f} \\pm {views_std:.0f}$ & ${search_time_mean:.1f} \\pm {search_time_std:.1f}$ & ${total_time_mean:.1f} \\pm {total_time_std:.1f}$\")\n",
45 | "# print(f\"${n_succeeded}/{n_attempts}$ & ${n_failed}/{n_attempts}$ & ${n_aborted}/{n_attempts}$ & ${views_mean:.0f} \\pm {views_std:.0f}$ & ${search_time_mean:.1f} \\pm {search_time_std:.1f}$ & ${total_time_mean:.1f} \\pm {total_time_std:.1f}$\")"
46 | ]
47 | },
48 | {
49 | "cell_type": "markdown",
50 | "metadata": {},
51 | "source": [
52 | "Compute timings"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": null,
58 | "metadata": {},
59 | "outputs": [],
60 | "source": [
61 | "view_generation = df.view_generation / df.view_count\n",
62 | "state_update = df.state_update / df.view_count\n",
63 | "tsdf_update = df.tsdf_integration / df.view_count\n",
64 | "grasp_prediction = df.grasp_prediction / df.view_count\n",
65 | "grasp_selection = df.grasp_selection / df.view_count\n",
66 | "ig_computation = df.ig_computation / df.view_count\n",
67 | "\n",
68 | "print(f\"View generation: {view_generation.mean():.3f}\")\n",
69 | "print(f\"State update: {state_update.mean():.3f}\")\n",
70 | "print(f\" TSDF update: {tsdf_update.mean():.3f}\")\n",
71 | "print(f\" Grasp prediction: {grasp_prediction.mean():.3f}\")\n",
72 | "print(f\" Grasp selection: {grasp_selection.mean():.3f}\")\n",
73 | "print(f\"IG computation: {ig_computation.mean():.3f}\")\n",
74 | "print(\"---\")\n",
75 | "print(f\"Total time: {view_generation.mean() + state_update.mean() + ig_computation.mean():.3f}\")"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "metadata": {},
82 | "outputs": [],
83 | "source": []
84 | }
85 | ],
86 | "metadata": {
87 | "interpreter": {
88 | "hash": "fb16c2a7860a3d6783f021a002ede0627d3977ca7b794dfd7ea4f613fe21e5c4"
89 | },
90 | "kernelspec": {
91 | "display_name": "Python 3.8.10 64-bit ('noetic': venv)",
92 | "name": "python3"
93 | },
94 | "language_info": {
95 | "codemirror_mode": {
96 | "name": "ipython",
97 | "version": 3
98 | },
99 | "file_extension": ".py",
100 | "mimetype": "text/x-python",
101 | "name": "python",
102 | "nbconvert_exporter": "python",
103 | "pygments_lexer": "ipython3",
104 | "version": "3.8.10"
105 | },
106 | "orig_nbformat": 4
107 | },
108 | "nbformat": 4,
109 | "nbformat_minor": 2
110 | }
111 |
--------------------------------------------------------------------------------
/src/active_grasp/rviz.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from robot_helpers.ros.rviz import *
4 | from robot_helpers.spatial import Transform
5 | import vgn.rviz
6 | from vgn.utils import *
7 |
8 |
9 | cm = lambda s: tuple([float(1 - s), float(s), float(0)])
10 | red = [1.0, 0.0, 0.0]
11 | blue = [0, 0.6, 1.0]
12 | grey = [0.9, 0.9, 0.9]
13 |
14 |
15 | class Visualizer(vgn.rviz.Visualizer):
16 | def clear_ig_views(self):
17 | markers = [Marker(action=Marker.DELETE, ns="ig_views", id=i) for i in range(24)]
18 | self.draw(markers)
19 |
20 | def bbox(self, frame, bbox):
21 | pose = Transform.identity()
22 | scale = [0.004, 0.0, 0.0]
23 | color = red
24 | lines = box_lines(bbox.min, bbox.max)
25 | marker = create_line_list_marker(frame, pose, scale, color, lines, "bbox")
26 | self.draw([marker])
27 |
28 | def ig_views(self, frame, intrinsic, views, values):
29 | vmin, vmax = min(values), max(values)
30 | scale = [0.002, 0.0, 0.0]
31 | near, far = 0.0, 0.02
32 | markers = []
33 | for i, (view, value) in enumerate(zip(views, values)):
34 | color = cm((value - vmin) / (vmax - vmin))
35 | marker = create_view_marker(
36 | frame,
37 | view,
38 | scale,
39 | color,
40 | intrinsic,
41 | near,
42 | far,
43 | ns="ig_views",
44 | id=i,
45 | )
46 | markers.append(marker)
47 | self.draw(markers)
48 |
49 | def path(self, frame, intrinsic, views):
50 | markers = []
51 | points = [p.translation for p in views]
52 |
53 | spheres = create_sphere_list_marker(
54 | frame,
55 | Transform.identity(),
56 | np.full(3, 0.008),
57 | blue,
58 | points,
59 | "path",
60 | 0,
61 | )
62 | markers.append(spheres)
63 |
64 | if len(views) > 1:
65 | lines = create_line_strip_marker(
66 | frame,
67 | Transform.identity(),
68 | [0.002, 0.0, 0.0],
69 | blue,
70 | points,
71 | "path",
72 | 1,
73 | )
74 | markers.append(lines)
75 |
76 | for i, view in enumerate(views[::4]):
77 | markers.append(
78 | create_view_marker(
79 | frame,
80 | view,
81 | [0.002, 0.0, 0.0],
82 | blue,
83 | intrinsic,
84 | 0.0,
85 | 0.02,
86 | ns="views",
87 | id=i,
88 | )
89 | )
90 |
91 | self.draw(markers)
92 |
93 | def point(self, frame, position):
94 | marker = create_sphere_marker(
95 | frame,
96 | Transform.from_translation(position),
97 | np.full(3, 0.01),
98 | [0, 0, 1],
99 | "point",
100 | )
101 | self.draw([marker])
102 |
103 | def rays(self, frame, origin, directions, t_max=1.0):
104 | lines = [[origin, origin + t_max * direction] for direction in directions]
105 | marker = create_line_list_marker(
106 | frame,
107 | Transform.identity(),
108 | [0.001, 0.0, 0.0],
109 | grey,
110 | lines,
111 | "rays",
112 | )
113 | self.draw([marker])
114 |
115 |
116 | def create_view_marker(frame, pose, scale, color, intrinsic, near, far, ns="", id=0):
117 | marker = create_marker(Marker.LINE_LIST, frame, pose, scale, color, ns, id)
118 | x_n = near * intrinsic.width / (2.0 * intrinsic.fx)
119 | y_n = near * intrinsic.height / (2.0 * intrinsic.fy)
120 | z_n = near
121 | x_f = far * intrinsic.width / (2.0 * intrinsic.fx)
122 | y_f = far * intrinsic.height / (2.0 * intrinsic.fy)
123 | z_f = far
124 | points = [
125 | [x_n, y_n, z_n],
126 | [-x_n, y_n, z_n],
127 | [-x_n, y_n, z_n],
128 | [-x_n, -y_n, z_n],
129 | [-x_n, -y_n, z_n],
130 | [x_n, -y_n, z_n],
131 | [x_n, -y_n, z_n],
132 | [x_n, y_n, z_n],
133 | [x_f, y_f, z_f],
134 | [-x_f, y_f, z_f],
135 | [-x_f, y_f, z_f],
136 | [-x_f, -y_f, z_f],
137 | [-x_f, -y_f, z_f],
138 | [x_f, -y_f, z_f],
139 | [x_f, -y_f, z_f],
140 | [x_f, y_f, z_f],
141 | [x_n, y_n, z_n],
142 | [x_f, y_f, z_f],
143 | [-x_n, y_n, z_n],
144 | [-x_f, y_f, z_f],
145 | [-x_n, -y_n, z_n],
146 | [-x_f, -y_f, z_f],
147 | [x_n, -y_n, z_n],
148 | [x_f, -y_f, z_f],
149 | ]
150 | marker.points = [to_point_msg(p) for p in points]
151 | return marker
152 |
--------------------------------------------------------------------------------
/src/active_grasp/nbv.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | from numba import jit
3 | import numpy as np
4 | import rospy
5 |
6 | from .policy import MultiViewPolicy
7 | from .timer import Timer
8 |
9 |
10 | @jit(nopython=True)
11 | def get_voxel_at(voxel_size, p):
12 | index = (p / voxel_size).astype(np.int64)
13 | return index if (index >= 0).all() and (index < 40).all() else None
14 |
15 |
16 | # Note that the jit compilation takes some time the first time raycast is called
17 | @jit(nopython=True)
18 | def raycast(
19 | voxel_size,
20 | tsdf_grid,
21 | ori,
22 | pos,
23 | fx,
24 | fy,
25 | cx,
26 | cy,
27 | u_min,
28 | u_max,
29 | v_min,
30 | v_max,
31 | t_min,
32 | t_max,
33 | t_step,
34 | ):
35 | voxel_indices = []
36 | for u in range(u_min, u_max):
37 | for v in range(v_min, v_max):
38 | direction = np.asarray([(u - cx) / fx, (v - cy) / fy, 1.0])
39 | direction = ori @ (direction / np.linalg.norm(direction))
40 | t, tsdf_prev = t_min, -1.0
41 | while t < t_max:
42 | p = pos + t * direction
43 | t += t_step
44 | index = get_voxel_at(voxel_size, p)
45 | if index is not None:
46 | i, j, k = index
47 | tsdf = tsdf_grid[i, j, k]
48 | if tsdf * tsdf_prev < 0 and tsdf_prev > -1: # crossed a surface
49 | break
50 | voxel_indices.append(index)
51 | tsdf_prev = tsdf
52 | return voxel_indices
53 |
54 |
55 | class NextBestView(MultiViewPolicy):
56 | def __init__(self):
57 | super().__init__()
58 | self.min_z_dist = rospy.get_param("~camera/min_z_dist")
59 | self.max_views = rospy.get_param("nbv_grasp/max_views")
60 | self.min_gain = rospy.get_param("nbv_grasp/min_gain")
61 | self.downsample = rospy.get_param("nbv_grasp/downsample")
62 | self.compile()
63 |
64 | def compile(self):
65 | # Trigger the JIT compilation
66 | raycast(
67 | 1.0,
68 | np.zeros((40, 40, 40), dtype=np.float32),
69 | np.eye(3),
70 | np.zeros(3),
71 | 1.0,
72 | 1.0,
73 | 1.0,
74 | 1.0,
75 | 0,
76 | 1,
77 | 0,
78 | 1,
79 | 0.0,
80 | 1.0,
81 | 0.1,
82 | )
83 |
84 | def activate(self, bbox, view_sphere):
85 | super().activate(bbox, view_sphere)
86 |
87 | def update(self, img, x, q):
88 | if len(self.views) > self.max_views or self.best_grasp_prediction_is_stable():
89 | self.done = True
90 | else:
91 | with Timer("state_update"):
92 | self.integrate(img, x, q)
93 | with Timer("view_generation"):
94 | views = self.generate_views(q)
95 | with Timer("ig_computation"):
96 | gains = [self.ig_fn(v, self.downsample) for v in views]
97 | with Timer("cost_computation"):
98 | costs = [self.cost_fn(v) for v in views]
99 | utilities = gains / np.sum(gains) - costs / np.sum(costs)
100 | self.vis.ig_views(self.base_frame, self.intrinsic, views, utilities)
101 | i = np.argmax(utilities)
102 | nbv, gain = views[i], gains[i]
103 |
104 | if gain < self.min_gain and len(self.views) > self.T:
105 | self.done = True
106 |
107 | self.x_d = nbv
108 |
109 | def best_grasp_prediction_is_stable(self):
110 | if self.best_grasp:
111 | t = (self.T_task_base * self.best_grasp.pose).translation
112 | i, j, k = (t / self.tsdf.voxel_size).astype(int)
113 | qs = self.qual_hist[:, i, j, k]
114 | if np.count_nonzero(qs) == self.T and np.mean(qs) > 0.9:
115 | return True
116 | return False
117 |
118 | def generate_views(self, q):
119 | thetas = np.deg2rad([15, 30])
120 | phis = np.arange(8) * np.deg2rad(45)
121 | view_candidates = []
122 | for theta, phi in itertools.product(thetas, phis):
123 | view = self.view_sphere.get_view(theta, phi)
124 | if self.solve_cam_ik(q, view):
125 | view_candidates.append(view)
126 | return view_candidates
127 |
128 | def ig_fn(self, view, downsample):
129 | tsdf_grid, voxel_size = self.tsdf.get_grid(), self.tsdf.voxel_size
130 | tsdf_grid = -1.0 + 2.0 * tsdf_grid # Open3D maps tsdf to [0,1]
131 |
132 | # Downsample the sensor resolution
133 | fx = self.intrinsic.fx / downsample
134 | fy = self.intrinsic.fy / downsample
135 | cx = self.intrinsic.cx / downsample
136 | cy = self.intrinsic.cy / downsample
137 |
138 | # Project bbox onto the image plane to get better bounds
139 | T_cam_base = view.inv()
140 | corners = np.array([T_cam_base.apply(p) for p in self.bbox.corners]).T
141 | u = (fx * corners[0] / corners[2] + cx).round().astype(int)
142 | v = (fy * corners[1] / corners[2] + cy).round().astype(int)
143 | u_min, u_max = u.min(), u.max()
144 | v_min, v_max = v.min(), v.max()
145 |
146 | t_min = 0.0 # self.min_z_dist
147 | t_max = corners[2].max() # This bound might be a bit too short
148 | t_step = np.sqrt(3) * voxel_size # Could be replaced with line rasterization
149 |
150 | # Cast rays from the camera view (we'll work in the task frame from now on)
151 | view = self.T_task_base * view
152 | ori, pos = view.rotation.as_matrix(), view.translation
153 |
154 | voxel_indices = raycast(
155 | voxel_size,
156 | tsdf_grid,
157 | ori,
158 | pos,
159 | fx,
160 | fy,
161 | cx,
162 | cy,
163 | u_min,
164 | u_max,
165 | v_min,
166 | v_max,
167 | t_min,
168 | t_max,
169 | t_step,
170 | )
171 |
172 | # Count rear side voxels within the bounding box
173 | indices = np.unique(voxel_indices, axis=0)
174 | bbox_min = self.T_task_base.apply(self.bbox.min) / voxel_size
175 | bbox_max = self.T_task_base.apply(self.bbox.max) / voxel_size
176 | mask = np.array([((i > bbox_min) & (i < bbox_max)).all() for i in indices])
177 | i, j, k = indices[mask].T
178 | tsdfs = tsdf_grid[i, j, k]
179 | ig = np.logical_and(tsdfs > -1.0, tsdfs < 0.0).sum()
180 |
181 | return ig
182 |
183 | def cost_fn(self, view):
184 | return 1.0
185 |
--------------------------------------------------------------------------------
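For reference, the view ranking in `NextBestView.update` combines normalized gains and normalized costs; since `cost_fn` currently returns a constant, the ranking reduces to the normalized information gain. A toy example of that computation with made-up numbers:

```
import numpy as np

gains = np.array([12.0, 30.0, 18.0])  # information gain per candidate view
costs = np.array([1.0, 1.0, 1.0])  # cost_fn returns 1.0 for every view
utilities = gains / gains.sum() - costs / costs.sum()
print(utilities)  # [-0.133  0.167 -0.033]
print(np.argmax(utilities))  # 1, i.e. the second candidate becomes the next best view
```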
/src/active_grasp/policy.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from sensor_msgs.msg import CameraInfo
3 | from pathlib import Path
4 | import rospy
5 | from trac_ik_python.trac_ik import IK
6 |
7 | from robot_helpers.ros import tf
8 | from robot_helpers.ros.conversions import *
9 | from vgn.detection import *
10 | from vgn.perception import UniformTSDFVolume
11 |
12 | from .timer import Timer
13 | from .rviz import Visualizer
14 |
15 |
16 | def solve_ik(q0, pose, solver):
17 | x, y, z = pose.translation
18 | qx, qy, qz, qw = pose.rotation.as_quat()
19 | return solver.get_ik(q0, x, y, z, qx, qy, qz, qw)
20 |
21 |
22 | class Policy:
23 | def __init__(self):
24 | self.load_parameters()
25 | self.init_ik_solver()
26 | self.init_visualizer()
27 |
28 | def load_parameters(self):
29 | self.base_frame = rospy.get_param("~base_frame_id")
30 | self.T_grasp_ee = Transform.from_list(rospy.get_param("~ee_grasp_offset")).inv()
31 | self.cam_frame = rospy.get_param("~camera/frame_id")
32 | self.task_frame = "task"
33 | info_topic = rospy.get_param("~camera/info_topic")
34 | msg = rospy.wait_for_message(info_topic, CameraInfo, rospy.Duration(2.0))
35 | self.intrinsic = from_camera_info_msg(msg)
36 | self.qual_thresh = rospy.get_param("vgn/qual_threshold")
37 |
38 | def init_ik_solver(self):
39 | self.q0 = [0.0, -0.79, 0.0, -2.356, 0.0, 1.57, 0.79]
40 | self.cam_ik_solver = IK(self.base_frame, self.cam_frame)
41 | self.ee_ik_solver = IK(self.base_frame, "panda_link8")
42 |
43 | def solve_cam_ik(self, q0, view):
44 | return solve_ik(q0, view, self.cam_ik_solver)
45 |
46 | def solve_ee_ik(self, q0, pose):
47 | return solve_ik(q0, pose, self.ee_ik_solver)
48 |
49 | def init_visualizer(self):
50 | self.vis = Visualizer()
51 |
52 | def activate(self, bbox, view_sphere):
53 | self.vis.clear()
54 |
55 | self.bbox = bbox
56 | self.view_sphere = view_sphere
57 |
58 | self.calibrate_task_frame()
59 | self.vis.bbox(self.base_frame, self.bbox)
60 |
61 | self.tsdf = UniformTSDFVolume(0.3, 40)
62 | self.vgn = VGN(Path(rospy.get_param("vgn/model")))
63 |
64 | self.views = []
65 | self.best_grasp = None
66 | self.x_d = None
67 | self.done = False
68 | self.info = {}
69 |
70 | def calibrate_task_frame(self):
71 | xyz = np.r_[self.bbox.center[:2] - 0.15, self.bbox.min[2] - 0.05]
72 | self.T_base_task = Transform.from_translation(xyz)
73 | self.T_task_base = self.T_base_task.inv()
74 | tf.broadcast(self.T_base_task, self.base_frame, self.task_frame)
75 | rospy.sleep(1.0) # Wait for tf tree to be updated
76 | self.vis.roi(self.task_frame, 0.3)
77 |
78 | def update(self, img, x, q):
79 | raise NotImplementedError
80 |
81 | def filter_grasps(self, out, q):
82 | grasps, qualities = select_local_maxima(
83 | self.tsdf.voxel_size,
84 | out,
85 | self.qual_thresh,
86 | )
87 | filtered_grasps, filtered_qualities = [], []
88 | for grasp, quality in zip(grasps, qualities):
89 | pose = self.T_base_task * grasp.pose
90 | tip = pose.rotation.apply([0, 0, 0.05]) + pose.translation
91 | if self.bbox.is_inside(tip):
92 | grasp.pose = pose
93 | q_grasp = self.solve_ee_ik(q, pose * self.T_grasp_ee)
94 | if q_grasp is not None:
95 | filtered_grasps.append(grasp)
96 | filtered_qualities.append(quality)
97 | return filtered_grasps, filtered_qualities
98 |
99 | def deactivate(self):
100 | self.vis.clear_ig_views()
101 |
102 |
103 | def select_best_grasp(grasps, qualities):
104 | i = np.argmax(qualities)
105 | return grasps[i], qualities[i]
106 |
107 |
108 | class SingleViewPolicy(Policy):
109 | def update(self, img, x, q):
110 | linear, _ = compute_error(self.x_d, x)
111 | if np.linalg.norm(linear) < 0.02:
112 | self.views.append(x)
113 | self.tsdf.integrate(img, self.intrinsic, x.inv() * self.T_base_task)
114 | tsdf_grid, voxel_size = self.tsdf.get_grid(), self.tsdf.voxel_size
115 |
116 | scene_cloud = self.tsdf.get_scene_cloud()
117 | self.vis.scene_cloud(self.task_frame, np.asarray(scene_cloud.points))
118 |
119 | map_cloud = self.tsdf.get_map_cloud()
120 | self.vis.map_cloud(
121 | self.task_frame,
122 | np.asarray(map_cloud.points),
123 | np.expand_dims(np.asarray(map_cloud.colors)[:, 0], 1),
124 | )
125 |
126 | out = self.vgn.predict(tsdf_grid)
127 | self.vis.quality(self.task_frame, voxel_size, out.qual, 0.5)
128 |
129 | grasps, qualities = self.filter_grasps(out, q)
130 |
131 | if len(grasps) > 0:
132 | self.best_grasp, quality = select_best_grasp(grasps, qualities)
133 | self.vis.grasp(self.base_frame, self.best_grasp, quality)
134 |
135 | self.done = True
136 |
137 |
138 | class MultiViewPolicy(Policy):
139 | def __init__(self):
140 | super().__init__()
141 | self.T = rospy.get_param("policy/window_size")
142 |
143 | def activate(self, bbox, view_sphere):
144 | super().activate(bbox, view_sphere)
145 | self.qual_hist = np.zeros((self.T,) + (40,) * 3, np.float32)
146 |
147 | def integrate(self, img, x, q):
148 | self.views.append(x)
149 | self.vis.path(self.base_frame, self.intrinsic, self.views)
150 |
151 | with Timer("tsdf_integration"):
152 | self.tsdf.integrate(img, self.intrinsic, x.inv() * self.T_base_task)
153 |
154 | scene_cloud = self.tsdf.get_scene_cloud()
155 | self.vis.scene_cloud(self.task_frame, np.asarray(scene_cloud.points))
156 |
157 | map_cloud = self.tsdf.get_map_cloud()
158 | self.vis.map_cloud(
159 | self.task_frame,
160 | np.asarray(map_cloud.points),
161 | np.expand_dims(np.asarray(map_cloud.colors)[:, 0], 1),
162 | )
163 |
164 | with Timer("grasp_prediction"):
165 | tsdf_grid = self.tsdf.get_grid()
166 | out = self.vgn.predict(tsdf_grid)
167 | self.vis.quality(self.task_frame, self.tsdf.voxel_size, out.qual, 0.9)
168 |
169 | t = (len(self.views) - 1) % self.T
170 | self.qual_hist[t, ...] = out.qual
171 |
172 | with Timer("grasp_selection"):
173 | grasps, qualities = self.filter_grasps(out, q)
174 |
175 | if len(grasps) > 0:
176 | self.best_grasp, quality = select_best_grasp(grasps, qualities)
177 | self.vis.grasp(self.base_frame, self.best_grasp, quality)
178 | else:
179 | self.best_grasp = None
180 | self.vis.clear_grasp()
181 |
182 |
183 | def compute_error(x_d, x):
184 | linear = x_d.translation - x.translation
185 | angular = (x_d.rotation * x.rotation.inv()).as_rotvec()
186 | return linear, angular
187 |
188 |
189 | registry = {}
190 |
191 |
192 | def register(id, cls):
193 | global registry
194 | registry[id] = cls
195 |
196 |
197 | def make(id, *args, **kwargs):
198 | if id in registry:
199 | return registry[id](*args, **kwargs)
200 | else:
201 | raise ValueError("{} policy does not exist.".format(id))
202 |
--------------------------------------------------------------------------------
/src/active_grasp/simulation.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import pybullet as p
3 | import pybullet_data
4 | import rospkg
5 |
6 | from active_grasp.bbox import AABBox
7 | from robot_helpers.bullet import *
8 | from robot_helpers.io import load_yaml
9 | from robot_helpers.model import KDLModel
10 | from robot_helpers.spatial import Rotation
11 | from vgn.perception import UniformTSDFVolume
12 | from vgn.utils import find_urdfs, view_on_sphere
13 | from vgn.detection import VGN, select_local_maxima
14 |
15 | # import vgn.visualizer as vis
16 |
17 | rospack = rospkg.RosPack()
18 | pkg_root = Path(rospack.get_path("active_grasp"))
19 | urdfs_dir = pkg_root / "assets"
20 |
21 |
22 | class Simulation:
23 | """Robot is placed s.t. world and base frames are the same"""
24 |
25 | def __init__(self, gui, scene_id, vgn_path):
26 | self.configure_physics_engine(gui, 60, 4)
27 | self.configure_visualizer()
28 | self.seed()
29 | self.load_robot()
30 | self.load_vgn(Path(vgn_path))
31 | self.scene = get_scene(scene_id)
32 |
33 | def configure_physics_engine(self, gui, rate, sub_step_count):
34 | self.rate = rate
35 | self.dt = 1.0 / self.rate
36 | p.connect(p.GUI if gui else p.DIRECT)
37 | p.setAdditionalSearchPath(pybullet_data.getDataPath())
38 | p.setPhysicsEngineParameter(fixedTimeStep=self.dt, numSubSteps=sub_step_count)
39 | p.setGravity(0.0, 0.0, -9.81)
40 |
41 | def configure_visualizer(self):
42 | # p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
43 | p.resetDebugVisualizerCamera(1.2, 30, -30, [0.4, 0.0, 0.2])
44 |
45 | def seed(self, seed=None):
46 | self.rng = np.random.default_rng(seed) if seed else np.random
47 |
48 | def load_robot(self):
49 | panda_urdf_path = urdfs_dir / "franka/panda_arm_hand.urdf"
50 | self.arm = BtPandaArm(panda_urdf_path)
51 | self.gripper = BtPandaGripper(self.arm)
52 | self.model = KDLModel.from_urdf_file(
53 | panda_urdf_path, self.arm.base_frame, self.arm.ee_frame
54 | )
55 | self.camera = BtCamera(320, 240, 0.96, 0.01, 1.0, self.arm.uid, 11)
56 |
57 | def load_vgn(self, model_path):
58 | self.vgn = VGN(model_path)
59 |
60 | def reset(self):
61 | valid = False
62 | while not valid:
63 | self.set_arm_configuration([0.0, -1.39, 0.0, -2.36, 0.0, 1.57, 0.79])
64 | self.scene.clear()
65 | q = self.scene.generate(self.rng)
66 | self.set_arm_configuration(q)
67 | uid = self.select_target()
68 | bbox = self.get_target_bbox(uid)
69 | valid = self.check_for_grasps(bbox)
70 | return bbox
71 |
72 | def set_arm_configuration(self, q):
73 | for i, q_i in enumerate(q):
74 | p.resetJointState(self.arm.uid, i, q_i, 0)
75 | p.resetJointState(self.arm.uid, 9, 0.04, 0)
76 | p.resetJointState(self.arm.uid, 10, 0.04, 0)
77 | self.gripper.set_desired_width(0.4)
78 |
79 | def select_target(self):
80 | _, _, mask = self.camera.get_image()
81 | uids, counts = np.unique(mask, return_counts=True)
82 | mask = np.isin(uids, self.scene.object_uids) # remove ids of the floor, etc
83 | uids, counts = uids[mask], counts[mask]
84 | target_uid = uids[np.argmin(counts)]
85 | p.changeVisualShape(target_uid, -1, rgbaColor=[1, 0, 0, 1])
86 | return target_uid
87 |
88 | def get_target_bbox(self, uid):
89 | aabb_min, aabb_max = p.getAABB(uid)
90 | return AABBox(aabb_min, aabb_max)
91 |
92 | def check_for_grasps(self, bbox):
93 | origin = Transform.from_translation(self.scene.origin)
94 | origin.translation[2] -= 0.05
95 | center = Transform.from_translation(self.scene.center)
96 |
97 | # First, reconstruct the scene from many views
98 | tsdf = UniformTSDFVolume(self.scene.length, 40)
99 | r = 2.0 * self.scene.length
100 | theta = np.pi / 4.0
101 | phis = np.linspace(0.0, 2.0 * np.pi, 5)
102 | for view in [view_on_sphere(center, r, theta, phi) for phi in phis]:
103 | depth_img = self.camera.get_image(view)[1]
104 | tsdf.integrate(depth_img, self.camera.intrinsic, view.inv() * origin)
105 | voxel_size, tsdf_grid = tsdf.voxel_size, tsdf.get_grid()
106 |
107 | # Then check whether VGN can find any grasps on the target
108 | out = self.vgn.predict(tsdf_grid)
109 | grasps, qualities = select_local_maxima(voxel_size, out, threshold=0.9)
110 |
111 | # vis.scene_cloud(voxel_size, tsdf.get_scene_cloud())
112 | # vis.grasps(grasps, qualities, 0.05)
113 | # vis.show()
114 |
115 | for grasp in grasps:
116 | pose = origin * grasp.pose
117 | tip = pose.rotation.apply([0, 0, 0.05]) + pose.translation
118 | if bbox.is_inside(tip):
119 | return True
120 | return False
121 |
122 | def step(self):
123 | p.stepSimulation()
124 |
125 |
126 | class Scene:
127 | def __init__(self):
128 | self.support_urdf = urdfs_dir / "plane/model.urdf"
129 | self.support_uid = -1
130 | self.object_uids = []
131 |
132 | def clear(self):
133 | self.remove_support()
134 | self.remove_all_objects()
135 |
136 | def generate(self, rng):
137 | raise NotImplementedError
138 |
139 | def add_support(self, pos):
140 | self.support_uid = p.loadURDF(str(self.support_urdf), pos, globalScaling=0.3)
141 |
142 | def remove_support(self):
143 | p.removeBody(self.support_uid)
144 |
145 | def add_object(self, urdf, ori, pos, scale=1.0):
146 | uid = p.loadURDF(str(urdf), pos, ori.as_quat(), globalScaling=scale)
147 | self.object_uids.append(uid)
148 | return uid
149 |
150 | def remove_object(self, uid):
151 | p.removeBody(uid)
152 | self.object_uids.remove(uid)
153 |
154 | def remove_all_objects(self):
155 | for uid in list(self.object_uids):
156 | self.remove_object(uid)
157 |
158 |
159 | class YamlScene(Scene):
160 | def __init__(self, config_name):
161 | super().__init__()
162 | self.config_path = pkg_root / "cfg/sim" / config_name
163 |
164 | def load_config(self):
165 | self.scene = load_yaml(self.config_path)
166 | self.center = np.asarray(self.scene["center"])
167 | self.length = 0.3
168 | self.origin = self.center - np.r_[0.5 * self.length, 0.5 * self.length, 0.0]
169 |
170 | def generate(self, rng):
171 | self.load_config()
172 | self.add_support(self.center)
173 | for object in self.scene["objects"]:
174 | urdf = urdfs_dir / object["object_id"] / "model.urdf"
175 | ori = Rotation.from_euler("xyz", object["rpy"], degrees=True)
176 | pos = self.center + np.asarray(object["xyz"])
177 | scale = object.get("scale", 1)
178 | if randomize := object.get("randomize", False):
179 | angle = rng.uniform(-randomize["rot"], randomize["rot"])
180 | ori = Rotation.from_euler("z", angle, degrees=True) * ori
181 | b = np.asarray(randomize["pos"])
182 | pos += rng.uniform(-b, b)
183 | self.add_object(urdf, ori, pos, scale)
184 | for _ in range(60):
185 | p.stepSimulation()
186 | return self.scene["q"]
187 |
188 |
189 | class RandomScene(Scene):
190 | def __init__(self):
191 | super().__init__()
192 | self.center = np.r_[0.5, 0.0, 0.2]
193 | self.length = 0.3
194 | self.origin = self.center - np.r_[0.5 * self.length, 0.5 * self.length, 0.0]
195 | self.object_urdfs = find_urdfs(urdfs_dir / "test")
196 |
197 | def generate(self, rng, object_count=4, attempts=10):
198 | self.add_support(self.center)
199 | urdfs = rng.choice(self.object_urdfs, object_count)
200 | for urdf in urdfs:
201 | scale = rng.uniform(0.8, 1.0)
202 | uid = self.add_object(urdf, Rotation.identity(), np.zeros(3), scale)
203 | lower, upper = p.getAABB(uid)
204 | z_offset = 0.5 * (upper[2] - lower[2]) + 0.002
205 | state_id = p.saveState()
206 | for _ in range(attempts):
207 | # Try to place and check for collisions
208 | ori = Rotation.from_euler("z", rng.uniform(0, 2 * np.pi))
209 | pos = np.r_[rng.uniform(0.2, 0.8, 2) * self.length, z_offset]
210 | p.resetBasePositionAndOrientation(uid, self.origin + pos, ori.as_quat())
211 | p.stepSimulation()
212 | if not p.getContactPoints(uid):
213 | break
214 | else:
215 | p.restoreState(stateId=state_id)
216 | else:
217 | # No placement found, remove the object
218 | self.remove_object(uid)
219 |         q = np.asarray([0.0, -1.39, 0.0, -2.36, 0.0, 1.57, 0.79])
220 |         q = q + rng.uniform(-0.08, 0.08, 7)  # elementwise joint noise
221 | return q
222 |
223 |
224 | def get_scene(scene_id):
225 | if scene_id.endswith(".yaml"):
226 | return YamlScene(scene_id)
227 | elif scene_id == "random":
228 | return RandomScene()
229 |
230 | else:
231 | raise ValueError("Unknown scene {}.".format(scene_id))
232 |
--------------------------------------------------------------------------------
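`get_scene` dispatches on the scene id: an id ending in `.yaml` selects a `YamlScene` resolved relative to `cfg/sim` (e.g. the `mustard.yaml` config above), and `"random"` selects `RandomScene`, matching the `scene: random` default in `cfg/active_grasp.yaml`. A tiny sketch (assumes a sourced ROS workspace so `rospkg` can locate `active_grasp`, plus the downloaded assets folder for the random scene):

```
from active_grasp.simulation import get_scene

yaml_scene = get_scene("mustard.yaml")  # YamlScene reading cfg/sim/mustard.yaml
random_scene = get_scene("random")  # RandomScene drawing objects from assets/test
```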
/src/active_grasp/controller.py:
--------------------------------------------------------------------------------
1 | from controller_manager_msgs.srv import *
2 | import copy
3 | import cv_bridge
4 | from geometry_msgs.msg import Twist
5 | import numpy as np
6 | import rospy
7 | from sensor_msgs.msg import Image
8 | import trimesh
9 |
10 | from .bbox import from_bbox_msg
11 | from .timer import Timer
12 | from active_grasp.srv import Reset, ResetRequest
13 | from robot_helpers.ros import tf
14 | from robot_helpers.ros.conversions import *
15 | from robot_helpers.ros.panda import PandaArmClient, PandaGripperClient
16 | from robot_helpers.ros.moveit import MoveItClient, create_collision_object_from_mesh
17 | from robot_helpers.spatial import Rotation, Transform
18 | from vgn.utils import look_at, cartesian_to_spherical, spherical_to_cartesian
19 |
20 |
21 | class GraspController:
22 | def __init__(self, policy):
23 | self.policy = policy
24 | self.load_parameters()
25 | self.init_service_proxies()
26 | self.init_robot_connection()
27 | self.init_moveit()
28 | self.init_camera_stream()
29 |
30 | def load_parameters(self):
31 | self.base_frame = rospy.get_param("~base_frame_id")
32 | self.T_grasp_ee = Transform.from_list(rospy.get_param("~ee_grasp_offset")).inv()
33 | self.cam_frame = rospy.get_param("~camera/frame_id")
34 | self.depth_topic = rospy.get_param("~camera/depth_topic")
35 | self.min_z_dist = rospy.get_param("~camera/min_z_dist")
36 | self.control_rate = rospy.get_param("~control_rate")
37 | self.linear_vel = rospy.get_param("~linear_vel")
38 | self.policy_rate = rospy.get_param("policy/rate")
39 |
40 | def init_service_proxies(self):
41 | self.reset_env = rospy.ServiceProxy("reset", Reset)
42 | self.switch_controller = rospy.ServiceProxy(
43 | "controller_manager/switch_controller", SwitchController
44 | )
45 |
46 | def init_robot_connection(self):
47 | self.arm = PandaArmClient()
48 | self.gripper = PandaGripperClient()
49 | topic = rospy.get_param("cartesian_velocity_controller/topic")
50 | self.cartesian_vel_pub = rospy.Publisher(topic, Twist, queue_size=10)
51 |
52 | def init_moveit(self):
53 | self.moveit = MoveItClient("panda_arm")
54 | rospy.sleep(1.0) # Wait for connections to be established.
55 | self.moveit.move_group.set_planner_id("RRTstarkConfigDefault")
56 | self.moveit.move_group.set_planning_time(3.0)
57 |
58 | def switch_to_cartesian_velocity_control(self):
59 | req = SwitchControllerRequest()
60 | req.start_controllers = ["cartesian_velocity_controller"]
61 | req.stop_controllers = ["position_joint_trajectory_controller"]
62 | req.strictness = 1
63 | self.switch_controller(req)
64 |
65 | def switch_to_joint_trajectory_control(self):
66 | req = SwitchControllerRequest()
67 | req.start_controllers = ["position_joint_trajectory_controller"]
68 | req.stop_controllers = ["cartesian_velocity_controller"]
69 | req.strictness = 1
70 | self.switch_controller(req)
71 |
72 | def init_camera_stream(self):
73 | self.cv_bridge = cv_bridge.CvBridge()
74 | rospy.Subscriber(self.depth_topic, Image, self.sensor_cb, queue_size=1)
75 |
76 | def sensor_cb(self, msg):
77 | self.latest_depth_msg = msg
78 |
79 | def run(self):
80 | bbox = self.reset()
81 | self.switch_to_cartesian_velocity_control()
82 | with Timer("search_time"):
83 | grasp = self.search_grasp(bbox)
84 | if grasp:
85 | self.switch_to_joint_trajectory_control()
86 | with Timer("grasp_time"):
87 | res = self.execute_grasp(grasp)
88 | else:
89 | res = "aborted"
90 | return self.collect_info(res)
91 |
92 | def reset(self):
93 | Timer.reset()
94 | self.moveit.scene.clear()
95 | res = self.reset_env(ResetRequest())
96 | rospy.sleep(1.0) # Wait for the TF tree to be updated.
97 | return from_bbox_msg(res.bbox)
98 |
99 | def search_grasp(self, bbox):
100 | self.view_sphere = ViewHalfSphere(bbox, self.min_z_dist)
101 | self.policy.activate(bbox, self.view_sphere)
102 | timer = rospy.Timer(rospy.Duration(1.0 / self.control_rate), self.send_vel_cmd)
103 | r = rospy.Rate(self.policy_rate)
104 | while not self.policy.done:
105 | img, pose, q = self.get_state()
106 | self.policy.update(img, pose, q)
107 | r.sleep()
108 | rospy.sleep(0.2) # Wait for a zero command to be sent to the robot.
109 | self.policy.deactivate()
110 | timer.shutdown()
111 | return self.policy.best_grasp
112 |
113 | def get_state(self):
114 | q, _ = self.arm.get_state()
115 | msg = copy.deepcopy(self.latest_depth_msg)
116 | img = self.cv_bridge.imgmsg_to_cv2(msg).astype(np.float32) * 0.001
117 | pose = tf.lookup(self.base_frame, self.cam_frame, msg.header.stamp)
118 | return img, pose, q
119 |
120 | def send_vel_cmd(self, event):
121 | if self.policy.x_d is None or self.policy.done:
122 | cmd = np.zeros(6)
123 | else:
124 | x = tf.lookup(self.base_frame, self.cam_frame)
125 | cmd = self.compute_velocity_cmd(self.policy.x_d, x)
126 | self.cartesian_vel_pub.publish(to_twist_msg(cmd))
127 |
128 | def compute_velocity_cmd(self, x_d, x):
129 | r, theta, phi = cartesian_to_spherical(x.translation - self.view_sphere.center)
130 | e_t = x_d.translation - x.translation
131 | e_n = (x.translation - self.view_sphere.center) * (self.view_sphere.r - r) / r
132 | linear = 1.0 * e_t + 6.0 * (r < self.view_sphere.r) * e_n
133 | scale = np.linalg.norm(linear) + 1e-6
134 | linear *= np.clip(scale, 0.0, self.linear_vel) / scale
135 | angular = self.view_sphere.get_view(theta, phi).rotation * x.rotation.inv()
136 | angular = 1.0 * angular.as_rotvec()
137 | return np.r_[linear, angular]
138 |
139 | def execute_grasp(self, grasp):
140 | self.create_collision_scene()
141 | T_base_grasp = self.postprocess(grasp.pose)
142 | self.gripper.move(0.08)
143 | T_base_approach = T_base_grasp * Transform.t_[0, 0, -0.06] * self.T_grasp_ee
144 | success, plan = self.moveit.plan(T_base_approach, 0.2, 0.2)
145 | if success:
146 | self.moveit.scene.clear()
147 | self.moveit.execute(plan)
148 | rospy.sleep(0.5) # Wait for the planning scene to be updated
149 | self.moveit.gotoL(T_base_grasp * self.T_grasp_ee)
150 | rospy.sleep(0.5)
151 | self.gripper.grasp()
152 | T_base_retreat = Transform.t_[0, 0, 0.05] * T_base_grasp * self.T_grasp_ee
153 | self.moveit.gotoL(T_base_retreat)
154 | rospy.sleep(1.0) # Wait to see whether the object slides out of the hand
155 | success = self.gripper.read() > 0.002
156 | return "succeeded" if success else "failed"
157 | else:
158 | return "no_motion_plan_found"
159 |
160 | def create_collision_scene(self):
161 | # Segment support surface
162 | cloud = self.policy.tsdf.get_scene_cloud()
163 | cloud = cloud.transform(self.policy.T_base_task.as_matrix())
164 | _, inliers = cloud.segment_plane(0.01, 3, 1000)
165 | support_cloud = cloud.select_by_index(inliers)
166 | cloud = cloud.select_by_index(inliers, invert=True)
167 | # o3d.io.write_point_cloud(f"{time.time():.0f}.pcd", cloud)
168 |
169 | # Add collision object for the support
170 | self.add_collision_mesh("support", compute_convex_hull(support_cloud))
171 |
172 | # Cluster cloud
173 | labels = np.array(cloud.cluster_dbscan(eps=0.01, min_points=8))
174 |
175 | # Generate convex collision objects for each segment
176 | self.hulls = []
177 | for label in range(labels.max() + 1):
178 | segment = cloud.select_by_index(np.flatnonzero(labels == label))
179 | try:
180 | hull = compute_convex_hull(segment)
181 | name = f"object_{label}"
182 | self.add_collision_mesh(name, hull)
183 | self.hulls.append(hull)
184 |             except Exception:
185 | # Qhull fails in some edge cases
186 | pass
187 |
188 | def add_collision_mesh(self, name, mesh):
189 | frame, pose = self.base_frame, Transform.identity()
190 | co = create_collision_object_from_mesh(name, frame, pose, mesh)
191 | self.moveit.scene.add_object(co)
192 |
193 | def postprocess(self, T_base_grasp):
194 | rot = T_base_grasp.rotation
195 | if rot.as_matrix()[:, 0][0] < 0: # Ensure that the camera is pointing forward
196 | T_base_grasp.rotation = rot * Rotation.from_euler("z", np.pi)
197 | T_base_grasp *= Transform.t_[0.0, 0.0, 0.01]
198 | return T_base_grasp
199 |
200 | def collect_info(self, result):
201 | points = [p.translation for p in self.policy.views]
202 | d = np.sum([np.linalg.norm(p2 - p1) for p1, p2 in zip(points, points[1:])])
203 | info = {
204 | "result": result,
205 | "view_count": len(points),
206 | "distance": d,
207 | }
208 | info.update(self.policy.info)
209 | info.update(Timer.timers)
210 | return info
211 |
212 |
213 | def compute_convex_hull(cloud):
214 | hull, _ = cloud.compute_convex_hull()
215 | triangles, vertices = np.asarray(hull.triangles), np.asarray(hull.vertices)
216 | return trimesh.base.Trimesh(vertices, triangles)
217 |
218 |
219 | class ViewHalfSphere:
220 | def __init__(self, bbox, min_z_dist):
221 | self.center = bbox.center
222 | self.r = 0.5 * bbox.size[2] + min_z_dist
223 |
224 | def get_view(self, theta, phi):
225 | eye = self.center + spherical_to_cartesian(self.r, theta, phi)
226 | up = np.r_[1.0, 0.0, 0.0]
227 | return look_at(eye, self.center, up)
228 |
229 | def sample_view(self):
230 | raise NotImplementedError
231 |
--------------------------------------------------------------------------------
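
GraspController.compute_velocity_cmd above steers the wrist-mounted camera with two translational terms: a tangential error that pulls the camera toward the desired view x_d, and a radial error that pushes it back onto the view half-sphere of radius r around the target whenever the camera falls inside it. A self-contained sketch of that translational law in plain NumPy (the gains 1.0 and 6.0 and the norm clamp mirror the method above; v_max stands in for the ~linear_vel parameter, and the angular part is omitted):

import numpy as np

def linear_velocity_cmd(x, x_d, center, r_sphere, v_max=0.05):
    """Translational part of the view-sphere servo law (illustrative sketch)."""
    offset = x - center
    r = np.linalg.norm(offset)
    e_t = x_d - x                          # tangential error toward the desired view
    e_n = offset * (r_sphere - r) / r      # radial error back onto the sphere
    linear = 1.0 * e_t + 6.0 * (r < r_sphere) * e_n
    scale = np.linalg.norm(linear) + 1e-6
    return linear * np.clip(scale, 0.0, v_max) / scale

# Example: a camera slightly inside the sphere is pushed outward while moving toward x_d.
cmd = linear_velocity_cmd(
    x=np.array([0.40, 0.00, 0.40]),
    x_d=np.array([0.45, 0.10, 0.45]),
    center=np.array([0.50, 0.00, 0.20]),
    r_sphere=0.30,
)
print(cmd)
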
/scripts/bt_sim_node.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from actionlib import SimpleActionServer
4 | import control_msgs.msg as control_msgs
5 | from controller_manager_msgs.srv import *
6 | import cv_bridge
7 | from franka_msgs.msg import FrankaState, ErrorRecoveryAction
8 | from franka_gripper.msg import *
9 | from geometry_msgs.msg import Twist
10 | import numpy as np
11 | import rospy
12 | from sensor_msgs.msg import JointState, Image, CameraInfo
13 | from scipy import interpolate
14 | from std_msgs.msg import Header
15 | from threading import Thread
16 |
17 | from active_grasp.bbox import to_bbox_msg
18 | from active_grasp.srv import *
19 | from active_grasp.simulation import Simulation
20 | from robot_helpers.ros.conversions import *
21 | from vgn.simulation import apply_noise
22 |
23 |
24 | class BtSimNode:
25 | def __init__(self):
26 | gui = rospy.get_param("~gui")
27 | scene_id = rospy.get_param("~scene")
28 | vgn_path = rospy.get_param("vgn/model")
29 | self.sim = Simulation(gui, scene_id, vgn_path)
30 | self.init_plugins()
31 | self.advertise_services()
32 |
33 | def init_plugins(self):
34 | self.plugins = [
35 | PhysicsPlugin(self.sim),
36 | RobotStatePlugin(self.sim.arm, self.sim.gripper),
37 | MoveActionPlugin(self.sim.gripper),
38 | GraspActionPlugin(self.sim.gripper),
39 | GripperActionPlugin(),
40 | CameraPlugin(self.sim.camera),
41 | MockActionsPlugin(),
42 | ]
43 | self.controllers = {
44 | "cartesian_velocity_controller": CartesianVelocityControllerPlugin(
45 | self.sim.arm, self.sim.model
46 | ),
47 | "position_joint_trajectory_controller": JointTrajectoryControllerPlugin(
48 | self.sim.arm
49 | ),
50 | }
51 |
52 | def start_plugins(self):
53 | for plugin in self.plugins + list(self.controllers.values()):
54 | plugin.thread.start()
55 |
56 | def activate_plugins(self):
57 | for plugin in self.plugins:
58 | plugin.activate()
59 |
60 | def deactivate_plugins(self):
61 | for plugin in self.plugins:
62 | plugin.deactivate()
63 |
64 | def deactivate_controllers(self):
65 | for controller in self.controllers.values():
66 | controller.deactivate()
67 |
68 | def advertise_services(self):
69 | rospy.Service("seed", Seed, self.seed)
70 | rospy.Service("reset", Reset, self.reset)
71 | rospy.Service(
72 | "/controller_manager/switch_controller",
73 | SwitchController,
74 | self.switch_controller,
75 | )
76 |
77 | def seed(self, req):
78 | self.sim.seed(req.seed)
79 | rospy.loginfo(f"Seeded the rng with {req.seed}.")
80 | return SeedResponse()
81 |
82 | def reset(self, req):
83 | self.deactivate_plugins()
84 | self.deactivate_controllers()
85 | rospy.sleep(1.0) # TODO replace with a read-write lock
86 | bbox = self.sim.reset()
87 | self.activate_plugins()
88 | return ResetResponse(to_bbox_msg(bbox))
89 |
90 | def switch_controller(self, req):
91 | for controller in req.stop_controllers:
92 | self.controllers[controller].deactivate()
93 | for controller in req.start_controllers:
94 | self.controllers[controller].activate()
95 | return SwitchControllerResponse(ok=True)
96 |
97 | def run(self):
98 | self.start_plugins()
99 | self.activate_plugins()
100 | rospy.spin()
101 |
102 |
103 | class Plugin:
104 | """A plugin that spins at a constant rate in its own thread."""
105 |
106 | def __init__(self, rate):
107 | self.rate = rate
108 | self.thread = Thread(target=self.loop, daemon=True)
109 | self.is_running = False
110 |
111 | def activate(self):
112 | self.is_running = True
113 |
114 | def deactivate(self):
115 | self.is_running = False
116 |
117 | def loop(self):
118 | rate = rospy.Rate(self.rate)
119 | while not rospy.is_shutdown():
120 | if self.is_running:
121 | self.update()
122 | rate.sleep()
123 |
124 | def update(self):
125 | raise NotImplementedError
126 |
127 |
128 | class PhysicsPlugin(Plugin):
129 | def __init__(self, sim):
130 | super().__init__(sim.rate)
131 | self.sim = sim
132 |
133 | def update(self):
134 | self.sim.step()
135 |
136 |
137 | class RobotStatePlugin(Plugin):
138 | def __init__(self, arm, gripper, rate=30):
139 | super().__init__(rate)
140 | self.arm = arm
141 | self.gripper = gripper
142 | self.arm_state_pub = rospy.Publisher(
143 | "/franka_state_controller/franka_states", FrankaState, queue_size=10
144 | )
145 | self.gripper_state_pub = rospy.Publisher(
146 | "/franka_gripper/joint_states", JointState, queue_size=10
147 | )
148 | self.joint_states_pub = rospy.Publisher(
149 | "joint_states", JointState, queue_size=10
150 | )
151 |
152 | def update(self):
153 | q, dq = self.arm.get_state()
154 | width = self.gripper.read()
155 | header = Header(stamp=rospy.Time.now())
156 |
157 | msg = FrankaState(header=header, q=q, dq=dq)
158 | self.arm_state_pub.publish(msg)
159 |
160 | msg = JointState(header=header)
161 | msg.name = ["panda_finger_joint1", "panda_finger_joint2"]
162 | msg.position = [0.5 * width, 0.5 * width]
163 | self.gripper_state_pub.publish(msg)
164 |
165 | msg = JointState(header=header)
166 | msg.name = ["panda_joint{}".format(i) for i in range(1, 8)] + [
167 | "panda_finger_joint1",
168 | "panda_finger_joint2",
169 | ]
170 | msg.position = np.r_[q, 0.5 * width, 0.5 * width]
171 | self.joint_states_pub.publish(msg)
172 |
173 |
174 | class CartesianVelocityControllerPlugin(Plugin):
175 | def __init__(self, arm, model, rate=30):
176 | super().__init__(rate)
177 | self.arm = arm
178 | self.model = model
179 | topic = rospy.get_param("cartesian_velocity_controller/topic")
180 | rospy.Subscriber(topic, Twist, self.target_cb)
181 |
182 | def target_cb(self, msg):
183 | self.dx_d = from_twist_msg(msg)
184 |
185 | def activate(self):
186 | self.dx_d = np.zeros(6)
187 | self.is_running = True
188 |
189 | def deactivate(self):
190 | self.dx_d = np.zeros(6)
191 | self.is_running = False
192 | self.arm.set_desired_joint_velocities(np.zeros(7))
193 |
194 | def update(self):
195 | q, _ = self.arm.get_state()
196 | J_pinv = np.linalg.pinv(self.model.jacobian(q))
197 | cmd = np.dot(J_pinv, self.dx_d)
198 | self.arm.set_desired_joint_velocities(cmd)
199 |
200 |
201 | class JointTrajectoryControllerPlugin(Plugin):
202 | def __init__(self, arm, rate=30):
203 | super().__init__(rate)
204 | self.arm = arm
205 | self.dt = 1.0 / self.rate # TODO this might not be reliable
206 | self.init_action_server()
207 |
208 | def init_action_server(self):
209 | name = "position_joint_trajectory_controller/follow_joint_trajectory"
210 | self.action_server = SimpleActionServer(
211 | name, control_msgs.FollowJointTrajectoryAction, auto_start=False
212 | )
213 | self.action_server.register_goal_callback(self.action_goal_cb)
214 | self.action_server.start()
215 |
216 | def action_goal_cb(self):
217 | goal = self.action_server.accept_new_goal()
218 | self.interpolate_trajectory(goal.trajectory.points)
219 | self.elapsed_time = 0.0
220 |
221 | def interpolate_trajectory(self, points):
222 | t, y = np.zeros(len(points)), np.zeros((7, len(points)))
223 | for i, point in enumerate(points):
224 | t[i] = point.time_from_start.to_sec()
225 | y[:, i] = point.positions
226 | self.m = interpolate.interp1d(t, y)
227 | self.duration = t[-1]
228 |
229 | def update(self):
230 | if self.action_server.is_active():
231 | self.elapsed_time += self.dt
232 | if self.elapsed_time > self.duration:
233 | self.action_server.set_succeeded()
234 | return
235 | self.arm.set_desired_joint_positions(self.m(self.elapsed_time))
236 |
237 |
238 | class MoveActionPlugin(Plugin):
239 | def __init__(self, gripper, rate=10):
240 | super().__init__(rate)
241 | self.gripper = gripper
242 | self.dt = 1.0 / self.rate
243 | self.init_action_server()
244 |
245 | def init_action_server(self):
246 | name = "/franka_gripper/move"
247 | self.action_server = SimpleActionServer(name, MoveAction, auto_start=False)
248 | self.action_server.register_goal_callback(self.action_goal_cb)
249 | self.action_server.start()
250 |
251 | def action_goal_cb(self):
252 | self.elapsed_time = 0.0
253 | goal = self.action_server.accept_new_goal()
254 | self.gripper.set_desired_width(goal.width)
255 |
256 | def update(self):
257 | if self.action_server.is_active():
258 | self.elapsed_time += self.dt
259 | if self.elapsed_time > 1.0:
260 | self.action_server.set_succeeded()
261 |
262 |
263 | class GraspActionPlugin(Plugin):
264 | def __init__(self, gripper, rate=10):
265 | super().__init__(rate)
266 | self.gripper = gripper
267 | self.dt = 1.0 / self.rate
268 | self.force = rospy.get_param("~gripper_force")
269 | self.init_action_server()
270 |
271 | def init_action_server(self):
272 | name = "/franka_gripper/grasp"
273 | self.action_server = SimpleActionServer(name, GraspAction, auto_start=False)
274 | self.action_server.register_goal_callback(self.action_goal_cb)
275 | self.action_server.start()
276 |
277 | def action_goal_cb(self):
278 | self.elapsed_time = 0.0
279 | goal = self.action_server.accept_new_goal()
280 | self.gripper.set_desired_speed(-0.1, force=self.force)
281 |
282 | def update(self):
283 | if self.action_server.is_active():
284 | self.elapsed_time += self.dt
285 | if self.elapsed_time > 1.0:
286 | self.action_server.set_succeeded()
287 |
288 |
289 | class GripperActionPlugin(Plugin):
290 | """Empty action server to make MoveIt happy"""
291 |
292 | def __init__(self, rate=1):
293 | super().__init__(rate)
294 | self.init_action_server()
295 |
296 | def init_action_server(self):
297 | name = "/franka_gripper/gripper_action"
298 | self.action_server = SimpleActionServer(
299 | name, control_msgs.GripperCommandAction, auto_start=False
300 | )
301 | self.action_server.register_goal_callback(self.action_goal_cb)
302 | self.action_server.start()
303 |
304 | def action_goal_cb(self):
305 | self.action_server.accept_new_goal()
306 |
307 | def update(self):
308 | if self.action_server.is_active():
309 | self.action_server.set_succeeded()
310 |
311 |
312 | class CameraPlugin(Plugin):
313 | def __init__(self, camera, name="camera", rate=5):
314 | super().__init__(rate)
315 | self.camera = camera
316 | self.name = name
317 | self.cam_noise = rospy.get_param("~cam_noise", False)
318 | self.cv_bridge = cv_bridge.CvBridge()
319 | self.init_publishers()
320 |
321 | def init_publishers(self):
322 | topic = self.name + "/depth/camera_info"
323 | self.info_pub = rospy.Publisher(topic, CameraInfo, queue_size=10)
324 | topic = self.name + "/depth/image_rect_raw"
325 | self.depth_pub = rospy.Publisher(topic, Image, queue_size=10)
326 |
327 | def update(self):
328 | stamp = rospy.Time.now()
329 |
330 | msg = to_camera_info_msg(self.camera.intrinsic)
331 | msg.header.frame_id = self.name + "_optical_frame"
332 | msg.header.stamp = stamp
333 | self.info_pub.publish(msg)
334 |
335 | _, depth, _ = self.camera.get_image()
336 |
337 | if self.cam_noise:
338 | depth = apply_noise(depth)
339 |
340 | msg = self.cv_bridge.cv2_to_imgmsg((1000 * depth).astype(np.uint16))
341 | msg.header.stamp = stamp
342 | self.depth_pub.publish(msg)
343 |
344 |
345 | class MockActionsPlugin(Plugin):
346 | def __init__(self):
347 | super().__init__(1)
348 | self.init_recovery_action_server()
349 | self.init_homing_action_server()
350 |
351 | def init_homing_action_server(self):
352 | self.homing_as = SimpleActionServer(
353 | "/franka_gripper/homing", HomingAction, auto_start=False
354 | )
355 | self.homing_as.register_goal_callback(self.action_goal_cb)
356 | self.homing_as.start()
357 |
358 | def init_recovery_action_server(self):
359 | self.recovery_as = SimpleActionServer(
360 | "/franka_control/error_recovery", ErrorRecoveryAction, auto_start=False
361 | )
362 | self.recovery_as.register_goal_callback(self.action_goal_cb)
363 | self.recovery_as.start()
364 |
365 | def action_goal_cb(self):
366 | pass
367 |
368 | def update(self):
369 | pass
370 |
371 |
372 | def main():
373 | rospy.init_node("bt_sim")
374 | server = BtSimNode()
375 | server.run()
376 |
377 |
378 | if __name__ == "__main__":
379 | main()
380 |
--------------------------------------------------------------------------------
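
BtSimNode above exposes three interfaces: the package's own seed and reset services and a mocked /controller_manager/switch_controller. A minimal client sketch that exercises them (assuming the node is running and the active_grasp service definitions have been built; the client node name and the seed value are arbitrary):

import rospy
from active_grasp.srv import Seed, SeedRequest, Reset, ResetRequest
from controller_manager_msgs.srv import SwitchController, SwitchControllerRequest

rospy.init_node("bt_sim_client_demo")
for srv in ("seed", "reset", "/controller_manager/switch_controller"):
    rospy.wait_for_service(srv)

seed = rospy.ServiceProxy("seed", Seed)
reset = rospy.ServiceProxy("reset", Reset)
switch = rospy.ServiceProxy("/controller_manager/switch_controller", SwitchController)

seed(SeedRequest(seed=42))      # make the next scene reproducible
res = reset(ResetRequest())     # respawn the scene and receive the target bounding box
rospy.loginfo("Target bbox: %s to %s", res.bbox.min, res.bbox.max)

req = SwitchControllerRequest()
req.start_controllers = ["cartesian_velocity_controller"]
req.stop_controllers = ["position_joint_trajectory_controller"]
req.strictness = 1              # best effort, as in GraspController
switch(req)
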
/cfg/active_grasp.rviz:
--------------------------------------------------------------------------------
1 | Panels:
2 | - Class: rviz/Displays
3 | Help Height: 70
4 | Name: Displays
5 | Property Tree Widget:
6 | Expanded:
7 | - /TF1/Frames1
8 | - /Markers1/Namespaces1
9 | Splitter Ratio: 0.6881287693977356
10 | Tree Height: 710
11 | - Class: rviz/Selection
12 | Name: Selection
13 | - Class: rviz/Tool Properties
14 | Expanded:
15 | - /2D Pose Estimate1
16 | - /2D Nav Goal1
17 | - /Publish Point1
18 | Name: Tool Properties
19 | Splitter Ratio: 0.5886790156364441
20 | - Class: rviz/Views
21 | Expanded:
22 | - /Current View1
23 | Name: Views
24 | Splitter Ratio: 0.5
25 | - Class: rviz/Time
26 | Experimental: false
27 | Name: Time
28 | SyncMode: 0
29 | SyncSource: SceneCloud
30 | Preferences:
31 | PromptSaveOnExit: true
32 | Toolbars:
33 | toolButtonStyle: 2
34 | Visualization Manager:
35 | Class: ""
36 | Displays:
37 | - Alpha: 0.5
38 | Cell Size: 1
39 | Class: rviz/Grid
40 | Color: 160; 160; 164
41 | Enabled: true
42 | Line Style:
43 | Line Width: 0.029999999329447746
44 | Value: Lines
45 | Name: Grid
46 | Normal Cell Count: 0
47 | Offset:
48 | X: 0
49 | Y: 0
50 | Z: 0
51 | Plane: XY
52 | Plane Cell Count: 10
53 | Reference Frame:
54 | Value: true
55 | - Class: rviz/TF
56 | Enabled: false
57 | Frame Timeout: 15
58 | Frames:
59 | All Enabled: false
60 | Marker Alpha: 1
61 | Marker Scale: 0.30000001192092896
62 | Name: TF
63 | Show Arrows: false
64 | Show Axes: true
65 | Show Names: false
66 | Tree:
67 | {}
68 | Update Interval: 0
69 | Value: false
70 | - Alpha: 0.20000000298023224
71 | Class: rviz/RobotModel
72 | Collision Enabled: false
73 | Enabled: true
74 | Links:
75 | All Links Enabled: true
76 | Expand Joint Details: false
77 | Expand Link Details: false
78 | Expand Tree: false
79 | Link Tree Style: Links in Alphabetic Order
80 | camera_depth_optical_frame:
81 | Alpha: 1
82 | Show Axes: false
83 | Show Trail: false
84 | panda_hand:
85 | Alpha: 1
86 | Show Axes: false
87 | Show Trail: false
88 | Value: true
89 | panda_leftfinger:
90 | Alpha: 1
91 | Show Axes: false
92 | Show Trail: false
93 | Value: true
94 | panda_link0:
95 | Alpha: 1
96 | Show Axes: false
97 | Show Trail: false
98 | Value: true
99 | panda_link1:
100 | Alpha: 1
101 | Show Axes: false
102 | Show Trail: false
103 | Value: true
104 | panda_link2:
105 | Alpha: 1
106 | Show Axes: false
107 | Show Trail: false
108 | Value: true
109 | panda_link3:
110 | Alpha: 1
111 | Show Axes: false
112 | Show Trail: false
113 | Value: true
114 | panda_link4:
115 | Alpha: 1
116 | Show Axes: false
117 | Show Trail: false
118 | Value: true
119 | panda_link5:
120 | Alpha: 1
121 | Show Axes: false
122 | Show Trail: false
123 | Value: true
124 | panda_link6:
125 | Alpha: 1
126 | Show Axes: false
127 | Show Trail: false
128 | Value: true
129 | panda_link7:
130 | Alpha: 1
131 | Show Axes: false
132 | Show Trail: false
133 | Value: true
134 | panda_link8:
135 | Alpha: 1
136 | Show Axes: false
137 | Show Trail: false
138 | Value: true
139 | panda_rightfinger:
140 | Alpha: 1
141 | Show Axes: false
142 | Show Trail: false
143 | Value: true
144 | Name: RobotModel
145 | Robot Description: robot_description
146 | TF Prefix: ""
147 | Update Interval: 0
148 | Value: true
149 | Visual Enabled: true
150 | - Class: moveit_rviz_plugin/PlanningScene
151 | Enabled: false
152 | Move Group Namespace: ""
153 | Name: PlanningScene
154 | Planning Scene Topic: move_group/monitored_planning_scene
155 | Robot Description: robot_description
156 | Scene Geometry:
157 | Scene Alpha: 0.8999999761581421
158 | Scene Color: 50; 230; 50
159 | Scene Display Time: 0.009999999776482582
160 | Show Scene Geometry: true
161 | Voxel Coloring: Z-Axis
162 | Voxel Rendering: Occupied Voxels
163 | Scene Robot:
164 | Attached Body Color: 150; 50; 150
165 | Links:
166 | All Links Enabled: true
167 | Expand Joint Details: false
168 | Expand Link Details: false
169 | Expand Tree: false
170 | Link Tree Style: Links in Alphabetic Order
171 | Robot Alpha: 1
172 | Show Robot Collision: false
173 | Show Robot Visual: false
174 | Value: false
175 | - Alpha: 1
176 | Autocompute Intensity Bounds: true
177 | Autocompute Value Bounds:
178 | Max Value: 10
179 | Min Value: -10
180 | Value: true
181 | Axis: Z
182 | Channel Name: intensity
183 | Class: rviz/PointCloud2
184 | Color: 255; 255; 255
185 | Color Transformer: RGB8
186 | Decay Time: 0
187 | Enabled: true
188 | Invert Rainbow: false
189 | Max Color: 255; 255; 255
190 | Min Color: 0; 0; 0
191 | Name: PointCloud
192 | Position Transformer: XYZ
193 | Queue Size: 10
194 | Selectable: true
195 | Size (Pixels): 3
196 | Size (m): 0.0020000000949949026
197 | Style: Spheres
198 | Topic: /camera/depth/color/points
199 | Unreliable: false
200 | Use Fixed Frame: true
201 | Use rainbow: true
202 | Value: true
203 | - Class: rviz/Image
204 | Enabled: false
205 | Image Topic: /camera/color/image_raw
206 | Max Value: 1
207 | Median window: 5
208 | Min Value: 0
209 | Name: ColorImage
210 | Normalize Range: true
211 | Queue Size: 2
212 | Transport Hint: raw
213 | Unreliable: false
214 | Value: false
215 | - Class: rviz/Image
216 | Enabled: false
217 | Image Topic: /camera/depth/image_rect_raw
218 | Max Value: 1
219 | Median window: 5
220 | Min Value: 0
221 | Name: DepthImage
222 | Normalize Range: true
223 | Queue Size: 2
224 | Transport Hint: raw
225 | Unreliable: false
226 | Value: false
227 | - Class: rviz/Image
228 | Enabled: false
229 | Image Topic: /camera/infra1/image_rect_raw
230 | Max Value: 1
231 | Median window: 5
232 | Min Value: 0
233 | Name: InfraImage
234 | Normalize Range: true
235 | Queue Size: 2
236 | Transport Hint: raw
237 | Unreliable: false
238 | Value: false
239 | - Class: rviz/MarkerArray
240 | Enabled: true
241 | Marker Topic: visualization_marker_array
242 | Name: Markers
243 | Namespaces:
244 | bbox: true
245 | grasp: true
246 | ig_views: true
247 | path: true
248 | views: true
249 | Queue Size: 100
250 | Value: true
251 | - Alpha: 1
252 | Autocompute Intensity Bounds: true
253 | Autocompute Value Bounds:
254 | Max Value: 10
255 | Min Value: -10
256 | Value: true
257 | Axis: Z
258 | Channel Name: intensity
259 | Class: rviz/PointCloud2
260 | Color: 255; 255; 255
261 | Color Transformer: Intensity
262 | Decay Time: 0
263 | Enabled: true
264 | Invert Rainbow: false
265 | Max Color: 255; 255; 255
266 | Min Color: 0; 0; 0
267 | Name: SceneCloud
268 | Position Transformer: XYZ
269 | Queue Size: 10
270 | Selectable: true
271 | Size (Pixels): 3
272 | Size (m): 0.009999999776482582
273 | Style: Spheres
274 | Topic: /scene_cloud
275 | Unreliable: false
276 | Use Fixed Frame: true
277 | Use rainbow: true
278 | Value: true
279 | - Alpha: 0.20000000298023224
280 | Autocompute Intensity Bounds: false
281 | Autocompute Value Bounds:
282 | Max Value: 10
283 | Min Value: -10
284 | Value: true
285 | Axis: Z
286 | Channel Name: distance
287 | Class: rviz/PointCloud2
288 | Color: 255; 255; 255
289 | Color Transformer: Intensity
290 | Decay Time: 0
291 | Enabled: false
292 | Invert Rainbow: false
293 | Max Color: 255; 255; 255
294 | Max Intensity: 1
295 | Min Color: 0; 0; 0
296 | Min Intensity: 0
297 | Name: MapCloud
298 | Position Transformer: XYZ
299 | Queue Size: 10
300 | Selectable: true
301 | Size (Pixels): 3
302 | Size (m): 0.007499999832361937
303 | Style: Boxes
304 | Topic: /map_cloud
305 | Unreliable: false
306 | Use Fixed Frame: true
307 | Use rainbow: true
308 | Value: false
309 | - Alpha: 0.20000000298023224
310 | Autocompute Intensity Bounds: false
311 | Autocompute Value Bounds:
312 | Max Value: 10
313 | Min Value: -10
314 | Value: true
315 | Axis: Z
316 | Channel Name: intensity
317 | Class: rviz/PointCloud2
318 | Color: 255; 255; 255
319 | Color Transformer: Intensity
320 | Decay Time: 0
321 | Enabled: true
322 | Invert Rainbow: false
323 | Max Color: 0; 255; 0
324 | Max Intensity: 1
325 | Min Color: 255; 0; 0
326 | Min Intensity: 0.800000011920929
327 | Name: GraspQuality
328 | Position Transformer: XYZ
329 | Queue Size: 10
330 | Selectable: true
331 | Size (Pixels): 3
332 | Size (m): 0.007499999832361937
333 | Style: Boxes
334 | Topic: /quality
335 | Unreliable: false
336 | Use Fixed Frame: true
337 | Use rainbow: false
338 | Value: true
339 | Enabled: true
340 | Global Options:
341 | Background Color: 48; 48; 48
342 | Default Light: true
343 | Fixed Frame: panda_link0
344 | Frame Rate: 30
345 | Name: root
346 | Tools:
347 | - Class: rviz/Interact
348 | Hide Inactive Objects: true
349 | - Class: rviz/MoveCamera
350 | - Class: rviz/Select
351 | - Class: rviz/FocusCamera
352 | - Class: rviz/Measure
353 | - Class: rviz/SetInitialPose
354 | Theta std deviation: 0.2617993950843811
355 | Topic: /initialpose
356 | X std deviation: 0.5
357 | Y std deviation: 0.5
358 | - Class: rviz/SetGoal
359 | Topic: /move_base_simple/goal
360 | - Class: rviz/PublishPoint
361 | Single click: true
362 | Topic: /clicked_point
363 | Value: true
364 | Views:
365 | Current:
366 | Class: rviz/Orbit
367 | Distance: 1.2000000476837158
368 | Enable Stereo Rendering:
369 | Stereo Eye Separation: 0.05999999865889549
370 | Stereo Focal Distance: 1
371 | Swap Stereo Eyes: false
372 | Value: false
373 | Field of View: 0.7799999713897705
374 | Focal Point:
375 | X: 0.44999998807907104
376 | Y: 0
377 | Z: 0.4000000059604645
378 | Focal Shape Fixed Size: false
379 | Focal Shape Size: 0.05000000074505806
380 | Invert Z Axis: false
381 | Name: Current View
382 | Near Clip Distance: 0.009999999776482582
383 | Pitch: 0.25999999046325684
384 | Target Frame:
385 | Yaw: 1.0399999618530273
386 | Saved:
387 | - Class: rviz/Orbit
388 | Distance: 1.2000000476837158
389 | Enable Stereo Rendering:
390 | Stereo Eye Separation: 0.05999999865889549
391 | Stereo Focal Distance: 1
392 | Swap Stereo Eyes: false
393 | Value: false
394 | Field of View: 0.7799999713897705
395 | Focal Point:
396 | X: 0.44999998807907104
397 | Y: 0
398 | Z: 0.4000000059604645
399 | Focal Shape Fixed Size: false
400 | Focal Shape Size: 0.05000000074505806
401 | Invert Z Axis: false
402 | Name: Panda
403 | Near Clip Distance: 0.009999999776482582
404 | Pitch: 0.25999999046325684
405 | Target Frame:
406 | Yaw: 1.0399999618530273
407 | Window Geometry:
408 | ColorImage:
409 | collapsed: false
410 | DepthImage:
411 | collapsed: false
412 | Displays:
413 | collapsed: false
414 | Height: 960
415 | Hide Left Dock: false
416 | Hide Right Dock: true
417 | InfraImage:
418 | collapsed: false
419 | QMainWindow State: 000000ff00000000fd00000004000000000000028300000366fc0200000016fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afc0000003d00000366000000e60100001cfa000000000100000002fb000000100044006900730070006c0061007900730100000000000001f30000015600fffffffb000000140043006f006c006f00720049006d0061006700650100000000ffffffff0000008300fffffffb000000140043006f006c006f00720049006d00610067006501000001e4000001bf0000000000000000fb00000014004400650070007400680049006d006100670065000000029a000001090000001600fffffffb000000140043006f006c006f00720049006d00610067006501000001d2000001920000000000000000fb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a004400650070007400680000000166000000b50000000000000000fb000000280020002d0020005400720061006a006500630074006f0072007900200053006c00690064006500720000000000ffffffff0000000000000000fb0000001c004d006f00740069006f006e0050006c0061006e006e0069006e006700000001f4000001af0000000000000000fb00000044004d006f00740069006f006e0050006c0061006e006e0069006e00670020002d0020005400720061006a006500630074006f0072007900200053006c00690064006500720000000000ffffffff0000000000000000fb0000001c00460069006c0074006500720065006400200043006c006f00750064000000029a000001050000000000000000fb000000140043006f006c006f00720049006d006100670065010000019f000002040000000000000000fb0000000a0049006d00610067006500000001bc0000008a0000000000000000fb0000000a0049006d006100670065000000021c000000ee0000000000000000fb0000000c00430061006d00650072006100000002db000000c80000000000000000fb000000140043006f006c006f00720049006d006100670065010000019f000002040000000000000000fb0000000a0049006d006100670065010000019c000002070000000000000000000000010000017b00000366fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d00000366000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000780000000a9fc0100000002fb000000140049006e0066007200610049006d00610067006502000000000000006b000001f30000002efb0000000a00560069006500770073030000004e00000080000002e100000197000000030000056300000039fc0100000002fb0000000800540069006d0065000000000000000563000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000004f70000036600000004000000040000000800000008fc00000002000000020000000000000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
420 | Selection:
421 | collapsed: false
422 | Time:
423 | collapsed: false
424 | Tool Properties:
425 | collapsed: false
426 | Views:
427 | collapsed: true
428 | Width: 1920
429 | X: 0
430 | Y: 27
431 |
--------------------------------------------------------------------------------