├── .gitignore ├── LICENSE ├── README ├── calib ├── opt_depth_image.py └── record_depth_and_joints.py ├── doc ├── Makefile ├── _templates │ └── layout.html ├── conf.py ├── figures.key ├── figures.pdf ├── index.rst ├── ps3-annotated.jpg ├── ps3.jpg └── schematic.png ├── example_pipeline └── overhand.py ├── fastrapp ├── CMakeLists.txt ├── FindEigen.cmake ├── FindNumpy.cmake ├── boost-python.cmake ├── config.cpp ├── config.hpp ├── fastrapp.cpp ├── main.cmake ├── numpy_utils.hpp ├── record_rgbd_video.cpp └── stl_to_string.hpp ├── notes ├── picloud_setup.txt └── todo.txt ├── pr2 └── base_trajectory_controller.py ├── rapprentice ├── PR2.py ├── __init__.py ├── animate_traj.py ├── bag_proc.py ├── berkeley_pr2.py ├── call_and_print.py ├── cloud_proc_funcs.py ├── clouds.py ├── colorize.py ├── conversions.py ├── cv_plot_utils.py ├── func_utils.py ├── interactive_roi.py ├── math_utils.py ├── planning.py ├── plotting_openrave.py ├── plotting_plt.py ├── pr2_trajectories.py ├── registration.py ├── resampling.py ├── retiming.py ├── ros2rave.py ├── svds.py ├── task_execution.py ├── testing.py ├── tps.py ├── transformations.py └── yes_or_no.py ├── runpylint.sh ├── scripts ├── animate_demo.py ├── command_pr2.py ├── create_new_task_dir.py ├── do_task.py ├── download_sampledata.py ├── generate_annotations.py ├── generate_h5.py ├── record_demo.py ├── upload_sampledata.py ├── view_demos.py └── view_kinect.py └── test ├── register_rope_pairs.py ├── run_all_tests.py ├── run_registration.py ├── test_registration_synthetic.py ├── tps_unit_tests.py └── verify_h5.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | # C extensions 4 | *.so 5 | 6 | # Packages 7 | *.egg 8 | *.egg-info 9 | dist 10 | build 11 | eggs 12 | parts 13 | bin 14 | var 15 | sdist 16 | develop-eggs 17 | .installed.cfg 18 | lib 19 | lib64 20 | __pycache__ 21 | 22 | # Installer logs 23 | pip-log.txt 24 | 25 | # Unit test / coverage reports 26 | .coverage 27 | 
.tox 28 | nosetests.xml 29 | 30 | # Translations 31 | *.mo 32 | 33 | # Mr Developer 34 | .mr.developer.cfg 35 | .project 36 | .pydevproject 37 | 38 | 39 | 40 | ############## 41 | junk 42 | fastmath 43 | *.wpr 44 | *.wpu 45 | 46 | doc/_build -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013, John Schulman 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 5 | 6 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 7 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | 9 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
10 | 11 | http://opensource.org/licenses/BSD-2-Clause -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | Documentation: http://rll.berkeley.edu/rapprentice -------------------------------------------------------------------------------- /calib/opt_depth_image.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | parser = argparse.ArgumentParser() 3 | parser.add_argument("fname") 4 | args = parser.parse_args() 5 | 6 | 7 | import numpy as np 8 | import itertools 9 | import cv2 10 | from rapprentice import ros2rave 11 | import sensorsimpy, trajoptpy, openravepy 12 | from rapprentice import berkeley_pr2 13 | import scipy.optimize as opt 14 | from rapprentice import clouds 15 | 16 | DEBUG_PLOT = True 17 | 18 | 19 | npzfile = np.load(args.fname) 20 | 21 | depth_images = npzfile["depth_images"] 22 | valid_masks = [(d > 0) & ((clouds.depth_to_xyz(d, 525)**2).sum(axis=2) < 1.5**2) for d in depth_images] 23 | depth_images = depth_images/1000. 
24 | joint_positions = npzfile["joint_positions"] 25 | joint_names = npzfile["names"] if "names" in npzfile else npzfile["joint_names"] 26 | 27 | env = openravepy.Environment() 28 | env.StopSimulation() 29 | env.Load("robots/pr2-beta-static.zae") 30 | robot = env.GetRobots()[0] 31 | 32 | viewer = trajoptpy.GetViewer(env) 33 | viewer.Step() 34 | sensor = sensorsimpy.CreateFakeKinect(env) 35 | 36 | r2r = ros2rave.RosToRave(robot, joint_names) 37 | 38 | rave_inds = r2r.rave_inds 39 | rave_values = np.array([r2r.convert(ros_values) for ros_values in joint_positions]) 40 | robot.SetActiveDOFs(rave_inds) 41 | robot.SetActiveDOFValues(rave_values[0]) 42 | 43 | n_imgs = len(depth_images) 44 | assert len(joint_positions) == n_imgs 45 | 46 | MAX_DIST = 1.5 47 | 48 | def calc_Thk(xyz, rod): 49 | T_h_k = openravepy.matrixFromAxisAngle(rod) 50 | T_h_k[:3,3] += xyz 51 | return T_h_k 52 | 53 | def calculate_sim_depths(xyz, rod, f): 54 | T_h_k = calc_Thk(xyz, rod) 55 | T_w_k = robot.GetLink("head_plate_frame").GetTransform().dot(T_h_k) 56 | 57 | sensor.SetPose(T_w_k[:3,3], openravepy.quatFromRotationMatrix(T_w_k[:3,:3])) 58 | sensor.SetIntrinsics(f) 59 | out = [] 60 | 61 | for dofs in rave_values: 62 | robot.SetActiveDOFValues(dofs) 63 | viewer.UpdateSceneData() 64 | sensor.Update() 65 | out.append(sensor.GetDepthImage()) 66 | 67 | return out 68 | 69 | def calculate_depth_error(xyz, rod, f): 70 | errs = np.empty(n_imgs) 71 | sim_depths = calculate_sim_depths(xyz, rod, f) 72 | for i in xrange(n_imgs): 73 | sim_depth = sim_depths[i] 74 | real_depth = depth_images[i] 75 | valid_mask = valid_masks[i] 76 | errs[i] = (real_depth - sim_depth)[valid_mask].__abs__().sum() 77 | 78 | return errs.sum() 79 | 80 | def plot_real_and_sim(x): 81 | print x 82 | sim_depths = calculate_sim_depths(*vec2args(x)) 83 | for i in xrange(1): 84 | sim_depth = sim_depths[i] 85 | real_depth = depth_images[i] 86 | cv2.imshow("sim%i"%i, sim_depth/MAX_DIST) 87 | cv2.imshow("real%i"%i, real_depth/MAX_DIST) 88 | 
cv2.imshow("valid%i"%i, valid_masks[i].astype('uint8')*255) 89 | cv2.moveWindow("sim%i"%i, 0, 0) 90 | cv2.moveWindow("real%i"%i, 640, 0) 91 | cv2.moveWindow("valid%i"%i, 1280, 0) 92 | #print "press key to continue" 93 | cv2.waitKey(10) 94 | 95 | 96 | def calc_error_wrapper(x): 97 | return calculate_depth_error(*vec2args(x)) 98 | def vec2args(x): 99 | return x[0:3], x[3:6], x[6] 100 | def args2vec(xyz, rod, f): 101 | out = np.empty(7) 102 | out[0:3] = xyz 103 | out[3:6] = rod 104 | out[6] = f 105 | return out 106 | 107 | T_w_k = berkeley_pr2.get_kinect_transform(robot) 108 | 109 | #o = T_w_k[:3,3] 110 | #x = T_w_k[:3,0] 111 | #y = T_w_k[:3,1] 112 | #z = T_w_k[:3,2] 113 | #handles = [] 114 | #handles.append(env.drawarrow(o, o+.2*x, .005,(1,0,0,1))) 115 | #handles.append(env.drawarrow(o, o+.2*y, .005,(0,1,0,1))) 116 | #handles.append(env.drawarrow(o, o+.2*z, .005,(0,0,1,1))) 117 | #viewer.Idle() 118 | 119 | 120 | T_h_k_init = berkeley_pr2.T_h_k 121 | 122 | xyz_init = T_h_k_init[:3,3] 123 | rod_init = openravepy.axisAngleFromRotationMatrix(T_h_k_init[:3,:3]) 124 | f_init = 525 125 | 126 | #from rapprentice.math_utils import linspace2d 127 | 128 | 129 | 130 | #sim_depths = calculate_sim_depths(xyz_init, rod_init, f_init) 131 | #cv2.imshow('hi',np.clip(sim_depths[0]/3,0,1)) 132 | #cv2.waitKey() 133 | #plot_real_and_sim(args2vec(xyz_init, rod_init, f_init)) 134 | #calculate_depth_error(xyz_init, rod_init, f_init) 135 | soln = opt.fmin(calc_error_wrapper, args2vec(xyz_init, rod_init, f_init),callback=plot_real_and_sim) 136 | (best_xyz, best_rod, best_f) = vec2args(soln) 137 | print "xyz, rod:", best_xyz, best_rod 138 | print "T_h_k:", calc_Thk(best_xyz, best_rod) 139 | print "f:",best_f -------------------------------------------------------------------------------- /calib/record_depth_and_joints.py: -------------------------------------------------------------------------------- 1 | import cloudprocpy 2 | import rospy 3 | from rapprentice.PR2 import TopicListener 4 | 
import sensor_msgs.msg as sm 5 | import numpy as np 6 | 7 | 8 | rospy.init_node("record_d_and_j", disable_signals = True) 9 | 10 | 11 | grabber = cloudprocpy.CloudGrabber() 12 | grabber.startRGBD() 13 | 14 | joint_listener = TopicListener("/joint_states", sm.JointState) 15 | 16 | depth_images = [] 17 | joint_positions = [] 18 | 19 | 20 | while True: 21 | exit_requested = False 22 | while True: 23 | 24 | print "press 'a' to acquire image. press 'e' to exit" 25 | k = raw_input() 26 | if k == 'a': 27 | break 28 | elif k == 'e': 29 | exit_requested=True 30 | break 31 | else: 32 | print "invalid selection" 33 | if exit_requested: 34 | break 35 | 36 | joint_positions.append(joint_listener.last_msg.position) 37 | _, d = grabber.getRGBD() 38 | depth_images.append(d) 39 | 40 | 41 | fname = "/tmp/calib_data.npz" 42 | print "saving to %s"%fname 43 | np.savez(fname, depth_images=depth_images, joint_positions=joint_positions, joint_names = joint_listener.last_msg.name) 44 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
14 | 15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest 16 | 17 | upload: 18 | rsync -azvu --delete --progress ./ pabbeel@rll.berkeley.edu:/var/www/rapprentice/doc/ 19 | 20 | help: 21 | @echo "Please use \`make ' where is one of" 22 | @echo " html to make standalone HTML files" 23 | @echo " dirhtml to make HTML files named index.html in directories" 24 | @echo " singlehtml to make a single large HTML file" 25 | @echo " pickle to make pickle files" 26 | @echo " json to make JSON files" 27 | @echo " htmlhelp to make HTML files and a HTML help project" 28 | @echo " qthelp to make HTML files and a qthelp project" 29 | @echo " devhelp to make HTML files and a Devhelp project" 30 | @echo " epub to make an epub" 31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 32 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 33 | @echo " text to make text files" 34 | @echo " man to make manual pages" 35 | @echo " changes to make an overview of all changed/added/deprecated items" 36 | @echo " linkcheck to check all external links for integrity" 37 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 38 | 39 | clean: 40 | -rm -rf $(BUILDDIR)/* 41 | 42 | html: 43 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 44 | @echo 45 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 46 | 47 | dirhtml: 48 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 49 | @echo 50 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 51 | 52 | singlehtml: 53 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 54 | @echo 55 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 56 | 57 | pickle: 58 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 59 | @echo 60 | @echo "Build finished; now you can process the pickle files." 
61 | 62 | json: 63 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 64 | @echo 65 | @echo "Build finished; now you can process the JSON files." 66 | 67 | htmlhelp: 68 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 69 | @echo 70 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 71 | ".hhp project file in $(BUILDDIR)/htmlhelp." 72 | 73 | qthelp: 74 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 75 | @echo 76 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 77 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 78 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/rapprentice.qhcp" 79 | @echo "To view the help file:" 80 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/rapprentice.qhc" 81 | 82 | devhelp: 83 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 84 | @echo 85 | @echo "Build finished." 86 | @echo "To view the help file:" 87 | @echo "# mkdir -p $$HOME/.local/share/devhelp/rapprentice" 88 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/rapprentice" 89 | @echo "# devhelp" 90 | 91 | epub: 92 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 93 | @echo 94 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 95 | 96 | latex: 97 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 98 | @echo 99 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 100 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 101 | "(use \`make latexpdf' here to do that automatically)." 102 | 103 | latexpdf: 104 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 105 | @echo "Running LaTeX files through pdflatex..." 106 | make -C $(BUILDDIR)/latex all-pdf 107 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 108 | 109 | text: 110 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 111 | @echo 112 | @echo "Build finished. 
The text files are in $(BUILDDIR)/text." 113 | 114 | man: 115 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 116 | @echo 117 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 118 | 119 | changes: 120 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 121 | @echo 122 | @echo "The overview file is in $(BUILDDIR)/changes." 123 | 124 | linkcheck: 125 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 126 | @echo 127 | @echo "Link check complete; look for any errors in the above output " \ 128 | "or in $(BUILDDIR)/linkcheck/output.txt." 129 | 130 | doctest: 131 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 132 | @echo "Testing of doctests in the sources finished, look at the " \ 133 | "results in $(BUILDDIR)/doctest/output.txt." 134 | -------------------------------------------------------------------------------- /doc/_templates/layout.html: -------------------------------------------------------------------------------- 1 | {% extends "!layout.html" %} 2 | 3 | {%- block extrahead %} 4 | 7 | {% endblock %} 8 | -------------------------------------------------------------------------------- /doc/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # rapprentice documentation build configuration file, created by 4 | # sphinx-quickstart on Mon Apr 15 13:35:19 2013. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. 
If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | #sys.path.insert(0, os.path.abspath('.')) 20 | 21 | # -- General configuration ----------------------------------------------------- 22 | 23 | # If your documentation needs a minimal Sphinx version, state it here. 24 | #needs_sphinx = '1.0' 25 | 26 | # Add any Sphinx extension module names here, as strings. They can be extensions 27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 28 | extensions = [] 29 | 30 | # Add any paths that contain templates here, relative to this directory. 31 | templates_path = ['_templates'] 32 | 33 | # The suffix of source filenames. 34 | source_suffix = '.rst' 35 | 36 | # The encoding of source files. 37 | #source_encoding = 'utf-8-sig' 38 | 39 | # The master toctree document. 40 | master_doc = 'index' 41 | 42 | # General information about the project. 43 | project = u'rapprentice' 44 | copyright = u'2013, John Schulman' 45 | 46 | # The version info for the project you're documenting, acts as replacement for 47 | # |version| and |release|, also used in various other places throughout the 48 | # built documents. 49 | # 50 | # The short X.Y version. 51 | version = '0' 52 | # The full version, including alpha/beta/rc tags. 53 | release = '0' 54 | 55 | # The language for content autogenerated by Sphinx. Refer to documentation 56 | # for a list of supported languages. 57 | #language = None 58 | 59 | # There are two options for replacing |today|: either, you set today to some 60 | # non-false value, then it is used: 61 | #today = '' 62 | # Else, today_fmt is used as the format for a strftime call. 63 | #today_fmt = '%B %d, %Y' 64 | 65 | # List of patterns, relative to source directory, that match files and 66 | # directories to ignore when looking for source files. 67 | exclude_patterns = ['_build'] 68 | 69 | # The reST default role (used for this markup: `text`) to use for all documents. 
70 | #default_role = None 71 | 72 | # If true, '()' will be appended to :func: etc. cross-reference text. 73 | #add_function_parentheses = True 74 | 75 | # If true, the current module name will be prepended to all description 76 | # unit titles (such as .. function::). 77 | #add_module_names = True 78 | 79 | # If true, sectionauthor and moduleauthor directives will be shown in the 80 | # output. They are ignored by default. 81 | #show_authors = False 82 | 83 | # The name of the Pygments (syntax highlighting) style to use. 84 | pygments_style = 'sphinx' 85 | 86 | # A list of ignored prefixes for module index sorting. 87 | #modindex_common_prefix = [] 88 | 89 | 90 | # -- Options for HTML output --------------------------------------------------- 91 | 92 | # The theme to use for HTML and HTML Help pages. See the documentation for 93 | # a list of builtin themes. 94 | html_theme = 'nature' 95 | 96 | # Theme options are theme-specific and customize the look and feel of a theme 97 | # further. For a list of options available for each theme, see the 98 | # documentation. 99 | #html_theme_options = {} 100 | 101 | # Add any paths that contain custom themes here, relative to this directory. 102 | #html_theme_path = [] 103 | 104 | # The name for this set of Sphinx documents. If None, it defaults to 105 | # " v documentation". 106 | #html_title = None 107 | 108 | # A shorter title for the navigation bar. Default is the same as html_title. 109 | #html_short_title = None 110 | 111 | # The name of an image file (relative to this directory) to place at the top 112 | # of the sidebar. 113 | #html_logo = None 114 | 115 | # The name of an image file (within the static path) to use as favicon of the 116 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 117 | # pixels large. 118 | #html_favicon = None 119 | 120 | # Add any paths that contain custom static files (such as style sheets) here, 121 | # relative to this directory. 
They are copied after the builtin static files, 122 | # so a file named "default.css" will overwrite the builtin "default.css". 123 | html_static_path = ['_static'] 124 | 125 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 126 | # using the given strftime format. 127 | #html_last_updated_fmt = '%b %d, %Y' 128 | 129 | # If true, SmartyPants will be used to convert quotes and dashes to 130 | # typographically correct entities. 131 | #html_use_smartypants = True 132 | 133 | # Custom sidebar templates, maps document names to template names. 134 | #html_sidebars = {} 135 | 136 | # Additional templates that should be rendered to pages, maps page names to 137 | # template names. 138 | #html_additional_pages = {} 139 | 140 | # If false, no module index is generated. 141 | #html_domain_indices = True 142 | 143 | # If false, no index is generated. 144 | #html_use_index = True 145 | 146 | # If true, the index is split into individual pages for each letter. 147 | #html_split_index = False 148 | 149 | # If true, links to the reST sources are added to the pages. 150 | #html_show_sourcelink = True 151 | 152 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 153 | #html_show_sphinx = True 154 | 155 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 156 | #html_show_copyright = True 157 | 158 | # If true, an OpenSearch description file will be output, and all pages will 159 | # contain a tag referring to it. The value of this option must be the 160 | # base URL from which the finished HTML is served. 161 | #html_use_opensearch = '' 162 | 163 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 164 | #html_file_suffix = None 165 | 166 | # Output file base name for HTML help builder. 167 | htmlhelp_basename = 'rapprenticedoc' 168 | 169 | 170 | # -- Options for LaTeX output -------------------------------------------------- 171 | 172 | # The paper size ('letter' or 'a4'). 
173 | #latex_paper_size = 'letter' 174 | 175 | # The font size ('10pt', '11pt' or '12pt'). 176 | #latex_font_size = '10pt' 177 | 178 | # Grouping the document tree into LaTeX files. List of tuples 179 | # (source start file, target name, title, author, documentclass [howto/manual]). 180 | latex_documents = [ 181 | ('index', 'rapprentice.tex', u'rapprentice Documentation', 182 | u'John Schulman', 'manual'), 183 | ] 184 | 185 | # The name of an image file (relative to this directory) to place at the top of 186 | # the title page. 187 | #latex_logo = None 188 | 189 | # For "manual" documents, if this is true, then toplevel headings are parts, 190 | # not chapters. 191 | #latex_use_parts = False 192 | 193 | # If true, show page references after internal links. 194 | #latex_show_pagerefs = False 195 | 196 | # If true, show URL addresses after external links. 197 | #latex_show_urls = False 198 | 199 | # Additional stuff for the LaTeX preamble. 200 | #latex_preamble = '' 201 | 202 | # Documents to append as an appendix to all manuals. 203 | #latex_appendices = [] 204 | 205 | # If false, no module index is generated. 206 | #latex_domain_indices = True 207 | 208 | 209 | # -- Options for manual page output -------------------------------------------- 210 | 211 | # One entry per manual page. List of tuples 212 | # (source start file, name, description, authors, manual section). 
213 | man_pages = [ 214 | ('index', 'rapprentice', u'rapprentice Documentation', 215 | [u'John Schulman'], 1) 216 | ] 217 | -------------------------------------------------------------------------------- /doc/figures.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/joschu/rapprentice/9232a6a21e2c80f00854912f07dcdc725b0be95a/doc/figures.key -------------------------------------------------------------------------------- /doc/figures.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/joschu/rapprentice/9232a6a21e2c80f00854912f07dcdc725b0be95a/doc/figures.pdf -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | .. rapprentice documentation master file, created by 2 | sphinx-quickstart on Mon Apr 15 13:35:19 2013. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | ******************************************************** 7 | Rapprentice: teaching robots by example 8 | ******************************************************** 9 | 10 | .. Contents: 11 | .. 12 | .. .. toctree:: 13 | .. :maxdepth: 2 14 | .. 15 | 16 | Dependencies and build procedure 17 | ================================== 18 | 19 | Download the source from https://github.com/joschu/rapprentice 20 | 21 | Dependencies: 22 | 23 | - pcl >= 1.6 24 | - `trajopt `_. devel branch. Build with the option BUILD_CLOUDPROC enabled. 25 | - python modules: scipy, h5py, networkx, cv2 (OpenCV) 26 | 27 | Build procedure: 28 | 29 | - Build the fastrapp subdirectory using the usual cmake procedure. This includes a boost python module with a few functions, and a program for recording RGB+Depth video (from Primesense cameras). 
Let's assume now that your build directory is ``fastrapp_build_dir``. 30 | - Add the rapprentice root directory and fastrapp_build_dir/lib to your ``PYTHONPATH``. 31 | - Add ``fastrapp_build_dir/bin`` to your $PATH. 32 | 33 | Training 34 | ================================================ 35 | 36 | Overview 37 | ----------- 38 | 39 | The training pipeline is illustrated below. 40 | 41 | .. figure:: schematic.png 42 | :width: 75% 43 | :alt: schematic for recording and processing demonstrations 44 | 45 | File formats: 46 | 47 | - RGBD video: a directory with a series of png files (depth images, measured in millimeters) and jpg files (rgb images) and another file with the ROS timestamps. 48 | - annotations file: yaml 49 | - master task file: yaml 50 | - processed demonstration file: hdf5 51 | 52 | See the ``sampledata`` directory for examples of these formats. 53 | 54 | Teaching procedure 55 | --------------------- 56 | 57 | Use the ps3 controller to indicate "look", "start", and "stop" times. 58 | 59 | .. figure:: ps3-annotated.jpg 60 | :width: 40% 61 | :alt: ps3 62 | 63 | 64 | Processing training data 65 | -------------------------- 66 | 67 | You'll presumably collect multiple runs of the whole task. Then you run a script to generate an hdf5 file that aggregates all these demonstrations, which are broken into segments. 68 | 69 | To see an example of how to run the data processing scripts, see the script ``example_pipeline/overhand.py``, which processes an example dataset, which contains demonstrations of tying an overhand knot in rope. To run the script, you'll need to download the sample data with ``scripts/download_sampledata.py``. 70 | 71 | 72 | Execution 73 | ============= 74 | 75 | :: 76 | 77 | ./do_task.py h5file 78 | 79 | You can run this program in various simulation configurations that let you test your algorithm without using the robot. 
80 | 81 | 82 | Tips for debugging execution 83 | ------------------------------- 84 | 85 | - First make sure plots are enabled for the registration algorithm so you can see the demonstration point cloud (or landmarks) being warped to match the current point cloud. Check that the transformation looks good and the transformation is sending the points to the right place. 86 | - Next, enable plotting for the trajectory optimization algorithm. Look at the purple lines, which indicate the position error. Make sure the target positions and orientations (indicated by axes) are correct. 87 | - Look at the output of the trajectory optimization algorithm, which might tell you if something funny is going on. 88 | 89 | 90 | Extras 91 | ======== 92 | 93 | Various other scripts are included in the ``scripts`` directory: 94 | 95 | - ``view_kinect.py``: view the live rgb+depth images from your Primesense camera. 96 | - ``command_pr2.py``: for conveniently ordering the pr2 around, run ``ipython -i command_pr2.py``. Then you can control the pr2 with ipython by typing commands like ``pr2.rarm.goto_posure(`side`)`` or ``pr2.head.set_pan_tilt(0,1)``. 97 | - ``animate_demo.py`` animates a demonstration. 98 | 99 | 100 | Miscellaneous notes 101 | ===================== 102 | 103 | ``PR2.py`` is set up so you can send commands to multiple bodyparts simultaneously. So most of the commands, like ``goto_joint_positions`` are non-blocking. If you want to wait until all commands are done, do ``pr2.join_all()``. 
104 | 105 | Indices and tables 106 | ================== 107 | 108 | * :ref:`genindex` 109 | * :ref:`modindex` 110 | * :ref:`search` 111 | 112 | -------------------------------------------------------------------------------- /doc/ps3-annotated.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/joschu/rapprentice/9232a6a21e2c80f00854912f07dcdc725b0be95a/doc/ps3-annotated.jpg -------------------------------------------------------------------------------- /doc/ps3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/joschu/rapprentice/9232a6a21e2c80f00854912f07dcdc725b0be95a/doc/ps3.jpg -------------------------------------------------------------------------------- /doc/schematic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/joschu/rapprentice/9232a6a21e2c80f00854912f07dcdc725b0be95a/doc/schematic.png -------------------------------------------------------------------------------- /example_pipeline/overhand.py: -------------------------------------------------------------------------------- 1 | import os 2 | from rapprentice.call_and_print import call_and_print 3 | os.chdir("../scripts") 4 | od="~/henry_sandbox/rapprentice/sampledata/overhand" 5 | call_and_print("./generate_annotations.py %(od)s/demo0_2013-04-20-15-58-09.bag %(od)s/demo0_2013-04-20-15-58-09.bag.ann.yaml"%dict(od=od)) 6 | call_and_print("./generate_h5.py %(od)s/overhand.yaml"%dict(od=od)) 7 | -------------------------------------------------------------------------------- /fastrapp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | project(fastrapp) 3 | 4 | list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}) 5 | 6 | find_package(OpenCV REQUIRED) 7 | find_package(PCL 1.6 REQUIRED) 8 | 
include(boost-python.cmake) 9 | 10 | # http://cmake.3232098.n2.nabble.com/Default-value-for-CMAKE-BUILD-TYPE-td7550756.html 11 | if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) 12 | message(STATUS "Setting build type to 'Debug' as none was specified.") 13 | set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE) 14 | # Set the possible values of build type for cmake-gui 15 | set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" 16 | "MinSizeRel" "RelWithDebInfo") 17 | endif() 18 | 19 | 20 | set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) 21 | set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) 22 | set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) 23 | set(BUILD_SHARED_LIBS true) 24 | 25 | 26 | option(OPT_ROS "build stuff involving ros, e.g. record_demo program" OFF) 27 | 28 | if (OPT_ROS) 29 | set(ROS_VERSION "groovy" CACHE STRING "what version of ros") 30 | 31 | set(ROS_INCLUDE_DIRS "/opt/ros/${ROS_VERSION}/include") 32 | set(ROS_LIB_DIR "/opt/ros/${ROS_VERSION}/lib") 33 | set(ROS_LIBRARIES "${ROS_LIB_DIR}/libroscpp.so" "${ROS_LIB_DIR}/librostime.so") 34 | endif (OPT_ROS) 35 | 36 | include_directories(${PCL_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} ${ROS_INCLUDE_DIRS}) 37 | link_directories(${PCL_LIBRARY_DIRS} ${OpenCV_LIBRARY_DIRS}) 38 | add_definitions(${PCL_DEFINITIONS}) 39 | add_definitions("-DEIGEN_DEFAULT_TO_ROW_MAJOR") 40 | 41 | if (OPT_ROS) 42 | add_executable(record_rgbd_video record_rgbd_video.cpp config.cpp) 43 | target_link_libraries(record_rgbd_video ${OpenCV_LIBRARIES} ${PCL_LIBRARIES} ${ROS_LIBRARIES} boost_program_options) 44 | endif(OPT_ROS) 45 | 46 | include_directories(${PYTHON_NUMPY_INCLUDE_DIR}) 47 | boost_python_module(fastrapp fastrapp.cpp) 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /fastrapp/FindEigen.cmake: -------------------------------------------------------------------------------- 1 | 
find_package(PkgConfig) 2 | pkg_check_modules(PC_EIGEN eigen3) 3 | set(EIGEN_DEFINITIONS ${PC_EIGEN_CFLAGS_OTHER}) 4 | 5 | find_path(EIGEN_INCLUDE_DIR Eigen/Core 6 | HINTS ${PC_EIGEN_INCLUDEDIR} ${PC_EIGEN_INCLUDE_DIRS} "${EIGEN_ROOT}" "$ENV{EIGEN_ROOT}" 7 | PATHS "$ENV{PROGRAMFILES}/Eigen" "$ENV{PROGRAMW6432}/Eigen" 8 | "$ENV{PROGRAMFILES}/Eigen 3.0.0" "$ENV{PROGRAMW6432}/Eigen 3.0.0" 9 | PATH_SUFFIXES eigen3 include/eigen3 include) 10 | 11 | set(EIGEN_INCLUDE_DIRS ${EIGEN_INCLUDE_DIR}) 12 | 13 | include(FindPackageHandleStandardArgs) 14 | find_package_handle_standard_args(Eigen DEFAULT_MSG EIGEN_INCLUDE_DIR) 15 | 16 | mark_as_advanced(EIGEN_INCLUDE_DIR) 17 | 18 | if(EIGEN_FOUND) 19 | #message(STATUS "Eigen found (include: ${EIGEN_INCLUDE_DIRS})") 20 | endif(EIGEN_FOUND) 21 | 22 | 23 | set(Eigen_INCLUDE_DIRS ${EIGEN_INCLUDE_DIRS}) 24 | set(Eigen_FOUND ${EIGEN_FOUND}) 25 | -------------------------------------------------------------------------------- /fastrapp/FindNumpy.cmake: -------------------------------------------------------------------------------- 1 | # 2 | # $Id: $ 3 | # 4 | # Author(s): Anton Deguet 5 | # Created on: 2010-01-20 6 | # 7 | # (C) Copyright 2010 Johns Hopkins University (JHU), All Rights 8 | # Reserved. 9 | # 10 | # --- begin cisst license - do not edit --- 11 | # 12 | # This software is provided "as is" under an open source license, with 13 | # no warranty. The complete license can be found in license.txt and 14 | # http://www.cisst.org/cisst/license.txt. 
15 | # 16 | # --- end cisst license --- 17 | # 18 | # File based on FindNUMARRAY distributed with ITK 3.4 (see itk.org) 19 | # 20 | # Main modifications: 21 | # - use Numpy instead of Numarray for all naming 22 | # - added path for Python 2.5 and 2.6 23 | # - renamed python script generated (det_npp became determineNumpyPath) 24 | # - use lower case for CMake commands and keywords 25 | # - updated python script to use get_include, not get_numpy_include which is now deprecated 26 | # 27 | # --- 28 | # 29 | # Try to find numpy python package 30 | # Once done this will define 31 | # 32 | # PYTHON_NUMPY_FOUND - system has numpy development package and it should be used 33 | # PYTHON_NUMPY_INCLUDE_DIR - directory where the arrayobject.h header file can be found 34 | # 35 | # 36 | 37 | if (PYTHON_NUMPY_INCLUDE_DIR) 38 | return() 39 | endif() 40 | 41 | set(PYTHON_EXECUTABLE "python") 42 | if(PYTHON_EXECUTABLE) 43 | file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/determineNumpyPath.py "try: import numpy; print numpy.get_include()\nexcept: pass\n") 44 | exec_program("${PYTHON_EXECUTABLE}" 45 | ARGS "\"${CMAKE_CURRENT_BINARY_DIR}/determineNumpyPath.py\"" 46 | OUTPUT_VARIABLE NUMPY_PATH 47 | ) 48 | endif(PYTHON_EXECUTABLE) 49 | 50 | find_path(_PYTHON_NUMPY_INCLUDE_DIR arrayobject.h 51 | "${NUMPY_PATH}/numpy/" 52 | "${PYTHON_INCLUDE_PATH}/numpy/" 53 | /usr/include/python2.6/numpy/ 54 | /usr/include/python2.5/numpy/ 55 | /usr/include/python2.4/numpy/ 56 | /usr/include/python2.3/numpy/ 57 | DOC "Directory where the arrayobject.h header file can be found. 
This file is part of the numpy package" 58 | ) 59 | 60 | if(_PYTHON_NUMPY_INCLUDE_DIR) 61 | set(PYTHON_NUMPY_FOUND 1 CACHE INTERNAL "Python numpy development package is available") 62 | get_filename_component(PYTHON_NUMPY_INCLUDE_DIR ${_PYTHON_NUMPY_INCLUDE_DIR} PATH) 63 | endif(_PYTHON_NUMPY_INCLUDE_DIR) 64 | -------------------------------------------------------------------------------- /fastrapp/boost-python.cmake: -------------------------------------------------------------------------------- 1 | function(boost_python_module NAME) 2 | find_package(Boost COMPONENTS python REQUIRED) 3 | find_package(PythonLibs REQUIRED) 4 | find_package(Numpy) 5 | 6 | 7 | 8 | set(DEP_LIBS 9 | ${Boost_PYTHON_LIBRARY} 10 | ${PYTHON_LIBRARIES} 11 | ) 12 | #these are required includes for every ecto module 13 | include_directories( 14 | ${PYTHON_INCLUDE_PATH} 15 | ${Boost_INCLUDE_DIRS} 16 | ) 17 | add_library(${NAME} SHARED 18 | ${ARGN} 19 | ) 20 | set_target_properties(${NAME} 21 | PROPERTIES 22 | OUTPUT_NAME ${NAME} 23 | COMPILE_FLAGS "${FASTIDIOUS_FLAGS}" 24 | LINK_FLAGS -dynamic 25 | PREFIX "" 26 | ) 27 | if( WIN32 ) 28 | set_target_properties(${NAME} PROPERTIES SUFFIX ".pyd") 29 | elseif( APPLE OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin") 30 | # on mac osx, python cannot import libraries with .dylib extension 31 | set_target_properties(${NAME} PROPERTIES SUFFIX ".so") 32 | endif() 33 | target_link_libraries(${NAME} 34 | ${DEP_LIBS} 35 | ) 36 | endfunction() 37 | -------------------------------------------------------------------------------- /fastrapp/config.cpp: -------------------------------------------------------------------------------- 1 | #include "config.hpp" 2 | using namespace std; 3 | 4 | namespace util { 5 | 6 | void CommandParser::read(int argc, char* argv[]) { 7 | // create boost options_description based on variables, parser 8 | po::options_description od; 9 | od.add_options()("help,h", "produce help message"); 10 | for (int i=0; i < configs.size(); ++i) { 11 | 
for (int j = 0; j < configs[i].params.size(); ++j) { 12 | configs[i].params[j]->addToBoost(od); 13 | } 14 | } 15 | po::variables_map vm; 16 | po::store(po::command_line_parser(argc, argv) 17 | .options(od) 18 | .run() 19 | , vm); 20 | if (vm.count("help")) { 21 | std::cout << "usage: " << argv[0] << " [options]" << std::endl; 22 | std::cout << od << std::endl; 23 | exit(0); 24 | } 25 | po::notify(vm); 26 | } 27 | 28 | 29 | } 30 | -------------------------------------------------------------------------------- /fastrapp/config.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "stl_to_string.hpp" 9 | 10 | namespace util { 11 | 12 | 13 | namespace po = boost::program_options; 14 | 15 | struct ParameterBase { 16 | std::string m_name; 17 | std::string m_desc; 18 | ParameterBase(const std::string& name, const std::string& desc) : m_name(name), m_desc(desc) {} 19 | virtual void addToBoost(po::options_description&) = 0; 20 | virtual ~ParameterBase() {} 21 | }; 22 | typedef boost::shared_ptr ParameterBasePtr; 23 | 24 | template 25 | struct ParameterVec : ParameterBase { 26 | std::vector* m_value; 27 | ParameterVec(std::string name, std::vector* value, std::string desc) : 28 | ParameterBase(name, desc), 29 | m_value(value) {} 30 | void addToBoost(po::options_description& od) { 31 | od.add_options()(m_name.c_str(), po::value(m_value)->default_value(*m_value, Str(*m_value))->multitoken(), m_desc.c_str()); 32 | } 33 | }; 34 | 35 | template 36 | struct Parameter : ParameterBase { 37 | T* m_value; 38 | Parameter(std::string name, T* value, std::string desc) : 39 | ParameterBase(name, desc), 40 | m_value(value) {} 41 | void addToBoost(po::options_description& od) { 42 | od.add_options()(m_name.c_str(), po::value(m_value)->default_value(*m_value, Str(*m_value)), m_desc.c_str()); 43 | } 44 | }; 45 | 46 | struct Config { 47 | std::vector< 
ParameterBasePtr > params; 48 | void add(ParameterBase* param) {params.push_back(ParameterBasePtr(param));} 49 | }; 50 | 51 | struct CommandParser { 52 | std::vector configs; 53 | void addGroup(const Config& config) { 54 | configs.push_back(config); 55 | } 56 | CommandParser(const Config& config) { 57 | addGroup(config); 58 | } 59 | void read(int argc, char* argv[]); 60 | }; 61 | 62 | } 63 | -------------------------------------------------------------------------------- /fastrapp/fastrapp.cpp: -------------------------------------------------------------------------------- 1 | #include "numpy_utils.hpp" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | 13 | using namespace Eigen; 14 | using namespace std; 15 | namespace py = boost::python; 16 | 17 | 18 | ///////////////// 19 | 20 | 21 | 22 | 23 | // http://en.wikipedia.org/wiki/Axis_angle#Log_map_from_SO.283.29_to_so.283.29 24 | Vector3d LogMap(const Matrix3d& m) { 25 | double cosarg = (m.trace() - 1)/2; 26 | cosarg = fmin(cosarg, 1); 27 | cosarg = fmax(cosarg, -1); 28 | double theta = acos( cosarg ); 29 | if (theta==0) return Vector3d::Zero(); 30 | else return theta*(1/(2*sin(theta))) * Vector3d(m(2,1) - m(1,2), m(0,2)-m(2,0), m(1,0)-m(0,1)); 31 | } 32 | 33 | double RotReg(const Matrix3d& b, const Vector3d& rot_coeffs, double scale_coeff) { 34 | // regularize rotation using polar decomposition 35 | JacobiSVD svd(b.transpose(), ComputeFullU | ComputeFullV); 36 | Vector3d s = svd.singularValues(); 37 | if (b.determinant() <= 0) return INFINITY; 38 | return LogMap(svd.matrixU() * svd.matrixV().transpose()).cwiseAbs().dot(rot_coeffs) + s.array().log().square().sum()*scale_coeff; 39 | } 40 | 41 | Matrix3d RotRegGrad(const Matrix3d& b, const Vector3d& rot_coeffs, double scale_coeff) { 42 | Matrix3d out; 43 | double y0 = RotReg(b, rot_coeffs, scale_coeff); 44 | Matrix3d xpert = b; 45 | double epsilon = 1e-5; 46 | for (int i=0; i < 3; ++i) { 47 
| for (int j=0; j < 3; ++j) { 48 | xpert(i,j) = b(i,j) + epsilon; 49 | out(i,j) = (RotReg(xpert, rot_coeffs, scale_coeff) - y0)/epsilon; 50 | xpert(i,j) = b(i,j); 51 | } 52 | } 53 | return out; 54 | } 55 | 56 | Vector3d gRotCoeffs; 57 | double gScaleCoeff; 58 | 59 | void PySetCoeffs(py::object rot_coeffs, py::object scale_coeff) { 60 | gRotCoeffs = Vector3d(py::extract(rot_coeffs[0]), py::extract(rot_coeffs[1]), py::extract(rot_coeffs[2])); 61 | gScaleCoeff = py::extract(scale_coeff); 62 | } 63 | 64 | 65 | double PyRotReg(const py::object& m ){ 66 | const double* data = getPointer(m); 67 | return RotReg( Map< const Matrix >(data), gRotCoeffs, gScaleCoeff); 68 | } 69 | 70 | py::object PyRotRegGrad(const py::object& m) { 71 | static py::object np_mod = py::import("numpy"); 72 | py::object out = np_mod.attr("empty")(py::make_tuple(3,3)); 73 | const double* data = getPointer(m); 74 | Matrix g = RotRegGrad( Map< const Matrix >(data), gRotCoeffs, gScaleCoeff); 75 | memcpy(getPointer(out), g.data(), sizeof(double)*9); 76 | return out; 77 | } 78 | 79 | 80 | 81 | //////////////// 82 | 83 | void Interp(float x0, float x1, const VectorXf& y0, const VectorXf& y1, const VectorXf& newxs, MatrixXf& newys) { 84 | float dx = x1 - x0; 85 | for (int i=0; i < newxs.size(); ++i) { 86 | newys.row(i) = ((x1 - newxs[i])/dx) * y0 + ((newxs[i] - x0)/dx) * y1; 87 | } 88 | } 89 | 90 | vector Resample(const MatrixXf& x, const VectorXf& _t, float max_err, float max_dx, float max_dt) { 91 | int N = x.rows(); // number of timesteps 92 | VectorXf t; 93 | if (_t.size() == 0){ 94 | t.resize(N); 95 | for (int i=0; i < N; ++i) t[i] = i; 96 | } 97 | else t=_t; 98 | VectorXi cost(N); // shortest path cost 99 | VectorXi pred(N); // shortest path predecessor 100 | 101 | MatrixXf q(20, x.cols()); // scratch space for interpolation 102 | q.setConstant(-666); 103 | 104 | const int NOPRED = -666; 105 | const int BIGINT = 999999; 106 | 107 | pred.setConstant(NOPRED); 108 | cost.setConstant(BIGINT); 109 | 
cost(0) = 0; 110 | pred(0) = NOPRED; 111 | for (int iSrc = 0; iSrc < N; ++iSrc) { 112 | for (int iTarg = iSrc+1; iTarg < N; ++iTarg) { 113 | float dx = (x.row(iTarg) - x.row(iSrc)).maxCoeff(); 114 | float dt = t(iTarg) - t(iSrc); 115 | int seglen = iTarg - iSrc + 1; 116 | if (q.rows() < seglen) q.resize(2*q.rows(), q.cols()); 117 | Interp(t(iSrc), t(iTarg), x.row(iSrc), x.row(iTarg), t.middleRows(iSrc, seglen), q); 118 | float err = (q.block(0,0,seglen, x.cols()) - x.middleRows(iSrc, seglen)).cwiseAbs().maxCoeff(); 119 | if ((dx <= max_dx) && (dt <= max_dt || iTarg == iSrc+1) && (err <= max_err)) { 120 | int newcost = cost(iSrc) + 1; 121 | if (newcost < cost(iTarg)) { 122 | cost(iTarg) = newcost; 123 | pred(iTarg) = iSrc; 124 | } 125 | } 126 | else break; 127 | } 128 | } 129 | 130 | int i=N-1; 131 | vector revpath; 132 | while (i > 0) { 133 | revpath.push_back(i); 134 | i = pred(i); 135 | } 136 | revpath.push_back(0); 137 | std::reverse(revpath.begin(), revpath.end()); 138 | return revpath; 139 | } 140 | 141 | py::object pyResample(py::object x, py::object t, float max_err, float max_dx, float max_dt) { 142 | x = np_mod.attr("array")(x, "float32"); 143 | int xdim0 = py::extract(x.attr("shape")[0]); 144 | int xdim1 = py::extract(x.attr("shape")[1]); 145 | float* xdata = getPointer(x); 146 | t = np_mod.attr("array")(t, "float32"); 147 | int tdim0 = py::extract(t.attr("__len__")()); 148 | float* tdata = getPointer(t); 149 | vector inds = Resample(Map(xdata, xdim0, xdim1), Map(tdata, tdim0), max_err, max_dx, max_dt); 150 | return toNdarray1(inds.data(), inds.size()); 151 | } 152 | //BOOST_PYTHON_FUNCTION_OVERLOADS(resample_overloads, pyResample, 3, 5); 153 | 154 | /////////////////// 155 | 156 | 157 | BOOST_PYTHON_MODULE(fastrapp) { 158 | 159 | np_mod = py::import("numpy"); 160 | 161 | py::def("resample", &pyResample, (py::arg("x"), py::arg("t"), py::arg("max_err"), py::arg("max_dx")=INFINITY, py::arg("max_dt")=INFINITY)); 162 | 163 | py::def("set_coeffs", 
&PySetCoeffs, (py::arg("rot_coeffs"), py::arg("scale_coeff"))); 164 | py::def("rot_reg", &PyRotReg, (py::arg("B"))); 165 | py::def("rot_reg_grad", &PyRotRegGrad, (py::arg("B"))); 166 | 167 | 168 | } 169 | -------------------------------------------------------------------------------- /fastrapp/main.cmake: -------------------------------------------------------------------------------- 1 | list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/modules) 2 | include(${CMAKE_SOURCE_DIR}/cmake/boost-python.cmake) -------------------------------------------------------------------------------- /fastrapp/numpy_utils.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | namespace py = boost::python; 5 | 6 | py::object np_mod; 7 | 8 | py::list toPyList(const std::vector& x) { 9 | py::list out; 10 | for (int i=0; i < x.size(); ++i) out.append(x[i]); 11 | return out; 12 | } 13 | 14 | template 15 | struct type_traits { 16 | static const char* npname; 17 | }; 18 | template<> const char* type_traits::npname = "float32"; 19 | template<> const char* type_traits::npname = "int32"; 20 | template<> const char* type_traits::npname = "float64"; 21 | template<> const char* type_traits::npname = "uint8"; 22 | 23 | template 24 | T* getPointer(const py::object& arr) { 25 | long int i = py::extract(arr.attr("ctypes").attr("data")); 26 | T* p = (T*)i; 27 | return p; 28 | } 29 | 30 | template 31 | py::object toNdarray1(const T* data, size_t dim0) { 32 | py::object out = np_mod.attr("empty")(py::make_tuple(dim0), type_traits::npname); 33 | T* p = getPointer(out); 34 | memcpy(p, data, dim0*sizeof(T)); 35 | return out; 36 | } 37 | template 38 | py::object toNdarray2(const T* data, size_t dim0, size_t dim1) { 39 | py::object out = np_mod.attr("empty")(py::make_tuple(dim0, dim1), type_traits::npname); 40 | T* pout = getPointer(out); 41 | memcpy(pout, data, dim0*dim1*sizeof(T)); 42 | return out; 43 | } 44 | template 45 
| py::object toNdarray3(const T* data, size_t dim0, size_t dim1, size_t dim2) { 46 | py::object out = np_mod.attr("empty")(py::make_tuple(dim0, dim1, dim2), type_traits::npname); 47 | T* pout = getPointer(out); 48 | memcpy(pout, data, dim0*dim1*dim2*sizeof(T)); 49 | return out; 50 | } -------------------------------------------------------------------------------- /fastrapp/record_rgbd_video.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #define Header Header1 //holy shit this actually works 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "config.hpp" 11 | using namespace std; 12 | namespace fs = boost::filesystem; 13 | using namespace util; 14 | 15 | // parameters 16 | int downsample = 3; 17 | bool wait_for_prompt = false; 18 | bool verbose = true; 19 | //// 20 | 21 | bool frame_requested = false; // only relevant when wait_for_prompt = true; 22 | 23 | vector stamps; 24 | int sizes[2] = {480, 640}; 25 | cv::Mat rgb_mat(2, sizes, CV_8UC3); 26 | cv::Mat depth_mat(2, sizes,CV_16UC1); 27 | 28 | fs::path rgbd_dir; 29 | 30 | void callback (const boost::shared_ptr& rgb, const boost::shared_ptr& depth, float constant) { 31 | static int total_counter=0, save_counter=0; 32 | if ( (wait_for_prompt && frame_requested) || (!wait_for_prompt && (total_counter % downsample == 0)) ) { 33 | stamps.push_back(ros::Time::now().toSec()); 34 | const XnDepthPixel* depth_data = depth->getDepthMetaData().Data(); //unsigned short 35 | 36 | rgb->fillRGB(640, 480, rgb_mat.data, 640*3); 37 | for (int i=0; i < 480; ++i) { 38 | for (int j=0; j < 640; ++j) { 39 | cv::Vec3b& pix = rgb_mat.at(i,j); 40 | swap(pix[0], pix[2]); 41 | } 42 | } 43 | 44 | 45 | depth->fillDepthImageRaw(640, 480, (unsigned short*) depth_mat.data); 46 | 47 | bool success1 = cv::imwrite( (rgbd_dir / (boost::format("depth%.2i.png")%save_counter).str()).string(), depth_mat); 48 | bool success2 = cv::imwrite( 
(rgbd_dir / (boost::format("rgb%.2i.jpg")%save_counter).str()).string(), rgb_mat); 49 | if (!success1 || !success2) throw std::runtime_error("failed to write image"); 50 | if (verbose) printf("saved rgb/depth images %i\n", save_counter); 51 | 52 | 53 | save_counter++; 54 | 55 | frame_requested = false; 56 | } 57 | 58 | total_counter++; 59 | 60 | } 61 | 62 | 63 | 64 | 65 | int main(int argc, char** argv) { 66 | 67 | Config config; 68 | config.add(new Parameter("downsample", &downsample, "ratio to downsample by")); 69 | config.add(new Parameter("on_prompt", &wait_for_prompt, "only record image after user presses enter")); 70 | config.add(new Parameter("verbose", &verbose, "verbose")); 71 | string outdir; 72 | config.add(new Parameter("out", &outdir, "output directory")); 73 | CommandParser(config).read(argc, argv); 74 | 75 | 76 | 77 | ros::init(argc, argv, "record_rgbd_video"); 78 | ros::NodeHandle nh; 79 | bool time_ok = ros::Time::waitForValid(ros::WallDuration(1)); 80 | pcl::OpenNIGrabber interface; 81 | // connect callback function for desired signal. 
In this case its a point cloud with color values 82 | boost::function&, const boost::shared_ptr&, float)> f(&callback); 83 | boost::signals2::connection c = interface.registerCallback (f); 84 | 85 | 86 | if (outdir.empty()) rgbd_dir = fs::unique_path("rgbd_%%%%%%%"); 87 | else rgbd_dir = outdir; 88 | 89 | create_directories(rgbd_dir); 90 | // start receiving point clouds 91 | printf("saving data to %s\n", rgbd_dir.string().c_str()); 92 | 93 | cv::Size size(640, 480); 94 | 95 | interface.start(); 96 | 97 | if (wait_for_prompt) { 98 | while (ros::ok()) { 99 | printf("press enter to acquire next frame\n"); 100 | cin.get(); 101 | frame_requested = true; 102 | } 103 | } 104 | else { 105 | printf("press ctrl-c to stop\n"); 106 | while (ros::ok()) usleep(1e6 * .01); 107 | } 108 | 109 | interface.stop(); 110 | 111 | ofstream stampfile((rgbd_dir / "stamps.txt").string().c_str()); 112 | stampfile << setprecision(20); 113 | for (int i=0; i < stamps.size(); ++i) { 114 | stampfile << stamps[i] << endl; 115 | } 116 | stampfile.close(); 117 | 118 | printf("rgbd video: done\n"); 119 | } 120 | -------------------------------------------------------------------------------- /fastrapp/stl_to_string.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | namespace util { 9 | 10 | using std::string; 11 | using std::vector; 12 | 13 | //std::string Str(const vector& x); 14 | //std::string Str(const vector& x); 15 | //std::string Str(const vector& x); 16 | 17 | template 18 | std::string Str(const vector& x) { 19 | std::stringstream ss; 20 | ss << "("; 21 | if (x.size() > 0) ss << x[0]; 22 | for(size_t i = 1; i < x.size(); ++i) 23 | ss << ", " << x[i]; 24 | ss << ")"; 25 | return ss.str(); 26 | } 27 | 28 | 29 | template 30 | std::string Str(const std::set& x) { 31 | std::stringstream ss; 32 | ss << "{"; 33 | typename std::set::const_iterator it = x.begin(); 34 | 
if (x.size() > 0) { 35 | ss << *it; 36 | ++it; 37 | for( ; it != x.end(); ++it) 38 | ss << ", " << *it; 39 | } 40 | ss << "}"; 41 | return ss.str(); 42 | } 43 | 44 | template 45 | std::string Str(const T& x) { 46 | std::stringstream ss; 47 | ss << x; 48 | return ss.str(); 49 | } 50 | #define CSTR(x) util::Str(x).c_str() 51 | 52 | 53 | template 54 | std::string Str(const typename std::map& x) { 55 | std::stringstream ss; 56 | ss << "{"; 57 | typename std::map::const_iterator it = x.begin(); 58 | if (x.size() > 0) { 59 | ss << Str(it->first) << " : " << Str(it->second); 60 | ++it; 61 | for( ; it != x.end(); ++it) 62 | ss << ", " << Str(it->first) << " : " << Str(it->second); 63 | } 64 | ss << "}"; 65 | return ss.str(); 66 | } 67 | 68 | 69 | 70 | 71 | } 72 | 73 | -------------------------------------------------------------------------------- /notes/picloud_setup.txt: -------------------------------------------------------------------------------- 1 | 2 | sudo echo "deb http://ppa.launchpad.net/openrave/testing/ubuntu precise main" > openrave-testing.list 3 | sudo echo "deb-src http://ppa.launchpad.net/openrave/testing/ubuntu precise main" >> openrave-testing.list 4 | sudo mv openrave-testing.list /etc/apt/sources.list.d 5 | 6 | echo "deb http://ppa.launchpad.net/v-launchpad-jochen-sprickerhof-de/pcl/ubuntu precise main" > pcl.list 7 | echo "deb-src http://ppa.launchpad.net/v-launchpad-jochen-sprickerhof-de/pcl/ubuntu precise main" >> pcl.list 8 | sudo mv pcl.list /etc/apt/sources.list.d 9 | 10 | sudo apt-get update 11 | 12 | 13 | 14 | sudo apt-get install openrave0.9-dp 15 | sudo apt-get install openrave0.9-dp-base 16 | sudo apt-get install openrave0.9-dp-dev 17 | sudo apt-get install libopenscenegraph-dev cmake libboost-all-dev libeigen3-dev python-numpy 18 | sudo apt-get install libpcl-all-dev 19 | sudo apt-get install emacs 20 | sudo apt-get install python-networkx python-joblib 21 | 22 | 23 | cd /usr/lib/python2.7/dist-packages/openravepy 24 | sudo ln -s 
_openravepy_0_9 _openravepy_ 25 | cd ~ 26 | 27 | git clone https://github.com/joschu/trajopt.git 28 | git clone https://github.com/hojonathanho/bulletsim.git 29 | git clone https://github.com/joschu/rapprentice.git 30 | 31 | mkdir -p ~/build 32 | mkdir -p ~/build/trajopt 33 | mkdir -p ~/build/bulletsim 34 | 35 | cd ~/build/trajopt 36 | cmake ~/trajopt -DBUILD_CLOUDPROC=ON 37 | make -j 38 | 39 | 40 | cd ~/bulletsim 41 | git checkout lite 42 | cd ~/build/bulletsim 43 | cmake ~/bulletsim 44 | make -j 45 | 46 | cd ~/build/rapprentice 47 | cmake ~/rapprentice/fastrapp 48 | make -j 49 | 50 | echo "export PYTHONPATH=~/trajopt:~/build/trajopt/lib:~/bulletsim:~/build/bulletsim/lib:~/rapprentice:~/build/rapprentice/lib:$PYTHONPATH" >> .bashrc -------------------------------------------------------------------------------- /notes/todo.txt: -------------------------------------------------------------------------------- 1 | - check that ros master is not localhost for recording 2 | 3 | - allow user to separate raw data from yaml files and hdf5 4 | - make more unit tests for registration 5 | - update do_task.py with new scheme -------------------------------------------------------------------------------- /pr2/base_trajectory_controller.py: -------------------------------------------------------------------------------- 1 | from brett2.PR2 import PR2 2 | #roslib.load_manifest("nav_msgs"); import nav_msgs.msg as nm 3 | import trajectory_msgs.msg as tm 4 | import numpy as np 5 | from numpy import sin, cos 6 | import rospy 7 | import scipy.interpolate as si 8 | from Queue import Queue, Empty 9 | from threading import Thread 10 | import jds_utils.conversions as conv 11 | import kinematics.kinematics_utils as ku 12 | from time import time, sleep 13 | 14 | class Spline2D(object): 15 | def __init__(self,x,y): 16 | self.Fs = [si.InterpolatedUnivariateSpline(x, ycol) for ycol in y.T] 17 | def __call__(self, x,nu=0): 18 | return np.array([F(x,nu=nu) for F in self.Fs]).T 19 | 20 | class 
TrajectoryController: 21 | def __init__(self): 22 | self.brett = PR2() 23 | self.sub = rospy.Subscriber("base_traj_controller/command", tm.JointTrajectory, self.callback) 24 | self.q = Queue() 25 | self.F = None 26 | self.stop_requested = False 27 | self.ctrl_loop_running = False 28 | def callback(self, msg): 29 | joints = [] 30 | ts = [] 31 | for jtp in msg.points: 32 | joints.append((jtp.positions[0], jtp.positions[1], jtp.positions[2])) 33 | ts.append(jtp.time_from_start.to_sec()) 34 | self.q.put( (np.array(joints),np.array(ts)) ) 35 | self.msg = msg 36 | 37 | def listen_loop(self): 38 | while not rospy.is_shutdown(): 39 | try: 40 | joints, ts = self.q.get(timeout=.01) 41 | if self.ctrl_loop_running: self.stop_requested = True 42 | while self.ctrl_loop_running: sleep(.001) 43 | ctrl_thread = Thread(target = self.control_loop, args=(joints, ts)) 44 | ctrl_thread.start() 45 | 46 | except Empty: 47 | pass 48 | def control_loop(self,joints, ts): 49 | raise 50 | 51 | class BaseTrajectoryController(TrajectoryController): 52 | def control_loop(self,joints,ts): 53 | print "running control loop with new trajectory" 54 | 55 | 56 | F = Spline2D(ts, joints) 57 | 58 | t_start = time() 59 | duration = ts[-1] 60 | 61 | prev_err = None 62 | prev_time = None 63 | 64 | kp = 1 65 | kd = .1 66 | 67 | use_relative = False 68 | frame_id = self.msg.header.frame_id 69 | if "base" in frame_id: 70 | use_relative = True 71 | pos_start = self.brett.base.get_pose("odom_combined") 72 | elif "odom_combined" in frame_id or "map" in frame_id: 73 | pass 74 | else: 75 | raise Exception("invalid frame %s for base traj"%frame_id) 76 | 77 | while True: 78 | 79 | if rospy.is_shutdown(): 80 | return 81 | if self.stop_requested: 82 | self.ctrl_loop_running = False 83 | rospy.loginfo("stop requested--leaving control loop") 84 | return 85 | 86 | t_elapsed = time() - t_start 87 | if t_elapsed > duration+5: 88 | rospy.loginfo("time elapsed (+1sec)--leaving control loop") 89 | return 90 | 91 | else: 92 | if 
use_relative: 93 | # invert transform from orig position 94 | pos_cur = self.brett.base.get_pose("odom_combined") 95 | pos_cur -= pos_start 96 | a = pos_start[2] 97 | pos_cur[:2] = np.array([[cos(a), sin(a)],[-sin(a), cos(a)]]).dot(pos_cur[:2]) 98 | else: 99 | pos_cur = self.brett.base.get_pose("odom_combined") 100 | 101 | if t_elapsed > duration: pos_targ = joints[-1] 102 | else: pos_targ = F(t_elapsed, nu = 0) 103 | 104 | 105 | pos_targ[2] = ku.closer_ang(pos_targ[2], pos_cur[2]) 106 | err = (pos_targ - pos_cur) 107 | 108 | 109 | 110 | twist = kp*err 111 | if prev_err is not None: twist += kd*(err - prev_err)/(t_elapsed - prev_time) 112 | prev_err = err 113 | prev_time = t_elapsed 114 | 115 | a = pos_cur[2] 116 | twist[0:2] = np.dot( 117 | np.array([[np.cos(a), np.sin(a)], 118 | [-np.sin(a), np.cos(a)]]) , 119 | twist[0:2]) 120 | 121 | self.brett.base.set_twist(twist) 122 | pos_prev = pos_cur 123 | sleep(.01) 124 | 125 | 126 | if __name__ == "__main__": 127 | import rospy 128 | rospy.init_node("base_traj_controller", disable_signals = True) 129 | controller = BaseTrajectoryController() 130 | controller.listen_loop() -------------------------------------------------------------------------------- /rapprentice/PR2.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np, os.path as osp 3 | import openravepy as rave 4 | from numpy import inf, zeros, dot, r_, pi 5 | from numpy.linalg import norm, inv 6 | from threading import Thread 7 | 8 | from rapprentice import retiming, math_utils as mu,conversions as conv, func_utils, resampling 9 | 10 | import roslib 11 | roslib.load_manifest("pr2_controllers_msgs") 12 | roslib.load_manifest("move_base_msgs") 13 | import pr2_controllers_msgs.msg as pcm 14 | import trajectory_msgs.msg as tm 15 | import sensor_msgs.msg as sm 16 | import actionlib 17 | import rospy 18 | import geometry_msgs.msg as gm 19 | import move_base_msgs.msg as mbm 20 | 21 | VEL_RATIO = .2 22 | ACC_RATIO 
= .3 23 | 24 | class IKFail(Exception): 25 | pass 26 | 27 | class TopicListener(object): 28 | "stores last message" 29 | last_msg = None 30 | def __init__(self,topic_name,msg_type): 31 | self.sub = rospy.Subscriber(topic_name,msg_type,self.callback) 32 | 33 | rospy.loginfo('waiting for the first message: %s'%topic_name) 34 | while self.last_msg is None: rospy.sleep(.01) 35 | rospy.loginfo('ok: %s'%topic_name) 36 | 37 | def callback(self,msg): 38 | self.last_msg = msg 39 | 40 | 41 | class JustWaitThread(Thread): 42 | 43 | def __init__(self, duration): 44 | Thread.__init__(self) 45 | self.wants_exit = False 46 | self.duration = duration 47 | 48 | def run(self): 49 | t_done = rospy.Time.now() + rospy.Duration(self.duration) 50 | while not self.wants_exit and rospy.Time.now() < t_done and not rospy.is_shutdown(): 51 | rospy.sleep(.01) 52 | 53 | 54 | class GripperTrajectoryThread(Thread): 55 | 56 | def __init__(self, gripper, times, angles): 57 | Thread.__init__(self) 58 | self.wants_exit = False 59 | self.gripper = gripper 60 | self.times = times 61 | self.angles = angles 62 | 63 | def run(self): 64 | t_start = rospy.Time.now() 65 | self.gripper.set_angle_target(self.angles[0]) 66 | for i in xrange(1,len(self.times)): 67 | if rospy.is_shutdown() or self.wants_exit: break 68 | duration_elapsed = rospy.Time.now() - t_start 69 | self.gripper.set_angle_target(self.angles[i]) 70 | rospy.sleep(rospy.Duration(self.times[i]) - duration_elapsed) 71 | 72 | 73 | 74 | class PR2(object): 75 | 76 | pending_threads = [] 77 | wait = True # deprecated way of blocking / not blocking 78 | 79 | @func_utils.once 80 | def create(rave_only=False): 81 | return PR2(rave_only) 82 | 83 | def __init__(self): 84 | 85 | # set up openrave 86 | self.env = rave.Environment() 87 | self.env.StopSimulation() 88 | self.env.Load("robots/pr2-beta-static.zae") # todo: use up-to-date urdf 89 | self.robot = self.env.GetRobots()[0] 90 | 91 | self.joint_listener = TopicListener("/joint_states", sm.JointState) 92 
| 93 | # rave to ros conversions 94 | joint_msg = self.get_last_joint_message() 95 | ros_names = joint_msg.name 96 | inds_ros2rave = np.array([self.robot.GetJointIndex(name) for name in ros_names]) 97 | self.good_ros_inds = np.flatnonzero(inds_ros2rave != -1) # ros joints inds with matching rave joint 98 | self.rave_inds = inds_ros2rave[self.good_ros_inds] # openrave indices corresponding to those joints 99 | self.update_rave() 100 | 101 | self.larm = Arm(self, "l") 102 | self.rarm = Arm(self, "r") 103 | self.lgrip = Gripper(self, "l") 104 | self.rgrip = Gripper(self, "r") 105 | self.head = Head(self) 106 | self.torso = Torso(self) 107 | self.base = Base(self) 108 | 109 | 110 | rospy.on_shutdown(self.stop_all) 111 | 112 | def _set_rave_limits_to_soft_joint_limits(self): 113 | # make the joint limits match the PR2 soft limits 114 | low_limits, high_limits = self.robot.GetDOFLimits() 115 | rarm_low_limits = [-2.1353981634, -0.3536, -3.75, -2.1213, None, -2.0, None] 116 | rarm_high_limits = [0.564601836603, 1.2963, 0.65, -0.15, None, -0.1, None] 117 | for rarm_index, low, high in zip(self.robot.GetManipulator("rightarm").GetArmIndices(), rarm_low_limits, rarm_high_limits): 118 | if low is not None and high is not None: 119 | low_limits[rarm_index] = low 120 | high_limits[rarm_index] = high 121 | larm_low_limits = [-0.564601836603, -0.3536, -0.65, -2.1213, None, -2.0, None] 122 | larm_high_limits = [2.1353981634, 1.2963, 3.75, -0.15, None, -0.1, None] 123 | for larm_index, low, high in zip(self.robot.GetManipulator("leftarm").GetArmIndices(), larm_low_limits, larm_high_limits): 124 | if low is not None and high is not None: 125 | low_limits[larm_index] = low 126 | high_limits[larm_index] = high 127 | self.robot.SetDOFLimits(low_limits, high_limits) 128 | 129 | 130 | def start_thread(self, thread): 131 | self.pending_threads.append(thread) 132 | thread.start() 133 | 134 | def get_last_joint_message(self): 135 | return self.joint_listener.last_msg 136 | 137 | def 
update_rave(self): 138 | ros_values = self.get_last_joint_message().position 139 | rave_values = [ros_values[i_ros] for i_ros in self.good_ros_inds] 140 | self.robot.SetJointValues(rave_values[:20],self.rave_inds[:20]) 141 | self.robot.SetJointValues(rave_values[20:],self.rave_inds[20:]) 142 | 143 | # if use_map: 144 | # (trans,rot) = self.tf_listener.lookupTransform('/map', '/base_link', rospy.Time(0)) 145 | # self.robot.SetTransform(conv.trans_rot_to_hmat(trans, rot)) 146 | 147 | def update_rave_without_ros(self, joint_vals): 148 | self.robot.SetJointValues(joint_vals) 149 | 150 | def join_all(self): 151 | for thread in self.pending_threads: 152 | thread.join() 153 | self.pending_threads = [] 154 | def is_moving(self): 155 | return any(thread.is_alive() for thread in self.pending_threads) 156 | 157 | def stop_all(self): 158 | self.larm.stop() 159 | self.rarm.stop() 160 | self.head.stop() 161 | self.torso.stop() 162 | for thread in self.pending_threads: 163 | thread.wants_exit = True 164 | 165 | 166 | class TrajectoryControllerWrapper(object): 167 | 168 | def __init__(self, pr2, controller_name): 169 | self.pr2 = pr2 170 | self.controller_name = controller_name 171 | 172 | self.joint_names = rospy.get_param("/%s/joints"%controller_name) 173 | 174 | self.n_joints = len(self.joint_names) 175 | 176 | msg = self.pr2.get_last_joint_message() 177 | self.ros_joint_inds = [msg.name.index(name) for name in self.joint_names] 178 | self.rave_joint_inds = [pr2.robot.GetJointIndex(name) for name in self.joint_names] 179 | 180 | self.controller_pub = rospy.Publisher("%s/command"%controller_name, tm.JointTrajectory) 181 | 182 | all_vel_limits = self.pr2.robot.GetDOFVelocityLimits() 183 | self.vel_limits = np.array([all_vel_limits[i_rave]*VEL_RATIO for i_rave in self.rave_joint_inds]) 184 | all_acc_limits = self.pr2.robot.GetDOFVelocityLimits() 185 | self.acc_limits = np.array([all_acc_limits[i_rave]*ACC_RATIO for i_rave in self.rave_joint_inds]) 186 | 187 | def 
    def follow_joint_trajectory(self, traj):
        """Retime and execute a joint-space trajectory.

        traj: (n_waypoints x n_joints) array of joint positions. The current
        joint positions are prepended so the motion starts from the present
        configuration.
        """
        traj = np.r_[np.atleast_2d(self.get_joint_positions()), traj]
        # unwrap columns 2, 4, 6 -- presumably the continuous roll joints of a
        # 7-dof arm trajectory; TODO confirm against self.joint_names
        for i in [2,4,6]:
            traj[:,i] = np.unwrap(traj[:,i])

        # assign waypoint times from per-joint velocity limits, then resample
        # uniformly at 10 Hz and differentiate to get velocities
        times = retiming.retime_with_vel_limits(traj, self.vel_limits)
        times_up = np.arange(0,times[-1],.1)
        traj_up = mu.interp2d(times_up, times, traj)
        vels = resampling.get_velocities(traj_up, times_up, .001)
        self.follow_timed_joint_trajectory(traj_up, vels, times_up)
def mirror_arm_joints(x):
    """Return the mirror image of a 7-dof arm configuration (r->l or l->r).

    The even-indexed joints (shoulder pan and the roll joints) flip sign;
    odd-indexed joints are unchanged.
    """
    return np.array([x[i] if i % 2 else -x[i] for i in range(7)])
    def goto_pose_matrix(self, matrix4, ref_frame, targ_frame, filter_options = 0):
        """Move the arm so that targ_frame reaches the pose matrix4, expressed
        relative to ref_frame. Raises IKFail if no IK solution is found.

        matrix4: 4x4 homogeneous transform of targ_frame w.r.t. ref_frame
        ref_frame: openrave link name, or "world"
        targ_frame: openrave link name, or "end_effector"
        filter_options: bitmask of openrave IK filter options:
            IKFO_CheckEnvCollisions = 1
            IKFO_IgnoreSelfCollisions = 2
            IKFO_IgnoreJointLimits = 4
            IKFO_IgnoreCustomFilters = 8
            IKFO_IgnoreEndEffectorCollisions = 16
        """
        # sync the openrave model to the latest joint states before solving IK
        self.pr2.update_rave()
        joint_positions = cart_to_joint(self.manip, matrix4, ref_frame, targ_frame, filter_options)
        if joint_positions is not None: self.goto_joint_positions(joint_positions)
        else: raise IKFail
class Head(TrajectoryControllerWrapper):
    """Pan/tilt head controller."""

    def __init__(self, pr2):
        TrajectoryControllerWrapper.__init__(self,pr2,"head_traj_controller")

    def set_pan_tilt(self, pan, tilt):
        # the head trajectory controller has exactly two joints: pan, tilt
        self.goto_joint_positions([pan, tilt])
    def look_at(self, xyz_target, reference_frame, camera_frame):
        """Aim the camera link at xyz_target (expressed in reference_frame)."""
        self.pr2.update_rave()

        worldFromRef = self.pr2.robot.GetLink(reference_frame).GetTransform()
        worldFromCam = self.pr2.robot.GetLink(camera_frame).GetTransform()
        refFromCam = dot(inv(worldFromRef), worldFromCam)

        xyz_cam = refFromCam[:3,3]
        ax = xyz_target - xyz_cam # pointing axis
        # NOTE(review): arctan(ax[1]/ax[0]) divides by zero when the target is
        # directly beside the camera and picks the wrong quadrant for targets
        # behind it; np.arctan2(ax[1], ax[0]) would be safer -- confirm the
        # intended workspace before changing.
        pan = np.arctan(ax[1]/ax[0])
        tilt = np.arcsin(-ax[2] / norm(ax))
        self.set_pan_tilt(pan,tilt)
    def set_angle(self, a, max_effort = default_max_effort):
        """Command gripper angle `a` through the action interface; returns
        immediately, registering a wait-for-result thread with the PR2."""
        self.grip_client.send_goal(pcm.Pr2GripperCommandGoal(pcm.Pr2GripperCommand(position=a,max_effort=max_effort)))
        self.pr2.start_thread(Thread(target=self.grip_client.wait_for_result))
    def open(self, max_effort=default_max_effort):
        # .08 is the fully-open target used throughout this class
        self.set_angle(.08, max_effort = max_effort)
    def close(self,max_effort=default_max_effort):
        # command slightly past closed so the gripper keeps squeezing
        self.set_angle(-.01, max_effort = max_effort)
    def is_closed(self): # (and empty)
        # compares against self.closed_angle (0 by default; see __init__)
        return self.get_angle() <= self.closed_angle# + 0.001 #.0025
    def set_angle_target(self, position, max_effort = default_max_effort):
        """Stream an instantaneous position target on the command topic
        (no action client, no waiting)."""
        self.controller_pub.publish(pcm.Pr2GripperCommand(position=position,max_effort=max_effort))
    def __init__(self, pr2):
        """Base interface: move_base action client plus raw twist and
        trajectory publishers."""
        self.pr2 = pr2
        self.action_client = actionlib.SimpleActionClient('move_base',mbm.MoveBaseAction)
        self.command_pub = rospy.Publisher('base_controller/command', gm.Twist)
        self.traj_pub = rospy.Publisher("base_traj_controller/command", tm.JointTrajectory)
        # (x, y, yaw) limits -- presumably m/s and rad/s; TODO confirm units
        self.vel_limits = [.2,.2,.3]
        self.acc_limits = [2,2,2] # note: I didn't think these thru
        self.n_joints = 3
def smaller_ang(x):
    """Wrap angle x (radians) into the interval [-pi, pi)."""
    return (x + pi)%(2*pi) - pi

def closer_ang(x, a, dir=0):
    """Find the angle y congruent to x (mod 2*pi) that is close to a.

    dir == 0:  minimize |y - a|
    dir == 1:  y in [a, a + 2*pi)
    dir == -1: y in [a - 2*pi, a)

    Raises ValueError for any other dir. (BUG FIX: the old docstring
    described "dir == 2" while the code tested dir == -1, and an unknown
    dir silently returned None.)
    """
    if dir == 0:
        return a + smaller_ang(x - a)
    elif dir == 1:
        return a + (x - a)%(2*pi)
    elif dir == -1:
        return a + (x - a)%(2*pi) - 2*pi
    else:
        raise ValueError("dir must be 0, 1, or -1, got %r" % (dir,))

def closer_joint_angles(pos, seed):
    """Return pos (length >= 7) with the continuous joints (indices 2, 4, 6)
    re-wrapped to the representation nearest the corresponding seed angles."""
    result = np.array(pos)
    for i in [2, 4, 6]:
        result[i] = closer_ang(pos[i], seed[i], 0)
    return result
def find_disjoint_subsequences(li, seq):
    """Greedily locate disjoint occurrences of seq as a scattered subsequence
    of li.

    Returns a list of index lists [i, j, k, ...] such that
    seq == (li[i], li[j], li[k], ...) for each entry. Matches are found
    greedily from left to right and never share elements.
    """
    matches = []
    partial = []
    n_needed = len(seq)
    for idx, el in enumerate(li):
        # advance the partial match only when the next needed element appears
        if el != seq[len(partial)]:
            continue
        partial.append(idx)
        if len(partial) == n_needed:
            matches.append(partial)
            partial = []
    return matches
def add_bag_to_hdf(bag, annotations, hdfroot, demo_name):
    """Write one hdf5 group per annotated segment of a demo rosbag.

    bag: an opened rosbag containing /joint_states
    annotations: list of dicts with "name", "start", "stop" (timestamps in
        seconds) and "description"
    hdfroot: open h5py file/group to write into
    demo_name: prefix for the created group names
    """
    joint_names, stamps, traj = extract_joints(bag)
    traj = np.asarray(traj)
    stamps = np.asarray(stamps)

    robot = get_robot()

    for seg_info in annotations:


        group = hdfroot.create_group(demo_name + "_" + seg_info["name"])

        start = seg_info["start"]
        stop = seg_info["stop"]

        # index window of this segment within the full joint trajectory
        [i_start, i_stop] = np.searchsorted(stamps, [start, stop])

        stamps_seg = stamps[i_start:i_stop+1]
        traj_seg = traj[i_start:i_stop+1]
        # downsample the trajectory; tolerances (.01, inf, inf) are passed to
        # fastrapp.resample -- presumably a position tolerance with unbounded
        # time/velocity tolerances; TODO confirm against fastrapp
        sample_inds = fastrapp.resample(traj_seg, np.arange(len(traj_seg)), .01, np.inf, np.inf)
        print "trajectory has length", len(sample_inds),len(traj_seg)


        traj_ds = traj_seg[sample_inds,:]
        stamps_ds = stamps_seg[sample_inds]

        group["description"] = seg_info["description"]
        group["stamps"] = stamps_ds
        group.create_group("joint_states")
        group["joint_states"]["name"] = joint_names
        group["joint_states"]["position"] = traj_ds
        link_names = ["l_gripper_tool_frame","r_gripper_tool_frame","l_gripper_r_finger_tip_link","l_gripper_l_finger_tip_frame","r_gripper_r_finger_tip_link","r_gripper_l_finger_tip_frame"]
        special_joint_names = ["l_gripper_joint", "r_gripper_joint"]
        manip_names = ["leftarm", "rightarm"]

        # store forward kinematics: link poses, arm trajectories, gripper joints
        add_kinematics_to_group(group, link_names, manip_names, special_joint_names, robot)
def add_rgbd_to_hdf(video_dir, annotations, hdfroot, demo_name):
    """Attach each segment's "look" RGB/depth frames and the kinect pose to
    the segment groups previously created by add_bag_to_hdf.

    video_dir: directory containing stamps.txt, rgb%i.jpg, depth%i.png
    annotations: list of dicts with "name" and "look" (timestamp in seconds)
    hdfroot: open h5py file/group containing the segment groups
    demo_name: prefix used for the group names
    """

    frame_stamps = [seg_info["look"] for seg_info in annotations]

    rgb_imgs, depth_imgs = get_video_frames(video_dir, frame_stamps)

    # hoisted out of the loop: get_robot() is loop-invariant (and memoized
    # with @func_utils.once), so fetch it once
    robot = get_robot()

    for (i_seg, seg_info) in enumerate(annotations):
        group = hdfroot[demo_name + "_" + seg_info["name"]]
        group["rgb"] = rgb_imgs[i_seg]
        group["depth"] = depth_imgs[i_seg]
        # pose the robot at the segment's first joint state, then compute the
        # world-from-kinect transform through the head plate
        r2r = ros2rave.RosToRave(robot, group["joint_states"]["name"])
        r2r.set_values(robot, group["joint_states"]["position"][0])
        T_w_h = robot.GetLink("head_plate_frame").GetTransform()
        T_w_k = T_w_h.dot(berkeley_pr2.T_h_k)
        group["T_w_k"] = T_w_k
def call_and_print(cmd,color='green',check=True):
    """Echo `cmd` in color, then run it through the shell.

    check=True raises subprocess.CalledProcessError on a nonzero exit status;
    check=False ignores the exit status.

    NOTE(review): cmd is executed with shell=True, so it must never be built
    from untrusted input.
    """
    print colorize(cmd, color, bold=True)
    if check: subprocess.check_call(cmd, shell=True)
    else: subprocess.call(cmd, shell=True)
def grabcut(rgb, depth, T_w_k):
    """Interactively segment a point cloud with OpenCV's GrabCut.

    The user draws a polygon ROI on the rgb image; GrabCut then refines the
    foreground inside the polygon's bounding box. Returns the selected
    foreground points in world coordinates, downsampled with voxel size .01.

    rgb, depth: kinect color and depth images (depth 0 means invalid;
        depth_to_xyz converts mm -> meters)
    T_w_k: 4x4 world-from-kinect transform
    """
    xyz_k = clouds.depth_to_xyz(depth, berkeley_pr2.f)
    # rotate/translate camera-frame points into the world frame
    xyz_w = xyz_k.dot(T_w_k[:3,:3].T) + T_w_k[:3,3][None,None,:]

    valid_mask = depth > 0

    import interactive_roi as ir
    xys = ir.get_polyline(rgb, "rgb")
    # clamp polygon corners to the 640x480 image bounds
    xy_corner1 = np.clip(np.array(xys).min(axis=0), [0,0], [639,479])
    xy_corner2 = np.clip(np.array(xys).max(axis=0), [0,0], [639,479])
    polymask = ir.mask_from_poly(xys)
    #cv2.imshow("mask",mask)

    xy_tl = np.array([xy_corner1, xy_corner2]).min(axis=0)
    xy_br = np.array([xy_corner1, xy_corner2]).max(axis=0)

    xl, yl = xy_tl
    w, h = xy_br - xy_tl
    # seed grabcut: pixels inside the polygon are "probably foreground"
    mask = np.zeros((h,w),dtype='uint8')
    mask[polymask[yl:yl+h, xl:xl+w] > 0] = cv2.GC_PR_FGD
    print mask.shape
    #mask[h//4:3*h//4, w//4:3*w//4] = cv2.GC_PR_FGD

    # scratch buffers for grabCut's background/foreground models
    tmp1 = np.zeros((1, 13 * 5))
    tmp2 = np.zeros((1, 13 * 5))
    cv2.grabCut(rgb[yl:yl+h, xl:xl+w, :],mask,(0,0,0,0),tmp1, tmp2,10,mode=cv2.GC_INIT_WITH_MASK)

    # odd labels (FGD, PR_FGD) -> 1, even labels (BGD, PR_BGD) -> 0
    mask = mask % 2
    #mask = ndi.binary_erosion(mask, utils_images.disk(args.erode)).astype('uint8')
    contours = cv2.findContours(mask,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)[0]
    # draw the segmentation outline on the input image for visual confirmation
    cv2.drawContours(rgb[yl:yl+h, xl:xl+w, :],contours,-1,(0,255,0),thickness=2)

    cv2.imshow('rgb', rgb)
    print "press enter to continue"
    cv2.waitKey()

    # keep foreground pixels with finite z and valid depth readings
    zsel = xyz_w[yl:yl+h, xl:xl+w, 2]
    mask = (mask%2==1) & np.isfinite(zsel)# & (zsel - table_height > -1)
    mask &= valid_mask[yl:yl+h, xl:xl+w]

    xyz_sel = xyz_w[yl:yl+h, xl:xl+w,:][mask.astype('bool')]
    return clouds.downsample(xyz_sel, .01)
    #rgb_sel = rgb[yl:yl+h, xl:xl+w,:][mask.astype('bool')]
# pinhole camera intrinsics for the 640x480 kinect image
cx = 320.-.5
cy = 240.-.5
DEFAULT_F = 535.

def xyZ_to_XY(x,y,Z,f=DEFAULT_F):
    """Back-project pixel coordinates (x, y) at depth Z to camera-frame (X, Y)."""
    scale = Z/f
    return ((x - cx)*scale, (y - cy)*scale)

def XYZ_to_xy(X,Y,Z,f=DEFAULT_F):
    """Project a camera-frame point (X, Y, Z) to pixel coordinates (x, y)."""
    scale = f/Z
    return (X*scale + cx, Y*scale + cy)
# SGR codes for standard terminal foreground colors; highlight adds 10
# to turn a foreground code into the matching background code
color2num = dict(
    gray=30,
    red=31,
    green=32,
    yellow=33,
    blue=34,
    magenta=35,
    cyan=36,
    white=37,
    crimson=38
)


def colorize(string, color, bold=False, highlight = False):
    """Wrap `string` in ANSI escape codes for the given color name.

    bold adds the bold attribute; highlight colors the background
    instead of the foreground.
    """
    num = color2num[color] + (10 if highlight else 0)
    codes = [str(num)]
    if bold:
        codes.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(codes), string)
a translation and a 26 | quaternion rotation. 27 | ''' 28 | _scale, _shear, angles, trans, _persp = transformations.decompose_matrix(hmat) 29 | rot = transformations.quaternion_from_euler(*angles) 30 | return trans, rot 31 | 32 | # converts a list of hmats separate lists of translations and orientations 33 | def hmats_to_transs_rots(hmats): 34 | transs, rots = [], [] 35 | for hmat in hmats: 36 | trans, rot = hmat_to_trans_rot(hmat) 37 | transs.append(trans) 38 | rots.append(rot) 39 | return transs, rots 40 | 41 | def trans_rot_to_hmat(trans, rot): 42 | ''' 43 | Converts a rotation and translation to a homogeneous transform. 44 | 45 | **Args:** 46 | 47 | **trans (np.array):** Translation (x, y, z). 48 | 49 | **rot (np.array):** Quaternion (x, y, z, w). 50 | 51 | **Returns:** 52 | H (np.array): 4x4 homogenous transform matrix. 53 | ''' 54 | H = transformations.quaternion_matrix(rot) 55 | H[0:3, 3] = trans 56 | return H 57 | 58 | def xya_to_trans_rot(xya): 59 | x,y,a = xya 60 | return np.r_[x, y, 0], yaw_to_quat(a) 61 | 62 | def trans_rot_to_xya(trans, rot): 63 | x = trans[0] 64 | y = trans[1] 65 | a = quat_to_yaw(rot) 66 | return (x,y,a) 67 | 68 | def quat_to_yaw(q): 69 | e = transformations.euler_from_quaternion(q) 70 | return e[2] 71 | def yaw_to_quat(yaw): 72 | return transformations.quaternion_from_euler(0, 0, yaw) 73 | 74 | def quat2mat(quat): 75 | return transformations.quaternion_matrix(quat)[:3, :3] 76 | 77 | def mat2quat(mat33): 78 | mat44 = np.eye(4) 79 | mat44[:3,:3] = mat33 80 | return transformations.quaternion_from_matrix(mat44) 81 | 82 | def mats2quats(mats): 83 | return np.array([mat2quat(mat) for mat in mats]) 84 | 85 | def quats2mats(quats): 86 | return np.array([quat2mat(quat) for quat in quats]) 87 | 88 | def xyzs_quats_to_poses(xyzs, quats): 89 | poses = [] 90 | for (xyz, quat) in zip(xyzs, quats): 91 | poses.append(gm.Pose(gm.Point(*xyz), gm.Quaternion(*quat))) 92 | return poses 93 | 94 | def rod2mat(rod): 95 | theta = np.linalg.norm(rod) 96 | 
def rod2mat(rod):
    """Convert a Rodrigues (axis-angle) vector to a 3x3 rotation matrix.

    The direction of `rod` is the rotation axis and its norm is the
    rotation angle in radians; the zero vector maps to the identity.
    """
    angle = np.linalg.norm(rod)
    if angle == 0:
        return np.eye(3)

    axis = rod / angle
    ax, ay, az = axis
    # skew-symmetric cross-product matrix of the unit axis
    cross = np.array([[0., -az,  ay],
                      [az,  0., -ax],
                      [-ay, ax,  0.]])
    c = np.cos(angle)
    s = np.sin(angle)
    # Rodrigues rotation formula
    return c * np.eye(3) + (1 - c) * np.outer(axis, axis) + s * cross
def draw_img(img, colormap = None, min_size = 1):
    """Show an image in a blocking cv2 window ("draw_img"), closing it on keypress.

    Boolean images are cast to uint8; float images are min/max normalized
    to [0, 255] uint8. Images with fewer than `min_size` rows are upscaled
    by an integer factor. `colormap`, if given, is used as a lookup table
    (e.g. a 256x3 uint8 array) applied to the uint8 image.
    """
    if img.dtype == np.bool_:  # np.bool was removed from numpy; np.bool_ is the dtype
        img = img.astype('uint8')
    if img.dtype == np.float32 or img.dtype == np.float64:
        img = img.astype("float32")
        minval = img.min()
        maxval = img.max()
        img = (img - minval) / (maxval - minval)
        # scale by 255, not 256: with 256 the max pixel (1.0) wrapped to 0 in uint8
        img = (img * 255).astype('uint8')
    if img.shape[0] < min_size:
        ratio = int(np.ceil(float(min_size) / img.shape[0]))
        # cv2.resize takes (width, height) = (cols, rows); the original passed
        # (rows, cols), distorting non-square images
        img = cv2.resize(img, (img.shape[1] * ratio, img.shape[0] * ratio))

    if colormap is not None:
        img = colormap[img]

    cv2.imshow("draw_img", img)
    cv2.waitKey()
    cv2.destroyWindow("draw_img")
inttuple(x,y), inttuple(x+17*np.cos(theta), y+17*np.sin(theta)), (0,255,255), 3) 82 | 83 | 84 | 85 | CM_JET = np.array([ 86 | [ 0, 0, 128], 87 | [ 0, 0, 132], 88 | [ 0, 0, 137], 89 | [ 0, 0, 141], 90 | [ 0, 0, 146], 91 | [ 0, 0, 150], 92 | [ 0, 0, 155], 93 | [ 0, 0, 159], 94 | [ 0, 0, 164], 95 | [ 0, 0, 169], 96 | [ 0, 0, 173], 97 | [ 0, 0, 178], 98 | [ 0, 0, 182], 99 | [ 0, 0, 187], 100 | [ 0, 0, 191], 101 | [ 0, 0, 196], 102 | [ 0, 0, 201], 103 | [ 0, 0, 205], 104 | [ 0, 0, 210], 105 | [ 0, 0, 214], 106 | [ 0, 0, 219], 107 | [ 0, 0, 223], 108 | [ 0, 0, 228], 109 | [ 0, 0, 232], 110 | [ 0, 0, 237], 111 | [ 0, 0, 242], 112 | [ 0, 0, 246], 113 | [ 0, 0, 251], 114 | [ 0, 0, 255], 115 | [ 0, 0, 0], 116 | [ 0, 0, 0], 117 | [ 0, 0, 0], 118 | [ 0, 0, 0], 119 | [ 0, 4, 0], 120 | [ 0, 8, 0], 121 | [ 0, 12, 0], 122 | [ 0, 16, 0], 123 | [ 0, 20, 0], 124 | [ 0, 24, 0], 125 | [ 0, 28, 0], 126 | [ 0, 32, 0], 127 | [ 0, 36, 0], 128 | [ 0, 40, 0], 129 | [ 0, 44, 0], 130 | [ 0, 48, 0], 131 | [ 0, 52, 0], 132 | [ 0, 56, 0], 133 | [ 0, 60, 0], 134 | [ 0, 64, 0], 135 | [ 0, 68, 0], 136 | [ 0, 72, 0], 137 | [ 0, 76, 0], 138 | [ 0, 80, 0], 139 | [ 0, 84, 0], 140 | [ 0, 88, 0], 141 | [ 0, 92, 0], 142 | [ 0, 96, 0], 143 | [ 0, 100, 0], 144 | [ 0, 104, 0], 145 | [ 0, 108, 0], 146 | [ 0, 112, 0], 147 | [ 0, 116, 0], 148 | [ 0, 120, 0], 149 | [ 0, 124, 0], 150 | [ 0, 129, 0], 151 | [ 0, 133, 0], 152 | [ 0, 137, 0], 153 | [ 0, 141, 0], 154 | [ 0, 145, 0], 155 | [ 0, 149, 0], 156 | [ 0, 153, 0], 157 | [ 0, 157, 0], 158 | [ 0, 161, 0], 159 | [ 0, 165, 0], 160 | [ 0, 169, 0], 161 | [ 0, 173, 0], 162 | [ 0, 177, 0], 163 | [ 0, 181, 0], 164 | [ 0, 185, 0], 165 | [ 0, 189, 0], 166 | [ 0, 193, 0], 167 | [ 0, 197, 0], 168 | [ 0, 201, 0], 169 | [ 0, 205, 0], 170 | [ 0, 209, 0], 171 | [ 0, 213, 0], 172 | [ 0, 217, 0], 173 | [ 0, 221, 255], 174 | [ 0, 225, 251], 175 | [ 0, 229, 248], 176 | [ 2, 233, 245], 177 | [ 5, 237, 242], 178 | [ 8, 241, 238], 179 | [ 12, 245, 235], 180 | [ 15, 249, 232], 181 | [ 
18, 253, 229], 182 | [ 21, 0, 225], 183 | [ 25, 0, 222], 184 | [ 28, 0, 219], 185 | [ 31, 0, 216], 186 | [ 34, 0, 212], 187 | [ 38, 0, 209], 188 | [ 41, 0, 206], 189 | [ 44, 0, 203], 190 | [ 47, 0, 199], 191 | [ 51, 0, 196], 192 | [ 54, 0, 193], 193 | [ 57, 0, 190], 194 | [ 60, 0, 187], 195 | [ 63, 0, 183], 196 | [ 67, 0, 180], 197 | [ 70, 0, 177], 198 | [ 73, 0, 174], 199 | [ 76, 0, 170], 200 | [ 80, 0, 167], 201 | [ 83, 0, 164], 202 | [ 86, 0, 161], 203 | [ 89, 0, 157], 204 | [ 93, 0, 154], 205 | [ 96, 0, 151], 206 | [ 99, 0, 148], 207 | [102, 0, 144], 208 | [106, 0, 141], 209 | [109, 0, 138], 210 | [112, 0, 135], 211 | [115, 0, 131], 212 | [119, 0, 128], 213 | [122, 0, 125], 214 | [125, 0, 122], 215 | [128, 0, 119], 216 | [131, 0, 115], 217 | [135, 0, 112], 218 | [138, 0, 109], 219 | [141, 0, 106], 220 | [144, 0, 102], 221 | [148, 0, 99], 222 | [151, 0, 96], 223 | [154, 0, 93], 224 | [157, 0, 89], 225 | [161, 0, 86], 226 | [164, 0, 83], 227 | [167, 0, 80], 228 | [170, 0, 76], 229 | [174, 0, 73], 230 | [177, 0, 70], 231 | [180, 0, 67], 232 | [183, 0, 63], 233 | [187, 0, 60], 234 | [190, 0, 57], 235 | [193, 0, 54], 236 | [196, 0, 51], 237 | [199, 0, 47], 238 | [203, 0, 44], 239 | [206, 0, 41], 240 | [209, 0, 38], 241 | [212, 0, 34], 242 | [216, 0, 31], 243 | [219, 0, 28], 244 | [222, 0, 25], 245 | [225, 0, 21], 246 | [229, 0, 18], 247 | [232, 0, 15], 248 | [235, 0, 12], 249 | [238, 0, 8], 250 | [242, 253, 5], 251 | [245, 249, 2], 252 | [248, 245, 0], 253 | [251, 241, 0], 254 | [255, 238, 0], 255 | [ 0, 234, 0], 256 | [ 0, 230, 0], 257 | [ 0, 226, 0], 258 | [ 0, 223, 0], 259 | [ 0, 219, 0], 260 | [ 0, 215, 0], 261 | [ 0, 212, 0], 262 | [ 0, 208, 0], 263 | [ 0, 204, 0], 264 | [ 0, 200, 0], 265 | [ 0, 197, 0], 266 | [ 0, 193, 0], 267 | [ 0, 189, 0], 268 | [ 0, 186, 0], 269 | [ 0, 182, 0], 270 | [ 0, 178, 0], 271 | [ 0, 174, 0], 272 | [ 0, 171, 0], 273 | [ 0, 167, 0], 274 | [ 0, 163, 0], 275 | [ 0, 160, 0], 276 | [ 0, 156, 0], 277 | [ 0, 152, 0], 278 | [ 0, 148, 0], 
class once:
    """Decorator-style wrapper: the wrapped function runs only on the first
    call; every later call returns the cached first result (arguments to
    later calls are ignored). If the first call raises, nothing is cached
    and the next call tries again.
    """
    def __init__(self, fn):
        self.fn = fn
        self.out = None
        self.first_time = True

    def __call__(self, *args, **kw):
        if self.first_time:
            # compute before clearing the flag so a raising fn stays retryable
            self.out = self.fn(*args, **kw)
            self.first_time = False
        return self.out
class memoized(object):
    '''Decorator. Caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned
    (not reevaluated).
    '''
    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        # EAFP: a cache hit is the common case, so try the lookup first
        try:
            return self.cache[args]
        except KeyError:
            result = self.cache[args] = self.func(*args)
            return result

    def __repr__(self):
        '''Return the function's docstring.'''
        return self.func.__doc__

    def __get__(self, obj, objtype):
        '''Support instance methods.'''
        return functools.partial(self.__call__, obj)
def interp2d(x, xp, yp):
    "Same as np.interp, but yp is 2d"
    yp = np.asarray(yp)
    assert yp.ndim == 2
    # interpolate each column of yp independently, then stack as columns
    cols = [np.interp(x, xp, yp[:, j]) for j in range(yp.shape[1])]
    return np.array(cols).T
def remove_duplicate_rows(mat):
    """Drop rows that are elementwise within 1e-5 of the row immediately
    above them; the first row is always kept."""
    changed = (np.abs(np.diff(mat, axis=0)) >= 1e-5).any(axis=1)
    keep = np.concatenate(([True], changed))
    return mat[keep]
def draw_grid(env, f, mins, maxes, xres = .1, yres = .1, zres = .04):
    """Draw a grid over [mins, maxes] warped through the map f.

    f maps an (n,3) array of points to an (n,3) array; each grid line is
    sampled at 30 points, warped, and drawn with env.drawlinestrip in
    yellow. zres == -1 collapses the grid to the mid-z plane (no vertical
    lines). Returns the list of drawing handles.
    """
    xmin, ymin, zmin = mins
    xmax, ymax, zmax = maxes

    nfine = 30
    xcoarse = np.arange(xmin, xmax, xres)
    ycoarse = np.arange(ymin, ymax, yres)
    zcoarse = [(zmin + zmax) / 2.] if zres == -1 else np.arange(zmin, zmax, zres)
    xfine = np.linspace(xmin, xmax, nfine)
    yfine = np.linspace(ymin, ymax, nfine)
    zfine = np.linspace(zmin, zmax, nfine)

    def _warped_line(xs, ys, zs):
        # assemble one sampled grid line and push it through f
        xyz = np.empty((nfine, 3))
        xyz[:, 0] = xs
        xyz[:, 1] = ys
        xyz[:, 2] = zs
        return f(xyz)

    lines = []
    # vertical (z-direction) lines only make sense for a真 3d grid
    if len(zcoarse) > 1:
        for x in xcoarse:
            for y in ycoarse:
                lines.append(_warped_line(x, y, zfine))

    for y in ycoarse:
        for z in zcoarse:
            lines.append(_warped_line(xfine, y, z))

    for z in zcoarse:
        for x in xcoarse:
            lines.append(_warped_line(x, yfine, z))

    return [env.drawlinestrip(line, 1, (1, 1, 0, 1)) for line in lines]
""" 4 | import numpy as np 5 | def plot_warped_grid_2d(f, mins, maxes, grid_res=None, flipax = True): 6 | import matplotlib.pyplot as plt 7 | import matplotlib 8 | xmin, ymin = mins 9 | xmax, ymax = maxes 10 | ncoarse = 10 11 | nfine = 30 12 | 13 | if grid_res is None: 14 | xcoarse = np.linspace(xmin, xmax, ncoarse) 15 | ycoarse = np.linspace(ymin, ymax, ncoarse) 16 | else: 17 | xcoarse = np.arange(xmin, xmax, grid_res) 18 | ycoarse = np.arange(ymin, ymax, grid_res) 19 | xfine = np.linspace(xmin, xmax, nfine) 20 | yfine = np.linspace(ymin, ymax, nfine) 21 | 22 | lines = [] 23 | 24 | sgn = -1 if flipax else 1 25 | 26 | for x in xcoarse: 27 | xy = np.zeros((nfine, 2)) 28 | xy[:,0] = x 29 | xy[:,1] = yfine 30 | lines.append(f(xy)[:,::sgn]) 31 | 32 | for y in ycoarse: 33 | xy = np.zeros((nfine, 2)) 34 | xy[:,0] = xfine 35 | xy[:,1] = y 36 | lines.append(f(xy)[:,::sgn]) 37 | 38 | lc = matplotlib.collections.LineCollection(lines,colors='gray',lw=2) 39 | ax = plt.gca() 40 | ax.add_collection(lc) 41 | plt.draw() 42 | 43 | def plot_correspondence(x_nd, y_nd): 44 | lines = np.array(zip(x_nd, y_nd)) 45 | import matplotlib.pyplot as plt 46 | import matplotlib 47 | lc = matplotlib.collections.LineCollection(lines) 48 | ax = plt.gca() 49 | ax.add_collection(lc) 50 | plt.draw() 51 | -------------------------------------------------------------------------------- /rapprentice/pr2_trajectories.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from rapprentice import conversions as conv, math_utils as mu, \ 3 | retiming, PR2, resampling 4 | from rapprentice import LOG 5 | 6 | def make_joint_traj(xyzs, quats, manip, ref_frame, targ_frame, filter_options = 0): 7 | "do ik and then fill in the points where ik failed" 8 | 9 | n = len(xyzs) 10 | assert len(quats) == n 11 | 12 | robot = manip.GetRobot() 13 | joint_inds = manip.GetArmIndices() 14 | robot.SetActiveDOFs(joint_inds) 15 | orig_joint = robot.GetActiveDOFValues() 16 | 
def follow_body_traj(pr2, bodypart2traj, wait=True, base_frame = "/base_footprint", speed_factor=1):
    # Execute trajectories on several PR2 body parts simultaneously.
    #
    # bodypart2traj maps part names ("lgrip", "rgrip", "larm", "rarm",
    # "base") to trajectory arrays (1d for grippers, 2d (T, n_joints)
    # otherwise -- 1d arrays are reshaped to a column below).
    # speed_factor scales the velocity limits used for retiming.
    # Returns True. If `wait`, blocks until all parts finish.

    LOG.info("following trajectory with bodyparts %s", " ".join(bodypart2traj.keys()))

    name2part = {"lgrip":pr2.lgrip,
                 "rgrip":pr2.rgrip,
                 "larm":pr2.larm,
                 "rarm":pr2.rarm,
                 "base":pr2.base}
    # validate part names up front so we fail before moving anything
    for partname in bodypart2traj:
        if partname not in name2part:
            raise Exception("invalid part name %s"%partname)


    #### Go to initial positions #######


    for (name, part) in name2part.items():
        if name in bodypart2traj:
            part_traj = bodypart2traj[name]
            if name == "lgrip" or name == "rgrip":
                # gripper trajectory is a sequence of scalar angles
                part.set_angle(np.squeeze(part_traj)[0])
            elif name == "larm" or name == "rarm":
                part.goto_joint_positions(part_traj[0])
            elif name == "base":
                part.goto_pose(part_traj[0], base_frame)
    pr2.join_all()


    #### Construct total trajectory so we can retime it #######


    # concatenate all part trajectories column-wise so retiming sees
    # every DOF at once; bodypart2inds remembers each part's columns
    n_dof = 0
    trajectories = []
    vel_limits = []
    acc_limits = []
    bodypart2inds = {}
    for (name, part) in name2part.items():
        if name in bodypart2traj:
            traj = bodypart2traj[name]
            if traj.ndim == 1: traj = traj.reshape(-1,1)
            trajectories.append(traj)
            vel_limits.extend(part.vel_limits)
            acc_limits.extend(part.acc_limits)
            bodypart2inds[name] = range(n_dof, n_dof+part.n_joints)
            n_dof += part.n_joints

    trajectories = np.concatenate(trajectories, 1)

    vel_limits = np.array(vel_limits)*speed_factor


    # retime so no joint exceeds its velocity limit, then resample the
    # whole trajectory on a uniform ~0.1 s grid
    times = retiming.retime_with_vel_limits(trajectories, vel_limits)
    times_up = np.linspace(0, times[-1], int(np.ceil(times[-1]/.1)))
    traj_up = mu.interp2d(times_up, times, trajectories)


    #### Send all part trajectories ###########

    for (name, part) in name2part.items():
        if name in bodypart2traj:
            # slice this part's columns back out of the retimed trajectory
            part_traj = traj_up[:,bodypart2inds[name]]
            if name == "lgrip" or name == "rgrip":
                part.follow_timed_trajectory(times_up, part_traj.flatten())
            elif name == "larm" or name == "rarm":
                vels = resampling.get_velocities(part_traj, times_up, .001)
                part.follow_timed_joint_trajectory(part_traj, vels, times_up)
            elif name == "base":
                part.follow_timed_trajectory(times_up, part_traj, base_frame)

    if wait: pr2.join_all()

    return True
rave2ros[pr2.robot.GetJoint(jointname).GetDOFIndex()] = (partname, ijoint) 145 | bodypart2traj[partname] = np.repeat(np.asarray(part.get_joint_positions())[None,:], len(ravetraj), axis=0) 146 | 147 | 148 | 149 | bodypart2used = {} 150 | 151 | for (ravecol, dof_ind) in enumerate(dof_inds): 152 | if dof_ind in rave2ros: 153 | partname, partind = rave2ros[dof_ind] 154 | bodypart2traj[partname][:,partind] = ravetraj[:,ravecol] 155 | elif dof_ind == pr2.robot.GetJoint("r_gripper_l_finger_joint").GetDOFIndex(): 156 | partname, partind = "r_gripper", 0 157 | bodypart2traj[partname][:,partind] = ravetraj[:,ravecol]/5.81 158 | elif dof_ind == pr2.robot.GetJoint("l_gripper_l_finger_joint").GetDOFIndex(): 159 | partname, partind = "l_gripper", 0 160 | bodypart2traj[partname][:,partind] = ravetraj[:,ravecol]/5.81 161 | else: 162 | jointname = pr2.robot.GetJointFromDOFIndex(dof_ind).GetName() 163 | raise Exception("I don't know how to control this joint %s"%jointname) 164 | bodypart2used[partname] = True 165 | 166 | for partname in list(bodypart2traj.keys()): 167 | if partname not in bodypart2used: 168 | del bodypart2traj[partname] 169 | 170 | if use_base: 171 | base_traj = ravetraj[:,-3:] 172 | bodypart2traj["base"] = base_traj 173 | 174 | follow_body_traj(pr2, bodypart2traj, base_frame = base_frame) 175 | -------------------------------------------------------------------------------- /rapprentice/registration.py: -------------------------------------------------------------------------------- 1 | """ 2 | Register point clouds to each other 3 | 4 | 5 | arrays are named like name_abc 6 | abc are subscripts and indicate the what that tensor index refers to 7 | 8 | index name conventions: 9 | m: test point index 10 | n: training point index 11 | a: input coordinate 12 | g: output coordinate 13 | d: gripper coordinate 14 | """ 15 | 16 | from __future__ import division 17 | import numpy as np 18 | import scipy.spatial.distance as ssd 19 | from rapprentice import tps, svds, 
class Transformation(object):
    """
    Object oriented interface for transformations R^d -> R^d
    """
    def transform_points(self, x_ma):
        raise NotImplementedError
    def compute_jacobian(self, x_ma):
        raise NotImplementedError


    def transform_bases(self, x_ma, rot_mad, orthogonalize=True, orth_method = "cross"):
        """
        Transform a rotation matrix attached at each point in x_ma by the
        local jacobian of the map.

        orth_method: "cross", "svd", or "qr" -- how to re-orthogonalize
        the transformed bases (only used when orthogonalize is True).
        """

        grad_mga = self.compute_jacobian(x_ma)
        newrot_mgd = np.array([grad_ga.dot(rot_ad) for (grad_ga, rot_ad) in zip(grad_mga, rot_mad)])


        if orthogonalize:
            if orth_method == "qr":
                newrot_mgd = orthogonalize3_qr(newrot_mgd)
            elif orth_method == "svd":
                newrot_mgd = orthogonalize3_svd(newrot_mgd)
            elif orth_method == "cross":
                newrot_mgd = orthogonalize3_cross(newrot_mgd)
            # bug fix: the message previously interpolated the boolean
            # `orthogonalize` instead of the offending method name
            else: raise Exception("unknown orthogonalization method %s"%orth_method)
        return newrot_mgd

    def transform_hmats(self, hmat_mAD):
        """
        Transform (D+1) x (D+1) homogeneous matrices
        """
        hmat_mGD = np.empty_like(hmat_mAD)
        hmat_mGD[:,:3,3] = self.transform_points(hmat_mAD[:,:3,3])
        hmat_mGD[:,:3,:3] = self.transform_bases(hmat_mAD[:,:3,3], hmat_mAD[:,:3,:3])
        hmat_mGD[:,3,:] = np.array([0,0,0,1])
        return hmat_mGD

    def compute_numerical_jacobian(self, x_d, epsilon=0.0001):
        "numerical jacobian (forward differences) of the map at point x_d"
        # np.asfarray was removed in numpy 2.0; asarray with float dtype is equivalent
        x0 = np.asarray(x_d, dtype=float)
        f0 = self.transform_points(x0)
        # bug fix: np.zeros takes a shape *tuple*; the original passed two
        # positional args, making len(f0) the dtype -> TypeError at runtime
        jac = np.zeros((len(x0), len(f0)))
        dx = np.zeros(len(x0))
        for i in range(len(x0)):
            dx[i] = epsilon
            jac[i] = (self.transform_points(x0+dx) - f0) / epsilon
            dx[i] = 0.
        return jac.transpose()
def unit_boxify(x_na):
    """Center a point cloud at the origin and scale it so its largest
    axis-aligned extent becomes 1.

    Returns (scaled_cloud, (scaling, scaled_translation)); the second
    element lets the transform be undone later (see unscale_tps*).
    """
    # np.ptp instead of the ndarray.ptp method, which was removed in numpy 2.0
    ranges = np.ptp(x_na, axis=0)
    dlarge = ranges.argmax()
    unscaled_translation = - (x_na.min(axis=0) + x_na.max(axis=0))/2
    scaling = 1./ranges[dlarge]
    scaled_translation = unscaled_translation * scaling
    return x_na*scaling + scaled_translation, (scaling, scaled_translation)
= False, f_init = None, plot_cb = None): 198 | """ 199 | tps-rpm algorithm mostly as described by chui and rangaran 200 | reg_init/reg_final: regularization on curvature 201 | rad_init/rad_final: radius for correspondence calculation (meters) 202 | plotting: 0 means don't plot. integer n means plot every n iterations 203 | """ 204 | _,d=x_nd.shape 205 | regs = loglinspace(reg_init, reg_final, n_iter) 206 | rads = loglinspace(rad_init, rad_final, n_iter) 207 | if f_init is not None: 208 | f = f_init 209 | else: 210 | f = ThinPlateSpline(d) 211 | # f.trans_g = np.median(y_md,axis=0) - np.median(x_nd,axis=0) 212 | 213 | for i in xrange(n_iter): 214 | xwarped_nd = f.transform_points(x_nd) 215 | corr_nm = calc_correspondence_matrix(xwarped_nd, y_md, r=rads[i], p=.1, max_iter=10) 216 | 217 | wt_n = corr_nm.sum(axis=1) 218 | 219 | 220 | targ_nd = (corr_nm/wt_n[:,None]).dot(y_md) 221 | 222 | if plotting and i%plotting==0: 223 | plot_cb(x_nd, y_md, targ_nd, corr_nm, wt_n, f) 224 | 225 | 226 | f = fit_ThinPlateSpline(x_nd, targ_nd, bend_coef = regs[i], wt_n=wt_n, rot_coef = rot_reg) 227 | 228 | return f 229 | 230 | def tps_rpm_bij(x_nd, y_md, n_iter = 20, reg_init = .1, reg_final = .001, rad_init = .1, rad_final = .005, rot_reg = 1e-3, 231 | plotting = False, plot_cb = None): 232 | """ 233 | tps-rpm algorithm mostly as described by chui and rangaran 234 | reg_init/reg_final: regularization on curvature 235 | rad_init/rad_final: radius for correspondence calculation (meters) 236 | plotting: 0 means don't plot. 
def logmap(m):
    """Log map from SO(3) to so(3): returns (axis_row, theta).

    http://en.wikipedia.org/wiki/Axis_angle#Log_map_from_SO.283.29_to_so.283.29
    The first return value is a (1, 3) array.
    NOTE(review): theta == 0 or pi makes sin(theta) == 0 and divides by zero;
    callers must avoid those inputs - confirm upstream guarantees.
    """
    cos_theta = np.clip((np.trace(m) - 1) / 2, -1, 1)
    theta = np.arccos(cos_theta)
    skew_row = np.array([[m[2, 1] - m[1, 2], m[0, 2] - m[2, 0], m[1, 0] - m[0, 1]]])
    return (1 / (2 * np.sin(theta))) * skew_row, theta
def balance_matrix3(prob_nm, max_iter, p, outlierfrac, r_N = None):
    """Sinkhorn-style row/column balancing of a correspondence matrix with an
    outlier row and column appended (as used by tps_rpm_bij).

    prob_nm:     (n, m) nonnegative match probabilities
    max_iter:    number of balancing iterations
    p:           outlier probability written into the appended row/column
    outlierfrac: expected outlier fraction (sets the target sums of the
                 appended row/column)
    r_N:         optional warm-start row scaling from a previous call

    Returns (balanced (n, m) matrix, row scaling r_N, column scaling c_M).

    Fix: the original used the Python-2-only builtin xrange, which breaks the
    whole module under Python 3; range iterates identically here.
    """
    n, m = prob_nm.shape

    # augment with an outlier row and column
    prob_NM = np.empty((n+1, m+1), 'f4')
    prob_NM[:n, :m] = prob_nm
    prob_NM[:n, m] = p
    prob_NM[n, :m] = p
    prob_NM[n, m] = p * np.sqrt(n*m)

    # target marginals: 1 for real points, scaled by the expected outlier
    # count for the appended row/column
    a_N = np.ones((n+1), 'f4')
    a_N[n] = m * outlierfrac
    b_M = np.ones((m+1), 'f4')
    b_M[m] = n * outlierfrac

    if r_N is None: r_N = np.ones(n+1, 'f4')

    # alternate column and row rescaling toward the target marginals
    for _ in range(max_iter):
        c_M = b_M / r_N.dot(prob_NM)
        r_N = a_N / prob_NM.dot(c_M)

    prob_NM *= r_N[:, None]
    prob_NM *= c_M[None, :]

    # drop the outlier row/column from the returned matrix
    return prob_NM[:n, :m], r_N, c_M
def lerp(a, b, fracs):
    """Linearly interpolate between a and b at each fraction in fracs.

    fracs is a 1-D array; the result has one row per fraction.
    """
    f = fracs[:, None]
    return (1 - f) * a + f * b
def get_velocities(positions, times, tol):
    """Estimate velocities by fitting a smoothing spline and differentiating.

    positions: (n, dof) array (a 1-D input is treated as one n-dof sample)
    times:     (n,) sample times; near-duplicate times (dt < 1e-6) are dropped
               because splprep needs a strictly increasing parameter
    tol:       smoothing tolerance, passed to splprep as s = tol**2 * (n+1)

    Returns an (n, dof) array of velocities, or zeros when there are too few
    distinct timestamps to fit a spline.

    BUG FIX: the original guarded with len(good_inds) == 1, but good_inds is a
    boolean mask of length n, so the guard only fired for n == 1; with n > 1
    but all-duplicate timestamps splprep crashed. Guard on the number of
    surviving samples instead, and derive the spline degree from that count so
    k never exceeds the available points.
    """
    positions = np.atleast_2d(positions)
    n = len(positions)

    good_inds = np.r_[True, (abs(times[1:] - times[:-1]) >= 1e-6)]
    good_positions = positions[good_inds]
    good_times = times[good_inds]

    if len(good_times) < 2:
        return np.zeros(positions[0:1].shape)

    deg = min(3, len(good_times) - 1)

    (tck, _) = si.splprep(good_positions.T, s=tol**2 * (n + 1), u=good_times, k=deg)
    # smooth_positions = np.r_[si.splev(times, tck, der=0)].T
    velocities = np.r_[si.splev(times, tck, der=1)].T
    return velocities
def shortest_path(ncost_nk, ecost_nkk):
    """Dynamic-programming shortest path through a stage graph.

    ncost_nk:  (N, K) node costs - cost of being in state k at stage n
    ecost_nkk: (N-1, K, K) edge costs - cost of moving from state j at stage n
               to state k at stage n+1

    Returns (path, cost): path is an (N,) int array of the chosen state per
    stage, cost is the total cost along that path.

    Fix: replaces the Python-2-only builtin xrange (NameError under Python 3)
    with range, which iterates identically here.
    """
    N, K = ncost_nk.shape
    cost_nk = np.empty((N, K), dtype='float')    # best cost to reach each state
    prev_nk = np.empty((N-1, K), dtype='int')    # argmin backpointers
    cost_nk[0] = ncost_nk[0]
    for n in range(1, N):
        # candidate cost for every (previous state, current state) pair
        cost_kk = ecost_nkk[n-1] + cost_nk[n-1][:, None] + ncost_nk[n][None, :]
        cost_nk[n] = cost_kk.min(axis=0)
        prev_nk[n-1] = cost_kk.argmin(axis=0)

    cost = cost_nk[N-1].min()

    # walk backpointers from the cheapest final state
    path = np.empty(N, dtype='int')
    path[N-1] = cost_nk[N-1].argmin()
    for n in range(N-1, 0, -1):
        path[n-1] = prev_nk[n-1, path[n]]

    return path, cost
def make_traj_with_limits(positions, vel_limits_j, acc_limits_j):
    """Retime a joint path under velocity/acceleration limits, then smooth it.

    Returns (smooth_positions, smooth_velocities, times): spline-smoothed
    positions, their time derivatives, and the retimed timestamps.
    """
    times, keep_inds = retime(positions, vel_limits_j, acc_limits_j)
    positions = positions[keep_inds]

    # spline degree cannot exceed (number of points - 1)
    deg = min(3, len(positions) - 1)
    smoothing = len(positions) * .001**2
    (tck, _) = si.splprep(positions.T, s=smoothing, u=times, k=deg)
    smooth_positions = np.r_[si.splev(times, tck, der=0)].T
    smooth_velocities = np.r_[si.splev(times, tck, der=1)].T
    return smooth_positions, smooth_velocities, times
class RosToRave(object):

    "take in ROS joint_states messages and use it to update an openrave robot"

    def __init__(self, robot, ros_names):
        self.initialized = False

        self.ros_names = ros_names
        # openrave joint index for each ROS joint name; -1 when the robot
        # has no joint by that name
        rave_index_per_ros = np.array([robot.GetJointIndex(name) for name in self.ros_names])
        # positions (into the ROS value list) of joints the robot actually has
        self.good_ros_inds = np.flatnonzero(rave_index_per_ros != -1)
        # matching openrave DOF indices, in the same order
        self.rave_inds = rave_index_per_ros[self.good_ros_inds]

    def convert(self, ros_values):
        # keep only the values for joints that exist on the rave robot
        return [ros_values[i] for i in self.good_ros_inds]

    def set_values(self, robot, ros_values):
        # 0 = don't check limits / no controller update
        robot.SetDOFValues(self.convert(ros_values), self.rave_inds, 0)
def request_int_in_range(too_high_val):
    """Read an integer in [0, too_high_val) from stdin, reprompting until valid.

    Fixes two bugs in the original:
    - a non-integer reply hit `pass` and then compared the unbound name
      choice_ind, raising NameError on the first iteration;
    - the bound check `choice_ind <= too_high_val` accepted too_high_val
      itself and negative values, although the caller (find_closest_manual)
      passes a list length and indexes with the result, so valid choices are
      0 .. too_high_val-1.
    """
    while True:
        try:
            choice_ind = int(raw_input())
        except ValueError:
            print("invalid selection. try again")
            continue
        if 0 <= choice_ind < too_high_val:
            return choice_ind
        print("invalid selection. try again")
def yes_or_no(question):
    """Prompt with `question` until the user types 'y' (True) or 'n' (False)."""
    assert isinstance(question, (str, unicode))
    while True:
        answer = raw_input(question + " (y/n): ")
        if answer == 'y':
            return True
        if answer == 'n':
            return False
for row in asarray(seg_info["joint_states"]["position"])] 42 | robot.SetActiveDOFs(r2r.rave_inds) 43 | robot.SetActiveDOFValues(rave_traj[0]) 44 | 45 | handles = [] 46 | T_w_k = berkeley_pr2.get_kinect_transform(robot) 47 | o = T_w_k[:3,3] 48 | x = T_w_k[:3,0] 49 | y = T_w_k[:3,1] 50 | z = T_w_k[:3,2] 51 | 52 | handles.append(env.drawarrow(o, o+.3*x, .005,(1,0,0,1))) 53 | handles.append(env.drawarrow(o, o+.3*y, .005,(0,1,0,1))) 54 | handles.append(env.drawarrow(o, o+.3*z, .005,(0,0,1,1))) 55 | 56 | 57 | XYZ_k = clouds.depth_to_xyz(np.asarray(seg_info["depth"]), berkeley_pr2.f) 58 | Twk = asarray(seg_info["T_w_k"]) 59 | XYZ_w = XYZ_k.dot(Twk[:3,:3].T) + Twk[:3,3][None,None,:] 60 | RGB = np.asarray(seg_info["rgb"]) 61 | handles.append(env.plot3(XYZ_w.reshape(-1,3), 2, RGB.reshape(-1,3)[:,::-1]/255.)) 62 | 63 | 64 | 65 | animate_traj.animate_traj(rave_traj, robot, pause = not args.nopause) 66 | 67 | print "DONE" 68 | trajoptpy.GetViewer(env).Idle() 69 | 70 | 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /scripts/command_pr2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ipython -i 2 | from rapprentice.PR2 import PR2 3 | import rospy 4 | rospy.init_node("command_pr2", disable_signals = True) 5 | pr2 = PR2() -------------------------------------------------------------------------------- /scripts/create_new_task_dir.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import argparse 3 | parser = argparse.ArgumentParser(description="create default directory structure and files for a new task") 4 | parser.add_argument("path", help='parent directory, e.g. "~/Data"') 5 | parser.add_argument("taskname", help='description of task, e.g. 
"overhand"') 6 | args = parser.parse_args() 7 | 8 | import os,os.path as osp 9 | datadir = osp.join(args.path, args.taskname) 10 | assert not osp.exists(datadir) 11 | 12 | 13 | os.mkdir(datadir) 14 | with open(osp.join(datadir, args.taskname + ".yaml"),"w") as fh: 15 | fh.write( 16 | """ 17 | name: %s 18 | h5path: %s 19 | bags: 20 | """%(args.taskname, args.taskname+".h5")) 21 | -------------------------------------------------------------------------------- /scripts/do_task.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | usage=""" 5 | 6 | Run in simulation with a translation and a rotation of fake data: 7 | ./do_task.py ~/Data/sampledata/overhand/overhand.h5 --fake_data_segment=overhand0_seg00 --execution=0 --animation=1 --select_manual --fake_data_transform .1 .1 .1 .1 .1 .1 8 | 9 | Run in simulation choosing the closest demo, single threaded 10 | ./do_task.py ~/Data/all.h5 --fake_data_segment=demo1-seg00 --execution=0 --animation=1 --parallel=0 11 | 12 | Actually run on the robot without pausing or animating 13 | ./do_task.py ~/Data/overhand2/all.h5 --execution=1 --animation=0 14 | 15 | """ 16 | parser = argparse.ArgumentParser(usage=usage) 17 | parser.add_argument("h5file", type=str) 18 | parser.add_argument("--cloud_proc_func", default="extract_red") 19 | parser.add_argument("--cloud_proc_mod", default="rapprentice.cloud_proc_funcs") 20 | 21 | parser.add_argument("--execution", type=int, default=0) 22 | parser.add_argument("--animation", type=int, default=0) 23 | parser.add_argument("--parallel", type=int, default=1) 24 | 25 | parser.add_argument("--prompt", action="store_true") 26 | parser.add_argument("--show_neighbors", action="store_true") 27 | parser.add_argument("--select_manual", action="store_true") 28 | parser.add_argument("--log", action="store_true") 29 | 30 | parser.add_argument("--fake_data_segment",type=str) 31 | parser.add_argument("--fake_data_transform", 
type=float, nargs=6, metavar=("tx","ty","tz","rx","ry","rz"), 32 | default=[0,0,0,0,0,0], help="translation=(tx,ty,tz), axis-angle rotation=(rx,ry,rz)") 33 | 34 | parser.add_argument("--interactive",action="store_true") 35 | 36 | args = parser.parse_args() 37 | 38 | if args.fake_data_segment is None: assert args.execution==1 39 | 40 | ################### 41 | 42 | 43 | """ 44 | Workflow: 45 | 1. Fake data + animation only 46 | --fake_data_segment=xxx --execution=0 47 | 2. Fake data + Gazebo. Set Gazebo to initial state of fake data segment so we'll execute the same thing. 48 | --fake_data_segment=xxx --execution=1 49 | This is just so we know the robot won't do something stupid that we didn't catch with openrave only mode. 50 | 3. Real data + Gazebo 51 | --execution=1 52 | The problem is that the gazebo robot is in a different state from the real robot, in particular, the head tilt angle. TODO: write a script that sets gazebo head to real robot head 53 | 4. Real data + Real execution. 54 | --execution=1 55 | 56 | The question is, do you update the robot's head transform. 57 | If you're using fake data, don't update it. 
def split_trajectory_by_gripper(seg_info):
    """Split a demo into segments at gripper open/close events.

    seg_info must provide "r_gripper_joint" and "l_gripper_joint" angle
    sequences. Returns (seg_starts, seg_ends): sorted index arrays where each
    segment runs from seg_starts[i] to seg_ends[i] inclusive.
    """
    r_angles = np.asarray(seg_info["r_gripper_joint"])
    l_angles = np.asarray(seg_info["l_gripper_joint"])

    thresh = .04  # open/close threshold
    n_steps = len(l_angles)

    def crossings(angles, upward):
        # indices BEFORE the transition occurs
        if upward:
            mask = (angles[1:] >= thresh) & (angles[:-1] < thresh)
        else:
            mask = (angles[1:] < thresh) & (angles[:-1] >= thresh)
        return np.flatnonzero(mask)

    before_transitions = np.r_[crossings(l_angles, True), crossings(r_angles, True),
                               crossings(l_angles, False), crossings(r_angles, False)]
    seg_starts = np.unique(np.r_[0, before_transitions + 1])
    seg_ends = np.unique(np.r_[before_transitions, n_steps - 1])

    return seg_starts, seg_ends
def find_closest_manual(demofile, _new_xyz):
    "for now, just prompt the user"
    # the point cloud argument is ignored; selection is entirely manual
    seg_names = demofile.keys()
    print("choose from the following options (type an integer)")
    for i, seg_name in enumerate(seg_names):
        print("%i: %s" % (i, seg_name))
    choice_ind = task_execution.request_int_in_range(len(seg_names))
    return seg_names[choice_ind]
def arm_moved(joint_traj):
    """Heuristic: did the arm actually move during this joint trajectory?

    NOTE(review): this measures the spread (ptp) of the per-step deltas, i.e.
    velocity variation, not total displacement - a perfectly constant-velocity
    sweep returns False. Confirm that is intended.
    """
    if len(joint_traj) < 2:
        return False
    steps = joint_traj[1:] - joint_traj[:-1]
    return (steps.ptp(axis=0) > .01).any()
class Globals:
    """Mutable module-level state shared by the helpers in this script."""
    # openrave robot handle; set in main() for both sim and execution modes
    robot = None
    # openrave environment
    env = None
    # rapprentice PR2 interface; stays None unless --execution is set
    pr2 = None
new_xyz = new_xyz.dot(hmat[:3,:3].T) + hmat[:3,3][None,:] 296 | r2r = ros2rave.RosToRave(Globals.robot, asarray(fake_seg["joint_states"]["name"])) 297 | r2r.set_values(Globals.robot, asarray(fake_seg["joint_states"]["position"][0])) 298 | else: 299 | Globals.pr2.head.set_pan_tilt(0,1.2) 300 | Globals.pr2.rarm.goto_posture('side') 301 | Globals.pr2.larm.goto_posture('side') 302 | Globals.pr2.join_all() 303 | time.sleep(.5) 304 | 305 | 306 | Globals.pr2.update_rave() 307 | 308 | rgb, depth = grabber.getRGBD() 309 | T_w_k = berkeley_pr2.get_kinect_transform(Globals.robot) 310 | new_xyz = cloud_proc_func(rgb, depth, T_w_k) 311 | 312 | #grab_end(new_xyz) 313 | 314 | 315 | if args.log: 316 | LOG_COUNT += 1 317 | import cv2 318 | cv2.imwrite(osp.join(LOG_DIR,"rgb%i.png"%LOG_COUNT), rgb) 319 | cv2.imwrite(osp.join(LOG_DIR,"depth%i.png"%LOG_COUNT), depth) 320 | np.save(osp.join(LOG_DIR,"xyz%i.npy"%LOG_COUNT), new_xyz) 321 | 322 | ################################ 323 | redprint("Finding closest demonstration") 324 | if args.select_manual: 325 | seg_name = find_closest_manual(demofile, new_xyz) 326 | else: 327 | seg_name = find_closest_auto(demofile, new_xyz) 328 | 329 | seg_info = demofile[seg_name] 330 | redprint("closest demo: %s"%(seg_name)) 331 | if "done" in seg_name: 332 | redprint("DONE!") 333 | break 334 | 335 | 336 | if args.log: 337 | with open(osp.join(LOG_DIR,"neighbor%i.txt"%LOG_COUNT),"w") as fh: fh.write(seg_name) 338 | ################################ 339 | 340 | redprint("Generating end-effector trajectory") 341 | 342 | handles = [] 343 | old_xyz = np.squeeze(seg_info["cloud_xyz"]) 344 | handles.append(Globals.env.plot3(old_xyz,5, (1,0,0))) 345 | handles.append(Globals.env.plot3(new_xyz,5, (0,0,1))) 346 | 347 | 348 | scaled_old_xyz, src_params = registration.unit_boxify(old_xyz) 349 | scaled_new_xyz, targ_params = registration.unit_boxify(new_xyz) 350 | f,_ = registration.tps_rpm_bij(scaled_old_xyz, scaled_new_xyz, plot_cb = tpsrpm_plot_cb, 351 | plotting=5 
if args.animation else 0,rot_reg=np.r_[1e-4,1e-4,1e-1], n_iter=50, reg_init=10, reg_final=.1) 352 | f = registration.unscale_tps(f, src_params, targ_params) 353 | 354 | handles.extend(plotting_openrave.draw_grid(Globals.env, f.transform_points, old_xyz.min(axis=0)-np.r_[0,0,.1], old_xyz.max(axis=0)+np.r_[0,0,.1], xres = .1, yres = .1, zres = .04)) 355 | 356 | link2eetraj = {} 357 | for lr in 'lr': 358 | link_name = "%s_gripper_tool_frame"%lr 359 | old_ee_traj = asarray(seg_info[link_name]["hmat"]) 360 | new_ee_traj = f.transform_hmats(old_ee_traj) 361 | link2eetraj[link_name] = new_ee_traj 362 | 363 | handles.append(Globals.env.drawlinestrip(old_ee_traj[:,:3,3], 2, (1,0,0,1))) 364 | handles.append(Globals.env.drawlinestrip(new_ee_traj[:,:3,3], 2, (0,1,0,1))) 365 | 366 | miniseg_starts, miniseg_ends = split_trajectory_by_gripper(seg_info) 367 | success = True 368 | print colorize.colorize("mini segments:", "red"), miniseg_starts, miniseg_ends 369 | for (i_miniseg, (i_start, i_end)) in enumerate(zip(miniseg_starts, miniseg_ends)): 370 | 371 | if args.execution=="real": Globals.pr2.update_rave() 372 | 373 | 374 | ################################ 375 | redprint("Generating joint trajectory for segment %s, part %i"%(seg_name, i_miniseg)) 376 | 377 | 378 | 379 | # figure out how we're gonna resample stuff 380 | lr2oldtraj = {} 381 | for lr in 'lr': 382 | manip_name = {"l":"leftarm", "r":"rightarm"}[lr] 383 | old_joint_traj = asarray(seg_info[manip_name][i_start:i_end+1]) 384 | #print (old_joint_traj[1:] - old_joint_traj[:-1]).ptp(axis=0), i_start, i_end 385 | if arm_moved(old_joint_traj): 386 | lr2oldtraj[lr] = old_joint_traj 387 | if len(lr2oldtraj) > 0: 388 | old_total_traj = np.concatenate(lr2oldtraj.values(), 1) 389 | JOINT_LENGTH_PER_STEP = .1 390 | _, timesteps_rs = unif_resample(old_total_traj, JOINT_LENGTH_PER_STEP) 391 | #### 392 | 393 | 394 | ### Generate fullbody traj 395 | bodypart2traj = {} 396 | for (lr,old_joint_traj) in lr2oldtraj.items(): 397 | 398 | 
manip_name = {"l":"leftarm", "r":"rightarm"}[lr] 399 | 400 | old_joint_traj_rs = mu.interp2d(timesteps_rs, np.arange(len(old_joint_traj)), old_joint_traj) 401 | 402 | ee_link_name = "%s_gripper_tool_frame"%lr 403 | new_ee_traj = link2eetraj[ee_link_name][i_start:i_end+1] 404 | new_ee_traj_rs = resampling.interp_hmats(timesteps_rs, np.arange(len(new_ee_traj)), new_ee_traj) 405 | if args.execution: Globals.pr2.update_rave() 406 | new_joint_traj = planning.plan_follow_traj(Globals.robot, manip_name, 407 | Globals.robot.GetLink(ee_link_name), new_ee_traj_rs,old_joint_traj_rs) 408 | part_name = {"l":"larm", "r":"rarm"}[lr] 409 | bodypart2traj[part_name] = new_joint_traj 410 | 411 | 412 | 413 | ################################ 414 | redprint("Executing joint trajectory for segment %s, part %i using arms '%s'"%(seg_name, i_miniseg, bodypart2traj.keys())) 415 | 416 | for lr in 'lr': 417 | success &= set_gripper_maybesim(lr, binarize_gripper(seg_info["%s_gripper_joint"%lr][i_start])) 418 | # Doesn't actually check if grab occurred, unfortunately 419 | 420 | 421 | if not success: break 422 | 423 | if len(bodypart2traj) > 0: 424 | success &= exec_traj_maybesim(bodypart2traj) 425 | 426 | if not success: break 427 | 428 | 429 | 430 | redprint("Segment %s result: %s"%(seg_name, success)) 431 | 432 | 433 | if args.fake_data_segment: break 434 | 435 | if __name__ == "__main__": 436 | main() 437 | -------------------------------------------------------------------------------- /scripts/download_sampledata.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import argparse 3 | parser = argparse.ArgumentParser() 4 | parser.add_argument("path", help="script will create a sampledata directory in this location") 5 | parser.add_argument("--use_rsync", action="store_true") 6 | args = parser.parse_args() 7 | 8 | import os, urllib2, shutil 9 | from rapprentice.yes_or_no import yes_or_no 10 | from rapprentice.call_and_print 
import call_and_print 11 | 12 | if args.use_rsync: 13 | call_and_print("rsync -azvu --delete pabbeel@rll.berkeley.edu:/var/www/rapprentice/sampledata %s"%args.path) 14 | 15 | else: 16 | os.chdir(args.path) 17 | if os.path.exists("sampledata"): 18 | yn = yes_or_no("delete old directory") 19 | if yn: shutil.rmtree("sampledata") 20 | else: raise IOError 21 | os.mkdir("sampledata") 22 | 23 | print "downloading archive file" 24 | urlinfo = urllib2.urlopen("http://rll.berkeley.edu/rapprentice/sampledata.tar") 25 | with open("sampledata.tar","w") as fh: 26 | fh.write(urlinfo.read()) 27 | print "unpacking file" 28 | call_and_print("tar xvf sampledata.tar") 29 | os.unlink("sampledata.tar") -------------------------------------------------------------------------------- /scripts/generate_annotations.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | parser = argparse.ArgumentParser() 5 | parser.add_argument("bagfile") 6 | parser.add_argument("outfile") 7 | args = parser.parse_args() 8 | from rapprentice import bag_proc 9 | 10 | import yaml, rosbag 11 | 12 | bag = rosbag.Bag(args.bagfile) 13 | stamps, meanings = bag_proc.extract_joy(bag) 14 | print "joystick button presses:" 15 | for (stamp, meaning) in zip(stamps, meanings): 16 | print "\t%.4f: %s"%(stamp/1.e9, meaning) 17 | 18 | seg_infos = bag_proc.joy_to_annotations(stamps, meanings) 19 | for (i_seg, seg_info) in enumerate(seg_infos): 20 | if "done" in seg_info: 21 | seg_info["name"] = "done" 22 | else: 23 | seg_info["name"] = "seg%.2i"%i_seg 24 | seg_info["description"] = "(no description)" 25 | print "segment info: " 26 | print seg_infos 27 | 28 | print "writing to %s"%args.outfile 29 | with open(args.outfile,"w") as fh: 30 | yaml.dump(seg_infos,fh) 31 | print "done" 32 | 33 | -------------------------------------------------------------------------------- /scripts/generate_h5.py: 
#!/usr/bin/env python

"""
Generate hdf5 file based on a yaml task file that specifies bag files and annotation files
"""

import argparse
parser = argparse.ArgumentParser()
parser.add_argument("task_file")
parser.add_argument("--no_prompt", action="store_true")
parser.add_argument("--cloud_proc_func", default="extract_red")
parser.add_argument("--cloud_proc_mod", default="rapprentice.cloud_proc_funcs")
# BUGFIX: was declared without action="store_true" (so a bare --no_clouds
# errored, requiring a value), yet it is used below as a boolean flag.
parser.add_argument("--no_clouds", action="store_true")
parser.add_argument("--clouds_only", action="store_true")
args = parser.parse_args()


import os, os.path as osp
import rosbag
import h5py
from rapprentice import bag_proc
import yaml
import importlib, inspect
import numpy as np

# resolve the cloud-processing function named on the command line
cloud_proc_mod = importlib.import_module(args.cloud_proc_mod)
cloud_proc_func = getattr(cloud_proc_mod, args.cloud_proc_func)


task_dir = osp.dirname(args.task_file)

with open(args.task_file, "r") as fh:
    task_info = yaml.load(fh)
h5path = osp.join(task_dir, task_info["h5path"].strip())


if args.clouds_only:
    # only reprocess clouds in an existing file
    hdf = h5py.File(h5path, "r+")
else:
    # rebuild from scratch
    if osp.exists(h5path):
        os.unlink(h5path)
    hdf = h5py.File(h5path, "w")  # explicit mode (was implicit append)


bag_infos = task_info["bags"]

# NOTE(review): --clouds_only still re-runs this bag-import loop;
# presumably add_bag_to_hdf overwrites existing groups -- confirm.
for (i_bag, bag_info) in enumerate(bag_infos):
    bag_file = osp.join(task_dir, bag_info["bag_file"])
    ann_file = osp.join(task_dir, bag_info["annotation_file"])
    video_dir = bag_info["video_dir"]

    demo_name = bag_info["demo_name"] if "demo_name" in bag_info else "demo%i" % i_bag

    bag = rosbag.Bag(bag_file)
    with open(ann_file, "r") as fh:
        annotations = yaml.load(fh)

    bag_proc.add_bag_to_hdf(bag, annotations, hdf, demo_name)
    bag_proc.add_rgbd_to_hdf(osp.join(task_dir, video_dir), annotations, hdf, demo_name)


### Now process all of the point clouds ####
if not args.no_clouds:
    for (seg_name, seg_info) in hdf.items():

        # drop stale cloud data before recomputing
        for field in ["cloud_xyz", "cloud_proc_func", "cloud_proc_mod", "cloud_proc_code"]:
            if field in seg_info:
                del seg_info[field]

        seg_info["cloud_xyz"] = cloud_proc_func(np.asarray(seg_info["rgb"]),
                                                np.asarray(seg_info["depth"]),
                                                np.asarray(seg_info["T_w_k"]))
        # record exactly which code produced the cloud, for reproducibility
        seg_info["cloud_proc_func"] = args.cloud_proc_func
        seg_info["cloud_proc_mod"] = args.cloud_proc_mod
        seg_info["cloud_proc_code"] = inspect.getsource(cloud_proc_func)

hdf.close()  # flush everything to disk
44 | exit(0) 45 | with open(args.master_file, "r") as fh: master_info = yaml.load(fh) 46 | if master_info["bags"] is None: master_info["bags"] = [] 47 | for suffix in itertools.chain("", (str(i) for i in itertools.count())): 48 | demo_name = args.demo_prefix + suffix 49 | if not any(bag["demo_name"] == demo_name for bag in master_info["bags"]): 50 | break 51 | print demo_name 52 | 53 | try: 54 | 55 | bag_cmd = "rosbag record /joint_states /joy -O %s"%demo_name 56 | print colorize(bag_cmd, "green") 57 | bag_handle = subprocess.Popen(bag_cmd, shell=True) 58 | time.sleep(1) 59 | poll_result = bag_handle.poll() 60 | print "poll result", poll_result 61 | if poll_result is not None: 62 | raise Exception("problem starting video recording") 63 | else: started_bag = True 64 | 65 | video_cmd = "record_rgbd_video --out=%s --downsample=%i"%(demo_name, args.downsample) 66 | print colorize(video_cmd, "green") 67 | video_handle = subprocess.Popen(video_cmd, shell=True) 68 | started_video = True 69 | 70 | 71 | time.sleep(9999) 72 | 73 | except KeyboardInterrupt: 74 | print colorize("got control-c", "green") 75 | 76 | finally: 77 | 78 | if started_bag: 79 | print "stopping bag" 80 | bag_handle.send_signal(signal.SIGINT) 81 | bag_handle.wait() 82 | if started_video: 83 | print "stopping video" 84 | video_handle.send_signal(signal.SIGINT) 85 | video_handle.wait() 86 | 87 | 88 | bagfilename = demo_name+".bag" 89 | if yes_or_no("save demo?"): 90 | annfilename = demo_name+".ann.yaml" 91 | call_and_print("generate_annotations.py %s %s"%(bagfilename, annfilename),check=False) 92 | with open(args.master_file,"a") as fh: 93 | fh.write("\n" 94 | "- bag_file: %(bagfilename)s\n" 95 | " annotation_file: %(annfilename)s\n" 96 | " video_dir: %(videodir)s\n" 97 | " demo_name: %(demoname)s"%dict(bagfilename=bagfilename, annfilename=annfilename, videodir=demo_name, demoname=demo_name)) 98 | else: 99 | if osp.exists(demo_name): shutil.rmtree(demo_name) #video dir 100 | if osp.exists(bagfilename): 
os.unlink(bagfilename) 101 | -------------------------------------------------------------------------------- /scripts/upload_sampledata.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | 5 | parser = argparse.ArgumentParser() 6 | parser.add_argument("--delete", action="store_true") 7 | parser.add_argument("sampledata_dir") 8 | args = parser.parse_args() 9 | 10 | import subprocess,os 11 | import os.path as osp 12 | print "cding to %s"%osp.dirname(args.sampledata_dir) 13 | os.chdir(osp.dirname(args.sampledata_dir)) 14 | 15 | 16 | print "creating tar file" 17 | subprocess.check_call('tar cvf sampledata.tar sampledata',shell=True) 18 | print "uploading" 19 | subprocess.check_call("rsync -azvu --progress %s sampledata pabbeel@rll.berkeley.edu:/var/www/rapprentice"%("--delete" if args.delete else ""), shell=True) 20 | subprocess.check_call("rsync -azvu --progress %s sampledata.tar pabbeel@rll.berkeley.edu:/var/www/rapprentice"%("--delete" if args.delete else ""), shell=True) 21 | -------------------------------------------------------------------------------- /scripts/view_demos.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | parser = argparse.ArgumentParser() 5 | parser.add_argument("h5file") 6 | parser.add_argument("--pattern") 7 | args = parser.parse_args() 8 | 9 | import h5py 10 | import rapprentice.cv_plot_utils as cpu 11 | import numpy as np 12 | import cv2 13 | import fnmatch 14 | 15 | hdf = h5py.File(args.h5file,"r") 16 | all_imgnames = [(np.asarray(seg["rgb"]),name) for (name,seg) in hdf.items() if (args.pattern is None) or fnmatch.fnmatch(name, args.pattern)] 17 | 18 | nrows = 7 19 | chunksize = nrows**2 20 | 21 | 22 | 23 | for i in xrange(0,len(all_imgnames),chunksize): 24 | imgnames = all_imgnames[i:i+chunksize] 25 | imgs = [] 26 | for (img, name) in imgnames: 27 | cv2.putText(img, 
name,(30,30), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), thickness = 3) 28 | 29 | imgs.append(img) 30 | bigimg = cpu.tile_images(imgs, nrows, nrows) 31 | cv2.imshow("bigimg", bigimg) 32 | cv2.waitKey() -------------------------------------------------------------------------------- /scripts/view_kinect.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import cloudprocpy, cv2 4 | import numpy as np 5 | import subprocess 6 | 7 | subprocess.call("killall XnSensorServer", shell=True) 8 | 9 | cmap = np.zeros((256, 3),dtype='uint8') 10 | cmap[:,0] = range(256) 11 | cmap[:,2] = range(256)[::-1] 12 | cmap[0] = [0,0,0] 13 | 14 | grabber = cloudprocpy.CloudGrabber() 15 | grabber.startRGBD() 16 | 17 | 18 | try: 19 | while True: 20 | rgb, depth = grabber.getRGBD() 21 | cv2.imshow("rgb", rgb) 22 | cv2.imshow("depth", cmap[np.fmin((depth*.064).astype('int'), 255)]) 23 | cv2.waitKey(30) 24 | except KeyboardInterrupt: 25 | print "got Control-C" 26 | -------------------------------------------------------------------------------- /test/register_rope_pairs.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import rapprentice.registration as reg, rapprentice.cloud_proc_funcs as cpf 3 | from joblib import Memory, Parallel, delayed 4 | import cPickle, h5py, os.path as osp, numpy as np, scipy.spatial.distance as ssd 5 | from time import time 6 | 7 | aa = np.asarray 8 | mem = Memory(cachedir='/tmp/joblib') 9 | 10 | cached_extract_red = mem.cache(cpf.extract_red) 11 | 12 | @mem.cache(ignore=["hdf"]) 13 | def extract_clouds(hdf): 14 | return [cached_extract_red(aa(seg["rgb"]), aa(seg["depth"]), aa(seg["T_w_k"])) for (key,seg) in hdf.items()] 15 | 16 | if __name__ == "__main__": 17 | hdf = h5py.File("/Users/joschu/Data/knots/master.h5", "r") 18 | clouds = extract_clouds(hdf) 19 | 20 | 21 | dim = 2 22 | 23 | 24 | for cloud0 in clouds: 25 | for cloud1 in clouds: 26 | if 
dim ==2 : 27 | cloud0 = cloud0[:,:2] 28 | cloud1 = cloud1[:,:2] 29 | scaled_cloud0, src_params = reg.unit_boxify(cloud0) 30 | scaled_cloud1, targ_params = reg.unit_boxify(cloud1) 31 | 32 | print "downsampled to %i and %i pts"%(len(scaled_cloud0),len(scaled_cloud1)) 33 | 34 | tstart = time() 35 | fest_scaled,gest_scaled = reg.tps_rpm_bij(scaled_cloud0, scaled_cloud1, n_iter=10, reg_init = 10, reg_final=.01) 36 | print "%.3f elapsed"%(time() - tstart) 37 | 38 | cost = reg.tps_reg_cost(fest_scaled) + reg.tps_reg_cost(gest_scaled) 39 | print "cost: %.3f"%cost 40 | 41 | import matplotlib.pyplot as plt 42 | plt.clf() 43 | # plt.plot(xys[:,1], xys[:,0],'r.') 44 | # plt.plot(fxys[:,1], fxys[:,0],'b.') 45 | # plt.plot(fxys_est[:,1], fxys_est[:,0],'g.') 46 | 47 | scaled_fcloud = fest_scaled.transform_points(scaled_cloud0) 48 | plt.plot(scaled_cloud0[:,1], scaled_cloud0[:,0],'r.') 49 | plt.plot(scaled_cloud1[:,1], scaled_cloud1[:,0],'b.') 50 | plt.plot(scaled_fcloud[:,1], scaled_fcloud[:,0],'g.') 51 | 52 | 53 | if dim == 2: 54 | from rapprentice.plotting_plt import plot_warped_grid_2d 55 | plot_warped_grid_2d(fest_scaled.transform_points, [-.5,-.5], [.5,.5]) 56 | 57 | 58 | elif dim == 3: 59 | env= openravepy.RaveGetEnvironment(1) 60 | if env is None: env = openravepy.Environment() 61 | from rapprentice.plotting_openrave import draw_grid 62 | draw_grid(fest_scaled, [-.5,-.5,-.5], [.5,.5,.5]) 63 | 64 | plt.draw() 65 | raw_input() 66 | -------------------------------------------------------------------------------- /test/run_all_tests.py: -------------------------------------------------------------------------------- 1 | import rapprentice, os, os.path as osp 2 | from rapprentice.call_and_print import call_and_print 3 | assert osp.basename(os.getcwd()) == "test" 4 | call_and_print("python tps_unit_tests.py") 5 | call_and_print("python ../scripts/download_sampledata.py ~/Data --use_rsync") 6 | call_and_print("python ../scripts/generate_h5.py 
~/Data/sampledata/overhand/overhand.yaml") 7 | call_and_print("python test_registration_synthetic.py --plotting=0") 8 | -------------------------------------------------------------------------------- /test/run_registration.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | parser = argparse.ArgumentParser() 3 | parser.add_argument("--group") 4 | args = parser.parse_args() 5 | 6 | import yaml 7 | import numpy as np 8 | from lfd import registration, tps 9 | import os.path as osp 10 | 11 | tps.VERBOSE=False 12 | np.seterr(all='raise') 13 | np.set_printoptions(precision=3) 14 | 15 | def axisanglepart(m): 16 | if np.linalg.det(m) < 0 or np.isnan(m).any(): raise Exception 17 | u,s,vh = np.linalg.svd(m) 18 | rotpart = u.dot(vh) 19 | theta = np.arccos((np.trace(rotpart) - 1)/2) 20 | print "theta", theta 21 | return (1/(2*np.sin(theta))) * np.array([[rotpart[2,1] - rotpart[1,2], rotpart[0,2]-rotpart[2,0], rotpart[1,0]-rotpart[0,1]]]) 22 | 23 | 24 | with open("ground_truth.yaml","r") as fh: 25 | gtdata = yaml.load(fh) 26 | 27 | grouplist = gtdata if args.group is None else [group for group in gtdata if group["name"] == args.group] 28 | 29 | for group in grouplist: 30 | print "group: ", group["name"] 31 | xyz_base = np.loadtxt(osp.join("../test/test_pcs",group["base"])) 32 | 33 | def plot_cb(x_nd, y_md, targ_nd, corr_nm, wt_n, f): 34 | return 35 | #return 36 | from mayavi import mlab 37 | fignum=0 38 | fignum = mlab.figure(0) 39 | mlab.clf(figure=fignum) 40 | x,y,z = x_nd.T 41 | mlab.points3d(x,y,z, color=(1,0,0),scale_factor=.01,figure=fignum) 42 | x,y,z, = f.transform_points(x_nd).T 43 | mlab.points3d(x,y,z, color=(0,1,0),scale_factor=.01,figure=fignum) 44 | x,y,z = y_md.T 45 | mlab.points3d(x,y,z, color=(0,0,1),scale_factor=.01,figure=fignum) 46 | #raw_input() 47 | 48 | for other in group["others"]: 49 | xyz_other = np.loadtxt(osp.join("../test/test_pcs", other["file"])) 50 | f = registration.tps_rpm_zrot(xyz_base, 
xyz_other,reg_init=2,reg_final=.5,n_iter=9, 51 | verbose=True, rot_param = (0.01,0.01,0.0025),scale_param=.01, plot_cb = plot_cb, plotting=1) 52 | T_calc = np.c_[ f.lin_ag.T, f.trans_g.reshape(3,1) ] # (.01,.01,.005) 53 | print "result" 54 | print T_calc 55 | print "axis-angle:", 56 | print axisanglepart(T_calc[:3,:3]) 57 | if "transform" in other: 58 | T_other_base = np.array(other["transform"]).reshape(4,4) 59 | print "actual" 60 | print T_other_base 61 | 62 | import transform_gui 63 | transformer = transform_gui.CloudAffineTransformer(xyz_base, xyz_other, T_calc) 64 | transformer.configure_traits() 65 | -------------------------------------------------------------------------------- /test/test_registration_synthetic.py: -------------------------------------------------------------------------------- 1 | import argparse, os.path as osp 2 | parser = argparse.ArgumentParser() 3 | parser.add_argument("--mode",choices=["self","pairwise"],default="self") 4 | parser.add_argument("--sampledata_dir",default=osp.expanduser("~/Data/sampledata")) 5 | parser.add_argument("--plotting", type=int,default=1) 6 | parser.add_argument("--to3d", action="store_true") 7 | args = parser.parse_args() 8 | 9 | from collections import defaultdict 10 | import cv2, numpy as np 11 | from rapprentice.plotting_plt import plot_warped_grid_2d 12 | import rapprentice.registration as reg 13 | from time import time 14 | import os, os.path as osp 15 | from glob import glob 16 | 17 | def pixel_downsample(xy, s): 18 | xy = xy[np.isfinite(xy[:,0])] 19 | d = defaultdict(list) 20 | for (i,pt) in enumerate(xy): 21 | x,y = pt 22 | d[(int(x/s), int(y/s))].append(i) 23 | return np.array([xy[inds].mean(axis=0) for inds in d.values()]) 24 | def voxel_downsample(xyz, s): 25 | xyz = xyz[np.isfinite(xyz[:,0])] 26 | d = defaultdict(list) 27 | for (i,pt) in enumerate(xyz): 28 | x,y,z = pt 29 | d[(int(x/s), int(y/s), int(z/s))].append(i) 30 | return np.array([xyz[inds].mean(axis=0) for inds in d.values()]) 31 | 32 | 
33 | def registration(xys, fxys): 34 | if args.to3d: 35 | xys = np.c_[xys, np.zeros((len(xys)))] 36 | fxys = np.c_[fxys, np.zeros((len(fxys)))] 37 | 38 | 39 | scaled_xys, src_params = reg.unit_boxify(xys) 40 | scaled_fxys, targ_params = reg.unit_boxify(fxys) 41 | 42 | downsample = voxel_downsample if args.to3d else pixel_downsample 43 | scaled_ds_xys = downsample(scaled_xys, .03) 44 | scaled_ds_fxys = downsample(scaled_fxys, .03) 45 | 46 | print "downsampled to %i and %i pts"%(len(scaled_ds_xys),len(scaled_ds_fxys)) 47 | tstart = time() 48 | 49 | # fest_scaled = reg.tps_rpm(scaled_ds_xys, scaled_ds_fxys, n_iter=10, reg_init = 10, reg_final=.01) 50 | fest_scaled,_ = reg.tps_rpm_bij(scaled_ds_xys, scaled_ds_fxys, n_iter=10, reg_init = 10, reg_final=.01) 51 | 52 | 53 | 54 | print "time: %.4f"%(time()-tstart) 55 | fest = reg.unscale_tps(fest_scaled, src_params, targ_params) 56 | fxys_est = fest.transform_points(xys) 57 | if len(fxys_est) == len(fxys): print "error:", np.abs(fxys_est - fxys).mean() 58 | 59 | if args.plotting: 60 | import matplotlib.pyplot as plt 61 | plt.clf() 62 | # plt.plot(xys[:,1], xys[:,0],'r.') 63 | # plt.plot(fxys[:,1], fxys[:,0],'b.') 64 | # plt.plot(fxys_est[:,1], fxys_est[:,0],'g.') 65 | 66 | scaled_ds_fxys_est = fest_scaled.transform_points(scaled_ds_xys) 67 | plt.plot(scaled_ds_xys[:,1], scaled_ds_xys[:,0],'r.') 68 | plt.plot(scaled_ds_fxys[:,1], scaled_ds_fxys[:,0],'b.') 69 | plt.plot(scaled_ds_fxys_est[:,1], scaled_ds_fxys_est[:,0],'g.') 70 | 71 | 72 | def to2d(f): 73 | def f2d(x): 74 | return f(np.c_[x, np.zeros((len(x),1))])[:,:2] 75 | return f2d 76 | transform_func = to2d(fest_scaled.transform_points) if args.to3d else fest_scaled.transform_points 77 | plot_warped_grid_2d(transform_func, [-.5,-.5], [.5,.5]) 78 | plt.draw() 79 | plt.ginput() 80 | 81 | def main(): 82 | 83 | assert osp.exists(osp.join(args.sampledata_dir, "letter_images")) 84 | 85 | fnames = glob(osp.join(args.sampledata_dir, "letter_images", "*.png")) 86 | 87 | if 
"""Unit tests for the thin-plate-spline routines in rapprentice.tps."""

from rapprentice import testing, tps
import numpy as np


@testing.testme
def nonrigidity_gradient():
    """Analytic nonrigidity-error gradient agrees with a numerical Jacobian."""
    import numdifftools as ndt
    D = 3
    pts0 = np.random.randn(10, D)
    pts_eval = np.random.randn(3, D)
    def err_partial(params):
        # unpack the flat parameter vector into (linear, translation, weights)
        lin_ag = params[0:9].reshape(3, 3)
        trans_g = params[9:12]
        w_ng = params[12:].reshape(-1, 3)
        return tps.tps_nr_err(pts_eval, lin_ag, trans_g, w_ng, pts0)
    lin_ag, trans_g, w_ng = np.random.randn(D, D), np.random.randn(D), np.random.randn(len(pts0), D)
    J1 = tps.tps_nr_grad(pts_eval, lin_ag, trans_g, w_ng, pts0)
    J = ndt.Jacobian(err_partial)(np.r_[lin_ag.flatten(), trans_g.flatten(), w_ng.flatten()])
    assert np.allclose(J1, J)


@testing.testme
def jacobian_of_tps():
    """tps_grad agrees with a numerical Jacobian of tps_eval at each point."""
    import numdifftools as ndt
    D = 3
    pts0 = np.random.randn(100, 3)
    pts_eval = np.random.randn(20, D)
    lin_ag, trans_g, w_ng, x_na = np.random.randn(D, D), np.random.randn(D), np.random.randn(len(pts0), D), pts0
    def eval_partial(x_ma_flat):
        x_ma = x_ma_flat.reshape(-1, 3)
        return tps.tps_eval(x_ma, lin_ag, trans_g, w_ng, pts0)
    for i in xrange(len(pts_eval)):
        rots = ndt.Jacobian(eval_partial)(pts_eval[i])
        rots1 = tps.tps_grad(pts_eval[i:i + 1], lin_ag, trans_g, w_ng, pts0)
        assert np.allclose(rots1, rots)


@testing.testme
def fitting_methods_equivalent():
    """tps_fit, tps_fit2, and tps_fit3 produce identical fits."""
    pts0 = np.random.randn(200, 3)
    pts1 = np.random.randn(200, 3)
    bend_coef = 13
    lin_ag, trans_g, w_ng = tps.tps_fit(pts0, pts1, bend_coef, 0)
    lin2_ag, trans2_g, w2_ng = tps.tps_fit2(pts0, pts1, bend_coef, 0)
    lin3_ag, trans3_g, w3_ng = tps.tps_fit3(pts0, pts1, bend_coef, 0, np.ones(len(pts0)))

    assert np.allclose(lin_ag, lin2_ag)
    assert np.allclose(trans_g, trans2_g)
    assert np.allclose(w_ng, w2_ng)

    assert np.allclose(lin_ag, lin3_ag)
    assert np.allclose(trans_g, trans3_g)
    assert np.allclose(w_ng, w3_ng)

    # with nonzero rotation regularization, compare fit2 against fit3
    lin2_ag, trans2_g, w2_ng = tps.tps_fit2(pts0, pts1, bend_coef, .01)
    lin3_ag, trans3_g, w3_ng = tps.tps_fit3(pts0, pts1, bend_coef, .01, np.ones(len(pts0)))

    assert np.allclose(lin2_ag, lin3_ag)
    assert np.allclose(trans2_g, trans3_g)
    assert np.allclose(w2_ng, w3_ng)


# @testing.testme
def check_that_nr_fit_runs():
    """Smoke test for tps_nr_fit_enhanced (disabled; see commented decorator)."""
    from jds_image_proc.clouds import voxel_downsample

    pts0 = np.loadtxt("../test/rope_control_points.txt")
    pts1 = np.loadtxt("../test/bad_rope.txt")
    pts_rigid = voxel_downsample(pts0[:10], .02)
    np.seterr(all='ignore')
    np.set_printoptions(suppress=True)

    lin_ag, trans_g, w_eg, x_ea = tps.tps_nr_fit_enhanced(pts0, pts1, 0.01, pts_rigid, 0.001,
                                                          method="newton", plotting=0)
    def eval_partial(x_ma):
        # NOTE(review): tps_eval is unqualified here (tps.tps_eval?) --
        # latent, since this helper is never called; confirm before enabling.
        return tps_eval(x_ma, lin_ag, trans_g, w_eg, x_ea)

    grads = tps.tps_grad(pts_rigid, lin_ag, trans_g, w_eg, x_ea)


@testing.testme
def tps_fit_fixedrot_is_minimizer():
    """
    check that tps_fit_fixedrot minimizes tps_cost subject to constraint
    """
    x_na = np.random.randn(100, 3)
    y_ng = np.random.randn(100, 3)
    bend_coef = .1
    lin_ag = np.random.randn(3, 3)
    trans_g, w_ng = tps.tps_fit_fixedrot(x_na, y_ng, bend_coef, lin_ag)
    hopefully_min_cost = tps.tps_cost(lin_ag, trans_g, w_ng, x_na, y_ng, bend_coef)

    n_tries = 50
    other_costs = np.empty(n_tries)
    for i in xrange(n_tries):
        # perturb w in the nullspace of [x 1]^T so the constraint still holds
        N = len(x_na)
        _u, _s, _vh = np.linalg.svd(np.c_[x_na, np.ones((N, 1))], full_matrices=True)
        pert = .01 * _u[:, 4:].dot(np.random.randn(N - 4, 3))
        assert np.allclose(x_na.T.dot(pert), np.zeros((3, 3)))
        other_costs[i] = tps.tps_cost(lin_ag, trans_g, w_ng + pert, x_na, y_ng, bend_coef)
    assert (other_costs > hopefully_min_cost).all()


@testing.testme
def tps_fit_is_minimizer():
    """
    check that tps_fit_fixedrot minimizes tps_cost subject to constraint
    (larger bend coefficient; SVD hoisted out of the perturbation loop)
    """
    x_na = np.random.randn(100, 3)
    y_ng = np.random.randn(100, 3)
    bend_coef = 10
    lin_ag = np.random.randn(3, 3)
    trans_g, w_ng = tps.tps_fit_fixedrot(x_na, y_ng, bend_coef, lin_ag)
    hopefully_min_cost = tps.tps_cost(lin_ag, trans_g, w_ng, x_na, y_ng, bend_coef)

    n_tries = 50
    other_costs = np.empty(n_tries)
    N = len(x_na)
    _u, _s, _vh = np.linalg.svd(np.c_[x_na, np.ones((N, 1))], full_matrices=True)
    for i in xrange(n_tries):
        pert = .01 * _u[:, 4:].dot(np.random.randn(N - 4, 3))
        assert np.allclose(x_na.T.dot(pert), np.zeros((3, 3)))
        assert np.allclose(pert.sum(axis=0), np.zeros(3))
        other_costs[i] = tps.tps_cost(lin_ag, trans_g, w_ng + pert, x_na, y_ng, bend_coef)
    assert (other_costs > hopefully_min_cost).all()


@testing.testme
def tps_regrot_with_quad_cost():
    """tps_fit_regrot with a quadratic rotation cost reproduces tps_fit2."""
    x_na = np.random.randn(100, 3)
    y_ng = np.random.randn(100, 3)
    bend_coef = 10
    rot_coef = 19
    def rfunc(b):
        return rot_coef * ((b - np.eye(3)) ** 2).sum()
    correct_lin_ag, correct_trans_g, correct_w_ng = tps.tps_fit2(x_na, y_ng, bend_coef, rot_coef)
    lin_ag, trans_g, w_ng = tps.tps_fit_regrot(x_na, y_ng, bend_coef, rfunc, max_iter=30)
    assert np.allclose(correct_trans_g, trans_g, atol=1e-2)
    assert np.allclose(correct_lin_ag, lin_ag, atol=1e-2)
    assert np.allclose(correct_w_ng, w_ng, atol=1e-2)


@testing.testme
def rot_reg_works():
    """Rotation-regularized fitting recovers a small z-rotation."""
    from numpy import sin, cos
    from rapprentice import registration
    import fastrapp
    x = np.random.randn(100, 3)
    a = .1
    R = np.array([[cos(a), sin(a), 0], [-sin(a), cos(a), 0], [0, 0, 1]])
    y = x.dot(R.T)
    f = registration.fit_ThinPlateSpline_RotReg(x, y, bend_coef=.1, rot_coefs=[.1, .1, 0], scale_coef=1)
    assert np.allclose(R.T, f.lin_ag, atol=1e-4)


if __name__ == "__main__":
    # tps.VERBOSE = True
    tps.ENABLE_SLOW_TESTS = True
    np.seterr(all='ignore')
    testing.test_all()
#!/usr/bin/env python
"""Sanity-check an h5 demo file: open it, load the PR2 model, and iterate
over every segment."""
import argparse  # BUGFIX: was imported *after* argparse was already used
import h5py
import openravepy  # BUGFIX: was used below but never imported

parser = argparse.ArgumentParser()
parser.add_argument("h5file")
args = parser.parse_args()

hdf = h5py.File(args.h5file, "r")  # read-only: verification must not modify the file

env = openravepy.Environment()
env.Load("robots/pr2-beta-static.zae")
robot = env.GetRobots()[0]

for (seg_name, seg_info) in hdf.items():
    # TODO: add actual per-segment checks; currently this only confirms
    # every segment can be iterated without error
    pass