├── .gitignore ├── CMakeLists.txt ├── LICENSE ├── README.rst ├── bigdata ├── download.py └── upload.py ├── cmake ├── boost-python.cmake └── modules │ └── FindNumpy.cmake ├── data └── bookshelve.env.xml ├── doc ├── Makefile ├── apidoc.py ├── conf.py ├── documenting.rst ├── generate_module_rst.sh ├── index.rst ├── install.rst ├── install_gpu.rst ├── lfd.demonstration.rst ├── lfd.environment.rst ├── lfd.registration.rst ├── lfd.rst ├── lfd.tpsopt.rst ├── lfd.transfer.rst ├── lfd.util.rst ├── misc.rst ├── modules.rst └── open_index.sh ├── examples ├── drop_objs.py ├── move_box.py ├── move_rope.py ├── register.py └── rope_register_transfer.py ├── lfd ├── __init__.py ├── action_selection.py ├── demonstration │ ├── __init__.py │ └── demonstration.py ├── environment │ ├── __init__.py │ ├── environment.py │ ├── robot_world.py │ ├── settings.py │ ├── sim_util.py │ ├── simulation.py │ └── simulation_object.py ├── lfmd │ ├── __init__.py │ ├── analyze_data.py │ └── combine_force_file.py ├── mmqe │ ├── __init__.py │ ├── auto_performance.py │ ├── build.py │ ├── colorize.py │ ├── constraints.py │ ├── features.py │ ├── max_margin.py │ ├── search.py │ └── util.py ├── rapprentice │ ├── #clouds.py# │ ├── #ropesim.py# │ ├── PR2.py │ ├── __init__.py │ ├── animate_traj.py │ ├── bag_proc.py │ ├── berkeley_pr2.py │ ├── cloud_proc_funcs.py │ ├── clouds.py │ ├── compute_decomps.py │ ├── conversions.py │ ├── cv_plot_utils.py │ ├── eq_solve_exps.py │ ├── eq_solve_exps.py.lprof │ ├── eval_util.py │ ├── func_utils.py │ ├── interactive_roi.py │ ├── kernprof.py │ ├── kinematics_utils.py │ ├── knot_classifier.py │ ├── knot_identification.py │ ├── math_utils.py │ ├── plotting_openrave.py │ ├── plotting_plt.py │ ├── pr2_trajectories.py │ ├── registration.py │ ├── resampling.py │ ├── retiming.py │ ├── rope_initialization.py │ ├── ropesim.py │ ├── ros2rave.py │ ├── svds.py │ ├── task_execution.py │ ├── tps.py │ ├── tps_registration.py │ ├── tps_registration_parallel.py │ ├── transformations.py │ ├── 
util.py │ └── yes_or_no.py ├── registration │ ├── __init__.py │ ├── plotting_openrave.py │ ├── registration.py │ ├── settings.py │ ├── solver.py │ ├── tps.py │ └── transformation.py ├── settings.py ├── tpsopt │ ├── README.md │ ├── __init__.py │ ├── batchtps.py │ ├── clouds.py │ ├── culinalg_exts.py │ ├── optimization_notes.txt │ ├── precompute.py │ ├── registration.py │ ├── settings.py │ ├── tps.py │ └── transformations.py ├── transfer │ ├── __init__.py │ ├── planning.py │ ├── registration_transfer.py │ ├── settings.py │ └── transfer.py └── util │ ├── __init__.py │ ├── colorize.py │ └── util.py ├── requirements_doc.txt ├── scripts ├── auto_performance.py ├── build_all.sh ├── constants.py ├── cross_val.sh ├── eval.py ├── fig8_cross_val.sh ├── filter_labeled_examples.py ├── gen_tasks.py ├── label.py ├── make_lfd_settings_package.py ├── print_results.py ├── sample_landmarks.py └── test_baseline.sh ├── src ├── CMakeLists.txt └── tpsopt │ ├── CMakeLists.txt │ ├── cuda_funcs.cpp │ ├── numpy_utils.hpp │ ├── tps.cu │ └── tps.cuh └── test ├── test_registration.py └── test_simulation.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | # C extensions 4 | *.so 5 | 6 | # Packages 7 | *.egg 8 | *.egg-info 9 | dist 10 | build 11 | _build 12 | eggs 13 | parts 14 | bin 15 | var 16 | sdist 17 | develop-eggs 18 | .installed.cfg 19 | lib 20 | lib64 21 | __pycache__ 22 | 23 | # Installer logs 24 | pip-log.txt 25 | 26 | # Unit test / coverage reports 27 | .coverage 28 | .tox 29 | nosetests.xml 30 | 31 | # Translations 32 | *.mo 33 | 34 | # Mr Developer 35 | .mr.developer.cfg 36 | .project 37 | .pydevproject 38 | 39 | # Gurobi logs 40 | gurobi.log 41 | 42 | # Data files 43 | data 44 | *.pkl 45 | # ignore all files in bigdata except for .py files 46 | bigdata/* 47 | !bigdata/*.py 48 | *.h5 49 | 50 | # Temporary files 51 | *~ 52 | 53 | .idea 54 | tpsopt/CMakeCache.txt 55 | tpsopt/CMakeFiles 56 | tpsopt/Makefile 57 | 
tpsopt/cmake_install.cmake 58 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | project(lfd) 3 | 4 | list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules") 5 | 6 | # http://cmake.3232098.n2.nabble.com/Default-value-for-CMAKE-BUILD-TYPE-td7550756.html 7 | if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) 8 | message(STATUS "Setting build type to 'Release' as none was specified.") 9 | set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE) 10 | # Set the possible values of build type for cmake-gui 11 | set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" 12 | "MinSizeRel" "RelWithDebInfo") 13 | endif() 14 | 15 | set(CMAKE_CXX_FLAGS "-fPIC") 16 | set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) 17 | set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) 18 | set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) 19 | set(BUILD_SHARED_LIBS true) 20 | 21 | # external libraries 22 | find_package(CUDA REQUIRED) 23 | find_package(Boost COMPONENTS python REQUIRED) 24 | 25 | include("${CMAKE_SOURCE_DIR}/cmake/boost-python.cmake") 26 | 27 | add_subdirectory(src) 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | License for LfD 2 | ================== 3 | 4 | TODO 5 | 6 | 7 | Licenses for incorporated software 8 | ================================== 9 | 10 | The included script apidoc.py was derived from code under the following 11 | license: 12 | 13 | ---------------------------------------------------------------------- 14 | Copyright (c) 2007-2014 by the Sphinx team (see AUTHORS file). 15 | All rights reserved. 
16 | 17 | Redistribution and use in source and binary forms, with or without 18 | modification, are permitted provided that the following conditions are 19 | met: 20 | 21 | * Redistributions of source code must retain the above copyright 22 | notice, this list of conditions and the following disclaimer. 23 | 24 | * Redistributions in binary form must reproduce the above copyright 25 | notice, this list of conditions and the following disclaimer in the 26 | documentation and/or other materials provided with the distribution. 27 | 28 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 | 40 | ------------------------------------------------------------------------------- -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | LfD: Learning from Demonstrations for Robotic Manipulation 2 | ========================================================== 3 | 4 | Package Description 5 | ------------------- 6 | The *LfD* package generalizes robot trajectories from demonstrations into new situations, thus enabling robots to learn to perform tasks from demonstrations. 7 | 8 | .. 
image:: https://readthedocs.org/projects/lfd/badge/?version=latest 9 | :target: https://readthedocs.org/projects/lfd/?badge=latest 10 | :alt: Documentation Status 11 | 12 | 13 | Documentation 14 | ------------- 15 | Package documentation is available at ``_. 16 | -------------------------------------------------------------------------------- /bigdata/download.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import argparse 3 | import os, subprocess, urllib2, tarfile 4 | 5 | parser = argparse.ArgumentParser() 6 | parser.add_argument("--rsync",action="store_true") 7 | args = parser.parse_args() 8 | 9 | assert os.getcwd().endswith("bigdata") 10 | 11 | if args.rsync: 12 | subprocess.check_call("rsync -azvu pabbeel@rll.berkeley.edu:/var/www/lfd/bigdata/ ./ --exclude '*.py'", shell=True) 13 | else: 14 | print "downloading tar file (this might take a while)" 15 | urlinfo = urllib2.urlopen("http://rll.berkeley.edu/lfd/bigdata/all.tar.gz") 16 | with open("all.tar.gz","w") as fh: 17 | fh.write(urlinfo.read()) 18 | 19 | print "unpacking file" 20 | with tarfile.open("all.tar.gz") as tar: 21 | tar.extractall(".") 22 | -------------------------------------------------------------------------------- /bigdata/upload.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import os, subprocess, tarfile 5 | 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("--delete", action="store_true") 8 | args = parser.parse_args() 9 | 10 | assert os.getcwd().endswith("bigdata") 11 | 12 | print "creating tar file" 13 | with tarfile.open("all.tar.gz", "w") as tar: 14 | for dirname, dirnames, fnames in os.walk('.'): 15 | for fname in fnames: 16 | if not (fname.endswith("py") or fname.endswith("tar.gz") or fname.endswith("cache.h5")): 17 | tar.add(os.path.join(dirname, fname)) 18 | 19 | print "uploading" 20 | subprocess.check_call("rsync 
-azvu %s all.tar.gz pabbeel@rll.berkeley.edu:/var/www/lfd/bigdata/ --exclude '*.py'"%("--delete" if args.delete else ""), shell=True) 21 | -------------------------------------------------------------------------------- /cmake/boost-python.cmake: -------------------------------------------------------------------------------- 1 | function(boost_python_module NAME) 2 | find_package(PythonLibs 2 REQUIRED) 3 | find_package(Boost COMPONENTS python REQUIRED) 4 | find_package(Numpy) 5 | 6 | 7 | 8 | set(DEP_LIBS 9 | ${Boost_PYTHON_LIBRARY} 10 | ${PYTHON_LIBRARIES} 11 | ) 12 | #these are required includes for every ecto module 13 | include_directories( 14 | ${PYTHON_INCLUDE_PATH} 15 | ${Boost_INCLUDE_DIRS} 16 | ) 17 | add_library(${NAME} SHARED 18 | ${ARGN} 19 | ) 20 | set_target_properties(${NAME} 21 | PROPERTIES 22 | OUTPUT_NAME ${NAME} 23 | COMPILE_FLAGS "${FASTIDIOUS_FLAGS}" 24 | LINK_FLAGS -dynamic 25 | PREFIX "" 26 | ) 27 | if( WIN32 ) 28 | set_target_properties(${NAME} PROPERTIES SUFFIX ".pyd") 29 | elseif( APPLE OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin") 30 | # on mac osx, python cannot import libraries with .dylib extension 31 | set_target_properties(${NAME} PROPERTIES SUFFIX ".so") 32 | endif() 33 | target_link_libraries(${NAME} 34 | ${DEP_LIBS} 35 | ) 36 | endfunction() 37 | -------------------------------------------------------------------------------- /cmake/modules/FindNumpy.cmake: -------------------------------------------------------------------------------- 1 | # 2 | # $Id: $ 3 | # 4 | # Author(s): Anton Deguet 5 | # Created on: 2010-01-20 6 | # 7 | # (C) Copyright 2010 Johns Hopkins University (JHU), All Rights 8 | # Reserved. 9 | # 10 | # --- begin cisst license - do not edit --- 11 | # 12 | # This software is provided "as is" under an open source license, with 13 | # no warranty. The complete license can be found in license.txt and 14 | # http://www.cisst.org/cisst/license.txt. 
15 | # 16 | # --- end cisst license --- 17 | # 18 | # File based on FindNUMARRAY distributed with ITK 3.4 (see itk.org) 19 | # 20 | # Main modifications: 21 | # - use Numpy instead of Numarray for all naming 22 | # - added path for Python 2.5 and 2.6 23 | # - renamed python script generated (det_npp became determineNumpyPath) 24 | # - use lower case for CMake commands and keywords 25 | # - updated python script to use get_include, not get_numpy_include which is now deprecated 26 | # 27 | # --- 28 | # 29 | # Try to find numpy python package 30 | # Once done this will define 31 | # 32 | # PYTHON_NUMPY_FOUND - system has numpy development package and it should be used 33 | # PYTHON_NUMPY_INCLUDE_DIR - directory where the arrayobject.h header file can be found 34 | # 35 | # 36 | 37 | if (PYTHON_NUMPY_INCLUDE_DIR) 38 | return() 39 | endif() 40 | 41 | set(PYTHON_EXECUTABLE "python") 42 | if(PYTHON_EXECUTABLE) 43 | file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/determineNumpyPath.py "try: import numpy; print numpy.get_include()\nexcept: pass\n") 44 | exec_program("${PYTHON_EXECUTABLE}" 45 | ARGS "\"${CMAKE_CURRENT_BINARY_DIR}/determineNumpyPath.py\"" 46 | OUTPUT_VARIABLE NUMPY_PATH 47 | ) 48 | endif(PYTHON_EXECUTABLE) 49 | 50 | find_path(_PYTHON_NUMPY_INCLUDE_DIR arrayobject.h 51 | "${NUMPY_PATH}/numpy/" 52 | "${PYTHON_INCLUDE_PATH}/numpy/" 53 | /usr/include/python2.6/numpy/ 54 | /usr/include/python2.5/numpy/ 55 | /usr/include/python2.4/numpy/ 56 | /usr/include/python2.3/numpy/ 57 | DOC "Directory where the arrayobject.h header file can be found. 
This file is part of the numpy package" 58 | ) 59 | 60 | if(_PYTHON_NUMPY_INCLUDE_DIR) 61 | set(PYTHON_NUMPY_FOUND 1 CACHE INTERNAL "Python numpy development package is available") 62 | get_filename_component(PYTHON_NUMPY_INCLUDE_DIR ${_PYTHON_NUMPY_INCLUDE_DIR} PATH) 63 | endif(_PYTHON_NUMPY_INCLUDE_DIR) 64 | -------------------------------------------------------------------------------- /data/bookshelve.env.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 0.15 0.55 0.01 6 | 1.0 0.78 0.20 7 | 0 0 1 90 8 | 1 0 0 0 9 | 10 | 11 | 12 | 13 | 14 | 15 | 0.15 0.55 0.01 16 | 1.0 0.78 0.60 17 | 0 0 1 90 18 | 1 0 0 0 19 | 20 | 21 | 22 | 23 | 24 | 25 | 0.15 0.55 0.01 26 | 1.0 0.78 1.00 27 | 0 0 1 90 28 | 1 0 0 0 29 | 30 | 31 | 32 | 33 | 34 | 35 | 0.15 0.55 0.01 36 | 1.0 0.78 1.40 37 | 0 0 1 90 38 | 1 0 0 0 39 | 40 | 41 | 42 | 43 | 44 | 45 | 0.15 0.01 0.7 46 | 0.45 0.78 0.7 47 | 0 0 1 90 48 | 1 0 0 0 49 | 50 | 51 | 52 | 53 | 54 | 55 | 0.15 0.01 0.7 56 | 1.55 0.78 0.7 57 | 0 0 1 90 58 | 1 0 0 0 59 | 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 
16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | @echo " coverage to run coverage check of the documentation (if enabled)" 49 | 50 | clean: 51 | rm -rf $(BUILDDIR)/* 52 | 53 | html: 54 | 
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 55 | @echo 56 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 57 | 58 | dirhtml: 59 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 60 | @echo 61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 62 | 63 | singlehtml: 64 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 65 | @echo 66 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 67 | 68 | pickle: 69 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 70 | @echo 71 | @echo "Build finished; now you can process the pickle files." 72 | 73 | json: 74 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 75 | @echo 76 | @echo "Build finished; now you can process the JSON files." 77 | 78 | htmlhelp: 79 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 80 | @echo 81 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 82 | ".hhp project file in $(BUILDDIR)/htmlhelp." 83 | 84 | qthelp: 85 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 86 | @echo 87 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 88 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 89 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/lfd.qhcp" 90 | @echo "To view the help file:" 91 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/lfd.qhc" 92 | 93 | devhelp: 94 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 95 | @echo 96 | @echo "Build finished." 97 | @echo "To view the help file:" 98 | @echo "# mkdir -p $$HOME/.local/share/devhelp/lfd" 99 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/lfd" 100 | @echo "# devhelp" 101 | 102 | epub: 103 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 104 | @echo 105 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
106 | 107 | latex: 108 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 109 | @echo 110 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 111 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 112 | "(use \`make latexpdf' here to do that automatically)." 113 | 114 | latexpdf: 115 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 116 | @echo "Running LaTeX files through pdflatex..." 117 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 118 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 119 | 120 | latexpdfja: 121 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 122 | @echo "Running LaTeX files through platex and dvipdfmx..." 123 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 124 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 125 | 126 | text: 127 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 128 | @echo 129 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 130 | 131 | man: 132 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 133 | @echo 134 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 135 | 136 | texinfo: 137 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 138 | @echo 139 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 140 | @echo "Run \`make' in that directory to run these through makeinfo" \ 141 | "(use \`make info' here to do that automatically)." 142 | 143 | info: 144 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 145 | @echo "Running Texinfo files through makeinfo..." 146 | make -C $(BUILDDIR)/texinfo info 147 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 148 | 149 | gettext: 150 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 151 | @echo 152 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 
153 | 154 | changes: 155 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 156 | @echo 157 | @echo "The overview file is in $(BUILDDIR)/changes." 158 | 159 | linkcheck: 160 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 161 | @echo 162 | @echo "Link check complete; look for any errors in the above output " \ 163 | "or in $(BUILDDIR)/linkcheck/output.txt." 164 | 165 | doctest: 166 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 167 | @echo "Testing of doctests in the sources finished, look at the " \ 168 | "results in $(BUILDDIR)/doctest/output.txt." 169 | 170 | coverage: 171 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 172 | @echo "Testing of coverage in the sources finished, look at the " \ 173 | "results in $(BUILDDIR)/coverage/python.txt." 174 | 175 | xml: 176 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 177 | @echo 178 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 179 | 180 | pseudoxml: 181 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 182 | @echo 183 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 184 | -------------------------------------------------------------------------------- /doc/documenting.rst: -------------------------------------------------------------------------------- 1 | .. _documenting: 2 | 3 | Documenting 4 | =========== 5 | 6 | 7 | Dependencies 8 | ------------ 9 | 10 | - `Sphinx `_ >= 1.3b. 11 | - `sphinx_rtd_theme `_ 12 | - `mock `_ 13 | 14 | Instructions 15 | ------------ 16 | 17 | Install Sphinx, sphinx_rtd_theme and mock with pip. :: 18 | 19 | sudo pip install sphinx>=1.3b1 sphinx_rtd_theme mock 20 | 21 | The documentation is generated from ReStructured Text using Sphinx. 22 | 23 | The documentation sources are in the ``doc/`` directory. 
To locally build the documentation, go to the ``doc/`` directory and run:: 24 | 25 | make html 26 | 27 | The built documentation will be in the ``_build/html/`` directory. 28 | 29 | The online documentation can be found at `rll.berkeley.edu/lfd `_. Whenever new commits are pushed to the ``master`` branch, the docs are rebuilt from this branch (assuming the build doesn't fail). 30 | 31 | Use `Google `_ style docstrings for documenting code. These `Sections `_ can be used inside the docstrings. For docstring examples, see `Example Google Style Python Docstrings `_ or the module :mod:`lfd.registration`. 32 | -------------------------------------------------------------------------------- /doc/generate_module_rst.sh: -------------------------------------------------------------------------------- 1 | # exclude paths after ../lfd 2 | python apidoc.py -M -f -o . ../lfd ../lfd/rapprentice/ ../lfd/lfmd/ ../lfd/mmqe/ 3 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | lfd: Learning from Demonstrations for Robotic Manipulation 2 | ========================================================== 3 | 4 | *lfd* is a software framework for generalizing robot trajectories from demonstrations into new situations, thus enabling robots to learn to perform tasks from demonstrations. 5 | 6 | This software follows the line of work of these papers: [ISRR2013]_, [IROS2013]_, [IROS2014]_. 7 | 8 | Source code is available on `github `_. 9 | 10 | Contents 11 | -------- 12 | 13 | .. toctree:: 14 | :maxdepth: 2 15 | 16 | install 17 | install_gpu 18 | documenting 19 | misc 20 | 21 | 22 | Index 23 | ----- 24 | 25 | * :ref:`genindex` 26 | * :ref:`modindex` 27 | * :ref:`search` 28 | 29 | 30 | References 31 | ---------- 32 | 33 | .. 
[ISRR2013] John Schulman, Jonathan Ho, Cameron Lee, Pieter Abbeel, "`Learning from Demonstrations through the Use of Non-Rigid Registration `_," *in Proceedings of the 16th International Symposium on Robotics Research (ISRR), 2013*. 34 | .. [IROS2013] John Schulman, Ankush Gupta, Sibi Venkatesan, Mallory Tayson-Frederick, Pieter Abbeel, "`A Case Study of Trajectory Transfer through Non-Rigid Registration for a Simplified Suturing Scenario `_," *in Proceedings of the 26th IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2013*. 35 | .. [IROS2014] Alex X. Lee, Sandy H. Huang, Dylan Hadfield-Menell, Eric Tzeng, Pieter Abbeel, "`Unifying Scene Registration and Trajectory Optimization for Learning from Demonstrations with Application to Manipulation of Deformable Objects `_," *in Proceedings of the 27th IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2014*. 36 | -------------------------------------------------------------------------------- /doc/install.rst: -------------------------------------------------------------------------------- 1 | .. _install: 2 | 3 | Installation 4 | ============ 5 | 6 | This code has been tested on Ubuntu 12.04. 7 | 8 | 9 | Dependencies 10 | ------------ 11 | 12 | - `bulletsim `_ 13 | - `trajopt `_ 14 | 15 | - `OpenRAVE `_ >= 0.9 16 | - `PCL `_ 1.7 17 | - Python 2.7 18 | - NumPy >= 1.8.1 19 | - SciPy >= 0.9 20 | - HDF5 21 | - `h5py `_ 22 | - `joblib `_ 23 | 24 | 25 | Instructions 26 | ------------ 27 | 28 | - Install `bulletsim `_ from source. Use the ``lite`` branch and follow its README instructions. 29 | - Install `trajopt `_ from source. Follow the installation instructions but with the following modifications: 30 | 31 | - Use `this fork `_ and the ``trajopt-jointopt`` branch instead. 32 | - Install `OpenRAVE `_ 0.9 or later from the `OpenRAVE testing `_ PPA. 
:: 33 | 34 | sudo add-apt-repository ppa:openrave/testing 35 | sudo apt-get update 36 | sudo apt-get install openrave 37 | 38 | - Install `PCL `_ 1.7. :: 39 | 40 | sudo apt-get install libpcl-1.7-all 41 | 42 | - Run the ``cmake`` command with the option ``BUILD_CLOUDPROC=ON``, that is: :: 43 | 44 | cmake /path/to/trajopt -DBUILD_CLOUDPROC=ON 45 | 46 | - Install NumPy, SciPy and HDF5. :: 47 | 48 | sudo apt-get install python-numpy python-scipy libhdf5-serial-dev 49 | 50 | - Install h5py and joblib with pip. :: 51 | 52 | sudo pip install h5py joblib 53 | 54 | 55 | Add the following path to your ``PYTHONPATH``:: 56 | 57 | /path/to/lfd 58 | 59 | Now you should be able to run the scripts in the ``examples`` directory. 60 | 61 | 62 | Running the Test Suite 63 | ---------------------- 64 | 65 | You can run the test suite using this command:: 66 | 67 | python -m unittest discover -s /path/to/lfd/test/ 68 | -------------------------------------------------------------------------------- /doc/install_gpu.rst: -------------------------------------------------------------------------------- 1 | .. _install_gpu: 2 | 3 | Additional Installation to Use the GPU 4 | ====================================== 5 | 6 | 7 | Dependencies 8 | ------------ 9 | 10 | - gfortran 11 | - cmake 12 | - boost-python 13 | - `CUDA `_ >= 6.0 14 | - PyCUDA >= 2013.1.1 15 | - `CUDA SciKit `_ >= 0.5.0 16 | - Mako 17 | - `CULA `_ >= R12 (optional) 18 | 19 | 20 | Instructions 21 | ------------ 22 | 23 | - CUDA: 24 | 25 | - Get the CUDA installers from the `CUDA download site `_ and install it. :: 26 | 27 | sudo dpkg -i cuda-repo-ubuntu1204_6.5-14_amd64.deb 28 | sudo apt-get update 29 | 30 | - Then you can install the CUDA Toolkit using apt-get. :: 31 | 32 | sudo apt-get install cuda 33 | 34 | - You should reboot the system afterwards and verify the driver installation with the nvidia-settings utility. 35 | - Set the environment variable ``CUDA_HOME`` to point to the CUDA home directory. 
Also, add the CUDA binary and library directory to your ``PATH`` and ``LD_LIBRARY_PATH``. :: 36 | 37 | export CUDA_HOME=/usr/local/cuda 38 | export PATH=${CUDA_HOME}/bin:${PATH} 39 | export LD_LIBRARY_PATH=${CUDA_HOME}/lib64:$LD_LIBRARY_PATH 40 | 41 | - Install PyCUDA with pip. Make sure that ``PATH`` is defined as root. :: 42 | 43 | sudo PATH=$PATH pip install pycuda 44 | 45 | - Install CUDA SciKit with pip. :: 46 | 47 | sudo pip install pycuda scikits.cuda>=0.5.0a1 Mako 48 | 49 | - CULA (optional): 50 | 51 | - Linear systems can optionally be solved on the GPU using the CULA Dense toolkit. 52 | - Download and install the full edition of `CULA `_. The full edition is required since the free edition only has single precision functions. The full edition is free for academic use, but requires registration. 53 | - As recommended by the installation, set the environment variables ``CULA_ROOT`` and ``CULA_INC_PATH`` to point to the CULA root and include directories. Also, add the CULA library directory to your ``LD_LIBRARY_PATH``. :: 54 | 55 | export CULA_ROOT=/usr/local/cula 56 | export CULA_INC_PATH=$CULA_ROOT/include 57 | export LD_LIBRARY_PATH=${CULA_ROOT}/lib64:$LD_LIBRARY_PATH 58 | 59 | - Build the lfd sources with cmake as you would normally do. :: 60 | 61 | mkdir build_lfd 62 | cd build_lfd 63 | cmake /path/to/lfd 64 | make -j 65 | 66 | To use the compiled libraries from python, add the following path to your ``PYTHONPATH``: :: 67 | 68 | /path/to/build_lfd/lib 69 | 70 | For more information, check out the README from the `tpsopt `_ module. 71 | -------------------------------------------------------------------------------- /doc/lfd.demonstration.rst: -------------------------------------------------------------------------------- 1 | lfd.demonstration package 2 | ========================= 3 | 4 | .. 
automodule:: lfd.demonstration 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Class inheritance diagram 10 | ------------------------- 11 | 12 | .. inheritance-diagram:: lfd.demonstration.demonstration 13 | :parts: 1 14 | 15 | Submodules 16 | ---------- 17 | 18 | lfd.demonstration.demonstration module 19 | -------------------------------------- 20 | 21 | .. automodule:: lfd.demonstration.demonstration 22 | :members: 23 | :undoc-members: 24 | :show-inheritance: 25 | 26 | 27 | -------------------------------------------------------------------------------- /doc/lfd.environment.rst: -------------------------------------------------------------------------------- 1 | lfd.environment package 2 | ======================= 3 | 4 | .. automodule:: lfd.environment 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Class inheritance diagram 10 | ------------------------- 11 | 12 | .. inheritance-diagram:: lfd.environment.environment lfd.environment.robot_world lfd.environment.settings lfd.environment.sim_util lfd.environment.simulation lfd.environment.simulation_object 13 | :parts: 1 14 | 15 | Submodules 16 | ---------- 17 | 18 | lfd.environment.environment module 19 | ---------------------------------- 20 | 21 | .. automodule:: lfd.environment.environment 22 | :members: 23 | :undoc-members: 24 | :show-inheritance: 25 | 26 | lfd.environment.robot_world module 27 | ---------------------------------- 28 | 29 | .. automodule:: lfd.environment.robot_world 30 | :members: 31 | :undoc-members: 32 | :show-inheritance: 33 | 34 | lfd.environment.settings module 35 | ------------------------------- 36 | 37 | .. automodule:: lfd.environment.settings 38 | :members: 39 | :undoc-members: 40 | :show-inheritance: 41 | 42 | lfd.environment.sim_util module 43 | ------------------------------- 44 | 45 | .. 
automodule:: lfd.environment.sim_util 46 | :members: 47 | :undoc-members: 48 | :show-inheritance: 49 | 50 | lfd.environment.simulation module 51 | --------------------------------- 52 | 53 | .. automodule:: lfd.environment.simulation 54 | :members: 55 | :undoc-members: 56 | :show-inheritance: 57 | 58 | lfd.environment.simulation_object module 59 | ---------------------------------------- 60 | 61 | .. automodule:: lfd.environment.simulation_object 62 | :members: 63 | :undoc-members: 64 | :show-inheritance: 65 | 66 | 67 | -------------------------------------------------------------------------------- /doc/lfd.registration.rst: -------------------------------------------------------------------------------- 1 | lfd.registration package 2 | ======================== 3 | 4 | .. automodule:: lfd.registration 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Class inheritance diagram 10 | ------------------------- 11 | 12 | .. inheritance-diagram:: lfd.registration.plotting_openrave lfd.registration.registration lfd.registration.settings lfd.registration.solver lfd.registration.tps lfd.registration.transformation 13 | :parts: 1 14 | 15 | Submodules 16 | ---------- 17 | 18 | lfd.registration.plotting_openrave module 19 | ----------------------------------------- 20 | 21 | .. automodule:: lfd.registration.plotting_openrave 22 | :members: 23 | :undoc-members: 24 | :show-inheritance: 25 | 26 | lfd.registration.registration module 27 | ------------------------------------ 28 | 29 | .. automodule:: lfd.registration.registration 30 | :members: 31 | :undoc-members: 32 | :show-inheritance: 33 | 34 | lfd.registration.settings module 35 | -------------------------------- 36 | 37 | .. automodule:: lfd.registration.settings 38 | :members: 39 | :undoc-members: 40 | :show-inheritance: 41 | 42 | lfd.registration.solver module 43 | ------------------------------ 44 | 45 | .. 
automodule:: lfd.registration.solver 46 | :members: 47 | :undoc-members: 48 | :show-inheritance: 49 | 50 | lfd.registration.tps module 51 | --------------------------- 52 | 53 | .. automodule:: lfd.registration.tps 54 | :members: 55 | :undoc-members: 56 | :show-inheritance: 57 | 58 | lfd.registration.transformation module 59 | -------------------------------------- 60 | 61 | .. automodule:: lfd.registration.transformation 62 | :members: 63 | :undoc-members: 64 | :show-inheritance: 65 | 66 | 67 | -------------------------------------------------------------------------------- /doc/lfd.rst: -------------------------------------------------------------------------------- 1 | lfd package 2 | =========== 3 | 4 | .. automodule:: lfd 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Subpackages 10 | ----------- 11 | 12 | .. toctree:: 13 | 14 | lfd.demonstration 15 | lfd.environment 16 | lfd.registration 17 | lfd.tpsopt 18 | lfd.transfer 19 | lfd.util 20 | 21 | Class inheritance diagram 22 | ------------------------- 23 | 24 | .. inheritance-diagram:: lfd.action_selection lfd.settings 25 | :parts: 1 26 | 27 | Submodules 28 | ---------- 29 | 30 | lfd.action_selection module 31 | --------------------------- 32 | 33 | .. automodule:: lfd.action_selection 34 | :members: 35 | :undoc-members: 36 | :show-inheritance: 37 | 38 | lfd.settings module 39 | ------------------- 40 | 41 | .. automodule:: lfd.settings 42 | :members: 43 | :undoc-members: 44 | :show-inheritance: 45 | 46 | 47 | -------------------------------------------------------------------------------- /doc/lfd.tpsopt.rst: -------------------------------------------------------------------------------- 1 | lfd.tpsopt package 2 | ================== 3 | 4 | .. automodule:: lfd.tpsopt 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Class inheritance diagram 10 | ------------------------- 11 | 12 | .. 
inheritance-diagram:: lfd.tpsopt.batchtps lfd.tpsopt.clouds lfd.tpsopt.culinalg_exts lfd.tpsopt.precompute lfd.tpsopt.registration lfd.tpsopt.settings lfd.tpsopt.tps lfd.tpsopt.transformations 13 | :parts: 1 14 | 15 | Submodules 16 | ---------- 17 | 18 | lfd.tpsopt.batchtps module 19 | -------------------------- 20 | 21 | .. automodule:: lfd.tpsopt.batchtps 22 | :members: 23 | :undoc-members: 24 | :show-inheritance: 25 | 26 | lfd.tpsopt.clouds module 27 | ------------------------ 28 | 29 | .. automodule:: lfd.tpsopt.clouds 30 | :members: 31 | :undoc-members: 32 | :show-inheritance: 33 | 34 | lfd.tpsopt.culinalg_exts module 35 | ------------------------------- 36 | 37 | .. automodule:: lfd.tpsopt.culinalg_exts 38 | :members: 39 | :undoc-members: 40 | :show-inheritance: 41 | 42 | lfd.tpsopt.precompute module 43 | ---------------------------- 44 | 45 | .. automodule:: lfd.tpsopt.precompute 46 | :members: 47 | :undoc-members: 48 | :show-inheritance: 49 | 50 | lfd.tpsopt.registration module 51 | ------------------------------ 52 | 53 | .. automodule:: lfd.tpsopt.registration 54 | :members: 55 | :undoc-members: 56 | :show-inheritance: 57 | 58 | lfd.tpsopt.settings module 59 | -------------------------- 60 | 61 | .. automodule:: lfd.tpsopt.settings 62 | :members: 63 | :undoc-members: 64 | :show-inheritance: 65 | 66 | lfd.tpsopt.tps module 67 | --------------------- 68 | 69 | .. automodule:: lfd.tpsopt.tps 70 | :members: 71 | :undoc-members: 72 | :show-inheritance: 73 | 74 | lfd.tpsopt.transformations module 75 | --------------------------------- 76 | 77 | .. automodule:: lfd.tpsopt.transformations 78 | :members: 79 | :undoc-members: 80 | :show-inheritance: 81 | 82 | 83 | -------------------------------------------------------------------------------- /doc/lfd.transfer.rst: -------------------------------------------------------------------------------- 1 | lfd.transfer package 2 | ==================== 3 | 4 | .. 
automodule:: lfd.transfer 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Class inheritance diagram 10 | ------------------------- 11 | 12 | .. inheritance-diagram:: lfd.transfer.planning lfd.transfer.registration_transfer lfd.transfer.settings lfd.transfer.transfer 13 | :parts: 1 14 | 15 | Submodules 16 | ---------- 17 | 18 | lfd.transfer.planning module 19 | ---------------------------- 20 | 21 | .. automodule:: lfd.transfer.planning 22 | :members: 23 | :undoc-members: 24 | :show-inheritance: 25 | 26 | lfd.transfer.registration_transfer module 27 | ----------------------------------------- 28 | 29 | .. automodule:: lfd.transfer.registration_transfer 30 | :members: 31 | :undoc-members: 32 | :show-inheritance: 33 | 34 | lfd.transfer.settings module 35 | ---------------------------- 36 | 37 | .. automodule:: lfd.transfer.settings 38 | :members: 39 | :undoc-members: 40 | :show-inheritance: 41 | 42 | lfd.transfer.transfer module 43 | ---------------------------- 44 | 45 | .. automodule:: lfd.transfer.transfer 46 | :members: 47 | :undoc-members: 48 | :show-inheritance: 49 | 50 | 51 | -------------------------------------------------------------------------------- /doc/lfd.util.rst: -------------------------------------------------------------------------------- 1 | lfd.util package 2 | ================ 3 | 4 | .. automodule:: lfd.util 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Class inheritance diagram 10 | ------------------------- 11 | 12 | .. inheritance-diagram:: lfd.util.colorize lfd.util.util 13 | :parts: 1 14 | 15 | Submodules 16 | ---------- 17 | 18 | lfd.util.colorize module 19 | ------------------------ 20 | 21 | .. automodule:: lfd.util.colorize 22 | :members: 23 | :undoc-members: 24 | :show-inheritance: 25 | 26 | lfd.util.util module 27 | -------------------- 28 | 29 | .. 
automodule:: lfd.util.util 30 | :members: 31 | :undoc-members: 32 | :show-inheritance: 33 | 34 | 35 | -------------------------------------------------------------------------------- /doc/misc.rst: -------------------------------------------------------------------------------- 1 | .. _misc: 2 | 3 | Miscellaneous 4 | ============= 5 | 6 | 7 | Settings files 8 | -------------- 9 | The ``lfd`` package and its subpackages have a ``settings.py`` file with setting variables. 10 | You can easily override these variables by creating a package ``lfd_settings`` with the same structure as ``lfd``. 11 | The variable that should be overriden can be defined in the corresponding ``settings.py`` file of the ``lfd_settings`` package. 12 | 13 | This is best illustrated with an example. 14 | Suppose you want to override the ``EM_ITER`` variable from the :mod:`lfd.registration.settings` module to be ``5`` instead. 15 | First, you can generate the ``lfd_settings`` package with the provided script: :: 16 | 17 | cd /path/to/lfd 18 | python scripts/make_lfd_settings_package.py ../lfd_settings/lfd_settings 19 | 20 | Remember to add the ``lfd_settings`` package to your ``PYTHONPATH``: :: 21 | 22 | export PYTHONPATH=/path/to/lfd_settings:$PYTHONPATH 23 | 24 | Then, in the file ``/path/to/lfd_settings/lfd_settings/registration/settings.py``, add python code that overrides the ``EM_ITER`` variable: :: 25 | 26 | EM_ITER = 5 27 | 28 | 29 | Downloading test data 30 | --------------------- 31 | 32 | First navigate to the ``bigdata`` directory, and then run the ``download.py`` script. 33 | 34 | 35 | Cache files 36 | ----------- 37 | By default, some functions cache results in the default cache directory ``/path/to/lfd/.cache/``. If you are running out of space, consider deleting this directory. 
#!/bin/sh
# Open the generated Sphinx HTML documentation in Google Chrome.
# On Linux the browser binary is `google-chrome`; on other platforms
# (assumed macOS) the Chrome application bundle is used via `open -a`.

index=_build/html/index.html

# Quote the command substitution and the path so that nothing word-splits
# if the working directory ever contains spaces.
if [ "$(uname)" = Linux ]
then
    google-chrome "$index"
else
    open -a "Google Chrome" "$index"
fi
# Simulation contents: static PR2 and table, plus a dynamic rope and two
# dynamic cylinders that will be dropped.
sim_objs = [
    XmlSimulationObject("robots/pr2-beta-static.zae", dynamic=False),
    BoxSimulationObject("table", [1, 0, table_height-.1], [.85, .85, .1], dynamic=False),
    RopeSimulationObject("rope", init_rope_nodes, rope_params),
    CylinderSimulationObject("cyl0", cyl_pos0, cyl_radius, cyl_height, dynamic=True),
    CylinderSimulationObject("cyl1", cyl_pos1, cyl_radius, cyl_height, dynamic=True),
]

sim = DynamicSimulation()
sim.add_objects(sim_objs)
sim.create_viewer()

# Raise the torso and move both arms out of the way of the scene.
sim.robot.SetDOFValues([0.25], [sim.robot.GetJoint('torso_lift_joint').GetJointIndex()])
sim_util.reset_arms_to_side(sim)

# Rotate both cylinders by 90 degrees about the x-axis.  SetTransform needs
# to be used on the Bullet object, not on the openrave body.
for i in range(2):
    bt_cyl = sim.bt_env.GetObjectByName('cyl%d'%i)
    rot90 = openravepy.matrixFromAxisAngle(np.array([np.pi/2,0,0]))
    rot90[:3,3] = bt_cyl.GetTransform()[:3,3]
    bt_cyl.SetTransform(rot90)
sim.update()

# Let physics run until the objects come to rest, then hand control to the viewer.
sim.settle(max_steps=1000)
sim.viewer.Idle()
def get_move_traj(t_start, t_end, start_fixed):
    """Plan an arm trajectory moving the gripper between two positions.

    The gripper keeps the module-level orientation ``R`` throughout; the
    ``n_steps`` intermediate poses are interpolated between the endpoints.

    Args:
        t_start: 3-vector, starting gripper position.
        t_end: 3-vector, ending gripper position.
        start_fixed: if True, the first waypoint is pinned to the robot's
            current configuration.

    Returns:
        (n_steps, n_arm_dofs) joint-angle trajectory from the planner.
    """
    def to_hmat(translation):
        # Assemble a 4x4 homogeneous transform from R and a translation.
        return np.r_[np.c_[R, translation], np.c_[0,0,0,1]]

    # Interpolate gripper poses between the start and end transforms.
    waypoint_hmats = np.asarray(resampling.interp_hmats(
        np.arange(n_steps), np.r_[0, n_steps-1], [to_hmat(t_start), to_hmat(t_end)]))

    # Seed the optimizer with the current arm configuration at every step.
    current_dofs = sim.robot.GetManipulator(manip_name).GetArmDOFValues()
    seed_traj = np.tile(current_dofs, (n_steps,1))

    traj, _, _ = planning.plan_follow_traj(
        sim.robot, manip_name, ee_link, waypoint_hmats, seed_traj,
        start_fixed=start_fixed, beta_rot=10000.0)
    return traj
def create_cylinder_grid(cyl_pos0, cyl_pos1, cyl_pos2, cyl_radius, cyl_height):
    """Creates a 5x5 grid of dynamic cylinders spanning the parallelogram
    with corner cyl_pos0 and adjacent corners cyl_pos1 and cyl_pos2.

    Args:
        cyl_pos0: 3-vector, origin corner of the grid.
        cyl_pos1: 3-vector, corner defining the first spanning edge.
        cyl_pos2: 3-vector, corner defining the second spanning edge.
        cyl_radius: radius shared by all cylinders.
        cyl_height: height shared by all cylinders.

    Returns:
        List of 25 CylinderSimulationObject named "cyl0" .. "cyl24".
    """
    # 25 (u, v) parameter pairs uniformly covering the unit square.
    sample_grid = np.array(np.meshgrid(np.linspace(0,1,5), np.linspace(0,1,5))).T.reshape((-1,2))
    # Bilinear placement: pos0 + u*(pos1 - pos0) + v*(pos2 - pos0).
    cyl_positions = cyl_pos0 + sample_grid[:,0][:,None].dot(cyl_pos1[None,:] - cyl_pos0[None,:]) + sample_grid[:,1][:,None].dot(cyl_pos2[None,:] - cyl_pos0[None,:])
    cyl_sim_objs = []
    for (i,cyl_pos) in enumerate(cyl_positions):
        cyl_sim_objs.append(CylinderSimulationObject("cyl%i"%i, cyl_pos, cyl_radius, cyl_height, dynamic=True))
    return cyl_sim_objs

def color_cylinders(cyl_sim_objs):
    """Gives each cylinder a random diffuse color (same color for all of a
    cylinder's link geometries)."""
    for sim_obj in cyl_sim_objs:
        color = np.random.random(3)
        for bt_obj in sim_obj.get_bullet_objects():
            for link in bt_obj.GetKinBody().GetLinks():
                for geom in link.GetGeometries():
                    geom.SetDiffuseColor(color)

def create_rope(rope_poss, capsule_height=.02):
    """Creates a rope simulation object whose nodes follow the polyline
    rope_poss with approximately one node per capsule_height of arc length.

    Args:
        rope_poss: (K, 3) array of waypoints the rope should pass through.
        capsule_height: desired spacing between consecutive rope nodes.

    Returns:
        A RopeSimulationObject named "rope".
    """
    # Cumulative polyline arc length measured in units of capsule_height...
    rope_pos_dists = np.linalg.norm(np.diff(rope_poss, axis=0), axis=1)
    xp = np.r_[0, np.cumsum(rope_pos_dists/capsule_height)]
    # ...so that interpolating at integer parameters yields evenly spaced nodes.
    init_rope_nodes = mu.interp2d(np.arange(xp[-1]+1), xp, rope_poss)
    rope_sim_obj = RopeSimulationObject("rope", init_rope_nodes)
    return rope_sim_obj

def create_augmented_traj(robot, pick_pos, drop_pos, pick_R, drop_R, move_height, pos_displacement_per_step=.02):
    """Builds a right-gripper pick-and-place trajectory and plans arm joints for it.

    The end-effector visits six keyframes (hover over pick, descend to pick,
    lift, hover over drop, descend to drop, lift), closing the gripper at the
    pick pose and opening it at the drop pose.  The keyframe trajectory is
    then resampled to a roughly constant Cartesian speed and an arm joint
    trajectory is planned to follow it.

    Args:
        robot: openrave robot used for planning.
        pick_pos, drop_pos: 3-vectors, gripper positions for pick and drop.
        pick_R, drop_R: 3x3 gripper orientations used at pick and at drop.
        move_height: vertical clearance used when moving above the poses.
        pos_displacement_per_step: target gripper displacement per timestep
            used for resampling.

    Returns:
        An AugmentedTrajectory with lr2ee_traj, lr2finger_traj,
        lr2open/close_finger_traj and a planned lr2arm_traj for 'r'.
    """
    # Six keyframe positions: above pick, pick, above pick, above drop, drop, above drop.
    pos_traj = np.array([pick_pos + np.r_[0,0,move_height],
                         pick_pos,
                         pick_pos + np.r_[0,0,move_height],
                         drop_pos + np.r_[0,0,move_height],
                         drop_pos,
                         drop_pos + np.r_[0,0,move_height]])
    R_traj = np.array([pick_R, pick_R, pick_R, drop_R, drop_R, drop_R])
    # Assemble 4x4 pose matrices from the rotations and positions.
    ee_traj = np.empty((len(pos_traj), 4, 4))
    ee_traj[:] = np.eye(4)
    ee_traj[:,:3,3] = pos_traj
    ee_traj[:,:3,:3] = R_traj
    # Gripper opens at the drop keyframe (index 4), closes at the pick keyframe (index 1).
    open_finger_traj = np.array([False, False, False, False, True, False])
    close_finger_traj = np.array([False, True, False, False, False, False])
    open_finger_value = sim_util.get_binary_gripper_angle(True)
    closed_finger_value = sim_util.get_binary_gripper_angle(False)
    finger_traj = np.array([open_finger_value, open_finger_value, closed_finger_value, closed_finger_value, closed_finger_value, open_finger_value])[:,None]

    lr = 'r' # use right arm/gripper
    aug_traj = AugmentedTrajectory(lr2ee_traj={lr: ee_traj}, lr2finger_traj={lr: finger_traj}, lr2open_finger_traj={lr: open_finger_traj}, lr2close_finger_traj={lr: close_finger_traj})

    # resample augmented trajectory according to the position displacement,
    # so consecutive steps move the gripper about pos_displacement_per_step
    pos_dists = np.linalg.norm(np.diff(pos_traj, axis=0), axis=1)
    new_times = np.r_[0, np.cumsum(pos_dists/pos_displacement_per_step)]
    timesteps_rs = np.interp(np.arange(new_times[-1]+1), new_times, np.arange(len(new_times)))
    aug_traj_rs = aug_traj.get_resampled_traj(timesteps_rs)

    # do motion planning for aug_traj_rs and attach the resulting joint
    # trajectory to the augmented trajectory
    manip_name = {"l":"leftarm", "r":"rightarm"}[lr]
    ee_link_name = "%s_gripper_tool_frame"%lr
    ee_link = robot.GetLink(ee_link_name)
    dof_vals = robot.GetManipulator(manip_name).GetArmDOFValues()
    init_traj = np.tile(dof_vals, (aug_traj_rs.n_steps,1))
    arm_traj, _, _ = planning.plan_follow_traj(robot, manip_name, ee_link, aug_traj_rs.lr2ee_traj['r'], init_traj, no_collision_cost_first=True)
    aug_traj_rs.lr2arm_traj[lr] = arm_traj

    return aug_traj_rs
def generate_cloud(x_center_pert=0, max_noise=0.02):
    """Generate a 40 cm by 60 cm planar point cloud on the table.

    Args:
        x_center_pert: magnitude of an optional perturbation along the
            x-axis, largest at the cloud's center line (y == 0) and tapering
            linearly to zero at the y-edges.
        max_noise: half-width of the uniform noise added to every coordinate.

    Returns:
        (N, 3) array of points lying just above table height.
    """
    # Regular 21 x 31 grid over [-.2, .2] x [-.3, .3], flattened to (N, 2).
    plane = np.array(np.meshgrid(np.linspace(-.2,.2,21), np.linspace(-.3,.3,31))).T.reshape((-1,2))
    # Lift to 3D and center the cloud at x = .5 just above the table surface.
    points = np.c_[plane, np.zeros(len(plane))] + np.array([.5, 0, table_height+max_noise])
    # Tent-shaped bump along x, peaking where y == 0 and vanishing at |y| == 0.3.
    bump = x_center_pert * np.c_[(0.3 - np.abs(points[:,1]-0))/0.3, np.zeros((len(points),2))]
    # Uniform noise in [-max_noise, max_noise] on every axis.
    jitter = (np.random.random((len(points), 3)) - 0.5) * 2 * max_noise
    return points + bump + jitter
env.sim.remove_objects([rope_sim_obj]) 24 | 25 | pick_pos = rope_poss[0] + .1 * (rope_poss[1] - rope_poss[0]) 26 | drop_pos = rope_poss[3] + .1 * (rope_poss[2] - rope_poss[3]) + np.r_[0, .2, 0] 27 | pick_R = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]]) 28 | drop_R = np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]]) 29 | move_height = .2 30 | aug_traj = create_augmented_traj(env.sim.robot, pick_pos, drop_pos, pick_R, drop_R, move_height) 31 | 32 | demo = Demonstration("rope_demo", scene_state, aug_traj) 33 | return demo 34 | 35 | def main(): 36 | # define simulation objects 37 | table_height = 0.77 38 | sim_objs = [] 39 | sim_objs.append(XmlSimulationObject("robots/pr2-beta-static.zae", dynamic=False)) 40 | sim_objs.append(BoxSimulationObject("table", [1, 0, table_height-.1], [.85, .85, .1], dynamic=False)) 41 | 42 | # initialize simulation world and environment 43 | sim = DynamicSimulationRobotWorld() 44 | sim.add_objects(sim_objs) 45 | sim.create_viewer() 46 | 47 | sim.robot.SetDOFValues([0.25], [sim.robot.GetJoint('torso_lift_joint').GetJointIndex()]) 48 | sim.robot.SetDOFValues([1.25], [sim.robot.GetJoint('head_tilt_joint').GetJointIndex()]) # move head down so it can see the rope 49 | sim_util.reset_arms_to_side(sim) 50 | 51 | env = environment.LfdEnvironment(sim, sim, downsample_size=0.025) 52 | 53 | demo_rope_poss = np.array([[.2, -.2, table_height+0.006], 54 | [.8, -.2, table_height+0.006], 55 | [.8, .2, table_height+0.006], 56 | [.2, .2, table_height+0.006]]) 57 | demo = create_rope_demo(env, demo_rope_poss) 58 | 59 | test_rope_poss = np.array([[.2, -.2, table_height+0.006], 60 | [.5, -.4, table_height+0.006], 61 | [.8, .0, table_height+0.006], 62 | [.8, .2, table_height+0.006], 63 | [.6, .0, table_height+0.006], 64 | [.4, .2, table_height+0.006], 65 | [.2, .2, table_height+0.006]]) 66 | test_rope_sim_obj = create_rope(test_rope_poss) 67 | sim.add_objects([test_rope_sim_obj]) 68 | sim.settle() 69 | test_scene_state = env.observe_scene() 70 | 71 | reg_factory = 
class ActionSelection(object):
    """Abstract base for strategies that rank demonstrations for a scene."""

    def __init__(self, registration_factory):
        """Inits ActionSelection

        Args:
            registration_factory: RegistrationFactory used to compare the
                scene against candidate demonstrations
        """
        self.registration_factory = registration_factory

    def plan_agenda(self, scene_state):
        """Plans an agenda of demonstrations for the given scene_state

        Args:
            scene_state: SceneState of the scene for which the agenda is
                returned

        Returns:
            An agenda, which is a list of demonstration names, and a list of
            the values of the respective demonstrations

        Raises:
            NotImplementedError: this base class never implements it;
                subclasses must override.
        """
        raise NotImplementedError
class FeatureActionSelection(ActionSelection):
    """Action selection that scores demonstrations with a learned linear
    feature model and searches over simulated action outcomes."""

    def __init__(self, registration_factory, features, actions, demos,
                 width, depth, simulator=None, lfd_env=None):
        """Inits FeatureActionSelection

        Args:
            registration_factory: RegistrationFactory
            features: feature extractor providing features(), weights and w0
            actions: dict whose keys name the candidate actions
            demos: dict mapping action name to its Demonstration
            width: beam width used by the search
            depth: beam depth used by the search
            simulator: trajectory transferer used to simulate an action
            lfd_env: LfdEnvironment in which simulated trajectories execute
        """
        self.features = features
        self.actions = actions.keys()
        # self.features.set_name2ind(self.actions)
        self.demos = demos
        self.width = width
        self.depth = depth
        self.transferer = simulator
        self.lfd_env = lfd_env
        super(FeatureActionSelection, self).__init__(registration_factory)

    def plan_agenda(self, scene_state, timestep):
        """Beam-searches simulated action outcomes and returns the best agenda.

        Args:
            scene_state: SceneState to start the search from
            timestep: current timestep, forwarded to the feature extractor

        Returns:
            Whatever beam_search returns for the given scene and evaluator.
        """
        def evaluator(state, ts):
            # Score all actions for this state.  If feature extraction fails,
            # return -inf scores so the state is never expanded.  Catch only
            # Exception (not a bare `except:`) so KeyboardInterrupt and
            # SystemExit still propagate to the caller.
            try:
                score = np.dot(self.features.features(state, timestep=ts), self.features.weights) + self.features.w0
            except Exception:
                return -np.inf*np.r_[np.ones(len(self.features.weights))]
            return score

        def simulate_transfer(state, action, next_state_id):
            # Transfer the demo trajectory to the state and execute it
            # headlessly, then observe the resulting scene.
            aug_traj = self.transferer.transfer(self.demos[action], state, plotting=False)
            self.lfd_env.execute_augmented_trajectory(aug_traj, step_viewer=0)
            result_state = self.lfd_env.observe_scene()

            # Get the rope simulation object and determine if it's a knot
            for sim_obj in self.lfd_env.sim.sim_objs:
                if isinstance(sim_obj, simulation_object.RopeSimulationObject):
                    rope_sim_obj = sim_obj
                    break
            rope_knot = is_knot(rope_sim_obj.rope.GetControlPoints())
            return (result_state, next_state_id, rope_knot)

        return beam_search(scene_state, timestep, self.features.src_ctx.seg_names, simulate_transfer,
                           evaluator, self.lfd_env.sim, width=self.width,
                           depth=self.depth)
from __future__ import division

import numpy as np
from lfd.rapprentice import eval_util
from lfd.demonstration import demonstration
from lfd.environment import simulation_object

class LfdEnvironment(object):
    def __init__(self, world, sim, downsample_size=0):
        """Inits LfdEnvironment

        Args:
            world: RobotWorld
            sim: StaticSimulation that contains a robot
            downsample_size: if downsample_size is positive, the clouds are downsampled to a voxel size of downsample_size, else they are not downsampled
        """
        self.world = world
        self.sim = sim
        self.downsample_size = downsample_size

    def execute_augmented_trajectory(self, aug_traj, step_viewer=1, interactive=False, sim_callback=None, check_feasible=False):
        """Executes an AugmentedTrajectory, splitting it into mini-segments at
        every gripper open/close event.

        Returns:
            (feasible, misgrasp): feasible is False if any mini-segment fails
            eval_util.traj_is_safe; misgrasp is True if the most recent close
            of either gripper created no new grasp constraint.
        """
        # Mark every timestep at which either gripper opens or closes; these
        # are the boundaries between mini-segments.
        open_or_close_finger_traj = np.zeros(aug_traj.n_steps, dtype=bool)
        if aug_traj.lr2open_finger_traj is not None:
            for lr in aug_traj.lr2open_finger_traj.keys():
                open_or_close_finger_traj = np.logical_or(open_or_close_finger_traj, aug_traj.lr2open_finger_traj[lr])
        if aug_traj.lr2close_finger_traj is not None:
            for lr in aug_traj.lr2close_finger_traj.keys():
                open_or_close_finger_traj = np.logical_or(open_or_close_finger_traj, aug_traj.lr2close_finger_traj[lr])
        open_or_close_inds = np.where(open_or_close_finger_traj)[0]

        traj, dof_inds = aug_traj.get_full_traj(self.sim.robot)
        feasible = True
        misgraspl = False
        misgraspr = False
        lr2gripper_open = {'l':True, 'r':True}
        # Iterate over mini-segments [start_ind, end_ind); each starts with a
        # possible gripper event, then executes the arm motion.
        for (start_ind, end_ind) in zip(np.r_[0, open_or_close_inds], np.r_[open_or_close_inds+1, aug_traj.n_steps]):
            if aug_traj.lr2open_finger_traj is not None:
                for lr in aug_traj.lr2open_finger_traj.keys():
                    if aug_traj.lr2open_finger_traj[lr][start_ind]:
                        # Open to the finger-joint value in the trajectory, if
                        # that joint is part of the planned DOFs.
                        target_val = None
                        joint_ind = self.sim.robot.GetJoint("%s_gripper_l_finger_joint"%lr).GetDOFIndex()
                        if joint_ind in dof_inds:
                            target_val = traj[start_ind, dof_inds.index(joint_ind)]
                        self.world.open_gripper(lr, target_val=target_val, step_viewer=step_viewer)
                        lr2gripper_open[lr] = True
            if aug_traj.lr2close_finger_traj is not None:
                for lr in aug_traj.lr2close_finger_traj.keys():
                    if aug_traj.lr2close_finger_traj[lr][start_ind]:
                        # A successful grasp adds a new simulation constraint;
                        # if the constraint count is unchanged the close missed.
                        n_cnts = len(self.sim.constraints[lr])
                        self.world.close_gripper(lr, step_viewer=step_viewer)
                        if len(self.sim.constraints[lr]) == n_cnts and lr == 'l':
                            misgraspl = True
                        elif lr == 'l':
                            misgraspl = False
                        elif len(self.sim.constraints[lr]) == n_cnts and lr=='r':
                            misgraspr = True
                        else:
                            misgraspr = False
                        #misgrasp |= len(self.sim.constraints[lr]) == n_cnts
                        lr2gripper_open[lr] = False
            misgrasp = misgraspl or misgraspr
            # don't execute trajectory for finger joint if the corresponding gripper is closed
            active_inds = np.ones(len(dof_inds), dtype=bool)
            for lr in 'lr':
                if not lr2gripper_open[lr]:
                    joint_ind = self.sim.robot.GetJoint("%s_gripper_l_finger_joint"%lr).GetDOFIndex()
                    if joint_ind in dof_inds:
                        active_inds[dof_inds.index(joint_ind)] = False
            miniseg_traj = traj[start_ind:end_ind, active_inds]
            miniseg_dof_inds = list(np.asarray(dof_inds)[active_inds])
            full_traj = (miniseg_traj, miniseg_dof_inds)
            feasible &= eval_util.traj_is_safe(self.sim, full_traj, 0)
            if check_feasible and not feasible:
                # Stop early: caller only wanted feasibility, not execution.
                break
            self.world.execute_trajectory(full_traj, step_viewer=step_viewer, interactive=interactive, sim_callback=sim_callback)
        return feasible, misgrasp

    def observe_scene(self):
        """Returns a SceneState built from the world's full point cloud."""
        full_cloud = self.world.observe_cloud()
        return demonstration.SceneState(full_cloud, downsample_size=self.downsample_size)

class GroundTruthRopeLfdEnvironment(LfdEnvironment):
    def __init__(self, world, sim, upsample=0, upsample_rad=1, downsample_size=0):
        """Inits GroundTruthRopeLfdEnvironment

        Args:
            world: RobotWorld
            sim: DynamicSimulation that should containing exactly one rope when observe_scene is called
            upsample: number of points to resample the rope to (0 = no upsampling)
            upsample_rad: radial upsampling factor passed to the scene state
            downsample_size: if downsample_size is positive, the clouds are downsampled to a voxel size of downsample_size, else they are not downsampled
        """
        super(GroundTruthRopeLfdEnvironment, self).__init__(world, sim, downsample_size=downsample_size)

        self.upsample = upsample
        self.upsample_rad = upsample_rad

    def observe_scene(self):
        """Builds the scene state from the rope's true control points instead
        of a sensed point cloud; requires exactly one rope in the simulation."""
        rope_sim_objs = [sim_obj for sim_obj in self.sim.sim_objs if isinstance(sim_obj, simulation_object.RopeSimulationObject)]
        assert len(rope_sim_objs) == 1
        rope_sim_obj = rope_sim_objs[0]
        return demonstration.GroundTruthRopeSceneState(rope_sim_obj.rope.GetControlPoints(),
                                                       rope_sim_obj.rope_params.radius,
                                                       upsample=self.upsample,
                                                       upsample_rad=self.upsample_rad,
                                                       downsample_size=self.downsample_size)
class RealRobotWorld(RobotWorld):
    """RobotWorld backed by a physical robot.

    Every operation is currently an unimplemented placeholder; subclasses or
    a future implementation must override all of them.
    """

    def __init__(self):
        """Not implemented: connect to the real robot."""
        raise NotImplementedError

    def observe_cloud(self):
        """Not implemented: capture a point cloud from the real sensors."""
        raise NotImplementedError

    def open_gripper(self):
        """Not implemented: open the real gripper."""
        raise NotImplementedError

    def close_gripper(self):
        """Not implemented: close the real gripper."""
        raise NotImplementedError

    def execute_trajectory(self):
        """Not implemented: execute a trajectory on the real robot."""
        raise NotImplementedError
def estimate_performance(results_file):
    """Tally knot-tying successes recorded in an evaluation results file.

    Args:
        results_file: an open h5py.File of results, or a path to one. Each
            top-level key (except 'args') is a task whose last step's
            'next_state'/'rope_nodes' cloud is classified with isKnot.

    Returns:
        (num_knots, knot_inds, not_inds): number of successful tasks, the
        task indices that ended in a knot, and those that did not.
    """
    # If we were handed a path, open it ourselves and remember to close it
    # (the original leaked the handle). isinstance replaces `type(...) is str`.
    opened_here = False
    if isinstance(results_file, str):
        results_file = h5py.File(results_file, 'r')
        opened_here = True

    try:
        num_knots = 0
        knot_inds = []
        not_inds = []
        ctr = 0
        n_checks = len(results_file) - 1  # exclude the 'args' entry
        for (i_task, task_info) in results_file.iteritems():
            sys.stdout.write("\rchecking task {} / {}        ".format(ctr, n_checks))
            sys.stdout.flush()
            ctr += 1
            if str(i_task) == 'args':
                continue
            # The final step's resulting rope cloud decides success.
            N_steps = len(task_info)
            final_cld = task_info[str(N_steps-1)]['next_state']['rope_nodes'][:]
            if isKnot(final_cld):
                knot_inds.append(int(i_task))
                num_knots += 1
            else:
                not_inds.append(int(i_task))

        print

        return num_knots, knot_inds, not_inds
    finally:
        # Only close files this function opened; caller-owned handles stay open.
        if opened_here:
            results_file.close()
# Map of color names to their ANSI foreground codes (add 10 for background).
color2num = {
    'gray': 30,
    'red': 31,
    'green': 32,
    'yellow': 33,
    'blue': 34,
    'magenta': 35,
    'cyan': 36,
    'white': 37,
    'crimson': 38,
}


def colorize(string, color, bold=False, highlight=False):
    """Wrap *string* in ANSI escape codes so it renders colored in a terminal.

    Args:
        string: text to wrap
        color: a key of color2num
        bold: also apply the bold attribute
        highlight: color the background instead of the foreground (+10)

    Returns:
        The string surrounded by the escape sequence and a reset code.
    """
    code = color2num[color] + (10 if highlight else 0)
    attrs = [str(code)]
    if bold:
        attrs.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attrs), string)
class Margin(object):
    """Base class for computing margins in the max-margin optimization.

    The default margin is uniform: every action is separated from the expert
    action by 1. Subclasses override get_margins/get_ind for structured
    margins.
    """

    def __init__(self, actions):
        # actions: sequence of action identifiers; N is the margin dimension.
        self.actions = actions
        self.N = len(actions)

    def get_margins(self, state, a):
        """Return the margin vector for expert action *a* in *state*.

        Base implementation ignores both arguments and returns a unit margin
        for every action.
        """
        return np.ones(self.N)

    def get_ind(self, s):
        """Map action name *s* to its index; subclasses must implement."""
        raise NotImplementedError
    def __init__(self, ID, state, child_vals, child_ids=None, parent = None):
        """Create a search node and register it in the class-level id_map.

        Args:
            ID: unique node id (key into SearchNode.id_map)
            state: the (domain-specific) state this node represents
            child_vals: 1-D array of estimated values, one per action
                (indexed consistently with SearchNode.ind2action)
            child_ids: ids to assign to the children; fresh UIDs are
                allocated when None
            parent: parent node; a root node is its own parent
        """
        self.ID = ID
        self.state = state
        self.child_vals = child_vals
        # value is filled in by subclasses (e.g. MaxNode takes the max child value).
        self.value = None
        # Register globally so nodes can be looked up by id during the search.
        SearchNode.id_map[ID] = self
        self.n_expands = 0
        # Per-child expansion counts; used to prefer less-expanded children.
        self.child_expands = np.zeros(child_vals.shape[0])

        if child_ids is None:
            self.child_ids = [SearchNode.get_UID() for _ in child_vals]
        else:
            self.child_ids = child_ids

        # Reverse lookup: child id -> index into child_vals/child_expands.
        self.child_id2ind = dict([(C_ID, i) for i, C_ID in enumerate(self.child_ids)])


        # A root node points at itself so parent traversal always terminates.
        if parent is None:
            self.parent = self
        else:
            self.parent = parent
class ExpandingNode(SearchNode):
    """
    placeholder to cover nodes that are currently being expanded
    """
    def __init__(self, ID, parent):
        # Deliberately does NOT call SearchNode.__init__: no child bookkeeping
        # is needed, only registration in id_map so lookups succeed mid-expand.
        self.ID = ID
        self.parent = parent
        SearchNode.id_map[ID] = self

class MaxNode(SearchNode):
    """Search node whose value is the maximum of its children's values."""
    def __init__(self, ID, state, child_vals, child_ids=None, parent = None):
        SearchNode.__init__(self, ID, state, child_vals, child_ids=child_ids, parent=parent)
        self.value = np.max(self.child_vals)

    def update(self, v, c_id):
        """Propagate child c_id's new value v up toward the root.

        The node's own value is recomputed over the children tied for the
        highest expansion count, so unexpanded estimates don't dominate.
        """
        SearchNode.update(self, v, c_id)
        expanded_inds = self.child_expands == np.max(self.child_expands)
        old_value = self.value  # NOTE(review): old_value is computed but unused
        self.value = np.max(self.child_vals[expanded_inds])
        if self.parent != self:
            # Not the root: keep propagating upward.
            self.parent.update(self.value, self.ID)

    def select_best(self):
        """Return ([best action], [best value]) among the most-expanded children.

        Children with fewer expansions are shifted down by the max value so
        they cannot win the argmax.
        """
        expanded_inds = self.child_expands == np.max(self.child_expands)
        shifted_vals = self.child_vals + np.max(self.child_vals) * (expanded_inds - 1)
        return [SearchNode.ind2action[np.argmax(shifted_vals)]], [np.max(self.child_vals)]

# env is for resetting the state at each step
def beam_search(start_state, timestep, actions, expander, evaluator, sim, width=1, depth=1):
    """Beam search over action sequences, simulating each expansion.

    Args:
        start_state: initial (domain) state
        timestep: timestep offset passed to the evaluator
        actions: ordered action names (becomes SearchNode.ind2action)
        expander: callable(state, action, child_id) -> (next_state, child_id, is_goal)
        evaluator: callable(state, timestep) -> per-action value array
        sim: simulation with get_state/set_state, snapshotted per node
        width: beam width
        depth: maximum search depth

    Returns:
        (root.select_best(), goal_found): the best first action/value at the
        root and whether a goal state was reached during the search.
    """
    # Simulator snapshots keyed by node id, so expansion can restore the
    # exact simulation state of any parent in the beam.
    id2simstate = {}
    SearchNode.set_actions(actions)
    root_id = SearchNode.get_UID()
    id2simstate[root_id] = sim.get_state()
    root_vals = evaluator(start_state, timestep)
    root = MaxNode(root_id, start_state, root_vals)
    agenda = [root]
    goal_found = False
    for d in range(depth):
        # Gather candidate (value, action, parent_id) triples from the beam.
        next_states = []
        for s in agenda:
            if len(agenda) >= 3:
                next_states.extend(s.best_k(width/2))#only expand at most half from the same child
            else:
                next_states.extend(s.best_k(width))#unless there are only a couple options
        agenda = sorted(next_states, key=lambda x:-x[0])[:width]
        expand_res = []
        for v, a, P_ID in agenda:
            print 'parent ID:\t{}'.format(P_ID)
            parent_node = SearchNode.id_map[P_ID]
            parent_state = parent_node.state
            child_id = parent_node.child_ids[SearchNode.action2ind[a]]
            # Restore the parent's simulator state before expanding from it.
            sim.set_state(id2simstate[P_ID])
            expand_res.append(expander(parent_state, a, child_id))
            id2simstate[child_id] = sim.get_state()
            # Placeholder so the child id resolves while results are pending.
            child_node = ExpandingNode(child_id, parent_node)
        agenda = []
        for res in expand_res:
            next_s, next_s_id, is_goal = res
            parent = SearchNode.id_map[next_s_id].parent
            # Drop the ExpandingNode placeholder; it is replaced below.
            del SearchNode.id_map[next_s_id]

            if is_goal:
                goal_found = True
                # An infinite value forces this branch to win at the root.
                parent.update(np.inf, next_s_id)
                break
            #elif not res.feasible or res.misgrasp:
            #    parent.update(-np.inf, next_s_id)
            #    continue
            child_vals = evaluator(next_s, timestep+d+1)
            child_node = MaxNode(next_s_id, next_s, child_vals, parent=parent)
            parent.update(child_node.value, next_s_id)
            agenda.append(child_node)
        if goal_found:
            break
    # Reset back to the original state before returning
    sim.set_state(id2simstate[root_id])
    return root.select_best(), goal_found
import numpy as np

# Principal point (half-pixel center convention) and default focal length for
# a 640x480 Kinect-style depth camera, in pixels.
cx = 320. - .5
cy = 240. - .5
DEFAULT_F = 535.


def xyZ_to_XY(x, y, Z, f=DEFAULT_F):
    """Back-project pixel coordinates (x, y) at depth Z to camera-frame (X, Y)."""
    scale = Z / f
    return ((x - cx) * scale, (y - cy) * scale)


def XYZ_to_xy(X, Y, Z, f=DEFAULT_F):
    """Project a camera-frame point (X, Y, Z) to pixel coordinates (x, y)."""
    scale = f / Z
    return (X * scale + cx, Y * scale + cy)


def depth_to_xyz(depth, f=DEFAULT_F):
    """Convert a 480x640 depth image in millimeters to a 480x640x3 XYZ image in meters."""
    assert depth.shape == (480, 640)
    cols, rows = np.meshgrid(np.arange(640), np.arange(480))
    XYZ = np.empty((480, 640, 3))
    Z = XYZ[:, :, 2] = depth / 1000.  # convert mm -> meters
    XYZ[:, :, 0] = (cols - cx) * (Z / f)
    XYZ[:, :, 1] = (rows - cy) * (Z / f)
    return XYZ


def downsample(xyz, v):
    """Voxel-downsample an (N, 3) cloud to leaf size v via cloudprocpy."""
    import cloudprocpy
    homog = np.ones((len(xyz), 4), 'float')
    homog[:, :3] = xyz
    cloud = cloudprocpy.CloudXYZ()
    cloud.from2dArray(homog)
    cloud = cloudprocpy.downsampleCloud(cloud, v)
    return cloud.to2dArray()[:, :3]
def retime_traj(robot, inds, traj, max_cart_vel=.02, upsample_time=.1):
    """Retime a trajectory so that it executes slowly enough for the simulation.

    Args:
        robot: OpenRAVE robot whose manipulators are used for forward kinematics
        inds: DOF indices corresponding to the columns of traj
        traj: (T, len(inds)) joint trajectory
        max_cart_vel: cartesian speed limit (per axis) for both gripper frames
        upsample_time: resampling period for the retimed trajectory

    Returns:
        The trajectory resampled so both end-effectors respect max_cart_vel.
    """
    # Forward-kinematics pass: record both gripper positions at every waypoint.
    cart_traj = np.empty((len(traj), 6))
    leftarm, rightarm = robot.GetManipulator("leftarm"), robot.GetManipulator("rightarm")
    with robot:  # state saver: restores the robot's DOF values afterwards
        for i in range(len(traj)):
            robot.SetDOFValues(traj[i], inds)
            cart_traj[i,:3] = leftarm.GetTransform()[:3,3]
            cart_traj[i,3:] = rightarm.GetTransform()[:3,3]

    times = retiming.retime_with_vel_limits(cart_traj, np.repeat(max_cart_vel, 6))
    # np.linspace requires an integer sample count on modern NumPy; the old
    # code passed a float (which NumPy used to truncate). int() keeps the
    # original truncating behavior while remaining valid.
    n_samples = int(times[-1] / upsample_time)
    times_up = np.linspace(0, times[-1], n_samples) if times[-1] > upsample_time else times
    traj_up = math_utils.interp2d(times_up, times, traj)
    return traj_up
rope_pts): 77 | self.bt_env = bulletsimpy.BulletEnvironment(self.env, []) 78 | self.bt_env.SetGravity([0, 0, -9.8]) 79 | self.bt_robot = self.bt_env.GetObjectByName(self.robot.GetName()) 80 | self.rope = bulletsimpy.CapsuleRope(self.bt_env, 'rope', rope_pts, self.rope_params) 81 | 82 | # self.rope.UpdateRave() 83 | # self.env.UpdatePublishedBodies() 84 | # trajoptpy.GetViewer(self.env).Idle() 85 | 86 | self.settle() 87 | 88 | def step(self): 89 | self.bt_robot.UpdateBullet() 90 | self.bt_env.Step(.01, 200, .005) 91 | self.rope.UpdateRave() 92 | self.env.UpdatePublishedBodies() 93 | 94 | def settle(self, max_steps=100, tol=.001, animate=False): 95 | """Keep stepping until the rope doesn't move, up to some tolerance""" 96 | prev_nodes = self.rope.GetNodes() 97 | for i in range(max_steps): 98 | self.bt_env.Step(.01, 200, .005) 99 | if animate: 100 | self.rope.UpdateRave() 101 | self.env.UpdatePublishedBodies() 102 | if i % 10 == 0 and i != 0: 103 | curr_nodes = self.rope.GetNodes() 104 | diff = np.sqrt(((curr_nodes - prev_nodes)**2).sum(axis=1)) 105 | if diff.max() < tol: 106 | break 107 | prev_nodes = curr_nodes 108 | self.rope.UpdateRave() 109 | self.env.UpdatePublishedBodies() 110 | print "settled in %d iterations" % (i+1) 111 | 112 | def observe_cloud(self, upsample=0): 113 | pts = self.rope.GetControlPoints() 114 | if upsample == 0: 115 | return pts 116 | lengths = np.r_[0, self.rope.GetHalfHeights() * 2] 117 | summed_lengths = np.cumsum(lengths) 118 | assert len(lengths) == len(pts) 119 | return math_utils.interp2d(np.linspace(0, summed_lengths[-1], upsample), summed_lengths, pts) 120 | 121 | def grab_rope(self, lr): 122 | nodes, ctl_pts = self.rope.GetNodes(), self.rope.GetControlPoints() 123 | 124 | graspable_nodes = np.array([in_grasp_region(self.robot, lr, n) for n in nodes]) 125 | graspable_ctl_pts = np.array([in_grasp_region(self.robot, lr, n) for n in ctl_pts]) 126 | graspable_inds = np.flatnonzero(np.logical_or(graspable_nodes, 
import logging as _logging

# Package-wide logger: DEBUG and above, written to stderr as "LEVEL - message".
LOG = _logging.getLogger("rapprentice")
LOG.setLevel(_logging.DEBUG)

_stream_handler = _logging.StreamHandler()
_stream_handler.setLevel(_logging.DEBUG)
_stream_handler.setFormatter(_logging.Formatter('%(levelname)s - %(message)s'))
LOG.addHandler(_stream_handler)
def extract_joints(bag):
    """Read every /joint_states message from *bag*.

    Returns:
        (names, stamps, traj): the joint names (taken from the last message),
        the message timestamps in seconds, and the list of joint-position
        tuples. Asserts that at least one message was present.
    """
    positions = []
    times = []
    msg = None
    for _, msg, _ in bag.read_messages(topics=['/joint_states']):
        positions.append(msg.position)
        times.append(msg.header.stamp.to_sec())
    assert len(positions) > 0
    # All messages share the same joint ordering, so the last one's names apply.
    return msg.name, times, positions
def find_disjoint_subsequences(li, seq):
    """Greedily find non-overlapping occurrences of *seq* as a subsequence of *li*.

    Scans left to right; each time the partial match completes, the index list
    is recorded and matching restarts. Returns a list of index lists
    [i, j, k, ...] such that [li[i], li[j], li[k], ...] == seq.
    """
    matches = []
    partial = []
    for idx, item in enumerate(li):
        if item == seq[len(partial)]:
            partial.append(idx)
            if len(partial) == len(seq):
                matches.append(partial)
                partial = []
    return matches

def joy_to_annotations(stamps, meanings):
    """Group (look, start, stop) button events into per-segment annotation dicts.

    Returns a list like [{"look": t0, "start": t1, "stop": t2}, ...], one dict
    per greedy look/start/stop subsequence found in *meanings*.
    """
    keys = ["look", "start", "stop"]
    segments = []
    for tup in find_disjoint_subsequences(meanings, keys):
        segments.append(dict(zip(keys, [stamps[i] for i in tup])))
    return segments
@func_utils.once
def get_robot():
    """Load and memoize (via @once) a static PR2 model in a fresh environment."""
    env = openravepy.Environment()
    env.Load("robots/pr2-beta-static.zae")
    robot = env.GetRobots()[0]
    return robot

def add_bag_to_hdf(bag, annotations, hdfroot, demo_name):
    """Write one demo's annotated segments from a ROS bag into an HDF5 root.

    For each annotation, slices the joint trajectory between its start/stop
    stamps, downsamples it, stores the joint states, and appends forward
    kinematics for the gripper links via add_kinematics_to_group.
    """
    joint_names, stamps, traj = extract_joints(bag)
    traj = np.asarray(traj)
    stamps = np.asarray(stamps)

    robot = get_robot()

    for seg_info in annotations:


        group = hdfroot.create_group(demo_name + "_" + seg_info["name"])

        start = seg_info["start"]
        stop = seg_info["stop"]

        # Indices of the trajectory samples that fall inside [start, stop].
        [i_start, i_stop] = np.searchsorted(stamps, [start, stop])

        stamps_seg = stamps[i_start:i_stop+1]
        traj_seg = traj[i_start:i_stop+1]
        # Adaptive resampling: tolerance .01 on position, no limits on
        # velocity/spacing (np.inf) — keeps only meaningfully distinct samples.
        sample_inds = fastrapp.resample(traj_seg, np.arange(len(traj_seg)), .01, np.inf, np.inf)
        print "trajectory has length", len(sample_inds),len(traj_seg)


        traj_ds = traj_seg[sample_inds,:]
        stamps_ds = stamps_seg[sample_inds]

        group["description"] = seg_info["description"]
        group["stamps"] = stamps_ds
        group.create_group("joint_states")
        group["joint_states"]["name"] = joint_names
        group["joint_states"]["position"] = traj_ds
        link_names = ["l_gripper_tool_frame","r_gripper_tool_frame","l_gripper_r_finger_tip_link","l_gripper_l_finger_tip_frame","r_gripper_r_finger_tip_link","r_gripper_l_finger_tip_frame"]
        special_joint_names = ["l_gripper_joint", "r_gripper_joint"]
        manip_names = ["leftarm", "rightarm"]

        add_kinematics_to_group(group, link_names, manip_names, special_joint_names, robot)
def add_rgbd_to_hdf(video_dir, annotations, hdfroot, demo_name):
    """Attach the RGB/depth frame at each segment's "look" time, plus the
    world-from-kinect transform at that instant, to the segment's HDF5 group.

    Assumes the per-segment groups (including "joint_states") were already
    created, e.g. by add_bag_to_hdf.
    """

    frame_stamps = [seg_info["look"] for seg_info in annotations]

    rgb_imgs, depth_imgs = get_video_frames(video_dir, frame_stamps)

    for (i_seg, seg_info) in enumerate(annotations):
        group = hdfroot[demo_name + "_" + seg_info["name"]]
        group["rgb"] = rgb_imgs[i_seg]
        group["depth"] = depth_imgs[i_seg]
        # Recompute the camera pose at the "look" instant: put the robot into
        # the first recorded joint configuration, then chain the head pose
        # with the fixed head->kinect calibration matrix.
        robot = get_robot()  # NOTE(review): loop-invariant, could be hoisted
        r2r = ros2rave.RosToRave(robot, group["joint_states"]["name"])
        r2r.set_values(robot, group["joint_states"]["position"][0])
        T_w_h = robot.GetLink("head_plate_frame").GetTransform()
        T_w_k = T_w_h.dot(berkeley_pr2.T_h_k)
        group["T_w_k"] = T_w_k
def extract_red(rgb, depth, T_w_k):
    """
    extract red points and downsample

    rgb: BGR image (cv2 convention); depth: depth image in mm (converted to
    meters by clouds.depth_to_xyz); T_w_k: 4x4 world-from-kinect transform.
    Returns an Nx3 world-frame cloud of red points above the table,
    downsampled on a .01 grid.
    """

    hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
    h = hsv[:,:,0]
    s = hsv[:,:,1]
    v = hsv[:,:,2]

    # red hue wraps around 0, so accept both ends of the hue range
    red_mask = ((h<10) | (h>150)) & (s > 100) & (v > 100)

    valid_mask = depth > 0  # zero depth means no sensor return

    xyz_k = clouds.depth_to_xyz(depth, berkeley_pr2.f)
    # rotate + translate camera-frame points into the world frame
    xyz_w = xyz_k.dot(T_w_k[:3,:3].T) + T_w_k[:3,3][None,None,:]

    z = xyz_w[:,:,2]
    z0 = xyz_k[:,:,2]
    if DEBUG_PLOTS:
        cv2.imshow("z0",z0/z0.max())
        cv2.imshow("z",z/z.max())
        cv2.imshow("rgb", rgb)
        cv2.waitKey()

    # keep only points above table height (hard-coded world z threshold)
    height_mask = xyz_w[:,:,2] > .7 # TODO pass in parameter


    good_mask = red_mask & height_mask & valid_mask
    if DEBUG_PLOTS:
        cv2.imshow("red",red_mask.astype('uint8')*255)
        cv2.imshow("above_table", height_mask.astype('uint8')*255)
        cv2.imshow("red and above table", good_mask.astype('uint8')*255)
        print "press enter to continue"
        cv2.waitKey()




    good_xyz = xyz_w[good_mask]


    return clouds.downsample(good_xyz, .01)
def extract_red_alphashape(cloud, robot):
    """
    extract red, get alpha shape, downsample

    NOTE: unimplemented -- always raises NotImplementedError.  The code
    below the raise is an unreachable sketch of the intended algorithm.
    """
    raise NotImplementedError

    # downsample cloud
    cloud_ds = cloudprocpy.downsampleCloud(cloud, .01)

    # transform into body frame
    xyz1_kinect = cloud_ds.to2dArray()
    # NOTE(review): assumes to2dArray returns Nx4 homogeneous rows -- confirm
    xyz1_kinect[:,3] = 1
    T_w_k = berkeley_pr2.get_kinect_transform(robot)
    xyz1_robot = xyz1_kinect.dot(T_w_k.T)

    # compute 2D alpha shape
    xyz1_robot_flat = xyz1_robot.copy()
    xyz1_robot_flat[:,2] = 0 # set z coordinates to zero
    xyz1_robot_flatalphashape = cloudprocpy.computeAlphaShape(xyz1_robot_flat)

    # unfortunately pcl alpha shape func throws out the indices, so we have to use nearest neighbor search
    cloud_robot_flatalphashape = cloudprocpy.CloudXYZ()
    cloud_robot_flatalphashape.from2dArray(xyz1_robot_flatalphashape)
    cloud_robot_flat = cloudprocpy.CloudXYZ()
    cloud_robot_flat.from2dArray(xyz1_robot_flat)
    alpha_inds = cloudprocpy.getNearestNeighborIndices(xyz1_robot_flatalphashape, xyz1_robot_flat)

    xyz_robot_alphashape = xyz1_robot_flatalphashape[:,:3]

    # put back z coordinate
    xyz_robot_alphashape[:,2] = xyz1_robot[alpha_inds,2]

    return xyz_robot_alphashape[:,:3]
# convert mm -> meters 22 | XYZ[:,:,0] = (x - cx)*(Z/f) 23 | XYZ[:,:,1] = (y - cy)*(Z/f) 24 | 25 | return XYZ 26 | 27 | def downsample(xyz, v): 28 | import cloudprocpy 29 | if xyz.shape[1] == 3: 30 | cloud = cloudprocpy.CloudXYZ() 31 | xyz1 = np.ones((len(xyz),4),'float') 32 | xyz1[:,:3] = xyz 33 | cloud.from2dArray(xyz1) 34 | cloud = cloudprocpy.downsampleCloud(cloud, v) 35 | return cloud.to2dArray()[:,:3] 36 | else: 37 | # rgb fields needs to be packed and upacked as described in here 38 | # http://docs.pointclouds.org/1.7.0/structpcl_1_1_point_x_y_z_r_g_b.html 39 | 40 | xyzrgb = xyz 41 | n = xyzrgb.shape[0] 42 | cloud = cloudprocpy.CloudXYZRGB() 43 | xyzrgb1 = np.ones((n,8),'float') 44 | xyzrgb1[:,:3] = xyzrgb[:,:3] 45 | xyzrgb1[:,4] = cloudprocpy.packRGBs(xyzrgb[:,3:] * 255.0) 46 | xyzrgb1[:,5:] = np.zeros((n,3)) # padding that is not used. set to zero just in case 47 | cloud.from2dArray(xyzrgb1) 48 | cloud = cloudprocpy.downsampleColorCloud(cloud, v) 49 | xyzrgb1 = cloud.to2dArray() 50 | return np.c_[xyzrgb1[:,:3], cloudprocpy.unpackRGBs(xyzrgb1[:,4]) / 255.0] 51 | -------------------------------------------------------------------------------- /lfd/rapprentice/compute_decomps.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import numpy as np 3 | import scipy.linalg 4 | import argparse 5 | 6 | from lfd.rapprentice.tps import tps_kernel_matrix 7 | 8 | def parse_arguments(): 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument('datafile', type=str) 11 | parser.add_argument('--bend_coeff_init', type=float, default=10) 12 | parser.add_argument('--bend_coeff_final', type=float, default=.1) 13 | parser.add_argument('--n_iter', type=int, default=20) 14 | parser.add_argument('--verbose', action='store_true') 15 | return parser.parse_args() 16 | 17 | def get_lu_decomp(x_na, bend_coef, rot_coef): 18 | """ 19 | precomputes the LU decomposition and other intermediate results needed 20 | to fit a TPS to 
x_na with bend_coeff 21 | 22 | all thats needed is to compute the righthand side and do a forward solve 23 | """ 24 | 25 | n,d = x_na.shape 26 | Q = np.c_[np.ones((n,1)), x_na, K_nn] 27 | QWQ = Q.T.dot(WQ) 28 | H = Q.T.dot(Q) 29 | H[d+1:,d+1:] += bend_coef * K_nn 30 | rot_coefs = np.ones(d) * rot_coef if np.isscalar(rot_coef) else rot_coef 31 | H[1:d+1, 1:d+1] += np.diag(rot_coefs) 32 | 33 | A = np.r_[np.zeros((d+1,d+1)), np.c_[np.ones((n,1)), x_na]].T 34 | 35 | # f = -WQ.T.dot(y_ng) 36 | # f[1:d+1,0:d] -= np.diag(rot_coefs) 37 | 38 | _u,_s,_vh = np.linalg.svd(A.T) 39 | N = _u[:,n_cnts:] 40 | 41 | p, l, u, = scipy.linalg.lu(N.T.dot(H.dot(N))) 42 | 43 | # z = np.linalg.solve(N.T.dot(H.dot(N)), -N.T.dot(f)) 44 | # x = N.dot(z) 45 | 46 | res_dict = {'p' : p, 'l' : l, 'u' : u, 'N' : N, 'rot_coeffs' : rot_coeffs} 47 | 48 | return bend_coeff, res_dict 49 | 50 | 51 | def main(): 52 | args = parse_arguments() 53 | 54 | f = h5py.File(args.datafile, 'r') 55 | 56 | bend_coefs = np.loglinspace(args.bend_coef_init, args.bend_coeff_final, args.n_iter) 57 | 58 | for seg_name, seg_info in f.iteritems(): 59 | if 'LU' not in seg_info: 60 | lu_group = seg_info.create_group('LU') 61 | else: 62 | lu_group = seg_info['LU'] 63 | x_na = seg_info['cloud_xyz'][:] 64 | for bend_coeff in bend_coeffs: 65 | if str(bend_coeff) in lu_group: 66 | continue 67 | 68 | bend_coeff_g = lu_group.create_group(str(bend_coeff)) 69 | _, res = get_lu_decomp(x_na, bend_coeff) 70 | for k, v in res.iteritems(): 71 | bend_coeff_g[k] = v 72 | 73 | if args.verbose: 74 | print 'segment {} bend_coeff {}'.format(seg_name, bend_coeff) 75 | 76 | f.close() 77 | 78 | if __name__=='__main__': 79 | main() 80 | 81 | -------------------------------------------------------------------------------- /lfd/rapprentice/conversions.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from lfd.rapprentice import transformations 3 | try: 4 | import geometry_msgs.msg as gm 5 | 
def pose_to_trans_rot(pose):
    """Split a geometry_msgs Pose into ((x, y, z), (qx, qy, qz, qw)) tuples."""
    p = pose.position
    q = pose.orientation
    trans = (p.x, p.y, p.z)
    rot = (q.x, q.y, q.z, q.w)
    return trans, rot
def rod2mat(rod):
    """Convert a Rodrigues (axis-angle) vector to a 3x3 rotation matrix.

    The rotation angle is the vector's norm and the axis its direction;
    the zero vector maps to the identity.
    """
    angle = np.linalg.norm(rod)
    if angle == 0:
        return np.eye(3)

    axis = rod / angle
    ax, ay, az = axis
    cross = np.array([[0.0, -az,  ay],
                      [ az, 0.0, -ax],
                      [-ay,  ax, 0.0]])
    c = np.cos(angle)
    s = np.sin(angle)
    # Rodrigues' rotation formula
    return c * np.eye(3) + (1 - c) * np.outer(axis, axis) + s * cross
def trans_rot_to_pose(trans, rot):
    """Build a geometry_msgs Pose from translation (x, y, z) and quaternion (x, y, z, w)."""
    position = gm.Point(*trans)
    orientation = gm.Quaternion(*rot)
    pose = gm.Pose()
    pose.position = position
    pose.orientation = orientation
    return pose
class once:
    """Decorator: run the wrapped function at most one time.

    The first call executes the function and caches its return value;
    every later call returns that cached value and ignores its arguments.
    """
    def __init__(self, fn):
        self.fn = fn
        self.out = None
        self.first_time = True

    def __call__(self, *args, **kw):
        if not self.first_time:
            return self.out
        self.out = self.fn(*args, **kw)
        self.first_time = False
        return self.out
def get_polyline(image,window_name):
    """Interactively collect a polyline from the user.

    Shows `image` in an OpenCV window; each left click adds a vertex, and
    the middle mouse button or the 'c' key finishes.  Returns the clicked
    vertices as a list of (x, y) pixel tuples.
    """
    cv2.namedWindow(window_name)
    class GetPoly:
        # NOTE(review): xys/done are CLASS attributes, so vertices persist
        # across calls to get_polyline in the same process -- likely a bug.
        xys = []
        done = False
        def callback(self,event, x, y, flags, param):
            if self.done == True:
                pass
            elif event == cv2.EVENT_LBUTTONDOWN:
                self.xys.append((x,y))
            elif event == cv2.EVENT_MBUTTONDOWN:
                self.done = True
    gp = GetPoly()
    cv2.setMouseCallback(window_name,gp.callback)
    print "press middle mouse button or 'c' key to complete the polygon"
    # redraw loop: show accumulated vertices until the user finishes
    while not gp.done:
        im_copy = image.copy()
        for (x,y) in gp.xys:
            cv2.circle(im_copy,(x,y),2,(0,255,0))
        if len(gp.xys) > 1 and not gp.done:
            cv2.polylines(im_copy,[np.array(gp.xys).astype('int32')],False,(0,255,0),1)
        cv2.imshow(window_name,im_copy)
        key = cv2.waitKey(50)
        if key == ord('c'): gp.done = True
    #cv2.destroyWindow(window_name)
    return gp.xys
def smaller_ang(x):
    """Wrap angle x into the interval [-pi, pi)."""
    return (x + pi) % (2 * pi) - pi

def closer_ang(x, a, dir=0):
    """Return the angle y with y == x (mod 2*pi) positioned relative to a.

    dir ==  0: y minimizes |y - a|
    dir ==  1: y lies in [a, a + 2*pi)
    dir == -1: y lies in [a - 2*pi, a)

    Raises ValueError for any other dir.  (BUG FIX: the old docstring
    advertised dir == 2, but the code has always dispatched on -1, and an
    unrecognized dir silently returned None.)
    """
    if dir == 0:
        return a + smaller_ang(x - a)
    elif dir == 1:
        return a + (x - a) % (2 * pi)
    elif dir == -1:
        return a + (x - a) % (2 * pi) - 2 * pi
    raise ValueError("dir must be 0, 1, or -1, got %r" % (dir,))
def smooth_positions(positions, tol):
    """Fit a smoothing spline through positions (sampled at unit time steps)
    and return the smoothed positions evaluated at the same sample times.

    tol scales the spline smoothing factor; samples whose timestamp did not
    advance are dropped before fitting.  A single sample returns zeros.
    """
    times = np.arange(len(positions))
    positions = np.atleast_2d(positions)
    n = len(positions)
    deg = min(3, n - 1)

    # keep the first sample plus every sample whose time actually advanced
    keep = np.r_[True, (abs(times[1:] - times[:-1]) >= 1e-6)]
    kept_positions = positions[keep]
    kept_times = times[keep]

    if len(keep) == 1:
        return np.zeros(positions[0:1].shape)

    smoothing = tol ** 2 * (n + 1)
    tck, _ = si.splprep(kept_positions.T, s=smoothing, u=kept_times, k=deg)
    return np.r_[si.splev(times, tck, der=0)].T
def compute_dt_code(ctl_pts, plotting=False):
    """Takes rope control points (Nx3 array), closes the loop, and computes the Dowker-Thistlethwaite code for the knot.
    The z-value for the points are used for determining over/undercrossings.
    Follows procedure outlined here: http://katlas.math.toronto.edu/wiki/DT_(Dowker-Thistlethwaite)_Codes
    """

    # First, close the loop by introducing extra points under the table and toward the robot (by subtracting z and x values)
    # first_pt, last_pt = ctl_pts[0], ctl_pts[-1]
    # flipped = False
    # if first_pt[1] > last_pt[1]:
    #     first_pt, last_pt = last_pt, first_pt
    #     flipped = True
    # min_z = ctl_pts[:,2].min()
    # extra_first_pt, extra_last_pt = first_pt + [-.1, -.1, min_z-1], last_pt + [-.1, .1, min_z-1]
    # if flipped:
    #     extra_pts = [extra_first_pt, extra_first_pt + [-1, 0, 0], extra_last_pt + [-1, 0, 0], extra_last_pt, last_pt]
    # else:
    #     extra_pts = [extra_last_pt, extra_last_pt + [-1, 0, 0], extra_first_pt + [-1, 0, 0], extra_first_pt, first_pt]
    # ctl_pts = np.append(ctl_pts, extra_pts, axis=0)

    if plotting:
        import trajoptpy, openravepy
        env = openravepy.Environment()
        viewer = trajoptpy.GetViewer(env)
        handles = []
        handles.append(env.plot3(ctl_pts, 5, [0, 0, 1]))
        viewer.Idle()

    # Upsampling loop: upsample until every segment has at most one crossing
    need_upsample_ind = None
    upsample_iters = 0
    max_upsample_iters = 10
    while True:
        counter = 1
        crossings = defaultdict(list)
        # Walk along rope: for each segment, compute intersections with all other segments
        for i in range(len(ctl_pts) - 1):
            curr_seg = ctl_pts[i:i+2,:]
            intersections, ts, us = intersect_segs(ctl_pts[:,:2], curr_seg[:,:2])

            if len(intersections) == 0:
                continue
            if len(intersections) != 1:
                LOG.debug('warning: more than one intersection for segment %d, now upsampling', i)
                need_upsample_ind = i
                break

            # for each intersection, determine and record over/undercrossing
            i_int = intersections[0]
            if plotting:
                handles.append(env.drawlinestrip(ctl_pts[i_int:i_int+2], 5, [1, 0, 0]))
            int_point_rope = ctl_pts[i_int] + ts[i_int]*(ctl_pts[i_int+1] - ctl_pts[i_int])
            int_point_curr_seg = curr_seg[0] + us[i_int]*(curr_seg[1] - curr_seg[0])
            #assert np.allclose(int_point_rope[:2], int_point_curr_seg[:2])
            above = int_point_curr_seg[2] > int_point_rope[2]
            crossings[tuple(sorted((i, i_int)))].append(-counter if counter % 2 == 0 and above else counter)
            counter += 1
        if plotting: viewer.Idle()
        # upsample if necessary
        if need_upsample_ind is not None and upsample_iters < max_upsample_iters:
            spacing = np.linspace(0, 1, len(ctl_pts))
            new_spacing = np.insert(spacing, need_upsample_ind+1, (spacing[need_upsample_ind]+spacing[need_upsample_ind+1])/2.)
            ctl_pts = math_utils.interp2d(new_spacing, spacing, ctl_pts)
            upsample_iters += 1
            # BUG FIX: this used to assign to a stray `need_upsample` name, so
            # the flag never cleared and the rope kept upsampling until
            # max_upsample_iters even after all crossings were resolved.
            need_upsample_ind = None
            continue
        break

    # Extract relevant part of crossing data to produce DT code
    out = []
    for pair in crossings.itervalues():
        assert len(pair) == 2
        odd = [p for p in pair if p % 2 == 1][0]
        even = [p for p in pair if p % 2 == 0][0]
        out.append((odd, even))
    out.sort()
    dt_code = [-o[1] for o in out]
    return dt_code
def interp_mat(x, xp):
    """Build the linear-interpolation matrix M such that
    interp_mat(x, xp).dot(fp) equals np.interp(x, xp, fp).

    Each row holds the two barycentric weights of the samples in xp that
    bracket the corresponding query point in x.
    """
    u_ixps = np.interp(x, xp, np.arange(len(xp)))
    m = np.zeros((len(x), len(xp)))
    for ix, u_ixp in enumerate(u_ixps):
        u, ixp_f = np.modf(u_ixp)
        # BUG FIX: np.modf returns floats; float indices raise IndexError on
        # modern NumPy, so cast the integer part before indexing.
        ixp = int(ixp_f)
        m[ix, ixp] = 1.-u
        if ixp+1 < m.shape[1]: # the last u is zero by definition
            m[ix, ixp+1] = u
    return m
def invertHmat(hmat):
    """Invert a 4x4 homogeneous rigid transform analytically.

    Uses the closed form inv([R t; 0 1]) = [R' -R'.t; 0 1]; the bottom row
    is copied straight from the input.
    """
    rot = hmat[:3, :3]
    trans = hmat[:3, 3]
    inv_rot = rot.T
    inv_trans = -inv_rot.dot(trans)
    return np.r_[np.c_[inv_rot, inv_trans], hmat[3, :][None, :]]
def make_joint_traj(xyzs, quats, manip, ref_frame, targ_frame, filter_options = 0):
    """Run IK along a Cartesian path and interpolate over the points where IK failed.

    xyzs, quats: parallel sequences (length n) of positions and quaternions
    manip: OpenRAVE manipulator; its robot's active DOFs are set to the arm
    ref_frame, targ_frame: frames passed through to PR2.cart_to_joint
    filter_options: IK filter flags forwarded to the solver

    Returns (joint_traj, inds) where joint_traj is (n, n_arm_dofs) and inds
    lists the waypoint indices where IK actually succeeded; with 2 or fewer
    successes, returns an all-zero trajectory and an empty index list.
    """
    n = len(xyzs)
    assert len(quats) == n

    robot = manip.GetRobot()
    joint_inds = manip.GetArmIndices()
    robot.SetActiveDOFs(joint_inds)
    orig_joint = robot.GetActiveDOFValues()  # saved so the robot pose can be restored below

    joints = []
    inds = []

    for i in xrange(0,n):
        mat4 = conv.trans_rot_to_hmat(xyzs[i], quats[i])
        joint = PR2.cart_to_joint(manip, mat4, ref_frame, targ_frame, filter_options)
        if joint is not None:
            joints.append(joint)
            inds.append(i)
            # seed the next IK solve from this solution for continuity
            robot.SetActiveDOFValues(joint)

    robot.SetActiveDOFValues(orig_joint)

    LOG.info("found ik soln for %i of %i points",len(inds), n)
    if len(inds) > 2:
        # fill the failed waypoints by linear interpolation between successes
        joints2 = mu.interp2d(np.arange(n), inds, joints)
        return joints2, inds
    else:
        # NOTE(review): len(joints) here is the number of successful solves
        # (<= 2 in this branch), not the arm DOF count, so the fallback array
        # may not be (n, n_arm_dofs) — confirm callers only check for inds == []
        return np.zeros((n, len(joints))), []
def flatten_compound_dtype(compound_array):
    """Unpack a structured (record) array into a single 2-d float array.

    Scalar fields become one column; 2-d subarray fields contribute their
    columns directly. Raises for fields of any other dimensionality.
    """
    columns = []
    for field_descr in compound_array.dtype.descr:
        field_name = field_descr[0]
        field_data = compound_array[field_name]
        if field_data.ndim == 1:
            columns.append(field_data.astype('float')[:, None])
        elif field_data.ndim == 2:
            columns.append(field_data.astype('float'))
        else:
            raise Exception("subarray with field %s must be 1d or 2d"%field_name)
    return np.concatenate(columns, axis=1)
def follow_rave_trajectory(pr2, ravetraj, dof_inds, use_base = False, base_frame="/base_footprint"):
    """Translate an OpenRAVE joint trajectory into per-bodypart ROS trajectories and execute it.

    pr2: PR2 wrapper exposing .robot (the rave robot) and part objects
    ravetraj: (T, len(dof_inds) [+3 if use_base]) array of DOF values; the
        last 3 columns are the base pose when use_base is True
    dof_inds: rave DOF index for each trajectory column
    Raises if a column maps to a joint no controller owns.
    """
    assert ravetraj.shape[1] == len(dof_inds) + 3*int(use_base)
    bodypart2traj = {}

    # map rave DOF index -> (part name, index within that part)
    rave2ros = {}
    name2part = {"l_gripper":pr2.lgrip,
                 "r_gripper":pr2.rgrip,
                 "l_arm":pr2.larm,
                 "r_arm":pr2.rarm}
    for (partname, part) in name2part.items():
        for (ijoint, jointname) in enumerate(part.joint_names):
            rave2ros[pr2.robot.GetJoint(jointname).GetDOFIndex()] = (partname, ijoint)
        # default every part to holding its current configuration for all T steps
        bodypart2traj[partname] = np.repeat(np.asarray(part.get_joint_positions())[None,:], len(ravetraj), axis=0)

    bodypart2used = {}

    for (ravecol, dof_ind) in enumerate(dof_inds):
        if dof_ind in rave2ros:
            partname, partind = rave2ros[dof_ind]
            bodypart2traj[partname][:,partind] = ravetraj[:,ravecol]
        elif dof_ind == pr2.robot.GetJoint("r_gripper_l_finger_joint").GetDOFIndex():
            partname, partind = "r_gripper", 0
            # finger-joint angle -> gripper opening; 5.81 is the gripper's
            # transmission ratio — presumably from the PR2 URDF, TODO confirm
            bodypart2traj[partname][:,partind] = ravetraj[:,ravecol]/5.81
        elif dof_ind == pr2.robot.GetJoint("l_gripper_l_finger_joint").GetDOFIndex():
            partname, partind = "l_gripper", 0
            bodypart2traj[partname][:,partind] = ravetraj[:,ravecol]/5.81
        else:
            jointname = pr2.robot.GetJointFromDOFIndex(dof_ind).GetName()
            raise Exception("I don't know how to control this joint %s"%jointname)
        bodypart2used[partname] = True

    # drop parts the trajectory never touches so they are not commanded at all
    for partname in list(bodypart2traj.keys()):
        if partname not in bodypart2used:
            del bodypart2traj[partname]

    if use_base:
        base_traj = ravetraj[:,-3:]
        bodypart2traj["base"] = base_traj

    # NOTE(review): follow_body_traj (above) keys its parts "lgrip"/"rgrip"/
    # "larm"/"rarm", while the names built here are "l_gripper"-style; as
    # written its invalid-part check would reject these — confirm which naming
    # the current PR2 wrapper expects.
    follow_body_traj(pr2, bodypart2traj, base_frame = base_frame)
def lerp(a, b, fracs):
    """Linearly interpolate between a and b at each fraction in fracs.

    fracs: 1-d array of blend factors in [0, 1]; returns one row per fraction.
    """
    weights = fracs[:, None]
    return b * weights + a * (1 - weights)
def smooth_positions(positions, tol):
    """Fit a smoothing spline through a trajectory and resample it at the
    original (integer) timestamps.

    positions: (n, d) array of waypoints
    tol: smoothing tolerance; passed to splprep as s = tol**2 * (n+1)
    Returns an (n, d) array of smoothed positions.
    """
    positions = np.atleast_2d(positions)
    n = len(positions)
    times = np.arange(n)
    deg = min(3, n - 1)  # spline degree must be below the point count

    # keep only strictly advancing timestamps; with integer times this never
    # filters anything, but it mirrors get_velocities above
    good_inds = np.r_[True, (abs(times[1:] - times[:-1]) >= 1e-6)]
    good_positions = positions[good_inds]
    good_times = times[good_inds]

    if len(good_times) == 1:
        # BUG FIX: the original tested len(good_inds) (the full mask length)
        # and returned zeros — copied from get_velocities, where zero
        # *velocity* is right; the smoothed position of one point is the point
        return positions[0:1].copy()

    (tck, _) = si.splprep(good_positions.T, s=tol**2*(n+1), u=good_times, k=deg)
    # splev returns one array per dimension; stack and transpose back to (n, d)
    spos = np.r_[si.splev(times, tck, der=0)].T
    return spos
def interp_quats(newtimes, oldtimes, oldquats):
    """Resample quaternions at new timestamps using spherical interpolation.

    newtimes: 1-d array of query times
    oldtimes: 1-d increasing array of sample times (length k)
    oldquats: (k, 4) array of unit quaternions
    Returns a (len(newtimes), 4) array of slerped quaternions.
    """
    # fractional index of each new time into the old sample sequence
    u_ioldtimes = np.interp(newtimes, oldtimes, np.arange(len(oldtimes)))
    newquats = np.empty((len(u_ioldtimes), oldquats.shape[1]))
    for i, u_ioldtime in enumerate(u_ioldtimes):
        u, ioldtime = np.modf(u_ioldtime)
        # BUG FIX: np.modf returns floats; numpy requires integer indices, so
        # using ioldtime directly raises IndexError on current numpy
        ioldtime = int(ioldtime)
        if ioldtime + 1 < oldquats.shape[0]:
            newquats[i,:] = openravepy.quatSlerp(oldquats[ioldtime,:], oldquats[ioldtime+1,:], u)
        else: # the last u is zero by definition
            newquats[i,:] = oldquats[-1,:]
    return newquats
def retime_with_vel_limits(positions, vel_limits_j):
    """Assign a timestamp to each waypoint so no joint exceeds its velocity limit.

    positions: (n, j) waypoint array
    vel_limits_j: per-joint maximum speeds (length j)
    Returns a length-n array of cumulative times starting at 0; each segment
    lasts as long as its slowest joint needs.
    """
    deltas = positions[1:] - positions[:-1]
    # duration of each segment is dictated by the most limit-bound joint
    seg_durations = (np.abs(deltas) / vel_limits_j[None, :]).max(axis=1)
    return np.cumsum(np.r_[0, seg_durations])
def make_traj_with_limits(positions, vel_limits_j, acc_limits_j):
    """Retime a waypoint path under velocity/acceleration limits and smooth it.

    Returns (smooth_positions, smooth_velocities, times): the spline-smoothed
    path and its first derivative, sampled at the retimed timestamps.
    """
    times, keep_inds = retime(positions, vel_limits_j, acc_limits_j)
    positions = positions[keep_inds]  # retime drops duplicate waypoints

    spline_deg = min(3, len(positions) - 1)
    smoothing = len(positions) * .001 ** 2
    (tck, _) = si.splprep(positions.T, s=smoothing, u=times, k=spline_deg)
    smooth_positions = np.r_[si.splev(times, tck, der=0)].T
    smooth_velocities = np.r_[si.splev(times, tck, der=1)].T
    return smooth_positions, smooth_velocities, times
def svds_slow(x_k33):
    """Compute the SVD of each 3x3 matrix in a stack via np.linalg.svd.

    Returns (u, s, vt) stacks with shapes (K,3,3), (K,3), (K,3,3), matching
    the fast svds() above.
    """
    decomps = [np.linalg.svd(mat_33) for mat_33 in x_k33]
    u_stack = np.array([u for (u, _, _) in decomps])
    s_stack = np.array([s for (_, s, _) in decomps])
    vt_stack = np.array([vt for (_, _, vt) in decomps])
    return u_stack, s_stack, vt_stack
def request_int_in_range(too_high_val):
    """Prompt on stdin until the user types an integer <= too_high_val; return it.

    Note the bound is inclusive and (as before) there is no lower bound.
    """
    while True:
        try:
            choice_ind = int(raw_input())
        except ValueError:
            # BUG FIX: the original fell through here with choice_ind unbound
            # (NameError on the first bad entry, or a stale value later);
            # report and re-prompt instead
            print("invalid selection. try again")
            continue
        if choice_ind <= too_high_val:
            return choice_ind
        print("invalid selection. try again")
def tps_rpm_bij_grid(x_knd, y_lmd, n_iter = 20, reg_init = .1, reg_final = .001, rad_init = .1, rad_final = .005, rot_reg = 1e-3,
                     plotting = False, plot_cb = None, x_weights = None, y_weights = None, outlierprior = .1, outlierfrac = 2e-1,
                     parallel = False, ppservers=(), partition_step=None):
    """Run tps_rpm_bij for every pair of clouds in two batches.

    x_knd: (k, n, d) batch of source clouds (a single (n, d) cloud is accepted)
    y_lmd: (l, m, d) batch of target clouds
    parallel: if True the k x l grid is split into jobs with Parallel Python,
        across the local cores and any cluster nodes in ppservers; with
        ppservers=() only local cores are used
    partition_step: when given, jobs cover partition_step x partition_step
        blocks of the grid; otherwise one job per source cloud (a full row)
    Returns a (k, l) object array of tps_rpm_bij results.
    """
    #TODO warn if default parameters are different?
    # NOTE(review): ndarray.resize mutates the caller's array in place (and
    # fails when the array is referenced elsewhere) — confirm callers expect
    # their 2-d inputs to come back reshaped to 3-d.
    if len(x_knd.shape) == 2:
        x_knd.resize((1,)+x_knd.shape)
    if len(y_lmd.shape) == 2:
        y_lmd.resize((1,)+y_lmd.shape)
    k = x_knd.shape[0]
    l = y_lmd.shape[0]
    tps_tups = np.empty((k,l), dtype=object)

    if not parallel:
        # serial path: one registration per (source, target) pair
        for i in range(k):
            for j in range(l):
                tps_tups[i,j] = tps_rpm_bij(x_knd[i], y_lmd[j], n_iter=n_iter, reg_init=reg_init, reg_final=reg_final,
                                            rad_init=rad_init, rad_final=rad_final, rot_reg=rot_reg,
                                            plotting=plotting, plot_cb=plot_cb, x_weights=x_weights, y_weights=y_weights,
                                            outlierprior=outlierprior, outlierfrac=outlierfrac)
    else:
        job_server = pp.Server(ppservers=ppservers)
        #TODO check if servers on nodes are running
        print "Starting pp with", job_server.get_ncpus(), "local workers"

        # make sure to change the order of optional arguments if the function's signature is changed
        # recursive calls run with parallel=False so jobs do the actual work
        opt_arg_vals = (n_iter, reg_init, reg_final, rad_init, rad_final, rot_reg, plotting, plot_cb, x_weights, y_weights, outlierprior, outlierfrac, False, ppservers)
        dep_funcs = ()
        modules = ("import numpy as np",
                   "import scipy.spatial.distance as ssd")
        # every name tps_rpm_bij reaches transitively must be shipped to the
        # workers explicitly, since pp pickles the function without its module
        myglobals = {'tps_rpm_bij':registration.tps_rpm_bij,
                     'loglinspace':registration.loglinspace,
                     'Transformation':Transformation,
                     'ThinPlateSpline':ThinPlateSpline,
                     'tps_eval':tps.tps_eval,
                     'tps_kernel_matrix2':tps.tps_kernel_matrix2,
                     'tps_apply_kernel':tps.tps_apply_kernel,
                     'balance_matrix3':registration.balance_matrix3,
                     'fit_ThinPlateSpline':registration.fit_ThinPlateSpline,
                     'tps_fit3':tps.tps_fit3,
                     'tps_kernel_matrix':tps.tps_kernel_matrix,
                     'solve_eqp1':tps.solve_eqp1,
                     'tps_cost':tps.tps_cost
                     }
        if partition_step:
            # square tiles of the (k, l) grid, clipped at the edges
            partitions = [(i,min(i+partition_step,k),j,min(j+partition_step,l)) for i in range(0,k,partition_step) for j in range(0,l,partition_step)]
        else:
            # default: one job per source cloud, spanning all targets
            partitions = [(i,i+1,0,l) for i in range(k)]
        jobs = [job_server.submit(tps_rpm_bij_grid, (x_knd[i_start:i_end], y_lmd[j_start:j_end])+opt_arg_vals, dep_funcs, modules, globals=myglobals)
                for (i_start,i_end,j_start,j_end) in partitions]
        for ((i_start,i_end,j_start,j_end),job) in zip(partitions, jobs):
            # job() blocks until that tile is done, then slot it into the grid
            tps_tups[i_start:i_end,j_start:j_end] = job()
        job_server.print_stats()

    return tps_tups
class Transformation(object):
    """
    Object oriented interface for transformations R^d -> R^d.

    Subclasses implement transform_points and compute_jacobian; the remaining
    methods (vectors, bases, hmats, numerical jacobian) are derived from them.
    """
    def transform_points(self, x_ma):
        """Map an (m, a) array of points through the transformation."""
        raise NotImplementedError
    def compute_jacobian(self, x_ma):
        """Return the (m, g, a) stack of Jacobians, one per input point."""
        raise NotImplementedError

    def transform_vectors(self, x_ma, v_ma):
        """Push tangent vectors v_ma attached at points x_ma through the Jacobian."""
        grad_mga = self.compute_jacobian(x_ma)
        return np.einsum('ijk,ik->ij', grad_mga, v_ma) # matrix multiply each jac with each vector

    def transform_bases(self, x_ma, rot_mad, orthogonalize=True, orth_method = "cross"):
        """Transform orientation bases rot_mad located at points x_ma.

        orth_method: "cross", "svd", or "qr" — how to re-orthogonalize the
        Jacobian-warped (no longer orthonormal) bases when orthogonalize=True.
        """
        grad_mga = self.compute_jacobian(x_ma)
        newrot_mgd = np.array([grad_ga.dot(rot_ad) for (grad_ga, rot_ad) in zip(grad_mga, rot_mad)])

        if orthogonalize:
            if orth_method == "qr":
                newrot_mgd = orthogonalize3_qr(newrot_mgd)
            elif orth_method == "svd":
                newrot_mgd = orthogonalize3_svd(newrot_mgd)
            elif orth_method == "cross":
                newrot_mgd = orthogonalize3_cross(newrot_mgd)
            else:
                # BUG FIX: the message previously interpolated the boolean
                # `orthogonalize` instead of the offending method name
                raise Exception("unknown orthogonalization method %s"%orth_method)
        return newrot_mgd

    def transform_hmats(self, hmat_mAD):
        """
        Transform (D+1) x (D+1) homogeneous matrices: warp the translations as
        points and the rotation blocks as bases.
        """
        hmat_mGD = np.empty_like(hmat_mAD)
        hmat_mGD[:,:3,3] = self.transform_points(hmat_mAD[:,:3,3])
        hmat_mGD[:,:3,:3] = self.transform_bases(hmat_mAD[:,:3,3], hmat_mAD[:,:3,:3])
        hmat_mGD[:,3,:] = np.array([0,0,0,1])
        return hmat_mGD

    def compute_numerical_jacobian(self, x_d, epsilon=0.0001):
        """Finite-difference Jacobian of transform_points at a single point x_d."""
        # np.asfarray was removed in NumPy 2.0; asarray with an explicit dtype
        # is the portable equivalent
        x0 = np.asarray(x_d, dtype=float)
        f0 = self.transform_points(x0)
        # BUG FIX: shape must be a tuple — np.zeros(len(x0), len(f0)) passed
        # len(f0) as the dtype argument and raised TypeError
        jac = np.zeros((len(x0), len(f0)))
        dx = np.zeros(len(x0))
        for i in range(len(x0)):
            dx[i] = epsilon
            jac[i] = (self.transform_points(x0+dx) - f0) / epsilon
            dx[i] = 0.
        return jac.transpose()
# Pinhole-camera intrinsics for a 640x480 Kinect-style depth sensor.
cx = 320.-.5   # principal point, x (pixels)
cy = 240.-.5   # principal point, y (pixels)
DEFAULT_F = 535.  # focal length (pixels)

def xyZ_to_XY(x,y,Z,f=DEFAULT_F):
    """Back-project pixel coordinates (x, y) at depth Z to camera-frame (X, Y)."""
    scale = Z/f
    return ((x - cx)*scale, (y - cy)*scale)

def XYZ_to_xy(X,Y,Z,f=DEFAULT_F):
    """Project a camera-frame point (X, Y, Z) to pixel coordinates (x, y)."""
    projection = f/Z
    return (X*projection + cx, Y*projection + cy)

def depth_to_xyz(depth,f=DEFAULT_F):
    """Convert a 480x640 depth image (millimeters) to a (480, 640, 3) point cloud in meters."""
    cols, rows = np.meshgrid(np.arange(640), np.arange(480))
    assert depth.shape == (480, 640)
    cloud = np.empty((480,640,3))
    Z = cloud[:,:,2] = depth / 1000. # convert mm -> meters
    cloud[:,:,0] = (cols - cx)*(Z/f)
    cloud[:,:,1] = (rows - cy)*(Z/f)
    return cloud

def downsample(xyz, v):
    """Voxel-downsample an (n, 3) cloud with leaf size v via cloudprocpy."""
    import cloudprocpy
    homog = np.ones((len(xyz),4),'float')  # PCL wants homogeneous coordinates
    homog[:,:3] = xyz
    cloud = cloudprocpy.CloudXYZ()
    cloud.from2dArray(homog)
    return cloudprocpy.downsampleCloud(cloud, v).to2dArray()[:,:3]
"""
Register point clouds to each other


arrays are named like name_abc
abc are subscripts and indicate what that tensor index refers to

index name conventions:
    m: test point index
    n: training point index
    a: input coordinate
    g: output coordinate
    d: gripper coordinate
"""

from __future__ import division

import numpy as np
import scipy.spatial.distance as ssd

from lfd.tpsopt.transformations import ThinPlateSpline, fit_ThinPlateSpline
# NOTE(review): implicit relative imports (Python 2 only); the module also
# uses `xrange` below, so this file targets Python 2.
import tps
from settings import BEND_COEF_DIGITS

# from svds import svds


def loglinspace(a,b,n):
    "n numbers between a to b (inclusive) with constant ratio between consecutive numbers"
    return np.exp(np.linspace(np.log(a),np.log(b),n))

def registration_cost(xyz0, xyz1, f_p_mats=None, f_o_mats=None, b_p_mats=None, b_o_mats=None):
    # Symmetric registration cost: sum of forward (f) and backward (g) TPS
    # fitting costs after a bijective TPS-RPM fit.
    # NOTE(review): tps_rpm_bij is called here without the fsolve/gsolve
    # arguments its signature requires, and tps_rpm_bij_presolve is not
    # defined in this module -- confirm against callers / rest of package.
    if f_p_mats is None:
        f, g = tps_rpm_bij(xyz0, xyz1, n_iter=10)
    else:
        f, g = tps_rpm_bij_presolve(xyz0, xyz1, n_iter=10, f_p_mats=f_p_mats, f_o_mats=f_o_mats,
                                    b_p_mats=b_p_mats, b_o_mats=b_o_mats)
    return f._cost + g._cost

def unit_boxify(x_na):
    """Scale and translate points so the largest axis-extent spans a unit box
    centered at the origin.

    Returns (transformed points, (scaling, scaled_translation)) so the
    transform can later be undone by unscale_tps.
    """
    ranges = x_na.ptp(axis=0)
    dlarge = ranges.argmax()
    unscaled_translation = - (x_na.min(axis=0) + x_na.max(axis=0))/2
    scaling = 1./ranges[dlarge]
    scaled_translation = unscaled_translation * scaling
    return x_na*scaling + scaled_translation, (scaling, scaled_translation)

def unscale_tps(f, src_params, targ_params):
    """Only works in 3d!!"""
    # Compose f with affine maps that undo the unit_boxify normalization on
    # the source side (aff_in) and on the target side (aff_out).
    # NOTE(review): Affine and Composition are not imported in this module;
    # they presumably live in lfd.tpsopt.transformations -- confirm.
    p,q = src_params
    r,s = targ_params

    d = len(q)

    lin_in = np.eye(d)*p
    trans_in = q
    aff_in = Affine(lin_in, trans_in)

    lin_out = np.eye(d)/r
    trans_out = -s/r
    aff_out = Affine(lin_out, trans_out)

    return Composition([aff_in, f, aff_out])

# @profile
def tps_rpm_bij(x_nd, y_md, fsolve, gsolve,n_iter = 20, reg_init = .1, reg_final = .001, rad_init = .1,
                rad_final = .005, rot_reg = 1e-3, outlierprior=1e-1, outlierfrac=2e-1, vis_cost_xy=None,
                return_corr=False, check_solver=False):
    """
    tps-rpm algorithm mostly as described by chui and rangaran

    fsolve/gsolve: solver objects whose solve(wt, targ, bend_coef, rot_coef, tps)
        method updates the forward/backward ThinPlateSpline in place
        (presumably precomputed per-cloud solvers -- confirm against callers)
    reg_init/reg_final: regularization on curvature, annealed log-linearly
    rad_init/rad_final: radius for correspondence calculation (meters)
    vis_cost_xy: accepted for interface compatibility; unused in this body
    check_solver: if True, cross-check the solver output against the direct
        fit_ThinPlateSpline solution each iteration
    """

    _,d=x_nd.shape
    # Anneal both the bend coefficient and the correspondence radius.
    regs = np.around(loglinspace(reg_init, reg_final, n_iter), BEND_COEF_DIGITS)
    rads = loglinspace(rad_init, rad_final, n_iter)

    # Initialize f (x -> y) as a per-axis scaling that aligns extents...
    f = ThinPlateSpline(d)
    scale = (np.max(y_md,axis=0) - np.min(y_md,axis=0)) / (np.max(x_nd,axis=0) - np.min(x_nd,axis=0))
    f.lin_ag = np.diag(scale) # align the mins and max
    f.trans_g = np.median(y_md,axis=0) - np.median(x_nd,axis=0) * scale  # align the medians

    # ...and g (y -> x) as its inverse affine map.
    g = ThinPlateSpline(d)
    g.lin_ag = np.diag(1./scale)
    g.trans_g = -np.diag(1./scale).dot(f.trans_g)


    # r_N = None

    for i in xrange(n_iter):
        xwarped_nd = f.transform_points(x_nd)
        ywarped_md = g.transform_points(y_md)

        # Symmetric distances: forward-warped x vs y, and x vs backward-warped y.
        fwddist_nm = ssd.cdist(xwarped_nd, y_md,'euclidean')
        invdist_nm = ssd.cdist(x_nd, ywarped_md,'euclidean')

        r = rads[i]
        prob_nm = np.exp( -(fwddist_nm + invdist_nm) / (2*r) )
        # Sinkhorn-balance with outlier row/column; small epsilon keeps the
        # row/column sums strictly positive for the divisions below.
        corr_nm, r_N, _ = balance_matrix(prob_nm, 10, outlierprior, outlierfrac)
        corr_nm += 1e-9

        wt_n = corr_nm.sum(axis=1)
        wt_m = corr_nm.sum(axis=0)

        # Soft correspondence targets: weighted averages of the other cloud.
        xtarg_nd = (corr_nm/wt_n[:,None]).dot(y_md)
        ytarg_md = (corr_nm/wt_m[None,:]).T.dot(x_nd)

        fsolve.solve(wt_n, xtarg_nd, regs[i], rot_reg, f)
        gsolve.solve(wt_m, ytarg_md, regs[i], rot_reg, g)
        if check_solver:
            # Cross-check the solver against a direct dense TPS fit.
            f_test = fit_ThinPlateSpline(x_nd, xtarg_nd, bend_coef = regs[i], wt_n=wt_n, rot_coef = rot_reg)
            g_test = fit_ThinPlateSpline(y_md, ytarg_md, bend_coef = regs[i], wt_n=wt_m, rot_coef = rot_reg)
            tol = 1e-4
            assert np.allclose(f.trans_g, f_test.trans_g, atol=tol)
            assert np.allclose(f.lin_ag, f_test.lin_ag, atol=tol)
            assert np.allclose(f.w_ng, f_test.w_ng, atol=tol)
            assert np.allclose(g.trans_g, g_test.trans_g, atol=tol)
            assert np.allclose(g.lin_ag, g_test.lin_ag, atol=tol)
            assert np.allclose(g.w_ng, g_test.w_ng, atol=tol)

    # Final fit costs, normalized by mean correspondence weight so the two
    # directions are comparable.
    f._cost = tps.tps_cost(f.lin_ag, f.trans_g, f.w_ng, f.x_na, xtarg_nd, regs[i], wt_n=wt_n)/wt_n.mean()
    g._cost = tps.tps_cost(g.lin_ag, g.trans_g, g.w_ng, g.x_na, ytarg_md, regs[i], wt_n=wt_m)/wt_m.mean()
    if return_corr:
        return (f, g), corr_nm
    return f,g

def balance_matrix(prob_nm, max_iter, p, outlierfrac, r_N = None):
    """Sinkhorn-style row/column balancing of prob_nm, augmented with one
    outlier row and one outlier column filled with prior p.

    The outlier row/column sums are scaled by outlierfrac.  Returns the
    balanced inner n x m block plus the final row (r_N) and column (c_M)
    scaling coefficients; r_N can be passed back in to warm-start.
    """

    n,m = prob_nm.shape
    prob_NM = np.empty((n+1, m+1), 'f4')
    prob_NM[:n, :m] = prob_nm
    prob_NM[:n, m] = p
    prob_NM[n, :m] = p
    prob_NM[n, m] = p*np.sqrt(n*m)

    a_N = np.ones((n+1),'f4')
    a_N[n] = m*outlierfrac
    b_M = np.ones((m+1),'f4')
    b_M[m] = n*outlierfrac

    if r_N is None: r_N = np.ones(n+1,'f4')

    # Alternate column/row rescaling toward the target marginals a_N, b_M.
    for _ in xrange(max_iter):
        c_M = b_M/r_N.dot(prob_NM)
        r_N = a_N/prob_NM.dot(c_M)

    prob_NM *= r_N[:,None]
    prob_NM *= c_M[None,:]

    return prob_NM[:n, :m], r_N, c_M
"""
Wrap strings in ANSI terminal escape codes for colored output.
"""

# Color name -> ANSI foreground color code.
color2num = dict(
    gray=30,
    red=31,
    green=32,
    yellow=33,
    blue=34,
    magenta=35,
    cyan=36,
    white=37,
    crimson=38
)


def colorize(string, color, bold=False, highlight=False):
    """Return `string` wrapped in ANSI escape codes.

    `highlight` shifts the code by 10 into the background-color range;
    `bold` additionally applies the bold attribute.
    """
    code = color2num[color]
    if highlight:
        code += 10
    attrs = [str(code)]
    if bold:
        attrs.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attrs), string)
43 | ''' 44 | def __init__(self): 45 | # Open a null file 46 | while (True): 47 | try: 48 | self.null_fds = os.open(os.devnull,os.O_RDWR) 49 | break 50 | except OSError: 51 | time.sleep(1) 52 | # Save the actual stdout file descriptor 53 | self.save_fds = os.dup(1) 54 | 55 | def __enter__(self): 56 | # Assign the null pointers to stdout 57 | os.dup2(self.null_fds,1) 58 | os.close(self.null_fds) 59 | 60 | def __exit__(self, *_): 61 | # Re-assign the real stdout back 62 | os.dup2(self.save_fds,1) 63 | # Close the null file 64 | os.close(self.save_fds) 65 | -------------------------------------------------------------------------------- /requirements_doc.txt: -------------------------------------------------------------------------------- 1 | Sphinx>=1.3b1 -------------------------------------------------------------------------------- /scripts/auto_performance.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import h5py 3 | from lfd.rapprentice import eval_util 4 | 5 | def estimate_performance(fname): 6 | results_file = h5py.File(fname, 'r') 7 | loadresult_items = eval_util.get_indexed_items(results_file) 8 | 9 | num_knots = 0 10 | num_misgrasps = 0 11 | num_infeasible = 0 12 | action_time = 0 13 | exec_time = 0 14 | 15 | for i_task, task_info in loadresult_items: 16 | knot_exists = False 17 | infeasible = False 18 | misgrasp = False 19 | 20 | for i_step in range(len(task_info)): 21 | results = eval_util.load_task_results_step(fname, i_task, i_step) 22 | 23 | eval_stats = results['eval_stats'] 24 | misgrasp |= eval_stats.misgrasp 25 | infeasible |= not eval_stats.feasible 26 | action_time += eval_stats.action_elapsed_time 27 | exec_time += eval_stats.exec_elapsed_time 28 | 29 | if results['knot']: 30 | knot_exists = True 31 | elif i_step == len(task_info)-1: 32 | print i_task 33 | 34 | if infeasible: 35 | num_infeasible += 1 36 | if misgrasp: 37 | num_misgrasps += 1 38 | 39 | if knot_exists: 40 | num_knots 
+= 1 41 | num_tasks = len(loadresult_items) 42 | 43 | print "# Misgrasps:", num_misgrasps 44 | print "# Infeasible:", num_infeasible 45 | print "Time taken to choose demo:", action_time, "seconds" 46 | print "Time taken to warp and execute demo:", exec_time, "seconds" 47 | return num_knots, num_tasks 48 | 49 | if __name__ == '__main__': 50 | parser = argparse.ArgumentParser() 51 | parser.add_argument("results_file", type=str) 52 | args = parser.parse_args() 53 | 54 | results_file = h5py.File(args.results_file, 'r') 55 | 56 | num_successes, num_tasks = estimate_performance(args.results_file) 57 | print "Successes / Total: %d/%d" % (num_successes, num_tasks) 58 | print "Success rate:", float(num_successes)/float(num_tasks) 59 | -------------------------------------------------------------------------------- /scripts/build_all.sh: -------------------------------------------------------------------------------- 1 | 2 | python build.py mul_grip bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_fig8_sampledlabel_40a.h5 ../data/misc/overhand_actions.h5 --C 100 3 | 4 | #python build.py mul_s bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_sampledlabel_40b.h5 ../bigdata/misc/overhand_actions.h5 --C 100 5 | python build.py mul_grip bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_fig8_sampledlabel_40b.h5 ../bigdata/misc/overhand_actions.h5 --C 100 6 | 7 | #python build.py mul_s bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_sampledlabel_40c.h5 ../bigdata/misc/overhand_actions.h5 --C 100 8 | python build.py mul_grip bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_fig8_sampledlabel_40c.h5 ../bigdata/misc/overhand_actions.h5 --C 100 9 | 10 | 11 | #python build.py mul_s bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_sampledlabel_80a.h5 ../bigdata/misc/overhand_actions.h5 --C 100 12 | python build.py mul_grip bellman ../data/misc/Oct1_landmarks.h5 full 
../data/labels/Oct1_fig8_sampledlabel_80a.h5 ../bigdata/misc/overhand_actions.h5 --C 100 13 | 14 | 15 | #python build.py mul_s bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_sampledlabel_80b.h5 ../bigdata/misc/overhand_actions.h5 --C 100 16 | python build.py mul_grip bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_fig8_sampledlabel_80b.h5 ../bigdata/misc/overhand_actions.h5 --C 100 17 | 18 | #python build.py mul_s bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_sampledlabel_80c.h5 ../bigdata/misc/overhand_actions.h5 --C 100 19 | python build.py mul_grip bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_fig8_sampledlabel_80c.h5 ../bigdata/misc/overhand_actions.h5 --C 100 20 | 21 | #python build.py mul_s bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_sampledlabel_120a.h5 ../bigdata/misc/overhand_actions.h5 --C 100 22 | python build.py mul_grip bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_fig8_sampledlabel_120a.h5 ../bigdata/misc/overhand_actions.h5 --C 100 23 | 24 | #python build.py mul_s bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_sampledlabel_120b.h5 ../bigdata/misc/overhand_actions.h5 --C 100 25 | python build.py mul_grip bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_fig8_sampledlabel_120b.h5 ../bigdata/misc/overhand_actions.h5 --C 100 26 | 27 | 28 | #python build.py mul_s bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_sampledlabel_120c.h5 ../bigdata/misc/overhand_actions.h5 --C 100 29 | python build.py mul_grip bellman ../data/misc/Oct1_landmarks.h5 full ../data/labels/Oct1_fig8_sampledlabel_120c.h5 ../bigdata/misc/overhand_actions.h5 --C 100 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /scripts/constants.py: -------------------------------------------------------------------------------- 1 | MAX_ACTIONS_TO_TRY = 10 # Number of actions 
#!/usr/bin/env python
#
# Run this script with:
#     ./filter_labeled_examples.py <examples_file> <output_examples_file>
# Include flag --remove_short_traj to remove trajectories with length < 4.
# Include flag --remove_deadend_traj to remove trajectories that end in a
# deadend (as opposed to a knot).
# Include flag --remove_long_traj to remove trajectories with length > 5.
#
# Outputs a new labeled examples file, excluding trajectories depending on which
# flags are set (see above explanation). Renumbers the trajectories so that
# their ids are in consecutive numerical order, starting from 0.
# Assumes ids of the labelled examples are in consecutive numerical order,
# starting from 0.

import argparse, h5py

def filter_labeled_examples(examples, output, remove_short, remove_long,
                            remove_deadend):
    # Walk the examples in id order.  A new trajectory starts at index i when
    # examples[i]['pred'] == i (an example is its own predecessor).  On each
    # trajectory boundary, retroactively delete the just-copied trajectory
    # from `output` if it matches any removal criterion, then keep copying
    # with renumbered consecutive output ids.
    num_examples = len(examples.keys())
    if num_examples == 0:
        return

    output_id = 0  # To keep track of renumbering
    prev_start = 0  # id (in `examples`) where the current trajectory began
    for i in range(num_examples):
        k = str(i)
        pred = int(examples[k]['pred'][()])
        if pred == i and i != 0:
            # A new trajectory starts here; decide whether to drop the one
            # that just ended (ids prev_start .. i-1).
            to_remove = False
            if remove_short and i - prev_start < 4:
                # trajectory has less than 4 steps, inc. endstate
                to_remove = True
            if remove_long and i - prev_start > 5:
                to_remove = True
            if remove_deadend and 'deadend' in examples[str(i-1)].keys() and \
               examples[str(i-1)]['deadend'][()] == 1:
                # trajectory ends in a deadend
                to_remove = True
            if to_remove:
                # Roll back output_id and delete the copied groups.
                print "Removing trajectory starting at id ", prev_start, ", length: ", i - prev_start
                for i_rm in range(i - prev_start):
                    output_id -= 1
                    print "Deleting output id ", output_id
                    del output[str(output_id)]
                print "Adding again at output id ", output_id
            prev_start = i

        new_group = output.create_group(str(output_id))
        for group_key in examples[k].keys():
            # Update the value of 'pred' correctly (with the renumbering)
            if group_key == 'pred':
                assert pred == i or pred == i-1, "Invalid predecessor value for %i"%i
                if pred == i:
                    new_group[group_key] = str(output_id)
                else:
                    new_group[group_key] = str(output_id - 1)
            else:
                new_group[group_key] = examples[k][group_key][()]
        output_id += 1

    # Handle the final trajectory (ids prev_start .. num_examples-1), which
    # has no following trajectory boundary to trigger the check above.
    # NOTE(review): the long-trajectory threshold here is > 6 but > 5 in the
    # loop above -- confirm whether this asymmetry is intentional.
    to_remove = False
    if remove_short and num_examples - prev_start < 4:
        to_remove = True
    if remove_long and num_examples - prev_start > 6:
        to_remove = True
    if remove_deadend and 'deadend' in examples[str(num_examples-1)].keys() and \
       examples[str(num_examples-1)]['deadend'][()] == 1:
        to_remove = True
    if to_remove:
        print "Removing trajectory starting at id ", prev_start, ", length: ", num_examples - prev_start
        for i_rm in range(num_examples - prev_start):
            output_id -= 1
            print "Deleting output id ", output_id
            del output[str(output_id)]

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('examples_file')
    parser.add_argument('output_examples_file')
    parser.add_argument('--remove_short_traj', action='store_true')
    parser.add_argument('--remove_long_traj', action='store_true')
    parser.add_argument('--remove_deadend_traj', action='store_true')
    args = parser.parse_args()

    examples = h5py.File(args.examples_file, 'r')
    output = h5py.File(args.output_examples_file, 'w')
    filter_labeled_examples(examples, output, args.remove_short_traj,
                            args.remove_long_traj, args.remove_deadend_traj)
    examples.close()
    output.close()
Only makes subpackages 23 | and modules for which the corresponding one in the lfd package has a 24 | settings.py file. 25 | """ 26 | lfd_name = os.path.dirname(lfd.__file__) 27 | make_settings_tree(lfd_name, lfd_settings_name) 28 | 29 | def main(): 30 | parser = argparse.ArgumentParser() 31 | parser.add_argument('lfd_settings_name', type=str, help="Destination name for the lfd_settings package.") 32 | args = parser.parse_args() 33 | 34 | make_lfd_settings_package(args.lfd_settings_name) 35 | 36 | if __name__ == '__main__': 37 | main() 38 | -------------------------------------------------------------------------------- /scripts/print_results.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import h5py 3 | 4 | def estimate_performance(args, results_file): 5 | 6 | num_knots = 0 7 | num_tasks = len(results_file)-1 8 | 9 | start_i = min([int(i) for i in results_file.keys() if i != 'args']) 10 | succ = [] 11 | failures = [] 12 | goal_failures = [] 13 | for i in range(start_i, num_tasks+start_i): 14 | num_steps = len(results_file[str(i)]) 15 | found_goal = False 16 | is_knot = results_file[str(i)][str(num_steps-1)]['results']['knot'][()] 17 | if is_knot: 18 | num_knots += 1 19 | succ.append(i) 20 | else: 21 | failures.append(i) 22 | if args.report_goal: 23 | for j in range(num_steps): 24 | if results_file[str(i)][str(j)]['results']['found_goal'][()] and j<3: 25 | found_goal = True 26 | if not(found_goal): 27 | goal_failures.append(i) 28 | print 'Failures:', failures 29 | print 'Successes', succ 30 | if args.report_goal: 31 | print 'Goal Failures:', goal_failures 32 | return num_knots, num_tasks, failures 33 | 34 | if __name__ == '__main__': 35 | parser = argparse.ArgumentParser() 36 | parser.add_argument("results_file", type=str) 37 | parser.add_argument('--report_goal', action='store_true') 38 | 39 | args = parser.parse_args() 40 | 41 | results_file = h5py.File(args.results_file, 'r') 42 | 43 | num_successes, 
#!/usr/bin/env python
#
# Run this script with:
#     ./sample_landmarks.py <examples_file> <landmark_file>
# (Fix: the header previously named filter_labeled_examples.py by mistake.)
#
# Make sure to run precompute without fill_traj on the example file first.

import argparse, h5py
import random

def gen_landmarks(examples, landmarks, n_failures=20, n_landmarks=50):
    """Copy a random sample of example groups into the landmarks file.

    examples: mapping of example groups; failure examples have keys starting
        with 'f', landmark (state) examples have keys starting with '('.
    landmarks: destination with an h5py-style copy(source, name) method.
    n_failures / n_landmarks: how many of each kind to sample (generalized
        from the previously hard-coded 20 and 50; raises ValueError if either
        pool is smaller than the requested sample size).
    """
    failure_keys = [k for k in examples.keys() if k.startswith('f')]
    landmark_keys = [k for k in examples.keys() if k.startswith('(')]

    # Parenthesized so this prints identically under Python 2 and 3.
    print(len(failure_keys))

    sampled_keys = random.sample(failure_keys, n_failures) + random.sample(landmark_keys, n_landmarks)

    for key in sampled_keys:
        landmarks.copy(examples[key], key)

if __name__ == '__main__':
    # h5py is only needed when run as a script; importing it here keeps the
    # module importable (e.g. for testing gen_landmarks) without h5py.
    import h5py

    parser = argparse.ArgumentParser()
    parser.add_argument('examples_file')
    parser.add_argument('landmark_file')
    args = parser.parse_args()

    examples = h5py.File(args.examples_file, 'r')
    landmarks = h5py.File(args.landmark_file, 'a')
    gen_landmarks(examples, landmarks)
    examples.close()
    landmarks.close()
../data/results/Sep13_r0.15_n9_no_feature.h5 eval --ground_truth 0 ../bigdata/misc/overhand_actions.h5 ../data/misc/Sep13_r0.15_n9.h5 greedy finger bij 8 | 9 | python eval.py --animation 0 --i_end 100 --resultfile ../data/results/Sep13_r0.1_n7_no_feature.h5 eval --ground_truth 0 ../bigdata/misc/overhand_actions.h5 ../data/misc/Sep13_r0.1_n7.h5 greedy finger bij 10 | 11 | python eval.py --animation 0 --i_end 100 --resultfile ../data/results/Sep13_r0.1_n9_no_feature.h5 eval --ground_truth 0 ../bigdata/misc/overhand_actions.h5 ../data/misc/Sep13_r0.1_n9.h5 greedy finger bij 12 | 13 | python eval.py --animation 0 --i_end 100 --resultfile ../data/results/Sep13_r0.2_n7_no_feature.h5 eval --ground_truth 0 ../bigdata/misc/overhand_actions.h5 ../data/misc/Sep13_r0.2_n7.h5 greedy finger bij 14 | 15 | 16 | -------------------------------------------------------------------------------- /src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include_directories( 2 | ${CMAKE_SOURCE_DIR}/src 3 | ) 4 | 5 | add_subdirectory(tpsopt) 6 | -------------------------------------------------------------------------------- /src/tpsopt/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Append current NVCC flags by something, eg comput capability 2 | set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} --gpu-architecture sm_20) 3 | 4 | ## template for cuda -> boost python 5 | cuda_add_library(tps SHARED tps.cu) 6 | target_link_libraries(tps ${CUDA_LIBRARIES} ${CUDA_CUBLAS_LIBRARIES}) 7 | 8 | boost_python_module(cuda_funcs cuda_funcs.cpp) 9 | target_link_libraries(cuda_funcs tps) 10 | -------------------------------------------------------------------------------- /src/tpsopt/cuda_funcs.cpp: -------------------------------------------------------------------------------- 1 | #include "numpy_utils.hpp" 2 | #include 3 | #include 4 | #include "tps.cuh" 5 | 6 | namespace py = boost::python; 7 | 8 | void 
pyFloatPrintArr(py::object x, int N){ 9 | float* p = getGPUPointer(x); 10 | printf("x ptr is %li\n", (long int) p); 11 | gpuPrintArr(p, N); 12 | } 13 | 14 | void pyIntPrintArr(py::object x, int N){ 15 | int* p = getGPUPointer(x); 16 | printf("x ptr is %li\n", (long int) p); 17 | gpuPrintArr(p, N); 18 | } 19 | 20 | void pyFillMat(py::object dest, py::object val, py::object dims, int N){ 21 | float** dest_ptr = getGPUPointer(dest); 22 | float** val_ptr = getGPUPointer(val); 23 | int* dims_ptr = getGPUPointer(dims); 24 | 25 | fillMat(dest_ptr, val_ptr, dims_ptr, N); 26 | } 27 | 28 | void pySqDiffMat(py::object x, py::object y, py::object z, int N, bool overwrite){ 29 | float** x_ptr = getGPUPointer(x); 30 | float** y_ptr = getGPUPointer(y); 31 | float* z_ptr = getGPUPointer(z); 32 | 33 | sqDiffMat(x_ptr, y_ptr, z_ptr, N, overwrite); 34 | } 35 | 36 | void pyGramMatDist(py::object x, py::object y, py::object dims, float sigma, py::object z, int N){ 37 | float** x_ptr = getGPUPointer(x); 38 | float** y_ptr = getGPUPointer(y); 39 | 40 | int* dims_ptr = getGPUPointer(dims); 41 | float* z_ptr = getGPUPointer(z); 42 | 43 | gramMatDist(x_ptr, y_ptr, dims_ptr, sigma, z_ptr, N); 44 | } 45 | 46 | void pyClosestPointCost(py::object x, py::object y, py::object xdims, py::object ydims, py::object res, int N){ 47 | float** x_ptr = getGPUPointer(x); 48 | float** y_ptr = getGPUPointer(y); 49 | 50 | int* xdims_ptr = getGPUPointer(xdims); 51 | int* ydims_ptr = getGPUPointer(ydims); 52 | 53 | float* res_ptr = getGPUPointer(res); 54 | 55 | closestPointCost(x_ptr, y_ptr, xdims_ptr, ydims_ptr, res_ptr, N); 56 | } 57 | 58 | void pyScalePoints(py::object x, py::object xdims, float scale, float t0, float t1, float t2, int N){ 59 | float** x_ptr = getGPUPointer(x); 60 | int* xdims_ptr = getGPUPointer(xdims); 61 | 62 | scalePoints(x_ptr, xdims_ptr, scale, t0, t1, t2, N); 63 | } 64 | 65 | void pyInitProbNM(py::object x, py::object y, py::object xw, py::object yw, 66 | py::object xdims, py::object 
ydims, int N, 67 | float outlierprior, float outlierfrac, float T, 68 | py::object corr_cm, py::object corr_rm){ 69 | /* 70 | * Initilialized correspondence matrix returned in corr 71 | */ 72 | float** x_ptr = getGPUPointer(x); 73 | float** xw_ptr = getGPUPointer(xw); 74 | int* xdims_ptr = getGPUPointer(xdims); 75 | 76 | float** y_ptr = getGPUPointer(y); 77 | float** yw_ptr = getGPUPointer(yw); 78 | int* ydims_ptr = getGPUPointer(ydims); 79 | 80 | float** corr_ptr_cm = getGPUPointer(corr_cm); 81 | float** corr_ptr_rm = getGPUPointer(corr_rm); 82 | 83 | initProbNM(x_ptr, y_ptr, xw_ptr, yw_ptr, N, xdims_ptr, ydims_ptr, 84 | outlierprior, outlierfrac, T, corr_ptr_cm, corr_ptr_rm); 85 | } 86 | 87 | void pyNormProbNM(py::object corr_cm, py::object corr_rm, py::object xdims, 88 | py::object ydims, int N, float outlier_frac, int norm_iters, 89 | py::object row_coeffs, py::object rn_col_coeffs, py::object cn_col_coeffs){ 90 | 91 | float** corr_ptr_cm = getGPUPointer(corr_cm); 92 | float** corr_ptr_rm = getGPUPointer(corr_rm); 93 | 94 | float** r_c = getGPUPointer(row_coeffs); 95 | float** rn_c_c = getGPUPointer(rn_col_coeffs); 96 | float** cn_c_c = getGPUPointer(cn_col_coeffs); 97 | 98 | int* xdims_ptr = getGPUPointer(xdims); 99 | int* ydims_ptr = getGPUPointer(ydims); 100 | 101 | normProbNM(corr_ptr_cm, corr_ptr_rm, xdims_ptr, ydims_ptr, 102 | N, outlier_frac, norm_iters, r_c, rn_c_c, cn_c_c); 103 | } 104 | 105 | void pyGetTargPts(py::object x, py::object y, py::object xw, py::object yw, 106 | py::object corr_cm, py::object corr_rm , 107 | py::object row_coeffs, py::object rn_col_coeffs, py::object cn_col_coeffs, 108 | py::object xdims, py::object ydims, float cutoff, 109 | int N, py::object xt, py::object yt){ 110 | /* 111 | * target vectors returned in xt and yt 112 | */ 113 | 114 | float** x_ptr = getGPUPointer(x); 115 | float** xw_ptr = getGPUPointer(xw); 116 | int* xdims_ptr = getGPUPointer(xdims); 117 | 118 | float** y_ptr = getGPUPointer(y); 119 | float** yw_ptr = 
getGPUPointer(yw); 120 | int* ydims_ptr = getGPUPointer(ydims); 121 | 122 | float** corr_ptr_cm = getGPUPointer(corr_cm); 123 | float** corr_ptr_rm = getGPUPointer(corr_rm); 124 | 125 | float** r_c = getGPUPointer(row_coeffs); 126 | float** rn_c_c = getGPUPointer(rn_col_coeffs); 127 | float** cn_c_c = getGPUPointer(cn_col_coeffs); 128 | 129 | 130 | float** xt_ptr = getGPUPointer(xt); 131 | float** yt_ptr = getGPUPointer(yt); 132 | 133 | getTargPts(x_ptr, y_ptr, xw_ptr, yw_ptr, corr_ptr_cm, corr_ptr_rm, r_c, rn_c_c, cn_c_c, 134 | xdims_ptr, ydims_ptr, cutoff, N, xt_ptr, yt_ptr); 135 | } 136 | 137 | void pyCheckCudaErr(){ 138 | checkCudaErr(); 139 | } 140 | 141 | void pyResetDevice(){ 142 | resetDevice(); 143 | } 144 | 145 | BOOST_PYTHON_MODULE(cuda_funcs) { 146 | py::def("float_gpu_print_arr", &pyFloatPrintArr, (py::arg("x"), py::arg("N"))); 147 | py::def("int_gpu_print_arr", &pyIntPrintArr, (py::arg("x"), py::arg("N"))); 148 | 149 | py::def("fill_mat", &pyFillMat, (py::arg("dest"), py::arg("vals"), py::arg("dims"), py::arg("N"))); 150 | 151 | py::def("sq_diffs", &pySqDiffMat, (py::arg("x"), py::arg("y"), py::arg("z"), py::arg("N"), py::arg("overwrite"))); 152 | 153 | py::def("gram_mat_dist", &pyGramMatDist, (py::arg("x"), py::arg("y"), py::arg("dims"), 154 | py::arg("sigma"), py::arg("z"), py::arg("N"))); 155 | 156 | py::def("closest_point_cost", &pyClosestPointCost, (py::arg("x"), py::arg("y"), py::arg("xdims"), py::arg("ydims"), py::arg("N"))); 157 | 158 | py::def("scale_points", &pyScalePoints, (py::arg("x"), py::arg("xdims"), py::arg("scale"), py::arg("t0"), py::arg("t1"), py::arg("t2"), py::arg("N"))); 159 | 160 | py::def("init_prob_nm", &pyInitProbNM, (py::arg("x"), py::arg("y"), py::arg("xw"), py::arg("yw"), 161 | py::arg("xdims"), py::arg("ydims"), py::arg("N"), 162 | py::arg("outlierprior"), py::arg("outlierfrac"), py::arg("T"), 163 | py::arg("corr_cm"), py::arg("corr_rm"))); 164 | 165 | py::def("norm_prob_nm", &pyNormProbNM, (py::arg("corr_cm"), 
py::arg("corr_rm"), 166 | py::arg("xdims"), py::arg("ydims"), 167 | py::arg("N"), py::arg("outlier_frac"), 168 | py::arg("norm_iters"), py::arg("row_coeffs"), 169 | py::arg("rn_col_coeffs"), py::arg("cn_col_coeffs"))); 170 | 171 | py::def("get_targ_pts", &pyGetTargPts, (py::arg("x"), py::arg("y"), py::arg("xw"), py::arg("yw"), 172 | py::arg("corr_cm"), py::arg("corr_rm"), 173 | py::arg("row_coeffs"), py::arg("rn_col_coeffs"), 174 | py::arg("cn_col_coeffs"), 175 | py::arg("xdims"), py::arg("ydims"), 176 | py::arg("outlier_cutoff"), py::arg("N"), 177 | py::arg("xt"), py::arg("yt"))); 178 | py::def("check_cuda_err", &pyCheckCudaErr); 179 | py::def("reset_cuda", &pyResetDevice); 180 | } 181 | -------------------------------------------------------------------------------- /src/tpsopt/numpy_utils.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | namespace py = boost::python; 5 | 6 | py::object np_mod; 7 | 8 | py::list toPyList(const std::vector& x) { 9 | py::list out; 10 | for (int i=0; i < x.size(); ++i) out.append(x[i]); 11 | return out; 12 | } 13 | 14 | template 15 | struct type_traits { 16 | static const char* npname; 17 | }; 18 | template<> const char* type_traits::npname = "float32"; 19 | template<> const char* type_traits::npname = "int32"; 20 | template<> const char* type_traits::npname = "float64"; 21 | template<> const char* type_traits::npname = "uint8"; 22 | 23 | template 24 | T* getListPointer(const py::list& l){ 25 | py::ssize_t len = py::len(l); 26 | T* retval = new T[len]; 27 | for(int i = 0; i < len; ++i) retval[i] = py::extract(l[i]); 28 | return retval; 29 | } 30 | 31 | template 32 | T* getGPUPointer(const py::object& arr) { 33 | long int i = py::extract(arr.attr("ptr")); 34 | T* p = (T*)i; 35 | return p; 36 | } 37 | 38 | template 39 | T* getPointer(const py::object& arr) { 40 | long int i = py::extract(arr.attr("ctypes").attr("data")); 41 | T* p = (T*)i; 42 | return p; 
43 | } 44 | 45 | template 46 | py::object toNdarray1(const T* data, size_t dim0) { 47 | py::object out = np_mod.attr("empty")(py::make_tuple(dim0), type_traits::npname); 48 | T* p = getPointer(out); 49 | memcpy(p, data, dim0*sizeof(T)); 50 | return out; 51 | } 52 | template 53 | py::object toNdarray2(const T* data, size_t dim0, size_t dim1) { 54 | py::object out = np_mod.attr("empty")(py::make_tuple(dim0, dim1), type_traits::npname); 55 | T* pout = getPointer(out); 56 | memcpy(pout, data, dim0*dim1*sizeof(T)); 57 | return out; 58 | } 59 | template 60 | py::object toNdarray3(const T* data, size_t dim0, size_t dim1, size_t dim2) { 61 | py::object out = np_mod.attr("empty")(py::make_tuple(dim0, dim1, dim2), type_traits::npname); 62 | T* pout = getPointer(out); 63 | memcpy(pout, data, dim0*dim1*dim2*sizeof(T)); 64 | return out; 65 | } 66 | -------------------------------------------------------------------------------- /src/tpsopt/tps.cuh: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #define MAX_DIM 150 5 | #define DATA_DIM 3 6 | 7 | #define MIN(a, b) ((a < b) ? a : b) 8 | #define MAX(a, b) ((a > b) ? 
a : b) 9 | 10 | void gpuPrintArr(float* x, int N); 11 | void gpuPrintArr(int* x, int N); 12 | 13 | void fillMat(float* dest_ptr[], float* val_ptr[], int* dims, int N); 14 | 15 | void sqDiffMat(float* x_ptr[], float* y_ptr[], float* z, int N, bool overwrite); 16 | 17 | void gramMatDist(float* x_ptr[], float* y_ptr[], int* dims, float sigma, float* z, int N); 18 | 19 | void closestPointCost(float* x_ptr[], float* y_ptr[], int* xdims, int* ydims, float* res, int N); 20 | 21 | void scalePoints(float* x_ptr[], int* xdims, float scale, float t0, float t1, float t2, int N); 22 | 23 | void initProbNM(float* x[], float* y[], float* xw[], float* yw[], 24 | int N, int* xdims, int* ydims, float outlierprior, float outlierfrac, 25 | float T, float* corr_cm[], float* corr_rm[]); 26 | 27 | void normProbNM(float* corr_cm[], float* corr_rm[], int* xdims, int* ydims, int N, 28 | float outlier_frac, int norm_iters, 29 | float* row_c_res[], float* cm_col_c_res[], float* rm_col_c_res[]); 30 | 31 | void getTargPts(float* x[], float* y[], float* xw[], float* yw[], 32 | float* corr_cm[], float* corr_rm[], 33 | float* row_c_ptrs[], float* cm_col_c_ptrs[], float* rm_col_c_ptrs[], 34 | int* xdims, int* ydims, float cutoff, int N, 35 | float* xt[], float* yt[]); 36 | 37 | void checkCudaErr(); 38 | 39 | void resetDevice(); 40 | -------------------------------------------------------------------------------- /test/test_registration.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import division 4 | 5 | import numpy as np 6 | from lfd.demonstration.demonstration import Demonstration, SceneState 7 | from lfd.registration.registration import TpsRpmRegistration, TpsRpmRegistrationFactory 8 | from lfd.registration import tps, solver 9 | from lfd.registration import _has_cuda 10 | from tempfile import mkdtemp 11 | import sys, time 12 | import unittest 13 | 14 | class TestRegistration(unittest.TestCase): 15 | def 
setUp(self): 16 | np.random.seed(0) 17 | 18 | def generate_cloud(x_center_pert=0, max_noise=0.02): 19 | # generates 40 cm by 60 cm cloud with optional pertubation along the x-axis 20 | grid = np.array(np.meshgrid(np.linspace(-.2,.2,21), np.linspace(-.3,.3,31))).T.reshape((-1,2)) 21 | grid = np.c_[grid, np.zeros(len(grid))] 22 | cloud = grid + x_center_pert * np.c_[(0.3 - np.abs(grid[:,1]-0))/0.3, np.zeros((len(grid),2))] + (np.random.random((len(grid), 3)) - 0.5) * 2 * max_noise 23 | return cloud 24 | 25 | self.demos = {} 26 | for x_center_pert in np.arange(0.1, 0.4, 0.1): 27 | demo_name = "demo_{}".format(x_center_pert) 28 | demo_cloud = generate_cloud(x_center_pert=x_center_pert) 29 | demo_scene_state = SceneState(demo_cloud, downsample_size=0.025) 30 | demo = Demonstration(demo_name, demo_scene_state, None) 31 | self.demos[demo_name] = demo 32 | 33 | test_cloud = generate_cloud(x_center_pert=0.2) 34 | self.test_scene_state = SceneState(test_cloud, downsample_size=0.025) 35 | 36 | def test_tps_rpm_solvers(self): 37 | tmp_cachedir = mkdtemp() 38 | 39 | reg_factory = TpsRpmRegistrationFactory(self.demos, f_solver_factory=None) 40 | sys.stdout.write("computing costs: no solver... ") 41 | sys.stdout.flush() 42 | start_time = time.time() 43 | costs = reg_factory.batch_cost(self.test_scene_state) 44 | print "done in {}s".format(time.time() - start_time) 45 | 46 | reg_factory_solver = TpsRpmRegistrationFactory(self.demos, f_solver_factory=solver.CpuTpsSolverFactory(cachedir=tmp_cachedir)) 47 | sys.stdout.write("computing costs: solver... ") 48 | sys.stdout.flush() 49 | start_time = time.time() 50 | costs_solver = reg_factory_solver.batch_cost(self.test_scene_state) 51 | print "done in {}s".format(time.time() - start_time) 52 | sys.stdout.write("computing costs: cached solver... 
") 53 | sys.stdout.flush() 54 | start_time = time.time() 55 | costs_solver_cached = reg_factory_solver.batch_cost(self.test_scene_state) 56 | print "done in {}s".format(time.time() - start_time) 57 | 58 | if _has_cuda: 59 | reg_factory_gpu = TpsRpmRegistrationFactory(self.demos, f_solver_factory=solver.GpuTpsSolverFactory(cachedir=tmp_cachedir)) 60 | sys.stdout.write("computing costs: gpu solver... ") 61 | sys.stdout.flush() 62 | start_time = time.time() 63 | costs_gpu = reg_factory_gpu.batch_cost(self.test_scene_state) 64 | print "done in {}s".format(time.time() - start_time) 65 | sys.stdout.write("computing costs: cached gpu solver... ") 66 | sys.stdout.flush() 67 | start_time = time.time() 68 | costs_gpu_cached = reg_factory_gpu.batch_cost(self.test_scene_state) 69 | print "done in {}s".format(time.time() - start_time) 70 | else: 71 | print "couldn't run GPU tests since the GPU is not configured properly" 72 | 73 | for demo_name in self.demos.keys(): 74 | self.assertTrue(np.allclose(costs[demo_name], costs_solver[demo_name])) 75 | self.assertTrue(np.allclose(costs[demo_name], costs_solver_cached[demo_name])) 76 | if _has_cuda: 77 | self.assertTrue(np.allclose(costs[demo_name], costs_gpu[demo_name])) 78 | self.assertTrue(np.allclose(costs[demo_name], costs_gpu_cached[demo_name])) 79 | 80 | def test_tps_objective(self): 81 | reg_factory = TpsRpmRegistrationFactory({}, f_solver_factory=solver.CpuTpsSolverFactory(use_cache=False)) 82 | reg = reg_factory.register(self.demos.values()[0], self.test_scene_state) 83 | 84 | x_na = reg.f.x_na 85 | y_ng = reg.f.y_ng 86 | wt_n = reg.f.wt_n 87 | rot_coef = reg.f.rot_coef 88 | bend_coef = reg.f.bend_coef 89 | 90 | # code from tps_fit3 91 | n,d = x_na.shape 92 | 93 | K_nn = tps.tps_kernel_matrix(x_na) 94 | Q = np.c_[np.ones((n,1)), x_na, K_nn] 95 | rot_coefs = np.ones(d) * rot_coef if np.isscalar(rot_coef) else np.asarray(rot_coef) 96 | A = np.r_[np.zeros((d+1,d+1)), np.c_[np.ones((n,1)), x_na]].T 97 | 98 | WQ = wt_n[:,None] * 
Q 99 | QWQ = Q.T.dot(WQ) 100 | H = QWQ 101 | H[d+1:,d+1:] += bend_coef * K_nn 102 | H[1:d+1, 1:d+1] += np.diag(rot_coefs) 103 | 104 | f = -WQ.T.dot(y_ng) 105 | f[1:d+1,0:d] -= np.diag(rot_coefs) 106 | 107 | # optimum point 108 | theta = np.r_[reg.f.trans_g[None,:], reg.f.lin_ag, reg.f.w_ng] 109 | 110 | # equality constraint 111 | self.assertTrue(np.allclose(A.dot(theta), np.zeros((4,3)))) 112 | # objective 113 | obj = np.trace(theta.T.dot(H.dot(theta))) + 2*np.trace(f.T.dot(theta)) \ 114 | + np.trace(y_ng.T.dot(wt_n[:,None]*y_ng)) + rot_coefs.sum() # constant 115 | self.assertTrue(np.allclose(obj, reg.f.get_objective().sum())) 116 | 117 | def test_tpsrpm_objective_monotonicity(self): 118 | n_iter = 10 119 | em_iter = 10 120 | reg_factory = TpsRpmRegistrationFactory(n_iter=n_iter, em_iter=em_iter, f_solver_factory=solver.AutoTpsSolverFactory(use_cache=False)) 121 | 122 | objs = np.zeros((n_iter, em_iter)) 123 | def callback(i, i_em, x_nd, y_md, xtarg_nd, wt_n, f, corr_nm, rad): 124 | objs[i, i_em] = TpsRpmRegistration.get_objective2(x_nd, y_md, f, corr_nm, rad).sum() 125 | 126 | reg = reg_factory.register(self.demos.values()[0], self.test_scene_state, callback=callback) 127 | print np.diff(objs, axis=1) <= 0 # TODO assert when monotonicity is more robust 128 | 129 | if __name__ == '__main__': 130 | unittest.main() 131 | -------------------------------------------------------------------------------- /test/test_simulation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import division 4 | 5 | import openravepy 6 | import unittest 7 | 8 | import numpy as np 9 | import trajoptpy 10 | 11 | from lfd.environment.simulation import DynamicSimulation 12 | from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject, CylinderSimulationObject, RopeSimulationObject 13 | from lfd.environment import sim_util 14 | 15 | 16 | class TestSimulation(unittest.TestCase): 17 | 
def setUp(self): 18 | table_height = 0.77 19 | helix_ang0 = 0 20 | helix_ang1 = 4*np.pi 21 | helix_radius = .2 22 | helix_center = np.r_[.6, 0] 23 | helix_height0 = table_height + .15 24 | helix_height1 = table_height + .15 + .3 25 | helix_length = np.linalg.norm(np.r_[(helix_ang1 - helix_ang0) * helix_radius, helix_height1 - helix_height0]) 26 | num = np.round(helix_length/.02) 27 | helix_angs = np.linspace(helix_ang0, helix_ang1, num) 28 | helix_heights = np.linspace(helix_height0, helix_height1, num) 29 | init_rope_nodes = np.c_[helix_center + helix_radius * np.c_[np.cos(helix_angs), np.sin(helix_angs)], helix_heights] 30 | rope_params = sim_util.RopeParams() 31 | 32 | cyl_radius = 0.025 33 | cyl_height = 0.3 34 | cyl_pos0 = np.r_[.6, helix_radius, table_height + .25] 35 | cyl_pos1 = np.r_[.6, -helix_radius, table_height + .35] 36 | 37 | sim_objs = [] 38 | sim_objs.append(XmlSimulationObject("robots/pr2-beta-static.zae", dynamic=False)) 39 | sim_objs.append(BoxSimulationObject("table", [1, 0, table_height-.1], [.85, .85, .1], dynamic=False)) 40 | sim_objs.append(RopeSimulationObject("rope", init_rope_nodes, rope_params)) 41 | sim_objs.append(CylinderSimulationObject("cyl0", cyl_pos0, cyl_radius, cyl_height, dynamic=True)) 42 | sim_objs.append(CylinderSimulationObject("cyl1", cyl_pos1, cyl_radius, cyl_height, dynamic=True)) 43 | 44 | self.sim = DynamicSimulation() 45 | self.sim.add_objects(sim_objs) 46 | self.sim.robot.SetDOFValues([0.25], [self.sim.robot.GetJoint('torso_lift_joint').GetJointIndex()]) 47 | sim_util.reset_arms_to_side(self.sim) 48 | 49 | # rotate cylinders by 90 deg 50 | for i in range(2): 51 | bt_cyl = self.sim.bt_env.GetObjectByName('cyl%d'%i) 52 | T = openravepy.matrixFromAxisAngle(np.array([np.pi/2,0,0])) 53 | T[:3,3] = bt_cyl.GetTransform()[:3,3] 54 | bt_cyl.SetTransform(T) # SetTransform needs to be used in the Bullet object, not the openrave body 55 | self.sim.update() 56 | 57 | def test_reproducibility(self): 58 | sim_state0 = 
self.sim.get_state() 59 | 60 | self.sim.set_state(sim_state0) 61 | self.sim.settle(max_steps=1000) 62 | sim_state1 = self.sim.get_state() 63 | 64 | self.sim.set_state(sim_state0) 65 | self.sim.settle(max_steps=1000) 66 | sim_state2 = self.sim.get_state() 67 | 68 | self.assertArrayDictEqual(sim_state1[1], sim_state2[1]) 69 | 70 | def test_viewer_side_effects(self): 71 | """ 72 | Check if stepping the viewer has side effects in the simulation 73 | """ 74 | sim_state0 = self.sim.get_state() 75 | 76 | self.sim.set_state(sim_state0) 77 | self.sim.settle(max_steps=1000, step_viewer=0) 78 | sim_state1 = self.sim.get_state() 79 | 80 | # create viewer 81 | viewer = trajoptpy.GetViewer(self.sim.env) 82 | 83 | self.sim.set_state(sim_state0) 84 | self.sim.settle(max_steps=1000, step_viewer=10) 85 | sim_state2 = self.sim.get_state() 86 | 87 | self.assertArrayDictEqual(sim_state1[1], sim_state2[1]) 88 | 89 | def test_remove_sim_obj(self): 90 | sim_state0 = self.sim.get_state() 91 | 92 | self.sim.set_state(sim_state0) 93 | self.sim.settle(max_steps=1000) 94 | sim_state1 = self.sim.get_state() 95 | 96 | rope = [sim_obj for sim_obj in self.sim.sim_objs if isinstance(sim_obj, RopeSimulationObject)][0] 97 | box = BoxSimulationObject("box", [0]*3, [.1]*3, dynamic=False) 98 | 99 | self.sim.remove_objects([rope]) 100 | self.sim.set_state(sim_state0) 101 | self.sim.settle(max_steps=1000) 102 | sim_state2 = self.sim.get_state() # this adds another rope that has the same properties as rope 103 | 104 | self.sim.add_objects([box]) 105 | self.sim.set_state(sim_state0) 106 | self.sim.settle(max_steps=1000) 107 | sim_state3 = self.sim.get_state() # this removes the recently added box 108 | 109 | rope = [sim_obj for sim_obj in self.sim.sim_objs if isinstance(sim_obj, RopeSimulationObject)][0] 110 | 111 | self.sim.remove_objects([rope]) 112 | self.sim.add_objects([box]) 113 | self.sim.set_state(sim_state0) 114 | self.sim.settle(max_steps=1000) 115 | sim_state4 = self.sim.get_state() 116 | 117 | 
self.assertArrayDictEqual(sim_state1[1], sim_state2[1]) 118 | self.assertArrayDictEqual(sim_state1[1], sim_state3[1]) 119 | self.assertArrayDictEqual(sim_state1[1], sim_state4[1]) 120 | 121 | def assertArrayDictEqual(self, d0, d1): 122 | self.assertSetEqual(set(d0.keys()), set(d1.keys())) 123 | for (k, v0) in d0.iteritems(): 124 | v1 = d1[k] 125 | self.assertTrue(np.all(v0 == v1)) 126 | 127 | if __name__ == '__main__': 128 | unittest.main() 129 | --------------------------------------------------------------------------------