├── .gitignore
├── .gitmodules
├── CMakeLists.txt
├── Dockerfile
├── LICENSE
├── README.md
├── core
├── action_interface.py
├── agent.py
├── const.py
└── utils
│ ├── general_utils.py
│ ├── ros_transform_utils.py
│ └── transform_utils.py
├── launch
├── bringup_interface.launch
├── top_realsense.launch
└── wrist_realsense.launch
├── main.py
├── media
└── assembly_timelapse.png
├── package.xml
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "planner"]
2 | path = planner
3 | url = git@github.com:applied-ai-lab/ramp-planner.git
4 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.0.2)
2 | project(ramp)
3 |
4 | ## Compile as C++11, supported in ROS Kinetic and newer
5 | # add_compile_options(-std=c++11)
6 |
7 | ## Find catkin macros and libraries
8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
9 | ## is used, also find other catkin packages
10 | find_package(catkin REQUIRED)
11 |
12 | ## System dependencies are found with CMake's conventions
13 | # find_package(Boost REQUIRED COMPONENTS system)
14 |
15 |
16 | ## Uncomment this if the package has a setup.py. This macro ensures
17 | ## modules and global scripts declared therein get installed
18 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
19 | # catkin_python_setup()
20 |
21 | ################################################
22 | ## Declare ROS messages, services and actions ##
23 | ################################################
24 |
25 | ## To declare and build messages, services or actions from within this
26 | ## package, follow these steps:
27 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
28 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
29 | ## * In the file package.xml:
30 | ## * add a build_depend tag for "message_generation"
31 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
32 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
33 | ## but can be declared for certainty nonetheless:
34 | ## * add a exec_depend tag for "message_runtime"
35 | ## * In this file (CMakeLists.txt):
36 | ## * add "message_generation" and every package in MSG_DEP_SET to
37 | ## find_package(catkin REQUIRED COMPONENTS ...)
38 | ## * add "message_runtime" and every package in MSG_DEP_SET to
39 | ## catkin_package(CATKIN_DEPENDS ...)
40 | ## * uncomment the add_*_files sections below as needed
41 | ## and list every .msg/.srv/.action file to be processed
42 | ## * uncomment the generate_messages entry below
43 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
44 |
45 | ## Generate messages in the 'msg' folder
46 | # add_message_files(
47 | # FILES
48 | # Message1.msg
49 | # Message2.msg
50 | # )
51 |
52 | ## Generate services in the 'srv' folder
53 | # add_service_files(
54 | # FILES
55 | # Service1.srv
56 | # Service2.srv
57 | # )
58 |
59 | ## Generate actions in the 'action' folder
60 | # add_action_files(
61 | # FILES
62 | # Action1.action
63 | # Action2.action
64 | # )
65 |
66 | ## Generate added messages and services with any dependencies listed here
67 | # generate_messages(
68 | # DEPENDENCIES
69 | # std_msgs # Or other packages containing msgs
70 | # )
71 |
72 | ################################################
73 | ## Declare ROS dynamic reconfigure parameters ##
74 | ################################################
75 |
76 | ## To declare and build dynamic reconfigure parameters within this
77 | ## package, follow these steps:
78 | ## * In the file package.xml:
79 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
80 | ## * In this file (CMakeLists.txt):
81 | ## * add "dynamic_reconfigure" to
82 | ## find_package(catkin REQUIRED COMPONENTS ...)
83 | ## * uncomment the "generate_dynamic_reconfigure_options" section below
84 | ## and list every .cfg file to be processed
85 |
86 | ## Generate dynamic reconfigure parameters in the 'cfg' folder
87 | # generate_dynamic_reconfigure_options(
88 | # cfg/DynReconf1.cfg
89 | # cfg/DynReconf2.cfg
90 | # )
91 |
92 | ###################################
93 | ## catkin specific configuration ##
94 | ###################################
95 | ## The catkin_package macro generates cmake config files for your package
96 | ## Declare things to be passed to dependent projects
97 | ## INCLUDE_DIRS: uncomment this if your package contains header files
98 | ## LIBRARIES: libraries you create in this project that dependent projects also need
99 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need
100 | ## DEPENDS: system dependencies of this project that dependent projects also need
101 | catkin_package(
102 | # INCLUDE_DIRS include
103 | # LIBRARIES panda_interface
104 | # CATKIN_DEPENDS other_catkin_pkg
105 | # DEPENDS system_lib
106 | )
107 |
108 | ###########
109 | ## Build ##
110 | ###########
111 |
112 | ## Specify additional locations of header files
113 | ## Your package locations should be listed before other locations
114 | include_directories(
115 | # include
116 | # ${catkin_INCLUDE_DIRS}
117 | )
118 |
119 | ## Declare a C++ library
120 | # add_library(${PROJECT_NAME}
121 | # src/${PROJECT_NAME}/panda_interface.cpp
122 | # )
123 |
124 | ## Add cmake target dependencies of the library
125 | ## as an example, code may need to be generated before libraries
126 | ## either from message generation or dynamic reconfigure
127 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
128 |
129 | ## Declare a C++ executable
130 | ## With catkin_make all packages are built within a single CMake context
131 | ## The recommended prefix ensures that target names across packages don't collide
132 | # add_executable(${PROJECT_NAME}_node src/panda_interface_node.cpp)
133 |
134 | ## Rename C++ executable without prefix
135 | ## The above recommended prefix causes long target names, the following renames the
136 | ## target back to the shorter version for ease of user use
137 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
138 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
139 |
140 | ## Add cmake target dependencies of the executable
141 | ## same as for the library above
142 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
143 |
144 | ## Specify libraries to link a library or executable target against
145 | # target_link_libraries(${PROJECT_NAME}_node
146 | # ${catkin_LIBRARIES}
147 | # )
148 |
149 | #############
150 | ## Install ##
151 | #############
152 |
153 | # all install targets should use catkin DESTINATION variables
154 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
155 |
156 | ## Mark executable scripts (Python etc.) for installation
157 | ## in contrast to setup.py, you can choose the destination
158 | # catkin_install_python(PROGRAMS
159 | # scripts/my_python_script
160 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
161 | # )
162 |
163 | ## Mark executables for installation
164 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
165 | # install(TARGETS ${PROJECT_NAME}_node
166 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
167 | # )
168 |
169 | ## Mark libraries for installation
170 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
171 | # install(TARGETS ${PROJECT_NAME}
172 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
173 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
174 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
175 | # )
176 |
177 | ## Mark cpp header files for installation
178 | # install(DIRECTORY include/${PROJECT_NAME}/
179 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
180 | # FILES_MATCHING PATTERN "*.h"
181 | # PATTERN ".svn" EXCLUDE
182 | # )
183 |
184 | ## Mark other files for installation (e.g. launch and bag files, etc.)
185 | # install(FILES
186 | # # myfile1
187 | # # myfile2
188 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
189 | # )
190 |
191 | #############
192 | ## Testing ##
193 | #############
194 |
195 | ## Add gtest based cpp test target and link libraries
196 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_panda_interface.cpp)
197 | # if(TARGET ${PROJECT_NAME}-test)
198 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
199 | # endif()
200 |
201 | ## Add folders to be run by python nosetests
202 | # catkin_add_nosetests(test)
203 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # pytorch latest (pip)
6 | # ==================================================================
7 |
8 | FROM nvidia/cudagl:11.1.1-devel-ubuntu20.04
9 | ENV LANG C.UTF-8
10 | ENV TZ=Europe/London
11 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
12 |
13 |
14 | # ==================================================================
15 | # User setting
16 | # ------------------------------------------------------------------
17 |
18 | ARG USER
19 | ARG PASSWD
20 | ARG USER_ID
21 |
22 | RUN apt-get update && \
23 | apt-get install -y sudo
24 |
25 | ENV HOME /home/$USER
26 |
27 | RUN useradd -m $USER -u $USER_ID && \
28 | gpasswd -a $USER sudo && \
29 | echo "$USER:$PASSWD" | chpasswd && \
30 | echo 'Defaults visiblepw' >> /etc/sudoers && \
31 | echo "$USER ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
32 |
33 | USER $USER
34 | WORKDIR $HOME
35 |
36 | RUN APT_INSTALL="sudo apt-get install -y --no-install-recommends" && \
37 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
38 | GIT_CLONE="git clone --depth 10" && \
39 |
40 | sudo rm -rf /var/lib/apt/lists/* \
41 | /etc/apt/sources.list.d/cuda.list \
42 | /etc/apt/sources.list.d/nvidia-ml.list && \
43 |
44 | sudo apt-get update && \
45 |
46 | # ==================================================================
47 | # tools
48 | # ------------------------------------------------------------------
49 |
50 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
51 | build-essential \
52 | apt-utils \
53 | ca-certificates \
54 | wget \
55 | git \
56 | vim \
57 | libssl-dev \
58 | curl \
59 | unzip \
60 | unrar \
61 | zsh \
62 | neovim \
63 | tmux \
64 | && \
65 |
66 | $GIT_CLONE https://github.com/Kitware/CMake ~/cmake && \
67 | cd ~/cmake && \
68 | ./bootstrap && \
69 | sudo make -j"$(nproc)" install
70 |
71 | # ==================================================================
72 | # zsh
73 | # ------------------------------------------------------------------
74 | RUN APT_INSTALL="sudo apt-get install -y --no-install-recommends" && \
75 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
76 | GIT_CLONE="git clone --depth 10" && \
77 |
78 | zsh && \
79 | sudo chsh -s $(which zsh) && \
80 |
81 | # ==================================================================
82 | # python
83 | # ------------------------------------------------------------------
84 |
85 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
86 | software-properties-common \
87 | && \
88 | sudo add-apt-repository ppa:deadsnakes/ppa && \
89 | sudo apt-get update && \
90 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
91 | python3-pip \
92 | python3.8 \
93 | python3.8-dev \
94 | python3-distutils-extra \
95 | && \
96 | wget -O ~/get-pip.py \
97 | https://bootstrap.pypa.io/get-pip.py && \
98 | sudo ln -s /usr/bin/python3 /usr/local/bin/python && \
99 | $PIP_INSTALL \
100 | numpy \
101 | scipy \
102 | pandas \
103 | cloudpickle \
104 | "scikit-image>=0.14.2" \
105 | scikit-learn \
106 | matplotlib \
107 | Cython \
108 | tqdm \
109 | h5py \
110 | && \
111 |
112 | # ==================================================================
113 | # pytorch
114 | # ------------------------------------------------------------------
115 |
116 | $PIP_INSTALL \
117 | future \
118 | numpy \
119 | protobuf \
120 | enum34 \
121 | pyyaml \
122 | typing \
123 | && \
124 | $PIP_INSTALL \
125 | torch \
126 | torchvision \
127 | torchaudio -f \
128 | https://download.pytorch.org/whl/cu111/torch_stable.html \
129 | && \
130 |
131 | # ==================================================================
132 | # zsh
133 | # ------------------------------------------------------------------
134 |
135 | sh -c "$(wget -O- https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" && \
136 | $GIT_CLONE https://github.com/junjungoal/dotfiles.git && \
137 | sh dotfiles/dotfilesLink.sh && \
138 | $GIT_CLONE https://github.com/zsh-users/zsh-autosuggestions ~/.zsh/zsh-autosuggestions && \
139 | $(which zsh) -c "source ~/.zsh/zsh-autosuggestions/zsh-autosuggestions.zsh" && \
140 | $(which zsh) -c "source ~/.zshrc" && \
141 | rm -rf ~/.vim && \
142 | $GIT_CLONE https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim && \
143 | mkdir .config && \
144 | cp -r dotfiles/config/* .config/ && \
145 |
146 |
147 | # ==================================================================
148 | # MUJOCO
149 | # ------------------------------------------------------------------
150 | mkdir -p $HOME/.mujoco \
151 | && wget https://mujoco.org/download/mujoco210-linux-x86_64.tar.gz -O mujoco.tar.gz \
152 | && tar -xvf mujoco.tar.gz \
153 | && mv mujoco210 $HOME/.mujoco/ \
154 | && rm mujoco.tar.gz
155 |
156 | ENV LD_LIBRARY_PATH $HOME/.mujoco/mujoco210/bin:${LD_LIBRARY_PATH}
157 |
158 | ENV LD_LIBRARY_PATH /usr/local/nvidia/lib64:${LD_LIBRARY_PATH}
159 |
160 | RUN PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
161 |
162 | $PIP_INSTALL \
163 | mujoco-py \
164 | gym
165 |
166 | # ===================================================================
167 | # Install ROS noetic dependencies.
168 | # ===================================================================
169 |
170 | RUN echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/ros-latest.list
171 | RUN curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | sudo apt-key add -
172 |
173 | RUN APT_INSTALL="sudo apt-get install -y --no-install-recommends" && \
174 | sudo apt-get update && \
175 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
176 | pkg-config \
177 | libxau-dev \
178 | libxdmcp-dev \
179 | libxcb1-dev \
180 | libxext-dev \
181 | libx11-dev \
182 | git-core \
183 | python-argparse \
184 | python3-rosdep \
185 | python3-rosinstall \
186 | python3-vcstools \
187 | python3-wstool \
188 | ros-noetic-desktop-full \
189 | ros-noetic-actionlib \
190 | ros-noetic-actionlib-msgs \
191 | ros-noetic-control-msgs \
192 | ros-noetic-cv-bridge \
193 | ros-noetic-rospy-message-converter \
194 | ros-noetic-rviz \
195 | ros-noetic-tf2-ros \
196 | ros-noetic-trajectory-msgs \
197 | ros-noetic-xacro \
198 | ros-noetic-moveit \
199 | ros-noetic-mocap-optitrack \
200 | ros-noetic-joy \
201 | ros-noetic-combined-robot-hw \
202 | ros-noetic-dynamic-reconfigure \
203 | build-essential \
204 | cmake \
205 | libpoco-dev \
206 | libeigen3-dev \
207 | dbus-x11 && \
208 | sudo apt-get clean && \
209 | sudo rm -rf /var/lib/apt/lists/*
210 |
211 |
212 | RUN \
213 | sudo rosdep init && \
214 | rosdep update
215 |
216 | RUN mkdir -p ~/catkin_ws/src
217 |
218 | RUN ["/bin/zsh", "-c", \
219 | "source /opt/ros/noetic/setup.zsh && \
220 | cd ~/catkin_ws && \
221 | catkin_make"]
222 |
223 | # =========================================================================================
224 | # Install Franka
225 | # =========================================================================================
226 |
227 | RUN pip install numpy-quaternion
228 |
229 | # # Download and build libfranka
230 | RUN ["/bin/zsh", "-c", \
231 | "git clone --recursive https://github.com/frankaemika/libfranka $HOME/libfranka && \
232 | mkdir $HOME/libfranka/build && \
233 | cd $HOME/libfranka/build && \
234 | cmake -DCMAKE_BUILD_TYPE=Release .. && \
235 | cmake --build ."]
236 |
237 | RUN ["/bin/zsh", "-c", \
238 | "cd ~/catkin_ws/src && \
239 | git clone --recursive https://github.com/frankaemika/franka_ros && \
240 | git clone https://github.com/junjungoal/panda_robot.git && \
241 | git clone https://github.com/ros-planning/panda_moveit_config.git && \
242 | cd ~/catkin_ws/src/franka_ros && \
243 | git checkout 0.7.1 && \
244 | cd ~/catkin_ws/ && \
245 | rosdep install --from-paths src --ignore-src --rosdistro noetic -r -y --skip-keys libfranka && \
246 | source /opt/ros/noetic/setup.zsh && \
247 | catkin_make -DCMAKE_BUILD_TYPE=Release -DFranka_DIR:PATH=$HOME/libfranka/build && \
248 | source ~/catkin_ws/devel/setup.zsh && \
249 | echo 'source /opt/ros/noetic/setup.zsh' >> ~/.zshrc && \
250 | echo 'source ~/catkin_ws/devel/setup.zsh' >> ~/.zshrc"]
251 |
252 | # =========================================================================================
253 | # Install Realsense
254 | # =========================================================================================
255 | RUN sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE || sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE
256 | RUN sudo add-apt-repository "deb https://librealsense.intel.com/Debian/apt-repo $(lsb_release -cs) main" -u
257 |
258 | RUN APT_INSTALL="sudo apt-get install -y --no-install-recommends" && \
259 | sudo apt-get update && \
260 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
261 | librealsense2-dkms \
262 | librealsense2-utils \
263 | librealsense2-dev \
264 | librealsense2-dbg \
265 | ros-noetic-realsense2-camera \
266 | swig
267 |
268 | # git clone https://github.com/pal-robotics/ddynamic_reconfigure.git && \
269 | # git clone https://github.com/ros/dynamic_reconfigure.git && \
270 | RUN ["/bin/zsh", "-c", \
271 | "cd ~/catkin_ws/src && \
272 | git clone https://github.com/IntelRealSense/realsense-ros.git && \
273 | git clone https://github.com/usrl-uofsc/stag_ros.git && \
274 | cd ~/catkin_ws/ && \
275 | source ~/.zshrc && \
276 | catkin_make -DCATKIN_ENABLE_TESTING=False -DCMAKE_BUILD_TYPE=Release -DBUILD_WITH_OPENMP=OFF && \
277 | catkin_make install --pkg realsense2_camera && \
278 | source ~/catkin_ws/devel/setup.zsh"]
279 |
280 | RUN ["/bin/zsh", "-c", \
281 | "cd ~/ && \
282 | git clone https://github.com/stevengj/nlopt.git && \
283 | cd ~/nlopt && \
284 | mkdir build && \
285 | cd ~/nlopt/build && \
286 | cmake .. && \
287 | make && \
288 | sudo make install"]
289 |
290 | RUN ["/bin/zsh", "-c", \
291 | "cd ~/catkin_ws/src && \
292 | git clone -b devel https://clemi@bitbucket.org/clemi/trac_ik.git && \
293 | cd ~/catkin_ws/ && \
294 | source ~/.zshrc && \
295 | catkin_make -DBUILD_WITH_OPENMP=OFF -DCMAKE_BUILD_TYPE=Release -DFranka_DIR:PATH=/home/jyamada/libfranka/build -DCATKIN_BLACKLIST_PACKAGE='trac_ik;trac_ik_examples' && \
296 | source ~/catkin_ws/devel/setup.zsh"]
297 |
298 | RUN pip install wandb sympy
299 |
300 | RUN echo "export ROS_MASTER_URI=http://bamtoo:11311" >> ~/.zshrc
301 | RUN echo "export ROS_IP=192.168.4.3" >> ~/.zshrc
302 |
303 | # nvidia-container-runtime
304 | ENV NVIDIA_VISIBLE_DEVICES \
305 | ${NVIDIA_VISIBLE_DEVICES:-all}
306 | ENV NVIDIA_DRIVER_CAPABILITIES \
307 | ${NVIDIA_DRIVER_CAPABILITIES:+$NVIDIA_DRIVER_CAPABILITIES,}graphics
308 | ENV LIBGL_ALWAYS_INDIRECT 0
309 |
310 |
311 | RUN sudo apt-get update && \
312 | sudo apt-get -y install \
313 | libosmesa6-dev \
314 | patchelf \
315 | ros-noetic-realsense2-camera \
316 | ros-noetic-realsense2-description
317 |
318 | RUN pip install tensorboardX \
319 | moviepy \
320 | imageio \
321 | funcsigs
322 | RUN ["/bin/zsh", "-c", \
323 | "nvim +VundleInstall +qall"]
324 | # ==================================================================
325 | # config & cleanup
326 | # ------------------------------------------------------------------
327 |
328 | RUN mkdir -p ~/projects
329 |
330 | RUN sudo ldconfig && \
331 | sudo apt-get clean && \
332 | sudo apt-get autoremove && \
333 | sudo rm -rf /var/lib/apt/lists/* /tmp/*
334 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 |
635 |     <one line to give the program's name and a brief idea of what it does.>
636 |     Copyright (C) <year> <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 |     along with this program.  If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 |     <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RAMP: A Benchmark for Evaluating Robotic Assembly Manipulation and Planning
2 | A baseline approach to solving the easy class of assemblies in the [RAMP benchmark](https://sites.google.com/oxfordrobotics.institute/ramp).
3 |
4 | 
5 |
6 | ## Installation Instructions
7 | ```
8 | cd ~/catkin_ws/src/
9 | git clone --recursive --branch main git@github.com:junjungoal/ramp.git
10 | cd ramp
11 | git submodule update --init --recursive
12 | ```
13 |
14 | ### Docker Instruction
15 | ```
16 | docker build -t assembly_benchmark:latest --build-arg USER=${USER} --build-arg PASSWD=root --build-arg USER_ID=$(id -u) .
17 | docker run -it -d --net=host --gpus all -e HOST_UID=${UID} -e USER=${USER} -e DISPLAY=$DISPLAY -v /tmp/.X11-unix/:/tmp/.X11-unix:rw -v $HOME/.Xauthority:/home/${USER}/.Xauthority:rw --privileged --name=assembly_benchmark assembly_benchmark zsh
18 | docker exec -it assembly_benchmark zsh
19 | ```
20 | ### High-level planner setup
21 | To setup a high-level planner, please refer to [here](https://github.com/applied-ai-lab/ramp-planner).
22 |
23 | ### ROS library installation
24 |
25 | ```
26 | cd ~/catkin_ws/src
27 |
28 | # clone necessary libraries
29 | git clone -b panda_ros_interface --recursive https://github.com/junjungoal/franka_ros
30 | git clone -b melodic-devel https://github.com/junjungoal/panda_moveit_config.git
31 | git clone -b assembly_benchmark https://github.com/junjungoal/apriltag_ros.git
32 | git clone https://github.com/junjungoal/panda_ros_interface.git
33 |
34 | # build the ROS libraries from the workspace root
35 | cd ~/catkin_ws
36 | rosdep install --from-paths src --ignore-src --rosdistro noetic -r -y --skip-keys libfranka
36 | catkin_make -DCMAKE_BUILD_TYPE=Release -DFranka_DIR:PATH=$HOME/libfranka/build
37 | source /opt/ros/noetic/setup.zsh
38 | echo 'source /opt/ros/noetic/setup.zsh' >> ~/.zshrc
39 | echo 'source ~/catkin_ws/devel/setup.zsh' >> ~/.zshrc
40 | ```
41 |
42 |
43 | ## Example Commands
44 |
45 | Run the following commands in different terminal sessions.
46 | ```
47 | roslaunch panda_interface robot_bringup.launch # bring up the robot controller
48 | roslaunch ramp bringup_interface.launch
49 | ```
50 |
51 | To evaluate the baseline, run
52 | ```
53 | python -m main
54 | ```
55 |
56 |
--------------------------------------------------------------------------------
/core/action_interface.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os, sys
4 | import numpy as np
5 | import re
6 | import datetime
7 | import math
8 | import copy
9 | import rospy
10 | import matplotlib.pyplot as plt
11 | import message_filters
12 | from stag_ros.msg import *
13 | from apriltag_ros.msg import *
14 | from geometry_msgs.msg import Twist
15 |
16 | from panda_interface.arm import PandaArmInterface
17 | from core.utils.general_utils import AttrDict
18 | import core.utils.ros_transform_utils as tfu
19 | from core.utils.transform_utils import (
20 | quat2mat,
21 | mat2quat,
22 | axisangle2quat,
23 | quat2axisangle,
24 | quat_multiply,
25 | make_pose,
26 | mat2pose,
27 | )
28 | from core.const import BEAMS, PEGS
29 | from planner.beam_assembly.beam_assembly_parser import ElementType
30 |
31 |
32 | class ActionInterface(object):
    def __init__(self, fixed_beam_id="b7"):
        """
        High-level action interface for the beam-assembly benchmark:
        subscribes to AprilTag detections from a top and a wrist camera and
        exposes pick/move/insert/cap primitives on a Panda arm.

        Args:
            fixed_beam_id: ID of a beam fixed on a table
        """

        rospy.init_node("action_interface")
        # Latest marker poses are cached asynchronously by the two
        # subscriber callbacks below.
        self.top_marker_subscriber = rospy.Subscriber(
            "top_camera/tag_detections",
            AprilTagDetectionArray,
            self._update_top_markers,
        )
        self.wrist_marker_subscriber = rospy.Subscriber(
            "wrist_camera/tag_detections",
            AprilTagDetectionArray,
            self._update_wrist_markers,
        )
        # Accumulated marker observations (kept across callbacks, with timestamps).
        self.top_markers = AttrDict()
        self.wrist_markers = AttrDict()

        self.fixed_beam_id = fixed_beam_id

        # Panda arm interface
        self.robot = PandaArmInterface(AttrDict())
        # Command rate (5 Hz) used by the impedance-control loops.
        self.rate = rospy.Rate(5)

        # Neutral gripper orientation: rotated pi about x (gripper pointing down).
        self.base_orientation = axisangle2quat(np.array([-np.pi, 0, 0]))
        self.beams = BEAMS
        self.pegs = PEGS

        # Set by pickup_beam(); used later to update approach poses.
        self.grasped_beam_id = None
        self.grasped_beam_link_id = None

        # add collision boxes around a robot arm to avoid unexpected collision with a table and wall
        self.construct_scene()
68 |
69 | def _update_top_markers(self, top_msg):
70 | """
71 | Update marker poses observed by a top-view camera
72 | """
73 | self.top_current_markers = AttrDict()
74 | for tag_marker in top_msg.detections:
75 | marker_pose = tfu.current_robot_pose(
76 | "panda_link0", "tag_" + str(tag_marker.id[0])
77 | )
78 | self.top_markers["tag_{}".format(tag_marker.id[0])] = AttrDict(
79 | pose=marker_pose, time=datetime.datetime.now()
80 | )
81 | self.top_current_markers["tag_{}".format(tag_marker.id[0])] = marker_pose
82 |
83 | def _update_wrist_markers(self, wrist_msg):
84 | """
85 | Update marker poses observed by a wrist-camera
86 | """
87 | self.wrist_current_markers = AttrDict()
88 | for tag_marker in wrist_msg.detections:
89 | marker_pose = tfu.current_robot_pose(
90 | "panda_link0", "tag_" + str(tag_marker.id[0])
91 | )
92 | self.wrist_markers["tag_{}".format(tag_marker.id[0])] = AttrDict(
93 | pose=marker_pose, time=datetime.datetime.now()
94 | )
95 | self.wrist_current_markers["tag_{}".format(tag_marker.id[0])] = marker_pose
96 |
    def pickup_beam(self, target):
        """
        Grasp a beam at a specific link.
        Args:
            target (str): A target beam id with link id for grasping. E.g. b7l1
        Returns:
            True on completion.
        """

        beam_id, link_id = self.get_beam_link_id(target)

        # Link pose observed via the beam's origin marker (4x4 matrix).
        link_pose = self.get_beam_link_pose(beam_id, link_id)
        pos, ori = mat2pose(link_pose)
        grasped_link_euler = quat2axisangle(ori)

        # use these information to update the approach pose later.
        self.grasped_link_euler = grasped_link_euler
        self.grasped_beam_id = beam_id
        self.grasped_beam_link_id = link_id

        # open the gripper and move above the target link
        self.robot.gripper.open()
        self.robot.move_to_cartesian_pose([pos[0], pos[1], 0.15])

        # close the gripper slightly so that the gripper does not hit to other objects
        self.robot.gripper.move_joints(0.035)
        self.robot.move_to_cartesian_pose([pos[0], pos[1], 0.035])
        # grasp the beam
        self.robot.gripper.grasp(0.01, force=80)
        print("Grasped!")

        # move upwards
        self.robot.move_to_cartesian_delta([0, 0, 0.1])
        return True
129 |
130 | def move_to_input_area(self):
131 | """
132 | Move to the input area.
133 | """
134 | print("Moving to the input area")
135 | self.robot.move_to_cartesian_pose(
136 | [
137 | self.input_origin.position.x - 0.2,
138 | self.input_origin.position.y + 0.1,
139 | self.input_origin.position.z + 0.4,
140 | ]
141 | )
142 |
143 | def move_to_intermediate_area(self):
144 | """
145 | Move to the intermediate area.
146 | """
147 | print("Moving to the intermediate area")
148 | self.robot.move_to_cartesian_pose(
149 | [
150 | self.input_origin.position.x - 0.2,
151 | self.input_origin.position.y - 0.1,
152 | self.input_origin.position.z + 0.4,
153 | ]
154 | )
155 | self.move_neutral_ori()
156 |
157 | def move_to_assembly_area(self):
158 | """
159 | Move to the assembly area.
160 | """
161 | print("Moving to the assembly area")
162 | self.robot.move_to_cartesian_pose(
163 | [
164 | self.input_origin.position.x - 0.2,
165 | self.input_origin.position.y - 0.3,
166 | self.input_origin.position.z + 0.4,
167 | ]
168 | )
169 |
170 | def move_beam_to_approach_pose(self, target, insert_end):
171 | """
172 | Move the beam to the approach pose for insertion
173 | Args:
174 | target (str): A target beam id.
175 | insert_end (BeamComponent): An instance of BeamComponent class containing the insertion endpoint joint.
176 | """
177 |
178 | print("Moving to the approach pose")
179 | # Transform the approach pose of the insertion endpoint to the approach poes of the link that the robot grasps.
180 | self.update_beam_approach_pose(
181 | self.grasped_beam_id, self.grasped_beam_link_id, insert_end
182 | )
183 | self.move_neutral_ori()
184 |
185 | target_pose = self.beams[target]
186 | trans, quat = mat2pose(target_pose)
187 | euler = quat2axisangle(quat)
188 |
189 | self.robot.move_to_cartesian_pose([trans[0], trans[1], 0.15])
190 |
191 | # FIXME: rotate a gripper with the difference in orientation of grasped and target beam
192 | if np.abs(self.grasped_link_euler[2] - euler[2]) > 2:
193 | self.rotate_gripper(np.pi)
194 | elif np.abs(self.grasped_link_euler[2] - euler[2]) > 1.0:
195 | self.rotate_gripper(np.pi / 2.0)
196 |
197 | # Slowly move the beam to the approach pose.
198 | current_ori = self.robot.tip_pose().orientation
199 | self.robot.move_to_cartesian_delta([0, 0, -0.05])
200 | self.robot.move_to_cartesian_delta([0, 0, -0.05])
201 | self.robot.move_to_cartesian_pose([trans[0], trans[1], 0.038])
202 |
203 | def move_to_beam(self, target):
204 | """
205 | Move to the origin of the target beam
206 | Args:
207 | target (str): A target beam id
208 | """
209 |
210 | print("Moving to the beam {}".format(target))
211 | self.move_neutral_ori()
212 | beam_id = self.get_beam_id(target)
213 |
214 | marker_pose = self.get_marker_pose(self.beams[beam_id].origin_id)
215 | self.robot.move_to_cartesian_pose(
216 | [marker_pose.position.x - 0.01, marker_pose.position.y, 0.2]
217 | )
218 | rospy.sleep(0.5)
219 |
220 | def move_to_peg(self, target):
221 | """
222 | Move to the target peg
223 | Args:
224 | target (str): A target peg id
225 | """
226 |
227 | print("Moving to the peg {}".format(target))
228 | peg_id = re.match("(p\d*)i", target)[1]
229 | marker_id = self.pegs[peg_id]
230 | marker_pose = self.get_marker_pose(marker_id)
231 | self.robot.move_to_cartesian_pose(
232 | [
233 | marker_pose.position.x,
234 | marker_pose.position.y,
235 | marker_pose.position.z + 0.15,
236 | ]
237 | )
238 | self.move_neutral_ori()
239 | return True
240 |
241 | def pickup_peg(self, target):
242 | """
243 | Grasp a peg
244 | Args:
245 | marker_id: the peg's marker id
246 | """
247 |
248 | print("Pick up the peg {}".format(target))
249 | marker_id = self.pegs[target]
250 | marker_pose = self.get_marker_pose(marker_id)
251 | self.robot.gripper.open()
252 |
253 | self.robot.move_to_cartesian_pose(
254 | [marker_pose.position.x, marker_pose.position.y, 0.085]
255 | )
256 | self.robot.gripper.grasp(0.01, force=100)
257 | rospy.sleep(0.5)
258 | self.robot.move_to_cartesian_delta([0, 0, 0.165])
259 | self.move_neutral_ori()
260 | return True
261 |
262 | def insert_peg(self, bj1, bj2, peg):
263 | """
264 | Insert the grasped peg to the hole of bj1 and bj2.
265 | Args:
266 | bj1: A beam id with a joint id
267 | bj2: A beam id with a joint id
268 | peg: A peg id
269 | """
270 |
271 | print("Inserting the peg {}...".format(peg))
272 | # Move closer
273 | beam1_id, beam1_joint_id = self.get_beam_joint_id(bj1)
274 | beam2_id, beam2_joint_id = self.get_beam_joint_id(bj2)
275 | pose1 = self.get_beam_joint_pose(beam1_id, beam1_joint_id)
276 | pose2 = self.get_beam_joint_pose(beam2_id, beam2_joint_id)
277 | pos = (mat2pose(pose1)[0] + mat2pose(pose2)[0]) / 2.0
278 | self.robot.move_to_cartesian_pose([pos[0] - 0.03, pos[1], 0.25])
279 |
280 | rospy.sleep(0.5)
281 |
282 | # Move closer again to observe more accurate hole pose.
283 | beam1_id, beam1_joint_id = self.get_beam_joint_id(bj1)
284 | beam2_id, beam2_joint_id = self.get_beam_joint_id(bj2)
285 | pose1 = self.get_beam_joint_pose(beam1_id, beam1_joint_id)
286 | pose2 = self.get_beam_joint_pose(beam2_id, beam2_joint_id)
287 | pos = (mat2pose(pose1)[0] + mat2pose(pose2)[0]) / 2.0
288 | self.robot.move_to_cartesian_pose([pos[0] - 0.03, pos[1], 0.25])
289 |
290 | # Observe a sequence of hole poses and average them to acquire the accurate hole pose.
291 | pos_list = []
292 | for _ in range(20):
293 | pose1 = self.get_beam_joint_pose(beam1_id, beam1_joint_id)
294 | pose2 = self.get_beam_joint_pose(beam2_id, beam2_joint_id)
295 | pos = (mat2pose(pose1)[0] + mat2pose(pose2)[0]) / 2.0
296 | rospy.sleep(0.05)
297 | pos_list.append(pos)
298 | pos = np.mean(pos_list, axis=0)
299 | self.robot.move_to_cartesian_pose([pos[0] + 0.005, pos[1], 0.12])
300 |
301 | current_pose = self.robot.tip_pose()
302 | base_ori = current_pose.orientation
303 | inserted = False
304 | trials = 0
305 | while not inserted:
306 | hit_bottom = False
307 | while not hit_bottom:
308 | # Keep moving downwards until the peg hits to the surface of the beams
309 | desired_pos = self.robot.tip_pose().position
310 | desired_pos[-1] -= 0.007
311 | self.exec_pose_cmd(desired_pos, base_ori)
312 | hit_bottom = np.abs(self.robot.tip_effort().force[2]) > 4.0
313 |
314 | print("Hit to the surface of the beam")
315 |
316 | z_pos = self.robot.tip_pose().position[-1]
317 | max_timesteps = 120
318 | current_pos = self.robot.tip_pose().position
319 | # Apply spiral search for the peg insertion
320 | pring("Searching the hole...")
321 | for t in range(max_timesteps):
322 | rad = math.radians(t * 4)
323 | x = 0.005 * math.exp(0.02 * rad) * math.cos(rad)
324 | y = 0.005 * math.exp(0.02 * rad) * math.sin(rad)
325 | desired_pos = np.array(
326 | [
327 | current_pos[0] + x,
328 | current_pos[1] + y,
329 | self.robot.tip_pose().position[2] - 0.005,
330 | ]
331 | )
332 | self.exec_pose_cmd(pos=desired_pos, ori=base_ori)
333 | # stop if the tip of the robot is lower than a threshold
334 | if (
335 | z_pos - self.robot.tip_pose().position[-1] > 0.005
336 | or self.robot.tip_pose().position[-1] < 0.09
337 | ):
338 | inserted = True
339 | break
340 |
341 | # Move upwards and observe the hole position again to repeat the insertion process
342 | if not inserted:
343 | print("Retry the insertion")
344 | self.robot.move_to_cartesian_pose([pos[0] - 0.02, pos[1], 0.2])
345 | rospy.sleep(0.5)
346 | for _ in range(20):
347 | pose = self.get_beam_joint_pose(beam1_id, beam1_joint_id)
348 | pose2 = self.get_beam_joint_pose(beam2_id, beam2_joint_id)
349 | pos = (mat2pose(pose)[0] + mat2pose(pose2)[0]) / 2.0
350 | rospy.sleep(0.05)
351 | pos_list.append(pos)
352 | pos = np.mean(pos_list, axis=0)
353 | self.robot.move_to_cartesian_pose([pos[0] + 0.005, pos[1], 0.13])
354 |
355 | trials += 1
356 | if trials > 5:
357 | break
358 |
359 | # Move backwards and forward to avoid from jamming.
360 | print("Avoid from jamming..")
361 | for t in range(50):
362 | rad = math.radians(t * 6)
363 | x = 0.005 * math.cos(rad)
364 | self.exec_pose_delta_cmd(np.array([x, 0, -0.015]))
365 | if (
366 | np.abs(self.robot.tip_effort().force[2]) < 3
367 | and self.robot.tip_pose().position[-1] < 0.09
368 | ):
369 | break
370 | self.exec_pose_delta_cmd(np.array([0, 0, 0]))
371 | self.robot.recover_from_errors()
372 | return True
373 |
374 | def assemble_beam_square(self, grasped_beam, target_beam_component):
375 | """
376 | Assemble beams.
377 | Args:
378 | grasped_beam (str): A grasped beam id with a joint id
379 | target_beam_component (BeamComponent): A grasped beam component
380 | """
381 | target_beam = target_beam_component.name
382 | current_pose = self.robot.tip_pose()
383 |
384 | grasped_beam_id, grasped_joint_id = self.get_beam_joint_id(grasped_beam)
385 | beam_id, joint_id = self.get_beam_joint_id(target_beam)
386 |
387 | if target_beam_component.type.value == ElementType.THRU_F.value:
388 | self.assemble_thru_f(grasped_beam, target_beam)
389 | elif target_beam_component.type.value == ElementType.IN_F_END.value:
390 | self.assemble_in_f_end(grasped_beam, target_beam)
391 | elif target_beam_component.type.value == ElementType.IN_F.value:
392 | self.assemble_in_f(grasped_beam, target_beam)
393 |
394 | self.exec_pose_delta_cmd(np.array([0, 0, 0]))
395 | rospy.sleep(0.5)
396 | return True
397 |
    def assemble_thru_f(self, grasped_beam, target_beam):
        """
        Assembly for the thru_f joint type.

        Slides the grasped beam along the insertion axis until contact, then
        performs a growing sinusoidal search until the beam seats in the
        target joint, and finally backs off slightly to align the holes.

        Args:
            grasped_beam (str): A grasped beam id with a joint id
            target_beam (str): A target beam id with a joint id
        """

        print("Assemble thru_f...")
        grasped_beam_id, grasped_joint_id = self.get_beam_joint_id(grasped_beam)
        beam_id, joint_id = self.get_beam_joint_id(target_beam)

        base_pos = self.robot.tip_pose().position
        base_ori = self.robot.tip_pose().orientation
        connect = False

        # find the insertion axis: the horizontal (x or y) axis along which the
        # approach ('a') and target ('t') poses differ by more than 2 cm
        insert_axis = np.where(
            np.abs(
                self.beams["{}t".format(grasped_beam_id)][:3, 3]
                - self.beams["{}a".format(grasped_beam_id)][:3, 3]
            )[:2]
            > 0.02
        )[0][0]

        # get the insertion direction (sign of approach -> target displacement)
        insert_sign = np.sign(
            self.beams["{}t".format(grasped_beam_id)][:3, 3][insert_axis]
            - self.beams["{}a".format(grasped_beam_id)][:3, 3][insert_axis]
        )

        # Move to the target pose with an impedance controller
        while not connect:
            current_pose = self.robot.tip_pose()
            desired_position = current_pose.position
            # keep the off-axis coordinates pinned to the starting pose
            mask = np.ones_like(desired_position, bool)
            mask[insert_axis] = False
            desired_position[insert_axis] += insert_sign * 0.01
            desired_position[mask] = base_pos[mask]
            self.exec_pose_cmd(desired_position, ori=base_ori)
            # contact detected once the axial reaction force exceeds 8 N
            connect = np.abs(self.robot.tip_effort().force[insert_axis]) > 8.0

        print("Hit to the other beam")

        # if the current robot pose is not at the target pose
        current_pose = self.robot.tip_pose()

        t = 0
        base_pose = self.robot.tip_pose()
        base_pos = base_pose.position
        # Search the target beam joint to connect the beams.
        print("Searching the target beam joint to connect the grasped beam...")
        while (
            current_pose.position[insert_axis]
            - self.beams["{}t".format(grasped_beam_id)][insert_axis, 3]
        ) > 0.01 or np.abs(
            self.robot.tip_effort().force[insert_axis]
        ) < 8.0: # FIXME
            current_pose = self.robot.tip_pose()
            current_pos = current_pose.position
            # growing sinusoidal sweep perpendicular to the insertion axis
            rad = math.radians(t * 4)
            offset = 0.01 * math.exp(0.06 * rad) * math.sin(rad)

            desired_pos = current_pos.copy()
            desired_pos[-1] = base_pos[2]
            desired_pos[insert_axis] += insert_sign * 0.006
            mask = np.ones_like(desired_pos, bool)
            mask[insert_axis] = False
            mask[-1] = False
            desired_pos[mask] += offset
            self.exec_pose_cmd(pos=desired_pos, ori=base_ori)
            t += 1

        # Adjust the beam position to align holes.
        print("Adjusting the beam position to align holes...")
        for _ in range(2):
            current_pose = self.robot.tip_pose()
            current_pos = current_pose.position
            # fix this heuristic: back off 1 cm from the target along the axis
            delta_insert = (
                self.beams["{}t".format(grasped_beam_id)][insert_axis, 3] - 0.01
            ) - current_pos[insert_axis]
            delta_pos = np.zeros_like(current_pos)
            delta_pos[insert_axis] = delta_insert
            self.exec_pose_delta_cmd(delta_pos)
484 |
    def assemble_in_f(self, grasped_beam, target_beam):
        """
        Assembly for the in_f joint type.

        Pushes the grasped beam along the insertion axis until contact, then
        oscillates sideways while pushing until resistance is felt on both
        the insertion and the side axis.

        Args:
            grasped_beam (str): A grasped beam id with a joint id
            target_beam (str): A target beam id with a joint id
        """

        print("Assemble in_f...")

        grasped_beam_id, grasped_joint_id = self.get_beam_joint_id(grasped_beam)
        beam_id, joint_id = self.get_beam_joint_id(target_beam)

        base_pos = self.robot.tip_pose().position
        base_ori = self.robot.tip_pose().orientation
        connect = False
        # find the insertion axis: the horizontal axis along which the approach
        # ('a') and target ('t') poses differ by more than 2 cm
        insert_axis = np.where(
            np.abs(
                self.beams["{}t".format(grasped_beam_id)][:3, 3]
                - self.beams["{}a".format(grasped_beam_id)][:3, 3]
            )[:2]
            > 0.02
        )[0][0]
        # get the insertion direction (sign of approach -> target displacement)
        insert_sign = np.sign(
            self.beams["{}t".format(grasped_beam_id)][:3, 3][insert_axis]
            - self.beams["{}a".format(grasped_beam_id)][:3, 3][insert_axis]
        )

        # Move to the target pose with an impedance controller
        while not connect:
            current_pose = self.robot.tip_pose()
            desired_position = current_pose.position
            # keep the off-axis coordinates pinned to the starting pose
            mask = np.ones_like(desired_position, bool)
            mask[insert_axis] = False
            desired_position[insert_axis] += insert_sign * 0.006
            desired_position[mask] = base_pos[mask]
            self.exec_pose_cmd(desired_position, ori=base_ori)
            # contact detected once the axial reaction force exceeds 3.5 N
            connect = np.abs(self.robot.tip_effort().force[insert_axis]) > 3.5
        print("Hit to the surface of the beam")

        t = 0
        base_pose = self.robot.tip_pose()
        base_pos = base_pose.position
        # the other horizontal axis, perpendicular to the insertion axis
        side_axis = 0 if insert_axis == 1 else 1
        # Insert the beams
        print("Searching the target beam joint to connect the grasped beam...")
        while (
            np.abs(self.robot.tip_effort().force[insert_axis]) < 8.0
            or np.abs(self.robot.tip_effort().force[side_axis]) < 8.0
        ): # FIXME
            current_pose = self.robot.tip_pose()
            current_pos = current_pose.position
            # growing sinusoidal sweep perpendicular to the insertion axis
            rad = math.radians(t * 10)
            offset = 0.005 * math.exp(0.04 * rad) * math.sin(rad)

            desired_pos = current_pos.copy()
            desired_pos[-1] = base_pos[2]
            # push more gently once substantial axial force is already felt
            if np.abs(self.robot.tip_effort().force[insert_axis]) > 5:
                desired_pos[insert_axis] += insert_sign * 0.003
            else:
                desired_pos[insert_axis] += insert_sign * 0.006
            mask = np.ones_like(desired_pos, bool)
            mask[insert_axis] = False
            mask[-1] = False
            desired_pos[mask] = base_pos[mask] + offset
            self.exec_pose_cmd(pos=desired_pos, ori=base_ori)
            t += 1
555 |
    def assemble_in_f_end(self, grasped_beam, target_beam):
        """
        Assembly for the in_f_end joint type.

        Pushes the grasped beam along -x until contact, sweeps along y to find
        the target joint, then pushes diagonally (using the relative yaw of the
        two beams) until the beams align.

        Args:
            grasped_beam (str): A grasped beam id with a joint id
            target_beam (str): A target beam id with a joint id
        """

        print("Assemble in_f_end...")
        grasped_beam_id, grasped_joint_id = self.get_beam_joint_id(grasped_beam)
        beam_id, joint_id = self.get_beam_joint_id(target_beam)

        # Observed pose of the marker attached near the target joint.
        target_beam_marker_pose = self.get_marker_pose(
            self.beams[beam_id].joint_to_marker[joint_id].marker_id
        )

        grasped_beam_marker_mat = self.beams["{}a".format(grasped_beam_id)]
        target_beam_marker_mat = make_pose(
            [
                target_beam_marker_pose.position.x,
                target_beam_marker_pose.position.y,
                target_beam_marker_pose.position.z,
            ],
            quat2mat(
                [
                    target_beam_marker_pose.orientation.x,
                    target_beam_marker_pose.orientation.y,
                    target_beam_marker_pose.orientation.z,
                    target_beam_marker_pose.orientation.w,
                ]
            ),
        )

        # Relative rotation (axis-angle) between target and grasped beam frames.
        beam_angles = quat2axisangle(
            mat2quat(
                np.dot(
                    target_beam_marker_mat[:3, :3], grasped_beam_marker_mat[:3, :3].T
                )
            )
        )

        base_pos = self.robot.tip_pose().position
        base_ori = self.robot.tip_pose().orientation
        connect = False
        # Move to the target pose with an impedance controller
        while not connect:
            current_pose = self.robot.tip_pose()
            desired_position = current_pose.position
            desired_position[0] -= 0.008
            self.exec_pose_cmd(
                [desired_position[0], base_pos[1], base_pos[2]], ori=base_ori
            )
            # contact detected once the x reaction force exceeds 8 N
            connect = np.abs(self.robot.tip_effort().force[0]) > 8.0
        print("Hit to the surface of the beam")

        t = 0
        base_pose = self.robot.tip_pose()
        base_pos = base_pose.position
        # Search the target beam joint to connect the beams.
        # NOTE(review): the first operand compares 1-element arrays; the loop
        # relies on their truthiness.
        print("Searching the target beam joint to connect the grasped beam...")
        while (
            current_pose.position[:1] - self.beams["{}t".format(grasped_beam_id)][:1, 3]
            > 0.01
            or np.abs(self.robot.tip_effort().force[0]) < 8.0
        ): # FIXME
            current_pose = self.robot.tip_pose()
            current_pos = current_pose.position
            # growing sinusoidal sweep along y while pressing along -x
            rad = math.radians(t * 4)
            y = 0.01 * math.exp(0.06 * rad) * math.sin(rad)
            desired_pos = np.array(
                [current_pos[0] - 0.004, base_pos[1] + y, base_pos[2]]
            )
            self.exec_pose_cmd(pos=desired_pos, ori=base_ori)
            t += 1

        connect = False
        # NOTE(review): pre_F is recorded but never read afterwards.
        pre_F = self.robot.tip_effort().force[1]
        # Align the beams
        print("Aligning the beams...")
        while not connect:
            current_pose = self.robot.tip_pose()
            desired_position = current_pose.position
            desired_position[0] -= 0.008
            # shift along y proportionally to the beams' relative yaw
            desired_position[1] += np.sin(beam_angles[-1]) * 0.008
            self.exec_pose_cmd(
                np.array([desired_position[0], desired_position[1], base_pos[2]]),
                ori=base_ori,
            )
            current_pose = self.robot.tip_pose()
            self.exec_pose_cmd(current_pose.position, ori=base_ori)
            # aligned once the y reaction force exceeds 6 N
            connect = np.abs(self.robot.tip_effort().force[1]) > 6.0
648 |
    def put_down(self):
        """
        Put down the grasped object and retreat upwards.
        Returns:
            True on completion.
        """
        print("Put down the grasped object")
        # Zero-delta impedance command: hold the current pose briefly.
        self.exec_pose_delta_cmd(np.array([0, 0, 0.0]))
        rospy.sleep(0.5)
        # Switch back from impedance to joint-trajectory control for free motion.
        self.robot.switch_controllers("position_joint_trajectory_controller")
        self.robot.recover_from_errors()
        self.robot.gripper.open()
        # Retreat upwards in two steps.
        self.robot.move_to_cartesian_delta([0, 0, 0.05])
        self.robot.move_to_cartesian_delta([0, 0, 0.1])
        return True
662 |
663 | def exec_pose_cmd(self, pos, ori=None):
664 | """
665 | Execute the cartesian impedance controller
666 | Args:
667 | pos: (x, y, z) position
668 | ori: (x, y, z, w) quaternion
669 | """
670 | if ori is None:
671 | ori = self.robot.tip_pose().orientation
672 | self.robot.exec_cartesian_pose_impedance_cmd(pos, ori=ori)
673 | self.rate.sleep()
674 |
675 | def exec_pose_delta_cmd(self, pos, ori=None):
676 | """
677 | Execute the cartesian impedance controller taking delta pose and orientation from the current pose as input.
678 | Args:
679 | pos: (x, y, z) delta position
680 | ori: (x, y, z, w) delta quaternion
681 | """
682 | current_pose = self.robot.tip_pose()
683 | desired_pos = current_pose.position + pos
684 | if ori is None:
685 | desired_ori = self.robot.tip_pose().orientation
686 | else:
687 | desired_ori = quat_multiply(current_pose.orientation, ori)
688 | self.robot.exec_cartesian_pose_impedance_cmd(desired_pos, ori=desired_ori)
689 | self.rate.sleep()
690 |
691 | def move_neutral_ori(self):
692 | """
693 | Rotate the gripper back to the base orientation.
694 | """
695 | current_pose = self.robot.tip_pose()
696 | self.robot.move_to_cartesian_pose(
697 | current_pose.position, ori=self.base_orientation
698 | )
699 |
700 | def rotate_gripper(self, z_angle):
701 | """
702 | Rotate the gripper
703 | Args:
704 | z_angle: (ax) axis-angle of the z-coordinate
705 | """
706 | current_pose = self.robot.tip_pose()
707 | current_pos = current_pose.position
708 | current_ori = current_pose.orientation
709 | desired_angles = quat2axisangle(current_ori)
710 | desired_angles[-1] += z_angle
711 |
712 | desired_angles[-1] = np.clip(desired_angles[-1], -2.0, 2.0)
713 | # self.robot.move_to_cartesian_pose(current_pos,
714 | # ori=desired_ori)
715 | self.robot.move_to_cartesian_delta(
716 | np.zeros(3), ori=axisangle2quat(np.array([0, 0, z_angle]))
717 | )
718 |
    def cap_beams(self, grasped_beam, target_beam):
        """
        Cap beams: press the grasped beam's joint onto the target beam's joint
        until they connect (or 50 attempts elapse).
        Args:
            grasped_beam (str): A grasped beam id with a joint id
            target_beam (str): A target beam id with a joint id
        Returns:
            True when the capping loop terminates.
        """

        print("Cap the beams")
        base_ori = self.robot.tip_pose().orientation
        grasped_beam_id, grasped_joint_id = self.get_beam_joint_id(grasped_beam)
        target_beam_id, target_joint_id = self.get_beam_joint_id(target_beam)

        grasped_beam_pos, grasped_beam_quat = mat2pose(
            self.get_beam_joint_pose(grasped_beam_id, grasped_joint_id)
        )
        target_beam_pos, target_beam_quat = mat2pose(
            self.get_beam_joint_pose(target_beam_id, target_joint_id)
        )

        # NOTE(review): the two z-angles below are computed but never used.
        grasped_beam_z_angle = quat2axisangle(grasped_beam_quat)[-1]
        target_beam_z_angle = quat2axisangle(target_beam_quat)[-1]

        # self.robot.move_to_cartesian_delta([0, -0.005, 0])
        # Small pre-positioning nudge before the capping loop.
        self.robot.move_to_cartesian_delta([0, -0.008, 0])
        self.robot.move_to_cartesian_delta([-0.015, 0, 0])

        rospy.sleep(0.5)
        connect = False
        # NOTE(review): pre_F is recorded but never read afterwards.
        pre_F = self.robot.tip_effort().force[0]
        base_pos = self.robot.tip_pose().position
        t = 0
        # capping beams using a cartesian impedance controller
        print("Capping the beams...")
        while not connect:
            # nudge forward (-x) and slightly down, then settle back to base z
            desired_pos = self.robot.tip_pose().position
            self.exec_pose_cmd(
                [desired_pos[0] - 0.01, base_pos[1], base_pos[2] - 0.005], ori=base_ori
            )
            desired_pos = self.robot.tip_pose().position
            self.exec_pose_cmd([desired_pos[0], base_pos[1], base_pos[2]], ori=base_ori)
            target_beam_pos, target_beam_quat = mat2pose(
                self.get_beam_joint_pose(target_beam_id, target_joint_id)
            )
            # connected: contact force along x and the tip close to the target joint
            connect = (
                np.abs(self.robot.tip_effort().force[0]) > 8.0
                and np.linalg.norm(
                    target_beam_pos[0] - self.robot.tip_pose().position[0]
                )
                < 0.008
            )
            t += 1
            if t > 50:
                break

        return True
775 |
    def push(self, beam_id, connected_beam_joint, pre_cap=False):
        """
        Push a beam sideways to seat it against a connected beam.
        Args:
            beam_id: the first beam whose position needs to be adjusted
            connected_beam_joint: A beam id with a joint id connected to the pushed beam
            pre_cap (bool): if True the beam is already capped, so push in the
                opposite direction.
        """

        print("Push the beam {}".format(beam_id))
        pos, ori = mat2pose(self.get_beam_joint_pose(beam_id, "j1")) # fix this later

        # Hover above the beam's j1 joint first.
        self.robot.move_to_cartesian_pose([pos[0] - 0.005, pos[1], pos[2] + 0.2])
        rospy.sleep(0.5)

        pos, ori = mat2pose(self.get_beam_link_pose(beam_id, "l1")) # fix this later

        self.robot.move_to_cartesian_pose([pos[0], pos[1], pos[2] + 0.2])

        pos, ori = mat2pose(self.get_beam_link_pose(beam_id, "l1")) # fix this later

        # Descend onto the link so the gripper can push from the side.
        self.robot.move_to_cartesian_pose([pos[0], pos[1], 0.044])

        connected_beam_id, connected_joint_id = self.get_beam_joint_id(
            connected_beam_joint
        )
        origin_pos, origin_ori = mat2pose(self.get_beam_origin_pose(connected_beam_id))

        # Push direction derived from the connected beam's yaw.
        direction = np.cos(quat2axisangle(origin_ori)[2])
        if "j1" not in connected_joint_id:
            direction *= -1

        # if the beam is already capped, push the beam to the other direction
        if pre_cap:
            direction *= -1
        base_pose = self.robot.tip_pose()
        base_pos = base_pose.position
        base_ori = base_pose.orientation
        pushed = False

        self.robot.recover_from_errors()

        # pushing a beam
        for _ in range(100):
            current_pose = self.robot.tip_pose()
            desired_position = current_pose.position
            # desired_position[1] += direction * 0.01
            desired_position[1] += direction * 0.008
            self.exec_pose_cmd(
                np.array([base_pos[0], desired_position[1], base_pos[2]]), ori=base_ori
            )
            # contact force along y signals the beam has been pushed home
            pushed = np.abs(self.robot.tip_effort().force[1]) > 3.5

            # if the beam is not capped yet, the robot should not push the beam too much
            if not pre_cap:
                if np.abs(self.robot.tip_pose().position[1] - base_pos[1]) > 0.02:
                    pushed = True

            if pushed:
                break

        self.robot.recover_from_errors()
        # Back off slightly, then retreat upwards and reset the orientation.
        self.robot.move_to_cartesian_delta(np.array([0.0, -direction * 0.01, 0]))
        self.robot.move_to_cartesian_delta(np.array([0.0, 0.0, 0.2]))
        self.move_neutral_ori()
840 |
841 | def get_beam_origin_pose(self, beam_id):
842 | """
843 | Get the origin pose of the given beam id
844 | Args:
845 | beam_id (str): A beam id
846 | """
847 | beam_info = self.beams[beam_id]
848 | origin_id = beam_info.origin_id
849 | marker_pose = self.get_marker_pose(origin_id)
850 | pos = marker_pose.position
851 | ori = marker_pose.orientation
852 | rot_mat = quat2mat([ori.x, ori.y, ori.z, ori.w])
853 | pose_mat = make_pose([pos.x, pos.y, pos.z], rot_mat)
854 | local_pose_mat = make_pose(beam_info.offset, np.eye(3))
855 | origin_pose = np.dot(pose_mat, local_pose_mat)
856 |
857 | return origin_pose
858 |
859 | def get_beam_link_pose(self, beam_id, link_id):
860 | """
861 | Get the origin pose of the given beam id
862 | Args:
863 | beam_id (str): A beam id
864 | link_id (str): A link id
865 | """
866 |
867 | origin_pose = self.get_beam_origin_pose(beam_id)
868 | beam_info = self.beams[beam_id]
869 | local_pose_mat = make_pose(beam_info.link_offsets[link_id], np.eye(3))
870 | link_pose = np.dot(origin_pose, local_pose_mat)
871 | return link_pose
872 |
    def get_beam_joint_pose(self, beam_id, joint_id=None):
        """
        Calculate the joint pose of the beam.
        Args:
            beam_id (str): A beam id with or without a joint id
            joint_id (str): A joint id. Optional if the beam_id contains the joint id
        Returns:
            4x4 joint pose matrix.
        """

        if joint_id is None:
            beam_id, joint_id = self.get_beam_joint_id(beam_id)

        if joint_id not in self.beams[beam_id].joint_to_marker.keys():
            # No dedicated marker for this joint: derive the pose from the
            # beam origin plus the static joint offset.
            origin_pose = self.get_beam_origin_pose(beam_id)
            beam_info = self.beams[beam_id]
            local_pose_mat = make_pose(beam_info.joint_offsets[joint_id], np.eye(3))
            joint_pose = np.dot(origin_pose, local_pose_mat)
        else:
            # A marker is attached near this joint: use its observed pose
            # plus the marker-to-joint offset.
            joint_info = self.beams[beam_id].joint_to_marker[joint_id]
            marker_pose = self.get_marker_pose(joint_info.marker_id)
            pos = marker_pose.position
            ori = marker_pose.orientation
            rot_mat = quat2mat([ori.x, ori.y, ori.z, ori.w])
            pose_mat = make_pose([pos.x, pos.y, pos.z], rot_mat)
            local_pose_mat = make_pose(joint_info.offset, np.eye(3))
            joint_pose = np.dot(pose_mat, local_pose_mat)

        return joint_pose
900 |
901 | def get_beam_joint_id(self, beam):
902 | """
903 | get the beam and joint id
904 | args:
905 | beam (str): a beam id with a joint id
906 | """
907 | group = re.match("(b\d*)(j\d.*)", beam)
908 | return group[1], group[2]
909 |
910 | def get_beam_link_id(self, beam):
911 | """
912 | get the beam and link id
913 | args:
914 | beam (str): a beam id with a link id
915 | """
916 | group = re.match("(b\d*)(l\d.*)", beam)
917 | return group[1], group[2]
918 |
919 | def get_beam_id(self, beam):
920 | """
921 | get the beam id
922 | args:
923 | beam (str): A beam id with a joint or link id
924 | """
925 | group = re.match("(b\d*)(.*)", target)
926 | beam_id = group[1]
927 | return beam_id
928 |
    def record_fixed_origin(self):
        """
        Record the fixed beam origin pose and the pose of the marker_id 0.

        Caches the "j1" joint pose of the fixed (base) beam in
        ``self.fixed_beam_origin`` and the pose of marker 0 in
        ``self.input_origin`` so later poses can be expressed relative to them
        (see load_insertion_poses).
        """
        self.fixed_beam_origin = self.get_beam_joint_pose(self.fixed_beam_id, "j1")
        self.input_origin = self.get_marker_pose(0)
935 |
    def load_insertion_poses(self, poses):
        """
        Load approach and target poses for insertion.
        Args:
            poses (dict): A dictionary containing the approach and target poses for insertion.
                E.g. {'b1a': pose_for_approach, 'b1t': pose_for_target}
                Each value is a 4x4 homogeneous pose matrix.
        """
        for key in poses.keys():
            trans = poses[key][:3, 3]
            # Axis remap (y <- -z, z <- y) between the planner's frame and the
            # fixed-beam frame -- presumably a 90-degree convention change;
            # TODO confirm against calculate_insertion_poses.
            new_trans = [trans[0], -trans[2], trans[1]]
            euler = quat2axisangle(mat2quat(poses[key][:3, :3]))
            # Same axis swap applied to the axis-angle rotation. NOTE: despite
            # the name, new_quat is a 3x3 rotation matrix (quat2mat output).
            new_quat = quat2mat(axisangle2quat([euler[0], euler[2], euler[1]]))
            new_pose = make_pose(new_trans, new_quat)

            # Store globally: compose with the recorded fixed-beam origin
            # (see record_fixed_origin).
            self.beams[key] = np.dot(self.fixed_beam_origin, new_pose)
951 |
    def update_beam_approach_pose(self, beam_id, link_id, insert_end):
        """
        Transform the approach pose of the insertion endpoint to the approach pose of the grasped link.

        Shifts both the stored approach ("<beam_id>a") and target
        ("<beam_id>t") poses by the offset between the grasped link and
        joint "j1".

        Args:
            beam_id (str): id of the beam being inserted
            link_id (str): id of the grasped link on that beam
            insert_end: insertion endpoint; only its ``name`` is parsed here
        """
        # NOTE(review): `group` is currently unused -- the offset below is
        # hard-coded to "j1" (see the commented-out line that used group[2]).
        group = re.match("(b\d*)(j\d*)", insert_end.name)

        approach_pose_wrt_origin = self.beams["{}a".format(beam_id)]
        beam_info = self.beams[beam_id]

        # local_pose_mat = make_pose((beam_info.link_offsets[link_id] - beam_info.joint_offsets[group[2]]), np.eye(3))
        local_pose_mat = make_pose(
            (beam_info.link_offsets[link_id] - beam_info.joint_offsets["j1"]), np.eye(3)
        )
        self.beams["{}a".format(beam_id)] = np.dot(
            approach_pose_wrt_origin, local_pose_mat
        )

        target_pose_wrt_origin = self.beams["{}t".format(beam_id)]
        # NOTE(review): this re-assignment of beam_info is redundant.
        beam_info = self.beams[beam_id]
        self.beams["{}t".format(beam_id)] = np.dot(
            target_pose_wrt_origin, local_pose_mat
        )
974 |
975 | def get_marker_pose(self, marker_id):
976 | """
977 | Returns a marker pose in the global coordinate system
978 | Args:
979 | marker_id (int): ID of a marker
980 | """
981 |
982 | top_marker_data, wrist_marker_data = None, None
983 | if "tag_{}".format(marker_id) in self.top_markers:
984 | top_marker_data = self.top_markers["tag_{}".format(marker_id)]
985 |
986 | if "tag_{}".format(marker_id) in self.wrist_markers:
987 | wrist_marker_data = self.wrist_markers["tag_{}".format(marker_id)]
988 |
989 | if top_marker_data is not None and wrist_marker_data is None:
990 | return top_marker_data.pose
991 | elif top_marker_data is None and wrist_marker_data is not None:
992 | return wrist_marker_data.pose
993 | elif top_marker_data is None and wrist_marker_data is None:
994 | return None
995 | else:
996 | if abs((top_marker_data.time - wrist_marker_data.time).seconds) > 5:
997 | if top_marker_data.time > wrist_marker_data.time:
998 | return top_marker_data.pose
999 | else:
1000 | return wrist_marker_data.pose
1001 | else:
1002 | # if time difference is short, then prioritise marker pose obserbed by a wrist camera, because it's potentially more accurate.
1003 | return wrist_marker_data.pose
1004 |
1005 | def construct_scene(self):
1006 | """
1007 | Add boxex around the robot arm to avoid unexpected collisions
1008 | """
1009 | self.robot.movegroup_interface.add_box2scene(
1010 | "table", [0.655, 0, -0.07], [0, 0, 0, 1.0], (1.0, 0.9, 0.1)
1011 | )
1012 | self.robot.movegroup_interface.add_box2scene(
1013 | "back_wall", [-0.4, 0, 0.5], [0, 0, 0, 1.0], (0.1, 1.0, 0.9)
1014 | )
1015 | self.robot.movegroup_interface.add_box2scene(
1016 | "right_wall", [0.655, 0.7, 0.5], [0, 0, 0, 1.0], (1.0, 0.1, 1.0)
1017 | )
1018 | self.robot.movegroup_interface.add_box2scene(
1019 | "left_wall", [0.655, -0.7, 0.5], [0, 0, 0, 1.0], (1.0, 0.1, 1.0)
1020 | )
1021 |
--------------------------------------------------------------------------------
/core/agent.py:
--------------------------------------------------------------------------------
1 | import os, sys
2 | import pickle
3 | import numpy as np
4 | import asyncio
5 | import copy
6 | import logging
7 | import re
8 | import time
9 |
10 | from core.action_interface import ActionInterface
11 |
12 | from sparc_planning.src.beam_domain_coarse import generate_coarse_beam_domain
13 | from sparc_planning.src.beam_domain_fine import generate_fine_beam_domain
14 | from sparc_planning.src.beam_domain_abstract import generate_abstract_beam_domain
15 | from sparc_planning.src.al_structures import ActionInstance, GoalDefinition
16 | from sparc_planning.src.planning import plan
17 | from sparc_planning.src.sparc_io import extract_states_from_answer_set
18 | from sparc_planning.src.zooming import remove_chars_from_last_number, zoom
19 | from beam_assembly.beam_assembly_parser import load_beam_xml, load_assembly_xml
20 | from beam_assembly.beam_to_sparc import create_sparc_data
21 | from beam_assembly.beam_insertion import (
22 | get_target_connection,
23 | calculate_insertion_poses,
24 | calculate_approach,
25 | get_insert_end,
26 | )
27 |
28 |
# Overall coarse-plan length bounds (not referenced in this file's visible
# code; presumably used elsewhere or kept for reference).
MIN_COARSE_PLAN_LENGTH = 50
MAX_COARSE_PLAN_LENGTH = 55
# Per-coarse-action bounds on fine-level plan length (see run_fine_plan).
FINE_ITERATION_MIN = 1
FINE_ITERATION_LIMIT = 10
# Per-goal bounds on coarse-level plan length increments (see run_coarse_plan).
COARSE_ITERATION_MIN = 5
COARSE_PLAN_ITERATION_LIMIT = 32
35 |
# set up logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(filename)s:%(lineno)s - %(levelname)s - %(message)s",
    datefmt="%m-%d %H:%M",
)
# Bug fix: logging.basicConfig() returns None, so the previous
# `logger = logging.basicConfig(...)` left `logger` as None. Configure the
# root logger first, then fetch a real module-level logger.
logger = logging.getLogger(__name__)
42 |
43 |
class Agent:
    """
    Plans a beam-assembly task at three resolutions (abstract, coarse, fine)
    with the SPARC planner and executes the resulting actions through an
    ActionInterface.
    """

    def __init__(self, beam_xml, assembly_xml, domain_generator, fixed_beam_id="b7"):
        """
        Args:
            beam_xml: A xml file containing a set of beams
            assembly_xml: A xml file for a task specification
            domain_generator: A function that generates an example domain of the task
            fixed_beam_id (str): id of the fixed (base) beam of the assembly
        """

        # interface for primitive actions
        self.fixed_beam_id = fixed_beam_id
        self.interface = ActionInterface(fixed_beam_id)

        # regex templates used to parse planner output, e.g.
        # "occurs(pick_up_f(rob0,b1l1),3)" -> action term and time step
        self.action_template = "occurs\((.*),(.*)\)"
        self.abst_action_template = "(.*)\((.*)\)"

        # generate beam domain for abstract, coarse, fine-level planning
        self.abstract = generate_abstract_beam_domain()
        self.coarse = generate_coarse_beam_domain()
        self.fine = generate_fine_beam_domain()

        # load beam information
        self.beams = load_beam_xml(beam_xml)

        # load assembly information
        self.assembly = load_assembly_xml(self.beams, assembly_xml)

        # sorts and static facts derived from the assembly specification
        (
            coarse_sorts,
            self.xml_coarse_statics,
            fine_sorts,
            self.xml_fine_statics,
        ) = create_sparc_data(self.assembly)

        coarse_sort_dict = dict(zip([s.name for s in coarse_sorts], coarse_sorts))
        fine_sort_dict = dict(zip([s.name for s in fine_sorts], fine_sorts))

        logging.debug(f"Fine sorts: {[s.name for s in fine_sorts]}")

        # override abstract sorts with xml derived data
        for i, sort in enumerate(self.abstract.sorts):
            if sort.name in coarse_sort_dict.keys():
                self.abstract.sorts[i].instances = coarse_sort_dict[sort.name].instances
        # override coarse sorts with xml derived data
        for i, sort in enumerate(self.coarse.sorts):
            if sort.name in coarse_sort_dict.keys():
                self.coarse.sorts[i].instances = coarse_sort_dict[sort.name].instances
        # override fine sorts with xml derived data
        for i, sort in enumerate(self.fine.sorts):
            if sort.name in fine_sort_dict.keys():
                self.fine.sorts[i].instances = fine_sort_dict[sort.name].instances

        # initial abstract state: only the base beam is in the assembly
        self.abstract.domain_setup = [
            f"holds(in_assembly_c({self.assembly.base.name}),true,0)."
        ] + self.xml_coarse_statics

        (
            self.coarse_fluents,
            example_coarse_statics,
            self.fine_fluents,
            example_fine_statics,
        ) = domain_generator()

        self.coarse_statics = self.xml_coarse_statics + example_coarse_statics
        self.fine_statics = self.xml_fine_statics + example_fine_statics

        self.coarse.domain_setup = self.coarse_fluents + self.coarse_statics
        self.fine.domain_setup = self.fine_fluents + self.fine_statics

        # automatically generate goal definition for abstract level (add all beams to assembly)
        for beam in self.assembly.beams:
            if beam == self.assembly.base:
                continue
            self.abstract.goal_description.append(
                GoalDefinition("in_assembly_c", [beam.name], True)
            )

        # NOTE(review): abstract_prog and abs_prog are generated by two
        # identical calls; abstract_prog is not used again in this file.
        self.abstract_prog = self.abstract.to_sparc_program()

        self.abs_prog = self.abstract.to_sparc_program()
        self.abs_prog.save(
            os.path.join(
                os.environ["PLANNER_PATH"], "sparc_planning/sparc_files/abs_temp.sp"
            )
        )
130 |
    async def run_abstract_plan(self):
        """
        Run abstract-level planning.

        Solves the abstract program saved by __init__; the plan length is
        bounded by the number of beams (one insertion per beam).

        Returns:
            tuple: (abstract_states, abstract_actions) from the first
            answer set.
        """
        abstract_plan = await plan(
            os.path.join(
                os.environ["PLANNER_PATH"], "sparc_planning/sparc_files/abs_temp.sp"
            ),
            max_length=len(self.assembly.beams),
            min_length=len(self.assembly.beams) - 1,
        )
        # collect results
        abstract_states, abstract_actions = extract_states_from_answer_set(
            abstract_plan[0]
        )  # [0] just takes first answer set (valid plan), further work could explore which route to take
        return abstract_states, abstract_actions
147 |
    async def run_coarse_plan(self, abstract_states, abstract_actions):
        """
        Run coarse-level planning.

        Builds one coarse goal per abstract action (insert that beam) plus a
        final goal fastening every connection, then solves them sequentially,
        carrying the end state of each solve into the next.

        Args:
            abstract_states: a list of abstract-level states (unused here)
            abstract_actions: a list of abstract-level actions
        Returns:
            tuple: (all_coarse_states, all_coarse_actions) accumulated over
            all goals.
        """
        all_coarse_actions = []
        all_coarse_states = []
        coarse_goals = []
        coarse_plan_length = 0

        # NOTE(review): coarse_state_fluents is assigned but never used.
        coarse_state_fluents = copy.deepcopy(self.coarse_fluents)

        # One "insert this beam" goal per abstract step; the beam name is the
        # third token of e.g. "occurs(assemble(rob0,b1),2)".
        for step in abstract_actions:
            inserted_beam = re.split("\(|\)|\,", step)[2]
            coarse_goals.append(
                [GoalDefinition("in_assembly_c", [inserted_beam], True)]
            )
        fastening_goal = []

        # Final goal: every connection i fastened with peg "P{i}".
        for i in range(len(self.assembly.connections)):
            conn = self.assembly.connections[i]
            B1 = conn.element1.name
            B2 = conn.element2.name
            fastening_goal.append(GoalDefinition("fastened_c", [B1, B2, f"P{i}"], True))
        coarse_goals.append(fastening_goal)

        for goal in coarse_goals:
            # extend goal
            self.coarse.goal_description += goal
            coarse_prog = self.coarse.to_sparc_program()
            coarse_prog.save(
                os.path.join(
                    os.environ["PLANNER_PATH"], "sparc_planning/sparc_files/temp.sp"
                )
            )
            # run coarse planner
            coarse_plan = await plan(
                os.path.join(
                    os.environ["PLANNER_PATH"], "sparc_planning/sparc_files/temp.sp"
                ),
                max_length=coarse_plan_length + COARSE_PLAN_ITERATION_LIMIT,
                min_length=coarse_plan_length + COARSE_ITERATION_MIN,
            )
            # collect results
            coarse_states, coarse_actions = extract_states_from_answer_set(
                coarse_plan[0], min_time_step=coarse_plan_length
            )  # [0] just takes first answer set (valid plan), further work could explore which route to take
            logging.info(coarse_actions)
            coarse_plan_length += len(coarse_actions)
            all_coarse_actions += coarse_actions
            all_coarse_states += coarse_states[
                :-1
            ]  # last state will be same as first state of next loop
            # override state for next iteration start
            self.coarse.domain_setup = coarse_states[-1].fluents + self.coarse_statics
            self.coarse.start_step = coarse_plan_length
        all_coarse_states.append(coarse_states[-1])  # add end state data
        return all_coarse_states, all_coarse_actions
208 |
    async def run_fine_plan(self, coarse_states, coarse_actions):
        """
        Run fine-level planning.

        For each coarse action, zooms the fine-resolution system description
        onto the coarse transition, solves it, then merges the partial
        returned state back into the full fine state before the next step.

        Args:
            coarse_states: a list of coarse-level states
            coarse_actions: a list of coarse-level actions
        Returns:
            tuple: (all_fine_actions, fine_actions_per_coarse) where the
            second element groups the fine actions by coarse action.
        """
        all_fine_actions = []
        fine_actions_per_coarse = []
        fine_plan_length = 0

        fine_state_fluents = copy.deepcopy(self.fine_fluents)

        for i in range(len(coarse_actions)):
            # Parse "occurs(name(rob0,...),t)" into the action name and its
            # object arguments ([:-3] drops the trailing time-step tokens).
            _, course_action_name, *objs = re.split("\(|\)|\,", coarse_actions[i])[:-3]

            # ZOOM
            logging.info(f"Coarse Action: {course_action_name}")
            logging.info(f"Action objects: {objs}")
            actr = ActionInstance(
                self.coarse.get_action_by_name(course_action_name),
                objs,
                coarse_states[i].time_step,
            )
            logging.info("Building zoomed system description...")
            zoomed_fine_res_description = zoom(
                coarse_states[i],
                coarse_states[i + 1],
                actr,
                self.coarse,
                self.fine,
                True,
            )
            fine_prog = zoomed_fine_res_description.to_sparc_program()
            fine_prog.save(
                os.path.join(
                    os.environ["PLANNER_PATH"],
                    "sparc_planning/sparc_files/fine_temp.sp",
                )
            )
            logging.info("Zooming complete")

            # run fine planner
            fine_plan = await plan(
                os.path.join(
                    os.environ["PLANNER_PATH"],
                    "sparc_planning/sparc_files/fine_temp.sp",
                ),
                max_length=fine_plan_length + FINE_ITERATION_LIMIT,
                min_length=fine_plan_length + FINE_ITERATION_MIN,
            )
            # collect results
            fine_states, fine_actions = extract_states_from_answer_set(fine_plan[0])
            logging.info(fine_actions)
            fine_plan_length += len(fine_actions)
            # update fine state history
            # only receive a partial state back from zoomed fine res system
            # need to update full state, so un-used fluents need to be progressed in time (did not change)
            updated_fluents = fine_states[-1].fluents
            # state strings are timestamped so this needs to be fixed for comparison
            s1flu = [
                remove_chars_from_last_number(f1)
                for f1 in fine_state_fluents
                if f1[0] != "%"
            ]  # if f1[0] != '%' ignores comment lines
            s2flu = [remove_chars_from_last_number(f2) for f2 in updated_fluents]
            # Split each fluent string into its function part (name + args)
            # and its value so the two state snapshots can be compared.
            s1funcs, s1fun_vals = [], []
            for func in s1flu:
                split = re.split("\(|\)|\,", func)
                s1funcs.append(
                    split[0] + "(" + split[1] + "(" + ",".join(split[2:-3]) + ")"
                )
                s1fun_vals.append(split[-2])
            s2funcs = []
            for func in s2flu:
                split = re.split("\(|\)|\,", func)
                s2funcs.append(
                    split[0] + "(" + split[1] + "(" + ",".join(split[2:-3]) + ")"
                )

            # find fluents which havent been updated in zoomed description
            # functions in s1 but not in s2
            funcs = {*s1funcs}.difference({*s2funcs})
            # Carry unchanged fluents forward with their old value, restamped
            # at the current plan length.
            unchanged_fluents = [
                f"{f},{s1fun_vals[s1funcs.index(f)]},{fine_plan_length})."
                for f in funcs
            ]
            # update fluents at this step
            fine_state_fluents = unchanged_fluents + updated_fluents
            # update fine domain for next planning step
            self.fine.domain_setup = (
                self.fine_statics + fine_state_fluents + self.xml_fine_statics
            )
            self.fine.start_step = fine_plan_length
            all_fine_actions += fine_actions
            fine_actions_per_coarse.append(fine_actions)

        logging.info(f"Coarse Actions: {coarse_actions}")
        logging.info(f"Fine Actions: {all_fine_actions}")
        return all_fine_actions, fine_actions_per_coarse
309 |
    def plan(self):
        """
        Plan high-level actions to assemble beams.

        Runs the three planning levels in sequence, feeding each level's
        result into the next.

        Returns:
            tuple: (coarse_actions, fine_actions, fine_actions_per_coarse)
        """
        abstract_states, abstract_actions = asyncio.run(self.run_abstract_plan())
        coarse_states, coarse_actions = asyncio.run(
            self.run_coarse_plan(abstract_states, abstract_actions)
        )
        fine_actions, fine_actions_per_coarse = asyncio.run(
            self.run_fine_plan(coarse_states, coarse_actions)
        )
        return coarse_actions, fine_actions, fine_actions_per_coarse
322 |
    def execute(self, coarse_actions, fine_actions, fine_actions_per_coarse):
        """
        Execute low-level actions given a plan.
        Args:
            coarse_actions: a list of coarse-level actions
            fine_actions: a list of fine-level actions
            fine_actions_per_coarse: A nested list containing fine-level actions for each coarse-level action.
        """
        print("Coarse action: ", coarse_actions, "\n")
        print("Fine actions: ", fine_actions)

        # Offset into the plan; kept at 0 but threaded through the indexing
        # below (presumably for resuming a partially executed plan).
        start_idx = 0

        fine_actions_per_coarse = self.preprocess_plan(
            coarse_actions, fine_actions_per_coarse
        )

        self.interface.robot.move_to_neutral()
        self.interface.move_to_beam(
            "{}j1".format(self.fixed_beam_id)
        )  # move to the base beam origin
        self.interface.record_fixed_origin()

        # Precompute and load approach/target insertion poses for every
        # coarse step that needs them.
        for fine_action in fine_actions_per_coarse:
            # insertion_poses = calculate_insertion_poses(self.assembly, fine_action, 0.04, 0.2)
            insertion_poses = calculate_insertion_poses(
                self.assembly, fine_action, 0.055, 0.2
            )
            if len(insertion_poses) != 0:
                self.interface.load_insertion_poses(insertion_poses)

        self.interface.robot.move_to_neutral()
        # for i, coarse_action in enumerate(coarse_actions):
        for i, coarse_action in enumerate(coarse_actions[start_idx:]):
            print("Executing coarse action: ", coarse_action)
            # for action in fine_actions_per_coarse[i]:
            for action in fine_actions_per_coarse[start_idx:][i]:
                # Parse "occurs(<abst_action>(<args>),<step>)".
                group = re.match(self.action_template, action)
                abst_action_group = re.match(self.abst_action_template, group[1])
                abst_action, action_args = abst_action_group[1], abst_action_group[
                    2
                ].split(",")
                # Time step of the action; not used below.
                order = group[2]

                # The robot identifier is implicit in every primitive call.
                action_args.remove("rob0")
                print("Executing {}".format(action))
                if abst_action == "move_f":
                    target = action_args[0]
                    if target == "above_input_area":
                        self.interface.move_to_input_area()
                    elif target == "above_intermediate_area":
                        self.interface.move_to_intermediate_area()
                    elif target == "above_assembly_area":
                        self.interface.move_to_assembly_area()
                    elif re.search("(b\d*)i", target) or re.search("(b\d*)t", target):
                        # move to the beam input or target pose
                        self.interface.move_to_beam(target)
                    elif re.search("(b\d*)a", target):
                        # move to the approach pose of the beam for insertion
                        insert_end = get_insert_end(
                            self.assembly, fine_actions_per_coarse[start_idx:][i]
                        )
                        self.interface.move_beam_to_approach_pose(target, insert_end)
                    elif re.search("(p\d*)i", target):
                        # move to the peg input pose
                        self.interface.move_to_peg(target)
                    else:
                        raise NotImplementedError
                elif abst_action == "pick_up_f":  # Pick up action
                    target = action_args[0]
                    if re.search("(b\d*)(l\d*)", target):
                        self.interface.pickup_beam(target)
                    elif re.search("(p\d*)", target):
                        # pick up peg
                        self.interface.pickup_peg(target)
                    else:
                        raise NotImplementedError
                elif abst_action == "putdown_f":
                    # put down a grasped object
                    self.interface.put_down()
                    pass
                elif abst_action == "assemble_f_square":
                    # assemble square action
                    grasped_beam = action_args[0]
                    target_beam = self.get_connected_beam(grasped_beam)
                    self.interface.assemble_beam_square(grasped_beam, target_beam)
                elif abst_action == "fasten":
                    # insert a peg into a hole
                    bj1, bj2, peg = action_args
                    self.interface.insert_peg(bj1, bj2, peg)
                elif abst_action == "assemble_f_cap":
                    # capping with a beam
                    grasped_beam = action_args[0]
                    target_beam = self.get_connected_beam(grasped_beam)
                    self.interface.cap_beams(grasped_beam, target_beam.name)
                elif abst_action == "push":
                    # push a beam
                    target = action_args[0]
                    connected_beams = self.get_connected_beams(target)
                    # NOTE(review): connected_beam_joint is only bound when a
                    # connected beam contains the base beam's name; otherwise
                    # the push call below raises UnboundLocalError --
                    # presumably guaranteed by the plan; confirm.
                    for beam in connected_beams:
                        if self.assembly.base.name in beam:
                            connected_beam_joint = beam
                    # pre_cap = self.is_capped(fine_actions_per_coarse[:i], target)
                    pre_cap = self.is_capped(
                        fine_actions_per_coarse[: start_idx + i], target
                    )
                    self.interface.push(target, connected_beam_joint, pre_cap)
                else:
                    raise NotImplementedError
            self.interface.put_down()
            self.interface.move_to_intermediate_area()
434 |
435 | def get_connected_beam(self, beam):
436 | beam_group = re.match("(b\d*)(j.*)", beam)
437 | for connection in self.assembly.get_beam_connections(beam_group[1]):
438 | if beam in connection.joint1.name:
439 | target_beam = connection.joint2
440 | elif beam in connection.joint2.name:
441 | target_beam = connection.joint1
442 | return target_beam
443 |
444 | def get_connected_beams(self, beam):
445 | beam_group = re.match("(b\d*)", beam)
446 | connected_beams = []
447 | for connection in self.assembly.get_beam_connections(beam_group[1]):
448 | if beam in connection.joint1.name:
449 | connected_beams.append(connection.joint2.name)
450 | elif beam in connection.joint2.name:
451 | connected_beams.append(connection.joint1.name)
452 | return connected_beams
453 |
    def preprocess_plan(self, coarse_actions, fine_actions_per_coarse):
        """
        Preprocess a generated high-level plan. In order to insert a beam to the other beam stably, a robot needs to grasp a link next to a joint that is inserted to the other beam. However, our current planner does not plan to generate such a plan. Thus, we manually update the grasp link position.
        Args:
            coarse_actions: A list of coarse-level actions (unused here)
            fine_actions_per_coarse: A nested list containing fine-level actions for each coarse-level action.
        Returns:
            The same nested list, with each pick-up action rewritten (in
            place) to grasp the link next to the inserted joint.
        """

        # Index (coarse step, fine step) and grasped link of the most recent
        # pick-up action, pending its matching assemble action.
        pickup_idx = None
        pickup_link = None
        for i, fine_actions in enumerate(fine_actions_per_coarse):
            for j, action in enumerate(fine_actions):
                group = re.match(self.action_template, action)
                abst_action_group = re.match(self.abst_action_template, group[1])
                abst_action, action_args = abst_action_group[1], abst_action_group[
                    2
                ].split(",")
                action_args.remove("rob0")
                if abst_action == "pick_up_f":  # Pick up action
                    target = action_args[0]
                    if re.search("(b\d*)(l\d*)", target):
                        pickup_idx = (i, j)
                        pickup_link = target

                if abst_action == "assemble_f_square":
                    grasped_beam = action_args[0]
                    group = re.match("(b\d*)(j\d*)", grasped_beam)
                    if pickup_idx is None:
                        raise ValueError("pick up action is not found before assembly")
                    # Rewrite the earlier pick-up so the grasped link matches
                    # the inserted joint, e.g. joint "j2" -> link "l2".
                    fine_actions_per_coarse[pickup_idx[0]][
                        pickup_idx[1]
                    ] = fine_actions_per_coarse[pickup_idx[0]][pickup_idx[1]].replace(
                        pickup_link, "{}l{}".format(group[1], group[2][1:])
                    )
                    pickup_idx = None
                    pickup_link = None
        return fine_actions_per_coarse
491 |
    def is_capped(self, fine_actions_per_coarse, target):
        """
        Check if a beam is already capped.

        Scans the given (already executed) fine actions for any capping
        action whose beam is connected to the target beam.

        Args:
            fine_actions_per_coarse: A nested list containing fine-level actions for each coarse-level action.
            target: A target beam
        Returns:
            bool: True when a capping action touching a beam connected to
            ``target`` is found.
        """
        for fine_actions in fine_actions_per_coarse:
            for action in fine_actions:
                group = re.match(self.action_template, action)
                abst_action_group = re.match(self.abst_action_template, group[1])
                abst_action, action_args = abst_action_group[1], abst_action_group[
                    2
                ].split(",")
                # Matches e.g. "assemble_f_cap" by substring.
                if "cap" in abst_action:
                    beam_id, _ = self.interface.get_beam_joint_id(action_args[1])
                    for connected_beam in self.get_connected_beams(beam_id):
                        if target in connected_beam:
                            return True
        return False
512 |
--------------------------------------------------------------------------------
/core/const.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from core.utils.general_utils import AttrDict
4 |
# Geometric description of each beam, keyed by beam id. Per beam:
#   marker_ids: fiducial tag ids attached to the beam; origin_id is the tag
#     at the beam origin.
#   offset: translation (metres) from the origin marker to the beam frame --
#     presumably marker centre to beam axis; TODO confirm.
#   joint_to_marker: joints that carry their own marker, each with the
#     marker id and the marker->joint offset.
#   joint_offsets / link_offsets: joint and link positions along the beam
#     axis, relative to joint j1 (metres).
BEAMS = {
    "b1": AttrDict(
        marker_ids=np.array([5, 6]),
        origin_id=5,
        offset=np.array([0, 0.0365, -0.0261]),
        joint_to_marker=AttrDict(
            j1=AttrDict(marker_id=5, offset=np.array([0, 0.0365, -0.0261])),
            j3=AttrDict(marker_id=6, offset=np.array([0, 0.0365, -0.0261])),
        ),
        joint_offsets=AttrDict(
            j1=np.array([0, 0.0, 0]),
            j2=np.array([0, -0.1462, 0]),
            j3=np.array([0, -0.2924, 0]),
        ),
        link_offsets=AttrDict(
            l1=np.array([0, -0.082125, 0]),
            l2=np.array([0.0, -0.210275, 0]),
        ),
    ),
    "b2": AttrDict(
        marker_ids=np.array([7, 8]),
        origin_id=7,
        offset=np.array([0, 0.0365, -0.0261]),
        joint_to_marker=AttrDict(
            j1=AttrDict(marker_id=7, offset=np.array([0, 0.0365, -0.0261])),
            j3=AttrDict(marker_id=8, offset=np.array([0, 0.0365, -0.0261])),
        ),
        joint_offsets=AttrDict(
            j1=np.array([0, 0.0, 0]),
            j2=np.array([0, -0.1462, 0]),
            j3=np.array([0, -0.2924, 0]),
        ),
        link_offsets=AttrDict(
            l1=np.array([0, -0.082125, 0]),
            l2=np.array([0.0, -0.210275, 0]),
        ),
    ),
    "b3": AttrDict(
        marker_ids=np.array([3, 4]),
        origin_id=3,
        offset=np.array([0, 0.0365, -0.0261]),
        joint_to_marker=AttrDict(
            j1=AttrDict(marker_id=3, offset=np.array([0, 0.0365, -0.0261])),
            j3=AttrDict(marker_id=4, offset=np.array([0, 0.0365, -0.0261])),
        ),
        joint_offsets=AttrDict(
            j1=np.array([0, 0.0, 0]),
            j2=np.array([0, -0.1462, 0]),
            j3=np.array([0, -0.2924, 0]),
        ),
        link_offsets=AttrDict(
            l1=np.array([0, -0.082125, 0]),
            l2=np.array([0.0, -0.210275, 0]),
        ),
    ),
    "b4": AttrDict(
        marker_ids=np.array([9, 10]),
        origin_id=9,
        offset=np.array([0, 0.0365, -0.0294]),
        joint_to_marker=AttrDict(
            j1=AttrDict(marker_id=9, offset=np.array([0, 0.0365, -0.0294])),
            j3=AttrDict(marker_id=10, offset=np.array([0, 0.0365, -0.0294])),
        ),
        joint_offsets=AttrDict(
            j1=np.array([0, 0.0, 0]),
            j2=np.array([0, -0.1462, 0]),
            j3=np.array([0, -0.2924, 0]),
        ),
        link_offsets=AttrDict(
            l1=np.array([0, -0.082125, 0]),
            l2=np.array([0.0, -0.210275, 0]),
        ),
    ),
    "b5": AttrDict(
        marker_ids=np.array([1, 2]),
        origin_id=1,
        offset=np.array([0, 0.0365, -0.0294]),
        joint_to_marker=AttrDict(
            j1=AttrDict(marker_id=1, offset=np.array([0, 0.0365, -0.0294])),
            j3=AttrDict(marker_id=2, offset=np.array([0, 0.0365, -0.0294])),
        ),
        joint_offsets=AttrDict(
            j1=np.array([0, -0.0, 0]),
            j2=np.array([0, -0.1462, 0]),
            j3=np.array([0, -0.2924, 0]),
        ),
        link_offsets=AttrDict(
            l1=np.array([0, -0.082125, 0]),
            l2=np.array([0.0, -0.210275, 0]),
        ),
    ),
    # b6 is the only beam with positive (opposite-direction) offsets.
    "b6": AttrDict(
        marker_ids=np.array([17, 18]),
        origin_id=17,
        offset=np.array([0, 0.0365, -0.0261]),
        joint_to_marker=AttrDict(
            j1=AttrDict(marker_id=17, offset=np.array([0, 0.0365, -0.0261])),
            j3=AttrDict(marker_id=18, offset=np.array([0, 0.0365, -0.0261])),
        ),
        joint_offsets=AttrDict(
            j1=np.array([0, 0.0, 0]),
            j2=np.array([0, 0.18952, 0]),
            j3=np.array([0, 0.37904, 0]),
        ),
        link_offsets=AttrDict(
            l1=np.array([0, 0.103785, 0]),
            l2=np.array([0.0, 0.275255, 0]),
        ),
    ),
    "b7": AttrDict(
        marker_ids=np.array([13, 14]),
        origin_id=13,
        offset=np.array([0, 0.0299, -0.0296]),
        joint_to_marker=AttrDict(
            j1=AttrDict(marker_id=13, offset=np.array([0, 0.0299, -0.0296])),
            j5=AttrDict(marker_id=14, offset=np.array([0, 0.0299, -0.0296])),
        ),
        joint_offsets=AttrDict(
            j1=np.array([0, 0.0, 0]),
            j2=np.array([0, -0.1262, 0]),
            j3=np.array([0, -0.2468, 0]),
            j4=np.array([0, -0.3674, 0]),
            j5=np.array([0, -0.4936, 0]),
        ),
        link_offsets=AttrDict(
            l1=np.array([0, -0.0582, 0]),
            l2=np.array([0, -0.1942, 0]),
            l3=np.array([0, -0.2994, 0]),
            l4=np.array([0, -0.4354, 0]),
        ),
    ),
    "b8": AttrDict(
        marker_ids=np.array([11, 12]),
        origin_id=11,
        offset=np.array([0, 0.0299, -0.0296]),
        joint_to_marker=AttrDict(
            j1=AttrDict(marker_id=11, offset=np.array([0, 0.0299, -0.0296])),
            j5=AttrDict(marker_id=12, offset=np.array([0, 0.0299, -0.0296])),
        ),
        joint_offsets=AttrDict(
            j1=np.array([0, 0.0, 0]),
            j2=np.array([0, -0.1262, 0]),
            j3=np.array([0, -0.2468, 0]),
            j4=np.array([0, -0.3674, 0]),
            j5=np.array([0, -0.4936, 0]),
        ),
        # Link offsets were re-tuned; the commented values are the originals
        # (matching b7).
        link_offsets=AttrDict(
            # l1=np.array([0, -0.0582, 0]),
            l1=np.array([0, -0.0482, 0]),
            # l2=np.array([0, -0.1942, 0]),
            l2=np.array([0, -0.1842, 0]),
            # l3=np.array([0, -0.2994, 0]),
            l3=np.array([0, -0.29, 0]),
            # l4=np.array([0, -0.4354, 0]),
            l4=np.array([0, -0.4254, 0]),
        ),
    ),
    "b9": AttrDict(
        marker_ids=np.array([15, 16]),
        origin_id=15,
        offset=np.array([0, 0.0365, -0.0261]),
        joint_to_marker=AttrDict(
            j1=AttrDict(marker_id=15, offset=np.array([0, 0.0365, -0.0261])),
            j5=AttrDict(marker_id=16, offset=np.array([0, 0.0365, -0.0261])),
        ),
        joint_offsets=AttrDict(
            j1=np.array([0, 0.0, 0]),
            j2=np.array([0, -0.1262, 0]),
            j3=np.array([0, -0.2468, 0]),
            j4=np.array([0, -0.3674, 0]),
            j5=np.array([0, -0.4936, 0]),
        ),
        link_offsets=AttrDict(
            l1=np.array([0, -0.062125, 0]),
            # l1=np.array([0, -0.072125, 0]),
            l2=np.array([0, -0.17888, 0]),
            l3=np.array([0, -0.3148, 0]),
            l4=np.array([0, -0.421475, 0]),
        ),
    ),
}

# Peg id -> tag id (presumably fiducial marker ids; note p4 skips 20 and
# values 17/18 also appear as b6's marker_ids -- NOTE(review): verify).
PEGS = AttrDict(p1=17, p2=18, p3=19, p4=21, p5=22, p6=23)
188 |
--------------------------------------------------------------------------------
/core/utils/general_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from contextlib import contextmanager
3 | import time
4 | import copy
5 | import re
6 | import random
7 | import functools
8 | from functools import partial, reduce
9 | import collections
10 | from collections import OrderedDict
11 |
12 | from core.utils import ndim
13 |
14 |
class AttrDict(dict):
    """
    Dictionary subclass whose items are also accessible as attributes.

    ``d.key`` reads ``d["key"]`` and ``d.key = v`` writes ``d["key"] = v``.
    Missing attributes raise AttributeError (not KeyError) so hasattr(),
    deepcopy and OrderedDict behave correctly.
    """

    # Attribute assignment writes straight into the mapping.
    __setattr__ = dict.__setitem__

    def __getattr__(self, attr):
        # Take care that getattr() raises AttributeError, not KeyError.
        # Required e.g. for hasattr(), deepcopy and OrderedDict.
        try:
            return self.__getitem__(attr)
        except KeyError:
            raise AttributeError("Attribute %r not found" % attr)

    def __getstate__(self):
        # Pickle the mapping itself as the state.
        return self

    def __setstate__(self, d):
        # Bug fix: the previous `self = d` only rebound the local name and
        # was a no-op; restore the pickled state in place instead.
        self.update(d)
31 |
--------------------------------------------------------------------------------
/core/utils/ros_transform_utils.py:
--------------------------------------------------------------------------------
1 | # Helper Functions for interfacing with TF2
2 | # Stolen from the ACRV 2017 Amazon Robotics Challenge
3 |
4 | import rospy
5 | import geometry_msgs.msg as gmsg
6 | import tf2_ros
7 | import tf2_geometry_msgs
8 |
# Lazy create on use (convert_pose) to avoid errors.
# Shared TF2 buffer and listener, populated by _init_tf() on first use.
tfBuffer = None
listener = None
12 |
13 |
def _init_tf():
    """Create the module-level TF2 buffer and listener (after init_node)."""
    # Create buffer and listener
    # Something has changed in tf that means this must happen after init_node
    global tfBuffer, listener
    tfBuffer = tf2_ros.Buffer()
    listener = tf2_ros.TransformListener(tfBuffer)
20 |
21 |
def quaternion_to_list(quaternion):
    """Convert a quaternion message into an [x, y, z, w] list."""
    return [getattr(quaternion, axis) for axis in ("x", "y", "z", "w")]
24 |
25 |
def list_to_quaternion(l):
    """Build a geometry_msgs Quaternion from an [x, y, z, w] list."""
    q = gmsg.Quaternion()
    q.x, q.y, q.z, q.w = l[0], l[1], l[2], l[3]
    return q
33 |
34 |
def convert_pose(pose, from_frame, to_frame):
    """
    Convert a pose or transform between frames using tf.
    pose -> A geometry_msgs.msg/Pose that defines the robots position and orientation in a reference_frame
    from_frame -> A string that defines the original reference_frame of the robot
    to_frame -> A string that defines the desired reference_frame of the robot to convert to

    Returns the transformed geometry_msgs.msg/Pose, or None when the
    transform lookup fails.
    """
    global tfBuffer, listener

    # Lazily create the shared buffer/listener (must happen after init_node).
    if tfBuffer is None or listener is None:
        _init_tf()

    try:
        trans = tfBuffer.lookup_transform(
            to_frame, from_frame, rospy.Time(0), rospy.Duration(1.0)
        )
    except (
        tf2_ros.LookupException,
        tf2_ros.ConnectivityException,
        tf2_ros.ExtrapolationException,
    ) as e:
        print(e)
        rospy.logerr("FAILED TO GET TRANSFORM FROM %s to %s" % (to_frame, from_frame))
        return None

    spose = gmsg.PoseStamped()
    spose.pose = pose
    # Bug fix: `rospy.Time().now` assigned the method object itself, not a
    # timestamp; call rospy.Time.now() (as publish_stamped_transform does).
    spose.header.stamp = rospy.Time.now()
    spose.header.frame_id = from_frame

    p2 = tf2_geometry_msgs.do_transform_pose(spose, trans)

    return p2.pose
69 |
70 |
def current_robot_pose(reference_frame, base_frame):
    """
    Get the current pose of the robot (base_frame) expressed in reference_frame.
    reference_frame -> A string that defines the frame the returned pose is expressed in
    base_frame -> A string naming the robot's base frame
    Returns the converted geometry_msgs.msg/Pose (or None if the tf lookup failed).
    """
    # An identity pose at the origin of base_frame ...
    identity = gmsg.Pose()
    identity.orientation.w = 1.0

    # ... expressed in reference_frame is exactly the robot's current pose.
    return convert_pose(identity, base_frame, reference_frame)
82 |
83 |
def publish_stamped_transform(stamped_transform, seconds=1):
    """
    Publish a stamped transform for debugging purposes.
    stamped_transform -> A geometry_msgs/TransformStamped to be published
    seconds -> duration the transform will be re-broadcast for (at 20 Hz)
    """
    broadcaster = tf2_ros.TransformBroadcaster()

    # Send once immediately so the frame shows up without waiting a tick.
    stamped_transform.header.stamp = rospy.Time.now()
    broadcaster.sendTransform(stamped_transform)

    # Then keep re-broadcasting at 20 Hz for the requested duration.
    ticks = seconds / 0.05
    sent = 0
    while sent < ticks and not rospy.is_shutdown():
        stamped_transform.header.stamp = rospy.Time.now()
        broadcaster.sendTransform(stamped_transform)
        rospy.sleep(0.05)
        sent += 1
105 |
106 |
def publish_transform(transform, reference_frame, name, seconds=1):
    """
    Publish a Transform for debugging purposes.
    transform -> A geometry_msgs/Transform to be published
    reference_frame -> A string defining the reference frame of the transform
    name -> A string defining the child frame of the transform
    seconds -> duration the transform will be broadcast for
    """
    # Wrap the bare transform in a stamped message and delegate.
    stamped = gmsg.TransformStamped()
    stamped.header.frame_id = reference_frame
    stamped.child_frame_id = name
    stamped.transform = transform
    publish_stamped_transform(stamped, seconds)
122 |
123 |
def publish_pose_as_transform(pose, reference_frame, name, seconds=1):
    """
    Publish a pose as a transform so that it is visualised in rviz.
    pose -> A geometry_msgs.msg/Pose to be converted into a transform and published
    reference_frame -> A string defining the reference_frame of the pose
    name -> A string defining the child frame of the transform
    seconds -> duration the transform will be broadcast for
    """
    stamped = gmsg.TransformStamped()
    stamped.header.frame_id = reference_frame
    stamped.child_frame_id = name

    # A pose and a transform carry the same payload, so the position and
    # orientation messages can be reused directly.
    stamped.transform.translation = pose.position
    stamped.transform.rotation = pose.orientation

    publish_stamped_transform(stamped, seconds)
146 |
147 |
def publish_tf_quaterion_as_transform(
    translation, quaternion, reference_frame, name, seconds=1
):
    """
    Publish a translation + (x, y, z, w) quaternion pair as a transform.
    translation -> indexable (x, y, z) position
    quaternion -> indexable (x, y, z, w) orientation
    reference_frame -> string naming the parent frame
    name -> string naming the child frame
    seconds -> duration the transform will be broadcast for
    """
    transform = gmsg.Transform()
    transform.translation.x = translation[0]
    transform.translation.y = translation[1]
    transform.translation.z = translation[2]
    rot = transform.rotation
    rot.x, rot.y, rot.z, rot.w = (
        quaternion[0],
        quaternion[1],
        quaternion[2],
        quaternion[3],
    )
    publish_transform(transform, reference_frame, name, seconds)
160 |
161 |
def align_pose_orientation_to_frame(
    from_pose, from_reference_frame, to_reference_frame
):
    """
    Update the orientation of from_pose so that it matches the orientation of to_reference_frame.
    Useful for aligning a desired position pose with a gripper, for example.
    from_pose -> A geometry_msgs.msg/Pose to align
    from_reference_frame -> A string defining the reference_frame of the pose
    to_reference_frame -> A string defining the reference_frame to align to

    NOTE(review): convert_pose returns None when the tf lookup fails, which
    would raise AttributeError below — confirm callers only use this with tf up.
    """
    # Identity orientation in the target frame ...
    identity = gmsg.Pose()
    identity.orientation.w = 1.0

    # ... expressed in the source frame gives the orientation to copy over.
    aligned = convert_pose(identity, to_reference_frame, from_reference_frame)

    from_pose.orientation = aligned.orientation
    return from_pose
183 |
--------------------------------------------------------------------------------
/core/utils/transform_utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Utility functions of matrix and vector transformations.
3 | NOTE: convention for quaternions is (x, y, z, w)
4 | Reference: https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/utils/transform_utils.py
5 | """
6 |
7 | import math
8 | import numpy as np
9 | import torch
10 | import torch.nn.functional as F
11 |
12 | # from core.utils.numba_utils import jit_decorator
13 |
14 |
PI = np.pi
# Float tolerance used for near-zero / near-identity comparisons
# (four machine epsilons of a double).
EPS = np.finfo(float).eps * 4.0

# axis sequences for Euler angles
# Cyclic successor table: indexed with i + parity / i - parity + 1 in the
# euler <-> matrix conversions to pick the second and third rotation axes.
_NEXT_AXIS = [1, 2, 0, 1]

# map axes strings to/from tuples of inner axis, parity, repetition, frame
# e.g. "sxyz" = static (extrinsic) frame, x-y-z order, no repeated axis.
_AXES2TUPLE = {
    "sxyz": (0, 0, 0, 0),
    "sxyx": (0, 0, 1, 0),
    "sxzy": (0, 1, 0, 0),
    "sxzx": (0, 1, 1, 0),
    "syzx": (1, 0, 0, 0),
    "syzy": (1, 0, 1, 0),
    "syxz": (1, 1, 0, 0),
    "syxy": (1, 1, 1, 0),
    "szxy": (2, 0, 0, 0),
    "szxz": (2, 0, 1, 0),
    "szyx": (2, 1, 0, 0),
    "szyz": (2, 1, 1, 0),
    "rzyx": (0, 0, 0, 1),
    "rxyx": (0, 0, 1, 1),
    "ryzx": (0, 1, 0, 1),
    "rxzx": (0, 1, 1, 1),
    "rxzy": (1, 0, 0, 1),
    "ryzy": (1, 0, 1, 1),
    "rzxy": (1, 1, 0, 1),
    "ryxy": (1, 1, 1, 1),
    "ryxz": (2, 0, 0, 1),
    "rzxz": (2, 0, 1, 1),
    "rxyz": (2, 1, 0, 1),
    "rzyz": (2, 1, 1, 1),
}

# Inverse lookup of _AXES2TUPLE: encoded tuple -> axis-sequence string.
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
50 |
51 |
def convert_quat(q, to="xyzw"):
    """
    Converts quaternion from one convention to another.
    The convention to convert TO is specified as an optional argument.
    If to == 'xyzw', then the input is in 'wxyz' format, and vice-versa.
    Args:
        q (np.array): a 4-dim array corresponding to a quaternion
        to (str): either 'xyzw' or 'wxyz', determining which convention to convert to.
    """
    order = {"xyzw": [1, 2, 3, 0], "wxyz": [3, 0, 1, 2]}.get(to)
    if order is None:
        raise Exception("convert_quat: choose a valid `to` argument (xyzw or wxyz)")
    return q[order]
66 |
67 |
def quat_multiply(quaternion1, quaternion0):
    """
    Return the Hamilton product of two quaternions (q1 * q0).
    E.g.:
    >>> q = quat_multiply([1, -2, 3, 4], [-5, 6, 7, 8])
    >>> np.allclose(q, [-44, -14, 48, 28])
    True
    Args:
        quaternion1 (np.array): (x,y,z,w) quaternion
        quaternion0 (np.array): (x,y,z,w) quaternion
    Returns:
        np.array: (x,y,z,w) multiplied quaternion
    """
    bx, by, bz, bw = quaternion0
    ax, ay, az, aw = quaternion1
    return np.array(
        (
            ax * bw + ay * bz - az * by + aw * bx,
            -ax * bz + ay * bw + az * bx + aw * by,
            ax * by - ay * bx + az * bw + aw * bz,
            -ax * bx - ay * by - az * bz + aw * bw,
        ),
        dtype=np.float32,
    )
92 |
93 |
def quat_conjugate(quaternion):
    """
    Return conjugate of quaternion: vector part negated, scalar part kept.
    Args:
        quaternion (np.array): (x,y,z,w) quaternion
    Returns:
        np.array: (x,y,z,w) quaternion conjugate
    """
    x, y, z, w = quaternion[0], quaternion[1], quaternion[2], quaternion[3]
    return np.array((-x, -y, -z, w), dtype=np.float32)
111 |
112 |
def quat_inverse(quaternion):
    """
    Return inverse of quaternion: conjugate divided by squared norm, so that
    quat_multiply(q, quat_inverse(q)) == [0, 0, 0, 1].
    Args:
        quaternion (np.array): (x,y,z,w) quaternion
    Returns:
        np.array: (x,y,z,w) quaternion inverse
    """
    q = np.asarray(quaternion)
    norm_sq = np.dot(q, q)
    conjugate = np.array((-q[0], -q[1], -q[2], q[3]), dtype=np.float32)
    return conjugate / norm_sq
127 |
128 |
def quat_distance(quaternion1, quaternion0):
    """
    Returns distance between two quaternions, such that distance * quaternion0 = quaternion1.
    Args:
        quaternion1 (np.array): (x,y,z,w) quaternion
        quaternion0 (np.array): (x,y,z,w) quaternion
    Returns:
        np.array: (x,y,z,w) quaternion distance
    """
    inv0 = quat_inverse(quaternion0)
    return quat_multiply(quaternion1, inv0)
139 |
140 |
def quat_slerp(quat0, quat1, fraction, shortestpath=True):
    """
    Return spherical linear interpolation between two quaternions.
    Args:
        quat0 (np.array): (x,y,z,w) quaternion startpoint
        quat1 (np.array): (x,y,z,w) quaternion endpoint
        fraction (float): fraction of interpolation to calculate
        shortestpath (bool): If True, will calculate the shortest path
    Returns:
        np.array: (x,y,z,w) interpolated quaternion
    """
    start = unit_vector(quat0[:4])
    end = unit_vector(quat1[:4])

    # Trivial endpoints need no interpolation.
    if fraction == 0.0:
        return start
    if fraction == 1.0:
        return end

    cos_angle = np.dot(start, end)
    if abs(abs(cos_angle) - 1.0) < EPS:
        # Endpoints (anti)parallel: nothing to interpolate between.
        return start

    if shortestpath and cos_angle < 0.0:
        # Flip one endpoint so we travel the shorter arc.
        cos_angle = -cos_angle
        end = end * -1.0

    angle = math.acos(np.clip(cos_angle, -1, 1))
    if abs(angle) < EPS:
        return start

    inv_sin = 1.0 / math.sin(angle)
    w_start = math.sin((1.0 - fraction) * angle) * inv_sin
    w_end = math.sin(fraction * angle) * inv_sin
    return start * w_start + end * w_end
187 |
188 |
def random_quat(rand=None):
    """
    Return uniform random unit quaternion.
    Args:
        rand (3-array or None): If specified, must be three independent random variables
            that are uniformly distributed between 0 and 1.
    Returns:
        np.array: (x,y,z,w) random quaternion
    """
    if rand is None:
        rand = np.random.rand(3)
    else:
        assert len(rand) == 3

    # Shoemake's subgroup algorithm: two circles weighted by rand[0].
    radius1 = np.sqrt(1.0 - rand[0])
    radius2 = np.sqrt(rand[0])
    theta1 = 2.0 * math.pi * rand[1]
    theta2 = 2.0 * math.pi * rand[2]
    return np.array(
        (
            np.sin(theta1) * radius1,
            np.cos(theta1) * radius1,
            np.sin(theta2) * radius2,
            np.cos(theta2) * radius2,
        ),
        dtype=np.float32,
    )
218 |
219 |
def random_axis_angle(angle_limit=None, random_state=None):
    """
    Samples an axis-angle rotation by first sampling a random axis
    and then sampling an angle. If @angle_limit is provided, the size
    of the rotation angle is constrained.
    Args:
        angle_limit (None or float): If set, determines magnitude limit of angles to generate
        random_state (None or RandomState): RNG to use if specified
    Returns:
        2-tuple of (unit axis np.array, angle float)
    Raises:
        AssertionError: [Invalid RNG]
    """
    limit = 2.0 * np.pi if angle_limit is None else angle_limit

    if random_state is None:
        rng = np.random
    else:
        assert isinstance(random_state, np.random.RandomState)
        rng = random_state

    # Unit axis from a normalized spherical-Gaussian sample, see
    # http://extremelearning.com.au/how-to-generate-uniformly-random-points-on-n-spheres-and-n-balls/
    # (axis drawn before angle to keep the RNG stream order unchanged).
    axis = rng.randn(3)
    axis /= np.linalg.norm(axis)
    angle = rng.uniform(low=0.0, high=limit)
    return axis, angle
249 |
250 |
def vec(values):
    """
    Pack a sequence of numbers into a float32 numpy vector.
    Args:
        values (n-array): a tuple of numbers
    Returns:
        np.array: vector of given values
    """
    return np.array(values, dtype=np.float32)
260 |
261 |
def mat4(array):
    """
    Reshape a 16-element array-like into a float32 4x4 matrix (row-major).
    Args:
        array (n-array): the array in form of vec, list, or tuple
    Returns:
        np.array: a 4x4 numpy matrix
    """
    return np.reshape(np.array(array, dtype=np.float32), (4, 4))
271 |
272 |
def mat2pose(hmat):
    """
    Split a homogeneous 4x4 matrix into a (position, quaternion) pose.
    Args:
        hmat (np.array): a 4x4 homogeneous matrix
    Returns:
        2-tuple:
            - (np.array) (x,y,z) position array in cartesian coordinates
            - (np.array) (x,y,z,w) orientation array in quaternion form
    """
    translation = hmat[:3, 3]
    quaternion = mat2quat(hmat[:3, :3])
    return translation, quaternion
286 |
287 |
288 | # @jit_decorator
def mat2quat(rmat):
    """
    Converts given rotation matrix to quaternion via the eigenvector method.
    Args:
        rmat (np.array): 3x3 rotation matrix
    Returns:
        np.array: (x,y,z,w) float quaternion angles
    """
    rot = np.asarray(rmat).astype(np.float32)[:3, :3]

    r00, r01, r02 = rot[0, 0], rot[0, 1], rot[0, 2]
    r10, r11, r12 = rot[1, 0], rot[1, 1], rot[1, 2]
    r20, r21, r22 = rot[2, 0], rot[2, 1], rot[2, 2]

    zero = np.float32(0.0)
    # Symmetric matrix K whose dominant eigenvector is the quaternion (w,x,y,z).
    K = np.array(
        [
            [r00 - r11 - r22, zero, zero, zero],
            [r01 + r10, r11 - r00 - r22, zero, zero],
            [r02 + r20, r12 + r21, r22 - r00 - r11, zero],
            [r21 - r12, r02 - r20, r10 - r01, r00 + r11 + r22],
        ]
    )
    K /= 3.0

    # Quaternion is the eigenvector of K for the largest eigenvalue,
    # reordered so index 0 holds w.
    eigvals, eigvecs = np.linalg.eigh(K)
    quat_wxyz = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
    if quat_wxyz[0] < 0.0:
        # Canonicalize sign: w >= 0.
        np.negative(quat_wxyz, quat_wxyz)

    # Return in (x,y,z,w) convention.
    return quat_wxyz[[1, 2, 3, 0]]
326 |
327 |
def euler2mat(euler):
    """
    Converts euler angles into rotation matrix form.
    Args:
        euler (np.array): (r,p,y) angles
    Returns:
        np.array: 3x3 rotation matrix
    Raises:
        AssertionError: [Invalid input shape]
    """
    angles = np.asarray(euler, dtype=np.float64)
    assert angles.shape[-1] == 3, "Invalid shaped euler {}".format(angles)

    # Negated and reversed to match the module's (r,p,y) convention.
    ai, aj, ak = -angles[..., 2], -angles[..., 1], -angles[..., 0]
    sin_i, sin_j, sin_k = np.sin(ai), np.sin(aj), np.sin(ak)
    cos_i, cos_j, cos_k = np.cos(ai), np.cos(aj), np.cos(ak)
    cc, cs = cos_i * cos_k, cos_i * sin_k
    sc, ss = sin_i * cos_k, sin_i * sin_k

    rot = np.empty(angles.shape[:-1] + (3, 3), dtype=np.float64)
    rot[..., 2, 2] = cos_j * cos_k
    rot[..., 2, 1] = sin_j * sc - cs
    rot[..., 2, 0] = sin_j * cc + ss
    rot[..., 1, 2] = cos_j * sin_k
    rot[..., 1, 1] = sin_j * ss + cc
    rot[..., 1, 0] = sin_j * cs - sc
    rot[..., 0, 2] = -sin_j
    rot[..., 0, 1] = cos_j * sin_i
    rot[..., 0, 0] = cos_j * cos_i
    return rot
359 |
360 |
def mat2euler(rmat, axes="sxyz"):
    """
    Converts given rotation matrix to euler angles in radian.
    Args:
        rmat (np.array): 3x3 rotation matrix
        axes (str): One of 24 axis sequences as string or encoded tuple (see top of this module)
    Returns:
        np.array: (r,p,y) converted euler angles in radian vec3 float
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # `axes` was already an encoded (inner-axis, parity, repetition, frame) tuple.
        firstaxis, parity, repetition, frame = axes

    i = firstaxis
    j = _NEXT_AXIS[i + parity]
    k = _NEXT_AXIS[i - parity + 1]

    # BUGFIX: np.array(..., copy=False) raises on NumPy >= 2.0 when a copy is
    # required; np.asarray keeps the intended copy-only-if-needed behavior.
    M = np.asarray(rmat, dtype=np.float32)[:3, :3]
    if repetition:
        sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k])
        if sy > EPS:
            ax = math.atan2(M[i, j], M[i, k])
            ay = math.atan2(sy, M[i, i])
            az = math.atan2(M[j, i], -M[k, i])
        else:
            # Gimbal-locked: third angle is unrecoverable, set to 0.
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(sy, M[i, i])
            az = 0.0
    else:
        cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i])
        if cy > EPS:
            ax = math.atan2(M[k, j], M[k, k])
            ay = math.atan2(-M[k, i], cy)
            az = math.atan2(M[j, i], M[i, i])
        else:
            # Gimbal-locked: third angle is unrecoverable, set to 0.
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(-M[k, i], cy)
            az = 0.0

    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return vec((ax, ay, az))
406 |
407 |
def pose2mat(pose):
    """
    Converts a (pos, orn) pose to a homogeneous matrix.
    Args:
        pose (2-tuple): a (pos, orn) tuple where pos is vec3 float cartesian, and
            orn is vec4 float quaternion.
    Returns:
        np.array: 4x4 homogeneous matrix
    """
    hmat = np.zeros((4, 4), dtype=np.float32)
    hmat[:3, 3] = np.array(pose[0], dtype=np.float32)
    hmat[:3, :3] = quat2mat(pose[1])
    hmat[3, 3] = 1.0
    return hmat
422 |
423 |
424 | # @jit_decorator
def quat2mat(quaternion):
    """
    Converts given quaternion to a 3x3 rotation matrix.
    Args:
        quaternion (np.array): (x,y,z,w) vec4 float angles
    Returns:
        np.array: 3x3 rotation matrix
    """
    # Reorder to (w,x,y,z); copy because the vector is rescaled in place below.
    q = np.asarray(quaternion).copy().astype(np.float32)[[3, 0, 1, 2]]

    n = np.dot(q, q)
    if n < EPS:
        # Degenerate (near-zero) quaternion: treat as no rotation.
        return np.identity(3)
    q *= math.sqrt(2.0 / n)
    outer = np.outer(q, q)
    return np.array(
        [
            [
                1.0 - outer[2, 2] - outer[3, 3],
                outer[1, 2] - outer[3, 0],
                outer[1, 3] + outer[2, 0],
            ],
            [
                outer[1, 2] + outer[3, 0],
                1.0 - outer[1, 1] - outer[3, 3],
                outer[2, 3] - outer[1, 0],
            ],
            [
                outer[1, 3] - outer[2, 0],
                outer[2, 3] + outer[1, 0],
                1.0 - outer[1, 1] - outer[2, 2],
            ],
        ]
    )
449 |
450 |
def quat2axisangle(quat):
    """
    Converts quaternion to axis-angle format.
    Returns a unit vector direction scaled by its angle in radians.
    Note: clamps quat[3] into [-1, 1] in place, as the original did.
    Args:
        quat (np.array): (x,y,z,w) vec4 float angles
    Returns:
        np.array: (ax,ay,az) axis-angle exponential coordinates
    """
    # Clamp the scalar part so acos below stays defined.
    if quat[3] > 1.0:
        quat[3] = 1.0
    elif quat[3] < -1.0:
        quat[3] = -1.0

    den = np.sqrt(1.0 - quat[3] ** 2)
    if math.isclose(den, 0.0):
        # (Close to) a zero degree rotation: axis is arbitrary, return zeros.
        return np.zeros(3)

    return (quat[:3] * 2.0 * math.acos(quat[3])) / den
472 |
473 |
def axisangle2quat(vec):
    """
    Converts scaled axis-angle to quat.
    Args:
        vec (np.array): (ax,ay,az) axis-angle exponential coordinates
    Returns:
        np.array: (x,y,z,w) vec4 float angles
    """
    angle = np.linalg.norm(vec)

    if math.isclose(angle, 0.0):
        # Zero rotation maps to the identity quaternion.
        return np.array([0.0, 0.0, 0.0, 1.0])

    half = angle / 2.0
    axis = vec / angle
    return np.concatenate([axis * np.sin(half), [np.cos(half)]])
496 |
497 |
def pose_in_A_to_pose_in_B(pose_A, pose_A_in_B):
    """
    Converts a homogenous matrix corresponding to a point C in frame A
    to a homogenous matrix corresponding to the same point C in frame B.
    Args:
        pose_A (np.array): 4x4 matrix corresponding to the pose of C in frame A
        pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B
    Returns:
        np.array: 4x4 matrix corresponding to the pose of C in frame B
    """
    # T_B^C = T_B^A @ T_A^C: first express C in A, then change frame A -> B.
    return np.dot(pose_A_in_B, pose_A)
515 |
516 |
def pose_inv(pose):
    """
    Computes the inverse of a homogeneous matrix corresponding to the pose of some
    frame B in frame A. The inverse is the pose of frame A in frame B.
    Uses the closed form [R t; 0 1]^-1 = [R.T  -R.T @ t; 0 1] instead of a
    general matrix inverse.
    Args:
        pose (np.array): 4x4 matrix for the pose to inverse
    Returns:
        np.array: 4x4 matrix for the inverse pose
    """
    inv = np.zeros((4, 4))
    rot_T = pose[:3, :3].T
    inv[:3, :3] = rot_T
    # Translate back by -t, expressed in the new frame: -R.T @ t.
    inv[:3, 3] = -rot_T.dot(pose[:3, 3])
    inv[3, 3] = 1.0
    return inv
542 |
543 |
544 | def _skew_symmetric_translation(pos_A_in_B):
545 | """
546 | Helper function to get a skew symmetric translation matrix for converting quantities
547 | between frames.
548 | Args:
549 | pos_A_in_B (np.array): (x,y,z) position of A in frame B
550 | Returns:
551 | np.array: 3x3 skew symmetric translation matrix
552 | """
553 | return np.array(
554 | [
555 | 0.0,
556 | -pos_A_in_B[2],
557 | pos_A_in_B[1],
558 | pos_A_in_B[2],
559 | 0.0,
560 | -pos_A_in_B[0],
561 | -pos_A_in_B[1],
562 | pos_A_in_B[0],
563 | 0.0,
564 | ]
565 | ).reshape((3, 3))
566 |
567 |
def vel_in_A_to_vel_in_B(vel_A, ang_vel_A, pose_A_in_B):
    """
    Converts linear and angular velocity of a point in frame A to the equivalent in frame B.
    Args:
        vel_A (np.array): (vx,vy,vz) linear velocity in A
        ang_vel_A (np.array): (wx,wy,wz) angular velocity in A
        pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B
    Returns:
        2-tuple:
            - (np.array) (vx,vy,vz) linear velocities in frame B
            - (np.array) (wx,wy,wz) angular velocities in frame B
    """
    rot = pose_A_in_B[:3, :3]
    skew = _skew_symmetric_translation(pose_A_in_B[:3, 3])
    # Linear velocity picks up a lever-arm term from the rotated angular velocity.
    vel_B = rot.dot(vel_A) + skew.dot(rot.dot(ang_vel_A))
    ang_vel_B = rot.dot(ang_vel_A)
    return vel_B, ang_vel_B
586 |
587 |
def force_in_A_to_force_in_B(force_A, torque_A, pose_A_in_B):
    """
    Converts linear and rotational force at a point in frame A to the equivalent in frame B.
    Args:
        force_A (np.array): (fx,fy,fz) linear force in A
        torque_A (np.array): (tx,ty,tz) rotational force (moment) in A
        pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B
    Returns:
        2-tuple:
            - (np.array) (fx,fy,fz) linear forces in frame B
            - (np.array) (tx,ty,tz) moments in frame B
    """
    rot_T = pose_A_in_B[:3, :3].T
    skew = _skew_symmetric_translation(pose_A_in_B[:3, 3])
    force_B = rot_T.dot(force_A)
    # Torque picks up a lever-arm contribution from the force.
    torque_B = -rot_T.dot(skew.dot(force_A)) + rot_T.dot(torque_A)
    return force_B, torque_B
606 |
607 |
def rotation_matrix(angle, direction, point=None):
    """
    Returns matrix to rotate about axis defined by point and direction
    (Rodrigues' rotation formula).
    Args:
        angle (float): Magnitude of rotation in radians
        direction (np.array): (ax,ay,az) axis about which to rotate
        point (None or np.array): If specified, is the (x,y,z) point about which the rotation will occur
    Returns:
        np.array: 4x4 homogeneous matrix that includes the desired rotation
    """
    sina = math.sin(angle)
    cosa = math.cos(angle)
    # Normalize the rotation axis (inlined unit_vector 1-D fast path).
    direction = np.array(direction[:3], dtype=np.float32)
    direction /= math.sqrt(np.dot(direction, direction))
    # rotation matrix around unit vector
    R = np.array(
        ((cosa, 0.0, 0.0), (0.0, cosa, 0.0), (0.0, 0.0, cosa)), dtype=np.float32
    )
    R += np.outer(direction, direction) * (1.0 - cosa)
    direction *= sina
    R += np.array(
        (
            (0.0, -direction[2], direction[1]),
            (direction[2], 0.0, -direction[0]),
            (-direction[1], direction[0], 0.0),
        ),
        dtype=np.float32,
    )
    M = np.identity(4)
    M[:3, :3] = R
    if point is not None:
        # rotation not around origin: shift so the axis passes through `point`
        # BUGFIX: np.array(..., copy=False) is rejected by NumPy >= 2.0;
        # np.asarray keeps the copy-only-if-needed behavior.
        point = np.asarray(point[:3], dtype=np.float32)
        M[:3, 3] = point - np.dot(R, point)
    return M
660 |
661 |
def clip_translation(dpos, limit):
    """
    Limits a translation (delta position) to a specified limit.
    Scales down the norm of the dpos to 'limit' if norm(dpos) > limit, else returns unchanged.
    Args:
        dpos (n-array): n-dim Translation being clipped (e,g.: (x, y, z)) -- numpy array
        limit (float): Value to limit translation by -- magnitude (scalar, in same units as input)
    Returns:
        2-tuple:
            - (np.array) Clipped translation (same dimension as inputs)
            - (bool) whether the value was clipped or not
    """
    norm = np.linalg.norm(dpos)
    if norm > limit:
        return dpos * limit / norm, True
    return dpos, False
676 |
677 |
def clip_rotation(quat, limit):
    """
    Limits a (delta) rotation to a specified limit.
    Converts rotation to axis-angle, clips the angle, then re-converts into a quaternion.
    Args:
        quat (np.array): (x,y,z,w) rotation being clipped
        limit (float): Value to limit rotation by -- magnitude (scalar, in radians)
    Returns:
        2-tuple:
            - (np.array) Clipped rotation quaternion (x, y, z, w)
            - (bool) whether the value was clipped or not
    """
    clipped = False

    # Normalize so the axis-angle decomposition below is valid.
    quat = quat / np.linalg.norm(quat)

    sin_half = np.sqrt(max(1 - quat[3] * quat[3], 0))
    if sin_half == 0:
        # Zero-degree rotation: nothing to clip.
        return quat, clipped

    axis = quat[:3] / sin_half
    angle = 2 * math.acos(quat[3])

    # Clip rotation if necessary and rebuild the quaternion.
    if abs(angle) > limit:
        half = limit * np.sign(angle) / 2
        quat = np.append(axis * math.sin(half), math.cos(half))
        clipped = True

    return quat, clipped
715 |
716 |
def make_pose(translation, rotation):
    """
    Makes a homogeneous pose matrix from a translation vector and a rotation matrix.
    Args:
        translation (np.array): (x,y,z) translation value
        rotation (np.array): a 3x3 matrix representing rotation
    Returns:
        pose (np.array): a 4x4 homogeneous matrix
    """
    pose = np.zeros((4, 4))
    pose[:3, 3] = translation
    pose[:3, :3] = rotation
    pose[3, 3] = 1.0
    return pose
731 |
732 |
def unit_vector(data, axis=None, out=None):
    """
    Returns ndarray normalized by length, i.e. euclidean norm, along axis.
    E.g.:
    >>> v0 = numpy.random.random(3)
    >>> v1 = unit_vector(v0)
    >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
    True
    Args:
        data (np.array): data to normalize
        axis (None or int): If specified, determines specific axis along data to normalize
        out (None or np.array): If specified, will store computation in this variable
    Returns:
        None or np.array: If @out is not specified, will return normalized vector. Otherwise, stores the output in @out
    """
    if out is None:
        data = np.array(data, dtype=np.float32, copy=True)
        if data.ndim == 1:
            # Fast path for plain vectors.
            data /= math.sqrt(np.dot(data, data))
            return data
    else:
        if out is not data:
            # BUGFIX: np.array(..., copy=False) raises on NumPy >= 2.0 when a
            # copy is required; np.asarray keeps the old copy-only-if-needed
            # behavior.
            out[:] = np.asarray(data)
        data = out
    # Norm along the requested axis (over all elements when axis is None).
    length = np.atleast_1d(np.sum(data * data, axis))
    np.sqrt(length, length)  # in-place sqrt
    if axis is not None:
        length = np.expand_dims(length, axis)
    data /= length
    if out is None:
        return data
781 |
782 |
def get_orientation_error(target_orn, current_orn):
    """
    Returns the difference between two quaternion orientations as a 3 DOF numpy array.
    For use in an impedance controller / task-space PD controller.
    Args:
        target_orn (np.array): (x, y, z, w) desired quaternion orientation
        current_orn (np.array): (x, y, z, w) current quaternion orientation
    Returns:
        orn_error (np.array): (ax,ay,az) current orientation error, corresponds to
            (target_orn - current_orn)
    """
    # Reorder both quaternions to (w, x, y, z).
    cw, cx, cy, cz = current_orn[3], current_orn[0], current_orn[1], current_orn[2]
    target_wxyz = np.array(
        [target_orn[3], target_orn[0], target_orn[1], target_orn[2]]
    )

    # Pseudo-inverse mapping from quaternion difference to a 3-DOF error.
    pinv = np.array(
        [
            [-cx, cw, -cz, cy],
            [-cy, cz, cw, -cx],
            [-cz, -cy, cx, cw],
        ]
    )
    return 2.0 * pinv.dot(target_wxyz)
805 |
806 |
def get_pose_error(target_pose, current_pose):
    """
    Computes the error corresponding to target pose - current pose as a 6-dim vector.
    The first 3 components correspond to translational error while the last 3 components
    correspond to the rotational error.
    Args:
        target_pose (np.array): a 4x4 homogenous matrix for the target pose
        current_pose (np.array): a 4x4 homogenous matrix for the current pose
    Returns:
        np.array: 6-dim pose error.
    """
    error = np.zeros(6)

    # Translational error: difference of the position columns.
    error[:3] = target_pose[:3, 3] - current_pose[:3, 3]

    # Rotational error: half the sum of cross products of matching basis columns.
    rot_err = np.zeros(3)
    for col in range(3):
        rot_err += np.cross(current_pose[:3, col], target_pose[:3, col])
    error[3:] = 0.5 * rot_err

    return error
837 |
838 |
# @jit_decorator
def matrix_inverse(matrix):
    """
    Helper wrapper around ``np.linalg.inv`` kept as a named function so it can
    be swapped for a jit-compiled implementation (see the commented decorator).
    Args:
        matrix (np.array): 2d-array representing a (square, invertible) matrix
    Returns:
        np.array: 2d-array representing the matrix inverse
    """
    inverse = np.linalg.inv(matrix)
    return inverse
849 |
850 |
def spatial_transform(image, z_where, out_dims, inverse=False):
    """spatial transformer network used to scale and shift input according to z_where in:
    1/ x -> x_att -- shapes (H, W) -> (attn_window, attn_window) -- thus inverse = False
    2/ y_att -> y -- (attn_window, attn_window) -> (H, W) -- thus inverse = True
    inverting the affine transform as follows: A_inv ( A * image ) = image
    A = [R | T] where R is the diagonal scaling component, T is [tx, ty] translation component
    if x' = R * x + T --> x = R_inv * (x' - T) = R_inv * x' - R_inv * T

    NOTE: z_where is 4-dim per datapoint, [sx, sy, tx, ty] (separate x/y scales, as the
    indexing of z_where[:, 3] below requires), so the inverse transform is
    [1/sx, 1/sy, -tx/sx, -ty/sy]:
        R = [[sx, 0],  -> R_inv = [[1/sx, 0],
             [0, sy]]             [0, 1/sy]]

    Args:
        image (torch.Tensor): (N, C, H, W) batch to resample.
        z_where (torch.Tensor): (N, 4) per-datapoint [sx, sy, tx, ty].
        out_dims (sequence of int): output size (N, C, H_out, W_out).
        inverse (bool): if True, apply the inverse of the affine transform.

    Returns:
        torch.Tensor: resampled batch with shape ``out_dims``.
    """
    # 1. construct 2x3 affine matrix for each datapoint in the minibatch
    theta = torch.zeros(2, 3).repeat(image.shape[0], 1, 1).to(image.device)
    # set scaling; the 1e-9 guards against division by zero when inverting
    theta[:, 0, 0] = z_where[:, 0] if not inverse else 1 / (z_where[:, 0] + 1e-9)
    theta[:, 1, 1] = z_where[:, 1] if not inverse else 1 / (z_where[:, 1] + 1e-9)

    # set translation (inverse translation is -R_inv * T, see docstring)
    theta[:, 0, -1] = (
        z_where[:, 2] if not inverse else -z_where[:, 2] / (z_where[:, 0] + 1e-9)
    )
    theta[:, 1, -1] = (
        z_where[:, 3] if not inverse else -z_where[:, 3] / (z_where[:, 1] + 1e-9)
    )
    # 2. construct sampling grid
    grid = F.affine_grid(theta, torch.Size(out_dims))
    # 3. sample image from grid
    return F.grid_sample(image, grid)
880 |
--------------------------------------------------------------------------------
/launch/bringup_interface.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/launch/top_realsense.launch:
--------------------------------------------------------------------------------
1 |
13 |
14 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 |
154 |
156 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
165 |
166 |
167 |
168 |
169 |
170 |
171 |
172 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
181 |
182 |
183 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
199 |
200 |
202 |
203 |
204 |
205 |
206 |
207 |
208 |
209 |
210 |
211 |
212 |
214 |
215 |
217 |
218 |
219 |
221 |
222 |
223 |
224 |
225 |
226 |
227 |
228 |
229 |
230 |
--------------------------------------------------------------------------------
/launch/wrist_realsense.launch:
--------------------------------------------------------------------------------
1 |
13 |
14 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 |
154 |
156 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
165 |
166 |
167 |
168 |
169 |
170 |
171 |
172 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
181 |
182 |
183 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
196 |
197 |
198 |
199 |
200 |
205 |
206 |
208 |
209 |
211 |
212 |
213 |
214 |
215 |
216 |
217 |
218 |
219 |
220 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import os, sys
import numpy as np

from core.agent import Agent
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Run the RAMP simulator with the planner for a given assembly.')
    parser.add_argument("-a", "--assembly", type=str,
                        required=True, help='The assembly XML file to use.')
    # BUG FIX: parse_args() was never called, so `assembly` below raised a
    # NameError before anything ran.
    args = parser.parse_args()
    assembly = args.assembly

    # Select the domain generator matching the requested assembly.
    if assembly == "assembly_easy_1.xml":
        from sparc_planning.src.example_domains.example_easy_1 import generate_domain_setup
    elif assembly == "assembly_easy_2.xml":
        from sparc_planning.src.example_domains.example_easy_2 import generate_domain_setup
    elif assembly == "assembly_easy_3.xml":
        from sparc_planning.src.example_domains.example_easy_3 import generate_domain_setup
    else:
        # Fail fast with a clear message instead of a NameError on
        # `generate_domain_setup` further down.
        sys.exit(f"Unknown assembly '{assembly}': "
                 "expected one of assembly_easy_1.xml, assembly_easy_2.xml, assembly_easy_3.xml")

    agent = Agent(beam_xml=os.path.join(os.environ['PLANNER_PATH'], 'example_beamset_latest.xml'),
                  assembly_xml=os.path.join(os.environ['PLANNER_PATH'], assembly),
                  domain_generator=generate_domain_setup)
    coarse_actions, fine_actions, fine_actions_per_coarse = agent.plan()
    print('Execute...')
    agent.execute(coarse_actions, fine_actions, fine_actions_per_coarse)
--------------------------------------------------------------------------------
/media/assembly_timelapse.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/applied-ai-lab/ramp/55b2d49a1ef2e47fc92c1f1203f2212a702b9242/media/assembly_timelapse.png
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | ramp
4 | 0.0.1
5 | RAMP package
6 |
7 |
8 |
9 | Jun Yamada
10 |
11 |
12 |
13 |
14 |
15 | TODO
16 | catkin
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
# Minimal Python setup script exposing the `core` package.
# NOTE(review): distutils is deprecated and removed in Python 3.12 — consider
# migrating to setuptools; confirm against the ROS/catkin toolchain in use.
from distutils.core import setup

setup(name='core', version='0.0.1', packages=['core'])
--------------------------------------------------------------------------------