├── .gitignore ├── CMakeLists.txt ├── config ├── config_pose.yaml └── config_pose.yaml.backup ├── docker ├── Dockerfile.kinetic ├── init_workspace.sh ├── readme.md └── run_dope_docker.sh ├── dope_objects.png ├── example_tmux.png ├── license.md ├── package.xml ├── readme.md ├── requirements.txt ├── src ├── camera.py ├── dope.py └── inference │ ├── cuboid.py │ ├── cuboid_pnp_solver.py │ └── detector.py └── weights └── readme.md /.gitignore: -------------------------------------------------------------------------------- 1 | /weights 2 | *.pyc 3 | *._ 4 | 5 | __pycache__ 6 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(dope) 3 | 4 | ## Compile as C++11, supported in ROS Kinetic and newer 5 | # add_compile_options(-std=c++11) 6 | 7 | ## Find catkin macros and libraries 8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 9 | ## is used, also find other catkin packages 10 | find_package(catkin REQUIRED COMPONENTS 11 | rospy 12 | std_msgs 13 | tf2 14 | ) 15 | 16 | ## System dependencies are found with CMake's conventions 17 | # find_package(Boost REQUIRED COMPONENTS system) 18 | 19 | 20 | ## Uncomment this if the package has a setup.py. This macro ensures 21 | ## modules and global scripts declared therein get installed 22 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 23 | # catkin_python_setup() 24 | 25 | ################################################ 26 | ## Declare ROS messages, services and actions ## 27 | ################################################ 28 | 29 | ## To declare and build messages, services or actions from within this 30 | ## package, follow these steps: 31 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 32 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 33 | ## * In the file package.xml: 34 | ## * add a build_depend tag for "message_generation" 35 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET 36 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in 37 | ## but can be declared for certainty nonetheless: 38 | ## * add a exec_depend tag for "message_runtime" 39 | ## * In this file (CMakeLists.txt): 40 | ## * add "message_generation" and every package in MSG_DEP_SET to 41 | ## find_package(catkin REQUIRED COMPONENTS ...) 42 | ## * add "message_runtime" and every package in MSG_DEP_SET to 43 | ## catkin_package(CATKIN_DEPENDS ...) 44 | ## * uncomment the add_*_files sections below as needed 45 | ## and list every .msg/.srv/.action file to be processed 46 | ## * uncomment the generate_messages entry below 47 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
48 | 49 | ## Generate messages in the 'msg' folder 50 | # add_message_files( 51 | # FILES 52 | # Message1.msg 53 | # Message2.msg 54 | # ) 55 | 56 | ## Generate services in the 'srv' folder 57 | # add_service_files( 58 | # FILES 59 | # Service1.srv 60 | # Service2.srv 61 | # ) 62 | 63 | ## Generate actions in the 'action' folder 64 | # add_action_files( 65 | # FILES 66 | # Action1.action 67 | # Action2.action 68 | # ) 69 | 70 | ## Generate added messages and services with any dependencies listed here 71 | # generate_messages( 72 | # DEPENDENCIES 73 | # std_msgs 74 | # ) 75 | 76 | ################################################ 77 | ## Declare ROS dynamic reconfigure parameters ## 78 | ################################################ 79 | 80 | ## To declare and build dynamic reconfigure parameters within this 81 | ## package, follow these steps: 82 | ## * In the file package.xml: 83 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" 84 | ## * In this file (CMakeLists.txt): 85 | ## * add "dynamic_reconfigure" to 86 | ## find_package(catkin REQUIRED COMPONENTS ...) 87 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 88 | ## and list every .cfg file to be processed 89 | 90 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 91 | # generate_dynamic_reconfigure_options( 92 | # cfg/DynReconf1.cfg 93 | # cfg/DynReconf2.cfg 94 | # ) 95 | 96 | ################################### 97 | ## catkin specific configuration ## 98 | ################################### 99 | ## The catkin_package macro generates cmake config files for your package 100 | ## Declare things to be passed to dependent projects 101 | ## INCLUDE_DIRS: uncomment this if your package contains header files 102 | ## LIBRARIES: libraries you create in this project that dependent projects also need 103 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 104 | ## DEPENDS: system dependencies of this project that dependent projects also need 105 | catkin_package( 106 | # INCLUDE_DIRS include 107 | # LIBRARIES dope 108 | # CATKIN_DEPENDS rospy std_msgs tf2 109 | # DEPENDS system_lib 110 | ) 111 | 112 | ########### 113 | ## Build ## 114 | ########### 115 | 116 | ## Specify additional locations of header files 117 | ## Your package locations should be listed before other locations 118 | include_directories( 119 | # include 120 | ${catkin_INCLUDE_DIRS} 121 | ) 122 | 123 | ## Declare a C++ library 124 | # add_library(${PROJECT_NAME} 125 | # src/${PROJECT_NAME}/dope/dope.cpp 126 | # ) 127 | 128 | ## Add cmake target dependencies of the library 129 | ## as an example, code may need to be generated before libraries 130 | ## either from message generation or dynamic reconfigure 131 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 132 | 133 | ## Declare a C++ executable 134 | ## With catkin_make all packages are built within a single CMake context 135 | ## The recommended prefix ensures that target names across packages don't collide 136 | # add_executable(${PROJECT_NAME}_node src/dope_vis_node.cpp) 137 | 138 | ## Rename C++ executable without prefix 139 | ## The above recommended prefix causes long target names, the following renames the 140 | ## target back to the shorter version for ease of user use 141 | ## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 142 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 143 | 144 | ## Add cmake target dependencies of the executable 145 | ## same as for the library above 146 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 147 | 148 | ## Specify libraries to link a library or executable target against 149 | # target_link_libraries(${PROJECT_NAME}_node 150 | # ${catkin_LIBRARIES} 151 | # ) 152 | 153 | ############# 154 | ## Install ## 155 | ############# 156 | 157 | # all install targets should use catkin DESTINATION variables 158 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 159 | 160 | ## Mark executable scripts (Python etc.) for installation 161 | ## in contrast to setup.py, you can choose the destination 162 | # install(PROGRAMS 163 | # scripts/my_python_script 164 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 165 | # ) 166 | 167 | ## Mark executables and/or libraries for installation 168 | # install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node 169 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 170 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 171 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 172 | # ) 173 | 174 | ## Mark cpp header files for installation 175 | # install(DIRECTORY include/${PROJECT_NAME}/ 176 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 177 | # FILES_MATCHING PATTERN "*.h" 178 | # PATTERN ".svn" EXCLUDE 179 | # ) 180 | 181 | ## Mark other files for installation (e.g. launch and bag files, etc.) 182 | # install(FILES 183 | # # myfile1 184 | # # myfile2 185 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 186 | # ) 187 | 188 | ############# 189 | ## Testing ## 190 | ############# 191 | 192 | ## Add gtest based cpp test target and link libraries 193 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_dope_vis.cpp) 194 | # if(TARGET ${PROJECT_NAME}-test) 195 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 196 | # endif() 197 | 198 | ## Add folders to be run by python nosetests 199 | # catkin_add_nosetests(test) 200 | -------------------------------------------------------------------------------- /config/config_pose.yaml: -------------------------------------------------------------------------------- 1 | topic_camera: "/camera/color/image_raw" 2 | topic_publishing: "dope" 3 | frame_id: "/dope" 4 | 5 | # Comment any of these lines to prevent detection / pose estimation of that object 6 | weights: { 7 | "cracker":"cracker_60.pth", 8 | "gelatin":"gelatin_60.pth", 9 | "meat":"meat_20.pth", 10 | "mustard":"mustard_60.pth", 11 | "soup":"soup_60.pth", 12 | "sugar":"sugar_60.pth" 13 | } 14 | 15 | # Cuboid dimension in cm x,y,z 16 | dimensions: { 17 | "cracker": [16.403600692749023,21.343700408935547,7.179999828338623], 18 | "gelatin": [8.918299674987793, 7.311500072479248, 2.9983000755310059], 19 | "meat": [10.164673805236816,8.3542995452880859,5.7600898742675781], 20 | "mustard": [9.6024150848388672,19.130100250244141,5.824894905090332], 21 | "soup": [6.7659378051757813,10.185500144958496,6.771425724029541], 22 | "sugar": [9.267730712890625,17.625339508056641,4.5134143829345703], 23 | } 24 | 25 | draw_colors: { 26 | "cracker": [13, 255, 128], # green 27 | "gelatin": [255, 255, 255], # while 28 | "meat": [0, 104, 255], # blue 29 | "mustard": [217,12, 232], # magenta 30 | "soup": [255, 101, 0], # orange 31 | "sugar": [232, 222, 12], # 
yellow 32 | } 33 | 34 | # Camera intrinsics (Logitech C920) 35 | camera_settings: { 36 | "name": "logitech_c920", 37 | "width": 640, 38 | "height": 480, 39 | "fx": 641.5, 40 | "fy": 641.5, 41 | "cx": 320.0, 42 | "cy": 240.0 43 | } 44 | 45 | # Config params for DOPE 46 | thresh_angle: 0.5 47 | thresh_map: 0.01 48 | sigma: 3 49 | thresh_points: 0.1 50 | -------------------------------------------------------------------------------- /config/config_pose.yaml.backup: -------------------------------------------------------------------------------- 1 | topic_camera: "/dope/webcam_rgb_raw" 2 | topic_publishing: "dope" 3 | frame_id: "/dope" 4 | 5 | # Comment any of these lines to prevent detection / pose estimation of that object 6 | weights: { 7 | # "cracker":"cracker_60.pth", 8 | # "gelatin":"gelatin_60.pth", 9 | # "meat":"meat_20.pth", 10 | # "mustard":"mustard_60.pth", 11 | "soup":"soup_60.pth", 12 | #"sugar":"sugar_60.pth" 13 | } 14 | 15 | # Cuboid dimension in cm x,y,z 16 | dimensions: { 17 | "cracker": [16.403600692749023,21.343700408935547,7.179999828338623], 18 | "gelatin": [8.918299674987793, 7.311500072479248, 2.9983000755310059], 19 | "meat": [10.164673805236816,8.3542995452880859,5.7600898742675781], 20 | "mustard": [9.6024150848388672,19.130100250244141,5.824894905090332], 21 | "soup": [6.7659378051757813,10.185500144958496,6.771425724029541], 22 | "sugar": [9.267730712890625,17.625339508056641,4.5134143829345703], 23 | } 24 | 25 | draw_colors: { 26 | "cracker": [13, 255, 128], # green 27 | "gelatin": [255, 255, 255], # while 28 | "meat": [0, 104, 255], # blue 29 | "mustard": [217,12, 232], # magenta 30 | "soup": [255, 101, 0], # orange 31 | "sugar": [232, 222, 12], # yellow 32 | } 33 | 34 | # Camera intrinsics (Logitech C920) 35 | camera_settings: { 36 | "name": "logitech_c920", 37 | "width": 640, 38 | "height": 480, 39 | "fx": 641.5, 40 | "fy": 641.5, 41 | "cx": 320.0, 42 | "cy": 240.0 43 | } 44 | 45 | # Config params for DOPE 46 | thresh_angle: 0.5 47 | thresh_map: 0.01 48 | sigma: 3 49 | thresh_points: 0.1 50 | -------------------------------------------------------------------------------- /docker/Dockerfile.kinetic: -------------------------------------------------------------------------------- 1 | FROM nvidia/cudagl:9.0-devel-ubuntu16.04 2 | 3 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 4 | # Full license terms provided in LICENSE.md file. 5 | 6 | # Build with: 7 | # nvidia-docker build -t nvidia-dope:kinetic-v1 -f Dockerfile.kinetic .. 8 | 9 | ENV HOME /root 10 | 11 | WORKDIR ${HOME} 12 | 13 | RUN apt-get update && apt-get -y --no-install-recommends install software-properties-common 14 | 15 | # cuDNN version must match the one used by TensorRT. 16 | # TRT 4.0 is compiled with cuDNN 7.1. 
17 | 18 | RUN apt-get update && apt-get -y --no-install-recommends install \ 19 | ant \ 20 | bzip2 \ 21 | ca-certificates \ 22 | ccache \ 23 | cmake \ 24 | curl \ 25 | genromfs \ 26 | git \ 27 | gosu \ 28 | iproute \ 29 | iputils-ping \ 30 | less \ 31 | lcov \ 32 | libcudnn7=7.1.4.18-1+cuda9.0 \ 33 | libcudnn7-dev=7.1.4.18-1+cuda9.0 \ 34 | libeigen3-dev \ 35 | libopencv-dev \ 36 | make \ 37 | nano \ 38 | net-tools \ 39 | ninja-build \ 40 | openjdk-8-jdk \ 41 | patch \ 42 | pkg-config \ 43 | protobuf-compiler \ 44 | python-argparse \ 45 | python-dev \ 46 | python-empy \ 47 | python-numpy \ 48 | python-pip \ 49 | python-serial \ 50 | python-software-properties \ 51 | rsync \ 52 | s3cmd \ 53 | software-properties-common \ 54 | sudo \ 55 | unzip \ 56 | xsltproc \ 57 | wget \ 58 | zip \ 59 | && apt-get -y autoremove \ 60 | && apt-get clean autoclean \ 61 | # pip 62 | && pip install setuptools wheel \ 63 | && pip install 'matplotlib==2.2.2' --force-reinstall \ 64 | # coveralls code coverage reporting 65 | && pip install cpp-coveralls \ 66 | # jinja template generation 67 | && pip install jinja2 \ 68 | # cleanup 69 | && rm -rf /var/lib/apt/lists/{apt,dpkg,cache,log} /tmp/* /var/tmp/* 70 | 71 | # ROS Kinetic 72 | WORKDIR ${HOME} 73 | RUN apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116 \ 74 | && sh -c 'echo "deb http://packages.ros.org/ros/ubuntu xenial main" > /etc/apt/sources.list.d/ros-latest.list' \ 75 | && sh -c 'echo "deb http://packages.ros.org/ros-shadow-fixed/ubuntu/ xenial main" > /etc/apt/sources.list.d/ros-shadow.list' \ 76 | && apt-get update && apt-get -y --no-install-recommends install \ 77 | ros-kinetic-gazebo-ros-pkgs \ 78 | ros-kinetic-mavros \ 79 | ros-kinetic-mavros-extras \ 80 | ros-kinetic-ros-base \ 81 | ros-kinetic-rviz \ 82 | ros-kinetic-tf2 \ 83 | ros-kinetic-cv-bridge \ 84 | && apt-get -y autoremove \ 85 | && apt-get clean autoclean \ 86 | && rm -rf /var/lib/apt/lists/{apt,dpkg,cache,log} /tmp/* /var/tmp/* 87 | 88 | # Initialize ROS 89 | RUN geographiclib-get-geoids egm96-5 \ 90 | && rosdep init \ 91 | && rosdep update 92 | 93 | RUN echo 'source /opt/ros/kinetic/setup.bash' >> ${HOME}/.bashrc 94 | 95 | # Install OpenCV with CUDA support. 96 | # REVIEW alexeyk: JetPack 3.2 comes with OpenCV 3.3.1 _without_ CUDA support. 97 | WORKDIR ${HOME} 98 | RUN git clone http://github.com/opencv/opencv.git && cd opencv \ 99 | && git checkout 3.3.1 \ 100 | && mkdir build && cd build \ 101 | && cmake -D CMAKE_BUILD_TYPE=RELEASE \ 102 | -D CMAKE_INSTALL_PREFIX=/usr/local \ 103 | -D WITH_CUDA=OFF \ 104 | -D WITH_OPENCL=OFF \ 105 | -D ENABLE_FAST_MATH=1 \ 106 | -D CUDA_FAST_MATH=1 \ 107 | -D WITH_CUBLAS=1 \ 108 | -D BUILD_DOCS=OFF \ 109 | -D BUILD_PERF_TESTS=OFF \ 110 | -D BUILD_TESTS=OFF \ 111 | .. 
\ 112 | && make -j `nproc` \ 113 | && make install \ 114 | && cd ${HOME} && rm -rf ./opencv/ 115 | 116 | # Setup catkin workspace 117 | ENV CATKIN_WS ${HOME}/catkin_ws 118 | COPY docker/init_workspace.sh ${HOME} 119 | RUN ${HOME}/init_workspace.sh 120 | 121 | ENV CCACHE_CPP2=1 122 | ENV CCACHE_MAXSIZE=1G 123 | ENV DISPLAY :0 124 | ENV TERM=xterm 125 | # Some QT-Apps don't not show controls without this 126 | ENV QT_X11_NO_MITSHM 1 127 | 128 | COPY requirements.txt ${HOME} 129 | RUN pip install --no-cache-dir -r requirements.txt -------------------------------------------------------------------------------- /docker/init_workspace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | 4 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 5 | # Full license terms provided in LICENSE.md file. 6 | 7 | # Stop in case of any error. 8 | set -e 9 | 10 | source /opt/ros/kinetic/setup.bash 11 | 12 | # Create catkin workspace. 13 | mkdir -p ${CATKIN_WS}/src 14 | cd ${CATKIN_WS}/src 15 | catkin_init_workspace 16 | cd .. 17 | catkin_make 18 | source devel/setup.bash 19 | -------------------------------------------------------------------------------- /docker/readme.md: -------------------------------------------------------------------------------- 1 | ## DOPE in a Docker Container 2 | 3 | Running ROS inside of [Docker](https://www.docker.com/) is an excellent way to 4 | experiment with DOPE, as it allows the user to completely isolate all software and configuration 5 | changes from the host system. This document describes how to create and run a 6 | Docker image that contains a complete ROS environment that supports DOPE, 7 | including all required components, such as ROS Kinetic, rviz, CUDA with cuDNN, 8 | and other packages. 9 | 10 | The current configuration assumes all components are installed on an x86 host 11 | platform running Ubuntu 16.04. Further, use of the DOPE Docker container requires 12 | an NVIDIA GPU to be present, and it uses 13 | [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker/) for seamless CUDA usage. 14 | 15 | (This setup was tested with NVIDIA Docker v2. Although these steps should work 16 | with NVIDIA Docker v1, that version is no longer supported by NVIDIA; rather, users are encouraged to 17 | [upgrade](https://github.com/nvidia/nvidia-docker/wiki/Installation-(version-2.0)).) 18 | 19 | 20 | ### Steps 21 | 22 | 1. **Download the DOPE code** 23 | ``` 24 | $ git clone https://github.com/NVlabs/Deep_Object_Pose.git dope 25 | ``` 26 | 27 | 2. **Create the container** 28 | ``` 29 | $ cd dope/docker 30 | $ nvidia-docker build -t nvidia-dope:realsensed435 -f Dockerfile.kinetic .. 31 | ``` 32 | This will take several minutes and requires an internet connection. 33 | 34 | 3. **Plug in your camera** 35 | Docker will not recognize a USB device that is plugged in after the container is started. 36 | 37 | 4. **Run the container** 38 | ``` 39 | $ ./run_dope_docker.sh [name] [host dir] [container dir] 40 | ``` 41 | Parameters: 42 | - `name` is an optional field that specifies the name of this image. By default, it is `nvidia-dope-v1`. By using different names, you can create multiple containers from the same image. 43 | - `host dir` and `container dir` are a pair of optional fields that allow you to specify a mapping between a directory on your host machine and a location inside the container. This is useful for sharing code and data between the two systems. 
By default, it maps the directory containing dope to `/root/catkin_ws/src/dope` in the container. 44 | 45 | Only the first invocation of this script with a given name will create a container. Subsequent executions will attach to the running container allowing you -- in effect -- to have multiple terminal sessions into a single container. 46 | 47 | 5. **Build DOPE** 48 | Return to step 5 of the [installation instructions](../readme.md). 49 | 50 | *Note:* Since the Docker container binds directly to the host's network, it will see `roscore` even if running outside the docker container. 51 | 52 | 53 | ### Acknowledgment 54 | 55 | The DOPE Docker image is based on NVIDIA's [Redtail Docker image](https://github.com/NVIDIA-Jetson/redtail/wiki/testing-in-simulator#redtail-docker). 56 | -------------------------------------------------------------------------------- /docker/run_dope_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 4 | # Full license terms provided in LICENSE.md file. 5 | 6 | IMAGE_NAME="pauloabelha/nvidia-dope-realsensed435" 7 | TAG_NAME="latest" 8 | 9 | CONTAINER_NAME=$1 10 | if [[ -z "${CONTAINER_NAME}" ]]; then 11 | CONTAINER_NAME=nvidia-dope-realsensed435 12 | fi 13 | 14 | # This specifies a mapping between a host directory and a directory in the 15 | # docker container. This mapping should be changed if you wish to have access to 16 | # a different directory 17 | HOST_DIR=$2 18 | if [[ -z "${HOST_DIR}" ]]; then 19 | HOST_DIR=`realpath ${PWD}/..` 20 | fi 21 | 22 | CONTAINER_DIR=$3 23 | if [[ -z "${CONTAINER_DIR}" ]]; then 24 | CONTAINER_DIR=/root/catkin_ws/src/dope 25 | fi 26 | 27 | echo "Container name : ${CONTAINER_NAME}" 28 | echo "Host directory : ${HOST_DIR}" 29 | echo "Container directory: ${CONTAINER_DIR}" 30 | DOPE_ID=`docker ps -aqf "name=^/${CONTAINER_NAME}$"` 31 | if [ -z "${DOPE_ID}" ]; then 32 | echo "Creating new DOPE docker container." 33 | xhost + 34 | nvidia-docker run -it --privileged --network=host -v ${HOST_DIR}:${CONTAINER_DIR}:rw -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix${DISPLAY} --name=${CONTAINER_NAME} $IMAGE_NAME:$TAG_NAME bash 35 | else 36 | echo "Found DOPE docker container: ${DOPE_ID}." 37 | # Check if the container is already running and start if necessary. 38 | if [ -z `docker ps -qf "name=^/${CONTAINER_NAME}$"` ]; then 39 | xhost +local:${DOPE_ID} 40 | echo "Starting and attaching to ${CONTAINER_NAME} container..." 41 | docker start ${DOPE_ID} 42 | docker attach ${DOPE_ID} 43 | else 44 | echo "Found running ${CONTAINER_NAME} container, attaching bash..." 
45 | docker exec -it ${DOPE_ID} bash 46 | fi 47 | fi 48 | -------------------------------------------------------------------------------- /dope_objects.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pauloabelha/Deep_Object_Pose/02714c8eb89ab7273bbe2f59449816485d2eb2d6/dope_objects.png -------------------------------------------------------------------------------- /example_tmux.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pauloabelha/Deep_Object_Pose/02714c8eb89ab7273bbe2f59449816485d2eb2d6/example_tmux.png -------------------------------------------------------------------------------- /license.md: -------------------------------------------------------------------------------- 1 | # Creative Commons 2 | 3 | ## [Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode) 4 | 5 | Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. 6 | 7 | ### Using Creative Commons Public Licenses 8 | 9 | Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. 10 | 11 | * __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors). 12 | 13 | * __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. 
A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees). 14 | 15 | ## Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License 16 | 17 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. 18 | 19 | ### Section 1 – Definitions. 20 | 21 | a. __Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. 22 | 23 | b. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. 24 | 25 | c. __BY-NC-SA Compatible License__ means a license listed at [creativecommons.org/compatiblelicenses](http://creativecommons.org/compatiblelicenses), approved by Creative Commons as essentially the equivalent of this Public License. 26 | 27 | d. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. 28 | 29 | e. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. 30 | 31 | f. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. 32 | 33 | g. __License Elements__ means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution, NonCommercial, and ShareAlike. 34 | 35 | h. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License. 36 | 37 | i. 
__Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. 38 | 39 | h. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License. 40 | 41 | i. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange. 42 | 43 | j. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. 44 | 45 | k. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. 46 | 47 | l. __You__ means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. 48 | 49 | ### Section 2 – Scope. 50 | 51 | a. ___License grant.___ 52 | 53 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: 54 | 55 | A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and 56 | 57 | B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only. 58 | 59 | 2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 60 | 61 | 3. __Term.__ The term of this Public License is specified in Section 6(a). 62 | 63 | 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 64 | 65 | 5. __Downstream recipients.__ 66 | 67 | A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. 68 | 69 | B. 
__Additional offer from the Licensor – Adapted Material.__ Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply. 70 | 71 | C. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 72 | 73 | 6. __No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). 74 | 75 | b. ___Other rights.___ 76 | 77 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 78 | 79 | 2. Patent and trademark rights are not licensed under this Public License. 80 | 81 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes. 82 | 83 | ### Section 3 – License Conditions. 84 | 85 | Your exercise of the Licensed Rights is expressly made subject to the following conditions. 86 | 87 | a. ___Attribution.___ 88 | 89 | 1. If You Share the Licensed Material (including in modified form), You must: 90 | 91 | A. retain the following if it is supplied by the Licensor with the Licensed Material: 92 | 93 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); 94 | 95 | ii. a copyright notice; 96 | 97 | iii. a notice that refers to this Public License; 98 | 99 | iv. a notice that refers to the disclaimer of warranties; 100 | 101 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; 102 | 103 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and 104 | 105 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 106 | 107 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 108 | 109 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 110 | 111 | b. 
___ShareAlike.___ 112 | 113 | In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 114 | 115 | 1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-NC-SA Compatible License. 116 | 117 | 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 118 | 119 | 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. 120 | 121 | ### Section 4 – Sui Generis Database Rights. 122 | 123 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: 124 | 125 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only; 126 | 127 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and 128 | 129 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. 130 | 131 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 132 | 133 | ### Section 5 – Disclaimer of Warranties and Limitation of Liability. 134 | 135 | a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__ 136 | 137 | b. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__ 138 | 139 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. 140 | 141 | ### Section 6 – Term and Termination. 142 | 143 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. 
However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. 144 | 145 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 146 | 147 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 148 | 149 | 2. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 150 | 151 | For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. 152 | 153 | c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. 154 | 155 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. 156 | 157 | ### Section 7 – Other Terms and Conditions. 158 | 159 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. 160 | 161 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. 162 | 163 | ### Section 8 – Interpretation. 164 | 165 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. 166 | 167 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. 168 | 169 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. 170 | 171 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. 172 | 173 | ``` 174 | Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. 175 | 176 | Creative Commons may be contacted at [creativecommons.org](http://creativecommons.org/). 
177 | ``` 178 | -------------------------------------------------------------------------------- /package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | dope 4 | 0.0.0 5 | The DOPE package for deep object pose estimation 6 | 7 | 8 | 9 | 10 | jtremblay 11 | 12 | 13 | 14 | 15 | 16 | CC BY-NC-SA 4.0 17 | 18 | 19 | 20 | 21 | 22 | https://research.nvidia.com/publication/2018-09_Deep-Object-Pose 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | catkin 52 | rospy 53 | std_msgs 54 | tf2 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | [![License CC BY-NC-SA 4.0](https://img.shields.io/badge/License-CC%20BY--NC--SA%204.0-blue.svg)](https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode) 2 | ![Python 2.7](https://img.shields.io/badge/python-2.7-green.svg) 3 | # Deep Object Pose Estimation - ROS Inference (RealSenseD435) 4 | 5 | # Disclaimer 6 | This README has been modified from the original repo to contain instructions only for setting up the Docker image I've built to work with the Intel RealSense D435 camera. I've kept images, paragraphs and sentences untouched and added some of my own to the original README file. 7 | The Docker image should have you up and running with an Intel RealSense D435 out of the box. In case you want to do it from scratch, please have a [look here](https://github.com/intel-ros/realsense/#installation-instructions). 8 | 9 | # Instructions 10 | This is the official DOPE ROS package for detection and 6-DoF pose estimation of **known objects** from an RGB camera. The network has been trained on the following YCB objects: cracker box, sugar box, tomato soup can, mustard bottle, potted meat can, and gelatin box. For more details, see our [CoRL 2018 paper](https://arxiv.org/abs/1809.10790) and [video](https://youtu.be/yVGViBqWtBI). 11 | 12 | *Note:* Currently this package contains inference only. 13 | 14 | ![DOPE Objects](dope_objects.png) 15 | 16 | ## Installing 17 | 18 | 1. **Install Nvidia-Docker** 19 | Please follow the [Nvidia-Docker quickstart installation](https://github.com/NVIDIA/nvidia-docker) 20 | 21 | 2. **Pull the Docker image** 22 | 23 | ``` 24 | nvidia-docker pull pauloabelha/nvidia-dope-realsensed435 25 | ``` 26 | Pulling the image might take several minutes. Once the pull finishes, please check that the image is available for nvidia-docker: 27 | ``` 28 | nvidia-docker images 29 | ``` 30 | 31 | You should see something similar to: 32 | ``` 33 | cvlab@unibham:~$ nvidia-docker images 34 | REPOSITORY | TAG | IMAGE ID | CREATED | SIZE 35 | pauloabelha/nvidia-dope-realsensed435 | latest | c002a2f82c69 | 14 hours ago | 7.45GB 36 | nvidia-dope | kinetic-v1 | efca8e87496b | 19 hours ago | 7.16GB 37 | nvidia/cuda | 9.0-base | 74f5aea45cf6 | 4 weeks ago | 134MB 38 | nvidia/cudagl | 9.0-devel-ubuntu16.04 | b953c82b3472 | 6 weeks ago | 2GB 39 | ``` 40 | Please make sure you can see the realsensed435 image: pauloabelha/nvidia-dope-realsensed435 41 | 42 | 3. **Clone the DOPE code** 43 | ``` 44 | $ cd ~ 45 | $ git clone https://github.com/pauloabelha/Deep_Object_Pose.git dope 46 | ``` 47 | 48 | 4. **Plug in your Intel RealSense D435 camera** 49 | Docker will not recognize a USB device that is plugged in after the container is started. 50 | 51 | 5.
**Run the container** 52 | ``` 53 | $ ./run_dope_docker.sh [name] [host dir] [container dir] 54 | ``` 55 | 56 | Parameters: 57 | - `name` is an optional field that specifies the name of this image. By default, it is `nvidia-dope-realsensed435`. By using different names, you can create multiple containers from the same image. 58 | - `host dir` and `container dir` are a pair of optional fields that allow you to specify a mapping between a directory on your host machine and a location inside the container. This is useful for sharing code and data between the two systems. By default, it maps the directory containing dope to `/root/catkin_ws/src/dope` in the container. 59 | 60 | Only the first invocation of this script with a given name will create a container. Subsequent executions will attach to the running container allowing you -- in effect -- to have multiple terminal sessions into a single container. 61 | After running the container, you should find yourself inside it at: 62 | ``` 63 | root@unibham:~# 64 | ``` 65 | 6. **Build** 66 | ``` 67 | $ cd ~/catkin_ws 68 | $ catkin_make 69 | ``` 70 | 71 | 7. **Download [the weights](https://drive.google.com/open?id=1DfoA3m_Bm0fW8tOWXGVxi4ETlLEAgmcg)** and save them to the `weights` folder, *i.e.*, `~/catkin_ws/src/dope/weights/`. Please remember that the Docker image path `~/catkin_ws/src/dope/` is mapped by default to your host dope path `~/dope/`; this means you can put the downloaded weight files on your host machine at `~/dope/weights/`. 72 | 73 | 74 | ## Running 75 | 76 | 1. **(Optional) Use Tmux** 77 | 78 | I have installed [Tmux](https://tmuxcheatsheet.com/) in the container to make it easier to run the ROS core, start the camera, and visualize in RViz. For this, first run Tmux: 79 | ``` 80 | $ tmux 81 | ``` 82 | 83 | Then press Ctrl+b, release, and press ". This should split your screen horizontally into two. You can then press Ctrl+b, release, and press %. Now you should have three screens available in the same terminal. You can swap between them by pressing Ctrl+b and using the arrow keys. Finally, go up to the top screen and split it into two to get four screens in your terminal. Below, I'll be referring to the screens by numbers from top left (1), top right (2), bottom left (3), bottom right (4). 84 | 85 | ![Tmux Example With all commands ready to run](example_tmux.png) 86 | 87 | 88 | 89 | 2. **Start ROS master (screen 1 if using Tmux)** 90 | ``` 91 | $ cd ~/catkin_ws 92 | $ source devel/setup.bash 93 | $ roscore 94 | ``` 95 | 96 | 3. **Start Intel RealSense D435 node (screen 2 if using Tmux)** 97 | ``` 98 | $ roslaunch realsense2_camera rs_camera.launch 99 | ``` 100 | 101 | 4.
**(Optional) Edit config info** (if desired; I have already configured it to listen to the Intel RealSense D435 topic) in `~/catkin_ws/src/dope/config/config_pose.yaml` 102 | * `topic_camera`: RGB topic to listen to 103 | * `topic_publishing`: topic name for publishing 104 | * `weights`: dictionary of object names and their weight file names, **comment out any line to disable detection/estimation of that object** 105 | * `dimensions`: dictionary of dimensions for the objects (key values must match the `weights` names) 106 | * `draw_colors`: dictionary of object colors (key values must match the `weights` names) 107 | * `camera_settings`: dictionary of camera intrinsics; edit these values to match your camera 108 | * `thresh_points`: confidence threshold for object detection; increase this value if you see too many false positives, reduce it if objects are not detected. 109 | 110 | 5. **Start DOPE node (screen 3 if using Tmux)** 111 | ``` 112 | $ rosrun dope dope.py [my_config.yaml] # Config file is optional; default is `config_pose.yaml` 113 | ``` 114 | 115 | *Note:* Config files must be located in the `~/catkin_ws/src/dope/config/` folder. 116 | 117 | 6. **Run RViz for visualization (screen 4 if using Tmux)** 118 | `$ rviz` 119 | 120 | ## Debugging 121 | 122 | * The following ROS topics are published (a minimal subscriber sketch for the pose topics is given near the end of this readme): 123 | ``` 124 | /dope/webcam_rgb_raw # RGB images from camera 125 | /dope/dimension_[obj_name] # dimensions of object 126 | /dope/pose_[obj_name] # timestamped pose of object 127 | /dope/rgb_points # RGB images with detected cuboids overlaid 128 | ``` 129 | *Note:* `[obj_name]` is in {cracker, gelatin, meat, mustard, soup, sugar} 130 | 131 | * To debug in RViz, `rosrun rviz rviz`, then either 132 | * `Add > Image` to view the raw RGB image or the image with cuboids overlaid 133 | * `Add > Pose` to view the object coordinate frame in 3D. If you do not have a coordinate frame set up, you can run this static transformation: `rosrun tf static_transform_publisher 0 0 0 0.7071 0 0 -0.7071 world dope 10`. Make sure that in RViz's `Global Options`, the `Fixed Frame` is set to `world`. 134 | 135 | * If `rosrun` does not find the package (`[rospack] Error: package 'dope' not found`), be sure that you called `source devel/setup.bash` as mentioned above. To find the package, run `rospack find dope`. 136 | 137 | 138 | ## YCB 3D Models 139 | 140 | DOPE returns the poses of the objects in the camera coordinate frame. DOPE uses the aligned YCB models, which can be obtained using [NVDU](https://github.com/NVIDIA/Dataset_Utilities) (see the `nvdu_ycb` command). 141 | 142 | 143 | ## Citation 144 | 145 | If you use this tool in a research project, please cite as follows: 146 | ``` 147 | @inproceedings{tremblay2018corl:dope, 148 | author = {Jonathan Tremblay and Thang To and Balakumar Sundaralingam and Yu Xiang and Dieter Fox and Stan Birchfield}, 149 | title = {Deep Object Pose Estimation for Semantic Robotic Grasping of Household Objects}, 150 | booktitle = {Conference on Robot Learning (CoRL)}, 151 | url = "https://arxiv.org/abs/1809.10790", 152 | year = 2018 153 | } 154 | ``` 155 | 156 | ## License 157 | 158 | Copyright (C) 2018 NVIDIA Corporation. All rights reserved. Licensed under the [CC BY-NC-SA 4.0 license](https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 159 | 160 | 161 | ## Acknowledgment 162 | 163 | Thanks to Jeffrey Smith (jeffreys@nvidia.com) for creating the Docker image.
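## Example: Reading the published poses

The `pose_[obj_name]` topics are standard `geometry_msgs/PoseStamped` messages, so they can be consumed from any ROS node. The following listener is a minimal, illustrative sketch (not part of the original package); it assumes the default `topic_publishing` value of `dope` and that the `soup` object is enabled in `config_pose.yaml`. Adjust the topic name for your configuration.

```python
#!/usr/bin/env python
# Minimal sketch (illustration only): print the pose of the "soup" object published by DOPE.
# The topic name assumes topic_publishing is "dope" and the soup weights are enabled in the config.
import rospy
from geometry_msgs.msg import PoseStamped

def on_pose(msg):
    p = msg.pose.position
    rospy.loginfo("soup at x=%.3f y=%.3f z=%.3f (meters, camera frame)", p.x, p.y, p.z)

if __name__ == "__main__":
    rospy.init_node("dope_pose_listener")
    rospy.Subscriber("/dope/pose_soup", PoseStamped, on_pose)
    rospy.spin()
```

Positions are reported in meters in the camera coordinate frame (`dope.py` converts from centimeters before publishing).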
164 | 165 | 166 | ## Contact 167 | 168 | Jonathan Tremblay (jtremblay@nvidia.com), Stan Birchfield (sbirchfield@nvidia.com) 169 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pyrr==0.9.2 2 | torch==0.4.0 3 | rospkg==1.1.4 4 | numpy==1.14.2 5 | scipy==1.1.0 6 | opencv_python==3.4.1.15 7 | Pillow==5.3.0 8 | torchvision==0.2.1 9 | PyYAML==3.13 10 | -------------------------------------------------------------------------------- /src/camera.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 3 | # This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. 4 | # https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode 5 | 6 | """ 7 | This file opens an RGB camera and publishes images via ROS. 8 | It uses OpenCV to capture from camera 0. 9 | """ 10 | 11 | from __future__ import print_function 12 | import rospy 13 | from std_msgs.msg import String 14 | from cv_bridge import CvBridge, CvBridgeError 15 | from sensor_msgs.msg import Image as ImageSensor 16 | 17 | from sensor_msgs.msg import Image as Image_msg 18 | import numpy as np 19 | import cv2 20 | 21 | # Global variables 22 | cam_index = 0 # index of camera to capture 23 | topic = '/dope/webcam_rgb_raw' # topic for publishing 24 | cap = cv2.VideoCapture(cam_index) 25 | if not cap.isOpened(): 26 | print("ERROR: Unable to open camera for capture. Is camera plugged in?") 27 | exit(1) 28 | 29 | def publish_images(freq=5): 30 | rospy.init_node('dope_webcam_rgb_raw', anonymous=True) 31 | images_out = rospy.Publisher(topic, Image_msg, queue_size=10) 32 | rate = rospy.Rate(freq) 33 | 34 | print ("Publishing images from camera {} to topic '{}'...".format( 35 | cam_index, 36 | topic 37 | ) 38 | ) 39 | print ("Ctrl-C to stop") 40 | while not rospy.is_shutdown(): 41 | ret, frame = cap.read() 42 | 43 | if ret: 44 | msg_frame_edges = CvBridge().cv2_to_imgmsg(frame, "bgr8") 45 | images_out.publish(msg_frame_edges) 46 | 47 | rate.sleep() 48 | 49 | if __name__ == "__main__": 50 | 51 | try : 52 | publish_images() 53 | except rospy.ROSInterruptException: 54 | pass 55 | 56 | -------------------------------------------------------------------------------- /src/dope.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 4 | # This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. 5 | # https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode 6 | 7 | """ 8 | This file starts a ROS node to run DOPE, 9 | listening to an image topic and publishing poses. 
10 | """ 11 | 12 | from __future__ import print_function 13 | import yaml 14 | import sys 15 | 16 | import numpy as np 17 | import cv2 18 | 19 | import rospy 20 | import rospkg 21 | from std_msgs.msg import String, Empty 22 | from cv_bridge import CvBridge, CvBridgeError 23 | from sensor_msgs.msg import Image as ImageSensor_msg 24 | from geometry_msgs.msg import PoseStamped 25 | 26 | from PIL import Image 27 | from PIL import ImageDraw 28 | 29 | # Import DOPE code 30 | rospack = rospkg.RosPack() 31 | g_path2package = rospack.get_path('dope') 32 | sys.path.append("{}/src/inference".format(g_path2package)) 33 | from cuboid import * 34 | from detector import * 35 | 36 | ### Global Variables 37 | g_bridge = CvBridge() 38 | g_img = None 39 | g_draw = None 40 | 41 | 42 | ### Basic functions 43 | def __image_callback(msg): 44 | '''Image callback''' 45 | global g_img 46 | g_img = g_bridge.imgmsg_to_cv2(msg, "rgb8") 47 | # cv2.imwrite('img.png', cv2.cvtColor(g_img, cv2.COLOR_BGR2RGB)) # for debugging 48 | 49 | 50 | ### Code to visualize the neural network output 51 | 52 | def DrawLine(point1, point2, lineColor, lineWidth): 53 | '''Draws line on image''' 54 | global g_draw 55 | if not point1 is None and point2 is not None: 56 | g_draw.line([point1,point2], fill=lineColor, width=lineWidth) 57 | 58 | def DrawDot(point, pointColor, pointRadius): 59 | '''Draws dot (filled circle) on image''' 60 | global g_draw 61 | if point is not None: 62 | xy = [ 63 | point[0]-pointRadius, 64 | point[1]-pointRadius, 65 | point[0]+pointRadius, 66 | point[1]+pointRadius 67 | ] 68 | g_draw.ellipse(xy, 69 | fill=pointColor, 70 | outline=pointColor 71 | ) 72 | 73 | def DrawCube(points, color=(255, 0, 0)): 74 | ''' 75 | Draws cube with a thick solid line across 76 | the front top edge and an X on the top face. 
77 | ''' 78 | 79 | lineWidthForDrawing = 2 80 | 81 | # draw front 82 | DrawLine(points[0], points[1], color, lineWidthForDrawing) 83 | DrawLine(points[1], points[2], color, lineWidthForDrawing) 84 | DrawLine(points[3], points[2], color, lineWidthForDrawing) 85 | DrawLine(points[3], points[0], color, lineWidthForDrawing) 86 | 87 | # draw back 88 | DrawLine(points[4], points[5], color, lineWidthForDrawing) 89 | DrawLine(points[6], points[5], color, lineWidthForDrawing) 90 | DrawLine(points[6], points[7], color, lineWidthForDrawing) 91 | DrawLine(points[4], points[7], color, lineWidthForDrawing) 92 | 93 | # draw sides 94 | DrawLine(points[0], points[4], color, lineWidthForDrawing) 95 | DrawLine(points[7], points[3], color, lineWidthForDrawing) 96 | DrawLine(points[5], points[1], color, lineWidthForDrawing) 97 | DrawLine(points[2], points[6], color, lineWidthForDrawing) 98 | 99 | # draw dots 100 | DrawDot(points[0], pointColor=color, pointRadius = 4) 101 | DrawDot(points[1], pointColor=color, pointRadius = 4) 102 | 103 | # draw x on the top 104 | DrawLine(points[0], points[5], color, lineWidthForDrawing) 105 | DrawLine(points[1], points[4], color, lineWidthForDrawing) 106 | 107 | 108 | def run_dope_node(params, freq=5): 109 | '''Starts ROS node to listen to image topic, run DOPE, and publish DOPE results''' 110 | 111 | global g_img 112 | global g_draw 113 | 114 | pubs = {} 115 | models = {} 116 | pnp_solvers = {} 117 | pub_dimension = {} 118 | draw_colors = {} 119 | 120 | # Initialize parameters 121 | matrix_camera = np.zeros((3,3)) 122 | matrix_camera[0,0] = params["camera_settings"]['fx'] 123 | matrix_camera[1,1] = params["camera_settings"]['fy'] 124 | matrix_camera[0,2] = params["camera_settings"]['cx'] 125 | matrix_camera[1,2] = params["camera_settings"]['cy'] 126 | matrix_camera[2,2] = 1 127 | dist_coeffs = np.zeros((4,1)) 128 | 129 | if "dist_coeffs" in params["camera_settings"]: 130 | dist_coeffs = np.array(params["camera_settings"]['dist_coeffs']) 131 | config_detect = lambda: None 132 | config_detect.mask_edges = 1 133 | config_detect.mask_faces = 1 134 | config_detect.vertex = 1 135 | config_detect.threshold = 0.5 136 | config_detect.softmax = 1000 137 | config_detect.thresh_angle = params['thresh_angle'] 138 | config_detect.thresh_map = params['thresh_map'] 139 | config_detect.sigma = params['sigma'] 140 | config_detect.thresh_points = params["thresh_points"] 141 | 142 | # For each object to detect, load network model, create PNP solver, and start ROS publishers 143 | for model in params['weights']: 144 | models[model] =\ 145 | ModelData( 146 | model, 147 | g_path2package + "/weights/" + params['weights'][model] 148 | ) 149 | models[model].load_net_model() 150 | 151 | draw_colors[model] = \ 152 | tuple(params["draw_colors"][model]) 153 | pnp_solvers[model] = \ 154 | CuboidPNPSolver( 155 | model, 156 | matrix_camera, 157 | Cuboid3d(params['dimensions'][model]), 158 | dist_coeffs=dist_coeffs 159 | ) 160 | pubs[model] = \ 161 | rospy.Publisher( 162 | '{}/pose_{}'.format(params['topic_publishing'], model), 163 | PoseStamped, 164 | queue_size=10 165 | ) 166 | pub_dimension[model] = \ 167 | rospy.Publisher( 168 | '{}/dimension_{}'.format(params['topic_publishing'], model), 169 | String, 170 | queue_size=10 171 | ) 172 | 173 | # Start ROS publisher 174 | pub_rgb_dope_points = \ 175 | rospy.Publisher( 176 | params['topic_publishing']+"/rgb_points", 177 | ImageSensor_msg, 178 | queue_size=10 179 | ) 180 | 181 | # Starts ROS listener 182 | rospy.Subscriber( 183 | topic_cam, 184 | 
ImageSensor_msg, 185 | __image_callback 186 | ) 187 | 188 | # Initialize ROS node 189 | rospy.init_node('dope_vis', anonymous=True) 190 | rate = rospy.Rate(freq) 191 | 192 | print ("Running DOPE... (Listening to camera topic: '{}')".format(topic_cam)) 193 | print ("Ctrl-C to stop") 194 | 195 | while not rospy.is_shutdown(): 196 | if g_img is not None: 197 | # Copy and draw image 198 | img_copy = g_img.copy() 199 | im = Image.fromarray(img_copy) 200 | g_draw = ImageDraw.Draw(im) 201 | 202 | for m in models: 203 | # Detect object 204 | results = ObjectDetector.detect_object_in_image( 205 | models[m].net, 206 | pnp_solvers[m], 207 | g_img, 208 | config_detect 209 | ) 210 | 211 | # Publish pose and overlay cube on image 212 | for i_r, result in enumerate(results): 213 | if result["location"] is None: 214 | continue 215 | loc = result["location"] 216 | ori = result["quaternion"] 217 | msg = PoseStamped() 218 | msg.header.frame_id = params["frame_id"] 219 | msg.header.stamp = rospy.Time.now() 220 | CONVERT_SCALE_CM_TO_METERS = 100 221 | msg.pose.position.x = loc[0] / CONVERT_SCALE_CM_TO_METERS 222 | msg.pose.position.y = loc[1] / CONVERT_SCALE_CM_TO_METERS 223 | msg.pose.position.z = loc[2] / CONVERT_SCALE_CM_TO_METERS 224 | msg.pose.orientation.x = ori[0] 225 | msg.pose.orientation.y = ori[1] 226 | msg.pose.orientation.z = ori[2] 227 | msg.pose.orientation.w = ori[3] 228 | 229 | # Publish 230 | pubs[m].publish(msg) 231 | pub_dimension[m].publish(str(params['dimensions'][m])) 232 | 233 | # Draw the cube 234 | if None not in result['projected_points']: 235 | points2d = [] 236 | for pair in result['projected_points']: 237 | points2d.append(tuple(pair)) 238 | DrawCube(points2d, draw_colors[m]) 239 | 240 | # Publish the image with results overlaid 241 | pub_rgb_dope_points.publish( 242 | CvBridge().cv2_to_imgmsg( 243 | np.array(im)[...,::-1], 244 | "bgr8" 245 | ) 246 | ) 247 | rate.sleep() 248 | 249 | 250 | if __name__ == "__main__": 251 | '''Main routine to run DOPE''' 252 | 253 | if len(sys.argv) > 1: 254 | config_name = sys.argv[1] 255 | else: 256 | config_name = "config_pose.yaml" 257 | rospack = rospkg.RosPack() 258 | params = None 259 | yaml_path = g_path2package + '/config/{}'.format(config_name) 260 | with open(yaml_path, 'r') as stream: 261 | try: 262 | print("Loading DOPE parameters from '{}'...".format(yaml_path)) 263 | params = yaml.load(stream) 264 | print(' Parameters loaded.') 265 | except yaml.YAMLError as exc: 266 | print(exc) 267 | 268 | topic_cam = params['topic_camera'] 269 | 270 | try : 271 | run_dope_node(params) 272 | except rospy.ROSInterruptException: 273 | pass 274 | -------------------------------------------------------------------------------- /src/inference/cuboid.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 2 | # This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. 
3 | # https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
4 | 
5 | from enum import IntEnum, unique
6 | import numpy as np
7 | import cv2
8 | from pyrr import Quaternion, Matrix44, Vector3, euler
9 | 
10 | # Related to the object's local coordinate system
11 | # @unique
12 | class CuboidVertexType(IntEnum):
13 |     FrontTopRight = 0
14 |     FrontTopLeft = 1
15 |     FrontBottomLeft = 2
16 |     FrontBottomRight = 3
17 |     RearTopRight = 4
18 |     RearTopLeft = 5
19 |     RearBottomLeft = 6
20 |     RearBottomRight = 7
21 |     Center = 8
22 |     TotalCornerVertexCount = 8  # The corner vertices do not include the center point
23 |     TotalVertexCount = 9
24 | 
25 | # Pairs of vertex indexes that make up each edge of the cuboid
26 | CuboidLineIndexes = [
27 |     # Front face
28 |     [ CuboidVertexType.FrontTopLeft, CuboidVertexType.FrontTopRight ],
29 |     [ CuboidVertexType.FrontTopRight, CuboidVertexType.FrontBottomRight ],
30 |     [ CuboidVertexType.FrontBottomRight, CuboidVertexType.FrontBottomLeft ],
31 |     [ CuboidVertexType.FrontBottomLeft, CuboidVertexType.FrontTopLeft ],
32 |     # Back face
33 |     [ CuboidVertexType.RearTopLeft, CuboidVertexType.RearTopRight ],
34 |     [ CuboidVertexType.RearTopRight, CuboidVertexType.RearBottomRight ],
35 |     [ CuboidVertexType.RearBottomRight, CuboidVertexType.RearBottomLeft ],
36 |     [ CuboidVertexType.RearBottomLeft, CuboidVertexType.RearTopLeft ],
37 |     # Left face
38 |     [ CuboidVertexType.FrontBottomLeft, CuboidVertexType.RearBottomLeft ],
39 |     [ CuboidVertexType.FrontTopLeft, CuboidVertexType.RearTopLeft ],
40 |     # Right face
41 |     [ CuboidVertexType.FrontBottomRight, CuboidVertexType.RearBottomRight ],
42 |     [ CuboidVertexType.FrontTopRight, CuboidVertexType.RearTopRight ],
43 | ]
44 | 
45 | 
46 | # ========================= Cuboid3d =========================
47 | class Cuboid3d():
48 |     '''This class represents a 3D cuboid: eight corner vertices plus the center point.'''
49 | 
50 |     # Create a box with a certain size
51 |     def __init__(self, size3d = [1.0, 1.0, 1.0], center_location = [0, 0, 0],
52 |             coord_system = None, parent_object = None):
53 | 
54 |         # NOTE: This local coordinate system is similar
55 |         # to the intrinsic transform matrix of a 3d object
56 |         self.center_location = center_location
57 |         self.coord_system = coord_system
58 |         self.size3d = size3d
59 |         self._vertices = [[0, 0, 0] for _ in range(CuboidVertexType.TotalVertexCount)]  # filled in by generate_vertexes()
60 | 
61 |         self.generate_vertexes()
62 | 
63 |     def get_vertex(self, vertex_type):
64 |         """Returns the location of a vertex.
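        For example, with the default OpenCV-style coordinate system and
        size3d = [w, h, d] centered at the origin,
        get_vertex(CuboidVertexType.FrontTopRight) returns [w / 2.0, -h / 2.0, d / 2.0].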
65 | 66 | Args: 67 | vertex_type: enum of type CuboidVertexType 68 | 69 | Returns: 70 | Numpy array(3) - Location of the vertex type in the cuboid 71 | """ 72 | return self._vertices[vertex_type] 73 | 74 | def get_vertices(self): 75 | return self._vertices 76 | 77 | def generate_vertexes(self): 78 | width, height, depth = self.size3d 79 | 80 | # By default just use the normal OpenCV coordinate system 81 | if (self.coord_system is None): 82 | cx, cy, cz = self.center_location 83 | # X axis point to the right 84 | right = cx + width / 2.0 85 | left = cx - width / 2.0 86 | # Y axis point downward 87 | top = cy - height / 2.0 88 | bottom = cy + height / 2.0 89 | # Z axis point forward 90 | front = cz + depth / 2.0 91 | rear = cz - depth / 2.0 92 | 93 | # List of 8 vertices of the box 94 | self._vertices = [ 95 | [right, top, front], # Front Top Right 96 | [left, top, front], # Front Top Left 97 | [left, bottom, front], # Front Bottom Left 98 | [right, bottom, front], # Front Bottom Right 99 | [right, top, rear], # Rear Top Right 100 | [left, top, rear], # Rear Top Left 101 | [left, bottom, rear], # Rear Bottom Left 102 | [right, bottom, rear], # Rear Bottom Right 103 | self.center_location, # Center 104 | ] 105 | else: 106 | sx, sy, sz = self.size3d 107 | forward = np.array(self.coord_system.forward, dtype=float) * sy * 0.5 108 | up = np.array(self.coord_system.up, dtype=float) * sz * 0.5 109 | right = np.array(self.coord_system.right, dtype=float) * sx * 0.5 110 | center = np.array(self.center_location, dtype=float) 111 | self._vertices = [ 112 | center + forward + up + right, # Front Top Right 113 | center + forward + up - right, # Front Top Left 114 | center + forward - up - right, # Front Bottom Left 115 | center + forward - up + right, # Front Bottom Right 116 | center - forward + up + right, # Rear Top Right 117 | center - forward + up - right, # Rear Top Left 118 | center - forward - up - right, # Rear Bottom Left 119 | center - forward - up + right, # Rear Bottom Right 120 | self.center_location, # Center 121 | ] 122 | 123 | def get_projected_cuboid2d(self, cuboid_transform, camera_intrinsic_matrix): 124 | """ 125 | Projects the cuboid into the image plane using camera intrinsics. 126 | 127 | Args: 128 | cuboid_transform: the world transform of the cuboid 129 | camera_intrinsic_matrix: camera intrinsic matrix 130 | 131 | Returns: 132 | Cuboid2d - the projected cuboid points 133 | """ 134 | 135 | world_transform_matrix = cuboid_transform 136 | rvec = [0, 0, 0] 137 | tvec = [0, 0, 0] 138 | dist_coeffs = np.zeros((4, 1)) 139 | 140 | transformed_vertices = [0, 0, 0] * CuboidVertexType.TotalVertexCount 141 | for vertex_index in range(CuboidVertexType.TotalVertexCount): 142 | vertex3d = self._vertices[vertex_index] 143 | transformed_vertices[vertex_index] = world_transform_matrix * vertex3d 144 | 145 | projected_vertices = cv2.projectPoints(transformed_vertices, rvec, tvec, 146 | camera_intrinsic_matrix, dist_coeffs) 147 | 148 | return Cuboid2d(projected_vertices) 149 | -------------------------------------------------------------------------------- /src/inference/cuboid_pnp_solver.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 2 | # This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. 
3 | # https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
4 | 
5 | import numpy as np
6 | from pyrr import Quaternion
7 | import cv2
8 | from cuboid import *
9 | 
10 | class CuboidPNPSolver(object):
11 |     """
12 |     This class is used to find the 6-DoF pose of a cuboid given its projected vertices.
13 | 
14 |     Runs the perspective-n-point (PnP) algorithm.
15 |     """
16 | 
17 |     # Class variables
18 |     cv2version = cv2.__version__.split('.')
19 |     cv2majorversion = int(cv2version[0])
20 | 
21 |     def __init__(self, object_name="", camera_intrinsic_matrix = None, cuboid3d = None,
22 |             dist_coeffs = np.zeros((4, 1))):
23 |         self.object_name = object_name
24 |         if camera_intrinsic_matrix is not None:
25 |             self._camera_intrinsic_matrix = camera_intrinsic_matrix
26 |         else:
27 |             self._camera_intrinsic_matrix = np.array([
28 |                 [0, 0, 0],
29 |                 [0, 0, 0],
30 |                 [0, 0, 0]
31 |             ])
32 |         self._cuboid3d = cuboid3d
33 | 
34 |         self._dist_coeffs = dist_coeffs
35 | 
36 |     def set_camera_intrinsic_matrix(self, new_intrinsic_matrix):
37 |         '''Sets the camera intrinsic matrix'''
38 |         self._camera_intrinsic_matrix = new_intrinsic_matrix
39 | 
40 |     def solve_pnp(self, cuboid2d_points, pnp_algorithm = None):
41 |         """
42 |         Detects the rotation and translation
43 |         of a cuboid object from its vertices'
44 |         2D locations in the image
45 |         """
46 | 
47 |         # Fall back to the default PnP algorithm based on the OpenCV version
48 |         if pnp_algorithm is None:
49 |             if CuboidPNPSolver.cv2majorversion == 2:
50 |                 pnp_algorithm = cv2.CV_ITERATIVE
51 |             elif CuboidPNPSolver.cv2majorversion == 3:
52 |                 pnp_algorithm = cv2.SOLVEPNP_ITERATIVE
53 |                 # Alternative algorithms:
54 |                 # pnp_algorithm = cv2.SOLVEPNP_P3P
55 |                 # pnp_algorithm = cv2.SOLVEPNP_EPNP
56 | 
57 |         location = None
58 |         quaternion = None
59 |         projected_points = cuboid2d_points
60 | 
61 |         cuboid3d_points = np.array(self._cuboid3d.get_vertices())
62 |         obj_2d_points = []
63 |         obj_3d_points = []
64 | 
65 |         for i in range(CuboidVertexType.TotalVertexCount):
66 |             check_point_2d = cuboid2d_points[i]
67 |             # Ignore invalid points
68 |             if (check_point_2d is None):
69 |                 continue
70 |             obj_2d_points.append(check_point_2d)
71 |             obj_3d_points.append(cuboid3d_points[i])
72 | 
73 |         obj_2d_points = np.array(obj_2d_points, dtype=float)
74 |         obj_3d_points = np.array(obj_3d_points, dtype=float)
75 | 
76 |         valid_point_count = len(obj_2d_points)
77 | 
78 |         # PnP needs at least 4 valid point correspondences
79 |         is_points_valid = valid_point_count >= 4
80 | 
81 |         if is_points_valid:
82 | 
83 |             ret, rvec, tvec = cv2.solvePnP(
84 |                 obj_3d_points,
85 |                 obj_2d_points,
86 |                 self._camera_intrinsic_matrix,
87 |                 self._dist_coeffs,
88 |                 flags=pnp_algorithm
89 |             )
90 | 
91 |             if ret:
92 |                 location = list(x[0] for x in tvec)
93 |                 quaternion = self.convert_rvec_to_quaternion(rvec)
94 | 
95 |                 projected_points, _ = cv2.projectPoints(cuboid3d_points, rvec, tvec, self._camera_intrinsic_matrix, self._dist_coeffs)
96 |                 projected_points = np.squeeze(projected_points)
97 | 
98 |                 # If location.Z is negative the object is behind the camera, so flip both location and rotation
99 |                 x, y, z = location
100 |                 if z < 0:
101 |                     # Get the opposite location
102 |                     location = [-x, -y, -z]
103 | 
104 |                     # Change the rotation by 180 degrees about the (flipped) location vector
105 |                     rotate_angle = np.pi
106 |                     rotate_quaternion = Quaternion.from_axis_rotation(location, rotate_angle)
107 |                     quaternion = rotate_quaternion.cross(quaternion)
108 | 
109 |         return location, quaternion, projected_points
110 | 
111 |     def convert_rvec_to_quaternion(self, rvec):
112 |         '''Convert rvec (an axis-angle rotation vector, as returned by cv2.solvePnP) to a quaternion'''
113 | 
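        # rvec is in Rodrigues (axis-angle) form: its direction is the rotation axis and
        # its magnitude is the rotation angle in radians, so the axis/angle split below
        # assumes a non-zero rotation (theta != 0)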
theta = np.sqrt(rvec[0] * rvec[0] + rvec[1] * rvec[1] + rvec[2] * rvec[2]) # in radians 114 | raxis = [rvec[0] / theta, rvec[1] / theta, rvec[2] / theta] 115 | 116 | # pyrr's Quaternion (order is XYZW), https://pyrr.readthedocs.io/en/latest/oo_api_quaternion.html 117 | return Quaternion.from_axis_rotation(raxis, theta) 118 | 119 | # Alternatively: pyquaternion 120 | # return Quaternion(axis=raxis, radians=theta) # uses OpenCV's Quaternion (order is WXYZ) 121 | 122 | def project_points(self, rvec, tvec): 123 | '''Project points from model onto image using rotation, translation''' 124 | output_points, tmp = cv2.projectPoints( 125 | self.__object_vertex_coordinates, 126 | rvec, 127 | tvec, 128 | self.__camera_intrinsic_matrix, 129 | self.__dist_coeffs) 130 | 131 | output_points = np.squeeze(output_points) 132 | return output_points -------------------------------------------------------------------------------- /src/inference/detector.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 2 | # This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. 3 | # https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode 4 | 5 | ''' 6 | Contains the following classes: 7 | - ModelData - High level information encapsulation 8 | - ObjectDetector - Greedy algorithm to build cuboids from belief maps 9 | ''' 10 | 11 | import time 12 | import json 13 | import os, shutil 14 | import sys 15 | import traceback 16 | from os import path 17 | import threading 18 | from threading import Thread 19 | 20 | import numpy as np 21 | import cv2 22 | 23 | import torch 24 | import torch.nn as nn 25 | import torchvision.transforms as transforms 26 | from torch.autograd import Variable 27 | import torchvision.models as models 28 | 29 | from scipy import ndimage 30 | import scipy 31 | import scipy.ndimage as ndimage 32 | import scipy.ndimage.filters as filters 33 | from scipy.ndimage.filters import gaussian_filter 34 | 35 | # Import the definition of the neural network model and cuboids 36 | from cuboid_pnp_solver import * 37 | 38 | #global transform for image input 39 | transform = transforms.Compose([ 40 | # transforms.Scale(IMAGE_SIZE), 41 | # transforms.CenterCrop((imagesize,imagesize)), 42 | transforms.ToTensor(), 43 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), 44 | ]) 45 | 46 | 47 | #================================ Models ================================ 48 | 49 | 50 | class DopeNetwork(nn.Module): 51 | def __init__( 52 | self, 53 | numBeliefMap=9, 54 | numAffinity=16, 55 | stop_at_stage=6 # number of stages to process (if less than total number of stages) 56 | ): 57 | super(DopeNetwork, self).__init__() 58 | 59 | self.stop_at_stage = stop_at_stage 60 | 61 | vgg_full = models.vgg19(pretrained=False).features 62 | self.vgg = nn.Sequential() 63 | for i_layer in range(24): 64 | self.vgg.add_module(str(i_layer), vgg_full[i_layer]) 65 | 66 | # Add some layers 67 | i_layer = 23 68 | self.vgg.add_module(str(i_layer), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1)) 69 | self.vgg.add_module(str(i_layer+1), nn.ReLU(inplace=True)) 70 | self.vgg.add_module(str(i_layer+2), nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)) 71 | self.vgg.add_module(str(i_layer+3), nn.ReLU(inplace=True)) 72 | 73 | # print('---Belief------------------------------------------------') 74 | # _2 are the belief map stages 75 | self.m1_2 = DopeNetwork.create_stage(128, 
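                                             # Stage 1 sees only the 128-channel VGG feature map;
                                             # stages 2-6 also receive the previous stage's belief and
                                             # affinity maps (128 + numBeliefMap + numAffinity channels)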
numBeliefMap, True) 76 | self.m2_2 = DopeNetwork.create_stage(128 + numBeliefMap + numAffinity, 77 | numBeliefMap, False) 78 | self.m3_2 = DopeNetwork.create_stage(128 + numBeliefMap + numAffinity, 79 | numBeliefMap, False) 80 | self.m4_2 = DopeNetwork.create_stage(128 + numBeliefMap + numAffinity, 81 | numBeliefMap, False) 82 | self.m5_2 = DopeNetwork.create_stage(128 + numBeliefMap + numAffinity, 83 | numBeliefMap, False) 84 | self.m6_2 = DopeNetwork.create_stage(128 + numBeliefMap + numAffinity, 85 | numBeliefMap, False) 86 | 87 | # print('---Affinity----------------------------------------------') 88 | # _1 are the affinity map stages 89 | self.m1_1 = DopeNetwork.create_stage(128, numAffinity, True) 90 | self.m2_1 = DopeNetwork.create_stage(128 + numBeliefMap + numAffinity, 91 | numAffinity, False) 92 | self.m3_1 = DopeNetwork.create_stage(128 + numBeliefMap + numAffinity, 93 | numAffinity, False) 94 | self.m4_1 = DopeNetwork.create_stage(128 + numBeliefMap + numAffinity, 95 | numAffinity, False) 96 | self.m5_1 = DopeNetwork.create_stage(128 + numBeliefMap + numAffinity, 97 | numAffinity, False) 98 | self.m6_1 = DopeNetwork.create_stage(128 + numBeliefMap + numAffinity, 99 | numAffinity, False) 100 | 101 | 102 | def forward(self, x): 103 | '''Runs inference on the neural network''' 104 | 105 | out1 = self.vgg(x) 106 | 107 | out1_2 = self.m1_2(out1) 108 | out1_1 = self.m1_1(out1) 109 | 110 | if self.stop_at_stage == 1: 111 | return [out1_2],\ 112 | [out1_1] 113 | 114 | out2 = torch.cat([out1_2, out1_1, out1], 1) 115 | out2_2 = self.m2_2(out2) 116 | out2_1 = self.m2_1(out2) 117 | 118 | if self.stop_at_stage == 2: 119 | return [out1_2, out2_2],\ 120 | [out1_1, out2_1] 121 | 122 | out3 = torch.cat([out2_2, out2_1, out1], 1) 123 | out3_2 = self.m3_2(out3) 124 | out3_1 = self.m3_1(out3) 125 | 126 | if self.stop_at_stage == 3: 127 | return [out1_2, out2_2, out3_2],\ 128 | [out1_1, out2_1, out3_1] 129 | 130 | out4 = torch.cat([out3_2, out3_1, out1], 1) 131 | out4_2 = self.m4_2(out4) 132 | out4_1 = self.m4_1(out4) 133 | 134 | if self.stop_at_stage == 4: 135 | return [out1_2, out2_2, out3_2, out4_2],\ 136 | [out1_1, out2_1, out3_1, out4_1] 137 | 138 | out5 = torch.cat([out4_2, out4_1, out1], 1) 139 | out5_2 = self.m5_2(out5) 140 | out5_1 = self.m5_1(out5) 141 | 142 | if self.stop_at_stage == 5: 143 | return [out1_2, out2_2, out3_2, out4_2, out5_2],\ 144 | [out1_1, out2_1, out3_1, out4_1, out5_1] 145 | 146 | out6 = torch.cat([out5_2, out5_1, out1], 1) 147 | out6_2 = self.m6_2(out6) 148 | out6_1 = self.m6_1(out6) 149 | 150 | return [out1_2, out2_2, out3_2, out4_2, out5_2, out6_2],\ 151 | [out1_1, out2_1, out3_1, out4_1, out5_1, out6_1] 152 | 153 | @staticmethod 154 | def create_stage(in_channels, out_channels, first=False): 155 | '''Create the neural network layers for a single stage.''' 156 | 157 | model = nn.Sequential() 158 | mid_channels = 128 159 | if first: 160 | padding = 1 161 | kernel = 3 162 | count = 6 163 | final_channels = 512 164 | else: 165 | padding = 3 166 | kernel = 7 167 | count = 10 168 | final_channels = mid_channels 169 | 170 | # First convolution 171 | model.add_module("0", 172 | nn.Conv2d( 173 | in_channels, 174 | mid_channels, 175 | kernel_size=kernel, 176 | stride=1, 177 | padding=padding) 178 | ) 179 | 180 | # Middle convolutions 181 | i = 1 182 | while i < count - 1: 183 | model.add_module(str(i), nn.ReLU(inplace=True)) 184 | i += 1 185 | model.add_module(str(i), 186 | nn.Conv2d( 187 | mid_channels, 188 | mid_channels, 189 | kernel_size=kernel, 190 | stride=1, 191 | 
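                                     # kernel/padding are 3/1 for the first stage and 7/3 for the
                                     # refinement stages, so the feature map resolution is preserved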
padding=padding)) 192 | i += 1 193 | 194 | # Penultimate convolution 195 | model.add_module(str(i), nn.ReLU(inplace=True)) 196 | i += 1 197 | model.add_module(str(i), nn.Conv2d(mid_channels, final_channels, kernel_size=1, stride=1)) 198 | i += 1 199 | 200 | # Last convolution 201 | model.add_module(str(i), nn.ReLU(inplace=True)) 202 | i += 1 203 | model.add_module(str(i), nn.Conv2d(final_channels, out_channels, kernel_size=1, stride=1)) 204 | i += 1 205 | 206 | return model 207 | 208 | 209 | 210 | class ModelData(object): 211 | '''This class contains methods for loading the neural network''' 212 | 213 | def __init__(self, name="", net_path="", gpu_id=0): 214 | self.name = name 215 | self.net_path = net_path # Path to trained network model 216 | self.net = None # Trained network 217 | self.gpu_id = gpu_id 218 | 219 | def get_net(self): 220 | '''Returns network''' 221 | if not self.net: 222 | self.load_net_model() 223 | return self.net 224 | 225 | def load_net_model(self): 226 | '''Loads network model from disk''' 227 | if not self.net and path.exists(self.net_path): 228 | self.net = self.load_net_model_path(self.net_path) 229 | if not path.exists(self.net_path): 230 | print("ERROR: Unable to find model weights: '{}'".format( 231 | self.net_path)) 232 | exit(0) 233 | 234 | def load_net_model_path(self, path): 235 | '''Loads network model from disk with given path''' 236 | model_loading_start_time = time.time() 237 | print("Loading DOPE model '{}'...".format(path)) 238 | net = DopeNetwork() 239 | net = torch.nn.DataParallel(net, [0]).cuda() 240 | net.load_state_dict(torch.load(path)) 241 | net.eval() 242 | print(' Model loaded in {} seconds.'.format( 243 | time.time() - model_loading_start_time)) 244 | return net 245 | 246 | def __str__(self): 247 | '''Converts to string''' 248 | return "{}: {}".format(self.name, self.net_path) 249 | 250 | 251 | #================================ ObjectDetector ================================ 252 | class ObjectDetector(object): 253 | '''This class contains methods for object detection''' 254 | 255 | @staticmethod 256 | def detect_object_in_image(net_model, pnp_solver, in_img, config): 257 | '''Detect objects in a image using a specific trained network model''' 258 | 259 | if in_img is None: 260 | return [] 261 | 262 | # Run network inference 263 | image_tensor = transform(in_img) 264 | image_torch = Variable(image_tensor).cuda().unsqueeze(0) 265 | out, seg = net_model(image_torch) 266 | vertex2 = out[-1][0] 267 | aff = seg[-1][0] 268 | 269 | # Find objects from network output 270 | detected_objects = ObjectDetector.find_object_poses(vertex2, aff, pnp_solver, config) 271 | 272 | return detected_objects 273 | 274 | @staticmethod 275 | def find_object_poses(vertex2, aff, pnp_solver, config): 276 | '''Detect objects given network output''' 277 | 278 | # Detect objects from belief maps and affinities 279 | objects, all_peaks = ObjectDetector.find_objects(vertex2, aff, config) 280 | detected_objects = [] 281 | obj_name = pnp_solver.object_name 282 | 283 | for obj in objects: 284 | # Run PNP 285 | points = obj[1] + [(obj[0][0]*8, obj[0][1]*8)] 286 | cuboid2d = np.copy(points) 287 | location, quaternion, projected_points = pnp_solver.solve_pnp(points) 288 | 289 | # Save results 290 | detected_objects.append({ 291 | 'name': obj_name, 292 | 'location': location, 293 | 'quaternion': quaternion, 294 | 'cuboid2d': cuboid2d, 295 | 'projected_points': projected_points, 296 | }) 297 | 298 | return detected_objects 299 | 300 | @staticmethod 301 | def find_objects(vertex2, 
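                     # vertex2: belief maps from the last network stage, one per cuboid
                     # keypoint (8 corners + centroid); aff: the corresponding affinity
                     # fields, two channels per corner, pointing toward the object centroid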
aff, config, numvertex=8):
302 |         '''Detects objects given network belief maps and affinities, using a heuristic method'''
303 | 
304 |         all_peaks = []
305 |         peak_counter = 0
306 |         for j in range(vertex2.size()[0]):
307 |             belief = vertex2[j].clone()
308 |             map_ori = belief.cpu().data.numpy()
309 | 
310 |             map = gaussian_filter(belief.cpu().data.numpy(), sigma=config.sigma)
311 |             p = 1
312 |             map_left = np.zeros(map.shape)
313 |             map_left[p:,:] = map[:-p,:]
314 |             map_right = np.zeros(map.shape)
315 |             map_right[:-p,:] = map[p:,:]
316 |             map_up = np.zeros(map.shape)
317 |             map_up[:,p:] = map[:,:-p]
318 |             map_down = np.zeros(map.shape)
319 |             map_down[:,:-p] = map[:,p:]
320 | 
321 |             peaks_binary = np.logical_and.reduce(
322 |                 (
323 |                     map >= map_left,
324 |                     map >= map_right,
325 |                     map >= map_up,
326 |                     map >= map_down,
327 |                     map > config.thresh_map)
328 |                 )
329 |             peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])
330 | 
331 |             # Compute the weighted average for localizing the peaks
332 |             peaks = list(peaks)
333 |             win = 5
334 |             ran = win // 2
335 |             peaks_avg = []
336 |             for p_value in range(len(peaks)):
337 |                 p = peaks[p_value]
338 |                 weights = np.zeros((win,win))
339 |                 i_values = np.zeros((win,win))
340 |                 j_values = np.zeros((win,win))
341 |                 for i in range(-ran, ran+1):
342 |                     for j in range(-ran, ran+1):
343 |                         if p[1]+i < 0 \
344 |                                 or p[1]+i >= map_ori.shape[0] \
345 |                                 or p[0]+j < 0 \
346 |                                 or p[0]+j >= map_ori.shape[1]:
347 |                             continue
348 | 
349 |                         i_values[j+ran, i+ran] = p[1] + i
350 |                         j_values[j+ran, i+ran] = p[0] + j
351 | 
352 |                         weights[j+ran, i+ran] = (map_ori[p[1]+i, p[0]+j])
353 | 
354 |                 # If the weights are all zeros,
55 |                 # fall back to the unrefined (integer) peak location
356 |                 OFFSET_DUE_TO_UPSAMPLING = 0.4395
357 |                 try:
358 |                     peaks_avg.append(
359 |                         (np.average(j_values, weights=weights) + OFFSET_DUE_TO_UPSAMPLING, \
360 |                             np.average(i_values, weights=weights) + OFFSET_DUE_TO_UPSAMPLING))
361 |                 except ZeroDivisionError:
362 |                     peaks_avg.append((p[0] + OFFSET_DUE_TO_UPSAMPLING, p[1] + OFFSET_DUE_TO_UPSAMPLING))
363 |             # Note: Python 3 doesn't support len() on a zip object
364 |             peaks_len = min(len(np.nonzero(peaks_binary)[1]), len(np.nonzero(peaks_binary)[0]))
365 | 
366 |             peaks_with_score = [peaks_avg[x_] + (map_ori[peaks[x_][1],peaks[x_][0]],) for x_ in range(len(peaks))]
367 | 
368 |             id = range(peak_counter, peak_counter + peaks_len)
369 | 
370 |             peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]
371 | 
372 |             all_peaks.append(peaks_with_score_and_id)
373 |             peak_counter += peaks_len
374 | 
375 |         objects = []
376 | 
377 |         # Check the object centroids and build the objects if a centroid is found
378 |         for nb_object in range(len(all_peaks[-1])):
379 |             if all_peaks[-1][nb_object][2] > config.thresh_points:
380 |                 objects.append([
381 |                     [all_peaks[-1][nb_object][:2][0], all_peaks[-1][nb_object][:2][1]],
382 |                     [None for i in range(numvertex)],
383 |                     [None for i in range(numvertex)],
384 |                     all_peaks[-1][nb_object][2]
385 |                 ])
386 | 
387 |         # Working with an output that only has belief maps
388 |         if aff is None:
389 |             if len(objects) > 0 and len(all_peaks) > 0 and len(all_peaks[0]) > 0:
390 |                 for i_points in range(8):
391 |                     if len(all_peaks[i_points]) > 0 and all_peaks[i_points][0][2] > config.threshold:
392 |                         objects[0][1][i_points] = (all_peaks[i_points][0][0], all_peaks[i_points][0][1])
393 |         else:
394 |             # For all points found
395 |             for i_lists in range(len(all_peaks[:-1])):
396 |                 lists = all_peaks[i_lists]
397 | 
398 |                 for candidate in lists:
399 |                     if candidate[2] < config.thresh_points:
400 |                         continue
401 | 
402 |                     i_best = -1
403 |                     best_dist = 10000
404 |                     best_angle = 100
405 |                     for i_obj in range(len(objects)):
406 |                         center = [objects[i_obj][0][0], objects[i_obj][0][1]]
407 | 
408 |                         # The integer coordinates are used to index into the affinity map,
409 |                         # while the float coordinates are used for the distance computations
410 |                         point_int = [int(candidate[0]), int(candidate[1])]
411 |                         point = [candidate[0], candidate[1]]
412 | 
413 |                         # Read the affinity vector field at this candidate location
414 |                         v_aff = np.array([
415 |                             aff[i_lists*2,
416 |                                 point_int[1],
417 |                                 point_int[0]].data.item(),
418 |                             aff[i_lists*2+1,
419 |                                 point_int[1],
420 |                                 point_int[0]].data.item()]) * 10
421 | 
422 |                         # Normalize the affinity vector
423 |                         xvec = v_aff[0]
424 |                         yvec = v_aff[1]
425 | 
426 |                         norms = np.sqrt(xvec * xvec + yvec * yvec)
427 | 
428 |                         xvec /= norms
429 |                         yvec /= norms
430 | 
431 |                         v_aff = np.concatenate([[xvec],[yvec]])
432 | 
433 |                         v_center = np.array(center) - np.array(point)
434 |                         xvec = v_center[0]
435 |                         yvec = v_center[1]
436 | 
437 |                         norms = np.sqrt(xvec * xvec + yvec * yvec)
438 | 
439 |                         xvec /= norms
440 |                         yvec /= norms
441 | 
442 |                         v_center = np.concatenate([[xvec],[yvec]])
443 | 
444 |                         # Mismatch between the affinity direction and the direction to the object center
445 |                         dist_angle = np.linalg.norm(v_center - v_aff)
446 | 
447 |                         # Distance between the candidate vertex and the object center
448 |                         dist_point = np.linalg.norm(np.array(point) - np.array(center))
449 | 
450 |                         if (dist_angle < config.thresh_angle
451 |                                 and best_dist > 1000) \
452 |                             or (dist_angle < config.thresh_angle
453 |                                 and best_dist > dist_point):
454 |                             i_best = i_obj
455 |                             best_angle = dist_angle
456 |                             best_dist = dist_point
457 | 
458 |                     if i_best == -1:
459 |                         continue
460 | 
461 |                     if objects[i_best][1][i_lists] is None \
462 |                             or (best_angle < config.thresh_angle
463 |                                 and best_dist < objects[i_best][2][i_lists][1]):
464 |                         objects[i_best][1][i_lists] = ((candidate[0])*8, (candidate[1])*8)
465 |                         objects[i_best][2][i_lists] = (best_angle, best_dist)
466 | 
467 |         return objects, all_peaks
468 | 
--------------------------------------------------------------------------------
/weights/readme.md:
--------------------------------------------------------------------------------
1 | This is where you need to store the weights.
2 | 
--------------------------------------------------------------------------------