├── .github └── workflows │ └── manual.yml ├── CODEOWNERS ├── LICENSE ├── README.md ├── gazebo_grasp_plugin ├── CHANGELOG.rst ├── CMakeLists.txt ├── LICENSE ├── README.md ├── include │ └── gazebo_grasp_plugin │ │ ├── GazeboGraspFix.h │ │ └── GazeboGraspGripper.h ├── package.xml └── src │ ├── GazeboGraspFix.cpp │ └── GazeboGraspGripper.cpp ├── pr2_moveit ├── .setup_assistant ├── CMakeLists.txt ├── config │ ├── controllers.yaml │ ├── fake_controllers.yaml │ ├── grasp.yaml │ ├── joint_limits.yaml │ ├── kinematics.yaml │ ├── ompl_planning.yaml │ └── pr2.srdf ├── launch │ ├── default_warehouse_db.launch │ ├── demo.launch │ ├── fake_moveit_controller_manager.launch.xml │ ├── filter.launch │ ├── joystick_control.launch │ ├── move_group.launch │ ├── moveit.rviz │ ├── moveit_rviz.launch │ ├── ompl_planning_pipeline.launch.xml │ ├── planning_context.launch │ ├── planning_pipeline.launch.xml │ ├── pr2_moveit.launch │ ├── pr2_moveit_controller_manager.launch.xml │ ├── pr2_moveit_sensor_manager.launch.xml │ ├── run_benchmark_ompl.launch │ ├── sensor_manager.launch.xml │ ├── setup_assistant.launch │ ├── trajectory_execution.launch.xml │ ├── warehouse.launch │ └── warehouse_settings.launch.xml └── package.xml ├── pr2_robot ├── .DS_Store ├── CMakeLists.txt ├── config │ ├── controllers.yaml │ ├── dropbox.yaml │ ├── output.yaml │ ├── pick_list_1.yaml │ ├── pick_list_2.yaml │ ├── pick_list_3.yaml │ ├── pr2.rviz │ └── sensors.yaml ├── include │ └── pr2_robot │ │ ├── pr2_motion.h │ │ └── pr2_pick_place_server.h ├── launch │ ├── octomap_server.launch │ ├── pick_place_demo.launch │ ├── pick_place_project.launch │ ├── robot_control.launch │ ├── robot_description.launch │ └── robot_spawn.launch ├── materials │ └── textures │ │ ├── pr2_caster_texture.png │ │ ├── pr2_wheel_left.png │ │ └── pr2_wheel_right.png ├── meshes │ ├── README.md │ ├── base_v0 │ │ ├── .gitignore │ │ ├── base.dae │ │ ├── base.stl │ │ ├── base_L.stl │ │ ├── base_color.png │ │ ├── base_normals.png │ │ ├── caster.stl │ │ 
├── caster_L.stl │ │ ├── pr2_wheel.stl │ │ ├── wheel.dae │ │ ├── wheel.stl │ │ ├── wheel_color.png │ │ ├── wheel_h.dae │ │ ├── wheel_h_color.png │ │ └── wheel_normals.png │ ├── forearm_v0 │ │ ├── .gitignore │ │ ├── forearm.dae │ │ ├── forearm.jpg │ │ ├── forearm.stl │ │ ├── forearm_color.png │ │ ├── forearm_normals.png │ │ ├── wrist_color.png │ │ ├── wrist_flex.dae │ │ ├── wrist_flex.stl │ │ ├── wrist_normals.png │ │ ├── wrist_roll.stl │ │ └── wrist_roll_L.stl │ ├── gripper │ │ ├── finger_left.dae │ │ ├── finger_left_collision.dae │ │ ├── finger_right.dae │ │ ├── finger_right_collision.dae │ │ └── gripper_base.dae │ ├── head_v0 │ │ ├── .gitignore │ │ ├── head_pan.dae │ │ ├── head_pan.stl │ │ ├── head_pan_L.stl │ │ ├── head_pan_color.png │ │ ├── head_pan_normals.png │ │ ├── head_tilt.dae │ │ ├── head_tilt.stl │ │ ├── head_tilt_L.stl │ │ ├── head_tilt_color.png │ │ ├── head_tilt_color_red.png │ │ ├── head_tilt_color_yellow.png │ │ ├── head_tilt_green.png │ │ └── head_tilt_normals.png │ ├── shoulder_v0 │ │ ├── .gitignore │ │ ├── shoulder_lift.dae │ │ ├── shoulder_lift.stl │ │ ├── shoulder_lift_color.png │ │ ├── shoulder_lift_normals.png │ │ ├── shoulder_pan.dae │ │ ├── shoulder_pan.stl │ │ ├── shoulder_pan_color.png │ │ ├── shoulder_pan_normals.png │ │ ├── shoulder_yaw.stl │ │ ├── upper_arm_roll.dae │ │ ├── upper_arm_roll.stl │ │ ├── upper_arm_roll_L.stl │ │ ├── upper_arm_roll_color.png │ │ └── upper_arm_roll_normals.png │ ├── torso_v0 │ │ ├── .gitignore │ │ ├── torso.stl │ │ ├── torso_lift.dae │ │ ├── torso_lift.stl │ │ ├── torso_lift_L.stl │ │ ├── torso_lift_color.png │ │ └── torso_lift_normals.png │ └── upper_arm_v0 │ │ ├── .gitignore │ │ ├── elbow_flex.dae │ │ ├── elbow_flex.stl │ │ ├── elbow_flex_color.png │ │ ├── elbow_flex_normals.png │ │ ├── forearm_roll.stl │ │ ├── forearm_roll_L.stl │ │ ├── upper_arm.dae │ │ ├── upper_arm.jpg │ │ ├── upper_arm.stl │ │ ├── upper_arm_color.png │ │ └── upper_arm_normals.png ├── models │ ├── README.md │ ├── asus_xtion.dae │ ├── 
biscuits │ │ ├── materials │ │ │ ├── scripts │ │ │ │ └── biscuits.material │ │ │ └── textures │ │ │ │ └── optimized_tsdf_texture_mapped_mesh.png │ │ ├── meshes │ │ │ └── biscuits.dae │ │ ├── model.config │ │ └── model.sdf │ ├── book │ │ ├── materials │ │ │ ├── scripts │ │ │ │ └── book.material │ │ │ └── textures │ │ │ │ └── optimized_tsdf_texture_mapped_mesh.png │ │ ├── meshes │ │ │ └── book.dae │ │ ├── model.config │ │ └── model.sdf │ ├── dropbox │ │ ├── meshes │ │ │ └── dropbox.dae │ │ ├── model.config │ │ └── model.sdf │ ├── eraser │ │ ├── materials │ │ │ ├── scripts │ │ │ │ └── eraser.material │ │ │ └── textures │ │ │ │ └── optimized_tsdf_texture_mapped_mesh.png │ │ ├── meshes │ │ │ └── eraser.dae │ │ ├── model.config │ │ └── model.sdf │ ├── glue │ │ ├── materials │ │ │ ├── scripts │ │ │ │ └── glue.material │ │ │ └── textures │ │ │ │ └── optimized_tsdf_texture_mapped_mesh.png │ │ ├── meshes │ │ │ └── glue.dae │ │ ├── model.config │ │ └── model.sdf │ ├── short_table │ │ ├── meshes │ │ │ └── short_table.dae │ │ ├── model.config │ │ └── model.sdf │ ├── snacks │ │ ├── materials │ │ │ ├── scripts │ │ │ │ └── snacks.material │ │ │ └── textures │ │ │ │ └── optimized_tsdf_texture_mapped_mesh.png │ │ ├── meshes │ │ │ └── snacks.dae │ │ ├── model.config │ │ └── model.sdf │ ├── soap │ │ ├── materials │ │ │ ├── scripts │ │ │ │ └── soap.material │ │ │ └── textures │ │ │ │ └── optimized_tsdf_texture_mapped_mesh.png │ │ ├── meshes │ │ │ └── soap.dae │ │ ├── model.config │ │ └── model.sdf │ ├── soap2 │ │ ├── materials │ │ │ ├── scripts │ │ │ │ └── soap2.material │ │ │ └── textures │ │ │ │ └── optimized_tsdf_texture_mapped_mesh.png │ │ ├── meshes │ │ │ └── soap2.dae │ │ ├── model.config │ │ └── model.sdf │ ├── soda_can │ │ ├── materials │ │ │ └── textures │ │ │ │ └── uda_can.png │ │ ├── meshes │ │ │ ├── soda_can.dae │ │ │ └── udacity-blue.png.001.png │ │ ├── model-1_2.sdf │ │ ├── model-1_3.sdf │ │ ├── model-1_4.sdf │ │ ├── model.config │ │ └── model.sdf │ ├── sticky_notes │ │ 
├── materials │ │ │ ├── scripts │ │ │ │ └── sticky_notes.material │ │ │ └── textures │ │ │ │ └── optimized_tsdf_texture_mapped_mesh.png │ │ ├── meshes │ │ │ └── sticky_notes.dae │ │ ├── model.config │ │ └── model.sdf │ └── twin_table │ │ ├── meshes │ │ └── twin_table.dae │ │ ├── model.config │ │ └── model.sdf ├── package.xml ├── scripts │ ├── .DS_Store │ ├── grasp_server.py │ ├── pcl_helper.py │ ├── pr2_safe_spawner.sh │ ├── project_template.py │ └── rospy_message_converter │ │ ├── __init__.py │ │ ├── json_message_converter.py │ │ └── message_converter.py ├── src │ ├── pr2_cloud_transformer.cpp │ ├── pr2_motion.cpp │ └── pr2_pick_place_server.cpp ├── srv │ ├── Grasp.srv │ └── PickPlace.srv ├── urdf │ ├── common.xacro │ ├── materials.urdf.xacro │ ├── pr2.gazebo.xacro │ ├── pr2.urdf.xacro │ └── sensor.urdf.xacro └── worlds │ ├── challenge.world │ ├── demo.world │ ├── tabletop.world │ ├── test1.world │ ├── test2.world │ └── test3.world └── writeup_template.md /.github/workflows/manual.yml: -------------------------------------------------------------------------------- 1 | # Workflow to ensure whenever a Github PR is submitted, 2 | # a JIRA ticket gets created automatically. 3 | name: Manual Workflow 4 | 5 | # Controls when the action will run. 
6 | on: 7 | # Triggers the workflow on pull request events but only for the master branch 8 | pull_request_target: 9 | types: [opened, reopened] 10 | 11 | # Allows you to run this workflow manually from the Actions tab 12 | workflow_dispatch: 13 | 14 | jobs: 15 | test-transition-issue: 16 | name: Convert Github Issue to Jira Issue 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@master 21 | 22 | - name: Login 23 | uses: atlassian/gajira-login@master 24 | env: 25 | JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }} 26 | JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }} 27 | JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }} 28 | 29 | - name: Create NEW JIRA ticket 30 | id: create 31 | uses: atlassian/gajira-create@master 32 | with: 33 | project: CONUPDATE 34 | issuetype: Task 35 | summary: | 36 | Github PR [Assign the ND component] | Repo: ${{ github.repository }} | PR# ${{github.event.number}} 37 | description: | 38 | Repo link: https://github.com/${{ github.repository }} 39 | PR no. ${{ github.event.pull_request.number }} 40 | PR title: ${{ github.event.pull_request.title }} 41 | PR description: ${{ github.event.pull_request.body }} 42 | In addition, please resolve other issues, if any. 
43 | fields: '{"components": [{"name":"Github PR"}], "customfield_16449":"https://classroom.udacity.com/", "customfield_16450":"Resolve the PR", "labels": ["github"], "priority":{"id": "4"}}' 44 | 45 | - name: Log created issue 46 | run: echo "Issue ${{ steps.create.outputs.issue }} was created" 47 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @udacity/active-public-content -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Udacity 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Deprecated Repository 2 | This repository is deprecated. Currently enrolled learners, if any, can: 3 | - Utilize the https://knowledge.udacity.com/ forum to seek help on content-specific issues. 4 | - [Submit a support ticket](https://udacity.zendesk.com/hc/en-us/requests/new) if (learners are) blocked due to other reasons. 5 | 6 | 7 | [![Udacity - Robotics NanoDegree Program](https://s3-us-west-1.amazonaws.com/udacity-robotics/Extra+Images/RoboND_flag.png)](https://www.udacity.com/robotics) 8 | # 3D Perception 9 | Before starting any work on this project, please complete all steps for [Exercise 1, 2 and 3](https://github.com/udacity/RoboND-Perception-Exercises). At the end of Exercise-3 you have a pipeline that can identify points that belong to a specific object. 10 | 11 | In this project, you must assimilate your work from previous exercises to successfully complete a tabletop pick and place operation using PR2. 12 | 13 | The PR2 has been outfitted with an RGB-D sensor much like the one you used in previous exercises. This sensor however is a bit noisy, much like real sensors. 14 | 15 | Given the cluttered tabletop scenario, you must implement a perception pipeline using your work from Exercises 1,2 and 3 to identify target objects from a so-called “Pick-List” in that particular order, pick up those objects and place them in corresponding dropboxes. 
16 | 17 | # Project Setup 18 | For this setup, catkin_ws is the name of active ROS Workspace, if your workspace name is different, change the commands accordingly 19 | If you do not have an active ROS workspace, you can create one by: 20 | 21 | ```sh 22 | $ mkdir -p ~/catkin_ws/src 23 | $ cd ~/catkin_ws/ 24 | $ catkin_make 25 | ``` 26 | 27 | Now that you have a workspace, clone or download this repo into the src directory of your workspace: 28 | ```sh 29 | $ cd ~/catkin_ws/src 30 | $ git clone https://github.com/udacity/RoboND-Perception-Project.git 31 | ``` 32 | ### Note: If you have the Kinematics Pick and Place project in the same ROS Workspace as this project, please remove the 'gazebo_grasp_plugin' directory from the `RoboND-Perception-Project/` directory otherwise ignore this note. 33 | 34 | Now install missing dependencies using rosdep install: 35 | ```sh 36 | $ cd ~/catkin_ws 37 | $ rosdep install --from-paths src --ignore-src --rosdistro=kinetic -y 38 | ``` 39 | Build the project: 40 | ```sh 41 | $ cd ~/catkin_ws 42 | $ catkin_make 43 | ``` 44 | Add following to your .bashrc file 45 | ``` 46 | export GAZEBO_MODEL_PATH=~/catkin_ws/src/RoboND-Perception-Project/pr2_robot/models:$GAZEBO_MODEL_PATH 47 | ``` 48 | 49 | If you haven’t already, following line can be added to your .bashrc to auto-source all new terminals 50 | ``` 51 | source ~/catkin_ws/devel/setup.bash 52 | ``` 53 | 54 | To run the demo: 55 | ```sh 56 | $ cd ~/catkin_ws/src/RoboND-Perception-Project/pr2_robot/scripts 57 | $ chmod u+x pr2_safe_spawner.sh 58 | $ ./pr2_safe_spawner.sh 59 | ``` 60 | ![demo-1](https://user-images.githubusercontent.com/20687560/28748231-46b5b912-7467-11e7-8778-3095172b7b19.png) 61 | 62 | 63 | 64 | Once Gazebo is up and running, make sure you see following in the gazebo world: 65 | - Robot 66 | 67 | - Table arrangement 68 | 69 | - Three target objects on the table 70 | 71 | - Dropboxes on either sides of the robot 72 | 73 | 74 | If any of these items are missing, please 
report as an issue on [the waffle board](https://waffle.io/udacity/robotics-nanodegree-issues). 75 | 76 | In your RViz window, you should see the robot and a partial collision map displayed: 77 | 78 | ![demo-2](https://user-images.githubusercontent.com/20687560/28748286-9f65680e-7468-11e7-83dc-f1a32380b89c.png) 79 | 80 | Proceed through the demo by pressing the ‘Next’ button on the RViz window when a prompt appears in your active terminal 81 | 82 | The demo ends when the robot has successfully picked and placed all objects into respective dropboxes (though sometimes the robot gets excited and throws objects across the room!) 83 | 84 | Close all active terminal windows using **ctrl+c** before restarting the demo. 85 | 86 | You can launch the project scenario like this: 87 | ```sh 88 | $ roslaunch pr2_robot pick_place_project.launch 89 | ``` 90 | # Required Steps for a Passing Submission: 91 | 1. Extract features and train an SVM model on new objects (see `pick_list_*.yaml` in `/pr2_robot/config/` for the list of models you'll be trying to identify). 92 | 2. Write a ROS node and subscribe to `/pr2/world/points` topic. This topic contains noisy point cloud data that you must work with. 93 | 3. Use filtering and RANSAC plane fitting to isolate the objects of interest from the rest of the scene. 94 | 4. Apply Euclidean clustering to create separate clusters for individual items. 95 | 5. Perform object recognition on these objects and assign them labels (markers in RViz). 96 | 6. Calculate the centroid (average in x, y and z) of the set of points belonging to each object. 97 | 7. Create ROS messages containing the details of each object (name, pick_pose, etc.) and write these messages out to `.yaml` files, one for each of the 3 scenarios (`test1-3.world` in `/pr2_robot/worlds/`). See the example `output.yaml` for details on what the output should look like. 98 | 8. 
Submit a link to your GitHub repo for the project or the Python code for your perception pipeline and your output `.yaml` files (3 `.yaml` files, one for each test world). You must have correctly identified 100% of objects from `pick_list_1.yaml` for `test1.world`, 80% of items from `pick_list_2.yaml` for `test2.world` and 75% of items from `pick_list_3.yaml` in `test3.world`. 99 | 9. Congratulations! You're Done! 100 | 101 | # Extra Challenges: Complete the Pick & Place 102 | 7. To create a collision map, publish a point cloud to the `/pr2/3d_map/points` topic and make sure you change the `point_cloud_topic` to `/pr2/3d_map/points` in `sensors.yaml` in the `/pr2_robot/config/` directory. This topic is read by Moveit!, which uses this point cloud input to generate a collision map, allowing the robot to plan its trajectory. Keep in mind that later when you go to pick up an object, you must first remove it from this point cloud so it is removed from the collision map! 103 | 8. Rotate the robot to generate collision map of table sides. This can be accomplished by publishing joint angle value(in radians) to `/pr2/world_joint_controller/command` 104 | 9. Rotate the robot back to its original state. 105 | 10. Create a ROS Client for the “pick_place_routine” rosservice. In the required steps above, you already created the messages you need to use this service. Checkout the [PickPlace.srv](https://github.com/udacity/RoboND-Perception-Project/tree/master/pr2_robot/srv) file to find out what arguments you must pass to this service. 106 | 11. If everything was done correctly, when you pass the appropriate messages to the `pick_place_routine` service, the selected arm will perform pick and place operation and display trajectory in the RViz window 107 | 12. Place all the objects from your pick list in their respective dropoff box and you have completed the challenge! 108 | 13. Looking for a bigger challenge? 
Load up the `challenge.world` scenario and see if you can get your perception pipeline working there! 109 | 110 | For all the step-by-step details on how to complete this project see the [RoboND 3D Perception Project Lesson](https://classroom.udacity.com/nanodegrees/nd209/parts/586e8e81-fc68-4f71-9cab-98ccd4766cfe/modules/e5bfcfbd-3f7d-43fe-8248-0c65d910345a/lessons/e3e5fd8e-2f76-4169-a5bc-5a128d380155/concepts/802deabb-7dbb-46be-bf21-6cb0a39a1961) 111 | Note: The robot is a bit moody at times and might leave objects on the table or fling them across the room :D 112 | As long as your pipeline performs successful recognition, your project will be considered successful even if the robot feels otherwise! 113 | -------------------------------------------------------------------------------- /gazebo_grasp_plugin/CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 2 | Changelog for package gazebo_grasp_plugin 3 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 4 | 5 | 1.0.1 (2016-06-08) 6 | ------------------ 7 | * Fixed cmake files for jenkins builds 8 | * Contributors: Jennifer Buehler 9 | 10 | 1.0.0 (2016-06-07) 11 | ------------------ 12 | * Initial release 13 | * Contributors: Jennifer Buehler 14 | -------------------------------------------------------------------------------- /gazebo_grasp_plugin/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(gazebo_grasp_plugin) 3 | 4 | add_definitions(-std=c++11) 5 | 6 | ## Find catkin macros and libraries 7 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 8 | ## is used, also find other catkin packages 9 | find_package(catkin REQUIRED COMPONENTS 10 | gazebo_ros 11 | geometry_msgs 12 | roscpp 13 | std_msgs 14 | ) 15 | 16 | find_package(gazebo REQUIRED) 17 | 18 | ## Uncomment this if the package has a setup.py. 
This macro ensures 19 | ## modules and global scripts declared therein get installed 20 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 21 | # catkin_python_setup() 22 | 23 | ################################### 24 | ## catkin specific configuration ## 25 | ################################### 26 | ## The catkin_package macro generates cmake config files for your package 27 | ## Declare things to be passed to dependent projects 28 | ## INCLUDE_DIRS: uncomment this if you package contains header files 29 | ## LIBRARIES: libraries you create in this project that dependent projects also need 30 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 31 | ## DEPENDS: system dependencies of this project that dependent projects also need 32 | catkin_package( 33 | INCLUDE_DIRS include 34 | LIBRARIES gazebo_grasp_fix 35 | CATKIN_DEPENDS gazebo_ros geometry_msgs roscpp std_msgs 36 | DEPENDS gazebo 37 | ) 38 | 39 | ########### 40 | ## Build ## 41 | ########### 42 | 43 | ## Specify additional locations of header files 44 | ## Your package locations should be listed before other locations 45 | # include_directories(include) 46 | include_directories( 47 | include 48 | ${catkin_INCLUDE_DIRS} 49 | ${GAZEBO_INCLUDE_DIRS} 50 | ) 51 | 52 | link_directories( 53 | ${GAZEBO_LIBRARY_DIRS} 54 | ${catkin_LIBRARY_DIRS} 55 | ) 56 | 57 | ## Declare a cpp library 58 | add_library(gazebo_grasp_fix SHARED src/GazeboGraspFix.cpp src/GazeboGraspGripper.cpp) 59 | 60 | ## Add cmake target dependencies of the executable/library 61 | ## as an example, message headers may need to be generated before nodes 62 | # add_dependencies(gazebo_grasp_plugin_node gazebo_grasp_plugin_generate_messages_cpp) 63 | add_dependencies(gazebo_grasp_fix ${catkin_EXPORTED_TARGETS}) 64 | 65 | ## Specify libraries to link a library or executable target against 66 | 67 | target_link_libraries(gazebo_grasp_fix 68 | ${GAZEBO_LIBRARIES} 69 | ${Boost_LIBRARIES} 70 | ) 71 | 72 | ############# 73 | 
## Install ## 74 | ############# 75 | 76 | # all install targets should use catkin DESTINATION variables 77 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 78 | 79 | ## Mark executable scripts (Python etc.) for installation 80 | ## in contrast to setup.py, you can choose the destination 81 | # install(PROGRAMS 82 | # scripts/my_python_script 83 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 84 | # ) 85 | 86 | ## Mark executables and/or libraries for installation 87 | install(TARGETS gazebo_grasp_fix 88 | ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 89 | LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 90 | RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 91 | ) 92 | 93 | ## Mark cpp header files for installation 94 | install(DIRECTORY include/${PROJECT_NAME}/ 95 | DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 96 | FILES_MATCHING PATTERN "*.h" 97 | ) 98 | 99 | install(DIRECTORY launch 100 | DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 101 | FILES_MATCHING PATTERN "*.launch" 102 | ) 103 | 104 | ## Mark other files for installation (e.g. launch and bag files, etc.) 
105 | # install(FILES 106 | # # myfile1 107 | # # myfile2 108 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 109 | # ) 110 | 111 | ############# 112 | ## Testing ## 113 | ############# 114 | 115 | ## Add gtest based cpp test target and link libraries 116 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_gazebo_grasp_plugin.cpp) 117 | # if(TARGET ${PROJECT_NAME}-test) 118 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 119 | # endif() 120 | 121 | ## Add folders to be run by python nosetests 122 | # catkin_add_nosetests(test) 123 | -------------------------------------------------------------------------------- /gazebo_grasp_plugin/README.md: -------------------------------------------------------------------------------- 1 | This package was obtained from: https://github.com/JenniferBuehler/gazebo-pkgs 2 | -------------------------------------------------------------------------------- /gazebo_grasp_plugin/include/gazebo_grasp_plugin/GazeboGraspGripper.h: -------------------------------------------------------------------------------- 1 | #ifndef GAZEBO_GAZEBOGRASPGRIPPER_H 2 | #define GAZEBO_GAZEBOGRASPGRIPPER_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | namespace gazebo { 12 | 13 | /** 14 | * \brief Helper class for GazeboGraspFix which holds information for one arm. 15 | * Attaches /detaches objects to the palm of this arm. 16 | * 17 | * \author Jennifer Buehler 18 | */ 19 | class GazeboGraspGripper { 20 | public: 21 | GazeboGraspGripper(); 22 | GazeboGraspGripper(const GazeboGraspGripper& o); 23 | virtual ~GazeboGraspGripper(); 24 | 25 | /** 26 | * 27 | * \param disableCollisionsOnAttach when an object is attached, collisions with it will be disabled. This is useful 28 | * if the robot then still keeps wobbling. 
29 | */ 30 | bool Init(physics::ModelPtr& _model, 31 | const std::string& _gripperName, 32 | const std::string& palmLinkName, 33 | const std::vector& fingerLinkNames, 34 | bool _disableCollisionsOnAttach, 35 | std::map& _collisions); 36 | 37 | const std::string& getGripperName() const; 38 | 39 | /** 40 | * Has the link name (URDF) 41 | */ 42 | bool hasLink(const std::string& linkName) const; 43 | 44 | /** 45 | * Has the collision link name (Gazebo collision element name) 46 | */ 47 | bool hasCollisionLink(const std::string& linkName) const; 48 | 49 | bool isObjectAttached() const; 50 | 51 | const std::string& attachedObject() const; 52 | 53 | /** 54 | * \param gripContacts contact forces on the object sorted by the link name colliding. 55 | */ 56 | bool HandleAttach(const std::string& objName); 57 | void HandleDetach(const std::string& objName); 58 | 59 | private: 60 | 61 | physics::ModelPtr model; 62 | 63 | // name of the gripper 64 | std::string gripperName; 65 | 66 | // names of the gripper links 67 | std::vector linkNames; 68 | // names and Collision objects of the collision links in Gazebo (scoped names) 69 | // Not necessarily equal names and size to linkNames. 70 | std::map collisionElems; 71 | 72 | physics::JointPtr fixedJoint; 73 | 74 | physics::LinkPtr palmLink; 75 | 76 | // when an object is attached, collisions with it may be disabled, in case the 77 | // robot still keeps wobbling. 78 | bool disableCollisionsOnAttach; 79 | 80 | // flag holding whether an object is attached. Object name in \e attachedObjName 81 | bool attached; 82 | // name of the object currently attached. 
83 | std::string attachedObjName; 84 | }; 85 | 86 | } 87 | #endif // GAZEBO_GAZEBOGRASPGRIPPER_H 88 | -------------------------------------------------------------------------------- /gazebo_grasp_plugin/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | gazebo_grasp_plugin 4 | 1.0.1 5 | 6 | Gazebo Model plugin(s) which handle/help grasping in Gazebo. 7 | 8 | 9 | 10 | Jennifer Buehler 11 | 12 | 13 | 14 | 15 | 16 | GPLv3 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | Jennifer Buehler 27 | 28 | 29 | 30 | 31 | catkin 32 | gazebo_ros 33 | gazebo 34 | geometry_msgs 35 | roscpp 36 | std_msgs 37 | gazebo_ros 38 | gazebo 39 | geometry_msgs 40 | roscpp 41 | std_msgs 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /gazebo_grasp_plugin/src/GazeboGraspGripper.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | 11 | using gazebo::GazeboGraspGripper; 12 | 13 | #define DEFAULT_FORCES_ANGLE_TOLERANCE 120 14 | #define DEFAULT_UPDATE_RATE 5 15 | #define DEFAULT_MAX_GRIP_COUNT 10 16 | #define DEFAULT_RELEASE_TOLERANCE 0.005 17 | #define DEFAULT_DISABLE_COLLISIONS_ON_ATTACH false 18 | 19 | GazeboGraspGripper::GazeboGraspGripper(): 20 | attached(false) 21 | { 22 | } 23 | 24 | GazeboGraspGripper::GazeboGraspGripper(const GazeboGraspGripper& o): 25 | model(o.model), 26 | gripperName(o.gripperName), 27 | linkNames(o.linkNames), 28 | collisionElems(o.collisionElems), 29 | fixedJoint(o.fixedJoint), 30 | palmLink(o.palmLink), 31 | disableCollisionsOnAttach(o.disableCollisionsOnAttach), 32 | attached(o.attached), 33 | attachedObjName(o.attachedObjName) 34 | {} 35 | 36 | GazeboGraspGripper::~GazeboGraspGripper() { 37 | this->model.reset(); 38 | } 39 | 40 | 41 | 42 | bool 
GazeboGraspGripper::Init(physics::ModelPtr& _model, 43 | const std::string& _gripperName, 44 | const std::string& palmLinkName, 45 | const std::vector& fingerLinkNames, 46 | bool _disableCollisionsOnAttach, 47 | std::map& _collisionElems) 48 | { 49 | this->gripperName=_gripperName; 50 | this->attached = false; 51 | this->disableCollisionsOnAttach = _disableCollisionsOnAttach; 52 | this->model = _model; 53 | physics::PhysicsEnginePtr physics = this->model->GetWorld()->GetPhysicsEngine(); 54 | this->fixedJoint = physics->CreateJoint("revolute"); 55 | 56 | this->palmLink = this->model->GetLink(palmLinkName); 57 | if (!this->palmLink) 58 | { 59 | gzerr << "GazeboGraspGripper: Palm link "<::const_iterator fingerIt=fingerLinkNames.begin(); 63 | fingerIt!=fingerLinkNames.end(); ++fingerIt) 64 | { 65 | physics::LinkPtr link = this->model->GetLink(*fingerIt); 66 | //std::cout<<"Got link "<Get()<GetChildCount(); ++j) 72 | { 73 | physics::CollisionPtr collision = link->GetCollision(j); 74 | std::string collName = collision->GetScopedName(); 75 | //collision->SetContactsEnabled(true); 76 | std::map::iterator collIter = collisionElems.find(collName); 77 | if (collIter != this->collisionElems.end()) { //this collision was already added before 78 | gzwarn <<"GazeboGraspGripper: Adding Gazebo collision link element "<collisionElems[collName] = collision; 82 | _collisionElems[collName] = collision; 83 | } 84 | } 85 | return !this->collisionElems.empty(); 86 | } 87 | 88 | 89 | 90 | const std::string& GazeboGraspGripper::getGripperName() const 91 | { 92 | return gripperName; 93 | } 94 | 95 | bool GazeboGraspGripper::hasLink(const std::string& linkName) const 96 | { 97 | for (std::vector::const_iterator it=linkNames.begin(); it!=linkNames.end(); ++it) 98 | { 99 | if (*it==linkName) return true; 100 | } 101 | return false; 102 | } 103 | 104 | bool GazeboGraspGripper::hasCollisionLink(const std::string& linkName) const 105 | { 106 | return collisionElems.find(linkName) != 
collisionElems.end(); 107 | } 108 | 109 | 110 | bool GazeboGraspGripper::isObjectAttached() const 111 | { 112 | return attached; 113 | } 114 | 115 | const std::string& GazeboGraspGripper::attachedObject() const 116 | { 117 | return attachedObjName; 118 | } 119 | 120 | 121 | 122 | // #define USE_MODEL_ATTACH // this only works if the object is a model in itself, which is usually not 123 | // the case. Leaving this in here anyway for documentation of what has been 124 | // tried, and for and later re-evaluation. 125 | bool GazeboGraspGripper::HandleAttach(const std::string& objName) 126 | { 127 | if (!this->palmLink) { 128 | gzwarn << "GazeboGraspGripper: palm link not found, not enforcing grasp hack!\n"<model->GetWorld(); 132 | if (!world.get()) 133 | { 134 | gzerr << "GazeboGraspGripper: world is NULL"<GetModel(objName); 139 | if (!obj.get()){ 140 | std::cerr<<"ERROR: Object ModelPtr "<GetLink()->GetWorldPose() - this->palmLink->GetWorldPose(); 144 | this->palmLink->AttachStaticModel(obj,diff); 145 | #else 146 | physics::CollisionPtr obj = boost::dynamic_pointer_cast(world->GetEntity(objName)); 147 | if (!obj.get()){ 148 | std::cerr<<"ERROR: Object "<GetLink()->GetWorldPose() - this->palmLink->GetWorldPose(); 152 | this->fixedJoint->Load(this->palmLink,obj->GetLink(), diff); 153 | this->fixedJoint->Init(); 154 | this->fixedJoint->SetHighStop(0, 0); 155 | this->fixedJoint->SetLowStop(0, 0); 156 | if (this->disableCollisionsOnAttach) { 157 | // we can disable collisions of the grasped object, because when the fingers keep colliding with 158 | // it, the fingers keep wobbling, which can create difficulties when moving the arm. 
159 | obj->GetLink()->SetCollideMode("none"); 160 | } 161 | #endif // USE_MODEL_ATTACH 162 | this->attached=true; 163 | this->attachedObjName=objName; 164 | return true; 165 | } 166 | 167 | void GazeboGraspGripper::HandleDetach(const std::string& objName) 168 | { 169 | physics::WorldPtr world = this->model->GetWorld(); 170 | if (!world.get()) 171 | { 172 | gzerr << "GazeboGraspGripper: world is NULL"<GetModel(objName); 177 | if (!obj.get()){ 178 | std::cerr<<"ERROR: Object ModelPtr "<palmLink->DetachStaticModel(objName); 182 | #else 183 | physics::CollisionPtr obj = boost::dynamic_pointer_cast(world->GetEntity(objName)); 184 | if (!obj.get()){ 185 | std::cerr<<"ERROR: Object "<disableCollisionsOnAttach) 189 | { 190 | obj->GetLink()->SetCollideMode("all"); 191 | } 192 | this->fixedJoint->Detach(); 193 | #endif // USE_MODEL_ATTACH 194 | this->attached=false; 195 | this->attachedObjName=""; 196 | } 197 | -------------------------------------------------------------------------------- /pr2_moveit/.setup_assistant: -------------------------------------------------------------------------------- 1 | moveit_setup_assistant_config: 2 | URDF: 3 | package: pr2_robot 4 | relative_path: urdf/pr2.urdf.xacro 5 | SRDF: 6 | relative_path: config/pr2.srdf 7 | CONFIG: 8 | author_name: Harsh Pandya 9 | author_email: harsh@electricmovement.com 10 | generated_timestamp: 1500498214 -------------------------------------------------------------------------------- /pr2_moveit/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(pr2_moveit) 3 | 4 | find_package(catkin REQUIRED) 5 | 6 | catkin_package() 7 | 8 | install(DIRECTORY launch DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 9 | PATTERN "setup_assistant.launch" EXCLUDE) 10 | install(DIRECTORY config DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}) 11 | -------------------------------------------------------------------------------- 
/pr2_moveit/config/controllers.yaml: -------------------------------------------------------------------------------- 1 | # Load joint trajectory controller 2 | controller_list: 3 | - name: pr2/right_arm_controller 4 | action_ns: follow_joint_trajectory 5 | type: FollowJointTrajectory 6 | default: true 7 | joints: 8 | - right_shoulder_pan_joint 9 | - right_shoulder_lift_joint 10 | - right_upper_arm_roll_joint 11 | - right_elbow_flex_joint 12 | - right_forearm_roll_joint 13 | - right_wrist_flex_joint 14 | - right_wrist_roll_joint 15 | - name: pr2/left_arm_controller 16 | action_ns: follow_joint_trajectory 17 | type: FollowJointTrajectory 18 | default: true 19 | joints: 20 | - left_shoulder_pan_joint 21 | - left_shoulder_lift_joint 22 | - left_upper_arm_roll_joint 23 | - left_elbow_flex_joint 24 | - left_forearm_roll_joint 25 | - left_wrist_flex_joint 26 | - left_wrist_roll_joint 27 | 28 | - name: pr2/right_gripper_controller 29 | action_ns: follow_joint_trajectory 30 | type: FollowJointTrajectory 31 | default: true 32 | joints: 33 | - right_right_gripper_finger_joint 34 | - right_left_gripper_finger_joint 35 | 36 | - name: pr2/left_gripper_controller 37 | action_ns: follow_joint_trajectory 38 | type: FollowJointTrajectory 39 | default: true 40 | joints: 41 | - left_right_gripper_finger_joint 42 | - left_left_gripper_finger_joint 43 | -------------------------------------------------------------------------------- /pr2_moveit/config/fake_controllers.yaml: -------------------------------------------------------------------------------- 1 | controller_list: 2 | - name: fake_right_arm_controller 3 | joints: 4 | - right_shoulder_pan_joint 5 | - right_shoulder_lift_joint 6 | - right_upper_arm_roll_joint 7 | - right_elbow_flex_joint 8 | - right_forearm_roll_joint 9 | - right_wrist_flex_joint 10 | - right_wrist_roll_joint 11 | - name: fake_left_arm_controller 12 | joints: 13 | - left_shoulder_pan_joint 14 | - left_shoulder_lift_joint 15 | - left_upper_arm_roll_joint 16 | - 
left_elbow_flex_joint 17 | - left_forearm_roll_joint 18 | - left_wrist_flex_joint 19 | - left_wrist_roll_joint 20 | - name: fake_right_gripper_controller 21 | joints: 22 | - right_left_gripper_finger_joint 23 | - right_right_gripper_finger_joint 24 | - name: fake_left_gripper_controller 25 | joints: 26 | - left_left_gripper_finger_joint 27 | - left_right_gripper_finger_joint -------------------------------------------------------------------------------- /pr2_moveit/config/grasp.yaml: -------------------------------------------------------------------------------- 1 | grasp_list: 2 | - biscuits: 3 | position: 4 | x: 0.56 5 | y: -0.24 6 | z: 0.97 7 | orientation: 8 | x: 0.5 9 | y: 0.5 10 | z: -0.5 11 | w: 0.5 12 | soap: 13 | position: 14 | x: 0.57 15 | y: -0.02 16 | z: 0.93 17 | orientation: 18 | x: -0.259 19 | y: 0.658 20 | z: 0.259 21 | w: 0.658 22 | soap2: 23 | position: 24 | x: 0.46 25 | y: 0.23 26 | z: 0.95 27 | orientation: 28 | x: 0.293 29 | y: 0.643 30 | z: -0.293 31 | w: 0.643 32 | - biscuits: 33 | position: 34 | x: 0.58 35 | y: -0.24 36 | z: 0.97 37 | orientation: 38 | x: 0.5 39 | y: 0.5 40 | z: -0.5 41 | w: 0.5 42 | soap: 43 | position: 44 | x: 0.585 45 | y: 0 46 | z: 0.92 47 | orientation: 48 | x: 0.298 49 | y: 0.641 50 | z: -0.298 51 | w: 0.642 52 | book: 53 | position: 54 | x: 0.59 55 | y: 0.28 56 | z: 1.03 57 | orientation: 58 | x: 0.262 59 | y: 0.657 60 | z: -0.262 61 | w: 0.657 62 | soap2: 63 | position: 64 | x: 0.46 65 | y: 0.23 66 | z: 0.95 67 | orientation: 68 | x: 0.293 69 | y: 0.643 70 | z: -0.293 71 | w: 0.643 72 | glue: 73 | position: 74 | x: 0.634 75 | y: 0.13 76 | z: 0.96 77 | orientation: 78 | x: 0.140 79 | y: 0.693 80 | z: -0.140 81 | w: 0.693 82 | - sticky_notes: 83 | position: 84 | x: 0.45 85 | y: 0.22 86 | z: 0.97 87 | orientation: 88 | x: 0.5 89 | y: 0.5 90 | z: -0.5 91 | w: 0.5 92 | book: 93 | position: 94 | x: 0.5 95 | y: 0.08 96 | z: 1.03 97 | orientation: 98 | x: -0.259 99 | y: 0.658 100 | z: 0.259 101 | w: 0.658 102 | snacks: 103 
| position: 104 | x: 0.44 105 | y: -0.34 106 | z: 1.05 107 | orientation: 108 | x: 0.325 109 | y: 0.657 110 | z: -0.261 111 | w: 0.628 112 | biscuits: 113 | position: 114 | x: 0.6 115 | y: -0.22 116 | z: 0.97 117 | orientation: 118 | x: 0.34 119 | y: 0.62 120 | z: -0.34 121 | w: 0.62 122 | eraser: 123 | position: 124 | x: 0.61 125 | y: 0.27 126 | z: 0.9 127 | orientation: 128 | x: 0.298 129 | y: 0.641 130 | z: -0.298 131 | w: 0.642 132 | soap2: 133 | position: 134 | x: 0.46 135 | y: -0.05 136 | z: 0.95 137 | orientation: 138 | x: 0.65 139 | y: 0.279 140 | z: -0.65 141 | w: 0.279 142 | soap: 143 | position: 144 | x: 0.69 145 | y: 0.01 146 | z: 0.92 147 | orientation: 148 | x: 0.293 149 | y: 0.643 150 | z: -0.293 151 | w: 0.643 152 | glue: 153 | position: 154 | x: 0.62 155 | y: 0.13 156 | z: 0.96 157 | orientation: 158 | x: 0.140 159 | y: 0.693 160 | z: -0.140 161 | w: 0.693 162 | -------------------------------------------------------------------------------- /pr2_moveit/config/joint_limits.yaml: -------------------------------------------------------------------------------- 1 | # joint_limits.yaml allows the dynamics properties specified in the URDF to be overwritten or augmented as needed 2 | # Specific joint properties can be changed with the keys [max_position, min_position, max_velocity, max_acceleration] 3 | # Joint limits can be turned off with [has_velocity_limits, has_acceleration_limits] 4 | joint_limits: 5 | left_elbow_flex_joint: 6 | has_velocity_limits: true 7 | max_velocity: 3.3 8 | has_acceleration_limits: false 9 | max_acceleration: 0 10 | left_forearm_roll_joint: 11 | has_velocity_limits: true 12 | max_velocity: 3.6 13 | has_acceleration_limits: false 14 | max_acceleration: 0 15 | left_left_gripper_finger_joint: 16 | has_velocity_limits: true 17 | max_velocity: 0.05 18 | has_acceleration_limits: false 19 | max_acceleration: 0 20 | left_right_gripper_finger_joint: 21 | has_velocity_limits: true 22 | max_velocity: 0.05 23 | has_acceleration_limits: 
false 24 | max_acceleration: 0 25 | left_shoulder_lift_joint: 26 | has_velocity_limits: true 27 | max_velocity: 2.082 28 | has_acceleration_limits: false 29 | max_acceleration: 0 30 | left_shoulder_pan_joint: 31 | has_velocity_limits: true 32 | max_velocity: 2.088 33 | has_acceleration_limits: false 34 | max_acceleration: 0 35 | left_upper_arm_roll_joint: 36 | has_velocity_limits: true 37 | max_velocity: 3.27 38 | has_acceleration_limits: false 39 | max_acceleration: 0 40 | left_wrist_flex_joint: 41 | has_velocity_limits: true 42 | max_velocity: 3.078 43 | has_acceleration_limits: false 44 | max_acceleration: 0 45 | left_wrist_roll_joint: 46 | has_velocity_limits: true 47 | max_velocity: 3.6 48 | has_acceleration_limits: false 49 | max_acceleration: 0 50 | right_elbow_flex_joint: 51 | has_velocity_limits: true 52 | max_velocity: 3.3 53 | has_acceleration_limits: false 54 | max_acceleration: 0 55 | right_forearm_roll_joint: 56 | has_velocity_limits: true 57 | max_velocity: 3.6 58 | has_acceleration_limits: false 59 | max_acceleration: 0 60 | right_left_gripper_finger_joint: 61 | has_velocity_limits: true 62 | max_velocity: 0.05 63 | has_acceleration_limits: false 64 | max_acceleration: 0 65 | right_right_gripper_finger_joint: 66 | has_velocity_limits: true 67 | max_velocity: 0.05 68 | has_acceleration_limits: false 69 | max_acceleration: 0 70 | right_shoulder_lift_joint: 71 | has_velocity_limits: true 72 | max_velocity: 2.082 73 | has_acceleration_limits: false 74 | max_acceleration: 0 75 | right_shoulder_pan_joint: 76 | has_velocity_limits: true 77 | max_velocity: 2.088 78 | has_acceleration_limits: false 79 | max_acceleration: 0 80 | right_upper_arm_roll_joint: 81 | has_velocity_limits: true 82 | max_velocity: 3.27 83 | has_acceleration_limits: false 84 | max_acceleration: 0 85 | right_wrist_flex_joint: 86 | has_velocity_limits: true 87 | max_velocity: 3.078 88 | has_acceleration_limits: false 89 | max_acceleration: 0 90 | right_wrist_roll_joint: 91 | 
has_velocity_limits: true 92 | max_velocity: 3.6 93 | has_acceleration_limits: false 94 | max_acceleration: 0 -------------------------------------------------------------------------------- /pr2_moveit/config/kinematics.yaml: -------------------------------------------------------------------------------- 1 | right_arm: 2 | kinematics_solver: kdl_kinematics_plugin/KDLKinematicsPlugin 3 | kinematics_solver_search_resolution: 0.005 4 | kinematics_solver_timeout: 0.005 5 | kinematics_solver_attempts: 3 6 | left_arm: 7 | kinematics_solver: kdl_kinematics_plugin/KDLKinematicsPlugin 8 | kinematics_solver_search_resolution: 0.005 9 | kinematics_solver_timeout: 0.005 10 | kinematics_solver_attempts: 3 -------------------------------------------------------------------------------- /pr2_moveit/config/ompl_planning.yaml: -------------------------------------------------------------------------------- 1 | planner_configs: 2 | SBLkConfigDefault: 3 | type: geometric::SBL 4 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 5 | ESTkConfigDefault: 6 | type: geometric::EST 7 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0 setup() 8 | goal_bias: 0.05 # When close to goal select goal, with this probability. default: 0.05 9 | LBKPIECEkConfigDefault: 10 | type: geometric::LBKPIECE 11 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 12 | border_fraction: 0.9 # Fraction of time focused on boarder default: 0.9 13 | min_valid_path_fraction: 0.5 # Accept partially valid moves above fraction. default: 0.5 14 | BKPIECEkConfigDefault: 15 | type: geometric::BKPIECE 16 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 17 | border_fraction: 0.9 # Fraction of time focused on boarder default: 0.9 18 | failed_expansion_score_factor: 0.5 # When extending motion fails, scale score by factor. 
default: 0.5 19 | min_valid_path_fraction: 0.5 # Accept partially valid moves above fraction. default: 0.5 20 | KPIECEkConfigDefault: 21 | type: geometric::KPIECE 22 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 23 | goal_bias: 0.05 # When close to goal select goal, with this probability. default: 0.05 24 | border_fraction: 0.9 # Fraction of time focused on boarder default: 0.9 (0.0,1.] 25 | failed_expansion_score_factor: 0.5 # When extending motion fails, scale score by factor. default: 0.5 26 | min_valid_path_fraction: 0.5 # Accept partially valid moves above fraction. default: 0.5 27 | RRTkConfigDefault: 28 | type: geometric::RRT 29 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 30 | goal_bias: 0.05 # When close to goal select goal, with this probability? default: 0.05 31 | RRTConnectkConfigDefault: 32 | type: geometric::RRTConnect 33 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 34 | RRTstarkConfigDefault: 35 | type: geometric::RRTstar 36 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 37 | goal_bias: 0.05 # When close to goal select goal, with this probability? default: 0.05 38 | delay_collision_checking: 1 # Stop collision checking as soon as C-free parent found. default 1 39 | TRRTkConfigDefault: 40 | type: geometric::TRRT 41 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 42 | goal_bias: 0.05 # When close to goal select goal, with this probability? default: 0.05 43 | max_states_failed: 10 # when to start increasing temp. default: 10 44 | temp_change_factor: 2.0 # how much to increase or decrease temp. default: 2.0 45 | min_temperature: 10e-10 # lower limit of temp change. default: 10e-10 46 | init_temperature: 10e-6 # initial temperature. 
default: 10e-6 47 | frountier_threshold: 0.0 # dist new state to nearest neighbor to disqualify as frontier. default: 0.0 set in setup() 48 | frountierNodeRatio: 0.1 # 1/10, or 1 nonfrontier for every 10 frontier. default: 0.1 49 | k_constant: 0.0 # value used to normalize expresssion. default: 0.0 set in setup() 50 | PRMkConfigDefault: 51 | type: geometric::PRM 52 | max_nearest_neighbors: 10 # use k nearest neighbors. default: 10 53 | PRMstarkConfigDefault: 54 | type: geometric::PRMstar 55 | FMTkConfigDefault: 56 | type: geometric::FMT 57 | num_samples: 1000 # number of states that the planner should sample. default: 1000 58 | radius_multiplier: 1.1 # multiplier used for the nearest neighbors search radius. default: 1.1 59 | nearest_k: 1 # use Knearest strategy. default: 1 60 | cache_cc: 1 # use collision checking cache. default: 1 61 | heuristics: 0 # activate cost to go heuristics. default: 0 62 | extended_fmt: 1 # activate the extended FMT*: adding new samples if planner does not finish successfully. default: 1 63 | BFMTkConfigDefault: 64 | type: geometric::BFMT 65 | num_samples: 1000 # number of states that the planner should sample. default: 1000 66 | radius_multiplier: 1.0 # multiplier used for the nearest neighbors search radius. default: 1.0 67 | nearest_k: 1 # use the Knearest strategy. default: 1 68 | balanced: 0 # exploration strategy: balanced true expands one tree every iteration. False will select the tree with lowest maximum cost to go. default: 1 69 | optimality: 1 # termination strategy: optimality true finishes when the best possible path is found. Otherwise, the algorithm will finish when the first feasible path is found. default: 1 70 | heuristics: 1 # activates cost to go heuristics. default: 1 71 | cache_cc: 1 # use the collision checking cache. default: 1 72 | extended_fmt: 1 # Activates the extended FMT*: adding new samples if planner does not finish successfully. 
default: 1 73 | PDSTkConfigDefault: 74 | type: geometric::PDST 75 | STRIDEkConfigDefault: 76 | type: geometric::STRIDE 77 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 78 | goal_bias: 0.05 # When close to goal select goal, with this probability. default: 0.05 79 | use_projected_distance: 0 # whether nearest neighbors are computed based on distances in a projection of the state rather distances in the state space itself. default: 0 80 | degree: 16 # desired degree of a node in the Geometric Near-neightbor Access Tree (GNAT). default: 16 81 | max_degree: 18 # max degree of a node in the GNAT. default: 12 82 | min_degree: 12 # min degree of a node in the GNAT. default: 12 83 | max_pts_per_leaf: 6 # max points per leaf in the GNAT. default: 6 84 | estimated_dimension: 0.0 # estimated dimension of the free space. default: 0.0 85 | min_valid_path_fraction: 0.2 # Accept partially valid moves above fraction. default: 0.2 86 | BiTRRTkConfigDefault: 87 | type: geometric::BiTRRT 88 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 89 | temp_change_factor: 0.1 # how much to increase or decrease temp. default: 0.1 90 | init_temperature: 100 # initial temperature. default: 100 91 | frountier_threshold: 0.0 # dist new state to nearest neighbor to disqualify as frontier. default: 0.0 set in setup() 92 | frountier_node_ratio: 0.1 # 1/10, or 1 nonfrontier for every 10 frontier. default: 0.1 93 | cost_threshold: 1e300 # the cost threshold. Any motion cost that is not better will not be expanded. default: inf 94 | LBTRRTkConfigDefault: 95 | type: geometric::LBTRRT 96 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 97 | goal_bias: 0.05 # When close to goal select goal, with this probability. default: 0.05 98 | epsilon: 0.4 # optimality approximation factor. 
default: 0.4 99 | BiESTkConfigDefault: 100 | type: geometric::BiEST 101 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 102 | ProjESTkConfigDefault: 103 | type: geometric::ProjEST 104 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 105 | goal_bias: 0.05 # When close to goal select goal, with this probability. default: 0.05 106 | LazyPRMkConfigDefault: 107 | type: geometric::LazyPRM 108 | range: 0.0 # Max motion added to tree. ==> maxDistance_ default: 0.0, if 0.0, set on setup() 109 | LazyPRMstarkConfigDefault: 110 | type: geometric::LazyPRMstar 111 | SPARSkConfigDefault: 112 | type: geometric::SPARS 113 | stretch_factor: 3.0 # roadmap spanner stretch factor. multiplicative upper bound on path quality. It does not make sense to make this parameter more than 3. default: 3.0 114 | sparse_delta_fraction: 0.25 # delta fraction for connection distance. This value represents the visibility range of sparse samples. default: 0.25 115 | dense_delta_fraction: 0.001 # delta fraction for interface detection. default: 0.001 116 | max_failures: 1000 # maximum consecutive failure limit. default: 1000 117 | SPARStwokConfigDefault: 118 | type: geometric::SPARStwo 119 | stretch_factor: 3.0 # roadmap spanner stretch factor. multiplicative upper bound on path quality. It does not make sense to make this parameter more than 3. default: 3.0 120 | sparse_delta_fraction: 0.25 # delta fraction for connection distance. This value represents the visibility range of sparse samples. default: 0.25 121 | dense_delta_fraction: 0.001 # delta fraction for interface detection. default: 0.001 122 | max_failures: 5000 # maximum consecutive failure limit. 
default: 5000 123 | right_arm: 124 | planner_configs: 125 | - SBLkConfigDefault 126 | - ESTkConfigDefault 127 | - LBKPIECEkConfigDefault 128 | - BKPIECEkConfigDefault 129 | - KPIECEkConfigDefault 130 | - RRTkConfigDefault 131 | - RRTConnectkConfigDefault 132 | - RRTstarkConfigDefault 133 | - TRRTkConfigDefault 134 | - PRMkConfigDefault 135 | - PRMstarkConfigDefault 136 | - FMTkConfigDefault 137 | - BFMTkConfigDefault 138 | - PDSTkConfigDefault 139 | - STRIDEkConfigDefault 140 | - BiTRRTkConfigDefault 141 | - LBTRRTkConfigDefault 142 | - BiESTkConfigDefault 143 | - ProjESTkConfigDefault 144 | - LazyPRMkConfigDefault 145 | - LazyPRMstarkConfigDefault 146 | - SPARSkConfigDefault 147 | - SPARStwokConfigDefault 148 | projection_evaluator: joints(right_shoulder_pan_joint,right_shoulder_lift_joint) 149 | longest_valid_segment_fraction: 0.005 150 | left_arm: 151 | planner_configs: 152 | - SBLkConfigDefault 153 | - ESTkConfigDefault 154 | - LBKPIECEkConfigDefault 155 | - BKPIECEkConfigDefault 156 | - KPIECEkConfigDefault 157 | - RRTkConfigDefault 158 | - RRTConnectkConfigDefault 159 | - RRTstarkConfigDefault 160 | - TRRTkConfigDefault 161 | - PRMkConfigDefault 162 | - PRMstarkConfigDefault 163 | - FMTkConfigDefault 164 | - BFMTkConfigDefault 165 | - PDSTkConfigDefault 166 | - STRIDEkConfigDefault 167 | - BiTRRTkConfigDefault 168 | - LBTRRTkConfigDefault 169 | - BiESTkConfigDefault 170 | - ProjESTkConfigDefault 171 | - LazyPRMkConfigDefault 172 | - LazyPRMstarkConfigDefault 173 | - SPARSkConfigDefault 174 | - SPARStwokConfigDefault 175 | projection_evaluator: joints(left_shoulder_pan_joint,left_shoulder_lift_joint) 176 | longest_valid_segment_fraction: 0.005 177 | right_gripper: 178 | planner_configs: 179 | - SBLkConfigDefault 180 | - ESTkConfigDefault 181 | - LBKPIECEkConfigDefault 182 | - BKPIECEkConfigDefault 183 | - KPIECEkConfigDefault 184 | - RRTkConfigDefault 185 | - RRTConnectkConfigDefault 186 | - RRTstarkConfigDefault 187 | - TRRTkConfigDefault 188 | - 
PRMkConfigDefault 189 | - PRMstarkConfigDefault 190 | - FMTkConfigDefault 191 | - BFMTkConfigDefault 192 | - PDSTkConfigDefault 193 | - STRIDEkConfigDefault 194 | - BiTRRTkConfigDefault 195 | - LBTRRTkConfigDefault 196 | - BiESTkConfigDefault 197 | - ProjESTkConfigDefault 198 | - LazyPRMkConfigDefault 199 | - LazyPRMstarkConfigDefault 200 | - SPARSkConfigDefault 201 | - SPARStwokConfigDefault 202 | left_gripper: 203 | planner_configs: 204 | - SBLkConfigDefault 205 | - ESTkConfigDefault 206 | - LBKPIECEkConfigDefault 207 | - BKPIECEkConfigDefault 208 | - KPIECEkConfigDefault 209 | - RRTkConfigDefault 210 | - RRTConnectkConfigDefault 211 | - RRTstarkConfigDefault 212 | - TRRTkConfigDefault 213 | - PRMkConfigDefault 214 | - PRMstarkConfigDefault 215 | - FMTkConfigDefault 216 | - BFMTkConfigDefault 217 | - PDSTkConfigDefault 218 | - STRIDEkConfigDefault 219 | - BiTRRTkConfigDefault 220 | - LBTRRTkConfigDefault 221 | - BiESTkConfigDefault 222 | - ProjESTkConfigDefault 223 | - LazyPRMkConfigDefault 224 | - LazyPRMstarkConfigDefault 225 | - SPARSkConfigDefault 226 | - SPARStwokConfigDefault -------------------------------------------------------------------------------- /pr2_moveit/launch/default_warehouse_db.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /pr2_moveit/launch/demo.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | [/move_group/fake_controller_joint_states] 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- 
/pr2_moveit/launch/fake_moveit_controller_manager.launch.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /pr2_moveit/launch/filter.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | filter_limit_negative: False 10 | leaf_size: 0.02 11 | 12 | 13 | 14 | 15 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | filter_field_name: z 31 | filter_limit_min: 0.5 32 | filter_limit_max: 0.62 33 | filter_limit_negative: False 34 | 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /pr2_moveit/launch/joystick_control.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /pr2_moveit/launch/move_group.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 56 | 57 | 58 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | -------------------------------------------------------------------------------- /pr2_moveit/launch/moveit_rviz.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /pr2_moveit/launch/ompl_planning_pipeline.launch.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 8 | 13 | 14 | 15 | 16 | 17 | 18 
| 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /pr2_moveit/launch/planning_context.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /pr2_moveit/launch/planning_pipeline.launch.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /pr2_moveit/launch/pr2_moveit.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 8 | 9 | -------------------------------------------------------------------------------- /pr2_moveit/launch/pr2_moveit_controller_manager.launch.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /pr2_moveit/launch/pr2_moveit_sensor_manager.launch.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /pr2_moveit/launch/run_benchmark_ompl.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /pr2_moveit/launch/sensor_manager.launch.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 
-------------------------------------------------------------------------------- /pr2_moveit/launch/setup_assistant.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /pr2_moveit/launch/trajectory_execution.launch.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /pr2_moveit/launch/warehouse.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /pr2_moveit/launch/warehouse_settings.launch.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /pr2_moveit/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | pr2_moveit 4 | 0.3.0 5 | 6 | An automatically generated package with all the configuration and launch files for using the pr2 with the MoveIt! 
Motion Planning Framework 7 | 8 | Harsh Pandya 9 | Harsh Pandya 10 | 11 | BSD 12 | 13 | http://moveit.ros.org/ 14 | https://github.com/ros-planning/moveit/issues 15 | https://github.com/ros-planning/moveit 16 | 17 | catkin 18 | 19 | moveit_ros_move_group 20 | moveit_kinematics 21 | moveit_planners_ompl 22 | moveit_ros_visualization 23 | joint_state_publisher 24 | robot_state_publisher 25 | xacro 26 | 27 | 28 | pr2_robot 29 | pr2_robot 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /pr2_robot/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/.DS_Store -------------------------------------------------------------------------------- /pr2_robot/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(pr2_robot) 3 | 4 | ## Compile as C++11, supported in ROS Kinetic and newer 5 | add_compile_options(-std=c++11) 6 | 7 | ## Find catkin macros and libraries 8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 9 | ## is used, also find other catkin packages 10 | find_package(catkin REQUIRED COMPONENTS 11 | message_generation 12 | moveit_core 13 | moveit_ros_planning 14 | moveit_ros_planning_interface 15 | pluginlib 16 | pcl_conversions 17 | pcl_ros 18 | geometric_shapes 19 | gazebo_msgs 20 | gazebo_grasp_plugin 21 | moveit_visual_tools 22 | roscpp 23 | rospy 24 | roslint 25 | sensor_msgs 26 | ) 27 | find_package(Eigen3 REQUIRED) 28 | find_package(Boost REQUIRED) 29 | 30 | ## System dependencies are found with CMake's conventions 31 | # find_package(Boost REQUIRED COMPONENTS system) 32 | 33 | 34 | ## Uncomment this if the package has a setup.py. 
This macro ensures 35 | ## modules and global scripts declared therein get installed 36 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 37 | # catkin_python_setup() 38 | 39 | ################################################ 40 | ## Declare ROS messages, services and actions ## 41 | ################################################ 42 | 43 | ## To declare and build messages, services or actions from within this 44 | ## package, follow these steps: 45 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 46 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 47 | ## * In the file package.xml: 48 | ## * add a build_depend tag for "message_generation" 49 | ## * add a build_depend and a run_depend tag for each package in MSG_DEP_SET 50 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in 51 | ## but can be declared for certainty nonetheless: 52 | ## * add a run_depend tag for "message_runtime" 53 | ## * In this file (CMakeLists.txt): 54 | ## * add "message_generation" and every package in MSG_DEP_SET to 55 | ## find_package(catkin REQUIRED COMPONENTS ...) 56 | ## * add "message_runtime" and every package in MSG_DEP_SET to 57 | ## catkin_package(CATKIN_DEPENDS ...) 58 | ## * uncomment the add_*_files sections below as needed 59 | ## and list every .msg/.srv/.action file to be processed 60 | ## * uncomment the generate_messages entry below 61 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
62 | 63 | ## Generate messages in the 'msg' folder 64 | # add_message_files( 65 | # FILES 66 | # Message1.msg 67 | # Message2.msg 68 | # ) 69 | 70 | ## Generate services in the 'srv' folder 71 | add_service_files( 72 | FILES 73 | PickPlace.srv 74 | Grasp.srv 75 | ) 76 | 77 | ## Generate actions in the 'action' folder 78 | # add_action_files( 79 | # FILES 80 | # Action1.action 81 | # Action2.action 82 | # ) 83 | 84 | ## Generate added messages and services with any dependencies listed here 85 | generate_messages( 86 | DEPENDENCIES 87 | std_msgs 88 | geometry_msgs 89 | ) 90 | 91 | ################################################ 92 | ## Declare ROS dynamic reconfigure parameters ## 93 | ################################################ 94 | 95 | ## To declare and build dynamic reconfigure parameters within this 96 | ## package, follow these steps: 97 | ## * In the file package.xml: 98 | ## * add a build_depend and a run_depend tag for "dynamic_reconfigure" 99 | ## * In this file (CMakeLists.txt): 100 | ## * add "dynamic_reconfigure" to 101 | ## find_package(catkin REQUIRED COMPONENTS ...) 
102 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 103 | ## and list every .cfg file to be processed 104 | 105 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 106 | # generate_dynamic_reconfigure_options( 107 | # cfg/DynReconf1.cfg 108 | # cfg/DynReconf2.cfg 109 | # ) 110 | 111 | ################################### 112 | ## catkin specific configuration ## 113 | ################################### 114 | ## The catkin_package macro generates cmake config files for your package 115 | ## Declare things to be passed to dependent projects 116 | ## INCLUDE_DIRS: uncomment this if you package contains header files 117 | ## LIBRARIES: libraries you create in this project that dependent projects also need 118 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 119 | ## DEPENDS: system dependencies of this project that dependent projects also need 120 | catkin_package( 121 | # INCLUDE_DIRS include 122 | # LIBRARIES pr2_robot 123 | # CATKIN_DEPENDS sensor_msgs 124 | # DEPENDS system_lib 125 | ) 126 | 127 | ########### 128 | ## Build ## 129 | ########### 130 | 131 | ## Specify additional locations of header files 132 | ## Your package locations should be listed before other locations 133 | include_directories(include ${Boost_INCLUDE_DIRS} ${catkin_INCLUDE_DIRS} ${EIGEN3_INCLUDE_DIRS}) 134 | 135 | ## Declare a C++ library 136 | # add_library(${PROJECT_NAME} 137 | # src/${PROJECT_NAME}/pr2_robot.cpp 138 | # ) 139 | 140 | ## Add cmake target dependencies of the library 141 | ## as an example, code may need to be generated before libraries 142 | ## either from message generation or dynamic reconfigure 143 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 144 | 145 | ## Declare a C++ executable 146 | ## With catkin_make all packages are built within a single CMake context 147 | ## The recommended prefix ensures that target names across packages don't collide 148 | 
add_executable(pr2_motion src/pr2_motion.cpp) 149 | add_executable(pr2_pick_place_server src/pr2_pick_place_server.cpp) 150 | add_executable(pr2_cloud_transformer src/pr2_cloud_transformer.cpp) 151 | 152 | ## Rename C++ executable without prefix 153 | ## The above recommended prefix causes long target names, the following renames the 154 | ## target back to the shorter version for ease of user use 155 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 156 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 157 | 158 | ## Add cmake target dependencies of the executable 159 | ## same as for the library above 160 | add_dependencies(pr2_motion ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 161 | add_dependencies(pr2_pick_place_server ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS} ${PROJECT_NAME}_generate_messages_cpp) 162 | add_dependencies(pr2_cloud_transformer ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 163 | 164 | ## Specify libraries to link a library or executable target against 165 | target_link_libraries(pr2_motion ${catkin_LIBRARIES}) 166 | target_link_libraries(pr2_pick_place_server ${catkin_LIBRARIES}) 167 | target_link_libraries(pr2_cloud_transformer ${catkin_LIBRARIES}) 168 | 169 | ############# 170 | ## Install ## 171 | ############# 172 | 173 | # all install targets should use catkin DESTINATION variables 174 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 175 | 176 | ## Mark executable scripts (Python etc.) 
for installation 177 | ## in contrast to setup.py, you can choose the destination 178 | # install(PROGRAMS 179 | # scripts/my_python_script 180 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 181 | # ) 182 | 183 | ## Mark executables and/or libraries for installation 184 | # install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node 185 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 186 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 187 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 188 | # ) 189 | 190 | ## Mark cpp header files for installation 191 | # install(DIRECTORY include/${PROJECT_NAME}/ 192 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 193 | # FILES_MATCHING PATTERN "*.h" 194 | # PATTERN ".svn" EXCLUDE 195 | # ) 196 | 197 | ## Mark other files for installation (e.g. launch and bag files, etc.) 198 | # install(FILES 199 | # # myfile1 200 | # # myfile2 201 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 202 | # ) 203 | 204 | ############# 205 | ## Testing ## 206 | ############# 207 | 208 | ## Add gtest based cpp test target and link libraries 209 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_pr2_robot.cpp) 210 | # if(TARGET ${PROJECT_NAME}-test) 211 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 212 | # endif() 213 | 214 | ## Add folders to be run by python nosetests 215 | # catkin_add_nosetests(test) 216 | -------------------------------------------------------------------------------- /pr2_robot/config/controllers.yaml: -------------------------------------------------------------------------------- 1 | pr2: 2 | #list of controllers 3 | joint_state_controller: 4 | type: joint_state_controller/JointStateController 5 | publish_rate: 50 6 | 7 | torso_lift_joint_controller: 8 | type: position_controllers/JointPositionController 9 | joint: torso_lift_joint 10 | pid: {p: 100.0, i: 0.1, d: 1.0} 11 | 12 | world_joint_controller: 13 | type: position_controllers/JointPositionController 14 | joint: 
world_joint 15 | pid: {p: 100.0, i: 0.1, d: 1.0} 16 | 17 | right_gripper_controller: 18 | type: "effort_controllers/JointTrajectoryController" 19 | joints: 20 | - right_right_gripper_finger_joint 21 | - right_left_gripper_finger_joint 22 | gains: 23 | right_right_gripper_finger_joint: {p: 100, i: 1, d: 10, i_clamp: 1.0} 24 | right_left_gripper_finger_joint: {p: 100, i: 1, d: 10, i_clamp: 1.0} 25 | constraints: 26 | goal_time: 3.0 27 | right_right_gripper_finger_joint: 28 | goal: 0.02 29 | right_left_gripper_finger_joint: 30 | goal: 0.02 31 | 32 | left_gripper_controller: 33 | type: "effort_controllers/JointTrajectoryController" 34 | joints: 35 | - left_right_gripper_finger_joint 36 | - left_left_gripper_finger_joint 37 | gains: 38 | left_right_gripper_finger_joint: {p: 100, i: 1, d: 10, i_clamp: 1.0} 39 | left_left_gripper_finger_joint: {p: 100, i: 1, d: 10, i_clamp: 1.0} 40 | constraints: 41 | goal_time: 3.0 42 | left_right_gripper_finger_joint: 43 | goal: 0.02 44 | left_left_gripper_finger_joint: 45 | goal: 0.02 46 | 47 | right_arm_controller: 48 | type: "position_controllers/JointTrajectoryController" 49 | joints: 50 | - right_shoulder_pan_joint 51 | - right_shoulder_lift_joint 52 | - right_upper_arm_roll_joint 53 | - right_elbow_flex_joint 54 | - right_forearm_roll_joint 55 | - right_wrist_flex_joint 56 | - right_wrist_roll_joint 57 | gains: 58 | right_shoulder_pan_joint: {p: 100, i: 0.01, d: 1} 59 | right_shoulder_lift_joint: {p: 100, i: 0.01, d: 1} 60 | right_upper_arm_roll_joint: {p: 100, i: 0.01, d: 1} 61 | right_elbow_flex_joint: {p: 100, i: 0.01, d: 1} 62 | right_forearm_roll_joint: {p: 100, i: 0.01, d: 1} 63 | right_wrist_flex_joint: {p: 100, i: 0.01, d: 1} 64 | right_wrist_roll_joint: {p: 100, i: 0.01, d: 1} 65 | 66 | constraints: 67 | goal_time: 10.0 # Override default 68 | 69 | state_publish_rate: 50 # Override default 70 | action_monitor_rate: 30 # Override default 71 | stop_trajectory_duration: 0 # Override default 72 | 73 | left_arm_controller: 74 
| type: "position_controllers/JointTrajectoryController" 75 | joints: 76 | - left_shoulder_pan_joint 77 | - left_shoulder_lift_joint 78 | - left_upper_arm_roll_joint 79 | - left_elbow_flex_joint 80 | - left_forearm_roll_joint 81 | - left_wrist_flex_joint 82 | - left_wrist_roll_joint 83 | gains: 84 | left_shoulder_pan_joint: {p: 100, i: 0.01, d: 1} 85 | left_shoulder_lift_joint: {p: 100, i: 0.01, d: 1} 86 | left_upper_arm_roll_joint: {p: 100, i: 0.01, d: 1} 87 | left_elbow_flex_joint: {p: 100, i: 0.01, d: 1} 88 | left_forearm_roll_joint: {p: 100, i: 0.01, d: 1} 89 | left_wrist_flex_joint: {p: 100, i: 0.01, d: 1} 90 | left_wrist_roll_joint: {p: 100, i: 0.01, d: 1} 91 | 92 | constraints: 93 | goal_time: 10.0 # Override default 94 | 95 | state_publish_rate: 50 # Override default 96 | action_monitor_rate: 30 # Override default 97 | stop_trajectory_duration: 0 # Override default 98 | -------------------------------------------------------------------------------- /pr2_robot/config/dropbox.yaml: -------------------------------------------------------------------------------- 1 | dropbox: 2 | - name: left 3 | group: red 4 | position: [0,0.71,0.605] 5 | - name: right 6 | group: green 7 | position: [0,-0.71,0.605] 8 | -------------------------------------------------------------------------------- /pr2_robot/config/output.yaml: -------------------------------------------------------------------------------- 1 | # This is an example output file. You need to submit three of these, one for each tabletop setup. 
2 | object_list: 3 | - test_scene_num: 1 4 | arm_name: left 5 | object_name: car 6 | pick_pose: 7 | position: 8 | x: 0 9 | y: 0 10 | z: 0 11 | orientation: 12 | x: 0 13 | y: 0 14 | z: 0 15 | w: 0 16 | place_pose: 17 | position: 18 | x: 0 19 | y: 0 20 | z: 0 21 | orientation: 22 | x: 0 23 | y: 0 24 | z: 0 25 | w: 0 26 | - test_scene_num: 1 27 | arm_name: right 28 | object_name: battery 29 | pick_pose: 30 | position: 31 | x: 0 32 | y: 0 33 | z: 0 34 | orientation: 35 | x: 0 36 | y: 0 37 | z: 0 38 | w: 0 39 | place_pose: 40 | position: 41 | x: 0 42 | y: 0 43 | z: 0 44 | orientation: 45 | x: 0 46 | y: 0 47 | z: 0 48 | w: 0 49 | -------------------------------------------------------------------------------- /pr2_robot/config/pick_list_1.yaml: -------------------------------------------------------------------------------- 1 | object_list: 2 | - name: biscuits 3 | group: green 4 | - name: soap 5 | group: green 6 | - name: soap2 7 | group: red 8 | -------------------------------------------------------------------------------- /pr2_robot/config/pick_list_2.yaml: -------------------------------------------------------------------------------- 1 | object_list: 2 | - name: biscuits 3 | group: green 4 | - name: soap 5 | group: green 6 | - name: book 7 | group: red 8 | - name: soap2 9 | group: red 10 | - name: glue 11 | group: red 12 | -------------------------------------------------------------------------------- /pr2_robot/config/pick_list_3.yaml: -------------------------------------------------------------------------------- 1 | object_list: 2 | - name: sticky_notes 3 | group: red 4 | - name: book 5 | group: red 6 | - name: snacks 7 | group: green 8 | - name: biscuits 9 | group: green 10 | - name: eraser 11 | group: red 12 | - name: soap2 13 | group: green 14 | - name: soap 15 | group: green 16 | - name: glue 17 | group: red 18 | -------------------------------------------------------------------------------- /pr2_robot/config/sensors.yaml: 
-------------------------------------------------------------------------------- 1 | sensors: 2 | - sensor_plugin: occupancy_map_monitor/PointCloudOctomapUpdater 3 | point_cloud_topic: /pr2/voxel_grid/points #replace with /pr2/3d_map/points when running your own code 4 | max_range: 2.5 #default value: 2.5 5 | frame_subsample: 1 #default value: 1 6 | point_subsample: 1 #default value: 1 7 | padding_offset: 0.01 #default value: 0.01 8 | padding_scale: 1.0 #default value: 1.0 9 | #filtered_cloud_topic: filtered_cloud #Use only for debugging 10 | -------------------------------------------------------------------------------- /pr2_robot/include/pr2_robot/pr2_motion.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | * Copyright (C) 2017 Udacity Inc. 3 | * 4 | * This file is part of Robotic Arm: Perception project for Udacity 5 | * Robotics nano-degree program 6 | * 7 | * All Rights Reserved. 
8 | ******************************************************************************/ 9 | 10 | // Author: Harsh Pandya 11 | 12 | #ifndef PR2_ROBOT_MOTION_H 13 | #define PR2_ROBOT_MOTION_H 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | #include 30 | 31 | #include 32 | 33 | #include 34 | #include 35 | 36 | class PR2Motion 37 | { 38 | public: 39 | explicit PR2Motion(ros::NodeHandle nh); 40 | ~PR2Motion(); 41 | 42 | private: 43 | ros::NodeHandle nh_; 44 | 45 | const std::string LEFT_PLANNING_GROUP = "left_arm"; 46 | const std::string RIGHT_PLANNING_GROUP = "right_arm"; 47 | const std::string LEFT_GRIPPER_GROUP = "left_gripper"; 48 | const std::string RIGHT_GRIPPER_GROUP = "right_gripper"; 49 | 50 | const std::string DROPBOX_MESH_PATH = 51 | "package://pr2_robot/models/dropbox/meshes/dropbox.dae"; 52 | 53 | std::vector OBJECT_MESH_PATH_LIST; 54 | const std::string OBJECT_1_MESH_PATH = 55 | "package://pr2_robot/models/biscuits/meshes/biscuits.dae"; 56 | const std::string OBJECT_2_MESH_PATH = 57 | "package://pr2_robot/models/soap/meshes/soap.dae"; 58 | const std::string OBJECT_3_MESH_PATH = 59 | "package://pr2_robot/models/soap2/meshes/soap2.dae"; 60 | 61 | moveit::planning_interface::MoveGroupInterface right_move_group; 62 | moveit::planning_interface::MoveGroupInterface left_move_group; 63 | moveit::planning_interface::MoveGroupInterface right_gripper_group; 64 | moveit::planning_interface::MoveGroupInterface left_gripper_group; 65 | 66 | 67 | const robot_state::JointModelGroup *right_joint_model_group; 68 | const robot_state::JointModelGroup *left_joint_model_group; 69 | const robot_state::JointModelGroup *right_gripper_joint_model_group; 70 | const robot_state::JointModelGroup *left_gripper_joint_model_group; 71 | 72 | // Define PlanningSceneInterface object to add and remove collision objects 73 | 
moveit::planning_interface::PlanningSceneInterface planning_scene_interface; 74 | 75 | ros::Publisher world_joint_pub; 76 | /* 77 | * Functions for gripper actuation 78 | * close_gripper = 0; open gripper 79 | * = 1; close gripper 80 | */ 81 | bool OperateLeftGripper(const bool &close_gripper); 82 | bool OperateRightGripper(const bool &close_gripper); 83 | 84 | bool SetupCollisionObject(const std::string &object_id, 85 | const std::string &mesh_path, 86 | const geometry_msgs::Pose &object_pose, 87 | moveit_msgs::CollisionObject &collision_object); 88 | 89 | tf::Quaternion RPYToQuaternion(float R, float P, float Y); 90 | }; 91 | 92 | #endif // PR2_ROBOT_MOTION_H 93 | -------------------------------------------------------------------------------- /pr2_robot/include/pr2_robot/pr2_pick_place_server.h: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | * Copyright (C) 2017 Udacity Inc. 3 | * 4 | * This file is part of Robotic Arm: Perception project for Udacity 5 | * Robotics nano-degree program 6 | * 7 | * All Rights Reserved. 
8 | ******************************************************************************/ 9 | 10 | // Author: Harsh Pandya 11 | 12 | #ifndef PR2_PICK_PLACE_SERVER_H 13 | #define PR2_PICK_PLACE_SERVER_H 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | #include 21 | 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | #include 32 | 33 | #include 34 | 35 | #include 36 | #include 37 | #include 38 | #include 39 | 40 | class PR2PickPlace 41 | { 42 | public: 43 | explicit PR2PickPlace(ros::NodeHandle nh); 44 | ~PR2PickPlace(); 45 | 46 | bool Routine(pr2_robot::PickPlace::Request &req, 47 | pr2_robot::PickPlace::Response &res); 48 | 49 | private: 50 | ros::NodeHandle nh_; 51 | 52 | ros::ServiceClient client, grasp_client; 53 | 54 | std::vector grasp_list; 55 | bool left_success, right_success; 56 | 57 | const std::string LEFT_PLANNING_GROUP = "left_arm"; 58 | const std::string RIGHT_PLANNING_GROUP = "right_arm"; 59 | const std::string LEFT_GRIPPER_GROUP = "left_gripper"; 60 | const std::string RIGHT_GRIPPER_GROUP = "right_gripper"; 61 | 62 | const std::string DROPBOX_MESH_PATH = 63 | "package://pr2_robot/models/dropbox/meshes/dropbox.dae"; 64 | 65 | std::vector OBJECT_MESH_PATH_LIST; 66 | const std::string OBJECT_1_MESH_PATH = 67 | "package://pr2_robot/models/biscuits/meshes/biscuits.dae"; 68 | const std::string OBJECT_2_MESH_PATH = 69 | "package://pr2_robot/models/soap/meshes/soap.dae"; 70 | const std::string OBJECT_3_MESH_PATH = 71 | "package://pr2_robot/models/soap2/meshes/soap2.dae"; 72 | const std::string OBJECT_MESH_PATH = 73 | "package://pr2_robot/models/"; 74 | 75 | moveit::planning_interface::MoveGroupInterface right_move_group; 76 | moveit::planning_interface::MoveGroupInterface left_move_group; 77 | moveit::planning_interface::MoveGroupInterface right_gripper_group; 78 | moveit::planning_interface::MoveGroupInterface left_gripper_group; 79 | 80 | 
moveit_visual_tools::MoveItVisualToolsPtr visual_tools_ptr; 81 | 82 | 83 | const robot_state::JointModelGroup *right_joint_model_group; 84 | const robot_state::JointModelGroup *left_joint_model_group; 85 | const robot_state::JointModelGroup *right_gripper_joint_model_group; 86 | const robot_state::JointModelGroup *left_gripper_joint_model_group; 87 | 88 | // Define PlanningSceneInterface object to add and remove collision objects 89 | moveit::planning_interface::PlanningSceneInterface planning_scene_interface; 90 | moveit::planning_interface::MoveGroupInterface::Plan right_arm_plan, left_arm_plan; 91 | 92 | ros::Publisher world_joint_pub; 93 | /* 94 | * Functions for gripper actuation 95 | * close_gripper = 0; open gripper 96 | * = 1; close gripper 97 | */ 98 | bool OperateLeftGripper(const bool &close_gripper); 99 | bool OperateRightGripper(const bool &close_gripper); 100 | 101 | bool SetupCollisionObject(const std::string &object_id, 102 | const std::string &mesh_path, 103 | const geometry_msgs::Pose &object_pose, 104 | moveit_msgs::CollisionObject &collision_object); 105 | 106 | tf::Quaternion RPYToQuaternion(float R, float P, float Y); 107 | 108 | bool IsPickPoseWithinLimits(geometry_msgs::Pose &pick_pose, 109 | geometry_msgs::Pose &act_obj_pose); 110 | }; 111 | 112 | #endif // PR2_PICK_PLACE_SERVER_H 113 | -------------------------------------------------------------------------------- /pr2_robot/launch/octomap_server.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /pr2_robot/launch/pick_place_demo.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 21 | 22 | 23 | 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- 
/pr2_robot/launch/pick_place_project.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 25 | 26 | 27 | 29 | 30 | 32 | 33 | 34 | 35 | 37 | 38 | 39 | 40 | 41 | 42 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /pr2_robot/launch/robot_control.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 14 | 15 | -------------------------------------------------------------------------------- /pr2_robot/launch/robot_description.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 17 | 18 | -------------------------------------------------------------------------------- /pr2_robot/launch/robot_spawn.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 21 | 22 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /pr2_robot/materials/textures/pr2_caster_texture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/materials/textures/pr2_caster_texture.png -------------------------------------------------------------------------------- /pr2_robot/materials/textures/pr2_wheel_left.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/materials/textures/pr2_wheel_left.png -------------------------------------------------------------------------------- 
/pr2_robot/materials/textures/pr2_wheel_right.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/materials/textures/pr2_wheel_right.png -------------------------------------------------------------------------------- /pr2_robot/meshes/README.md: -------------------------------------------------------------------------------- 1 | All meshes in this directory (except gripper) were obtained from https://github.com/pr2/pr2_common 2 | -------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/.gitignore: -------------------------------------------------------------------------------- 1 | convex 2 | iv 3 | -------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/base.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/base_v0/base.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/base_L.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/base_v0/base_L.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/base_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/base_v0/base_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/base_normals.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/base_v0/base_normals.png -------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/caster.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/base_v0/caster.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/caster_L.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/base_v0/caster_L.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/pr2_wheel.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/base_v0/pr2_wheel.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/wheel.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/base_v0/wheel.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/wheel_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/base_v0/wheel_color.png 
-------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/wheel_h_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/base_v0/wheel_h_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/base_v0/wheel_normals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/base_v0/wheel_normals.png -------------------------------------------------------------------------------- /pr2_robot/meshes/forearm_v0/.gitignore: -------------------------------------------------------------------------------- 1 | convex 2 | iv 3 | -------------------------------------------------------------------------------- /pr2_robot/meshes/forearm_v0/forearm.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/forearm_v0/forearm.jpg -------------------------------------------------------------------------------- /pr2_robot/meshes/forearm_v0/forearm.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/forearm_v0/forearm.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/forearm_v0/forearm_color.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/forearm_v0/forearm_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/forearm_v0/forearm_normals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/forearm_v0/forearm_normals.png -------------------------------------------------------------------------------- /pr2_robot/meshes/forearm_v0/wrist_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/forearm_v0/wrist_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/forearm_v0/wrist_flex.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/forearm_v0/wrist_flex.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/forearm_v0/wrist_normals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/forearm_v0/wrist_normals.png -------------------------------------------------------------------------------- /pr2_robot/meshes/forearm_v0/wrist_roll.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/forearm_v0/wrist_roll.stl 
-------------------------------------------------------------------------------- /pr2_robot/meshes/forearm_v0/wrist_roll_L.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/forearm_v0/wrist_roll_L.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/.gitignore: -------------------------------------------------------------------------------- 1 | convex 2 | iv 3 | -------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/head_pan.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/head_v0/head_pan.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/head_pan_L.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/head_v0/head_pan_L.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/head_pan_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/head_v0/head_pan_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/head_pan_normals.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/head_v0/head_pan_normals.png -------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/head_tilt.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/head_v0/head_tilt.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/head_tilt_L.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/head_v0/head_tilt_L.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/head_tilt_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/head_v0/head_tilt_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/head_tilt_color_red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/head_v0/head_tilt_color_red.png -------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/head_tilt_color_yellow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/head_v0/head_tilt_color_yellow.png 
-------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/head_tilt_green.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/head_v0/head_tilt_green.png -------------------------------------------------------------------------------- /pr2_robot/meshes/head_v0/head_tilt_normals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/head_v0/head_tilt_normals.png -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/.gitignore: -------------------------------------------------------------------------------- 1 | convex 2 | iv 3 | -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/shoulder_lift.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/shoulder_v0/shoulder_lift.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/shoulder_lift_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/shoulder_v0/shoulder_lift_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/shoulder_lift_normals.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/shoulder_v0/shoulder_lift_normals.png -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/shoulder_pan.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/shoulder_v0/shoulder_pan.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/shoulder_pan_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/shoulder_v0/shoulder_pan_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/shoulder_pan_normals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/shoulder_v0/shoulder_pan_normals.png -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/shoulder_yaw.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/shoulder_v0/shoulder_yaw.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/upper_arm_roll.stl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/shoulder_v0/upper_arm_roll.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/upper_arm_roll_L.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/shoulder_v0/upper_arm_roll_L.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/upper_arm_roll_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/shoulder_v0/upper_arm_roll_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/shoulder_v0/upper_arm_roll_normals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/shoulder_v0/upper_arm_roll_normals.png -------------------------------------------------------------------------------- /pr2_robot/meshes/torso_v0/.gitignore: -------------------------------------------------------------------------------- 1 | convex 2 | iv 3 | -------------------------------------------------------------------------------- /pr2_robot/meshes/torso_v0/torso.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/torso_v0/torso.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/torso_v0/torso_lift.stl: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/torso_v0/torso_lift.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/torso_v0/torso_lift_L.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/torso_v0/torso_lift_L.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/torso_v0/torso_lift_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/torso_v0/torso_lift_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/torso_v0/torso_lift_normals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/torso_v0/torso_lift_normals.png -------------------------------------------------------------------------------- /pr2_robot/meshes/upper_arm_v0/.gitignore: -------------------------------------------------------------------------------- 1 | convex 2 | iv 3 | -------------------------------------------------------------------------------- /pr2_robot/meshes/upper_arm_v0/elbow_flex.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/upper_arm_v0/elbow_flex.stl -------------------------------------------------------------------------------- 
/pr2_robot/meshes/upper_arm_v0/elbow_flex_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/upper_arm_v0/elbow_flex_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/upper_arm_v0/elbow_flex_normals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/upper_arm_v0/elbow_flex_normals.png -------------------------------------------------------------------------------- /pr2_robot/meshes/upper_arm_v0/forearm_roll.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/upper_arm_v0/forearm_roll.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/upper_arm_v0/forearm_roll_L.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/upper_arm_v0/forearm_roll_L.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/upper_arm_v0/upper_arm.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/upper_arm_v0/upper_arm.jpg -------------------------------------------------------------------------------- /pr2_robot/meshes/upper_arm_v0/upper_arm.stl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/upper_arm_v0/upper_arm.stl -------------------------------------------------------------------------------- /pr2_robot/meshes/upper_arm_v0/upper_arm_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/upper_arm_v0/upper_arm_color.png -------------------------------------------------------------------------------- /pr2_robot/meshes/upper_arm_v0/upper_arm_normals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/meshes/upper_arm_v0/upper_arm_normals.png -------------------------------------------------------------------------------- /pr2_robot/models/README.md: -------------------------------------------------------------------------------- 1 | The mesh files for models listed below were obtained from http://rll.berkeley.edu/amazon_picking_challenge/ 2 | - biscuits 3 | - book 4 | - eraser 5 | - glue 6 | - snacks 7 | - soap 8 | - soap2 9 | - sticky_notes 10 | -------------------------------------------------------------------------------- /pr2_robot/models/biscuits/materials/scripts/biscuits.material: -------------------------------------------------------------------------------- 1 | material @HERE@ 2 | { 3 | technique 4 | { 5 | pass 6 | { 7 | texture_unit 8 | { 9 | texture optimized_tsdf_texture_mapped_mesh.png 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /pr2_robot/models/biscuits/materials/textures/optimized_tsdf_texture_mapped_mesh.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/models/biscuits/materials/textures/optimized_tsdf_texture_mapped_mesh.png -------------------------------------------------------------------------------- /pr2_robot/models/biscuits/model.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | biscuits 5 | 1.0 6 | model.sdf 7 | 8 | 9 | A model of a packet of biscuits 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /pr2_robot/models/biscuits/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 0 0 0 0 0 0 8 | 0.1 9 | 10 | 11 | 12 | 0 0 0 0 0 0 13 | 14 | 19 | 20 | 21 | 22 | model://biscuits/meshes/biscuits.dae 23 | 24 | 25 | 26 | 27 | 28 | 0.015 0.01 0.075 0 0 2.2 29 | 30 | 31 | 0.19 0.06 0.15 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /pr2_robot/models/book/materials/scripts/book.material: -------------------------------------------------------------------------------- 1 | material @HERE@ 2 | { 3 | technique 4 | { 5 | pass 6 | { 7 | texture_unit 8 | { 9 | texture optimized_tsdf_texture_mapped_mesh.png 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /pr2_robot/models/book/materials/textures/optimized_tsdf_texture_mapped_mesh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/models/book/materials/textures/optimized_tsdf_texture_mapped_mesh.png -------------------------------------------------------------------------------- /pr2_robot/models/book/model.config: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | book 5 | 1.0 6 | model.sdf 7 | 8 | 9 | A model of a book 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /pr2_robot/models/book/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 0 0 0 0 0 0 8 | 0.08 9 | 10 | 11 | 12 | 0 0 0 0 0 0 13 | 14 | 19 | 20 | 21 | 22 | model://book/meshes/book.dae 23 | 24 | 25 | 26 | 27 | 28 | 29 | 0 0 0.103 0 0 2.3 30 | 31 | 32 | 0.13 0.02 0.206 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /pr2_robot/models/dropbox/meshes/dropbox.dae: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Blender User 6 | Blender 2.78.0 7 | 8 | 2017-07-19T13:03:29 9 | 2017-07-19T13:03:29 10 | 11 | Z_UP 12 | 13 | 14 | 15 | 16 | 17 | 18 | 49.13434 19 | 1.777778 20 | 0.1 21 | 100 22 | 23 | 24 | 25 | 26 | 27 | 0 28 | 0 29 | 0 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 1 1 1 39 | 1 40 | 0 41 | 0.00111109 42 | 43 | 44 | 45 | 46 | 0.000999987 47 | 1 48 | 0.1 49 | 0.1 50 | 1 51 | 1 52 | 1 53 | 2 54 | 0 55 | 1 56 | 1 57 | 1 58 | 1 59 | 1 60 | 0 61 | 2880 62 | 2 63 | 30.002 64 | 1.000799 65 | 0.04999995 66 | 29.99998 67 | 1 68 | 2 69 | 0 70 | 0 71 | 1 72 | 1 73 | 1 74 | 1 75 | 8192 76 | 1 77 | 1 78 | 0 79 | 1 80 | 1 81 | 1 82 | 3 83 | 0 84 | 0 85 | 0 86 | 0 87 | 0 88 | 1 89 | 1 90 | 1 91 | 3 92 | 0.15 93 | 75 94 | 1 95 | 1 96 | 0 97 | 1 98 | 1 99 | 0 100 | 101 | 102 | 103 | 104 | 105 | 106 | 1 1 1 107 | 1 108 | 0 109 | 0.00111109 110 | 111 | 112 | 113 | 114 | 9.99987e-4 115 | 1 116 | 0.1 117 | 0.1 118 | 1 119 | 1 120 | 1 121 | 2 122 | 0 123 | 1 124 | 1 125 | 1 126 | 1 127 | 1 128 | 0 129 | 2880 130 | 2 131 | 30.002 132 | 1.000799 133 | 0.04999995 134 | 29.99998 135 | 1 136 | 2 137 | 0 138 | 0 139 | 1 140 | 1 141 | 1 142 
| 1 143 | 8192 144 | 1 145 | 1 146 | 0 147 | 1 148 | 1 149 | 1 150 | 3 151 | 0 152 | 0 153 | 0 154 | 0 155 | 0 156 | 1 157 | 1 158 | 1 159 | 3 160 | 0.15 161 | 75 162 | 1 163 | 1 164 | 0 165 | 1 166 | 1 167 | 0 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | -0.9994162 -1.001249 -1.78814e-7 -0.9994162 -1.001249 2 -0.9994162 0.9987508 -1.78814e-7 -0.9994162 0.9987508 2 1.000584 -1.001249 -1.78814e-7 1.000584 -1.001249 2 1.000584 0.9987508 -1.78814e-7 1.000584 0.9987508 2 -0.9416813 -0.9435141 0.05773472 -0.9287078 -0.9305406 2 -0.9416813 0.9410159 0.05773472 -0.9287078 0.9280425 2 0.9428491 -0.9435141 0.05773472 0.9298757 -0.9305406 2 0.9428491 0.9410159 0.05773472 0.9298757 0.9280425 2 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | -1 0 0 0 1 0 1 0 0 0 -1 0 0 0 -1 0.9999778 0 -0.006679356 0 -0.9999778 -0.006679296 -0.9999778 0 -0.006679356 0 0.9999777 -0.006679296 0 0 1 0.9999777 0 -0.006679356 0 -0.9999778 -0.006679415 -0.9999777 0 -0.006679356 0 0.9999778 -0.006679356 1.68593e-6 0 1 -1.68593e-6 0 1 -3.62842e-6 0 1 3.62842e-6 0 1 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | 201 | 202 | 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 203 |

1 0 2 0 0 0 3 1 6 1 2 1 7 2 4 2 6 2 5 3 0 3 4 3 6 4 0 4 2 4 1 0 3 0 2 0 3 1 7 1 6 1 7 2 5 2 4 2 5 3 1 3 0 3 6 4 4 4 0 4 9 5 8 5 10 5 11 6 10 6 14 6 15 7 14 7 12 7 13 8 12 8 8 8 14 9 10 9 8 9 9 10 10 10 11 10 11 11 14 11 15 11 15 12 12 12 13 12 13 13 8 13 9 13 14 9 8 9 12 9 3 9 15 9 7 9 7 14 13 14 5 14 1 15 11 15 3 15 5 9 9 9 1 9 3 9 11 9 15 9 7 16 15 16 13 16 1 17 9 17 11 17 5 9 13 9 9 9

204 |
205 |
206 |
207 |
208 | 209 | 210 | 211 | 212 | 0.6859207 -0.3240135 0.6515582 7.481132 0.7276763 0.3054208 -0.6141704 -6.50764 0 0.8953956 0.4452714 5.343665 0 0 0 1 213 | 214 | 215 | 216 | -0.2908646 -0.7711008 0.5663932 4.076245 0.9551712 -0.1998834 0.2183912 1.005454 -0.05518906 0.6045247 0.7946723 5.903862 0 0 0 1 217 | 218 | 219 | 220 | -0.2908646 -0.7711008 0.5663933 4.076245 0.9551712 -0.1998833 0.2183912 1.005454 -0.05518906 0.6045247 0.7946723 5.903862 0 0 0 1 221 | 222 | 223 | 224 | 0.15 0 0 -5.83821e-4 0 0.3 0 0.00124926 0 0 0.1 1.78814e-7 0 0 0 1 225 | 226 | 227 | 228 | 229 | 230 | 231 | 232 |
-------------------------------------------------------------------------------- /pr2_robot/models/dropbox/model.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | dropbox 4 | 1.0 5 | model.sdf 6 | 7 | Harsh Pandya 8 | harsh@electricmovement.com 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /pr2_robot/models/dropbox/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 0 -0 0 0 0 -1.56873 6 | 7 | 1 8 | 9 | 0.166667 10 | 0 11 | 0 12 | 0.166667 13 | 0 14 | 0.166667 15 | 16 | 17 | 18 | 0 0 0 0 -0 0 19 | 20 | 21 | model://dropbox/meshes/dropbox.dae 22 | 1 1 1 23 | 24 | 25 | 26 | 1 27 | 31 | 0.3 0.3 0.3 1 32 | 0.7 0.7 0.7 1 33 | 0.01 0.01 0.01 1 34 | 0 0 0 1 35 | 36 | 0 37 | 1 38 | 39 | 40 | 0 41 | 10 42 | 0 0 0 0 -0 0 43 | 44 | 45 | model://dropbox/meshes/dropbox.dae 46 | 1 1 1 47 | 48 | 49 | 50 | 51 | 52 | 1 53 | 1 54 | 0 0 0 55 | 0 56 | 0 57 | 58 | 59 | 1 60 | 0 61 | 0 62 | 1 63 | 64 | 0 65 | 66 | 67 | 68 | 69 | 0 70 | 1e+06 71 | 72 | 73 | 0 74 | 1 75 | 1 76 | 77 | 0 78 | 0.2 79 | 1e+13 80 | 1 81 | 0.01 82 | 0 83 | 84 | 85 | 1 86 | -0.01 87 | 0 88 | 0.2 89 | 1e+13 90 | 1 91 | 92 | 93 | 94 | 95 | 96 | 1 97 | 1 98 | 99 | 100 | -------------------------------------------------------------------------------- /pr2_robot/models/eraser/materials/scripts/eraser.material: -------------------------------------------------------------------------------- 1 | material @HERE@ 2 | { 3 | technique 4 | { 5 | pass 6 | { 7 | texture_unit 8 | { 9 | texture optimized_tsdf_texture_mapped_mesh.png 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /pr2_robot/models/eraser/materials/textures/optimized_tsdf_texture_mapped_mesh.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/models/eraser/materials/textures/optimized_tsdf_texture_mapped_mesh.png -------------------------------------------------------------------------------- /pr2_robot/models/eraser/model.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | eraser 5 | 1.0 6 | model.sdf 7 | 8 | 9 | A model of a board eraser 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /pr2_robot/models/eraser/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 0 0 0 0 0 0 8 | 0.1 9 | 10 | 11 | 12 | 0 0 0 0 0 0 13 | 14 | 19 | 20 | 21 | 22 | model://eraser/meshes/eraser.dae 23 | 24 | 25 | 26 | 27 | 28 | 29 | 0.02 0.02 0.02 0 0 -0.95 30 | 31 | 32 | 0.135 0.06 0.05 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /pr2_robot/models/glue/materials/scripts/glue.material: -------------------------------------------------------------------------------- 1 | material @HERE@ 2 | { 3 | technique 4 | { 5 | pass 6 | { 7 | texture_unit 8 | { 9 | texture optimized_tsdf_texture_mapped_mesh.png 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /pr2_robot/models/glue/materials/textures/optimized_tsdf_texture_mapped_mesh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/models/glue/materials/textures/optimized_tsdf_texture_mapped_mesh.png -------------------------------------------------------------------------------- /pr2_robot/models/glue/model.config: -------------------------------------------------------------------------------- 1 | 2 
| 3 | 4 | glue 5 | 1.0 6 | model.sdf 7 | 8 | 9 | A model of a glue bottle 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /pr2_robot/models/glue/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 0 0 0 0 0 0 8 | 0.08 9 | 10 | 11 | 12 | 0 0 0 0 0 0 13 | 14 | 19 | 20 | 21 | 22 | model://glue/meshes/glue.dae 23 | 24 | 25 | 26 | 27 | 28 | 29 | 0 0 0.067 0 0 -0.95 30 | 31 | 32 | 0.054 0.032 0.133 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /pr2_robot/models/short_table/model.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | short table 4 | 1.0 5 | model.sdf 6 | 7 | Harsh Pandya 8 | harsh@electricmovement.com 9 | 10 | A short table 11 | 12 | -------------------------------------------------------------------------------- /pr2_robot/models/short_table/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -0 0 0 0 -0 0 6 | 7 | 1 8 | 9 | 0.166667 10 | 0 11 | 0 12 | 0.166667 13 | 0 14 | 0.166667 15 | 16 | 17 | 18 | 0 0 0 0 -0 0 19 | 20 | 21 | model://short_table/meshes/short_table.dae 22 | 1 1 1 23 | 24 | 25 | 26 | 1 27 | 31 | 32 | 0 33 | 1 34 | 35 | 36 | 0 37 | 10 38 | 0 0 0 0 -0 0 39 | 40 | 41 | model://short_table/meshes/short_table.dae 42 | 1 1 1 43 | 44 | 45 | 46 | 47 | 48 | 1 49 | 1 50 | 0 0 0 51 | 0 52 | 0 53 | 54 | 55 | 1 56 | 0 57 | 0 58 | 1 59 | 60 | 0 61 | 62 | 63 | 64 | 65 | 0 66 | 1e+06 67 | 68 | 69 | 0 70 | 1 71 | 1 72 | 73 | 0 74 | 0.2 75 | 1e+13 76 | 1 77 | 0.01 78 | 0 79 | 80 | 81 | 1 82 | -0.01 83 | 0 84 | 0.2 85 | 1e+13 86 | 1 87 | 88 | 89 | 90 | 91 | 92 | 1 93 | 1 94 | 95 | 96 | -------------------------------------------------------------------------------- /pr2_robot/models/snacks/materials/scripts/snacks.material: 
-------------------------------------------------------------------------------- 1 | material @HERE@ 2 | { 3 | technique 4 | { 5 | pass 6 | { 7 | texture_unit 8 | { 9 | texture optimized_tsdf_texture_mapped_mesh.png 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /pr2_robot/models/snacks/materials/textures/optimized_tsdf_texture_mapped_mesh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/models/snacks/materials/textures/optimized_tsdf_texture_mapped_mesh.png -------------------------------------------------------------------------------- /pr2_robot/models/snacks/model.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | snacks 5 | 1.0 6 | model.sdf 7 | 8 | 9 | A model of a box of snacks 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /pr2_robot/models/snacks/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 0 0 0 0 0 0 8 | 0.08 9 | 10 | 11 | 12 | 0 0 0 0 0 0 13 | 14 | 19 | 20 | 21 | 22 | model://snacks/meshes/snacks.dae 23 | 24 | 25 | 26 | 27 | 28 | 29 | 0.04 0.02 0.117 0 0 2.1 30 | 31 | 32 | 0.165 0.06 0.235 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /pr2_robot/models/soap/materials/scripts/soap.material: -------------------------------------------------------------------------------- 1 | material @HERE@ 2 | { 3 | technique 4 | { 5 | pass 6 | { 7 | texture_unit 8 | { 9 | texture optimized_tsdf_texture_mapped_mesh.png 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- 
/pr2_robot/models/soap/materials/textures/optimized_tsdf_texture_mapped_mesh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/models/soap/materials/textures/optimized_tsdf_texture_mapped_mesh.png -------------------------------------------------------------------------------- /pr2_robot/models/soap/model.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | soap 5 | 1.0 6 | model.sdf 7 | 8 | 9 | A model of a soap bar 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /pr2_robot/models/soap/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 0 0 0 0 0 0 8 | 0.08 9 | 10 | 11 | 12 | 0 0 0 0 0 0 13 | 14 | 19 | 20 | 21 | 22 | model://soap/meshes/soap.dae 23 | 24 | 25 | 26 | 27 | 28 | 29 | 0.02 0.002 0.05 0 0 -1.05 30 | 31 | 32 | 0.14 0.065 0.1 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /pr2_robot/models/soap2/materials/scripts/soap2.material: -------------------------------------------------------------------------------- 1 | material @HERE@ 2 | { 3 | technique 4 | { 5 | pass 6 | { 7 | texture_unit 8 | { 9 | texture optimized_tsdf_texture_mapped_mesh.png 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /pr2_robot/models/soap2/materials/textures/optimized_tsdf_texture_mapped_mesh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/models/soap2/materials/textures/optimized_tsdf_texture_mapped_mesh.png 
-------------------------------------------------------------------------------- /pr2_robot/models/soap2/model.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | soap2 5 | 1.0 6 | model.sdf 7 | 8 | 9 | A model of another soap 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /pr2_robot/models/soap2/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 0 0 0 0 0 0 8 | 0.08 9 | 10 | 11 | 12 | 0 0 0 0 0 0 13 | 14 | 19 | 20 | 21 | 22 | model://soap2/meshes/soap2.dae 23 | 24 | 25 | 26 | 27 | 28 | 29 | -0.002 0.00502 0.05 0 0 -1 30 | 31 | 32 | 0.065 0.04 0.105 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /pr2_robot/models/soda_can/materials/textures/uda_can.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/models/soda_can/materials/textures/uda_can.png -------------------------------------------------------------------------------- /pr2_robot/models/soda_can/meshes/udacity-blue.png.001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/models/soda_can/meshes/udacity-blue.png.001.png -------------------------------------------------------------------------------- /pr2_robot/models/soda_can/model-1_2.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 0.390 7 | 8 | 0.00058 9 | 0 10 | 0 11 | 0.00058 12 | 0 13 | 0.00019 14 | 15 | 16 | 17 | 0 0 -0.46 0 0 0 18 | 19 | 20 | model://coke_can/meshes/soda_can.dae 21 | 22 | 23 | 24 | 25 | 0 0 -0.46 0 0 0 26 | 27 | 28 | 
model://coke_can/meshes/soda_can.dae 29 | 30 | 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /pr2_robot/models/soda_can/model-1_3.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 0.390 7 | 8 | 0.00058 9 | 0 10 | 0 11 | 0.00058 12 | 0 13 | 0.00019 14 | 15 | 16 | 17 | 0 0 -0.46 0 0 0 18 | 19 | 20 | model://coke_can/meshes/soda_can.dae 21 | 22 | 23 | 24 | 25 | 0 0 -0.46 0 0 0 26 | 27 | 28 | model://coke_can/meshes/soda_can.dae 29 | 30 | 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /pr2_robot/models/soda_can/model-1_4.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -0.01 -0.012 0.15 0 0 0 7 | 0.390 8 | 9 | 0.00058 10 | 0 11 | 0 12 | 0.00058 13 | 0 14 | 0.00019 15 | 16 | 17 | 18 | 0 0 -0.46 0 0 0 19 | 20 | 21 | model://coke_can/meshes/soda_can.dae 22 | 23 | 24 | 25 | 26 | 27 | 1.0 28 | 1.0 29 | 30 | 31 | 32 | 33 | 10000000.0 34 | 1.0 35 | 0.001 36 | 0.1 37 | 38 | 39 | 40 | 41 | 42 | 0 0 -0.46 0 0 0 43 | 44 | 45 | model://coke_can/meshes/soda_can.dae 46 | 47 | 48 | 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /pr2_robot/models/soda_can/model.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Soda Can 5 | 1.0 6 | model-1_2.sdf 7 | model-1_3.sdf 8 | model-1_4.sdf 9 | model.sdf 10 | 11 | 12 | Harsh 13 | 14 | 15 | 16 | Soda can 17 | 18 | 19 | -------------------------------------------------------------------------------- /pr2_robot/models/soda_can/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -0.01 -0.012 0.15 0 0 0 7 | 0.390 8 | 9 | 0.00058 10 | 0 11 | 0 12 | 0.00058 13 | 0 14 | 0.00019 15 | 16 | 17 | 18 | 0 0 -0.46 0 0 0 19 | 20 | 21 | 
model://soda_can/meshes/soda_can.dae 22 | 23 | 24 | 25 | 26 | 27 | 1.0 28 | 1.0 29 | 30 | 31 | 32 | 33 | 10000000.0 34 | 1.0 35 | 0.001 36 | 0.1 37 | 38 | 39 | 40 | 41 | 42 | 0 0 -0.46 0 0 0 43 | 44 | 45 | model://soda_can/meshes/soda_can.dae 46 | 47 | 48 | 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /pr2_robot/models/sticky_notes/materials/scripts/sticky_notes.material: -------------------------------------------------------------------------------- 1 | material @HERE@ 2 | { 3 | technique 4 | { 5 | pass 6 | { 7 | texture_unit 8 | { 9 | texture optimized_tsdf_texture_mapped_mesh.png 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /pr2_robot/models/sticky_notes/materials/textures/optimized_tsdf_texture_mapped_mesh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/models/sticky_notes/materials/textures/optimized_tsdf_texture_mapped_mesh.png -------------------------------------------------------------------------------- /pr2_robot/models/sticky_notes/model.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | sticky_notes 5 | 1.0 6 | model.sdf 7 | 8 | 9 | A model of sticky notes 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /pr2_robot/models/sticky_notes/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 0 0 0 0 0 0 8 | 0.08 9 | 10 | 11 | 12 | 0 0 0 0 0 0 13 | 14 | 19 | 20 | 21 | 22 | model://sticky_notes/meshes/sticky_notes.dae 23 | 24 | 25 | 26 | 27 | 28 | 29 | 0 0 0.06 0 0 0.68 30 | 31 | 32 | 0.043 0.055 0.12 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 
-------------------------------------------------------------------------------- /pr2_robot/models/twin_table/model.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | twin_table 4 | 1.0 5 | model.sdf 6 | 7 | Harsh Pandya 8 | harsh@electricmovement.com 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /pr2_robot/models/twin_table/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 0.000466 -0 0 0 -0 0 6 | 7 | 1 8 | 9 | 0.166667 10 | 0 11 | 0 12 | 0.166667 13 | 0 14 | 0.166667 15 | 16 | 17 | 18 | 0 0 0 0 -0 0 19 | 20 | 21 | model://twin_table/meshes/twin_table.dae 22 | 1 1 1 23 | 24 | 25 | 26 | 1 27 | 31 | 0.3 0.3 0.3 1 32 | 0.7 0.7 0.7 1 33 | 0.01 0.01 0.01 1 34 | 0 0 0 1 35 | 36 | 0 37 | 1 38 | 39 | 40 | 0 41 | 10 42 | 0 0 0 0 -0 0 43 | 44 | 45 | model://twin_table/meshes/twin_table.dae 46 | 1 1 1 47 | 48 | 49 | 50 | 51 | 52 | 1 53 | 1 54 | 0 0 0 55 | 0 56 | 0 57 | 58 | 59 | 1 60 | 0 61 | 0 62 | 1 63 | 64 | 0 65 | 66 | 67 | 68 | 69 | 0 70 | 1e+06 71 | 72 | 73 | 0 74 | 1 75 | 1 76 | 77 | 0 78 | 0.2 79 | 1e+13 80 | 1 81 | 0.01 82 | 0 83 | 84 | 85 | 1 86 | -0.01 87 | 0 88 | 0.2 89 | 1e+13 90 | 1 91 | 92 | 93 | 94 | 95 | 96 | 1 97 | 1 98 | 99 | 100 | -------------------------------------------------------------------------------- /pr2_robot/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | pr2_robot 4 | 0.0.0 5 | The pr2_robot package 6 | 7 | 8 | 9 | 10 | Harsh Pandya 11 | 12 | 13 | 14 | 15 | 16 | TODO 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | catkin 43 | dynamic_reconfigure 44 | message_generation 45 | moveit_core 46 | moveit_ros_planning_interface 47 | moveit_ros_perception 48 | interactive_markers 49 | geometric_shapes 50 | moveit_visual_tools 51 | pcl_conversions 52 
| pcl_ros 53 | roslint 54 | sensor_msgs 55 | 56 | controller_manager 57 | dynamic_reconfigure 58 | effort_controllers 59 | position_controllers 60 | gazebo_plugins 61 | gazebo_grasp_plugin 62 | gazebo_ros 63 | gazebo_ros_control 64 | interactive_markers 65 | joint_state_controller 66 | joint_state_publisher 67 | joint_trajectory_controller 68 | message_runtime 69 | moveit_core 70 | moveit_fake_controller_manager 71 | moveit_ros_planning_interface 72 | moveit_ros_perception 73 | moveit_simple_controller_manager 74 | moveit_visual_tools 75 | pcl_conversions 76 | pcl_ros 77 | robot_state_publisher 78 | sensor_msgs 79 | xacro 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /pr2_robot/scripts/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/scripts/.DS_Store -------------------------------------------------------------------------------- /pr2_robot/scripts/grasp_server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (C) 2017 Udacity Inc. 4 | # 5 | # This file is part of Robotic Arm: Pick and Place project for Udacity 6 | # Robotics nano-degree program 7 | # 8 | # All Rights Reserved. 
9 | 10 | # Author: Harsh Pandya 11 | 12 | # import modules 13 | import rospy 14 | from geometry_msgs.msg import Pose 15 | from std_msgs.msg import Float64 16 | from std_msgs.msg import String 17 | from pr2_robot.srv import * 18 | 19 | def get_grasp_handler(req): 20 | object_name = req.object_name.data 21 | test_num = req.test_scene_num.data-1 22 | grasp_pose = Pose() 23 | 24 | grasp_list = rospy.get_param('/grasp_list') 25 | 26 | grasp_pose.position.x = grasp_list[test_num][object_name]['position']['x'] 27 | grasp_pose.position.y = grasp_list[test_num][object_name]['position']['y'] 28 | grasp_pose.position.z = grasp_list[test_num][object_name]['position']['z'] 29 | 30 | grasp_pose.orientation.w = grasp_list[test_num][object_name]['orientation']['w'] 31 | grasp_pose.orientation.x = grasp_list[test_num][object_name]['orientation']['x'] 32 | grasp_pose.orientation.y = grasp_list[test_num][object_name]['orientation']['y'] 33 | grasp_pose.orientation.z = grasp_list[test_num][object_name]['orientation']['z'] 34 | 35 | return GraspResponse(grasp_pose) 36 | 37 | def grasp_server(): 38 | rospy.init_node('grasp_server') 39 | s = rospy.Service('get_grasp', Grasp, get_grasp_handler) 40 | print "Ready to retrieve grasps" 41 | rospy.spin() 42 | 43 | if __name__ == '__main__': 44 | try: 45 | grasp_server() 46 | except rospy.ROSInterruptException: 47 | pass 48 | -------------------------------------------------------------------------------- /pr2_robot/scripts/pcl_helper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (C) 2017 Udacity Inc. 4 | # 5 | # This file is part of Robotic Arm: Pick and Place project for Udacity 6 | # Robotics nano-degree program 7 | # 8 | # All Rights Reserved. 
9 | 10 | # Author: Harsh Pandya 11 | 12 | # Import modules 13 | import rospy 14 | import pcl 15 | import numpy as np 16 | import ctypes 17 | import struct 18 | import sensor_msgs.point_cloud2 as pc2 19 | 20 | from sensor_msgs.msg import PointCloud2, PointField 21 | from std_msgs.msg import Header 22 | from random import randint 23 | 24 | 25 | def random_color_gen(): 26 | """ Generates a random color 27 | 28 | Args: None 29 | 30 | Returns: 31 | list: 3 elements, R, G, and B 32 | """ 33 | r = randint(0, 255) 34 | g = randint(0, 255) 35 | b = randint(0, 255) 36 | return [r, g, b] 37 | 38 | 39 | def ros_to_pcl(ros_cloud): 40 | """ Converts a ROS PointCloud2 message to a pcl PointXYZRGB 41 | 42 | Args: 43 | ros_cloud (PointCloud2): ROS PointCloud2 message 44 | 45 | Returns: 46 | pcl.PointCloud_PointXYZRGB: PCL XYZRGB point cloud 47 | """ 48 | points_list = [] 49 | 50 | for data in pc2.read_points(ros_cloud, skip_nans=True): 51 | points_list.append([data[0], data[1], data[2], data[3]]) 52 | 53 | pcl_data = pcl.PointCloud_PointXYZRGB() 54 | pcl_data.from_list(points_list) 55 | 56 | return pcl_data 57 | 58 | 59 | def pcl_to_ros(pcl_array): 60 | """ Converts a pcl PointXYZRGB to a ROS PointCloud2 message 61 | 62 | Args: 63 | pcl_array (PointCloud_PointXYZRGB): A PCL XYZRGB point cloud 64 | 65 | Returns: 66 | PointCloud2: A ROS point cloud 67 | """ 68 | ros_msg = PointCloud2() 69 | 70 | ros_msg.header.stamp = rospy.Time.now() 71 | ros_msg.header.frame_id = "world" 72 | 73 | ros_msg.height = 1 74 | ros_msg.width = pcl_array.size 75 | 76 | ros_msg.fields.append(PointField( 77 | name="x", 78 | offset=0, 79 | datatype=PointField.FLOAT32, count=1)) 80 | ros_msg.fields.append(PointField( 81 | name="y", 82 | offset=4, 83 | datatype=PointField.FLOAT32, count=1)) 84 | ros_msg.fields.append(PointField( 85 | name="z", 86 | offset=8, 87 | datatype=PointField.FLOAT32, count=1)) 88 | ros_msg.fields.append(PointField( 89 | name="rgb", 90 | offset=16, 91 | datatype=PointField.FLOAT32, 
count=1)) 92 | 93 | ros_msg.is_bigendian = False 94 | ros_msg.point_step = 32 95 | ros_msg.row_step = ros_msg.point_step * ros_msg.width * ros_msg.height 96 | ros_msg.is_dense = False 97 | buffer = [] 98 | 99 | for data in pcl_array: 100 | s = struct.pack('>f', data[3]) 101 | i = struct.unpack('>l', s)[0] 102 | pack = ctypes.c_uint32(i).value 103 | 104 | r = (pack & 0x00FF0000) >> 16 105 | g = (pack & 0x0000FF00) >> 8 106 | b = (pack & 0x000000FF) 107 | 108 | buffer.append(struct.pack('ffffBBBBIII', data[0], data[1], data[2], 1.0, b, g, r, 0, 0, 0, 0)) 109 | 110 | ros_msg.data = "".join(buffer) 111 | 112 | return ros_msg 113 | 114 | 115 | def XYZRGB_to_XYZ(XYZRGB_cloud): 116 | """ Converts a PCL XYZRGB point cloud to an XYZ point cloud (removes color info) 117 | 118 | Args: 119 | XYZRGB_cloud (PointCloud_PointXYZRGB): A PCL XYZRGB point cloud 120 | 121 | Returns: 122 | PointCloud_PointXYZ: A PCL XYZ point cloud 123 | """ 124 | XYZ_cloud = pcl.PointCloud() 125 | points_list = [] 126 | 127 | for data in XYZRGB_cloud: 128 | points_list.append([data[0], data[1], data[2]]) 129 | 130 | XYZ_cloud.from_list(points_list) 131 | return XYZ_cloud 132 | 133 | 134 | def XYZ_to_XYZRGB(XYZ_cloud, color): 135 | """ Converts a PCL XYZ point cloud to a PCL XYZRGB point cloud 136 | 137 | All returned points in the XYZRGB cloud will be the color indicated 138 | by the color parameter. 
139 | 140 | Args: 141 | XYZ_cloud (PointCloud_XYZ): A PCL XYZ point cloud 142 | color (list): 3-element list of integers [0-255,0-255,0-255] 143 | 144 | Returns: 145 | PointCloud_PointXYZRGB: A PCL XYZRGB point cloud 146 | """ 147 | XYZRGB_cloud = pcl.PointCloud_PointXYZRGB() 148 | points_list = [] 149 | 150 | float_rgb = rgb_to_float(color) 151 | 152 | for data in XYZ_cloud: 153 | points_list.append([data[0], data[1], data[2], float_rgb]) 154 | 155 | XYZRGB_cloud.from_list(points_list) 156 | return XYZRGB_cloud 157 | 158 | 159 | def rgb_to_float(color): 160 | """ Converts an RGB list to the packed float format used by PCL 161 | 162 | From the PCL docs: 163 | "Due to historical reasons (PCL was first developed as a ROS package), 164 | the RGB information is packed into an integer and casted to a float" 165 | 166 | Args: 167 | color (list): 3-element list of integers [0-255,0-255,0-255] 168 | 169 | Returns: 170 | float_rgb: RGB value packed as a float 171 | """ 172 | hex_r = (0xff & color[0]) << 16 173 | hex_g = (0xff & color[1]) << 8 174 | hex_b = (0xff & color[2]) 175 | 176 | hex_rgb = hex_r | hex_g | hex_b 177 | 178 | float_rgb = struct.unpack('f', struct.pack('i', hex_rgb))[0] 179 | 180 | return float_rgb 181 | 182 | 183 | def float_to_rgb(float_rgb): 184 | """ Converts a packed float RGB format to an RGB list 185 | 186 | Args: 187 | float_rgb: RGB value packed as a float 188 | 189 | Returns: 190 | color (list): 3-element list of integers [0-255,0-255,0-255] 191 | """ 192 | s = struct.pack('>f', float_rgb) 193 | i = struct.unpack('>l', s)[0] 194 | pack = ctypes.c_uint32(i).value 195 | 196 | r = (pack & 0x00FF0000) >> 16 197 | g = (pack & 0x0000FF00) >> 8 198 | b = (pack & 0x000000FF) 199 | 200 | color = [r,g,b] 201 | 202 | return color 203 | 204 | 205 | def get_color_list(cluster_count): 206 | """ Returns a list of randomized colors 207 | 208 | Args: 209 | cluster_count (int): Number of random colors to generate 210 | 211 | Returns: 212 | (list): List containing 
3-element color lists 213 | """ 214 | if (cluster_count > len(get_color_list.color_list)): 215 | for i in xrange(len(get_color_list.color_list), cluster_count): 216 | get_color_list.color_list.append(random_color_gen()) 217 | return get_color_list.color_list 218 | -------------------------------------------------------------------------------- /pr2_robot/scripts/pr2_safe_spawner.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # This script safely launches ros nodes with buffer time to allow param server population 3 | x-terminal-emulator -e roslaunch pr2_robot pick_place_demo.launch & sleep 10 && 4 | x-terminal-emulator -e roslaunch pr2_moveit pr2_moveit.launch & sleep 20 && 5 | x-terminal-emulator -e rosrun pr2_robot pr2_motion 6 | -------------------------------------------------------------------------------- /pr2_robot/scripts/project_template.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Import modules 4 | import numpy as np 5 | import sklearn 6 | from sklearn.preprocessing import LabelEncoder 7 | import pickle 8 | from sensor_stick.srv import GetNormals 9 | from sensor_stick.features import compute_color_histograms 10 | from sensor_stick.features import compute_normal_histograms 11 | from visualization_msgs.msg import Marker 12 | from sensor_stick.marker_tools import * 13 | from sensor_stick.msg import DetectedObjectsArray 14 | from sensor_stick.msg import DetectedObject 15 | from sensor_stick.pcl_helper import * 16 | 17 | import rospy 18 | import tf 19 | from geometry_msgs.msg import Pose 20 | from std_msgs.msg import Float64 21 | from std_msgs.msg import Int32 22 | from std_msgs.msg import String 23 | from pr2_robot.srv import * 24 | from rospy_message_converter import message_converter 25 | import yaml 26 | 27 | 28 | # Helper function to get surface normals 29 | def get_normals(cloud): 30 | get_normals_prox = 
rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals) 31 | return get_normals_prox(cloud).cluster 32 | 33 | # Helper function to create a yaml friendly dictionary from ROS messages 34 | def make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose): 35 | yaml_dict = {} 36 | yaml_dict["test_scene_num"] = test_scene_num.data 37 | yaml_dict["arm_name"] = arm_name.data 38 | yaml_dict["object_name"] = object_name.data 39 | yaml_dict["pick_pose"] = message_converter.convert_ros_message_to_dictionary(pick_pose) 40 | yaml_dict["place_pose"] = message_converter.convert_ros_message_to_dictionary(place_pose) 41 | return yaml_dict 42 | 43 | # Helper function to output to yaml file 44 | def send_to_yaml(yaml_filename, dict_list): 45 | data_dict = {"object_list": dict_list} 46 | with open(yaml_filename, 'w') as outfile: 47 | yaml.dump(data_dict, outfile, default_flow_style=False) 48 | 49 | # Callback function for your Point Cloud Subscriber 50 | def pcl_callback(pcl_msg): 51 | 52 | # Exercise-2 TODOs: 53 | 54 | # TODO: Convert ROS msg to PCL data 55 | 56 | # TODO: Statistical Outlier Filtering 57 | 58 | # TODO: Voxel Grid Downsampling 59 | 60 | # TODO: PassThrough Filter 61 | 62 | # TODO: RANSAC Plane Segmentation 63 | 64 | # TODO: Extract inliers and outliers 65 | 66 | # TODO: Euclidean Clustering 67 | 68 | # TODO: Create Cluster-Mask Point Cloud to visualize each cluster separately 69 | 70 | # TODO: Convert PCL data to ROS messages 71 | 72 | # TODO: Publish ROS messages 73 | 74 | # Exercise-3 TODOs: 75 | 76 | # Classify the clusters! (loop through each detected cluster one at a time) 77 | 78 | # Grab the points for the cluster 79 | 80 | # Compute the associated feature vector 81 | 82 | # Make the prediction 83 | 84 | # Publish a label into RViz 85 | 86 | # Add the detected object to the list of detected objects. 
87 | 88 | # Publish the list of detected objects 89 | 90 | # Suggested location for where to invoke your pr2_mover() function within pcl_callback() 91 | # Could add some logic to determine whether or not your object detections are robust 92 | # before calling pr2_mover() 93 | try: 94 | pr2_mover(detected_objects_list) 95 | except rospy.ROSInterruptException: 96 | pass 97 | 98 | # function to load parameters and request PickPlace service 99 | def pr2_mover(object_list): 100 | 101 | # TODO: Initialize variables 102 | 103 | # TODO: Get/Read parameters 104 | 105 | # TODO: Parse parameters into individual variables 106 | 107 | # TODO: Rotate PR2 in place to capture side tables for the collision map 108 | 109 | # TODO: Loop through the pick list 110 | 111 | # TODO: Get the PointCloud for a given object and obtain its centroid 112 | 113 | # TODO: Create 'place_pose' for the object 114 | 115 | # TODO: Assign the arm to be used for pick_place 116 | 117 | # TODO: Create a list of dictionaries (made with make_yaml_dict()) for later output to yaml format 118 | 119 | # Wait for 'pick_place_routine' service to come up 120 | rospy.wait_for_service('pick_place_routine') 121 | 122 | try: 123 | pick_place_routine = rospy.ServiceProxy('pick_place_routine', PickPlace) 124 | 125 | # TODO: Insert your message variables to be sent as a service request 126 | resp = pick_place_routine(TEST_SCENE_NUM, OBJECT_NAME, WHICH_ARM, PICK_POSE, PLACE_POSE) 127 | 128 | print ("Response: ",resp.success) 129 | 130 | except rospy.ServiceException, e: 131 | print "Service call failed: %s"%e 132 | 133 | # TODO: Output your request parameters into output yaml file 134 | 135 | 136 | 137 | if __name__ == '__main__': 138 | 139 | # TODO: ROS node initialization 140 | 141 | # TODO: Create Subscribers 142 | 143 | # TODO: Create Publishers 144 | 145 | # TODO: Load Model From disk 146 | 147 | # Initialize color_list 148 | get_color_list.color_list = [] 149 | 150 | # TODO: Spin while node is not shutdown 151 | 
-------------------------------------------------------------------------------- /pr2_robot/scripts/rospy_message_converter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/udacity/RoboND-Perception-Project/e93151a0d8cd5c4b720d024172bc0521caf51269/pr2_robot/scripts/rospy_message_converter/__init__.py -------------------------------------------------------------------------------- /pr2_robot/scripts/rospy_message_converter/json_message_converter.py: -------------------------------------------------------------------------------- 1 | import json 2 | from rospy_message_converter import message_converter 3 | 4 | def convert_json_to_ros_message(message_type, json_message): 5 | """ 6 | Takes in the message type and a JSON-formatted string and returns a ROS 7 | message. 8 | 9 | Example: 10 | message_type = "std_msgs/String" 11 | json_message = '{"data": "Hello, Robot"}' 12 | ros_message = convert_json_to_ros_message(message_type, json_message) 13 | """ 14 | dictionary = json.loads(json_message) 15 | return message_converter.convert_dictionary_to_ros_message(message_type, dictionary) 16 | 17 | def convert_ros_message_to_json(message): 18 | """ 19 | Takes in a ROS message and returns a JSON-formatted string. 20 | 21 | Example: 22 | ros_message = std_msgs.msg.String(data="Hello, Robot") 23 | json_message = convert_ros_message_to_json(ros_message) 24 | """ 25 | dictionary = message_converter.convert_ros_message_to_dictionary(message) 26 | json_message = json.dumps(dictionary) 27 | return json_message 28 | -------------------------------------------------------------------------------- /pr2_robot/scripts/rospy_message_converter/message_converter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Software License Agreement (BSD License) 3 | # 4 | # Copyright (c) 2013, Willow Garage, Inc. 5 | # All rights reserved. 
6 | # 7 | # Redistribution and use in source and binary forms, with or without 8 | # modification, are permitted provided that the following conditions 9 | # are met: 10 | # 11 | # * Redistributions of source code must retain the above copyright 12 | # notice, this list of conditions and the following disclaimer. 13 | # * Redistributions in binary form must reproduce the above 14 | # copyright notice, this list of conditions and the following 15 | # disclaimer in the documentation and/or other materials provided 16 | # with the distribution. 17 | # * Neither the name of Willow Garage, Inc. nor the names of its 18 | # contributors may be used to endorse or promote products derived 19 | # from this software without specific prior written permission. 20 | # 21 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 | # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 | # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 | # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 | # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 31 | # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 | # POSSIBILITY OF SUCH DAMAGE. 
33 | 34 | import roslib.message 35 | import rospy 36 | import re 37 | import base64 38 | from pprint import pprint 39 | 40 | python_to_ros_type_map = { 41 | 'bool' : ['bool'], 42 | 'int' : ['int8', 'byte', 'uint8', 'char', 43 | 'int16', 'uint16', 'int32', 'uint32', 44 | 'int64', 'uint64', 'float32', 'float64'], 45 | 'float' : ['float32', 'float64'], 46 | 'str' : ['string'], 47 | 'unicode' : ['string'], 48 | 'long' : ['uint64'] 49 | } 50 | 51 | python_primitive_types = [bool, int, long, float] 52 | python_string_types = [str, unicode] 53 | python_list_types = [list, tuple] 54 | 55 | ros_time_types = ['time', 'duration'] 56 | ros_primitive_types = ['bool', 'byte', 'char', 'int8', 'uint8', 'int16', 57 | 'uint16', 'int32', 'uint32', 'int64', 'uint64', 58 | 'float32', 'float64', 'string'] 59 | ros_header_types = ['Header', 'std_msgs/Header', 'roslib/Header'] 60 | ros_binary_types_regexp = re.compile(r'(uint8|char)\[[^\]]*\]') 61 | 62 | list_brackets = re.compile(r'\[[^\]]*\]') 63 | 64 | def convert_dictionary_to_ros_message(message_type, dictionary): 65 | """ 66 | Takes in the message type and a Python dictionary and returns a ROS message. 
67 | 68 | Example: 69 | message_type = "std_msgs/String" 70 | dict_message = { "data": "Hello, Robot" } 71 | ros_message = convert_dictionary_to_ros_message(message_type, dict_message) 72 | """ 73 | message_class = roslib.message.get_message_class(message_type) 74 | message = message_class() 75 | message_fields = dict(_get_message_fields(message)) 76 | 77 | for field_name, field_value in dictionary.items(): 78 | if field_name in message_fields: 79 | field_type = message_fields[field_name] 80 | field_value = _convert_to_ros_type(field_type, field_value) 81 | setattr(message, field_name, field_value) 82 | else: 83 | error_message = 'ROS message type "{0}" has no field named "{1}"'\ 84 | .format(message_type, field_name) 85 | raise ValueError(error_message) 86 | 87 | return message 88 | 89 | def _convert_to_ros_type(field_type, field_value): 90 | if is_ros_binary_type(field_type, field_value): 91 | field_value = _convert_to_ros_binary(field_type, field_value) 92 | elif field_type in ros_time_types: 93 | field_value = _convert_to_ros_time(field_type, field_value) 94 | elif field_type in ros_primitive_types: 95 | field_value = _convert_to_ros_primitive(field_type, field_value) 96 | elif _is_field_type_an_array(field_type): 97 | field_value = _convert_to_ros_array(field_type, field_value) 98 | else: 99 | field_value = convert_dictionary_to_ros_message(field_type, field_value) 100 | 101 | return field_value 102 | 103 | def _convert_to_ros_binary(field_type, field_value): 104 | binary_value_as_string = field_value 105 | if type(field_value) in python_string_types: 106 | binary_value_as_string = base64.standard_b64decode(field_value) 107 | else: 108 | binary_value_as_string = str(bytearray(field_value)) 109 | 110 | return binary_value_as_string 111 | 112 | def _convert_to_ros_time(field_type, field_value): 113 | time = None 114 | 115 | if field_type == 'time' and field_value == 'now': 116 | time = rospy.get_rostime() 117 | else: 118 | if field_type == 'time': 119 | time = 
rospy.rostime.Time() 120 | elif field_type == 'duration': 121 | time = rospy.rostime.Duration() 122 | if 'secs' in field_value: 123 | setattr(time, 'secs', field_value['secs']) 124 | if 'nsecs' in field_value: 125 | setattr(time, 'nsecs', field_value['nsecs']) 126 | 127 | return time 128 | 129 | def _convert_to_ros_primitive(field_type, field_value): 130 | return field_value 131 | 132 | def _convert_to_ros_array(field_type, list_value): 133 | list_type = list_brackets.sub('', field_type) 134 | return [_convert_to_ros_type(list_type, value) for value in list_value] 135 | 136 | def convert_ros_message_to_dictionary(message): 137 | """ 138 | Takes in a ROS message and returns a Python dictionary. 139 | 140 | Example: 141 | ros_message = std_msgs.msg.String(data="Hello, Robot") 142 | dict_message = convert_ros_message_to_dictionary(ros_message) 143 | """ 144 | dictionary = {} 145 | message_fields = _get_message_fields(message) 146 | for field_name, field_type in message_fields: 147 | field_value = getattr(message, field_name) 148 | dictionary[field_name] = _convert_from_ros_type(field_type, field_value) 149 | 150 | return dictionary 151 | 152 | def _convert_from_ros_type(field_type, field_value): 153 | if is_ros_binary_type(field_type, field_value): 154 | field_value = _convert_from_ros_binary(field_type, field_value) 155 | elif field_type in ros_time_types: 156 | field_value = _convert_from_ros_time(field_type, field_value) 157 | elif field_type in ros_primitive_types: 158 | field_value = field_value 159 | elif _is_field_type_an_array(field_type): 160 | field_value = _convert_from_ros_array(field_type, field_value) 161 | else: 162 | field_value = convert_ros_message_to_dictionary(field_value) 163 | 164 | return field_value 165 | 166 | 167 | def is_ros_binary_type(field_type, field_value): 168 | """ Checks if the field is a binary array one, fixed size or not 169 | 170 | is_ros_binary_type("uint8", 42) 171 | >>> False 172 | is_ros_binary_type("uint8[]", [42, 18]) 173 | 
>>> True 174 | is_ros_binary_type("uint8[3]", [42, 18, 21] 175 | >>> True 176 | is_ros_binary_type("char", 42) 177 | >>> False 178 | is_ros_binary_type("char[]", [42, 18]) 179 | >>> True 180 | is_ros_binary_type("char[3]", [42, 18, 21] 181 | >>> True 182 | """ 183 | return re.search(ros_binary_types_regexp, field_type) is not None 184 | 185 | def _convert_from_ros_binary(field_type, field_value): 186 | field_value = base64.standard_b64encode(field_value) 187 | return field_value 188 | 189 | def _convert_from_ros_time(field_type, field_value): 190 | field_value = { 191 | 'secs' : field_value.secs, 192 | 'nsecs' : field_value.nsecs 193 | } 194 | return field_value 195 | 196 | def _convert_from_ros_primitive(field_type, field_value): 197 | return field_value 198 | 199 | def _convert_from_ros_array(field_type, field_value): 200 | list_type = list_brackets.sub('', field_type) 201 | return [_convert_from_ros_type(list_type, value) for value in field_value] 202 | 203 | def _get_message_fields(message): 204 | return zip(message.__slots__, message._slot_types) 205 | 206 | def _is_field_type_an_array(field_type): 207 | return list_brackets.search(field_type) is not None 208 | -------------------------------------------------------------------------------- /pr2_robot/src/pr2_cloud_transformer.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | * Copyright (C) 2017 Udacity Inc. 3 | * 4 | * This file is part of Robotic Arm: Pick and Place project for Udacity 5 | * Robotics nano-degree program 6 | * 7 | * All Rights Reserved. 
8 | ******************************************************************************/ 9 | 10 | // Author: Harsh Pandya 11 | 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | /* 25 | * Brief: 26 | * This node transforms point cloud from /camera_link frame to /world frame with noise 27 | */ 28 | 29 | class CloudTransformer 30 | { 31 | public: 32 | explicit CloudTransformer(ros::NodeHandle nh) 33 | : nh_(nh), 34 | demo_(0) 35 | { 36 | // Define Publishers and Subscribers here 37 | pcl_sub_ = nh_.subscribe("/camera/depth_registered/points", 1, &CloudTransformer::pclCallback, this); 38 | pcl_pub_ = nh_.advertise("/pr2/world/points", 1); 39 | 40 | buffer_.reset(new sensor_msgs::PointCloud2); 41 | buffer_->header.frame_id = "world"; 42 | 43 | if(ros::param::get("/pr2_cloud_transformer/demo", demo_)) 44 | { 45 | ROS_INFO_STREAM("Demo flag: "<::Ptr cloud (new pcl::PointCloud); 76 | pcl::PointCloud::Ptr cloud_filtered (new pcl::PointCloud); 77 | pcl::PointCloud::Ptr cloud_out (new pcl::PointCloud); 78 | sensor_msgs::PointCloud2::Ptr noisy_buffer_ (new sensor_msgs::PointCloud2); 79 | 80 | pcl::fromROSMsg (*buffer_, *cloud); 81 | 82 | // Create the filtering object 83 | pcl::VoxelGrid sor; 84 | sor.setInputCloud (cloud); 85 | sor.setLeafSize (0.02, 0.02, 0.02); 86 | sor.filter (*cloud_out); 87 | 88 | cloud_filtered->points.resize (cloud_out->points.size ()); 89 | cloud_filtered->header = cloud_out->header; 90 | cloud_filtered->width = cloud_out->width; 91 | cloud_filtered->height = cloud_out->height; 92 | 93 | 94 | boost::mt19937 rng; 95 | rng.seed (static_cast (time (0))); 96 | boost::normal_distribution<> nd (0, standard_deviation); 97 | boost::variate_generator > var_nor (rng, nd); 98 | 99 | for (size_t i = 0; i < cloud_out->points.size (); ++i) 100 | { 101 | cloud_filtered->points[i].x = cloud_out->points[i].x + static_cast (var_nor ()); 102 | 
cloud_filtered->points[i].y = cloud_out->points[i].y + static_cast (var_nor ()); 103 | cloud_filtered->points[i].z = cloud_out->points[i].z + static_cast (var_nor ()); 104 | cloud_filtered->points[i].r = cloud_out->points[i].r; 105 | cloud_filtered->points[i].g = cloud_out->points[i].g; 106 | cloud_filtered->points[i].b = cloud_out->points[i].b; 107 | } 108 | 109 | 110 | *cloud_out = *cloud_filtered + *cloud; 111 | 112 | pcl::toROSMsg (*cloud_out, *noisy_buffer_); 113 | 114 | pcl_pub_.publish(noisy_buffer_); 115 | } 116 | } 117 | }; // End of class CloudTransformer 118 | 119 | int main(int argc, char **argv) 120 | { 121 | ros::init(argc, argv, "pr2_point_cloud_tf"); 122 | ros::NodeHandle nh; 123 | 124 | CloudTransformer tranform_cloud(nh); 125 | 126 | // Spin until ROS is shutdown 127 | while (ros::ok()) 128 | ros::spin(); 129 | 130 | return 0; 131 | } 132 | -------------------------------------------------------------------------------- /pr2_robot/srv/Grasp.srv: -------------------------------------------------------------------------------- 1 | # request 2 | std_msgs/String object_name 3 | std_msgs/Int32 test_scene_num 4 | --- 5 | # response 6 | geometry_msgs/Pose grasp_pose 7 | -------------------------------------------------------------------------------- /pr2_robot/srv/PickPlace.srv: -------------------------------------------------------------------------------- 1 | # request 2 | std_msgs/Int32 test_scene_num 3 | std_msgs/String object_name 4 | std_msgs/String arm_name 5 | geometry_msgs/Pose pick_pose 6 | geometry_msgs/Pose place_pose 7 | --- 8 | # response 9 | bool success 10 | -------------------------------------------------------------------------------- /pr2_robot/urdf/common.xacro: -------------------------------------------------------------------------------- 1 | 2 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 
44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /pr2_robot/urdf/materials.urdf.xacro: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /pr2_robot/urdf/pr2.gazebo.xacro: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | /pr2 6 | gazebo_ros_control/DefaultRobotHWSim 7 | 8 | 9 | 10 | 11 | 12 | 13 | true 14 | false 15 | 15.0 16 | 17 | 1.5 18 | 19 | 20 | B8G8R8 21 | 1024 22 | 576 23 | 24 | 25 | 0.01 26 | 9 27 | 28 | 29 | 30 | 0.1 31 | true 32 | 15.0 33 | camera 34 | /camera/rgb/image_raw 35 | /camera/rgb/camera_info 36 | /camera/depth_registered/image_raw 37 | /camera/depth_registered/camera_info 38 | /camera/depth_registered/points 39 | camera_rgb_optical_frame 40 | 0.5 41 | 4.5 42 | 0 43 | 0 44 | 0 45 | 0 46 | 0 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | pr2_right_arm 56 | right_wrist_roll_link 57 | right_right_gripper_finger_link 58 | right_left_gripper_finger_link 59 | 60 | 61 | pr2_left_arm 62 | left_wrist_roll_link 63 | left_right_gripper_finger_link 64 | left_left_gripper_finger_link 65 | 66 | 100 67 | 4 68 | 4 69 | 8 70 | 0.003 71 | false 72 | __default_topic__ 73 | 74 | 75 | 76 | 77 | 78 | false 79 | 80 | 81 | 82 | 83 | true 84 | 85 | 86 | 87 | 88 | 89 | 90 | true 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | true 100 | 101 | 102 | 103 | 104 | 105 | 106 | true 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | true 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | true 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | true 132 | 133 | 134 | true 135 | 136 | 137 | 138 | 139 
| true 140 | 141 | 142 | 143 | 144 | 145 | 146 | true 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | true 156 | 157 | 158 | 159 | 160 | 161 | true 162 | 163 | 164 | 165 | 166 | 167 | 168 | true 169 | 170 | 171 | true 172 | 173 | 174 | 175 | true 176 | 177 | 178 | 179 | 180 | 181 | 182 | true 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | true 191 | 192 | 193 | 194 | 195 | 196 | true 197 | 198 | 199 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | Gazebo/DarkGrey 207 | true 208 | 0.2 209 | 0.2 210 | 211 | 212 | Gazebo/DarkGrey 213 | true 214 | 215 | 216 | Gazebo/DarkGrey 217 | true 218 | 1000000.0 219 | 1.0 220 | 30.0 221 | 30.0 222 | 223 | 224 | 225 | 226 | Gazebo/DarkGrey 227 | 228 | 229 | 230 | -------------------------------------------------------------------------------- /pr2_robot/urdf/sensor.urdf.xacro: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /writeup_template.md: -------------------------------------------------------------------------------- 1 | ## Project: Perception Pick & Place 2 | ### Writeup Template: You can use this file as a template for your writeup if you want to submit it as a markdown file, but feel free to use some other method and submit a pdf if you prefer. 3 | 4 | --- 5 | 6 | 7 | # Required Steps for a Passing Submission: 8 | 1. Extract features and train an SVM model on new objects (see `pick_list_*.yaml` in `/pr2_robot/config/` for the list of models you'll be trying to identify). 9 | 2. Write a ROS node and subscribe to `/pr2/world/points` topic. This topic contains noisy point cloud data that you must work with. 10 | 3. 
Use filtering and RANSAC plane fitting to isolate the objects of interest from the rest of the scene. 11 | 4. Apply Euclidean clustering to create separate clusters for individual items. 12 | 5. Perform object recognition on these objects and assign them labels (markers in RViz). 13 | 6. Calculate the centroid (average in x, y and z) of the set of points belonging to each object. 14 | 7. Create ROS messages containing the details of each object (name, pick_pose, etc.) and write these messages out to `.yaml` files, one for each of the 3 scenarios (`test1-3.world` in `/pr2_robot/worlds/`). [See the example `output.yaml` for details on what the output should look like.](https://github.com/udacity/RoboND-Perception-Project/blob/master/pr2_robot/config/output.yaml) 15 | 8. Submit a link to your GitHub repo for the project or the Python code for your perception pipeline and your output `.yaml` files (3 `.yaml` files, one for each test world). You must have correctly identified 100% of objects from `pick_list_1.yaml` for `test1.world`, 80% of items from `pick_list_2.yaml` for `test2.world` and 75% of items from `pick_list_3.yaml` in `test3.world`. 16 | 9. Congratulations! You're Done! 17 | 18 | # Extra Challenges: Complete the Pick & Place 19 | 7. To create a collision map, publish a point cloud to the `/pr2/3d_map/points` topic and make sure you change the `point_cloud_topic` to `/pr2/3d_map/points` in `sensors.yaml` in the `/pr2_robot/config/` directory. This topic is read by Moveit!, which uses this point cloud input to generate a collision map, allowing the robot to plan its trajectory. Keep in mind that later when you go to pick up an object, you must first remove it from this point cloud so it is removed from the collision map! 20 | 8. Rotate the robot to generate a collision map of the table sides. This can be accomplished by publishing a joint angle value (in radians) to `/pr2/world_joint_controller/command` 21 | 9. Rotate the robot back to its original state. 22 | 10. 
Create a ROS Client for the “pick_place_routine” rosservice. In the required steps above, you already created the messages you need to use this service. Check out the [PickPlace.srv](https://github.com/udacity/RoboND-Perception-Project/tree/master/pr2_robot/srv) file to find out what arguments you must pass to this service. 23 | 11. If everything was done correctly, when you pass the appropriate messages to the `pick_place_routine` service, the selected arm will perform a pick and place operation and display the trajectory in the RViz window 24 | 12. Place all the objects from your pick list in their respective dropoff box and you have completed the challenge! 25 | 13. Looking for a bigger challenge? Load up the `challenge.world` scenario and see if you can get your perception pipeline working there! 26 | 27 | ## [Rubric](https://review.udacity.com/#!/rubrics/1067/view) Points 28 | ### Here I will consider the rubric points individually and describe how I addressed each point in my implementation. 29 | 30 | --- 31 | ### Writeup / README 32 | 33 | #### 1. Provide a Writeup / README that includes all the rubric points and how you addressed each one. You can submit your writeup as markdown or pdf. 34 | 35 | You're reading it! 36 | 37 | ### Exercise 1, 2 and 3 pipeline implemented 38 | #### 1. Complete Exercise 1 steps. Pipeline for filtering and RANSAC plane fitting implemented. 39 | 40 | #### 2. Complete Exercise 2 steps: Pipeline including clustering for segmentation implemented. 41 | 42 | #### 3. Complete Exercise 3 Steps. Features extracted and SVM trained. Object recognition implemented. 43 | Here is an example of how to include an image in your writeup. 44 | 45 | ![demo-1](https://user-images.githubusercontent.com/20687560/28748231-46b5b912-7467-11e7-8778-3095172b7b19.png) 46 | 47 | ### Pick and Place Setup 48 | 49 | #### 1. For all three tabletop setups (`test*.world`), perform object recognition, then read in respective pick list (`pick_list_*.yaml`). 
Next construct the messages that would comprise a valid `PickPlace` request and output them to `.yaml` format. 50 | 51 | And here's another image! 52 | ![demo-2](https://user-images.githubusercontent.com/20687560/28748286-9f65680e-7468-11e7-83dc-f1a32380b89c.png) 53 | 54 | Spend some time at the end to discuss your code, what techniques you used, what worked and why, where the implementation might fail and how you might improve it if you were going to pursue this project further. 55 | 56 | 57 | 58 | --------------------------------------------------------------------------------