├── CMakeLists.txt
├── LICENSE
├── README.md
├── config
│   ├── CR5_Project.rviz
│   ├── Robot_pointcloud.rviz
│   └── Starting_command.txt
├── find object
│   └── bin
│       ├── session 1.bin
│       ├── session 10.bin
│       ├── session 2.bin
│       ├── session 3.bin
│       ├── session 7.bin
│       ├── session 8.bin
│       └── session 9.bin
├── launch
│   └── CR5_with_realsense.launch
├── msg
│   ├── ObjectMsg.msg
│   └── object_position.msg
├── package.xml
├── pic
│   ├── Fig.1.jpg
│   ├── Fig.10.jpg
│   ├── Fig.11.jpg
│   ├── Fig.12.jpg
│   ├── Fig.13.jpg
│   ├── Fig.14.jpg
│   ├── Fig.15.jpg
│   ├── Fig.16.jpg
│   ├── Fig.17.jpg
│   ├── Fig.18.jpg
│   ├── Fig.2.jpg
│   ├── Fig.3.jpg
│   ├── Fig.4.jpg
│   ├── Fig.5(a).jpg
│   ├── Fig.5(b).jpg
│   ├── Fig.6.jpg
│   ├── Fig.7.jpg
│   ├── Fig.8.jpg
│   └── Fig.9.jpg
└── src
    ├── dev
    │   ├── compute_picking.py
    │   ├── node_order0.0.py
    │   ├── node_order1.0.py
    │   ├── node_order2.0.py
    │   ├── read_point2.py
    │   ├── read_point3.py
    │   ├── realsense_yolo.py
    │   ├── service_call_old.cpp
    │   ├── t_yolo_ros.py
    │   ├── t_yolo_tf.py
    │   ├── t_yolo_tf_2.py
    │   ├── t_yolo_tf_order.py
    │   ├── test2_rotation.py
    │   ├── test_compute.py
    │   ├── test_rotation.py
    │   ├── test_yolo_order.py
    │   └── tf_listener.cpp
    ├── main_control.cpp
    ├── node_order.py
    ├── service_call.cpp
    └── yolo_order.py
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.0.2)
2 | project(CR5_Project)
3 |
4 | ## Compile as C++11, supported in ROS Kinetic and newer
5 | # add_compile_options(-std=c++11)
6 |
7 | ## Find catkin macros and libraries
8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
9 | ## is used, also find other catkin packages
10 | find_package(catkin REQUIRED COMPONENTS
11 | roscpp
12 | rospy
13 | std_msgs
14 | message_generation
15 | tf
16 | )
17 |
18 | ## System dependencies are found with CMake's conventions
19 | # find_package(Boost REQUIRED COMPONENTS system)
20 |
21 |
22 | ## Uncomment this if the package has a setup.py. This macro ensures
23 | ## modules and global scripts declared therein get installed
24 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
25 | # catkin_python_setup()
26 |
27 | ################################################
28 | ## Declare ROS messages, services and actions ##
29 | ################################################
30 |
31 | ## To declare and build messages, services or actions from within this
32 | ## package, follow these steps:
33 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
34 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
35 | ## * In the file package.xml:
36 | ## * add a build_depend tag for "message_generation"
37 | ## * add a build_depend and an exec_depend tag for each package in MSG_DEP_SET
38 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
39 | ## but can be declared for certainty nonetheless:
40 | ## * add an exec_depend tag for "message_runtime"
41 | ## * In this file (CMakeLists.txt):
42 | ## * add "message_generation" and every package in MSG_DEP_SET to
43 | ## find_package(catkin REQUIRED COMPONENTS ...)
44 | ## * add "message_runtime" and every package in MSG_DEP_SET to
45 | ## catkin_package(CATKIN_DEPENDS ...)
46 | ## * uncomment the add_*_files sections below as needed
47 | ## and list every .msg/.srv/.action file to be processed
48 | ## * uncomment the generate_messages entry below
49 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
50 |
51 | ## Generate messages in the 'msg' folder
52 | add_message_files(
53 | FILES
54 | ObjectMsg.msg
55 | )
56 |
57 | ## Generate services in the 'srv' folder
58 | # add_service_files(
59 | # FILES
60 | # Service1.srv
61 | # Service2.srv
62 | # )
63 |
64 | ## Generate actions in the 'action' folder
65 | # add_action_files(
66 | # FILES
67 | # Action1.action
68 | # Action2.action
69 | # )
70 |
71 | ## Generate added messages and services with any dependencies listed here
72 | generate_messages(
73 | DEPENDENCIES
74 | std_msgs
75 | )
76 |
77 | ################################################
78 | ## Declare ROS dynamic reconfigure parameters ##
79 | ################################################
80 |
81 | ## To declare and build dynamic reconfigure parameters within this
82 | ## package, follow these steps:
83 | ## * In the file package.xml:
84 | ## * add a build_depend and an exec_depend tag for "dynamic_reconfigure"
85 | ## * In this file (CMakeLists.txt):
86 | ## * add "dynamic_reconfigure" to
87 | ## find_package(catkin REQUIRED COMPONENTS ...)
88 | ## * uncomment the "generate_dynamic_reconfigure_options" section below
89 | ## and list every .cfg file to be processed
90 |
91 | ## Generate dynamic reconfigure parameters in the 'cfg' folder
92 | # generate_dynamic_reconfigure_options(
93 | # cfg/DynReconf1.cfg
94 | # cfg/DynReconf2.cfg
95 | # )
96 |
97 | ###################################
98 | ## catkin specific configuration ##
99 | ###################################
100 | ## The catkin_package macro generates cmake config files for your package
101 | ## Declare things to be passed to dependent projects
102 | ## INCLUDE_DIRS: uncomment this if your package contains header files
103 | ## LIBRARIES: libraries you create in this project that dependent projects also need
104 | ## CATKIN_DEPENDS: catkin packages that dependent projects also need
105 | ## DEPENDS: system dependencies of this project that dependent projects also need
106 | catkin_package(
107 | # INCLUDE_DIRS include
108 | # LIBRARIES your_package
109 | CATKIN_DEPENDS roscpp rospy std_msgs message_runtime
110 | # DEPENDS system_lib
111 | )
112 |
113 | ###########
114 | ## Build ##
115 | ###########
116 |
117 | ## Specify additional locations of header files
118 | ## Your package locations should be listed before other locations
119 | include_directories(
120 | include
121 | ${catkin_INCLUDE_DIRS}
122 | )
123 |
124 | ## Declare a C++ library
125 | # add_library(${PROJECT_NAME}
126 | # src/${PROJECT_NAME}/your_package.cpp
127 | # )
128 |
129 | ## Add cmake target dependencies of the library
130 | ## as an example, code may need to be generated before libraries
131 | ## either from message generation or dynamic reconfigure
132 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
133 |
134 | ## Declare a C++ executable
135 | ## With catkin_make all packages are built within a single CMake context
136 | ## The recommended prefix ensures that target names across packages don't collide
137 | # add_executable(${PROJECT_NAME}_node src/your_package_node.cpp)
138 |
139 | ## Rename C++ executable without prefix
140 | ## The above recommended prefix causes long target names, the following renames the
141 | ## target back to the shorter version for ease of user use
142 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
143 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
144 |
145 | ## Add cmake target dependencies of the executable
146 | ## same as for the library above
147 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
148 |
149 | ## Specify libraries to link a library or executable target against
150 | # target_link_libraries(${PROJECT_NAME}_node
151 | # ${catkin_LIBRARIES}
152 | # )
153 |
154 | #add_executable(main_control src/main_control.cpp)
155 | #target_link_libraries(main_control ${catkin_LIBRARIES})
156 | #add_dependencies(main_control beginner_tutorials_generate_messages_cpp)
157 |
158 | add_executable(service_call src/service_call.cpp)
159 | target_link_libraries(service_call ${catkin_LIBRARIES})
160 | add_dependencies(service_call ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) ## build generated message headers before this target
161 |
162 | #add_executable(tf_listener src/tf_listener.cpp)
163 | #target_link_libraries(tf_listener ${catkin_LIBRARIES})
164 |
165 | #############
166 | ## Install ##
167 | #############
168 |
169 | # all install targets should use catkin DESTINATION variables
170 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
171 |
172 | ## Mark executable scripts (Python etc.) for installation
173 | ## in contrast to setup.py, you can choose the destination
174 | catkin_install_python(PROGRAMS
175 | src/yolo_order.py
176 | src/node_order.py
177 | DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
178 | )
179 |
180 | ## Mark executables for installation
181 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
182 | # install(TARGETS ${PROJECT_NAME}_node
183 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
184 | # )
185 |
186 | ## Mark libraries for installation
187 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
188 | # install(TARGETS ${PROJECT_NAME}
189 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
190 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
191 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
192 | # )
193 |
194 | ## Mark cpp header files for installation
195 | # install(DIRECTORY include/${PROJECT_NAME}/
196 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
197 | # FILES_MATCHING PATTERN "*.h"
198 | # PATTERN ".svn" EXCLUDE
199 | # )
200 |
201 | ## Mark other files for installation (e.g. launch and bag files, etc.)
202 | # install(FILES
203 | # # myfile1
204 | # # myfile2
205 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
206 | # )
207 |
208 | #############
209 | ## Testing ##
210 | #############
211 |
212 | ## Add gtest based cpp test target and link libraries
213 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_your_package.cpp)
214 | # if(TARGET ${PROJECT_NAME}-test)
215 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
216 | # endif()
217 |
218 | ## Add folders to be run by python nosetests
219 | # catkin_add_nosetests(test)
220 |
221 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2023, Thanakrit Taweesoontorn
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright notice, this
9 | list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright notice,
12 | this list of conditions and the following disclaimer in the documentation
13 | and/or other materials provided with the distribution.
14 |
15 | 3. Neither the name of the copyright holder nor the names of its
16 | contributors may be used to endorse or promote products derived from
17 | this software without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Hybrid Deep Learning and FAST–BRISK 3D Object Detection for Bin-picking Applications
2 |
3 | **Published in:** *Sensors and Materials*, Vol. 36, No. 4(2), pp. 1389–1404
4 | **DOI:** https://doi.org/10.18494/SAM4840
5 | **Published Date:** April 19, 2024
6 | **Paper ID:** S&M3606 (Special Issue Research Paper)
7 |
8 | 
9 |
10 | ## 🧠 Overview
11 |
12 | This project presents a hybrid vision-based bin-picking system for collaborative robots. It combines deep learning (YOLOv5) for coarse object detection and classical computer vision algorithms (FAST + BRISK) for precise pose estimation. The system is implemented using the Robot Operating System (ROS) and a low-cost Intel RealSense D435i depth camera in an eye-in-hand setup.
13 |
14 | **Key Features:**
15 | - Real-time object detection and 6D pose estimation
16 | - Integration with CR5 collaborative robot
17 | - High detection accuracy and sub-second processing
18 | - Cost-effective hardware setup
19 |
20 | ---
21 |
22 | ## 🔍 Abstract
23 |
24 | We propose a hybrid method for bin-picking tasks using a collaborative robot (cobot), combining the You Only Look Once version 5 (YOLOv5) convolutional neural network (CNN) model for object detection and pose estimation with traditional feature detection based on the features from accelerated segment test (FAST) technique, feature description using the binary robust invariant scalable keypoints (BRISK) algorithm, and matching algorithms. By integrating these algorithms and utilizing a low-cost depth sensor camera for capturing depth and RGB images, the system enhances real-time object detection and pose estimation speed, facilitating accurate object manipulation by the robotic arm. Furthermore, the proposed method is implemented within the robot operating system (ROS) framework to provide a seamless platform for robotic control and integration. We compared our results with those of other methodologies, highlighting the superior object detection accuracy and processing speed of our hybrid approach. This integration of robotic arm, camera, and AI technology contributes to the development of industrial robotics, opening up new possibilities for automating challenging tasks and improving overall operational efficiency.
25 |
26 | > **Keywords**: computer vision, object detection and pose estimation, bin-picking application, robot operating system
27 |
28 | ---
29 |
30 | ## 🧩 System Architecture
31 |
32 | 
33 |
34 | - Firstly, You Only Look Once Version 5 (YOLOv5), a state-of-the-art deep learning algorithm for object detection, was employed. By leveraging YOLOv5, objects could be detected in real-time streams, and the robot could be precisely moved closer to the targeted object.
35 | - Secondly, the features from accelerated segment test (FAST) and binary robust invariant scalable keypoints (BRISK) algorithms, which are feature detection, description, and matching algorithms, were utilized. As the robot approached the object, a high-resolution object image was captured by the camera. This image was suitable for the FAST and BRISK algorithms to rapidly estimate the object's pose. The combination of these techniques enabled high accuracy in picking up objects at the correct position and orientation (see the sketch below).
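
The flow of the two phases can be condensed into a short pseudocode sketch. This is purely illustrative; `detect_yolo`, `move_above`, `capture_close_up`, `estimate_pose_fast_brisk`, and `pick` are hypothetical helpers standing in for the actual ROS nodes in `src/`.

```python
# Illustrative two-phase bin-picking loop; every helper function here is hypothetical.
def bin_picking_cycle(top_view_rgb):
    # Phase 1: YOLOv5 coarsely detects objects in the top view.
    for obj in detect_yolo(top_view_rgb):
        move_above(obj.position)              # move the cobot close to the object
        close_up = capture_close_up()         # capture a high-resolution close-up
        # Phase 2: FAST + BRISK refine the 6D pose from the close-up image.
        pose = estimate_pose_fast_brisk(close_up)
        pick(pose)                            # grasp at the estimated position/orientation
```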
36 |
37 | ---
38 |
39 | ## 🖥️ Requirement
40 |
41 | ### Hardware
42 | - **Collaborative Robot**: CR5 (DOBOT)
43 | - **Camera**: Intel RealSense D435i (eye-in-hand configuration)
44 |
45 | ### Software
46 | - **OS**: Ubuntu 20.04
47 | - **Middleware**: ROS Noetic
48 |
49 | ---
50 |
51 | ## ⚙️ Installation
52 |
53 | ### 1. Clone Repositories
54 |
58 | ```sh
59 | cd $HOME/catkin_ws/src
60 | git clone https://github.com/Dobot-Arm/CR_ROS.git
61 | git clone https://github.com/introlab/find-object.git
62 | git clone https://github.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application.git
63 | ```
64 |
65 | ### 2. Install Realsense-ROS
66 |
67 | Follow the installation instructions here:
68 | https://github.com/monkeyrom/realsense-ros
69 |
70 | ### 3. Build Workspace
71 | ```sh
72 | cd $HOME/catkin_ws
73 | catkin build
74 | ```
75 |
76 | ### 4. Set Environment Variables
77 | ```sh
78 | echo "export DOBOT_TYPE=cr5" >> ~/.bashrc
79 | source ~/.bashrc
80 | source $HOME/catkin_ws/devel/setup.bash
81 | ```
82 |
83 | ---
84 |
85 | ## 🚀 Launch the System
86 |
87 | * Connect the robotic arm with the following command; the default robot_ip is 192.168.1.6.
88 |
89 | ### 1. Start Robot and Camera
90 |
91 | ```sh
92 | roslaunch CR5_Project CR5_with_realsense.launch
93 | ```
94 |
95 | This will:
96 | - Launch CR5 robot control
97 | - Stream RGB-D images
98 | - Run 3D object detection node and 3D Visualization
99 | - Synchronize TF transforms
100 |
101 | #### RViz display
102 |
103 | 
104 |
105 | RViz, the ROS visualization tool, displays the object link, the robot model with the camera link, the point cloud, the depth image, and the detected object image.
106 |
107 | ### 2. Launch YOLOv5 Node
108 |
109 | ```sh
110 | rosrun CR5_Project yolo_order.py
111 | ```
112 |
113 | YOLOv5 detects objects in the scene and sends coordinates to the robot.
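
A minimal sketch of what such a node can look like, assuming the model is loaded through `torch.hub` and images arrive on the standard realsense-ros color topic; this is illustrative, not the repository's actual `yolo_order.py`:

```python
#!/usr/bin/env python3
# Sketch of a YOLOv5 detection node (illustrative; not the repository's yolo_order.py).
import rospy
import torch
from sensor_msgs.msg import Image
from cv_bridge import CvBridge

model = torch.hub.load('ultralytics/yolov5', 'yolov5l')  # pretrained YOLOv5l
bridge = CvBridge()

def on_image(msg):
    frame = bridge.imgmsg_to_cv2(msg, 'rgb8')  # YOLOv5 expects RGB input
    results = model(frame)
    for *xyxy, conf, cls in results.xyxy[0].tolist():
        cx = (xyxy[0] + xyxy[2]) / 2.0  # bounding-box centre in pixels
        cy = (xyxy[1] + xyxy[3]) / 2.0
        rospy.loginfo('class %d at (%.0f, %.0f), confidence %.2f', int(cls), cx, cy, conf)
        # The real node also reads the depth at (cx, cy) and converts the pixel
        # to a 3D position in the robot frame before commanding a move.

rospy.init_node('yolo_detector')
rospy.Subscriber('/camera/color/image_raw', Image, on_image)
rospy.spin()
```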
114 |
115 | 
116 |
117 | ---
118 |
119 | ## 🎯 Object Detection & Pose Estimation
120 |
121 | ### 🟢 Phase 1: YOLOv5 Detection
122 | - Trained with a custom dataset on Roboflow (1431 images, YOLOv5l, 100 epochs)
123 | - Output: 2D bounding boxes and object class
124 |
125 | 
126 |
127 | To train the YOLOv5 model, a dataset was prepared using Roboflow. The dataset was carefully labelled, and various augmentation techniques were applied to enhance its diversity and quality. The YOLOv5l model was then trained for 100 epochs with a batch size of 16 on a training dataset of 1431 images.
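
For reference, this corresponds to the standard YOLOv5 training invocation; the dataset path below is a placeholder for the Roboflow export, and the 640 px image size is the YOLOv5 default rather than a value stated here:

```sh
python train.py --img 640 --batch 16 --epochs 100 \
    --data path/to/dataset/data.yaml --weights yolov5l.pt
```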
128 |
129 | .jpg)
130 |
131 | ## 🔵 Phase 2: FAST + BRISK for Pose Refinement
132 |
133 | ### Feature detector — FAST
134 |
135 | The algorithm utilizes a circle consisting of 16 pixels, labelled clockwise from 1 to 16, within which a candidate point p is assessed, as shown in Fig. 6. By comparing the intensities of the pixels on the circle with that of the candidate pixel (I_p) against a threshold value (t), FAST applies the following conditions for corner classification:
136 |
137 | - Condition 1: If a set S of N contiguous pixels within the circle satisfies ∀x ∈ S : I_x > I_p + t, the candidate point is classified as a corner.
138 | - Condition 2: If a set S of N contiguous pixels within the circle satisfies ∀x ∈ S : I_x < I_p − t, the candidate point is classified as a corner.
140 |
141 | 
142 |
143 | The selection of appropriate values for N and the threshold (t) involves a trade-off between the number of detected corner points and computational efficiency; typically, N is set to 12. Additionally, a high-speed test can be applied to exclude non-corner points, further improving the algorithm’s overall performance. FAST’s rapid intensity comparison process enables real-time corner keypoint detection with high accuracy and efficiency.
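
As a concrete reference, OpenCV exposes this detector directly. A minimal sketch (the threshold value and file name are arbitrary placeholders, not values from this project):

```python
import cv2

# Detect FAST corners on a grayscale close-up image ('close_up.jpg' is a placeholder).
img = cv2.imread('close_up.jpg', cv2.IMREAD_GRAYSCALE)
fast = cv2.FastFeatureDetector_create(threshold=25, nonmaxSuppression=True)
keypoints = fast.detect(img, None)
print('%d corner keypoints detected' % len(keypoints))
```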
144 |
145 | ### Feature descriptor — BRISK
146 |
147 | Once keypoints are detected, it is essential to describe their visual attributes in a compact and distinctive manner. This is where feature descriptors come into play. In this research, the BRISK algorithm is utilized as the feature descriptor. BRISK captures the unique characteristics of keypoints by encoding their local appearance, texture, and shape information into a compact binary representation.
148 |
149 | ### Feature matching
150 |
151 | Feature matching techniques are used to establish correspondence between keypoints in the reference and query images. The brute force strategy is applied for nearest neighbour matching, wherein the feature descriptors are compared to find suitable matches. Homography estimation techniques are then employed to determine the geometric transformation between the images. This estimation accurately calculates the object’s pose, including its position and orientation.
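
Putting the detect-describe-match-estimate chain together, a minimal OpenCV sketch (image names and the RANSAC reprojection threshold are placeholders, not values taken from this project):

```python
import cv2
import numpy as np

# Reference (template) and query (scene) images; file names are placeholders.
ref = cv2.imread('reference.jpg', cv2.IMREAD_GRAYSCALE)
query = cv2.imread('query.jpg', cv2.IMREAD_GRAYSCALE)

# FAST detects corner keypoints; BRISK describes them as binary strings.
fast = cv2.FastFeatureDetector_create(threshold=25)
brisk = cv2.BRISK_create()
kp_ref, des_ref = brisk.compute(ref, fast.detect(ref, None))
kp_query, des_query = brisk.compute(query, fast.detect(query, None))

# Brute-force Hamming matching with cross-checking for binary descriptors.
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des_ref, des_query), key=lambda m: m.distance)

# Homography estimation (RANSAC) gives the geometric transformation between
# the images, from which the object's position and orientation are derived.
src = np.float32([kp_ref[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst = np.float32([kp_query[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
H, inlier_mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
print('Estimated homography:\n', H)
```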
152 |
153 | 
154 |
155 | By sequentially employing these techniques, the system excels in object detection and pose estimation. The initial top-view detection provides comprehensive workspace coverage, while the subsequent close-up detection offers detailed information for precise pose estimation. This seamless integration within a ROS framework ensures efficient object detection and pose estimation, as depicted by the 6D object link.
156 |
157 | ## 📐 Robot Pose Calculation
158 |
159 | - Pose calculated from camera frame to robot base using transformation matrices in ROS
160 | - Offset applied for precise vertical grasping
161 | - Visualized in RViz with full robot and object models
162 |
163 | 
164 |
165 | Within the ROS framework, the object link was established with reference to the camera link integrated into the robot’s URDF file. This connection allowed the object’s position to be derived continuously from the robot’s base link, enabling accurate visualization of both the robot and the object within RViz.
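
A condensed sketch of this lookup, using the frame names from the RViz config and the 54 mm vertical tool offset that appears in `src/dev/node_order0.0.py` (the topic name is an assumption):

```python
import rospy
import tf
from CR5_Project.msg import ObjectMsg

rospy.init_node('grasp_pose_demo')
listener = tf.TransformListener()
pub = rospy.Publisher('target_pose', ObjectMsg, queue_size=1)  # topic name assumed

# Wait for the detected object frame to become available relative to the robot base.
listener.waitForTransform('base_link', 'obj_arduino', rospy.Time(), rospy.Duration(4.0))
(trans, rot) = listener.lookupTransform('base_link', 'obj_arduino', rospy.Time(0))

target = ObjectMsg()
target.x = trans[0] * 1000.0           # metres -> millimetres
target.y = trans[1] * 1000.0
target.z = trans[2] * 1000.0 + 54.0    # vertical offset so the gripper stops above the part
target.rx, target.ry, target.rz = 180.0, 0.0, 180.0  # top-down grasp orientation
pub.publish(target)
```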
166 |
167 | ## 🤖 Real-world Execution
168 |
169 | Run the main control node:
170 |
171 | ```sh
172 | rosrun CR5_Project service_call
173 | ```
174 |
175 | This will:
176 | - Spawn a terminal and run the listener node (robot feedback)
177 | - Launch main_order (control logic)
178 |
179 | 
180 |
181 | In the experimental results and comparison, our hybrid method showed impressive performance. In terms of object detection, our approach achieved an AP50:95 score of 96.5%, underscoring its capability to accurately detect objects at any orientation, and a pose estimation accuracy of 94.7% within an angle error of 5°. Additionally, our hybrid method demonstrated efficient processing, with a detection and pose estimation time of merely 0.32 s, indicating its suitability for real-time applications.
182 |
183 | ## ✅ Conclusion
184 |
185 | This project demonstrates a low-cost, accurate, and real-time bin-picking solution by combining the power of deep learning and classical computer vision. Using YOLOv5 and FAST-BRISK within a ROS-based framework, our system offers precise object detection and pose estimation with minimal hardware requirements. It’s a practical and scalable solution for industrial automation in manufacturing and logistics.
186 |
187 | 
188 |
189 | # References
190 | - **CR_ROS**: https://github.com/Dobot-Arm/CR_ROS
191 | - **Intel Realsense**: https://github.com/IntelRealSense/librealsense
192 | - **find-object**: https://github.com/introlab/find-object
193 | - **YOLOv5**: https://github.com/ultralytics/yolov5
194 |
--------------------------------------------------------------------------------
/config/CR5_Project.rviz:
--------------------------------------------------------------------------------
1 | Panels:
2 | - Class: rviz/Displays
3 | Help Height: 78
4 | Name: Displays
5 | Property Tree Widget:
6 | Expanded:
7 | - /Global Options1
8 | Splitter Ratio: 0.5
9 | Tree Height: 719
10 | - Class: rviz/Selection
11 | Name: Selection
12 | - Class: rviz/Tool Properties
13 | Expanded:
14 | - /2D Pose Estimate1
15 | - /2D Nav Goal1
16 | - /Publish Point1
17 | Name: Tool Properties
18 | Splitter Ratio: 0.5886790156364441
19 | - Class: rviz/Views
20 | Expanded:
21 | - /Current View1
22 | Name: Views
23 | Splitter Ratio: 0.5
24 | - Class: rviz/Time
25 | Name: Time
26 | SyncMode: 0
27 | SyncSource: ""
28 | Preferences:
29 | PromptSaveOnExit: true
30 | Toolbars:
31 | toolButtonStyle: 2
32 | Visualization Manager:
33 | Class: ""
34 | Displays:
35 | - Alpha: 0.5
36 | Cell Size: 1
37 | Class: rviz/Grid
38 | Color: 160; 160; 164
39 | Enabled: true
40 | Line Style:
41 | Line Width: 0.029999999329447746
42 | Value: Lines
43 | Name: Grid
44 | Normal Cell Count: 0
45 | Offset:
46 | X: 0
47 | Y: 0
48 | Z: 0
49 | Plane: XY
50 | Plane Cell Count: 10
51 | Reference Frame:
52 | Value: true
53 | - Alpha: 1
54 | Autocompute Intensity Bounds: true
55 | Autocompute Value Bounds:
56 | Max Value: 10
57 | Min Value: -10
58 | Value: true
59 | Axis: Z
60 | Channel Name: intensity
61 | Class: rviz/PointCloud2
62 | Color: 255; 255; 255
63 | Color Transformer: RGB8
64 | Decay Time: 0
65 | Enabled: true
66 | Invert Rainbow: false
67 | Max Color: 255; 255; 255
68 | Min Color: 0; 0; 0
69 | Name: PointCloud2
70 | Position Transformer: XYZ
71 | Queue Size: 10
72 | Selectable: true
73 | Size (Pixels): 3
74 | Size (m): 0.009999999776482582
75 | Style: Flat Squares
76 | Topic: /camera/depth/color/points
77 | Unreliable: false
78 | Use Fixed Frame: true
79 | Use rainbow: true
80 | Value: true
81 | - Alpha: 1
82 | Class: rviz/RobotModel
83 | Collision Enabled: false
84 | Enabled: true
85 | Links:
86 | All Links Enabled: true
87 | Expand Joint Details: false
88 | Expand Link Details: false
89 | Expand Tree: false
90 | Link Tree Style: Links in Alphabetic Order
91 | Link1:
92 | Alpha: 1
93 | Show Axes: false
94 | Show Trail: false
95 | Value: true
96 | Link2:
97 | Alpha: 1
98 | Show Axes: false
99 | Show Trail: false
100 | Value: true
101 | Link3:
102 | Alpha: 1
103 | Show Axes: false
104 | Show Trail: false
105 | Value: true
106 | Link4:
107 | Alpha: 1
108 | Show Axes: false
109 | Show Trail: false
110 | Value: true
111 | Link5:
112 | Alpha: 1
113 | Show Axes: false
114 | Show Trail: false
115 | Value: true
116 | Link6:
117 | Alpha: 1
118 | Show Axes: false
119 | Show Trail: false
120 | Value: true
121 | base_link:
122 | Alpha: 1
123 | Show Axes: false
124 | Show Trail: false
125 | Value: true
126 | camera_link:
127 | Alpha: 1
128 | Show Axes: false
129 | Show Trail: false
130 | Value: true
131 | dummy_link:
132 | Alpha: 1
133 | Show Axes: false
134 | Show Trail: false
135 | Name: RobotModel
136 | Robot Description: robot_description
137 | TF Prefix: ""
138 | Update Interval: 0
139 | Value: true
140 | Visual Enabled: true
141 | Enabled: true
142 | Global Options:
143 | Background Color: 48; 48; 48
144 | Default Light: true
145 | Fixed Frame: base_link
146 | Frame Rate: 30
147 | Name: root
148 | Tools:
149 | - Class: rviz/Interact
150 | Hide Inactive Objects: true
151 | - Class: rviz/MoveCamera
152 | - Class: rviz/Select
153 | - Class: rviz/FocusCamera
154 | - Class: rviz/Measure
155 | - Class: rviz/SetInitialPose
156 | Theta std deviation: 0.2617993950843811
157 | Topic: /initialpose
158 | X std deviation: 0.5
159 | Y std deviation: 0.5
160 | - Class: rviz/SetGoal
161 | Topic: /move_base_simple/goal
162 | - Class: rviz/PublishPoint
163 | Single click: true
164 | Topic: /clicked_point
165 | Value: true
166 | Views:
167 | Current:
168 | Class: rviz/ThirdPersonFollower
169 | Distance: 2.958254098892212
170 | Enable Stereo Rendering:
171 | Stereo Eye Separation: 0.05999999865889549
172 | Stereo Focal Distance: 1
173 | Swap Stereo Eyes: false
174 | Value: false
175 | Field of View: 0.7853981852531433
176 | Focal Point:
177 | X: 0
178 | Y: 0
179 | Z: 0
180 | Focal Shape Fixed Size: false
181 | Focal Shape Size: 0.05000000074505806
182 | Invert Z Axis: false
183 | Name: Current View
184 | Near Clip Distance: 0.009999999776482582
185 | Pitch: -0.09460186958312988
186 | Target Frame:
187 | Yaw: 5.243566513061523
188 | Saved: ~
189 | Window Geometry:
190 | Displays:
191 | collapsed: false
192 | Height: 1016
193 | Hide Left Dock: false
194 | Hide Right Dock: false
195 | QMainWindow State: 000000ff00000000fd00000004000000000000016a0000035afc0200000009fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000035a000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d006100670065000000023e000001640000000000000000000000010000010f0000035afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003d0000035a000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007380000003efc0100000002fb0000000800540069006d0065010000000000000738000003bc00fffffffb0000000800540069006d00650100000000000004500000000000000000000004b30000035a00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
196 | Selection:
197 | collapsed: false
198 | Time:
199 | collapsed: false
200 | Tool Properties:
201 | collapsed: false
202 | Views:
203 | collapsed: false
204 | Width: 1848
205 | X: 72
206 | Y: 27
207 |
--------------------------------------------------------------------------------
/config/Robot_pointcloud.rviz:
--------------------------------------------------------------------------------
1 | Panels:
2 | - Class: rviz/Displays
3 | Help Height: 0
4 | Name: Displays
5 | Property Tree Widget:
6 | Expanded:
7 | - /TF1
8 | - /TF1/Frames1
9 | Splitter Ratio: 0.5
10 | Tree Height: 827
11 | - Class: rviz/Selection
12 | Name: Selection
13 | - Class: rviz/Tool Properties
14 | Expanded:
15 | - /2D Pose Estimate1
16 | - /2D Nav Goal1
17 | - /Publish Point1
18 | Name: Tool Properties
19 | Splitter Ratio: 0.5886790156364441
20 | - Class: rviz/Views
21 | Expanded:
22 | - /Current View1
23 | Name: Views
24 | Splitter Ratio: 0.5
25 | - Class: rviz/Time
26 | Name: Time
27 | SyncMode: 0
28 | SyncSource: PointCloud2
29 | Preferences:
30 | PromptSaveOnExit: true
31 | Toolbars:
32 | toolButtonStyle: 2
33 | Visualization Manager:
34 | Class: ""
35 | Displays:
36 | - Alpha: 0.5
37 | Cell Size: 1
38 | Class: rviz/Grid
39 | Color: 160; 160; 164
40 | Enabled: true
41 | Line Style:
42 | Line Width: 0.029999999329447746
43 | Value: Lines
44 | Name: Grid
45 | Normal Cell Count: 0
46 | Offset:
47 | X: 0
48 | Y: 0
49 | Z: 0
50 | Plane: XY
51 | Plane Cell Count: 10
52 | Reference Frame:
53 | Value: true
54 | - Alpha: 1
55 | Autocompute Intensity Bounds: true
56 | Autocompute Value Bounds:
57 | Max Value: 10
58 | Min Value: -10
59 | Value: true
60 | Axis: Z
61 | Channel Name: intensity
62 | Class: rviz/PointCloud2
63 | Color: 255; 255; 255
64 | Color Transformer: RGB8
65 | Decay Time: 0
66 | Enabled: true
67 | Invert Rainbow: false
68 | Max Color: 255; 255; 255
69 | Min Color: 0; 0; 0
70 | Name: PointCloud2
71 | Position Transformer: XYZ
72 | Queue Size: 10
73 | Selectable: true
74 | Size (Pixels): 3
75 | Size (m): 0.009999999776482582
76 | Style: Points
77 | Topic: /camera/depth/color/points
78 | Unreliable: false
79 | Use Fixed Frame: true
80 | Use rainbow: true
81 | Value: true
82 | - Alpha: 1
83 | Class: rviz/RobotModel
84 | Collision Enabled: false
85 | Enabled: true
86 | Links:
87 | All Links Enabled: true
88 | Expand Joint Details: false
89 | Expand Link Details: false
90 | Expand Tree: false
91 | Link Tree Style: Links in Alphabetic Order
92 | Link1:
93 | Alpha: 1
94 | Show Axes: false
95 | Show Trail: false
96 | Value: true
97 | Link2:
98 | Alpha: 1
99 | Show Axes: false
100 | Show Trail: false
101 | Value: true
102 | Link3:
103 | Alpha: 1
104 | Show Axes: false
105 | Show Trail: false
106 | Value: true
107 | Link4:
108 | Alpha: 1
109 | Show Axes: false
110 | Show Trail: false
111 | Value: true
112 | Link5:
113 | Alpha: 1
114 | Show Axes: false
115 | Show Trail: false
116 | Value: true
117 | Link6:
118 | Alpha: 1
119 | Show Axes: false
120 | Show Trail: false
121 | Value: true
122 | base_link:
123 | Alpha: 1
124 | Show Axes: false
125 | Show Trail: false
126 | Value: true
127 | camera_link:
128 | Alpha: 1
129 | Show Axes: false
130 | Show Trail: false
131 | Value: true
132 | dummy_link:
133 | Alpha: 1
134 | Show Axes: false
135 | Show Trail: false
136 | Name: RobotModel
137 | Robot Description: robot_description
138 | TF Prefix: ""
139 | Update Interval: 0
140 | Value: true
141 | Visual Enabled: true
142 | - Class: rviz/Image
143 | Enabled: true
144 | Image Topic: /main/yolo_detect_image
145 | Max Value: 1
146 | Median window: 5
147 | Min Value: 0
148 | Name: Image
149 | Normalize Range: true
150 | Queue Size: 2
151 | Transport Hint: raw
152 | Unreliable: false
153 | Value: true
154 | - Class: rviz/Image
155 | Enabled: true
156 | Image Topic: /main/color_depth_image
157 | Max Value: 1
158 | Median window: 5
159 | Min Value: 0
160 | Name: Image
161 | Normalize Range: true
162 | Queue Size: 2
163 | Transport Hint: raw
164 | Unreliable: false
165 | Value: true
166 | - Class: rviz/TF
167 | Enabled: true
168 | Filter (blacklist): ""
169 | Filter (whitelist): ""
170 | Frame Timeout: 15
171 | Frames:
172 | All Enabled: false
173 | Link1:
174 | Value: false
175 | Link2:
176 | Value: false
177 | Link3:
178 | Value: false
179 | Link4:
180 | Value: false
181 | Link5:
182 | Value: false
183 | Link6:
184 | Value: false
185 | base_link:
186 | Value: false
187 | camera_color_frame:
188 | Value: false
189 | camera_color_optical_frame:
190 | Value: false
191 | camera_depth_frame:
192 | Value: false
193 | camera_depth_optical_frame:
194 | Value: false
195 | camera_link:
196 | Value: false
197 | dummy_link:
198 | Value: false
199 | obj_arduino:
200 | Value: true
201 | object_6:
202 | Value: true
203 | object_6_b:
204 | Value: true
205 | Marker Alpha: 1
206 | Marker Scale: 1
207 | Name: TF
208 | Show Arrows: false
209 | Show Axes: true
210 | Show Names: false
211 | Tree:
212 | dummy_link:
213 | base_link:
214 | Link1:
215 | Link2:
216 | Link3:
217 | Link4:
218 | Link5:
219 | Link6:
220 | camera_link:
221 | camera_color_frame:
222 | camera_color_optical_frame:
223 | obj_arduino:
224 | {}
225 | object_6:
226 | {}
227 | object_6_b:
228 | {}
229 | camera_depth_frame:
230 | camera_depth_optical_frame:
231 | {}
232 | Update Interval: 0
233 | Value: true
234 | Enabled: true
235 | Global Options:
236 | Background Color: 48; 48; 48
237 | Default Light: true
238 | Fixed Frame: base_link
239 | Frame Rate: 30
240 | Name: root
241 | Tools:
242 | - Class: rviz/Interact
243 | Hide Inactive Objects: true
244 | - Class: rviz/MoveCamera
245 | - Class: rviz/Select
246 | - Class: rviz/FocusCamera
247 | - Class: rviz/Measure
248 | - Class: rviz/SetInitialPose
249 | Theta std deviation: 0.2617993950843811
250 | Topic: /initialpose
251 | X std deviation: 0.5
252 | Y std deviation: 0.5
253 | - Class: rviz/SetGoal
254 | Topic: /move_base_simple/goal
255 | - Class: rviz/PublishPoint
256 | Single click: true
257 | Topic: /clicked_point
258 | Value: true
259 | Views:
260 | Current:
261 | Class: rviz/ThirdPersonFollower
262 | Distance: 1.3385756015777588
263 | Enable Stereo Rendering:
264 | Stereo Eye Separation: 0.05999999865889549
265 | Stereo Focal Distance: 1
266 | Swap Stereo Eyes: false
267 | Value: false
268 | Field of View: 0.7853981852531433
269 | Focal Point:
270 | X: -0.04574248194694519
271 | Y: -0.5771075487136841
272 | Z: 3.8743019104003906e-07
273 | Focal Shape Fixed Size: false
274 | Focal Shape Size: 0.05000000074505806
275 | Invert Z Axis: false
276 | Name: Current View
277 | Near Clip Distance: 0.009999999776482582
278 | Pitch: 0.39539769291877747
279 | Target Frame:
280 | Yaw: 3.9904072284698486
281 | Saved: ~
282 | Window Geometry:
283 | Displays:
284 | collapsed: false
285 | Height: 1043
286 | Hide Left Dock: false
287 | Hide Right Dock: false
288 | Image:
289 | collapsed: false
290 | QMainWindow State: 000000ff00000000fd00000004000000000000016100000378fc0200000009fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000378000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb000000280020002d0020005400720061006a006500630074006f0072007900200053006c00690064006500720000000000ffffffff0000000000000000fb0000001c004d006f00740069006f006e0050006c0061006e006e0069006e006700000001ed00000188000000000000000000000001000001d000000378fc0200000009fb0000001e0054006f006f006c002000500072006f0070006500720074006900650073000000003d000000960000005c00fffffffb0000000a00560069006500770073010000003d000000db000000a400fffffffb0000000a0049006d006100670065010000011e000001620000001600fffffffb0000000a0049006d00610067006501000002860000012f0000001600fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007300000000d6000000780000000000000000fc00000243000001170000000000fffffffa000000000100000001fb0000000a0049006d0061006700650100000579000001bf0000000000000000fb0000000a0049006d0061006700650100000161000001230000000000000000fb0000000a0049006d00610067006501000001480000010a0000000000000000fb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000003bfc0100000002fb0000000800540069006d0065010000000000000780000003bc00fffffffb0000000800540069006d00650100000000000004500000000000000000000004430000037800000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
291 | Selection:
292 | collapsed: false
293 | Time:
294 | collapsed: false
295 | Tool Properties:
296 | collapsed: false
297 | Views:
298 | collapsed: false
299 | Width: 1920
300 | X: 0
301 | Y: 0
302 |
--------------------------------------------------------------------------------
/config/Starting_command.txt:
--------------------------------------------------------------------------------
1 | roslaunch CR5_Project CR5_with_realsense.launch
2 |
3 | rosrun CR5_Project yolo_order.py
4 |
5 | sudo chmod a+rw /dev/ttyUSB0
6 | rosrun dobot DobotServer /dev/ttyUSB0
7 |
8 | rosrun CR5_Project service_call
--------------------------------------------------------------------------------
/find object/bin/session 1.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/find object/bin/session 1.bin
--------------------------------------------------------------------------------
/find object/bin/session 10.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/find object/bin/session 10.bin
--------------------------------------------------------------------------------
/find object/bin/session 2.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/find object/bin/session 2.bin
--------------------------------------------------------------------------------
/find object/bin/session 3.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/find object/bin/session 3.bin
--------------------------------------------------------------------------------
/find object/bin/session 7.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/find object/bin/session 7.bin
--------------------------------------------------------------------------------
/find object/bin/session 8.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/find object/bin/session 8.bin
--------------------------------------------------------------------------------
/find object/bin/session 9.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/find object/bin/session 9.bin
--------------------------------------------------------------------------------
/launch/CR5_with_realsense.launch:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/msg/ObjectMsg.msg:
--------------------------------------------------------------------------------
1 | float64 x
2 | float64 y
3 | float64 z
4 | float64 rx
5 | float64 ry
6 | float64 rz
--------------------------------------------------------------------------------
/msg/object_position.msg:
--------------------------------------------------------------------------------
1 | float64 x
2 | float64 y
3 | float64 z
4 | float64 rx
5 | float64 ry
6 | float64 rz
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package format="2">
3 |   <name>CR5_Project</name>
4 |   <version>0.1.0</version>
5 |   <description>The CR5_Project package</description>
6 |
7 |   <maintainer>rai</maintainer>
8 |   <author>ROM</author>
9 |   <license>BSD</license>
10 |
11 |   <buildtool_depend>catkin</buildtool_depend>
12 |
13 |   <build_depend>message_generation</build_depend>
14 |   <build_depend>roscpp</build_depend>
15 |   <build_depend>rospy</build_depend>
16 |   <build_depend>std_msgs</build_depend>
17 |
18 |   <build_export_depend>roscpp</build_export_depend>
19 |   <build_export_depend>rospy</build_export_depend>
20 |   <build_export_depend>std_msgs</build_export_depend>
21 |
22 |   <exec_depend>message_runtime</exec_depend>
23 |   <exec_depend>roscpp</exec_depend>
24 |   <exec_depend>rospy</exec_depend>
25 |   <exec_depend>std_msgs</exec_depend>
26 |
27 |   <export>
28 |   </export>
29 | </package>
--------------------------------------------------------------------------------
/pic/Fig.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.1.jpg
--------------------------------------------------------------------------------
/pic/Fig.10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.10.jpg
--------------------------------------------------------------------------------
/pic/Fig.11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.11.jpg
--------------------------------------------------------------------------------
/pic/Fig.12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.12.jpg
--------------------------------------------------------------------------------
/pic/Fig.13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.13.jpg
--------------------------------------------------------------------------------
/pic/Fig.14.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.14.jpg
--------------------------------------------------------------------------------
/pic/Fig.15.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.15.jpg
--------------------------------------------------------------------------------
/pic/Fig.16.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.16.jpg
--------------------------------------------------------------------------------
/pic/Fig.17.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.17.jpg
--------------------------------------------------------------------------------
/pic/Fig.18.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.18.jpg
--------------------------------------------------------------------------------
/pic/Fig.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.2.jpg
--------------------------------------------------------------------------------
/pic/Fig.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.3.jpg
--------------------------------------------------------------------------------
/pic/Fig.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.4.jpg
--------------------------------------------------------------------------------
/pic/Fig.5(a).jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.5(a).jpg
--------------------------------------------------------------------------------
/pic/Fig.5(b).jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.5(b).jpg
--------------------------------------------------------------------------------
/pic/Fig.6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.6.jpg
--------------------------------------------------------------------------------
/pic/Fig.7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.7.jpg
--------------------------------------------------------------------------------
/pic/Fig.8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.8.jpg
--------------------------------------------------------------------------------
/pic/Fig.9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/monkeyrom/3D_Object_Detection_and_Pose_Estimation_for_Automated_Bin-Picking_Application/d273b3af91faab0cbc22372c6449e0ea5537bb80/pic/Fig.9.jpg
--------------------------------------------------------------------------------
/src/dev/compute_picking.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def compute_vector():
4 |     # Define the position and orientation of the object
5 |     object_pos = np.array([143, -580, -30])  # Object position in world frame (mm)
6 |     object_ori = np.radians([0, 0, 20])      # Object orientation in world frame (Euler angles, deg -> rad)
7 |
8 |     # Convert the object orientation from Euler angles to a rotation matrix
9 |     Rx = np.array([[1, 0, 0],
10 |                    [0, np.cos(object_ori[0]), -np.sin(object_ori[0])],
11 |                    [0, np.sin(object_ori[0]), np.cos(object_ori[0])]])
12 |     Ry = np.array([[np.cos(object_ori[1]), 0, np.sin(object_ori[1])],
13 |                    [0, 1, 0],
14 |                    [-np.sin(object_ori[1]), 0, np.cos(object_ori[1])]])
15 |     Rz = np.array([[np.cos(object_ori[2]), -np.sin(object_ori[2]), 0],
16 |                    [np.sin(object_ori[2]), np.cos(object_ori[2]), 0],
17 |                    [0, 0, 1]])
18 |     object_rot = Rz.dot(Ry.dot(Rx))
19 |
20 |     # Define the desired position of the robot relative to the object frame
21 |     p_o_desired = np.array([0, 0, 30])  # 30 mm above the object in the object frame
22 |
23 |     # Transform the position vector from the object frame to the world frame
24 |     p_w_desired = object_rot.dot(p_o_desired) + object_pos
25 |
26 |     print(p_w_desired)
27 |     return p_w_desired
28 |
29 | if __name__ == "__main__":
30 |     compute_vector()
--------------------------------------------------------------------------------
/src/dev/node_order0.0.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | from std_msgs.msg import String
3 | from time import sleep
4 | import tf
5 | import math
6 | from CR5_Project.msg import ObjectMsg
7 | from tf.transformations import euler_from_quaternion
8 |
9 | def home():
10 | cmd = "home"
11 | pub_cmd.publish(cmd)
12 | rospy.loginfo("Moving to Home Pose...")
13 |
14 | def Point5():
15 | cmd = "Point5"
16 | pub_cmd.publish(cmd)
17 | print("Moving to Point 5...")
18 |
19 | def robot_home_pose(home):
20 | #print('home checking...')
21 | try :
22 | listener.waitForTransform('base_link', 'Link6', rospy.Time(), rospy.Duration(0.2))
23 | except :
24 | return -1
25 |
26 | if listener.canTransform('base_link', 'Link6', rospy.Time()) :
27 | (trans,rot) = listener.lookupTransform("base_link", "Link6", rospy.Time())
28 | x = round(trans[0]*1000)
29 | y = round(trans[1]*1000)
30 | z = round(trans[2]*1000)
31 | #print("robot pose at [", x,y,z,"]")
32 | if abs(abs(x) - 143) <= 5 and abs(abs(y) - 580) <= 5 and abs(abs(z) - 450) <= 5 :
33 | home = 1
34 | else :
35 | #print('waiting robot to home position')
36 | print('.')
37 | home = 0 # set to 1 to bypass the home-position check
38 | sleep(0.2)
39 | else :
40 | home = -1
41 |
42 | if rospy.is_shutdown():
43 | print('Stop home Checking')
44 | return 0
45 |
46 | return home
47 |
48 | def robot_check_pose(x_order,y_order,z_order):
49 | print('pose checking ...')
50 | pose = 0
51 | while pose == 0 :
52 | listener.waitForTransform('base_link', 'Link6', rospy.Time(), rospy.Duration(0.2))
53 | if listener.canTransform('base_link', 'Link6', rospy.Time()) :
54 | (trans,rot) = listener.lookupTransform('base_link', 'Link6', rospy.Time())
55 | x = round(trans[0]*1000)
56 | y = round(trans[1]*1000)
57 | z = round(trans[2]*1000)
58 | #print("command pose at [", x_order,y_order,z_order,"]")
59 | #print("robot pose at [", x,y,z,"]")
60 | if abs(abs(x) - abs(x_order)) <= 4 and abs(abs(y) - abs(y_order)) <= 4 and abs(abs(z) - abs(z_order)) <= 4 :
61 | pose = 1
62 | else :
63 | #print('waiting robot to the position')
64 | print('.')
65 | pose = 0 # set to 1 to bypass the pose check
66 | else :
67 | print("Error : robot not found")
68 | pose = -1
69 |
70 | if rospy.is_shutdown():
71 | print('Stop Pose Checking')
72 | return 0
73 |
74 | sleep(0.2)
75 |
76 | return pose
77 |
78 | def yolo_detect():
79 | process = "Yolo detect at %s" % rospy.get_time()
80 | cmd = "runYolo"
81 | rospy.loginfo(process)
82 | pub_order.publish(cmd)
83 |
84 | def check_object():
85 | obj = 0
86 | source_frameid = 'base_link'
87 | try :
88 | listener.waitForTransform('obj_arduino', source_frameid, rospy.Time(), rospy.Duration(0.2))
89 | if listener.canTransform('obj_arduino', source_frameid, rospy.Time()) == 1 :
90 | obj = 1
91 | except :
92 | pass
93 |
94 | try :
95 | listener.waitForTransform('obj_raspi', source_frameid, rospy.Time(), rospy.Duration(0.2))
96 | if listener.canTransform('obj_raspi', source_frameid, rospy.Time()) and obj == 0:
97 | obj = 2
98 | if listener.canTransform('obj_raspi', source_frameid, rospy.Time()) and obj == 1:
99 | obj = 3
100 | except :
101 | pass
102 |
103 | else :
104 | print("cannot Transform object reffer to robot baselink")
105 |
106 | return obj
107 |
108 | def move_close (focus_obj):
109 | if focus_obj == 1 :
110 | source_frameid = 'base_link'
111 | target_frameid = 'obj_arduino'
112 | listener.waitForTransform(target_frameid, source_frameid, rospy.Time(), rospy.Duration(0.2))
113 | if listener.canTransform(target_frameid, source_frameid, rospy.Time()) :
114 | (trans,rot) = listener.lookupTransform(source_frameid, target_frameid, rospy.Time())
115 | x = round(trans[0]*1000) - 20
116 | y = round(trans[1]*1000) + 50
117 | z = round(trans[2]*1000) + 200
118 |
119 | else :
120 | print("Error : object lost")
121 | return
122 |
123 | elif focus_obj == 2 :
124 | source_frameid = 'base_link'
125 | target_frameid = 'obj_raspi'
126 | listener.waitForTransform(source_frameid, target_frameid, rospy.Time(), rospy.Duration(0.2))
127 | if listener.canTransform(source_frameid, target_frameid, rospy.Time()) :
128 | (trans,rot) = listener.lookupTransform(source_frameid, target_frameid, rospy.Time())
129 | x = round(trans[0]*1000)
130 | y = round(trans[1]*1000) + 50
131 | z = round(trans[2]*1000) + 200
132 | else :
133 | print("Error : object lost")
134 | return
135 |
136 | target = ObjectMsg()
137 | target.x = x
138 | target.y = y
139 | target.z = z
140 | target.rx = 180
141 | target.ry = 0
142 | target.rz = 180
143 | rospy.loginfo(target)
144 | pub_target.publish(target)
145 |
146 | return (x,y,z)
147 |
148 | def pick_phase1 (focus_obj):
149 | if focus_obj == 1 :
150 | source_frameid = 'base_link'
151 | camera_frameid = 'camera_link'
152 | target_frameid1 = 'object_6'
153 | target_frameid2 = 'object_6_b'
154 | target_frameid3 = 'object_6_c'
155 | #target_frameid4 = 'object_6_d'
156 | multi = 0
157 | try :
158 | listener.waitForTransform(camera_frameid, source_frameid, rospy.Time(), rospy.Duration(0.2))
159 | listener.waitForTransform(target_frameid1, source_frameid, rospy.Time(), rospy.Duration(0.1))
160 | if listener.canTransform(target_frameid1, source_frameid, rospy.Time()):
161 | multi = multi + 1
162 | print("multi = ", multi)
163 | except :
164 | print('no object_a')
165 | try :
166 | listener.waitForTransform(target_frameid2, source_frameid, rospy.Time(), rospy.Duration(0.1))
167 | if listener.canTransform(target_frameid2, source_frameid, rospy.Time()):
168 | multi = multi + 1
169 | print("multi = ", multi)
170 | except :
171 | print('no object_b')
172 | try :
173 | listener.waitForTransform(target_frameid3, source_frameid, rospy.Time(), rospy.Duration(0.1))
174 | if listener.canTransform(target_frameid3, source_frameid, rospy.Time()):
175 | multi = multi + 1
176 | print("multi = ", multi)
177 | except :
178 | print('no object_c')
179 | #try :
180 | #listener.waitForTransform(target_frameid4, source_frameid, rospy.Time(), rospy.Duration(0.1))
181 | #if listener.canTransform(target_frameid4, source_frameid, rospy.Time()):
182 | #multi = multi + 1
183 | #print("multi = ", multi)
184 | #except :
185 | #print('no object_d')
186 | # NOTE: the multi count below assumes detections fill frames in order (a, then b, then c)
187 | try :
188 | if multi == 1:
189 | print("found 1 object")
190 | echo_transform = listener.lookupTransform(source_frameid, target_frameid1, rospy.Time(0))
191 | echo_cam = listener.lookupTransform(target_frameid1, camera_frameid, rospy.Time(0))
192 | yaw, pitch, roll = euler_from_quaternion(echo_cam[1])
193 | v = echo_transform[0]
194 |
195 | x = int(v[0] * 1000)
196 | y = int(v[1] * 1000)
197 | z = int(v[2] * 1000)
198 | rx = int(roll * 180.0 / math.pi)
199 | ry = int(pitch * 180.0 / math.pi)
200 | rz = int(yaw * 180.0 / math.pi)
201 |
202 | x_cal = x
203 | y_cal = y
204 | z_cal = z + 54
205 | rx_cal = ry + 180
206 | ry_cal = -(rx - 180)
207 | rz_cal = rz + 180
208 |
209 | elif multi == 2:
210 | print("found 2 objects")
211 | echo_transform1 = listener.lookupTransform(source_frameid, target_frameid1, rospy.Time(0))
212 | echo_cam1 = listener.lookupTransform(target_frameid1, camera_frameid, rospy.Time(0))
213 | echo_transform2 = listener.lookupTransform(source_frameid, target_frameid2, rospy.Time(0))
214 | echo_cam2 = listener.lookupTransform(target_frameid2, camera_frameid, rospy.Time(0))
215 | yaw1, pitch1, roll1 = euler_from_quaternion(echo_cam1[1])
216 | v1 = echo_transform1[0]
217 | yaw2, pitch2, roll2 = euler_from_quaternion(echo_cam2[1])
218 | v2 = echo_transform2[0]
219 |
220 | x1 = int(v1[0] * 1000)
221 | y1 = int(v1[1] * 1000)
222 | z1 = int(v1[2] * 1000)
223 | rx1 = int(roll1 * 180.0 / math.pi)
224 | ry1 = int(pitch1 * 180.0 / math.pi)
225 | rz1 = int(yaw1 * 180.0 / math.pi)
226 |
227 | x2 = int(v2[0] * 1000)
228 | y2 = int(v2[1] * 1000)
229 | z2 = int(v2[2] * 1000)
230 | rx2 = int(roll2 * 180.0 / math.pi)
231 | ry2 = int(pitch2 * 180.0 / math.pi)
232 | rz2 = int(yaw2 * 180.0 / math.pi)
233 |
234 | if z1 > z2 :
235 | x_cal = x1
236 | y_cal = y1
237 | z_cal = z1 + 54
238 | rx_cal = ry1 + 180
239 | ry_cal = -(rx1 - 180)
240 | rz_cal = rz1 + 180
241 |
242 | elif z2 > z1 :
243 | x_cal = x2
244 | y_cal = y2
245 | z_cal = z2 + 54
246 | rx_cal = ry2 + 180
247 | ry_cal = -(rx2 - 180)
248 | rz_cal = rz2 + 180
249 |
250 | elif multi == 3:
251 | print("found 3 objects")
252 | echo_transform1 = listener.lookupTransform(source_frameid, target_frameid1, rospy.Time(0))
253 | echo_cam1 = listener.lookupTransform(target_frameid1, camera_frameid, rospy.Time(0))
254 | #print("tf 1")
255 | echo_transform2 = listener.lookupTransform(source_frameid, target_frameid2, rospy.Time(0))
256 | echo_cam2 = listener.lookupTransform(target_frameid2, camera_frameid, rospy.Time(0))
257 | #print("tf 2")
258 | echo_transform3 = listener.lookupTransform(source_frameid, target_frameid3, rospy.Time(0))
259 | echo_cam3 = listener.lookupTransform(target_frameid3, camera_frameid, rospy.Time(0))
260 | #print("tf 3")
261 | yaw1, pitch1, roll1 = euler_from_quaternion(echo_cam1[1])
262 | v1 = echo_transform1[0]
263 | yaw2, pitch2, roll2 = euler_from_quaternion(echo_cam2[1])
264 | v2 = echo_transform2[0]
265 | yaw3, pitch3, roll3 = euler_from_quaternion(echo_cam3[1])
266 | v3 = echo_transform3[0]
267 |
268 | x1 = int(v1[0] * 1000)
269 | y1 = int(v1[1] * 1000)
270 | z1 = int(v1[2] * 1000)
271 | rx1 = int(roll1 * 180.0 / math.pi)
272 | ry1 = int(pitch1 * 180.0 / math.pi)
273 | rz1 = int(yaw1 * 180.0 / math.pi)
274 |
275 | x2 = int(v2[0] * 1000)
276 | y2 = int(v2[1] * 1000)
277 | z2 = int(v2[2] * 1000)
278 | rx2 = int(roll2 * 180.0 / math.pi)
279 | ry2 = int(pitch2 * 180.0 / math.pi)
280 | rz2 = int(yaw2 * 180.0 / math.pi)
281 |
282 | x3 = int(v3[0] * 1000)
283 | y3 = int(v3[1] * 1000)
284 | z3 = int(v3[2] * 1000)
285 | rx3 = int(roll3 * 180.0 / math.pi)
286 | ry3 = int(pitch3 * 180.0 / math.pi)
287 | rz3 = int(yaw3 * 180.0 / math.pi)
288 |
289 | if z1 > z2 and z1 > z3:
290 | #print("z1")
291 | x_cal = x1
292 | y_cal = y1
293 | z_cal = z1 + 54
294 | rx_cal = ry1 + 180
295 | ry_cal = -(rx1 - 180)
296 | rz_cal = rz1 + 180
297 | elif z2 > z1 and z2 > z3:
298 | #print("z2")
299 | x_cal = x2
300 | y_cal = y2
301 | z_cal = z2 + 54
302 | rx_cal = ry2 + 180
303 | ry_cal = -(rx2 - 180)
304 | rz_cal = rz2 + 180
305 | elif z3 > z1 and z3 > z2:
306 | #print("z3")
307 | x_cal = x3
308 | y_cal = y3
309 | z_cal = z3 + 54
310 | rx_cal = ry3 + 180
311 | ry_cal = -(rx3 - 180)
312 | rz_cal = rz3 + 180
313 |
314 | elif multi == 4: # unreachable while target_frameid4 stays commented out above
315 | print("found 4 objects")
316 | echo_transform1 = listener.lookupTransform(source_frameid, target_frameid1, rospy.Time(0))
317 | echo_cam1 = listener.lookupTransform(target_frameid1, camera_frameid, rospy.Time(0))
318 | echo_transform2 = listener.lookupTransform(source_frameid, target_frameid2, rospy.Time(0))
319 | echo_cam2 = listener.lookupTransform(target_frameid2, camera_frameid, rospy.Time(0))
320 | echo_transform3 = listener.lookupTransform(source_frameid, target_frameid3, rospy.Time(0))
321 | echo_cam3 = listener.lookupTransform(target_frameid3, camera_frameid, rospy.Time(0))
322 | echo_transform4 = listener.lookupTransform(source_frameid, target_frameid4, rospy.Time(0))
323 | echo_cam4 = listener.lookupTransform(target_frameid4, camera_frameid, rospy.Time(0))
324 | yaw1, pitch1, roll1 = euler_from_quaternion(echo_cam1[1])
325 | v1 = echo_transform1[0]
326 | yaw2, pitch2, roll2 = euler_from_quaternion(echo_cam2[1])
327 | v2 = echo_transform2[0]
328 | yaw3, pitch3, roll3 = euler_from_quaternion(echo_cam3[1])
329 | v3 = echo_transform3[0]
330 | yaw4, pitch4, roll4 = euler_from_quaternion(echo_cam4[1])
331 | v4 = echo_transform4[0]
332 |
333 | x1 = int(v1[0] * 1000)
334 | y1 = int(v1[1] * 1000)
335 | z1 = int(v1[2] * 1000)
336 | rx1 = int(roll1 * 180.0 / math.pi)
337 | ry1 = int(pitch1 * 180.0 / math.pi)
338 | rz1 = int(yaw1 * 180.0 / math.pi)
339 |
340 | x2 = int(v2[0] * 1000)
341 | y2 = int(v2[1] * 1000)
342 | z2 = int(v2[2] * 1000)
343 | rx2 = int(roll2 * 180.0 / math.pi)
344 | ry2 = int(pitch2 * 180.0 / math.pi)
345 | rz2 = int(yaw2 * 180.0 / math.pi)
346 |
347 | x3 = int(v3[0] * 1000)
348 | y3 = int(v3[1] * 1000)
349 | z3 = int(v3[2] * 1000)
350 | rx3 = int(roll3 * 180.0 / math.pi)
351 | ry3 = int(pitch3 * 180.0 / math.pi)
352 | rz3 = int(yaw3 * 180.0 / math.pi)
353 |
354 | x4 = int(v4[0] * 1000)
355 | y4 = int(v4[1] * 1000)
356 | z4 = int(v4[2] * 1000)
357 | rx4 = int(roll4 * 180.0 / math.pi)
358 | ry4 = int(pitch4 * 180.0 / math.pi)
359 | rz4 = int(yaw4 * 180.0 / math.pi)
360 |
361 | if z1 > z2 and z1 > z3 and z1 > z4:
362 | x_cal = x1
363 | y_cal = y1
364 | z_cal = z1 + 54
365 | rx_cal = ry1 + 180
366 | ry_cal = -(rx1 - 180)
367 | rz_cal = rz1 + 180
368 | elif z2 > z1 and z2 > z3 and z2 > z4:
369 | x_cal = x2
370 | y_cal = y2
371 | z_cal = z2 + 54
372 | rx_cal = ry2 + 180
373 | ry_cal = -(rx2 - 180)
374 | rz_cal = rz2 + 180
375 | elif z3 > z1 and z3 > z2 and z3 > z4:
376 | x_cal = x3
377 | y_cal = y3
378 | z_cal = z3 + 54
379 | rx_cal = ry3 + 180
380 | ry_cal = -(rx3 - 180)
381 | rz_cal = rz3 + 180
382 | elif z4 > z1 and z4 > z2 and z4 > z3:
383 | x_cal = x4
384 | y_cal = y4
385 | z_cal = z4 + 54
386 | rx_cal = ry4 + 180
387 | ry_cal = -(rx4 - 180)
388 | rz_cal = rz4 + 180
389 |
390 | else:
391 | print("Error : object lost 2")
392 | return -1
393 |
394 | # clamp: never command a pick height below 25 mm
395 | if z_cal < 25 :
396 | z_cal = 25
397 |
398 |
399 | target = ObjectMsg()
400 | target.x = x_cal
401 | target.y = y_cal
402 | target.z = z_cal + 25
403 | target.rx = 180
404 | target.ry = 0
405 | target.rz = 180
406 | rospy.loginfo(target)
407 | pub_target.publish(target)
408 | return (x_cal,y_cal,z_cal,rx_cal,ry_cal,rz_cal)
409 |
410 | except:
411 | return -1
412 |
413 | elif focus_obj == 2 :
414 | return -1
415 |
416 | def pick_phase2 (x,y,z,rx,ry,rz):
417 | target = ObjectMsg()
418 | target.x = x
419 | target.y = y
420 | target.z = z - 5 # Pickup
421 | target.rx = rx
422 | target.ry = ry
423 | target.rz = rz
424 | rospy.loginfo(target)
425 | pub_target.publish(target)
426 | return (x,y,z-5,rx,ry,rz)
427 |
428 | def pick_phase3 (x,y,z,rx,ry,rz):
429 | target = ObjectMsg()
430 | target.x = x
431 | target.y = y
432 | target.z = z + 95
433 | target.rx = rx
434 | target.ry = ry
435 | target.rz = rz
436 | rospy.loginfo(target)
437 | pub_target.publish(target)
438 | return (x,y,z+95)
439 |
440 | def sunction_cup(state):
441 | if state == 1 :
442 | msg = "SetVac"
443 | pub_cmd.publish(msg)
444 | print("Set Robot Tool0 (Vacuum)...\n")
445 | elif state == 0 :
446 | msg = "ResetVac"
447 | pub_cmd.publish(msg)
448 | print("Reset Robot Tool0 (Vacuum)...\n")
449 | return 0
450 |
451 | def start_order():
452 | # Initial parameter
453 | step = 0
454 | focus_obj = 0
455 | while True :
456 | home_check = robot_home_pose(0)
457 | if step == 0 and home_check == -1 : # Check Robot and not found
458 | print("Error : no robot found")
459 | break
460 | elif step == 0 and home_check == 0 : # Check Robot and Robot not at home position
461 | step = 0
462 | elif step == 0 and home_check == 1 : # Check Robot and Robot at home position
463 | yolo_detect() # publish to yolo node to detect
464 | sleep(0.5) # sleep 500 ms
465 | if check_object() == 1 : # echo tf found Arduino
466 | print("found obj_arduino")
467 | step = 1
468 | focus_obj = 1
469 | elif check_object() == 2 : # echo tf found Raspi
470 | print("found obj_raspi")
471 | step = 1
472 | focus_obj = 2
473 | elif check_object() == 3 : # echo tf found Arduino and Raspi
474 | print("found both obj_arduino and obj_raspi")
475 | step = 1
476 | focus_obj = 1
477 | else :
478 | print("no object found") # echo tf not found object
479 | step = 0
480 |
481 | elif step == 1 : # move close to the object
482 | obj_target = move_close(focus_obj)
483 | x_order = obj_target[0]
484 | y_order = obj_target[1]
485 | z_order = obj_target[2]
486 | pose_check = robot_check_pose(x_order,y_order,z_order)
487 | print ("robot close to object")
488 | sleep(1.5) # sleep 1500 ms while switching algorithms
489 | if pose_check == 1 : # Robot at position (close to object)
490 | step = 2
491 | if pose_check == -1 :
492 | break
493 | if pose_check == 0 :
494 | break
495 |
496 | elif step == 2 : # Robot close to object echo tf again and going to pick
497 | obj_target1 = pick_phase1(focus_obj) # echo object position and move 25 mm above the object
498 | if obj_target1 == -1: # Object lost
499 | home()
500 | robot_home_pose(0)
501 | sleep(0.2) # sleep 200 ms
502 | step = 0
503 | pass
504 | else :
505 | x = obj_target1[0]
506 | y = obj_target1[1]
507 | z = obj_target1[2]
508 | rx = obj_target1[3]
509 | ry = obj_target1[4]
510 | rz = obj_target1[5]
511 | #pose_check = robot_check_pose(obj_target1[0],obj_target1[1],obj_target1[2]+30) # Check Robot position (25 mm above object)
512 | sleep(0.5)
513 | if pose_check == 1 : # NOTE: still the step-1 result; the re-check above is commented out
514 | obj_target2 = pick_phase2(x,y,z,rx,ry,rz) # move to object
515 | sunction_cup(1) # Set sunction cup on
516 | #pose_check = robot_check_pose(obj_target2[0],obj_target2[1],obj_target2[2]) # Check Robot position (at object)
517 | sleep(0.5)
518 | print ("robot picking object")
519 | if pose_check == 1 :
520 | print ('Picked object')
521 | obj_target3 = pick_phase3(x,y,z,rx,ry,rz)
522 | #pose_check = robot_check_pose(obj_target3[0],obj_target3[1],obj_target3[2])
523 | sleep(0.5)
524 | step = 3
525 | if pose_check == -1 :
526 | break
527 | if pose_check == 0 :
528 | break
529 | if pose_check == -1 :
530 | break
531 |
532 | elif step == 3 : # go to inventory
533 | Point5() # move robot to inventory
534 | pose_check = robot_check_pose(-210,-580,150) # check robot at inventory
535 | sunction_cup(0) # Set sunction cup off
536 | sleep(0.3) # sleep 300 ms
537 | step = 4
538 |
539 | elif step == 4 :
540 | home()
541 | robot_home_pose(0)
542 | sleep(0.2) # sleep 200 ms
543 | step = 0
544 |
545 | if rospy.is_shutdown():
546 | print('Shutdown main loop')
547 | break
548 |
549 | def start_service():
550 | print("Started rosrun CR5_Project main_control")
551 | print("\n================")
552 | print(" SUMMARY")
553 | print("================")
554 | print("\nThis program use for controlling CR5 Robot, still in developing stage.")
555 | print("The purpose of this project is to ...")
556 | print("\n================")
557 | print(" SERVICE LISTS")
558 | print("================")
559 | print("* Home (Moving robot to home position)")
560 | print("* Pose (Moving robot to x,y,z input)")
561 | print("* EnableRobot")
562 | print("* DisableRobot")
563 | print("* ClearError")
564 | print("* Sleep (Moving robot to sleep position)")
565 | print("* Point0 (Set all robot joints to 0)")
566 | print("* Point1 (Moving robot to point 1)")
567 | print("* PickingLoop (Starting picking loop)\n")
568 |
569 | while True:
570 | service = input("Please type your service: ")
571 | if service == "Pose":
572 | print("Input position to publish")
573 | position_x = float(input("position.x: "))
574 | position_y = float(input("position.y: "))
575 | position_z = float(input("position.z: "))
576 | msg.x = position_x
577 | msg.y = position_y
578 | msg.z = position_z
579 | msg.rx = 180
580 | msg.ry = 0
581 | msg.rz = 180
582 | pub_target.publish(msg)
583 | elif service == "Home":
584 | msg = "home"
585 | pub_cmd.publish(msg)
586 | print("Robot moving to Home Pose...\n")
587 | elif service == "Sleep":
588 | msg = "sleep"
589 | pub_cmd.publish(msg)
590 | print("Robot moving to Sleep Pose...\n")
591 | elif service == "ClearError":
592 | msg = "ClearError"
593 | pub_cmd.publish(msg)
594 | print("Published Clear Error ...\n")
595 | elif service == "DisableRobot":
596 | msg = "DisableRobot"
597 | pub_cmd.publish(msg)
598 | print("Published Disable Robot...\n")
599 | elif service == "EnableRobot":
600 | msg = "EnableRobot"
601 | pub_cmd.publish(msg)
602 | print("Published Enable Robot...\n")
603 | elif service == "Point0":
604 | msg = "Point0"
605 | pub_cmd.publish(msg)
606 | print("Set all robot joints to 0...\n")
607 | elif service == "Point1":
608 | msg = "Point1"
609 | pub_cmd.publish(msg)
610 | print("Moving to Point 1...\n")
611 | elif service == "Point2":
612 | msg = "Point2"
613 | pub_cmd.publish(msg)
614 | print("Moving to Point 2...\n")
615 | elif service == "Point3":
616 | msg = "Point2"
617 | pub_cmd.publish(msg)
618 | print("Moving to Point 3...\n")
619 | elif service == "Point5":
620 | msg = "Point5"
621 | pub_cmd.publish(msg)
622 | print("Moving to Point 5...\n")
623 | elif service == "SetVac":
624 | msg = "SetVac"
625 | pub_cmd.publish(msg)
626 | print("Set Robot Tool0 (Vacuum)...\n")
627 | elif service == "ResetVac":
628 | msg = "ResetVac"
629 | pub_cmd.publish(msg)
630 | print("Reset Robot Tool0 (Vacuum)...\n")
631 | elif service == "PickingLoop":
632 | print("Starting picking loop...\n")
633 | start_order()
634 | elif service == "S":
635 | msg = "Speed100"
636 | pub_cmd.publish(msg)
637 | print("SpeedJ to 100\n")
638 | elif service == "A":
639 | msg = "Acc100"
640 | pub_cmd.publish(msg)
641 | print("SpeedJ to 100\n")
642 | else :
643 | print("ERROR: command not found \n")
644 |
645 | if rospy.is_shutdown():
646 | print("Stop Service")
647 | break
648 | return 0
649 |
650 | if __name__ == "__main__":
651 |
652 | rospy.init_node('main_order', anonymous=True)
653 | listener = tf.TransformListener()
654 | pub_order = rospy.Publisher('main/yolo_order', String, queue_size=10)
655 | pub_target = rospy.Publisher('main/target_order', ObjectMsg, queue_size=10)
656 | pub_cmd = rospy.Publisher('main/cmd_talker', String, queue_size=10)
657 | try:
658 | start_service()
659 | except rospy.ROSInterruptException:
660 | pass
--------------------------------------------------------------------------------
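A note on the tf access pattern above: every frame query in these node scripts repeats the same waitForTransform / canTransform / lookupTransform sequence with a short timeout and millimetre rounding. Below is a minimal sketch of that pattern factored into one helper; frame_xyz_mm is a hypothetical name (not part of this package) and the 0.2 s timeout mirrors the values used in the scripts.

import rospy
import tf

def frame_xyz_mm(listener, target_frame, source_frame='base_link', timeout=0.2):
    # Return (x, y, z) of target_frame expressed in source_frame, in mm,
    # or None when the transform is unavailable.
    try:
        listener.waitForTransform(source_frame, target_frame,
                                  rospy.Time(), rospy.Duration(timeout))
    except tf.Exception:
        return None
    if not listener.canTransform(source_frame, target_frame, rospy.Time()):
        return None
    (trans, _rot) = listener.lookupTransform(source_frame, target_frame, rospy.Time())
    return tuple(round(v * 1000) for v in trans)

# usage, mirroring check_object():
# pos = frame_xyz_mm(listener, 'obj_arduino')
# obj = 1 if pos is not None else 0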
/src/dev/node_order1.0.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | from std_msgs.msg import String
3 | from time import sleep
4 | import tf
5 | import math
6 | from CR5_Project.msg import ObjectMsg
7 | from tf.transformations import euler_from_quaternion
8 |
9 | def home():
10 | cmd = "home"
11 | pub_cmd.publish(cmd)
12 | rospy.loginfo("Moving to Home Pose...")
13 |
14 | def Point5():
15 | cmd = "Point5"
16 | pub_cmd.publish(cmd)
17 | print("Moving to Point 5...")
18 |
19 | def robot_home_pose(home):
20 | #print('home checking...')
21 | try :
22 | listener.waitForTransform('base_link', 'Link6', rospy.Time(), rospy.Duration(0.2))
23 | except :
24 | return -1
25 |
26 | if listener.canTransform('base_link', 'Link6', rospy.Time()) :
27 | (trans,rot) = listener.lookupTransform("base_link", "Link6", rospy.Time())
28 | x = round(trans[0]*1000)
29 | y = round(trans[1]*1000)
30 | z = round(trans[2]*1000)
31 | #print("robot pose at [", x,y,z,"]")
32 | if abs(abs(x) - 143) <= 5 and abs(abs(y) - 580) <= 5 and abs(abs(z) - 405) <= 5 :
33 | home = 1
34 | else :
35 | #print('waiting robot to home position')
36 | print('.')
37 | home = 0 #change to 0,1 for pass home checking
38 | sleep(0.2)
39 | else :
40 | home = -1
41 |
42 | if rospy.is_shutdown():
43 | print('Stop home Checking')
44 | return 0
45 |
46 | return home
47 |
48 | def robot_check_pose(x_order,y_order,z_order):
49 | print('pose checking ...')
50 | pose = 0
51 | while pose == 0 :
52 | listener.waitForTransform('base_link', 'Link6', rospy.Time(), rospy.Duration(0.2))
53 | if listener.canTransform('base_link', 'Link6', rospy.Time()) :
54 | (trans,rot) = listener.lookupTransform('base_link', 'Link6', rospy.Time())
55 | x = round(trans[0]*1000)
56 | y = round(trans[1]*1000)
57 | z = round(trans[2]*1000)
58 | #print("command pose at [", x_order,y_order,z_order,"]")
59 | #print("robot pose at [", x,y,z,"]")
60 | if abs(abs(x) - abs(x_order)) <= 4 and abs(abs(y) - abs(y_order)) <= 4 and abs(abs(z) - abs(z_order)) <= 4 :
61 | pose = 1
62 | else :
63 | #print('waiting robot to the position')
64 | print('.')
65 | pose = 0 #change to 0,1 for pass pose checking
66 | else :
67 | print("Error : robot not found")
68 | pose = -1
69 |
70 | if rospy.is_shutdown():
71 | print('Stop Pose Checking')
72 | return 0
73 |
74 | sleep(0.2)
75 |
76 | return pose
77 |
78 | def yolo_detect():
79 | process = "Yolo detect at %s" % rospy.get_time()
80 | cmd = "runYolo"
81 | rospy.loginfo(process)
82 | pub_order.publish(cmd)
83 |
84 | def check_object():
85 | obj = 0
86 | source_frameid = 'base_link'
87 | try :
88 | listener.waitForTransform('obj_arduino', source_frameid, rospy.Time(), rospy.Duration(0.2))
89 | if listener.canTransform('obj_arduino', source_frameid, rospy.Time()) == 1 :
90 | obj = 1
91 | except :
92 | pass
93 |
94 | try :
95 | listener.waitForTransform('obj_raspi', source_frameid, rospy.Time(), rospy.Duration(0.2))
96 | if listener.canTransform('obj_raspi', source_frameid, rospy.Time()) and obj == 0:
97 | obj = 2
98 | if listener.canTransform('obj_raspi', source_frameid, rospy.Time()) and obj == 1:
99 | obj = 3
100 | except :
101 | # transform unavailable: report it rather than failing silently
102 | print("cannot transform object relative to robot base_link")
103 |
104 |
105 |
106 | return obj
107 |
108 | def move_close (focus_obj):
109 | if focus_obj == 1 :
110 | source_frameid = 'base_link'
111 | target_frameid = 'obj_arduino'
112 | listener.waitForTransform(target_frameid, source_frameid, rospy.Time(), rospy.Duration(0.2))
113 | if listener.canTransform(target_frameid, source_frameid, rospy.Time()) :
114 | (trans,rot) = listener.lookupTransform(source_frameid, target_frameid, rospy.Time())
115 | x = round(trans[0]*1000) - 30
116 | y = round(trans[1]*1000) + 50
117 | z = round(trans[2]*1000) + 200
118 |
119 | else :
120 | print("Error : object lost")
121 | return
122 |
123 | elif focus_obj == 2 :
124 | source_frameid = 'base_link'
125 | target_frameid = 'obj_raspi'
126 | listener.waitForTransform(source_frameid, target_frameid, rospy.Time(), rospy.Duration(0.2))
127 | if listener.canTransform(source_frameid, target_frameid, rospy.Time()) :
128 | (trans,rot) = listener.lookupTransform(source_frameid, target_frameid, rospy.Time())
129 | x = round(trans[0]*1000)
130 | y = round(trans[1]*1000) + 50
131 | z = round(trans[2]*1000) + 200
132 | else :
133 | print("Error : object lost")
134 | return
135 |
136 | target = ObjectMsg()
137 | target.x = x
138 | target.y = y
139 | target.z = z
140 | target.rx = 180
141 | target.ry = 0
142 | target.rz = 180
143 | rospy.loginfo(target)
144 | pub_target.publish(target)
145 |
146 | return (x,y,z)
147 |
148 | def pick_phase1 (focus_obj):
149 |
150 | if focus_obj == 1 :
151 | x2,y2,z2,rx2,ry2,rz2 = 0,0,-100,0,0,0 # sentinel: an absent frame compares below any real detection
152 | x3,y3,z3,rx3,ry3,rz3 = 0,0,-100,0,0,0 # sentinel: an absent frame compares below any real detection
153 | source_frameid = 'base_link'
154 | camera_frameid = 'camera_link'
155 | target_frameid1 = 'object_6'
156 | target_frameid2 = 'object_6_b'
157 | target_frameid3 = 'object_6_c'
158 | try :
159 | listener.waitForTransform(camera_frameid, source_frameid, rospy.Time(), rospy.Duration(0.2))
160 | listener.waitForTransform(target_frameid1, source_frameid, rospy.Time(), rospy.Duration(0.2))
161 | except :
162 | pass
163 | try :
164 | listener.waitForTransform(target_frameid2, source_frameid, rospy.Time(), rospy.Duration(0.2))
165 | except :
166 | pass
167 | try :
168 | listener.waitForTransform(target_frameid3, source_frameid, rospy.Time(), rospy.Duration(0.2))
169 | except :
170 | pass
171 |
172 | if listener.canTransform(target_frameid1, source_frameid, rospy.Time()):
173 | echo_transform = listener.lookupTransform(source_frameid, target_frameid1, rospy.Time(0))
174 | echo_cam = listener.lookupTransform(target_frameid1, camera_frameid, rospy.Time(0))
175 |
176 | yaw, pitch, roll = euler_from_quaternion(echo_cam[1])
177 | v = echo_transform[0]
178 |
179 | x = int(v[0] * 1000)
180 | y = int(v[1] * 1000)
181 | z = int(v[2] * 1000)
182 | rx = int(roll * 180.0 / math.pi)
183 | ry = int(pitch * 180.0 / math.pi)
184 | rz = int(yaw * 180.0 / math.pi)
185 |
186 | x_cal = x
187 | y_cal = y
188 | z_cal = z + 54 if z + 54 >= 25 else 25
189 | rx_cal = ry + 180
190 | ry_cal = -(rx - 180)
191 | rz_cal = rz + 180
192 |
193 | if listener.canTransform(target_frameid2, source_frameid, rospy.Time()):
194 | echo_transform = listener.lookupTransform(source_frameid, target_frameid2, rospy.Time(0))
195 | echo_cam = listener.lookupTransform(target_frameid2, camera_frameid, rospy.Time(0))
196 |
197 | yaw, pitch, roll = euler_from_quaternion(echo_cam[1])
198 | v = echo_transform[0]
199 |
200 | x2 = int(v[0] * 1000)
201 | y2 = int(v[1] * 1000)
202 | z2 = int(v[2] * 1000)
203 | rx2 = int(roll * 180.0 / math.pi)
204 | ry2 = int(pitch * 180.0 / math.pi)
205 | rz2 = int(yaw * 180.0 / math.pi)
206 |
207 | if listener.canTransform(target_frameid3, source_frameid, rospy.Time()):
208 | echo_transform = listener.lookupTransform(source_frameid, target_frameid3, rospy.Time(0))
209 | echo_cam = listener.lookupTransform(target_frameid3, camera_frameid, rospy.Time(0))
210 |
211 | yaw, pitch, roll = euler_from_quaternion(echo_cam[1])
212 | v = echo_transform[0]
213 |
214 | x3 = int(v[0] * 1000)
215 | y3 = int(v[1] * 1000)
216 | z3 = int(v[2] * 1000)
217 | rx3 = int(roll * 180.0 / math.pi)
218 | ry3 = int(pitch * 180.0 / math.pi)
219 | rz3 = int(yaw * 180.0 / math.pi)
220 |
221 | if z2 > z and z2 > z3 :
222 | x_cal = x2
223 | y_cal = y2
224 | z_cal = z2 + 54 if z2 + 54 >= 25 else 25
225 | rx_cal = ry2 + 180
226 | ry_cal = -(rx2 - 180)
227 | rz_cal = rz2 + 180
228 |
229 | if z3 > z and z3 > z2 :
230 | x_cal = x3
231 | y_cal = y3
232 | z_cal = z3 + 54 if z3 + 54 >= 25 else 25
233 | rx_cal = ry3 + 180
234 | ry_cal = -(rx3 - 180)
235 | rz_cal = rz3 + 180
236 |
237 | target = ObjectMsg()
238 | target.x = x_cal
239 | target.y = y_cal
240 | target.z = z_cal + 25
241 | target.rx = 180
242 | target.ry = 0
243 | target.rz = 180
244 | rospy.loginfo(target)
245 | pub_target.publish(target)
246 |
247 | return (x_cal,y_cal,z_cal,rx_cal,ry_cal,rz_cal)
248 |
249 | else :
250 | print("Error : object lost")
251 | return -1
252 |
253 | elif focus_obj == 2 :
254 | source_frameid = 'base_link'
255 | target_frameid = 'obj_raspi'
256 | listener.waitForTransform(target_frameid, source_frameid, rospy.Time(), rospy.Duration(0.2))
257 | if listener.canTransform(target_frameid, source_frameid, rospy.Time()):
258 | (trans, rot) = listener.lookupTransform(source_frameid, target_frameid, rospy.Time())
259 | x = round(trans[0] * 1000)
260 | y = round(trans[1] * 1000)
261 | z = round(trans[2] * 1000)
262 | roll, pitch, yaw = tf.transformations.euler_from_quaternion(rot)
263 | rx = round(pitch * 180.0 / math.pi)
264 | ry = round(yaw * 180.0 / math.pi)
265 | rz = round(roll * 180.0 / math.pi)
266 | z_off = z + 54 if z + 54 >= 25 else 25
267 |
268 | rx = rx + 180
269 | ry = ry - 180
270 | rz = rz + 180
271 |
272 | target = ObjectMsg()
273 | target.x = x
274 | target.y = y
275 | target.z = z_off + 20
276 | target.rx = 180
277 | target.ry = 0
278 | target.rz = 180
279 | rospy.loginfo(target)
280 | pub_target.publish(target)
281 |
282 | return (x,y,z_off,rx,ry,rz)
283 |
284 | else :
285 | print("Error : object lost")
286 | return
287 |
288 | def pick_phase2 (x,y,z,rx,ry,rz):
289 | target = ObjectMsg()
290 | target.x = x
291 | target.y = y
292 | target.z = z-3
293 | target.rx = rx
294 | target.ry = ry
295 | target.rz = rz
296 | rospy.loginfo(target)
297 | pub_target.publish(target)
298 | return (x,y,z,rx,ry,rz)
299 |
300 | def pick_phase3 (x,y,z,rx,ry,rz):
301 | target = ObjectMsg()
302 | target.x = x
303 | target.y = y
304 | target.z = z+80
305 | target.rx = rx
306 | target.ry = ry
307 | target.rz = rz
308 | rospy.loginfo(target)
309 | pub_target.publish(target)
310 | return (x,y,z)
311 |
312 | def sunction_cup(state):
313 | if state == 1 :
314 | msg = "SetVac"
315 | pub_cmd.publish(msg)
316 | print("Set Robot Tool0 (Vacuum)...\n")
317 | elif state == 0 :
318 | msg = "ResetVac"
319 | pub_cmd.publish(msg)
320 | print("Reset Robot Tool0 (Vacuum)...\n")
321 | return 0
322 |
323 | def start_order():
324 | # Initial parameter
325 | step = 0
326 | focus_obj = 0
327 | while True :
328 | home_check = robot_home_pose(0)
329 | if step == 0 and home_check == -1 : # Check Robot and not found
330 | print("Error : no robot found")
331 | break
332 | elif step == 0 and home_check == 0 : # Check Robot and Robot not at home position
333 | step = 0
334 | elif step == 0 and home_check == 1 : # Check Robot and Robot at home position
335 | yolo_detect() # publish to yolo node to detect
336 | sleep(0.5) # sleep 500 ms
337 | if check_object() == 1 : # echo tf found Arduino
338 | print("found obj_arduino")
339 | step = 1
340 | focus_obj = 1
341 | elif check_object() == 2 : # echo tf found Raspi
342 | print("found obj_raspi")
343 | step = 1
344 | focus_obj = 2
345 | elif check_object() == 3 : # echo tf found Arduino and Raspi
346 | print("found both obj_arduino and obj_raspi")
347 | step = 1
348 | focus_obj = 1
349 | else :
350 | print("no object found") # echo tf not found object
351 | step = 0
352 |
353 | elif step == 1 : # move close to the object
354 | obj_target = move_close(focus_obj)
355 | x_order = obj_target[0]
356 | y_order = obj_target[1]
357 | z_order = obj_target[2]
358 | pose_check = robot_check_pose(x_order,y_order,z_order)
359 | print ("robot close to object")
360 | sleep(0.5) # sleep 500 ms
361 | if pose_check == 1 : # Robot at position (close to object)
362 | step = 2
363 | if pose_check == -1 :
364 | break
365 | if pose_check == 0 :
366 | break
367 |
368 | elif step == 2 : # Robot close to object echo tf again and going to pick
369 | obj_target1 = pick_phase1(focus_obj) # echo object position and move to 3 cm above object
370 | if obj_target1 == -1: # Object lost
371 | home()
372 | robot_home_pose(0)
373 | sleep(0.1) # sleep 100 ms
374 | step = 0
375 | pass
376 | else :
377 | x = obj_target1[0]
378 | y = obj_target1[1]
379 | z = obj_target1[2]
380 | rx = obj_target1[3]
381 | ry = obj_target1[4]
382 | rz = obj_target1[5]
383 | pose_check = robot_check_pose(obj_target1[0],obj_target1[1],obj_target1[2]+30) # Check Robot position (3 cm above object)
384 | if pose_check == 1 : # Robot at the position (3 cm above object)
385 | obj_target2 = pick_phase2(x,y,z,rx,ry,rz) # move to object
386 | sunction_cup(1) # Set sunction cup on
387 | pose_check = robot_check_pose(obj_target2[0],obj_target2[1],obj_target2[2]) # Check Robot position (at object)
388 | print ("robot picking object")
389 | if pose_check == 1 :
390 | print ('Picked object')
391 | obj_target3 = pick_phase3(x,y,z,rx,ry,rz)
392 | pose_check = robot_check_pose(obj_target3[0],obj_target3[1],obj_target3[2])
393 | step = 3
394 | if pose_check == -1 :
395 | break
396 | if pose_check == 0 :
397 | break
398 | if pose_check == -1 :
399 | break
400 |
401 | elif step == 3 : # go to inventory
402 | Point5() # move robot to inventory
403 | pose_check = robot_check_pose(-210,-580,150) # check robot at inventory
404 | sunction_cup(0) # Set sunction cup off
405 | sleep(0.3) # sleep 300 ms
406 | step = 4
407 |
408 | elif step == 4 :
409 | home()
410 | robot_home_pose(0)
411 | sleep(0.2) # sleep 200 ms
412 | step = 0
413 |
414 | if rospy.is_shutdown():
415 | print('Shutdown main loop')
416 | break
417 |
418 | def start_service():
419 | print("Started rosrun CR5_Project main_control")
420 | print("\n================")
421 | print(" SUMMARY")
422 | print("================")
423 | print("\nThis program use for controlling CR5 Robot, still in developing stage.")
424 | print("The purpose of this project is to ...")
425 | print("\n================")
426 | print(" SERVICE LISTS")
427 | print("================")
428 | print("* Home (Moving robot to home position)")
429 | print("* Pose (Moving robot to x,y,z input)")
430 | print("* EnableRobot")
431 | print("* DisableRobot")
432 | print("* ClearError")
433 | print("* Sleep (Moving robot to sleep position)")
434 | print("* Point0 (Set all robot joints to 0)")
435 | print("* Point1 (Moving robot to point 1)")
436 | print("* PickingLoop (Starting picking loop)\n")
437 |
438 | while True:
439 | service = input("Please type your service: ")
440 | if service == "Pose":
441 | print("Input position to publish")
442 | position_x = float(input("position.x: "))
443 | position_y = float(input("position.y: "))
444 | position_z = float(input("position.z: "))
445 | msg.x = position_x
446 | msg.y = position_y
447 | msg.z = position_z
448 | msg.rx = 180
449 | msg.ry = 0
450 | msg.rz = 180
451 | pub_target.publish(msg)
452 | elif service == "Home":
453 | msg = "home"
454 | pub_cmd.publish(msg)
455 | print("Robot moving to Home Pose...\n")
456 | elif service == "Sleep":
457 | msg = "sleep"
458 | pub_cmd.publish(msg)
459 | print("Robot moving to Sleep Pose...\n")
460 | elif service == "ClearError":
461 | msg = "ClearError"
462 | pub_cmd.publish(msg)
463 | print("Published Clear Error ...\n")
464 | elif service == "DisableRobot":
465 | msg = "DisableRobot"
466 | pub_cmd.publish(msg)
467 | print("Published Disable Robot...\n")
468 | elif service == "EnableRobot":
469 | msg = "EnableRobot"
470 | pub_cmd.publish(msg)
471 | print("Published Enable Robot...\n")
472 | elif service == "Point0":
473 | msg = "Point0"
474 | pub_cmd.publish(msg)
475 | print("Set all robot joints to 0...\n")
476 | elif service == "Point1":
477 | msg = "Point1"
478 | pub_cmd.publish(msg)
479 | print("Moving to Point 1...\n")
480 | elif service == "Point5":
481 | msg = "Point5"
482 | pub_cmd.publish(msg)
483 | print("Moving to Point 5...\n")
484 | elif service == "SetVac":
485 | msg = "SetVac"
486 | pub_cmd.publish(msg)
487 | print("Set Robot Tool0 (Vacuum)...\n")
488 | elif service == "ResetVac":
489 | msg = "ResetVac"
490 | pub_cmd.publish(msg)
491 | print("Reset Robot Tool0 (Vacuum)...\n")
492 | elif service == "PickingLoop":
493 | print("Starting picking loop...\n")
494 | start_order()
495 | else :
496 | print("ERROR: command not found \n")
497 |
498 | if rospy.is_shutdown():
499 | print("Stop Service")
500 | break
501 | return 0
502 |
503 | if __name__ == "__main__":
504 |
505 | rospy.init_node('main_order', anonymous=True)
506 | listener = tf.TransformListener()
507 | pub_order = rospy.Publisher('main/yolo_order', String, queue_size=10)
508 | pub_target = rospy.Publisher('main/target_order', ObjectMsg, queue_size=10)
509 | pub_cmd = rospy.Publisher('main/cmd_talker', String, queue_size=10)
510 | try:
511 | start_service()
512 | except rospy.ROSInterruptException:
513 | pass
--------------------------------------------------------------------------------
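The start_order() loop in each version is the same five-step state machine: 0 = detect from home, 1 = approach, 2 = pick (phases 1-3), 3 = drop at the inventory point, 4 = re-home. A table-driven sketch of that control flow follows; the handler names are hypothetical, and each would wrap the corresponding block above.

import rospy

def run_picking_loop(handlers, start=0):
    # Each handler performs one step and returns the next step id,
    # or None to leave the loop (the 'break' cases above).
    step = start
    while step is not None and not rospy.is_shutdown():
        step = handlers[step]()

# hypothetical wiring:
# handlers = {0: detect_step,    # robot_home_pose + yolo_detect + check_object
#             1: approach_step,  # move_close + robot_check_pose
#             2: pick_step,      # pick_phase1..3 + sunction_cup(1)
#             3: drop_step,      # Point5 + sunction_cup(0)
#             4: rehome_step}    # home + robot_home_pose
# run_picking_loop(handlers)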
/src/dev/node_order2.0.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | import numpy as np
3 | from std_msgs.msg import String
4 | from time import sleep
5 | import tf
6 | import math
7 | from CR5_Project.msg import ObjectMsg
8 | from tf.transformations import euler_from_quaternion
9 |
10 | def home():
11 | cmd = "home"
12 | pub_cmd.publish(cmd)
13 | rospy.loginfo("Moving to Home Pose...")
14 |
15 | def Point5():
16 | cmd = "Point5"
17 | pub_cmd.publish(cmd)
18 | print("Moving to Point 5...")
19 |
20 | def robot_home_pose(home):
21 | #print('home checking...')
22 | try :
23 | listener.waitForTransform('base_link', 'Link6', rospy.Time(), rospy.Duration(0.2))
24 | except :
25 | return -1
26 |
27 | if listener.canTransform('base_link', 'Link6', rospy.Time()) :
28 | (trans,rot) = listener.lookupTransform("base_link", "Link6", rospy.Time())
29 | x = round(trans[0]*1000)
30 | y = round(trans[1]*1000)
31 | z = round(trans[2]*1000)
32 | #print("robot pose at [", x,y,z,"]")
33 | if abs(abs(x) - 143) <= 5 and abs(abs(y) - 580) <= 5 and abs(abs(z) - 405) <= 5 :
34 | home = 1
35 | else :
36 | #print('waiting robot to home position')
37 | print('.')
38 | home = 0 #change to 0,1 for pass home checking
39 | sleep(0.2)
40 | else :
41 | home = -1
42 |
43 | if rospy.is_shutdown():
44 | print('Stop home Checking')
45 | return 0
46 |
47 | return home
48 |
49 | def robot_check_pose(x_order,y_order,z_order):
50 | print('pose checking ...')
51 | pose = 0
52 | while pose == 0 :
53 | listener.waitForTransform('base_link', 'Link6', rospy.Time(), rospy.Duration(0.2))
54 | if listener.canTransform('base_link', 'Link6', rospy.Time()) :
55 | (trans,rot) = listener.lookupTransform('base_link', 'Link6', rospy.Time())
56 | x = round(trans[0]*1000)
57 | y = round(trans[1]*1000)
58 | z = round(trans[2]*1000)
59 | #print("command pose at [", x_order,y_order,z_order,"]")
60 | #print("robot pose at [", x,y,z,"]")
61 | if abs(abs(x) - abs(x_order)) <= 4 and abs(abs(y) - abs(y_order)) <= 4 and abs(abs(z) - abs(z_order)) <= 4 :
62 | pose = 1
63 | else :
64 | #print('waiting robot to the position')
65 | print('.')
66 | pose = 0 #change to 0,1 for pass pose checking
67 | else :
68 | print("Error : robot not found")
69 | pose = -1
70 |
71 | if rospy.is_shutdown():
72 | print('Stop Pose Checking')
73 | return 0
74 |
75 | sleep(0.2)
76 |
77 | return pose
78 |
79 | def yolo_detect():
80 | process = "Yolo detect at %s" % rospy.get_time()
81 | cmd = "runYolo"
82 | rospy.loginfo(process)
83 | pub_order.publish(cmd)
84 |
85 | def check_object():
86 | obj = 0
87 | source_frameid = 'base_link'
88 | try :
89 | listener.waitForTransform('obj_arduino', source_frameid, rospy.Time(), rospy.Duration(0.2))
90 | if listener.canTransform('obj_arduino', source_frameid, rospy.Time()) == 1 :
91 | obj = 1
92 | except :
93 | pass
94 |
95 | try :
96 | listener.waitForTransform('obj_raspi', source_frameid, rospy.Time(), rospy.Duration(0.2))
97 | if listener.canTransform('obj_raspi', source_frameid, rospy.Time()) and obj == 0:
98 | obj = 2
99 | if listener.canTransform('obj_raspi', source_frameid, rospy.Time()) and obj == 1:
100 | obj = 3
101 | except :
102 | # transform unavailable: report it rather than failing silently
103 | print("cannot transform object relative to robot base_link")
104 |
105 |
106 |
107 | return obj
108 |
109 | def move_close (focus_obj):
110 | if focus_obj == 1 :
111 | source_frameid = 'base_link'
112 | target_frameid = 'obj_arduino'
113 | listener.waitForTransform(target_frameid, source_frameid, rospy.Time(), rospy.Duration(0.2))
114 | if listener.canTransform(target_frameid, source_frameid, rospy.Time()) :
115 | (trans,rot) = listener.lookupTransform(source_frameid, target_frameid, rospy.Time())
116 | x = round(trans[0]*1000) - 20
117 | y = round(trans[1]*1000) + 50
118 | z = round(trans[2]*1000) + 200
119 |
120 | else :
121 | print("Error : object lost")
122 | return
123 |
124 | elif focus_obj == 2 :
125 | source_frameid = 'base_link'
126 | target_frameid = 'obj_raspi'
127 | listener.waitForTransform(source_frameid, target_frameid, rospy.Time(), rospy.Duration(0.2))
128 | if listener.canTransform(source_frameid, target_frameid, rospy.Time()) :
129 | (trans,rot) = listener.lookupTransform(source_frameid, target_frameid, rospy.Time())
130 | x = round(trans[0]*1000)
131 | y = round(trans[1]*1000) + 50
132 | z = round(trans[2]*1000) + 200
133 | else :
134 | print("Error : object lost")
135 | return
136 |
137 | target = ObjectMsg()
138 | target.x = x
139 | target.y = y
140 | target.z = z
141 | target.rx = 180
142 | target.ry = 0
143 | target.rz = 180
144 | rospy.loginfo(target)
145 | pub_target.publish(target)
146 |
147 | return (x,y,z)
148 |
149 | def compute_vector_80(x,y,z,rx,ry,rz):
150 | # Define the position and orientation of the object
151 | object_pos = np.array([x, y, z]) # Object position in world frame
152 | object_ori = np.radians([rx, ry, rz]) # Euler angles arrive in degrees; trig below needs radians
153 |
154 | # Convert the object orientation from euler angles to a rotation matrix
155 | Rx = np.array([[1, 0, 0],
156 | [0, np.cos(object_ori[0]), -np.sin(object_ori[0])],
157 | [0, np.sin(object_ori[0]), np.cos(object_ori[0])]])
158 | Ry = np.array([[np.cos(object_ori[1]), 0, np.sin(object_ori[1])],
159 | [0, 1, 0],
160 | [-np.sin(object_ori[1]), 0, np.cos(object_ori[1])]])
161 | Rz = np.array([[np.cos(object_ori[2]), -np.sin(object_ori[2]), 0],
162 | [np.sin(object_ori[2]), np.cos(object_ori[2]), 0],
163 | [0, 0, 1]])
164 | object_rot = Rz.dot(Ry.dot(Rx))
165 |
166 | # Define the desired position of the robot relative to the object frame
167 | p_o_desired = np.array([0, 0, 80]) # 80 mm above the object in the object frame
168 |
169 | # Transform the position vector from the object frame to the world frame
170 | p_w_desired = object_rot.dot(p_o_desired) + object_pos
171 |
172 | print(p_w_desired)
173 | return (p_w_desired[0],p_w_desired[1],p_w_desired[2])
174 |
175 | def compute_vector_50(x,y,z,rx,ry,rz):
176 | # Define the position and orientation of the object
177 | object_pos = np.array([x, y, z]) # Object position in world frame
178 | object_ori = np.radians([rx, ry, rz]) # Euler angles arrive in degrees; trig below needs radians
179 |
180 | # Convert the object orientation from euler angles to a rotation matrix
181 | Rx = np.array([[1, 0, 0],
182 | [0, np.cos(object_ori[0]), -np.sin(object_ori[0])],
183 | [0, np.sin(object_ori[0]), np.cos(object_ori[0])]])
184 | Ry = np.array([[np.cos(object_ori[1]), 0, np.sin(object_ori[1])],
185 | [0, 1, 0],
186 | [-np.sin(object_ori[1]), 0, np.cos(object_ori[1])]])
187 | Rz = np.array([[np.cos(object_ori[2]), -np.sin(object_ori[2]), 0],
188 | [np.sin(object_ori[2]), np.cos(object_ori[2]), 0],
189 | [0, 0, 1]])
190 | object_rot = Rz.dot(Ry.dot(Rx))
191 |
192 | # Define the desired position of the robot relative to the object frame
193 | p_o_desired = np.array([0, 0, 54]) # 54mm above the object in the object frame
194 |
195 | # Transform the position vector from the object frame to the world frame
196 | p_w_desired = object_rot.dot(p_o_desired) + object_pos
197 |
198 | print(p_w_desired)
199 | return (p_w_desired[0],p_w_desired[1],p_w_desired[2])
200 |
201 | def pick_phase1 (focus_obj):
202 | if focus_obj == 1 :
203 | source_frameid = 'base_link'
204 | camera_frameid = 'camera_link'
205 | target_frameid1 = 'object_6'
206 | target_frameid2 = 'object_6_b'
207 | target_frameid3 = 'object_6_c'
208 | target_frameid4 = 'object_6_d'
209 | multi = 0
210 | try :
211 | listener.waitForTransform(camera_frameid, source_frameid, rospy.Time(), rospy.Duration(0.2))
212 | listener.waitForTransform(target_frameid1, source_frameid, rospy.Time(), rospy.Duration(0.1))
213 | if listener.canTransform(target_frameid1, source_frameid, rospy.Time()):
214 | multi = multi + 1
215 | print("multi = ", multi)
216 | except :
217 | print('no object_a')
218 | try :
219 | listener.waitForTransform(target_frameid2, source_frameid, rospy.Time(), rospy.Duration(0.1))
220 | if listener.canTransform(target_frameid2, source_frameid, rospy.Time()):
221 | multi = multi + 1
222 | print("multi = ", multi)
223 | except :
224 | print('no object_b')
225 | try :
226 | listener.waitForTransform(target_frameid3, source_frameid, rospy.Time(), rospy.Duration(0.1))
227 | if listener.canTransform(target_frameid3, source_frameid, rospy.Time()):
228 | multi = multi + 1
229 | print("multi = ", multi)
230 | except :
231 | print('no object_c')
232 | try :
233 | listener.waitForTransform(target_frameid4, source_frameid, rospy.Time(), rospy.Duration(0.1))
234 | if listener.canTransform(target_frameid4, source_frameid, rospy.Time()):
235 | multi = multi + 1
236 | print("multi = ", multi)
237 | except :
238 | print('no object_d')
239 | # NOTE: the multi count below assumes detections fill frames in order (a, b, c, then d)
240 | try :
241 | if multi == 1:
242 | print("found 1 object")
243 | echo_transform = listener.lookupTransform(source_frameid, target_frameid1, rospy.Time(0))
244 | echo_cam = listener.lookupTransform(target_frameid1, camera_frameid, rospy.Time(0))
245 | yaw, pitch, roll = euler_from_quaternion(echo_cam[1])
246 | v = echo_transform[0]
247 |
248 | x = int(v[0] * 1000)
249 | y = int(v[1] * 1000)
250 | z = int(v[2] * 1000)
251 | rx = int(roll * 180.0 / math.pi)
252 | ry = int(pitch * 180.0 / math.pi)
253 | rz = int(yaw * 180.0 / math.pi)
254 |
255 | x_cal = x
256 | y_cal = y
257 | z_cal = z
258 | rx_cal = ry
259 | ry_cal = rx
260 | rz_cal = rz
261 |
262 | elif multi == 2:
263 | print("found 2 objects")
264 | echo_transform1 = listener.lookupTransform(source_frameid, target_frameid1, rospy.Time(0))
265 | echo_cam1 = listener.lookupTransform(target_frameid1, camera_frameid, rospy.Time(0))
266 | echo_transform2 = listener.lookupTransform(source_frameid, target_frameid2, rospy.Time(0))
267 | echo_cam2 = listener.lookupTransform(target_frameid2, camera_frameid, rospy.Time(0))
268 | yaw1, pitch1, roll1 = euler_from_quaternion(echo_cam1[1])
269 | v1 = echo_transform1[0]
270 | yaw2, pitch2, roll2 = euler_from_quaternion(echo_cam2[1])
271 | v2 = echo_transform2[0]
272 |
273 | x1 = int(v1[0] * 1000)
274 | y1 = int(v1[1] * 1000)
275 | z1 = int(v1[2] * 1000)
276 | rx1 = int(roll1 * 180.0 / math.pi)
277 | ry1 = int(pitch1 * 180.0 / math.pi)
278 | rz1 = int(yaw1 * 180.0 / math.pi)
279 |
280 | x2 = int(v2[0] * 1000)
281 | y2 = int(v2[1] * 1000)
282 | z2 = int(v2[2] * 1000)
283 | rx2 = int(roll2 * 180.0 / math.pi)
284 | ry2 = int(pitch2 * 180.0 / math.pi)
285 | rz2 = int(yaw2 * 180.0 / math.pi)
286 |
287 | if z1 > z2 :
288 | x_cal = x1
289 | y_cal = y1
290 | z_cal = z1
291 | rx_cal = ry1
292 | ry_cal = rx1
293 | rz_cal = rz1
294 |
295 | elif z2 > z1 :
296 | x_cal = x2
297 | y_cal = y2
298 | z_cal = z2
299 | rx_cal = ry2
300 | ry_cal = rx2
301 | rz_cal = rz2
302 |
303 | elif multi == 3:
304 | print("found 3 objects")
305 | echo_transform1 = listener.lookupTransform(source_frameid, target_frameid1, rospy.Time(0))
306 | echo_cam1 = listener.lookupTransform(target_frameid1, camera_frameid, rospy.Time(0))
307 | #print("tf 1")
308 | echo_transform2 = listener.lookupTransform(source_frameid, target_frameid2, rospy.Time(0))
309 | echo_cam2 = listener.lookupTransform(target_frameid2, camera_frameid, rospy.Time(0))
310 | #print("tf 2")
311 | echo_transform3 = listener.lookupTransform(source_frameid, target_frameid3, rospy.Time(0))
312 | echo_cam3 = listener.lookupTransform(target_frameid3, camera_frameid, rospy.Time(0))
313 | #print("tf 3")
314 | yaw1, pitch1, roll1 = euler_from_quaternion(echo_cam1[1])
315 | v1 = echo_transform1[0]
316 | yaw2, pitch2, roll2 = euler_from_quaternion(echo_cam2[1])
317 | v2 = echo_transform2[0]
318 | yaw3, pitch3, roll3 = euler_from_quaternion(echo_cam3[1])
319 | v3 = echo_transform3[0]
320 |
321 | x1 = int(v1[0] * 1000)
322 | y1 = int(v1[1] * 1000)
323 | z1 = int(v1[2] * 1000)
324 | rx1 = int(roll1 * 180.0 / math.pi)
325 | ry1 = int(pitch1 * 180.0 / math.pi)
326 | rz1 = int(yaw1 * 180.0 / math.pi)
327 |
328 | x2 = int(v2[0] * 1000)
329 | y2 = int(v2[1] * 1000)
330 | z2 = int(v2[2] * 1000)
331 | rx2 = int(roll2 * 180.0 / math.pi)
332 | ry2 = int(pitch2 * 180.0 / math.pi)
333 | rz2 = int(yaw2 * 180.0 / math.pi)
334 |
335 | x3 = int(v3[0] * 1000)
336 | y3 = int(v3[1] * 1000)
337 | z3 = int(v3[2] * 1000)
338 | rx3 = int(roll3 * 180.0 / math.pi)
339 | ry3 = int(pitch3 * 180.0 / math.pi)
340 | rz3 = int(yaw3 * 180.0 / math.pi)
341 |
342 | if z1 > z2 and z1 > z3:
343 | #print("z1")
344 | x_cal = x1
345 | y_cal = y1
346 | z_cal = z1
347 | rx_cal = ry1
348 | ry_cal = rx1
349 | rz_cal = rz1
350 | elif z2 > z1 and z2 > z3:
351 | #print("z2")
352 | x_cal = x2
353 | y_cal = y2
354 | z_cal = z2
355 | rx_cal = ry2
356 | ry_cal = rx2
357 | rz_cal = rz2
358 | elif z3 > z1 and z3 > z2:
359 | #print("z3")
360 | x_cal = x3
361 | y_cal = y3
362 | z_cal = z3
363 | rx_cal = ry3
364 | ry_cal = rx3
365 | rz_cal = rz3
366 |
367 | elif multi == 4:
368 | print("found 4 objects")
369 | echo_transform1 = listener.lookupTransform(source_frameid, target_frameid1, rospy.Time(0))
370 | echo_cam1 = listener.lookupTransform(target_frameid1, camera_frameid, rospy.Time(0))
371 | echo_transform2 = listener.lookupTransform(source_frameid, target_frameid2, rospy.Time(0))
372 | echo_cam2 = listener.lookupTransform(target_frameid2, camera_frameid, rospy.Time(0))
373 | echo_transform3 = listener.lookupTransform(source_frameid, target_frameid3, rospy.Time(0))
374 | echo_cam3 = listener.lookupTransform(target_frameid3, camera_frameid, rospy.Time(0))
375 | echo_transform4 = listener.lookupTransform(source_frameid, target_frameid4, rospy.Time(0))
376 | echo_cam4 = listener.lookupTransform(target_frameid4, camera_frameid, rospy.Time(0))
377 | yaw1, pitch1, roll1 = euler_from_quaternion(echo_cam1[1])
378 | v1 = echo_transform1[0]
379 | yaw2, pitch2, roll2 = euler_from_quaternion(echo_cam2[1])
380 | v2 = echo_transform2[0]
381 | yaw3, pitch3, roll3 = euler_from_quaternion(echo_cam3[1])
382 | v3 = echo_transform3[0]
383 | yaw4, pitch4, roll4 = euler_from_quaternion(echo_cam4[1])
384 | v4 = echo_transform4[0]
385 |
386 | x1 = int(v1[0] * 1000)
387 | y1 = int(v1[1] * 1000)
388 | z1 = int(v1[2] * 1000)
389 | rx1 = int(roll1 * 180.0 / math.pi)
390 | ry1 = int(pitch1 * 180.0 / math.pi)
391 | rz1 = int(yaw1 * 180.0 / math.pi)
392 |
393 | x2 = int(v2[0] * 1000)
394 | y2 = int(v2[1] * 1000)
395 | z2 = int(v2[2] * 1000)
396 | rx2 = int(roll2 * 180.0 / math.pi)
397 | ry2 = int(pitch2 * 180.0 / math.pi)
398 | rz2 = int(yaw2 * 180.0 / math.pi)
399 |
400 | x3 = int(v3[0] * 1000)
401 | y3 = int(v3[1] * 1000)
402 | z3 = int(v3[2] * 1000)
403 | rx3 = int(roll3 * 180.0 / math.pi)
404 | ry3 = int(pitch3 * 180.0 / math.pi)
405 | rz3 = int(yaw3 * 180.0 / math.pi)
406 |
407 | x4 = int(v4[0] * 1000)
408 | y4 = int(v4[1] * 1000)
409 | z4 = int(v4[2] * 1000)
410 | rx4 = int(roll4 * 180.0 / math.pi)
411 | ry4 = int(pitch4 * 180.0 / math.pi)
412 | rz4 = int(yaw4 * 180.0 / math.pi)
413 |
414 | if z1 > z2 and z1 > z3 and z1 > z4:
415 | x_cal = x1
416 | y_cal = y1
417 | z_cal = z1
418 | rx_cal = ry1
419 | ry_cal = rx1
420 | rz_cal = rz1
421 | elif z2 > z1 and z2 > z3 and z2 > z4:
422 | x_cal = x2
423 | y_cal = y2
424 | z_cal = z2
425 | rx_cal = ry2
426 | ry_cal = rx2
427 | rz_cal = rz2
428 | elif z3 > z1 and z3 > z2 and z3 > z4:
429 | x_cal = x3
430 | y_cal = y3
431 | z_cal = z3
432 | rx_cal = ry3
433 | ry_cal = rx3
434 | rz_cal = rz3
435 | elif z4 > z1 and z4 > z2 and z4 > z3:
436 | x_cal = x4
437 | y_cal = y4
438 | z_cal = z4
439 | rx_cal = ry4
440 | ry_cal = rx4
441 | rz_cal = rz4
442 |
443 | else :
444 | print("Error : object lost 2")
445 | return -1
446 |
447 | # clamp: never command a pick height below 25 mm
448 | if z_cal < 25 :
449 | z_cal = 25
450 |
451 |
452 | rx_robo = rx_cal + 180
453 | ry_robo = -(ry_cal - 180)
454 | rz_robo = rz_cal + 180
455 |
456 | robo_compute = compute_vector_80(x_cal,y_cal,z_cal,rx_cal,ry_cal,rz_cal)
457 | target = ObjectMsg()
458 | target.x = robo_compute[0]
459 | target.y = robo_compute[1]
460 | target.z = robo_compute[2]
461 | target.rx = rx_robo
462 | target.ry = ry_robo
463 | target.rz = rz_robo
464 | rospy.loginfo(target)
465 | pub_target.publish(target)
466 | return (x_cal,y_cal,z_cal,rx_cal,ry_cal,rz_cal)
467 |
468 | except:
469 | return -1
470 |
471 | elif focus_obj == 2 :
472 | return -1
473 |
474 | def pick_phase2 (x,y,z,rx,ry,rz):
475 | rx_robo = rx + 180
476 | ry_robo = -(ry - 180)
477 | rz_robo = rz + 180
478 |
479 | robo_compute = compute_vector_50(x,y,z,rx,ry,rz)
480 | target = ObjectMsg()
481 | target.x = robo_compute[0]
482 | target.y = robo_compute[1]
483 | target.z = robo_compute[2]
484 | target.rx = rx_robo
485 | target.ry = ry_robo
486 | target.rz = rz_robo
487 | rospy.loginfo(target)
488 | pub_target.publish(target)
489 | return (x,y,z)
490 |
491 | def pick_phase3 (x,y,z,rx,ry,rz):
492 | rx_robo = rx + 180
493 | ry_robo = -(ry - 180)
494 | rz_robo = rz + 180
495 |
496 | robo_compute = compute_vector_50(x,y,z,rx,ry,rz)
497 | target = ObjectMsg()
498 | target.x = robo_compute[0]
499 | target.y = robo_compute[1]
500 | target.z = robo_compute[2] + 100
501 | target.rx = rx_robo
502 | target.ry = ry_robo
503 | target.rz = rz_robo
504 | rospy.loginfo(target)
505 | pub_target.publish(target)
506 | return (x,y,z)
507 |
508 | def sunction_cup(state):
509 | if state == 1 :
510 | msg = "SetVac"
511 | pub_cmd.publish(msg)
512 | print("Set Robot Tool0 (Vacuum)...\n")
513 | elif state == 0 :
514 | msg = "ResetVac"
515 | pub_cmd.publish(msg)
516 | print("Reset Robot Tool0 (Vacuum)...\n")
517 | return 0
518 |
519 | def start_order():
520 | # Initial parameter
521 | step = 0
522 | focus_obj = 0
523 | while True :
524 | home_check = robot_home_pose(0)
525 | if step == 0 and home_check == -1 : # Check Robot and not found
526 | print("Error : no robot found")
527 | break
528 | elif step == 0 and home_check == 0 : # Check Robot and Robot not at home position
529 | step = 0
530 | elif step == 0 and home_check == 1 : # Check Robot and Robot at home position
531 | yolo_detect() # publish to yolo node to detect
532 | sleep(0.5) # sleep 500 ms
533 | if check_object() == 1 : # echo tf found Arduino
534 | print("found obj_arduino")
535 | step = 1
536 | focus_obj = 1
537 | elif check_object() == 2 : # echo tf found Raspi
538 | print("found obj_raspi")
539 | step = 1
540 | focus_obj = 2
541 | elif check_object() == 3 : # echo tf found Arduino and Raspi
542 | print("found both obj_arduino and obj_raspi")
543 | step = 1
544 | focus_obj = 1
545 | else :
546 | print("no object found") # echo tf not found object
547 | step = 0
548 |
549 | elif step == 1 : # move close to the object
550 | obj_target = move_close(focus_obj)
551 | x_order = obj_target[0]
552 | y_order = obj_target[1]
553 | z_order = obj_target[2]
554 | pose_check = robot_check_pose(x_order,y_order,z_order)
555 | print ("robot close to object")
556 | sleep(1.5) # sleep 1500 ms while switching algorithms
557 | if pose_check == 1 : # Robot at position (close to object)
558 | step = 2
559 | if pose_check == -1 :
560 | break
561 | if pose_check == 0 :
562 | break
563 |
564 | elif step == 2 : # Robot close to object echo tf again and going to pick
565 | obj_target1 = pick_phase1(focus_obj) # echo object position and move 80 mm above the object
566 | if obj_target1 == -1: # Object lost
567 | home()
568 | robot_home_pose(0)
569 | sleep(0.2) # sleep 200 ms
570 | step = 0
571 | pass
572 | else :
573 | x = obj_target1[0]
574 | y = obj_target1[1]
575 | z = obj_target1[2]
576 | rx = obj_target1[3]
577 | ry = obj_target1[4]
578 | rz = obj_target1[5]
579 | robo_compute1 = compute_vector_80(x,y,z,rx,ry,rz) # compute the approach point after unpacking
580 | pose_check = robot_check_pose(robo_compute1[0],robo_compute1[1],robo_compute1[2]+30) # Check Robot position (25 mm above object)
581 | sleep(0.1)
582 | if pose_check == 1 :
583 | pick_phase2(x,y,z,rx,ry,rz) # robot is at the approach point; descend to the object
584 | robo_compute2 = compute_vector_50(x,y,z,rx,ry,rz) # move to object
585 | sunction_cup(1) # Set sunction cup on
586 | pose_check = robot_check_pose(robo_compute2[0],robo_compute2[1],robo_compute2[2]) # Check Robot position (at object)
587 | sleep(0.1)
588 | print ("robot picking object")
589 | if pose_check == 1 :
590 | print ('Picked object')
591 | pick_phase3(x,y,z,rx,ry,rz)
592 | pose_check = robot_check_pose(robo_compute2[0],robo_compute2[1],robo_compute2[2]+100)
593 | sleep(0.1)
594 | step = 3
595 | if pose_check == -1 :
596 | break
597 | if pose_check == 0 :
598 | break
599 | if pose_check == -1 :
600 | break
601 |
602 | elif step == 3 : # go to inventory
603 | Point5() # move robot to inventory
604 | pose_check = robot_check_pose(-210,-580,150) # check robot at inventory
605 | sunction_cup(0) # Set sunction cup off
606 | sleep(0.3) # sleep 300 ms
607 | step = 4
608 |
609 | elif step == 4 :
610 | home()
611 | robot_home_pose(0)
612 | sleep(0.2) # sleep 200 ms
613 | step = 0
614 |
615 | if rospy.is_shutdown():
616 | print('Shutdown main loop')
617 | break
618 |
619 | def start_service():
620 | print("Started rosrun CR5_Project node_order")
621 | print("\n================")
622 | print(" SUMMARY")
623 | print("================")
624 | print("\nThis program use for controlling CR5 Robot, still in developing stage.")
625 | print("The purpose of this project is to ...")
626 | print("\n================")
627 | print(" SERVICE LISTS")
628 | print("================")
629 | print("* Home (Moving robot to home position)")
630 | print("* Pose (Moving robot to x,y,z input)")
631 | print("* EnableRobot")
632 | print("* DisableRobot")
633 | print("* ClearError")
634 | print("* Sleep (Moving robot to sleep position)")
635 | print("* SetVac (Activate the vacuum)")
636 | print("* ResetVac (Deactivate the vacuum)")
637 | print("* Point0 (Set all robot joints to 0)")
638 | print("* Point1 (Moving robot to point 1)")
639 | print("* Point5 (Moving robot to point 5)")
640 | print("* PickingLoop (Starting picking loop)\n")
641 |
642 | while True:
643 | service = input("Please type your service: ")
644 | if service == "Pose":
645 | print("Input position to publish")
646 | position_x = float(input("position.x: "))
647 | position_y = float(input("position.y: "))
648 | position_z = float(input("position.z: "))
649 | msg.x = position_x
650 | msg.y = position_y
651 | msg.z = position_z
652 | msg.rx = 180
653 | msg.ry = 0
654 | msg.rz = 180
655 | pub_target.publish(msg)
656 | elif service == "Home":
657 | msg = "home"
658 | pub_cmd.publish(msg)
659 | print("Robot moving to Home Pose...\n")
660 | elif service == "Sleep":
661 | msg = "sleep"
662 | pub_cmd.publish(msg)
663 | print("Robot moving to Sleep Pose...\n")
664 | elif service == "ClearError":
665 | msg = "ClearError"
666 | pub_cmd.publish(msg)
667 | print("Published Clear Error ...\n")
668 | elif service == "DisableRobot":
669 | msg = "DisableRobot"
670 | pub_cmd.publish(msg)
671 | print("Published Disable Robot...\n")
672 | elif service == "EnableRobot":
673 | msg = "EnableRobot"
674 | pub_cmd.publish(msg)
675 | print("Published Enable Robot...\n")
676 | elif service == "Point0":
677 | msg = "Point0"
678 | pub_cmd.publish(msg)
679 | print("Set all robot joints to 0...\n")
680 | elif service == "Point1":
681 | msg = "Point1"
682 | pub_cmd.publish(msg)
683 | print("Moving to Point 1...\n")
684 | elif service == "Point2":
685 | msg = "Point2"
686 | pub_cmd.publish(msg)
687 | print("Moving to Point 2...\n")
688 | elif service == "Point3":
689 | msg = "Point2"
690 | pub_cmd.publish(msg)
691 | print("Moving to Point 3...\n")
692 | elif service == "Point5":
693 | msg = "Point5"
694 | pub_cmd.publish(msg)
695 | print("Moving to Point 5...\n")
696 | elif service == "SetVac":
697 | msg = "SetVac"
698 | pub_cmd.publish(msg)
699 | print("Set Robot Tool0 (Vacuum)...\n")
700 | elif service == "ResetVac":
701 | msg = "ResetVac"
702 | pub_cmd.publish(msg)
703 | print("Reset Robot Tool0 (Vacuum)...\n")
704 | elif service == "PickingLoop":
705 | print("Starting picking loop...\n")
706 | start_order()
707 | else :
708 | print("ERROR: command not found \n")
709 |
710 | if rospy.is_shutdown():
711 | print("Stop Service")
712 | break
713 | return 0
714 |
715 | if __name__ == "__main__":
716 |
717 | rospy.init_node('main_order', anonymous=True)
718 | listener = tf.TransformListener()
719 | pub_order = rospy.Publisher('main/yolo_order', String, queue_size=10)
720 | pub_target = rospy.Publisher('main/target_order', ObjectMsg, queue_size=10)
721 | pub_cmd = rospy.Publisher('main/cmd_talker', String, queue_size=10)
722 | try:
723 | start_service()
724 | except rospy.ROSInterruptException:
725 | pass
--------------------------------------------------------------------------------
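The compute_vector_80 / compute_vector_50 helpers in node_order2.0.py build a fixed-axis x-y-z rotation (Rz·Ry·Rx) and push the approach point offset millimetres along the object's z axis. A small numeric self-check of that math is sketched below, assuming angles arrive in degrees as the callers supply them; offset_above is an illustrative name, not part of the package.

import numpy as np

def offset_above(pos_mm, euler_deg, offset_mm):
    rx, ry, rz = np.radians(euler_deg)            # trig needs radians
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(rx), -np.sin(rx)],
                   [0, np.sin(rx),  np.cos(rx)]])
    Ry = np.array([[ np.cos(ry), 0, np.sin(ry)],
                   [0, 1, 0],
                   [-np.sin(ry), 0, np.cos(ry)]])
    Rz = np.array([[np.cos(rz), -np.sin(rz), 0],
                   [np.sin(rz),  np.cos(rz), 0],
                   [0, 0, 1]])
    object_rot = Rz.dot(Ry.dot(Rx))               # fixed-axis x-y-z composition
    return object_rot.dot([0, 0, offset_mm]) + np.asarray(pos_mm)

# flat object: the point sits straight above it
# offset_above([100, 200, 30], [0, 0, 0], 80)  -> [100. 200. 110.]
# tilted 90 deg about y: the offset swings into +x
# offset_above([100, 200, 30], [0, 90, 0], 80) -> [180. 200.  30.]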
/src/dev/read_point2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import rospy
3 | import sensor_msgs.point_cloud2 as pc2
4 | from sensor_msgs.msg import PointCloud2, PointField
5 |
6 | def listen():
7 | rospy.init_node('listen', anonymous=True)
8 | rospy.Subscriber('/camera/depth/color/points', PointCloud2, callback_kinect)
9 |
10 | def callback_kinect(data):
11 | # set the pixel coordinates
12 | x, y = 100, 100
13 |
14 | # read the point cloud data at the pixel coordinates (x, y)
15 | gen = pc2.read_points(data, skip_nans=True, field_names=("x", "y", "z"), uvs=[(x, y)])
16 | try:
17 | # extract the z coordinate of the first point
18 | z = next(gen)[2]
19 | rospy.loginfo("Depth at pixel (x={}, y={}): {:.2f}m".format(x, y, z))
20 | except StopIteration:
21 | rospy.loginfo("No point at pixel (x={}, y={})".format(x, y))
22 |
23 | if __name__ == '__main__':
24 | try:
25 | listen()
26 | rospy.spin()
27 | except rospy.ROSInterruptException:
28 | pass
29 |
--------------------------------------------------------------------------------
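
One caveat with read_points here: with skip_nans=True the generator silently drops invalid points, so if several pixels were queried at once the results would no longer line up with the uvs list. A sketch that queries a batch and keeps the alignment by filtering afterwards:

    import math
    import sensor_msgs.point_cloud2 as pc2

    def depths_at(cloud, pixels):
        # uvs takes (u, v) = (column, row) pairs; skip_nans stays off so the
        # generator yields exactly one point per requested pixel, in order.
        gen = pc2.read_points(cloud, field_names=("x", "y", "z"), uvs=list(pixels))
        return {px: p[2] for px, p in zip(pixels, gen) if not math.isnan(p[2])}
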
/src/dev/read_point3.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import rospy
3 | import sensor_msgs.point_cloud2 as pc2
4 | from sensor_msgs.msg import PointCloud2, PointField
5 | import struct
6 |
7 | def listen():
8 | rospy.init_node('listen', anonymous=True)
9 | rospy.Subscriber('/camera/depth/color/points', PointCloud2, callback_kinect, queue_size=10000)
10 |
11 | def callback_kinect(data):
12 | # set the pixel coordinates
13 | x, y = 100, 100
14 |
15 | # read the point cloud data at the pixel coordinates (x, y)
16 | gen = pc2.read_points(data, skip_nans=True, field_names=("x", "y", "z"), uvs=[(x, y)])
17 | try:
18 | # extract the z coordinate of the first point
19 | z = next(gen)[2]
20 | rospy.loginfo("Depth at pixel (x={}, y={}): {:.2f}m".format(x, y, z))
21 | except (StopIteration, ValueError, struct.error) as e:
22 | rospy.loginfo("No point at pixel (x={}, y={}). Error: {}".format(x, y, str(e)))
23 |
24 | if __name__ == '__main__':
25 | try:
26 | listen()
27 | rospy.spin()
28 | except rospy.ROSInterruptException:
29 | pass
30 |
--------------------------------------------------------------------------------
/src/dev/realsense_yolo.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | import numpy as np
3 | import cv2
4 | import random
5 | import torch
6 |
7 | #model = torch.hub.load('ultralytics/yolov5', 'custom', path='/home/rom/catkin_ws/src/CR5_Project/weights/best5.pt')
8 |
9 | #model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
10 |
11 | model = torch.hub.load('ultralytics/yolov5', 'yolov5l6')
12 | model.conf = 0.5
13 |
14 | def get_mid_pos(frame,box,depth_data,randnum):
15 | distance_list = []
16 | mid_pos = [(box[0] + box[2])//2, (box[1] + box[3])//2]
17 | min_val = min(abs(box[2] - box[0]), abs(box[3] - box[1]))
18 | #print(box,)
19 | for i in range(randnum):
20 | bias = random.randint(-min_val//4, min_val//4)
21 | dist = depth_data[int(mid_pos[1] + bias), int(mid_pos[0] + bias)]
22 | cv2.circle(frame, (int(mid_pos[0] + bias), int(mid_pos[1] + bias)), 4, (255,0,0), -1)
23 | #print(int(mid_pos[1] + bias), int(mid_pos[0] + bias))
24 | if dist:
25 | distance_list.append(dist)
26 | distance_list = np.array(distance_list)
27 | distance_list = np.sort(distance_list)[randnum//2-randnum//4:randnum//2+randnum//4]
28 | #print(distance_list, np.mean(distance_list))
29 | return np.mean(distance_list)
30 |
31 | def dectshow(org_img, boxs,depth_data):
32 | img = org_img.copy()
33 | for box in boxs:
34 | cv2.rectangle(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 2)
35 | dist = get_mid_pos(org_img, box, depth_data, 24)
36 | cv2.putText(img, box[-1] + ' ' + str(dist / 1000)[:4] + ' m',
37 | (int(box[0]), int(box[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
38 | cv2.imshow('CR5_Realsense (yolov5_detect)', img)
39 |
40 | if __name__ == "__main__":
41 | # Configure depth and color streams
42 | pipeline = rs.pipeline()
43 | config = rs.config()
44 | config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 60)
45 | config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 60)
46 | # Start streaming
47 | pipeline.start(config)
48 | try:
49 | while True:
50 | # Wait for a coherent pair of frames: depth and color
51 | frames = pipeline.wait_for_frames()
52 | depth_frame = frames.get_depth_frame()
53 | color_frame = frames.get_color_frame()
54 | if not depth_frame or not color_frame:
55 | continue
56 | # Convert images to numpy arrays
57 |
58 | depth_image = np.asanyarray(depth_frame.get_data())
59 |
60 | color_image = np.asanyarray(color_frame.get_data())
61 |
62 | results = model(color_image)
63 | boxs= results.pandas().xyxy[0].values
64 | #boxs = np.load('temp.npy',allow_pickle=True)
65 | dectshow(color_image, boxs, depth_image)
66 |
67 | # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
68 | depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
69 | # Stack both images horizontally
70 | #images = np.hstack((color_image, depth_colormap))
71 |
72 | # Show images
73 | cv2.namedWindow('CR5_Realsense (Depth)', cv2.WINDOW_AUTOSIZE)
74 | cv2.imshow('CR5_Realsense (Depth)', depth_colormap)
75 | key = cv2.waitKey(1)
76 | # Press esc or 'q' to close the image window
77 | if key & 0xFF == ord('q') or key == 27:
78 | cv2.destroyAllWindows()
79 | break
80 | finally:
81 | # Stop streaming
82 | pipeline.stop()
--------------------------------------------------------------------------------
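
get_mid_pos() above estimates box depth by averaging the interquartile band of randomly sampled readings. A simpler sketch with the same goal (robustness to outliers) takes the median of a small window around the box centre, and avoids the empty-slice case when most samples are zero:

    import numpy as np

    def box_depth_mm(depth_image, box):
        x1, y1, x2, y2 = map(int, box[:4])
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
        r = max(1, min(x2 - x1, y2 - y1) // 8)     # small window around the centre
        patch = depth_image[max(cy - r, 0):cy + r + 1, max(cx - r, 0):cx + r + 1]
        vals = patch[patch > 0]                    # zero means "no depth" on RealSense
        return float(np.median(vals)) if vals.size else 0.0
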
/src/dev/service_call_old.cpp:
--------------------------------------------------------------------------------
1 | #include "ros/ros.h"
2 | #include "std_msgs/String.h"
3 | #include "CR5_Project/ObjectMsg.h"
4 | #include <stdio.h>   // for sprintf (header name lost in export; assumed)
5 | #include <stdlib.h>  // for system
6 |
7 | void chatterCallback(const CR5_Project::ObjectMsg::ConstPtr& msg)
8 | {
9 | ROS_INFO("Recieved Position :\nx: [%0.3f]\ny: [%0.3f]\nz: [%0.3f]\nrx: [%0.1f]\nry: [%0.1f]\nrz: [%0.1f]", msg->x, msg->y, msg->z, msg->rx, msg->ry, msg->rz);
10 | char cmd[200];
11 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: %0.3f, y: %0.3f, z: %0.3f, a: %0.3f, b: %0.3f, c: %0.3f}\"'", msg->x, msg->y, msg->z, msg->rx, msg->ry, msg->rz);
12 | system(cmd);
13 | }
14 |
15 | void cmdCallback(const std_msgs::String::ConstPtr& msg)
16 | {
17 | std::string cmd;
18 | cmd = msg->data.c_str();
19 | if (cmd == "home"){
20 | char cmd[200];
21 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/JointMovJ \"{j1: 90.0, j2: 15.0, j3: 83.0, j4: -8.0, j5: -89.0, j6: 0.0}\"'");
22 | system(cmd);
23 | }
24 | else if (cmd == "sleep"){
25 | char cmd[200];
26 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/JointMovJ \"{j1: 115.0, j2: -30.0, j3: 135.0, j4: -15.0, j5: -90.0, j6: 0.0}\"'");
27 | system(cmd);
28 | }
29 | else if (cmd == "ClearError"){
30 | char cmd[200];
31 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/ClearError'");
32 | system(cmd);
33 | }
34 | else if (cmd == "DisableRobot"){
35 | char cmd[200];
36 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/DisableRobot'");
37 | system(cmd);
38 | }
39 | else if (cmd == "EnableRobot"){
40 | char cmd[200];
41 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/EnableRobot'");
42 | system(cmd);
43 | }
44 | else if (cmd == "object_not_stay"){
45 | char cmd_stop[200];
46 | sprintf(cmd_stop, "gnome-terminal --tab -e 'rosservice call /dobot_bringup/srv/StopScript'");
47 | system(cmd_stop);
48 | sprintf(cmd_stop, "gnome-terminal --tab -e 'rosservice call /dobot_bringup/srv/EnableRobot'");
49 | system(cmd_stop);
50 | }
51 | else if (cmd == "Point0"){
52 | char cmd[200];
53 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/JointMovJ \"{j1:0.0, j2:0.0, j3:0.0, j4:0.0, j5:0.0, j6:0.0}\"'");
54 | system(cmd);
55 | }
56 | else if (cmd == "Point1"){
57 | char cmd[200];
58 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: 136.0, y: -550.0, z: 260.0, a: 180.0, b: 0.0, c: 180.0}\"'");
59 | system(cmd);
60 | }
61 | else if (cmd == "Point1_R"){
62 | char cmd[200];
63 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: 20.0, y: -550.0, z: 260.0, a: 180.0, b: 0.0, c: 180.0}\"'");
64 | system(cmd);
65 | }
66 | else if (cmd == "Point2"){
67 | char cmd[200];
68 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: 20.0, y: -550.0, z: 200.0, a: 180.0, b: 0.0, c: 180.0}\"'");
69 | system(cmd);
70 | }
71 | else if (cmd == "Speed100"){
72 | char cmd_spdJ[200];
73 | sprintf(cmd_spdJ, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/SpeedJ \"r: 100 \"'");
74 | system(cmd_spdJ);
75 | }
76 | else if (cmd == "Speed50"){
77 | char cmd_spdJ[200];
78 | sprintf(cmd_spdJ, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/SpeedJ \"r: 50 \"'");
79 | system(cmd_spdJ);
80 | }
81 | else if (cmd == "inventory"){
82 | char cmd[200];
83 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: 264.0, y: -310.0, z: 96.0, a: 180.0, b: 0.0, c: 180.0}\"'");
84 | system(cmd);
85 | }
86 | else if (cmd == "inventory2"){
87 | char cmd[200];
88 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call bringup/srv/MovJ \"{x: 0.0, y: -285.0, z: 385.0, a: 0.0, b: 0.0, c: 0.0}\"'");
89 | system(cmd);
90 | char cmd2[200];
91 | sprintf(cmd2, "gnome-terminal --tab -e 'rosservice call bringup/srv/MovJ \"{x: 0.0, y: -375.0, z: 330.0, a: -90.0, b: 0.0, c: 0.0}\"'");
92 | system(cmd2);
93 | }
94 | else if (cmd == "SetVac"){
95 | char cmd_magic[200];
96 | sprintf(cmd_magic, "gnome-terminal --tab -e 'rosservice call /DobotServer/SetEndEffectorSuctionCup \"enableCtrl: 1 \nsuck: 1 \nisQueued: false\"'");
97 | system(cmd_magic);
98 | }
99 | else if (cmd == "ResetVac"){
100 | char cmd_magic[200];
101 | sprintf(cmd_magic, "gnome-terminal --tab -e 'rosservice call /DobotServer/SetEndEffectorSuctionCup \"enableCtrl: 1 \nsuck: 0 \nisQueued: false\"'");
102 | system(cmd_magic);
103 | }
104 |
105 | else { // unknown command: fall back to the all-zero joint pose
106 | char cmd[200];
107 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call bringup/srv/JointMovJ \"{j1: 0.0, j2: 0.0, j3: 0.0, j4: 0.0, j5: 0.0, j6: 0.0}\"'");
108 | system(cmd);
109 | }
110 | }
111 |
112 | int main(int argc, char **argv)
113 | {
114 | ros::init(argc, argv, "listener");
115 |
116 | ros::NodeHandle n;
117 |
118 | ros::Subscriber sub1 = n.subscribe("Object_Position", 100, chatterCallback);
119 | ros::Subscriber sub2 = n.subscribe("Cmd_Talker", 100, cmdCallback);
120 |
121 | // Starting
122 | std::cout << "Started rosrun CR5_Project service_call";
123 | std::cout << "\n \nSUMMARY";
124 | std::cout << "\n========";
125 | std::cout << "\n \nThis program is for using ros service to CR5 Robot. \nStill in development stage.";
126 | std::cout << "\n \nCOMMANDS INPUT";
127 | std::cout << "\n========";
128 |
129 | std::cout << "\nInitializing listener node for subscribe commands.";
130 |
131 | char newtmn[200];
132 | sprintf(newtmn, "gnome-terminal --geometry=90x24+0+0 --window --working-directory=/depot --title='Main Control Terminal' --command=\"rosrun CR5_Project main_control\"");
133 | system(newtmn);
134 |
135 | char cmd[200];
136 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/EnableRobot'");
137 | system(cmd);
138 |
139 | ros::spin();
140 |
141 | return 0;
142 | }
143 |
--------------------------------------------------------------------------------
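
Each command above spawns a gnome-terminal just to run one rosservice call, which is heavy and leaves errors unchecked. A lighter sketch calls the service directly; it assumes the dobot_bringup package exports a MovJ service type with the x, y, z, a, b, c fields the shell commands pass:

    import rospy
    from dobot_bringup.srv import MovJ    # assumed import path for the service type

    rospy.init_node('movj_once', anonymous=True)
    rospy.wait_for_service('dobot_bringup/srv/MovJ')
    movj = rospy.ServiceProxy('dobot_bringup/srv/MovJ', MovJ)
    movj(x=136.0, y=-550.0, z=260.0, a=180.0, b=0.0, c=180.0)   # the "Point1" target
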
/src/dev/t_yolo_ros.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | import numpy as np
3 | import cv2
4 | import torch
5 | from sensor_msgs.msg import Image
6 | from cv_bridge import CvBridge
7 |
8 | rospy.init_node('yolov5_detect')
9 | bridge = CvBridge()
10 |
11 | model = torch.hub.load('ultralytics/yolov5', 'custom', path='/home/rom/catkin_ws/src/CR5_Project/weights/best5.pt')
12 | model.conf = 0.5
13 |
14 | def image_callback(msg):
15 | # Convert ROS message to OpenCV image
16 | color_image = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
17 |
18 | # Run object detection on color_image
19 | results = model(color_image)
20 | boxs = results.pandas().xyxy[0].values
21 | dectshow(color_image, boxs)
22 |
23 | #cv2.imshow('CR5_Realsense (yolov5_detect)', color_image)
24 | key = cv2.waitKey(1)
25 |
26 | def dectshow(org_img, boxs):
27 | img = org_img.copy()
28 | height, width = img.shape[:2]
29 | center_x, center_y = width // 2, height // 2 # Calculate center point of image
30 |
31 | for box in boxs:
32 | x1, y1, x2, y2 = map(int, box[:4])
33 | cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
34 |
35 | # Calculate coordinates of center point of box relative to the new origin
36 | center_x_new = x1 + (x2 - x1) // 2 - center_x
37 | center_y_new = center_y - y1 - (y2 - y1) // 2
38 |
39 | label = f"{box[-1]}"
40 | font = cv2.FONT_HERSHEY_SIMPLEX
41 | font_scale = 0.6
42 | thickness = 2
43 | color = (255, 255, 255)
44 | cv2.putText(img, label, (x1, y1), font, font_scale, color, thickness)
45 |
46 | # Show coordinates of center point relative to the new origin
47 | cv2.putText(img, f"x: {center_x_new}, y: {center_y_new}", (x1, y1 + 30), font, 0.5, color, thickness)
48 |
49 | cv2.imshow('CR5_Realsense (yolov5_detect)', img)
50 |
51 |
52 |
53 | if __name__ == "__main__":
54 |
55 | # Subscribe to rostopic "image_raw"
56 | rospy.Subscriber("/camera/color/image_raw", Image, image_callback)
57 |
58 | rospy.spin()
59 |
--------------------------------------------------------------------------------
/src/dev/t_yolo_tf.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | import numpy as np
3 | import cv2
4 | import torch
5 | import tf
6 | from sensor_msgs.msg import Image
7 | from cv_bridge import CvBridge
8 |
9 | rospy.init_node('yolov5_detect')
10 | bridge = CvBridge()
11 |
12 | model = torch.hub.load('ultralytics/yolov5', 'custom', path='/home/rom/catkin_ws/src/CR5_Project/weights/best5.pt')
13 | model.conf = 0.5
14 |
15 | # Initialize the tf broadcaster
16 | tf_broadcaster = tf.TransformBroadcaster()
17 |
18 | def image_callback(msg):
19 | # Convert ROS message to OpenCV image
20 | color_image = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
21 |
22 | # Run object detection on color_image
23 | results = model(color_image)
24 | boxs = results.pandas().xyxy[0].values
25 | dectshow(color_image, boxs)
26 |
27 | # Get the object pose from the first box
28 | if len(boxs) > 0:
29 | x1, y1, x2, y2 = map(int, boxs[0][:4])
30 | center_x = x1 + (x2 - x1) // 2
31 | center_y = y1 + (y2 - y1) // 2
32 |
33 | # Calculate the object's position relative to the image center
34 | height, width = color_image.shape[:2]
35 | center_x_new = center_x - width // 2
36 | center_y_new = height // 2 - center_y
37 |
38 | # Create a translation matrix for the object's position
39 | trans_matrix = tf.transformations.translation_matrix((center_x_new, center_y_new, 0))
40 |
41 | # Create a rotation matrix for the object's orientation
42 | rot_matrix = tf.transformations.quaternion_matrix(tf.transformations.quaternion_from_euler(0, 0, 0))
43 |
44 | # Calculate the transform matrix for the object's pose (obj_matrix is currently unused)
45 | obj_matrix = np.dot(trans_matrix, rot_matrix)
46 |
47 | # Publish the transform (note: these offsets are raw pixel counts, not metres)
48 | tf_broadcaster.sendTransform((center_x_new, center_y_new, 0), tf.transformations.quaternion_from_euler(0, 0, 0), rospy.Time.now(), "object_frame", "camera_color_optical_frame")
49 |
50 | #cv2.imshow('CR5_Realsense (yolov5_detect)', color_image)
51 | key = cv2.waitKey(1)
52 |
53 | def dectshow(org_img, boxs):
54 | img = org_img.copy()
55 |
56 | for box in boxs:
57 | x1, y1, x2, y2 = map(int, box[:4])
58 | cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
59 |
60 | label = f"{box[-1]}"
61 | font = cv2.FONT_HERSHEY_SIMPLEX
62 | font_scale = 1
63 | thickness = 2
64 | color = (255, 255, 255)
65 | cv2.putText(img, label, (x1, y1), font, font_scale, color, thickness)
66 |
67 | cv2.imshow('CR5_Realsense (yolov5_detect)', img)
68 |
69 | if __name__ == "__main__":
70 |
71 | # Subscribe to rostopic "image_raw"
72 | rospy.Subscriber("/camera/color/image_raw", Image, image_callback)
73 |
74 | rospy.spin()
75 |
--------------------------------------------------------------------------------
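
Since the transform above puts raw pixel offsets in the translation slot, the published frame is not metric. A pinhole-model sketch for converting a pixel offset to metres, assuming the colour camera's focal lengths in pixels (the fx/fy values below are placeholders, not calibrated):

    def pixel_offset_to_metres(du, dv, z, fx=615.0, fy=615.0):
        # du, dv: offset from the image centre in pixels; z: depth in metres
        return du * z / fx, dv * z / fy

    # e.g. a 100 px horizontal offset seen at 0.45 m:
    # x_m, _ = pixel_offset_to_metres(100, 0, 0.45)   # about 0.073 m with the placeholder fx
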
/src/dev/t_yolo_tf_2.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | import numpy as np
3 | import cv2
4 | import torch
5 | import tf
6 | from sensor_msgs.msg import Image
7 | from cv_bridge import CvBridge
8 |
9 | rospy.init_node('yolov5_detect')
10 | bridge = CvBridge()
11 |
12 | model = torch.hub.load('ultralytics/yolov5', 'custom', path='/home/rom/catkin_ws/src/CR5_Project/weights/best5.pt')
13 | model.conf = 0.5
14 |
15 | # Initialize the tf broadcaster
16 | tf_broadcaster = tf.TransformBroadcaster()
17 |
18 | def image_callback(msg):
19 | # Convert ROS message to OpenCV image
20 | color_image = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
21 |
22 | # Run object detection on color_image
23 | results = model(color_image)
24 | boxs = results.pandas().xyxy[0].values
25 | dectshow(color_image, boxs)
26 | i = 0
27 | # Get the object pose from the first box
28 | if len(boxs) > 0:
29 | for box in boxs:
30 | print(boxs)
31 | x1, y1, x2, y2 = map(int, box[:4])
32 | img = color_image.copy()
33 | height, width = img.shape[:2]
34 | center_x, center_y = width // 2, height // 2 # Calculate center point of image
35 |
36 | center_x_new = x1 + (x2 - x1) // 2 - center_x
37 | center_y_new = center_y - y1 - (y2 - y1) // 2
38 |
39 | # Convert pixel coordinates to meters
40 | W = 48 # visible workspace width in centimeters
41 | H = 36 # visible workspace height in centimeters
42 | width = color_image.shape[1] # image width in pixels
43 | height = color_image.shape[0] # image height in pixels
44 | x_m = (center_x_new * (W / width))/100
45 | y_m = (center_y_new * (H / height))/100
46 | z_m = 0.45
47 | object_frame = "Yolo_Object "
48 | object_frame = object_frame + f"{i}"
49 | # Publish the transform
50 | tf_broadcaster.sendTransform((x_m, -y_m, z_m), tf.transformations.quaternion_from_euler(0, 0, 0), rospy.Time.now(), object_frame, "camera_color_optical_frame")
51 | i += 1
52 |
53 | #cv2.imshow('CR5_Realsense (yolov5_detect)', color_image)
54 | key = cv2.waitKey(1)
55 |
56 | def dectshow(org_img, boxs):
57 | img = org_img.copy()
58 | height, width = img.shape[:2]
59 | center_x, center_y = width // 2, height // 2 # Calculate center point of image
60 |
61 | for box in boxs:
62 | x1, y1, x2, y2 = map(int, box[:4])
63 | cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
64 |
65 | # Calculate coordinates of center point of box relative to the new origin
66 | center_x_new = x1 + (x2 - x1) // 2 - center_x
67 | center_y_new = center_y - y1 - (y2 - y1) // 2
68 |
69 | label = f"{box[-1]}"
70 | label = label + f"x: {center_x_new}, y: {center_y_new}"
71 | font = cv2.FONT_HERSHEY_SIMPLEX
72 | font_scale = 0.5
73 | thickness = 2
74 | color = (255, 255, 255)
75 |
76 | # Show coordinates of center point relative to the new origin
77 | cv2.putText(img, label, (x1, y1+10), font, font_scale, color, thickness)
78 |
79 | cv2.imshow('CR5_Realsense (yolov5_detect)', img)
80 |
81 | if __name__ == "__main__":
82 |
83 | # Subscribe to rostopic "image_raw"
84 | rospy.Subscriber("/camera/color/image_raw", Image, image_callback)
85 |
86 | rospy.spin()
87 |
--------------------------------------------------------------------------------
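
Here the pixel-to-metre conversion instead assumes the camera always sees a fixed 48 cm x 36 cm patch of the workbench (the camera height is pinned by z_m = 0.45). The scale this implies, as a worked example:

    W_cm, H_cm = 48, 36        # assumed visible workspace at the fixed camera height
    width, height = 640, 480   # image size in pixels
    px = 100                   # example horizontal offset from the image centre
    x_m = (px * (W_cm / width)) / 100
    print(x_m)                 # 0.075 m: each pixel spans 0.75 mm horizontally
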
/src/dev/t_yolo_tf_order.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | import numpy as np
3 | import cv2
4 | import torch
5 | import tf
6 | from sensor_msgs.msg import Image
7 | from cv_bridge import CvBridge
8 | from std_msgs.msg import String
9 |
10 | bridge = CvBridge()
11 |
12 | model = torch.hub.load('ultralytics/yolov5', 'custom', path='/home/rom/catkin_ws/src/CR5_Project/weights/best5.pt')
13 | model.conf = 0.5
14 |
15 | # Initialize the tf broadcaster
16 | tf_broadcaster = tf.TransformBroadcaster()
17 |
18 | def image_callback(msg):
19 | global start_detect
20 |
21 | if start_detect == 1 :
22 | print("run detect loop")
23 | start_detect = 0
24 |
25 | # Convert ROS message to OpenCV image
26 | color_image = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
27 |
28 | # Run object detection on color_image
29 | results = model(color_image)
30 | boxs = results.pandas().xyxy[0].values
31 | dectshow(color_image, boxs)
32 | i = 0
33 | # Get the object pose from the first box
34 | if len(boxs) > 0:
35 | for box in boxs:
36 | print(boxs)
37 | x1, y1, x2, y2 = map(int, box[:4])
38 | img = color_image.copy()
39 | height, width = img.shape[:2]
40 | center_x, center_y = width // 2, height // 2 # Calculate center point of image
41 |
42 | center_x_new = x1 + (x2 - x1) // 2 - center_x
43 | center_y_new = center_y - y1 - (y2 - y1) // 2
44 |
45 | # Convert pixel coordinates to meters
46 | W = 48 # visible workspace width in centimeters
47 | H = 36 # visible workspace height in centimeters
48 | width = color_image.shape[1] # image width in pixels
49 | height = color_image.shape[0] # image height in pixels
50 | x_m = (center_x_new * (W / width))/100
51 | y_m = (center_y_new * (H / height))/100
52 | z_m = 0.420
53 | object_frame = "Yolo_Object "
54 | object_frame = object_frame + f"{i}"
55 | object_frame = "obj_arduino"
56 | # Publish the transform
57 | if i == 0 :
58 | tf_broadcaster.sendTransform((x_m, -y_m, z_m), tf.transformations.quaternion_from_euler(0, 0, 0), rospy.Time.now(), object_frame, "camera_color_optical_frame")
59 | i += 1
60 |
61 | else :
62 | pass
63 |
64 | #cv2.imshow('CR5_Realsense (yolov5_detect)', color_image)
65 | #key = cv2.waitKey(1)
66 |
67 | def dectshow(org_img, boxs):
68 | img = org_img.copy()
69 | height, width = img.shape[:2]
70 | center_x, center_y = width // 2, height // 2 # Calculate center point of image
71 |
72 | for box in boxs:
73 | x1, y1, x2, y2 = map(int, box[:4])
74 | cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
75 |
76 | # Calculate coordinates of center point of box relative to the new origin
77 | center_x_new = x1 + (x2 - x1) // 2 - center_x
78 | center_y_new = center_y - y1 - (y2 - y1) // 2
79 |
80 | label = f"{box[-1]}"
81 | label = label + f" x: {center_x_new}, y: {center_y_new}"
82 | font = cv2.FONT_HERSHEY_SIMPLEX
83 | font_scale = 0.5
84 | thickness = 1
85 | color = (255, 255, 255)
86 |
87 | # Show coordinates of center point relative to the new origin
88 | cv2.putText(img, label, (x1+2, y1-7), font, font_scale, color, thickness)
89 |
90 | ros_image = bridge.cv2_to_imgmsg(img, encoding="rgb8") # Convert OpenCV image to ROS Image message
91 | pub_img.publish(ros_image) # Publish ROS Image message to topic
92 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
93 | #cv2.imshow('CR5_Realsense (yolov5_detect)', img)
94 |
95 | def detect_order(msg):
96 | global start_detect
97 | start_detect = 1
98 |
99 | rospy.loginfo("I heard %s", msg.data)
100 |
101 | if __name__ == "__main__":
102 |
103 | global start_detect
104 | start_detect = 0
105 | print("Ready to detect")
106 | rospy.init_node('yolo_listener', anonymous=True)
107 | # Publish to rostopic
108 | pub_img = rospy.Publisher("/main/yolo_detect_image", Image, queue_size=10)
109 |
110 | # Subscribe to rostopic
111 | rospy.Subscriber("/main/yolo_order", String, detect_order)
112 | rospy.Subscriber("/camera/color/image_raw", Image, image_callback)
113 |
114 | rospy.spin()
--------------------------------------------------------------------------------
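
The node above only runs a detection pass after a message arrives on /main/yolo_order. A sketch for arming one pass from another script (the payload text is ignored; any message sets start_detect):

    import rospy
    from std_msgs.msg import String

    rospy.init_node('order_once', anonymous=True)
    pub = rospy.Publisher('/main/yolo_order', String, queue_size=1)
    rospy.sleep(0.5)                      # let the publisher register
    pub.publish(String(data='detect'))    # arms one pass of image_callback
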
/src/dev/test2_rotation.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | import tf
3 | import math
4 | from time import sleep
5 | from tf.transformations import euler_from_quaternion
6 |
7 | rospy.init_node('main_order', anonymous=True)
8 | echoListener = tf.TransformListener()
9 |
10 | source_frameid = 'base_link'
11 | camera_frameid = 'camera_link'
12 | target_frameid = 'object_6'
13 |
14 | echoListener.waitForTransform(target_frameid, source_frameid, rospy.Time(), rospy.Duration(0.5))
15 | echoListener.waitForTransform(camera_frameid, source_frameid, rospy.Time(), rospy.Duration(0.5))
16 |
17 | echo_transform = echoListener.lookupTransform(source_frameid, target_frameid, rospy.Time(0))
18 | echo_cam = echoListener.lookupTransform(target_frameid, camera_frameid, rospy.Time(0))
19 |
20 | yaw, pitch, roll = euler_from_quaternion(echo_cam[1])
21 | v = echo_transform[0]
22 |
23 | goal_x = int(v[0] * 1000)
24 | goal_y = int(v[1] * 1000)
25 | goal_z = int(v[2] * 1000)
26 | goal_roll = int(roll * 180.0 / math.pi)
27 | goal_pitch = int(pitch * 180.0 / math.pi)
28 | goal_yaw = int(yaw * 180.0 / math.pi)
29 |
30 | goal_x_cal = goal_x
31 | goal_y_cal = goal_y
32 | goal_z_cal = goal_z + 54
33 | goal_rx_cal = goal_pitch + 180
34 | goal_ry_cal = -(goal_roll - 180)
35 | goal_rz_cal = goal_yaw + 180
36 |
37 | print(goal_x_cal," ",goal_y_cal," ",goal_z_cal," ",goal_rx_cal," ",goal_ry_cal," ",goal_rz_cal)
38 |
39 |
--------------------------------------------------------------------------------
/src/dev/test_compute.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | import numpy as np
3 | import tf
4 | import math
5 | from tf.transformations import euler_from_quaternion
6 |
7 | def compute_vector_50(x,y,z,ry,rx,rz):
8 | # Convert the orientation from degrees to radians
9 | rx = math.radians(rx)
10 | ry = math.radians(ry)
11 | rz = math.radians(rz)
12 |
13 | # Define the position and orientation of the object
14 | object_pos = np.array([x, y, z]) # Object position in world frame
15 | object_ori = np.array([ry, rx, rz]) # Object orientation in world frame (euler angles)
16 |
17 | # Convert the object orientation from euler angles to a rotation matrix
18 | Rx = np.array([[1, 0, 0],
19 | [0, np.cos(object_ori[1]), -np.sin(object_ori[1])],
20 | [0, np.sin(object_ori[1]), np.cos(object_ori[1])]])
21 | Ry = np.array([[np.cos(object_ori[0]), 0, np.sin(object_ori[0])],
22 | [0, 1, 0],
23 | [-np.sin(object_ori[0]), 0, np.cos(object_ori[0])]])
24 | Rz = np.array([[np.cos(object_ori[2]), -np.sin(object_ori[2]), 0],
25 | [np.sin(object_ori[2]), np.cos(object_ori[2]), 0],
26 | [0, 0, 1]])
27 | object_rot = Rz.dot(Ry.dot(Rx))
28 |
29 | # Define the desired position of the robot relative to the object frame
30 | p_o_desired = np.array([0, 0, -50]) # 50 mm above the object in the object frame
31 |
32 | # Transform the position vector from the object frame to the world frame
33 | p_w_desired = object_rot.dot(p_o_desired) + object_pos
34 |
35 | #print(p_w_desired)
36 | return (p_w_desired[0],p_w_desired[1],p_w_desired[2])
37 |
38 | def compute_vector_100(x,y,z,ry,rx,rz):
39 | # Convert the orientation from degrees to radians
40 | rx = math.radians(rx)
41 | ry = math.radians(ry)
42 | rz = math.radians(rz)
43 |
44 | # Define the position and orientation of the object
45 | object_pos = np.array([x, y, z]) # Object position in world frame
46 | object_ori = np.array([ry, rx, rz]) # Object orientation in world frame (euler angles)
47 |
48 | # Convert the object orientation from euler angles to a rotation matrix
49 | Rx = np.array([[1, 0, 0],
50 | [0, np.cos(object_ori[1]), -np.sin(object_ori[1])],
51 | [0, np.sin(object_ori[1]), np.cos(object_ori[1])]])
52 | Ry = np.array([[np.cos(object_ori[0]), 0, np.sin(object_ori[0])],
53 | [0, 1, 0],
54 | [-np.sin(object_ori[0]), 0, np.cos(object_ori[0])]])
55 | Rz = np.array([[np.cos(object_ori[2]), -np.sin(object_ori[2]), 0],
56 | [np.sin(object_ori[2]), np.cos(object_ori[2]), 0],
57 | [0, 0, 1]])
58 | object_rot = Rz.dot(Ry.dot(Rx))
59 |
60 | # Define the desired position of the robot relative to the object frame
61 | p_o_desired = np.array([0, 0, -100]) # 100mm above the object in the object frame
62 |
63 | # Transform the position vector from the object frame to the world frame
64 | p_w_desired = object_rot.dot(p_o_desired) + object_pos
65 |
66 | #print(p_w_desired)
67 | return (p_w_desired[0],p_w_desired[1],p_w_desired[2])
68 |
69 | def compute_vector_150(x,y,z,ry,rx,rz):
70 | # Convert the orientation from degrees to radians
71 | rx = math.radians(rx)
72 | ry = math.radians(ry)
73 | rz = math.radians(rz)
74 |
75 | # Define the position and orientation of the object
76 | object_pos = np.array([x, y, z]) # Object position in world frame
77 | object_ori = np.array([ry, rx, rz]) # Object orientation in world frame (euler angles)
78 |
79 | # Convert the object orientation from euler angles to a rotation matrix
80 | Rx = np.array([[1, 0, 0],
81 | [0, np.cos(object_ori[1]), -np.sin(object_ori[1])],
82 | [0, np.sin(object_ori[1]), np.cos(object_ori[1])]])
83 | Ry = np.array([[np.cos(object_ori[0]), 0, np.sin(object_ori[0])],
84 | [0, 1, 0],
85 | [-np.sin(object_ori[0]), 0, np.cos(object_ori[0])]])
86 | Rz = np.array([[np.cos(object_ori[2]), -np.sin(object_ori[2]), 0],
87 | [np.sin(object_ori[2]), np.cos(object_ori[2]), 0],
88 | [0, 0, 1]])
89 | object_rot = Rz.dot(Ry.dot(Rx))
90 |
91 | # Define the desired position of the robot relative to the object frame
92 | p_o_desired = np.array([0, 0, -150]) # 150 mm above the object in the object frame
93 |
94 | # Transform the position vector from the object frame to the world frame
95 | p_w_desired = object_rot.dot(p_o_desired) + object_pos
96 |
97 | #print(p_w_desired)
98 | return (p_w_desired[0],p_w_desired[1],p_w_desired[2])
99 |
100 | if __name__ == "__main__":
101 | source_frameid = 'base_link'
102 | camera_frameid = 'camera_link'
103 | target_frameid1 = 'object_6'
104 | rospy.init_node('main_order', anonymous=True)
105 | listener = tf.TransformListener()
106 | listener.waitForTransform(target_frameid1, source_frameid, rospy.Time(), rospy.Duration(1))
107 |
108 | print("found 1 object")
109 |
110 | echo_transform = listener.lookupTransform(source_frameid, target_frameid1, rospy.Time(0))
111 | echo_cam = listener.lookupTransform(target_frameid1, camera_frameid, rospy.Time(0))
112 | yaw, pitch, roll = euler_from_quaternion(echo_cam[1])
113 | v = echo_transform[0]
114 |
115 | x = int(v[0] * 1000)
116 | y = int(v[1] * 1000)
117 | z = int(v[2] * 1000)
118 | rx = int(roll * 180.0 / math.pi)
119 | ry = int(pitch * 180.0 / math.pi)
120 | rz = int(yaw * 180.0 / math.pi)
121 |
122 | x_cal = x
123 | y_cal = y
124 | z_cal = z
125 | rx_cal = ry
126 | ry_cal = rx
127 | rz_cal = rz
128 |
129 | rx_robo = rx_cal + 180
130 | ry_robo = -(ry_cal - 180)
131 | rz_robo = rz_cal + 180
132 |
133 | print("Object : ",x_cal,y_cal,z_cal,rx,ry,rz)
134 | robo_compute1 = compute_vector_50(x_cal,y_cal,z_cal,rx,ry,rz)
135 | robo_compute2 = compute_vector_100(x_cal,y_cal,z_cal,rx,ry,rz)
136 | robo_compute3 = compute_vector_150(x_cal,y_cal,z_cal,rx,ry,rz)
137 |
138 | '''if robo_compute[2] >= 33 :
139 | robo_compute[2] = robo_compute[2]
140 | if robo_compute[2] < 33 :
141 | robo_compute[2] = 33'''
142 |
143 | print("Robot : ",round(robo_compute1[0]),round(robo_compute1[1]),round(robo_compute1[2]),rx_robo,ry_robo,rz_robo)
144 | print("Robot : ",round(robo_compute2[0]),round(robo_compute2[1]),round(robo_compute2[2]),rx_robo,ry_robo,rz_robo)
145 | print("Robot : ",round(robo_compute3[0]),round(robo_compute3[1]),round(robo_compute3[2]),rx_robo,ry_robo,rz_robo)
--------------------------------------------------------------------------------
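
The three compute_vector_* functions differ only in the stand-off distance along the object's -z axis, so they collapse into one helper. A sketch with the same math and the offset parameterised:

    import math
    import numpy as np

    def compute_vector(x, y, z, ry, rx, rz, offset_mm):
        rx, ry, rz = map(math.radians, (rx, ry, rz))
        Rx = np.array([[1, 0, 0],
                       [0, np.cos(rx), -np.sin(rx)],
                       [0, np.sin(rx), np.cos(rx)]])
        Ry = np.array([[np.cos(ry), 0, np.sin(ry)],
                       [0, 1, 0],
                       [-np.sin(ry), 0, np.cos(ry)]])
        Rz = np.array([[np.cos(rz), -np.sin(rz), 0],
                       [np.sin(rz), np.cos(rz), 0],
                       [0, 0, 1]])
        object_rot = Rz.dot(Ry.dot(Rx))
        p = object_rot.dot(np.array([0, 0, -offset_mm])) + np.array([x, y, z])
        return (p[0], p[1], p[2])

    # compute_vector_50(x, y, z, ry, rx, rz) == compute_vector(x, y, z, ry, rx, rz, 50), etc.
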
/src/dev/test_rotation.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | import tf
3 | import math
4 | from time import sleep
5 | import tf.transformations as tr
6 |
7 | rospy.init_node('main_order', anonymous=True)
8 | listener = tf.TransformListener()
9 |
10 | while True:
11 | source_frameid = 'base_link'
12 | target_frameid = 'object_6'
13 |
14 | listener.waitForTransform(target_frameid, source_frameid, rospy.Time(), rospy.Duration(0.5))
15 | if listener.canTransform(target_frameid, source_frameid, rospy.Time()):
16 | (trans, rot) = listener.lookupTransform(source_frameid, target_frameid, rospy.Time())
17 |
18 | # convert the rotation vector to a rotation matrix
19 | rot_matrix = tr.quaternion_matrix(rot)
20 |
21 | # extract the roll-pitch-yaw angles from the rotation matrix
22 | rpy_angles = tr.euler_from_matrix(rot_matrix, 'rxyz')
23 |
24 | # rpy_angles is a tuple containing the roll, pitch, and yaw angles in radians
25 | roll, pitch, yaw = rpy_angles
26 |
27 | x = round(trans[0] * 1000)
28 | y = round(trans[1] * 1000)
29 | z = round(trans[2] * 1000)
30 | rx = round(math.degrees(roll))
31 | ry = round(math.degrees(pitch) + 180)
32 | rz = round(math.degrees(yaw))
33 | z_off = z + 54 if z + 54 >= 15 else 15
34 |
35 | rx = rx + 180
36 | ry = ry - 180
37 | rz = rz + 180
38 |
39 | x = x
40 | y = y
41 | z = z_off + 30
42 |
43 | print(x," ",y," ",z," ",rx," ",ry," ",rz)
44 |
45 | sleep(1)
46 |
--------------------------------------------------------------------------------
/src/dev/test_yolo_order.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | import numpy as np
3 | import cv2
4 | import torch
5 | import tf
6 | from sensor_msgs.msg import Image
7 | from cv_bridge import CvBridge
8 | from std_msgs.msg import String
9 |
10 | bridge = CvBridge()
11 |
12 | model = torch.hub.load('ultralytics/yolov5', 'custom', path='/home/rom/catkin_ws/src/CR5_Project/weights/yolov5l6.pt')
13 | model.conf = 0.5
14 |
15 | #model = torch.hub.load('ultralytics/yolov5', 'custom', path='/home/rom/catkin_ws/src/CR5_Project/weights/best.pt')
16 | #model.conf = 0.6
17 |
18 | #model = torch.hub.load('ultralytics/yolov5', 'custom', path='/home/rom/catkin_ws/src/CR5_Project/weights/best.pt')
19 | #model.conf = 0.7
20 |
21 | # Initialize the tf broadcaster
22 | tf_broadcaster = tf.TransformBroadcaster()
23 |
24 | def image_callback(msg):
25 | global start_detect
26 | start_detect = 1  # gate forced on for bench testing; normally armed by detect_order()
27 | #color_image = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
28 | #cv2.imshow('depth_image', color_image)
29 | #key = cv2.waitKey(1)
30 |
31 | if start_detect == 1 :
32 | print("run detect loop")
33 | start_detect = 1  # set to 0 to re-arm via /main/yolo_order instead of running every frame
34 |
35 | # Convert ROS message to OpenCV image
36 | color_image = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
37 |
38 | # Run object detection on color_image
39 | results = model(color_image)
40 | boxs = results.pandas().xyxy[0].values
41 | dectshow(color_image, boxs)
42 | i = 0
43 | z_compare = 1.0
44 | # Get the object pose from the first box
45 | if len(boxs) > 0:
46 | for box in boxs:
47 | print(boxs)
48 | x1, y1, x2, y2 = map(int, box[:4])
49 | img = color_image.copy()
50 | height, width = img.shape[:2]
51 | center_x, center_y = width // 2, height // 2 # Calculate center point of image
52 |
53 | center_x_new = x1 + (x2 - x1) // 2 - center_x
54 | center_y_new = center_y - y1 - (y2 - y1) // 2
55 |
56 | obj_center_old_x = x1 + (x2 - x1) // 2
57 | obj_center_old_y = y1 + (y2 - y1) // 2
58 | obj_center_old = [obj_center_old_x, obj_center_old_y]
59 | obj_depth = get_mid_pos(obj_center_old)
60 |
61 | # Convert pixel coordinates to meters
62 | W = 48 # visible workspace width in centimeters
63 | H = 36 # visible workspace height in centimeters
64 | width = color_image.shape[1] # image width in pixels
65 | height = color_image.shape[0] # image height in pixels
66 | x_m = (center_x_new * (W / width))/100
67 | y_m = (center_y_new * (H / height))/100
68 | z_m = obj_depth/1000
69 |
70 |
71 | object_frame = "Yolo_Object "
72 | object_frame = object_frame + f"{i}"
73 | object_frame = "obj_arduino"
74 | # Publish the transform
75 | if z_compare > z_m :
76 | z_compare = z_m
77 | tf_broadcaster.sendTransform((x_m, -y_m, z_m), tf.transformations.quaternion_from_euler(0, 0, 0), rospy.Time.now(), object_frame, "camera_color_optical_frame")
78 | i += 1
79 | else :
80 | pass
81 |
82 |
83 | def dectshow(org_img, boxs):
84 | img = org_img.copy()
85 | height, width = img.shape[:2]
86 | center_x, center_y = width // 2, height // 2 # Calculate center point of image
87 |
88 | for box in boxs:
89 | x1, y1, x2, y2 = map(int, box[:4])
90 | cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
91 |
92 | # Calculate coordinates of center point of box relative to the new origin
93 | center_x_new = x1 + (x2 - x1) // 2 - center_x
94 | center_y_new = center_y - y1 - (y2 - y1) // 2
95 |
96 | obj_center_old_x = x1 + (x2 - x1) // 2
97 | obj_center_old_y = y1 + (y2 - y1) // 2
98 | obj_center_old = [obj_center_old_x, obj_center_old_y]
99 | obj_depth = get_mid_pos(obj_center_old)
100 |
101 | label = f"{box[-1]}"
102 | label = label + f" x: {center_x_new}, y: {center_y_new}, z: {obj_depth}"
103 | font = cv2.FONT_HERSHEY_SIMPLEX
104 | font_scale = 0.5
105 | thickness = 1
106 | color = (255, 255, 255)
107 |
108 | # Show coordinates of center point relative to the new origin
109 | cv2.putText(img, label, (x1+2, y1-7), font, font_scale, color, thickness)
110 |
111 | ros_image = bridge.cv2_to_imgmsg(img, encoding="rgb8") # Convert OpenCV image to ROS Image message
112 | pub_img.publish(ros_image) # Publish ROS Image message to topic
113 | #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
114 | #cv2.imshow('CR5_Realsense (yolov5_detect)', img)
115 |
116 | def get_mid_pos(center):
117 | global depth_global
118 | print(center)
119 | depth_data = depth_global
120 | width = 410
121 | height = 300
122 | percent_w = width/640
123 | percent_h = height/480
124 | radius = 5
125 |
126 | #print(percent_w,percent_w)
127 |
128 | X = center[0] * percent_w
129 | Y = center[1] * percent_h
130 |
131 | distance_list = []
132 |
133 | dist = depth_data[int(Y), int(X)]
134 | print(int(X),int(Y))
135 |
136 | for i in range(int(Y)-radius, int(Y)+radius+1):
137 | for j in range(int(X)-radius, int(X)+radius+1):
138 | dist = depth_data[i, j]
139 | if dist:
140 | distance_list.append(dist)
141 |
142 | distance_list = np.array(distance_list)
143 | mean_dist = np.mean(distance_list)
144 |
145 | return int(mean_dist)
146 |
147 | def depth_callback(msg):
148 | global depth_global
149 | depth_image = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
150 | # Define crop parameters
151 | crop_left = 100
152 | crop_right = 130
153 | crop_top = 90
154 | crop_bottom = 90
155 |
156 | # Crop depth image
157 | depth_cropped = depth_image[crop_top:-crop_bottom, crop_left:-crop_right]
158 | #depth_cropped = depth_image
159 |
160 | depth_global = depth_cropped
161 | depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_cropped, alpha=0.30), cv2.COLORMAP_JET)
162 |
163 | depth_colormap = cv2.cvtColor(depth_colormap, cv2.COLOR_BGR2RGB)
164 | ros_image = bridge.cv2_to_imgmsg(depth_colormap, encoding="rgb8") # Convert OpenCV image to ROS Image message
165 | pub_depth_img.publish(ros_image) # Publish ROS Image message to topic
166 | #cv2.imshow('depth_image', depth_colormap)
167 | #key = cv2.waitKey(1)
168 |
169 | def detect_order(msg):
170 | global start_detect
171 | start_detect = 1
172 |
173 | rospy.loginfo("I heard %s", msg.data)
174 |
175 | if __name__ == "__main__":
176 |
177 | global start_detect
178 | start_detect = 0
179 | print("Ready to detect")
180 | rospy.init_node('yolo_listener', anonymous=True)
181 | # Publish to rostopic
182 | pub_img = rospy.Publisher("/main/yolo_detect_image", Image, queue_size=10)
183 | pub_depth_img = rospy.Publisher("/main/color_depth_image", Image, queue_size=10)
184 |
185 | # Subscribe to rostopic
186 | rospy.Subscriber("/main/yolo_order", String, detect_order)
187 | rospy.Subscriber("/camera/color/image_raw", Image, image_callback)
188 | rospy.Subscriber("/camera/depth/image_rect_raw", Image, depth_callback)
189 |
190 | rospy.spin()
--------------------------------------------------------------------------------
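
get_mid_pos() above maps colour-image pixels into the cropped depth frame with hard-coded 410 x 300 dimensions; those numbers are the crop margins from depth_callback applied to a 640 x 480 depth stream (640 - 100 - 130 and 480 - 90 - 90). A sketch that derives them instead of hard-coding:

    CROP_LEFT, CROP_RIGHT, CROP_TOP, CROP_BOTTOM = 100, 130, 90, 90
    DEPTH_W, DEPTH_H = 640, 480     # raw depth stream resolution (assumed)

    def color_px_to_depth_px(u, v, color_w=640, color_h=480):
        cropped_w = DEPTH_W - CROP_LEFT - CROP_RIGHT    # 410
        cropped_h = DEPTH_H - CROP_TOP - CROP_BOTTOM    # 300
        return int(u * cropped_w / color_w), int(v * cropped_h / color_h)
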
/src/dev/tf_listener.cpp:
--------------------------------------------------------------------------------
1 | #include <cstdio>  // header name lost in export; assumed, for printf below
2 | #include "tf/transform_listener.h"
3 | #include "ros/ros.h"
4 |
5 | #define _USE_MATH_DEFINES
6 | class echoListener
7 | {
8 | public:
9 |
10 | tf::TransformListener tf;
11 |
12 | //constructor with name
13 | echoListener()
14 | {
15 |
16 | };
17 |
18 | ~echoListener()
19 | {
20 |
21 | };
22 |
23 | private:
24 |
25 | };
26 |
27 |
28 | int main(int argc, char ** argv)
29 | {
30 | //Initialize ROS
31 | ros::init(argc, argv, "tf_echo", ros::init_options::AnonymousName);
32 |
33 | // Allow 2 or 3 command line arguments
34 | if (argc < 3 || argc > 4)
35 | {
36 | printf("Usage: tf_echo source_frame target_frame [echo_rate]\n\n");
37 | printf("This will echo the transform from the coordinate frame of the source_frame\n");
38 | printf("to the coordinate frame of the target_frame. \n");
39 | printf("Note: This is the transform to get data from target_frame into the source_frame.\n");
40 | printf("Default echo rate is 1 if echo_rate is not given.\n");
41 | return -1;
42 | }
43 |
44 | ros::NodeHandle nh;
45 |
46 | double rate_hz;
47 | if (argc == 4)
48 | {
49 | // read rate from command line
50 | rate_hz = atof(argv[3]);
51 | }
52 | else
53 | {
54 | // read rate parameter
55 | ros::NodeHandle p_nh("~");
56 | p_nh.param("rate", rate_hz, 1.0);
57 | }
58 | if (rate_hz <= 0.0)
59 | {
60 | std::cerr << "Echo rate must be > 0.0\n";
61 | return -1;
62 | }
63 | ros::Rate rate(rate_hz);
64 |
65 | //Instantiate a local listener
66 | echoListener echoListener;
67 |
68 | std::string source_frameid = std::string("base_link");
69 | std::string target_frameid = std::string("object_16");
70 |
71 | // Wait for up to one second for the first transforms to become available.
72 | echoListener.tf.waitForTransform(source_frameid, target_frameid, ros::Time(), ros::Duration(1.0));
73 |
74 | //Nothing needs to be done except wait for a quit
75 | //The callbacks within the listener class
76 | //will take care of everything
77 | while(nh.ok())
78 | {
79 | try
80 | {
81 | tf::StampedTransform echo_transform;
82 | echoListener.tf.lookupTransform(source_frameid, target_frameid, ros::Time(), echo_transform);
83 | std::cout.precision(3);
84 | std::cout.setf(std::ios::fixed,std::ios::floatfield);
85 | std::cout << "At time " << echo_transform.stamp_.toSec() << std::endl;
86 | double yaw, pitch, roll;
87 | echo_transform.getBasis().getRPY(roll, pitch, yaw);
88 | tf::Quaternion q = echo_transform.getRotation();
89 | tf::Vector3 v = echo_transform.getOrigin();
90 | std::cout << "- Translation: [" << v.getX() << ", " << v.getY() << ", " << v.getZ() << "]" << std::endl;
91 |
92 | //print transform
93 | }
94 | catch(tf::TransformException& ex)
95 | {
96 | std::cout << "Failure at "<< ros::Time::now() << std::endl;
97 | std::cout << "Exception thrown:" << ex.what()<< std::endl;
98 | std::cout << "The current list of frames is:" <
5 | #include
6 |
7 | #include
8 | #include
9 | #include "tf/transform_listener.h"
10 | #include "tf/tf.h"
11 | #include
12 | #include
13 | #include
14 |
15 | using namespace tf;
16 | using namespace ros;
17 | using namespace std;
18 |
19 | #define _USE_MATH_DEFINES
20 |
21 | class echoListener
22 | {
23 | public:
24 |
25 | tf::TransformListener tf;
26 |
27 | //constructor with name
28 | echoListener()
29 | {
30 |
31 | };
32 |
33 | ~echoListener()
34 | {
35 |
36 | };
37 |
38 | private:
39 |
40 | };
41 |
42 | int checkpose (int x, int y, int z)
43 | {
44 | echoListener echoListener;
45 |
46 | std::string source_frameid = std::string("base_link");
47 | std::string target_eef = std::string("Link6");
48 | bool check = false;
49 |
50 | while (check == false) {
51 | echoListener.tf.waitForTransform(source_frameid, target_eef, ros::Time(), ros::Duration(0.2));
52 | tf::StampedTransform echo_transform;
53 | echoListener.tf.lookupTransform(source_frameid, target_eef, ros::Time(), echo_transform);
54 |
55 | //double yaw, pitch, roll;
56 | //echo_transform.getBasis().getRPY(roll, pitch, yaw);
57 | //tf::Quaternion q = echo_transform.getRotation();
58 | tf::Vector3 v = echo_transform.getOrigin();
59 | int actual_x,actual_y,actual_z ;
60 | actual_x = v.getX()*1000;
61 | actual_y = v.getY()*1000;
62 | actual_z = v.getZ()*1000;
63 | //std::cout << "Checking: [" << actual_x << ", " << actual_y << ", " << actual_z << "]" << std::endl;
64 |
65 | if (abs(abs(actual_x) - abs(x))<= 5 && abs(abs(actual_y) - abs(y))<= 5 && abs(actual_z - z)<= 5)
66 | {check = true;}
67 | usleep(100000);
68 | }
69 | return 0;
70 | }
71 |
72 | int checkpose_rpy (int x, int y, int z, int rx, int ry, int rz)
73 | {
74 | echoListener echoListener;
75 |
76 | std::string source_frameid = std::string("base_link");
77 | std::string target_eef = std::string("Link6");
78 | bool check = false;
79 |
80 | while (check == false) {
81 | echoListener.tf.waitForTransform(source_frameid, target_eef, ros::Time(), ros::Duration(0.2));
82 | tf::StampedTransform echo_transform;
83 | echoListener.tf.lookupTransform(source_frameid, target_eef, ros::Time(), echo_transform);
84 |
85 | double yaw, pitch, roll;
86 | echo_transform.getBasis().getRPY(roll, pitch, yaw);
87 | tf::Quaternion q = echo_transform.getRotation();
88 | tf::Vector3 v = echo_transform.getOrigin();
89 | int actual_x,actual_y,actual_z,actual_rx,actual_ry,actual_rz ;
90 | actual_x = v.getX()*1000;
91 | actual_y = v.getY()*1000;
92 | actual_z = v.getZ()*1000;
93 | actual_rx = roll*180.0/M_PI;
94 | actual_ry = pitch*180.0/M_PI;
95 | actual_rz = yaw*180.0/M_PI;
96 |
97 | std::cout << "Checking: [" << actual_x << ", " << actual_y << ", " << actual_z << ", " << actual_rx << ", " << actual_ry << ", " << actual_rz << "]" << std::endl;
98 | if (abs(abs(actual_x) - abs(x))<= 5 && abs(abs(actual_y) - abs(y))<= 5 && abs(actual_z - z)<= 5 && abs(abs(actual_rx) - abs(rx))<= 5 && abs(abs(actual_ry) - abs(ry))<= 5 && abs(actual_rz - rz)<= 5)
99 | {check = true;}
100 | usleep(100000);
101 | }
102 | return 0;
103 | }
104 |
105 | int checkpose_focus (int x, int y, int z)
106 | {
107 | int at_goal;
108 | echoListener echoListener;
109 | std::string source_frameid = std::string("base_link");
110 | std::string target_eef = std::string("Link6");
111 |
112 | echoListener.tf.waitForTransform(source_frameid, target_eef, ros::Time(), ros::Duration(0.2));
113 | tf::StampedTransform echo_transform;
114 | echoListener.tf.lookupTransform(source_frameid, target_eef, ros::Time(), echo_transform);
115 | tf::Vector3 v = echo_transform.getOrigin();
116 | int actual_x,actual_y,actual_z ;
117 |
118 | actual_x = v.getX()*1000;
119 | actual_y = v.getY()*1000;
120 | actual_z = v.getZ()*1000;
121 |
122 | if (abs(abs(actual_x) - abs(x))<= 5 && abs(abs(actual_y) - abs(y))<= 5 && abs(actual_z - z)<= 5)
123 | {at_goal = 1;}
124 | else {at_goal = 0;}
125 |
126 | return at_goal;
127 | }
128 |
129 | int checkpose_object (int x, int y, int z, int object_id)
130 | {
131 | int object_stay;
132 | echoListener echoListener;
133 | std::string source_frameid = std::string("base_link");
134 | std::string target_frameid = std::string("object_1");
135 | std::string target_frameid2 = std::string("object_2");
136 | std::string target_frameid3 = std::string("object_3");
137 | std::string target_frameid4 = std::string("object_4");
138 | std::string target_frameid5 = std::string("object_5");
139 |
140 | std::string final_target;
141 |
142 | if (object_id == 1) {final_target = target_frameid;}
143 | if (object_id == 2) {final_target = target_frameid2;}
144 | if (object_id == 3) {final_target = target_frameid3;}
145 | if (object_id == 4) {final_target = target_frameid4;}
146 | if (object_id == 5) {final_target = target_frameid5;}
147 |
148 | ros::NodeHandle n;
149 | ros::Publisher chatter_pub2 = n.advertise<std_msgs::String>("Cmd_Talker", 100);
150 | std_msgs::String msg;
151 |
152 | if (echoListener.tf.waitForTransform(source_frameid, final_target, ros::Time(), ros::Duration(0.8)) == 1)
153 | {
154 | tf::StampedTransform echo_transform;
155 | echoListener.tf.lookupTransform(source_frameid, final_target, ros::Time(), echo_transform);
156 | tf::Vector3 v = echo_transform.getOrigin();
157 | int actual_x,actual_y,actual_z ;
158 |
159 | actual_x = v.getX()*1000;
160 | actual_y = v.getY()*1000;
161 | actual_z = v.getZ()*1000;
162 |
163 | if (abs(abs(actual_x)-abs(x)) >= 20 || abs(abs(actual_y)-abs(y)) >= 20 || abs(abs(actual_z)-abs(z)) >= 20)
164 | {
165 | //ROS_INFO("Delta X : [%0.3f] Delta Y : [%0.3f] Delta Y : [%0.3f]", abs(abs(actual_x)-abs(x)), abs(abs(actual_y)-abs(y)), abs(abs(actual_z)-abs(z)));
166 | std::stringstream cmd;
167 | ROS_INFO("Object move");
168 | cmd << "object_not_stay";
169 | msg.data = cmd.str();
170 | chatter_pub2.publish(msg);
171 | object_stay = 1;
172 | }
173 | else
174 | {
175 | object_stay = 0;
176 | //ROS_INFO("Delta X : [%0.3f] Delta Y : [%0.3f] Delta Y : [%0.3f]", abs(abs(actual_x)-abs(x)), abs(abs(actual_y)-abs(y)), abs(abs(actual_z)-abs(z)));
177 | }
178 | }
179 | else
180 | {
181 | std::stringstream cmd;
182 | ROS_INFO("Checking object again");
183 | cmd << "object_not_stay";
184 | msg.data = cmd.str();
185 | chatter_pub2.publish(msg);
186 |
187 | if (echoListener.tf.waitForTransform(source_frameid, target_frameid, ros::Time(), ros::Duration(1.5)) == 1)
188 | {
189 | object_stay = 1;
190 | }
191 | else
192 | {
193 | ROS_INFO("Object not found");
194 | object_stay = -1;
195 | }
196 | }
197 | return object_stay;
198 | }
199 |
200 | int checkpose_object_focus (int x, int y, int z, int object_id)
201 | {
202 | int object_stay;
203 | echoListener echoListener;
204 | std::string source_frameid = std::string("base_link");
205 | std::string target_frameid = std::string("object_1");
206 | std::string target_frameid2 = std::string("object_2");
207 | std::string target_frameid3 = std::string("object_3");
208 | std::string target_frameid4 = std::string("object_4");
209 | std::string target_frameid5 = std::string("object_5");
210 |
211 | std::string final_target;
212 |
213 | if (object_id == 1) {final_target = target_frameid;}
214 | if (object_id == 2) {final_target = target_frameid2;}
215 | if (object_id == 3) {final_target = target_frameid3;}
216 | if (object_id == 4) {final_target = target_frameid4;}
217 | if (object_id == 5) {final_target = target_frameid5;}
218 |
219 | ros::NodeHandle n;
220 | ros::Publisher chatter_pub2 = n.advertise<std_msgs::String>("Cmd_Talker", 100);
221 | std_msgs::String msg;
222 |
223 | if (echoListener.tf.waitForTransform(source_frameid, final_target, ros::Time(), ros::Duration(0.3)) == 1)
224 | {
225 | tf::StampedTransform echo_transform;
226 | echoListener.tf.lookupTransform(source_frameid, final_target, ros::Time(), echo_transform);
227 | tf::Vector3 v = echo_transform.getOrigin();
228 | int actual_x,actual_y,actual_z ;
229 |
230 | actual_x = v.getX()*1000;
231 | actual_y = v.getY()*1000;
232 | actual_z = v.getZ()*1000;
233 |
234 | if (abs(abs(actual_x)-abs(x)) <= 20 && abs(abs(actual_y)-abs(y)) <= 20 && abs(abs(actual_z)-abs(z))<= 20)
235 | {
236 | object_stay = 0;
237 | }
238 | else
239 | {
240 | std::stringstream cmd;
241 | std::cout << "Object move" << std::endl;
242 | cmd << "object_not_stay";
243 | msg.data = cmd.str();
244 | chatter_pub2.publish(msg);
245 | object_stay = 1;
246 | }
247 | }
248 | else
249 | {
250 | std::stringstream cmd;
251 | std::cout << "Object move" << std::endl;
252 | cmd << "object_not_stay";
253 | msg.data = cmd.str();
254 | chatter_pub2.publish(msg);
255 |
256 | if (echoListener.tf.waitForTransform(source_frameid, target_frameid, ros::Time(), ros::Duration(1.0)) == 1)
257 | {
258 | object_stay = 1;
259 | }
260 | else
261 | {
262 | std::cout << "Object not found" << std::endl;
263 | object_stay = -1;
264 | }
265 | }
266 | return object_stay;
267 | }
268 |
269 | int picking_object (int object_id, int box_count)
270 | {
271 | ros::NodeHandle n;
272 | ros::Publisher chatter_pub1 = n.advertise<CR5_Project::ObjectMsg>("Object_Position", 100);
273 | ros::Publisher chatter_pub2 = n.advertise<std_msgs::String>("Cmd_Talker", 100);
274 |
275 | echoListener echoListener;
276 | std::string source_frameid = std::string("base_link");
277 | std::string camera_frameid = std::string("camera_link");
278 | std::string target_frameid = std::string("object_1");
279 | std::string target_frameid2 = std::string("object_2");
280 | std::string target_frameid3 = std::string("object_3");
281 | std::string target_frameid4 = std::string("object_4");
282 | std::string target_frameid5 = std::string("object_5");
283 | std::string target_eef = std::string("Link6");
284 |
285 | std::string final_target;
286 |
287 | if (object_id == 1) {final_target = target_frameid;}
288 | if (object_id == 2) {final_target = target_frameid2;}
289 | if (object_id == 3) {final_target = target_frameid3;}
290 | if (object_id == 4) {final_target = target_frameid4;}
291 | if (object_id == 5) {final_target = target_frameid5;}
292 |
293 | CR5_Project::ObjectMsg msg;
294 | int carry = 0;
295 | int checking = 0;
296 | int place = 0;
297 | int box = box_count;
298 | int box_stack;
299 |
300 | while (place == 0)
301 | {
302 | if (carry == 0 && echoListener.tf.waitForTransform(source_frameid, final_target, ros::Time(), ros::Duration(0.2)) == 1)
303 | {
304 | tf::StampedTransform echo_transform;
305 | echoListener.tf.lookupTransform(source_frameid, final_target, ros::Time(), echo_transform);
306 | tf::Vector3 v = echo_transform.getOrigin();
307 |
308 | int goal_x = v.getX()*1000;
309 | int goal_y = v.getY()*1000;
310 | int goal_z = v.getZ()*1000;
311 | int goal_rx = 180;
312 | int goal_ry = 0;
313 | int goal_rz = 180;
314 |
315 | int goal_x_cal = goal_x;
316 | int goal_y_cal = goal_y + 50;
317 | int goal_z_cal = 390;
318 |
319 | msg.x = goal_x_cal;
320 | msg.y = goal_y_cal;
321 | msg.z = goal_z_cal;
322 | msg.rx = goal_rx;
323 | msg.ry = goal_ry;
324 | msg.rz = goal_rz;
325 |
326 | ROS_INFO("Moving to x:[%0.3f], y:[%0.3f], z:[%0.3f]", msg.x, msg.y, msg.z);
327 | chatter_pub1.publish(msg);
328 |
329 | while(checkpose_focus(goal_x_cal,goal_y_cal,goal_z_cal) == 0 && checking == 0)
330 | {
331 | int object_state = checkpose_object(goal_x,goal_y,goal_z,object_id);
332 |
333 | //object state 0 = object stay
334 | //object state 1 = object move
335 | //object state -1 = object lost
336 |
337 | if (object_state == 1)
338 | {
339 | checking = 1;
340 | sleep(1);
341 | }
342 | else if (object_state == 0)
343 | {
344 | ROS_INFO(" ~ ");
345 | //usleep(100000);
346 | }
347 | else if (object_state == -1)
348 | {
349 | ROS_INFO("Return to Home Pose ");
350 | std_msgs::String msg;
351 | std::stringstream cmd;
352 | cmd << "object_not_stay";
353 | msg.data = cmd.str();
354 | chatter_pub2.publish(msg);
355 |
356 | std::stringstream cmd2;
357 | cmd2 << "home";
358 | msg.data = cmd2.str();
359 | chatter_pub2.publish(msg);
360 | checkpose (143,-580,405);
361 | checking = 1;
362 | carry = 0;
363 | sleep(2);
364 | }
365 | }
366 |
367 | if (checkpose_focus(goal_x_cal,goal_y_cal,goal_z_cal) == 1)
368 | {
369 | carry = 1;
370 | usleep(100000);
371 | }
372 | checking = 0;
373 | }
374 |
375 | if (carry == 0 && echoListener.tf.waitForTransform(source_frameid, final_target, ros::Time(), ros::Duration(0.2)) == 0)
376 | {
377 | place = 1;
378 | box_stack = 0;
379 | }
380 |
381 | if (carry == 1)
382 | {
383 | tf::StampedTransform echo_transform;
384 | echoListener.tf.lookupTransform(source_frameid, final_target, ros::Time(), echo_transform);
385 | tf::Vector3 v = echo_transform.getOrigin();
386 |
387 | int goal_x = v.getX()*1000;
388 | int goal_y = v.getY()*1000;
389 | int goal_z = v.getZ()*1000;
390 | int goal_rx = 180;
391 | int goal_ry = 0;
392 | int goal_rz = 180;
393 |
394 | int goal_x_cal = goal_x;
395 | int goal_y_cal = goal_y + 50;
396 | int goal_z_cal = goal_z + 280;
397 |
398 | msg.x = goal_x_cal;
399 | msg.y = goal_y_cal;
400 | msg.z = goal_z_cal;
401 | msg.rx = goal_rx;
402 | msg.ry = goal_ry;
403 | msg.rz = goal_rz;
404 |
405 | ROS_INFO("Moving to x:[%0.3f], y:[%0.3f], z:[%0.3f]", msg.x, msg.y, msg.z);
406 | chatter_pub1.publish(msg);
407 |
408 | while(checkpose_focus(goal_x_cal,goal_y_cal,goal_z_cal) == 0 && checking == 0)
409 | {
410 | int object_state = checkpose_object(goal_x,goal_y,goal_z,object_id);
411 |
412 | //object state 0 = object stay
413 | //object state 1 = object move
414 | //object state -1 = object lost
415 |
416 | if (object_state == 1){checking = 1; sleep(1);}
417 | else if (object_state == 0){
418 | ROS_INFO(" ~ ");
419 | //usleep(100000);
420 | }
421 | else if (object_state == -1)
422 | {
423 | ROS_INFO("Return to Home Pose ");
424 | std_msgs::String msg;
425 | std::stringstream cmd;
426 | cmd << "object_not_stay";
427 | msg.data = cmd.str();
428 | chatter_pub2.publish(msg);
429 |
430 | std::stringstream cmd2;
431 | cmd2 << "home";
432 | msg.data = cmd2.str();
433 | chatter_pub2.publish(msg);
434 | checkpose (143,-580,405);
435 | checking = 1;
436 | carry = 0;
437 | sleep(2);
438 | }
439 | }
440 | if (checkpose_focus(goal_x_cal,goal_y_cal,goal_z_cal) == 1)
441 | {
442 | carry = 2;
443 | usleep(500000);
444 | }
445 | checking = 0;
446 | }
447 |
448 | if (carry == 2){
449 |
450 | tf::StampedTransform echo_transform;
451 | tf::StampedTransform echo_cam;
452 | echoListener.tf.lookupTransform(source_frameid, final_target, ros::Time(), echo_transform);
453 | echoListener.tf.lookupTransform(camera_frameid, final_target, ros::Time(), echo_cam);
454 | double yaw, pitch, roll;
455 | echo_cam.getBasis().getRPY(roll, pitch, yaw);
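// roll/pitch/yaw of the object as seen from the camera frame, converted
// to degrees below and remapped onto the tool's rx/ry/rz targets.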
456 | tf::Vector3 v = echo_transform.getOrigin();
457 |
458 | int goal_x = v.getX()*1000;
459 | int goal_y = v.getY()*1000;
460 | int goal_z = v.getZ()*1000;
461 | int goal_roll = roll*180.0/M_PI;
462 | int goal_pitch = pitch*180.0/M_PI;
463 | int goal_yaw = yaw*180.0/M_PI;
464 |
465 | int goal_x_cal = goal_x;
466 | int goal_y_cal = goal_y;
467 | int goal_z_cal = goal_z + 54;
468 | int goal_rx_cal = goal_pitch + 180;
469 | int goal_ry_cal = goal_yaw - 180;
470 | int goal_rz_cal = goal_roll + 180;
471 |
472 | // Clamp to the minimum safe picking height (mm)
473 | if (goal_z_cal < 15) {goal_z_cal = 15;}
474 |
475 | msg.x = goal_x_cal;
476 | msg.y = goal_y_cal;
477 | msg.z = goal_z_cal;
478 | msg.rx = goal_rx_cal;
479 | msg.ry = goal_ry_cal;
480 | msg.rz = goal_rz_cal;
481 |
482 | ROS_INFO("Picking Object at x:[%f], y:[%f], z:[%f], rx:[%f], ry:[%f], rz:[%f]", msg.x, msg.y, msg.z, msg.rx, msg.ry, msg.rz);
483 | chatter_pub1.publish(msg);
484 |
485 | usleep(1000000);
486 | std_msgs::String msg;
487 | std::stringstream cmd;
488 | cmd << "SetVac";
489 | msg.data = cmd.str();
490 | chatter_pub2.publish(msg);
491 | ROS_INFO("Picked object");
492 |
493 | checkpose(goal_x_cal,goal_y_cal,goal_z_cal);
494 |
495 | carry = 3;
496 | }
497 |
498 | if (carry == 3)
499 | {
500 | usleep(100000);
501 |
502 | carry = 4;
503 | }
504 |
505 | if (carry == 4)
506 | {
507 | std_msgs::String msg;
508 | std::stringstream cmd;
509 | cmd << "home";
510 | msg.data = cmd.str();
511 | chatter_pub2.publish(msg);
512 | checkpose(143,-580,405);
513 |
514 | carry = 5;
515 | }
516 |
517 | if (carry == 5)
518 | {
519 | int goal_x;
520 | int goal_y;
521 | int goal_z;
522 | int z_height;
523 | goal_x = -180;
524 |
525 | if (object_id == 1){goal_y = -710;}
526 | if (object_id == 2){goal_y = -710;}
527 | if (object_id == 3){goal_y = -540;}
528 | if (object_id == 4){goal_y = -540;}
529 | if (object_id == 5){goal_y = -540;}
530 |
531 | goal_z = -70 + (box);
532 | // Clamp to the minimum safe placing height (mm)
533 | if (goal_z < 50){goal_z = 50;}
534 |
535 | int goal_x_cal = goal_x;
536 | int goal_y_cal = goal_y;
537 | int goal_z_cal = goal_z;
538 | int goal_rx_cal = 180;
539 | int goal_ry_cal = 0;
540 | int goal_rz_cal = 180;
541 |
542 | msg.x = goal_x_cal;
543 | msg.y = goal_y_cal;
544 | msg.z = goal_z_cal;
545 | msg.rx = goal_rx_cal;
546 | msg.ry = goal_ry_cal;
547 | msg.rz = goal_rz_cal;
548 |
549 | ROS_INFO("Placing Object at x:[%f], y:[%f], z:[%f], rx:[%f], ry:[%f], rz:[%f]", msg.x, msg.y, msg.z, msg.rx, msg.ry, msg.rz);
550 | chatter_pub1.publish(msg);
551 | checkpose(goal_x_cal,goal_y_cal,goal_z_cal);
552 |
553 | std_msgs::String msg;
554 | std::stringstream cmd;
555 | cmd << "ResetVac";
556 | msg.data = cmd.str();
557 | chatter_pub2.publish(msg);
558 | ROS_INFO("Placed object");
559 | usleep(200000);
560 |
561 | carry = 6;
562 | }
563 |
564 | if (carry == 6)
565 | {
566 | int goal_x;
567 | int goal_y;
568 | goal_x = -180;
569 |
570 | if (object_id == 1){goal_y = -710;}
571 | if (object_id == 2){goal_y = -710;}
572 | if (object_id == 3){goal_y = -540;}
573 | if (object_id == 4){goal_y = -540;}
574 | if (object_id == 5){goal_y = -540;}
575 |
576 | int goal_x_cal = goal_x;
577 | int goal_y_cal = goal_y;
578 | int goal_z_cal = 200;
579 | int goal_rx_cal = 180;
580 | int goal_ry_cal = 0;
581 | int goal_rz_cal = 180;
582 |
583 | msg.x = goal_x_cal;
584 | msg.y = goal_y_cal;
585 | msg.z = goal_z_cal;
586 | msg.rx = goal_rx_cal;
587 | msg.ry = goal_ry_cal;
588 | msg.rz = goal_rz_cal;
589 |
590 | chatter_pub1.publish(msg);
591 | checkpose(goal_x_cal,goal_y_cal,goal_z_cal);
592 | usleep(200000);
593 |
594 | carry = 7;
595 | }
596 |
597 | if (carry == 7)
598 | {
599 | ROS_INFO("Go Home");
600 |
601 | std_msgs::String msg;
602 | std::stringstream cmd;
603 | cmd << "home";
604 | msg.data = cmd.str();
605 | chatter_pub2.publish(msg);
606 | checkpose(143,-580,405);
607 | usleep(200000);
608 |
609 | carry = 0;
610 | place = 1;
611 | box_stack = box;
612 | }
613 | }
614 |
615 | ROS_INFO("Picking done");
616 | return box_stack;
617 | }
618 |
619 | int main(int argc, char **argv)
620 | {
621 | ros::init(argc, argv, "talker");
622 |
623 | ros::NodeHandle n;
624 |
625 | ros::Publisher chatter_pub1 = n.advertise<CR5_Project::ObjectMsg>("Object_Position", 100);
626 | ros::Publisher chatter_pub2 = n.advertise<std_msgs::String>("Cmd_Talker", 100);
627 |
628 | ros::Rate loop_rate(10);
629 |
630 | float position_x,position_y,position_z;
631 |
632 | // Starting
633 | std::cout << "Started rosrun CR5_Project main_control";
634 | std::cout << "\n \nSUMMARY";
635 | std::cout << "\n========";
636 | std::cout << "\n \nThis program controls the CR5 robot and is still in development.";
637 | std::cout << "\nIt can also work with the Intel RealSense D435i camera.";
638 | std::cout << "\n \nCOMMANDS INPUT";
639 | std::cout << "\n========";
640 | std::cout << "\n* Home";
641 | std::cout << "\n* Pose";
642 | std::cout << "\n* EnableRobot";
643 | std::cout << "\n* DisableRobot";
644 | std::cout << "\n* ClearError";
645 | std::cout << "\n* Point0";
646 | std::cout << "\n* Point1";
647 | std::cout << "\n* Picking";
648 | std::cout << "\n* PickMe";
649 |
650 | //Instantiate a local listener
651 | echoListener echoListener;
652 |
653 | std::cout << "\n \nInitializing echoListener for Transform (tf).\n \n";
654 |
655 | std::string source_frameid = std::string("base_link");
656 | std::string camera_frameid = std::string("camera_link");
657 | std::string target_frameid = std::string("object_1");
658 | std::string target_frameid2 = std::string("object_2");
659 | std::string target_frameid3 = std::string("object_3");
660 | std::string target_frameid4 = std::string("object_4");
661 | std::string target_frameid5 = std::string("object_5");
662 | std::string target_eef = std::string("Link6");
663 |
664 | ROS_INFO("source_frameid = \"base_link\"");
665 | ROS_INFO("target_eef = \"Link6\"");
666 | ROS_INFO("camera_frameid = \"camera_link\"");
667 | ROS_INFO("target_frameid = \"object_1\"");
668 | ROS_INFO("target_frameid2 = \"object_2\"");
669 | ROS_INFO("target_frameid3 = \"object_3\"");
670 | ROS_INFO("target_frameid4 = \"object_4\"");
671 | ROS_INFO("target_frameid5 = \"object_5\"\n");
672 | ROS_INFO("Initializing a new console terminal for calling ros services.");
673 | ROS_INFO("Enable Robot");
674 |
675 | while (ros::ok())
676 | {
677 | ros::Rate rate(1);
678 |
679 | //----------Simple command-------------//
680 |
681 | CR5_Project::ObjectMsg msg;
682 | std::string cmd;
683 | std::cout << "\nUser input command: ";
684 | std::cin >> cmd;
685 |
686 | if (cmd == "Pose"){
687 | std::cout << "Input position to publish" << std::endl << "position.x :";
688 | std::cin >> position_x;
689 | std::cout << "position.y :";
690 | std::cin >> position_y;
691 | std::cout << "position.z :";
692 | std::cin >> position_z;
693 |
694 | msg.x = position_x;
695 | msg.y = position_y;
696 | msg.z = position_z;
697 | msg.rx = 180;
698 | msg.ry = 0;
699 | msg.rz = 180;
700 |
701 | chatter_pub1.publish(msg);
702 | }
703 | else if (cmd == "Home"){
704 | std_msgs::String msg;
705 | std::stringstream cmd;
706 | cmd << "home";
707 | msg.data = cmd.str();
708 | chatter_pub2.publish(msg);
709 | ROS_INFO("Moving to Home Pose");
710 | }
711 | else if (cmd == "Sleep"){
712 | std_msgs::String msg;
713 | std::stringstream cmd;
714 | cmd << "sleep";
715 | msg.data = cmd.str();
716 | chatter_pub2.publish(msg);
717 | ROS_INFO("Moving to Sleep Pose");
718 | }
719 | else if (cmd == "ClearError"){
720 | std_msgs::String msg;
721 | std::stringstream cmd;
722 | cmd << "ClearError";
723 | msg.data = cmd.str();
724 | chatter_pub2.publish(msg);
725 | ROS_INFO("Published Clear Error");
726 | }
727 | else if (cmd == "DisableRobot"){
728 | std_msgs::String msg;
729 | std::stringstream cmd;
730 | cmd << "DisableRobot";
731 | msg.data = cmd.str();
732 | chatter_pub2.publish(msg);
733 | ROS_INFO("Published Disable Robot");
734 | }
735 | else if (cmd == "EnableRobot")
736 | {
737 | std_msgs::String msg;
738 | std::stringstream cmd;
739 | cmd << "EnableRobot";
740 | msg.data = cmd.str();
741 | chatter_pub2.publish(msg);
742 | ROS_INFO("Published Enable Robot");
743 | }
744 | else if (cmd == "Point0")
745 | {
746 | std_msgs::String msg;
747 | std::stringstream cmd;
748 | cmd << "Point0";
749 | msg.data = cmd.str();
750 | chatter_pub2.publish(msg);
751 | ROS_INFO("Moving to Point 0");
752 | }
753 | else if (cmd == "Point1")
754 | {
755 | std_msgs::String msg;
756 | std::stringstream cmd;
757 | cmd << "Point1";
758 | msg.data = cmd.str();
759 | chatter_pub2.publish(msg);
760 | ROS_INFO("Moving to Point 1");
761 | }
762 | else if (cmd == "Point1_R")
763 | {
764 | std_msgs::String msg;
765 | std::stringstream cmd;
766 | cmd << "Point1_R";
767 | msg.data = cmd.str();
768 | chatter_pub2.publish(msg);
769 | ROS_INFO("Moving to Point 1_R");
770 | }
771 | else if (cmd == "Point2")
772 | {
773 | std_msgs::String msg;
774 | std::stringstream cmd;
775 | cmd << "Point2";
776 | msg.data = cmd.str();
777 | chatter_pub2.publish(msg);
778 | ROS_INFO("Moving to Point 2");
779 | }
780 | else if (cmd == "SetVac")
781 | {
782 | std_msgs::String msg;
783 | std::stringstream cmd;
784 | cmd << "SetVac";
785 | msg.data = cmd.str();
786 | chatter_pub2.publish(msg);
787 | ROS_INFO("Set Robot Tool0 (Vacuum)");
788 | }
789 | else if (cmd == "ResetVac")
790 | {
791 | std_msgs::String msg;
792 | std::stringstream cmd;
793 | cmd << "ResetVac";
794 | msg.data = cmd.str();
795 | chatter_pub2.publish(msg);
796 | ROS_INFO("Reset Robot Tool0 (Vacuum)");
797 | }
798 | else if (cmd == "Picking")
799 | {
800 | if (echoListener.tf.waitForTransform(source_frameid, target_frameid, ros::Time(), ros::Duration(0.2)) == 1)
801 | {
802 | picking_object(1,1);
803 | }
804 | else if (echoListener.tf.waitForTransform(source_frameid, target_frameid2, ros::Time(), ros::Duration(0.2)) == 1)
805 | {
806 | picking_object(2,1);
807 | }
808 | else if (echoListener.tf.waitForTransform(source_frameid, target_frameid3, ros::Time(), ros::Duration(0.2)) == 1)
809 | {
810 | picking_object(3,1);
811 | }
812 | else if (echoListener.tf.waitForTransform(source_frameid, target_frameid4, ros::Time(), ros::Duration(0.2)) == 1)
813 | {
814 | picking_object(4,1);
815 | }
816 | else if (echoListener.tf.waitForTransform(source_frameid, target_frameid5, ros::Time(), ros::Duration(0.2)) == 1)
817 | {
818 | picking_object(5,1);
819 | }
820 | }
821 | else if (cmd == "PickMe")
822 | {
823 | int box1 = 0;
824 | int box2 = 0;
825 | int box3 = 0;
826 | int box4 = 0;
827 | int box5 = 0;
828 | int box_stack;
829 |
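// box1/box2 track the accumulated stack height (mm) in each of the two
// drop zones; each pick provisionally adds the object's height (25, 30,
// or 50 mm depending on class) and then reconciles against the height
// actually reported back by picking_object().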
830 | while (ros::ok())
831 | {
832 | if (echoListener.tf.waitForTransform(source_frameid, target_frameid, ros::Time(), ros::Duration(0.2)) == 1)
833 | {
834 | box1 += 25;
835 | box_stack = picking_object(1, box1);
836 | box1 = box1 - 25 + box_stack;
837 | }
838 | else if (echoListener.tf.waitForTransform(source_frameid, target_frameid2, ros::Time(), ros::Duration(0.2)) == 1)
839 | {
840 | box1 += 25;
841 | box_stack = picking_object(2, box1);
842 | box1 = box1 - 25 + box_stack;
843 | }
844 | else if (echoListener.tf.waitForTransform(source_frameid, target_frameid3, ros::Time(), ros::Duration(0.2)) == 1)
845 | {
846 | box2 += 30;
847 | box_stack = picking_object(3, box2);
848 | box2 = box2 - 30 + box_stack;
849 | }
850 | else if (echoListener.tf.waitForTransform(source_frameid, target_frameid4, ros::Time(), ros::Duration(0.2)) == 1)
851 | {
852 | box2 += 25;
853 | box_stack = picking_object(4, box2);
854 | box2 = box2 - 25 + box_stack;
855 | }
856 | else if (echoListener.tf.waitForTransform(source_frameid, target_frameid5, ros::Time(), ros::Duration(0.2)) == 1)
857 | {
858 | box2 += 50;
859 | box_stack = picking_object(5, box2);
860 | box2 = box2 - 50 + box_stack;
861 | }
862 | }
863 | }
864 |
865 | else {std::cout << "\n----- command not found ----- \n";}
866 | }
867 |
868 | ros::spinOnce();
869 | loop_rate.sleep();
870 | return 0;
871 | }
872 |
--------------------------------------------------------------------------------
/src/service_call.cpp:
--------------------------------------------------------------------------------
1 | #include "ros/ros.h"
2 | #include "std_msgs/String.h"
3 | #include "CR5_Project/ObjectMsg.h"
4 | #include <iostream>  // std::cout
5 | #include <cstdio>    // sprintf
6 | #include <cstdlib>   // system
7 | void chatterCallback(const CR5_Project::ObjectMsg::ConstPtr& msg)
8 | {
9 | ROS_INFO("Received Position :\nx: [%0.3f]\ny: [%0.3f]\nz: [%0.3f]\nrx: [%0.1f]\nry: [%0.1f]\nrz: [%0.1f]", msg->x, msg->y, msg->z, msg->rx, msg->ry, msg->rz);
10 | char cmd[200];
11 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: %0.3f, y: %0.3f, z: %0.3f, a: %0.3f, b: %0.3f, c: %0.3f}\"'", msg->x, msg->y, msg->z, msg->rx, msg->ry, msg->rz);
12 | system(cmd);
13 | }
14 |
15 | void cmdCallback(const std_msgs::String::ConstPtr& msg)
16 | {
17 | std::string cmd;
18 | cmd = msg->data.c_str();
19 | /* if (cmd == "home"){
20 | char cmd[200];
21 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/JointMovJ \"{j1: 90.0, j2: 15.0, j3: 83.0, j4: -8.0, j5: -89.0, j6: 0.0}\"'");
22 | system(cmd);
23 | }*/
24 | if (cmd == "home"){
25 | char cmd[200];
26 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: 143.0, y: -580.0, z: 450.0, a: 180.0, b: 0.0, c: 180.0}\"'");
27 | system(cmd);
28 | }
29 | if (cmd == "sleep"){
30 | char cmd[200];
31 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/JointMovJ \"{j1: 115.0, j2: -30.0, j3: 135.0, j4: -15.0, j5: -90.0, j6: 0.0}\"'");
32 | system(cmd);
33 | }
34 | if (cmd == "ClearError"){
35 | char cmd[200];
36 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/ClearError'");
37 | system(cmd);
38 | }
39 | if (cmd == "DisableRobot"){
40 | char cmd[200];
41 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/DisableRobot'");
42 | system(cmd);
43 | }
44 | if (cmd == "EnableRobot"){
45 | char cmd[200];
46 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/EnableRobot'");
47 | system(cmd);
48 | }
49 | if (cmd == "object_not_stay"){
50 | char cmd_stop[200];
51 | sprintf(cmd_stop, "gnome-terminal --tab -e 'rosservice call /dobot_bringup/srv/StopScript'");
52 | system(cmd_stop);
53 | sprintf(cmd_stop, "gnome-terminal --tab -e 'rosservice call /dobot_bringup/srv/EnableRobot'");
54 | system(cmd_stop);
55 | }
56 | if (cmd == "Point0"){
57 | char cmd[200];
58 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/JointMovJ \"{j1:0.0, j2:0.0, j3:0.0, j4:0.0, j5:0.0, j6:0.0}\"'");
59 | system(cmd);
60 | }
61 | if (cmd == "Point1"){
62 | char cmd[200];
63 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: 143.0, y: -600.0, z: 170.0, a: 180.0, b: 0.0, c: 180.0}\"'");
64 | system(cmd);
65 | }
66 | if (cmd == "Point2"){
67 | char cmd[200];
68 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: 143.0, y: -600.0, z: 200.0, a: 180.0, b: 0.0, c: 180.0}\"'");
69 | system(cmd);
70 | }
71 | if (cmd == "Point3"){
72 | char cmd[200];
73 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: 20.0, y: -550.0, z: 260.0, a: 180.0, b: 0.0, c: 180.0}\"'");
74 | system(cmd);
75 | }
76 | if (cmd == "Point5"){
77 | char cmd[200];
78 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: -210.0, y: -580.0, z: 150.0, a: 180.0, b: 0.0, c: 180.0}\"'");
79 | system(cmd);
80 | }
81 | if (cmd == "Speed100"){
82 | char cmd_spdJ[200];
83 | sprintf(cmd_spdJ, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/SpeedJ \"r: 100 \"'");
84 | system(cmd_spdJ);
85 | }
86 | if (cmd == "Speed50"){
87 | char cmd_spdJ[200];
88 | sprintf(cmd_spdJ, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/SpeedJ \"r: 50 \"'");
89 | system(cmd_spdJ);
90 | }
91 | if (cmd == "Acc100"){
92 | char cmd_spdJ[200];
93 | sprintf(cmd_spdJ, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/AccJ \"r: 100 \"'");
94 | system(cmd_spdJ);
95 | }
96 | if (cmd == "inventory"){
97 | char cmd[200];
98 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: 264.0, y: -310.0, z: 96.0, a: 180.0, b: 0.0, c: 180.0}\"'");
99 | system(cmd);
100 | }
101 | if (cmd == "inventory2"){
102 | char cmd[200];
103 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: 0.0, y: -285.0, z: 385.0, a: 0.0, b: 0.0, c: 0.0}\"'");
104 | system(cmd);
105 | char cmd2[200];
106 | sprintf(cmd2, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/MovJ \"{x: 0.0, y: -375.0, z: 330.0, a: -90.0, b: 0.0, c: 0.0}\"'");
107 | system(cmd2);
108 | }
109 | if (cmd == "SetVac"){
110 | char cmd_magic[200];
111 | sprintf(cmd_magic, "gnome-terminal --tab -e 'rosservice call /DobotServer/SetEndEffectorSuctionCup \"enableCtrl: 1 \nsuck: 1 \nisQueued: false\"'");
112 | system(cmd_magic);
113 | }
114 | if (cmd == "ResetVac"){
115 | char cmd_magic[200];
116 | sprintf(cmd_magic, "gnome-terminal --tab -e 'rosservice call /DobotServer/SetEndEffectorSuctionCup \"enableCtrl: 1 \nsuck: 0 \nisQueued: false\"'");
117 | system(cmd_magic);
118 | }
119 |
120 | else {
121 | char cmd[200];
122 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/JointMovJ \"{j1: 0.0, j2: 0.0, j3: 0.0, j4: 0.0, j5: 0.0, j6: 0.0}\"'");
123 | system(cmd);
124 | }
125 | }
126 |
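// Each command above shells out to `rosservice call` in a new
// gnome-terminal tab. A lighter-weight sketch would use a
// ros::ServiceClient instead (the MovJ service type and field names
// below are assumed to match the dobot_bringup driver invoked above):
//
//   #include "dobot_bringup/MovJ.h"
//   ros::NodeHandle nh;
//   ros::ServiceClient movj =
//       nh.serviceClient<dobot_bringup::MovJ>("dobot_bringup/srv/MovJ");
//   dobot_bringup::MovJ srv;
//   srv.request.x = 143.0; srv.request.y = -580.0; srv.request.z = 450.0;
//   srv.request.a = 180.0; srv.request.b = 0.0;  srv.request.c = 180.0;
//   if (!movj.call(srv)) ROS_WARN("MovJ call failed");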
127 | int main(int argc, char **argv)
128 | {
129 | ros::init(argc, argv, "listener");
130 |
131 | ros::NodeHandle n;
132 |
133 | ros::Subscriber sub1 = n.subscribe("Object_Position", 10, chatterCallback);
134 | ros::Subscriber sub1_order = n.subscribe("main/target_order", 10, chatterCallback);
135 | ros::Subscriber sub2 = n.subscribe("Cmd_Talker", 10, cmdCallback);
136 | ros::Subscriber sub2_order = n.subscribe("main/cmd_talker", 10, cmdCallback);
137 |
138 | // Starting
139 | std::cout << "Started rosrun CR5_Project service_call";
140 | std::cout << "\n \nSUMMARY";
141 | std::cout << "\n========";
142 | std::cout << "\n \nThis program sends ros service calls to the CR5 Robot. \nStill in development.";
143 | std::cout << "\n \nCOMMANDS INPUT";
144 | std::cout << "\n========";
145 |
146 | std::cout << "\nInitializing listener node to subscribe to commands.";
147 |
148 | char newtmn[200];
149 | sprintf(newtmn, "gnome-terminal --geometry=90x24+0+0 --window --working-directory=/depot --title='Main Control Terminal' --command=\"rosrun CR5_Project node_order.py\"");
150 | system(newtmn);
151 |
152 | char cmd[200];
153 | sprintf(cmd, "gnome-terminal --tab -e 'rosservice call dobot_bringup/srv/EnableRobot'");
154 | system(cmd);
155 |
156 | ros::spin();
157 |
158 | return 0;
159 | }
--------------------------------------------------------------------------------
/src/yolo_order.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | import numpy as np
3 | import cv2
4 | import torch
5 | import tf
6 | from sensor_msgs.msg import Image
7 | from cv_bridge import CvBridge
8 | from std_msgs.msg import String
9 |
10 | bridge = CvBridge()
11 |
12 | model = torch.hub.load('ultralytics/yolov5', 'custom', path='/home/rom/catkin_ws/src/CR5_Project/weights/best.pt')
13 | model.conf = 0.6
14 |
15 | #model = torch.hub.load('ultralytics/yolov5', 'custom', path='/home/rom/catkin_ws/src/CR5_Project/weights/best.pt')
16 | #model.conf = 0.7
17 |
18 | # Initialize the tf broadcaster
19 | tf_broadcaster = tf.TransformBroadcaster()
20 |
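# Pipeline: the color image is run through YOLOv5 on demand (when a
# message arrives on /main/yolo_order), each detection's depth is sampled
# from the cropped depth image, and the nearest detection is broadcast as
# a tf frame relative to camera_color_optical_frame.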
21 | def image_callback(msg):
22 | global start_detect
23 |
24 | #color_image = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
25 | #cv2.imshow('depth_image', color_image)
26 | #key = cv2.waitKey(1)
27 |
28 | if start_detect == 1 :
29 | print("run detect loop")
30 | start_detect = 0
31 |
32 | # Convert ROS message to OpenCV image
33 | color_image = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
34 |
35 | # Run object detection on color_image
36 | results = model(color_image)
37 | boxs = results.pandas().xyxy[0].values
38 | dectshow(color_image, boxs)
39 | i = 0
40 | z_compare = 1.0
41 | # Get the object pose from the first box
42 | if len(boxs) > 0:
43 | for box in boxs:
44 | print(boxs)
45 | x1, y1, x2, y2 = map(int, box[:4])
46 | img = color_image.copy()
47 | height, width = img.shape[:2]
48 | center_x, center_y = width // 2, height // 2 # Calculate center point of image
49 |
50 | center_x_new = x1 + (x2 - x1) // 2 - center_x
51 | center_y_new = center_y - y1 - (y2 - y1) // 2
52 |
53 | obj_center_old_x = x1 + (x2 - x1) // 2
54 | obj_center_old_y = y1 + (y2 - y1) // 2
55 | obj_center_old = [obj_center_old_x, obj_center_old_y]
56 | obj_depth = get_mid_pos(obj_center_old)
57 |
58 | # Convert pixel coordinates to meters
59 | W = 48 # object width in centimeters
60 | H = 36 # object height in centimeters
61 | width = color_image.shape[1] # image width in pixels
62 | height = color_image.shape[0] # image height in pixels
63 | x_m = (center_x_new * (W / width))/100
64 | y_m = (center_y_new * (H / height))/100
65 | z_m = obj_depth/1000
66 |
67 |
68 | # Per-object frame names ("Yolo_Object {i}") are overridden here:
69 | # every detection is broadcast under the single frame "obj_arduino".
70 | object_frame = "obj_arduino"
71 | # Publish the transform
72 | if z_compare > z_m :
73 | z_compare = z_m
74 | tf_broadcaster.sendTransform((x_m, -y_m, z_m), tf.transformations.quaternion_from_euler(0, 0, 0), rospy.Time.now(), object_frame, "camera_color_optical_frame")
75 | i += 1
76 | else :
77 | pass
78 |
79 |
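# The inline pixel-to-meter math above assumes the color image covers a
# measured 48 cm x 36 cm area at the working distance. An equivalent
# stand-alone helper (illustrative only, not used by the node):
def pixels_to_meters(px, py, img_w, img_h, w_cm=48.0, h_cm=36.0):
    """Convert image-center-relative pixel offsets to meters."""
    return (px * (w_cm / img_w)) / 100.0, (py * (h_cm / img_h)) / 100.0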
80 | def dectshow(org_img, boxs):
81 | img = org_img.copy()
82 | height, width = img.shape[:2]
83 | center_x, center_y = width // 2, height // 2 # Calculate center point of image
84 |
85 | for box in boxs:
86 | x1, y1, x2, y2 = map(int, box[:4])
87 | cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
88 |
89 | # Calculate coordinates of center point of box relative to the new origin
90 | center_x_new = x1 + (x2 - x1) // 2 - center_x
91 | center_y_new = center_y - y1 - (y2 - y1) // 2
92 |
93 | obj_center_old_x = x1 + (x2 - x1) // 2
94 | obj_center_old_y = y1 + (y2 - y1) // 2
95 | obj_center_old = [obj_center_old_x, obj_center_old_y]
96 | obj_depth = get_mid_pos(obj_center_old)
97 |
98 | label = f"{box[-1]}"
99 | label = label + f" x: {center_x_new}, y: {center_y_new}, z: {obj_depth}"
100 | font = cv2.FONT_HERSHEY_SIMPLEX
101 | font_scale = 0.5
102 | thickness = 1
103 | color = (255, 255, 255)
104 |
105 | # Show coordinates of center point relative to the new origin
106 | cv2.putText(img, label, (x1+2, y1-7), font, font_scale, color, thickness)
107 |
108 | ros_image = bridge.cv2_to_imgmsg(img, encoding="rgb8") # Convert OpenCV image to ROS Image message
109 | pub_img.publish(ros_image) # Publish ROS Image message to topic
110 | #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
111 | #cv2.imshow('CR5_Realsense (yolov5_detect)', img)
112 |
113 | def get_mid_pos(center):
114 | global depth_global
115 | print(center)
116 | depth_data = depth_global
117 | width = 410
118 | height = 300
119 | percent_w = width/640
120 | percent_h = height/480
121 | radius = 5
122 |
123 | #print(percent_w,percent_w)
124 |
125 | X = center[0] * percent_w
126 | Y = center[1] * percent_h
127 |
128 | distance_list = []
129 |
130 | # Sample a (2*radius+1)^2 window around the target pixel
131 | print(int(X), int(Y))
132 |
133 | for i in range(max(0, int(Y)-radius), min(depth_data.shape[0], int(Y)+radius+1)):
134 | for j in range(max(0, int(X)-radius), min(depth_data.shape[1], int(X)+radius+1)):
135 | dist = depth_data[i, j]
136 | if dist:
137 | distance_list.append(dist)
138 |
139 | distance_list = np.array(distance_list)
140 | if distance_list.size == 0:
141 | return 0  # no valid depth in the window
142 | return int(np.mean(distance_list))
143 |
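# Depth values from /camera/depth/image_rect_raw are in millimeters,
# hence the /1000 conversion to meters in image_callback above.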
144 | def depth_callback(msg):
145 | global depth_global
146 | depth_image = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
147 | # Define crop parameters
148 | crop_left = 100
149 | crop_right = 130
150 | crop_top = 90
151 | crop_bottom = 90
152 |
153 | # Crop depth image
154 | depth_cropped = depth_image[crop_top:-crop_bottom, crop_left:-crop_right]
155 | #depth_cropped = depth_image
156 |
157 | depth_global = depth_cropped
158 | depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_cropped, alpha=0.30), cv2.COLORMAP_JET)
159 |
160 | depth_colormap = cv2.cvtColor(depth_colormap, cv2.COLOR_BGR2RGB)
161 | ros_image = bridge.cv2_to_imgmsg(depth_colormap, encoding="rgb8") # Convert OpenCV image to ROS Image message
162 | pub_depth_img.publish(ros_image) # Publish ROS Image message to topic
163 | #cv2.imshow('depth_image', depth_colormap)
164 | #key = cv2.waitKey(1)
165 |
166 | def detect_order(msg):
167 | global start_detect
168 | start_detect = 1
169 |
170 | rospy.loginfo("I heard %s", msg.data)
171 |
172 | if __name__ == "__main__":
173 |
174 | # module-level assignment; `global` is only needed inside functions
175 | start_detect = 0
176 | print("Ready to detect")
177 | rospy.init_node('yolo_listener', anonymous=True)
178 | # Publish to rostopic
179 | pub_img = rospy.Publisher("/main/yolo_detect_image", Image, queue_size=10)
180 | pub_depth_img = rospy.Publisher("/main/color_depth_image", Image, queue_size=10)
181 |
182 | # Subscribe to rostopic
183 | rospy.Subscriber("/main/yolo_order", String, detect_order)
184 | rospy.Subscriber("/camera/color/image_raw", Image, image_callback)
185 | rospy.Subscriber("/camera/depth/image_rect_raw", Image, depth_callback)
186 |
187 | rospy.spin()
--------------------------------------------------------------------------------