├── .gitignore
├── LICENSE
├── README.md
├── drv_action
├── CMakeLists.txt
├── package.xml
└── src
│ └── drv_action.cpp
├── drv_brain
├── CMakeLists.txt
├── launch
│ ├── drv_gpd_port.launch
│ ├── drv_v2_host.launch
│ ├── drv_v2_single_cpu.launch
│ ├── drv_v2_single_gpu.launch
│ ├── drv_v2_workstation.launch
│ └── includes
│ │ ├── drv_v2_host.launch.xml
│ │ └── drv_v2_workstation.launch.xml
├── package.xml
└── src
│ ├── drv_brain.cpp
│ ├── facelistener.cpp
│ ├── facelistener.h
│ ├── targetlistener.cpp
│ ├── targetlistener.h
│ ├── targetselecter.cpp
│ └── targetselecter.h
├── drv_face_client
├── CMakeLists.txt
├── package.xml
└── src
│ ├── drv_face_client.cpp
│ ├── facedetector.cpp
│ └── facedetector.h
├── drv_face_service
├── CMakeLists.txt
├── package.xml
├── scripts
│ ├── drv_face_service.py
│ └── process.py
└── src
│ ├── classifier.cpp
│ ├── classifier.h
│ ├── drv_face_service.cpp
│ ├── processface.cpp
│ └── processface.h
├── drv_face_train
├── CMakeLists.txt
├── package.xml
├── scripts
│ ├── face_train_server.py
│ └── process.py
└── src
│ ├── drv_face_train.cpp
│ ├── facedetector.cpp
│ └── facedetector.h
├── drv_grasp
├── CMakeLists.txt
├── package.xml
└── src
│ ├── drv_frame_service.cpp
│ ├── drv_grasp.cpp
│ ├── drv_put.cpp
│ ├── fetchrgbd.cpp
│ ├── fetchrgbd.h
│ ├── getsourcecloud.cpp
│ ├── getsourcecloud.h
│ ├── makeplan.cpp
│ ├── makeplan.h
│ ├── obstacledetect.cpp
│ ├── obstacledetect.h
│ ├── transform.cpp
│ ├── transform.h
│ ├── utilities.cpp
│ └── utilities.h
├── drv_motor
├── CMakeLists.txt
├── package.xml
└── src
│ ├── drv_motor.cpp
│ ├── getpose.cpp
│ └── getpose.h
├── drv_msgs
├── CMakeLists.txt
├── action
│ └── Vision.action
├── msg
│ ├── recognized_faces.msg
│ ├── recognized_objects.msg
│ ├── recognized_target.msg
│ └── target_info.msg
├── package.xml
└── srv
│ ├── face_recognize.srv
│ ├── face_train.srv
│ ├── frame_transform.srv
│ ├── recognize.srv
│ ├── refine_depth.srv
│ ├── servo.srv
│ └── user_select.srv
├── drv_pointcloud
├── CMakeLists.txt
├── cfg
│ └── pointcloud.cfg
├── go_to_goal.py
├── package.xml
├── src
│ ├── drv_pointcloud.cpp
│ ├── getsourcecloud.cpp
│ └── getsourcecloud.h
└── test_publisher.py
├── drv_recognize
├── CMakeLists.txt
├── cfg
│ └── recog.cfg
├── package.xml
├── scripts
│ ├── drv_object_detection.py
│ ├── drv_recognize.py
│ ├── drv_recognize_client.py
│ └── process.py
└── src
│ ├── detectcolor.cpp
│ ├── detectcolor.h
│ ├── drv_recognize_color.cpp
│ ├── drv_recognize_hand.cpp
│ ├── drv_refine_depth.cpp
│ ├── processdepth.cpp
│ ├── processdepth.h
│ ├── refinedepth.cpp
│ ├── refinedepth.h
│ ├── transform.cpp
│ └── transform.h
├── drv_search
├── CMakeLists.txt
├── package.xml
└── src
│ ├── drv_search.cpp
│ ├── search.cpp
│ ├── search.h
│ ├── segment.cpp
│ ├── segment.h
│ ├── smoothservo.cpp
│ ├── smoothservo.h
│ ├── targetselect.cpp
│ ├── targetselect.h
│ ├── utilities.cpp
│ └── utilities.h
├── drv_tf
├── CMakeLists.txt
├── cfg
│ └── tf.cfg
├── package.xml
└── src
│ ├── drv_tf.cpp
│ ├── movemean.cpp
│ └── movemean.h
├── drv_track
├── CMakeLists.txt
├── cmake
│ └── Modules
│ │ ├── FindCaffe.cmake
│ │ └── FindTinyXML.cmake
├── package.xml
└── src
│ ├── drv_track.cpp
│ ├── goturn.cpp
│ ├── goturn.h
│ ├── helper
│ ├── bounding_box.cpp
│ ├── bounding_box.h
│ ├── helper.cpp
│ ├── helper.h
│ ├── high_res_timer.cpp
│ ├── high_res_timer.h
│ ├── image_proc.cpp
│ └── image_proc.h
│ ├── loader
│ ├── loader_alov.cpp
│ ├── loader_alov.h
│ ├── loader_imagenet_det.cpp
│ ├── loader_imagenet_det.h
│ ├── loader_vot.cpp
│ ├── loader_vot.h
│ ├── video.cpp
│ ├── video.h
│ ├── video_loader.cpp
│ └── video_loader.h
│ ├── native
│ ├── mex
│ │ └── matlab_static.m
│ ├── ncc.cpp
│ ├── static.c
│ ├── static.cpp
│ ├── trax.h
│ ├── traxclient
│ ├── traxdummy
│ ├── vot.cpp
│ └── vot.h
│ ├── network
│ ├── regressor.cpp
│ ├── regressor.h
│ ├── regressor_base.cpp
│ ├── regressor_base.h
│ ├── regressor_train.cpp
│ ├── regressor_train.h
│ ├── regressor_train_base.cpp
│ └── regressor_train_base.h
│ ├── test
│ ├── test_tracker_alov.cpp
│ ├── test_tracker_vot.cpp
│ └── track.cpp
│ ├── tracker
│ ├── tracker.cpp
│ ├── tracker.h
│ ├── tracker_manager.cpp
│ └── tracker_manager.h
│ ├── train
│ ├── example_generator.cpp
│ ├── example_generator.h
│ ├── pretrain.cpp
│ ├── tracker_trainer.cpp
│ ├── tracker_trainer.h
│ └── train.cpp
│ ├── utilities.cpp
│ ├── utilities.h
│ └── visualizer
│ ├── show_alov.cpp
│ ├── show_imagenet.cpp
│ ├── show_tracker_alov.cpp
│ └── show_tracker_vot.cpp
├── drv_track_kcf
├── CMakeLists.txt
├── LICENSE
├── package.xml
└── src
│ ├── drv_track_kcf.cpp
│ ├── ffttools.hpp
│ ├── fhog.cpp
│ ├── fhog.hpp
│ ├── kcftracker.cpp
│ ├── kcftracker.hpp
│ ├── labdata.hpp
│ ├── recttools.hpp
│ ├── tracker.h
│ ├── utilities.cpp
│ └── utilities.h
├── drv_user
├── CMakeLists.txt
├── package.xml
└── src
│ └── drv_user.py
└── supplements
├── arduino_control
├── Arduino_vision.rules
├── JY901
│ ├── JY901.cpp
│ ├── JY901.h
│ ├── examples
│ │ ├── JY901IIC
│ │ │ └── JY901IIC.ino
│ │ └── JY901Serial
│ │ │ └── JY901Serial.ino
│ └── keywords.txt
├── arduino_control.ino
└── arduino_control_v2.ino
├── camera_info
├── depth_Astra_Orbbec.yaml
├── depth_PS1080_PrimeSense.yaml
├── rgb_Astra_Orbbec.yaml
└── rgb_PS1080_PrimeSense.yaml
├── face_recognize
├── face_deploy.prototxt
├── face_meta.prototxt
├── haarcascade_eye_tree_eyeglasses.xml
├── haarcascade_frontalface_alt.xml
└── images
│ └── 0
│ ├── 0.jpg
│ ├── 1.jpg
│ ├── 10.jpg
│ ├── 11.jpg
│ ├── 12.jpg
│ ├── 13.jpg
│ ├── 14.jpg
│ ├── 15.jpg
│ ├── 16.jpg
│ ├── 17.jpg
│ ├── 18.jpg
│ ├── 19.jpg
│ ├── 2.jpg
│ ├── 20.jpg
│ ├── 21.jpg
│ ├── 22.jpg
│ ├── 23.jpg
│ ├── 24.jpg
│ ├── 25.jpg
│ ├── 26.jpg
│ ├── 27.jpg
│ ├── 28.jpg
│ ├── 29.jpg
│ ├── 3.jpg
│ ├── 30.jpg
│ ├── 31.jpg
│ ├── 32.jpg
│ ├── 33.jpg
│ ├── 34.jpg
│ ├── 35.jpg
│ ├── 36.jpg
│ ├── 37.jpg
│ ├── 38.jpg
│ ├── 39.jpg
│ ├── 4.jpg
│ ├── 40.jpg
│ ├── 41.jpg
│ ├── 42.jpg
│ ├── 43.jpg
│ ├── 44.jpg
│ ├── 45.jpg
│ ├── 46.jpg
│ ├── 47.jpg
│ ├── 48.jpg
│ ├── 49.jpg
│ ├── 5.jpg
│ ├── 50.jpg
│ ├── 51.jpg
│ ├── 52.jpg
│ ├── 53.jpg
│ ├── 54.jpg
│ ├── 55.jpg
│ ├── 56.jpg
│ ├── 57.jpg
│ ├── 58.jpg
│ ├── 59.jpg
│ ├── 6.jpg
│ ├── 60.jpg
│ ├── 61.jpg
│ ├── 62.jpg
│ ├── 63.jpg
│ ├── 64.jpg
│ ├── 65.jpg
│ ├── 66.jpg
│ ├── 67.jpg
│ ├── 68.jpg
│ ├── 69.jpg
│ ├── 7.jpg
│ ├── 70.jpg
│ ├── 71.jpg
│ ├── 72.jpg
│ ├── 73.jpg
│ ├── 74.jpg
│ ├── 75.jpg
│ ├── 76.jpg
│ ├── 77.jpg
│ ├── 78.jpg
│ ├── 79.jpg
│ ├── 8.jpg
│ ├── 80.jpg
│ ├── 81.jpg
│ ├── 82.jpg
│ ├── 83.jpg
│ ├── 84.jpg
│ ├── 85.jpg
│ ├── 86.jpg
│ ├── 87.jpg
│ ├── 88.jpg
│ ├── 89.jpg
│ ├── 9.jpg
│ ├── 90.jpg
│ ├── 91.jpg
│ ├── 92.jpg
│ ├── 93.jpg
│ ├── 94.jpg
│ ├── 95.jpg
│ ├── 96.jpg
│ ├── 97.jpg
│ ├── 98.jpg
│ └── 99.jpg
├── figures
├── 1.png
├── hardware.png
└── interior.JPG
├── object_recognize
└── faster_rcnn_test.pt
├── object_track
└── tracker.prototxt
├── password
└── refine_depth
└── refine_depth.prototxt
/.gitignore:
--------------------------------------------------------------------------------
1 | #IGNORE CERTAIN SUFFIXES
2 | *.user
3 | *.pyc
4 | *.caffemodel
5 | *.solverstate
6 | *.jpg # don't update train image
7 |
8 | #IGNORED FOLDERS
9 | build/
10 | .idea/
11 | supplements/face_recognize/images
12 |
13 | #IGNORE FILES GENERATED BY TRAINING
14 | supplements/face_recognize/names.txt
15 | supplements/face_recognize/train.txt
16 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Zhipeng Dong
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/drv_action/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(drv_action)
3 |
4 | aux_source_directory(./src SRC_LIST)
5 |
6 | if (NOT CMAKE_BUILD_TYPE)
7 | set(CMAKE_BUILD_TYPE "Debug")
8 | message(STATUS "No build type selected, default to ${CMAKE_BUILD_TYPE}")
9 | endif()
10 |
11 | find_package(catkin REQUIRED COMPONENTS
12 | actionlib
13 | actionlib_msgs
14 | cv_bridge
15 | drv_msgs
16 | dynamic_reconfigure
17 | geometry_msgs
18 | image_geometry
19 | image_transport
20 | pcl_ros
21 | roscpp
22 | rospy
23 | sensor_msgs
24 | std_msgs
25 | tf2
26 | tf2_ros
27 | )
28 |
29 | find_package(Boost REQUIRED COMPONENTS system)
30 |
31 | catkin_package(
32 | CATKIN_DEPENDS
33 | actionlib
34 | actionlib_msgs
35 | drv_msgs
36 | )
37 |
38 | include_directories(
39 | ${catkin_INCLUDE_DIRS}
40 | ${Boost_INCLUDE_DIRS}
41 | )
42 |
43 | add_executable(drv_action ${SRC_LIST})
44 | target_link_libraries(drv_action ${catkin_LIBRARIES})
45 |
46 | add_dependencies(
47 | drv_action
48 | ${drv_msgs_EXPORTED_TARGETS}
49 | )
50 |
--------------------------------------------------------------------------------
/drv_brain/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(drv_brain)
3 |
4 | aux_source_directory(./src SRC_LIST)
5 |
6 | if (NOT CMAKE_BUILD_TYPE)
7 | set(CMAKE_BUILD_TYPE "Release")
8 | message(STATUS "No build type selected, default to ${CMAKE_BUILD_TYPE}")
9 | endif()
10 |
11 | find_package(catkin REQUIRED COMPONENTS
12 | cv_bridge
13 | drv_msgs
14 | dynamic_reconfigure
15 | geometry_msgs
16 | image_transport
17 | pcl_ros
18 | roscpp
19 | rospy
20 | sensor_msgs
21 | std_msgs
22 | tf
23 | )
24 |
25 | set(OpenCV_DIR "/usr/share/OpenCV")
26 | find_package(OpenCV REQUIRED)
27 |
28 | find_package(PCL 1.7 REQUIRED)
29 |
30 | catkin_package(
31 | )
32 |
33 | link_directories(${PCL_LIBRARY_DIRS})
34 | add_definitions(${PCL_DEFINITIONS})
35 |
36 | include_directories(${catkin_INCLUDE_DIRS} ${PCL_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS})
37 |
38 | add_executable(drv_brain ${SRC_LIST})
39 |
40 | target_link_libraries(drv_brain ${catkin_LIBRARIES} ${OpenCV_LIBS} ${PCL_LIBRARIES})
41 |
--------------------------------------------------------------------------------
/drv_brain/launch/drv_v2_host.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/drv_brain/launch/drv_v2_single_cpu.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/drv_brain/launch/drv_v2_single_gpu.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/drv_brain/launch/drv_v2_workstation.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/drv_brain/launch/includes/drv_v2_workstation.launch.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/drv_brain/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | drv_brain
4 | 0.0.0
5 | The drv_brain package
6 |
7 | ZhipengDong
8 |
9 | MIT
10 |
11 | catkin
12 | cv_bridge
13 | drv_msgs
14 | dynamic_reconfigure
15 | geometry_msgs
16 | image_transport
17 | pcl_ros
18 | roscpp
19 | rospy
20 | sensor_msgs
21 | std_msgs
22 | tf
23 | cv_bridge
24 | drv_msgs
25 | dynamic_reconfigure
26 | geometry_msgs
27 | image_transport
28 | pcl_ros
29 | roscpp
30 | rospy
31 | sensor_msgs
32 | std_msgs
33 | tf
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/drv_brain/src/facelistener.cpp:
--------------------------------------------------------------------------------
1 | #include "facelistener.h"
2 |
// Sets up the two ROS parameter names used to toggle face recognition:
// one written by the remote controller (/comm/...) and one local (/vision/...).
3 | FaceListener::FaceListener()
4 | {
5 | control_param_need_recog = "/comm/param/control/face/need_recognize";
6 | local_param_need_recog = "/vision/face/need_recognize";
7 | }
8 |
// NOTE(review): the entire body below is commented out, so this method is
// currently a no-op. It presumably used to mirror the "need recognize" flag
// onto the local parameter — confirm intent before re-enabling.
9 | void FaceListener::isNeedRecognizeFace()
10 | {
11 | // if (ros::param::has(local_param_need_recog))
12 | // {
13 | // bool need_recog = false;
14 | // ros::param::get(local_param_need_recog, need_recog);
15 | // if (need_recog)
16 | // {
17 | // ros::param::set(local_param_need_recog, true);
18 | // }
19 | // else
20 | // {
21 | // ros::param::set(local_param_need_recog, false);
22 | // }
23 | // }
24 | // else
25 | // ros::param::set(local_param_need_recog, false);
26 | }
27 |
--------------------------------------------------------------------------------
/drv_brain/src/facelistener.h:
--------------------------------------------------------------------------------
1 | #ifndef FACELISTENER_H
2 | #define FACELISTENER_H
3 |
4 | #include
5 |
6 | using namespace std;
7 |
// Watches the ROS parameters that toggle face recognition on/off.
8 | class FaceListener
9 | {
10 | public:
11 | FaceListener();  // initializes the two parameter-name strings
12 | void isNeedRecognizeFace();  // NOTE(review): currently a no-op — body is commented out in facelistener.cpp
13 |
14 | private:
15 | string control_param_need_recog;  // remote flag: /comm/param/control/face/need_recognize
16 | string local_param_need_recog;  // local flag: /vision/face/need_recognize
17 | };
18 |
19 | #endif // FACELISTENER_H
20 |
--------------------------------------------------------------------------------
/drv_brain/src/targetlistener.h:
--------------------------------------------------------------------------------
1 | #ifndef TARGETLISTENER_H
2 | #define TARGETLISTENER_H
3 |
4 | #include
5 | #include
6 |
7 | using namespace std;
8 |
// Listens on ROS parameters for the current vision target — set by the
// Android app, by actions, or by face-training requests — and reports
// whether a target is active and what its label is.
9 | class TargetListener
10 | {
11 | public:
12 | TargetListener();
13 | void getTargetStatus(bool &is_tgt_set, string &tgt_label, int &is_pub);  // query current target state
14 |
15 | private:
16 | bool isTargetSet_;  // whether a target is currently active
17 | string targetLabel_;  // label of the active target
18 |
19 | // Target info from Android device
20 | string param_target_set;
21 | string param_target_label;
22 |
23 | // Training face recognition info
24 | string param_train_face_name;
25 | string param_train_face_run;
26 |
27 | // Target info from action
28 | string param_action_target_set;
29 | string param_action_target_label;
30 |
31 | // Is put down the object in hand
32 | string param_comm_is_put;
33 |
34 | /**
35 | * @brief TargetListener::checkLabel
36 | * @param label
37 | * Label formats: Meanings
38 | * bottle target to be searched
39 | * name:Wang name to be added (by face training method)
40 | * train:face train on face data
41 | * Notice that target label has no ':'
42 | * @return
43 | */
44 | bool checkLabel(string label);
45 | };
46 |
47 | #endif // TARGETLISTENER_H
48 |
--------------------------------------------------------------------------------
/drv_brain/src/targetselecter.h:
--------------------------------------------------------------------------------
1 | #ifndef ANDROIDLISTENER_H
2 | #define ANDROIDLISTENER_H
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | #include
9 | #include
10 | #include
11 |
12 | #include
13 |
// Receives target region-of-interest selections (from the Android app or
// from image_view2 rectangle/point input) and publishes the selected
// target exactly once per selection.
14 | class TargetSelector
15 | {
16 | public:
17 | TargetSelector();
18 |
19 | ros::NodeHandle nh;
20 | void publishOnceIfTargetSelected(bool &is_tgt_set, bool &is_tgt_found);  // publish rt_ once when a selection is pending
21 |
22 | private:
23 | ros::Publisher ALPubROI_;  // publishes the selected target ROI
24 |
25 | // ALSubROI_ sub roi from Android App and process it with roiCallback
26 | ros::Subscriber ALSubROI_;
27 | void APPROICallback(const std_msgs::Float32MultiArrayConstPtr &roi_msg);
28 |
29 | // subImageView2ROI_ sub roi from ImageView2 and process it with IVROICallback
30 | ros::Subscriber subImageView2ROI_;
31 | void IVROICallback(const geometry_msgs::PolygonStampedConstPtr &roi_msg);
32 |
33 | ros::Subscriber subImageView2P_;  // single-point input from image_view2
34 | void IVPCallback(const geometry_msgs::PointStampedConstPtr &p_msg);
35 |
36 | drv_msgs::recognized_target rt_;  // pending target message to publish
37 | bool targetNeedPub_;  // true while rt_ has not been published yet
38 |
39 | };
40 |
41 | #endif // ANDROIDLISTENER_H
42 |
--------------------------------------------------------------------------------
/drv_face_client/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(drv_face_client)
3 |
4 | aux_source_directory(./src SRC_LIST)
5 |
6 | if (NOT CMAKE_BUILD_TYPE)
7 | set(CMAKE_BUILD_TYPE "Debug")
8 | message(STATUS "No build type selected, default to ${CMAKE_BUILD_TYPE}")
9 | endif()
10 |
11 | find_package(OpenCV REQUIRED)
12 |
13 |
14 | find_package(catkin REQUIRED COMPONENTS
15 | cv_bridge
16 | drv_msgs
17 | dynamic_reconfigure
18 | geometry_msgs
19 | image_geometry
20 | image_transport
21 | pcl_ros
22 | roscpp
23 | rospy
24 | sensor_msgs
25 | std_msgs
26 | tf2
27 | tf2_ros
28 | )
29 |
30 | catkin_package()
31 |
32 | include_directories(${catkin_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS})
33 |
34 | add_executable(drv_face_client ${SRC_LIST})
35 |
36 | target_link_libraries(drv_face_client ${catkin_LIBRARIES} ${OpenCV_LIBS})
37 |
--------------------------------------------------------------------------------
/drv_face_client/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | drv_face_client
4 | 0.0.0
5 | The drv_face_client package
6 |
7 |
8 |
9 |
10 | aicrobo
11 |
12 |
13 |
14 |
15 |
16 | TODO
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 | catkin
43 | cv_bridge
44 | drv_msgs
45 | dynamic_reconfigure
46 | geometry_msgs
47 | image_geometry
48 | image_transport
49 | roscpp
50 | rospy
51 | sensor_msgs
52 | std_msgs
53 | cv_bridge
54 | drv_msgs
55 | dynamic_reconfigure
56 | geometry_msgs
57 | image_geometry
58 | image_transport
59 | roscpp
60 | rospy
61 | sensor_msgs
62 | std_msgs
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/drv_face_client/src/facedetector.h:
--------------------------------------------------------------------------------
1 | #ifndef FACEDETECTOR_H
2 | #define FACEDETECTOR_H
3 |
4 | #include
5 |
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
12 | using namespace std;
13 | using namespace cv;
14 |
// Haar-cascade based face detector built on OpenCV's CascadeClassifier.
// NOTE(review): template argument lists were stripped in this dump — the
// vector parameters below are presumably vector<Rect> / vector<Mat>;
// confirm against the original header before compiling.
15 | class FaceDetector
16 | {
17 | public:
18 | FaceDetector(string path);  // path: location of the cascade XML files
19 | bool Process(Mat img_in, Mat &img_out, vector &faceRoi, std::vector &face_imgs);  // detect faces; outputs ROIs and cropped face images
20 |
21 | private:
22 | void detectAndDraw(Mat img_in, Mat& img_out, double scale, bool tryflip, vector &face_roi);
23 | void getFaceFromRoi(Mat img_in, vector face_roi, vector &face_imgs);
24 | bool trySquareRoi(Mat img_in, Rect face_roi, Rect &square_roi);  // presumably makes the face ROI square — confirm
25 |
26 | CascadeClassifier cascade_;  // primary (face) cascade
27 | CascadeClassifier nestedCascade_;  // nested cascade (eyes, per the eyeglasses XML in supplements)
28 | };
29 |
30 | #endif // FACEDETECTOR_H
31 |
--------------------------------------------------------------------------------
/drv_face_service/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(drv_face_service)
3 |
4 | aux_source_directory(./src SRC_LIST)
5 |
6 | #set(CMAKE_BUILD_TYPE "Debug")
7 | if (NOT CMAKE_BUILD_TYPE)
8 | set(CMAKE_BUILD_TYPE "Release")
9 | message(STATUS "No build type selected, default to ${CMAKE_BUILD_TYPE}")
10 | endif()
11 |
12 | find_package(catkin REQUIRED COMPONENTS
13 | cv_bridge
14 | drv_msgs
15 | dynamic_reconfigure
16 | geometry_msgs
17 | image_geometry
18 | image_transport
19 | roscpp
20 | rospy
21 | sensor_msgs
22 | std_msgs
23 | )
24 |
25 | find_package(Boost COMPONENTS system filesystem regex REQUIRED)
26 |
27 | find_package(Caffe REQUIRED)
28 | message("Caffe DIR is ${Caffe_DIR}")
29 | # set(Caffe_INCLUDE_DIRS "/home/aicrobo/Downloads/caffe/cmake_build/include") this works!
30 | include_directories(${Caffe_INCLUDE_DIRS})
31 | add_definitions(${Caffe_DEFINITIONS}) # ex. -DCPU_ONLY
32 |
33 | set(OpenCV_DIR "/usr/share/OpenCV")
34 | find_package(OpenCV REQUIRED )
35 | message("Open CV version is ${OpenCV_VERSION}")
36 |
37 | if(CUDA_FOUND)
38 | include_directories(${CUDA_INCLUDE_DIRS})
39 | endif()
40 |
41 | catkin_package()
42 |
43 | include_directories(
44 | ${catkin_INCLUDE_DIRS}
45 | )
46 |
47 | add_executable(${PROJECT_NAME} ${SRC_LIST})
48 | target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBS} ${Caffe_LIBRARIES} ${Boost_LIBRARIES})
49 |
--------------------------------------------------------------------------------
/drv_face_service/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | drv_face_service
4 | 0.0.0
5 | The drv_face_service package
6 |
7 | ZhipengDong
8 |
9 | MIT
10 |
11 | catkin
12 | cv_bridge
13 | drv_msgs
14 | dynamic_reconfigure
15 | geometry_msgs
16 | image_geometry
17 | image_transport
18 | roscpp
19 | rospy
20 | sensor_msgs
21 | std_msgs
22 | cv_bridge
23 | drv_msgs
24 | dynamic_reconfigure
25 | geometry_msgs
26 | image_geometry
27 | image_transport
28 | roscpp
29 | rospy
30 | sensor_msgs
31 | std_msgs
32 |
33 |
34 |
35 |
36 |
37 |
--------------------------------------------------------------------------------
/drv_face_service/scripts/drv_face_service.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import roslib
4 | import rospy
5 | import process as ps
6 |
7 | from drv_msgs.srv import *
8 | from cv_bridge import CvBridge, CvBridgeError
9 |
10 | roslib.load_manifest('drv_face_service')
11 |
12 |
# Service handler: classify each face crop in the request and return one
# label id per image (0 = unknown, known person ids start at 1).
# NOTE(review): the C++ server (src/drv_face_service.cpp) reads its 0.9
# threshold from the ~face_likelihood parameter, while here it is
# hard-coded — consider keeping the two in sync.
13 | def handle_face_recognize(req):
14 | rsp = face_recognizeResponse()
15 |
16 | bridge = CvBridge()
17 | fl_msg = []  # one label id per input image
18 | try:
19 | for im in req.images_in:
20 | cv_image = bridge.imgmsg_to_cv2(im, "bgr8")
21 | # Leave image preprocess to client
22 | result = ps.process(cv_image)  # result = [class id, probability]
23 |
24 | if result[1] > 0.9:
25 | name_id_msg = result[0] + 1 # known face id start with 1
26 | else:
27 | name_id_msg = 0 # unknown face has id 0
28 | fl_msg.append(name_id_msg)
29 |
30 | rsp.face_label_ids = fl_msg
31 | return rsp
32 |
33 | except CvBridgeError as e:
34 | print(e)
35 | return rsp  # on conversion error, return whatever was filled so far
36 |
37 |
# Start the ROS node and advertise the 'drv_face_service' service.
38 | def face_recognize_server():
39 | rospy.init_node('face_recognize_server')
40 | s = rospy.Service('drv_face_service', face_recognize, handle_face_recognize)
41 |
42 | print "Ready to recognize face."  # Python 2 print statement (ROS-era script)
43 | rospy.spin()  # block until shutdown; service callbacks run from here
44 |
45 |
46 | if __name__ == "__main__":
47 | face_recognize_server()
48 |
--------------------------------------------------------------------------------
/drv_face_service/scripts/process.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # Make sure these 2 lines are in your ~/.bashrc:
4 |
5 | # export PYTHONPATH="/home/USER/py-faster-rcnn/lib:/home/USER/py-faster-rcnn/caffe-fast-rcnn/python:${PYTHONPATH}"
6 | # export DRV=/home/USER/catkin_ws/src/drv_package
7 |
8 | # Change 'USER' according to your environment
9 |
10 | # Author: Zhipeng Dong
11 | # 2017.9.6
12 |
13 | # --------------------------------------------------------
14 |
15 | import sys
16 | import os
17 | import caffe
18 |
19 | # # Load caffe
20 | # caffe_root = '/home/aicrobo/caffe/'
21 | # sys.path.insert(0, caffe_root + 'python')
22 |
23 |
# Classify one face image with the VGG-face Caffe network.
# Returns [class_id, probability]; returns [0, 0] on any setup failure.
# NOTE(review): the net and transformer are rebuilt on EVERY call, which
# reloads the .caffemodel from disk each time — caching the net in a
# module-level variable would make repeated calls far faster.
24 | def process(im):
25 | result = [0, 0]  # default: class 0 with zero confidence
26 |
27 | if "DRV" not in os.environ:
28 | print "Can't find environment variable DRV."
29 | return result
30 |
31 | dir_prefix = os.environ['DRV']
# NOTE(review): these paths reference supplements/neu_face/, but the repo
# tree ships supplements/face_recognize/ — confirm which layout is current.
32 | prototxt = dir_prefix + '/supplements/neu_face/neu_face_deploy.prototxt'
33 | caffemodel = dir_prefix + '/supplements/neu_face/finetune_neu_face.caffemodel'
34 |
35 | if os.path.isfile(prototxt) and os.path.isfile(caffemodel):
36 | print 'Caffe prototxt and model found.'
37 | else:
38 | print 'Caffe prototxt or model not found!'
39 | return result
40 |
41 | use_gpu = True  # hard-coded: always GPU device 0 when True
42 | if use_gpu:
43 | caffe.set_mode_gpu()
44 | caffe.set_device(0)
45 | else:
46 | caffe.set_mode_cpu()
47 | net = caffe.Net(prototxt, caffemodel, caffe.TEST)
48 |
49 | # create transformer for the input called 'data'
50 | transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
51 | transformer.set_transpose('data', (2, 0, 1)) # move image channels to outermost dimension
52 | transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255]
53 | transformer.set_channel_swap('data', (2, 1, 0)) # swap channels from RGB to BGR
54 |
55 | net.blobs['data'].reshape(1, # batch size
56 | 3, # 3-channel (BGR) images
57 | 224, 224) # image size is 224x224 for vgg face
58 |
59 | transformed_image = transformer.preprocess('data', im)
60 | # copy the image data into the memory allocated for the net
61 | net.blobs['data'].data[...] = transformed_image
62 |
63 | # perform classification
64 | output = net.forward()
65 | output_prob = output['prob'][0] # the output probability vector for the first image in the batch
66 |
67 | # id and probability
68 | result = [output_prob.argmax(), output_prob[output_prob.argmax()]]
69 |
70 | print result
71 | return result
72 |
--------------------------------------------------------------------------------
/drv_face_service/src/classifier.h:
--------------------------------------------------------------------------------
1 | #ifndef CLASSIFIER_H
2 | #define CLASSIFIER_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 | #include
12 | #include
13 |
14 |
15 | using namespace std;
16 | using namespace cv;
17 |
// Caffe-backed face classifier: wraps a caffe::Net, preprocesses one input
// image, and returns the arg-max class id plus its probability ("trust").
// NOTE(review): several template argument lists were stripped in this dump
// (net_, the vector parameters below) — restore from the original header
// before compiling.
18 | class Classifier
19 | {
20 | public:
21 | Classifier();
22 |
23 | Classifier(const string& train_deploy_proto,
24 | const string& caffe_model,
25 | const int gpu_id,
26 | const bool do_train);
27 |
28 | void Classify(Mat image_in, int &id, float &trust);  // classify one image: outputs class id and its probability
29 |
30 | protected:
31 | boost::shared_ptr > net_;
32 |
33 | // Get the features corresponding to the output of the network.
34 | void GetOutput(int &id, float &trust);
35 |
36 | // Get the features in the network with the given name,
37 | // and copy their values to the output.
38 | void GetFeatures(const string& feature_name, int &id, float &trust);
39 |
40 | private:
41 | // Set up a network with the architecture specified in deploy_proto,
42 | // with the model weights saved in caffe_model.
43 | void SetupNetwork(const string& deploy_proto,
44 | const string& caffe_model,
45 | const int gpu_id,
46 | const bool do_train);
47 |
48 | // Set the mean input (used to normalize the inputs to be 0-mean).
49 | void SetMean();
50 |
51 | // Wrap the input layer of the network in separate cv::Mat objects
52 | // (one per channel).
53 | void WrapInputLayer(vector* target_channels);
54 |
55 | // Set the inputs to the network.
56 | void Preprocess(const Mat& img, vector* input_channels);
57 |
58 | vector Argmax(vector v, int N);  // indices of the N largest values in v
59 |
60 | private:
61 | // Folder containing the model weights.
62 | string caffe_model_;
63 | // Size of the input images.
64 | Size input_geometry_;
65 |
66 | // Number of image channels: normally either 1 (gray) or 3 (bgr).
67 | int num_channels_;
68 |
69 | // Mean image, used to make the input 0-mean.
70 | Mat mean_;
71 | };
72 |
73 | #endif // CLASSIFIER_H
74 |
--------------------------------------------------------------------------------
/drv_face_service/src/drv_face_service.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 |
5 | #include
6 | #include
7 |
8 | #include "processface.h"
9 |
10 | using namespace std;
11 |
12 | char* drv_path_env = getenv("DRV");
13 |
14 | string drv_path_ = string(drv_path_env) + "/supplements/face_recognize/";
15 |
16 | string prototxt = drv_path_ + "face_deploy.prototxt";
17 | string caffemodel = drv_path_ + "face_deploy.caffemodel";
18 |
19 | ProcessFace pf_(prototxt, caffemodel, 0, false);
20 | bool pf_init = false;
21 |
22 | float face_th_ = 0.9;
23 |
24 | bool recognize_face(drv_msgs::face_recognize::Request &req,
25 | drv_msgs::face_recognize::Response &res)
26 | {
27 | size_t im = req.images_in.size();
28 | vector result_id;
29 | for (size_t i = 0;i < im; i++) {
30 | cv_bridge::CvImagePtr src = cv_bridge::toCvCopy(req.images_in[i], "bgr8");
31 | int id = 0;
32 | float result_trust = 0.0;
33 | pf_.processFace(src->image, id, result_trust);
34 |
35 | // The result_id starts from 0. When result_id=0 the face is "unknown",
36 | // while known person id starts from 1 which is result_id + 1
37 | if (result_trust < face_th_)
38 | result_id.push_back(0);
39 | else
40 | result_id.push_back(id + 1);
41 | }
42 | res.face_label_ids = result_id;
43 | return true; // Return bool is necessary for service
44 | }
45 |
46 | int main(int argc, char **argv)
47 | {
48 | ros::init(argc, argv, "face_recognize_server");
49 | ros::NodeHandle n;
50 | ros::NodeHandle pnh("~");
51 |
52 | pnh.getParam("face_likelihood", face_th_);
53 |
54 | ros::ServiceServer service = n.advertiseService("drv_face_service", recognize_face);
55 | ROS_INFO("Ready to recognize face.");
56 | ros::spin();
57 |
58 | return 0;
59 | }
60 |
--------------------------------------------------------------------------------
/drv_face_service/src/processface.cpp:
--------------------------------------------------------------------------------
1 | #include "processface.h"
2 |
// Default-construct with an uninitialized classifier; callers must invoke
// initClassifier() before processFace() when using this constructor.
3 | ProcessFace::ProcessFace()
4 | {
5 |
6 | }
7 |
// Construct and immediately set up the underlying Caffe classifier.
8 | ProcessFace::ProcessFace(string test_proto, string caffe_model, int gpu_id, bool do_train) :
9 | classifier_(test_proto, caffe_model, gpu_id, do_train)
10 | {
11 |
12 | }
13 |
// (Re)initialize the classifier in inference mode (do_train = false).
14 | void ProcessFace::initClassifier(string test_proto, string caffe_model, int gpu_id)
15 | {
16 | classifier_ = Classifier(test_proto, caffe_model, gpu_id, false);
17 | }
18 |
// Classify one face image: result_id is the class index, result_trust its probability.
19 | void ProcessFace::processFace(cv::Mat img_in, int &result_id, float &result_trust)
20 | {
21 | classifier_.Classify(img_in,result_id, result_trust);
22 | }
23 |
--------------------------------------------------------------------------------
/drv_face_service/src/processface.h:
--------------------------------------------------------------------------------
1 | #ifndef PROCESSFACE_H
2 | #define PROCESSFACE_H
3 |
4 | #include
5 | #include
6 |
7 | #include
8 | #include
9 |
10 | #include "classifier.h"
11 |
12 | using namespace std;
13 |
// Thin facade over Classifier for face recognition.
14 | class ProcessFace
15 | {
16 | public:
17 | ProcessFace();  // leaves classifier_ uninitialized; call initClassifier() first
18 | ProcessFace(string test_proto, string caffe_model, int gpu_id, bool do_train);  // constructs classifier_ directly
19 |
20 | void processFace(cv::Mat img_in, int &result_id, float &result_trust);  // class id + probability for one face image
21 |
22 | void initClassifier(string test_proto, string caffe_model, int gpu_id);  // inference mode (no training)
23 | private:
24 | Classifier classifier_;  // Caffe-backed classifier doing the actual work
25 | };
26 |
27 | #endif // PROCESSFACE_H
28 |
--------------------------------------------------------------------------------
/drv_face_train/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required(VERSION 2.8.3)
project(drv_face_train)

# Collect every source file under src/ into SRC_LIST.
aux_source_directory(./src SRC_LIST)

#set(CMAKE_BUILD_TYPE "Release")
# Default to Debug only when the user did not pick a build type.
if (NOT CMAKE_BUILD_TYPE)
  set(CMAKE_BUILD_TYPE "Debug")
  message(STATUS "No build type selected, default to ${CMAKE_BUILD_TYPE}")
endif()

find_package(catkin REQUIRED COMPONENTS
  cv_bridge
  drv_msgs
  dynamic_reconfigure
  image_geometry
  image_transport
  roscpp
  rospy
  sensor_msgs
  std_msgs
)

find_package(Boost COMPONENTS system filesystem regex REQUIRED)

# Caffe provides the CNN used for face training/recognition.
find_package(Caffe REQUIRED)
message("Caffe DIR is ${Caffe_DIR}")
include_directories(${Caffe_INCLUDE_DIRS})
add_definitions(${Caffe_DEFINITIONS})

# Pin the system OpenCV config to avoid picking up a ROS/OpenCV mix.
set(OpenCV_DIR "/usr/share/OpenCV")
find_package(OpenCV REQUIRED )
message("Open CV version is ${OpenCV_VERSION}")

# CUDA_FOUND is set as a side effect of the Caffe package when built
# with GPU support.
if(CUDA_FOUND)
  include_directories(${CUDA_INCLUDE_DIRS})
endif()

catkin_package()

include_directories(
  ${catkin_INCLUDE_DIRS}
)

# 'crypto' (OpenSSL libcrypto) is linked explicitly; presumably a
# transitive need of a dependency — confirm before removing.
add_executable(${PROJECT_NAME} ${SRC_LIST})
target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES}
  ${OpenCV_LIBS} ${Caffe_LIBRARIES} ${Boost_LIBRARIES} crypto)
48 |
49 |
--------------------------------------------------------------------------------
/drv_face_train/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | drv_face_train
4 | 0.0.0
5 | The drv_face_train package
6 |
7 | ZhipengDong
8 |
9 | MIT
10 |
11 | catkin
12 | cv_bridge
13 | drv_msgs
14 | dynamic_reconfigure
15 | image_geometry
16 | image_transport
17 | roscpp
18 | rospy
19 | sensor_msgs
20 | std_msgs
21 | cv_bridge
22 | drv_msgs
23 | dynamic_reconfigure
24 | image_geometry
25 | image_transport
26 | roscpp
27 | rospy
28 | sensor_msgs
29 | std_msgs
30 |
31 |
32 |
33 |
34 |
35 |
--------------------------------------------------------------------------------
/drv_face_train/scripts/face_train_server.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import roslib
4 | import rospy
5 | import process as ps
6 |
7 | from drv_msgs.srv import *
8 |
9 | roslib.load_manifest('drv_face_train')
10 |
11 |
def handle_train(req):
    # Service callback for 'face_train_service': run one training pass
    # and return the response.
    # NOTE(review): rsp.accuracy is never filled in, so callers always
    # receive its default value -- confirm whether that is intended.
    rsp = face_trainResponse()
    ps.process()
    return rsp
16 |
17 |
def train_server():
    """Start the face-training node and serve 'face_train_service'."""
    rospy.init_node('face_train_server')
    s = rospy.Service('face_train_service', face_train, handle_train)
    # The ~ prefix makes this a private (node-namespace) parameter,
    # i.e. it resolves to /face_train_server/iter_num.
    i_n = rospy.get_param('~iter_num')
    ps.set_param(i_n)
    # Bug fix: the old `print "... %d", i_n` never applied the %d
    # substitution (it printed the literal format string followed by
    # the number). Explicit %-formatting works on Python 2 and 3.
    print("Ready to train face. The iteration number is %d" % i_n)
    rospy.spin()
26 |
27 |
28 | if __name__ == "__main__":
29 | train_server()
30 |
--------------------------------------------------------------------------------
/drv_face_train/src/facedetector.h:
--------------------------------------------------------------------------------
1 | #ifndef FACEDETECTOR_H
2 | #define FACEDETECTOR_H
3 |
4 | #include
5 |
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
12 | using namespace std;
13 | using namespace cv;
14 |
// Detects faces in an image with an OpenCV cascade classifier and
// extracts cropped face images for downstream recognition.
class FaceDetector
{
public:
  // path: location of the cascade XML file loaded into cascade_.
  FaceDetector(string path);
  // Detect a single face and return its crop in img_out; returns
  // false when no usable face is found.
  bool getOneFace(Mat img_in, Mat &img_out);
  // Full pipeline: detect faces, draw on img_out, and output both the
  // detected ROIs and the cropped face images.
  bool Process(Mat img_in, Mat &img_out, vector &faceRoi, std::vector &face_imgs);

private:
  // Run the cascade (optionally on the flipped image) and draw results.
  void detectAndDraw(Mat img_in, Mat& img_out, double scale, bool tryflip, vector &face_roi);
  // Crop each ROI out of the input image.
  void getFaceFromRoi(Mat img_in, vector face_roi, vector &face_imgs);
  // Expand a detection to a square ROI; false if it can't fit in img_in.
  bool trySquareRoi(Mat img_in, Rect face_roi, Rect &square_roi);
  // True when the rect is near the image center.
  bool isOnCenter(Rect rect, Mat img_in);

  CascadeClassifier cascade_;
};
30 |
31 | #endif // FACEDETECTOR_H
32 |
--------------------------------------------------------------------------------
/drv_grasp/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required(VERSION 2.8.3)
project(drv_grasp)

#Comment this if don't want debug
# NOTE: this unconditional set makes the if(NOT ...) guard below dead
# until the line is commented out.
set(CMAKE_BUILD_TYPE Debug)

if (NOT CMAKE_BUILD_TYPE)
  set(CMAKE_BUILD_TYPE "Release")
  message(STATUS "No build type selected, default to ${CMAKE_BUILD_TYPE}")
endif()

find_package(catkin REQUIRED COMPONENTS
  cv_bridge
  drv_msgs
  dynamic_reconfigure
  geometry_msgs
  gpd #
  image_geometry
  image_transport
  pcl_ros
  roscpp
  rospy
  sensor_msgs
  std_msgs
  tf2
  tf2_ros
)

# Pin the system OpenCV config to avoid picking up a ROS/OpenCV mix.
set(OpenCV_DIR "/usr/share/OpenCV")
find_package(OpenCV REQUIRED)

find_package(PCL 1.7 REQUIRED)

find_package(Caffe REQUIRED)
message("Caffe DIR is ${Caffe_DIR}")
include_directories(${Caffe_INCLUDE_DIRS})
add_definitions(${Caffe_DEFINITIONS}) # ex. -DCPU_ONLY

catkin_package()

link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})

include_directories(${catkin_INCLUDE_DIRS} ${PCL_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS})

# Shared helper library used by all three nodes. Headers are listed so
# they show up in IDE project views; they are not compiled.
add_library (drv_lib
  src/fetchrgbd.cpp
  src/getsourcecloud.cpp
  src/obstacledetect.cpp
  src/transform.cpp
  src/utilities.cpp

  src/fetchrgbd.h
  src/getsourcecloud.h
  src/obstacledetect.h
  src/transform.h
  src/utilities.h
)

# Link the library's dependencies once; the original repeated this
# identical line after every add_executable, which only appended
# duplicate entries to drv_lib's link interface.
target_link_libraries(drv_lib ${catkin_LIBRARIES} ${OpenCV_LIBS} ${PCL_LIBRARIES} ${Caffe_LIBRARIES})

add_executable(drv_grasp src/drv_grasp.cpp)
target_link_libraries (drv_grasp drv_lib)

add_executable(drv_put src/drv_put.cpp)
target_link_libraries (drv_put drv_lib)

add_executable(drv_frame_service src/drv_frame_service.cpp)
target_link_libraries (drv_frame_service drv_lib)
71 |
--------------------------------------------------------------------------------
/drv_grasp/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | drv_grasp
4 | 0.0.0
5 | The drv_grasp package
6 |
7 | ZhipengDong
8 |
9 | MIT
10 |
11 | catkin
12 | cv_bridge
13 | drv_msgs
14 | dynamic_reconfigure
15 | geometry_msgs
16 | gpd
17 | image_geometry
18 | image_transport
19 | pcl_ros
20 | roscpp
21 | rospy
22 | sensor_msgs
23 | std_msgs
24 | tf2
25 | tf2_ros
26 | cv_bridge
27 | drv_msgs
28 | dynamic_reconfigure
29 | geometry_msgs
30 | gpd
31 | image_geometry
32 | image_transport
33 | pcl_ros
34 | roscpp
35 | rospy
36 | sensor_msgs
37 | std_msgs
38 | tf2
39 | tf2_ros
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/drv_grasp/src/drv_frame_service.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 |
4 | #include
5 |
6 | #include
7 | #include
8 |
9 | using namespace std;
10 |
// Service callback for "drv_frame_service" (drv_msgs/frame_transform).
// req.type selects the conversion direction; an unknown type returns
// false so the service call fails.
// NOTE(review): the per-point loops below are empty -- the actual
// conversions are not implemented yet, so for types 0-2 the response
// is returned unmodified.
bool frame_transform(drv_msgs::frame_transform::Request &req,
                     drv_msgs::frame_transform::Response &res)
{
  if (req.type == 0) {
    // The point in target frame and the transform from
    // target frame to base frame is known,
    // get the point's coordinate in base frame
    for (size_t i = 0; i < req.p_in_tgt.size(); ++i) {
      // TODO: fill res.p_in_base_res from req.p_in_tgt[i]
    }
  }
  else if (req.type == 1) {
    // The point in base frame and the transform from
    // base frame to target frame is known,
    // get the point's coordinate in target frame
    for (size_t i = 0; i < req.p_in_base.size(); ++i) {
      // TODO: fill res.p_in_tgt_res from req.p_in_base[i]
    }
  }
  else if (req.type == 2) {
    // The point's coordinate in base frame and the target frame is known,
    // get the translation from base frame to target frame
    // TODO: not implemented
  }
  else
    return false;
  return true; // Return bool is necessary for service
}
38 |
39 | int main(int argc, char **argv)
40 | {
41 | ros::init(argc, argv, "drv_frame_server");
42 | ros::NodeHandle n;
43 | ros::NodeHandle pnh("~");
44 |
45 | ros::ServiceServer service = n.advertiseService("drv_frame_service", frame_transform);
46 | ROS_INFO("Ready to do transform between frames.");
47 | while(ros::ok()) {
48 | ros::spinOnce();
49 |
50 | }
51 |
52 | return 0;
53 | }
54 |
--------------------------------------------------------------------------------
/drv_grasp/src/fetchrgbd.cpp:
--------------------------------------------------------------------------------
1 | #include "fetchrgbd.h"
2 |
3 | using namespace image_transport;
4 |
// Subscriptions are wired up immediately so frames start being cached
// as soon as the object exists.
FetchRGBD::FetchRGBD()
{
  initRGBDCallback();
}
9 |
// Block until at least one synchronized RGB + depth + camera-info
// triple has been received, then hand out the latest cached copies.
void FetchRGBD::fetchRGBD(cv_bridge::CvImagePtr &rgb,
                          cv_bridge::CvImagePtr &depth,
                          sensor_msgs::CameraInfo &info)
{
  // Spin until the synchronized callback has filled both image
  // pointers; 5 ms sleep keeps this from busy-waiting.
  while (ros::ok()) {
    if (rgb_ptr_ != NULL && depth_ptr_ != NULL)
      break;
    ros::spinOnce();
    ros::Duration(0.005).sleep();
  }
  rgb = rgb_ptr_;
  depth = depth_ptr_;
  info = cam_info_;
}
24 |
// Set up synchronized subscriptions for RGB, depth and camera info.
void FetchRGBD::initRGBDCallback()
{
  rgb_it_.reset(new ImageTransport(nh_));
  depth_it_.reset(new ImageTransport(nh_));

  // Compressed transports cut bandwidth on the two image topics
  sub_rgb_.subscribe(*rgb_it_, "/vision/rgb/image_rect_color", 5, TransportHints("compressed"));
  sub_depth_.subscribe(*depth_it_, "/vision/depth/image_rect", 5, TransportHints("compressedDepth"));
  sub_camera_info_.subscribe(nh_, "/vision/depth/camera_info", 5);

  // Approximate-time sync (queue size 5): rgbdCallback fires once a
  // matching set of all three messages has arrived
  synchronizer_.reset(new Synchronizer(SyncPolicy(5), sub_rgb_, sub_depth_, sub_camera_info_));
  synchronizer_->registerCallback(boost::bind(&FetchRGBD::rgbdCallback, this, _1, _2, _3));
}
37 |
// Cache deep copies of the latest synchronized messages; consumed by
// fetchRGBD().
void FetchRGBD::rgbdCallback(const sensor_msgs::ImageConstPtr &rgb_msg,
                             const sensor_msgs::ImageConstPtr &depth_msg,
                             const sensor_msgs::CameraInfoConstPtr &camera_info_msg)
{
  rgb_ptr_ = cv_bridge::toCvCopy(rgb_msg);
  depth_ptr_ = cv_bridge::toCvCopy(depth_msg);
  cam_info_ = *camera_info_msg;
}
46 |
--------------------------------------------------------------------------------
/drv_grasp/src/fetchrgbd.h:
--------------------------------------------------------------------------------
1 | #ifndef FETCHRGBD_H
2 | #define FETCHRGBD_H
3 |
4 | #include
5 | #include
6 |
7 | #include
8 |
9 | #include
10 | #include
11 | #include
12 | #include
13 |
14 | #include
15 | #include
16 | #include
17 | #include
18 | #include
19 | #include
20 |
21 | #include
22 |
23 | #include
24 |
25 | #include
26 | #include
27 | #include
28 |
29 | using namespace std;
30 |
// Subscribes to RGB, depth and camera-info topics and offers a
// blocking fetch of one time-synchronized frame triple.
class FetchRGBD
{
public:
  FetchRGBD();

  // Block until a synchronized RGB/depth/info set is available, then
  // return the latest cached copies.
  void fetchRGBD(cv_bridge::CvImagePtr &rgb, cv_bridge::CvImagePtr &depth,
                 sensor_msgs::CameraInfo &info);

private:
  ros::NodeHandle nh_;
  // Image transports for the two image streams
  boost::shared_ptr rgb_it_;
  boost::shared_ptr depth_it_;

  image_transport::SubscriberFilter sub_rgb_;
  image_transport::SubscriberFilter sub_depth_;
  // Wire up subscribers and the synchronizer (called by the ctor)
  void initRGBDCallback();

  message_filters::Subscriber sub_camera_info_;

  // Approximate-time policy: the three topics need not carry
  // identical timestamps
  typedef message_filters::sync_policies::ApproximateTime<
  sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::CameraInfo>
  SyncPolicy;

  typedef message_filters::Synchronizer Synchronizer;
  boost::shared_ptr synchronizer_;

  // Latest received data, written by rgbdCallback
  cv_bridge::CvImagePtr rgb_ptr_;
  cv_bridge::CvImagePtr depth_ptr_;
  // Matching camera intrinsics
  sensor_msgs::CameraInfo cam_info_;

  void rgbdCallback(const sensor_msgs::ImageConstPtr& rgb_msg,
                    const sensor_msgs::ImageConstPtr& depth_msg,
                    const sensor_msgs::CameraInfoConstPtr& camera_info_msg);
};
67 |
68 | #endif // FETCHRGBD_H
69 |
--------------------------------------------------------------------------------
/drv_grasp/src/getsourcecloud.h:
--------------------------------------------------------------------------------
1 | #ifndef GETSOURCECLOUD_H
2 | #define GETSOURCECLOUD_H
3 |
4 | //STL
5 | #include
6 | #include
7 | #include
8 |
9 | //PCL
10 | #include
11 | #include
12 |
13 | #include
14 | #include
15 |
16 | #include
17 |
18 | #include
19 | #include
20 |
21 | #include
22 | #include
23 |
24 | #include
25 |
26 | //OpenCV
27 | #include
28 | #include
29 | #include
30 |
31 | #include "utilities.h"
32 |
33 | using namespace std;
34 | using namespace cv;
35 |
// Stateless helpers that convert a depth image into a point cloud (or
// a single 3-D point) using the pinhole intrinsics fx, fy, cx, cy.
class GetSourceCloud
{
public:
  GetSourceCloud();

  // Back-project the whole depth image; depths outside
  // [min_depth, max_depth] are excluded. Returns false on failure.
  static bool getCloud(Mat depth, float fx, float fy, float cx, float cy,
                       float max_depth, float min_depth, PointCloudMono::Ptr &cloud);

  // Back-project the single pixel at (row, col) into pt.
  static bool getPoint(Mat depth, int row, int col, float fx, float fy, float cx, float cy,
                       float maxDepth, float min_depth, pcl::PointXYZ &pt);

};
48 |
49 | #endif // GETSOURCECLOUD_H
50 |
--------------------------------------------------------------------------------
/drv_grasp/src/makeplan.cpp:
--------------------------------------------------------------------------------
1 | #include "makeplan.h"
2 |
// MakePlan is stateless; nothing to initialize.
MakePlan::MakePlan()
{
}
6 |
// Drop isolated points: anything with fewer than 4 neighbors within a
// 1 cm radius is treated as noise.
void MakePlan::removeOutliers(PointCloud::Ptr cloud_in, PointCloud::Ptr &cloud_out)
{
  pcl::RadiusOutlierRemoval outrem;
  // build the filter
  outrem.setInputCloud(cloud_in);
  outrem.setRadiusSearch(0.01);
  outrem.setMinNeighborsInRadius (4);
  // apply filter
  outrem.filter (*cloud_out);
}
17 |
18 | bool MakePlan::getAveragePoint(PointCloud::Ptr cloud_in, pcl::PointXYZ &avrPt)
19 | {
20 | if (!cloud_in->points.size())
21 | {
22 | return false;
23 | }
24 |
25 | pcl::PointXYZ min_dis_point;
26 | float min_dis = 100.0;
27 | for (PointCloud::const_iterator it = cloud_in->begin(); it != cloud_in->end(); ++ it)
28 | {
29 | float dis = pow(it->x, 2) + pow(it->y, 2);
30 | if (dis < min_dis)
31 | {
32 | min_dis = dis;
33 | min_dis_point = *it;
34 | }
35 | }
36 |
37 | smartOffset(min_dis_point, 0.02, 0.05);
38 | avrPt = min_dis_point;
39 | return true;
40 | }
41 |
// Remove NaN points, then crop the cloud to +/-0.2 m in x and y and
// 0.5-2.0 m in z.
void MakePlan::removeNans(PointCloud::Ptr cloud_in, PointCloud::Ptr &cloud_out)
{
  std::vector< int > index;
  PointCloud::Ptr cloud_filtered_nan (new PointCloud);

  pcl::removeNaNFromPointCloud(*cloud_in, *cloud_filtered_nan, index);

  pcl::PointIndices::Ptr indices_x(new pcl::PointIndices);
  pcl::PointIndices::Ptr indices_xy(new pcl::PointIndices);

  // Chain three PassThrough passes via indices so each pass only
  // narrows the surviving point set
  pcl::PassThrough ptfilter; // Initializing with true will allow us to extract the removed indices
  ptfilter.setInputCloud (cloud_filtered_nan);
  ptfilter.setFilterFieldName ("x");
  ptfilter.setFilterLimits (-0.2, 0.2);
  ptfilter.filter (indices_x->indices);

  ptfilter.setIndices (indices_x);
  ptfilter.setFilterFieldName ("y");
  ptfilter.setFilterLimits (-0.2, 0.2);
  ptfilter.filter (indices_xy->indices);

  ptfilter.setIndices (indices_xy);
  ptfilter.setFilterFieldName ("z");
  ptfilter.setFilterLimits (0.5, 2.0);// this method can only be used on source cloud, aka cloud whose frame is camera optical frame
  ptfilter.filter (*cloud_out);
}
68 |
// Entry point: filter cloud_in and pick a representative point.
// NOTE(review): a, b, c, d (plane coefficients, presumably) are never
// used in this body -- confirm whether plane-based filtering was
// intended here.
bool MakePlan::process(PointCloud::Ptr cloud_in, float a, float b, float c, float d, pcl::PointXYZ &avrPt)
{
  if (!cloud_in->points.size())
  {
    return false;
  }

  PointCloud::Ptr cloud_filtered (new PointCloud);
#ifdef USE_CENTER
  // Compile-time switch: skip outlier removal and use the raw cloud
  cloud_filtered = cloud_in;
#else
  removeOutliers(cloud_in, cloud_filtered);
#endif
  bool success = getAveragePoint(cloud_filtered, avrPt);
  return success;
}
85 |
--------------------------------------------------------------------------------
/drv_grasp/src/makeplan.h:
--------------------------------------------------------------------------------
1 | #ifndef MAKEPLAN_H
2 | #define MAKEPLAN_H
3 |
4 | //STL
5 | #include
6 | #include
7 | #include
8 |
9 | //PCL
10 | #include
11 | #include
12 |
13 | #include
14 | #include
15 |
16 | #include
17 |
18 | #include
19 |
20 | #include
21 | #include
22 | #include
23 | #include
24 | #include
25 | #include
26 | #include
27 |
28 | #include
29 | #include
30 |
31 | #include
32 |
33 | #include
34 |
35 | #include
36 |
37 | #include
38 | #include
39 | #include
40 |
41 | #include
42 | #include
43 | #include
44 |
45 | #include
46 | #include
47 | #include
48 |
// Filters a point cloud and selects a representative grasp point.
class MakePlan
{
public:
  MakePlan();

  typedef pcl::PointCloud PointCloud;

  // Filter cloud_in (outlier removal unless USE_CENTER is defined) and
  // output a chosen point; false on an empty cloud.
  bool process(PointCloud::Ptr cloud_in, float a, float b, float c, float d, pcl::PointXYZ &avrPt);
  // Strip NaNs and crop to the working volume (camera optical frame).
  void removeNans(PointCloud::Ptr cloud_in, PointCloud::Ptr &cloud_out);
  // Offset a point by off_xy / off_z (implemented elsewhere).
  void smartOffset(pcl::PointXYZ &p_in, float off_xy, float off_z);

private:
  // Radius-based outlier removal (1 cm radius, min 4 neighbors).
  void removeOutliers(PointCloud::Ptr cloud_in, PointCloud::Ptr &cloud_out);
  // Pick the point nearest the camera axis and apply smartOffset.
  bool getAveragePoint(PointCloud::Ptr cloud_in, pcl::PointXYZ &avrPt);

};
65 |
66 | #endif // MAKEPLAN_H
67 |
68 |
--------------------------------------------------------------------------------
/drv_grasp/src/transform.cpp:
--------------------------------------------------------------------------------
1 | #include "transform.h"
2 |
// Attach the TF listener to the buffer. nh_ is declared before
// tf_buffer_/tf_listener_ in the header, so the node handle is fully
// constructed before the listener starts using it.
Transform::Transform() :
  tf_buffer_(),
  tf_listener_(tf_buffer_, nh_)
{
  // Initialize node handler before tf_buffer is important
}
9 |
10 | bool Transform::getTransform(string base_frame, string header_frame)
11 | {
12 | try {
13 | // While we aren't supposed to be shutting down
14 | while (ros::ok()) {
15 | // Check if the transform from map to quad can be made right now
16 | if (tf_buffer_.canTransform(base_frame, header_frame, ros::Time(0))) {
17 | // Get the transform
18 | tf_handle_ = tf_buffer_.lookupTransform(base_frame, header_frame, ros::Time(0));
19 | return true;
20 | }
21 | else {
22 | ROS_WARN("Transform: Frame %s does not exist.", base_frame.c_str());
23 | }
24 |
25 | // Handle callbacks and sleep for a small amount of time
26 | // before looping again
27 | ros::spinOnce();
28 | ros::Duration(0.005).sleep();
29 | }
30 | }
31 | // Catch any exceptions that might happen while transforming
32 | catch (tf2::TransformException& ex)
33 | {
34 | ROS_ERROR("Exception transforming %s to %s: %s",
35 | base_frame.c_str(),
36 | header_frame.c_str(),
37 | ex.what());
38 | }
39 | }
40 |
// Apply the transform cached by getTransform() to every point of
// cloud_in, writing the result into cloud_out.
// NOTE(review): assumes cloud_out already points at a valid cloud
// object; it is resized here to match cloud_in.
void Transform::doTransform(PointCloudMono::Ptr cloud_in,
                            PointCloudMono::Ptr &cloud_out)
{
  geometry_msgs::Vector3 trans = tf_handle_.transform.translation;
  geometry_msgs::Quaternion rotate = tf_handle_.transform.rotation;

  // Compose translation * rotation into a single affine transform
  Eigen::Transform t = Eigen::Translation3f(trans.x,
                                            trans.y,
                                            trans.z)
      * Eigen::Quaternion(rotate.w, rotate.x, rotate.y, rotate.z);

  cloud_out->height = cloud_in->height;
  cloud_out->width = cloud_in->width;
  cloud_out->is_dense = false;
  cloud_out->resize(cloud_out->height * cloud_out->width);

  // Transform point-by-point, preserving the cloud's organization
  Eigen::Vector3f point;
  size_t i = 0;
  for (PointCloudMono::const_iterator pit = cloud_in->begin();
       pit != cloud_in->end(); ++pit) {
    point = t * Eigen::Vector3f(pit->x, pit->y, pit->z);
    cloud_out->points[i].x = point.x();
    cloud_out->points[i].y = point.y();
    cloud_out->points[i].z = point.z();
    ++i;
  }
}
68 |
// Apply the transform cached by getTransform() to a single point.
void Transform::doTransform(pcl::PointXYZ p_in, pcl::PointXYZ &p_out)
{
  geometry_msgs::Vector3 trans = tf_handle_.transform.translation;
  geometry_msgs::Quaternion rotate = tf_handle_.transform.rotation;

  // Compose translation * rotation into a single affine transform
  Eigen::Transform t = Eigen::Translation3f(trans.x,
                                            trans.y,
                                            trans.z)
      * Eigen::Quaternion(rotate.w, rotate.x, rotate.y, rotate.z);

  Eigen::Vector3f point;

  point = t * Eigen::Vector3f(p_in.x, p_in.y, p_in.z);
  p_out.x = point.x();
  p_out.y = point.y();
  p_out.z = point.z();
}
86 |
--------------------------------------------------------------------------------
/drv_grasp/src/transform.h:
--------------------------------------------------------------------------------
1 | #ifndef TRANSFORM_H
2 | #define TRANSFORM_H
3 |
4 | #include
5 |
6 | #include
7 | #include
8 | #include
9 |
10 | #include
11 |
12 | #include
13 |
14 | #include
15 | #include
16 | #include
17 | #include
18 |
19 | #include
20 |
21 | #include
22 | #include
23 |
24 | #include "utilities.h"
25 |
26 | using namespace std;
27 |
// Looks up a TF transform once (getTransform) and then applies it to
// clouds or single points without further TF traffic.
class Transform
{
public:
  Transform();

  // Block until the base_frame <- header_frame transform is available
  // and cache it; false on shutdown or TF error.
  bool getTransform(string base_frame, string header_frame);

  // Apply the cached transform.
  void doTransform(PointCloudMono::Ptr cloud_in, PointCloudMono::Ptr &cloud_out);
  void doTransform(pcl::PointXYZ p_in, pcl::PointXYZ &p_out);

private:
  ros::NodeHandle nh_;
  // These declaration must be after the node initialization
  tf2_ros::Buffer tf_buffer_;
  // This is mandatory and should be declared before while loop
  tf2_ros::TransformListener tf_listener_;

  // Transform cached by getTransform(), used by both doTransform()s
  geometry_msgs::TransformStamped tf_handle_;
};
47 |
48 | #endif // TRANSFORM_H
49 |
--------------------------------------------------------------------------------
/drv_motor/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required(VERSION 2.8.3)
project(drv_motor)

# Collect every source file under src/ into SRC_LIST.
aux_source_directory(./src SRC_LIST)

# Default to Release only when the user did not pick a build type.
if (NOT CMAKE_BUILD_TYPE)
  set(CMAKE_BUILD_TYPE "Release")
  message(STATUS "No build type selected, default to ${CMAKE_BUILD_TYPE}")
endif()

find_package(catkin REQUIRED COMPONENTS
  dynamic_reconfigure
  geometry_msgs
  pcl_ros
  roscpp
  rospy
  sensor_msgs
  std_msgs
  drv_msgs
  tf2
  tf2_ros
)

catkin_package()

include_directories(
  include
  ${catkin_INCLUDE_DIRS}
)

add_executable(drv_motor ${SRC_LIST})

target_link_libraries(drv_motor ${catkin_LIBRARIES})
34 |
--------------------------------------------------------------------------------
/drv_motor/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | drv_motor
4 | 0.0.0
5 | The drv_motor package
6 |
7 | Zhipeng Dong
8 |
9 | MIT
10 |
11 | catkin
12 | drv_msgs
13 | dynamic_reconfigure
14 | geometry_msgs
15 | pcl_ros
16 | roscpp
17 | rospy
18 | sensor_msgs
19 | std_msgs
20 | tf2
21 | tf2_ros
22 | drv_msgs
23 | dynamic_reconfigure
24 | geometry_msgs
25 | pcl_ros
26 | roscpp
27 | rospy
28 | sensor_msgs
29 | std_msgs
30 | tf2
31 | tf2_ros
32 |
33 |
34 |
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/drv_motor/src/getpose.cpp:
--------------------------------------------------------------------------------
1 | #include "getpose.h"
2 |
// Initialize the servo search limits (degrees) and start the sweep
// toward larger yaw values.
GetPose::GetPose()
{
  min_yaw_ = 0;
  max_yaw_ = 180;
  yaw_step_ = 20;
  // Pitch limits are stored but getNextPosition() currently pins
  // pitch at 60 -- TODO confirm whether a pitch scan was intended.
  min_pitch_ = 60;
  max_pitch_ = 80;
  pitch_step_ = 10;

  // true = sweep yaw upward, false = downward
  moveDirection = true;
}
14 |
15 | bool GetPose::getNextPosition(int &pitch, int &yaw)
16 | {
17 | pitch = 60;
18 | if (moveDirection) {
19 | if (yaw + yaw_step_ > max_yaw_) {
20 | yaw -= yaw_step_;
21 | moveDirection = !moveDirection;
22 | return true;
23 | }
24 | else {
25 | yaw += yaw_step_;
26 | }
27 | }
28 | else {
29 | if (yaw - yaw_step_ < min_yaw_) {
30 | moveDirection = true; // reset
31 | return false;
32 | }
33 | else {
34 | yaw -= yaw_step_;
35 | }
36 | }
37 | return true;
38 | }
39 |
--------------------------------------------------------------------------------
/drv_motor/src/getpose.h:
--------------------------------------------------------------------------------
1 | #ifndef GETPOSE_H
2 | #define GETPOSE_H
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | using namespace std;
9 |
// Generates the next servo pose (pitch, yaw) for a back-and-forth yaw
// sweep between configured limits.
class GetPose
{
public:
  GetPose();

  // Advance to the next pose; returns false once the sweep completes
  // (and internally resets for the next one).
  bool getNextPosition(int &pitch, int &yaw);

private:
  // Sweep limits and step sizes, in degrees (set in the ctor)
  int min_yaw_;
  int max_yaw_;
  int yaw_step_;
  int min_pitch_;
  int max_pitch_;
  int pitch_step_;

  // true = yaw increasing, false = yaw decreasing
  bool moveDirection;
};
27 |
28 | #endif // GETPOSE_H
29 |
--------------------------------------------------------------------------------
/drv_msgs/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required(VERSION 2.8.3)
project(drv_msgs)

find_package(catkin REQUIRED COMPONENTS
  actionlib_msgs
  geometry_msgs
  message_generation
  roscpp
  rospy
  sensor_msgs
  std_msgs
)

# Messages shared by the DRV nodes
add_message_files(
  FILES
  recognized_faces.msg
  recognized_objects.msg
  recognized_target.msg
  target_info.msg
)

# Services shared by the DRV nodes
add_service_files(
  FILES
  recognize.srv
  refine_depth.srv
  face_recognize.srv
  face_train.srv
  servo.srv
  user_select.srv
  frame_transform.srv
)

# Top-level vision action used by drv_action / drv_brain
add_action_files(
  DIRECTORY action
  FILES Vision.action
)

generate_messages(
  DEPENDENCIES
  actionlib_msgs
  geometry_msgs
  sensor_msgs
  std_msgs
)

catkin_package(
  CATKIN_DEPENDS
  actionlib_msgs
  message_runtime
  geometry_msgs
  sensor_msgs
  std_msgs
)

include_directories(
  ${catkin_INCLUDE_DIRS}
)

# Ensure generated message headers exist before dependents build
add_dependencies(${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
60 |
--------------------------------------------------------------------------------
/drv_msgs/action/Vision.action:
--------------------------------------------------------------------------------
1 | #goal definition
2 | uint8 mode # 0: not active, 1: object location, 2: facial recognition (not support for now)
string target_label # The target label in English. If not set, mode 1 will not be executed
4 | string target_color # [Optional] parameter for target color, example: blue, orange, green...
5 | ---
6 | #result definition
7 | geometry_msgs/PoseStamped target_pose # Return target pose in camera_yaw_frame if mode=1
8 | geometry_msgs/PoseStamped target_location # Return target location in map frame if mode=1
9 |
10 | int32[] ids # Return faces' id if mode=2, 0 for unknown face
11 | std_msgs/String[] names # Return names of face if mode=2
12 | ---
13 | #feedback
uint8 status # Feedback working state for each mode, 0:not working (wandering), 1:working, 2:error occurred, 3:succeeded
--------------------------------------------------------------------------------
/drv_msgs/msg/recognized_faces.msg:
--------------------------------------------------------------------------------
1 | Header header
2 |
3 | int32[] name_ids
4 | std_msgs/String[] names
5 |
6 | # the bounding box, data order: leftup x y, bottomright x y
7 | std_msgs/UInt16MultiArray[] bbox_arrays
--------------------------------------------------------------------------------
/drv_msgs/msg/recognized_objects.msg:
--------------------------------------------------------------------------------
1 | Header header
2 |
3 | std_msgs/String[] labels
4 |
5 | std_msgs/UInt32MultiArray[] objects_pixels_arrays
6 |
7 | # the bounding box, data order: leftup x y, bottomright x y
8 | std_msgs/UInt16MultiArray[] bbox_arrays
--------------------------------------------------------------------------------
/drv_msgs/msg/recognized_target.msg:
--------------------------------------------------------------------------------
1 | Header header
2 |
3 | std_msgs/String label
4 |
5 | # pixel ids for target in image
6 | std_msgs/UInt32MultiArray tgt_pixels
7 |
8 | # target bounding box x, y, x, y
9 | std_msgs/UInt16MultiArray tgt_bbox_array
10 |
11 | # target bounding box center x, y
12 | std_msgs/UInt16MultiArray tgt_bbox_center
--------------------------------------------------------------------------------
/drv_msgs/msg/target_info.msg:
--------------------------------------------------------------------------------
1 | Header header
2 |
3 | ## Readable labels of object (defined by PASCAL-VOC)
4 | # 'aeroplane', 'bicycle', 'bird', 'boat',
5 | # 'bottle', 'bus', 'car', 'cat', 'chair',
6 | # 'cow', 'diningtable', 'dog', 'horse',
7 | # 'motorbike', 'person', 'pottedplant',
8 | # 'sheep', 'sofa', 'train', 'tvmonitor'
9 | std_msgs/String label
10 |
11 | ## Name for person
12 | std_msgs/String name
13 |
14 | ## what to do to the target,
15 | ## includes 'find' 'follow' 'approach' 'grasp' 'put'
16 | std_msgs/String action
17 |
18 | ## hsv
19 | std_msgs/UInt16MultiArray hsv_color
20 |
21 | ## The target's current location
22 | std_msgs/String location_curr
23 |
24 | ## The target's target location
25 | std_msgs/String location_tgt
26 |
27 | ## Object's geometry
28 | geometry_msgs/Point size
## Distance from object's centroid to its base (for putting)
30 | std_msgs/Float32 center_to_base
31 |
--------------------------------------------------------------------------------
/drv_msgs/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | drv_msgs
4 | 0.0.0
5 | The drv_msgs package
6 |
7 | aicrobo
8 |
9 | TODO
10 |
11 | catkin
12 | actionlib_msgs
13 | geometry_msgs
14 | message_generation
15 | roscpp
16 | rospy
17 | sensor_msgs
18 | std_msgs
19 | message_runtime
20 | message_runtime
21 | actionlib_msgs
22 | geometry_msgs
23 | roscpp
24 | rospy
25 | sensor_msgs
26 | std_msgs
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
--------------------------------------------------------------------------------
/drv_msgs/srv/face_recognize.srv:
--------------------------------------------------------------------------------
1 | sensor_msgs/Image[] images_in
2 |
3 | ---
4 | int32[] face_label_ids
--------------------------------------------------------------------------------
/drv_msgs/srv/face_train.srv:
--------------------------------------------------------------------------------
1 |
2 | ---
3 | float32 accuracy
--------------------------------------------------------------------------------
/drv_msgs/srv/frame_transform.srv:
--------------------------------------------------------------------------------
1 | int64 type
2 | geometry_msgs/Point32[] p_in_tgt
3 | geometry_msgs/Point32[] p_in_base
4 | geometry_msgs/Transform base_to_tgt
5 | geometry_msgs/Transform tgt_to_base
6 |
7 | ---
8 | geometry_msgs/Point32[] p_in_tgt_res
9 | geometry_msgs/Point32[] p_in_base_res
10 | geometry_msgs/Transform base_to_tgt_res
11 | geometry_msgs/Transform tgt_to_base_res
12 |
--------------------------------------------------------------------------------
/drv_msgs/srv/recognize.srv:
--------------------------------------------------------------------------------
1 | sensor_msgs/Image img_in
2 |
3 | ---
4 | sensor_msgs/Image img_out
5 | drv_msgs/recognized_objects obj_info
--------------------------------------------------------------------------------
/drv_msgs/srv/refine_depth.srv:
--------------------------------------------------------------------------------
1 | sensor_msgs/CompressedImage rgb_in
2 | sensor_msgs/Image depth_in
3 |
4 | ---
5 | sensor_msgs/Image depth_out
6 |
--------------------------------------------------------------------------------
/drv_msgs/srv/servo.srv:
--------------------------------------------------------------------------------
1 | # If to_next is true, servo will try to go to
2 | # next search angle and pitch and yaw will be ignored
3 | bool to_next
4 |
5 | # You can specify the angle that the servo should go to
6 | int64 pitch
7 | int64 yaw
8 | ---
9 | # true if the servo can go to target pose,
10 | # if the next pose does not exist or
11 | # the pose can not be reached, return false
12 | bool success
13 |
--------------------------------------------------------------------------------
/drv_msgs/srv/user_select.srv:
--------------------------------------------------------------------------------
1 | int64 select_num
2 | ---
3 | # 0 represent no select, 1 and above represent id
4 | int64 selected_id
--------------------------------------------------------------------------------
/drv_pointcloud/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required(VERSION 2.8.3)
project(drv_pointcloud)

# Collect every source file under src/ into SRC_LIST.
aux_source_directory(./src SRC_LIST)

#Comment this if don't want debug
#set(CMAKE_BUILD_TYPE Debug)
# Default to Release only when the user did not pick a build type.
if (NOT CMAKE_BUILD_TYPE)
  set(CMAKE_BUILD_TYPE "Release")
  message(STATUS "No build type selected, default to ${CMAKE_BUILD_TYPE}")
endif()

find_package(catkin REQUIRED COMPONENTS
  cv_bridge
  drv_msgs
  dynamic_reconfigure
  geometry_msgs
  image_geometry
  image_transport
  pcl_ros
  roscpp
  rospy
  sensor_msgs
  std_msgs
  tf2
  tf2_ros
)

find_package(PCL 1.7 REQUIRED)

#add dynamic reconfigure api
generate_dynamic_reconfigure_options(cfg/pointcloud.cfg)

catkin_package()

link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})

include_directories(${catkin_INCLUDE_DIRS} ${PCL_INCLUDE_DIRS})

add_executable(drv_pointcloud ${SRC_LIST})

target_link_libraries(drv_pointcloud ${catkin_LIBRARIES} ${PCL_LIBRARIES})
44 |
--------------------------------------------------------------------------------
/drv_pointcloud/cfg/pointcloud.cfg:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# Dynamic-reconfigure parameter definitions for the drv_pointcloud node.
PACKAGE = "drv_pointcloud"

from dynamic_reconfigure.parameter_generator_catkin import *

gen = ParameterGenerator()

# gen.add(name, type, level, description, default, min, max)
gen.add("max_depth", double_t, 0, "Max distance the camera can see", 4.0, 1.0, 8.0)
gen.add("min_depth", double_t, 0, "Min distance the camera can see", 0.5, 0.3, 0.7)

exit(gen.generate(PACKAGE, "drv_pointcloud", "pointcloud"))
--------------------------------------------------------------------------------
/drv_pointcloud/src/getsourcecloud.h:
--------------------------------------------------------------------------------
1 | #ifndef GETSOURCECLOUD_H
2 | #define GETSOURCECLOUD_H
3 |
4 | //STL
5 | #include
6 | #include
7 | #include
8 |
9 | //PCL
10 | #include
11 | #include
12 |
13 | #include
14 | #include
15 |
16 | #include
17 |
18 | #include
19 | #include
20 |
21 | #include
22 | #include
23 |
24 | #include
25 |
26 | //OpenCV
27 | #include
28 | #include
29 | #include
30 |
// Stateless helper that converts a registered RGB + depth image pair into
// a PCL point cloud using pinhole intrinsics (fx, fy, cx, cy).
// NOTE(review): the template arguments of pcl::PointCloud were lost when
// this file was dumped (angle-bracket content stripped); presumably a
// colored point type such as PointXYZRGB -- confirm against the .cpp.
class GetSourceCloud
{
public:
  GetSourceCloud();

  // Build cloudSource from the color/depth pair; maxDepth/minDepth are
  // assumed to bound the usable depth range (TODO confirm in the .cpp).
  // Returns true on success.
  static bool getCloud(cv::Mat color, cv::Mat depth, float fx, float fy, float cx, float cy, float maxDepth, float minDepth,
                       pcl::PointCloud::Ptr &cloudSource);

};
40 |
41 | #endif // GETSOURCECLOUD_H
42 |
--------------------------------------------------------------------------------
/drv_pointcloud/test_publisher.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # license removed for brevity
3 | import rospy
4 | from geometry_msgs.msg import PoseStamped
5 |
6 |
def talker():
    """Continuously publish a fixed test pose on /vision/grasp/location.

    Publishes a PoseStamped with position (1, -1, 0) at 10 Hz until the
    node is shut down. Used to exercise downstream grasp subscribers.
    """
    pub = rospy.Publisher('/vision/grasp/location', PoseStamped, queue_size=1)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(10)  # 10hz
    while not rospy.is_shutdown():
        msg = PoseStamped()
        msg.pose.position.x = 1
        msg.pose.position.y = -1
        pub.publish(msg)
        rate.sleep()


if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
23 |
--------------------------------------------------------------------------------
/drv_recognize/CMakeLists.txt:
--------------------------------------------------------------------------------
# Build script for drv_recognize: a shared library of detection helpers
# plus three node executables (color recognizer, hand recognizer, depth
# refiner).
cmake_minimum_required(VERSION 2.8.3)
project(drv_recognize)

# You only need add this to use C++ 11 features
add_compile_options(-std=c++11)

aux_source_directory(./src SRC_LIST)

# NOTE(review): forcing Debug here makes the fallback guard below dead
# code -- the "if (NOT CMAKE_BUILD_TYPE)" branch can never fire. Remove
# this line to restore the Release default.
set(CMAKE_BUILD_TYPE "Debug")
if (NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE "Release")
    message(STATUS "No build type selected, default to ${CMAKE_BUILD_TYPE}")
endif()

find_package(catkin REQUIRED COMPONENTS
  cv_bridge
  dynamic_reconfigure
  drv_msgs
  image_transport
  roscpp
  rospy
  tf2
  tf2_ros
)
find_package(Eigen3 REQUIRED)

# NOTE(review): machine-specific absolute path; breaks builds on any
# other host -- consider a cache variable or environment lookup.
set(Caffe_DIR "/home/aicrobo/video_prop_networks/lib/build-caffe-Desktop-Default")
find_package(Caffe REQUIRED)
message("Caffe DIR is ${Caffe_DIR}")
include_directories(${Caffe_INCLUDE_DIRS})
add_definitions(${Caffe_DEFINITIONS})    # ex. -DCPU_ONLY


set(OpenCV_DIR "/usr/share/OpenCV")
find_package(OpenCV REQUIRED )
message("Open CV version is ${OpenCV_VERSION}")

# Add dynamic reconfigure API
generate_dynamic_reconfigure_options(cfg/recog.cfg)

catkin_package()

include_directories(
  ${catkin_INCLUDE_DIRS} ${EIGEN3_INCLUDE_DIR}
)

add_library (drv_recognize_lib
  src/detectcolor.cpp
  src/processdepth.cpp
  src/refinedepth.cpp
  src/transform.cpp

  src/detectcolor.h
  src/processdepth.h
  src/refinedepth.h
  src/transform.h
)

# NOTE(review): target_link_libraries is invoked three times on
# drv_recognize_lib with overlapping lists, and ${PCL_LIBRARIES} is used
# although find_package(PCL) is never called (it expands empty). A single
# consolidated call would be clearer.
add_executable(drv_recognize_color src/drv_recognize_color.cpp)
target_link_libraries(drv_recognize_lib ${catkin_LIBRARIES} ${OpenCV_LIBS} ${Caffe_LIBRARIES})
target_link_libraries (drv_recognize_color drv_recognize_lib)

add_executable(drv_recognize_hand src/drv_recognize_hand.cpp)
target_link_libraries(drv_recognize_lib ${catkin_LIBRARIES} ${OpenCV_LIBS} ${PCL_LIBRARIES})
target_link_libraries (drv_recognize_hand drv_recognize_lib)

add_executable(drv_refine_depth src/drv_refine_depth.cpp)
target_link_libraries(drv_recognize_lib ${catkin_LIBRARIES} ${OpenCV_LIBS} ${PCL_LIBRARIES})
target_link_libraries (drv_refine_depth drv_recognize_lib)
70 |
--------------------------------------------------------------------------------
/drv_recognize/cfg/recog.cfg:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# Dynamic-reconfigure definitions for drv_recognize: low/high HSV
# threshold bounds used by the color detector.
PACKAGE = "drv_recognize"
from dynamic_reconfigure.parameter_generator_catkin import *

# name - a string which specifies the name under which this parameter should be stored
# type - defines the type of value stored, and can be any of int_t, double_t, str_t, or bool_t
# level - A bitmask which will later be passed to the dynamic reconfigure callback.
#         When the callback is called all of the level values for parameters that have been changed are ORed together and the resulting value is passed to the callback.
# description - string which describes the parameter
# default - specifies the default value
# min - specifies the min value (optional and does not apply to strings and bools)
# max - specifies the max value (optional and does not apply to strings and bools)
gen = ParameterGenerator()
gen.add("hue_low_cfg", int_t, 0, "hue_low_cfg", 26, 0, 255)
gen.add("sat_low_cfg", int_t, 0, "sat_low_cfg", 34, 0, 255)
gen.add("val_low_cfg", int_t, 0, "val_low_cfg", 46, 0, 255)
gen.add("hue_high_cfg", int_t, 0, "hue_high_cfg", 77, 0, 255)
gen.add("sat_high_cfg", int_t, 0, "sat_high_cfg", 255, 0, 255)
gen.add("val_high_cfg", int_t, 0, "val_high_cfg", 255, 0, 255)

exit(gen.generate(PACKAGE, "drv_recognize", "recog"))
--------------------------------------------------------------------------------
/drv_recognize/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | drv_recognize
4 | 0.0.0
5 | The drv_recognize package
6 |
7 | donghw
8 |
9 | MIT
10 |
11 | catkin
12 | cv_bridge
13 | dynamic_reconfigure
14 | drv_msgs
15 | image_transport
16 | roscpp
17 | rospy
18 | tf2
19 | tf2_ros
20 | cv_bridge
21 | dynamic_reconfigure
22 | drv_msgs
23 | image_transport
24 | roscpp
25 | rospy
26 | tf2
27 | tf2_ros
28 |
29 |
30 |
31 |
32 |
--------------------------------------------------------------------------------
/drv_recognize/scripts/drv_recognize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import roslib
4 | import rospy
5 | import cv2
6 | import process as ps
7 |
8 | from std_msgs.msg import String
9 | from std_msgs.msg import UInt16MultiArray
10 | from drv_msgs.msg import recognized_objects
11 | from drv_msgs.srv import *
12 | from cv_bridge import CvBridge, CvBridgeError
13 |
14 | roslib.load_manifest('drv_recognize')
15 |
16 |
def handle_recognize(req):
    """Service handler for 'drv_recognize'.

    Runs the Faster R-CNN detector (ps.process) on req.img_in and returns a
    recognizeResponse whose img_out is the input image annotated with
    labeled bounding boxes and whose obj_info carries the labels and
    bbox_arrays for each detection. An empty response is returned if the
    image is not 640x480 or conversion fails.
    """
    rsp = recognizeResponse()

    bridge = CvBridge()
    try:
        # Convert rosmsg to cv image
        cv_image = bridge.imgmsg_to_cv2(req.img_in, "bgr8")
        (rows, cols, channels) = cv_image.shape

        if cols != 640 or rows != 480:
            # Fix: rospy has no WARN() function -- the original
            # rospy.WARN(...) would raise AttributeError at runtime.
            rospy.logwarn('Can not get image.')
            return rsp
        else:
            result = ps.process(cv_image)
            ro_msg = recognized_objects()

            for i in result:
                # Each result entry is [bbox, score, class_name]
                bbox = i[0]
                score = i[1]
                class_name = i[2]

                # Draw the detection box and its label on the output image
                cv2.rectangle(cv_image, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 0, 255), 2)
                cv2.putText(cv_image, '{:s} {:.3f}'.format(class_name, score),
                            (int(bbox[0]), int(bbox[1]) - 2),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

                label_str_msg = String()
                label_str_msg.data = class_name

                bbox_array_msg = UInt16MultiArray()
                for j in bbox:
                    bbox_array_msg.data.append(j)

                ro_msg.labels.append(label_str_msg)
                ro_msg.bbox_arrays.append(bbox_array_msg)

            # Propagate the request image's header so results can be
            # matched to the frame they came from
            ro_msg.header = req.img_in.header

            rsp.img_out = bridge.cv2_to_imgmsg(cv_image, "bgr8")
            rsp.obj_info = ro_msg
            return rsp

    except CvBridgeError as e:
        print(e)
        return rsp
66 |
67 |
def recognize_server():
    """Start the 'drv_recognize' service node and block until shutdown."""
    rospy.init_node('rcnn_recognize_server')
    s = rospy.Service('drv_recognize', recognize, handle_recognize)
    print "Ready to recognize objects with RCNN."
    rospy.spin()


if __name__ == "__main__":
    try:
        recognize_server()
    except rospy.ROSInterruptException:
        rospy.loginfo("RCNN detection server terminated.")
81 |
--------------------------------------------------------------------------------
/drv_recognize/scripts/drv_recognize_client.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import rospy
4 | import cv2
5 | from drv_msgs.srv import *
6 | from sensor_msgs.msg import CompressedImage
7 | from cv_bridge import CvBridge
8 |
9 |
def call_recognize(img_msg):
    """Block until the 'drv_recognize' service is available, call it with
    img_msg, and return the recognizeResponse (implicitly None on failure).
    """
    rospy.wait_for_service('drv_recognize')
    try:
        recognizer = rospy.ServiceProxy('drv_recognize', recognize)
        response = recognizer(img_msg)
        return response
    except rospy.ServiceException, e:
        print "Service call failed: %s" % e
18 |
19 |
20 | def image_callback(img_msg):
21 | resp = call_recognize(img_msg)
22 |
23 | # rate = rospy.Rate(0.2)
24 | # rate.sleep()
25 |
26 | bg = CvBridge()
27 | cv_img_labeled = bg.imgmsg_to_cv2(resp.img_out, 'bgr8')
28 |
29 | print resp.obj_info.labels
30 | print resp.obj_info.box_array
31 |
32 |
33 | def image_listener():
34 | rospy.init_node('recognize_client', anonymous=True)
35 | rospy.Subscriber('image_rect_color', CompressedImage, image_callback, queue_size=1, buff_size=921600)
36 |
37 | while not rospy.is_shutdown():
38 | rospy.spin()
39 | print 'reached'
40 |
41 |
42 | if __name__ == "__main__":
43 | print "Requesting"
44 | image_listener()
45 |
--------------------------------------------------------------------------------
/drv_recognize/scripts/process.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # Make sure these 2 lines are in your ~/.bashrc:
4 |
5 | # export PYTHONPATH="/home/USER/py-faster-rcnn/lib:/home/USER/py-faster-rcnn/caffe-fast-rcnn/python:${PYTHONPATH}"
6 | # export DRV=/home/USER/catkin_ws/src/drv_package
7 |
8 | # Change 'USER' according to your environment
9 |
10 | # Author: Zhipeng Dong
11 | # 2017.9.6
12 |
13 | # --------------------------------------------------------
14 |
15 | import os
16 | import sys
17 | from fast_rcnn.config import cfg
18 | from fast_rcnn.test import im_detect
19 | from fast_rcnn.nms_wrapper import nms
20 | import numpy as np
21 | import caffe
22 | import timeit
23 |
24 | if "DRV" not in os.environ:
25 | print "Can't find environment variable DRV."
26 | sys.exit(1)
27 |
28 | dir_prefix = os.environ['DRV'] + '/supplements/object_recognize/'
29 | prototxt = dir_prefix + 'faster_rcnn_test.pt'
30 | caffemodel = dir_prefix + 'VGG16_faster_rcnn_final.caffemodel'
31 |
32 | if os.path.isfile(prototxt) and os.path.isfile(caffemodel):
33 | print 'Found Caffe prototxt and model.'
34 | else:
35 | print 'Caffe prototxt or model not found!'
36 | sys.exit(2)
37 |
38 | CLASSES = ('__background__',
39 | 'aeroplane', 'bicycle', 'bird', 'boat',
40 | 'bottle', 'bus', 'car', 'cat', 'chair',
41 | 'cow', 'table', 'dog', 'horse',
42 | 'motorbike', 'person', 'plant',
43 | 'sheep', 'sofa', 'train', 'tv')
44 |
45 |
def process(im):
    """Run Faster R-CNN object detection on BGR image `im`.

    Returns a list of [bbox, score, cls] entries, where bbox is
    [x_min, y_min, x_max, y_max] (float32), score is the detection
    confidence, and cls is the class name. Only detections scoring at
    least 0.8 after per-class NMS (threshold 0.3) are kept.
    """
    start_time = timeit.default_timer()
    result = []

    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    # Hard-coded GPU selection; set to False for CPU-only Caffe builds
    use_gpu = True
    if use_gpu:
        caffe.set_mode_gpu()
        caffe.set_device(0)
        cfg.GPU_ID = 0
    else:
        caffe.set_mode_cpu()

    # Although strange, the net being initialized for each loop is
    # faster in detection than being set as a global value
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    scores, boxes = im_detect(net, im)

    conf_thresh = 0.8
    nms_thresh = 0.3

    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        # Boxes are laid out 4-per-class along axis 1
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, nms_thresh)
        dets = dets[keep, :]

        # Indices of detections above the confidence threshold
        inds = np.where(dets[:, -1] >= conf_thresh)[0]

        for i in inds:
            # Left up corner x, y; bottom down corner x, y
            bbox = dets[i, :4]
            score = dets[i, -1]

            instance = [bbox, score, cls]
            result.append(instance)

    elapsed = timeit.default_timer() - start_time

    print 'Detection took {:.3f}s for {:d} object proposals'.format(elapsed, boxes.shape[0])
    return result
91 |
--------------------------------------------------------------------------------
/drv_recognize/src/detectcolor.h:
--------------------------------------------------------------------------------
1 | #ifndef DETECTCOLOR_H
2 | #define DETECTCOLOR_H
3 |
4 | #include
5 | #include
6 |
7 | #include
8 | #include
9 | #include
10 |
11 | using namespace std;
12 | using namespace cv;
13 |
// HSV-threshold based color blob detector: thresholds the input image
// between hsv_low_ and hsv_high_ and extracts bounding boxes.
// NOTE(review): the template arguments of the nested vectors were lost in
// this dump (angle-bracket content stripped) -- likely vector<vector<int>>
// or similar; confirm against detectcolor.cpp.
class DetectColor
{
public:
  DetectColor();
  // Detect colored regions in img_in; img_out receives the annotated
  // image and bbox_array the region bounding boxes. Returns detection
  // success.
  bool detect(Mat img_in, Mat &img_out, vector > &bbox_array);
  // Set the low/high HSV threshold bounds used by detect().
  void setHSV(int h_l, int s_l, int v_l, int h_h, int s_h, int v_h);

private:
  Scalar hsv_low_;
  Scalar hsv_high_;

  // Morphological cleanup (erode/dilate) of the threshold mask --
  // presumably; confirm in the .cpp.
  void morphOps(Mat &thresh);
  bool process(Mat img_in, Mat threshold, Mat &img_out,
               vector > &bbox_array);
};
29 |
30 | #endif // DETECTCOLOR_H
31 |
--------------------------------------------------------------------------------
/drv_recognize/src/drv_refine_depth.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 |
6 | #include
7 | #include
8 | #include
9 |
10 | #include "refinedepth.h"
11 | #include "drv_msgs/refine_depth.h"
12 |
13 | using namespace std;
14 | using namespace cv;
15 |
// Depth-refinement engine shared by all service callbacks (node spins
// single-threaded, so no synchronization is needed).
RefineDepth m_rd_;
17 |
18 | bool refine_depth(drv_msgs::refine_depth::Request &req,
19 | drv_msgs::refine_depth::Response &res)
20 | {
21 | cv_bridge::CvImagePtr rgb = cv_bridge::toCvCopy(req.rgb_in, "bgr8");
22 | cv_bridge::CvImagePtr depth = cv_bridge::toCvCopy(req.depth_in);
23 |
24 | Mat depth_refined;
25 | m_rd_.refineDepth(rgb->image, depth->image, depth_refined);
26 |
27 | // Convert cv::Mat to sensor_msgs::Image
28 | cv_bridge::CvImage img_cv;
29 | img_cv.image = depth_refined;
30 | img_cv.encoding = req.depth_in.encoding;
31 | //cerr << img_cv.encoding << endl; 32FC1
32 | res.depth_out = *img_cv.toImageMsg();
33 |
34 | return true; // Return bool is necessary for service
35 | }
36 |
37 | int main(int argc, char **argv)
38 | {
39 | ros::init(argc, argv, "refine_depth_server");
40 | ros::NodeHandle n;
41 | ros::NodeHandle pnh("~");
42 |
43 | ros::ServiceServer srv = n.advertiseService("drv_refine_depth", refine_depth);
44 | ROS_INFO("Ready to refine depth image.");
45 | ros::spin();
46 |
47 | return 0;
48 | }
49 |
--------------------------------------------------------------------------------
/drv_recognize/src/processdepth.h:
--------------------------------------------------------------------------------
1 | #ifndef PROCESSDEPTH_H
2 | #define PROCESSDEPTH_H
3 |
4 | //STL
5 | #include
6 | #include
7 | #include
8 |
9 | //OpenCV
10 | #include
11 | #include
12 | #include
13 |
14 |
15 | using namespace std;
16 | using namespace cv;
17 |
// Detects a hand region in a depth image and classifies a simple gesture.
// NOTE(review): template arguments of the vectors were lost in this dump
// (angle-bracket content stripped) -- confirm element types against
// processdepth.cpp.
class ProcessDepth
{
public:
  ProcessDepth();

  // Detect a hand in `depth`; outputs its bounding box, 3D center and a
  // gesture code. Returns detection success.
  bool detectHand(Mat depth, vector &bbox,
                  Point3f &center, int &gesture);

private:
  // Keep only depth content within [range_min, range_max] meters
  // (presumably meters -- confirm against the camera driver).
  bool getContentInRange(Mat depth, Mat &depth_out, vector &bbox,
                         float range_max, float range_min = 0.3);

  // Read the depth value at (x, y), optionally smoothing and falling
  // back to neighbor estimation when the pixel is invalid.
  float getDepth(const Mat &depthImage, int x, int y, bool smoothing,
                 float maxZError, bool estWithNeighborsIfNull);

  // Classify the gesture inside the detected hand bounding box.
  bool analyseGesture(Mat depth, vector bbox,
                      Point3f &center, int &gesture);

  // True when `value` is a finite number (guards NaN/inf depth pixels).
  template
  inline bool uIsFinite(const T & value) {
    return std::isfinite(value);
  }
};
41 |
42 | #endif // PROCESSDEPTH_H
43 |
--------------------------------------------------------------------------------
/drv_recognize/src/refinedepth.h:
--------------------------------------------------------------------------------
1 | #ifndef REFINEDEPTH_H
2 | #define REFINEDEPTH_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 | #include
12 | #include
13 | #include
14 | #include
15 | #include
16 |
17 | using namespace std;
18 | using namespace cv;
19 |
// Wraps a Caffe network that refines a raw depth image guided by the
// registered RGB image.
// NOTE(review): template arguments (caffe::Net<...>, vector<...>) were
// lost in this dump -- confirm against refinedepth.cpp.
class RefineDepth
{
public:
  RefineDepth();
  // Run the refinement network: depth_out receives the refined depth for
  // the (rgb_in, depth_in) pair.
  void refineDepth(Mat rgb_in, Mat depth_in, Mat &depth_out);

protected:
  // The Caffe network handle (element type stripped in this dump).
  boost::shared_ptr > net_;

private:
  // One forward pass over the wrapped input layers.
  void forward(vector depth_in, vector > features_in,
               vector > features_out, Mat &depth_out);
  void setParams();
  // Map cv::Mat channels onto the network's input blobs.
  void wrapInputLayer(vector depth_in, vector > features_in,
                      vector > features_out);
  void getOutput(Mat &depth_out);
};
37 |
38 | #endif // REFINEDEPTH_H
39 |
40 |
--------------------------------------------------------------------------------
/drv_recognize/src/transform.cpp:
--------------------------------------------------------------------------------
1 | #include "transform.h"
2 |
// Construct the TF helper. The node handle (nh_) is declared before
// tf_buffer_/tf_listener_ in the class, so it is initialized first --
// required for the listener to attach correctly.
Transform::Transform() :
  tf_buffer_(),
  tf_listener_(tf_buffer_, nh_)
{
  // Initialize node handler before tf_buffer is important
}
9 |
10 | bool Transform::getTransform(string base_frame, string header_frame)
11 | {
12 | try {
13 | // While we aren't supposed to be shutting down
14 | while (ros::ok()) {
15 | // Check if the transform from map to quad can be made right now
16 | if (tf_buffer_.canTransform(base_frame, header_frame, ros::Time(0))) {
17 | // Get the transform
18 | tf_handle_ = tf_buffer_.lookupTransform(base_frame, header_frame, ros::Time(0));
19 | return true;
20 | }
21 | else {
22 | ROS_WARN("Transform: Frame %s does not exist.", base_frame.c_str());
23 | }
24 |
25 | // Handle callbacks and sleep for a small amount of time
26 | // before looping again
27 | ros::spinOnce();
28 | ros::Duration(0.005).sleep();
29 | }
30 | }
31 | // Catch any exceptions that might happen while transforming
32 | catch (tf2::TransformException& ex)
33 | {
34 | ROS_ERROR("Exception transforming %s to %s: %s",
35 | base_frame.c_str(),
36 | header_frame.c_str(),
37 | ex.what());
38 | }
39 | }
40 |
41 | void Transform::doTransform(Eigen::Vector3f p_in, Eigen::Vector3f &p_out)
42 | {
43 | geometry_msgs::Vector3 trans = tf_handle_.transform.translation;
44 | geometry_msgs::Quaternion rotate = tf_handle_.transform.rotation;
45 |
46 | Eigen::Transform t = Eigen::Translation3f(trans.x,
47 | trans.y,
48 | trans.z)
49 | * Eigen::Quaternion(rotate.w, rotate.x, rotate.y, rotate.z);
50 |
51 | p_out = t * p_in;
52 | }
53 |
54 |
--------------------------------------------------------------------------------
/drv_recognize/src/transform.h:
--------------------------------------------------------------------------------
1 | #ifndef TRANSFORM_H
2 | #define TRANSFORM_H
3 |
4 | #include
5 |
6 | #include
7 | #include
8 | #include
9 |
10 | #include
11 |
12 | #include
13 |
14 | #include
15 | #include
16 | #include
17 | #include
18 |
19 | #include
20 |
21 | #include
22 | #include
23 |
24 | using namespace std;
25 |
// Helper that looks up a TF transform between two frames and applies it
// to Eigen points.
class Transform
{
public:
  Transform();

  // Wait for and cache the transform from header_frame into base_frame.
  // Returns true once obtained.
  bool getTransform(string base_frame, string header_frame);

  // Transform p_in with the cached transform; getTransform() must have
  // succeeded first.
  void doTransform(Eigen::Vector3f p_in, Eigen::Vector3f &p_out);

private:
  ros::NodeHandle nh_;
  // These declaration must be after the node initialization
  tf2_ros::Buffer tf_buffer_;
  // This is mandatory and should be declared before while loop
  tf2_ros::TransformListener tf_listener_;

  // Transform cached by the last successful getTransform() call
  geometry_msgs::TransformStamped tf_handle_;
};
44 |
45 | #endif // TRANSFORM_H
46 |
47 |
--------------------------------------------------------------------------------
/drv_search/CMakeLists.txt:
--------------------------------------------------------------------------------
# Build script for the drv_search node: all sources under ./src are
# compiled into a single executable linked against catkin, OpenCV and PCL.
cmake_minimum_required(VERSION 2.8.3)
project(drv_search)

aux_source_directory(./src SRC_LIST)

# Default to an optimized build when none is specified
if (NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE "Release")
    message(STATUS "No build type selected, default to ${CMAKE_BUILD_TYPE}")
endif()

find_package(catkin REQUIRED COMPONENTS
  cv_bridge
  drv_msgs
  geometry_msgs
  image_transport
  pcl_ros
  roscpp
  rospy
  sensor_msgs
  std_msgs
  tf
)

set(OpenCV_DIR "/usr/share/OpenCV")
find_package(OpenCV REQUIRED)
message("Open CV version is ${OpenCV_VERSION}")

find_package(PCL 1.7 REQUIRED)

catkin_package()

link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})

include_directories(${catkin_INCLUDE_DIRS} ${PCL_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS})

add_executable(drv_search ${SRC_LIST})

target_link_libraries(drv_search ${catkin_LIBRARIES} ${OpenCV_LIBS} ${PCL_LIBRARIES})
40 |
--------------------------------------------------------------------------------
/drv_search/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | drv_search
4 | 0.0.0
5 | The drv_search package
6 |
7 | aicrobo
8 |
9 | MIT
10 |
11 | catkin
12 | cv_bridge
13 | drv_msgs
14 | geometry_msgs
15 | pcl_ros
16 | roscpp
17 | rospy
18 | sensor_msgs
19 | std_msgs
20 | tf
21 | image_transport
22 | image_transport
23 | cv_bridge
24 | drv_msgs
25 | geometry_msgs
26 | pcl_ros
27 | roscpp
28 | rospy
29 | sensor_msgs
30 | std_msgs
31 | tf
32 |
33 |
34 |
35 |
36 |
37 |
--------------------------------------------------------------------------------
/drv_search/src/search.cpp:
--------------------------------------------------------------------------------
1 | #include "search.h"
2 |
// Default-construct with the stock search window:
// yaw (X) sweeps [0, 180] degrees in 20-degree steps; pitch (Y) covers
// [60, 80] degrees in 10-degree steps.
Search::Search()
{
  minPX = 0;
  maxPX = 180;
  xStep = 20;
  minPY = 60;
  maxPY = 80;
  yStep = 10;

  // Start sweeping in the positive yaw direction
  moveDirection = true;

  // Angle (degrees) to rotate per pixel of image offset
  apRatio = 0.1;
}
16 |
17 | Search::Search(int minpx = 0, int maxpx = 180, int xstep = 30,
18 | int minpy = 60, int maxpy = 80, int ystep = 10)
19 | {
20 | minPX = minpx;
21 | maxPX = maxpx;
22 | xStep = xstep;
23 | minPY = minpy;
24 | maxPY = maxpy;
25 | yStep = ystep;
26 | }
27 |
28 | bool Search::getNextPosition(int & yaw, int & pitch)
29 | {
30 | if (moveDirection) {
31 | if (yaw + xStep > maxPX) {
32 | yaw -= xStep;
33 | moveDirection = !moveDirection;
34 | return true;
35 | }
36 | else {
37 | yaw += xStep;
38 | }
39 | }
40 | else {
41 | if (yaw - xStep < minPX) {
42 | moveDirection = true; // reset
43 | return false;
44 | }
45 | else {
46 | yaw -= xStep;
47 | }
48 | }
49 | return true;
50 | }
51 |
52 |
53 | bool Search::getTargetPosition(vector bbox_array,
54 | int num, int &delta_pitch, int &delta_yaw)
55 | {
56 | // Array index starts from 0, while num starts from 1
57 | std_msgs::UInt16MultiArray array = bbox_array[num - 1];
58 | int roix = (array.data[0] + array.data[2]) / 2;
59 | int roiy = (array.data[1] + array.data[3]) / 2;
60 |
61 | // Convert image pixel distance to angle
62 | int d_x = roix - 320;
63 | int d_y = roiy - 240;
64 |
65 | // Offset angle the robot head needs to turn
66 | delta_yaw = - int(d_x * apRatio);
67 | delta_pitch = - int(d_y * apRatio);
68 | }
69 |
--------------------------------------------------------------------------------
/drv_search/src/search.h:
--------------------------------------------------------------------------------
1 | #ifndef SEARCH_H
2 | #define SEARCH_H
3 |
4 | #include
5 |
6 | #include
7 |
8 | #include
9 |
10 | using namespace std;
11 |
// Sweeps the robot head (servo yaw/pitch) through a search window and
// converts detected-target pixel offsets into head-angle corrections.
class Search
{
public:
  Search();
  Search(int minpx, int maxpx, int xstep, int minpy, int maxpy, int ystep);

  // Advance to the next sweep pose; false when the sweep has finished.
  bool getNextPosition(int & yaw, int & pitch);
  // Convert the center of the num-th (1-based) bounding box into
  // pitch/yaw offsets that would center the target.
  bool getTargetPosition(vector bbox_array,
                         int num, int & delta_pitch, int &delta_yaw);

private:

  // Search window bounds and step sizes, in degrees
  int minPX;
  int maxPX;
  int xStep;
  int minPY;
  int maxPY;
  int yStep;

  // Current sweep direction (true = toward maxPX)
  bool moveDirection;

  // pitch/yaw angle to rotate per pixel
  float apRatio;
};
36 |
37 | #endif // SEARCH_H
38 |
39 |
--------------------------------------------------------------------------------
/drv_search/src/segment.cpp:
--------------------------------------------------------------------------------
1 | #include "segment.h"
2 |
3 | using namespace cv;
4 |
// Set up image transport and advertise the labeled-image output topic.
Segment::Segment() : it_(nh_)
{
  pubImage_ = it_.advertise("search/labeled_image", 1);
}
9 |
// Combine Canny edges from the RGB and depth images into a single edge
// mask (pixel-wise AND) and display it.
// NOTE(review): imshow/waitKey block the calling thread until a key is
// pressed -- this looks like debugging code; confirm before shipping.
void Segment::segment(cv_bridge::CvImageConstPtr imagePtr, cv_bridge::CvImageConstPtr depthPtr)
{
  Mat rgb, rgb_edge;
  rgb = imagePtr->image;
  cannyDetect(rgb, rgb_edge, 40, 120);

  Mat depth, depth_edge;
  // Scale raw depth into 8-bit range before edge detection
  depthPtr->image.convertTo(depth,CV_8UC1, 0.05);//scale factor 0.05
  cannyDetect(depth, depth_edge, 0, 20);

  // Keep only edges present in both modalities
  Mat combine;
  bitwise_and(rgb_edge, depth_edge, combine);

  imshow("c", combine);
  waitKey();
}
26 |
// Blur img_in, run Canny with thresholds [t1, t2] and dilate the result
// into img_out.
// NOTE(review): the hard-coded imwrite path and the blocking
// imshow/waitKey below are debugging leftovers tied to one developer's
// machine -- confirm before shipping.
void Segment::cannyDetect(Mat img_in, Mat &img_out, int t1, int t2)
{
  Mat gray_blur;

  /// Reduce noise with a kernel 3x3
  blur(img_in, gray_blur, Size(3,3) );

  Canny(gray_blur, img_out, t1, t2);

  // Thicken edges so the later AND across modalities overlaps
  dilate(img_out, img_out, Mat(3,3,CV_8U));

  imwrite("/home/aicrobo/Documents/3.png", img_out);

  imshow("Edge", img_out);
  waitKey();
}
43 |
--------------------------------------------------------------------------------
/drv_search/src/segment.h:
--------------------------------------------------------------------------------
1 | #ifndef SEGMENT_H
2 | #define SEGMENT_H
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | #include
9 | #include
10 |
11 | #include
12 |
13 | #include
14 | #include
15 | #include
16 |
// Edge-based segmentation helper combining RGB and depth Canny edges.
class Segment
{
public:
  Segment();

  // AND the RGB and depth edge maps and display the result.
  void segment(cv_bridge::CvImageConstPtr imagePtr, cv_bridge::CvImageConstPtr depthPtr);
  // Blur + Canny([t1, t2]) + dilate into img_out.
  void cannyDetect(cv::Mat img_in, cv::Mat &img_out, int t1, int t2);

private:
  ros::NodeHandle nh_;
  image_transport::ImageTransport it_;

  // Advertised but not yet published to in segment() -- see the .cpp.
  image_transport::Publisher pubImage_;
};
31 |
32 | #endif // SEGMENT_H
33 |
--------------------------------------------------------------------------------
/drv_search/src/smoothservo.cpp:
--------------------------------------------------------------------------------
1 | #include "smoothservo.h"
2 |
// Parameter-server keys holding the servo's current pitch/yaw angles.
string param_servo_pitch = "/status/servo/pitch";
string param_servo_yaw = "/status/servo/yaw";
5 |
6 | SmoothServo::SmoothServo()
7 | {
8 | pitch_temp_ = 60;
9 | yaw_temp_ = 90;
10 | step = 1; // minium step for single move
11 | servoPubSearch_ = nh.advertise("/vision/servo", 1);
12 | }
13 |
14 | void SmoothServo::getCurrentServoAngle(int &pitch, int &yaw)
15 | {
16 | if (ros::param::has(param_servo_pitch))
17 | ros::param::get(param_servo_pitch, pitch);
18 |
19 | if (ros::param::has(param_servo_yaw))
20 | ros::param::get(param_servo_yaw, yaw);
21 |
22 | pitch_temp_ = pitch;
23 | yaw_temp_ = yaw;
24 | }
25 |
// Command the servo to (pitch, yaw) by publishing a single
// [pitch, yaw, speed] UInt16MultiArray, then sleep to let the hardware
// reach the pose. Always returns true.
bool SmoothServo::moveServoTo(int pitch, int yaw)
{
  // Earlier step-by-step interpolation approach, kept for reference;
  // superseded by the servo's own velocity parameter below.
  /*
  vector > path;

  if (smooth(path, pitch, yaw)) {
    for (size_t j = 0; j < path[0].size(); j++) {
      // Notice that this array should be cleaned in each loop
      std_msgs::UInt16MultiArray array;
      array.data.push_back(path[0][j]);
      array.data.push_back(yaw_temp);
      servoPubSearch_.publish(array);
      ros::Duration(0.1).sleep();
    }
    for (size_t j = 0; j < path[1].size(); j++) {
      // Notice that this array should be cleaned in each loop
      std_msgs::UInt16MultiArray array;
      array.data.push_back(pitch);
      array.data.push_back(path[1][j]);
      servoPubSearch_.publish(array);
      ros::Duration(0.1).sleep();
    }
    ros::Duration(0.2).sleep(); // give the servo time for getting to position
  }
  */

  // Simplified method that using velocity param of servo
  std_msgs::UInt16MultiArray array;
  array.data.push_back(pitch);
  array.data.push_back(yaw);
  array.data.push_back(30); // Speed: 0~255
  servoPubSearch_.publish(array);
  ros::Duration(1).sleep(); // give the servo time for getting to position
  return true;
}
61 |
62 | bool SmoothServo::smooth(vector > &path, int p, int y)
63 | {
64 | // 0 < p < 150, 0 <= y <=180
65 | if (p < 0 || p > 150 || y < 0 || y > 180)
66 | return false;
67 |
68 | int mp = (p - pitch_temp_) / step;
69 | int my = (y - yaw_temp_) / step;
70 |
71 | if (mp == 0 && my == 0)
72 | return false;
73 |
74 | vector pv;
75 | vector yv;
76 |
77 | for (size_t i = 1; i < abs(mp) + 1; i++)
78 | pv.push_back(pitch_temp_ + mp / abs(mp) * i * step);
79 | pv.push_back(p);
80 |
81 | for (size_t j = 1; j < abs(my) + 1; j++)
82 | yv.push_back(yaw_temp_ + my / abs(my) * j * step);
83 | yv.push_back(y);
84 |
85 | // path: pitch_value: p1,p2,p3...; yaw_value: y1,y2,y3...
86 | path.push_back(pv);
87 | path.push_back(yv);
88 |
89 | return true;
90 | }
91 |
--------------------------------------------------------------------------------
/drv_search/src/smoothservo.h:
--------------------------------------------------------------------------------
1 | #ifndef SMOOTHSERVO_H
2 | #define SMOOTHSERVO_H
3 |
4 | #include
5 |
6 | #include
7 |
8 | using namespace std;
9 |
// Moves the head servo to a target pitch/yaw, tracking the current pose
// via the parameter server and publishing commands on /vision/servo.
class SmoothServo
{
public:
  SmoothServo();

  // Publish a move command to (pitch, yaw); returns true when sent.
  bool moveServoTo(int pitch, int yaw);
  // Read the current pose from the parameter server into pitch/yaw and
  // the internal cache.
  void getCurrentServoAngle(int &pitch, int &yaw);

private:
  // Last known servo pose (degrees)
  int pitch_temp_;
  int yaw_temp_;
  // Minimum angle increment for interpolated moves
  int step;

  ros::NodeHandle nh;
  ros::Publisher servoPubSearch_;

  // Build an interpolated angle path toward (p, y); template arguments
  // of the nested vector were stripped in this dump (likely
  // vector<vector<int> > -- confirm against smoothservo.cpp).
  bool smooth(vector > &path, int p, int y);

};
29 |
30 | #endif // SMOOTHSERVO_H
31 |
--------------------------------------------------------------------------------
/drv_search/src/targetselect.h:
--------------------------------------------------------------------------------
1 | #ifndef TARGETSELECT_H
2 | #define TARGETSELECT_H
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | #include
9 | #include