├── .gitignore ├── CMakeLists.txt ├── README.md ├── calibration parameters ├── calib_ROS_red.txt ├── calib_ROS_yellow.txt ├── camera_parameters_logitech.txt └── hokuyo_parameters.txt ├── data └── trained_boost.xml ├── hdetect.doxyfile ├── include └── hdetect │ ├── human_follower.hpp │ ├── lib │ ├── annotator.hpp │ ├── bagReader.hpp │ ├── bagSubscriber.hpp │ ├── detector.hpp │ ├── header.hpp │ ├── human.hpp │ ├── laserLib.hpp │ ├── lengine.hpp │ ├── lfeatures.hpp │ ├── lgeometry.hpp │ ├── object_tracking.hpp │ ├── observation.hpp │ ├── projectTools.hpp │ ├── recognizer.hpp │ └── visualizer.hpp │ └── recognizeRT.hpp ├── launches ├── annotateBAG.launch ├── calibrate_camera.launch ├── ff_only.launch ├── headlessBAG.launch ├── headlessRT.launch ├── recognizeBAG.launch ├── recognizeRT.launch ├── recordBAG.launch ├── recordINIT.launch ├── recordURGGS.launch ├── recordUTMFF.launch ├── rvizRT.launch ├── showBAG.launch ├── showRT.launch ├── trainLaser.launch ├── utm_only.launch ├── viewRect.launch ├── visualizeBAG.launch └── visualizeRT.launch ├── msg ├── ClusterClass.msg ├── ClusteredScan.msg ├── HumansFeat.msg └── HumansFeatClass.msg ├── nopac.xml ├── package.xml ├── rviz └── hdetect2.rviz ├── src ├── FramePublisher.cpp ├── HumanFollowerRT.cpp ├── annotateData.cpp ├── detectTest.cpp ├── headlessRT.cpp ├── lib │ ├── HumanFollower.cpp │ ├── Recognizer_20140217.cpp │ ├── annotator.cpp │ ├── bagReader.cpp │ ├── detector.cpp │ ├── header.cpp │ ├── human.cpp │ ├── laserLib.cpp │ ├── lengine.cpp │ ├── lfeatures.cpp │ ├── lgeometry.cpp │ ├── object_tracking.cpp │ ├── observation.cpp │ ├── projectTools.cpp │ ├── recognizer.cpp │ └── visualizer.cpp ├── other │ ├── Calib_Matlab2ROS.cpp │ ├── TF_Publisher.cpp │ └── caltest.cpp ├── recognizeRT.cpp ├── showRT.cpp ├── trainLaser.cpp └── visualizeRT.cpp └── yaml ├── camera_calib.yaml ├── camera_calib_log.yaml ├── camera_calib_yellow.yaml ├── detector.yaml ├── laser_filter.yaml ├── logitech_cam_hd.yml └── shadow_filter.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | /CMakeFiles 2 | /bag 3 | /bin 4 | /lib 5 | /msg_gen 6 | CMakeCache.txt 7 | /src/hdetect/ 8 | /build 9 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(hdetect) 3 | 4 | ## Find catkin macros and libraries 5 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 6 | ## is used, also find other catkin packages 7 | find_package(catkin REQUIRED COMPONENTS 8 | roslib 9 | rosbag 10 | tf 11 | image_transport 12 | roscpp 13 | std_msgs 14 | cv_bridge 15 | laser_geometry 16 | angles 17 | sensor_msgs 18 | filters 19 | laser_filters 20 | message_filters 21 | pluginlib 22 | geometry_msgs 23 | image_geometry 24 | nav_msgs 25 | camera_calibration_parsers 26 | 27 | ) 28 | ## System dependencies are found with CMake's conventions 29 | find_package(Boost REQUIRED COMPONENTS signals) 30 | find_library(NEWMAT newmat /usr/lib) 31 | 32 | ### GSL IS NEEDED FOR PEOPLE2D LIBRARY TO COMPILE 33 | find_package(PkgConfig REQUIRED) 34 | pkg_check_modules(GSL REQUIRED gsl) 35 | #include_directories(${GSL_INCLUDE_DIRS}) 36 | #link_directories(${GLS_LIBRARY_DIRS}) 37 | 38 | #find_package(newmat REQUIRED COMPONENTS newmat) 39 | #find_package(GSL REQUIRED gsl) 40 | 41 | ## Uncomment this if the package has a setup.py. 
This macro ensures 42 | ## modules and global scripts declared therein get installed 43 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 44 | # catkin_python_setup() 45 | 46 | ################################################ 47 | ## Declare ROS messages, services and actions ## 48 | ################################################ 49 | 50 | ## To declare and build messages, services or actions from within this 51 | ## package, follow these steps: 52 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 53 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 54 | ## * In the file package.xml: 55 | ## * add a build_depend and a run_depend tag for each package in MSG_DEP_SET 56 | ## * If MSG_DEP_SET isn't empty the following dependencies might have been 57 | ## pulled in transitively but can be declared for certainty nonetheless: 58 | ## * add a build_depend tag for "message_generation" 59 | ## * add a run_depend tag for "message_runtime" 60 | ## * In this file (CMakeLists.txt): 61 | ## * add "message_generation" and every package in MSG_DEP_SET to 62 | ## find_package(catkin REQUIRED COMPONENTS ...) 63 | ## * add "message_runtime" and every package in MSG_DEP_SET to 64 | ## catkin_package(CATKIN_DEPENDS ...) 65 | ## * uncomment the add_*_files sections below as needed 66 | ## and list every .msg/.srv/.action file to be processed 67 | ## * uncomment the generate_messages entry below 68 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 69 | 70 | ## Generate messages in the 'msg' folder 71 | add_message_files( 72 | FILES 73 | ClusterClass.msg 74 | ClusteredScan.msg 75 | HumansFeatClass.msg 76 | HumansFeat.msg 77 | ) 78 | 79 | ## Generate services in the 'srv' folder 80 | #add_service_files( 81 | # FILES 82 | # SetPolygon.srv 83 | # SetSimpleGoal.srv 84 | # ) 85 | 86 | ## Generate actions in the 'action' folder 87 | # add_action_files( 88 | # FILES 89 | # Action1.action 90 | # Action2.action 91 | # ) 92 | 93 | ## Generate added messages and services with any dependencies listed here 94 | generate_messages( 95 | DEPENDENCIES 96 | geometry_msgs sensor_msgs std_msgs# move_base_msgs# nav_msgs# visualization_msgs 97 | ) 98 | 99 | ################################### 100 | ## catkin specific configuration ## 101 | ################################### 102 | ## The catkin_package macro generates cmake config files for your package 103 | ## Declare things to be passed to dependent projects 104 | ## INCLUDE_DIRS: uncomment this if you package contains header files 105 | ## LIBRARIES: libraries you create in this project that dependent projects also need 106 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 107 | ## DEPENDS: system dependencies of this project that dependent projects also need 108 | 109 | 110 | # Resolve system dependency on yaml-cpp, which apparently does not 111 | # provide a CMake find_package() module. 112 | 113 | catkin_package( 114 | # INCLUDE_DIRS include 115 | # LIBRARIES trajectory_follower 116 | # CATKIN_DEPENDS actionlib amp_ros geometry_msgs move_base_msgs nav_msgs roscpp tf visualization_msgs yaml-cpp 117 | # DEPENDS system_lib 118 | ) 119 | 120 | 121 | 122 | 123 | 124 | 125 | ########### 126 | ## Build ## 127 | ########### 128 | 129 | #Add custom (non compiling) targets so launch scripts and python files show up in QT Creator's project view. 
130 | file(GLOB_RECURSE EXTRA_FILES */*)
131 | add_custom_target(${PROJECT_NAME}_OTHER_FILES ALL WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} SOURCES ${EXTRA_FILES})
132 |
133 | ## Specify additional locations of header files
134 | ## Your package locations should be listed before other locations
135 | include_directories(include)
136 | include_directories(${catkin_INCLUDE_DIRS})
137 | include_directories(${Boost_INCLUDE_DIRS})
138 | include_directories(${NEWMAT_INCLUDE_DIRS})
139 | include_directories(${GSL_INCLUDE_DIRS})
140 |
141 | link_directories(${GSL_LIBRARY_DIRS})
142 |
143 |
144 | ## Declare a cpp library
145 | # add_library(trajectory_follower
146 | # src/${PROJECT_NAME}/trajectory_follower.cpp
147 | # )
148 |
149 |
150 | add_library(Header src/lib/header.cpp)
151 | add_library(Recognizer src/lib/recognizer.cpp)
152 | add_library(ObjectTracking src/lib/object_tracking.cpp)
153 | add_library(Observation src/lib/observation.cpp)
154 | add_library(Human src/lib/human.cpp)
155 | add_library(laserLib src/lib/laserLib.cpp)
156 | add_library(visualizer src/lib/visualizer.cpp)
157 | add_library(lgeometry src/lib/lgeometry.cpp)
158 | add_library(lfeatures src/lib/lfeatures.cpp)
159 | add_library(lengine src/lib/lengine.cpp)
160 | add_library(projectTools src/lib/projectTools.cpp)
161 | add_library(detector src/lib/detector.cpp)
162 | add_library(annotator src/lib/annotator.cpp)
163 | add_library(bagReader src/lib/bagReader.cpp)
164 |
165 | ## Declare a cpp executable # EXECUTABLE NODES
166 | add_executable(visualizeRT src/visualizeRT.cpp)
167 | add_executable(recognizeRT src/recognizeRT.cpp)
168 | add_executable(showRT src/showRT.cpp)
169 | add_executable(annotateData src/annotateData.cpp)
170 | add_executable(trainLaser src/trainLaser.cpp)
171 | add_executable(headlessRT src/headlessRT.cpp)
172 |
173 | ## Add cmake target dependencies of the executable/library
174 | ## as an example, message headers may need to be generated before nodes
175 |
176 | add_dependencies(showRT ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
177 |
178 |
179 | add_dependencies(lgeometry ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
180 |
181 | add_dependencies(Header ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
182 |
183 | add_dependencies(lfeatures ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
184 |
185 | add_dependencies(projectTools ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
186 |
187 | add_dependencies(bagReader ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
188 |
189 | add_dependencies(trainLaser ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
190 |
191 | add_dependencies(Observation ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
192 |
193 | add_dependencies(ObjectTracking ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
194 |
195 |
196 | add_dependencies(visualizer detector)
197 | add_dependencies(Recognizer detector)
198 | add_dependencies(detector laserLib)
199 | add_dependencies(detector projectTools)
200 | add_dependencies(laserLib lengine)
201 | add_dependencies(lengine lgeometry)
202 | add_dependencies(lengine lfeatures)
203 | add_dependencies(lengine Header)
204 | add_dependencies(annotateData bagReader)
205 | add_dependencies(annotateData annotator)
206 | add_dependencies(annotator visualizer)
207 |
208 | add_dependencies(visualizeRT visualizer)
209 | add_dependencies(visualizeRT lgeometry)
210 |
211 | add_dependencies(headlessRT detector)
212 | add_dependencies(headlessRT lgeometry)
213 |
214 | add_dependencies(recognizeRT Recognizer)
215 | add_dependencies(recognizeRT Human)
216 | add_dependencies(recognizeRT ObjectTracking)
217 | add_dependencies(recognizeRT Observation)
218 |
219 |
220 | ## Specify libraries to link a library or executable target against
221 | # target_link_libraries(trajectory_follower_node
222 | # ${catkin_LIBRARIES}
223 | # )
224 |
225 | # LINK THE LIBRARIES
226 |
227 | target_link_libraries(visualizeRT ${Boost_LIBRARIES})
228 | target_link_libraries(headlessRT ${Boost_LIBRARIES})
229 | target_link_libraries(recognizeRT ${Boost_LIBRARIES})
230 | target_link_libraries(showRT ${Boost_LIBRARIES})
231 | target_link_libraries(annotateData ${Boost_LIBRARIES})
232 |
233 | target_link_libraries(lgeometry ${GSL_LIBRARIES})
234 | target_link_libraries(lfeatures ${GSL_LIBRARIES})
235 | target_link_libraries(lengine lgeometry lfeatures Header)
236 | target_link_libraries(laserLib lengine)
237 | target_link_libraries(detector laserLib projectTools)
238 | target_link_libraries(visualizer detector)
239 | target_link_libraries(Recognizer detector)
240 | target_link_libraries(visualizeRT visualizer lgeometry ${NEWMAT})
241 | target_link_libraries(headlessRT detector lgeometry ${NEWMAT})
242 | target_link_libraries(recognizeRT Recognizer lgeometry ${NEWMAT} ObjectTracking Human Observation)
243 | target_link_libraries(showRT ${GSL_LIBRARIES})
244 |
245 | target_link_libraries(annotator visualizer)
246 | target_link_libraries(annotateData bagReader annotator)
247 |
248 |
249 | target_link_libraries(Header ${catkin_LIBRARIES})
250 | target_link_libraries(Recognizer ${catkin_LIBRARIES})
251 | target_link_libraries(ObjectTracking ${catkin_LIBRARIES})
252 | target_link_libraries(Observation ${catkin_LIBRARIES})
253 | target_link_libraries(Human ${catkin_LIBRARIES})
254 | target_link_libraries(laserLib ${catkin_LIBRARIES})
255 | target_link_libraries(visualizer ${catkin_LIBRARIES})
256 | target_link_libraries(lgeometry ${catkin_LIBRARIES})
257 | target_link_libraries(lfeatures ${catkin_LIBRARIES})
258 | target_link_libraries(lengine ${catkin_LIBRARIES})
259 | target_link_libraries(projectTools ${catkin_LIBRARIES})
260 | target_link_libraries(detector ${catkin_LIBRARIES})
261 | target_link_libraries(annotator ${catkin_LIBRARIES})
262 | target_link_libraries(bagReader ${catkin_LIBRARIES})
263 |
264 | target_link_libraries(visualizeRT ${catkin_LIBRARIES})
265 | target_link_libraries(headlessRT ${catkin_LIBRARIES})
266 | target_link_libraries(recognizeRT ${catkin_LIBRARIES})
267 | target_link_libraries(showRT ${catkin_LIBRARIES})
268 | target_link_libraries(annotateData ${catkin_LIBRARIES})
269 | target_link_libraries(trainLaser ${catkin_LIBRARIES})
270 |
271 | # )
272 |
273 |
274 | # LIBRARIES
275 |
276 | #find_library(NEWMAT newmat /usr/lib)
277 |
278 |
279 |
280 |
281 |
282 | #target_link_libraries(detectTest visualizer)
283 |
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | Human detection & tracking package for ROS
2 | =======
3 |
4 | Uses a combination of a laser range finder and a computer vision module for pedestrian detection. The vision module works with OpenCV's detector, which uses Histograms of Oriented Gradients (HOG) and Support Vector Machines (SVM).
5 |
6 | For pairing observations with tracks and for the tracking itself it uses a combination of Kalman filters and the Mahalanobis distance; a sketch of the pairing idea is given below.
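The pairing step can be illustrated with a short, self-contained C++ sketch. This is not the actual code in `src/lib/object_tracking.cpp`; the `State2D`/`Cov2D` types and function names are hypothetical, and the gate values correspond to the `max_euc_dist` and `max_mah_dist` parameters declared in `include/hdetect/lib/detector.hpp`:

```cpp
#include <cmath>

// Hypothetical 2-D types, used only for this illustration.
struct State2D { double x, y; };
struct Cov2D   { double xx, xy, yy; };   // symmetric 2x2 covariance

// Squared Mahalanobis distance d^2 = v^T * S^-1 * v, with the 2x2
// inverse written out by hand (det = xx*yy - xy*xy).
double mahalanobisSq(const State2D &obs, const State2D &pred, const Cov2D &S)
{
    double vx = obs.x - pred.x, vy = obs.y - pred.y;
    double det = S.xx * S.yy - S.xy * S.xy;
    return (vx * (S.yy * vx - S.xy * vy) + vy * (S.xx * vy - S.xy * vx)) / det;
}

// An observation may be paired with a predicted track only if it passes
// both the Euclidean and the Mahalanobis gate.
bool withinGates(const State2D &obs, const State2D &pred, const Cov2D &S,
                 double maxEucDist, double maxMahDist)
{
    double euc = std::hypot(obs.x - pred.x, obs.y - pred.y);
    return euc < maxEucDist && std::sqrt(mahalanobisSq(obs, pred, S)) < maxMahDist;
}
```

In this scheme, among the candidate pairings that pass both gates the closest one in the Mahalanobis sense wins; unmatched tracks are only predicted and their score decays (cf. `predict_score`), while unmatched observations start new tracks (cf. `new_object_score`).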
7 |
8 | The code was written for the [ROTOS](http://robcib.etsii.upm.es/) project in the RobCib research group of the Polytechnic University of Madrid.
9 |
10 | ## Publications
11 |
12 | If you use this code, please reference the following works:
13 |
14 | [Mario Andrei Garzon Oviedo, Antonio Barrientos, Jaime Del Cerro, Andrés Alacid, Efstathios Fotiadis, Gonzalo R. Rodríguez-Canosa, Bang-Chen Wang. Tracking and following pedestrian trajectories, an approach for autonomous surveillance of critical infrastructures. Industrial Robot: An International Journal, 2015, 42:5, 429-440](http://dx.doi.org/10.1108/IR-02-2015-0037)
15 |
16 | BibTeX entry:
17 |
18 | ```
19 | @article{doi:10.1108/IR-02-2015-0037,
20 | author = { Mario Andrei Garzon Oviedo and Antonio Barrientos and Jaime Del Cerro and Andrés Alacid and Efstathios Fotiadis and Gonzalo R. Rodríguez-Canosa and Bang-Chen Wang },
21 | title = {Tracking and following pedestrian trajectories, an approach for autonomous surveillance of critical infrastructures},
22 | journal = {Industrial Robot: An International Journal},
23 | volume = {42},
24 | number = {5},
25 | pages = {429-440},
26 | year = {2015},
27 | doi = {10.1108/IR-02-2015-0037},
28 | URL = {http://dx.doi.org/10.1108/IR-02-2015-0037}
29 | }
30 | ```
31 |
32 |
33 | [Fotiadis, E.P.; Garzón, M.; Barrientos, A. Human Detection from a Mobile Robot Using Fusion of Laser and Vision Information. Sensors 2013, 13, 11603-11635.](http://www.mdpi.com/1424-8220/13/9/11603)
34 |
35 | BibTeX entry:
36 |
37 | ```
38 | @Article{s130911603,
39 | AUTHOR = {Fotiadis, Efstathios P. and Garzón, Mario and Barrientos, Antonio},
40 | TITLE = {Human Detection from a Mobile Robot Using Fusion of Laser and Vision Information},
41 | JOURNAL = {Sensors},
42 | VOLUME = {13},
43 | YEAR = {2013},
44 | NUMBER = {9},
45 | PAGES = {11603--11635},
46 | URL = {http://www.mdpi.com/1424-8220/13/9/11603},
47 | PubMedID = {24008280},
48 | ISSN = {1424-8220},
49 | DOI = {10.3390/s130911603}
50 | }
51 | ```
52 |
53 | ## Videos
54 |
55 | Human Detection with fusion of laser and camera
56 |
57 | [![Human Detection with fusion of laser and camera](http://img.youtube.com/vi/W84ERQ0LYjM/0.jpg)](http://www.youtube.com/watch?v=W84ERQ0LYjM)
58 |
59 | Autonomous detection tracking and following
60 |
61 | [![Autonomous detection tracking and following](http://img.youtube.com/vi/gqlUAyLwUE4/0.jpg)](http://www.youtube.com/watch?v=gqlUAyLwUE4)
62 |
63 |
64 | ## Requirements
65 | 1. ROS (Indigo)
66 | 2. libgsl
67 |    - install the development package, e.g.:
68 |      $ sudo apt-get install libgsl0-dev
69 | 3. NEWMAT
70 |    - install it with:
71 |      $ sudo apt-get install libnewmat10-dev
72 | 4. All the ROS components listed in the package.xml dependencies
73 |
74 | ## How to compile
75 |
76 | The code compiles only using *catkin*.
77 |
78 | * cd to your catkin workspace
79 | * catkin_make hdetect
80 |
81 | ## Demo the code
82 | 1. Download [this rosbag](https://www.dropbox.com/sh/4uslv212ywle268/AABT589xCamNHPQ5aZr5T_zsa?dl=0)
83 | 2. Change the *recognizeBAG.launch* launch file to point to the rosbag
84 | 3. Run ```roslaunch hdetect recognizeBAG.launch```
85 | 4. Rviz is going to launch. Enable the image (camera)
86 | 5. Wait until everything has launched and hit space to start playback
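Putting the compile and demo steps together, a typical session might look like this (assuming the package is checked out in a catkin workspace at `~/catkin_ws`):

```bash
cd ~/catkin_ws
catkin_make
source devel/setup.bash

# edit launches/recognizeBAG.launch first so it points at the downloaded bag
roslaunch hdetect recognizeBAG.launch
```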
87 |
88 | ## Executable files
89 |
90 | * headlessRT - human detection without visualization or tracking
91 | * visualizeRT - human detection with visualization but without tracking
92 | * recognizeRT - human detection with tracking but without visualization
93 | * showRT - human detection with visualization and tracking
94 | * annotateData - annotates the humans for training and saves the result to a CSV file
95 | * trainLaser - trains on the annotations in a given CSV file and saves the resulting classifier to boost.xml
96 |
97 | ## Launch files
98 |
99 | It is recommended to run the launch files rather than the binaries directly.
100 |
101 | * headlessRT.launch - headlessRT on the robot
102 | * headlessBAG.launch - headlessRT with a rosbag. The bag name can be changed inside the launch file
103 | * visualizeRT.launch - visualizeRT on the robot
104 | * visualizeBAG.launch - visualizeRT with a rosbag. The bag name can be changed inside the launch file
105 | * recognizeRT.launch - recognizeRT on the robot
106 | * recognizeBAG.launch - recognizeRT with a rosbag. The bag name can be changed inside the launch file
107 | * showRT.launch - showRT on the robot
108 | * showBAG.launch - showRT with a rosbag. The bag name can be changed inside the launch file
109 | * annotateBAG.launch - annotateData with a rosbag. The bag name can be changed inside the launch file
110 | * trainLaser.launch - trainLaser with a CSV file. The file name can be changed inside the launch file
111 |
112 | ## Brief explanation of the code
113 |
114 | #### lengine
115 | Segments the laser points into clusters (see the segmentation sketch at the end of this section)
116 |
117 | #### lfeature
118 | Computes the features of the laser clusters
119 |
120 | #### lgeometry
121 | Computes the geometry used in the computation of the features
122 |
123 | #### laserLib
124 | Loads the raw laser points
125 |
126 | #### projectTools
127 | Standard functions for projection
128 |
129 | #### header
130 | Contains the HUMAN enumeration, the static topic name, curTimestamp and preTimestamp
131 |
132 | #### human
133 | Structure for storing the state of a detected and tracked human
134 |
135 | #### observation
136 | Structure for storing the values cast from a detection
137 |
138 | #### object_tracking
139 | Uses a Kalman filter to track the objects, including the predict and update steps
140 |
141 | #### detector
142 | Callback of headlessRT and the main detection routine; runs the laser and image detectors, then fuses their outputs
143 |
144 | #### visualizer
145 | Callback of visualizeRT; runs the detector first, then plots the detections in a window
146 |
147 | #### recognizer
148 | Callback of recognizeRT; runs the detector first, then tracks the humans and publishes the result to rviz
149 |
150 | #### annotator
151 | Callback of annotateData; the main annotation routine
152 |
153 | #### bagReader
154 | Reads the bag file for the annotation
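To make the *lengine* step above concrete: the segmentation is jump-distance clustering, i.e. a new cluster is started whenever two consecutive scan points are farther apart than the `jumpdist` parameter (declared in `include/hdetect/lib/detector.hpp`). The following is a minimal sketch of the idea, not the actual `lengine::segmentscanJDC` implementation; the `Pt` type and function name are made up for the example:

```cpp
#include <cmath>
#include <vector>

struct Pt { double x, y; };   // hypothetical 2-D laser point

// Jump-distance clustering: walk the scan in angular order and open a new
// cluster whenever the Euclidean gap between consecutive points exceeds jumpdist.
std::vector<std::vector<Pt> > segmentScan(const std::vector<Pt> &scan, double jumpdist)
{
    std::vector<std::vector<Pt> > clusters;
    for (size_t i = 0; i < scan.size(); ++i)
    {
        bool jump = clusters.empty() ||
                    std::hypot(scan[i].x - clusters.back().back().x,
                               scan[i].y - clusters.back().back().y) > jumpdist;
        if (jump)
            clusters.push_back(std::vector<Pt>());   // open a new cluster
        clusters.back().push_back(scan[i]);
    }
    return clusters;
}
```

Clusters with very few points (cf. `L_MINCLUSTERSZ` in `lfeatures.hpp`) carry too little shape information and are typically discarded before the features are computed.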
155 |
156 | ## Acknowledgements
157 |
158 | The laser processing module uses code written by L. Spinello. The tracking module is based on the work of Gonzalo Rodriguez-Canosa.
159 |
-------------------------------------------------------------------------------- /calibration parameters/calib_ROS_red.txt: --------------------------------------------------------------------------------
1 | /Users/kabamaru/Desktop/matlab/upm_calibration/active_calibration/red_base
2 | delta: 0.00186613576757430262 -0.06614515092809147845 -0.01334054866694768006
3 | rpy: -3.13453672906077862592 -0.00213669092672443169 0.11200467126997579181
4 | fc: 667.11582342365500153392 667.29082132395581083983
5 | cc: 379.43180307082042190814 234.29123158659274395177
6 | kc: -0.42399950787425427778 0.20396779072473769645 0.00014505749712079148 -0.00024869007597676197 0.00000000000000000000
-------------------------------------------------------------------------------- /calibration parameters/calib_ROS_yellow.txt: --------------------------------------------------------------------------------
1 | /Users/kabamaru/Desktop/matlab/upm_calibration/active_calibration/yellow_base
2 | delta: 0.01070505651962671764 -0.06722345284252317921 -0.03089017107345514920
3 | rpy: 3.12148546397231552163 -0.01876295705026196844 0.10236192367415890281
4 | fc: 665.49213516386726041674 664.98397874717795730248
5 | cc: 366.08910016553352306801 248.39451425645043514123
6 | kc: -0.41618302374745735905 0.18323293672503468410 -0.00154826994718998173 0.00292684802966111830 0.00000000000000000000
7 |
-------------------------------------------------------------------------------- /calibration parameters/camera_parameters_logitech.txt: --------------------------------------------------------------------------------
1 | # Camera intrinsics
2 |
3 | [image]
4 |
5 | width
6 | 640
7 |
8 | height
9 | 480
10 |
11 | [camera]
12 |
13 | camera matrix
14 | 699.37505 0.00000 332.17176
15 | 0.00000 698.01093 298.73703
16 | 0.00000 0.00000 1.00000
17 |
18 | distortion
19 | 0.11313 -0.16908 0.01694 -0.00145 0.00000
20 |
21 |
22 | rectification
23 | 1.00000 0.00000 0.00000
24 | 0.00000 1.00000 0.00000
25 | 0.00000 0.00000 1.00000
26 |
27 | projection
28 | 716.81919 0.00000 330.88509 0.00000
29 | 0.00000 705.84808 303.89945 0.00000
30 | 0.00000 0.00000 1.00000 0.00000
31 |
-------------------------------------------------------------------------------- /calibration parameters/hokuyo_parameters.txt: --------------------------------------------------------------------------------
1 | parameters:
2 | -
3 | name: min_ang
4 | type: double
5 | level: 1
6 | description: The angle of the first range measurement. The unit depends on ~ang_radians.
7 | edit_method: ''
8 | -
9 | name: max_ang
10 | type: double
11 | level: 1
12 | description: The angle of the last range measurement. The unit depends on ~ang_radians.
13 | edit_method: ''
14 | -
15 | name: intensity
16 | type: bool
17 | level: 1
18 | description: Whether or not the hokuyo returns intensity values.
19 | edit_method: '' 20 | - 21 | name: cluster 22 | type: int 23 | level: 1 24 | description: The number of adjacent range measurements to cluster into a single reading 25 | edit_method: '' 26 | - 27 | name: skip 28 | type: int 29 | level: 1 30 | description: The number of scans to skip between each measured scan 31 | edit_method: '' 32 | - 33 | name: port 34 | type: str 35 | level: 3 36 | description: The serial port where the hokuyo device can be found 37 | edit_method: '' 38 | - 39 | name: calibrate_time 40 | type: bool 41 | level: 3 42 | description: Whether the node should calibrate the hokuyo's time offset 43 | edit_method: '' 44 | - 45 | name: frame_id 46 | type: str 47 | level: 0 48 | description: The frame in which laser scans will be returned 49 | edit_method: '' 50 | - 51 | name: time_offset 52 | type: double 53 | level: 0 54 | description: An offet to add to the timestamp before publication of a scan 55 | edit_method: '' 56 | - 57 | name: allow_unsafe_settings 58 | type: bool 59 | level: 3 60 | description: Turn this on if you wish to use the UTM-30LX with an unsafe angular range. Turning this option on may cause occasional crashes or bad data. This option is a tempory workaround that will hopefully be removed in an upcoming driver version. 61 | edit_method: '' 62 | max: 63 | bools: 64 | - 65 | name: intensity 66 | value: True 67 | - 68 | name: calibrate_time 69 | value: True 70 | - 71 | name: allow_unsafe_settings 72 | value: True 73 | ints: 74 | - 75 | name: cluster 76 | value: 99 77 | - 78 | name: skip 79 | value: 9 80 | strs: 81 | - 82 | name: port 83 | value: '' 84 | - 85 | name: frame_id 86 | value: '' 87 | doubles: 88 | - 89 | name: min_ang 90 | value: 3.14159265359 91 | - 92 | name: max_ang 93 | value: 3.14159265359 94 | - 95 | name: time_offset 96 | value: 0.25 97 | min: 98 | bools: 99 | - 100 | name: intensity 101 | value: False 102 | - 103 | name: calibrate_time 104 | value: False 105 | - 106 | name: allow_unsafe_settings 107 | value: False 108 | ints: 109 | - 110 | name: cluster 111 | value: 0 112 | - 113 | name: skip 114 | value: 0 115 | strs: 116 | - 117 | name: port 118 | value: '' 119 | - 120 | name: frame_id 121 | value: '' 122 | doubles: 123 | - 124 | name: min_ang 125 | value: -3.14159265359 126 | - 127 | name: max_ang 128 | value: -3.14159265359 129 | - 130 | name: time_offset 131 | value: -0.25 132 | dflt: 133 | bools: 134 | - 135 | name: intensity 136 | value: False 137 | - 138 | name: calibrate_time 139 | value: True 140 | - 141 | name: allow_unsafe_settings 142 | value: False 143 | ints: 144 | - 145 | name: cluster 146 | value: 1 147 | - 148 | name: skip 149 | value: 0 150 | strs: 151 | - 152 | name: port 153 | value: /dev/ttyACM0 154 | - 155 | name: frame_id 156 | value: laser 157 | doubles: 158 | - 159 | name: min_ang 160 | value: -1.57079632679 161 | - 162 | name: max_ang 163 | value: 1.57079632679 164 | - 165 | name: time_offset 166 | value: 0.0 167 | --- 168 | -------------------------------------------------------------------------------- /include/hdetect/human_follower.hpp: -------------------------------------------------------------------------------- 1 | #ifndef HUMANPOSE_HPP 2 | #define HUMANPOSE_HPP 3 | 4 | // STANDARD 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "cmath" 12 | 13 | // ROS 14 | #include "ros/ros.h" 15 | #include "std_msgs/String.h" 16 | #include "geometry_msgs/PointStamped.h" 17 | #include "geometry_msgs/Twist.h" 18 | #include "tf/transform_listener.h" 19 | #include 
"move_base_msgs/MoveBaseAction.h" 20 | #include "actionlib/client/simple_action_client.h" 21 | 22 | // Project includes 23 | #include 24 | #include 25 | #include 26 | 27 | 28 | /** 29 | * A node to catch the position of the closest human 30 | * to the robot and publish it in ROS 31 | * Human position in the laser frame 32 | * It is supposed that once an Human with a determined 33 | * ID is gone, this ID will not appear any more 34 | * @author Andrés Alacid Cano 35 | * @date 2014/02/15 36 | */ 37 | 38 | class HumanFollower : public Recognizer 39 | { 40 | public: 41 | HumanFollower(); 42 | void scoringprocess(); 43 | void decisionfollower(); 44 | void goal_follower(); 45 | void goal_frame(); 46 | //void getHumans(deque &humans, bool odom, bool odom_combined); 47 | 48 | void update(const sensor_msgs::Image::ConstPtr &image, 49 | const sensor_msgs::LaserScan::ConstPtr &lScan); 50 | 51 | private: 52 | 53 | ros::NodeHandle nh; 54 | 55 | typedef std::deque HumanDeque; 56 | 57 | // Amcl_pose subscription 58 | void amcl_subCallback(const geometry_msgs::PoseWithCovarianceStamped amcl_robot1_position); 59 | ros::Subscriber amcl_position; 60 | float x1amcl, y1amcl, w1amcl, z1amcl, yaw_robotamcl; 61 | 62 | // Odom_combined subscription 63 | void odom_combinedCallback (const geometry_msgs::PoseWithCovarianceStamped odom_combined_robot); 64 | ros::Subscriber odom_combined_sub; 65 | float x1odom, y1odom, w1odom, z1odom, yaw_robotodom; 66 | 67 | // Humans detected subscription 68 | void humans_detectedCallback (HumanDeque &humans_detected); 69 | geometry_msgs::PointStamped obj_position; 70 | 71 | // Variables 72 | bool firstData, with_odom_combined, with_amcl, firstFollow, reached, goal_sent, goal_frame_on, clockwise; // General variables 73 | float px, py, x2, y2, xgoal, ygoal; // New objective position and goal position 74 | float x1, y1, z1, w1, yaw_robot, yaw_first; // Robot position 75 | float xdis, ydis, obj_dist, obj_mov, thetaObj; // Objective position and movement 76 | float radians, radiansPI; // Angle difference 77 | int message_counter; // Message counters 78 | 79 | //std::deque HumansDQ; 80 | std::deque HumansIncoming; 81 | 82 | // Scoring variables 83 | float sc1, sc2, sc3; 84 | float k1, k2, k3; 85 | ros::Duration detectionDuration; 86 | ros::Duration followingDuration; 87 | float aux; 88 | int idMaxSc; 89 | 90 | // Goal publication variables 91 | geometry_msgs::Twist base_cmd; 92 | ros::Publisher cmd_vel_pub_; 93 | // Creation of a convenience typedef for a SimpleActionClient to allow us to communicate with actions 94 | typedef actionlib::SimpleActionClient MoveBaseClient; 95 | // Action Client construction 96 | MoveBaseClient ac; 97 | 98 | string frame; 99 | }; 100 | 101 | 102 | #endif 103 | -------------------------------------------------------------------------------- /include/hdetect/lib/annotator.hpp: -------------------------------------------------------------------------------- 1 | #ifndef ANNOTATOR_HPP 2 | #define ANNOTATOR_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | /** 14 | * It's a class used for the annotation of the laser and the cropped image data. 
15 | */ 16 | class annotator : public visualizer 17 | { 18 | public: 19 | /** 20 | * 21 | * @param bagFile This is the name of bag file where the output data will be written 22 | */ 23 | annotator(std::string bagFile); 24 | ~annotator(); 25 | 26 | /** 27 | * @param image Image message 28 | * @param cInfo CameraInfo message 29 | * @param lScan LaserScan message 30 | */ 31 | void annotateData(const sensor_msgs::Image::ConstPtr &image, 32 | const sensor_msgs::LaserScan::ConstPtr &lScan); 33 | 34 | private: 35 | 36 | /// Curent scan number 37 | int scanNo; 38 | 39 | std::string bagDir; 40 | 41 | /// File handle for CSV output. 42 | FILE *f; 43 | 44 | /// For writing to bag format. 45 | rosbag::Bag bag; 46 | 47 | std::deque prev_points; 48 | 49 | void saveCSV(); 50 | 51 | /** 52 | * @param image Image message 53 | * @param cInfo CameraInfo message 54 | * @param lScan LaserScan message 55 | */ 56 | void saveBag(const sensor_msgs::Image::ConstPtr &image, 57 | const sensor_msgs::LaserScan::ConstPtr &lScan); 58 | 59 | float calculateEucDis(geometry_msgs::Point32 &point1, geometry_msgs::Point32 &point2); 60 | }; 61 | #endif 62 | -------------------------------------------------------------------------------- /include/hdetect/lib/bagReader.hpp: -------------------------------------------------------------------------------- 1 | #ifndef BAGREADER_HPP 2 | #define BAGREADER_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | #include 19 | 20 | /** 21 | * Opens a bag file and reads its data 22 | */ 23 | class bagReader 24 | { 25 | private: 26 | ros::NodeHandle nh_; 27 | 28 | // The clock publisher 29 | ros::Publisher clock_pub_; 30 | rosgraph_msgs::Clock clockmsg; 31 | 32 | std::string bagFile; 33 | 34 | public: 35 | /** 36 | * @param fname The filename of the bag file 37 | */ 38 | bagReader(std::string fname); 39 | ~bagReader(); 40 | /** 41 | * 42 | * @param topics A vector of the topics to be read 43 | * @param cameraImage_sub_ Subscriber to handle the camera messages 44 | * @param cameraInfo_sub_ Subscriber to handle the camera info messages 45 | * @param laserScan_sub_ Subscriber to handle the laser scan messages 46 | */ 47 | void loadBag(std::vector topics, 48 | bagSubscriber &cameraImage_sub_, 49 | bagSubscriber &laserScan_sub_); 50 | }; 51 | 52 | #endif 53 | -------------------------------------------------------------------------------- /include/hdetect/lib/bagSubscriber.hpp: -------------------------------------------------------------------------------- 1 | #ifndef BAGSUBSCRIBER_HPP 2 | #define BAGSUBSCRIBER_HPP 3 | 4 | #include 5 | /* 6 | * bagSubscriber.hpp 7 | * 8 | * Created on: Nov 29, 2012 9 | * Author: kabamaru 10 | */ 11 | 12 | 13 | /** 14 | * Inherits from message_filters::SimpleFilter to use protected signalMessage function 15 | * A dummy class bases on SimpleFilter. It's being fed with the messages read from the bag. 
16 | */
17 | template <class M>
18 | class bagSubscriber : public message_filters::SimpleFilter<M>
19 | {
20 | public:
21 | bagSubscriber() {}
22 | /**
23 | * It sends a message to the subscriber
24 | * @param msg The message variable
25 | */
26 | void newMessage(const boost::shared_ptr<M const> &msg)
27 | {
28 | this->signalMessage(msg);
29 | }
30 | };
31 |
32 | #endif
33 |
-------------------------------------------------------------------------------- /include/hdetect/lib/detector.hpp: --------------------------------------------------------------------------------
1 | #ifndef DETECTOR_HPP
2 | #define DETECTOR_HPP
3 |
4 | // ROS
5 | #include
6 | #include
7 | #include
8 | // OPENCV
9 | #include
10 | #include
11 | #include
12 | // MY INCLUDES
13 | #include
14 | #include
15 | #include
16 |
17 |
18 | /// A structure to hold all the parameters needed by the detector
19 | class detectorParameters
20 | {
21 | public:
22 | /// Width of laser window
23 | int laser_window_width;
24 |
25 | /// Height of laser window
26 | int laser_window_height;
27 |
28 | /// If the rectified image is used or not. For projection purposes.
29 | int rect;
30 |
31 | /// The number of features is loaded as a parameter
32 | /// Also used in laserLib for defining the feature set
33 | int no_features;
34 |
35 | /// The HoG group threshold is defined as a parameter (normally 2)
36 | int hog_group_threshold;
37 |
38 | /// Defines if HoG multiscale detection uses meanshift clustering (default 1)
39 | int hog_meanshift;
40 |
41 | /// The HoG SVM classifier bias (normally 0)
42 | int hog_hit_threshold;
43 |
44 | /// The camera info is loaded from the file "yaml/camera_calib.yaml"
45 | sensor_msgs::CameraInfo cInfo;
46 |
47 | /// The hz timeout for the tf (normally below the slower sensor)
48 | int tf_timeout;
49 |
50 | /// The sigmoid parameters of the laser and the camera classifiers
51 | /// They are provided by Platt scaling
52 | double laserA, laserB;
53 | double cameraA, cameraB;
54 |
55 | /// The meter to pixels ratio based on the camera sensor, the lens and the size of the ROI
56 | double m_to_pixels;
57 | /// Upper/lower body ratio
58 | double body_ratio;
59 |
60 | /// Jumping distance for the laser segmentation
61 | double jumpdist;
62 |
63 | /// Feature set (0 = 17, 1 = 63, 2 = 73)
64 | int feature_set;
65 |
66 | /// Maximum allowed laser range
67 | double laser_range;
68 |
69 | /// Threshold of fusion probability
70 | double fusion_prob;
71 |
72 | /// Minimum acceptable camera probability
73 | double min_camera_prob;
74 |
75 | /// Minimum acceptable laser probability
76 | double min_laser_prob;
77 |
78 | /// Maximum Euclidean distance for pairing
79 | double max_euc_dist;//: 3.5 # Maximum Euclidean distance
80 |
81 | /// Maximum Mahalanobis distance for pairing
82 | double max_mah_dist;//: 4. # Maximum Mahalanobis distance
83 |
84 | /// Initial Human ID
85 | int init_id;//: 1 # Initial Human ID
86 |
87 | /// Score given to a detection when initialized
88 | double new_object_score;//: 4.0 # Score given to a detection when initialized
89 |
90 | /// Score for prediction (must be negative)
91 | double predict_score; //: -1.0 # Score for prediction (must be negative)
92 |
93 | /// Score for detection positive
94 | double update_score;//: 1.0 # Score for detection positive
95 |
96 | /// Value to consider a detection "Real"
97 | double min_add_score;//: 8
98 |
99 | };
100 |
101 | class detector
102 | {
103 | protected:
104 |
105 | /// The projection matrices in opencv format
106 | /// Camera matrix
107 | cv::Mat K;
108 | /// Distortion coeffs
109 | cv::Mat D;
110 |
111 | /// Detector parameters
112 | detectorParameters params;
113 |
114 | /// Local node handle, used to get the file parameters, subscribers, publishers etc.
115 | ros::NodeHandle nh;
116 |
117 | /// Used to listen for the transform between the laser and the camera.
118 | tf::TransformListener tf_listener_;
119 | tf::StampedTransform transform;
120 |
121 | /// When the image was captured.
122 | ros::Time acquisition_time;
123 |
124 | /// Point projected to pixel coordinates.
125 | cv::Point2d prPixel;
126 |
127 | /// Crop box corners
128 | cv::Rect rect;
129 |
130 | /// Rectangle of laser window
131 | cv::Size window_size;
132 |
133 | /// Object used to do the low level segmentation and feature extraction from the scan.
134 | laserLib *laserProcessor;
135 |
136 | /// A pointer to the opencv converted image.
137 | cv_bridge::CvImagePtr cv_ptr;
138 |
139 |
140 | // The adaboost detector for the laser
141 | CvBoost boost;
142 |
143 | // The laser feature matrix
144 | cv::Mat lFeatures;
145 |
146 | // The HoG detector for the image
147 | cv::HOGDescriptor hog;
148 |
149 | /// Mat where the temporary crop will be saved
150 | cv::Mat crop;
151 |
152 | // Vectors to hold the class and the probability of the ROI
153 | std::vector hogFound;
154 | std::vector hogPred;
155 |
156 | // Publishes the detected humans coordinates and probability
157 | ros::Publisher detectionPublisher;
158 |
159 | /// Contains the laser clusters, features, annotation, whether it should be fused, etc.
160 | std::vector clusterData;
161 |
162 | /// Contains the coordinates, the labels and the probabilities of the detections
163 | hdetect::ClusterClass detections;
164 |
165 | /// Does the laser segmentation, feature extraction etc. into scanClusters
166 | void processLaser(const sensor_msgs::LaserScan::ConstPtr &lScan, std::vector &clusterData);
167 |
168 | /// Brings sensor_msgs::Image to an opencv accessible pointer.
169 | void getImage(const sensor_msgs::Image::ConstPtr &image); 170 | 171 | /// Returns the transform between image and lScan 172 | void getTF(const sensor_msgs::Image::ConstPtr &image, const sensor_msgs::LaserScan::ConstPtr &lScan); 173 | 174 | /// Initialize cog_projected, image_projected, probs and label to default 175 | /// Uses directly clusterData 176 | void initClusterData(std::vector &clusterData); 177 | 178 | /// Does the rest of the laser processing, find projected and fused segments 179 | /// Uses directly scanClusters 180 | void findProjectedClusters(std::vector &clusterData); 181 | 182 | /// Detects if there is a pedestrian in the cluster and or ROI 183 | /// Gives the probabilities and the class output of each detector 184 | void detectFusion(std::vector &clusterData, hdetect::ClusterClass &detections); 185 | 186 | /// Detects if there is a pedestrian using only the camera ROI 187 | /// Gives the probabilities and the class output of each detector 188 | void detectCamera(hdetect::ClusteredScan &clusterData); 189 | 190 | /// Finds the class and the probability for a given sample of laser features 191 | void classifyLaser(std_msgs::Float32MultiArray &features, float &prob); 192 | 193 | /// Finds the class and the probability for a given crop of the image 194 | void classifyCamera(geometry_msgs::Point32 &cog, float &prob); 195 | 196 | public: 197 | detector(); 198 | ~detector(); 199 | 200 | /** Extracts all the info from an imaga/laserscan pair 201 | * First it segments the laser scan and finds the cog for each cluster. 202 | * Then translates each cog into the corresponding pixel values. 203 | * Crops the ROI from the image. 204 | * 205 | * @param image Image message 206 | * @param lScan LaserScan message 207 | */ 208 | void detectHumans(const sensor_msgs::Image::ConstPtr &image, const sensor_msgs::LaserScan::ConstPtr &lScan); 209 | 210 | /** 211 | * Used only for annotation purposes 212 | * @param cs 213 | */ 214 | //void setClusters(hdetect::ClusteredScan cs); 215 | //hdetect::ClusteredScan getClusters(); 216 | }; 217 | #endif 218 | -------------------------------------------------------------------------------- /include/hdetect/lib/header.hpp: -------------------------------------------------------------------------------- 1 | #ifndef HEADER_HPP 2 | #define HEADER_HPP 3 | 4 | #include 5 | 6 | namespace Header 7 | { 8 | enum 9 | { 10 | NOT_HUMAN = -1, 11 | OTHER, 12 | FUSION_HUMAN, 13 | LASER_HUMAN, 14 | LABEL_HUMAN 15 | }; 16 | 17 | const std::string imageTopic = "/recognizeRT/imageTopic"; 18 | 19 | extern float curTimestamp; 20 | extern float preTimestamp; 21 | } 22 | 23 | #endif // HEADER_HPP 24 | -------------------------------------------------------------------------------- /include/hdetect/lib/human.hpp: -------------------------------------------------------------------------------- 1 | #ifndef HUMAN_HPP 2 | #define HUMAN_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | class Human 9 | { 10 | public: 11 | int id; 12 | 13 | float score; 14 | 15 | NEWMAT::ColumnVector state; 16 | NEWMAT::Matrix cov; 17 | 18 | NEWMAT::ColumnVector preState; 19 | float preTimestamp; 20 | 21 | // Variable for scoring process 22 | ros::Time firstTimestamp; // First detection time 23 | 24 | Human(int id, float score, NEWMAT::ColumnVector state, NEWMAT::Matrix cov, int preTimestamp); 25 | 26 | geometry_msgs::Point toPoint(); 27 | }; 28 | 29 | #endif // HUMAN_HPP 30 | -------------------------------------------------------------------------------- /include/hdetect/lib/laserLib.hpp: 
-------------------------------------------------------------------------------- 1 | #ifndef LASERLIB_HPP 2 | #define LASERLIB_HPP 3 | 4 | //#include "sensor_msgs/LaserScan.h" 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | /** 13 | * A class used as a ROS wrapper for the lengine class. 14 | */ 15 | class laserLib { 16 | private: 17 | 18 | /// Needed by getClusters() 19 | Point3D_str cogL; 20 | Point3D_str origin; 21 | geometry_msgs::Point32 pt32; 22 | 23 | /// The shadow filter to preprocess the laser 24 | filters::FilterChain laserFilter; 25 | /// The filtered scan 26 | sensor_msgs::LaserScan filtScan; 27 | 28 | /// This is a people2d_engine class to hold the laser scan data for it. 29 | laserscan_data libScanData; 30 | 31 | /// people2d_engine object used to make the segmentation and compute the features. 32 | lengine *libEngine; 33 | 34 | /// Parameters of the libEngine. Initialized in constructor. 35 | lengine_params libEngineParams; 36 | 37 | /// Vector to hold the clusters of each scan in people2d_engine format 38 | std::vector clusters; // lgeometry.hpp 39 | 40 | /// The feature vector of the lengine format 41 | std::vector < std::vector > descriptor; 42 | 43 | float angle_min; 44 | float angle_max; 45 | float angle_inc; 46 | 47 | // Helping variables to copy the header to the new msgs 48 | ros::Time stamp; 49 | uint32_t seq; 50 | std::string frame_id; 51 | 52 | /** 53 | * 54 | * @param ls[in] The input scan to be converted 55 | * @param libScan [out] The laserscan_data variable to hold the lengine compatible data 56 | */ 57 | void scan2lib(sensor_msgs::LaserScan &ls, laserscan_data &libScan); 58 | 59 | /** 60 | * 61 | * @param features[out] Exported cluster features. 62 | */ 63 | void features2ROS(std::vector &clusterData); 64 | 65 | public: 66 | /// Null constructor 67 | laserLib(); 68 | ~laserLib(); 69 | 70 | /** 71 | * Constructor that receiver parameters from the calling class (e.g. detector) 72 | * @param jumpdist The cluster segmentation distance 73 | * @param feature_set Feature set to be used (0 = 17, 1 = 63, 2 = 73) 74 | * @param laser_range The maximum trusted laser range 75 | */ 76 | laserLib(double jumpdist, int feature_set, double laser_range); 77 | 78 | /** 79 | * 80 | * @param features[out] Exported cluster features 81 | */ 82 | void getFeatures(std::vector &clusterData); 83 | 84 | /** 85 | * 86 | * @param features[out] Where the clusters are going to be exported. 
87 | */ 88 | void getClusters(std::vector &clusterData); 89 | 90 | /** 91 | * 92 | * @param ls LaserScan to be loaded 93 | */ 94 | void loadScan(sensor_msgs::LaserScan ls); 95 | 96 | void getHeader(std_msgs::Header &header); 97 | }; 98 | 99 | #endif 100 | -------------------------------------------------------------------------------- /include/hdetect/lib/lengine.hpp: -------------------------------------------------------------------------------- 1 | /***************************************************************** 2 | * 3 | * This file is part of the People2D project 4 | * 5 | * People2D Copyright (c) 2011 Luciano Spinello 6 | * 7 | * This software is licensed under the "Creative Commons 8 | * License (Attribution-NonCommercial-ShareAlike 3.0)" 9 | * and is copyrighted by Luciano Spinello 10 | * 11 | * Further information on this license can be found at: 12 | * http://creativecommons.org/licenses/by-nc-sa/3.0/ 13 | * 14 | * People2D is distributed in the hope that it will be useful, 15 | * but WITHOUT ANY WARRANTY; without even the implied 16 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 17 | * PURPOSE. 18 | * 19 | *****************************************************************/ 20 | #ifndef LENGINE_ 21 | #define LENGINE_H 22 | 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | #include 31 | 32 | typedef struct 33 | { 34 | double jumpdist, laser_range; 35 | int feature_set; 36 | char segonly, sanity, verbosity; 37 | } lengine_params; 38 | 39 | 40 | class laserscan_data 41 | { 42 | public: 43 | 44 | Point3D_container data; 45 | double timestamp; 46 | 47 | laserscan_data() { } 48 | laserscan_data(int num) 49 | { 50 | data = Point3D_container(num); 51 | } 52 | }; 53 | 54 | class lengine 55 | { 56 | private: 57 | 58 | lengine_params params; 59 | std::vector laserscan; 60 | 61 | int get_breakpoint(std::vector &pts, int last_breaking_idx); 62 | 63 | int sanity_check(std::vector > & descriptor); 64 | 65 | // UPM 66 | // instead of using the vector laserscan we will be using a 67 | // single laserscan_data object 68 | laserscan_data laserscan_single; 69 | 70 | public: 71 | 72 | const static uint feature_set_size[]; 73 | 74 | int load_scandata(std::string fname); 75 | 76 | lengine(lengine_params param_in) 77 | { 78 | params = param_in; 79 | } 80 | 81 | void set_featureset(); 82 | 83 | // UPM functions 84 | // Null constructor 85 | // lengine() { } 86 | 87 | void load_scandata(laserscan_data laserscan_input) 88 | { 89 | laserscan_single = laserscan_input; 90 | } 91 | 92 | int getScanSize() 93 | { 94 | return laserscan_single.data.pts.size(); 95 | } 96 | 97 | int segmentscanJDC(std::vector &clusters); 98 | 99 | void computeFeatures(std::vector &clusters, std::vector > &descriptor); 100 | 101 | }; 102 | #endif 103 | -------------------------------------------------------------------------------- /include/hdetect/lib/lfeatures.hpp: -------------------------------------------------------------------------------- 1 | /***************************************************************** 2 | * 3 | * This file is part of the People2D project 4 | * 5 | * People2D Copyright (c) 2011 Luciano Spinello 6 | * 7 | * This software is licensed under the "Creative Commons 8 | * License (Attribution-NonCommercial-ShareAlike 3.0)" 9 | * and is copyrighted by Luciano Spinello 10 | * 11 | * Further information on this license can be found at: 12 | * http://creativecommons.org/licenses/by-nc-sa/3.0/ 13 | * 14 | * People2D is distributed in the hope that it will be 
useful, 15 | * but WITHOUT ANY WARRANTY; without even the implied 16 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 17 | * PURPOSE. 18 | * 19 | *****************************************************************/ 20 | #ifndef LIBFEATURES_H 21 | #define LIBFEATURES_H 22 | 23 | #include 24 | #include 25 | #include 26 | 27 | #include 28 | 29 | #define L_MINCLUSTERSZ 3 30 | #define L_EPSNUM 1e-10 31 | #define L_HUGENUM 1e10 32 | 33 | #define FEATURE_SET_0 17 34 | #define FEATURE_SET_1 63 35 | #define FEATURE_BASIS 13 36 | 37 | typedef float Real; 38 | 39 | class lfeatures_class 40 | { 41 | private: 42 | std::vector feature_descr; 43 | void descr(void); 44 | int feature_01(Point3D_container *laserfeat_cluster, Real *out); 45 | int feature_02(Point3D_container *laserfeat_cluster, Real *out); 46 | int feature_03(Point3D_container *laserfeat_cluster, Real *out); 47 | int feature_04(Point3D_container *laserfeat_cluster, Real *out); 48 | int feature_05(Point3D_container *laserfeat_cluster, Real *out); 49 | int feature_06(Point3D_container *laserfeat_cluster, Real *out); 50 | int feature_07(Point3D_container *laserfeat_cluster, Real *out); 51 | int feature_08(Point3D_container *laserfeat_cluster, Real *out); 52 | int feature_09(Point3D_container *laserfeat_cluster, Real *out); 53 | int feature_10(Point3D_container *laserfeat_cluster, Real *out); 54 | int feature_11(Point3D_container *laserfeat_cluster, Real *out); 55 | int feature_12(Point3D_container *laserfeat_cluster, Real *out); 56 | int feature_13(Point3D_container *laserfeat_cluster, Real *out); 57 | int feature_14(Point3D_container *laserfeat_cluster, Real *out); 58 | int feature_15(Point3D_container *laserfeat_cluster, Real *out); 59 | int feature_16(Point3D_container *laserfeat_cluster, Real *out); 60 | int feature_01_comparative(std::vector &all_laserfeat_cluster, uint curidx, Real *out); 61 | 62 | int feature_size; 63 | int feature_set; 64 | 65 | #define CALL_MEMBER_FN(object,ptrToMember) ((object).*(ptrToMember)) 66 | typedef int (lfeatures_class::*ptr_feature)(Point3D_container*, Real*); 67 | std::vector ptr_feature_vector; 68 | 69 | #define CALL_MEMBER_FN_CMP(object,ptrToMember) ((object).*(ptrToMember)) 70 | typedef int (lfeatures_class::*ptr_feature_cmp)(std::vector &, uint, Real*); 71 | std::vector ptr_feature_vector_cmp; 72 | 73 | public: 74 | lfeatures_class(int feature_set); 75 | void compute_descriptor(std::vector &all_laserfeat_cluster, std::vector< std::vector > &descriptor); 76 | 77 | }; 78 | #endif 79 | -------------------------------------------------------------------------------- /include/hdetect/lib/lgeometry.hpp: -------------------------------------------------------------------------------- 1 | /***************************************************************** 2 | * 3 | * This file is part of the People2D project 4 | * 5 | * People2D Copyright (c) 2011 Luciano Spinello 6 | * 7 | * This software is licensed under the "Creative Commons 8 | * License (Attribution-NonCommercial-ShareAlike 3.0)" 9 | * and is copyrighted by Luciano Spinello 10 | * 11 | * Further information on this license can be found at: 12 | * http://creativecommons.org/licenses/by-nc-sa/3.0/ 13 | * 14 | * People2D is distributed in the hope that it will be useful, 15 | * but WITHOUT ANY WARRANTY; without even the implied 16 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 17 | * PURPOSE. 
18 | * 19 | *****************************************************************/ 20 | 21 | 22 | 23 | #ifndef LIBGEOMETRY_H 24 | #define LIBGEOMETRY_H 25 | 26 | // C++ specific 27 | #include 28 | #include 29 | #include 30 | 31 | // C specific 32 | #include 33 | 34 | // GSL lib 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | 43 | // defines 44 | #define GEOMETRY_COORD_X 0 45 | #define GEOMETRY_COORD_Y 1 46 | #define GEOMETRY_COORD_Z 2 47 | 48 | #define GEOMETRY_HUGENUMBER 2000000 49 | #define GEOMETRY_DWARFNUMBER 1e-6 50 | 51 | #define GEOMETRY_MAX(a, b) (((a) > (b)) ? (a) : (b)) 52 | #define GEOMETRY_MIN(a, b) (((a) < (b)) ? (a) : (b)) 53 | 54 | typedef struct 55 | { 56 | double x; 57 | double y; 58 | double z; 59 | int ch; 60 | int status; 61 | double w; 62 | double tag; 63 | int label; 64 | int id; 65 | }Point3D_str; 66 | 67 | typedef struct 68 | { 69 | double theta; 70 | double alpha; 71 | double rho; 72 | int ch; 73 | int status; 74 | double w; 75 | int label; 76 | int id; 77 | double tag; 78 | }Point3D_polar_str; 79 | 80 | class Point3D_container 81 | { 82 | private: 83 | 84 | 85 | public: 86 | std::vector pts; 87 | 88 | // constructor 89 | Point3D_container(); 90 | Point3D_container(uint sz); 91 | Point3D_container(std::vector &ptvec); 92 | 93 | // compute cog 94 | void compute_cog(Point3D_str *pts_out); 95 | 96 | // get x y or z 97 | void get_coords(std::vector &pts_coord, char coord_sel); 98 | 99 | // compute cog 100 | void conv2polar(std::vector &pts_polar_out); 101 | 102 | // destructor 103 | ~Point3D_container(); 104 | }; 105 | 106 | 107 | 108 | // p2p 109 | double distance_L2_XY (Point3D_str *pt0, Point3D_str *pt1); 110 | double distance_L2_XY_sqr (Point3D_str *pt0, Point3D_str *pt1); 111 | 112 | // convertstuf 113 | void conv2polar_func(std::vector & pts_in, std::vector & pts_polar_out); 114 | void conv2cart_func(std::vector & pts_polar_in, std::vector & pts_out); 115 | 116 | // order points 117 | void order_bytheta (std::vector &pts_polar_out); 118 | void order_bytheta_incart (std::vector & pts_out); 119 | 120 | //~ line param and circle param from a set of pts 121 | void get_line_param(Point3D_container *laserfeat_cluster, Point3D_str *line_param); 122 | void get_circle_param(Point3D_container *laserfeat_cluster, Point3D_str *circle_param); 123 | 124 | 125 | #endif 126 | -------------------------------------------------------------------------------- /include/hdetect/lib/object_tracking.hpp: -------------------------------------------------------------------------------- 1 | #ifndef OBJECTDETECTION_HPP 2 | #define OBJECTDETECTION_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | namespace ObjectTracking 16 | { 17 | void loadCfg(detectorParameters params); 18 | void loadCfg(std::string cfg); 19 | 20 | void eliminate(deque &humans); 21 | void predict(deque &humans); 22 | void pair(deque &humans, deque &observations, map &pairs); 23 | void update(deque &humans, deque &observations, map &pairs); 24 | 25 | float calculateMahDis(Observation &observation, Human &human); 26 | float calculateEucDis(Observation &observation, Human &human); 27 | } 28 | 29 | #endif // OBJECTDETECTION_HPP 30 | -------------------------------------------------------------------------------- /include/hdetect/lib/observation.hpp: -------------------------------------------------------------------------------- 1 | #ifndef OBSERVATION_HPP 2 | #define OBSERVATION_HPP 
3 | 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | 10 | class Observation 11 | { 12 | public: 13 | Observation(int scan_index, float prob, bool camera_detected, geometry_msgs::Point32 &pos); 14 | Observation(int scan_index, float prob, bool camera_detected, geometry_msgs::Point32 &pos, cv::Mat &image, cv::Rect &rect); 15 | 16 | int scan_index; 17 | 18 | float prob; 19 | 20 | bool camera_detected; 21 | 22 | NEWMAT::ColumnVector state; 23 | 24 | cv::Mat image; 25 | cv::Rect rect; 26 | }; 27 | 28 | #endif // OBSERVATION_HPP 29 | -------------------------------------------------------------------------------- /include/hdetect/lib/projectTools.hpp: -------------------------------------------------------------------------------- 1 | #ifndef PROJECTTOOLS_HPP 2 | #define PROJECTTOOLS_HPP 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | 10 | /** 11 | * 12 | * @param rng A random generator. 13 | * @return A vector of colors. 14 | */ 15 | std::vector randomColors(cv::RNG& rng); 16 | 17 | 18 | /** 19 | * @param[out] roi The mat to hold the cropped region 20 | * @param[in] image The image mat 21 | * @param[in] upleft Upperleft point of the crop in pixels 22 | * @param[in] boxSize The crop size (horizontaly) 23 | */ 24 | void getCrop(cv::Mat &roi, cv::Mat &image, cv::Rect &rect); 25 | 26 | /** 27 | * 28 | * @param[in] it The laser point. Used to compute the distance of the box. 29 | * @param[in] pt2D The point in pixel coords. 30 | * @param[out] rect Rectangle of the box in pixel coords. 31 | * @param[in] M_TO_PIXELS The meter to pixels ration 32 | * @param[in] BODY_RATIO The ratio of the upper body part to the lower body part 33 | */ 34 | void getBox(geometry_msgs::Point32 &it, cv::Point2d &pt2D, cv::Rect &rect, double &M_TO_PIXELS, double &BODY_RATIO); 35 | 36 | /** 37 | * 38 | * @param cInfo The camera info 39 | * @param upleft Upper left corner of the box in pixel coords. 40 | * @param downright Lower right corner of the box in pixel coords. 41 | * @return TRUE if the box lies into image, FALSE if not 42 | */ 43 | bool checkBox(sensor_msgs::CameraInfo &cInfo, cv::Rect &rect); 44 | 45 | /** 46 | * 47 | * @param[in] pointIn Input point in cartesian x,y coords. 48 | * @param[out] pointOut Point projected to pixel coords. 49 | * @param[in] cam_info The camera info variable. Contains focal length and distortion coeffs. 50 | * @param[in] transform The transformation between the camera and the laser 51 | * @param[in] rect Set to RECT or NO_RECT if using the rectified image or not. 52 | * 53 | */ 54 | void projectPoint(geometry_msgs::Point32 &pointIn, cv::Point2d &pointOut, cv::Mat &K, cv::Mat &D, 55 | tf::StampedTransform &transform); 56 | 57 | /** 58 | * Converts a point to the laser plane image coordinates 59 | * @param ptIn 60 | * @param ptOut 61 | * @param zoom 62 | */ 63 | void pointToPlane(geometry_msgs::Point32 &ptIn, cv::Point &ptOut, cv::Size &windowSize, int &zoom); 64 | 65 | /** 66 | * Converts the camera info to opencv ready mats. 
67 | * @param[in] cInfo 68 | * @param[out] K 69 | * @param[out] D 70 | */ 71 | void CameraInfo2CV(sensor_msgs::CameraInfo &cInfo, cv::Mat &K, cv::Mat &D, int &rect); 72 | 73 | #endif 74 | -------------------------------------------------------------------------------- /include/hdetect/lib/recognizer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef RECOGNIZER_HPP 2 | #define RECOGNIZER_HPP 3 | 4 | // STANDARD 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | // ROS 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | // OPENCV 20 | #include 21 | 22 | // NEWMAT 23 | #include 24 | 25 | // MY INCLUDES 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | 33 | class Recognizer : public detector 34 | { 35 | public: 36 | Recognizer(); 37 | ~Recognizer(); 38 | 39 | void recognizeData(const sensor_msgs::Image::ConstPtr &image, 40 | const sensor_msgs::LaserScan::ConstPtr &lScan); 41 | 42 | protected: 43 | image_transport::ImageTransport it_; 44 | image_transport::Publisher it_pub_; 45 | 46 | ros::Subscriber odom_sub_; 47 | ros::Subscriber odom_ekf_sub_; 48 | ros::Subscriber pos_amcl_sub_; 49 | 50 | ros::Publisher rviz_pub_; 51 | 52 | cv_bridge::CvImagePtr output_cv_ptr; 53 | 54 | /// Raw image mat. 55 | cv::Mat rawImage; 56 | 57 | std::deque humans; 58 | std::deque observations; 59 | std::map pairs; 60 | 61 | std::deque colors; 62 | 63 | // Odometry 64 | tf::Transform cur_odom; 65 | tf::Transform pre_odom; 66 | 67 | 68 | // Odometry Ekf 69 | tf::Transform cur_odom_ekf; 70 | tf::Transform pre_odom_ekf; 71 | 72 | // Using AMCL localization 73 | tf::Transform pre_amcl; 74 | tf::Transform cur_amcl; 75 | 76 | private: 77 | 78 | enum Rviz 79 | { 80 | RVIZ_NONE = 0, 81 | RVIZ_HUMAN, 82 | RVIZ_ID, 83 | RVIZ_LINE, 84 | RVIZ_SCAN, 85 | RVIZ_TOTAL 86 | }; 87 | 88 | bool with_odom; 89 | 90 | bool with_odom_ekf; 91 | 92 | bool with_amcl; 93 | 94 | bool use_amcl; 95 | 96 | int rviz_id[RVIZ_TOTAL]; 97 | 98 | void initColor(); 99 | 100 | void loadObservation(); 101 | 102 | void publish(const sensor_msgs::LaserScan::ConstPtr &lScan); 103 | 104 | void setOdom(const nav_msgs::Odometry &odom); 105 | 106 | void setOdomEkf(const geometry_msgs::PoseWithCovarianceStamped &odom); 107 | 108 | void setPosAMCL(const geometry_msgs::PoseWithCovarianceStamped &posAMCL); 109 | 110 | void correctOdom(NEWMAT::ColumnVector &state); 111 | 112 | void correctOdomEkf(NEWMAT::ColumnVector &state); 113 | 114 | void correctPosAMCL(NEWMAT::ColumnVector &state); 115 | 116 | void setPoint(float x, float y, float z, geometry_msgs::Point &p); 117 | 118 | cv::Scalar getColor(int index); 119 | 120 | std_msgs::ColorRGBA getColorRgba(int index, float a); 121 | 122 | float getTimestamp(); 123 | 124 | std::string toString(float num); 125 | 126 | void resetId(); 127 | 128 | uint getId(int index, int category); 129 | 130 | uint getNewId(); 131 | 132 | uint getNewId(int category); 133 | 134 | string laser_frame_id; 135 | 136 | // Publisher variables 137 | ros::Publisher human_publisher; 138 | ros::Publisher best_pose_pub; 139 | hdetect::HumansFeat human_aux; 140 | std::vector humans_vec; 141 | hdetect::HumansFeatClass humans_detected; 142 | 143 | }; 144 | 145 | #endif // RECOGNIZER_HPP 146 | -------------------------------------------------------------------------------- /include/hdetect/lib/visualizer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef 
VISUALIZER_HPP 2 | #define VISUALIZER_HPP 3 | 4 | // ROS_INCLUDES 5 | #include 6 | // OPENCV INCLUDES 7 | #include 8 | 9 | #include "hdetect/lib/detector.hpp" 10 | 11 | /// A class for visualizing the segmentation and detection process of our system. 12 | class visualizer : public detector 13 | { 14 | protected: 15 | 16 | /// Publisher used to count the hz rate of the node 17 | ros::Publisher pub; 18 | std_msgs::Byte dummy; 19 | 20 | CvFont font_; 21 | 22 | /// The zoom used to create the plane. 23 | int zoom; 24 | 25 | /// Mat to draw the laser plane. 26 | cv::Mat laserPlane; 27 | 28 | /// Color image mat. 29 | cv::Mat colorImage; 30 | 31 | /// Keeps the colors to draw the segments 32 | std::vector<cv::Scalar> pallete; 33 | 34 | /// Holds the color for each cluster. 35 | /// Based on where it is located so we have less variation in color appearance. 36 | cv::Scalar color; 37 | 38 | /// Output string stream for visualization purposes. 39 | std::ostringstream ss; 40 | 41 | public: 42 | visualizer(); 43 | ~visualizer(); 44 | 45 | /// Returns the color of a point according to its position 46 | cv::Scalar getColor(geometry_msgs::Point32 &point); 47 | 48 | /// Returns the ClusteredScan from the object 49 | // std::vector &getClusteredData(); 50 | 51 | /// Set the laserplane for visualizing purposes 52 | /// @param lp The computed laser plane image. 53 | // void setLaserPlane(cv::Mat lp); 54 | 55 | /** 56 | * @param image Image message 57 | * 58 | * @param lScan LaserScan message 59 | */ 60 | void visualizeData(const sensor_msgs::Image::ConstPtr &image, 61 | const sensor_msgs::LaserScan::ConstPtr &lScan); 62 | 63 | private: 64 | /** 65 | * Absolute value of Point32 66 | */ 67 | float distance(geometry_msgs::Point32 point); 68 | 69 | /** 70 | * Draw the laser scan in a 2d plane 71 | * It's a callback function for the zoom trackbar handler, that's why it's a bit fuzzy. 72 | * The obj_class class must be a visualizer or a derived class. 73 | * @param zoom The zoom factor 74 | * 75 | */ 76 | void plotLaser(int zoom); 77 | 78 | std::deque<cv::Scalar> colors; 79 | }; 80 | #endif 81 | -------------------------------------------------------------------------------- /include/hdetect/recognizeRT.hpp: -------------------------------------------------------------------------------- 1 | #ifndef RECOGNIZERT_HPP 2 | #define RECOGNIZERT_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | 15 | #include 16 | #include 17 | #include 18 | 19 | /** 20 | * A node to set up all things needed for recognition. 21 | * Also used for the annotation. 22 | * The topic names are given from the command line. 23 | * @author Bang-Cheng Wang 24 | * @date 2013/10/01 25 | */ 26 | 27 | class recognizeRT 28 | { 29 | public: 30 | recognizeRT(std::string cameraTopic, std::string laserTopic); 31 | 32 | ~recognizeRT(); 33 | 34 | private: 35 | 36 | ros::NodeHandle nh; 37 | 38 | /// Subscriber to the camera image topic 39 | message_filters::Subscriber<sensor_msgs::Image> cameraImage_sub_; 40 | /// Subscriber to the laser scan topic 41 | message_filters::Subscriber<sensor_msgs::LaserScan> laserScan_sub_; 42 | 43 | /** 44 | * An approximate time policy to synchronize the image and laser messages. 45 | * This sync policy waits for both messages to arrive before it invokes the callback 46 | * from the annotator object.
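 * (With the ApproximateTime policy the image and the scan do not need
 * identical timestamps; message_filters matches the closest pairs it can form.)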
47 | */ 48 | typedef message_filters::sync_policies::ApproximateTime MySyncPolicy; 49 | 50 | /** The synchronizer based on the three messages policy */ 51 | message_filters::Synchronizer *sync; 52 | 53 | // Declare the topics 54 | std::string cameraImageIn; 55 | std::string laserScanIn; 56 | 57 | /// The recognizer object that will be used for the callback 58 | Recognizer myRecognizer; 59 | }; 60 | 61 | #endif 62 | -------------------------------------------------------------------------------- /launches/annotateBAG.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 12 | 13 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /launches/calibrate_camera.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /launches/ff_only.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /launches/headlessBAG.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 12 | 13 | 14 | 18 | 19 | 21 | 22 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /launches/headlessRT.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 18 | 19 | 25 | 26 | 30 | 31 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /launches/recognizeBAG.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 13 | 14 | 19 | 28 | 29 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /launches/recognizeRT.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /launches/recordBAG.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /launches/recordINIT.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /launches/recordURGGS.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /launches/recordUTMFF.launch: -------------------------------------------------------------------------------- 1 | 2 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 
17 | 18 | 19 | 20 | 21 | 22 | 25 | 26 | 28 | 29 | 30 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /launches/rvizRT.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | use_sim_time: false 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 48 | 49 | 51 | 52 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /launches/showBAG.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 12 | 13 | 14 | 18 | 19 | 21 | 22 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /launches/showRT.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 18 | 19 | 25 | 26 | 30 | 31 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /launches/trainLaser.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /launches/utm_only.launch: -------------------------------------------------------------------------------- 1 | 2 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /launches/viewRect.launch: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /launches/visualizeBAG.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 12 | 13 | 14 | 18 | 19 | 21 | 22 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /launches/visualizeRT.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 18 | 19 | 25 | 26 | 30 | 31 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /msg/ClusterClass.msg: -------------------------------------------------------------------------------- 1 | # A message to contain all the info about the detected humans in each scan 2 | Header header 3 | ClusteredScan[] clusterData 4 | -------------------------------------------------------------------------------- /msg/ClusteredScan.msg: -------------------------------------------------------------------------------- 1 | # A message to contain all the info and the points of each laser cluster of the scan 2 | # clusters : the actual points of the cluster 3 | # cog : the center of gravity of each cluster 4 | # features: the extracted features for each cluster 5 | # cog_projected : whether the cog is projected onto the image 6 | # crop_projected : whether the crop is projected onto the image 7 | # detection_laser_prob : the laser detection probability 8 | # detection_camera_prob : the camera detection probability 9 | # detection_fusion_prob : the fused detection probability 10 |
# label: annotation for the label 11 | 12 | sensor_msgs/PointCloud clusters 13 | geometry_msgs/Point32 cog 14 | std_msgs/Float32MultiArray features 15 | 16 | bool cog_projected 17 | bool crop_projected 18 | 19 | int8 detection_label 20 | float32 detection_laser_prob 21 | float32 detection_camera_prob 22 | float32 detection_fusion_prob 23 | 24 | int8 label 25 | -------------------------------------------------------------------------------- /msg/HumansFeat.msg: -------------------------------------------------------------------------------- 1 | # Message to send information about Humans detected 2 | 3 | # id : The identification of the human 4 | # x : Human x position in the /laser_top frame 5 | # y : Human y position in the /laser_top frame 6 | # velx, vely : Human velocity in the /laser_top frame 7 | # detectiontime : Time when the human was first detected 8 | 9 | int16 id 10 | 11 | float32 x 12 | float32 y 13 | 14 | float32 velx 15 | float32 vely 16 | 17 | float64 detectiontime 18 | -------------------------------------------------------------------------------- /msg/HumansFeatClass.msg: -------------------------------------------------------------------------------- 1 | # Array variable message of HumansFeat 2 | Header header 3 | HumansFeat[] HumansDetected 4 | -------------------------------------------------------------------------------- /nopac.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | Uses a combination of a laser range finder and a computer vision module for pedestrian detection. 4 | The vision module works with OpenCV's detector, which uses Histograms of Oriented Gradients and Support Vector Machines. 5 | For pairing observations and tracking it uses a combination of Kalman filters and the Mahalanobis distance. 6 | 7 | 8 | Stathis Fotiadis, Bang-Cheng Wang, Mario Garzon 9 | BSD 10 | 11 | http://ros.org/wiki/upm 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | hdetect 4 | 1.0.1 5 | Human detection and tracking package 6 | Uses a combination of a laser range finder and a computer vision module for pedestrian detection. 7 | The vision module works with OpenCV's detector, which uses Histograms of Oriented Gradients and Support Vector Machines. 8 | For pairing observations and tracking it uses a combination of Kalman filters and the Mahalanobis distance.
9 | 10 | 11 | Stathis Fotiadis, Bang-Cheng Wang, Mario Garzon 12 | Mario Garzon 13 | BSD 14 | 15 | 16 | catkin 17 | roslib 18 | rosbag 19 | tf 20 | image_transport 21 | roscpp 22 | std_msgs 23 | cv_bridge 24 | 25 | laser_geometry 26 | angles 27 | 28 | sensor_msgs 29 | filters 30 | laser_filters 31 | message_filters 32 | 33 | 34 | pluginlib 35 | geometry_msgs 36 | image_geometry 37 | nav_msgs 38 | camera_calibration_parsers 39 | 40 | 41 | roslib 42 | rosbag 43 | tf 44 | image_transport 45 | roscpp 46 | std_msgs 47 | cv_bridge 48 | 49 | laser_geometry 50 | angles 51 | 52 | sensor_msgs 53 | filters 54 | laser_filters 55 | message_filters 56 | 57 | 58 | pluginlib 59 | geometry_msgs 60 | image_geometry 61 | nav_msgs 62 | camera_calibration_parsers 63 | 64 | 65 | -------------------------------------------------------------------------------- /rviz/hdetect2.rviz: -------------------------------------------------------------------------------- 1 | Panels: 2 | - Class: rviz/Displays 3 | Help Height: 0 4 | Name: Displays 5 | Property Tree Widget: 6 | Expanded: 7 | - /Global Options1 8 | - /Status1 9 | - /MarkerArray1 10 | - /Axes1 11 | Splitter Ratio: 0.5 12 | Tree Height: 515 13 | - Class: rviz/Selection 14 | Name: Selection 15 | - Class: rviz/Tool Properties 16 | Expanded: 17 | - /2D Pose Estimate1 18 | - /2D Nav Goal1 19 | - /Publish Point1 20 | Name: Tool Properties 21 | Splitter Ratio: 0.588679 22 | - Class: rviz/Views 23 | Expanded: 24 | - /Current View1 25 | Name: Views 26 | Splitter Ratio: 0.5 27 | - Class: rviz/Time 28 | Experimental: false 29 | Name: Time 30 | SyncMode: 0 31 | SyncSource: Image 32 | Visualization Manager: 33 | Class: "" 34 | Displays: 35 | - Alpha: 0.5 36 | Cell Size: 1 37 | Class: rviz/Grid 38 | Color: 160; 160; 164 39 | Enabled: true 40 | Line Style: 41 | Line Width: 0.03 42 | Value: Lines 43 | Name: Grid 44 | Normal Cell Count: 0 45 | Offset: 46 | X: 0 47 | Y: 0 48 | Z: 0 49 | Plane: XY 50 | Plane Cell Count: 20 51 | Reference Frame: 52 | Value: true 53 | - Alpha: 1 54 | Autocompute Intensity Bounds: true 55 | Autocompute Value Bounds: 56 | Max Value: 21.7783 57 | Min Value: -10.7763 58 | Value: true 59 | Axis: X 60 | Channel Name: intensity 61 | Class: rviz/LaserScan 62 | Color: 255; 255; 255 63 | Color Transformer: AxisColor 64 | Decay Time: 0 65 | Enabled: true 66 | Invert Rainbow: false 67 | Max Color: 255; 255; 255 68 | Max Intensity: 4096 69 | Min Color: 0; 0; 0 70 | Min Intensity: 0 71 | Name: LaserScan 72 | Position Transformer: XYZ 73 | Queue Size: 10 74 | Selectable: true 75 | Size (Pixels): 3 76 | Size (m): 0.05 77 | Style: Flat Squares 78 | Topic: /iri_hokuyo_laser/scan 79 | Unreliable: false 80 | Use Fixed Frame: true 81 | Use rainbow: true 82 | Value: true 83 | - Class: rviz/Image 84 | Enabled: true 85 | Image Topic: /recognizeRT/imageTopic 86 | Max Value: 1 87 | Median window: 5 88 | Min Value: 0 89 | Name: Image 90 | Normalize Range: true 91 | Queue Size: 2 92 | Transport Hint: compressed 93 | Unreliable: false 94 | Value: true 95 | - Class: rviz/MarkerArray 96 | Enabled: true 97 | Marker Topic: /recognizeRT/markers 98 | Name: MarkerArray 99 | Namespaces: 100 | "": true 101 | Queue Size: 1 102 | Value: true 103 | - Class: rviz/Axes 104 | Enabled: true 105 | Length: 1 106 | Name: Axes 107 | Radius: 0.1 108 | Reference Frame: base_link 109 | Value: true 110 | Enabled: true 111 | Global Options: 112 | Background Color: 48; 48; 48 113 | Fixed Frame: odom 114 | Frame Rate: 30 115 | Name: root 116 | Tools: 117 | - Class: rviz/Interact 118 | Hide Inactive 
Objects: true 119 | - Class: rviz/MoveCamera 120 | - Class: rviz/Select 121 | - Class: rviz/FocusCamera 122 | - Class: rviz/Measure 123 | - Class: rviz/SetInitialPose 124 | Topic: /initialpose 125 | - Class: rviz/SetGoal 126 | Topic: /move_base_simple/goal 127 | - Class: rviz/PublishPoint 128 | Single click: true 129 | Topic: /clicked_point 130 | Value: true 131 | Views: 132 | Current: 133 | Angle: -1.24 134 | Class: rviz/TopDownOrtho 135 | Enable Stereo Rendering: 136 | Stereo Eye Separation: 0.06 137 | Stereo Focal Distance: 1 138 | Swap Stereo Eyes: false 139 | Value: false 140 | Name: Current View 141 | Near Clip Distance: 0.01 142 | Scale: 37.3008 143 | Target Frame: laser_top 144 | Value: TopDownOrtho (rviz) 145 | X: 1.72264 146 | Y: 0.371949 147 | Saved: ~ 148 | Window Geometry: 149 | Camera: 150 | collapsed: false 151 | Displays: 152 | collapsed: false 153 | Height: 740 154 | Hide Left Dock: false 155 | Hide Right Dock: false 156 | Image: 157 | collapsed: false 158 | QMainWindow State: 000000ff00000000fd00000004000000000000016a00000244fc020000000cfb0000001200530065006c0065006300740069006f006e00000000410000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000004100000244000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d0061006700650300000a550000006800000245000002f5fb0000000c00430061006d00650072006102000005dd000000580000041c0000037cfb0000000c00430061006d006500720061000000016a0000011b0000001600000016fb0000000a0049006d00610067006503000009fb000000af000002d5000002b2000000010000010f000002d7fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000041000002d7000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b2000000000000000000000002000004ac000001f7fc0100000002fb0000000c00430061006d0065007200610100000000000004ac0000000000000000fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005560000003bfc0100000002fb0000000800540069006d0065010000000000000556000002f600fffffffb0000000800540069006d00650100000000000004500000000000000000000003e60000024400000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 159 | Selection: 160 | collapsed: false 161 | Time: 162 | collapsed: false 163 | Tool Properties: 164 | collapsed: false 165 | Views: 166 | collapsed: false 167 | Width: 1366 168 | X: -8 169 | Y: 40 170 | -------------------------------------------------------------------------------- /src/FramePublisher.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | 5 | //void poseCallback(const ???camera pose??::PoseConstPtr& msg){ 6 | //} 7 | 8 | int main(int argc, char** argv){ 9 | ros::init(argc, argv, "upm_tf_broadcaster"); 10 | 11 | ros::NodeHandle node; 12 | 13 | //for using later when the camera will pan and/or tilt 14 | //ros::Subscriber sub = node.subscribe(turtle_name+"/pose", 10, 
&poseCallback); 15 | 16 | static tf::TransformBroadcaster br; 17 | 18 | ros::Rate rate(30.0); 19 | tf::Transform world2camera_tf; 20 | tf::Transform world2laser_tf; 21 | // Camera-to-laser transform: it is 10 cm above the camera and is static, so it stays out of the while loop 22 | 23 | // We could use something like the following in the launch file, but we choose this instead 24 | // for future use, when the camera moves 25 | // 27 | 28 | world2laser_tf.setOrigin( tf::Vector3( 0.0 , 0.0 , 0.0) ); 29 | world2laser_tf.setRotation( tf::Quaternion(0, 0, 0) ); 30 | 31 | while(ros::ok()) { 32 | // World-to-camera transform: it is 0 0 0 for the beginning 33 | 34 | world2camera_tf.setOrigin( tf::Vector3(0.0, 0.0 , 0.0) ); 35 | world2camera_tf.setRotation( tf::Quaternion(0, 0, 0) ); 36 | br.sendTransform(tf::StampedTransform(world2camera_tf, ros::Time::now(), "world", "camera")); 37 | 38 | 39 | br.sendTransform(tf::StampedTransform(world2laser_tf, ros::Time::now(), "world", "laser")); 40 | rate.sleep(); 41 | ros::spinOnce(); 42 | } 43 | return 0; 44 | } 45 | -------------------------------------------------------------------------------- /src/HumanFollowerRT.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | /** 5 | * A node to execute the human-following process. 6 | * @author Andrés Alacid Cano 7 | * @date 2014/02/16 8 | */ 9 | 10 | class HumanFollowerRT 11 | { 12 | public: 13 | HumanFollowerRT(string cameraTopic, string laserTopic); 14 | 15 | ~HumanFollowerRT(); 16 | 17 | private: 18 | 19 | ros::NodeHandle nh; 20 | 21 | /// Subscriber to the camera image topic 22 | message_filters::Subscriber<sensor_msgs::Image> cameraImage_sub_; 23 | /// Subscriber to the laser scan topic 24 | message_filters::Subscriber<sensor_msgs::LaserScan> laserScan_sub_; 25 | 26 | /** 27 | * An approximate time policy to synchronize the image and laser messages. 28 | * This sync policy waits for both messages to arrive before it invokes the callback 29 | * from the annotator object.
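 * (The constructor below instantiates this policy as MySyncPolicy(10), i.e.
 * up to 10 messages per topic are queued while waiting for a matching pair.)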
30 | */ 31 | typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::LaserScan> MySyncPolicy; 32 | 33 | /** The synchronizer based on the two-message policy */ 34 | message_filters::Synchronizer<MySyncPolicy> *sync; 35 | 36 | // Declare the topics 37 | std::string cameraImageIn; 38 | std::string laserScanIn; 39 | 40 | HumanFollower myHumanFollower; 41 | }; 42 | 43 | HumanFollowerRT::HumanFollowerRT(std::string cameraTopic, std::string laserTopic) 44 | : nh("~") 45 | { 46 | // Subscribe to the corresponding topics 47 | cameraImage_sub_.subscribe(nh,cameraTopic,3); 48 | laserScan_sub_.subscribe(nh,laserTopic,3); 49 | 50 | // Initialize synchronizer 51 | // Future work: change it to a tf::MessageFilter to include the tf transform 52 | sync = new message_filters::Synchronizer<MySyncPolicy>(MySyncPolicy(10), cameraImage_sub_, laserScan_sub_); 53 | 54 | sync->registerCallback(boost::bind(&HumanFollower::update, boost::ref(myHumanFollower), _1, _2)); 55 | 56 | // Sleep to give time to other nodes (tf) to start 57 | sleep(2); 58 | ROS_INFO("[HUMANFOLLOWER_RT] HumanFollowerRT running OK."); 59 | } 60 | 61 | HumanFollowerRT::~HumanFollowerRT() 62 | { 63 | 64 | } 65 | 66 | 67 | int main(int argc, char* argv[]) 68 | { 69 | ros::init(argc, argv, "HumanFollowerRT"); 70 | 71 | std::string cameraTopic(argv[1]); 72 | std::string laserTopic(argv[2]); 73 | 74 | HumanFollowerRT vl(cameraTopic, laserTopic); 75 | ros::spin(); 76 | 77 | return 0; 78 | } 79 | -------------------------------------------------------------------------------- /src/annotateData.cpp: -------------------------------------------------------------------------------- 1 | /* AnnotateData 2 | Reads the camera topic (camera/image_raw) and the laser topic from a bag 3 | Publishes ... 4 | */ 5 | #include 6 | #include 7 | 8 | /** 9 | * A node to set up all things needed for the annotation. 10 | * The bag file name is given from the command line. 11 | * @author Stathis Fotiadis and Bang-Cheng Wang 12 | * @date 10/10/2012 13 | */ 14 | class AnnotateData 15 | { 16 | private: 17 | /** 18 | * An approximate time policy to synchronize the image and laser messages. 19 | * This sync policy waits for both messages to arrive before it invokes the callback 20 | * from the annotator object. 21 | */ 22 | typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::LaserScan> MySyncPolicy; 23 | 24 | /** The synchronizer based on the two-message policy */ 25 | message_filters::Synchronizer<MySyncPolicy> *sync; 26 | 27 | /** Fake subscriber to the camera msgs */ 28 | bagSubscriber<sensor_msgs::Image> cameraImage_sub_; 29 | /** Fake subscriber to the laser scan msgs */ 30 | bagSubscriber<sensor_msgs::LaserScan> laserScan_sub_; 31 | 32 | /** The annotator class used to do the annotation of the data */ 33 | annotator dataAnnotator; 34 | 35 | bagReader bagger; 36 | 37 | // Declare the topics 38 | 39 | /** The container of the topics that will be read from the bag file. */ 40 | std::vector<std::string> topics; 41 | 42 | public: 43 | /** 44 | * Sets up everything to read and annotate the given bag. 45 | * First it creates some fake subscribers for the camera and laser topics. 46 | * Then it sets up the approximate time policy synchronizer. 47 | * Then it uses bagger, a bagReader class object, to open and feed the subscribers with messages from the topics. 48 | * When both messages arrive at the corresponding subscribers, the synchronizer calls 49 | * the annotateData() function of the annotator object.
It then carries out the annotation. 50 | * @param[in] bagFile The name of the bag file to be annotated 51 | */ 52 | AnnotateData(std::string bagFile, std::string cameraImageIn, std::string laserScanIn); 53 | 54 | ~AnnotateData() 55 | { 56 | } 57 | }; 58 | 59 | AnnotateData::AnnotateData(std::string bagFile, std::string cameraImageIn, std::string laserScanIn) : 60 | dataAnnotator(bagFile), bagger(bagFile) 61 | { 62 | // Define the topics 63 | topics.push_back(cameraImageIn); 64 | topics.push_back(laserScanIn); 65 | 66 | // Initialize synchronizer 67 | sync = new message_filters::Synchronizer<MySyncPolicy>(MySyncPolicy(10), cameraImage_sub_, laserScan_sub_); 68 | 69 | sync->registerCallback(boost::bind(&annotator::annotateData, boost::ref(dataAnnotator), _1, _2 )); 70 | 71 | ROS_INFO("[ANNOTATE_LASER] Obj creation OK"); 72 | 73 | // Sleep to give time to other nodes (tf) to start 74 | sleep(2); 75 | 76 | ROS_INFO("[ANNOTATE_LASER] Opening bag file %s", bagFile.c_str()); 77 | 78 | bagger.loadBag(topics, cameraImage_sub_, laserScan_sub_); 79 | 80 | ROS_INFO("[ANNOTATE_LASER] Finished OK"); 81 | } 82 | 83 | int main(int argc, char* argv[]) 84 | { 85 | std::string bagFile(argv[1]); 86 | std::string cameraImageIn(argv[2]); 87 | std::string laserScanIn(argv[3]); 88 | 89 | ros::init(argc, argv, "AnnotateLaser"); 90 | 91 | AnnotateData al(bagFile, cameraImageIn, laserScanIn); 92 | 93 | return 0; 94 | } 95 | -------------------------------------------------------------------------------- /src/detectTest.cpp: -------------------------------------------------------------------------------- 1 | /* detectBAG 2 | Reads the camera topic, the laser topic, and the ClusteredScan from a bag 3 | Then it does the detection. 4 | ITS 5 | */ 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include 12 | #include 13 | 14 | 15 | /** TODO A LOT 16 | * Makes the detection test from a bag file. 17 | * The bag file name is given from the command line. 18 | * @author Stathis Fotiadis 19 | * @date 10/10/2012 20 | */ 21 | class detectTest : public visualizer 22 | { 23 | private: 24 | 25 | /** 26 | * An approximate time policy to synchronize the image and laser messages. 27 | * This sync policy waits for both messages to arrive before it invokes the callback 28 | * from the annotator object. 29 | */ 30 | // visualizer myVzr; 31 | 32 | // Declare the topics 33 | std::string clusteredTopic; 34 | 35 | /** The container of the topics that will be read from the bag file. They are hardcoded. */ 36 | std::vector<std::string> topics; 37 | 38 | // The clock publisher 39 | ros::Publisher clock_pub_; 40 | rosgraph_msgs::Clock clockmsg; 41 | 42 | public: 43 | /** 44 | * Reads from an already annotated bag what it needs.
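 * Only the /laser/ClusteredScan topic is read back from the bag; the image
 * and the scan are unpacked from each message and passed on to visualizeData().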
45 | * @param[in] bagFile The name of the bag file to be annotated 46 | */ 47 | detectTest(std::string bagFile); 48 | ~detectTest() { }; 49 | 50 | }; 51 | 52 | detectTest::detectTest(std::string bagFile) 53 | { 54 | // define the topics 55 | clusteredTopic = "/laser/ClusteredScan"; 56 | 57 | topics.push_back(clusteredTopic); 58 | 59 | 60 | ROS_INFO("[DETECT_BAG] Obj creation OK"); 61 | 62 | // Sleep to give time to other nodes (tf) to start 63 | sleep(2); 64 | ROS_INFO("[DETECT_BAG] Opening bag file %s", bagFile.c_str()); 65 | 66 | rosbag::Bag bag(bagFile, rosbag::bagmode::Read); 67 | rosbag::View view(bag, rosbag::TopicQuery(topics)); 68 | 69 | int count = 0; 70 | // Reads the topics from the bag file 71 | BOOST_FOREACH(rosbag::MessageInstance const m, view) 72 | { 73 | if (m.getTopic() == clusteredTopic) 74 | { 75 | hdetect::ClusteredScan::ConstPtr mes = m.instantiate<hdetect::ClusteredScan>(); 76 | if (mes != NULL) 77 | { 78 | // Publish the clock 79 | clockmsg.clock = m.getTime(); 80 | clock_pub_.publish(clockmsg); 81 | 82 | sensor_msgs::Image::Ptr image_ptr = boost::make_shared<sensor_msgs::Image>(mes->image); 83 | sensor_msgs::LaserScan::ConstPtr laser_ptr = boost::make_shared<sensor_msgs::LaserScan>(mes->scan); 84 | 85 | visualizeData(image_ptr, laser_ptr); 86 | } 87 | else 88 | { 89 | ROS_INFO("[DETECT_TEST] Null message found"); 90 | } 91 | } 92 | 93 | count++; 94 | } 95 | 96 | bag.close(); 97 | //ROS_INFO("[BAG_READER] %d messages read.", count); 98 | 99 | ROS_INFO("[DETECT_BAG] Finished OK"); 100 | } 101 | 102 | int main(int argc, char* argv[]) 103 | { 104 | //if (argc==1) ROS_ERROR("Give the bag filename."); 105 | 106 | std::string bagFile(argv[1]); 107 | 108 | ros::init(argc, argv, "detectBAG"); 109 | 110 | detectTest db(bagFile); 111 | return 0; 112 | } 113 | -------------------------------------------------------------------------------- /src/headlessRT.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | 12 | #include 13 | 14 | /** 15 | * A node to set up all things needed for headless detection (no visualization). 16 | * 17 | * The topic names are given from the command line. 18 | * @author Stathis Fotiadis 19 | * @date 10/10/2012 20 | */ 21 | class headlessRT 22 | { 23 | private: 24 | ros::NodeHandle nh; 25 | 26 | /// Subscriber to the camera image topic 27 | message_filters::Subscriber<sensor_msgs::Image> cameraImage_sub_; 28 | /// Subscriber to the laser scan topic 29 | message_filters::Subscriber<sensor_msgs::LaserScan> laserScan_sub_; 30 | 31 | /** 32 | * An approximate time policy to synchronize the image and laser messages. 33 | * This sync policy waits for both messages to arrive before it invokes the callback 34 | * from the annotator object. 35 | */ 36 | typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::LaserScan> MySyncPolicy; 37 | 38 | /** The synchronizer based on the two-message policy */ 39 | message_filters::Synchronizer<MySyncPolicy> *sync; 40 | 41 | // Declare the topics 42 | std::string cameraImageIn; 43 | std::string laserScanIn; 44 | 45 | /// The detector object that will be used for the callback 46 | detector myDetector; 47 | 48 | public: 49 | /** 50 | * Creates a synchronizer of two topics (image, laser). 51 | * It runs human detection on each synchronized image/laser pair (no display). 52 | * @param[in] laserTopic The name of the laser topic that will be subscribed to. 53 | * @param[in] cameraTopic The name of the camera topic that will be subscribed to.
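 * Invoked from main() below as: headlessRT <cameraTopic> <laserTopic>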
54 | */ 55 | headlessRT(std::string cameraTopic, std::string laserTopic); 56 | 57 | ~headlessRT(); 58 | }; 59 | 60 | headlessRT::headlessRT(std::string cameraTopic, std::string laserTopic) : nh("~") 61 | { 62 | // Subscribe to the corresponding topics 63 | cameraImage_sub_.subscribe(nh,cameraTopic,3); 64 | laserScan_sub_.subscribe(nh,laserTopic,3); 65 | 66 | // Initialize synchronizer 67 | // Future work: change it to a tf::MessageFilter to include the tf transform 68 | sync = new message_filters::Synchronizer<MySyncPolicy>(MySyncPolicy(10), cameraImage_sub_, laserScan_sub_); 69 | 70 | sync->registerCallback(boost::bind(&detector::detectHumans, boost::ref(myDetector), _1, _2 )); 71 | 72 | // Sleep to give time to other nodes (tf) to start 73 | sleep(2); 74 | ROS_INFO("[HEADLESS_RT] headlessRT running OK."); 75 | } 76 | 77 | headlessRT::~headlessRT() 78 | { 79 | 80 | } 81 | 82 | int main(int argc, char* argv[]) 83 | { 84 | ros::init(argc, argv, "headlessRT"); 85 | 86 | std::string cameraTopic(argv[1]); 87 | std::string laserTopic(argv[2]); 88 | 89 | headlessRT hdless(cameraTopic, laserTopic); 90 | ros::spin(); 91 | 92 | return 0; 93 | } 94 | -------------------------------------------------------------------------------- /src/lib/annotator.cpp: -------------------------------------------------------------------------------- 1 | #include "hdetect/lib/annotator.hpp" 2 | 3 | //using namespace std; 4 | using std::string; 5 | using std::set; 6 | using std::stringstream; 7 | using std::cin; 8 | 9 | //using namespace cv; 10 | 11 | using cv::waitKey; 12 | 13 | 14 | using namespace Header; 15 | 16 | /** 17 | * Creates the directory and the files to save to 18 | */ 19 | annotator::annotator(string bagFile) 20 | { 21 | ROS_INFO("[ANNOTATOR] Initializing"); 22 | 23 | scanNo = 0; 24 | 25 | string abs_path; 26 | ros::NodeHandle nh; 27 | 28 | if (nh.hasParam("pkg_path")) 29 | { 30 | ros::param::get("/pkg_path",abs_path); 31 | } 32 | else 33 | { 34 | ROS_ERROR("[ANNOTATOR] Parameter pkg_path (absolute package path) not found."); 35 | } 36 | 37 | // Strip the path and extension to get the bare bag name 38 | bagFile = bagFile.substr(bagFile.find_last_of("/") + 1, bagFile.find_last_of(".") - bagFile.find_last_of("/") - 1); 39 | 40 | // Create the subdirectory for the specific bag 41 | bagDir = abs_path + "/data/"+bagFile; 42 | ROS_INFO("[ANNOTATOR] Creating directory %s", bagDir.c_str()); 43 | mkdir(bagDir.c_str(), S_IRWXG | S_IRWXO | S_IRWXU); 44 | 45 | // Open the CSV file for writing 46 | bagFile = bagDir + "/annotation.csv"; 47 | f = fopen(bagFile.c_str(), "w"); 48 | 49 | if (f == NULL) 50 | { 51 | ROS_ERROR("[ANNOTATOR] CSV file to write annotation data couldn't be opened"); 52 | exit(-1); 53 | } 54 | 55 | ROS_INFO("[ANNOTATOR] Writing CSV annotation data to file %s", bagFile.c_str()); 56 | 57 | // Opening the bag file to write everything 58 | bagFile = bagDir + "/everything.bag"; 59 | bag.open(bagFile, rosbag::bagmode::Write); 60 | ROS_INFO("[ANNOTATOR] Writing all data to BAG file %s", bagFile.c_str()); 61 | 62 | 63 | // Create the subdirectory for the crops of the specific bag 64 | bagDir = bagDir + "/crops/"; 65 | ROS_INFO("[ANNOTATOR] Creating directory %s", bagDir.c_str()); 66 | mkdir(bagDir.c_str(), S_IRWXG | S_IRWXO | S_IRWXU); 67 | 68 | ROS_INFO("[ANNOTATOR] CONSTRUCTOR END"); 69 | } 70 | 71 | /// Closes the open files and windows. 72 | annotator::~annotator() 73 | { 74 | fclose(f); 75 | bag.close(); 76 | } 77 | 78 | 79 | /** 80 | * Gets the annotation from the user.
81 | * It superimposes the clusters on the image along with their bounding boxes if available. 82 | * Also draws a 2d plane of the laser scan to facilitate the annotation process. 83 | * Asks the user to annotate the clusters. The user can only annotate clusters that can be seen on the image. 84 | * Saves all the data: annotation, clusters, features, laserscan, image etc. in a bag. 85 | * The annotated clusters (only those that can be seen on the image) and their features 86 | * are also saved in csv format. 87 | * This means that the annotation of the off-screen clusters is NOT VALID. 88 | * If the bounding box of a projected cluster is within the image limits, a crop is also saved to a separate file. 89 | * This function serves as the callback for the synchronizer in AnnotateData.cpp 90 | * 91 | */ 92 | void annotator::annotateData(const sensor_msgs::Image::ConstPtr &image, 93 | const sensor_msgs::LaserScan::ConstPtr &lScan) 94 | { 95 | visualizeData(image, lScan); 96 | waitKey(200); 97 | 98 | int annot; 99 | string input; 100 | set<int> humans; 101 | 102 | // For warning message 103 | bool error = false; 104 | char warning[1000]; 105 | sprintf(warning, "\n--------------------------------------------------------\n" 106 | " Input should be an integer between 0 and %d\n" 107 | "--------------------------------------------------------\n",(int)(clusterData.size() - 1)); 108 | 109 | // Check if there are some points which have been labeled before 110 | for (uint i = 0; i < clusterData.size(); i++) 111 | { 112 | for (uint j = 0; j < prev_points.size(); j++) 113 | { 114 | float dis = calculateEucDis(clusterData[i].cog, prev_points[j]); 115 | 116 | if (dis < 0.35) 117 | { 118 | humans.insert(i); 119 | 120 | break; 121 | } 122 | } 123 | } 124 | do 125 | { 126 | system("clear"); 127 | 128 | printf("\nSCAN No %04d\n", scanNo); 129 | 130 | printf("\nCurrently there are %d human clusters", (int)(humans.size())); 131 | 132 | if (humans.size() > 0) 133 | { 134 | printf(": \n"); 135 | 136 | // For automatic labeling 137 | for (set<int>::iterator it = humans.begin(); it != humans.end(); it++) 138 | { 139 | printf("[%d]\n", *it); 140 | } 141 | } 142 | else 143 | { 144 | printf(".\n"); 145 | } 146 | 147 | if (error == true) 148 | { 149 | printf("%s", warning); 150 | } 151 | 152 | printf("\nPlease give the number of the human clusters.\n" 153 | "[enter] to proceed, [number] to label and unlabel: "); 154 | 155 | getline(cin, input); 156 | 157 | // Check if input is not "", trim the string 158 | if (input.compare("") > 0) 159 | { 160 | input = input.substr(input.find_first_not_of(" ")); 161 | } 162 | 163 | // If it's a valid integer, toggle the cluster's label 164 | if (stringstream(input) >> annot && annot >= 0 && (uint)annot < clusterData.size()) 165 | { 166 | // Label is not found 167 | if (humans.count(annot) == 0) 168 | { 169 | humans.insert(annot); 170 | } 171 | // Label is found 172 | else 173 | { 174 | humans.erase(annot); 175 | } 176 | 177 | error = false; 178 | } 179 | // If the input is invalid 180 | else if (input.compare("") > 0) 181 | { 182 | error = true; 183 | } 184 | } 185 | while (input.compare("")); 186 | 187 | // Load the labels from the set 188 | for (set<int>::iterator it = humans.begin(); it != humans.end(); it++) 189 | { 190 | clusterData[*it].label = LABEL_HUMAN; 191 | } 192 | 193 | 194 | // Save image crops 195 | // for (uint i = 0; i < clusterData.fusion.size() ; i++) 196 | // { 197 | // if (clusterData.fusion[i] == 1) 198 | // { 199 | // projectPoint(clusterData.cogs[i], prPixel, K, D,
transform); 200 | // getBox(clusterData.cogs[i], prPixel, boxSize, upleft, downright, params.m_to_pixels, params.body_ratio); 201 | // getCrop (crop, cv_ptr->image, upleft, boxSize); 202 | 203 | 204 | // char cropID[30] = 0; 205 | 206 | // if (clusterData.labels[i] == 1) 207 | // { 208 | // sprintf(cropID,"S%04d_C_%03d_1.pgm", scanNo, i); 209 | // } 210 | // else 211 | // { 212 | // sprintf(cropID,"S%04d_C_%03d_0.pgm", scanNo, i); 213 | // } 214 | 215 | // stringstream ss; 216 | // ss << cropID; 217 | // string cname; 218 | // ss >> cname; 219 | // cname = bagDir + cname; 220 | 221 | // ROS_INFO("[ANNOTATOR] Saving crop : %s", cname.c_str()); 222 | // imwrite(cname, crop); 223 | // } 224 | // } 225 | 226 | // saveBag(image, lScan); 227 | 228 | saveCSV(); 229 | 230 | prev_points.clear(); 231 | 232 | // printf("\nClusters are: \n\n"); 233 | 234 | for (uint i = 0; i < clusterData.size(); i++) 235 | { 236 | if (clusterData[i].label == LABEL_HUMAN) 237 | { 238 | // printf("cluster %03d\tlabel %1d\tprojected %1d\tfusion %1d\n", 239 | // i, 240 | // clusterData[i].label, 241 | // clusterData[i].cog_projected, 242 | // clusterData[i].crop_projected); 243 | 244 | prev_points.push_back(clusterData[i].cog); 245 | } 246 | } 247 | 248 | scanNo++; 249 | } 250 | 251 | /// Saves everything (image, camera info, laser scan, ClusteredScan) to a bag file 252 | void annotator::saveBag(const sensor_msgs::Image::ConstPtr &image, 253 | const sensor_msgs::LaserScan::ConstPtr &lScan) 254 | { 255 | sensor_msgs::Image new_image = *image; 256 | sensor_msgs::LaserScan new_lScan = *lScan; 257 | 258 | // Set all the timestamps to the acquisition time (camera) 259 | // new_image.header.stamp = acquisition_time; 260 | // new_lScan.header.stamp = acquisition_time; 261 | detections.header.stamp = image->header.stamp; 262 | 263 | bag.write("/camera/image", acquisition_time, new_image ); 264 | bag.write("/laser_top/scan", acquisition_time, new_lScan ); 265 | bag.write("/ClusteredClass", acquisition_time, detections); 266 | } 267 | 268 | /// Used to save the annotated data in CSV format, even those that are not present in the image.
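/// Each row written below has the form: <label>, <feature_1>, ..., <feature_N>.
/// A purely illustrative row for a human-labelled cluster with three features:
///     1, 0.412000, 1.276000, 0.035000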
269 | void annotator::saveCSV() 270 | { 271 | uint cluster_size = clusterData.size(); 272 | uint features_size = clusterData[0].features.data.size(); 273 | 274 | for (uint i = 0; i < cluster_size; i++) 275 | { 276 | //if (projectedClusters[i] == 1) { // IF THIS IS UNCOMMENTED IT ONLY SAVES VISIBLE CLUSTERS 277 | // fprintf(f, "%04d %d %d", scanNo, clusterData.fusion[i], clusterData.labels[i]); 278 | fprintf(f, "%d", clusterData[i].label); 279 | 280 | for (uint j = 0; j < features_size; j++) 281 | { 282 | fprintf(f, ", %.6f", clusterData[i].features.data[j]); 283 | } 284 | 285 | fprintf(f, "\n"); 286 | //} 287 | } 288 | } 289 | 290 | float annotator::calculateEucDis(geometry_msgs::Point32 &point1, geometry_msgs::Point32 &point2) 291 | { 292 | float sum1 = (point1.x - point2.x) * (point1.x - point2.x); 293 | float sum2 = (point1.y - point2.y) * (point1.y - point2.y); 294 | 295 | return sqrt(sum1 + sum2); 296 | } 297 | -------------------------------------------------------------------------------- /src/lib/bagReader.cpp: -------------------------------------------------------------------------------- 1 | // It reads a bag 2 | // Reads two specified topics from a vector of strings 3 | // Publishes the clock time 4 | 5 | #include "hdetect/lib/bagReader.hpp" 6 | 7 | 8 | /** 9 | * Reads all of the messages from the bag file. 10 | * Checks what type each message is and sends it to the appropriate subscriber. 11 | * Two types of messages are handled here: sensor_msgs::Image and sensor_msgs::LaserScan. 12 | * The clock is published when a camera or a laser scan message arrives. Camera info messages have the same 13 | * timestamp as camera image ones so there is no need to publish their time. 14 | */ 15 | void bagReader::loadBag(std::vector<std::string> topics, 16 | bagSubscriber<sensor_msgs::Image> &cameraImage_sub_, 17 | bagSubscriber<sensor_msgs::LaserScan> &laserScan_sub_) 18 | { 19 | ROS_INFO("[BAG_READER] Opening %s", bagFile.c_str()); 20 | 21 | rosbag::Bag bag(bagFile, rosbag::bagmode::Read); 22 | 23 | // push topics we are going to read from rosbag file 24 | std::string cameraImageIn = topics[0]; 25 | std::string laserScanIn = topics[1]; 26 | 27 | ROS_INFO("[BAG_READER] topics %s %s", topics[0].c_str(), topics[1].c_str()); 28 | 29 | rosbag::View view(bag, rosbag::TopicQuery(topics)); 30 | ROS_INFO("[BAG_READER] Bag loaded for reading."); 31 | 32 | int count=0; 33 | 34 | BOOST_FOREACH(rosbag::MessageInstance const m, view) 35 | //for ( rosbag::View::iterator it = view.begin(); it != view.end(); it++ ) 36 | { 37 | //rosbag:: MessageInstance const m = *it; 38 | 39 | count++; 40 | 41 | //ROS_INFO("[BAG_READER] Bag view count = %d %s.", count, m.getTopic().c_str()); 42 | 43 | if (m.getTopic() == cameraImageIn) 44 | { 45 | sensor_msgs::Image::ConstPtr img = m.instantiate<sensor_msgs::Image>(); 46 | if (img != NULL) 47 | { 48 | clockmsg.clock = m.getTime(); 49 | clock_pub_.publish(clockmsg); 50 | cameraImage_sub_.newMessage(img); 51 | } 52 | } 53 | 54 | if (m.getTopic() == laserScanIn) 55 | { 56 | sensor_msgs::LaserScanConstPtr ls = m.instantiate<sensor_msgs::LaserScan>(); 57 | if (ls != NULL) 58 | { 59 | clockmsg.clock = m.getTime(); 60 | clock_pub_.publish(clockmsg); 61 | laserScan_sub_.newMessage(ls); 62 | } 63 | } 64 | } 65 | 66 | bag.close(); 67 | 68 | ROS_INFO("[BAG_READER] %d messages read.", count); 69 | 70 | ROS_INFO("[BAG_READER] Bag is read."); 71 | } 72 | 73 | /// The constructor just opens the bagfile and creates a clock publisher 74 | bagReader::bagReader(std::string fname) 75 | { 76 | bagFile = fname; 77 | 78 | clock_pub_ = nh_.advertise<rosgraph_msgs::Clock>("clock", 1); 79 | 80 |
ROS_INFO("[BAG_READER] Object created."); 81 | } 82 | 83 | bagReader::~bagReader() 84 | { 85 | 86 | } 87 | -------------------------------------------------------------------------------- /src/lib/header.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace Header 4 | { 5 | float curTimestamp = -1; 6 | float preTimestamp = -1; 7 | } 8 | -------------------------------------------------------------------------------- /src/lib/human.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | 4 | //using namespace NEWMAT; 5 | using NEWMAT::Matrix; 6 | using NEWMAT::ColumnVector; 7 | 8 | using geometry_msgs::Point; 9 | 10 | Human::Human(int id, float score, ColumnVector state, Matrix cov, int preTimestamp) 11 | { 12 | this->id = id; 13 | 14 | this->score = score; 15 | 16 | this->state = state; 17 | this->cov = cov; 18 | 19 | this->preState = state; 20 | this->preTimestamp = preTimestamp; 21 | this->firstTimestamp = ros::Time::now(); 22 | } 23 | 24 | Point Human::toPoint() 25 | { 26 | Point pos; 27 | 28 | pos.x = state(1); 29 | pos.y = state(2); 30 | pos.z = 0; 31 | 32 | return pos; 33 | } 34 | -------------------------------------------------------------------------------- /src/lib/laserLib.cpp: -------------------------------------------------------------------------------- 1 | /* LaserTranslator.cpp 2 | Transforms the laserscan to a format that the external people2d_engine (modified) 3 | can read. It returns 4 | Clusters as LaserClustersMsg 5 | Features as ScanFeaturesMsg 6 | 7 | Stathis Fotiadis 2012 8 | */ 9 | #include "hdetect/lib/laserLib.hpp" 10 | 11 | //using namespace std; 12 | using std::vector; 13 | 14 | /// Starts the laser filter. people2D_engine parameter setting. 15 | laserLib::laserLib(double jumpdist, int feature_set, double laser_range ) : laserFilter("sensor_msgs::LaserScan") { 16 | // Set the configuration 17 | // segmentation distance 18 | libEngineParams.jumpdist = jumpdist; 19 | libEngineParams.feature_set = feature_set; // WE SHOULD TRY CHANGING THAT TO 2 20 | libEngineParams.laser_range = laser_range; 21 | 22 | libEngineParams.sanity = 1; 23 | libEngineParams.segonly = 0; 24 | 25 | libEngine = new lengine( libEngineParams ); 26 | 27 | // Shadow filtering parameters 28 | // Must have been loaded to the paramater server 29 | laserFilter.configure("scan_filter_chain"); 30 | } 31 | 32 | laserLib::~laserLib() 33 | { 34 | delete libEngine; 35 | } 36 | 37 | void laserLib::getHeader(std_msgs::Header &header) 38 | { 39 | header.stamp = stamp; 40 | header.seq = seq; 41 | header.frame_id = frame_id; 42 | } 43 | 44 | /** Filters and loads the laser scan. 45 | First it processes the laser scan with a shadow filter. 46 | Does conversion to people2D_engine format using scan2lib() 47 | Finally segments the result to clusters, still in people2D_engine format. 48 | To get the clusters in ROS use getClusters(). 
49 | */ 50 | void laserLib::loadScan(sensor_msgs::LaserScan ls) { 51 | 52 | // First apply the filter 53 | // WARNING THE FILTER KEEPS THE FILTERED DATA BUT WITH REVERSE RANGE (*it) 54 | laserFilter.update(ls, filtScan); 55 | 56 | /*printf("\npre %d",ls.ranges.size()); 57 | int filtp=0; 58 | for(uint cnt=0; cnt<filtScan.ranges.size(); cnt++) 59 | if(filtScan.ranges[cnt]>0.0) filtp++; 60 | printf("\nfilt %d\n",filtp); 61 | */ 62 | 63 | // Convert filtered scan to a library ready format into variable libScan 64 | scan2lib(filtScan, libScanData); 65 | 66 | // Load the data 67 | libEngine->load_scandata(libScanData); 68 | // Copy over the header 69 | seq=ls.header.seq; 70 | stamp=ls.header.stamp; 71 | frame_id=ls.header.frame_id; 72 | 73 | // Segment the data 74 | clusters.clear(); 75 | libEngine->segmentscanJDC(clusters); 76 | } 77 | 78 | /** The API function used to get the features. 79 | * Computes the features and saves them in the ClusteredScan message. 80 | * Must use loadScan() first. 81 | */ 82 | void laserLib::getFeatures(vector<hdetect::ClusteredScan> &clusterData) 83 | { 84 | // Compute the features 85 | libEngine->computeFeatures(clusters, descriptor); 86 | 87 | // Convert features to Float32MultiArray 88 | features2ROS(clusterData); 89 | 90 | // Copy the header from the laser scan 91 | //features.header.seq=seq; 92 | //features.header.stamp=stamp; 93 | //features.header.frame_id=frame_id; 94 | 95 | /* 96 | ROS_INFO("clusters %d", clusters.size()); 97 | ROS_INFO("features %d", descriptor.size()); 98 | ROS_INFO("features ROS %d", features.features.size()); 99 | ROS_INFO("features dim %d", descriptor[0].size()); 100 | ROS_INFO("features dim ROS %d", features.features[0].data.size()); 101 | //*/ 102 | } 103 | 104 | /** Converts the features to a ROS compatible ClusteredScan message 105 | * The timestamp of the message is set to the timestamp of the image.
106 | * Each cluster's annotation initialised to FALSE (0) 107 | * Each cluster's fusion attribute initialised to FALSE (0) 108 | * */ 109 | void laserLib::features2ROS(vector<hdetect::ClusteredScan> &clusterData) 110 | { 111 | uint descriptor_size = descriptor[0].size(); 112 | 113 | for (uint i = 0; i < clusterData.size(); i++) 114 | { 115 | // Clear all features in cluster data 116 | clusterData[i].features.data.clear(); 117 | 118 | // Insert all descriptor values into the cluster data 119 | for (uint j = 0; j < descriptor_size; j++) 120 | { 121 | clusterData[i].features.data.push_back(descriptor[i][j]); 122 | } 123 | } 124 | } 125 | 126 | /** 127 | * Converts the scan from ROS to a format that the people2D_engine library understands 128 | * The LaserScan is converted to a laserscan_data object */ 129 | void laserLib::scan2lib (sensor_msgs::LaserScan &ls, laserscan_data &libScan) { 130 | 131 | // read angle min, max and increment 132 | angle_min = ls.angle_min; 133 | angle_max = ls.angle_max; 134 | angle_inc = ls.angle_increment; 135 | 136 | // counter 137 | int i = 0; 138 | 139 | // angle 140 | float ang; 141 | 142 | // helping LSL_Point3D_str 143 | Point3D_str point; 144 | point.z=0; 145 | point.label=0; 146 | 147 | // clear the data of libScan 148 | libScan.data.pts.clear(); 149 | 150 | 151 | for (std::vector<float>::iterator it = ls.ranges.begin(); it!=ls.ranges.end(); it++) { 152 | // Convert the laserscan from ROS to library format 153 | 154 | // Compute the x,y of the point 155 | ang = angle_min + (float) i * angle_inc; 156 | 157 | // WARNING: WE DO THAT BECAUSE THE FILTER KEEPS THE DATA BUT WITH REVERSE RANGE (*it) 158 | if ( *it > 0.0 && *it <= libEngineParams.laser_range ) 159 | { 160 | point.x = *it * cos(ang); 161 | point.y = *it * sin(ang); 162 | 163 | // Push it to the libScan vector if it's a valid range 164 | libScan.data.pts.push_back(point); 165 | 166 | } 167 | //else 168 | // ROS_WARN("[LASER_LIB] point out of bounds r %f - x %f - y %f ", *it, point.x, point.y); 169 | 170 | //ROS_INFO("i %d libScan size before %d",i, libScan.data.pts.size()); 171 | /* 172 | ROS_INFO("i %d", i); 173 | ROS_INFO("r %f",*it); 174 | ROS_INFO("a %f", ang); 175 | ROS_INFO("x %f", point.x); 176 | ROS_INFO("y %f\n", point.y); 177 | sleep(0.1); 178 | //*/ 179 | 180 | i++; 181 | 182 | } 183 | //printf("[LASER_LIB] scan2lib: Laser scan Prefiltered = %d - Filtered = %d\n",ls.ranges.size(),libScan.data.pts.size()); 184 | 185 | } 186 | 187 | /** The API function used to get the clusters. 188 | * Computes the individual clusters and saves them in the ClusteredScan message. 189 | * Must use loadScan() first.
190 | */ 191 | void laserLib::getClusters(vector &clusterData) 192 | { 193 | // Get the amount of clusters 194 | uint cluster_size = clusters.size(); 195 | 196 | // Clear the data for clusters and cogs 197 | clusterData.resize(cluster_size); 198 | 199 | for (uint i = 0; i < cluster_size; i++) 200 | { 201 | // compute cog 202 | clusters[i].compute_cog(&cogL); 203 | // put cluster only if it's more than 20cm away from the laser 204 | //ROS_INFO("cluster %d dist %f x %f y %f",i, distance_L2_XY(&cogLSL,&origin),cogLSL.x, cogLSL.y); 205 | 206 | // discard cluster too close to the laser 207 | //if (distance_L2_XY(&cogLSL,&origin) > 0.2) 208 | //{ 209 | 210 | clusterData[i].cog.x = cogL.x; 211 | clusterData[i].cog.y = cogL.y; 212 | clusterData[i].cog.z = cogL.z; 213 | 214 | // Insert all the cluster points into cluster data 215 | clusterData[i].clusters.points.clear(); 216 | for (vector::iterator it2 = clusters[i].pts.begin(); 217 | it2 != clusters[i].pts.end(); it2++) 218 | { 219 | pt32.x = it2->x; 220 | pt32.y = it2->y; 221 | pt32.z = it2->z; 222 | //if( sqrt( pow(pt.x,2) + pow(pt.y,2) ) > libEngineParams.laser_range ) 223 | clusterData[i].clusters.points.push_back(pt32); 224 | } 225 | //} 226 | } 227 | //ROS_INFO("clusters %d", laserClusters.clusters.size()); 228 | //ROS_INFO("cogs ROS %d", laserClusters.cogs.size()); 229 | } 230 | -------------------------------------------------------------------------------- /src/lib/lengine.cpp: -------------------------------------------------------------------------------- 1 | /***************************************************************** 2 | * 3 | * This file is part of the People2D project 4 | * 5 | * People2D Copyright (c) 2011 Luciano Spinello 6 | * 7 | * This software is licensed under the "Creative Commons 8 | * License (Attribution-NonCommercial-ShareAlike 3.0)" 9 | * and is copyrighted by Luciano Spinello 10 | * 11 | * Further information on this license can be found at: 12 | * http://creativecommons.org/licenses/by-nc-sa/3.0/ 13 | * 14 | * People2D is distributed in the hope that it will be useful, 15 | * but WITHOUT ANY WARRANTY; without even the implied 16 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 17 | * PURPOSE. 
18 | * 19 | *****************************************************************/ 20 | 21 | #include "hdetect/lib/lengine.hpp" 22 | 23 | const uint lengine::feature_set_size[] = {FEATURE_SET_0, FEATURE_SET_1}; 24 | 25 | int lengine::sanity_check(std::vector > &descriptor) 26 | { 27 | int ret = 1; 28 | if (!descriptor.size()) 29 | { 30 | printf("Feature set is 0 dimensional\n"); 31 | return (0); 32 | } 33 | 34 | uint f_num = descriptor[0].size(); 35 | 36 | for (uint i = 0; i < descriptor.size(); i++) 37 | { 38 | if (f_num != descriptor[i].size()) 39 | { 40 | printf("Feature size mismatch [%d]\n", i); 41 | ret = 0; 42 | } 43 | 44 | for (uint j = 0; j < descriptor[i].size(); j++) 45 | { 46 | int typeval = fpclassify(descriptor[i][j]); 47 | if (typeval == FP_NAN || typeval == FP_INFINITE) 48 | { 49 | printf("nan or inf found in the feature @ position [%d][%d] = %g\n", i, j, descriptor[i][j]); 50 | ret = 0; 51 | } 52 | } 53 | } 54 | 55 | return ret; 56 | } 57 | 58 | 59 | //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | 61 | int lengine::get_breakpoint(std::vector &pts, int last_breaking_idx) 62 | { 63 | int ptsz = pts.size() - 1; 64 | 65 | //~ failsafe 66 | int jmp_idx = pts.size(); 67 | 68 | for (int i = last_breaking_idx; i < ptsz; i++) 69 | { 70 | double dist; 71 | dist = distance_L2_XY_sqr(&pts[i], &pts[i + 1]); 72 | 73 | if (dist > params.jumpdist) 74 | { 75 | //~ printf("dist: %g (%g): [%g %g]v[%g %g]\n",dist, sqjumpdist, pts[i].x,pts[i].y, pts[i + 1].x, pts[i + 1].y); 76 | 77 | //~ mark index 78 | jmp_idx = i + 1; 79 | 80 | //~ bail out 81 | i = ptsz; 82 | } 83 | } 84 | 85 | return (jmp_idx); 86 | } 87 | 88 | 89 | int lengine::segmentscanJDC(std::vector &clusters) 90 | { 91 | 92 | //~ bailing var 93 | char split_complete = 1; 94 | 95 | //~ numpts 96 | int ptsz = laserscan_single.data.pts.size(); 97 | 98 | //~ avoid nulls 99 | if (ptsz == 0) 100 | return (0); 101 | 102 | //~ last break point index 103 | int last_breaking_idx = 0; 104 | 105 | Point3D_str single_cog; 106 | std::vector all_cog; 107 | uint i; 108 | double dist[5]; 109 | 110 | uint clusterNo = 0; 111 | 112 | bool newcluster; 113 | while (split_complete) 114 | { 115 | //~ min pts number 116 | if (last_breaking_idx < ptsz - 1) 117 | { 118 | //~ max distance check 119 | int breaking_idx = get_breakpoint(laserscan_single.data.pts, last_breaking_idx); 120 | 121 | if (breaking_idx - last_breaking_idx >= L_MINCLUSTERSZ) 122 | { 123 | //~ a cluster 124 | Point3D_container single_cluster; 125 | 126 | //~ pump it into 127 | single_cluster.pts.insert(single_cluster.pts.begin(), laserscan_single.data.pts.begin() + last_breaking_idx, 128 | laserscan_single.data.pts.begin() + breaking_idx); 129 | 130 | //printf("\n\n--- NEW CLUSTER COMPUTED ---"); 131 | //printf("current cluster no %d\n", clusterNo); 132 | //printf("total clusters no %d\n", all_cog.size()); 133 | 134 | newcluster = true; 135 | //concatenate clusters 136 | //compute the cog 137 | single_cluster.compute_cog(&single_cog); 138 | //compare it with all existing cogs 139 | for (i = 0; i < all_cog.size(); i++) 140 | { 141 | //we compute the distance between each end of both clusters 142 | // and the distance between the cogs 143 | // if the minimum of these distances is below a threshold, we concatenate the clusters 144 | dist[0] = distance_L2_XY(&clusters[i].pts.front(), &single_cluster.pts.front()); 145 | dist[1] = distance_L2_XY(&clusters[i].pts.front(), &single_cluster.pts.back()); 146 | dist[2] = distance_L2_XY(&clusters[i].pts.back(), 
&single_cluster.pts.front()); 147 | dist[3] = distance_L2_XY(&clusters[i].pts.back(), &single_cluster.pts.back()); 148 | dist[4] = distance_L2_XY(&single_cog, &all_cog[i]); 149 | //printf("distances: %f\t%f\t%f\t%f\t%f\n", dist[0],dist[1],dist[2],dist[3],dist[4]); 150 | if (*std::min_element(dist, dist + 5) < params.jumpdist) // same with the segmentation distance 151 | { //printf("close cluster %d\n", i); 152 | newcluster = false; 153 | while (!single_cluster.pts.empty()) 154 | { 155 | clusters[i].pts.push_back(single_cluster.pts.back()); 156 | single_cluster.pts.pop_back(); 157 | } 158 | break; 159 | } 160 | } 161 | //printf("\n"); 162 | if (newcluster) 163 | { 164 | all_cog.push_back(single_cog); 165 | clusters.push_back(single_cluster); 166 | } 167 | clusterNo++; 168 | 169 | } 170 | 171 | //~ endpoint 172 | last_breaking_idx = breaking_idx; 173 | } 174 | else 175 | { 176 | //~ break cycle 177 | split_complete = 0; 178 | //printf("current cluster no %d\n", clusterNo); 179 | //printf("[PEOPLE2D_ENGINE] Connected clusters %d\n", all_cog.size()-clusterNo); 180 | } 181 | } 182 | 183 | return (1); 184 | } 185 | 186 | void lengine::computeFeatures(std::vector &clusters, 187 | std::vector > &descriptor) 188 | { 189 | // set feature set 190 | 191 | 192 | // made it public 193 | lfeatures_class lfeatures = lfeatures_class(params.feature_set); 194 | 195 | lfeatures.compute_descriptor(clusters, descriptor); 196 | 197 | // for now only checking right size 198 | if ( !(descriptor[0].size() == feature_set_size[params.feature_set]) ) { 199 | printf("[LENGINE] Features computed %d. Correct size %d.\n", (int) descriptor[0].size(), feature_set_size[params.feature_set]); 200 | exit(-1); 201 | } 202 | 203 | /* 204 | if (params.sanity) 205 | { 206 | int ret = sanity_check(descriptor); 207 | if (!ret) 208 | { 209 | printf("Sanity check failed.\n"); 210 | exit(1); 211 | } 212 | else 213 | { 214 | if (params.verbosity == 2) 215 | printf("Sanity check passed \n"); 216 | } 217 | } 218 | */ 219 | } 220 | -------------------------------------------------------------------------------- /src/lib/lgeometry.cpp: -------------------------------------------------------------------------------- 1 | /***************************************************************** 2 | * 3 | * This file is part of the People2D project 4 | * 5 | * People2D Copyright (c) 2011 Luciano Spinello 6 | * 7 | * This software is licensed under the "Creative Commons 8 | * License (Attribution-NonCommercial-ShareAlike 3.0)" 9 | * and is copyrighted by Luciano Spinello 10 | * 11 | * Further information on this license can be found at: 12 | * http://creativecommons.org/licenses/by-nc-sa/3.0/ 13 | * 14 | * People2D is distributed in the hope that it will be useful, 15 | * but WITHOUT ANY WARRANTY; without even the implied 16 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 17 | * PURPOSE. 
18 | * 19 | *****************************************************************/ 20 | 21 | 22 | 23 | #include "hdetect/lib/lgeometry.hpp" 24 | 25 | Point3D_container :: Point3D_container (void) 26 | { 27 | } 28 | 29 | ////------------------------------------------------------------------------------------------------------------------- 30 | 31 | Point3D_container :: Point3D_container(uint sz) 32 | { 33 | pts = std::vector (sz); 34 | } 35 | 36 | 37 | 38 | ////------------------------------------------------------------------------------------------------------------------- 39 | 40 | 41 | Point3D_container :: ~Point3D_container () 42 | { 43 | } 44 | 45 | 46 | ////------------------------------------------------------------------------------------------------------------------- 47 | 48 | double distance_L2_XY (Point3D_str *pt0, Point3D_str *pt1) 49 | { 50 | return ( sqrt( (pt0->x - pt1->x)*(pt0->x - pt1->x) + (pt0->y - pt1->y)*(pt0->y - pt1->y) ) ); 51 | } 52 | 53 | ////------------------------------------------------------------------------------------------------------------------- 54 | 55 | 56 | double distance_L2_XY_sqr (Point3D_str *pt0, Point3D_str *pt1) 57 | { 58 | double dist = (pt0->x - pt1->x)*(pt0->x - pt1->x) + (pt0->y - pt1->y)*(pt0->y - pt1->y); 59 | return(dist); 60 | } 61 | 62 | 63 | ////------------------------------------------------------------------------------------------------------------------- 64 | 65 | 66 | void order_bytheta_incart (std::vector & pts_out) 67 | { 68 | 69 | std::vector pts_polar_out; 70 | 71 | // convert and order 72 | conv2polar_func(pts_out, pts_polar_out); 73 | order_bytheta (pts_polar_out); 74 | 75 | // clear before pushing 76 | pts_out.clear(); 77 | 78 | // convert in cartesian 79 | conv2cart_func(pts_polar_out, pts_out); 80 | 81 | //free 82 | pts_polar_out.clear(); 83 | } 84 | 85 | ////------------------------------------------------------------------------------------------------------------------- 86 | 87 | 88 | void order_bytheta (std::vector &pts_polar_out) 89 | { 90 | std::vector pts_polar_tmp; 91 | Point3D_polar_str polar_p; 92 | 93 | std::vector th_val; 94 | size_t *p = (size_t*) malloc(pts_polar_out.size() * sizeof(size_t)); 95 | 96 | // sort index according to thetaval 97 | for(uint i=0; i &ptvec) 132 | { 133 | pts = ptvec; 134 | } 135 | 136 | ////------------------------------------------------------------------------------------------------------------------- 137 | 138 | 139 | void Point3D_container :: compute_cog(Point3D_str *pts_out) 140 | { 141 | double x_sum = 0; 142 | double y_sum = 0; 143 | double z_sum = 0; 144 | 145 | // centroid 146 | for(uint i=0 ; i < pts.size(); i++) 147 | { 148 | x_sum += pts[i].x; 149 | y_sum += pts[i].y; 150 | z_sum += pts[i].z; 151 | } 152 | 153 | pts_out -> x = x_sum/(double)pts.size(); 154 | pts_out ->y = y_sum/(double)pts.size(); 155 | pts_out ->z = z_sum/(double)pts.size(); 156 | } 157 | 158 | 159 | 160 | ////------------------------------------------------------------------------------------------------------------------- 161 | 162 | /** 163 | * It had a problem because it was initializing the size of the vector. 164 | * Effectively the result was double the expected size and the first NO_SIZE elements where empty. 165 | * The rest that was push_back were never accessed. Removed the initialization and fixed it. 
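 *
 * Minimal illustration of the old bug (editor's sketch):
 *
 *   std::vector<double> v(n);     // starts with n default-constructed zeros
 *   for (...) v.push_back(x);     // appends AFTER those zeros -> final size 2n
 *
 * Removing the sized construction, as done below, makes push_back fill the vector from index 0.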
166 | * @param pts_coord 167 | * @param coord_sel 168 | */ 169 | void Point3D_container :: get_coords(std::vector &pts_coord, char coord_sel) 170 | { 171 | 172 | //pts_coord = std::vector (pts.size()); 173 | if(coord_sel == GEOMETRY_COORD_X) 174 | { 175 | // centroid 176 | for(uint i=0 ; i < pts.size(); i++) 177 | pts_coord.push_back(pts[i].x); 178 | } 179 | 180 | if(coord_sel == GEOMETRY_COORD_Y) 181 | { 182 | // centroid 183 | for(uint i=0 ; i < pts.size(); i++) 184 | pts_coord.push_back(pts[i].y); 185 | } 186 | 187 | if(coord_sel == GEOMETRY_COORD_Z) 188 | { 189 | // centroid 190 | for(uint i=0 ; i < pts.size(); i++) 191 | pts_coord.push_back(pts[i].z); 192 | } 193 | 194 | } 195 | 196 | 197 | ////------------------------------------------------------------------------------------------------------------------- 198 | 199 | 200 | void conv2polar_func(std::vector & pts_in, std::vector & pts_polar_out) 201 | { 202 | // theta is angle plane x-y 203 | // alpha is angle plane z-y 204 | 205 | Point3D_str origin; 206 | Point3D_polar_str polar_pt; 207 | 208 | origin.x = 0; 209 | origin.y = 0; 210 | origin.z = 0; 211 | 212 | for(uint i= 0; i < pts_in.size() ; i++) 213 | { 214 | // cart to pol conver 215 | polar_pt.rho = distance_L2_XY (&pts_in[i], &origin); 216 | polar_pt.theta = atan2(pts_in[i].y, pts_in[i].x); 217 | polar_pt.alpha = atan2(sqrt(pts_in[i].x*pts_in[i].x + pts_in[i].y * pts_in[i].y) , pts_in[i].z); 218 | polar_pt.ch = pts_in[i].ch; 219 | polar_pt.status = pts_in[i].status; 220 | polar_pt.w = pts_in[ i].w; 221 | polar_pt.tag = pts_in[ i].tag; 222 | polar_pt.label = pts_in[ i ].label; 223 | polar_pt.id = pts_in[ i ].id; 224 | 225 | // pump it in! 226 | pts_polar_out.push_back(polar_pt); 227 | } 228 | 229 | } 230 | 231 | 232 | ////------------------------------------------------------------------------------------------------------------------- 233 | 234 | 235 | void conv2cart_func(std::vector & pts_polar_in, std::vector & pts_out) 236 | { 237 | // theta is angle plane x-y 238 | // alpha is angle plane z-y 239 | 240 | Point3D_str pt; 241 | 242 | for(uint i= 0; i < pts_polar_in.size() ; i++) 243 | { 244 | // pol2cart conver 245 | pt.x = pts_polar_in[i].rho * sin(pts_polar_in[i].alpha) * cos(pts_polar_in[i].theta); 246 | pt.y = pts_polar_in[i].rho * sin(pts_polar_in[i].alpha) * sin(pts_polar_in[i].theta); 247 | pt.z = pts_polar_in[i].rho * cos(pts_polar_in[i].alpha); 248 | pt.ch = pts_polar_in[i].ch; 249 | pt.status = pts_polar_in[i].status; 250 | pt.w = pts_polar_in[i].w; 251 | pt.label = pts_polar_in[i].label; 252 | pt.id = pts_polar_in[i].id; 253 | pt.tag = pts_polar_in[i].tag; 254 | 255 | // printf("*[%f %f %f] %d\n",pt.x ,pt.y, pt.z, pt.ch ); 256 | 257 | // pump it in! 
258 | pts_out.push_back(pt); 259 | } 260 | 261 | } 262 | 263 | 264 | ////------------------------------------------------------------------------------------------------------------------- 265 | 266 | 267 | 268 | void get_circle_param(Point3D_container *laserfeat_cluster, Point3D_str *circle_param) 269 | { 270 | // takes only plane XY 271 | 272 | std::vector A (laserfeat_cluster->pts.size()*3); 273 | std::vector B (laserfeat_cluster->pts.size()); 274 | 275 | // fill A 276 | for(uint i=0,a=0; i < laserfeat_cluster->pts.size(); i++) 277 | { 278 | A[a] = -2.0 * laserfeat_cluster->pts[i].x; 279 | a = a+1; 280 | A[a] = -2.0 * laserfeat_cluster->pts[i].y; 281 | a = a+1; 282 | A[a]= 1.0; 283 | a = a+1; 284 | } 285 | 286 | // fill B 287 | for(uint i=0; i < laserfeat_cluster->pts.size(); i++) 288 | B[i] = -laserfeat_cluster->pts[i].x*laserfeat_cluster->pts[i].x - laserfeat_cluster->pts[i].y*laserfeat_cluster->pts[i].y; 289 | 290 | gsl_matrix_view m = gsl_matrix_view_array (&A[0], laserfeat_cluster->pts.size(), 3); 291 | gsl_vector_view b = gsl_vector_view_array (&B[0], laserfeat_cluster->pts.size()); 292 | 293 | // GSL SOLVER 294 | gsl_vector *x = gsl_vector_alloc (3); 295 | gsl_vector *tau= gsl_vector_alloc (3); 296 | gsl_vector *residual = gsl_vector_alloc (laserfeat_cluster->pts.size()); 297 | 298 | gsl_linalg_QR_decomp (&m.matrix, tau); 299 | gsl_linalg_QR_lssolve (&m.matrix, tau, &b.vector, x, residual); 300 | 301 | 302 | // in Z I put rc 303 | circle_param -> x = x->data[0]; 304 | circle_param -> y = x->data[1]; 305 | circle_param -> z = sqrt( x->data[0] * x->data[0] + x->data[1] * x->data[1] - x->data[2]); 306 | 307 | //printf ("%f %f %f\n",xc,yc,rc); 308 | 309 | gsl_vector_free (x); 310 | gsl_vector_free (tau); 311 | gsl_vector_free (residual); 312 | } 313 | 314 | 315 | 316 | ////------------------------------------------------------------------------------------------------------------------- 317 | 318 | 319 | 320 | void get_line_param(Point3D_container *laserfeat_cluster, Point3D_str *circle_param) 321 | { 322 | // takes only plane XY 323 | 324 | std::vector A; 325 | std::vector B; 326 | 327 | // fill A 328 | for(uint i=0; i < laserfeat_cluster->pts.size(); i++) 329 | { 330 | A.push_back( laserfeat_cluster->pts[i].x); 331 | A.push_back(1.0); 332 | } 333 | 334 | // fill B 335 | for(uint i=0; i < laserfeat_cluster->pts.size(); i++) 336 | B.push_back( laserfeat_cluster->pts[i].y); 337 | 338 | 339 | gsl_matrix_view m = gsl_matrix_view_array (&A[0], laserfeat_cluster->pts.size(), 2); 340 | gsl_vector_view b = gsl_vector_view_array (&B[0], laserfeat_cluster->pts.size()); 341 | 342 | // GSL SOLVER 343 | gsl_vector *x = gsl_vector_alloc (2); 344 | gsl_vector *tau= gsl_vector_alloc (2); 345 | gsl_vector *residual = gsl_vector_alloc (laserfeat_cluster->pts.size()); 346 | 347 | gsl_linalg_QR_decomp (&m.matrix, tau); 348 | gsl_linalg_QR_lssolve (&m.matrix, tau, &b.vector, x, residual); 349 | 350 | 351 | // in Z I put rc 352 | circle_param -> x = x->data[0]; 353 | circle_param -> y = x->data[1]; 354 | circle_param -> z = 0; 355 | 356 | //printf ("%f %f %f\n",xc,yc,rc); 357 | 358 | gsl_vector_free (x); 359 | gsl_vector_free (tau); 360 | gsl_vector_free (residual); 361 | } 362 | 363 | ////------------------------------------------------------------------------------------------------------------------- 364 | 365 | -------------------------------------------------------------------------------- /src/lib/object_tracking.cpp: -------------------------------------------------------------------------------- 
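// Editor's note (sketch, not part of the original file): object_tracking.cpp
// implements a constant-velocity Kalman tracker over the state [x y vx vy].
// Judging from the functions defined below, a typical per-frame cycle would be:
//
//   ObjectTracking::predict(humans);                      // propagate states, decay scores
//   std::map<int, int> pairs;
//   ObjectTracking::pair(humans, observations, pairs);    // gated nearest-neighbour matching
//   ObjectTracking::update(humans, observations, pairs);  // Kalman update + spawn new tracks
//   ObjectTracking::eliminate(humans);                    // drop stale or low-score tracks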
1 | #include 2 | 3 | //using namespace std; 4 | using std::deque; 5 | using std::map; 6 | using NEWMAT::Matrix; 7 | using NEWMAT::IdentityMatrix; 8 | 9 | using namespace Header; 10 | 11 | namespace ObjectTracking 12 | { 13 | float max_mah_dist; 14 | float max_euc_dist; 15 | 16 | int init_id; 17 | 18 | float new_object_score; 19 | float predict_score; 20 | float update_score; 21 | } 22 | 23 | void ObjectTracking::loadCfg(string cfg) 24 | { 25 | 26 | max_mah_dist = 2.3; 27 | max_euc_dist = 3.3; 28 | 29 | init_id = 0; 30 | 31 | new_object_score = 4.0; 32 | predict_score = -1.0; 33 | update_score = 1.0; 34 | } 35 | void ObjectTracking::loadCfg(detectorParameters params) 36 | { 37 | max_mah_dist = params.max_mah_dist; 38 | max_euc_dist = params.max_euc_dist; 39 | 40 | init_id = params.init_id; 41 | 42 | new_object_score = params.new_object_score; 43 | predict_score = params.predict_score; 44 | update_score = params.update_score; 45 | } 46 | 47 | void ObjectTracking::eliminate(deque &humans) 48 | { 49 | for (int i = 0; i < (int)humans.size(); i++) 50 | { 51 | //if(i==1) 52 | //fprintf(stderr, "delete human %d - Score %f\n",i+1,humans[i].score); 53 | if (humans[i].score < 0.0 || curTimestamp - humans[i].preTimestamp > 5) 54 | { 55 | humans.erase(humans.begin() + i); 56 | i--; 57 | } 58 | } 59 | } 60 | 61 | void ObjectTracking::predict(deque &humans) 62 | { 63 | float deltaT = curTimestamp - preTimestamp; 64 | 65 | Matrix A = Matrix(4, 4); 66 | A << 1 << 0 << deltaT << 0 << 67 | 0 << 1 << 0 << deltaT << 68 | 0 << 0 << 1 << 0 << 69 | 0 << 0 << 0 << 1; 70 | 71 | Matrix Q = Matrix(4, 4); 72 | Q << 0.16 * deltaT << 0 << 0 << 0 << 73 | 0 << 0.16 * deltaT << 0 << 0 << 74 | 0 << 0 << 0.2025 * deltaT << 0 << 75 | 0 << 0 << 0 << 0.2025 * deltaT; 76 | 77 | for (uint i = 0; i < humans.size(); i++) 78 | { 79 | humans[i].score += predict_score * deltaT; 80 | humans[i].state = A * humans[i].state; 81 | humans[i].cov = A * humans[i].cov * A.t() + Q; 82 | //fprintf(stderr, "predict (ID, score, deltaT) = (%d, %.2f, %.2f) \n", humans[i].id, humans[i].score, deltaT); 83 | } 84 | } 85 | 86 | void ObjectTracking::pair(deque &humans, deque &observations, map &pairs) 87 | { 88 | if (humans.size() == 0 || observations.size() == 0) 89 | { 90 | //fprintf(stderr, "No observations"); 91 | return; 92 | } 93 | 94 | // Matching Matrix 95 | Matrix MahDis = Matrix(observations.size(), humans.size()); 96 | Matrix EucDis = Matrix(observations.size(), humans.size()); 97 | 98 | for (uint i = 0; i < observations.size(); i++) 99 | { 100 | for (uint j = 0; j < humans.size(); j++) 101 | { 102 | // Mahalanobis Distance and Euclidean Distance 103 | MahDis(i + 1, j + 1) = calculateMahDis(observations[i], humans[j]); 104 | EucDis(i + 1, j + 1) = calculateEucDis(observations[i], humans[j]); 105 | //fprintf(stderr, "observation %d, humans %d, minEuc = %.2f, minMah = %.2f \n",i, j, EucDis(i + 1, j + 1), MahDis(i + 1, j + 1)); 106 | } 107 | } 108 | 109 | int pairNum = 0; 110 | 111 | //fprintf(stderr, "---------- Pair Start ----------\n"); 112 | while (true) 113 | { 114 | // Get the row and the col of the minimum value in the matrix 115 | int row = 0; 116 | int col = 0; 117 | 118 | // int rowE = 0; 119 | // int colE = 0; 120 | //float minEuc = 10; 121 | 122 | float minMah = MahDis.Minimum2(row, col); 123 | float minEuc = EucDis(row, col); 124 | 125 | // float minEuc_2 = EucDis.Minimum2(rowE, colE); 126 | // float minMah_2 = MahDis(rowE, colE); 127 | // float minMah = MahDis(row, col); 128 | 129 | // float minMah = MahDis(row, col); 130 | // If 
the euclidean distance is different from the mahalanobis one 131 | // discard the pairing 132 | // if (row != rowE || col != colE) 133 | // { 134 | // if(minEuc > 2.0) 135 | // minEuc = 10; 136 | // } 137 | 138 | //fprintf(stderr, "row %d, col %d, minEuc = %.2f, minMah = %.2f \n",row, col, minEuc, minMah); 139 | 140 | // Observation index 141 | int i = row - 1; 142 | 143 | // Human index 144 | int j = col - 1; 145 | 146 | // Pair observation and human only if distance is smaller than MAX_MAH_DIS and MAX_EUC_DIS 147 | if (minMah < max_mah_dist) 148 | { 149 | if (minEuc < max_euc_dist) 150 | { 151 | pairs[i] = j; 152 | pairNum += 1; 153 | 154 | // Puts the row and column selected to infinity to avoid double pairing 155 | EucDis.Row(row) = std::numeric_limits::infinity(); 156 | EucDis.Column(col) = std::numeric_limits::infinity(); 157 | MahDis.Row(row) = std::numeric_limits::infinity(); 158 | MahDis.Column(col) = std::numeric_limits::infinity(); 159 | 160 | //fprintf(stderr, "Success = %d %d, minEuc = %.2f, minMah = %.2f, cov = %.2f %.2f \n", 161 | // i, humans[j].id, minEuc, minMah, humans[j].cov(1, 1), humans[j].cov(2, 2)); 162 | } 163 | else 164 | { 165 | //fprintf(stderr, "Euc Fail -- Hum id: %d Obs: %d, minMah = %.2f - minEuc = %.2f \n", humans[j].id, i, minMah, minEuc); 166 | EucDis(row, col) = std::numeric_limits::infinity(); 167 | MahDis(row, col) = std::numeric_limits::infinity(); 168 | } 169 | } 170 | else 171 | { 172 | //fprintf(stderr, "Mah Fail = Obs: %d Hum id: %d, minMah = %.2f \n", i, humans[j].id, minEuc); 173 | break; 174 | } 175 | 176 | if (pairNum == fmin(observations.size(), humans.size())) 177 | { 178 | break; 179 | } 180 | } 181 | //fprintf(stderr, "---------- Pair End ----------\n"); 182 | } 183 | 184 | void ObjectTracking::update(deque &humans, deque &observations, map &pairs) 185 | { 186 | if (observations.size() == 0) 187 | { 188 | return; 189 | } 190 | 191 | float deltaT = curTimestamp - preTimestamp; 192 | 193 | Matrix R = Matrix(4, 4); 194 | 195 | R << 0.09 << 0 << 0 << 0 << 196 | 0 << 0.09 << 0 << 0 << 197 | 0 << 0 << 0.36 << 0 << 198 | 0 << 0 << 0 << 0.36; 199 | 200 | deque unpairs; 201 | unpairs.resize(observations.size(), true); 202 | 203 | // For Pairing Result 204 | for (map::iterator it = pairs.begin(); it != pairs.end(); it++) 205 | { 206 | // Observation index 207 | int i = it->first; 208 | 209 | // Human index 210 | int j = it->second; 211 | 212 | observations[i].state(3) = (observations[i].state(1) - humans[j].preState(1)) / (curTimestamp - humans[j].preTimestamp); 213 | observations[i].state(4) = (observations[i].state(2) - humans[j].preState(2)) / (curTimestamp - humans[j].preTimestamp); 214 | 215 | Matrix H = IdentityMatrix(4); 216 | Matrix Y = observations[i].state - H * humans[j].state; 217 | Matrix S = H * humans[j].cov * H.t() + R; 218 | Matrix K = humans[j].cov * H.t() * S.i(); 219 | Matrix I = IdentityMatrix(K.Ncols()); 220 | 221 | // Maintain score if only detected by laser 222 | humans[j].score -= predict_score * deltaT; 223 | 224 | // Increase score if detected by camera 225 | if (observations[i].camera_detected == true) 226 | { 227 | // humans[j].score -= 0.01 * PREDICT_OBJECT_SCORE * deltaT; 228 | humans[j].score += ((update_score - (calculateEucDis(observations[i], humans[j]) / max_euc_dist)) + 229 | (update_score - (calculateMahDis(observations[i], humans[j]) / max_mah_dist)) * 2) * deltaT; 230 | } 231 | 232 | humans[j].state = humans[j].state + K * Y; 233 | humans[j].cov = (I - K * H) * humans[j].cov; 234 | 235 | humans[j].preState = 
humans[j].state; 236 | humans[j].preTimestamp = curTimestamp; 237 | 238 | unpairs[i] = false; 239 | //fprintf(stderr, "paired (observation, ID) = (%d, %d) \n", i, humans[j].id); 240 | } 241 | 242 | // For New Observations Result 243 | for (uint i = 0; i < observations.size(); i++) 244 | { 245 | // Only the observation detected by image can be put into the human list 246 | if (unpairs[i] == true && observations[i].camera_detected == true) 247 | { 248 | humans.push_back(Human(init_id, new_object_score, observations[i].state, R, curTimestamp)); 249 | init_id += 1; 250 | pairs[i] = humans.size() - 1; 251 | // fprintf(stderr, "new (observation, ID) = (%d, %d) \n", i, humans[pairs[i]].id); 252 | } 253 | } 254 | } 255 | 256 | float ObjectTracking::calculateMahDis(Observation &observation, Human &human) 257 | { 258 | float sum1 = (human.state(1) - observation.state(1)) * (human.state(1) - observation.state(1)) / human.cov(1, 1); 259 | float sum2 = (human.state(2) - observation.state(2)) * (human.state(2) - observation.state(2)) / human.cov(2, 2); 260 | return sqrt(sum1 + sum2); 261 | } 262 | 263 | float ObjectTracking::calculateEucDis(Observation &observation, Human &human) 264 | { 265 | float sum1 = (human.state(1) - observation.state(1)) * (human.state(1) - observation.state(1)); 266 | float sum2 = (human.state(2) - observation.state(2)) * (human.state(2) - observation.state(2)); 267 | 268 | return sqrt(sum1 + sum2); 269 | } 270 | -------------------------------------------------------------------------------- /src/lib/observation.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | 4 | //using namespace cv; 5 | using cv::Mat; 6 | using cv::Rect; 7 | //using namespace NEWMAT; 8 | using NEWMAT::ColumnVector; 9 | Observation::Observation(int scan_index, float prob, bool camera_detected, geometry_msgs::Point32 &pos) 10 | { 11 | this->scan_index = scan_index; 12 | 13 | this->prob = prob; 14 | 15 | this->camera_detected = camera_detected; 16 | 17 | this->state = ColumnVector(4); 18 | this->state << pos.x << 19 | pos.y << 20 | 0 << 21 | 0; 22 | } 23 | 24 | Observation::Observation(int scan_index, float prob, bool camera_detected, geometry_msgs::Point32 &pos, Mat &image, Rect &rect) 25 | { 26 | this->scan_index = scan_index; 27 | 28 | this->prob = prob; 29 | 30 | this->camera_detected = camera_detected; 31 | 32 | this->state = ColumnVector(4); 33 | this->state << pos.x << 34 | pos.y << 35 | 0 << 36 | 0; 37 | 38 | this->image = image; 39 | this->rect = rect; 40 | } 41 | -------------------------------------------------------------------------------- /src/lib/projectTools.cpp: -------------------------------------------------------------------------------- 1 | #include "hdetect/lib/projectTools.hpp" 2 | 3 | #define HOG_WIDTH 64 4 | #define HOG_HEIGHT 128 5 | 6 | //using namespace std; 7 | //using namespace cv; 8 | using cv::Point; 9 | using cv::Size; 10 | using cv::Rect; 11 | using cv::Scalar; 12 | using cv::Point2d; 13 | using cv::Mat; 14 | using std::vector; 15 | 16 | /// Projects a point from the laser scan to a 2d top-down view plane. 
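/// Worked example (editor's sketch, assuming a 500x500 window and zoom = 30 m):
/// a laser point at (x = 5, y = 0) maps to
///   ptOut.x = 499 - (0*500/30 + 250)      = 249   (horizontal centre)
///   ptOut.y = 499 - (5*500/30 + 500/30)   = 399   (100 px above the bottom edge)
/// so +x (straight ahead of the laser) points up the image and +y points left.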
17 | void pointToPlane(geometry_msgs::Point32 &ptIn, Point &ptOut, Size &windowSize, int &zoom) 18 | { 19 | int width = windowSize.width; 20 | int height = windowSize.height; 21 | 22 | ptOut.x = width - 1 - (int)((ptIn.y * width / (float)zoom) + width / 2.0); 23 | ptOut.y = height - 1 - (int)((ptIn.x * height / (float)zoom) + height / (float)zoom); 24 | } 25 | 26 | /// Function to return a selection of random generated colors 27 | vector randomColors(cv::RNG& rng) 28 | { 29 | vector colors; 30 | 31 | for (int i = 0; i < 250; i++) 32 | { 33 | int icolor = (unsigned)rng; 34 | colors.push_back(cv::Scalar(icolor & 255, (icolor >> 8) & 255, (icolor >> 16) & 255)); 35 | } 36 | 37 | return colors; 38 | } 39 | 40 | /// Returns the corresponding crop of an image 41 | void getCrop(cv::Mat &roi, cv::Mat &image, Rect &rect) 42 | { 43 | roi = image(rect); 44 | } 45 | 46 | /// Returns true if the box lies within the image 47 | bool checkBox(sensor_msgs::CameraInfo &cInfo, Rect &rect) 48 | { 49 | bool ret = (rect.x >= 0 && rect.x + rect.width < (int)cInfo.width) && // width inbounds 50 | (rect.y >= 0 && rect.y + rect.height < (int)cInfo.height) && // height inbounds 51 | (rect.width >= HOG_WIDTH && rect.height >= HOG_HEIGHT); // boxsize check 52 | 53 | return ret; 54 | } 55 | 56 | /// Finds the right bounding box size according to the distance 57 | /// 1.0xL 58 | /// _______________ 59 | /// |<-upleft | 60 | /// | | 61 | /// | | 62 | /// | | 63 | /// | | 2.0xL 64 | /// | | 65 | /// | | 66 | /// | | 67 | /// |_downright->_| 68 | /// 69 | void getBox(geometry_msgs::Point32 &it, Point2d &pt2D, Rect &rect, double &M_TO_PIXELS, double &BODY_RATIO) 70 | { 71 | //boxWidth = (int)(M_TO_PIXELS / sqrt(pow(double(it.x)*M_TO_PIXELS, 2.0) + pow(double(it.y)*M_TO_PIXELS, 2.0))); 72 | double dist = sqrt(pow(it.x, 2.0) + pow(it.y, 2.0)); 73 | float boxWidth = M_TO_PIXELS / dist; 74 | 75 | // Compute the rectangle 76 | rect.x = (int)(pt2D.x - (boxWidth / 2)); 77 | rect.y = (int)(pt2D.y - BODY_RATIO * boxWidth); 78 | rect.width = (int)(1.0 * boxWidth); 79 | rect.height = (int)(2.0 * boxWidth); 80 | } 81 | 82 | void CameraInfo2CV(sensor_msgs::CameraInfo &cInfo, Mat &K, Mat &D, int &rect) 83 | { 84 | // K Camera Matrix for Distorted images 85 | K = Mat::zeros(3, 3, CV_64FC1); 86 | 87 | int i, j; 88 | for (i = 0; i < 3; i++) 89 | { 90 | for (j = 0; j < 3; j++) 91 | { 92 | K.at(i, j) = cInfo.K[3 * i + j]; 93 | } 94 | } 95 | 96 | D = Mat::zeros(1,5,CV_64FC1); 97 | 98 | if (rect == 0) 99 | { 100 | for (i = 0; i < 5; i++) 101 | { 102 | D.at(i) = cInfo.D[i]; 103 | } 104 | } 105 | 106 | 107 | /* Print the camera calibration parameters. 
108 | printf("\nCamera Matrix K\n"); 109 | for (i = 0; i < 3; i++) 110 | { 111 | for (j = 0; j < 3; j++) 112 | { 113 | printf("%f\t",cInfo.K[3*i+j]); 114 | } 115 | printf("\n"); 116 | } 117 | 118 | printf("\nRectification Matrix R\n"); 119 | for (i = 0; i < 3; i++) 120 | { 121 | for (j = 0; j < 3; j++) 122 | { 123 | printf("%f\t",cInfo.R[3*i+j]); 124 | } 125 | printf("\n"); 126 | } 127 | printf("\nProjection Matrix P\n"); 128 | for (i = 0; i < 3; i++) 129 | { 130 | for (j = 0; j < 4; j++) 131 | { 132 | printf("%f\t",cInfo.P[4*i+j]); 133 | } 134 | printf("\n"); 135 | } 136 | 137 | 138 | printf("\nDistortion coefficients D\n"); 139 | if (rect == 0) { 140 | for (i = 0; i < 5; i++) { 141 | printf("%f ",D.at(i)); 142 | } 143 | } 144 | */ 145 | } 146 | 147 | 148 | void projectPoint(geometry_msgs::Point32 &pointIn, Point2d &pointOut, Mat &K, Mat &D, 149 | tf::StampedTransform &transform) 150 | { 151 | /* 152 | * METHOD: calib3d::projectPoints 153 | * 154 | */ 155 | 156 | Mat pIn(1, 3, CV_64FC1); 157 | Mat pOut(1, 3, CV_64FC1); 158 | 159 | Mat cvPhi = Mat::zeros(3, 3, CV_64FC1); 160 | Mat rvec = Mat::zeros(3, 1, CV_64FC1); 161 | Mat tvec = Mat::zeros(3, 1, CV_64FC1); 162 | 163 | tf::Vector3 t = transform.getOrigin(); 164 | tvec.at(0, 0) = t.x(); 165 | tvec.at(0, 1) = t.y(); 166 | tvec.at(0, 2) = t.z(); 167 | //tf::Quaternion q; 168 | //q = transform.getRotation(); 169 | tf::Matrix3x3 phi(transform.getRotation()); 170 | 171 | // *********************************** 172 | // With matlab camera calib. 173 | // NOTICE WE EXCHANGE X --> Z, y --> X 174 | // *********************************** 175 | // *********************************** 176 | // pIn.at(0, 0) = pointIn.y; 177 | // pIn.at(0, 1) = pointIn.z; 178 | // pIn.at(0, 2) = pointIn.x; 179 | // ----------------------------------- 180 | // *********************************** 181 | // *********************************** 182 | // With ROS camera tf calibration 183 | // NOTICE WE EXCHANGE z --> x, y --> z x --> y 184 | // *********************************** 185 | // *********************************** 186 | pIn.at(0, 0) = pointIn.z; 187 | pIn.at(0, 1) = pointIn.x; 188 | pIn.at(0, 2) = pointIn.y; 189 | 190 | 191 | /* 192 | //phi.setValue( -0.999929898220757 , -0.00442044279230751 , 0.0109844585550146, 193 | // 0.00357500223864038 , -0.997114460355643 , -0.0758285718490689, 194 | // 0.0112879583282635 , -0.0757839866673404 , 0.99706036395074); 195 | //phi.setRPY(-0.075862, -0.011288, 3.138017); 196 | phi.setRotation(q); 197 | //q.setRPY(roll, pitch, yaw); 198 | * 199 | 200 | //phi.setEulerYPR(roll, pitch, yaw); 201 | // YPR 3.138017 -0.011288 -0.075862 202 | */ 203 | double roll, pitch, yaw; 204 | phi.getRPY(roll, pitch, yaw); 205 | 206 | //printf("\nPHI\n"); 207 | for (uint i = 0; i < 3; i++) 208 | { 209 | for (uint j = 0; j < 3; j++) 210 | { 211 | cvPhi.at(i, j) = phi.getRow(i)[j]; 212 | //printf("%f\t", cvPhi.at(i,j)); 213 | } 214 | //printf("\n"); 215 | } 216 | 217 | /* 218 | //printf("\nRPY Q = (%f, %f, %f, %f)\n", q.x(), q.y(), q.z(), q.w()); 219 | //phi.getRotation(q); 220 | printf("Rotation Q = (%f, %f, %f, %f)\n\n", q.x(), q.y(), q.z(), q.w()); 221 | 222 | printf("Rotation RPY = (%f, %f, %f)\n\n", roll, pitch, yaw); 223 | 224 | printf("Translation = (%f, %f, %f)\n\n", t.x(), t.y(), t.z()); 225 | 226 | 227 | // Print results 228 | printf("Original Point: x %f y %f z %f\n", pt.x, pt.y, pt.z); 229 | printf("Projected point: x %f y %f z %f\n\n",pIn.at(0,0), pIn.at(0,1), pIn.at(0,2)); 230 | 231 | */ 232 | Rodrigues(cvPhi, rvec); 233 | 234 | 
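// Editor's note: cv::Rodrigues converts the 3x3 rotation matrix cvPhi into the
// compact axis-angle vector rvec that cv::projectPoints expects; rvec's direction
// is the rotation axis and its magnitude is the rotation angle in radians.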
//printf("\nRvec %f %f %f",rvec.at(0,0),rvec.at(0,1),rvec.at(0,2)); 235 | //printf("\nTvec %f %f %f",tvec.at(0,0),tvec.at(0,1),tvec.at(0,2)); 236 | 237 | /* 238 | int i, j; 239 | printf("\nCamera Matrix K\n"); 240 | for (i = 0; i < 3; i++) 241 | { 242 | for (j = 0; j < 3; j++) 243 | { 244 | //K.at(i, j) = cInfo.K[3 * i + j]; 245 | printf("%f\t",K.at(i,j)); 246 | } 247 | printf("\n"); 248 | } 249 | 250 | printf("\nD (kc) "); 251 | for (int i = 0; i < 5; i++) { 252 | //D.at(0,i) = 0; 253 | printf("%f ",D.at(0,i)); 254 | } 255 | */ 256 | 257 | //pIn.at(0,0)=5; 258 | //pIn.at(0,1)=0; 259 | //pIn.at(0,2)=10; 260 | //printf("\n\nPoint in %f %f %f\n",pIn.at(0,0),pIn.at(0,1),pIn.at(0,2)); 261 | cv::projectPoints(pIn, rvec, tvec, K, D, pOut); 262 | //printf("After projectPoints"); 263 | //printf("\nPoint out %f %f %f\n",pOut.at(0,0),pOut.at(0,1),pOut.at(0,2)); 264 | 265 | // printf("Computed rotation\n"); 266 | // printf("Rotation vector = %f %f %f\n",rvec.at(0,0),rvec.at(0,1),rvec.at(0,2)); 267 | // printf("Point = x %f y %f\n\n",pOut.at(0,0),pOut.at(0,1)); 268 | 269 | pointOut.x = pOut.at(0, 0); 270 | pointOut.y = pOut.at(0, 1); 271 | 272 | /* 273 | // rotation vector NOT RPY 274 | rvec.at(0,0)= 0.0174833625050782; 275 | rvec.at(0,1)= -0.119012558661694; 276 | rvec.at(0,2)= 3.13528527743474; 277 | 278 | cv::projectPoints(pIn, rvec, tvec, K, cam_info->D, pOut); 279 | printf("Hardcoded rotation\n"); 280 | printf("Rotation vector = %f %f %f\n",rvec.at(0,0),rvec.at(0,1),rvec.at(0,2)); 281 | printf("Point = x %f y %f\n\n",pOut.at(0,0),pOut.at(0,1)); 282 | 283 | waitKey(); 284 | */ 285 | } 286 | -------------------------------------------------------------------------------- /src/lib/visualizer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | //using namespace std; 4 | using std::string; 5 | //using namespace cv; 6 | using cv::RNG; 7 | using cv::namedWindow; 8 | using cv::Scalar; 9 | using cv::Point; 10 | using cv::waitKey; 11 | using cv::Mat; 12 | 13 | #define FALSE 0 14 | #define TRUE 1 15 | 16 | #define INITIAL_ZOOM 30 17 | 18 | static const char C_WINDOW[] = "Visualizer: Camera + Laser"; 19 | static const char L_WINDOW[] = "Visualizer: Laser"; 20 | 21 | visualizer::visualizer() 22 | { 23 | ss << std::fixed << std::setprecision(2); 24 | 25 | // Random generator for the random coloring of the clusters 26 | RNG rng(0xFFFFFFFF); 27 | pallete = randomColors(rng); 28 | 29 | // init window and font 30 | namedWindow(C_WINDOW, CV_WINDOW_AUTOSIZE); 31 | namedWindow(L_WINDOW, CV_WINDOW_AUTOSIZE); 32 | 33 | 34 | cvInitFont(&font_, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5); 35 | 36 | zoom = INITIAL_ZOOM; // in meters 37 | 38 | // Trackbar to control the zoom 39 | // createTrackbar("Zoom:", L_WINDOW, &zoom, 30, &plotLaser, this); 40 | 41 | pub = nh.advertise("Hz", 5); 42 | dummy.data=1; 43 | 44 | ROS_INFO("[VISUALIZER] Visualizer running OK."); 45 | colors.push_back(Scalar(191, 0, 0)); 46 | colors.push_back(Scalar(0, 191, 0)); 47 | colors.push_back(Scalar(0, 0, 191)); 48 | colors.push_back(Scalar(255, 63, 0)); 49 | colors.push_back(Scalar(63, 255, 0)); 50 | colors.push_back(Scalar(255, 0, 63)); 51 | colors.push_back(Scalar(63, 0, 255)); 52 | colors.push_back(Scalar(0, 255, 63)); 53 | colors.push_back(Scalar(0, 63, 255)); 54 | colors.push_back(Scalar(191, 191, 0)); 55 | colors.push_back(Scalar(191, 0, 191)); 56 | colors.push_back(Scalar(0, 191, 191)); 57 | } 58 | 59 | visualizer::~visualizer() 60 | { 61 | cv::destroyWindow(C_WINDOW); 62 | 
cv::destroyWindow(L_WINDOW); 63 | } 64 | 65 | 66 | //std::vector &visualizer::getClusteredData() 67 | //{ 68 | // return clusterData; 69 | //} 70 | 71 | 72 | //void visualizer::setLaserPlane(Mat lp) 73 | //{ 74 | // laserPlane = lp; 75 | //} 76 | 77 | /** 78 | * It superimposes the clusters on the image along with their bounding boxes if available. 79 | * Also draws a 2d plane of the laser scan to facilitate the annotation process. 80 | */ 81 | void visualizer::visualizeData(const sensor_msgs::Image::ConstPtr &image, 82 | const sensor_msgs::LaserScan::ConstPtr &lScan) 83 | { 84 | detector::detectHumans(image, lScan); 85 | 86 | // Convert image to RGB 87 | cvtColor(cv_ptr->image, colorImage , CV_GRAY2RGB); 88 | 89 | // Iterate through every cog of the scanClusters 90 | for (uint i = 0; i < clusterData.size() ;i++) 91 | { 92 | // If the cog is in the image save its features and then plot the cluster 93 | if (clusterData[i].cog_projected == true) 94 | { 95 | // Get the color index 96 | // color = getColor(clusterData[i].cog); 97 | color = colors.at(i % colors.size()); 98 | 99 | // Draw a rectangle around each crop 100 | //rectangle(colorImage, upleft, downright, color, 2, 8, 0); 101 | 102 | /// TODO add code so when in detector mode we classify the crop/cluster or only cluster 103 | 104 | /// This cluster is in the image 105 | /// Doedsn't matter if we use fusion or only laser, we want to show the classification to the image 106 | /// check if fused 107 | /// use getCrop 108 | /// use a new method that computes class for cluster , cluster/crop 109 | /// add an if below so it draws something in either case 110 | 111 | // Draw the cluster number 112 | projectPoint(clusterData[i].cog, prPixel, K , D, transform); 113 | putText(colorImage, boost::lexical_cast(i), prPixel, 1, 1.4, color, 1.6, 1); 114 | circle(colorImage, prPixel, 4, color); 115 | 116 | // Draw the rectangle around the ROI 117 | if (clusterData[i].crop_projected == true) 118 | { 119 | if (clusterData[i].detection_label == true) 120 | { 121 | getBox(clusterData[i].cog, prPixel, rect, params.m_to_pixels, params.body_ratio); 122 | rectangle(colorImage, rect, color); 123 | ss << clusterData[i].detection_fusion_prob; 124 | putText(colorImage, ss.str(), Point(rect.x, rect.y), 1, 1, color, 1, 1); 125 | ss.str(""); 126 | } 127 | } 128 | 129 | // This is the code to superimpose the clusters on the image 130 | for (uint j = 0; j < clusterData[i].clusters.points.size(); j++) 131 | { 132 | // Convert each cluster point to image coordinates 133 | projectPoint(clusterData[i].clusters.points[j], prPixel, K , D, transform); 134 | 135 | // Draw the point to the image 136 | if (prPixel.x >= 0 && prPixel.x < colorImage.cols && prPixel.y >= 0 && prPixel.y < colorImage.rows) 137 | { 138 | circle(colorImage, prPixel, 2, color); 139 | } 140 | } 141 | } 142 | } 143 | /* 144 | ROS_INFO("[ANNOTATOR] num features %d", scanClusters.nfeatures); 145 | ROS_INFO("[ANNOTATOR] size features %d", scanClusters.features[0].data.size()); 146 | ROS_INFO("[ANNOTATOR] num clusters %d", scanClusters.nclusters); 147 | ROS_INFO("[ANNOTATOR] size clusters %d", scanClusters.clusters.size()); 148 | ROS_INFO("[ANNOTATOR] size features %d", scanClusters.features.size()); 149 | ROS_INFO("[ANNOTATOR] size labels %d", scanClusters.labels.size()); 150 | ROS_INFO("[ANNOTATOR] size fusion %d", scanClusters.fusion.size()); 151 | /*/ 152 | 153 | plotLaser(zoom); 154 | cv::imshow(L_WINDOW, laserPlane); 155 | waitKey(3); 156 | cv::imshow(C_WINDOW, colorImage); 157 | waitKey(3); 158 | 159 | 
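// Editor's note: the waitKey(3) calls above are what actually service the HighGUI
// event loop; without a short wait after each imshow() the windows never repaint.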
//waitKey(); 160 | pub.publish(dummy); 161 | //*/ 162 | } 163 | 164 | /// Returns a color from the pallete, based on its position 165 | Scalar visualizer::getColor(geometry_msgs::Point32 &point) 166 | { 167 | return pallete[(int) (5 * distance(point))]; 168 | } 169 | 170 | float visualizer::distance(geometry_msgs::Point32 point) 171 | { 172 | return sqrt(point.x * point.x + point.y * point.y); 173 | } 174 | 175 | 176 | void visualizer::plotLaser(int zoom) 177 | { 178 | laserPlane = Mat(window_size, CV_8UC3); 179 | laserPlane.setTo(Scalar(255, 255, 255)); 180 | 181 | Point pt; 182 | Scalar color; 183 | // The mat will be 1000(X)x500(Y) pixels 184 | // The values are computed according to the zoom value 185 | // The default zoom corresponds to 500 pixels = 30000 mm 186 | 187 | for (uint cNo = 0; cNo < clusterData.size(); cNo++) 188 | { 189 | if (1) //scanClusters.fusion[clusterNo] == TRUE) 190 | { 191 | // color = getColor(clusterData[cNo].cog); 192 | color = colors.at(cNo % colors.size()); 193 | 194 | for (uint pointNo = 0; pointNo < clusterData[cNo].clusters.points.size(); pointNo++) 195 | { 196 | // Transform x, y point to pixels 197 | pointToPlane(clusterData[cNo].clusters.points[pointNo], pt, window_size, zoom); 198 | circle(laserPlane, pt, 2, color); 199 | } 200 | 201 | pointToPlane(clusterData[cNo].cog, pt, window_size, zoom); 202 | 203 | // Number of cluster 204 | //cv::putText(laserPlane, boost::lexical_cast(clusterNo), 205 | // pt, 1, 2, color,2, 8); 206 | //pt.x+=30; 207 | 208 | // Number of points 209 | Scalar black(0,0,0); 210 | // cv::putText(laserPlane, boost::lexical_cast(clusterData[cNo].clusters.points.size()), 211 | // pt, 1, 2, black, 2, 8); 212 | pt.x -= 10; 213 | pt.y += 5; 214 | cv::putText(laserPlane, boost::lexical_cast(cNo), 215 | pt, 1, 0.8, black, 1.6, 8); 216 | 217 | // // Distance to cog 218 | // char buf[10] = ""; 219 | // sprintf(buf, "%.1f", distance(clusterData[cNo].cog)); 220 | // pt.x += 35; 221 | // cv::putText(laserPlane, boost::lexical_cast(buf), 222 | // pt, 1, 0.6, black, 1.0, 8); 223 | 224 | if (clusterData[cNo].label == true) 225 | { 226 | //cv::circle(laserPlane, 1 , 2, Scalar(0,0,0)); 227 | } 228 | } 229 | } 230 | 231 | color.val[0] = 0; 232 | color.val[1] = 255; 233 | color.val[2] = 0; 234 | 235 | geometry_msgs::Point32 org; 236 | org.x = 0; 237 | org.y = 0; 238 | 239 | pointToPlane(org, pt, window_size, zoom); 240 | pt.x -= 10; 241 | cv::putText(laserPlane, "Laser", pt, 1, 1, color, 1, 1); 242 | circle(laserPlane, pt, 2, color); 243 | } 244 | -------------------------------------------------------------------------------- /src/other/Calib_Matlab2ROS.cpp: -------------------------------------------------------------------------------- 1 | // Converts the calibration from matlab to ros. 2 | // The values are hardwired to the code. 
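// Editor's sketch of what "hardwired" means here (fc, cc, kc are the Matlab
// calibration toolbox parameter names; treat this as illustrative, not normative):
//
//   K = [ fx  0  cx ;  0  fy  cy ;  0  0  1 ]   <- from Matlab fc (focal) and cc (principal point)
//   D = [ k1  k2  p1  p2  k3 ]                  <- Matlab kc distortion vector (ROS "plumb_bob" model)
//
// matlab_K and matlab_D below are exactly these matrices flattened row-major.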
3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | using namespace camera_calibration_parsers; 11 | 12 | int main(int argc,char** argv) { 13 | std::string name=("00b09d0100aa73bb"); 14 | sensor_msgs::CameraInfo cam_info; 15 | readCalibrationYml("/home/kabamaru/.ros/camera_info/00b09d0100aa73bb.yaml", name, cam_info); 16 | 17 | cv::Mat ncm; 18 | 19 | double matlab_K[9] = 20 | {668.68462594024, 0, 384.916944772716, 21 | 0, 669.810891377069, 239.099132134278, 22 | 0, 0, 1}; 23 | 24 | double matlab_D[5] = { -0.413014051243856, 25 | 0.182192431895544, 26 | 0.0012217553319971, 27 | -0.00116201628505299, 28 | 0}; 29 | 30 | for (int i=0; i<9 ; i++) cam_info.K[i]=matlab_K[i]; 31 | for (int i=0; i<5 ; i++) cam_info.D[i]=matlab_D[i]; 32 | 33 | 34 | 35 | cv::Mat K(3, 3, CV_64FC1); 36 | cv::Mat R(3, 3, CV_64FC1); 37 | 38 | //float D[5]; 39 | //ncm = cv.GetSubRect(self.P, (0, 0, 3, 3)) 40 | for (int i=0;i<3 ; i++) { 41 | for(int j=0; j<3; j++) { 42 | K.at(i,j)=cam_info.K[3*i+j]; 43 | R.at(i,j)=cam_info.R[i*3+j]; 44 | } 45 | printf("\n"); 46 | } 47 | printf("\n"); 48 | 49 | //for (int i=0; i<5; i++) D[i]=cam_info[ 50 | 51 | ncm = cv::getOptimalNewCameraMatrix(K, cam_info.D, cvSize(cam_info.width,cam_info.height) , 0.0); 52 | 53 | // print camera matrix 54 | for (int i=0;i<3 ; i++) { 55 | for(int j=0; j<3; j++) { 56 | printf("%f\t",cam_info.K[3*i+j]); 57 | } 58 | printf("\n"); 59 | } 60 | printf("\n"); 61 | 62 | // print the distortion coefficients 63 | for (int i=0; i<5 ; i++) printf("%f\t",cam_info.D[i]); 64 | printf("\n\n"); 65 | 66 | // print the rectification matrix 67 | for (int i=0;i<3 ; i++) { 68 | for(int j=0; j<3; j++) { 69 | printf("%f\t",cam_info.R[3*i+j]); 70 | } 71 | printf("\n"); 72 | } 73 | printf("\n"); 74 | 75 | // print the projection matrix 76 | for (int i=0;i<3 ; i++) { 77 | for(int j=0; j<3; j++) { 78 | cam_info.P[3*i+j]=ncm.at(i,j); 79 | printf("%f\t",cam_info.P[3*i+j]); 80 | } 81 | printf("\n"); 82 | } 83 | 84 | // Important change height and width to the original 85 | cam_info.width=752; 86 | cam_info.height=480; 87 | 88 | writeCalibrationYml("/home/kabamaru/.ros/camera_info/matlab_calib.yaml", name, cam_info); 89 | 90 | } 91 | -------------------------------------------------------------------------------- /src/other/TF_Publisher.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | 5 | //void poseCallback(const ???camera pose??::PoseConstPtr& msg){ 6 | //} 7 | 8 | int main(int argc, char** argv){ 9 | ros::init(argc, argv, "upm_tf_broadcaster"); 10 | 11 | ros::NodeHandle node; 12 | 13 | //for using later when the camera will pan and/or tilt 14 | //ros::Subscriber sub = node.subscribe(turtle_name+"/pose", 10, &poseCallback); 15 | 16 | static tf::TransformBroadcaster br; 17 | 18 | ros::Rate rate(30.0); 19 | tf::Transform world2camera_tf ; 20 | tf::Transform world2laser_tf;; 21 | // camera 2 laser transform. it is 10 cm above the camera and its static so it goes out of while loop 22 | 23 | // we can use something like the following in the launch file but we choose this instead 24 | // for future use, when the camera moves 25 | // 27 | 28 | world2laser_tf.setOrigin( tf::Vector3( 0.0 , 0.0 , 0.0) ); 29 | world2laser_tf.setRotation( tf::Quaternion(0, 0, 0) ); 30 | 31 | while(ros::ok()) { 32 | //world 2 camera transform. 
it is 0 0 0 for the beginning 33 | 34 | world2camera_tf.setOrigin( tf::Vector3(0.0, 0.0 , 0.0) ); 35 | world2camera_tf.setRotation( tf::Quaternion(0, 0, 0) ); 36 | br.sendTransform(tf::StampedTransform(world2camera_tf, ros::Time::now(), "world", "camera")); 37 | 38 | 39 | br.sendTransform(tf::StampedTransform(world2laser_tf, ros::Time::now(), "world", "laser")); 40 | rate.sleep(); 41 | ros::spinOnce(); 42 | } 43 | return 0; 44 | }; 45 | -------------------------------------------------------------------------------- /src/other/caltest.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * test.cpp 3 | * test to see how the calibration works 4 | * Created on: Nov 16, 2012 5 | * Author: kabamaru 6 | * 7 | */ 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | std::string frame_l; 21 | std::string frame_cam; 22 | std::string fname; 23 | 24 | tf::StampedTransform transform; 25 | 26 | geometry_msgs::PointStamped lSPoint; 27 | geometry_msgs::PointStamped lSPointOut; 28 | 29 | geometry_msgs::Point32 point; 30 | 31 | cv::Point2d pt2D; 32 | cv::Point3d pt3D; 33 | 34 | image_geometry::PinholeCameraModel cam_model_; 35 | 36 | sensor_msgs::CameraInfo cam_info; 37 | 38 | void test (void) { 39 | 40 | ros::NodeHandle node; 41 | 42 | std::cout << "Give x, y ,z\n"; 43 | std::cin >> point.x; 44 | std::cin >> point.y; 45 | //std::cin >> point.z; 46 | point.z = 0; 47 | 48 | tf::TransformListener tf_listener_; 49 | 50 | try { 51 | ros::Duration timeout(3); 52 | tf_listener_.waitForTransform(frame_cam, frame_l, ros::Time(0), timeout); 53 | tf_listener_.lookupTransform(frame_cam, frame_l, ros::Time(0), transform); 54 | } 55 | catch (tf::TransformException& ex) { 56 | ROS_WARN("[CALTEST] TF exception:\n%s", ex.what()); 57 | } 58 | 59 | // METHOD 1: ROS tf::TransformPoint + pinholeCameraModel::project3dtoPixel 60 | // 61 | 62 | lSPoint.point.x=point.x; lSPoint.point.y=point.y; lSPoint.point.z=point.z; 63 | lSPoint.header.frame_id=frame_l; 64 | 65 | //tf_listener_.transformPoint(frame_cam, ros::Time(0), lSPoint, frame_l, lSPointOut); 66 | lSPointOut=lSPoint; 67 | 68 | pt3D.x = lSPointOut.point.x; 69 | pt3D.y = lSPointOut.point.z; 70 | pt3D.z = lSPointOut.point.y; 71 | 72 | pt2D = cam_model_.project3dToPixel(pt3D); 73 | 74 | 75 | cv::Mat K(3, 3, CV_64FC1); 76 | cv::Mat R(3, 3, CV_64FC1); 77 | cv::Mat P(3, 4, CV_64FC1); 78 | 79 | printf("CAMERA MATRIX\n"); 80 | 81 | int i,j; 82 | for(i=0; i < 3 ; i++) { 83 | for(j=0; j < 3; j++) { 84 | K.at(i,j)=cam_info.K[3*i+j]; 85 | R.at(i,j)=cam_info.R[i*3+j]; 86 | printf("%f\t",cam_info.K[3*i+j]); 87 | } 88 | printf("\n"); 89 | } 90 | printf("\n"); 91 | 92 | printf("PROJECTION MATRIX\n"); 93 | 94 | for(i=0; i < 3 ; i++) { 95 | for(j=0; j < 4; j++) { 96 | P.at(i,j)=cam_info.P[4*i+j]; 97 | printf("%f\t",cam_info.P[4*i+j]); 98 | } 99 | printf("\n"); 100 | } 101 | printf("\n"); 102 | 103 | // METHOD 2: calib3d::projectPoints 104 | 105 | cv::Mat pIn(1, 3, CV_64FC1); 106 | cv::Mat pOut(1, 3, CV_64FC1); 107 | 108 | cv::Mat cvPhi = cv::Mat::zeros(3, 3, CV_64FC1); 109 | cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64FC1); 110 | cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64FC1); 111 | 112 | tf::Vector3 t=transform.getOrigin(); 113 | tvec.at(0,0)= t.x(); tvec.at(0,1)= t.y(); tvec.at(0,2)= t.z(); 114 | 115 | // rotation vector NOT RPY 116 | rvec.at(0,0)= 0.0174833625050782; 117 | rvec.at(0,1)= -0.119012558661694; 118 | rvec.at(0,2)= 
3.13528527743474; 119 | 120 | tf::Quaternion q; 121 | q = transform.getRotation(); 122 | double roll, pitch, yaw; 123 | tf::Matrix3x3 phi;//(q); 124 | //phi.setValue( -0.999929898220757 , -0.00442044279230751 , 0.0109844585550146, 125 | // 0.00357500223864038 , -0.997114460355643 , -0.0758285718490689, 126 | // 0.0112879583282635 , -0.0757839866673404 , 0.99706036395074); 127 | //phi.setRPY(-0.075862, -0.011288, 3.138017); 128 | phi.setRotation(q); 129 | phi.getRPY(roll, pitch, yaw); 130 | //q.setRPY(roll, pitch, yaw); 131 | 132 | //phi.setEulerYPR(roll, pitch, yaw); 133 | // YPR 3.138017 -0.011288 -0.075862 134 | 135 | 136 | printf("PHI\n"); 137 | for (i = 0; i < 3; i++) { 138 | for (j= 0; j < 3; j++) { 139 | cvPhi.at(i,j) = phi.getRow(i)[j]; 140 | printf("%f\t", cvPhi.at(i,j)); 141 | } 142 | printf("\n"); 143 | } 144 | 145 | printf("\nRPY Q = (%f, %f, %f, %f)\n", q.x(), q.y(), q.z(), q.w()); 146 | phi.getRotation(q); 147 | printf("Rotation Q = (%f, %f, %f, %f)\n\n", q.x(), q.y(), q.z(), q.w()); 148 | 149 | printf("Rotation RPY = (%f, %f, %f)\n\n", roll, pitch, yaw); 150 | 151 | printf("Translation = (%f, %f, %f)\n\n", t.x(), t.y(), t.z()); 152 | 153 | 154 | pIn.at(0,0) = pt3D.x; pIn.at(0,1) = pt3D.y; pIn.at(0,2) = pt3D.z; 155 | 156 | // Print results 157 | 158 | printf("Original Point: x %f y %f z %f\n", point.x, point.y, point.z); 159 | 160 | printf("Projected point: x %f y %f z %f\n\n",pIn.at(0,0), pIn.at(0,1), pIn.at(0,2)); 161 | 162 | //printf("Method 1: x %f y %f\n\n",pt2D.x,pt2D.y); 163 | 164 | cv::projectPoints(pIn, rvec, tvec, K, cam_info.D, pOut); 165 | printf("Hardcoded rotation\n"); 166 | printf("Rotation vector = %f %f %f\n",rvec.at(0,0),rvec.at(0,1),rvec.at(0,2)); 167 | printf("Point = x %f y %f\n\n",pOut.at(0,0),pOut.at(0,1)); 168 | 169 | cv::Rodrigues(cvPhi, rvec); 170 | cv::projectPoints(pIn, rvec, tvec, K, cam_info.D, pOut); 171 | printf("Computed rotation\n"); 172 | printf("Rotation vector = %f %f %f\n",rvec.at(0,0),rvec.at(0,1),rvec.at(0,2)); 173 | printf("Point = x %f y %f\n\n",pOut.at(0,0),pOut.at(0,1)); 174 | 175 | cv::Mat pm = P(cv::Rect(0,0,3,3)); 176 | cv::projectPoints(pIn, rvec, tvec, pm, cam_info.D, pOut); 177 | //printf("Method 2. Projection Matrix\n"); 178 | //printf("x %f y %f\n\n",pOut.at(0,0),pOut.at(0,1)); 179 | 180 | 181 | 182 | } 183 | 184 | 185 | int main(int argc, char* argv[]) 186 | { 187 | fname="00b09d0100aa73bb"; 188 | frame_l="/laser"; 189 | frame_cam="/camera"; 190 | 191 | ros::init(argc, argv, "testcalib"); 192 | camera_calibration_parsers::readCalibrationYml("/home/kabamaru/.ros/camera_info/matlab_calib.yaml", fname, cam_info); 193 | cam_model_.fromCameraInfo(cam_info); 194 | ROS_INFO("Start OK"); 195 | test(); 196 | //ros::spin(); 197 | return 0; 198 | } 199 | -------------------------------------------------------------------------------- /src/recognizeRT.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | 16 | /** 17 | * A node to set up all things needed for recognition. 18 | * Also used for the annotation. 19 | * The name file is given from command line. 
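 *
 * Editor's note: "name file" above really means the topic names; main() reads the
 * camera topic from argv[1] and the laser topic from argv[2]. A hypothetical
 * invocation (topic names are placeholders):
 *
 *   rosrun hdetect recognizeRT /camera/image_raw /scan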
20 | * @author Bang-Cheng Wang
21 | * @date 2013/10/01
22 | */
23 | 
24 | class recognizeRT
25 | {
26 | public:
27 |     recognizeRT(std::string cameraTopic, std::string laserTopic);
28 | 
29 |     ~recognizeRT();
30 | 
31 | private:
32 | 
33 |     ros::NodeHandle nh;
34 | 
35 |     /// Subscriber to the camera image topic
36 |     message_filters::Subscriber<sensor_msgs::Image> cameraImage_sub_;
37 |     /// Subscriber to the laser scan topic
38 |     message_filters::Subscriber<sensor_msgs::LaserScan> laserScan_sub_;
39 | 
40 |     /**
41 |     * An approximate time policy to synchronize the image and laser messages.
42 |     * This sync policy waits for both messages to arrive before it invokes the callback
43 |     * from the recognizer object.
44 |     */
45 |     typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::LaserScan> MySyncPolicy;
46 |     //typedef message_filters::sync_policies::ExactTime<sensor_msgs::Image, sensor_msgs::LaserScan> MySyncPolicy;
47 |     /** The synchronizer based on the two-message policy */
48 |     message_filters::Synchronizer<MySyncPolicy> *sync;
49 | 
50 |     // Declare the topics
51 |     std::string cameraImageIn;
52 |     std::string laserScanIn;
53 | 
54 |     /// The recognizer object that will be used for the callback
55 |     Recognizer myRecognizer;
56 | 
57 | 
58 | };
59 | 
60 | recognizeRT::recognizeRT(std::string cameraTopic, std::string laserTopic)
61 |     : nh("~")
62 | {
63 |     // Subscribe to the corresponding topics
64 |     cameraImage_sub_.subscribe(nh, cameraTopic, 3);
65 |     laserScan_sub_.subscribe(nh, laserTopic, 3);
66 | 
67 |     // Initialize the synchronizer
68 |     // Future work: change it to a tf::MessageFilter to include the tf transform
69 |     sync = new message_filters::Synchronizer<MySyncPolicy>(MySyncPolicy(10), cameraImage_sub_, laserScan_sub_);
70 | 
71 |     sync->registerCallback(boost::bind(&Recognizer::recognizeData, boost::ref(myRecognizer), _1, _2));
72 | 
73 |     // Sleep to give time to other nodes (tf) to start
74 |     sleep(2);
75 |     ROS_INFO("[RECOGNIZE_RT] RecognizeRT running OK.");
76 | }
77 | 
78 | recognizeRT::~recognizeRT()
79 | {
80 | 
81 | }
82 | 
83 | int main(int argc, char* argv[])
84 | {
85 |     ros::init(argc, argv, "recognizeRT");
86 | 
87 |     std::string cameraTopic(argv[1]);
88 |     std::string laserTopic(argv[2]);
89 | 
90 |     recognizeRT vl(cameraTopic, laserTopic);
91 |     ros::spin();
92 | 
93 |     return 0;
94 | }
95 | 
96 | 
--------------------------------------------------------------------------------
/src/showRT.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | 
6 | #include
7 | #include
8 | #include
9 | 
10 | #include
11 | 
12 | #include
13 | 
14 | #include
15 | #include
16 | #include
17 | #include
18 | #include
19 | 
20 | /**
21 | * A minimal viewer node that displays the image stream named by Header::imageTopic.
22 | * Useful for monitoring the detector's output.
23 | * It takes no command-line arguments.
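 *
 * Editor's note (hedged sketch): to point this viewer at a different stream one
 * would typically remap the image topic in a launch file, e.g.
 *
 *   <node pkg="hdetect" type="showRT" name="showRT">
 *     <remap from="IMAGE_TOPIC" to="/camera/image_raw"/>
 *   </node>
 *
 * where IMAGE_TOPIC stands for the resolved value of Header::imageTopic
 * (the repository's launches/showRT.launch presumably does something similar).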
24 | * @author Bang-Cheng Wang
25 | * @date 2013/10/01
26 | */
27 | 
28 | static const char WINDOW[] = "Result";
29 | 
30 | class showRT
31 | {
32 | public:
33 |     showRT();
34 |     ~showRT();
35 | 
36 |     void showImage(const sensor_msgs::ImageConstPtr& msg);
37 | 
38 | private:
39 | 
40 |     ros::NodeHandle nh;
41 | 
42 |     /// Subscriber to the image topic
43 | 
44 |     image_transport::ImageTransport it_;
45 |     image_transport::Subscriber image_sub_;
46 | };
47 | 
48 | showRT::showRT() : nh("~"), it_(nh)
49 | {
50 |     // Subscribe to the corresponding topics
51 |     image_sub_ = it_.subscribe(Header::imageTopic, 3, &showRT::showImage, this);
52 | 
53 |     cv::namedWindow(WINDOW);
54 | 
55 |     // Sleep to give time to other nodes (tf) to start
56 |     sleep(2);
57 |     ROS_INFO("[SHOW_RT] ShowRT running OK.");
58 | }
59 | 
60 | showRT::~showRT()
61 | {
62 |     cv::destroyWindow(WINDOW);
63 | }
64 | 
65 | void showRT::showImage(const sensor_msgs::ImageConstPtr &msg)
66 | {
67 |     cv_bridge::CvImagePtr cv_ptr;
68 | 
69 |     try
70 |     {
71 |         cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
72 |     }
73 |     catch (cv_bridge::Exception& e)
74 |     {
75 |         ROS_ERROR("cv_bridge exception: %s", e.what());
76 |         return;
77 |     }
78 | 
79 |     cv::imshow(WINDOW, cv_ptr->image);
80 |     cv::waitKey(3);
81 | }
82 | 
83 | int main(int argc, char* argv[])
84 | {
85 |     ros::init(argc, argv, "showRT");
86 | 
87 |     showRT vl;  // construct the node ("showRT vl();" would declare a function instead and never run the viewer)
88 |     ros::spin();
89 | 
90 |     return 0;
91 | }
92 | 
93 | 
--------------------------------------------------------------------------------
/src/trainLaser.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Trains a boosted classifier and saves the model to data/trained_boost.xml
3 | * TODO: input from command line the file and the number of weak classifiers to be trained
4 | */
5 | 
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 | 
12 | 
13 | //using namespace cv;
14 | using cv::TrainData;
15 | using cv::Boost;
16 | using cv::BoostParams;
17 | 
18 | int main(int argc, char** argv)
19 | {
20 |     ros::init(argc, argv, "TrainLaser");
21 | 
22 |     std::string abs_path;
23 |     ros::NodeHandle nh;
24 | 
25 |     if (nh.hasParam("pkg_path"))
26 |     {
27 |         ros::param::get("/pkg_path", abs_path);
28 |     }
29 |     else
30 |     {
31 |         ROS_ERROR("[TRAIN LASER] Parameter pkg_path (absolute package path) not found.");
32 |     }
33 | 
34 | 
35 |     std::string read_path = abs_path + argv[1];
36 |     std::string write_path = abs_path + "/data/trained_boost.xml";
37 | 
38 |     printf("Reading from %s\n", read_path.c_str());
39 | 
40 |     /// STEP 2. Opening the file
41 |     //1. Declare a structure to keep the data
42 |     TrainData cvml;
43 | 
44 |     //2. Read the file
45 |     cvml.read_csv(read_path.c_str());
46 | 
47 |     //3. 
-------------------------------------------------------------------------------- /src/trainLaser.cpp: --------------------------------------------------------------------------------

/**
 * Trains a boosted classifier and saves the model to data/trained_boost.xml
 * TODO: read the number of weak classifiers to be trained from the command line
 */

#include <cstdio>
#include <string>
#include <vector>

#include <ros/ros.h>
#include <opencv2/ml/ml.hpp>

//using namespace cv;
using cv::TrainData;   // typedef of CvMLData in the OpenCV 2.x ml module
using cv::Boost;       // typedef of CvBoost
using cv::BoostParams; // typedef of CvBoostParams

int main(int argc, char** argv)
{
    ros::init(argc, argv, "TrainLaser");

    std::string abs_path;
    ros::NodeHandle nh;

    if (nh.hasParam("pkg_path"))
    {
        ros::param::get("/pkg_path", abs_path);
    }
    else
    {
        ROS_ERROR("[TRAIN LASER] Parameter pkg_path (absolute package path) not found.");
    }

    if (argc < 2)
    {
        ROS_ERROR("[TRAIN LASER] Usage: trainLaser <csv file, relative to the package path>");
        return EXIT_FAILURE;
    }

    std::string read_path = abs_path + argv[1];
    std::string write_path = abs_path + "/data/trained_boost.xml";

    printf("Reading from %s\n", read_path.c_str());

    /// STEP 2. Opening the file
    // 1. Declare a structure to keep the data
    TrainData cvml;

    // 2. Read the file
    cvml.read_csv(read_path.c_str());

    // 3. Indicate which column is the response

    // Change the type of the first column to categorical
    cvml.change_var_type(0, CV_VAR_CATEGORICAL);

    // Set the first column as the response
    cvml.set_response_idx(0);

    // // From col 0 to col N-1, 0 means NUMERICAL and 1 means CATEGORICAL
    // for (int j = 0; j < cvml.get_var_types()->cols; j++)
    // {
    //     printf("%-4d", cvml.get_var_type(j));
    // }
    // printf("\n");

    // // From col 0 to col N-1, 0 means NUMERICAL and 1 means CATEGORICAL
    // // Col N-1 is the response column
    // cv::Mat tp(cvml.get_var_types());
    // for (int j = 0; j < tp.cols; j++)
    // {
    //     printf("%-4d", tp.at<uchar>(0, j));
    // }
    // printf("\n");

    // cv::Mat val(cvml.get_values());
    // for (int i = 0; i < val.rows; i++)
    // {
    //     printf("%-4d", i);
    //     for (int j = 0; j < val.cols; j++)
    //     {
    //         printf("%-7.2f", val.at<float>(i, j));
    //     }
    //     printf("\n");
    // }

    // cv::Mat resp(cvml.get_responses());
    // for (int i = 0; i < resp.rows; i++)
    // {
    //     printf("%-4d ", i);
    //     for (int j = 0; j < resp.cols; j++)
    //     {
    //         printf("%2.2f", resp.at<float>(i, j));
    //     }
    //     printf("\n");
    // }

    /// STEP 3. Splitting the samples
    // 1. Select the portion of the samples used for training (here all of them)
    float portion = 1.0;
    CvTrainTestSplit cvtts(portion, true);

    // 2. Assign the division to the data
    cvml.set_train_test_split(&cvtts);

    printf("\nTraining ...\n");
    /// STEP 4. The training
    // 1. Declare the classifier
    Boost boost;

    // 2. Train it with 100 weak classifiers (real AdaBoost, tree depth 1)
    bool success = boost.train(&cvml, BoostParams(CvBoost::REAL, 100, 0, 1, false, 0), false);
    if (success)
    {
        printf("Training Success ...\n");
    }
    else
    {
        printf("Training Failed ...\n");
        return EXIT_FAILURE;
    }

    /// STEP 5. Calculating the testing and training error
    // 1. Declare a couple of vectors to save the predictions of each sample
    std::vector<float> train_responses, test_responses;

    // 2. Calculate the rate of the training error in %
    float fl1 = boost.calc_error(&cvml, CV_TRAIN_ERROR, &train_responses);

    // Re-split 50/50 so that a test set exists for the test error
    CvTrainTestSplit cvtts2(0.5f, true);
    cvml.set_train_test_split(&cvtts2);

    // 3. Calculate the rate of the testing error in %
    float fl2 = boost.calc_error(&cvml, CV_TEST_ERROR, &test_responses);

    printf("Error train %f %% \n", fl1);
    printf("Error test %f %% \n", fl2);

    /// STEP 6. Save the trained classifier
    boost.save(write_path.c_str(), "boost");

    return EXIT_SUCCESS;
}
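For completeness, here is a sketch of how the saved model can be consumed later. The model name "boost" matches the save call above; the 17-feature sample width follows the feature_set 0 comment in yaml/detector.yaml, and the relative path and dummy sample are assumptions for illustration, not the package's actual detector code.

// Sketch: loading data/trained_boost.xml and classifying one feature vector.
#include <opencv2/core/core.hpp>
#include <opencv2/ml/ml.hpp>
#include <cstdio>

int main()
{
    CvBoost boost;
    // The second argument is the model name used in boost.save() above
    boost.load("data/trained_boost.xml", "boost");

    // One sample: a row vector of 17 laser features (dummy zeros here)
    cv::Mat sample = cv::Mat::zeros(1, 17, CV_32F);

    // predict() returns the class label; passing returnSum=true instead
    // would return the raw weighted vote sum of the weak classifiers
    float label = boost.predict(sample);
    printf("Predicted class: %f\n", label);

    return 0;
}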
-------------------------------------------------------------------------------- /src/visualizeRT.cpp: --------------------------------------------------------------------------------

#include <ros/ros.h>
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/LaserScan.h>

#include <hdetect/lib/visualizer.hpp>

/**
 * A node to set up all things needed for visualization.
 * Also used for the annotation.
 * The topic names are given on the command line.
 * @author Stathis Fotiadis
 * @date 10/10/2012
 */
class visualizeRT
{
private:
    ros::NodeHandle nh;

    /// Subscriber to the camera image topic
    message_filters::Subscriber<sensor_msgs::Image> cameraImage_sub_;
    /// Subscriber to the laser scan topic
    message_filters::Subscriber<sensor_msgs::LaserScan> laserScan_sub_;

    /**
     * An approximate time policy to synchronize the image and laser scan messages.
     * This sync policy waits for both messages to arrive before it invokes the
     * callback from the visualizer object.
     */
    typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::LaserScan> MySyncPolicy;

    /** The synchronizer based on the two-message policy */
    message_filters::Synchronizer<MySyncPolicy>* sync;

    // The topic names
    std::string cameraImageIn;
    std::string laserScanIn;

    /// The visualizer object that will be used for the callback
    visualizer myVisualizer;

public:
    /**
     * Creates a synchronizer of two topics (image, laser).
     * It displays the laser data on the image, and the laser data alone in 2D.
     * @param[in] cameraTopic The name of the camera topic that will be subscribed to.
     * @param[in] laserTopic The name of the laser topic that will be subscribed to.
     */
    visualizeRT(std::string cameraTopic, std::string laserTopic);

    ~visualizeRT();
};

visualizeRT::visualizeRT(std::string cameraTopic, std::string laserTopic) : nh("~")
{
    // Subscribe to the corresponding topics
    cameraImage_sub_.subscribe(nh, cameraTopic, 3);
    laserScan_sub_.subscribe(nh, laserTopic, 3);

    // Initialize the synchronizer
    // Future work: change it to a tf::MessageFilter to include the tf transform
    sync = new message_filters::Synchronizer<MySyncPolicy>(MySyncPolicy(10), cameraImage_sub_, laserScan_sub_);

    sync->registerCallback(boost::bind(&visualizer::visualizeData, boost::ref(myVisualizer), _1, _2));

    // Sleep to give the other nodes (tf) time to start
    sleep(2);
    ROS_INFO("[VISUALIZE_RT] VisualizeRT running OK.");
}

visualizeRT::~visualizeRT()
{
    delete sync;
}

int main(int argc, char* argv[])
{
    ros::init(argc, argv, "visualizeRT");

    if (argc < 3)
    {
        ROS_ERROR("[VISUALIZE_RT] Usage: visualizeRT <camera topic> <laser topic>");
        return 1;
    }

    std::string cameraTopic(argv[1]);
    std::string laserTopic(argv[2]);

    visualizeRT vl(cameraTopic, laserTopic);
    ros::spin();

    return 0;
}

-------------------------------------------------------------------------------- /yaml/camera_calib.yaml: --------------------------------------------------------------------------------

image_width: 752
image_height: 480
camera_name: 00b09d0100aa73bb
camera_matrix:
  rows: 3
  cols: 3
  data: [668.68462594024, 0, 384.916944772716, 0, 669.810891377069, 239.099132134278, 0, 0, 1]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [-0.413014051243856, 0.182192431895544, 0.0012217553319971, -0.00116201628505299, 0]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
projection_matrix:
  rows: 3
  cols: 4
  data: [589.601196289062, 0, 398.414197628073, 0, 0, 630.999816894531, 238.88709127824, 0, 0, 0, 1, 0]
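The calibration YAMLs in this directory follow the standard ROS camera_info layout, so they can be loaded programmatically. A small sketch using the camera_calibration_parsers API, assuming that package is available; the file path is an assumption for illustration.

// Sketch: reading one of the calibration YAMLs into a sensor_msgs::CameraInfo.
#include <ros/ros.h>
#include <camera_calibration_parsers/parse.h>
#include <sensor_msgs/CameraInfo.h>
#include <string>

int main(int argc, char* argv[])
{
    ros::init(argc, argv, "calib_reader_sketch");

    std::string camera_name;
    sensor_msgs::CameraInfo cam_info;

    // readCalibration infers the file format (YAML or INI) from the extension
    if (camera_calibration_parsers::readCalibration("yaml/camera_calib.yaml", camera_name, cam_info))
    {
        ROS_INFO("Loaded calibration for camera '%s' (%ux%u)",
                 camera_name.c_str(), cam_info.width, cam_info.height);
    }
    else
    {
        ROS_ERROR("Failed to read the calibration file.");
    }
    return 0;
}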
-------------------------------------------------------------------------------- /yaml/camera_calib_log.yaml: --------------------------------------------------------------------------------

image_width: 960
image_height: 1280
camera_name: 00b09d0100aa73bb
camera_matrix:
  rows: 3
  cols: 3
  data: [1406.992195, 0.0, 612.124512, 0.0, 1417.365398, 562.017015, 0.0, 0.0, 1.0]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [0.152716, -0.30629, 0.024623, 0.000513, 0.0]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
projection_matrix:
  rows: 3
  cols: 4
  data: [1450.89917, 0.0, 612.003218, 0.0, 0.0, 1437.697876, 577.58158, 0.0, 0.0, 0.0, 1.0, 0.0]

-------------------------------------------------------------------------------- /yaml/camera_calib_yellow.yaml: --------------------------------------------------------------------------------

image_width: 752
image_height: 480
camera_name: 00b09d0100aa73bb
camera_matrix:
  rows: 3
  cols: 3
  data: [665.49213516386726041674, 0, 366.08910016553352306801, 0, 664.98397874717795730248, 248.39451425645043514123, 0, 0, 1]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [-0.41618302374745735905, 0.18323293672503468410, -0.00154826994718998173, 0.00292684802966111830, 0.00000000000000000000]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
projection_matrix:
  rows: 3
  cols: 4
  data: [590, 0, 398, 0, 0, 630, 238, 0, 0, 0, 1, 0]

-------------------------------------------------------------------------------- /yaml/detector.yaml: --------------------------------------------------------------------------------

laser_window_width: 600   # width of the laser window
laser_window_height: 400  # height of the laser window
rect: 0                   # 0 if the original image is used, 1 if the rectified one; for projection purposes
jumpdist: 0.45            # laser segmentation distance
#jumpdist: 0.80           # laser segmentation distance
feature_set: 0            # 0 = 17 features, 1 = 63 features
hog_hit_threshold: 0      # HOG SVM bias
hog_group_threshold: 0    # HOG grouping threshold, use 0 for meanshift
hog_meanshift: false      # whether meanshift grouping is used
body_ratio: 1.5           # upper/lower body ratio
tf_timeout: 30            # how long to wait for tf
laserA: -2.0              # probability sigmoid
laserB: 0.0               #   parameters for the laser
cameraA: -4.0             # probability sigmoid
cameraB: 0.0              #   parameters for the camera
#m_to_pixels: 1000.0      # ROI meter to pixel ratio
m_to_pixels: 1600.0       # ROI meter to pixel ratio
laser_range: 20.0         # accepted laser range
fusion_prob: 0.25         # threshold of the fusion probability
min_camera_prob: 0.1      # minimum probability for the camera detection module
min_laser_prob: 0.0001    # minimum probability for the laser detection module
#detection_prob: 0.4      # threshold of the fusion probability
max_euc_dist: 2.0         # maximum Euclidean distance
max_mah_dist: 4.0         # maximum Mahalanobis distance
init_id: 0                # initial human ID
new_object_score: 1.5     # score given to a detection when initialized
predict_score: -1.0       # score for a prediction (must be negative)
update_score: 1.0         # score for a positive detection
min_add_score: 4.0        # score needed to consider a detection "real"
object_tracking: 'object_tracking' # I don't know why this is here
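The laserA/laserB and cameraA/cameraB entries above are labeled as sigmoid parameters. Assuming the usual logistic (Platt-style) form p = 1 / (1 + exp(A*x + B)) — an assumption based only on these comments; the exact formula lives in the detector source — mapping a raw classifier output x to a probability would look like:

// Sketch: converting a raw detector output to a probability with a logistic
// sigmoid. The form p = 1 / (1 + exp(A*x + B)) is assumed from the
// "probability sigmoid" comments in detector.yaml, not taken from the code.
#include <cmath>
#include <cstdio>

double sigmoidProb(double x, double A, double B)
{
    return 1.0 / (1.0 + std::exp(A * x + B));
}

int main()
{
    const double laserA = -2.0, laserB = 0.0; // values from detector.yaml

    // With A negative, larger raw outputs map to higher probabilities
    printf("x = 0.0 -> p = %.3f\n", sigmoidProb(0.0, laserA, laserB)); // 0.500
    printf("x = 1.0 -> p = %.3f\n", sigmoidProb(1.0, laserA, laserB)); // ~0.881
    return 0;
}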
-------------------------------------------------------------------------------- /yaml/laser_filter.yaml: --------------------------------------------------------------------------------

scan_filter_chain:
- name: shadows
  type: laser_filters/ScanShadowsFilter
  params:
    min_angle: 10
    max_angle: 170
    neighbors: 1
    window: 1
- name: dark_shadows
  type: laser_filters/LaserScanIntensityFilter
  params:
    lower_threshold: 0
    upper_threshold: 30000
    disp_histogram: 0
- name: angle_bound
  type: laser_filters/LaserScanAngularBoundsFilter
  params:
    lower_angle: -2.1
    upper_angle: 2.1

-------------------------------------------------------------------------------- /yaml/logitech_cam_hd.yml: --------------------------------------------------------------------------------

image_width: 1280
image_height: 960
camera_name: narrow_stereo
camera_matrix:
  rows: 3
  cols: 3
  data: [1406.992195, 0, 612.124512, 0, 1417.365398, 562.017015, 0, 0, 1]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [0.152716, -0.30629, 0.024623, 0.000513, 0]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
projection_matrix:
  rows: 3
  cols: 4
  data: [1450.89917, 0, 612.003218, 0, 0, 1437.697876, 577.58158, 0, 0, 0, 1, 0]

-------------------------------------------------------------------------------- /yaml/shadow_filter.yaml: --------------------------------------------------------------------------------

scan_filter_chain:
- name: shadows
  type: laser_filters/ScanShadowsFilter
  params:
    min_angle: 10
    max_angle: 170
    neighbors: 0
    window: 1

--------------------------------------------------------------------------------
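Both laser_filter.yaml and shadow_filter.yaml define a scan_filter_chain in the format consumed by the filters/laser_filters packages. A minimal sketch of loading and applying such a chain; the topic names, the node name, and loading the YAML into the node's private namespace are assumptions for illustration.

// Sketch: applying a scan_filter_chain (e.g. from yaml/shadow_filter.yaml,
// loaded into this node's private parameter namespace) to incoming scans.
#include <ros/ros.h>
#include <filters/filter_chain.h>
#include <sensor_msgs/LaserScan.h>

// Globals for brevity in this sketch only.
filters::FilterChain<sensor_msgs::LaserScan> g_chain("sensor_msgs::LaserScan");
ros::Publisher g_pub;

void scanCallback(const sensor_msgs::LaserScan::ConstPtr& scan)
{
    sensor_msgs::LaserScan filtered;
    if (g_chain.update(*scan, filtered))
        g_pub.publish(filtered);
}

int main(int argc, char* argv[])
{
    ros::init(argc, argv, "scan_filter_sketch");
    ros::NodeHandle nh("~");

    // Reads the filter definitions from the "scan_filter_chain" parameter,
    // i.e. the structure defined in laser_filter.yaml / shadow_filter.yaml
    g_chain.configure("scan_filter_chain", nh);

    g_pub = nh.advertise<sensor_msgs::LaserScan>("scan_filtered", 10);
    ros::Subscriber sub = nh.subscribe("scan", 10, scanCallback);

    ros::spin();
    return 0;
}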