├── CMakeLists.txt
├── LICENSE
├── config
│   ├── gtsam_onecamera_asus.yaml
│   └── gtsam_onecamera_multisense.yaml
├── include
│   └── CameraMarkerTransform.hpp
├── launch
│   ├── gtsam_multicamera.launch
│   ├── gtsam_onecamera_asus_calibration.launch
│   └── gtsam_onecamera_multisense_calibration.launch
├── package.xml
└── src
    ├── CameraMarkerTransform.cpp
    ├── MultiCameraNode.cpp
    └── OneCameraNode.cpp

--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required(VERSION 2.8)

project(self_camera_calibration)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")

find_package(Eigen3 REQUIRED)
message(STATUS "Eigen3 found: ${EIGEN3_INCLUDE_DIR}")
include_directories(${EIGEN3_INCLUDE_DIR})


find_package(PkgConfig)
pkg_check_modules(ipopt ipopt)
include_directories(${ipopt_INCLUDE_DIRS})

find_package(GTSAM REQUIRED) # Uses installed package
include_directories(${GTSAM_INCLUDE_DIR})
set(GTSAM_LIBRARIES gtsam) # TODO: automatic search libs

find_package(GTSAMCMakeTools)
include(GtsamMakeConfigFile)
include(GtsamBuildTypes)
include(GtsamTesting)


include_directories(include)

find_package(OpenCV 3.3 REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
link_directories(${OpenCV_LIBRARY_DIRS})

find_package(Boost REQUIRED COMPONENTS system)
include_directories(${Boost_INCLUDE_DIRS})
link_directories(${Boost_LIBRARY_DIRS})


find_package(catkin REQUIRED COMPONENTS roscpp sensor_msgs cv_bridge tf tf_conversions)
include_directories(${catkin_INCLUDE_DIRS})
link_directories(${catkin_LIBRARY_DIRS})

catkin_package(INCLUDE_DIRS include
               LIBRARIES cam_marker_calib
               CATKIN_DEPENDS roscpp sensor_msgs cv_bridge tf
               DEPENDS ipopt OpenCV)


add_library(cam_marker_calib src/CameraMarkerTransform.cpp)
target_link_libraries(cam_marker_calib ${OpenCV_LIBRARIES})

# GTSAM ONE-CAMERA CALIBRATION
add_executable(gtsam_calib_one_camera src/OneCameraNode.cpp)
target_link_libraries(gtsam_calib_one_camera gtsam ${catkin_LIBRARIES} cam_marker_calib ${Boost_LIBRARIES} ${OpenCV_LIBRARIES})

# GTSAM MULTI-CAMERA CALIBRATION
add_executable(gtsam_calib_multi src/MultiCameraNode.cpp)
target_link_libraries(gtsam_calib_multi gtsam ${catkin_LIBRARIES} cam_marker_calib ${Boost_LIBRARIES} ${OpenCV_LIBRARIES})

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity.
      For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

--------------------------------------------------------------------------------
/config/gtsam_onecamera_asus.yaml:
--------------------------------------------------------------------------------
# topic from which we take the camera images
image_topic : "/asus/rgb/image_raw"
# topic from which we take the camera parameters
camera_info_topic : "/asus/rgb/camera_info"
# topic from which we take the joint states
joint_state_topic : "/hyq/joint_states"
# frame of reference at the sensor mounting point.
# Typically this has the x-axis pointing forward, the y-axis pointing left,
# and the z-axis pointing up.
# The calibration output will be from "base_link" to this frame.
sensor_frame_name : "/asus_mount_link"

# frame of the camera optical center
camera_frame_name : "/asus_rgb_optical_frame"

# number of valid points to evaluate for the optimization
points_number : 300

--------------------------------------------------------------------------------
/config/gtsam_onecamera_multisense.yaml:
--------------------------------------------------------------------------------
# topic from which we take the camera images
image_topic : "/multisense/left/image_mono"
# topic from which we take the camera parameters
camera_info_topic : "/multisense/left/image_mono/camera_info"
# frame of the camera optical center
camera_frame_name : "/multisense/left_camera_optical_frame"
# frame of reference at the sensor mounting point.
# Typically this has the x-axis pointing forward, the y-axis pointing left,
# and the z-axis pointing up.
# The calibration output will be from "base_link" to this frame.
sensor_frame_name : "/head"
# number of valid points to evaluate for the optimization
points_number : 300

--------------------------------------------------------------------------------
/include/CameraMarkerTransform.hpp:
--------------------------------------------------------------------------------
#ifndef DLS_PERCEPTION_CAMERA_MARKER_TRANSFORM_HPP__
#define DLS_PERCEPTION_CAMERA_MARKER_TRANSFORM_HPP__

#include <vector>
#include <opencv2/core.hpp>
#include <Eigen/Geometry>

namespace dls {
namespace perception {

/**
 * @brief The CameraMarkerTransform class computes the full 3D transform between
 * the camera frame and a ChArUco marker board, given a single camera image.
 */
class CameraMarkerTransform {

public:
    typedef cv::Matx33d CameraMatrix;
    typedef cv::Vec<double, 5> DistortionCoeff;
    typedef Eigen::Affine3d Transform;
    typedef cv::Mat Image;
    typedef std::vector<cv::Point2f> Corners;
    typedef std::vector<Corners> CornerList;

public:
    CameraMarkerTransform();

    CameraMarkerTransform(const CameraMatrix& camera_matrix,
                          const DistortionCoeff& distortion_coeff);

    void setCameraParameters(const CameraMatrix& camera_matrix,
                             const DistortionCoeff& distortion_coeff);

    // Returns true on success and writes the camera-to-marker transform to cTm
    bool getCameraMarkerTransform(const Image& image,
                                  Transform& cTm);

    void setDebug(const bool& debug);
    void setVerbose(const bool& verbose);

private:
    CameraMatrix camera_matrix_;
    DistortionCoeff distortion_coeff_;
    bool verbose_ = false;
    bool debug_ = false;

    Transform camera_marker_transform_ = Transform::Identity();
};

}
}

#endif // DLS_PERCEPTION_CAMERA_MARKER_TRANSFORM_HPP__
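A minimal usage sketch of the class above. The intrinsics here are made-up
placeholder values and "frame.png" is a hypothetical test image; in the nodes
further down, the real values are filled from the camera_info topic instead:

#include <CameraMarkerTransform.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main() {
    using dls::perception::CameraMarkerTransform;

    // placeholder intrinsics -- real values come from sensor_msgs/CameraInfo
    CameraMarkerTransform::CameraMatrix K(600.0,   0.0, 320.0,
                                            0.0, 600.0, 240.0,
                                            0.0,   0.0,   1.0);
    CameraMarkerTransform::DistortionCoeff D(0.0, 0.0, 0.0, 0.0, 0.0);

    CameraMarkerTransform cmt(K, D);
    cmt.setVerbose(true);

    cv::Mat image = cv::imread("frame.png", cv::IMREAD_GRAYSCALE);
    if (image.empty()) {
        return 1;
    }

    CameraMarkerTransform::Transform cTm;
    if (cmt.getCameraMarkerTransform(image, cTm)) {
        std::cout << cTm.matrix() << std::endl;  // 4x4 camera-to-marker transform
    }
    return 0;
}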
--------------------------------------------------------------------------------
/launch/gtsam_multicamera.launch:
--------------------------------------------------------------------------------
<launch>
  <node name="gtsam_calib_multi" pkg="self_camera_calibration"
        type="gtsam_calib_multi" output="screen">
    <param name="verbose" value="true"/>
    <param name="points_number" value="100"/>
  </node>
</launch>

--------------------------------------------------------------------------------
/launch/gtsam_onecamera_asus_calibration.launch:
--------------------------------------------------------------------------------
<launch>
  <node name="gtsam_calib_one_camera" pkg="self_camera_calibration"
        type="gtsam_calib_one_camera" output="screen">
    <rosparam command="load"
              file="$(find self_camera_calibration)/config/gtsam_onecamera_asus.yaml"/>
  </node>
</launch>

--------------------------------------------------------------------------------
/launch/gtsam_onecamera_multisense_calibration.launch:
--------------------------------------------------------------------------------
<launch>
  <node name="gtsam_calib_one_camera" pkg="self_camera_calibration"
        type="gtsam_calib_one_camera" output="screen">
    <rosparam command="load"
              file="$(find self_camera_calibration)/config/gtsam_onecamera_multisense.yaml"/>
  </node>
</launch>

--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
<?xml version="1.0"?>
<package format="2">
  <name>self_camera_calibration</name>

  <version>0.0.1</version>

  <description>
    This package finds the mutual position and orientation between a
    camera and the base link of HyQ. It uses the forward kinematics
    and a special foot design, which has a ChArUco marker attached to it.

    The transform is obtained by nonlinear optimization on a set of
    points collected by the camera and by the forward kinematics.
  </description>

  <author>Marco Camurri</author>
  <author>Andrzej Reinke</author>

  <maintainer>Marco Camurri</maintainer>

  <license>Apache 2.0</license>

  <buildtool_depend>catkin</buildtool_depend>

  <depend>roscpp</depend>
  <depend>sensor_msgs</depend>
  <depend>cv_bridge</depend>
  <depend>tf</depend>
  <depend>tf_conversions</depend>
</package>
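Assuming the package is built in a catkin workspace and the robot (or a bag
file) is publishing the image, camera_info, TF, and joint-state topics, the
calibration nodes can be started through the launch files above:

    roslaunch self_camera_calibration gtsam_onecamera_asus_calibration.launch
    roslaunch self_camera_calibration gtsam_onecamera_multisense_calibration.launch
    roslaunch self_camera_calibration gtsam_multicamera.launch

Topic and frame names can be adjusted in the config/*.yaml files (one-camera
case) or passed as private node parameters.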
--------------------------------------------------------------------------------
/src/CameraMarkerTransform.cpp:
--------------------------------------------------------------------------------
#include <CameraMarkerTransform.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/core/eigen.hpp>
#include <iostream>
#include <pwd.h>
#include <unistd.h>

using namespace cv;

namespace dls {
namespace perception {

using Transform = CameraMarkerTransform::Transform;

CameraMarkerTransform::CameraMarkerTransform() {

}

CameraMarkerTransform::CameraMarkerTransform(const CameraMatrix &camera_matrix,
                                             const DistortionCoeff &distortion_coeff)
{
    setCameraParameters(camera_matrix, distortion_coeff);
}

void CameraMarkerTransform::setDebug(const bool &debug){
    debug_ = debug;
}

void CameraMarkerTransform::setVerbose(const bool &verbose){
    verbose_ = verbose;
}

void CameraMarkerTransform::setCameraParameters(const CameraMatrix &camera_matrix,
                                                const DistortionCoeff &distortion_coeff){
    camera_matrix_ = camera_matrix;
    distortion_coeff_ = distortion_coeff;
}

bool CameraMarkerTransform::getCameraMarkerTransform(const Image &image,
                                                     Transform& cTm){
    std::vector<int> ids;
    CornerList corner_list;
    std::vector<cv::Point2f> charucoCorners;
    std::vector<int> charucoIds;
    cv::Vec3d rvec, tvec;
    bool print_marker = false;
    bool valid = true;

    auto dictionary = aruco::getPredefinedDictionary(aruco::DICT_4X4_50);

    // creating a board with 5 x 5 tiles, 4 cm wide, with
    // 2.5 cm aruco markers inside. The dictionary is small
    // because the markers are small, so we want them to be
    // visibly different from each other
    auto board = aruco::CharucoBoard::create(5,
                                             5,
                                             0.04,
                                             0.025,
                                             dictionary);

    aruco::detectMarkers(image, dictionary, corner_list, ids);

    if(print_marker){
        cv::Mat board_print;
        // 2362 px are 20 cm at 300 dpi
        board->draw(cv::Size(2362, 2362), board_print);
        passwd* pw = getpwuid(getuid());
        std::string home_path(pw->pw_dir);
        std::string full_path = home_path + "/calibration_marker.png";
        std::cout << "Writing marker for print: " << full_path << std::endl;
        cv::imwrite(full_path, board_print);
    }

    if (ids.size() > 0) {
        if(debug_) {
            std::cout << "Ids found: " << ids.size() << std::endl;
        }
        try{
            cv::aruco::interpolateCornersCharuco(corner_list,
                                                 ids,
                                                 image,
                                                 board,
                                                 charucoCorners,
                                                 charucoIds,
                                                 camera_matrix_,
                                                 distortion_coeff_);

            // we want all 16 inner chessboard corners of the 5 x 5 board
            // to be detected
            if(charucoIds.size() >= 16) {
                if(debug_) {
                    std::cout << "Charuco ids found: " << charucoIds.size() << std::endl;
                }

                for(size_t i = 1; i < charucoCorners.size(); i++) {
                    // if there are two corners in the same place, there is
                    // something wrong
                    if(charucoCorners[i].x == charucoCorners[i - 1].x
                       && charucoCorners[i].y == charucoCorners[i - 1].y) {
                        valid = false;
                        break;
                    }
                }

                if(valid) {
                    valid = cv::aruco::estimatePoseCharucoBoard(charucoCorners,
                                                                charucoIds,
                                                                board,
                                                                camera_matrix_,
                                                                distortion_coeff_,
                                                                rvec,
                                                                tvec);
                } else {
                    return false;
                }
            } else {
                return false;
            }

        } catch(const cv::Exception& e){
            std::cout << "Error says: " << e.what() << std::endl;
            return false;
        }

        if(valid) {
            if(debug_) {
                Image image_color(image.rows, image.cols, CV_8UC3);
                cvtColor(image, image_color, cv::COLOR_GRAY2BGR);

                cv::aruco::drawDetectedCornersCharuco(image_color,
                                                      charucoCorners,
                                                      charucoIds,
                                                      cv::Scalar(255, 0, 0));

                cv::aruco::drawAxis(image_color,
                                    camera_matrix_,
                                    distortion_coeff_, rvec, tvec, 0.1);

                cv::imshow("out", image_color);
                waitKey(1);
            }

            // convert the OpenCV axis-angle/translation pair into an
            // Eigen homogeneous transform
            Mat rotmatrix;
            Rodrigues(rvec, rotmatrix);
            Eigen::Matrix3d rotation;
            Eigen::Vector3d translation;

            cv2eigen(rotmatrix, rotation);
            cv2eigen(tvec, translation);

            camera_marker_transform_.setIdentity();
            camera_marker_transform_.matrix().block<3, 3>(0, 0) = rotation;
            camera_marker_transform_.matrix().block<3, 1>(0, 3) = translation;

            cTm = camera_marker_transform_;

            if(verbose_) {
                std::cout << "Marker to camera captured: ";
                std::cout << cTm.matrix() << std::endl;
            }
        } else {
            return false;
        }

        return true;
    }
    return false;
}

}
}
--------------------------------------------------------------------------------
/src/MultiCameraNode.cpp:
--------------------------------------------------------------------------------
#include <iostream>
#include <memory>
#include <cmath>

#include <Eigen/Geometry>

#include <ros/ros.h>

#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>

#include <cv_bridge/cv_bridge.h>
#include <opencv2/aruco.hpp>

#include <tf/transform_listener.h>
#include <tf_conversions/tf_eigen.h>

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>

#include <boost/bind.hpp>

#include <gtsam/geometry/Pose3.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/nonlinear/Expression.h>
#include <gtsam/nonlinear/expressions.h>
#include <gtsam/nonlinear/ExpressionFactorGraph.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/Marginals.h>

#include <CameraMarkerTransform.hpp>

using namespace dls::perception;
using namespace cv;
using namespace cv::aruco;
using namespace gtsam;


typedef message_filters::Subscriber<sensor_msgs::Image> ImgSubscriber;
typedef std::shared_ptr<ImgSubscriber> ImgSubscriberPtr;

typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image,
                                                        sensor_msgs::Image> MyPolicy;
typedef message_filters::Synchronizer<MyPolicy> Synchronizer;
typedef std::shared_ptr<Synchronizer> SynchronizerPtr;


typedef Expression<Pose3> Pose3_;

class MultiCameraCalibNode {
public:
    typedef Eigen::Affine3d Transform;
public:
    MultiCameraCalibNode(ros::NodeHandle& nh) :
        nh_(nh),
        x1_asus(Symbol('x', 0)),
        x2_multisense(Symbol('x', 1))
    {
        // rough initial guesses for the two camera mounts and the foot poses
        tf::Matrix3x3 temp_tf_matrix;
        temp_tf_matrix.setRPY(0, 60 * M_PI / 180, 0);
        tf::Vector3 temp_tf_vector(0.65, 0, 0.1);
        tf::Transform temp_tf_transform(temp_tf_matrix, temp_tf_vector);
        tf::transformTFToEigen(temp_tf_transform, init_asus);

        tf::Matrix3x3 temp_tf_matrix2;
        temp_tf_matrix2.setRPY(0, 30 * M_PI / 180, 0);
        tf::Vector3 temp_tf_vector2(0.75, 0, 0.1);
        tf::Transform temp_tf_transform2(temp_tf_matrix2, temp_tf_vector2);
        tf::transformTFToEigen(temp_tf_transform2, init_multisense);

        tf::Matrix3x3 temp_tf_matrix3;
        temp_tf_matrix3.setRPY(0, 30 * M_PI / 180, 0);
        tf::Vector3 temp_tf_vector3(0.85, 0, -0.1);
        tf::Transform temp_tf_transform3(temp_tf_matrix3, temp_tf_vector3);
        tf::transformTFToEigen(temp_tf_transform3, init_pose);

        asus_image_sub.reset(new ImgSubscriber(nh, asus_image_topic, 1));
        multisense_image_sub.reset(new ImgSubscriber(nh, multisense_image_topic, 1));
        sync.reset(new Synchronizer(MyPolicy(10), *asus_image_sub, *multisense_image_sub));
        sync->registerCallback(boost::bind(&MultiCameraCalibNode::camerasCallback, this, _1, _2));

        // rotation sigmas first, then translation
        priorModel = noiseModel::Diagonal::Sigmas((Vector(6) << Vector3::Constant(1e-6), Vector3::Constant(1e-4)).finished());
        measurementModel = noiseModel::Diagonal::Sigmas((Vector(6) << Vector3::Constant(1e-6), Vector3::Constant(1e-4)).finished());
    }

    void run(){
        bool tf_ok_asus = false;
        bool tf_ok_multisense = false;
        bool tf_ok_extender = false;

        if(verbose){
            ROS_INFO("Preparing to run the calibration with parameters:");
            ROS_INFO_STREAM("multisense_image_topic: " << multisense_image_topic);
            ROS_INFO_STREAM("multisense_camera_info_topic: " << multisense_camera_info_topic);
            ROS_INFO_STREAM("multisense_camera_frame_name: " << multisense_camera_frame_name);
            ROS_INFO_STREAM("multisense_sensor_frame_name: " << multisense_sensor_frame_name);
            ROS_INFO_STREAM("asus_image_topic: " << asus_image_topic);
            ROS_INFO_STREAM("asus_camera_info_topic: " << asus_camera_info_topic);
            ROS_INFO_STREAM("asus_camera_frame_name: " << asus_camera_frame_name);
            ROS_INFO_STREAM("asus_sensor_frame_name: " << asus_sensor_frame_name);
            ROS_INFO_STREAM("base_frame_name: " << base_frame_name);
            ROS_INFO_STREAM("marker_frame_name: " << marker_frame_name);
            ROS_INFO_STREAM("knee_frame_name: " << knee_frame_name);
        }

        // cameras_sub_ = nh_.subscribe(multisense_image_topic, 10, &MultiCameraCalibNode::multisenseInfoCallback, this);
        multisense_camera_info_sub_ = nh_.subscribe(multisense_camera_info_topic, 10, &MultiCameraCalibNode::multisenseInfoCallback, this);
        asus_camera_info_sub_ = nh_.subscribe(asus_camera_info_topic, 10, &MultiCameraCalibNode::asusInfoCallback, this);

        // Don't leave until all three camera-to-sensor-mount transforms have been captured!
        while(nh_.ok() && !(tf_ok_asus && tf_ok_multisense && tf_ok_extender)){
            if(!tf_ok_asus){
                try{
                    ros::Time now = ros::Time::now();
                    tf_listener_.waitForTransform(asus_sensor_frame_name, asus_camera_frame_name,
                                                  now, ros::Duration(3.0));
                    tf_listener_.lookupTransform(asus_sensor_frame_name, asus_camera_frame_name,
                                                 now, tf_transform_);

                    tf::transformTFToEigen(tf_transform_, sTc_asus);

                    if(verbose){
                        ROS_INFO("Asus camera to sensor captured:");
                        ROS_INFO_STREAM(sTc_asus.matrix().format(fmt_));
                    }
                    tf_ok_asus = true;
                }
                catch (const tf::TransformException& ex){
                    ROS_ERROR("%s", ex.what());
                    ros::Duration(1.0).sleep();
                }
            }

            if(!tf_ok_multisense){
                try{
                    ros::Time now = ros::Time::now();
                    tf_listener_.waitForTransform(multisense_sensor_frame_name, multisense_camera_frame_name,
                                                  now, ros::Duration(3.0));
                    tf_listener_.lookupTransform(multisense_sensor_frame_name, multisense_camera_frame_name,
                                                 now, tf_transform_);

                    tf::transformTFToEigen(tf_transform_, sTc_multisense);

                    if(verbose){
                        ROS_INFO("Camera to sensor captured:");
                        ROS_INFO_STREAM(sTc_multisense.matrix().format(fmt_));
                    }
                    tf_ok_multisense = true;
                }
                catch (const tf::TransformException& ex){
                    ROS_ERROR("%s", ex.what());
                    ros::Duration(1.0).sleep();
                }
            }

            if(!tf_ok_extender){
                try{
                    ros::Time now = ros::Time::now();
                    tf_listener_.waitForTransform(marker_frame_name, knee_frame_name,
                                                  now, ros::Duration(3.0));
                    tf_listener_.lookupTransform(marker_frame_name, knee_frame_name,
                                                 now, tf_transform_);

                    tf::transformTFToEigen(tf_transform_, mTk_);

                    if(verbose){
                        ROS_INFO("Knee to marker captured:");
                        ROS_INFO_STREAM(mTk_.matrix().format(fmt_));
                    }
                    tf_ok_extender = true;
                }
                catch (const tf::TransformException& ex){
                    ROS_ERROR("%s", ex.what());
                    ros::Duration(1.0).sleep();
                }
            }
        }

        // don't leave until I say so!
        while(nh_.ok() && !stop){
            ros::spinOnce();
        }

        std::cout << " Time to process data and factor graph " << std::endl;

        graph.print("\nFactor Graph:\n");

        Values initials;
        initials.insert(Symbol('x', 0), Pose3(Rot3(init_asus.rotation()), init_asus.translation()));
        initials.insert(Symbol('x', 1), Pose3(Rot3(init_multisense.rotation()), init_multisense.translation()));

        // one pose variable per accumulated foot observation
        for (int i = 2; i <= points_number; i++)
        {
            std::cout << "init: " << i << std::endl;
            initials.insert(Symbol('x', i), Pose3(Rot3(init_pose.rotation()), init_pose.translation()));
        }

        GaussNewtonParams parameters;
        parameters.setVerbosity("ERROR");

        // optimize!
        GaussNewtonOptimizer optimizer(graph, initials, parameters);
        Values results = optimizer.optimize();

        // print final values
        results.print("Final Result:\n");

        // // Calculate marginal covariances for all poses
        // Marginals marginals(graph, results);

        // // print marginal covariances
        // std::cout << "x1 covariance:\n" << marginals.marginalCovariance(Symbol('x', 0)) << std::endl;
        // std::cout << "x2 covariance:\n" << marginals.marginalCovariance(Symbol('x', 1)) << std::endl;
    }

    void camerasCallback(const sensor_msgs::ImageConstPtr& image_asus,
                         const sensor_msgs::ImageConstPtr& image_multisense)
    {
        std::cout << "msg received" << std::endl;

        cv_bridge::CvImageConstPtr asus_temp_image = cv_bridge::toCvShare(image_asus, "mono8");
        Mat asus_frame = asus_temp_image->image;

        cv_bridge::CvImageConstPtr multisense_temp_image = cv_bridge::toCvShare(image_multisense, "mono8");
        Mat multisense_frame = multisense_temp_image->image;
        if(debug){
            std::cout << "ASUS image: " << asus_frame.rows
                      << " rows and " << asus_frame.cols << " cols" << std::endl;
            std::cout << "Multisense image: " << multisense_frame.rows
                      << " rows and " << multisense_frame.cols << " cols" << std::endl;
        }

        std::cout << "\nAsus looks for the marker in the image ... " << std::endl;
        if(!asus_camera_marker_transf_.getCameraMarkerTransform(asus_frame, cTm_asus)){
            return;
        }

        std::cout << "\nMultisense looks for the marker in the image ... " << std::endl;
        if(!multisense_camera_marker_transf_.getCameraMarkerTransform(multisense_frame, cTm_multisense)){
            return;
        }

        ros::Time now_asus = image_asus->header.stamp;
        ros::Time now_multisense = image_multisense->header.stamp;
        ros::Time now = ros::Time(0);
        if(debug)
        {
            std::cout << "\nasus_stamp - multisense_stamp = " << now_asus - now_multisense << std::endl;
            std::cout << " asus_stamp " << now_asus << " multisense_stamp " << now_multisense << " now " << now << std::endl;
        }

        try{
            tf_listener_.waitForTransform(base_frame_name, knee_frame_name,
                                          now_asus, ros::Duration(3.0));
            tf_listener_.lookupTransform(base_frame_name, knee_frame_name,
                                         now_asus, tf_transform_);

            tf::transformTFToEigen(tf_transform_, bTk_);
        }
        catch (const tf::TransformException& ex)
        {
            ROS_ERROR("%s", ex.what());
            ros::Duration(1.0).sleep();
            std::cout << " DATA SKIPPED " << std::endl;
            return;
        }

        factor_dot_asus = bTk_ * mTk_.inverse() * cTm_asus.inverse() * sTc_asus.inverse();
        factor_dot_multisense = bTk_ * mTk_.inverse() * cTm_multisense.inverse() * sTc_multisense.inverse();
        Transform asusTmulti = sTc_asus * cTm_asus * cTm_multisense.inverse() * sTc_multisense.inverse();

        Transform pose_knee = bTk_ * mTk_.inverse();
        Transform sTm_asus = sTc_asus * cTm_asus;
        Transform sTm_multisense = sTc_multisense * cTm_multisense;

        if (verbose)
        {
            std::cout << " \n<------------------ DOT FACTORS --------------->" << std::endl;
            std::cout << " sTc_asus " << std::endl;
            std::cout << sTc_asus.matrix().format(fmt_) << std::endl << std::endl;
            std::cout << " sTc_multisense " << std::endl;
            std::cout << sTc_multisense.matrix().format(fmt_) << std::endl << std::endl;
            std::cout << " cTm_asus " << std::endl;
            std::cout << cTm_asus.matrix().format(fmt_) << std::endl << std::endl;
            std::cout << " cTm_multisense " << std::endl;
            std::cout << cTm_multisense.matrix().format(fmt_) << std::endl << std::endl;
            std::cout << " mTk_ " << std::endl;
            std::cout << mTk_.matrix().format(fmt_) << std::endl << std::endl;
            std::cout << " bTk_ " << std::endl;
            std::cout << bTk_.matrix().format(fmt_) << std::endl << std::endl;
            std::cout << " factor_dot_asus " << std::endl;
            std::cout << factor_dot_asus.matrix().format(fmt_) << std::endl << std::endl;
            std::cout << " factor_dot_multisense " << std::endl;
            std::cout << factor_dot_multisense.matrix().format(fmt_) << std::endl << std::endl;
            std::cout << " relative transform asus - multisense " << std::endl;
            std::cout << asusTmulti.matrix().format(fmt_) << std::endl << std::endl;
        }

        if (count <= points_number)
        {
            Pose3_ foot_pose(Symbol('x', count));

            // prior on the foot/marker pose, from the forward kinematics
            graph.addExpressionFactor(foot_pose,
                                      Pose3(Rot3(pose_knee.rotation()),
                                            pose_knee.translation()),
                                      priorModel);

            // marker as seen from the asus mount
            graph.addExpressionFactor(between(foot_pose, x1_asus),
                                      Pose3(Rot3(sTm_asus.inverse().rotation()),
                                            sTm_asus.inverse().translation()),
                                      measurementModel);

            // marker as seen from the multisense mount
            graph.addExpressionFactor(between(foot_pose, x2_multisense),
                                      Pose3(Rot3(sTm_multisense.inverse().rotation()),
                                            sTm_multisense.inverse().translation()),
                                      measurementModel);

            // relative constraint between the two camera mounts
            graph.addExpressionFactor(between(x1_asus, x2_multisense),
                                      Pose3(Rot3(asusTmulti.rotation()), asusTmulti.translation()),
                                      measurementModel);
        }

        // graph.addExpressionFactor(x1_asus,
        //                           Pose3(Rot3(factor_dot_asus.rotation()), factor_dot_asus.translation()),
        //                           priorModel);
        // graph.addExpressionFactor(x2_multisense,
        //                           Pose3(Rot3(factor_dot_multisense.rotation()), factor_dot_multisense.translation()),
        //                           priorModel);

        // graph.addExpressionFactor(between(x1_asus, x2_multisense),
        //                           Pose3(Rot3(asusTmulti.rotation()), asusTmulti.translation()),
        //                           measurementModel);

        if (count >= points_number)
        {
            std::cout << " Accumulated points reached, stop = true " << std::endl;
            stop = true;
        }

        count++;

        std::cout << "Number of factors accumulated " << count << std::endl;
    }

    void multisenseInfoCallback(const sensor_msgs::CameraInfoConstPtr& camera_info_msg){
        if(camera_info_msg->distortion_model != "plumb_bob"){
            std::cout << "Sorry, only the plumb_bob distortion model is supported" << std::endl;
            return;
        }
        if(camera_info_msg->D.size() < 5){
            std::cout << "Expected at least 5 distortion coefficients, got "
                      << camera_info_msg->D.size() << std::endl;
            return;
        }

        if(verbose){
            std::cout << "Received camera info: " << std::endl;
            std::cout << "-- distortion model: " << camera_info_msg->distortion_model << std::endl;
            std::cout << "-- coefficients (k1, k2, t1, t2, k3): ";
            for(int i = 0; i < 5; i++){
                std::cout << camera_info_msg->D[i] << " ";
            }
            std::cout << std::endl;
        }

        Matx33d cameraMatrix;
        for(int i = 0; i < 9; i++) {
            cameraMatrix(i / 3, i % 3) = camera_info_msg->K[i];
        }

        cv::Vec<double, 5> distCoeff;
        for(int i = 0; i < 5; i++) {
            distCoeff(i) = (double)camera_info_msg->D[i];
        }

        if(verbose){
            std::cout << "-- Receiving Multisense parameters" << std::endl;
            std::cout << " Camera Matrix: " << std::endl << cameraMatrix << std::endl;
            std::cout << std::endl;
            std::cout << " Distortion coefficients: " << std::endl << distCoeff;
            std::cout << std::endl;
        }
        multisense_camera_marker_transf_.setCameraParameters(cameraMatrix, distCoeff);

        // we need the parameters only once.
        multisense_camera_info_sub_.shutdown();
    }

    void asusInfoCallback(const sensor_msgs::CameraInfoConstPtr& camera_info_msg){
        if(camera_info_msg->distortion_model != "plumb_bob"){
            std::cout << "Sorry, only the plumb_bob distortion model is supported" << std::endl;
            return;
        }
        if(camera_info_msg->D.size() < 5){
            std::cout << "Expected at least 5 distortion coefficients, got "
                      << camera_info_msg->D.size() << std::endl;
            return;
        }

        if(verbose){
            std::cout << "Received camera info: " << std::endl;
            std::cout << "-- distortion model: " << camera_info_msg->distortion_model << std::endl;
            std::cout << "-- coefficients (k1, k2, t1, t2, k3): ";
            for(int i = 0; i < 5; i++){
                std::cout << camera_info_msg->D[i] << " ";
            }
            std::cout << std::endl;
        }

        Matx33d cameraMatrix;
        for(int i = 0; i < 9; i++) {
            cameraMatrix(i / 3, i % 3) = camera_info_msg->K[i];
        }

        cv::Vec<double, 5> distCoeff;
        for(int i = 0; i < 5; i++) {
            distCoeff(i) = (double)camera_info_msg->D[i];
        }

        if(verbose){
            std::cout << "-- Receiving Asus parameters" << std::endl;
            std::cout << " Camera Matrix: " << std::endl << cameraMatrix << std::endl;
            std::cout << std::endl;
            std::cout << " Distortion coefficients: " << std::endl << distCoeff;
            std::cout << std::endl;
        }
        asus_camera_marker_transf_.setCameraParameters(cameraMatrix, distCoeff);

        // we need the parameters only once.
        asus_camera_info_sub_.shutdown();
    }


private:
    ros::NodeHandle nh_;
    CameraMarkerTransform multisense_camera_marker_transf_;
    CameraMarkerTransform asus_camera_marker_transf_;

    ImgSubscriberPtr asus_image_sub;
    ImgSubscriberPtr multisense_image_sub;

    ros::Subscriber cameras_sub_;
    ros::Subscriber multisense_camera_info_sub_;
    ros::Subscriber asus_camera_info_sub_;

    SynchronizerPtr sync;

    tf::TransformListener tf_listener_;
    tf::StampedTransform tf_transform_;

    Transform cTm_asus = Transform::Identity();
    Transform cTm_multisense = Transform::Identity();
    Transform bTk_ = Transform::Identity();  // from base frame to knee frame
    Transform mTk_ = Transform::Identity();  // from knee frame to marker frame
    Transform sTc_asus = Transform::Identity();
    Transform sTc_multisense = Transform::Identity();

    Transform factor_dot_asus = Transform::Identity();
    Transform factor_dot_multisense = Transform::Identity();

    Transform pose_asus = Transform::Identity();
    Transform pose_multisense = Transform::Identity();

    Transform init_asus = Transform::Identity();
    Transform init_multisense = Transform::Identity();
    Transform init_pose = Transform::Identity();

    Eigen::IOFormat fmt_ = Eigen::IOFormat(Eigen::FullPrecision, 0, "\t ", "\n", "[", "]");

    bool stop = false;

    // pose variables x0 and x1 are the camera mounts, so the foot poses start at x2
    int count = 2;

    ExpressionFactorGraph graph;
    noiseModel::Diagonal::shared_ptr measurementModel;
    noiseModel::Diagonal::shared_ptr priorModel;

    Pose3_ x1_asus, x2_multisense;


public:
    std::string multisense_image_topic = "/multisense/left/image_mono";
    std::string multisense_camera_info_topic = "/multisense/left/image_mono/camera_info";
    std::string asus_image_topic = "/asus/rgb/image_raw";
    std::string asus_camera_info_topic = "/asus/rgb/camera_info";

    std::string base_frame_name = "/base_link";
    std::string multisense_camera_frame_name = "/multisense/left_camera_optical_frame";
    std::string multisense_sensor_frame_name = "/head";
    std::string asus_camera_frame_name = "/asus_rgb_optical_frame";
    std::string asus_sensor_frame_name = "/asus_mount_link";
    std::string marker_frame_name = "/ext_calibration_plate_head";
    std::string knee_frame_name = "/lf_lowerleg";

    int points_number = 100;

    bool debug = false;
    bool verbose = false;
};


int main(int argc, char** argv)
{
    ros::init(argc, argv, "multicamera_calibrator_node");
    ros::NodeHandle nh("~");
    MultiCameraCalibNode calib_node(nh);

    std::string multisense_image_topic = "/multisense/left/image_mono";
    std::string multisense_camera_info_topic = "/multisense/left/image_mono/camera_info";
    std::string asus_image_topic = "/asus/rgb/image_raw";
    std::string asus_camera_info_topic = "/asus/rgb/camera_info";

    std::string multisense_camera_frame_name = "/multisense/left_camera_optical_frame";
    std::string multisense_sensor_frame_name = "/head";
    std::string asus_camera_frame_name = "/asus_rgb_optical_frame";
    std::string asus_sensor_frame_name = "/asus_mount_link";
    std::string knee_frame_name = "/lf_lowerleg";

    bool verbose = true;
    int points_number = 100;

    // getting parameters from outside (e.g., launch file)
    nh.getParam("multisense_image_topic", multisense_image_topic);
    nh.getParam("multisense_camera_info_topic", multisense_camera_info_topic);
    nh.getParam("asus_image_topic", asus_image_topic);
    nh.getParam("asus_camera_info_topic", asus_camera_info_topic);

    nh.getParam("multisense_camera_frame_name", multisense_camera_frame_name);
    nh.getParam("multisense_sensor_frame_name", multisense_sensor_frame_name);
    nh.getParam("asus_camera_frame_name", asus_camera_frame_name);
    nh.getParam("asus_sensor_frame_name", asus_sensor_frame_name);
    nh.getParam("knee_frame_name", knee_frame_name);

    nh.getParam("verbose", verbose);
    nh.getParam("points_number", points_number);

    calib_node.multisense_image_topic = multisense_image_topic;
    calib_node.multisense_camera_info_topic = multisense_camera_info_topic;
    calib_node.multisense_camera_frame_name = multisense_camera_frame_name;
    calib_node.multisense_sensor_frame_name = multisense_sensor_frame_name;

    calib_node.asus_image_topic = asus_image_topic;
    calib_node.asus_camera_info_topic = asus_camera_info_topic;
    calib_node.asus_camera_frame_name = asus_camera_frame_name;
    calib_node.asus_sensor_frame_name = asus_sensor_frame_name;
    calib_node.knee_frame_name = knee_frame_name;

    calib_node.points_number = points_number;

    calib_node.verbose = verbose;

    calib_node.run();
    return 0;
}
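For reference, the measurement chain both calibration nodes rely on can be
written as one composition. The sketch below (the function name is
illustrative; the frame names follow the code above) reproduces the
per-sample closed-form estimate that camerasCallback stores in factor_dot_*;
the factor graph then fuses many such samples instead of trusting any single
one:

#include <Eigen/Geometry>

// Closed-form estimate of the sensor mount pose in the base frame from one
// sample: base -> knee (forward kinematics, TF), marker -> knee (fixed
// mounting, TF), camera -> marker (ChArUco detection), mount -> camera (TF).
Eigen::Affine3d mountPoseInBase(const Eigen::Affine3d& bTk,
                                const Eigen::Affine3d& mTk,
                                const Eigen::Affine3d& cTm,
                                const Eigen::Affine3d& sTc) {
    return bTk * mTk.inverse() * cTm.inverse() * sTc.inverse();
}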
--------------------------------------------------------------------------------
/src/OneCameraNode.cpp:
--------------------------------------------------------------------------------
#include <iostream>
#include <memory>
#include <cmath>

#include <Eigen/Geometry>

#include <ros/ros.h>

#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>

#include <cv_bridge/cv_bridge.h>
#include <opencv2/aruco.hpp>

#include <tf/transform_listener.h>
#include <tf_conversions/tf_eigen.h>

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>

#include <gtsam/geometry/Pose3.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/nonlinear/Expression.h>
#include <gtsam/nonlinear/expressions.h>
#include <gtsam/nonlinear/ExpressionFactorGraph.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/Marginals.h>

#include <CameraMarkerTransform.hpp>

using namespace dls::perception;
using namespace cv;
using namespace cv::aruco;
using namespace gtsam;


typedef message_filters::Subscriber<sensor_msgs::Image> ImgSubscriber;
typedef std::shared_ptr<ImgSubscriber> ImgSubscriberPtr;

typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image,
                                                        sensor_msgs::Image> MyPolicy;
typedef message_filters::Synchronizer<MyPolicy> Synchronizer;
typedef std::shared_ptr<Synchronizer> SynchronizerPtr;


typedef Expression<Pose3> Pose3_;

class OneCameraCalibNode {
public:
    typedef Eigen::Affine3d Transform;
public:
    OneCameraCalibNode(ros::NodeHandle& nh) :
        nh_(nh),
        x_sensor(Symbol('x', 0))
    {
        // rough initial guesses for the sensor mount and the foot poses
        tf::Matrix3x3 temp_tf_matrix;
        temp_tf_matrix.setRPY(0, 30 * M_PI / 180, 0);
        tf::Vector3 temp_tf_vector(0.65, 0, 0.1);
        tf::Transform temp_tf_transform(temp_tf_matrix, temp_tf_vector);
        tf::transformTFToEigen(temp_tf_transform, init_sensor);

        tf::Matrix3x3 temp_tf_matrix2;
        temp_tf_matrix2.setRPY(0, 0, 0);
        tf::Vector3 temp_tf_vector2(0.9, 0, 0);
        tf::Transform temp_tf_transform2(temp_tf_matrix2, temp_tf_vector2);
        tf::transformTFToEigen(temp_tf_transform2, init_sensor2);

        // rotation sigmas first, then translation
        priorModel = noiseModel::Diagonal::Sigmas((Vector(6) << Vector3::Constant(1e-6), Vector3::Constant(1e-4)).finished());
        measurementModel = noiseModel::Diagonal::Sigmas((Vector(6) << Vector3::Constant(1e-6), Vector3::Constant(1e-4)).finished());
    }

    void run(){
        bool tf_ok_sensor = false;
        bool tf_ok_extender = false;

        if(verbose){
            ROS_INFO("Preparing to run the calibration with parameters:");
            ROS_INFO_STREAM("image_topic: " << image_topic);
            ROS_INFO_STREAM("camera_info_topic: " << camera_info_topic);
            ROS_INFO_STREAM("camera_frame_name: " << camera_frame_name);
            ROS_INFO_STREAM("sensor_frame_name: " << sensor_frame_name);
            ROS_INFO_STREAM("base_frame_name: " << base_frame_name);
            ROS_INFO_STREAM("marker_frame_name: " << marker_frame_name);
            ROS_INFO_STREAM("knee_frame_name: " << knee_frame_name);
        }

        sensor_camera_info_sub_ = nh_.subscribe(camera_info_topic, 10, &OneCameraCalibNode::sensorInfoCallback, this);
        camera_sub_ = nh_.subscribe(image_topic, 10, &OneCameraCalibNode::cameraCallback, this);
        // Don't leave until you get the camera to sensor mount transform!
        while(nh_.ok() && !(tf_ok_sensor && tf_ok_extender)){
            if(!tf_ok_sensor){
                try{
                    ros::Time now = ros::Time::now();
                    tf_listener_.waitForTransform(sensor_frame_name, camera_frame_name,
                                                  now, ros::Duration(3.0));
                    tf_listener_.lookupTransform(sensor_frame_name, camera_frame_name,
                                                 now, tf_transform_);

                    tf::transformTFToEigen(tf_transform_, sTc_sensor);

                    if(verbose){
                        ROS_INFO("Camera to sensor captured:");
                        ROS_INFO_STREAM(sTc_sensor.matrix().format(fmt_));
                    }
                    tf_ok_sensor = true;
                }
                catch (const tf::TransformException& ex){
                    ROS_ERROR("%s", ex.what());
                    ros::Duration(1.0).sleep();
                }
            }

            if(!tf_ok_extender){
                try{
                    ros::Time now = ros::Time::now();
                    tf_listener_.waitForTransform(marker_frame_name, knee_frame_name,
                                                  now, ros::Duration(3.0));
                    tf_listener_.lookupTransform(marker_frame_name, knee_frame_name,
                                                 now, tf_transform_);

                    tf::transformTFToEigen(tf_transform_, mTk_);

                    if(verbose){
                        ROS_INFO("Knee to marker captured:");
                        ROS_INFO_STREAM(mTk_.matrix().format(fmt_));
                    }
                    tf_ok_extender = true;
                }
                catch (const tf::TransformException& ex){
                    ROS_ERROR("%s", ex.what());
                    ros::Duration(1.0).sleep();
                }
            }
        }

        // don't leave until I say so!
        while(nh_.ok() && !stop){
            ros::spinOnce();
        }

        std::cout << " Time to process data and factor graph " << std::endl;

        graph.print("\nFactor Graph:\n");

        Values initials;
        initials.insert(Symbol('x', 0), Pose3(Rot3(init_sensor.rotation()), init_sensor.translation()));

        // one pose variable per accumulated foot observation
        for (int i = 1; i <= points_number; i++)
        {
            std::cout << "init: " << i << std::endl;
            initials.insert(Symbol('x', i), Pose3(Rot3(init_sensor2.rotation()), init_sensor2.translation()));
        }

        GaussNewtonParams parameters;
        parameters.setVerbosity("ERROR");

        // optimize!
        GaussNewtonOptimizer optimizer(graph, initials, parameters);
        Values results = optimizer.optimize();

        // print final values
        results.print("Final Result:\n");
    }

    void cameraCallback(const sensor_msgs::ImageConstPtr& image_sensor)
    {
        std::cout << "msg received" << std::endl;

        cv_bridge::CvImageConstPtr sensor_temp_image = cv_bridge::toCvShare(image_sensor, "mono8");
        Mat sensor_frame = sensor_temp_image->image;

        if(debug){
            std::cout << "sensor image: " << sensor_frame.rows
                      << " rows and " << sensor_frame.cols << " cols" << std::endl;
        }

        std::cout << "\nsensor looks for the marker in the image ... " << std::endl;
        if(!sensor_camera_marker_transf_.getCameraMarkerTransform(sensor_frame, cTm_sensor)){
            return;
        }
" <header.stamp; 197 | ros::Time now = ros::Time(0); 198 | if(1) 199 | { 200 | std::cout << " sensor_stamp " << now_sensor << " multisese_stamp " << " now " << now << std::endl; 201 | } 202 | 203 | try{ 204 | tf_listener_.waitForTransform(base_frame_name, knee_frame_name, 205 | now_sensor, ros::Duration(3.0)); 206 | tf_listener_.lookupTransform(base_frame_name, knee_frame_name, 207 | now_sensor, tf_transform_); 208 | 209 | tf::transformTFToEigen(tf_transform_, bTk_); 210 | 211 | } 212 | catch (tf::TransformException ex) 213 | { 214 | ROS_ERROR("%s",ex.what()); 215 | ros::Duration(1.0).sleep(); 216 | std::cout << " DATA SKIPPED " << std::endl; 217 | return; 218 | } 219 | 220 | factor_dot_sensor = bTk_ * mTk_.inverse(); 221 | sTm = sTc_sensor*cTm_sensor; 222 | 223 | chain = bTk_ * mTk_.inverse() * cTm_sensor.inverse() * sTc_sensor.inverse(); 224 | if (verbose) 225 | { 226 | std::cout << " \n<------------------DOT FACTORS --------------->" << std::endl; 227 | std::cout << " sTc_sensor " << std::endl; 228 | std::cout << sTc_sensor.matrix().format(fmt_) << std::endl << std::endl; 229 | std::cout << " cTm_sensor " << std::endl; 230 | std::cout << cTm_sensor.matrix().format(fmt_) << std::endl << std::endl; 231 | std::cout << " mTk_ " << std::endl; 232 | std::cout << mTk_.matrix().format(fmt_) << std::endl << std::endl; 233 | std::cout << " bTk_ " << std::endl; 234 | std::cout << bTk_.matrix().format(fmt_) << std::endl << std::endl; 235 | std::cout << " kTb_ " << std::endl; 236 | std::cout << bTk_.inverse().matrix().format(fmt_) << std::endl << std::endl; 237 | std::cout << " factor_dot_sensor " << std::endl; 238 | std::cout << factor_dot_sensor.matrix().format(fmt_) << std::endl << std::endl; 239 | std::cout << " sTc_sensor*cTm_sensor " << std::endl; 240 | std::cout << (sTc_sensor*cTm_sensor).matrix().format(fmt_) << std::endl << std::endl; 241 | std::cout << " sTc_sensor*cTm_sensor " << std::endl; 242 | std::cout << (sTc_sensor*cTm_sensor).inverse().matrix().format(fmt_) << std::endl << std::endl; 243 | 244 | } 245 | 246 | if (count <= points_number) 247 | { 248 | Pose3_ foot_pose(Symbol('x', count)); 249 | 250 | 251 | graph.addExpressionFactor(foot_pose, 252 | Pose3(Rot3(factor_dot_sensor.rotation()), 253 | factor_dot_sensor.translation()), 254 | priorModel); 255 | 256 | graph.addExpressionFactor(between(foot_pose,x_sensor), 257 | Pose3(Rot3( sTm.inverse().rotation()), 258 | sTm.inverse().translation()), 259 | measurementModel); 260 | 261 | } 262 | if (count == points_number) 263 | { 264 | std::cout << " Accumulated point exceeded, stop = true " << std::endl; 265 | stop=true; 266 | } 267 | 268 | 269 | 270 | if (1) 271 | { 272 | std::cout << "Number of factors accumulated " << count << std::endl; 273 | } 274 | count++; 275 | 276 | } 277 | 278 | void sensorInfoCallback(const sensor_msgs::CameraInfoConstPtr& camera_info_msg){ 279 | if(verbose){ 280 | std::cout << "Received camera info: " << std::endl; 281 | std::cout << "-- distortion model: " << camera_info_msg->distortion_model << std::endl; 282 | std::cout << "-- coefficients: (k1, k2, t1, t2, k3): "; 283 | } 284 | for(int i = 0; i < 5; i++){ 285 | std::cout << camera_info_msg->D[i] << " "; 286 | } 287 | std::cout << std::endl; 288 | 289 | std::cout << camera_info_msg->D.size() << std::endl; 290 | 291 | if(!camera_info_msg->distortion_model.compare("plumb_bob") == 0){ 292 | std::cout << "Sorry, only plumb_bob distortion model is supported" << std::endl; 293 | return; 294 | } 295 | 296 | Matx33d cameraMatrix; 297 | for(int i = 0; i < 9; i++) { 
            cameraMatrix(i / 3, i % 3) = camera_info_msg->K[i];
        }

        cv::Vec<double, 5> distCoeff;
        for(int i = 0; i < 5; i++) {
            distCoeff(i) = (double)camera_info_msg->D[i];
        }

        if(verbose){
            std::cout << "-- Receiving sensor parameters" << std::endl;
            std::cout << " Camera Matrix: " << std::endl << cameraMatrix << std::endl;
            std::cout << std::endl;
            std::cout << " Distortion coefficients: " << std::endl << distCoeff;
            std::cout << std::endl;
        }
        sensor_camera_marker_transf_.setCameraParameters(cameraMatrix, distCoeff);

        // we need the parameters only once.
        sensor_camera_info_sub_.shutdown();
    }


private:
    ros::NodeHandle nh_;
    CameraMarkerTransform sensor_camera_marker_transf_;

    ros::Subscriber camera_sub_;
    ros::Subscriber sensor_camera_info_sub_;

    tf::TransformListener tf_listener_;
    tf::StampedTransform tf_transform_;

    Transform cTm_sensor = Transform::Identity();
    Transform bTk_ = Transform::Identity();  // from base frame to knee frame
    Transform mTk_ = Transform::Identity();  // from knee frame to marker frame
    Transform sTc_sensor = Transform::Identity();

    Transform factor_dot_sensor = Transform::Identity();
    Transform sTm = Transform::Identity();
    Transform chain = Transform::Identity();

    Transform pose_sensor = Transform::Identity();

    Transform init_sensor = Transform::Identity();
    Transform init_sensor2 = Transform::Identity();

    Eigen::IOFormat fmt_ = Eigen::IOFormat(Eigen::FullPrecision, 0, "\t ", "\n", "[", "]");

    bool stop = false;

    // pose variable x0 is the sensor mount, so the foot poses start at x1
    int count = 1;

    ExpressionFactorGraph graph;
    noiseModel::Diagonal::shared_ptr measurementModel;
    noiseModel::Diagonal::shared_ptr priorModel;

    Pose3_ x_sensor;

public:
    std::string image_topic = "/multisense/left/image_mono";
    std::string camera_info_topic = "/multisense/left/image_mono/camera_info";

    std::string camera_frame_name = "/multisense/left_camera_optical_frame";
    std::string sensor_frame_name = "/head";
    std::string knee_frame_name = "/lf_lowerleg";

    std::string base_frame_name = "/base_link";
    std::string marker_frame_name = "/ext_calibration_plate_head";

    int points_number = 1;

    bool debug = false;
    bool verbose = false;
};


int main(int argc, char** argv)
{
    ros::init(argc, argv, "onecamera_calibrator_node");
    ros::NodeHandle nh("~");
    OneCameraCalibNode calib_node(nh);

    std::string image_topic = "/multisense/left/image_mono";
    std::string camera_info_topic = "/multisense/left/image_mono/camera_info";

    std::string camera_frame_name = "/multisense/left_camera_optical_frame";
    std::string sensor_frame_name = "/head";
    std::string knee_frame_name = "/lf_lowerleg";

    bool verbose = false;
    int points_number = 1;


    // getting parameters from outside (e.g., launch file)
    nh.getParam("image_topic", image_topic);
    nh.getParam("camera_info_topic", camera_info_topic);

    nh.getParam("camera_frame_name", camera_frame_name);
    nh.getParam("sensor_frame_name", sensor_frame_name);
    nh.getParam("knee_frame_name", knee_frame_name);

    nh.getParam("verbose", verbose);
    nh.getParam("points_number", points_number);

    calib_node.image_topic = image_topic;
    calib_node.camera_info_topic = camera_info_topic;
    calib_node.camera_frame_name = camera_frame_name;
    calib_node.sensor_frame_name = sensor_frame_name;
    calib_node.knee_frame_name = knee_frame_name;

    calib_node.points_number = points_number;

    calib_node.verbose = verbose;

    calib_node.run();
    return 0;
}
--------------------------------------------------------------------------------
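When the optimizer converges, the Pose3 stored at Symbol('x', 0) is the
estimated base_link-to-mount transform (in the multi-camera node, 'x', 0 and
'x', 1 are the two mounts). One way to try out the result, sketched here with
made-up numbers, is to publish it as a static transform; static_transform_publisher
takes x y z yaw pitch roll, the parent and child frames, and a period in
milliseconds:

    rosrun tf static_transform_publisher 0.65 0.0 0.10 0.0 0.52 0.0 base_link asus_mount_link 100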