├── doc └── mainpage.md ├── .clang-format ├── cmake └── package-config.cmake.in ├── README.md ├── LICENSE ├── .gitignore ├── CMakeLists.txt ├── include └── calibrel │ └── calibrel.hpp ├── test └── test_calibrel.cpp └── src └── calibrel.cpp /doc/mainpage.md: -------------------------------------------------------------------------------- 1 | \mainpage Getting started 2 | 3 | Please refer to \ref calrel. 4 | -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | Language: Cpp 3 | BasedOnStyle: WebKit 4 | AlwaysBreakTemplateDeclarations: true 5 | ColumnLimit: 78 6 | NamespaceIndentation: None 7 | ... 8 | -------------------------------------------------------------------------------- /cmake/package-config.cmake.in: -------------------------------------------------------------------------------- 1 | @PACKAGE_INIT@ 2 | 3 | include(CMakeFindDependencyMacro) 4 | 5 | find_dependency(OpenCV) 6 | 7 | include("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@Targets.cmake") 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### calibrel: More Accurate Camera Calibration with Imperfect Planar Target 2 | 3 | **_Deprecated. This implementation has been integrated into OpenCV 4.0. 4 | See [opencv/opencv/#12772](https://github.com/opencv/opencv/pull/12772) 5 | for details. The C++ interface in OpenCV is `calibrateCameraRO()`_** 6 | 7 | This calibration code is based on the paper: 8 | K. H. Strobl and G. Hirzinger. "[More Accurate Pinhole Camera 9 | Calibration with Imperfect Planar 10 | Target](https://www.robotic.dlr.de/fileadmin/robotic/stroblk/publications/strobl_2011iccv.pdf)". 11 | In Proceedings of the IEEE International Conference on Computer Vision 12 | (ICCV 2011), 1st IEEE Workshop on Challenges and Opportunities in Robot 13 | Perception, Barcelona, Spain, pp. 1068-1075, November 2011. 14 | 15 | This implementation was validated against [DLR CalDe and DLR 16 | CalLab](http://www.robotic.dlr.de/callab/), a remarkable camera 17 | calibration toolbox, which already includes this method. 18 | 19 | The code is largely copied from OpenCV's implementation. 20 | 21 | #### How to build 22 | 23 | ``` 24 | $ mkdir build 25 | $ cd build 26 | $ cmake .. 27 | $ make 28 | ``` 29 | 30 | #### Run test program 31 | 32 | ``` 33 | test_calibrel --mode=0 default.xml 34 | ``` 35 | 36 | Where `mode` has one of the following three values: 37 | 38 | * **0** Test with this method. 39 | * **1** Test with OpenCV's calibration method. 40 | * **2** Test with the hybrid method, i.e., OpenCV's calibration method 41 | followed by this method. 42 | 43 | See [calibrel_testdata](https://github.com/xoox/calibrel_testdata) 44 | for examples of XML setting files. 45 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | By downloading, copying, installing or using the software you agree to this license. 2 | If you do not agree to this license, do not download, install, 3 | copy or use the software. 4 | 5 | 6 | License Agreement 7 | For Open Source Computer Vision Library 8 | (3-clause BSD License) 9 | 10 | Copyright (C) 2000-2018, Intel Corporation, all rights reserved. 11 | Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. 
12 | Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved. 13 | Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. 14 | Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved. 15 | Copyright (C) 2015-2016, Itseez Inc., all rights reserved. 16 | Copyright (C) 2018, Wenfeng CAI, all rights reserved. 17 | Third party copyrights are property of their respective owners. 18 | 19 | Redistribution and use in source and binary forms, with or without modification, 20 | are permitted provided that the following conditions are met: 21 | 22 | * Redistributions of source code must retain the above copyright notice, 23 | this list of conditions and the following disclaimer. 24 | 25 | * Redistributions in binary form must reproduce the above copyright notice, 26 | this list of conditions and the following disclaimer in the documentation 27 | and/or other materials provided with the distribution. 28 | 29 | * Neither the names of the copyright holders nor the names of the contributors 30 | may be used to endorse or promote products derived from this software 31 | without specific prior written permission. 32 | 33 | This software is provided by the copyright holders and contributors "as is" and 34 | any express or implied warranties, including, but not limited to, the implied 35 | warranties of merchantability and fitness for a particular purpose are disclaimed. 36 | In no event shall copyright holders or contributors be liable for any direct, 37 | indirect, incidental, special, exemplary, or consequential damages 38 | (including, but not limited to, procurement of substitute goods or services; 39 | loss of use, data, or profits; or business interruption) however caused 40 | and on any theory of liability, whether in contract, strict liability, 41 | or tort (including negligence or otherwise) arising in any way out of 42 | the use of this software, even if advised of the possibility of such damage. 
43 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### C++ ### 2 | 3 | # Prerequisites 4 | *.d 5 | 6 | # Compiled Object files 7 | *.slo 8 | *.lo 9 | *.o 10 | *.obj 11 | 12 | # Precompiled Headers 13 | *.gch 14 | *.pch 15 | 16 | # Compiled Dynamic libraries 17 | *.so 18 | *.dylib 19 | *.dll 20 | 21 | # Fortran module files 22 | *.mod 23 | *.smod 24 | 25 | # Compiled Static libraries 26 | *.lai 27 | *.la 28 | *.a 29 | *.lib 30 | 31 | # Executables 32 | *.exe 33 | *.out 34 | *.app 35 | 36 | ### CMake ### 37 | 38 | CMakeCache.txt 39 | CMakeFiles 40 | CMakeScripts 41 | Testing 42 | Makefile 43 | cmake_install.cmake 44 | install_manifest.txt 45 | compile_commands.json 46 | CTestTestfile.cmake 47 | 48 | ### Vim ### 49 | 50 | # Swap 51 | [._]*.s[a-v][a-z] 52 | [._]*.sw[a-p] 53 | [._]s[a-rt-v][a-z] 54 | [._]ss[a-gi-z] 55 | [._]sw[a-p] 56 | 57 | # Session 58 | Session.vim 59 | 60 | # Temporary 61 | .netrwhist 62 | *~ 63 | # Auto-generated tag files 64 | tags 65 | # Persistent undo 66 | [._]*.un~ 67 | 68 | ### Linux ### 69 | 70 | *~ 71 | 72 | # temporary files which can be created if a process still has a handle open of a deleted file 73 | .fuse_hidden* 74 | 75 | # KDE directory preferences 76 | .directory 77 | 78 | # Linux trash folder which might appear on any partition or disk 79 | .Trash-* 80 | 81 | # .nfs files are created when an open file is removed but is still being accessed 82 | .nfs* 83 | 84 | ### Windows ### 85 | 86 | # Windows thumbnail cache files 87 | Thumbs.db 88 | ehthumbs.db 89 | ehthumbs_vista.db 90 | 91 | # Dump file 92 | *.stackdump 93 | 94 | # Folder config file 95 | [Dd]esktop.ini 96 | 97 | # Recycle Bin used on file shares 98 | $RECYCLE.BIN/ 99 | 100 | # Windows Installer files 101 | *.cab 102 | *.msi 103 | *.msix 104 | *.msm 105 | *.msp 106 | 107 | # Windows shortcuts 108 | *.lnk 109 | 110 | ### macOS ### 111 | 112 | # General 113 | .DS_Store 114 | .AppleDouble 115 | .LSOverride 116 | 117 | # Icon must end with two \r 118 | Icon 119 | 120 | 121 | # Thumbnails 122 | ._* 123 | 124 | # Files that might appear in the root of a volume 125 | .DocumentRevisions-V100 126 | .fseventsd 127 | .Spotlight-V100 128 | .TemporaryItems 129 | .Trashes 130 | .VolumeIcon.icns 131 | .com.apple.timemachine.donotpresent 132 | 133 | # Directories potentially created on remote AFP share 134 | .AppleDB 135 | .AppleDesktop 136 | Network Trash Folder 137 | Temporary Items 138 | .apdisk 139 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.1) 2 | project(calibrel VERSION 0.2) 3 | 4 | #------------------------------------------------------------------------------ 5 | # Dependencies 6 | #------------------------------------------------------------------------------ 7 | include(GNUInstallDirs) 8 | 9 | find_package(OpenCV REQUIRED COMPONENTS core imgcodecs imgproc calib3d) 10 | include_directories(${OpenCV_INCLUDE_DIRS}) 11 | 12 | OPTION(BUILD_TESTS "Enable testing" ON) 13 | 14 | #------------------------------------------------------------------------------ 15 | # Build 16 | #------------------------------------------------------------------------------ 17 | set(CMAKE_CXX_STANDARD 11) 18 | set(CMAKE_CXX_STANDARD_REQUIRED YES) 19 | # set(CMAKE_POSITION_INDEPENDENT_CODE YES) 20 | 21 | # Compiler 22 | if (WIN32) 
23 | if (MSVC) 24 | add_compile_options(/W4) 25 | endif(MSVC) 26 | else(WIN32) 27 | add_compile_options(-Wall) 28 | endif(WIN32) 29 | 30 | # set library and executable locations 31 | set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) 32 | set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) 33 | set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) 34 | 35 | include_directories(include) 36 | 37 | aux_source_directory("src" LIBSRCALL) 38 | add_library(${PROJECT_NAME} STATIC ${LIBSRCALL}) 39 | target_link_libraries(${PROJECT_NAME} INTERFACE ${OpenCV_LIBS}) 40 | 41 | if (BUILD_TESTS) 42 | enable_testing() 43 | aux_source_directory("test" TMVTESTSRC) 44 | foreach (PERSRC ${TMVTESTSRC}) 45 | string(REGEX REPLACE "test/(test_.*)\.cpp" "\\1" PERTGT ${PERSRC}) 46 | add_executable(${PERTGT} ${PERSRC}) 47 | add_dependencies(${PERTGT} ${PROJECT_NAME}) 48 | target_link_libraries(${PERTGT} PRIVATE ${PROJECT_NAME}) 49 | endforeach (PERSRC) 50 | endif(BUILD_TESTS) 51 | 52 | #------------------------------------------------------------------------------ 53 | # Installation 54 | #------------------------------------------------------------------------------ 55 | install( 56 | DIRECTORY 57 | "include/" 58 | DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} 59 | ) 60 | 61 | install(TARGETS ${PROJECT_NAME} 62 | EXPORT ${PROJECT_NAME}Targets 63 | RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} 64 | LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} 65 | ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} 66 | INCLUDES DESTINATION include 67 | ) 68 | 69 | #-- Add an Option to toggle the generation of the API documentation 70 | option(BUILD_DOCUMENTATION "Use Doxygen to create the HTML based API documentation" OFF) 71 | if(BUILD_DOCUMENTATION) 72 | find_package(Doxygen) 73 | if (NOT DOXYGEN_FOUND) 74 | message(FATAL_ERROR 75 | "Doxygen is needed to build the documentation. Please install it correctly") 76 | endif() 77 | if(DOXYGEN_VERSION VERSION_LESS "1.8.0") 78 | message(WARNING "Doxygen >=1.8.0 is needed for better output.") 79 | endif() 80 | # - DOXYGEN_STRIP_FROM_INC_PATH corresponding to STRIP_FROM_INC_PATH in the doxy file 81 | set(DOXYGEN_STRIP_FROM_INC_PATH "\"${CMAKE_SOURCE_DIR}/include\"") 82 | #-- Configure the Template Doxyfile for our specific project 83 | configure_file(doc/Doxyfile.in 84 | ${PROJECT_BINARY_DIR}/doc/Doxyfile @ONLY IMMEDIATE) 85 | #-- Add a custom target to run Doxygen when ever the project is built 86 | add_custom_target (docs 87 | COMMAND ${DOXYGEN_EXECUTABLE} 88 | ${PROJECT_BINARY_DIR}/doc/Doxyfile 89 | SOURCES ${PROJECT_BINARY_DIR}/doc/Doxyfile) 90 | # IF you do NOT want the documentation to be generated EVERY time you build the project 91 | # then leave out the 'ALL' keyword from the above command. 
92 | install(DIRECTORY ${PROJECT_BINARY_DIR}/doc/html 93 | DESTINATION ${CMAKE_INSTALL_DOCDIR} 94 | COMPONENT doc) 95 | endif(BUILD_DOCUMENTATION) 96 | 97 | #----------------------------------------------------------------------- 98 | include(CMakePackageConfigHelpers) 99 | 100 | configure_package_config_file( 101 | cmake/package-config.cmake.in 102 | ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake 103 | INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} 104 | NO_CHECK_REQUIRED_COMPONENTS_MACRO 105 | ) 106 | 107 | write_basic_package_version_file( 108 | ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake 109 | VERSION ${PROJECT_VERSION} 110 | COMPATIBILITY SameMajorVersion 111 | ) 112 | 113 | install( 114 | FILES 115 | ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake 116 | ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake 117 | DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} 118 | ) 119 | 120 | install(EXPORT ${PROJECT_NAME}Targets NAMESPACE ${PROJECT_NAME}:: 121 | DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}) 122 | -------------------------------------------------------------------------------- /include/calibrel/calibrel.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 3 | 4 | By downloading, copying, installing or using the software you agree 5 | to this license. If you do not agree to this license, do not 6 | download, install, copy or use the software. 7 | 8 | License Agreement 9 | For Open Source Computer Vision Library 10 | 11 | Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 12 | Copyright (C) 2009, Willow Garage Inc., all rights reserved. 13 | Copyright (C) 2018, Wenfeng CAI, all rights reserved. 14 | Third party copyrights are property of their respective owners. 15 | 16 | Redistribution and use in source and binary forms, with or without 17 | modification, are permitted provided that the following conditions 18 | are met: 19 | 20 | * Redistribution's of source code must retain the above copyright 21 | notice, this list of conditions and the following disclaimer. 22 | 23 | * Redistribution's in binary form must reproduce the above 24 | copyright notice, this list of conditions and the following 25 | disclaimer in the documentation and/or other materials provided 26 | with the distribution. 27 | 28 | * The name of the copyright holders may not be used to endorse or 29 | promote products derived from this software without specific 30 | prior written permission. 31 | 32 | This software is provided by the copyright holders and contributors 33 | "as is" and any express or implied warranties, including, but not 34 | limited to, the implied warranties of merchantability and fitness 35 | for a particular purpose are disclaimed. In no event shall the 36 | Intel Corporation or contributors be liable for any direct, 37 | indirect, incidental, special, exemplary, or consequential damages 38 | (including, but not limited to, procurement of substitute goods or 39 | services; loss of use, data, or profits; or business interruption) 40 | however caused and on any theory of liability, whether in contract, 41 | strict liability, or tort (including negligence or otherwise) 42 | arising in any way out of the use of this software, even if advised 43 | of the possibility of such damage. 
44 | */
45 | 
46 | #pragma once
47 | 
48 | #include "opencv2/calib3d/calib3d.hpp"
49 | #include "opencv2/calib3d/calib3d_c.h"
50 | #include "opencv2/imgproc/detail/distortion_model.hpp"
51 | #include "opencv2/imgproc/imgproc.hpp"
52 | #include "opencv2/imgproc/imgproc_c.h"
53 | #include 
54 | #include 
55 | 
56 | /*!
57 |  * \brief Namespace calrel.
58 |  */
59 | namespace calrel {
60 | 
61 | using namespace cv;
62 | 
63 | /*!
64 | \brief Finds the camera intrinsic and extrinsic parameters from
65 | several views of a calibration pattern.
66 | 
67 | \param imagePoints A vector of vectors of the projections of
68 | calibration pattern points (e.g.
69 | std::vector<std::vector<cv::Vec2f>>). imagePoints[i].size() must be
70 | equal to objectPoints.size() for each i.
71 | 
72 | \param imageSize Size of the image, used only to initialize the
73 | intrinsic camera matrix.
74 | 
75 | \param objectPoints A vector of calibration pattern points in
76 | the calibration pattern coordinate space (e.g.
77 | std::vector<cv::Vec3f>). The same calibration pattern must be shown
78 | in each view, and it must be fully visible. The points are 3D, but
79 | since they are in a pattern coordinate system, then, if the rig is
80 | planar, it may make sense to put the model on the XY coordinate plane
81 | so that the Z-coordinate of each input object point is 0. On output,
82 | the refined pattern points are returned for an imperfect planar target.
83 | 
84 | \param fixedObjPt The index of the 3D object point to be set as (d,
85 | 0, 0) as in Strobl's paper. Usually it is the top-right corner point
86 | of the calibration board grid.
87 | 
88 | \param cameraMatrix Output 3x3 floating-point camera matrix
89 | \f$\begin{bmatrix} f_x & 0 & c_x\\ 0 & f_y & c_y\\ 0 & 0 & 1
90 | \end{bmatrix}\f$. If CALIB_USE_INTRINSIC_GUESS and/or
91 | CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy
92 | must be initialized before calling the function.
93 | 
94 | \param distCoeffs Output vector of distortion coefficients \f$(k_1,
95 | k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x,
96 | \tau_y]]]])\f$ of 4, 5, 8, 12 or 14 elements.
97 | 
98 | \param rvecs Output vector of rotation vectors (see Rodrigues)
99 | estimated for each pattern view (e.g. std::vector<cv::Mat>). That
100 | is, each k-th rotation vector together with the corresponding k-th
101 | translation vector (see the next output parameter description)
102 | brings the calibration pattern from the model coordinate space (in
103 | which object points are specified) to the world coordinate space,
104 | that is, the real position of the calibration pattern in the k-th
105 | pattern view (k=0.. *M* -1).
106 | 
107 | \param tvecs Output vector of translation vectors estimated for each
108 | pattern view.
109 | 
110 | \param newObjPoints The updated output vector of pattern points.
111 | 
112 | \param stdDeviationsIntrinsics Output vector of standard deviations
113 | estimated for intrinsic parameters. Order of deviation values:
114 | \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1,
115 | s_2, s_3, s_4, \tau_x, \tau_y)\f$. If one of the parameters is not
116 | estimated, its deviation equals zero.
117 | 
118 | \param stdDeviationsExtrinsics Output vector of standard deviations
119 | estimated for extrinsic parameters. Order of deviation values:
120 | \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is the number of pattern
121 | views, and \f$R_i, T_i\f$ are concatenated 1x3 vectors.
122 | 
123 | \param stdDeviationsObjectPoints Output vector of standard
124 | deviations estimated for refined coordinates of calibration pattern
125 | points. It has the same size and order as the objectPoints vector.
126 | 
127 | \param perViewErrors Output vector of the RMS re-projection error
128 | estimated for each pattern view.
129 | 
130 | \param flags Different flags that may be zero or a combination of
131 | the following values:
132 |     - **CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid
133 |         initial values of fx, fy, cx, cy that are optimized further.
134 |         Otherwise, (cx, cy) is initially set to the image center (
135 |         imageSize is used), and focal distances are computed in a
136 |         least-squares fashion. Note that if intrinsic parameters
137 |         are known, there is no need to use this function just to
138 |         estimate extrinsic parameters. Use solvePnP instead.
139 |     - **CALIB_FIX_PRINCIPAL_POINT** The principal point is not
140 |         changed during the global optimization. It stays at the
141 |         center or at a different location specified when
142 |         CALIB_USE_INTRINSIC_GUESS is set too.
143 |     - **CALIB_FIX_ASPECT_RATIO** The function considers only fy
144 |         as a free parameter. The ratio fx/fy stays the same as in
145 |         the input cameraMatrix. When CALIB_USE_INTRINSIC_GUESS is
146 |         not set, the actual input values of fx and fy are ignored,
147 |         only their ratio is computed and used further.
148 |     - **CALIB_ZERO_TANGENT_DIST** Tangential distortion coefficients
149 |         \f$(p_1, p_2)\f$ are set to zero and stay zero.
150 |     - **CALIB_FIX_K1,...,CALIB_FIX_K6** The corresponding radial
151 |         distortion coefficient is not changed during the
152 |         optimization. If CALIB_USE_INTRINSIC_GUESS is set, the
153 |         coefficient from the supplied distCoeffs matrix is used.
154 |         Otherwise, it is set to 0.
155 |     - **CALIB_RATIONAL_MODEL** Coefficients k4, k5, and k6 are
156 |         enabled. For backward compatibility, this extra
157 |         flag should be explicitly specified to make the calibration
158 |         function use the rational model and return 8 coefficients.
159 |         If the flag is not set, the function computes and returns
160 |         only 5 distortion coefficients.
161 |     - **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4
162 |         are enabled. For backward compatibility, this
163 |         extra flag should be explicitly specified to make the
164 |         calibration function use the thin prism model and return 12
165 |         coefficients. If the flag is not set, the function computes
166 |         and returns only 5 distortion coefficients.
167 |     - **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion
168 |         coefficients are not changed during the optimization. If
169 |         CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
170 |         supplied distCoeffs matrix is used. Otherwise, it is set to 0.
171 |     - **CALIB_TILTED_MODEL** Coefficients tauX and tauY are
172 |         enabled. For backward compatibility, this extra
173 |         flag should be explicitly specified to make the calibration
174 |         function use the tilted sensor model and return 14
175 |         coefficients. If the flag is not set, the function computes
176 |         and returns only 5 distortion coefficients.
177 |     - **CALIB_FIX_TAUX_TAUY** The coefficients of the tilted
178 |         sensor model are not changed during the optimization. If
179 |         CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
180 |         supplied distCoeffs matrix is used. Otherwise, it is set to 0.
181 | 
182 | \param criteria Termination criteria for the iterative optimization
183 | algorithm.
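
A minimal usage sketch for the shorter overload declared below. It is
illustrative only: the input containers are assumed to be filled in by
the caller (for instance with corners from cv::findChessboardCorners()),
and boardWidth is just an example value for the number of inner corners
per board row.

\code
std::vector<std::vector<cv::Point2f>> imagePoints; // detected corners, one vector per view
std::vector<cv::Point3f> objectPoints;             // ideal planar grid, Z = 0
cv::Size imageSize;                                // size of the calibration images
cv::Mat cameraMatrix, distCoeffs;
std::vector<cv::Mat> rvecs, tvecs;
std::vector<cv::Point3f> newObjPoints;             // refined grid points on output
int boardWidth = 9;                                // inner corners per row (example value)
int fixedObjPt = boardWidth - 1;                   // fix the top-right corner, as in the paper
double rms = calrel::calibrateCamera(imagePoints, imageSize, objectPoints,
    fixedObjPt, cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints);
\endcode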
184 | 185 | \return the overall RMS re-projection error. 186 | 187 | The function estimates the intrinsic camera parameters and extrinsic 188 | parameters for each of the views. The algorithm is based on Klaus H. 189 | Strobl and Gerd Hirzinger's paper "More Accurate Pinhole Camera 190 | Calibration with Imperfect Planar Target". The coordinates of 3D 191 | object points and their corresponding 2D projections in each view 192 | must be specified. That may be achieved by using an object with a 193 | known geometry and easily detectable feature points. Such an object 194 | is called a calibration rig or calibration pattern. Currently, 195 | initialization of intrinsic parameters (when CALIB_USE_INTRINSIC_GUESS 196 | is not set) is only implemented for planar calibration patterns 197 | (where Z-coordinates of the object points must be all zeros). 198 | 199 | The algorithm performs the following steps: 200 | 201 | - Compute the initial intrinsic parameters (the option only 202 | available for planar calibration patterns) or read them from the 203 | input parameters. The distortion coefficients are all set to 204 | zeros initially unless some of CALIB_FIX_K? are specified. 205 | 206 | - Estimate the initial camera pose as if the intrinsic parameters 207 | have been already known. This is done using solvePnP. 208 | 209 | - Run the global Levenberg-Marquardt optimization algorithm to 210 | minimize the reprojection error, that is, the total sum of 211 | squared distances between the observed feature points 212 | imagePoints and the projected (using the current estimates for 213 | camera parameters and the poses) object points objectPoints. See 214 | projectPoints for details. 215 | */ 216 | double calibrateCamera(InputArrayOfArrays imagePoints, Size imageSize, 217 | InputArray objectPoints, int fixedObjPt, InputOutputArray cameraMatrix, 218 | InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, 219 | OutputArrayOfArrays tvecs, OutputArray newObjPoints, 220 | OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics, 221 | OutputArray stdDeviationsObjectPoints, OutputArray perViewErrors, 222 | int flags = 0, 223 | TermCriteria criteria = TermCriteria( 224 | TermCriteria::COUNT + TermCriteria::EPS, 60, DBL_EPSILON * 30)); 225 | 226 | /*! 227 | \overload 228 | */ 229 | double calibrateCamera(InputArrayOfArrays imagePoints, Size imageSize, 230 | InputArray objectPoints, int fixedObjPt, InputOutputArray cameraMatrix, 231 | InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, 232 | OutputArrayOfArrays tvecs, OutputArray newObjPoints, int flags = 0, 233 | TermCriteria criteria = TermCriteria( 234 | TermCriteria::COUNT + TermCriteria::EPS, 60, DBL_EPSILON * 30)); 235 | 236 | } /* end of namespace calrel */ 237 | -------------------------------------------------------------------------------- /test/test_calibrel.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "calibrel/calibrel.hpp" 16 | 17 | using namespace cv; 18 | using namespace std; 19 | 20 | static void help() 21 | { 22 | cout << "This is a camera calibration sample." 
<< endl 23 | << "Usage: camera_calibration [options] [configuration_file -- " 24 | "default ./default.xml]" 25 | << endl 26 | << "In the calibrel_testdata project you'll find the configuration " 27 | "file, which has detailed help of how to edit it. It may be any " 28 | "OpenCV supported file format XML/YAML." 29 | << endl 30 | << "The option of --mode accept three values:\n0 for this project's " 31 | "method;\n1 for OpenCV standard method;\n2 for standard method " 32 | "followed by this project's method." 33 | << endl; 34 | } 35 | class Settings { 36 | public: 37 | Settings() 38 | : goodInput(false) 39 | { 40 | } 41 | enum Pattern { 42 | NOT_EXISTING, 43 | CHESSBOARD, 44 | CIRCLES_GRID, 45 | ASYMMETRIC_CIRCLES_GRID 46 | }; 47 | enum InputType { INVALID, CAMERA, VIDEO_FILE, IMAGE_LIST }; 48 | 49 | void write(FileStorage& fs) const // Write serialization for this class 50 | { 51 | fs << "{" 52 | << "BoardSize_Width" << boardSize.width << "BoardSize_Height" 53 | << boardSize.height << "Square_Size" << squareSize 54 | << "Calibrate_Pattern" << patternToUse 55 | << "Calibrate_NrOfFrameToUse" << nrFrames 56 | << "Calibrate_FixAspectRatio" << aspectRatio 57 | << "Calibrate_AssumeZeroTangentialDistortion" 58 | << calibZeroTangentDist << "Calibrate_FixPrincipalPointAtTheCenter" 59 | << calibFixPrincipalPoint 60 | 61 | << "Write_DetectedFeaturePoints" << writePoints 62 | << "Write_extrinsicParameters" << writeExtrinsics 63 | << "Write_gridPoints" << writeGrid 64 | << "Write_outputFileName" << outputFileName 65 | 66 | << "Show_UndistortedImage" << showUndistorsed 67 | 68 | << "Input_FlipAroundHorizontalAxis" << flipVertical 69 | << "Input_Delay" << delay << "Input" << input << "}"; 70 | } 71 | void read(const FileNode& node) // Read serialization for this class 72 | { 73 | node["BoardSize_Width"] >> boardSize.width; 74 | node["BoardSize_Height"] >> boardSize.height; 75 | node["Calibrate_Pattern"] >> patternToUse; 76 | node["Square_Size"] >> squareSize; 77 | node["Calibrate_NrOfFrameToUse"] >> nrFrames; 78 | node["Calibrate_FixAspectRatio"] >> aspectRatio; 79 | node["Write_DetectedFeaturePoints"] >> writePoints; 80 | node["Write_extrinsicParameters"] >> writeExtrinsics; 81 | node["Write_gridPoints"] >> writeGrid; 82 | node["Write_outputFileName"] >> outputFileName; 83 | node["Calibrate_AssumeZeroTangentialDistortion"] 84 | >> calibZeroTangentDist; 85 | node["Calibrate_FixPrincipalPointAtTheCenter"] 86 | >> calibFixPrincipalPoint; 87 | node["Calibrate_UseFisheyeModel"] >> useFisheye; 88 | node["Input_FlipAroundHorizontalAxis"] >> flipVertical; 89 | node["Show_UndistortedImage"] >> showUndistorsed; 90 | node["Input"] >> input; 91 | node["Input_Delay"] >> delay; 92 | node["Fix_K1"] >> fixK1; 93 | node["Fix_K2"] >> fixK2; 94 | node["Fix_K3"] >> fixK3; 95 | node["Fix_K4"] >> fixK4; 96 | node["Fix_K5"] >> fixK5; 97 | 98 | validate(); 99 | } 100 | void validate() 101 | { 102 | goodInput = true; 103 | if (boardSize.width <= 0 || boardSize.height <= 0) { 104 | cerr << "Invalid Board size: " << boardSize.width << " " 105 | << boardSize.height << endl; 106 | goodInput = false; 107 | } 108 | if (squareSize <= 10e-6) { 109 | cerr << "Invalid square size " << squareSize << endl; 110 | goodInput = false; 111 | } 112 | if (nrFrames <= 0) { 113 | cerr << "Invalid number of frames " << nrFrames << endl; 114 | goodInput = false; 115 | } 116 | 117 | if (input.empty()) // Check for valid input 118 | inputType = INVALID; 119 | else { 120 | if (input[0] >= '0' && input[0] <= '9') { 121 | stringstream ss(input); 122 | ss >> 
cameraID; 123 | inputType = CAMERA; 124 | } else { 125 | if (isListOfImages(input) 126 | && readStringList(input, imageList)) { 127 | inputType = IMAGE_LIST; 128 | nrFrames = (nrFrames < (int)imageList.size()) 129 | ? nrFrames 130 | : (int)imageList.size(); 131 | } else 132 | inputType = VIDEO_FILE; 133 | } 134 | if (inputType == CAMERA) 135 | inputCapture.open(cameraID); 136 | if (inputType == VIDEO_FILE) 137 | inputCapture.open(input); 138 | if (inputType != IMAGE_LIST && !inputCapture.isOpened()) 139 | inputType = INVALID; 140 | } 141 | if (inputType == INVALID) { 142 | cerr << " Input does not exist: " << input; 143 | goodInput = false; 144 | } 145 | 146 | flag = 0; 147 | if (calibFixPrincipalPoint) 148 | flag |= CALIB_FIX_PRINCIPAL_POINT; 149 | if (calibZeroTangentDist) 150 | flag |= CALIB_ZERO_TANGENT_DIST; 151 | if (aspectRatio) 152 | flag |= CALIB_FIX_ASPECT_RATIO; 153 | if (fixK1) 154 | flag |= CALIB_FIX_K1; 155 | if (fixK2) 156 | flag |= CALIB_FIX_K2; 157 | if (fixK3) 158 | flag |= CALIB_FIX_K3; 159 | if (fixK4) 160 | flag |= CALIB_FIX_K4; 161 | if (fixK5) 162 | flag |= CALIB_FIX_K5; 163 | 164 | if (useFisheye) { 165 | // the fisheye model has its own enum, so overwrite the flags 166 | flag = fisheye::CALIB_FIX_SKEW 167 | | fisheye::CALIB_RECOMPUTE_EXTRINSIC; 168 | if (fixK1) 169 | flag |= fisheye::CALIB_FIX_K1; 170 | if (fixK2) 171 | flag |= fisheye::CALIB_FIX_K2; 172 | if (fixK3) 173 | flag |= fisheye::CALIB_FIX_K3; 174 | if (fixK4) 175 | flag |= fisheye::CALIB_FIX_K4; 176 | if (calibFixPrincipalPoint) 177 | flag |= fisheye::CALIB_FIX_PRINCIPAL_POINT; 178 | } 179 | 180 | calibrationPattern = NOT_EXISTING; 181 | if (!patternToUse.compare("CHESSBOARD")) 182 | calibrationPattern = CHESSBOARD; 183 | if (!patternToUse.compare("CIRCLES_GRID")) 184 | calibrationPattern = CIRCLES_GRID; 185 | if (!patternToUse.compare("ASYMMETRIC_CIRCLES_GRID")) 186 | calibrationPattern = ASYMMETRIC_CIRCLES_GRID; 187 | if (calibrationPattern == NOT_EXISTING) { 188 | cerr << " Camera calibration mode does not exist: " 189 | << patternToUse << endl; 190 | goodInput = false; 191 | } 192 | atImageList = 0; 193 | } 194 | Mat nextImage() 195 | { 196 | Mat result; 197 | if (inputCapture.isOpened()) { 198 | Mat view0; 199 | inputCapture >> view0; 200 | view0.copyTo(result); 201 | } else if (atImageList < imageList.size()) 202 | result = imread(imageList[atImageList++], IMREAD_COLOR); 203 | 204 | return result; 205 | } 206 | 207 | static bool readStringList(const string& filename, vector& l) 208 | { 209 | l.clear(); 210 | FileStorage fs(filename, FileStorage::READ); 211 | if (!fs.isOpened()) 212 | return false; 213 | FileNode n = fs.getFirstTopLevelNode(); 214 | if (n.type() != FileNode::SEQ) 215 | return false; 216 | FileNodeIterator it = n.begin(), it_end = n.end(); 217 | for (; it != it_end; ++it) 218 | l.push_back((string)*it); 219 | return true; 220 | } 221 | 222 | static bool isListOfImages(const string& filename) 223 | { 224 | string s(filename); 225 | // Look for file extension 226 | if (s.find(".xml") == string::npos && s.find(".yaml") == string::npos 227 | && s.find(".yml") == string::npos) 228 | return false; 229 | else 230 | return true; 231 | } 232 | 233 | public: 234 | Size boardSize; // The size of the board -> Number of items by width and 235 | // height 236 | Pattern calibrationPattern; // One of the Chessboard, circles, or 237 | // asymmetric circle pattern 238 | float squareSize; // The size of a square in your defined unit (point, 239 | // millimeter,etc). 
240 | int nrFrames; // The number of frames to use from the input for 241 | // calibration 242 | float aspectRatio; // The aspect ratio 243 | int delay; // In case of a video input 244 | bool writePoints; // Write detected feature points 245 | bool writeExtrinsics; // Write extrinsic parameters 246 | bool writeGrid; // Write refined 3D target grid points 247 | bool calibZeroTangentDist; // Assume zero tangential distortion 248 | bool calibFixPrincipalPoint; // Fix the principal point at the center 249 | bool flipVertical; // Flip the captured images around the horizontal axis 250 | string outputFileName; // The name of the file where to write 251 | bool showUndistorsed; // Show undistorted images after calibration 252 | string input; // The input -> 253 | bool useFisheye; // use fisheye camera model for calibration 254 | bool fixK1; // fix K1 distortion coefficient 255 | bool fixK2; // fix K2 distortion coefficient 256 | bool fixK3; // fix K3 distortion coefficient 257 | bool fixK4; // fix K4 distortion coefficient 258 | bool fixK5; // fix K5 distortion coefficient 259 | 260 | int cameraID; 261 | vector imageList; 262 | size_t atImageList; 263 | VideoCapture inputCapture; 264 | InputType inputType; 265 | bool goodInput; 266 | int flag; 267 | 268 | private: 269 | string patternToUse; 270 | }; 271 | 272 | static inline void read(const FileNode& node, Settings& x, 273 | const Settings& default_value = Settings()) 274 | { 275 | if (node.empty()) 276 | x = default_value; 277 | else 278 | x.read(node); 279 | } 280 | 281 | enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 }; 282 | 283 | enum CALIB_MODE { 284 | DLR11, //!< The method implemented in this project 285 | STD, //!< The standard method of OpenCV 286 | HYBRID //!< STD followed by DLR11 method 287 | }; 288 | 289 | bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, 290 | Mat& distCoeffs, vector > imagePoints, float grid_width, 291 | CALIB_MODE calib_mode = DLR11); 292 | 293 | int main(int argc, char* argv[]) 294 | { 295 | const String keys 296 | = "{help h usage ? | | print this message }" 297 | "{@set |default.xml| input setting file }" 298 | "{mode |0 | calibration method selection }" 299 | "{d | | actual distance between top-left " 300 | "and top-right corners of the calibration grid }" 301 | "{winSize |7 | Half of search window }"; 302 | CommandLineParser parser(argc, argv, keys); 303 | parser.about("camera calibration test program"); 304 | if (!parser.check()) { 305 | parser.printErrors(); 306 | return 0; 307 | } 308 | 309 | if (parser.has("help")) { 310 | help(); 311 | parser.printMessage(); 312 | return 0; 313 | } 314 | const string inputSettingsFile = parser.get(0); 315 | CALIB_MODE calib_mode = CALIB_MODE(parser.get("mode")); 316 | int winSize = parser.get("winSize"); 317 | 318 | //! [file_read] 319 | Settings s; 320 | FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings 321 | if (!fs.isOpened()) { 322 | cout << "Could not open the configuration file: \"" 323 | << inputSettingsFile << "\"" << endl; 324 | return -1; 325 | } 326 | fs["Settings"] >> s; 327 | fs.release(); // close Settings file 328 | //! [file_read] 329 | 330 | // FileStorage fout("settings.yml", FileStorage::WRITE); // write config 331 | // as YAML 332 | // fout << "Settings" << s; 333 | 334 | if (!s.goodInput) { 335 | cout << "Invalid input detected. Application stopping. 
" << endl; 336 | return -1; 337 | } 338 | 339 | float grid_width = s.squareSize * (s.boardSize.width - 1); 340 | if (parser.has("d")) { 341 | grid_width = parser.get("d"); 342 | } 343 | 344 | vector > imagePoints; 345 | Mat cameraMatrix, distCoeffs; 346 | Size imageSize; 347 | int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION; 348 | clock_t prevTimestamp = 0; 349 | const Scalar RED(0, 0, 255), GREEN(0, 255, 0); 350 | const char ESC_KEY = 27; 351 | 352 | //! [get_input] 353 | for (;;) { 354 | Mat view; 355 | bool blinkOutput = false; 356 | 357 | view = s.nextImage(); 358 | 359 | //----- If no more image, or got enough, then stop calibration and 360 | // show result ------------- 361 | if (mode == CAPTURING && imagePoints.size() >= (size_t)s.nrFrames) { 362 | if (runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, 363 | imagePoints, grid_width, calib_mode)) 364 | mode = CALIBRATED; 365 | else 366 | mode = DETECTION; 367 | } 368 | if (view.empty()) // If there are no more images stop the loop 369 | { 370 | // if calibration threshold was not reached yet, calibrate now 371 | if (mode != CALIBRATED && !imagePoints.empty()) 372 | runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, 373 | imagePoints, grid_width, calib_mode); 374 | break; 375 | } 376 | //! [get_input] 377 | 378 | imageSize = view.size(); // Format input image. 379 | if (s.flipVertical) 380 | flip(view, view, 0); 381 | 382 | //! [find_pattern] 383 | vector pointBuf; 384 | 385 | bool found; 386 | 387 | int chessBoardFlags 388 | = CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE; 389 | 390 | if (!s.useFisheye) { 391 | // fast check erroneously fails with high distortions like fisheye 392 | chessBoardFlags |= CALIB_CB_FAST_CHECK; 393 | } 394 | 395 | switch ( 396 | s.calibrationPattern) // Find feature points on the input format 397 | { 398 | case Settings::CHESSBOARD: 399 | found = findChessboardCorners( 400 | view, s.boardSize, pointBuf, chessBoardFlags); 401 | break; 402 | case Settings::CIRCLES_GRID: 403 | found = findCirclesGrid(view, s.boardSize, pointBuf); 404 | break; 405 | case Settings::ASYMMETRIC_CIRCLES_GRID: 406 | found = findCirclesGrid( 407 | view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID); 408 | break; 409 | default: 410 | found = false; 411 | break; 412 | } 413 | //! [find_pattern] 414 | //! [pattern_found] 415 | if (found) // If done with success, 416 | { 417 | // improve the found corners' coordinate accuracy for chessboard 418 | if (s.calibrationPattern == Settings::CHESSBOARD) { 419 | Mat viewGray; 420 | cvtColor(view, viewGray, COLOR_BGR2GRAY); 421 | cornerSubPix(viewGray, pointBuf, Size(winSize, winSize), 422 | Size(-1, -1), 423 | TermCriteria( 424 | TermCriteria::EPS + TermCriteria::COUNT, 30, 0.0001)); 425 | } 426 | 427 | if (mode == CAPTURING 428 | && // For camera only take new samples after delay time 429 | (!s.inputCapture.isOpened() 430 | || clock() - prevTimestamp 431 | > s.delay * 1e-3 * CLOCKS_PER_SEC)) { 432 | imagePoints.push_back(pointBuf); 433 | prevTimestamp = clock(); 434 | blinkOutput = s.inputCapture.isOpened(); 435 | } 436 | 437 | // Draw the corners. 438 | drawChessboardCorners(view, s.boardSize, Mat(pointBuf), found); 439 | } 440 | //! [pattern_found] 441 | //----------------------------- Output Text 442 | //------------------------------------------------ 443 | //! [output_text] 444 | string msg = (mode == CAPTURING) 445 | ? "100/100" 446 | : mode == CALIBRATED ? 
"Calibrated" : "Press 'g' to start"; 447 | int baseLine = 0; 448 | Size textSize = getTextSize(msg, 1, 1, 1, &baseLine); 449 | Point textOrigin(view.cols - 2 * textSize.width - 10, 450 | view.rows - 2 * baseLine - 10); 451 | 452 | if (mode == CAPTURING) { 453 | if (s.showUndistorsed) 454 | msg = format( 455 | "%d/%d Undist", (int)imagePoints.size(), s.nrFrames); 456 | else 457 | msg = format("%d/%d", (int)imagePoints.size(), s.nrFrames); 458 | } 459 | 460 | putText( 461 | view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED); 462 | 463 | if (blinkOutput) 464 | bitwise_not(view, view); 465 | //! [output_text] 466 | //------------------------- Video capture output undistorted 467 | //------------------------------ 468 | //! [output_undistorted] 469 | if (mode == CALIBRATED && s.showUndistorsed) { 470 | Mat temp = view.clone(); 471 | if (s.useFisheye) 472 | cv::fisheye::undistortImage( 473 | temp, view, cameraMatrix, distCoeffs); 474 | else 475 | undistort(temp, view, cameraMatrix, distCoeffs); 476 | } 477 | //! [output_undistorted] 478 | //------------------------------ Show image and check for input 479 | // commands ------------------- 480 | //! [await_input] 481 | imshow("Image View", view); 482 | char key = (char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay); 483 | 484 | if (key == ESC_KEY) 485 | break; 486 | 487 | if (key == 'u' && mode == CALIBRATED) 488 | s.showUndistorsed = !s.showUndistorsed; 489 | 490 | if (s.inputCapture.isOpened() && key == 'g') { 491 | mode = CAPTURING; 492 | imagePoints.clear(); 493 | } 494 | //! [await_input] 495 | } 496 | 497 | // -----------------------Show the undistorted image for the image list 498 | // ------------------------ 499 | //! [show_results] 500 | if (s.inputType == Settings::IMAGE_LIST && s.showUndistorsed) { 501 | Mat view, rview, map1, map2; 502 | 503 | if (s.useFisheye) { 504 | Mat newCamMat; 505 | fisheye::estimateNewCameraMatrixForUndistortRectify(cameraMatrix, 506 | distCoeffs, imageSize, Matx33d::eye(), newCamMat, 1); 507 | fisheye::initUndistortRectifyMap(cameraMatrix, distCoeffs, 508 | Matx33d::eye(), newCamMat, imageSize, CV_16SC2, map1, map2); 509 | } else { 510 | initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(), 511 | getOptimalNewCameraMatrix( 512 | cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0), 513 | imageSize, CV_16SC2, map1, map2); 514 | } 515 | 516 | for (size_t i = 0; i < s.imageList.size(); i++) { 517 | view = imread(s.imageList[i], IMREAD_COLOR); 518 | if (view.empty()) 519 | continue; 520 | remap(view, rview, map1, map2, INTER_LINEAR); 521 | imshow("Image View", rview); 522 | char c = (char)waitKey(); 523 | if (c == ESC_KEY || c == 'q' || c == 'Q') 524 | break; 525 | } 526 | } 527 | //! [show_results] 528 | 529 | return 0; 530 | } 531 | 532 | //! 
[compute_errors]
533 | static double computeReprojectionErrors(
534 |     const vector<vector<Point3f> >& objectPoints,
535 |     const vector<vector<Point2f> >& imagePoints, const vector<Mat>& rvecs,
536 |     const vector<Mat>& tvecs, const Mat& cameraMatrix, const Mat& distCoeffs,
537 |     vector<float>& perViewErrors, bool fisheye)
538 | {
539 |     vector<Point2f> imagePoints2;
540 |     size_t totalPoints = 0;
541 |     double totalErr = 0, err;
542 |     perViewErrors.resize(objectPoints.size());
543 | 
544 |     for (size_t i = 0; i < objectPoints.size(); ++i) {
545 |         if (fisheye) {
546 |             fisheye::projectPoints(objectPoints[i], imagePoints2, rvecs[i],
547 |                 tvecs[i], cameraMatrix, distCoeffs);
548 |         } else {
549 |             projectPoints(objectPoints[i], rvecs[i], tvecs[i], cameraMatrix,
550 |                 distCoeffs, imagePoints2);
551 |         }
552 |         err = norm(imagePoints[i], imagePoints2, NORM_L2);
553 | 
554 |         size_t n = objectPoints[i].size();
555 |         perViewErrors[i] = (float)std::sqrt(err * err / n);
556 |         totalErr += err * err;
557 |         totalPoints += n;
558 |     }
559 | 
560 |     return std::sqrt(totalErr / totalPoints);
561 | }
562 | //! [compute_errors]
563 | 
564 | //! [board_corners]
565 | static void calcBoardCornerPositions(Size boardSize, float squareSize,
566 |     vector<Point3f>& corners,
567 |     Settings::Pattern patternType /*= Settings::CHESSBOARD*/)
568 | {
569 |     corners.clear();
570 | 
571 |     switch (patternType) {
572 |     case Settings::CHESSBOARD:
573 |     case Settings::CIRCLES_GRID:
574 |         for (int i = 0; i < boardSize.height; ++i)
575 |             for (int j = 0; j < boardSize.width; ++j)
576 |                 corners.push_back(Point3f(j * squareSize, i * squareSize, 0));
577 |         break;
578 | 
579 |     case Settings::ASYMMETRIC_CIRCLES_GRID:
580 |         for (int i = 0; i < boardSize.height; i++)
581 |             for (int j = 0; j < boardSize.width; j++)
582 |                 corners.push_back(
583 |                     Point3f((2 * j + i % 2) * squareSize, i * squareSize, 0));
584 |         break;
585 |     default:
586 |         break;
587 |     }
588 | }
589 | //! [board_corners]
590 | static bool runCalibration(Settings& s, Size& imageSize, float grid_width,
591 |     Mat& cameraMatrix, Mat& distCoeffs, vector<vector<Point2f> > imagePoints,
592 |     vector<Mat>& rvecs, vector<Mat>& tvecs, vector<float>& reprojErrs,
593 |     double& totalAvgErr, vector<Point3f>& newObjPoints,
594 |     CALIB_MODE calib_mode = DLR11)
595 | {
596 |     //! [fixed_aspect]
597 |     cameraMatrix = Mat::eye(3, 3, CV_64F);
598 |     if (s.flag & CALIB_FIX_ASPECT_RATIO)
599 |         cameraMatrix.at<double>(0, 0) = s.aspectRatio;
600 |     //!
[fixed_aspect] 601 | if (s.useFisheye) { 602 | distCoeffs = Mat::zeros(4, 1, CV_64F); 603 | } else { 604 | distCoeffs = Mat::zeros(12, 1, CV_64F); 605 | } 606 | 607 | vector objectPoints; 608 | calcBoardCornerPositions( 609 | s.boardSize, s.squareSize, objectPoints, s.calibrationPattern); 610 | objectPoints[s.boardSize.width - 1].x = objectPoints[0].x + grid_width; 611 | 612 | // Find intrinsic and extrinsic camera parameters 613 | double rms; 614 | 615 | newObjPoints = objectPoints; 616 | vector > objectPointsArray( 617 | imagePoints.size(), objectPoints); 618 | if (s.useFisheye) { 619 | Mat _rvecs, _tvecs; 620 | rms = fisheye::calibrate(objectPoints, imagePoints, imageSize, 621 | cameraMatrix, distCoeffs, _rvecs, _tvecs, s.flag); 622 | 623 | rvecs.reserve(_rvecs.rows); 624 | tvecs.reserve(_tvecs.rows); 625 | for (int i = 0; i < int(objectPoints.size()); i++) { 626 | rvecs.push_back(_rvecs.row(i)); 627 | tvecs.push_back(_tvecs.row(i)); 628 | } 629 | } else { 630 | int64 t0 = getCPUTickCount(); 631 | switch (calib_mode) { 632 | case STD: 633 | rms = calibrateCamera(objectPointsArray, imagePoints, imageSize, 634 | cameraMatrix, distCoeffs, rvecs, tvecs, 635 | s.flag | CALIB_USE_LU); 636 | break; 637 | case HYBRID: 638 | rms = calibrateCamera(objectPointsArray, imagePoints, imageSize, 639 | cameraMatrix, distCoeffs, rvecs, tvecs, 640 | s.flag | CALIB_USE_LU); 641 | rms = calrel::calibrateCamera(imagePoints, imageSize, 642 | objectPoints, s.boardSize.width - 1, cameraMatrix, distCoeffs, 643 | rvecs, tvecs, newObjPoints, 644 | s.flag | CALIB_USE_LU | CALIB_USE_INTRINSIC_GUESS); 645 | objectPointsArray.clear(); 646 | objectPointsArray.resize(imagePoints.size(), newObjPoints); 647 | break; 648 | default: 649 | rms = calrel::calibrateCamera(imagePoints, imageSize, 650 | objectPoints, s.boardSize.width - 1, cameraMatrix, distCoeffs, 651 | rvecs, tvecs, newObjPoints, s.flag | CALIB_USE_LU); 652 | objectPointsArray.clear(); 653 | objectPointsArray.resize(imagePoints.size(), newObjPoints); 654 | } 655 | int64 t1 = getCPUTickCount(); 656 | cout << "Calibration phase execution time is " 657 | << (t1 - t0) / (double)getTickFrequency() << " seconds" << endl; 658 | cout << "New board corners: " << endl; 659 | cout << newObjPoints[0].x << " " << newObjPoints[0].y << " " 660 | << newObjPoints[0].z << endl; 661 | cout << newObjPoints[s.boardSize.width - 1].x << " " 662 | << newObjPoints[s.boardSize.width - 1].y << " " 663 | << newObjPoints[s.boardSize.width - 1].z << endl; 664 | cout << newObjPoints[s.boardSize.width * (s.boardSize.height - 1)].x 665 | << " " 666 | << newObjPoints[s.boardSize.width * (s.boardSize.height - 1)].y 667 | << " " 668 | << newObjPoints[s.boardSize.width * (s.boardSize.height - 1)].z 669 | << endl; 670 | cout << newObjPoints.back().x << " " << newObjPoints.back().y << " " 671 | << newObjPoints.back().z << endl; 672 | } 673 | 674 | cout << "Re-projection error reported by calibrateCamera: " << rms 675 | << endl; 676 | 677 | bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs); 678 | 679 | totalAvgErr = computeReprojectionErrors(objectPointsArray, imagePoints, 680 | rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs, s.useFisheye); 681 | 682 | return ok; 683 | } 684 | 685 | // Print camera parameters to the output file 686 | static void saveCameraParams(Settings& s, Size& imageSize, Mat& cameraMatrix, 687 | Mat& distCoeffs, const vector& rvecs, const vector& tvecs, 688 | const vector& reprojErrs, 689 | const vector >& imagePoints, double totalAvgErr, 690 | vector newObjPoints) 
691 | { 692 | FileStorage fs(s.outputFileName, FileStorage::WRITE); 693 | 694 | time_t tm; 695 | time(&tm); 696 | struct tm* t2 = localtime(&tm); 697 | char buf[1024]; 698 | strftime(buf, sizeof(buf), "%c", t2); 699 | 700 | fs << "calibration_time" << buf; 701 | 702 | if (!rvecs.empty() || !reprojErrs.empty()) 703 | fs << "nr_of_frames" 704 | << (int)std::max(rvecs.size(), reprojErrs.size()); 705 | fs << "image_width" << imageSize.width; 706 | fs << "image_height" << imageSize.height; 707 | fs << "board_width" << s.boardSize.width; 708 | fs << "board_height" << s.boardSize.height; 709 | fs << "square_size" << s.squareSize; 710 | 711 | if (s.flag & CALIB_FIX_ASPECT_RATIO) 712 | fs << "fix_aspect_ratio" << s.aspectRatio; 713 | 714 | if (s.flag) { 715 | std::stringstream flagsStringStream; 716 | if (s.useFisheye) { 717 | flagsStringStream 718 | << "flags:" 719 | << (s.flag & fisheye::CALIB_FIX_SKEW ? " +fix_skew" : "") 720 | << (s.flag & fisheye::CALIB_FIX_K1 ? " +fix_k1" : "") 721 | << (s.flag & fisheye::CALIB_FIX_K2 ? " +fix_k2" : "") 722 | << (s.flag & fisheye::CALIB_FIX_K3 ? " +fix_k3" : "") 723 | << (s.flag & fisheye::CALIB_FIX_K4 ? " +fix_k4" : "") 724 | << (s.flag & fisheye::CALIB_RECOMPUTE_EXTRINSIC 725 | ? " +recompute_extrinsic" 726 | : ""); 727 | } else { 728 | flagsStringStream 729 | << "flags:" << (s.flag & CALIB_USE_INTRINSIC_GUESS 730 | ? " +use_intrinsic_guess" 731 | : "") 732 | << (s.flag & CALIB_FIX_ASPECT_RATIO ? " +fix_aspectRatio" 733 | : "") 734 | << (s.flag & CALIB_FIX_PRINCIPAL_POINT 735 | ? " +fix_principal_point" 736 | : "") 737 | << (s.flag & CALIB_ZERO_TANGENT_DIST ? " +zero_tangent_dist" 738 | : "") 739 | << (s.flag & CALIB_FIX_K1 ? " +fix_k1" : "") 740 | << (s.flag & CALIB_FIX_K2 ? " +fix_k2" : "") 741 | << (s.flag & CALIB_FIX_K3 ? " +fix_k3" : "") 742 | << (s.flag & CALIB_FIX_K4 ? " +fix_k4" : "") 743 | << (s.flag & CALIB_FIX_K5 ? " +fix_k5" : ""); 744 | } 745 | fs.writeComment(flagsStringStream.str()); 746 | } 747 | 748 | fs << "flags" << s.flag; 749 | 750 | fs << "fisheye_model" << s.useFisheye; 751 | 752 | fs << "camera_matrix" << cameraMatrix; 753 | fs << "distortion_coefficients" << distCoeffs; 754 | 755 | fs << "avg_reprojection_error" << totalAvgErr; 756 | if (s.writeExtrinsics && !reprojErrs.empty()) 757 | fs << "per_view_reprojection_errors" << Mat(reprojErrs); 758 | 759 | if (s.writeExtrinsics && !rvecs.empty() && !tvecs.empty()) { 760 | CV_Assert(rvecs[0].type() == tvecs[0].type()); 761 | Mat bigmat((int)rvecs.size(), 6, CV_MAKETYPE(rvecs[0].type(), 1)); 762 | bool needReshapeR = rvecs[0].depth() != 1 ? true : false; 763 | bool needReshapeT = tvecs[0].depth() != 1 ? 
true : false; 764 | 765 | for (size_t i = 0; i < rvecs.size(); i++) { 766 | Mat r = bigmat(Range(int(i), int(i + 1)), Range(0, 3)); 767 | Mat t = bigmat(Range(int(i), int(i + 1)), Range(3, 6)); 768 | 769 | if (needReshapeR) 770 | rvecs[i].reshape(1, 1).copyTo(r); 771 | else { 772 | //*.t() is MatExpr (not Mat) so we can use assignment operator 773 | CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1); 774 | r = rvecs[i].t(); 775 | } 776 | 777 | if (needReshapeT) 778 | tvecs[i].reshape(1, 1).copyTo(t); 779 | else { 780 | CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1); 781 | t = tvecs[i].t(); 782 | } 783 | } 784 | fs.writeComment("a set of 6-tuples (rotation vector + translation " 785 | "vector) for each view"); 786 | fs << "extrinsic_parameters" << bigmat; 787 | } 788 | 789 | if (s.writePoints && !imagePoints.empty()) { 790 | Mat imagePtMat( 791 | (int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2); 792 | for (size_t i = 0; i < imagePoints.size(); i++) { 793 | Mat r = imagePtMat.row(int(i)).reshape(2, imagePtMat.cols); 794 | Mat imgpti(imagePoints[i]); 795 | imgpti.copyTo(r); 796 | } 797 | fs << "image_points" << imagePtMat; 798 | } 799 | 800 | if (s.writeGrid && !newObjPoints.empty()) { 801 | fs << "grid_points" << newObjPoints; 802 | } 803 | } 804 | 805 | //! [run_and_save] 806 | bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, 807 | Mat& distCoeffs, vector > imagePoints, float grid_width, 808 | CALIB_MODE calib_mode) 809 | { 810 | vector rvecs, tvecs; 811 | vector reprojErrs; 812 | double totalAvgErr = 0; 813 | 814 | vector newObjPoints; 815 | bool ok = runCalibration(s, imageSize, grid_width, cameraMatrix, 816 | distCoeffs, imagePoints, rvecs, tvecs, reprojErrs, totalAvgErr, 817 | newObjPoints, calib_mode); 818 | cout << (ok ? "Calibration succeeded" : "Calibration failed") 819 | << ". avg re projection error = " << totalAvgErr << endl; 820 | 821 | if (ok) 822 | saveCameraParams(s, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, 823 | reprojErrs, imagePoints, totalAvgErr, newObjPoints); 824 | return ok; 825 | } 826 | //! [run_and_save] 827 | -------------------------------------------------------------------------------- /src/calibrel.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 3 | 4 | By downloading, copying, installing or using the software you agree 5 | to this license. If you do not agree to this license, do not 6 | download, install, copy or use the software. 7 | 8 | License Agreement 9 | For Open Source Computer Vision Library 10 | 11 | Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 12 | Copyright (C) 2009, Willow Garage Inc., all rights reserved. 13 | Copyright (C) 2018, Wenfeng CAI, all rights reserved. 14 | Third party copyrights are property of their respective owners. 15 | 16 | Redistribution and use in source and binary forms, with or without 17 | modification, are permitted provided that the following conditions 18 | are met: 19 | 20 | * Redistribution's of source code must retain the above copyright 21 | notice, this list of conditions and the following disclaimer. 22 | 23 | * Redistribution's in binary form must reproduce the above 24 | copyright notice, this list of conditions and the following 25 | disclaimer in the documentation and/or other materials provided 26 | with the distribution. 
27 | 28 | * The name of the copyright holders may not be used to endorse or 29 | promote products derived from this software without specific 30 | prior written permission. 31 | 32 | This software is provided by the copyright holders and contributors 33 | "as is" and any express or implied warranties, including, but not 34 | limited to, the implied warranties of merchantability and fitness 35 | for a particular purpose are disclaimed. In no event shall the 36 | Intel Corporation or contributors be liable for any direct, 37 | indirect, incidental, special, exemplary, or consequential damages 38 | (including, but not limited to, procurement of substitute goods or 39 | services; loss of use, data, or profits; or business interruption) 40 | however caused and on any theory of liability, whether in contract, 41 | strict liability, or tort (including negligence or otherwise) 42 | arising in any way out of the use of this software, even if advised 43 | of the possibility of such damage. 44 | */ 45 | 46 | #include "calibrel/calibrel.hpp" 47 | 48 | /* 49 | This is (in a large extent) based on the paper: 50 | Klaus H. Strobl and Gerd Hirzinger. "More Accurate Pinhole Camera 51 | Calibration with Imperfect Planar Target". 52 | 2011 IEEE International Conference on Computer Vision Workshops. 53 | */ 54 | 55 | namespace calrel { 56 | 57 | using namespace cv; 58 | 59 | static const char* cvDistCoeffErr = "Distortion coefficients must be 1x4, " 60 | "4x1, 1x5, 5x1, 1x8, 8x1, 1x12, 12x1, " 61 | "1x14 or 14x1 floating-point vector"; 62 | 63 | static void projectPoints2(const CvMat* objectPoints, const CvMat* r_vec, 64 | const CvMat* t_vec, const CvMat* A, const CvMat* distCoeffs, 65 | CvMat* imagePoints, CvMat* dpdr = NULL, CvMat* dpdt = NULL, 66 | CvMat* dpdf = NULL, CvMat* dpdc = NULL, CvMat* dpdk = NULL, 67 | CvMat* dpdo = NULL, double aspectRatio = 0) 68 | { 69 | Ptr matM, _m; 70 | Ptr _dpdr, _dpdt, _dpdc, _dpdf, _dpdk, _dpdo; 71 | 72 | int i, j, count; 73 | int calc_derivatives; 74 | const CvPoint3D64f* M; 75 | CvPoint2D64f* m; 76 | double r[3], R[9], dRdr[27], t[3], a[9], 77 | k[14] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, fx, fy, cx, cy; 78 | Matx33d matTilt = Matx33d::eye(); 79 | Matx33d dMatTiltdTauX(0, 0, 0, 0, 0, 0, 0, -1, 0); 80 | Matx33d dMatTiltdTauY(0, 0, 0, 0, 0, 0, 1, 0, 0); 81 | CvMat _r, _t, _a = cvMat(3, 3, CV_64F, a), _k; 82 | CvMat matR = cvMat(3, 3, CV_64F, R), _dRdr = cvMat(3, 9, CV_64F, dRdr); 83 | double *dpdr_p = 0, *dpdt_p = 0, *dpdk_p = 0, *dpdf_p = 0, *dpdc_p = 0, 84 | *dpdo_p = 0; 85 | int dpdr_step = 0, dpdt_step = 0, dpdk_step = 0, dpdf_step = 0, 86 | dpdc_step = 0, dpdo_step = 0; 87 | bool fixedAspectRatio = aspectRatio > FLT_EPSILON; 88 | 89 | if (!CV_IS_MAT(objectPoints) || !CV_IS_MAT(r_vec) || !CV_IS_MAT(t_vec) 90 | || !CV_IS_MAT(A) || 91 | /*!CV_IS_MAT(distCoeffs) ||*/ !CV_IS_MAT(imagePoints)) 92 | CV_Error( 93 | CV_StsBadArg, "One of required arguments is not a valid matrix"); 94 | 95 | int total = objectPoints->rows * objectPoints->cols 96 | * CV_MAT_CN(objectPoints->type); 97 | if (total % 3 != 0) { 98 | // we have stopped support of homogeneous coordinates because it cause 99 | // ambiguity in interpretation of the input data 100 | CV_Error(CV_StsBadArg, "Homogeneous coordinates are not supported"); 101 | } 102 | count = total / 3; 103 | 104 | if (CV_IS_CONT_MAT(objectPoints->type) 105 | && (CV_MAT_DEPTH(objectPoints->type) == CV_32F 106 | || CV_MAT_DEPTH(objectPoints->type) == CV_64F) 107 | && ((objectPoints->rows == 1 && CV_MAT_CN(objectPoints->type) == 3) 
108 | || (objectPoints->rows == count 109 | && CV_MAT_CN(objectPoints->type) * objectPoints->cols 110 | == 3) 111 | || (objectPoints->rows == 3 112 | && CV_MAT_CN(objectPoints->type) == 1 113 | && objectPoints->cols == count))) { 114 | matM.reset(cvCreateMat(objectPoints->rows, objectPoints->cols, 115 | CV_MAKETYPE(CV_64F, CV_MAT_CN(objectPoints->type)))); 116 | cvConvert(objectPoints, matM); 117 | } else { 118 | // matM = cvCreateMat( 1, count, CV_64FC3 ); 119 | // cvConvertPointsHomogeneous( objectPoints, matM ); 120 | CV_Error(CV_StsBadArg, "Homogeneous coordinates are not supported"); 121 | } 122 | 123 | if (CV_IS_CONT_MAT(imagePoints->type) 124 | && (CV_MAT_DEPTH(imagePoints->type) == CV_32F 125 | || CV_MAT_DEPTH(imagePoints->type) == CV_64F) 126 | && ((imagePoints->rows == 1 && CV_MAT_CN(imagePoints->type) == 2) 127 | || (imagePoints->rows == count 128 | && CV_MAT_CN(imagePoints->type) * imagePoints->cols 129 | == 2) 130 | || (imagePoints->rows == 2 && CV_MAT_CN(imagePoints->type) == 1 131 | && imagePoints->cols == count))) { 132 | _m.reset(cvCreateMat(imagePoints->rows, imagePoints->cols, 133 | CV_MAKETYPE(CV_64F, CV_MAT_CN(imagePoints->type)))); 134 | cvConvert(imagePoints, _m); 135 | } else { 136 | // _m = cvCreateMat( 1, count, CV_64FC2 ); 137 | CV_Error(CV_StsBadArg, "Homogeneous coordinates are not supported"); 138 | } 139 | 140 | M = (CvPoint3D64f*)matM->data.db; 141 | m = (CvPoint2D64f*)_m->data.db; 142 | 143 | if ((CV_MAT_DEPTH(r_vec->type) != CV_64F 144 | && CV_MAT_DEPTH(r_vec->type) != CV_32F) 145 | || (((r_vec->rows != 1 && r_vec->cols != 1) 146 | || r_vec->rows * r_vec->cols * CV_MAT_CN(r_vec->type) != 3) 147 | && ((r_vec->rows != 3 && r_vec->cols != 3) 148 | || CV_MAT_CN(r_vec->type) != 1))) 149 | CV_Error(CV_StsBadArg, 150 | "Rotation must be represented by 1x3 or 3x1 " 151 | "floating-point rotation vector, or 3x3 rotation matrix"); 152 | 153 | if (r_vec->rows == 3 && r_vec->cols == 3) { 154 | _r = cvMat(3, 1, CV_64FC1, r); 155 | cvRodrigues2(r_vec, &_r); 156 | cvRodrigues2(&_r, &matR, &_dRdr); 157 | cvCopy(r_vec, &matR); 158 | } else { 159 | _r = cvMat(r_vec->rows, r_vec->cols, 160 | CV_MAKETYPE(CV_64F, CV_MAT_CN(r_vec->type)), r); 161 | cvConvert(r_vec, &_r); 162 | cvRodrigues2(&_r, &matR, &_dRdr); 163 | } 164 | 165 | if ((CV_MAT_DEPTH(t_vec->type) != CV_64F 166 | && CV_MAT_DEPTH(t_vec->type) != CV_32F) 167 | || (t_vec->rows != 1 && t_vec->cols != 1) 168 | || t_vec->rows * t_vec->cols * CV_MAT_CN(t_vec->type) != 3) 169 | CV_Error(CV_StsBadArg, 170 | "Translation vector must be 1x3 or 3x1 floating-point vector"); 171 | 172 | _t = cvMat(t_vec->rows, t_vec->cols, 173 | CV_MAKETYPE(CV_64F, CV_MAT_CN(t_vec->type)), t); 174 | cvConvert(t_vec, &_t); 175 | 176 | if ((CV_MAT_TYPE(A->type) != CV_64FC1 && CV_MAT_TYPE(A->type) != CV_32FC1) 177 | || A->rows != 3 || A->cols != 3) 178 | CV_Error(CV_StsBadArg, 179 | "Instrinsic parameters must be 3x3 floating-point matrix"); 180 | 181 | cvConvert(A, &_a); 182 | fx = a[0]; 183 | fy = a[4]; 184 | cx = a[2]; 185 | cy = a[5]; 186 | 187 | if (fixedAspectRatio) 188 | fx = fy * aspectRatio; 189 | 190 | if (distCoeffs) { 191 | if (!CV_IS_MAT(distCoeffs) 192 | || (CV_MAT_DEPTH(distCoeffs->type) != CV_64F 193 | && CV_MAT_DEPTH(distCoeffs->type) != CV_32F) 194 | || (distCoeffs->rows != 1 && distCoeffs->cols != 1) 195 | || (distCoeffs->rows * distCoeffs->cols 196 | * CV_MAT_CN(distCoeffs->type) 197 | != 4 198 | && distCoeffs->rows * distCoeffs->cols 199 | * CV_MAT_CN(distCoeffs->type) 200 | != 5 201 | && distCoeffs->rows * distCoeffs->cols 202 
| * CV_MAT_CN(distCoeffs->type) 203 | != 8 204 | && distCoeffs->rows * distCoeffs->cols 205 | * CV_MAT_CN(distCoeffs->type) 206 | != 12 207 | && distCoeffs->rows * distCoeffs->cols 208 | * CV_MAT_CN(distCoeffs->type) 209 | != 14)) 210 | CV_Error(CV_StsBadArg, cvDistCoeffErr); 211 | 212 | _k = cvMat(distCoeffs->rows, distCoeffs->cols, 213 | CV_MAKETYPE(CV_64F, CV_MAT_CN(distCoeffs->type)), k); 214 | cvConvert(distCoeffs, &_k); 215 | if (k[12] != 0 || k[13] != 0) { 216 | detail::computeTiltProjectionMatrix( 217 | k[12], k[13], &matTilt, &dMatTiltdTauX, &dMatTiltdTauY); 218 | } 219 | } 220 | 221 | if (dpdr) { 222 | if (!CV_IS_MAT(dpdr) || (CV_MAT_TYPE(dpdr->type) != CV_32FC1 223 | && CV_MAT_TYPE(dpdr->type) != CV_64FC1) 224 | || dpdr->rows != count * 2 || dpdr->cols != 3) 225 | CV_Error( 226 | CV_StsBadArg, "dp/drot must be 2Nx3 floating-point matrix"); 227 | 228 | if (CV_MAT_TYPE(dpdr->type) == CV_64FC1) { 229 | _dpdr.reset(cvCloneMat(dpdr)); 230 | } else 231 | _dpdr.reset(cvCreateMat(2 * count, 3, CV_64FC1)); 232 | dpdr_p = _dpdr->data.db; 233 | dpdr_step = _dpdr->step / sizeof(dpdr_p[0]); 234 | } 235 | 236 | if (dpdt) { 237 | if (!CV_IS_MAT(dpdt) || (CV_MAT_TYPE(dpdt->type) != CV_32FC1 238 | && CV_MAT_TYPE(dpdt->type) != CV_64FC1) 239 | || dpdt->rows != count * 2 || dpdt->cols != 3) 240 | CV_Error( 241 | CV_StsBadArg, "dp/dT must be 2Nx3 floating-point matrix"); 242 | 243 | if (CV_MAT_TYPE(dpdt->type) == CV_64FC1) { 244 | _dpdt.reset(cvCloneMat(dpdt)); 245 | } else 246 | _dpdt.reset(cvCreateMat(2 * count, 3, CV_64FC1)); 247 | dpdt_p = _dpdt->data.db; 248 | dpdt_step = _dpdt->step / sizeof(dpdt_p[0]); 249 | } 250 | 251 | if (dpdf) { 252 | if (!CV_IS_MAT(dpdf) || (CV_MAT_TYPE(dpdf->type) != CV_32FC1 253 | && CV_MAT_TYPE(dpdf->type) != CV_64FC1) 254 | || dpdf->rows != count * 2 || dpdf->cols != 2) 255 | CV_Error( 256 | CV_StsBadArg, "dp/df must be 2Nx2 floating-point matrix"); 257 | 258 | if (CV_MAT_TYPE(dpdf->type) == CV_64FC1) { 259 | _dpdf.reset(cvCloneMat(dpdf)); 260 | } else 261 | _dpdf.reset(cvCreateMat(2 * count, 2, CV_64FC1)); 262 | dpdf_p = _dpdf->data.db; 263 | dpdf_step = _dpdf->step / sizeof(dpdf_p[0]); 264 | } 265 | 266 | if (dpdc) { 267 | if (!CV_IS_MAT(dpdc) || (CV_MAT_TYPE(dpdc->type) != CV_32FC1 268 | && CV_MAT_TYPE(dpdc->type) != CV_64FC1) 269 | || dpdc->rows != count * 2 || dpdc->cols != 2) 270 | CV_Error( 271 | CV_StsBadArg, "dp/dc must be 2Nx2 floating-point matrix"); 272 | 273 | if (CV_MAT_TYPE(dpdc->type) == CV_64FC1) { 274 | _dpdc.reset(cvCloneMat(dpdc)); 275 | } else 276 | _dpdc.reset(cvCreateMat(2 * count, 2, CV_64FC1)); 277 | dpdc_p = _dpdc->data.db; 278 | dpdc_step = _dpdc->step / sizeof(dpdc_p[0]); 279 | } 280 | 281 | if (dpdk) { 282 | if (!CV_IS_MAT(dpdk) || (CV_MAT_TYPE(dpdk->type) != CV_32FC1 283 | && CV_MAT_TYPE(dpdk->type) != CV_64FC1) 284 | || dpdk->rows != count * 2 285 | || (dpdk->cols != 14 && dpdk->cols != 12 && dpdk->cols != 8 286 | && dpdk->cols != 5 && dpdk->cols != 4 && dpdk->cols != 2)) 287 | CV_Error(CV_StsBadArg, "dp/df must be 2Nx14, 2Nx12, 2Nx8, 2Nx5, " 288 | "2Nx4 or 2Nx2 floating-point matrix"); 289 | 290 | if (!distCoeffs) 291 | CV_Error(CV_StsNullPtr, "distCoeffs is NULL while dpdk is not"); 292 | 293 | if (CV_MAT_TYPE(dpdk->type) == CV_64FC1) { 294 | _dpdk.reset(cvCloneMat(dpdk)); 295 | } else 296 | _dpdk.reset(cvCreateMat(dpdk->rows, dpdk->cols, CV_64FC1)); 297 | dpdk_p = _dpdk->data.db; 298 | dpdk_step = _dpdk->step / sizeof(dpdk_p[0]); 299 | } 300 | 301 | if (dpdo) { 302 | if (!CV_IS_MAT(dpdo) || (CV_MAT_TYPE(dpdo->type) != CV_32FC1 303 
| && CV_MAT_TYPE(dpdo->type) != CV_64FC1) 304 | || dpdo->rows != count * 2 || dpdo->cols != count * 3) 305 | CV_Error( 306 | CV_StsBadArg, "dp/do must be 2Nx3N floating-point matrix"); 307 | 308 | if (CV_MAT_TYPE(dpdo->type) == CV_64FC1) { 309 | _dpdo.reset(cvCloneMat(dpdo)); 310 | } else 311 | _dpdo.reset(cvCreateMat(2 * count, 3 * count, CV_64FC1)); 312 | dpdo_p = _dpdo->data.db; 313 | dpdo_step = _dpdo->step / sizeof(dpdo_p[0]); 314 | } 315 | 316 | calc_derivatives = dpdr || dpdt || dpdf || dpdc || dpdk || dpdo; 317 | 318 | for (i = 0; i < count; i++) { 319 | double X = M[i].x, Y = M[i].y, Z = M[i].z; 320 | double x = R[0] * X + R[1] * Y + R[2] * Z + t[0]; 321 | double y = R[3] * X + R[4] * Y + R[5] * Z + t[1]; 322 | double z = R[6] * X + R[7] * Y + R[8] * Z + t[2]; 323 | double r2, r4, r6, a1, a2, a3, cdist, icdist2; 324 | double xd, yd, xd0, yd0, invProj; 325 | Vec3d vecTilt; 326 | Vec3d dVecTilt; 327 | Matx22d dMatTilt; 328 | Vec2d dXdYd; 329 | 330 | double z0 = z; 331 | z = z ? 1. / z : 1; 332 | x *= z; 333 | y *= z; 334 | 335 | r2 = x * x + y * y; 336 | r4 = r2 * r2; 337 | r6 = r4 * r2; 338 | a1 = 2 * x * y; 339 | a2 = r2 + 2 * x * x; 340 | a3 = r2 + 2 * y * y; 341 | cdist = 1 + k[0] * r2 + k[1] * r4 + k[4] * r6; 342 | icdist2 = 1. / (1 + k[5] * r2 + k[6] * r4 + k[7] * r6); 343 | xd0 = x * cdist * icdist2 + k[2] * a1 + k[3] * a2 + k[8] * r2 344 | + k[9] * r4; 345 | yd0 = y * cdist * icdist2 + k[2] * a3 + k[3] * a1 + k[10] * r2 346 | + k[11] * r4; 347 | 348 | // additional distortion by projecting onto a tilt plane 349 | vecTilt = matTilt * Vec3d(xd0, yd0, 1); 350 | invProj = vecTilt(2) ? 1. / vecTilt(2) : 1; 351 | xd = invProj * vecTilt(0); 352 | yd = invProj * vecTilt(1); 353 | 354 | m[i].x = xd * fx + cx; 355 | m[i].y = yd * fy + cy; 356 | 357 | if (calc_derivatives) { 358 | if (dpdc_p) { 359 | dpdc_p[0] = 1; 360 | dpdc_p[1] = 0; // dp_xdc_x; dp_xdc_y 361 | dpdc_p[dpdc_step] = 0; 362 | dpdc_p[dpdc_step + 1] = 1; 363 | dpdc_p += dpdc_step * 2; 364 | } 365 | 366 | if (dpdf_p) { 367 | if (fixedAspectRatio) { 368 | dpdf_p[0] = 0; 369 | dpdf_p[1] = xd * aspectRatio; // dp_xdf_x; dp_xdf_y 370 | dpdf_p[dpdf_step] = 0; 371 | dpdf_p[dpdf_step + 1] = yd; 372 | } else { 373 | dpdf_p[0] = xd; 374 | dpdf_p[1] = 0; 375 | dpdf_p[dpdf_step] = 0; 376 | dpdf_p[dpdf_step + 1] = yd; 377 | } 378 | dpdf_p += dpdf_step * 2; 379 | } 380 | for (int row = 0; row < 2; ++row) 381 | for (int col = 0; col < 2; ++col) 382 | dMatTilt(row, col) = matTilt(row, col) * vecTilt(2) 383 | - matTilt(2, col) * vecTilt(row); 384 | double invProjSquare = (invProj * invProj); 385 | dMatTilt *= invProjSquare; 386 | if (dpdk_p) { 387 | dXdYd = dMatTilt * Vec2d(x * icdist2 * r2, y * icdist2 * r2); 388 | dpdk_p[0] = fx * dXdYd(0); 389 | dpdk_p[dpdk_step] = fy * dXdYd(1); 390 | dXdYd = dMatTilt * Vec2d(x * icdist2 * r4, y * icdist2 * r4); 391 | dpdk_p[1] = fx * dXdYd(0); 392 | dpdk_p[dpdk_step + 1] = fy * dXdYd(1); 393 | if (_dpdk->cols > 2) { 394 | dXdYd = dMatTilt * Vec2d(a1, a3); 395 | dpdk_p[2] = fx * dXdYd(0); 396 | dpdk_p[dpdk_step + 2] = fy * dXdYd(1); 397 | dXdYd = dMatTilt * Vec2d(a2, a1); 398 | dpdk_p[3] = fx * dXdYd(0); 399 | dpdk_p[dpdk_step + 3] = fy * dXdYd(1); 400 | if (_dpdk->cols > 4) { 401 | dXdYd = dMatTilt 402 | * Vec2d(x * icdist2 * r6, y * icdist2 * r6); 403 | dpdk_p[4] = fx * dXdYd(0); 404 | dpdk_p[dpdk_step + 4] = fy * dXdYd(1); 405 | 406 | if (_dpdk->cols > 5) { 407 | dXdYd = dMatTilt 408 | * Vec2d(x * cdist * (-icdist2) * icdist2 * r2, 409 | y * cdist * (-icdist2) * icdist2 * r2); 410 | dpdk_p[5] = fx 
* dXdYd(0); 411 | dpdk_p[dpdk_step + 5] = fy * dXdYd(1); 412 | dXdYd = dMatTilt 413 | * Vec2d(x * cdist * (-icdist2) * icdist2 * r4, 414 | y * cdist * (-icdist2) * icdist2 * r4); 415 | dpdk_p[6] = fx * dXdYd(0); 416 | dpdk_p[dpdk_step + 6] = fy * dXdYd(1); 417 | dXdYd = dMatTilt 418 | * Vec2d(x * cdist * (-icdist2) * icdist2 * r6, 419 | y * cdist * (-icdist2) * icdist2 * r6); 420 | dpdk_p[7] = fx * dXdYd(0); 421 | dpdk_p[dpdk_step + 7] = fy * dXdYd(1); 422 | if (_dpdk->cols > 8) { 423 | dXdYd = dMatTilt * Vec2d(r2, 0); 424 | dpdk_p[8] = fx * dXdYd(0); // s1 425 | dpdk_p[dpdk_step + 8] = fy * dXdYd(1); // s1 426 | dXdYd = dMatTilt * Vec2d(r4, 0); 427 | dpdk_p[9] = fx * dXdYd(0); // s2 428 | dpdk_p[dpdk_step + 9] = fy * dXdYd(1); // s2 429 | dXdYd = dMatTilt * Vec2d(0, r2); 430 | dpdk_p[10] = fx * dXdYd(0); // s3 431 | dpdk_p[dpdk_step + 10] = fy * dXdYd(1); // s3 432 | dXdYd = dMatTilt * Vec2d(0, r4); 433 | dpdk_p[11] = fx * dXdYd(0); // s4 434 | dpdk_p[dpdk_step + 11] = fy * dXdYd(1); // s4 435 | if (_dpdk->cols > 12) { 436 | dVecTilt 437 | = dMatTiltdTauX * Vec3d(xd0, yd0, 1); 438 | dpdk_p[12] = fx * invProjSquare 439 | * (dVecTilt(0) * vecTilt(2) 440 | - dVecTilt(2) * vecTilt(0)); 441 | dpdk_p[dpdk_step + 12] = fy 442 | * invProjSquare 443 | * (dVecTilt(1) * vecTilt(2) 444 | - dVecTilt(2) * vecTilt(1)); 445 | dVecTilt 446 | = dMatTiltdTauY * Vec3d(xd0, yd0, 1); 447 | dpdk_p[13] = fx * invProjSquare 448 | * (dVecTilt(0) * vecTilt(2) 449 | - dVecTilt(2) * vecTilt(0)); 450 | dpdk_p[dpdk_step + 13] = fy 451 | * invProjSquare 452 | * (dVecTilt(1) * vecTilt(2) 453 | - dVecTilt(2) * vecTilt(1)); 454 | } 455 | } 456 | } 457 | } 458 | } 459 | dpdk_p += dpdk_step * 2; 460 | } 461 | 462 | if (dpdt_p) { 463 | double dxdt[] = { z, 0, -x * z }, dydt[] = { 0, z, -y * z }; 464 | for (j = 0; j < 3; j++) { 465 | double dr2dt = 2 * x * dxdt[j] + 2 * y * dydt[j]; 466 | double dcdist_dt = k[0] * dr2dt + 2 * k[1] * r2 * dr2dt 467 | + 3 * k[4] * r4 * dr2dt; 468 | double dicdist2_dt = -icdist2 * icdist2 469 | * (k[5] * dr2dt + 2 * k[6] * r2 * dr2dt 470 | + 3 * k[7] * r4 * dr2dt); 471 | double da1dt = 2 * (x * dydt[j] + y * dxdt[j]); 472 | double dmxdt = (dxdt[j] * cdist * icdist2 473 | + x * dcdist_dt * icdist2 + x * cdist * dicdist2_dt 474 | + k[2] * da1dt + k[3] * (dr2dt + 4 * x * dxdt[j]) 475 | + k[8] * dr2dt + 2 * r2 * k[9] * dr2dt); 476 | double dmydt = (dydt[j] * cdist * icdist2 477 | + y * dcdist_dt * icdist2 + y * cdist * dicdist2_dt 478 | + k[2] * (dr2dt + 4 * y * dydt[j]) + k[3] * da1dt 479 | + k[10] * dr2dt + 2 * r2 * k[11] * dr2dt); 480 | dXdYd = dMatTilt * Vec2d(dmxdt, dmydt); 481 | dpdt_p[j] = fx * dXdYd(0); 482 | dpdt_p[dpdt_step + j] = fy * dXdYd(1); 483 | } 484 | dpdt_p += dpdt_step * 2; 485 | } 486 | 487 | if (dpdr_p) { 488 | double dx0dr[] = { X * dRdr[0] + Y * dRdr[1] + Z * dRdr[2], 489 | X * dRdr[9] + Y * dRdr[10] + Z * dRdr[11], 490 | X * dRdr[18] + Y * dRdr[19] + Z * dRdr[20] }; 491 | double dy0dr[] = { X * dRdr[3] + Y * dRdr[4] + Z * dRdr[5], 492 | X * dRdr[12] + Y * dRdr[13] + Z * dRdr[14], 493 | X * dRdr[21] + Y * dRdr[22] + Z * dRdr[23] }; 494 | double dz0dr[] = { X * dRdr[6] + Y * dRdr[7] + Z * dRdr[8], 495 | X * dRdr[15] + Y * dRdr[16] + Z * dRdr[17], 496 | X * dRdr[24] + Y * dRdr[25] + Z * dRdr[26] }; 497 | for (j = 0; j < 3; j++) { 498 | double dxdr = z * (dx0dr[j] - x * dz0dr[j]); 499 | double dydr = z * (dy0dr[j] - y * dz0dr[j]); 500 | double dr2dr = 2 * x * dxdr + 2 * y * dydr; 501 | double dcdist_dr 502 | = (k[0] + 2 * k[1] * r2 + 3 * k[4] * r4) * dr2dr; 503 | double 
dicdist2_dr = -icdist2 * icdist2 504 | * (k[5] + 2 * k[6] * r2 + 3 * k[7] * r4) * dr2dr; 505 | double da1dr = 2 * (x * dydr + y * dxdr); 506 | double dmxdr = (dxdr * cdist * icdist2 507 | + x * dcdist_dr * icdist2 + x * cdist * dicdist2_dr 508 | + k[2] * da1dr + k[3] * (dr2dr + 4 * x * dxdr) 509 | + (k[8] + 2 * r2 * k[9]) * dr2dr); 510 | double dmydr = (dydr * cdist * icdist2 511 | + y * dcdist_dr * icdist2 + y * cdist * dicdist2_dr 512 | + k[2] * (dr2dr + 4 * y * dydr) + k[3] * da1dr 513 | + (k[10] + 2 * r2 * k[11]) * dr2dr); 514 | dXdYd = dMatTilt * Vec2d(dmxdr, dmydr); 515 | dpdr_p[j] = fx * dXdYd(0); 516 | dpdr_p[dpdr_step + j] = fy * dXdYd(1); 517 | } 518 | dpdr_p += dpdr_step * 2; 519 | } 520 | 521 | if (dpdo_p) { 522 | double dxdo[] = { z * (R[0] - x * z * z0 * R[6]), 523 | z * (R[1] - x * z * z0 * R[7]), 524 | z * (R[2] - x * z * z0 * R[8]) }; 525 | double dydo[] = { z * (R[3] - y * z * z0 * R[6]), 526 | z * (R[4] - y * z * z0 * R[7]), 527 | z * (R[5] - y * z * z0 * R[8]) }; 528 | for (j = 0; j < 3; j++) { 529 | double dr2do = 2 * x * dxdo[j] + 2 * y * dydo[j]; 530 | double dr4do = 2 * r2 * dr2do; 531 | double dr6do = 3 * r4 * dr2do; 532 | double da1do = 2 * y * dxdo[j] + 2 * x * dydo[j]; 533 | double da2do = dr2do + 4 * x * dxdo[j]; 534 | double da3do = dr2do + 4 * y * dydo[j]; 535 | double dcdist_do 536 | = k[0] * dr2do + k[1] * dr4do + k[4] * dr6do; 537 | double dicdist2_do = -icdist2 * icdist2 538 | * (k[5] * dr2do + k[6] * dr4do + k[7] * dr6do); 539 | double dxd0_do = cdist * icdist2 * dxdo[j] 540 | + x * icdist2 * dcdist_do + x * cdist * dicdist2_do 541 | + k[2] * da1do + k[3] * da2do + k[8] * dr2do 542 | + k[9] * dr4do; 543 | double dyd0_do = cdist * icdist2 * dydo[j] 544 | + y * icdist2 * dcdist_do + y * cdist * dicdist2_do 545 | + k[2] * da3do + k[3] * da1do + k[10] * dr2do 546 | + k[11] * dr4do; 547 | dXdYd = dMatTilt * Vec2d(dxd0_do, dyd0_do); 548 | dpdo_p[i * 3 + j] = fx * dXdYd(0); 549 | dpdo_p[dpdo_step + i * 3 + j] = fy * dXdYd(1); 550 | } 551 | dpdo_p += dpdo_step * 2; 552 | } 553 | } 554 | } 555 | 556 | if (_m != imagePoints) 557 | cvConvert(_m, imagePoints); 558 | 559 | if (_dpdr != dpdr) 560 | cvConvert(_dpdr, dpdr); 561 | 562 | if (_dpdt != dpdt) 563 | cvConvert(_dpdt, dpdt); 564 | 565 | if (_dpdf != dpdf) 566 | cvConvert(_dpdf, dpdf); 567 | 568 | if (_dpdc != dpdc) 569 | cvConvert(_dpdc, dpdc); 570 | 571 | if (_dpdk != dpdk) 572 | cvConvert(_dpdk, dpdk); 573 | 574 | if (_dpdo != dpdo) 575 | cvConvert(_dpdo, dpdo); 576 | } 577 | 578 | static void findExtrinsicCameraParams2(const CvMat* objectPoints, 579 | const CvMat* imagePoints, const CvMat* A, const CvMat* distCoeffs, 580 | CvMat* rvec, CvMat* tvec, int useExtrinsicGuess = 0) 581 | { 582 | const int max_iter = 20; 583 | Ptr matM, _Mxy, _m, _mn, matL; 584 | 585 | int i, count; 586 | double a[9], ar[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 }, R[9]; 587 | double MM[9], U[9], V[9], W[3]; 588 | cv::Scalar Mc; 589 | double param[6]; 590 | CvMat matA = cvMat(3, 3, CV_64F, a); 591 | CvMat _Ar = cvMat(3, 3, CV_64F, ar); 592 | CvMat matR = cvMat(3, 3, CV_64F, R); 593 | CvMat _r = cvMat(3, 1, CV_64F, param); 594 | CvMat _t = cvMat(3, 1, CV_64F, param + 3); 595 | CvMat _Mc = cvMat(1, 3, CV_64F, Mc.val); 596 | CvMat _MM = cvMat(3, 3, CV_64F, MM); 597 | CvMat matU = cvMat(3, 3, CV_64F, U); 598 | CvMat matV = cvMat(3, 3, CV_64F, V); 599 | CvMat matW = cvMat(3, 1, CV_64F, W); 600 | CvMat _param = cvMat(6, 1, CV_64F, param); 601 | CvMat _dpdr, _dpdt; 602 | 603 | CV_Assert(CV_IS_MAT(objectPoints) && CV_IS_MAT(imagePoints) 604 | && 
CV_IS_MAT(A) && CV_IS_MAT(rvec) && CV_IS_MAT(tvec)); 605 | 606 | count = MAX(objectPoints->cols, objectPoints->rows); 607 | matM.reset(cvCreateMat(1, count, CV_64FC3)); 608 | _m.reset(cvCreateMat(1, count, CV_64FC2)); 609 | 610 | cvConvert(objectPoints, matM); 611 | cvConvert(imagePoints, _m); 612 | cvConvert(A, &matA); 613 | 614 | CV_Assert((CV_MAT_DEPTH(rvec->type) == CV_64F 615 | || CV_MAT_DEPTH(rvec->type) == CV_32F) 616 | && (rvec->rows == 1 || rvec->cols == 1) 617 | && rvec->rows * rvec->cols * CV_MAT_CN(rvec->type) == 3); 618 | 619 | CV_Assert((CV_MAT_DEPTH(tvec->type) == CV_64F 620 | || CV_MAT_DEPTH(tvec->type) == CV_32F) 621 | && (tvec->rows == 1 || tvec->cols == 1) 622 | && tvec->rows * tvec->cols * CV_MAT_CN(tvec->type) == 3); 623 | 624 | // it is unsafe to call LM optimisation without an extrinsic guess 625 | // in the case of 3 points. This is because there is no guarantee 626 | // that it will converge on the correct solution. 627 | CV_Assert((count >= 4) || (count == 3 && useExtrinsicGuess)); 628 | 629 | _mn.reset(cvCreateMat(1, count, CV_64FC2)); 630 | _Mxy.reset(cvCreateMat(1, count, CV_64FC2)); 631 | 632 | // normalize image points 633 | // (unapply the intrinsic matrix transformation and distortion) 634 | cvUndistortPoints(_m, _mn, &matA, distCoeffs, 0, &_Ar); 635 | 636 | if (useExtrinsicGuess) { 637 | CvMat _r_temp = cvMat(rvec->rows, rvec->cols, 638 | CV_MAKETYPE(CV_64F, CV_MAT_CN(rvec->type)), param); 639 | CvMat _t_temp = cvMat(tvec->rows, tvec->cols, 640 | CV_MAKETYPE(CV_64F, CV_MAT_CN(tvec->type)), param + 3); 641 | cvConvert(rvec, &_r_temp); 642 | cvConvert(tvec, &_t_temp); 643 | } else { 644 | Mc = cvAvg(matM); 645 | cvReshape(matM, matM, 1, count); 646 | cvMulTransposed(matM, &_MM, 1, &_Mc); 647 | cvSVD(&_MM, &matW, 0, &matV, CV_SVD_MODIFY_A + CV_SVD_V_T); 648 | 649 | // initialize extrinsic parameters 650 | if (W[2] / W[1] < 1e-3) { 651 | // a planar structure case (all M's lie in the same plane) 652 | double tt[3], h[9], h1_norm, h2_norm; 653 | CvMat* R_transform = &matV; 654 | CvMat T_transform = cvMat(3, 1, CV_64F, tt); 655 | CvMat matH = cvMat(3, 3, CV_64F, h); 656 | CvMat _h1, _h2, _h3; 657 | 658 | if (V[2] * V[2] + V[5] * V[5] < 1e-10) 659 | cvSetIdentity(R_transform); 660 | 661 | if (cvDet(R_transform) < 0) 662 | cvScale(R_transform, R_transform, -1); 663 | 664 | cvGEMM(R_transform, &_Mc, -1, 0, 0, &T_transform, CV_GEMM_B_T); 665 | 666 | for (i = 0; i < count; i++) { 667 | const double* Rp = R_transform->data.db; 668 | const double* Tp = T_transform.data.db; 669 | const double* src = matM->data.db + i * 3; 670 | double* dst = _Mxy->data.db + i * 2; 671 | 672 | dst[0] = Rp[0] * src[0] + Rp[1] * src[1] + Rp[2] * src[2] 673 | + Tp[0]; 674 | dst[1] = Rp[3] * src[0] + Rp[4] * src[1] + Rp[5] * src[2] 675 | + Tp[1]; 676 | } 677 | 678 | cvFindHomography(_Mxy, _mn, &matH); 679 | 680 | if (cvCheckArr(&matH, CV_CHECK_QUIET)) { 681 | cvGetCol(&matH, &_h1, 0); 682 | _h2 = _h1; 683 | _h2.data.db++; 684 | _h3 = _h2; 685 | _h3.data.db++; 686 | h1_norm = std::sqrt(h[0] * h[0] + h[3] * h[3] + h[6] * h[6]); 687 | h2_norm = std::sqrt(h[1] * h[1] + h[4] * h[4] + h[7] * h[7]); 688 | 689 | cvScale(&_h1, &_h1, 1. / MAX(h1_norm, DBL_EPSILON)); 690 | cvScale(&_h2, &_h2, 1. / MAX(h2_norm, DBL_EPSILON)); 691 | cvScale(&_h3, &_t, 2. 
/ MAX(h1_norm + h2_norm, DBL_EPSILON)); 692 | cvCrossProduct(&_h1, &_h2, &_h3); 693 | 694 | cvRodrigues2(&matH, &_r); 695 | cvRodrigues2(&_r, &matH); 696 | cvMatMulAdd(&matH, &T_transform, &_t, &_t); 697 | cvMatMul(&matH, R_transform, &matR); 698 | } else { 699 | cvSetIdentity(&matR); 700 | cvZero(&_t); 701 | } 702 | 703 | cvRodrigues2(&matR, &_r); 704 | } else { 705 | // non-planar structure. Use DLT method 706 | double* L; 707 | double LL[12 * 12], LW[12], LV[12 * 12], sc; 708 | CvMat _LL = cvMat(12, 12, CV_64F, LL); 709 | CvMat _LW = cvMat(12, 1, CV_64F, LW); 710 | CvMat _LV = cvMat(12, 12, CV_64F, LV); 711 | CvMat _RRt, _RR, _tt; 712 | CvPoint3D64f* M = (CvPoint3D64f*)matM->data.db; 713 | CvPoint2D64f* mn = (CvPoint2D64f*)_mn->data.db; 714 | 715 | matL.reset(cvCreateMat(2 * count, 12, CV_64F)); 716 | L = matL->data.db; 717 | 718 | for (i = 0; i < count; i++, L += 24) { 719 | double x = -mn[i].x, y = -mn[i].y; 720 | L[0] = L[16] = M[i].x; 721 | L[1] = L[17] = M[i].y; 722 | L[2] = L[18] = M[i].z; 723 | L[3] = L[19] = 1.; 724 | L[4] = L[5] = L[6] = L[7] = 0.; 725 | L[12] = L[13] = L[14] = L[15] = 0.; 726 | L[8] = x * M[i].x; 727 | L[9] = x * M[i].y; 728 | L[10] = x * M[i].z; 729 | L[11] = x; 730 | L[20] = y * M[i].x; 731 | L[21] = y * M[i].y; 732 | L[22] = y * M[i].z; 733 | L[23] = y; 734 | } 735 | 736 | cvMulTransposed(matL, &_LL, 1); 737 | cvSVD(&_LL, &_LW, 0, &_LV, CV_SVD_MODIFY_A + CV_SVD_V_T); 738 | _RRt = cvMat(3, 4, CV_64F, LV + 11 * 12); 739 | cvGetCols(&_RRt, &_RR, 0, 3); 740 | cvGetCol(&_RRt, &_tt, 3); 741 | if (cvDet(&_RR) < 0) 742 | cvScale(&_RRt, &_RRt, -1); 743 | sc = cvNorm(&_RR); 744 | cvSVD(&_RR, &matW, &matU, &matV, 745 | CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T); 746 | cvGEMM(&matU, &matV, 1, 0, 0, &matR, CV_GEMM_A_T); 747 | cvScale(&_tt, &_t, cvNorm(&matR) / sc); 748 | cvRodrigues2(&matR, &_r); 749 | } 750 | } 751 | 752 | cvReshape(matM, matM, 3, 1); 753 | cvReshape(_mn, _mn, 2, 1); 754 | 755 | // refine extrinsic parameters using iterative algorithm 756 | CvLevMarq solver(6, count * 2, 757 | cvTermCriteria( 758 | CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, max_iter, FLT_EPSILON), 759 | true); 760 | cvCopy(&_param, solver.param); 761 | 762 | for (;;) { 763 | CvMat *matJ = 0, *_err = 0; 764 | const CvMat* __param = 0; 765 | bool proceed = solver.update(__param, matJ, _err); 766 | cvCopy(__param, &_param); 767 | if (!proceed || !_err) 768 | break; 769 | cvReshape(_err, _err, 2, 1); 770 | if (matJ) { 771 | cvGetCols(matJ, &_dpdr, 0, 3); 772 | cvGetCols(matJ, &_dpdt, 3, 6); 773 | projectPoints2(matM, &_r, &_t, &matA, distCoeffs, _err, &_dpdr, 774 | &_dpdt, 0, 0, 0, 0); 775 | } else { 776 | projectPoints2( 777 | matM, &_r, &_t, &matA, distCoeffs, _err, 0, 0, 0, 0, 0, 0); 778 | } 779 | cvSub(_err, _m, _err); 780 | cvReshape(_err, _err, 1, 2 * count); 781 | } 782 | cvCopy(solver.param, &_param); 783 | 784 | _r = cvMat(rvec->rows, rvec->cols, 785 | CV_MAKETYPE(CV_64F, CV_MAT_CN(rvec->type)), param); 786 | _t = cvMat(tvec->rows, tvec->cols, 787 | CV_MAKETYPE(CV_64F, CV_MAT_CN(tvec->type)), param + 3); 788 | 789 | cvConvert(&_r, rvec); 790 | cvConvert(&_t, tvec); 791 | } 792 | 793 | static void initIntrinsicParams2D(const CvMat* objectPoints, 794 | const CvMat* imagePoints, const CvMat* npoints, CvSize imageSize, 795 | CvMat* cameraMatrix, double aspectRatio) 796 | { 797 | Ptr matA, _b, _allH; 798 | 799 | int i, j, pos, nimages, ni = 0; 800 | double a[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 }; 801 | double H[9] = { 0 }, f[2] = { 0 }; 802 | CvMat _a = cvMat(3, 3, CV_64F, a); 803 | CvMat matH = 
cvMat(3, 3, CV_64F, H); 804 | CvMat _f = cvMat(2, 1, CV_64F, f); 805 | 806 | assert(CV_MAT_TYPE(npoints->type) == CV_32SC1 807 | && CV_IS_MAT_CONT(npoints->type)); 808 | nimages = npoints->rows * npoints->cols; 809 | 810 | if ((CV_MAT_TYPE(objectPoints->type) != CV_32FC3 811 | && CV_MAT_TYPE(objectPoints->type) != CV_64FC3) 812 | || (CV_MAT_TYPE(imagePoints->type) != CV_32FC2 813 | && CV_MAT_TYPE(imagePoints->type) != CV_64FC2)) 814 | CV_Error(CV_StsUnsupportedFormat, 815 | "Object points must be 3D and image points must be 2D"); 816 | 817 | if (objectPoints->rows != 1 || imagePoints->rows != 1) 818 | CV_Error(CV_StsBadSize, 819 | "object points and image points must be a single-row matrices"); 820 | 821 | matA.reset(cvCreateMat(2 * nimages, 2, CV_64F)); 822 | _b.reset(cvCreateMat(2 * nimages, 1, CV_64F)); 823 | a[2] = (!imageSize.width) ? 0.5 : (imageSize.width) * 0.5; 824 | a[5] = (!imageSize.height) ? 0.5 : (imageSize.height) * 0.5; 825 | _allH.reset(cvCreateMat(nimages, 9, CV_64F)); 826 | 827 | // extract vanishing points in order to obtain initial value for the focal 828 | // length 829 | for (i = 0, pos = 0; i < nimages; i++, pos += ni) { 830 | double* Ap = matA->data.db + i * 4; 831 | double* bp = _b->data.db + i * 2; 832 | ni = npoints->data.i[i]; 833 | double h[3], v[3], d1[3], d2[3]; 834 | double n[4] = { 0, 0, 0, 0 }; 835 | CvMat _m, matM; 836 | cvGetCols(objectPoints, &matM, 0, ni); 837 | cvGetCols(imagePoints, &_m, pos, pos + ni); 838 | 839 | cvFindHomography(&matM, &_m, &matH); 840 | memcpy(_allH->data.db + i * 9, H, sizeof(H)); 841 | 842 | H[0] -= H[6] * a[2]; 843 | H[1] -= H[7] * a[2]; 844 | H[2] -= H[8] * a[2]; 845 | H[3] -= H[6] * a[5]; 846 | H[4] -= H[7] * a[5]; 847 | H[5] -= H[8] * a[5]; 848 | 849 | for (j = 0; j < 3; j++) { 850 | double t0 = H[j * 3], t1 = H[j * 3 + 1]; 851 | h[j] = t0; 852 | v[j] = t1; 853 | d1[j] = (t0 + t1) * 0.5; 854 | d2[j] = (t0 - t1) * 0.5; 855 | n[0] += t0 * t0; 856 | n[1] += t1 * t1; 857 | n[2] += d1[j] * d1[j]; 858 | n[3] += d2[j] * d2[j]; 859 | } 860 | 861 | for (j = 0; j < 4; j++) 862 | n[j] = 1. / std::sqrt(n[j]); 863 | 864 | for (j = 0; j < 3; j++) { 865 | h[j] *= n[0]; 866 | v[j] *= n[1]; 867 | d1[j] *= n[2]; 868 | d2[j] *= n[3]; 869 | } 870 | 871 | Ap[0] = h[0] * v[0]; 872 | Ap[1] = h[1] * v[1]; 873 | Ap[2] = d1[0] * d2[0]; 874 | Ap[3] = d1[1] * d2[1]; 875 | bp[0] = -h[2] * v[2]; 876 | bp[1] = -d1[2] * d2[2]; 877 | } 878 | 879 | cvSolve(matA, _b, &_f, CV_NORMAL + CV_SVD); 880 | a[0] = std::sqrt(fabs(1. / f[0])); 881 | a[4] = std::sqrt(fabs(1. 
/ f[1]));
882 |     if (aspectRatio != 0) {
883 |         double tf = (a[0] + a[4]) / (aspectRatio + 1.);
884 |         a[0] = aspectRatio * tf;
885 |         a[4] = tf;
886 |     }
887 | 
888 |     cvConvert(&_a, cameraMatrix);
889 | }
890 | 
891 | static void subMatrix(const cv::Mat& src, cv::Mat& dst,
892 |     const std::vector<uchar>& cols, const std::vector<uchar>& rows)
893 | {
894 |     int nonzeros_cols = cv::countNonZero(cols);
895 |     cv::Mat tmp(src.rows, nonzeros_cols, CV_64FC1);
896 | 
897 |     for (int i = 0, j = 0; i < (int)cols.size(); i++) {
898 |         if (cols[i]) {
899 |             src.col(i).copyTo(tmp.col(j++));
900 |         }
901 |     }
902 | 
903 |     int nonzeros_rows = cv::countNonZero(rows);
904 |     dst.create(nonzeros_rows, nonzeros_cols, CV_64FC1);
905 |     for (int i = 0, j = 0; i < (int)rows.size(); i++) {
906 |         if (rows[i]) {
907 |             tmp.row(i).copyTo(dst.row(j++));
908 |         }
909 |     }
910 | }
911 | 
912 | static double calibrateCamera2Internal(const CvMat* objectPoints,
913 |     const CvMat* imagePoints, const CvMat* npoints, CvSize imageSize,
914 |     int fixedObjPt, CvMat* cameraMatrix, CvMat* distCoeffs,
915 |     CvMat* newObjPoints, CvMat* rvecs, CvMat* tvecs, CvMat* stdDevs,
916 |     CvMat* perViewErrors, int flags, CvTermCriteria termCrit)
917 | {
918 |     const int NINTRINSIC = CV_CALIB_NINTRINSIC;
919 |     double reprojErr = 0;
920 | 
921 |     Matx33d A;
922 |     double k[14] = { 0 };
923 |     CvMat matA = cvMat(3, 3, CV_64F, A.val), _k;
924 |     int i, nimages, maxPoints = 0, ni = 0, pos, total = 0, nparams, npstep,
925 |         cn;
926 |     double aspectRatio = 0.;
927 | 
928 |     // 0. check the parameters & allocate buffers
929 |     if (!CV_IS_MAT(objectPoints) || !CV_IS_MAT(imagePoints)
930 |         || !CV_IS_MAT(npoints) || !CV_IS_MAT(cameraMatrix)
931 |         || !CV_IS_MAT(distCoeffs))
932 |         CV_Error(CV_StsBadArg,
933 |             "One of required vector arguments is not a valid matrix");
934 | 
935 |     if (imageSize.width <= 0 || imageSize.height <= 0)
936 |         CV_Error(CV_StsOutOfRange, "image width and height must be positive");
937 | 
938 |     if (CV_MAT_TYPE(npoints->type) != CV_32SC1
939 |         || (npoints->rows != 1 && npoints->cols != 1))
940 |         CV_Error(CV_StsUnsupportedFormat, "the array of point counters must "
941 |                                           "be 1-dimensional integer vector");
942 |     if (flags & CALIB_TILTED_MODEL) {
943 |         // when the tilted sensor model is used the distortion coefficients
944 |         // matrix must have 14 parameters
945 |         if (distCoeffs->cols * distCoeffs->rows != 14)
946 |             CV_Error(CV_StsBadArg, "The tilted sensor model must have 14 "
947 |                                    "parameters in the distortion matrix");
948 |     } else {
949 |         // when the thin prism model is used the distortion coefficients
950 |         // matrix must have 12 parameters
951 |         if (flags & CALIB_THIN_PRISM_MODEL)
952 |             if (distCoeffs->cols * distCoeffs->rows != 12)
953 |                 CV_Error(CV_StsBadArg, "Thin prism model must have 12 "
954 |                                        "parameters in the distortion matrix");
955 |     }
956 | 
957 |     nimages = npoints->rows * npoints->cols;
958 |     npstep = npoints->rows == 1 ?
1 959 | : npoints->step / CV_ELEM_SIZE(npoints->type); 960 | 961 | if (rvecs) { 962 | cn = CV_MAT_CN(rvecs->type); 963 | if (!CV_IS_MAT(rvecs) || (CV_MAT_DEPTH(rvecs->type) != CV_32F 964 | && CV_MAT_DEPTH(rvecs->type) != CV_64F) 965 | || ((rvecs->rows != nimages 966 | || (rvecs->cols * cn != 3 && rvecs->cols * cn != 9)) 967 | && (rvecs->rows != 1 || rvecs->cols != nimages 968 | || cn != 3))) 969 | CV_Error(CV_StsBadArg, 970 | "the output array of rotation vectors must be 3-channel " 971 | "1xn or nx1 array or 1-channel nx3 or nx9 array, where n is " 972 | "the number of views"); 973 | } 974 | 975 | if (tvecs) { 976 | cn = CV_MAT_CN(tvecs->type); 977 | if (!CV_IS_MAT(tvecs) || (CV_MAT_DEPTH(tvecs->type) != CV_32F 978 | && CV_MAT_DEPTH(tvecs->type) != CV_64F) 979 | || ((tvecs->rows != nimages || tvecs->cols * cn != 3) 980 | && (tvecs->rows != 1 || tvecs->cols != nimages 981 | || cn != 3))) 982 | CV_Error(CV_StsBadArg, 983 | "the output array of translation vectors must be 3-channel " 984 | "1xn or nx1 array or 1-channel nx3 array, where n is the " 985 | "number of views"); 986 | } 987 | 988 | if (stdDevs) { 989 | cn = CV_MAT_CN(stdDevs->type); 990 | if (!CV_IS_MAT(stdDevs) 991 | || (CV_MAT_DEPTH(stdDevs->type) != CV_32F 992 | && CV_MAT_DEPTH(stdDevs->type) != CV_64F) 993 | || ((stdDevs->rows != (nimages * 6 + NINTRINSIC + maxPoints * 3) 994 | || stdDevs->cols * cn != 1) 995 | && (stdDevs->rows != 1 996 | || stdDevs->cols 997 | != (nimages * 6 + NINTRINSIC + maxPoints * 3) 998 | || cn != 1))) 999 | #define STR__(x) #x 1000 | #define STR_(x) STR__(x) 1001 | CV_Error(CV_StsBadArg, 1002 | "the output array of standard deviations vectors must be " 1003 | "1-channel 1x(n*6 + NINTRINSIC + maxPoints*3) or (n*6 + " 1004 | "NINTRINSIC + maxPoints*3)x1 array, where n is the number of " 1005 | "views, maxPoints is the number of points per view, " 1006 | "NINTRINSIC = " STR_(CV_CALIB_NINTRINSIC)); 1007 | } 1008 | 1009 | if ((CV_MAT_TYPE(cameraMatrix->type) != CV_32FC1 1010 | && CV_MAT_TYPE(cameraMatrix->type) != CV_64FC1) 1011 | || cameraMatrix->rows != 3 || cameraMatrix->cols != 3) 1012 | CV_Error(CV_StsBadArg, 1013 | "Intrinsic parameters must be 3x3 floating-point matrix"); 1014 | 1015 | if ((CV_MAT_TYPE(distCoeffs->type) != CV_32FC1 1016 | && CV_MAT_TYPE(distCoeffs->type) != CV_64FC1) 1017 | || (distCoeffs->cols != 1 && distCoeffs->rows != 1) 1018 | || (distCoeffs->cols * distCoeffs->rows != 4 1019 | && distCoeffs->cols * distCoeffs->rows != 5 1020 | && distCoeffs->cols * distCoeffs->rows != 8 1021 | && distCoeffs->cols * distCoeffs->rows != 12 1022 | && distCoeffs->cols * distCoeffs->rows != 14)) 1023 | CV_Error(CV_StsBadArg, cvDistCoeffErr); 1024 | 1025 | for (i = 0; i < nimages; i++) { 1026 | ni = npoints->data.i[i * npstep]; 1027 | if (ni < 4) { 1028 | CV_Error_(CV_StsOutOfRange, 1029 | ("The number of points in the view #%d is < 4", i)); 1030 | } 1031 | maxPoints = MAX(maxPoints, ni); 1032 | total += ni; 1033 | } 1034 | 1035 | Mat matM(1, maxPoints, CV_64FC3); 1036 | Mat _m(1, total, CV_64FC2); 1037 | Mat allErrors(1, total, CV_64FC2); 1038 | 1039 | if (CV_MAT_CN(objectPoints->type) == 3) { 1040 | cvarrToMat(objectPoints).convertTo(matM, CV_64F); 1041 | } else { 1042 | convertPointsHomogeneous(cvarrToMat(objectPoints), matM); 1043 | } 1044 | 1045 | if (CV_MAT_CN(imagePoints->type) == 2) { 1046 | cvarrToMat(imagePoints).convertTo(_m, CV_64F); 1047 | } else { 1048 | convertPointsHomogeneous(cvarrToMat(imagePoints), _m); 1049 | } 1050 | 1051 | nparams = NINTRINSIC + nimages * 6 + maxPoints * 3; 
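// Layout of the LM parameter vector assembled below (nparams entries):
//   param[0..3]                      - fx, fy, cx, cy
//   param[4..17]                     - distortion coefficients k[0..13]
//   param[NINTRINSIC + 6*i ..]       - rvec (3) followed by tvec (3) of view i
//   param[NINTRINSIC + 6*nimages ..] - the maxPoints*3 object point
//                                      coordinates refined by this method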
1052 | Mat _Ji(maxPoints * 2, NINTRINSIC, CV_64FC1, Scalar(0)); 1053 | Mat _Je(maxPoints * 2, 6, CV_64FC1); 1054 | Mat _Jo(maxPoints * 2, maxPoints * 3, CV_64FC1, Scalar(0)); 1055 | Mat _err(maxPoints * 2, 1, CV_64FC1); 1056 | 1057 | _k = cvMat(distCoeffs->rows, distCoeffs->cols, 1058 | CV_MAKETYPE(CV_64F, CV_MAT_CN(distCoeffs->type)), k); 1059 | if (distCoeffs->rows * distCoeffs->cols * CV_MAT_CN(distCoeffs->type) 1060 | < 8) { 1061 | if (distCoeffs->rows * distCoeffs->cols * CV_MAT_CN(distCoeffs->type) 1062 | < 5) 1063 | flags |= CALIB_FIX_K3; 1064 | flags |= CALIB_FIX_K4 | CALIB_FIX_K5 | CALIB_FIX_K6; 1065 | } 1066 | const double minValidAspectRatio = 0.01; 1067 | const double maxValidAspectRatio = 100.0; 1068 | 1069 | // 1. initialize intrinsic parameters & LM solver 1070 | if (flags & CALIB_USE_INTRINSIC_GUESS) { 1071 | cvConvert(cameraMatrix, &matA); 1072 | if (A(0, 0) <= 0 || A(1, 1) <= 0) 1073 | CV_Error(CV_StsOutOfRange, 1074 | "Focal length (fx and fy) must be positive"); 1075 | if (A(0, 2) < 0 || A(0, 2) >= imageSize.width || A(1, 2) < 0 1076 | || A(1, 2) >= imageSize.height) 1077 | CV_Error( 1078 | CV_StsOutOfRange, "Principal point must be within the image"); 1079 | if (fabs(A(0, 1)) > 1e-5) 1080 | CV_Error(CV_StsOutOfRange, 1081 | "Non-zero skew is not supported by the function"); 1082 | if (fabs(A(1, 0)) > 1e-5 || fabs(A(2, 0)) > 1e-5 1083 | || fabs(A(2, 1)) > 1e-5 || fabs(A(2, 2) - 1) > 1e-5) 1084 | CV_Error(CV_StsOutOfRange, "The intrinsic matrix must have [fx 0 " 1085 | "cx; 0 fy cy; 0 0 1] shape"); 1086 | A(0, 1) = A(1, 0) = A(2, 0) = A(2, 1) = 0.; 1087 | A(2, 2) = 1.; 1088 | 1089 | if (flags & CALIB_FIX_ASPECT_RATIO) { 1090 | aspectRatio = A(0, 0) / A(1, 1); 1091 | 1092 | if (aspectRatio < minValidAspectRatio 1093 | || aspectRatio > maxValidAspectRatio) 1094 | CV_Error(CV_StsOutOfRange, 1095 | "The specified aspect ratio (= cameraMatrix[0][0] / " 1096 | "cameraMatrix[1][1]) is incorrect"); 1097 | } 1098 | cvConvert(distCoeffs, &_k); 1099 | } else { 1100 | Scalar mean, sdv; 1101 | meanStdDev(matM, mean, sdv); 1102 | if (fabs(mean[2]) > 1e-5 || fabs(sdv[2]) > 1e-5) 1103 | CV_Error(CV_StsBadArg, "For non-planar calibration rigs the " 1104 | "initial intrinsic matrix must be " 1105 | "specified"); 1106 | for (i = 0; i < maxPoints; i++) 1107 | matM.at(i).z = 0.; 1108 | 1109 | if (flags & CALIB_FIX_ASPECT_RATIO) { 1110 | aspectRatio = cvmGet(cameraMatrix, 0, 0); 1111 | aspectRatio /= cvmGet(cameraMatrix, 1, 1); 1112 | if (aspectRatio < minValidAspectRatio 1113 | || aspectRatio > maxValidAspectRatio) 1114 | CV_Error(CV_StsOutOfRange, 1115 | "The specified aspect ratio (= cameraMatrix[0][0] / " 1116 | "cameraMatrix[1][1]) is incorrect"); 1117 | } 1118 | CvMat _matM = CvMat(matM), m = CvMat(_m); 1119 | initIntrinsicParams2D( 1120 | &_matM, &m, npoints, imageSize, &matA, aspectRatio); 1121 | } 1122 | 1123 | CvLevMarq solver(nparams, 0, termCrit); 1124 | 1125 | if (flags & CALIB_USE_LU) { 1126 | solver.solveMethod = DECOMP_LU; 1127 | } else if (flags & CALIB_USE_QR) { 1128 | solver.solveMethod = DECOMP_QR; 1129 | } 1130 | 1131 | { 1132 | double* param = solver.param->data.db; 1133 | uchar* mask = solver.mask->data.ptr; 1134 | 1135 | param[0] = A(0, 0); 1136 | param[1] = A(1, 1); 1137 | param[2] = A(0, 2); 1138 | param[3] = A(1, 2); 1139 | std::copy(k, k + 14, param + 4); 1140 | 1141 | if (flags & CALIB_FIX_ASPECT_RATIO) 1142 | mask[0] = 0; 1143 | if (flags & CALIB_FIX_FOCAL_LENGTH) 1144 | mask[0] = mask[1] = 0; 1145 | if (flags & CALIB_FIX_PRINCIPAL_POINT) 1146 | mask[2] = mask[3] 
= 0;
1147 |         if (flags & CALIB_ZERO_TANGENT_DIST) {
1148 |             param[6] = param[7] = 0;
1149 |             mask[6] = mask[7] = 0;
1150 |         }
1151 |         if (!(flags & CALIB_RATIONAL_MODEL))
1152 |             flags |= CALIB_FIX_K4 + CALIB_FIX_K5 + CALIB_FIX_K6;
1153 |         if (!(flags & CALIB_THIN_PRISM_MODEL))
1154 |             flags |= CALIB_FIX_S1_S2_S3_S4;
1155 |         if (!(flags & CALIB_TILTED_MODEL))
1156 |             flags |= CALIB_FIX_TAUX_TAUY;
1157 | 
1158 |         mask[4] = !(flags & CALIB_FIX_K1);
1159 |         mask[5] = !(flags & CALIB_FIX_K2);
1160 |         if (flags & CALIB_FIX_TANGENT_DIST) {
1161 |             mask[6] = mask[7] = 0;
1162 |         }
1163 |         mask[8] = !(flags & CALIB_FIX_K3);
1164 |         mask[9] = !(flags & CALIB_FIX_K4);
1165 |         mask[10] = !(flags & CALIB_FIX_K5);
1166 |         mask[11] = !(flags & CALIB_FIX_K6);
1167 | 
1168 |         if (flags & CALIB_FIX_S1_S2_S3_S4) {
1169 |             mask[12] = 0;
1170 |             mask[13] = 0;
1171 |             mask[14] = 0;
1172 |             mask[15] = 0;
1173 |         }
1174 |         if (flags & CALIB_FIX_TAUX_TAUY) {
1175 |             mask[16] = 0;
1176 |             mask[17] = 0;
1177 |         }
1178 | 
1179 |         // copy the object points into the tail of the parameter vector
1180 |         std::copy(matM.ptr<double>(), matM.ptr<double>(0, maxPoints - 1) + 3,
1181 |             param + NINTRINSIC + nimages * 6);
1182 |         // fix the first point, the user-chosen point, and the Z of the last
1183 |         mask[NINTRINSIC + nimages * 6] = 0;
1184 |         mask[NINTRINSIC + nimages * 6 + 1] = 0;
1185 |         mask[NINTRINSIC + nimages * 6 + 2] = 0;
1186 |         mask[NINTRINSIC + nimages * 6 + fixedObjPt * 3] = 0;
1187 |         mask[NINTRINSIC + nimages * 6 + fixedObjPt * 3 + 1] = 0;
1188 |         mask[NINTRINSIC + nimages * 6 + fixedObjPt * 3 + 2] = 0;
1189 |         mask[nparams - 1] = 0;
1190 |     }
1191 | 
1192 |     // 2. initialize extrinsic parameters
1193 |     CvMat _Mi(matM.colRange(0, maxPoints));
1194 |     for (i = 0, pos = 0; i < nimages; i++, pos += ni) {
1195 |         CvMat _ri, _ti;
1196 |         ni = npoints->data.i[i * npstep];
1197 | 
1198 |         cvGetRows(
1199 |             solver.param, &_ri, NINTRINSIC + i * 6, NINTRINSIC + i * 6 + 3);
1200 |         cvGetRows(solver.param, &_ti, NINTRINSIC + i * 6 + 3,
1201 |             NINTRINSIC + i * 6 + 6);
1202 | 
1203 |         CvMat _mi(_m.colRange(pos, pos + ni));
1204 | 
1205 |         findExtrinsicCameraParams2(&_Mi, &_mi, &matA, &_k, &_ri, &_ti);
1206 |     }
1207 | 
1208 |     // 3.
run the optimization 1209 | for (;;) { 1210 | const CvMat* _param = 0; 1211 | CvMat *_JtJ = 0, *_JtErr = 0; 1212 | double* _errNorm = 0; 1213 | bool proceed = solver.updateAlt(_param, _JtJ, _JtErr, _errNorm); 1214 | double *param = solver.param->data.db, 1215 | *pparam = solver.prevParam->data.db; 1216 | bool calcJ 1217 | = solver.state == CvLevMarq::CALC_J || (!proceed && stdDevs); 1218 | 1219 | if (flags & CALIB_FIX_ASPECT_RATIO) { 1220 | param[0] = param[1] * aspectRatio; 1221 | pparam[0] = pparam[1] * aspectRatio; 1222 | } 1223 | 1224 | A(0, 0) = param[0]; 1225 | A(1, 1) = param[1]; 1226 | A(0, 2) = param[2]; 1227 | A(1, 2) = param[3]; 1228 | std::copy(param + 4, param + 4 + 14, k); 1229 | cvGetRows(solver.param, &_Mi, NINTRINSIC + nimages * 6, 1230 | NINTRINSIC + nimages * 6 + ni * 3); 1231 | cvReshape(&_Mi, &_Mi, 3, 1); 1232 | 1233 | if (!proceed && !stdDevs && !perViewErrors) 1234 | break; 1235 | else if (!proceed && stdDevs) 1236 | cvZero(_JtJ); 1237 | 1238 | reprojErr = 0; 1239 | 1240 | for (i = 0, pos = 0; i < nimages; i++, pos += ni) { 1241 | CvMat _ri, _ti; 1242 | ni = npoints->data.i[i * npstep]; 1243 | 1244 | cvGetRows(solver.param, &_ri, NINTRINSIC + i * 6, 1245 | NINTRINSIC + i * 6 + 3); 1246 | cvGetRows(solver.param, &_ti, NINTRINSIC + i * 6 + 3, 1247 | NINTRINSIC + i * 6 + 6); 1248 | 1249 | CvMat _mi = CvMat(_m.colRange(pos, pos + ni)); 1250 | CvMat _me = CvMat(allErrors.colRange(pos, pos + ni)); 1251 | 1252 | _Je.resize(ni * 2); 1253 | _Ji.resize(ni * 2); 1254 | _Jo.resize(ni * 2); 1255 | _err.resize(ni * 2); 1256 | CvMat _dpdr = CvMat(_Je.colRange(0, 3)); 1257 | CvMat _dpdt = CvMat(_Je.colRange(3, 6)); 1258 | CvMat _dpdf = CvMat(_Ji.colRange(0, 2)); 1259 | CvMat _dpdc = CvMat(_Ji.colRange(2, 4)); 1260 | CvMat _dpdk = CvMat(_Ji.colRange(4, NINTRINSIC)); 1261 | CvMat _dpdo = CvMat(_Jo.colRange(0, maxPoints * 3)); 1262 | CvMat _mp = CvMat(_err.reshape(2, 1)); 1263 | 1264 | if (calcJ) { 1265 | projectPoints2(&_Mi, &_ri, &_ti, &matA, &_k, &_mp, &_dpdr, 1266 | &_dpdt, (flags & CALIB_FIX_FOCAL_LENGTH) ? 0 : &_dpdf, 1267 | (flags & CALIB_FIX_PRINCIPAL_POINT) ? 0 : &_dpdc, &_dpdk, 1268 | &_dpdo, 1269 | (flags & CALIB_FIX_ASPECT_RATIO) ? 
aspectRatio : 0); 1270 | } else 1271 | projectPoints2(&_Mi, &_ri, &_ti, &matA, &_k, &_mp); 1272 | 1273 | cvSub(&_mp, &_mi, &_mp); 1274 | if (perViewErrors || stdDevs) 1275 | cvCopy(&_mp, &_me); 1276 | 1277 | if (calcJ) { 1278 | Mat JtJ(cvarrToMat(_JtJ)), JtErr(cvarrToMat(_JtErr)); 1279 | 1280 | // see HZ: (A6.14) for details on the structure of the 1281 | // Jacobian 1282 | JtJ(Rect(0, 0, NINTRINSIC, NINTRINSIC)) += _Ji.t() * _Ji; 1283 | JtJ(Rect(NINTRINSIC + i * 6, NINTRINSIC + i * 6, 6, 6)) 1284 | = _Je.t() * _Je; 1285 | JtJ(Rect(NINTRINSIC + i * 6, 0, 6, NINTRINSIC)) 1286 | = _Ji.t() * _Je; 1287 | JtJ(Rect( 1288 | NINTRINSIC + nimages * 6, 0, maxPoints * 3, NINTRINSIC)) 1289 | += _Ji.t() * _Jo; 1290 | JtJ(Rect(NINTRINSIC + nimages * 6, NINTRINSIC + i * 6, 1291 | maxPoints * 3, 6)) 1292 | += _Je.t() * _Jo; 1293 | JtJ(Rect(NINTRINSIC + nimages * 6, NINTRINSIC + nimages * 6, 1294 | maxPoints * 3, maxPoints * 3)) 1295 | += _Jo.t() * _Jo; 1296 | 1297 | JtErr.rowRange(0, NINTRINSIC) += _Ji.t() * _err; 1298 | JtErr.rowRange(NINTRINSIC + i * 6, NINTRINSIC + (i + 1) * 6) 1299 | = _Je.t() * _err; 1300 | JtErr.rowRange(NINTRINSIC + nimages * 6, nparams) 1301 | += _Jo.t() * _err; 1302 | } 1303 | 1304 | double viewErr = norm(_err, NORM_L2SQR); 1305 | 1306 | if (perViewErrors) 1307 | perViewErrors->data.db[i] = std::sqrt(viewErr / ni); 1308 | 1309 | reprojErr += viewErr; 1310 | } 1311 | if (_errNorm) 1312 | *_errNorm = reprojErr; 1313 | 1314 | if (!proceed) { 1315 | if (stdDevs) { 1316 | Mat mask = cvarrToMat(solver.mask); 1317 | int nparams_nz = countNonZero(mask); 1318 | Mat JtJinv, JtJN; 1319 | JtJN.create(nparams_nz, nparams_nz, CV_64F); 1320 | subMatrix(cvarrToMat(_JtJ), JtJN, mask, mask); 1321 | completeSymm(JtJN, false); 1322 | cv::invert(JtJN, JtJinv, DECOMP_SVD); 1323 | // sigma2 is deviation of the noise 1324 | // see any papers about variance of the least squares 1325 | // estimator for 1326 | // detailed description of the variance estimation methods 1327 | double sigma2 1328 | = norm(allErrors, NORM_L2SQR) / (total - nparams_nz); 1329 | Mat stdDevsM = cvarrToMat(stdDevs); 1330 | int j = 0; 1331 | for (int s = 0; s < nparams; s++) 1332 | if (mask.data[s]) { 1333 | stdDevsM.at(s) 1334 | = std::sqrt(JtJinv.at(j, j) * sigma2); 1335 | j++; 1336 | } else 1337 | stdDevsM.at(s) = 0.; 1338 | } 1339 | break; 1340 | } 1341 | } 1342 | 1343 | // 4. store the results 1344 | cvConvert(&matA, cameraMatrix); 1345 | cvConvert(&_k, distCoeffs); 1346 | cvConvert(&_Mi, newObjPoints); 1347 | 1348 | for (i = 0, pos = 0; i < nimages; i++) { 1349 | CvMat src, dst; 1350 | 1351 | if (rvecs) { 1352 | src = cvMat( 1353 | 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i * 6); 1354 | if (rvecs->rows == nimages 1355 | && rvecs->cols * CV_MAT_CN(rvecs->type) == 9) { 1356 | dst = cvMat(3, 3, CV_MAT_DEPTH(rvecs->type), 1357 | rvecs->data.ptr + rvecs->step * i); 1358 | cvRodrigues2(&src, &matA); 1359 | cvConvert(&matA, &dst); 1360 | } else { 1361 | dst = cvMat(3, 1, CV_MAT_DEPTH(rvecs->type), rvecs->rows == 1 1362 | ? rvecs->data.ptr + i * CV_ELEM_SIZE(rvecs->type) 1363 | : rvecs->data.ptr + rvecs->step * i); 1364 | cvConvert(&src, &dst); 1365 | } 1366 | } 1367 | if (tvecs) { 1368 | src = cvMat( 1369 | 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i * 6 + 3); 1370 | dst = cvMat(3, 1, CV_MAT_DEPTH(tvecs->type), tvecs->rows == 1 1371 | ? 
tvecs->data.ptr + i * CV_ELEM_SIZE(tvecs->type)
1372 |                 : tvecs->data.ptr + tvecs->step * i);
1373 |             cvConvert(&src, &dst);
1374 |         }
1375 |     }
1376 | 
1377 |     return std::sqrt(reprojErr / total);
1378 | }
1379 | 
1380 | static void collectCalibrationData(InputArray objectPoints,
1381 |     InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
1382 |     Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2, Mat& npoints)
1383 | {
1384 |     int nimages = (int)imagePoints1.total();
1385 |     int i, j = 0, ni = 0, total = 0;
1386 |     CV_Assert(
1387 |         nimages > 0 && (!imgPtMat2 || nimages == (int)imagePoints2.total()));
1388 | 
1389 |     ni = objectPoints.getMat().checkVector(3, CV_32F);
1390 |     if (ni <= 0)
1391 |         CV_Error(CV_StsUnsupportedFormat,
1392 |             "objectPoints should contain vector of points of type Point3f");
1393 |     for (i = 0; i < nimages; i++) {
1394 |         int ni1 = imagePoints1.getMat(i).checkVector(2, CV_32F);
1395 |         if (ni1 <= 0)
1396 |             CV_Error(CV_StsUnsupportedFormat, "imagePoints1 should contain "
1397 |                                               "vector of vectors of points "
1398 |                                               "of type Point2f");
1399 |         CV_Assert(ni == ni1);
1400 | 
1401 |         total += ni1;
1402 |     }
1403 | 
1404 |     npoints.create(1, (int)nimages, CV_32S);
1405 |     objPtMat.create(1, (int)ni, CV_32FC3);
1406 |     imgPtMat1.create(1, (int)total, CV_32FC2);
1407 |     Point2f* imgPtData2 = 0;
1408 | 
1409 |     if (imgPtMat2) {
1410 |         imgPtMat2->create(1, (int)total, CV_32FC2);
1411 |         imgPtData2 = imgPtMat2->ptr<Point2f>();
1412 |     }
1413 | 
1414 |     Point3f* objPtData = objPtMat.ptr<Point3f>();
1415 |     Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();
1416 | 
1417 |     Mat objpt = objectPoints.getMat();
1418 |     for (int n = 0; n < ni; ++n) {
1419 |         objPtData[n] = objpt.ptr<Point3f>()[n];
1420 |     }
1421 |     for (i = 0; i < nimages; i++, j += ni) {
1422 |         Mat imgpt1 = imagePoints1.getMat(i);
1423 |         ni = imgpt1.checkVector(2, CV_32F);
1424 |         npoints.at<int>(i) = ni;
1425 |         for (int n = 0; n < ni; ++n) {
1426 |             imgPtData1[j + n] = imgpt1.ptr<Point2f>()[n];
1427 |         }
1428 | 
1429 |         if (imgPtData2) {
1430 |             Mat imgpt2 = imagePoints2.getMat(i);
1431 |             int ni2 = imgpt2.checkVector(2, CV_32F);
1432 |             CV_Assert(ni == ni2);
1433 |             for (int n = 0; n < ni2; ++n) {
1434 |                 imgPtData2[j + n] = imgpt2.ptr<Point2f>()[n];
1435 |             }
1436 |         }
1437 |     }
1438 | }
1439 | 
1440 | static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)
1441 | {
1442 |     Mat cameraMatrix = Mat::eye(3, 3, rtype);
1443 |     if (cameraMatrix0.size() == cameraMatrix.size())
1444 |         cameraMatrix0.convertTo(cameraMatrix, rtype);
1445 |     return cameraMatrix;
1446 | }
1447 | 
1448 | static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype, int outputSize = 14)
1449 | {
1450 |     CV_Assert((int)distCoeffs0.total() <= outputSize);
1451 |     Mat distCoeffs = Mat::zeros(
1452 |         distCoeffs0.cols == 1 ?
Size(1, outputSize) : Size(outputSize, 1), 1453 | rtype); 1454 | if (distCoeffs0.size() == Size(1, 4) || distCoeffs0.size() == Size(1, 5) 1455 | || distCoeffs0.size() == Size(1, 8) 1456 | || distCoeffs0.size() == Size(1, 12) 1457 | || distCoeffs0.size() == Size(1, 14) 1458 | || distCoeffs0.size() == Size(4, 1) 1459 | || distCoeffs0.size() == Size(5, 1) 1460 | || distCoeffs0.size() == Size(8, 1) 1461 | || distCoeffs0.size() == Size(12, 1) 1462 | || distCoeffs0.size() == Size(14, 1)) { 1463 | Mat dstCoeffs( 1464 | distCoeffs, Rect(0, 0, distCoeffs0.cols, distCoeffs0.rows)); 1465 | distCoeffs0.convertTo(dstCoeffs, rtype); 1466 | } 1467 | return distCoeffs; 1468 | } 1469 | 1470 | double calibrateCamera(InputArrayOfArrays _imagePoints, Size imageSize, 1471 | InputArray _objectPoints, int _fixedObjPt, InputOutputArray _cameraMatrix, 1472 | InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, 1473 | OutputArrayOfArrays _tvecs, OutputArray _newObjPoints, 1474 | OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics, 1475 | OutputArray stdDeviationsObjectPoints, OutputArray _perViewErrors, 1476 | int flags, TermCriteria criteria) 1477 | { 1478 | int rtype = CV_64F; 1479 | Mat cameraMatrix = _cameraMatrix.getMat(); 1480 | cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype); 1481 | Mat distCoeffs = _distCoeffs.getMat(); 1482 | distCoeffs 1483 | = (flags & CALIB_THIN_PRISM_MODEL) && !(flags & CALIB_TILTED_MODEL) 1484 | ? prepareDistCoeffs(distCoeffs, rtype, 12) 1485 | : prepareDistCoeffs(distCoeffs, rtype); 1486 | if (!(flags & CALIB_RATIONAL_MODEL) && (!(flags & CALIB_THIN_PRISM_MODEL)) 1487 | && (!(flags & CALIB_TILTED_MODEL))) 1488 | distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) 1489 | : distCoeffs.rowRange(0, 5); 1490 | 1491 | int nimages = int(_imagePoints.total()); 1492 | CV_Assert(nimages > 0); 1493 | Mat objPt, imgPt, npoints, rvecM, tvecM, stdDeviationsM, errorsM; 1494 | 1495 | bool rvecs_needed = _rvecs.needed(), tvecs_needed = _tvecs.needed(), 1496 | stddev_needed = stdDeviationsIntrinsics.needed(), 1497 | errors_needed = _perViewErrors.needed(), 1498 | stddev_ext_needed = stdDeviationsExtrinsics.needed(); 1499 | bool stddev_obj_needed = stdDeviationsObjectPoints.needed(); 1500 | 1501 | bool rvecs_mat_vec = _rvecs.isMatVector(); 1502 | bool tvecs_mat_vec = _tvecs.isMatVector(); 1503 | 1504 | if (rvecs_needed) { 1505 | _rvecs.create(nimages, 1, CV_64FC3); 1506 | 1507 | if (rvecs_mat_vec) 1508 | rvecM.create(nimages, 3, CV_64F); 1509 | else 1510 | rvecM = _rvecs.getMat(); 1511 | } 1512 | 1513 | if (tvecs_needed) { 1514 | _tvecs.create(nimages, 1, CV_64FC3); 1515 | 1516 | if (tvecs_mat_vec) 1517 | tvecM.create(nimages, 3, CV_64F); 1518 | else 1519 | tvecM = _tvecs.getMat(); 1520 | } 1521 | 1522 | if (errors_needed) { 1523 | _perViewErrors.create(nimages, 1, CV_64F); 1524 | errorsM = _perViewErrors.getMat(); 1525 | } 1526 | 1527 | collectCalibrationData( 1528 | _objectPoints, _imagePoints, noArray(), objPt, imgPt, 0, npoints); 1529 | Mat newObjPt = _newObjPoints.getMat(); 1530 | newObjPt.create(1, objPt.checkVector(3, CV_32F), CV_32FC3); 1531 | 1532 | bool stddev_any_needed 1533 | = stddev_needed || stddev_ext_needed || stddev_obj_needed; 1534 | int np = npoints.at(0); 1535 | if (stddev_any_needed) { 1536 | stdDeviationsM.create( 1537 | nimages * 6 + CV_CALIB_NINTRINSIC + np * 3, 1, CV_64F); 1538 | } 1539 | 1540 | CvMat c_objPt = CvMat(objPt), c_imgPt = CvMat(imgPt), 1541 | c_npoints = CvMat(npoints); 1542 | CvMat c_cameraMatrix = CvMat(cameraMatrix), 
1543 | c_distCoeffs = CvMat(distCoeffs); 1544 | CvMat c_rvecM = CvMat(rvecM), c_tvecM = CvMat(tvecM), 1545 | c_stdDev = CvMat(stdDeviationsM), c_errors = CvMat(errorsM); 1546 | CvMat c_newObjPt = CvMat(newObjPt); 1547 | 1548 | double reprojErr = calibrateCamera2Internal(&c_objPt, &c_imgPt, 1549 | &c_npoints, CvSize(imageSize), _fixedObjPt, &c_cameraMatrix, 1550 | &c_distCoeffs, &c_newObjPt, rvecs_needed ? &c_rvecM : NULL, 1551 | tvecs_needed ? &c_tvecM : NULL, stddev_any_needed ? &c_stdDev : NULL, 1552 | errors_needed ? &c_errors : NULL, flags, CvTermCriteria(criteria)); 1553 | 1554 | if (stddev_needed) { 1555 | stdDeviationsIntrinsics.create(CV_CALIB_NINTRINSIC, 1, CV_64F); 1556 | Mat stdDeviationsIntrinsicsMat = stdDeviationsIntrinsics.getMat(); 1557 | std::memcpy(stdDeviationsIntrinsicsMat.ptr(), stdDeviationsM.ptr(), 1558 | CV_CALIB_NINTRINSIC * sizeof(double)); 1559 | } 1560 | 1561 | if (stddev_ext_needed) { 1562 | stdDeviationsExtrinsics.create(nimages * 6, 1, CV_64F); 1563 | Mat stdDeviationsExtrinsicsMat = stdDeviationsExtrinsics.getMat(); 1564 | std::memcpy(stdDeviationsExtrinsicsMat.ptr(), 1565 | stdDeviationsM.ptr() + CV_CALIB_NINTRINSIC * sizeof(double), 1566 | nimages * 6 * sizeof(double)); 1567 | } 1568 | 1569 | if (stddev_obj_needed) { 1570 | stdDeviationsObjectPoints.create(np * 3, 1, CV_64F); 1571 | Mat stdDeviationsObjectPointsMat = stdDeviationsObjectPoints.getMat(); 1572 | std::memcpy(stdDeviationsObjectPointsMat.ptr(), stdDeviationsM.ptr() 1573 | + (CV_CALIB_NINTRINSIC + nimages * 6) * sizeof(double), 1574 | np * 3 * sizeof(double)); 1575 | } 1576 | 1577 | // overly complicated and inefficient rvec/ tvec handling to support 1578 | // vector 1579 | for (int i = 0; i < nimages; i++) { 1580 | if (rvecs_needed && rvecs_mat_vec) { 1581 | _rvecs.create(3, 1, CV_64F, i, true); 1582 | Mat rv = _rvecs.getMat(i); 1583 | memcpy(rv.ptr(), rvecM.ptr(i), 3 * sizeof(double)); 1584 | } 1585 | if (tvecs_needed && tvecs_mat_vec) { 1586 | _tvecs.create(3, 1, CV_64F, i, true); 1587 | Mat tv = _tvecs.getMat(i); 1588 | memcpy(tv.ptr(), tvecM.ptr(i), 3 * sizeof(double)); 1589 | } 1590 | } 1591 | 1592 | cameraMatrix.copyTo(_cameraMatrix); 1593 | distCoeffs.copyTo(_distCoeffs); 1594 | newObjPt.copyTo(_newObjPoints); 1595 | 1596 | return reprojErr; 1597 | } 1598 | 1599 | double calibrateCamera(InputArrayOfArrays _imagePoints, Size imageSize, 1600 | InputArray _objectPoints, int _fixedObjPt, InputOutputArray _cameraMatrix, 1601 | InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, 1602 | OutputArrayOfArrays _tvecs, OutputArray _newObjPoints, int flags, 1603 | TermCriteria criteria) 1604 | { 1605 | return calibrateCamera(_imagePoints, imageSize, _objectPoints, 1606 | _fixedObjPt, _cameraMatrix, _distCoeffs, _rvecs, _tvecs, 1607 | _newObjPoints, noArray(), noArray(), noArray(), noArray(), flags, 1608 | criteria); 1609 | } 1610 | 1611 | } /* end of namespace calrel */ 1612 | --------------------------------------------------------------------------------
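For reference, here is a minimal usage sketch of the `calrel::calibrateCamera()` overload declared at the end of `src/calibrel.cpp`. The board geometry, image size, fixed-point choice and the corner-detection step are illustrative assumptions only and are not part of this repository:

```
#include <cfloat>
#include <iostream>
#include <vector>

#include <opencv2/opencv.hpp>

#include "calibrel/calibrel.hpp"

int main()
{
    // Assumed 9x6 chessboard with 25 mm squares; replace with the real target.
    const cv::Size boardSize(9, 6);
    const float squareSize = 25.f;

    // Nominal (possibly imperfect) object points of the planar target.
    std::vector<cv::Point3f> objectPoints;
    for (int i = 0; i < boardSize.height; i++)
        for (int j = 0; j < boardSize.width; j++)
            objectPoints.emplace_back(j * squareSize, i * squareSize, 0.f);

    // Detected corners, one vector per view, e.g. from
    // cv::findChessboardCorners(); filled elsewhere in a real program.
    std::vector<std::vector<cv::Point2f>> imagePoints;
    cv::Size imageSize(1280, 960);
    if (imagePoints.empty())
        return 0; // nothing to calibrate in this sketch

    // Index of the object point kept fixed in addition to the first one;
    // a corner on the opposite board edge is a common choice.
    int fixedObjPt = boardSize.width - 1;

    cv::Mat cameraMatrix, distCoeffs, newObjPoints;
    std::vector<cv::Mat> rvecs, tvecs;

    double rms = calrel::calibrateCamera(imagePoints, imageSize,
        objectPoints, fixedObjPt, cameraMatrix, distCoeffs, rvecs, tvecs,
        newObjPoints, 0,
        cv::TermCriteria(
            cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON));

    std::cout << "RMS reprojection error: " << rms << std::endl;
    std::cout << "Refined object points:\n" << newObjPoints << std::endl;
    return 0;
}
```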