├── CMakeLists.txt
├── include
│   └── camodocal
│       ├── calib
│       │   └── CameraCalibration.h
│       ├── camera_models
│       │   ├── Camera.h
│       │   ├── CameraFactory.h
│       │   ├── CataCamera.h
│       │   ├── CostFunctionFactory.h
│       │   ├── EquidistantCamera.h
│       │   ├── PinholeCamera.h
│       │   └── ScaramuzzaCamera.h
│       ├── chessboard
│       │   ├── Chessboard.h
│       │   ├── ChessboardCorner.h
│       │   ├── ChessboardQuad.h
│       │   └── Spline.h
│       ├── gpl
│       │   ├── EigenQuaternionParameterization.h
│       │   ├── EigenUtils.h
│       │   └── gpl.h
│       └── sparse_graph
│           └── Transform.h
├── package.xml
├── readme.md
└── src
    ├── calib
    │   └── CameraCalibration.cc
    ├── camera_models
    │   ├── Camera.cc
    │   ├── CameraFactory.cc
    │   ├── CataCamera.cc
    │   ├── CostFunctionFactory.cc
    │   ├── EquidistantCamera.cc
    │   ├── PinholeCamera.cc
    │   └── ScaramuzzaCamera.cc
    ├── chessboard
    │   └── Chessboard.cc
    ├── gpl
    │   ├── EigenQuaternionParameterization.cc
    │   └── gpl.cc
    ├── intrinsic_calib.cc
    └── sparse_graph
        └── Transform.cc

/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(camera_model)
3 |
4 | set(CMAKE_BUILD_TYPE "Release")
5 | set(CMAKE_CXX_FLAGS "-std=c++11 -march=native")
6 | set(CMAKE_CXX_FLAGS_RELEASE "-O3 -Wall -fPIC")
7 |
8 | find_package(catkin REQUIRED COMPONENTS
9 |     roscpp
10 |     std_msgs
11 | )
12 |
13 | find_package(Boost REQUIRED COMPONENTS filesystem program_options system)
14 | include_directories(${Boost_INCLUDE_DIRS})
15 |
16 | find_package(OpenCV REQUIRED)
17 |
18 | set(EIGEN_INCLUDE_DIR "/usr/local/include/eigen3")
19 | find_package(Ceres REQUIRED)
20 | include_directories(${CERES_INCLUDE_DIRS})
21 |
22 | catkin_package(
23 |     INCLUDE_DIRS include
24 |     LIBRARIES camera_model
25 |     CATKIN_DEPENDS roscpp std_msgs
26 |     DEPENDS system_lib
27 | )
28 |
29 | include_directories(
30 |     ${catkin_INCLUDE_DIRS}
31 | )
32 |
33 | include_directories("include")
34 |
35 |
36 | add_executable(Calibration
37 |     src/intrinsic_calib.cc
38 |     src/chessboard/Chessboard.cc
39 |     src/calib/CameraCalibration.cc
40 |     src/camera_models/Camera.cc
41 |     src/camera_models/CameraFactory.cc
42 |     src/camera_models/CostFunctionFactory.cc
43 |     src/camera_models/PinholeCamera.cc
44 |     src/camera_models/CataCamera.cc
45 |     src/camera_models/EquidistantCamera.cc
46 |     src/camera_models/ScaramuzzaCamera.cc
47 |     src/sparse_graph/Transform.cc
48 |     src/gpl/gpl.cc
49 |     src/gpl/EigenQuaternionParameterization.cc)
50 |
51 | add_library(camera_model STATIC
52 |     src/chessboard/Chessboard.cc
53 |     src/calib/CameraCalibration.cc
54 |     src/camera_models/Camera.cc
55 |     src/camera_models/CameraFactory.cc
56 |     src/camera_models/CostFunctionFactory.cc
57 |     src/camera_models/PinholeCamera.cc
58 |     src/camera_models/CataCamera.cc
59 |     src/camera_models/EquidistantCamera.cc
60 |     src/camera_models/ScaramuzzaCamera.cc
61 |     src/sparse_graph/Transform.cc
62 |     src/gpl/gpl.cc
63 |     src/gpl/EigenQuaternionParameterization.cc)
64 |
65 | target_link_libraries(Calibration ${Boost_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})
66 | target_link_libraries(camera_model ${Boost_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})
67 |
--------------------------------------------------------------------------------
/include/camodocal/calib/CameraCalibration.h:
--------------------------------------------------------------------------------
1 | #ifndef CAMERACALIBRATION_H
2 | #define CAMERACALIBRATION_H
3 |
4 | #include
5 |
6 | #include "camodocal/camera_models/Camera.h"
7 |
8 | namespace camodocal
9 | {
10 |
11 | class CameraCalibration
12 | {
13 | public:
14 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW
15 | CameraCalibration();
16 |
17 |
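// Construct a calibration for the given camera model and chessboard geometry;
// boardSize is the number of interior chessboard corners per row/column and
// squareSize is the physical side length of one square (user-chosen units).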
CameraCalibration(Camera::ModelType modelType, 18 | const std::string& cameraName, 19 | const cv::Size& imageSize, 20 | const cv::Size& boardSize, 21 | float squareSize); 22 | 23 | void clear(void); 24 | 25 | void addChessboardData(const std::vector& corners); 26 | 27 | bool calibrate(void); 28 | 29 | int sampleCount(void) const; 30 | std::vector >& imagePoints(void); 31 | const std::vector >& imagePoints(void) const; 32 | std::vector >& scenePoints(void); 33 | const std::vector >& scenePoints(void) const; 34 | CameraPtr& camera(void); 35 | const CameraConstPtr camera(void) const; 36 | 37 | Eigen::Matrix2d& measurementCovariance(void); 38 | const Eigen::Matrix2d& measurementCovariance(void) const; 39 | 40 | cv::Mat& cameraPoses(void); 41 | const cv::Mat& cameraPoses(void) const; 42 | 43 | void drawResults(std::vector& images) const; 44 | 45 | void writeParams(const std::string& filename) const; 46 | 47 | bool writeChessboardData(const std::string& filename) const; 48 | bool readChessboardData(const std::string& filename); 49 | 50 | void setVerbose(bool verbose); 51 | 52 | private: 53 | bool calibrateHelper(CameraPtr& camera, 54 | std::vector& rvecs, std::vector& tvecs) const; 55 | 56 | void optimize(CameraPtr& camera, 57 | std::vector& rvecs, std::vector& tvecs) const; 58 | 59 | template 60 | void readData(std::ifstream& ifs, T& data) const; 61 | 62 | template 63 | void writeData(std::ofstream& ofs, T data) const; 64 | 65 | cv::Size m_boardSize; 66 | float m_squareSize; 67 | 68 | CameraPtr m_camera; 69 | cv::Mat m_cameraPoses; 70 | 71 | std::vector > m_imagePoints; 72 | std::vector > m_scenePoints; 73 | 74 | Eigen::Matrix2d m_measurementCovariance; 75 | 76 | bool m_verbose; 77 | }; 78 | 79 | } 80 | 81 | #endif 82 | -------------------------------------------------------------------------------- /include/camodocal/camera_models/Camera.h: -------------------------------------------------------------------------------- 1 | #ifndef CAMERA_H 2 | #define CAMERA_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | namespace camodocal 10 | { 11 | 12 | class Camera 13 | { 14 | public: 15 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 16 | enum ModelType 17 | { 18 | KANNALA_BRANDT, 19 | MEI, 20 | PINHOLE, 21 | SCARAMUZZA 22 | }; 23 | 24 | class Parameters 25 | { 26 | public: 27 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 28 | Parameters(ModelType modelType); 29 | 30 | Parameters(ModelType modelType, const std::string& cameraName, 31 | int w, int h); 32 | 33 | ModelType& modelType(void); 34 | std::string& cameraName(void); 35 | int& imageWidth(void); 36 | int& imageHeight(void); 37 | 38 | ModelType modelType(void) const; 39 | const std::string& cameraName(void) const; 40 | int imageWidth(void) const; 41 | int imageHeight(void) const; 42 | 43 | int nIntrinsics(void) const; 44 | 45 | virtual bool readFromYamlFile(const std::string& filename) = 0; 46 | virtual void writeToYamlFile(const std::string& filename) const = 0; 47 | 48 | protected: 49 | ModelType m_modelType; 50 | int m_nIntrinsics; 51 | std::string m_cameraName; 52 | int m_imageWidth; 53 | int m_imageHeight; 54 | }; 55 | 56 | virtual ModelType modelType(void) const = 0; 57 | virtual const std::string& cameraName(void) const = 0; 58 | virtual int imageWidth(void) const = 0; 59 | virtual int imageHeight(void) const = 0; 60 | 61 | virtual cv::Mat& mask(void); 62 | virtual const cv::Mat& mask(void) const; 63 | 64 | virtual void estimateIntrinsics(const cv::Size& boardSize, 65 | const std::vector< std::vector >& objectPoints, 66 | const std::vector< 
std::vector >& imagePoints) = 0; 67 | virtual void estimateExtrinsics(const std::vector& objectPoints, 68 | const std::vector& imagePoints, 69 | cv::Mat& rvec, cv::Mat& tvec) const; 70 | 71 | // Lift points from the image plane to the sphere 72 | virtual void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const = 0; 73 | //%output P 74 | 75 | // Lift points from the image plane to the projective space 76 | virtual void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const = 0; 77 | //%output P 78 | 79 | // Projects 3D points to the image plane (Pi function) 80 | virtual void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const = 0; 81 | //%output p 82 | 83 | // Projects 3D points to the image plane (Pi function) 84 | // and calculates jacobian 85 | //virtual void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, 86 | // Eigen::Matrix& J) const = 0; 87 | //%output p 88 | //%output J 89 | 90 | virtual void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const = 0; 91 | //%output p 92 | 93 | //virtual void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const = 0; 94 | virtual cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, 95 | float fx = -1.0f, float fy = -1.0f, 96 | cv::Size imageSize = cv::Size(0, 0), 97 | float cx = -1.0f, float cy = -1.0f, 98 | cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const = 0; 99 | 100 | virtual int parameterCount(void) const = 0; 101 | 102 | virtual void readParameters(const std::vector& parameters) = 0; 103 | virtual void writeParameters(std::vector& parameters) const = 0; 104 | 105 | virtual void writeParametersToYamlFile(const std::string& filename) const = 0; 106 | 107 | virtual std::string parametersToString(void) const = 0; 108 | 109 | /** 110 | * \brief Calculates the reprojection distance between points 111 | * 112 | * \param P1 first 3D point coordinates 113 | * \param P2 second 3D point coordinates 114 | * \return euclidean distance in the plane 115 | */ 116 | double reprojectionDist(const Eigen::Vector3d& P1, const Eigen::Vector3d& P2) const; 117 | 118 | double reprojectionError(const std::vector< std::vector >& objectPoints, 119 | const std::vector< std::vector >& imagePoints, 120 | const std::vector& rvecs, 121 | const std::vector& tvecs, 122 | cv::OutputArray perViewErrors = cv::noArray()) const; 123 | 124 | double reprojectionError(const Eigen::Vector3d& P, 125 | const Eigen::Quaterniond& camera_q, 126 | const Eigen::Vector3d& camera_t, 127 | const Eigen::Vector2d& observed_p) const; 128 | 129 | void projectPoints(const std::vector& objectPoints, 130 | const cv::Mat& rvec, 131 | const cv::Mat& tvec, 132 | std::vector& imagePoints) const; 133 | protected: 134 | cv::Mat m_mask; 135 | }; 136 | 137 | typedef boost::shared_ptr CameraPtr; 138 | typedef boost::shared_ptr CameraConstPtr; 139 | 140 | } 141 | 142 | #endif 143 | -------------------------------------------------------------------------------- /include/camodocal/camera_models/CameraFactory.h: -------------------------------------------------------------------------------- 1 | #ifndef CAMERAFACTORY_H 2 | #define CAMERAFACTORY_H 3 | 4 | #include 5 | #include 6 | 7 | #include "camodocal/camera_models/Camera.h" 8 | 9 | namespace camodocal 10 | { 11 | 12 | class CameraFactory 13 | { 14 | public: 15 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 16 | CameraFactory(); 17 | 18 | static boost::shared_ptr instance(void); 19 | 20 | CameraPtr generateCamera(Camera::ModelType modelType, 21 | const std::string& cameraName, 22 | 
cv::Size imageSize) const; 23 | 24 | CameraPtr generateCameraFromYamlFile(const std::string& filename); 25 | 26 | private: 27 | static boost::shared_ptr m_instance; 28 | }; 29 | 30 | } 31 | 32 | #endif 33 | -------------------------------------------------------------------------------- /include/camodocal/camera_models/CataCamera.h: -------------------------------------------------------------------------------- 1 | #ifndef CATACAMERA_H 2 | #define CATACAMERA_H 3 | 4 | #include 5 | #include 6 | 7 | #include "ceres/rotation.h" 8 | #include "Camera.h" 9 | 10 | namespace camodocal 11 | { 12 | 13 | /** 14 | * C. Mei, and P. Rives, Single View Point Omnidirectional Camera Calibration 15 | * from Planar Grids, ICRA 2007 16 | */ 17 | 18 | class CataCamera: public Camera 19 | { 20 | public: 21 | class Parameters: public Camera::Parameters 22 | { 23 | public: 24 | Parameters(); 25 | Parameters(const std::string& cameraName, 26 | int w, int h, 27 | double xi, 28 | double k1, double k2, double p1, double p2, 29 | double gamma1, double gamma2, double u0, double v0); 30 | 31 | double& xi(void); 32 | double& k1(void); 33 | double& k2(void); 34 | double& p1(void); 35 | double& p2(void); 36 | double& gamma1(void); 37 | double& gamma2(void); 38 | double& u0(void); 39 | double& v0(void); 40 | 41 | double xi(void) const; 42 | double k1(void) const; 43 | double k2(void) const; 44 | double p1(void) const; 45 | double p2(void) const; 46 | double gamma1(void) const; 47 | double gamma2(void) const; 48 | double u0(void) const; 49 | double v0(void) const; 50 | 51 | bool readFromYamlFile(const std::string& filename); 52 | void writeToYamlFile(const std::string& filename) const; 53 | 54 | Parameters& operator=(const Parameters& other); 55 | friend std::ostream& operator<< (std::ostream& out, const Parameters& params); 56 | 57 | private: 58 | double m_xi; 59 | double m_k1; 60 | double m_k2; 61 | double m_p1; 62 | double m_p2; 63 | double m_gamma1; 64 | double m_gamma2; 65 | double m_u0; 66 | double m_v0; 67 | }; 68 | 69 | CataCamera(); 70 | 71 | /** 72 | * \brief Constructor from the projection model parameters 73 | */ 74 | CataCamera(const std::string& cameraName, 75 | int imageWidth, int imageHeight, 76 | double xi, double k1, double k2, double p1, double p2, 77 | double gamma1, double gamma2, double u0, double v0); 78 | /** 79 | * \brief Constructor from the projection model parameters 80 | */ 81 | CataCamera(const Parameters& params); 82 | 83 | Camera::ModelType modelType(void) const; 84 | const std::string& cameraName(void) const; 85 | int imageWidth(void) const; 86 | int imageHeight(void) const; 87 | 88 | void estimateIntrinsics(const cv::Size& boardSize, 89 | const std::vector< std::vector >& objectPoints, 90 | const std::vector< std::vector >& imagePoints); 91 | 92 | // Lift points from the image plane to the sphere 93 | void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; 94 | //%output P 95 | 96 | // Lift points from the image plane to the projective space 97 | void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; 98 | //%output P 99 | 100 | // Projects 3D points to the image plane (Pi function) 101 | void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const; 102 | //%output p 103 | 104 | // Projects 3D points to the image plane (Pi function) 105 | // and calculates jacobian 106 | void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, 107 | Eigen::Matrix& J) const; 108 | //%output p 109 | //%output J 110 | 111 | void undistToPlane(const Eigen::Vector2d& 
p_u, Eigen::Vector2d& p) const; 112 | //%output p 113 | 114 | template 115 | static void spaceToPlane(const T* const params, 116 | const T* const q, const T* const t, 117 | const Eigen::Matrix& P, 118 | Eigen::Matrix& p); 119 | 120 | void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const; 121 | void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, 122 | Eigen::Matrix2d& J) const; 123 | 124 | void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const; 125 | cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, 126 | float fx = -1.0f, float fy = -1.0f, 127 | cv::Size imageSize = cv::Size(0, 0), 128 | float cx = -1.0f, float cy = -1.0f, 129 | cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const; 130 | 131 | int parameterCount(void) const; 132 | 133 | const Parameters& getParameters(void) const; 134 | void setParameters(const Parameters& parameters); 135 | 136 | void readParameters(const std::vector& parameterVec); 137 | void writeParameters(std::vector& parameterVec) const; 138 | 139 | void writeParametersToYamlFile(const std::string& filename) const; 140 | 141 | std::string parametersToString(void) const; 142 | 143 | private: 144 | Parameters mParameters; 145 | 146 | double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23; 147 | bool m_noDistortion; 148 | }; 149 | 150 | typedef boost::shared_ptr CataCameraPtr; 151 | typedef boost::shared_ptr CataCameraConstPtr; 152 | 153 | template 154 | void 155 | CataCamera::spaceToPlane(const T* const params, 156 | const T* const q, const T* const t, 157 | const Eigen::Matrix& P, 158 | Eigen::Matrix& p) 159 | { 160 | T P_w[3]; 161 | P_w[0] = T(P(0)); 162 | P_w[1] = T(P(1)); 163 | P_w[2] = T(P(2)); 164 | 165 | // Convert quaternion from Eigen convention (x, y, z, w) 166 | // to Ceres convention (w, x, y, z) 167 | T q_ceres[4] = {q[3], q[0], q[1], q[2]}; 168 | 169 | T P_c[3]; 170 | ceres::QuaternionRotatePoint(q_ceres, P_w, P_c); 171 | 172 | P_c[0] += t[0]; 173 | P_c[1] += t[1]; 174 | P_c[2] += t[2]; 175 | 176 | // project 3D object point to the image plane 177 | T xi = params[0]; 178 | T k1 = params[1]; 179 | T k2 = params[2]; 180 | T p1 = params[3]; 181 | T p2 = params[4]; 182 | T gamma1 = params[5]; 183 | T gamma2 = params[6]; 184 | T alpha = T(0); //cameraParams.alpha(); 185 | T u0 = params[7]; 186 | T v0 = params[8]; 187 | 188 | // Transform to model plane 189 | T len = sqrt(P_c[0] * P_c[0] + P_c[1] * P_c[1] + P_c[2] * P_c[2]); 190 | P_c[0] /= len; 191 | P_c[1] /= len; 192 | P_c[2] /= len; 193 | 194 | T u = P_c[0] / (P_c[2] + xi); 195 | T v = P_c[1] / (P_c[2] + xi); 196 | 197 | T rho_sqr = u * u + v * v; 198 | T L = T(1.0) + k1 * rho_sqr + k2 * rho_sqr * rho_sqr; 199 | T du = T(2.0) * p1 * u * v + p2 * (rho_sqr + T(2.0) * u * u); 200 | T dv = p1 * (rho_sqr + T(2.0) * v * v) + T(2.0) * p2 * u * v; 201 | 202 | u = L * u + du; 203 | v = L * v + dv; 204 | p(0) = gamma1 * (u + alpha * v) + u0; 205 | p(1) = gamma2 * v + v0; 206 | } 207 | 208 | } 209 | 210 | #endif 211 | -------------------------------------------------------------------------------- /include/camodocal/camera_models/CostFunctionFactory.h: -------------------------------------------------------------------------------- 1 | #ifndef COSTFUNCTIONFACTORY_H 2 | #define COSTFUNCTIONFACTORY_H 3 | 4 | #include 5 | #include 6 | 7 | #include "camodocal/camera_models/Camera.h" 8 | 9 | namespace ceres 10 | { 11 | class CostFunction; 12 | } 13 | 14 | namespace camodocal 15 | { 16 | 17 | enum 18 | { 19 | CAMERA_INTRINSICS = 1 << 0, 20 | CAMERA_POSE = 1 << 1, 21 
| POINT_3D = 1 << 2, 22 | ODOMETRY_INTRINSICS = 1 << 3, 23 | ODOMETRY_3D_POSE = 1 << 4, 24 | ODOMETRY_6D_POSE = 1 << 5, 25 | CAMERA_ODOMETRY_TRANSFORM = 1 << 6 26 | }; 27 | 28 | class CostFunctionFactory 29 | { 30 | public: 31 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 32 | CostFunctionFactory(); 33 | 34 | static boost::shared_ptr instance(void); 35 | 36 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, 37 | const Eigen::Vector3d& observed_P, 38 | const Eigen::Vector2d& observed_p, 39 | int flags) const; 40 | 41 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, 42 | const Eigen::Vector3d& observed_P, 43 | const Eigen::Vector2d& observed_p, 44 | const Eigen::Matrix2d& sqrtPrecisionMat, 45 | int flags) const; 46 | 47 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, 48 | const Eigen::Vector2d& observed_p, 49 | int flags, bool optimize_cam_odo_z = true) const; 50 | 51 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, 52 | const Eigen::Vector2d& observed_p, 53 | const Eigen::Matrix2d& sqrtPrecisionMat, 54 | int flags, bool optimize_cam_odo_z = true) const; 55 | 56 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, 57 | const Eigen::Vector3d& odo_pos, 58 | const Eigen::Vector3d& odo_att, 59 | const Eigen::Vector2d& observed_p, 60 | int flags, bool optimize_cam_odo_z = true) const; 61 | 62 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, 63 | const Eigen::Quaterniond& cam_odo_q, 64 | const Eigen::Vector3d& cam_odo_t, 65 | const Eigen::Vector3d& odo_pos, 66 | const Eigen::Vector3d& odo_att, 67 | const Eigen::Vector2d& observed_p, 68 | int flags) const; 69 | 70 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& cameraLeft, 71 | const CameraConstPtr& cameraRight, 72 | const Eigen::Vector3d& observed_P, 73 | const Eigen::Vector2d& observed_p_left, 74 | const Eigen::Vector2d& observed_p_right) const; 75 | 76 | private: 77 | static boost::shared_ptr m_instance; 78 | }; 79 | 80 | } 81 | 82 | #endif 83 | -------------------------------------------------------------------------------- /include/camodocal/camera_models/EquidistantCamera.h: -------------------------------------------------------------------------------- 1 | #ifndef EQUIDISTANTCAMERA_H 2 | #define EQUIDISTANTCAMERA_H 3 | 4 | #include 5 | #include 6 | 7 | #include "ceres/rotation.h" 8 | #include "Camera.h" 9 | 10 | namespace camodocal 11 | { 12 | 13 | /** 14 | * J. Kannala, and S. 
Brandt, A Generic Camera Model and Calibration Method 15 | * for Conventional, Wide-Angle, and Fish-Eye Lenses, PAMI 2006 16 | */ 17 | 18 | class EquidistantCamera: public Camera 19 | { 20 | public: 21 | class Parameters: public Camera::Parameters 22 | { 23 | public: 24 | Parameters(); 25 | Parameters(const std::string& cameraName, 26 | int w, int h, 27 | double k2, double k3, double k4, double k5, 28 | double mu, double mv, 29 | double u0, double v0); 30 | 31 | double& k2(void); 32 | double& k3(void); 33 | double& k4(void); 34 | double& k5(void); 35 | double& mu(void); 36 | double& mv(void); 37 | double& u0(void); 38 | double& v0(void); 39 | 40 | double k2(void) const; 41 | double k3(void) const; 42 | double k4(void) const; 43 | double k5(void) const; 44 | double mu(void) const; 45 | double mv(void) const; 46 | double u0(void) const; 47 | double v0(void) const; 48 | 49 | bool readFromYamlFile(const std::string& filename); 50 | void writeToYamlFile(const std::string& filename) const; 51 | 52 | Parameters& operator=(const Parameters& other); 53 | friend std::ostream& operator<< (std::ostream& out, const Parameters& params); 54 | 55 | private: 56 | // projection 57 | double m_k2; 58 | double m_k3; 59 | double m_k4; 60 | double m_k5; 61 | 62 | double m_mu; 63 | double m_mv; 64 | double m_u0; 65 | double m_v0; 66 | }; 67 | 68 | EquidistantCamera(); 69 | 70 | /** 71 | * \brief Constructor from the projection model parameters 72 | */ 73 | EquidistantCamera(const std::string& cameraName, 74 | int imageWidth, int imageHeight, 75 | double k2, double k3, double k4, double k5, 76 | double mu, double mv, 77 | double u0, double v0); 78 | /** 79 | * \brief Constructor from the projection model parameters 80 | */ 81 | EquidistantCamera(const Parameters& params); 82 | 83 | Camera::ModelType modelType(void) const; 84 | const std::string& cameraName(void) const; 85 | int imageWidth(void) const; 86 | int imageHeight(void) const; 87 | 88 | void estimateIntrinsics(const cv::Size& boardSize, 89 | const std::vector< std::vector >& objectPoints, 90 | const std::vector< std::vector >& imagePoints); 91 | 92 | // Lift points from the image plane to the sphere 93 | virtual void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; 94 | //%output P 95 | 96 | // Lift points from the image plane to the projective space 97 | void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; 98 | //%output P 99 | 100 | // Projects 3D points to the image plane (Pi function) 101 | void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const; 102 | //%output p 103 | 104 | // Projects 3D points to the image plane (Pi function) 105 | // and calculates jacobian 106 | void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, 107 | Eigen::Matrix& J) const; 108 | //%output p 109 | //%output J 110 | 111 | void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const; 112 | //%output p 113 | 114 | template 115 | static void spaceToPlane(const T* const params, 116 | const T* const q, const T* const t, 117 | const Eigen::Matrix& P, 118 | Eigen::Matrix& p); 119 | 120 | void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const; 121 | cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, 122 | float fx = -1.0f, float fy = -1.0f, 123 | cv::Size imageSize = cv::Size(0, 0), 124 | float cx = -1.0f, float cy = -1.0f, 125 | cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const; 126 | 127 | int parameterCount(void) const; 128 | 129 | const Parameters& getParameters(void) const; 
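// Intrinsic vector ordering used by the templated spaceToPlane() below:
// k2, k3, k4, k5, mu, mv, u0, v0.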
130 | void setParameters(const Parameters& parameters); 131 | 132 | void readParameters(const std::vector& parameterVec); 133 | void writeParameters(std::vector& parameterVec) const; 134 | 135 | void writeParametersToYamlFile(const std::string& filename) const; 136 | 137 | std::string parametersToString(void) const; 138 | 139 | private: 140 | template 141 | static T r(T k2, T k3, T k4, T k5, T theta); 142 | 143 | 144 | void fitOddPoly(const std::vector& x, const std::vector& y, 145 | int n, std::vector& coeffs) const; 146 | 147 | void backprojectSymmetric(const Eigen::Vector2d& p_u, 148 | double& theta, double& phi) const; 149 | 150 | Parameters mParameters; 151 | 152 | double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23; 153 | }; 154 | 155 | typedef boost::shared_ptr EquidistantCameraPtr; 156 | typedef boost::shared_ptr EquidistantCameraConstPtr; 157 | 158 | template 159 | T 160 | EquidistantCamera::r(T k2, T k3, T k4, T k5, T theta) 161 | { 162 | // k1 = 1 163 | return theta + 164 | k2 * theta * theta * theta + 165 | k3 * theta * theta * theta * theta * theta + 166 | k4 * theta * theta * theta * theta * theta * theta * theta + 167 | k5 * theta * theta * theta * theta * theta * theta * theta * theta * theta; 168 | } 169 | 170 | template 171 | void 172 | EquidistantCamera::spaceToPlane(const T* const params, 173 | const T* const q, const T* const t, 174 | const Eigen::Matrix& P, 175 | Eigen::Matrix& p) 176 | { 177 | T P_w[3]; 178 | P_w[0] = T(P(0)); 179 | P_w[1] = T(P(1)); 180 | P_w[2] = T(P(2)); 181 | 182 | // Convert quaternion from Eigen convention (x, y, z, w) 183 | // to Ceres convention (w, x, y, z) 184 | T q_ceres[4] = {q[3], q[0], q[1], q[2]}; 185 | 186 | T P_c[3]; 187 | ceres::QuaternionRotatePoint(q_ceres, P_w, P_c); 188 | 189 | P_c[0] += t[0]; 190 | P_c[1] += t[1]; 191 | P_c[2] += t[2]; 192 | 193 | // project 3D object point to the image plane; 194 | T k2 = params[0]; 195 | T k3 = params[1]; 196 | T k4 = params[2]; 197 | T k5 = params[3]; 198 | T mu = params[4]; 199 | T mv = params[5]; 200 | T u0 = params[6]; 201 | T v0 = params[7]; 202 | 203 | T len = sqrt(P_c[0] * P_c[0] + P_c[1] * P_c[1] + P_c[2] * P_c[2]); 204 | T theta = acos(P_c[2] / len); 205 | T phi = atan2(P_c[1], P_c[0]); 206 | 207 | Eigen::Matrix p_u = r(k2, k3, k4, k5, theta) * Eigen::Matrix(cos(phi), sin(phi)); 208 | 209 | p(0) = mu * p_u(0) + u0; 210 | p(1) = mv * p_u(1) + v0; 211 | } 212 | 213 | } 214 | 215 | #endif 216 | -------------------------------------------------------------------------------- /include/camodocal/camera_models/PinholeCamera.h: -------------------------------------------------------------------------------- 1 | #ifndef PINHOLECAMERA_H 2 | #define PINHOLECAMERA_H 3 | 4 | #include 5 | #include 6 | 7 | #include "ceres/rotation.h" 8 | #include "Camera.h" 9 | 10 | namespace camodocal 11 | { 12 | 13 | class PinholeCamera: public Camera 14 | { 15 | public: 16 | class Parameters: public Camera::Parameters 17 | { 18 | public: 19 | Parameters(); 20 | Parameters(const std::string& cameraName, 21 | int w, int h, 22 | double k1, double k2, double p1, double p2, 23 | double fx, double fy, double cx, double cy); 24 | 25 | double& k1(void); 26 | double& k2(void); 27 | double& p1(void); 28 | double& p2(void); 29 | double& fx(void); 30 | double& fy(void); 31 | double& cx(void); 32 | double& cy(void); 33 | 34 | double xi(void) const; 35 | double k1(void) const; 36 | double k2(void) const; 37 | double p1(void) const; 38 | double p2(void) const; 39 | double fx(void) const; 40 | double fy(void) const; 41 | double 
cx(void) const; 42 | double cy(void) const; 43 | 44 | bool readFromYamlFile(const std::string& filename); 45 | void writeToYamlFile(const std::string& filename) const; 46 | 47 | Parameters& operator=(const Parameters& other); 48 | friend std::ostream& operator<< (std::ostream& out, const Parameters& params); 49 | 50 | private: 51 | double m_k1; 52 | double m_k2; 53 | double m_p1; 54 | double m_p2; 55 | double m_fx; 56 | double m_fy; 57 | double m_cx; 58 | double m_cy; 59 | }; 60 | 61 | PinholeCamera(); 62 | 63 | /** 64 | * \brief Constructor from the projection model parameters 65 | */ 66 | PinholeCamera(const std::string& cameraName, 67 | int imageWidth, int imageHeight, 68 | double k1, double k2, double p1, double p2, 69 | double fx, double fy, double cx, double cy); 70 | /** 71 | * \brief Constructor from the projection model parameters 72 | */ 73 | PinholeCamera(const Parameters& params); 74 | 75 | Camera::ModelType modelType(void) const; 76 | const std::string& cameraName(void) const; 77 | int imageWidth(void) const; 78 | int imageHeight(void) const; 79 | 80 | void estimateIntrinsics(const cv::Size& boardSize, 81 | const std::vector< std::vector >& objectPoints, 82 | const std::vector< std::vector >& imagePoints); 83 | 84 | // Lift points from the image plane to the sphere 85 | virtual void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; 86 | //%output P 87 | 88 | // Lift points from the image plane to the projective space 89 | void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; 90 | //%output P 91 | 92 | // Projects 3D points to the image plane (Pi function) 93 | void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const; 94 | //%output p 95 | 96 | // Projects 3D points to the image plane (Pi function) 97 | // and calculates jacobian 98 | void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, 99 | Eigen::Matrix& J) const; 100 | //%output p 101 | //%output J 102 | 103 | void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const; 104 | //%output p 105 | 106 | template 107 | static void spaceToPlane(const T* const params, 108 | const T* const q, const T* const t, 109 | const Eigen::Matrix& P, 110 | Eigen::Matrix& p); 111 | 112 | void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const; 113 | void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, 114 | Eigen::Matrix2d& J) const; 115 | 116 | void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const; 117 | cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, 118 | float fx = -1.0f, float fy = -1.0f, 119 | cv::Size imageSize = cv::Size(0, 0), 120 | float cx = -1.0f, float cy = -1.0f, 121 | cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const; 122 | 123 | int parameterCount(void) const; 124 | 125 | const Parameters& getParameters(void) const; 126 | void setParameters(const Parameters& parameters); 127 | 128 | void readParameters(const std::vector& parameterVec); 129 | void writeParameters(std::vector& parameterVec) const; 130 | 131 | void writeParametersToYamlFile(const std::string& filename) const; 132 | 133 | std::string parametersToString(void) const; 134 | 135 | private: 136 | Parameters mParameters; 137 | 138 | double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23; 139 | bool m_noDistortion; 140 | }; 141 | 142 | typedef boost::shared_ptr PinholeCameraPtr; 143 | typedef boost::shared_ptr PinholeCameraConstPtr; 144 | 145 | template 146 | void 147 | PinholeCamera::spaceToPlane(const T* const params, 148 | const T* const q, 
const T* const t, 149 | const Eigen::Matrix& P, 150 | Eigen::Matrix& p) 151 | { 152 | T P_w[3]; 153 | P_w[0] = T(P(0)); 154 | P_w[1] = T(P(1)); 155 | P_w[2] = T(P(2)); 156 | 157 | // Convert quaternion from Eigen convention (x, y, z, w) 158 | // to Ceres convention (w, x, y, z) 159 | T q_ceres[4] = {q[3], q[0], q[1], q[2]}; 160 | 161 | T P_c[3]; 162 | ceres::QuaternionRotatePoint(q_ceres, P_w, P_c); 163 | 164 | P_c[0] += t[0]; 165 | P_c[1] += t[1]; 166 | P_c[2] += t[2]; 167 | 168 | // project 3D object point to the image plane 169 | T k1 = params[0]; 170 | T k2 = params[1]; 171 | T p1 = params[2]; 172 | T p2 = params[3]; 173 | T fx = params[4]; 174 | T fy = params[5]; 175 | T alpha = T(0); //cameraParams.alpha(); 176 | T cx = params[6]; 177 | T cy = params[7]; 178 | 179 | // Transform to model plane 180 | T u = P_c[0] / P_c[2]; 181 | T v = P_c[1] / P_c[2]; 182 | 183 | T rho_sqr = u * u + v * v; 184 | T L = T(1.0) + k1 * rho_sqr + k2 * rho_sqr * rho_sqr; 185 | T du = T(2.0) * p1 * u * v + p2 * (rho_sqr + T(2.0) * u * u); 186 | T dv = p1 * (rho_sqr + T(2.0) * v * v) + T(2.0) * p2 * u * v; 187 | 188 | u = L * u + du; 189 | v = L * v + dv; 190 | p(0) = fx * (u + alpha * v) + cx; 191 | p(1) = fy * v + cy; 192 | } 193 | 194 | } 195 | 196 | #endif 197 | -------------------------------------------------------------------------------- /include/camodocal/camera_models/ScaramuzzaCamera.h: -------------------------------------------------------------------------------- 1 | #ifndef SCARAMUZZACAMERA_H 2 | #define SCARAMUZZACAMERA_H 3 | 4 | #include 5 | #include 6 | 7 | #include "ceres/rotation.h" 8 | #include "Camera.h" 9 | 10 | namespace camodocal 11 | { 12 | 13 | #define SCARAMUZZA_POLY_SIZE 5 14 | #define SCARAMUZZA_INV_POLY_SIZE 6 15 | 16 | #define SCARAMUZZA_CAMERA_NUM_PARAMS (SCARAMUZZA_POLY_SIZE + SCARAMUZZA_INV_POLY_SIZE + 2 /*center*/ + 3 /*affine*/) 17 | 18 | /** 19 | * Scaramuzza Camera (Omnidirectional) 20 | * https://sites.google.com/site/scarabotix/ocamcalib-toolbox 21 | */ 22 | 23 | class OCAMCamera: public Camera 24 | { 25 | public: 26 | class Parameters: public Camera::Parameters 27 | { 28 | public: 29 | Parameters(); 30 | 31 | double& C(void) { return m_C; } 32 | double& D(void) { return m_D; } 33 | double& E(void) { return m_E; } 34 | 35 | double& center_x(void) { return m_center_x; } 36 | double& center_y(void) { return m_center_y; } 37 | 38 | double& poly(int idx) { return m_poly[idx]; } 39 | double& inv_poly(int idx) { return m_inv_poly[idx]; } 40 | 41 | double C(void) const { return m_C; } 42 | double D(void) const { return m_D; } 43 | double E(void) const { return m_E; } 44 | 45 | double center_x(void) const { return m_center_x; } 46 | double center_y(void) const { return m_center_y; } 47 | 48 | double poly(int idx) const { return m_poly[idx]; } 49 | double inv_poly(int idx) const { return m_inv_poly[idx]; } 50 | 51 | bool readFromYamlFile(const std::string& filename); 52 | void writeToYamlFile(const std::string& filename) const; 53 | 54 | Parameters& operator=(const Parameters& other); 55 | friend std::ostream& operator<< (std::ostream& out, const Parameters& params); 56 | 57 | private: 58 | double m_poly[SCARAMUZZA_POLY_SIZE]; 59 | double m_inv_poly[SCARAMUZZA_INV_POLY_SIZE]; 60 | double m_C; 61 | double m_D; 62 | double m_E; 63 | double m_center_x; 64 | double m_center_y; 65 | }; 66 | 67 | OCAMCamera(); 68 | 69 | /** 70 | * \brief Constructor from the projection model parameters 71 | */ 72 | OCAMCamera(const Parameters& params); 73 | 74 | Camera::ModelType modelType(void) 
const; 75 | const std::string& cameraName(void) const; 76 | int imageWidth(void) const; 77 | int imageHeight(void) const; 78 | 79 | void estimateIntrinsics(const cv::Size& boardSize, 80 | const std::vector< std::vector >& objectPoints, 81 | const std::vector< std::vector >& imagePoints); 82 | 83 | // Lift points from the image plane to the sphere 84 | void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; 85 | //%output P 86 | 87 | // Lift points from the image plane to the projective space 88 | void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; 89 | //%output P 90 | 91 | // Projects 3D points to the image plane (Pi function) 92 | void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const; 93 | //%output p 94 | 95 | // Projects 3D points to the image plane (Pi function) 96 | // and calculates jacobian 97 | //void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, 98 | // Eigen::Matrix& J) const; 99 | //%output p 100 | //%output J 101 | 102 | void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const; 103 | //%output p 104 | 105 | template 106 | static void spaceToPlane(const T* const params, 107 | const T* const q, const T* const t, 108 | const Eigen::Matrix& P, 109 | Eigen::Matrix& p); 110 | 111 | 112 | void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const; 113 | cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, 114 | float fx = -1.0f, float fy = -1.0f, 115 | cv::Size imageSize = cv::Size(0, 0), 116 | float cx = -1.0f, float cy = -1.0f, 117 | cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const; 118 | 119 | int parameterCount(void) const; 120 | 121 | const Parameters& getParameters(void) const; 122 | void setParameters(const Parameters& parameters); 123 | 124 | void readParameters(const std::vector& parameterVec); 125 | void writeParameters(std::vector& parameterVec) const; 126 | 127 | void writeParametersToYamlFile(const std::string& filename) const; 128 | 129 | std::string parametersToString(void) const; 130 | 131 | private: 132 | Parameters mParameters; 133 | 134 | double m_inv_scale; 135 | }; 136 | 137 | typedef boost::shared_ptr OCAMCameraPtr; 138 | typedef boost::shared_ptr OCAMCameraConstPtr; 139 | 140 | template 141 | void 142 | OCAMCamera::spaceToPlane(const T* const params, 143 | const T* const q, const T* const t, 144 | const Eigen::Matrix& P, 145 | Eigen::Matrix& p) 146 | { 147 | T P_c[3]; 148 | { 149 | T P_w[3]; 150 | P_w[0] = T(P(0)); 151 | P_w[1] = T(P(1)); 152 | P_w[2] = T(P(2)); 153 | 154 | // Convert quaternion from Eigen convention (x, y, z, w) 155 | // to Ceres convention (w, x, y, z) 156 | T q_ceres[4] = {q[3], q[0], q[1], q[2]}; 157 | 158 | ceres::QuaternionRotatePoint(q_ceres, P_w, P_c); 159 | 160 | P_c[0] += t[0]; 161 | P_c[1] += t[1]; 162 | P_c[2] += t[2]; 163 | } 164 | 165 | T c = params[0]; 166 | T d = params[1]; 167 | T e = params[2]; 168 | T xc[2] = { params[3], params[4] }; 169 | 170 | //T poly[SCARAMUZZA_POLY_SIZE]; 171 | //for (int i=0; i < SCARAMUZZA_POLY_SIZE; i++) 172 | // poly[i] = params[5+i]; 173 | 174 | T inv_poly[SCARAMUZZA_INV_POLY_SIZE]; 175 | for (int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) 176 | inv_poly[i] = params[5 + SCARAMUZZA_POLY_SIZE + i]; 177 | 178 | T norm_sqr = P_c[0] * P_c[0] + P_c[1] * P_c[1]; 179 | T norm = T(0.0); 180 | if (norm_sqr > T(0.0)) 181 | norm = sqrt(norm_sqr); 182 | 183 | T theta = atan2(-P_c[2], norm); 184 | T rho = T(0.0); 185 | T theta_i = T(1.0); 186 | 187 | for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++) 188 | { 
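// Evaluate the inverse polynomial rho(theta) = sum_i inv_poly[i] * theta^i,
// mapping the ray angle theta to a radial distance on the sensor plane.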
189 | rho += theta_i * inv_poly[i]; 190 | theta_i *= theta; 191 | } 192 | 193 | T invNorm = T(1.0) / norm; 194 | T xn[2] = { 195 | P_c[0] * invNorm * rho, 196 | P_c[1] * invNorm * rho 197 | }; 198 | 199 | p(0) = xn[0] * c + xn[1] * d + xc[0]; 200 | p(1) = xn[0] * e + xn[1] + xc[1]; 201 | } 202 | 203 | } 204 | 205 | #endif 206 | -------------------------------------------------------------------------------- /include/camodocal/chessboard/Chessboard.h: -------------------------------------------------------------------------------- 1 | #ifndef CHESSBOARD_H 2 | #define CHESSBOARD_H 3 | 4 | #include 5 | #include 6 | 7 | namespace camodocal 8 | { 9 | 10 | // forward declarations 11 | class ChessboardCorner; 12 | typedef boost::shared_ptr ChessboardCornerPtr; 13 | class ChessboardQuad; 14 | typedef boost::shared_ptr ChessboardQuadPtr; 15 | 16 | class Chessboard 17 | { 18 | public: 19 | Chessboard(cv::Size boardSize, cv::Mat& image); 20 | 21 | void findCorners(bool useOpenCV = false); 22 | const std::vector& getCorners(void) const; 23 | bool cornersFound(void) const; 24 | 25 | const cv::Mat& getImage(void) const; 26 | const cv::Mat& getSketch(void) const; 27 | 28 | private: 29 | bool findChessboardCorners(const cv::Mat& image, 30 | const cv::Size& patternSize, 31 | std::vector& corners, 32 | int flags, bool useOpenCV); 33 | 34 | bool findChessboardCornersImproved(const cv::Mat& image, 35 | const cv::Size& patternSize, 36 | std::vector& corners, 37 | int flags); 38 | 39 | void cleanFoundConnectedQuads(std::vector& quadGroup, cv::Size patternSize); 40 | 41 | void findConnectedQuads(std::vector& quads, 42 | std::vector& group, 43 | int group_idx, int dilation); 44 | 45 | // int checkQuadGroup(std::vector& quadGroup, 46 | // std::vector& outCorners, 47 | // cv::Size patternSize); 48 | 49 | void labelQuadGroup(std::vector& quad_group, 50 | cv::Size patternSize, bool firstRun); 51 | 52 | void findQuadNeighbors(std::vector& quads, int dilation); 53 | 54 | int augmentBestRun(std::vector& candidateQuads, int candidateDilation, 55 | std::vector& existingQuads, int existingDilation); 56 | 57 | void generateQuads(std::vector& quads, 58 | cv::Mat& image, int flags, 59 | int dilation, bool firstRun); 60 | 61 | bool checkQuadGroup(std::vector& quads, 62 | std::vector& corners, 63 | cv::Size patternSize); 64 | 65 | void getQuadrangleHypotheses(const std::vector< std::vector >& contours, 66 | std::vector< std::pair >& quads, 67 | int classId) const; 68 | 69 | bool checkChessboard(const cv::Mat& image, cv::Size patternSize) const; 70 | 71 | bool checkBoardMonotony(std::vector& corners, 72 | cv::Size patternSize); 73 | 74 | bool matchCorners(ChessboardQuadPtr& quad1, int corner1, 75 | ChessboardQuadPtr& quad2, int corner2) const; 76 | 77 | cv::Mat mImage; 78 | cv::Mat mSketch; 79 | std::vector mCorners; 80 | cv::Size mBoardSize; 81 | bool mCornersFound; 82 | }; 83 | 84 | } 85 | 86 | #endif 87 | -------------------------------------------------------------------------------- /include/camodocal/chessboard/ChessboardCorner.h: -------------------------------------------------------------------------------- 1 | #ifndef CHESSBOARDCORNER_H 2 | #define CHESSBOARDCORNER_H 3 | 4 | #include 5 | #include 6 | 7 | namespace camodocal 8 | { 9 | 10 | class ChessboardCorner; 11 | typedef boost::shared_ptr ChessboardCornerPtr; 12 | 13 | class ChessboardCorner 14 | { 15 | public: 16 | ChessboardCorner() : row(0), column(0), needsNeighbor(true), count(0) {} 17 | 18 | float meanDist(int &n) const 19 | { 20 | float sum = 0; 21 | n = 0; 
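// Accumulate the Euclidean distances to the (up to four) linked neighbour
// corners; the out-parameter n reports how many neighbours are actually set.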
22 | for (int i = 0; i < 4; ++i) 23 | { 24 | if (neighbors[i].get()) 25 | { 26 | float dx = neighbors[i]->pt.x - pt.x; 27 | float dy = neighbors[i]->pt.y - pt.y; 28 | sum += sqrt(dx*dx + dy*dy); 29 | n++; 30 | } 31 | } 32 | return sum / std::max(n, 1); 33 | } 34 | 35 | cv::Point2f pt; // X and y coordinates 36 | int row; // Row and column of the corner 37 | int column; // in the found pattern 38 | bool needsNeighbor; // Does the corner require a neighbor? 39 | int count; // number of corner neighbors 40 | ChessboardCornerPtr neighbors[4]; // pointer to all corner neighbors 41 | }; 42 | 43 | } 44 | 45 | #endif 46 | -------------------------------------------------------------------------------- /include/camodocal/chessboard/ChessboardQuad.h: -------------------------------------------------------------------------------- 1 | #ifndef CHESSBOARDQUAD_H 2 | #define CHESSBOARDQUAD_H 3 | 4 | #include 5 | 6 | #include "camodocal/chessboard/ChessboardCorner.h" 7 | 8 | namespace camodocal 9 | { 10 | 11 | class ChessboardQuad; 12 | typedef boost::shared_ptr ChessboardQuadPtr; 13 | 14 | class ChessboardQuad 15 | { 16 | public: 17 | ChessboardQuad() : count(0), group_idx(-1), edge_len(FLT_MAX), labeled(false) {} 18 | 19 | int count; // Number of quad neighbors 20 | int group_idx; // Quad group ID 21 | float edge_len; // Smallest side length^2 22 | ChessboardCornerPtr corners[4]; // Coordinates of quad corners 23 | ChessboardQuadPtr neighbors[4]; // Pointers of quad neighbors 24 | bool labeled; // Has this corner been labeled? 25 | }; 26 | 27 | } 28 | 29 | #endif 30 | -------------------------------------------------------------------------------- /include/camodocal/chessboard/Spline.h: -------------------------------------------------------------------------------- 1 | /* dynamo:- Event driven molecular dynamics simulator 2 | http://www.marcusbannerman.co.uk/dynamo 3 | Copyright (C) 2011 Marcus N Campbell Bannerman 4 | 5 | This program is free software: you can redistribute it and/or 6 | modify it under the terms of the GNU General Public License 7 | version 3 as published by the Free Software Foundation. 8 | 9 | This program is distributed in the hope that it will be useful, 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | GNU General Public License for more details. 13 | 14 | You should have received a copy of the GNU General Public License 15 | along with this program. If not, see . 
16 | */ 17 | 18 | #pragma once 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | namespace ublas = boost::numeric::ublas; 28 | 29 | class Spline : private std::vector > 30 | { 31 | public: 32 | //The boundary conditions available 33 | enum BC_type { 34 | FIXED_1ST_DERIV_BC, 35 | FIXED_2ND_DERIV_BC, 36 | PARABOLIC_RUNOUT_BC 37 | }; 38 | 39 | enum Spline_type { 40 | LINEAR, 41 | CUBIC 42 | }; 43 | 44 | //Constructor takes the boundary conditions as arguments, this 45 | //sets the first derivative (gradient) at the lower and upper 46 | //end points 47 | Spline(): 48 | _valid(false), 49 | _BCLow(FIXED_2ND_DERIV_BC), _BCHigh(FIXED_2ND_DERIV_BC), 50 | _BCLowVal(0), _BCHighVal(0), 51 | _type(CUBIC) 52 | {} 53 | 54 | typedef std::vector > base; 55 | typedef base::const_iterator const_iterator; 56 | 57 | //Standard STL read-only container stuff 58 | const_iterator begin() const { return base::begin(); } 59 | const_iterator end() const { return base::end(); } 60 | void clear() { _valid = false; base::clear(); _data.clear(); } 61 | size_t size() const { return base::size(); } 62 | size_t max_size() const { return base::max_size(); } 63 | size_t capacity() const { return base::capacity(); } 64 | bool empty() const { return base::empty(); } 65 | 66 | //Add a point to the spline, and invalidate it so its 67 | //recalculated on the next access 68 | inline void addPoint(double x, double y) 69 | { 70 | _valid = false; 71 | base::push_back(std::pair(x,y)); 72 | } 73 | 74 | //Reset the boundary conditions 75 | inline void setLowBC(BC_type BC, double val = 0) 76 | { _BCLow = BC; _BCLowVal = val; _valid = false; } 77 | 78 | inline void setHighBC(BC_type BC, double val = 0) 79 | { _BCHigh = BC; _BCHighVal = val; _valid = false; } 80 | 81 | void setType(Spline_type type) { _type = type; _valid = false; } 82 | 83 | //Check if the spline has been calculated, then generate the 84 | //spline interpolated value 85 | double operator()(double xval) 86 | { 87 | if (!_valid) generate(); 88 | 89 | //Special cases when we're outside the range of the spline points 90 | if (xval <= x(0)) return lowCalc(xval); 91 | if (xval >= x(size()-1)) return highCalc(xval); 92 | 93 | //Check all intervals except the last one 94 | for (std::vector::const_iterator iPtr = _data.begin(); 95 | iPtr != _data.end()-1; ++iPtr) 96 | if ((xval >= iPtr->x) && (xval <= (iPtr+1)->x)) 97 | return splineCalc(iPtr, xval); 98 | 99 | return splineCalc(_data.end() - 1, xval); 100 | } 101 | 102 | private: 103 | 104 | ///////PRIVATE DATA MEMBERS 105 | struct SplineData { double x,a,b,c,d; }; 106 | //vector of calculated spline data 107 | std::vector _data; 108 | //Second derivative at each point 109 | ublas::vector _ddy; 110 | //Tracks whether the spline parameters have been calculated for 111 | //the current set of points 112 | bool _valid; 113 | //The boundary conditions 114 | BC_type _BCLow, _BCHigh; 115 | //The values of the boundary conditions 116 | double _BCLowVal, _BCHighVal; 117 | 118 | Spline_type _type; 119 | 120 | ///////PRIVATE FUNCTIONS 121 | //Function to calculate the value of a given spline at a point xval 122 | inline double splineCalc(std::vector::const_iterator i, double xval) 123 | { 124 | const double lx = xval - i->x; 125 | return ((i->a * lx + i->b) * lx + i->c) * lx + i->d; 126 | } 127 | 128 | inline double lowCalc(double xval) 129 | { 130 | const double lx = xval - x(0); 131 | 132 | if (_type == LINEAR) 133 | return lx * _BCHighVal + y(0); 134 | 135 | const double firstDeriv = (y(1) - 
y(0)) / h(0) - 2 * h(0) * (_data[0].b + 2 * _data[1].b) / 6; 136 | 137 | switch(_BCLow) 138 | { 139 | case FIXED_1ST_DERIV_BC: 140 | return lx * _BCLowVal + y(0); 141 | case FIXED_2ND_DERIV_BC: 142 | return lx * lx * _BCLowVal + firstDeriv * lx + y(0); 143 | case PARABOLIC_RUNOUT_BC: 144 | return lx * lx * _ddy[0] + lx * firstDeriv + y(0); 145 | } 146 | throw std::runtime_error("Unknown BC"); 147 | } 148 | 149 | inline double highCalc(double xval) 150 | { 151 | const double lx = xval - x(size() - 1); 152 | 153 | if (_type == LINEAR) 154 | return lx * _BCHighVal + y(size() - 1); 155 | 156 | const double firstDeriv = 2 * h(size() - 2) * (_ddy[size() - 2] + 2 * _ddy[size() - 1]) / 6 + (y(size() - 1) - y(size() - 2)) / h(size() - 2); 157 | 158 | switch(_BCHigh) 159 | { 160 | case FIXED_1ST_DERIV_BC: 161 | return lx * _BCHighVal + y(size() - 1); 162 | case FIXED_2ND_DERIV_BC: 163 | return lx * lx * _BCHighVal + firstDeriv * lx + y(size() - 1); 164 | case PARABOLIC_RUNOUT_BC: 165 | return lx * lx * _ddy[size()-1] + lx * firstDeriv + y(size() - 1); 166 | } 167 | throw std::runtime_error("Unknown BC"); 168 | } 169 | 170 | //These just provide access to the point data in a clean way 171 | inline double x(size_t i) const { return operator[](i).first; } 172 | inline double y(size_t i) const { return operator[](i).second; } 173 | inline double h(size_t i) const { return x(i+1) - x(i); } 174 | 175 | //Invert a arbitrary matrix using the boost ublas library 176 | template 177 | bool InvertMatrix(ublas::matrix A, 178 | ublas::matrix& inverse) 179 | { 180 | using namespace ublas; 181 | 182 | // create a permutation matrix for the LU-factorization 183 | permutation_matrix pm(A.size1()); 184 | 185 | // perform LU-factorization 186 | int res = lu_factorize(A,pm); 187 | if( res != 0 ) return false; 188 | 189 | // create identity matrix of "inverse" 190 | inverse.assign(ublas::identity_matrix(A.size1())); 191 | 192 | // backsubstitute to get the inverse 193 | lu_substitute(A, pm, inverse); 194 | 195 | return true; 196 | } 197 | 198 | //This function will recalculate the spline parameters and store 199 | //them in _data, ready for spline interpolation 200 | void generate() 201 | { 202 | if (size() < 2) 203 | throw std::runtime_error("Spline requires at least 2 points"); 204 | 205 | //If any spline points are at the same x location, we have to 206 | //just slightly seperate them 207 | { 208 | bool testPassed(false); 209 | while (!testPassed) 210 | { 211 | testPassed = true; 212 | std::sort(base::begin(), base::end()); 213 | 214 | for (base::iterator iPtr = base::begin(); iPtr != base::end() - 1; ++iPtr) 215 | if (iPtr->first == (iPtr+1)->first) 216 | { 217 | if ((iPtr+1)->first != 0) 218 | (iPtr+1)->first += (iPtr+1)->first 219 | * std::numeric_limits::epsilon() * 10; 220 | else 221 | (iPtr+1)->first = std::numeric_limits::epsilon() * 10; 222 | testPassed = false; 223 | break; 224 | } 225 | } 226 | } 227 | 228 | const size_t e = size() - 1; 229 | 230 | switch (_type) 231 | { 232 | case LINEAR: 233 | { 234 | _data.resize(e); 235 | for (size_t i(0); i < e; ++i) 236 | { 237 | _data[i].x = x(i); 238 | _data[i].a = 0; 239 | _data[i].b = 0; 240 | _data[i].c = (y(i+1) - y(i)) / (x(i+1) - x(i)); 241 | _data[i].d = y(i); 242 | } 243 | break; 244 | } 245 | case CUBIC: 246 | { 247 | ublas::matrix A(size(), size()); 248 | for (size_t yv(0); yv <= e; ++yv) 249 | for (size_t xv(0); xv <= e; ++xv) 250 | A(xv,yv) = 0; 251 | 252 | for (size_t i(1); i < e; ++i) 253 | { 254 | A(i-1,i) = h(i-1); 255 | A(i,i) = 2 * (h(i-1) + h(i)); 
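// Interior rows of the cubic-spline tridiagonal system in the second
// derivatives ddy: weights h(i-1), 2*(h(i-1)+h(i)) and h(i) for
// ddy(i-1), ddy(i) and ddy(i+1) respectively.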
256 | A(i+1,i) = h(i); 257 | } 258 | 259 | ublas::vector C(size()); 260 | for (size_t xv(0); xv <= e; ++xv) 261 | C(xv) = 0; 262 | 263 | for (size_t i(1); i < e; ++i) 264 | C(i) = 6 * 265 | ((y(i+1) - y(i)) / h(i) 266 | - (y(i) - y(i-1)) / h(i-1)); 267 | 268 | //Boundary conditions 269 | switch(_BCLow) 270 | { 271 | case FIXED_1ST_DERIV_BC: 272 | C(0) = 6 * ((y(1) - y(0)) / h(0) - _BCLowVal); 273 | A(0,0) = 2 * h(0); 274 | A(1,0) = h(0); 275 | break; 276 | case FIXED_2ND_DERIV_BC: 277 | C(0) = _BCLowVal; 278 | A(0,0) = 1; 279 | break; 280 | case PARABOLIC_RUNOUT_BC: 281 | C(0) = 0; A(0,0) = 1; A(1,0) = -1; 282 | break; 283 | } 284 | 285 | switch(_BCHigh) 286 | { 287 | case FIXED_1ST_DERIV_BC: 288 | C(e) = 6 * (_BCHighVal - (y(e) - y(e-1)) / h(e-1)); 289 | A(e,e) = 2 * h(e - 1); 290 | A(e-1,e) = h(e - 1); 291 | break; 292 | case FIXED_2ND_DERIV_BC: 293 | C(e) = _BCHighVal; 294 | A(e,e) = 1; 295 | break; 296 | case PARABOLIC_RUNOUT_BC: 297 | C(e) = 0; A(e,e) = 1; A(e-1,e) = -1; 298 | break; 299 | } 300 | 301 | ublas::matrix AInv(size(), size()); 302 | InvertMatrix(A,AInv); 303 | 304 | _ddy = ublas::prod(C, AInv); 305 | 306 | _data.resize(size()-1); 307 | for (size_t i(0); i < e; ++i) 308 | { 309 | _data[i].x = x(i); 310 | _data[i].a = (_ddy(i+1) - _ddy(i)) / (6 * h(i)); 311 | _data[i].b = _ddy(i) / 2; 312 | _data[i].c = (y(i+1) - y(i)) / h(i) - _ddy(i+1) * h(i) / 6 - _ddy(i) * h(i) / 3; 313 | _data[i].d = y(i); 314 | } 315 | } 316 | } 317 | _valid = true; 318 | } 319 | }; 320 | -------------------------------------------------------------------------------- /include/camodocal/gpl/EigenQuaternionParameterization.h: -------------------------------------------------------------------------------- 1 | #ifndef EIGENQUATERNIONPARAMETERIZATION_H 2 | #define EIGENQUATERNIONPARAMETERIZATION_H 3 | 4 | #include "ceres/local_parameterization.h" 5 | 6 | namespace camodocal 7 | { 8 | 9 | class EigenQuaternionParameterization : public ceres::LocalParameterization 10 | { 11 | public: 12 | virtual ~EigenQuaternionParameterization() {} 13 | virtual bool Plus(const double* x, 14 | const double* delta, 15 | double* x_plus_delta) const; 16 | virtual bool ComputeJacobian(const double* x, 17 | double* jacobian) const; 18 | virtual int GlobalSize() const { return 4; } 19 | virtual int LocalSize() const { return 3; } 20 | 21 | private: 22 | template 23 | void EigenQuaternionProduct(const T z[4], const T w[4], T zw[4]) const; 24 | }; 25 | 26 | 27 | template 28 | void 29 | EigenQuaternionParameterization::EigenQuaternionProduct(const T z[4], const T w[4], T zw[4]) const 30 | { 31 | zw[0] = z[3] * w[0] + z[0] * w[3] + z[1] * w[2] - z[2] * w[1]; 32 | zw[1] = z[3] * w[1] - z[0] * w[2] + z[1] * w[3] + z[2] * w[0]; 33 | zw[2] = z[3] * w[2] + z[0] * w[1] - z[1] * w[0] + z[2] * w[3]; 34 | zw[3] = z[3] * w[3] - z[0] * w[0] - z[1] * w[1] - z[2] * w[2]; 35 | } 36 | 37 | } 38 | 39 | #endif 40 | 41 | -------------------------------------------------------------------------------- /include/camodocal/gpl/EigenUtils.h: -------------------------------------------------------------------------------- 1 | #ifndef EIGENUTILS_H 2 | #define EIGENUTILS_H 3 | 4 | #include 5 | 6 | #include "ceres/rotation.h" 7 | #include "camodocal/gpl/gpl.h" 8 | 9 | namespace camodocal 10 | { 11 | 12 | // Returns the 3D cross product skew symmetric matrix of a given 3D vector 13 | template 14 | Eigen::Matrix skew(const Eigen::Matrix& vec) 15 | { 16 | return (Eigen::Matrix() << T(0), -vec(2), vec(1), 17 | vec(2), T(0), -vec(0), 18 | -vec(1), vec(0), 
T(0)).finished(); 19 | } 20 | 21 | template 22 | typename Eigen::MatrixBase::PlainObject sqrtm(const Eigen::MatrixBase& A) 23 | { 24 | Eigen::SelfAdjointEigenSolver es(A); 25 | 26 | return es.operatorSqrt(); 27 | } 28 | 29 | template 30 | Eigen::Matrix AngleAxisToRotationMatrix(const Eigen::Matrix& rvec) 31 | { 32 | T angle = rvec.norm(); 33 | if (angle == T(0)) 34 | { 35 | return Eigen::Matrix::Identity(); 36 | } 37 | 38 | Eigen::Matrix axis; 39 | axis = rvec.normalized(); 40 | 41 | Eigen::Matrix rmat; 42 | rmat = Eigen::AngleAxis(angle, axis); 43 | 44 | return rmat; 45 | } 46 | 47 | template 48 | Eigen::Quaternion AngleAxisToQuaternion(const Eigen::Matrix& rvec) 49 | { 50 | Eigen::Matrix rmat = AngleAxisToRotationMatrix(rvec); 51 | 52 | return Eigen::Quaternion(rmat); 53 | } 54 | 55 | template 56 | void AngleAxisToQuaternion(const Eigen::Matrix& rvec, T* q) 57 | { 58 | Eigen::Quaternion quat = AngleAxisToQuaternion(rvec); 59 | 60 | q[0] = quat.x(); 61 | q[1] = quat.y(); 62 | q[2] = quat.z(); 63 | q[3] = quat.w(); 64 | } 65 | 66 | template 67 | Eigen::Matrix RotationToAngleAxis(const Eigen::Matrix & rmat) 68 | { 69 | Eigen::AngleAxis angleaxis; 70 | angleaxis.fromRotationMatrix(rmat); 71 | return angleaxis.angle() * angleaxis.axis(); 72 | 73 | } 74 | 75 | template 76 | void QuaternionToAngleAxis(const T* const q, Eigen::Matrix& rvec) 77 | { 78 | Eigen::Quaternion quat(q[3], q[0], q[1], q[2]); 79 | 80 | Eigen::Matrix rmat = quat.toRotationMatrix(); 81 | 82 | Eigen::AngleAxis angleaxis; 83 | angleaxis.fromRotationMatrix(rmat); 84 | 85 | rvec = angleaxis.angle() * angleaxis.axis(); 86 | } 87 | 88 | template 89 | Eigen::Matrix QuaternionToRotation(const T* const q) 90 | { 91 | T R[9]; 92 | ceres::QuaternionToRotation(q, R); 93 | 94 | Eigen::Matrix rmat; 95 | for (int i = 0; i < 3; ++i) 96 | { 97 | for (int j = 0; j < 3; ++j) 98 | { 99 | rmat(i,j) = R[i * 3 + j]; 100 | } 101 | } 102 | 103 | return rmat; 104 | } 105 | 106 | template 107 | void QuaternionToRotation(const T* const q, T* rot) 108 | { 109 | ceres::QuaternionToRotation(q, rot); 110 | } 111 | 112 | template 113 | Eigen::Matrix QuaternionMultMatLeft(const Eigen::Quaternion& q) 114 | { 115 | return (Eigen::Matrix() << q.w(), -q.z(), q.y(), q.x(), 116 | q.z(), q.w(), -q.x(), q.y(), 117 | -q.y(), q.x(), q.w(), q.z(), 118 | -q.x(), -q.y(), -q.z(), q.w()).finished(); 119 | } 120 | 121 | template 122 | Eigen::Matrix QuaternionMultMatRight(const Eigen::Quaternion& q) 123 | { 124 | return (Eigen::Matrix() << q.w(), q.z(), -q.y(), q.x(), 125 | -q.z(), q.w(), q.x(), q.y(), 126 | q.y(), -q.x(), q.w(), q.z(), 127 | -q.x(), -q.y(), -q.z(), q.w()).finished(); 128 | } 129 | 130 | /// @param theta - rotation about screw axis 131 | /// @param d - projection of tvec on the rotation axis 132 | /// @param l - screw axis direction 133 | /// @param m - screw axis moment 134 | template 135 | void AngleAxisAndTranslationToScrew(const Eigen::Matrix& rvec, 136 | const Eigen::Matrix& tvec, 137 | T& theta, T& d, 138 | Eigen::Matrix& l, 139 | Eigen::Matrix& m) 140 | { 141 | 142 | theta = rvec.norm(); 143 | if (theta == 0) 144 | { 145 | l.setZero(); 146 | m.setZero(); 147 | std::cout << "Warning: Undefined screw! Returned 0. 
" << std::endl; 148 | } 149 | 150 | l = rvec.normalized(); 151 | 152 | Eigen::Matrix t = tvec; 153 | 154 | d = t.transpose() * l; 155 | 156 | // point on screw axis - projection of origin on screw axis 157 | Eigen::Matrix c; 158 | c = 0.5 * (t - d * l + (1.0 / tan(theta / 2.0) * l).cross(t)); 159 | 160 | // c and hence the screw axis is not defined if theta is either 0 or M_PI 161 | m = c.cross(l); 162 | } 163 | 164 | template 165 | Eigen::Matrix RPY2mat(T roll, T pitch, T yaw) 166 | { 167 | Eigen::Matrix m; 168 | 169 | T cr = cos(roll); 170 | T sr = sin(roll); 171 | T cp = cos(pitch); 172 | T sp = sin(pitch); 173 | T cy = cos(yaw); 174 | T sy = sin(yaw); 175 | 176 | m(0,0) = cy * cp; 177 | m(0,1) = cy * sp * sr - sy * cr; 178 | m(0,2) = cy * sp * cr + sy * sr; 179 | m(1,0) = sy * cp; 180 | m(1,1) = sy * sp * sr + cy * cr; 181 | m(1,2) = sy * sp * cr - cy * sr; 182 | m(2,0) = - sp; 183 | m(2,1) = cp * sr; 184 | m(2,2) = cp * cr; 185 | return m; 186 | } 187 | 188 | template 189 | void mat2RPY(const Eigen::Matrix& m, T& roll, T& pitch, T& yaw) 190 | { 191 | roll = atan2(m(2,1), m(2,2)); 192 | pitch = atan2(-m(2,0), sqrt(m(2,1) * m(2,1) + m(2,2) * m(2,2))); 193 | yaw = atan2(m(1,0), m(0,0)); 194 | } 195 | 196 | template 197 | Eigen::Matrix homogeneousTransform(const Eigen::Matrix& R, const Eigen::Matrix& t) 198 | { 199 | Eigen::Matrix H; 200 | H.setIdentity(); 201 | 202 | H.block(0,0,3,3) = R; 203 | H.block(0,3,3,1) = t; 204 | 205 | return H; 206 | } 207 | 208 | template 209 | Eigen::Matrix poseWithCartesianTranslation(const T* const q, const T* const p) 210 | { 211 | Eigen::Matrix pose = Eigen::Matrix::Identity(); 212 | 213 | T rotation[9]; 214 | ceres::QuaternionToRotation(q, rotation); 215 | for (int i = 0; i < 3; ++i) 216 | { 217 | for (int j = 0; j < 3; ++j) 218 | { 219 | pose(i,j) = rotation[i * 3 + j]; 220 | } 221 | } 222 | 223 | pose(0,3) = p[0]; 224 | pose(1,3) = p[1]; 225 | pose(2,3) = p[2]; 226 | 227 | return pose; 228 | } 229 | 230 | template 231 | Eigen::Matrix poseWithSphericalTranslation(const T* const q, const T* const p, const T scale = T(1.0)) 232 | { 233 | Eigen::Matrix pose = Eigen::Matrix::Identity(); 234 | 235 | T rotation[9]; 236 | ceres::QuaternionToRotation(q, rotation); 237 | for (int i = 0; i < 3; ++i) 238 | { 239 | for (int j = 0; j < 3; ++j) 240 | { 241 | pose(i,j) = rotation[i * 3 + j]; 242 | } 243 | } 244 | 245 | T theta = p[0]; 246 | T phi = p[1]; 247 | pose(0,3) = sin(theta) * cos(phi) * scale; 248 | pose(1,3) = sin(theta) * sin(phi) * scale; 249 | pose(2,3) = cos(theta) * scale; 250 | 251 | return pose; 252 | } 253 | 254 | // Returns the Sampson error of a given essential matrix and 2 image points 255 | template 256 | T sampsonError(const Eigen::Matrix& E, 257 | const Eigen::Matrix& p1, 258 | const Eigen::Matrix& p2) 259 | { 260 | Eigen::Matrix Ex1 = E * p1; 261 | Eigen::Matrix Etx2 = E.transpose() * p2; 262 | 263 | T x2tEx1 = p2.dot(Ex1); 264 | 265 | // compute Sampson error 266 | T err = square(x2tEx1) / (square(Ex1(0,0)) + square(Ex1(1,0)) + square(Etx2(0,0)) + square(Etx2(1,0))); 267 | 268 | return err; 269 | } 270 | 271 | // Returns the Sampson error of a given rotation/translation and 2 image points 272 | template 273 | T sampsonError(const Eigen::Matrix& R, 274 | const Eigen::Matrix& t, 275 | const Eigen::Matrix& p1, 276 | const Eigen::Matrix& p2) 277 | { 278 | // construct essential matrix 279 | Eigen::Matrix E = skew(t) * R; 280 | 281 | Eigen::Matrix Ex1 = E * p1; 282 | Eigen::Matrix Etx2 = E.transpose() * p2; 283 | 284 | T x2tEx1 = p2.dot(Ex1); 285 
| 286 | // compute Sampson error 287 | T err = square(x2tEx1) / (square(Ex1(0,0)) + square(Ex1(1,0)) + square(Etx2(0,0)) + square(Etx2(1,0))); 288 | 289 | return err; 290 | } 291 | 292 | // Returns the Sampson error of a given rotation/translation and 2 image points 293 | template 294 | T sampsonError(const Eigen::Matrix& H, 295 | const Eigen::Matrix& p1, 296 | const Eigen::Matrix& p2) 297 | { 298 | Eigen::Matrix R = H.block(0, 0, 3, 3); 299 | Eigen::Matrix t = H.block(0, 3, 3, 1); 300 | 301 | return sampsonError(R, t, p1, p2); 302 | } 303 | 304 | template 305 | Eigen::Matrix 306 | transformPoint(const Eigen::Matrix& H, const Eigen::Matrix& P) 307 | { 308 | Eigen::Matrix P_trans = H.block(0, 0, 3, 3) * P + H.block(0, 3, 3, 1); 309 | 310 | return P_trans; 311 | } 312 | 313 | template 314 | Eigen::Matrix 315 | estimate3DRigidTransform(const std::vector, Eigen::aligned_allocator > >& points1, 316 | const std::vector, Eigen::aligned_allocator > >& points2) 317 | { 318 | // compute centroids 319 | Eigen::Matrix c1, c2; 320 | c1.setZero(); c2.setZero(); 321 | 322 | for (size_t i = 0; i < points1.size(); ++i) 323 | { 324 | c1 += points1.at(i); 325 | c2 += points2.at(i); 326 | } 327 | 328 | c1 /= points1.size(); 329 | c2 /= points1.size(); 330 | 331 | Eigen::Matrix X(3, points1.size()); 332 | Eigen::Matrix Y(3, points1.size()); 333 | for (size_t i = 0; i < points1.size(); ++i) 334 | { 335 | X.col(i) = points1.at(i) - c1; 336 | Y.col(i) = points2.at(i) - c2; 337 | } 338 | 339 | Eigen::Matrix H = X * Y.transpose(); 340 | 341 | Eigen::JacobiSVD< Eigen::Matrix > svd(H, Eigen::ComputeFullU | Eigen::ComputeFullV); 342 | 343 | Eigen::Matrix U = svd.matrixU(); 344 | Eigen::Matrix V = svd.matrixV(); 345 | if (U.determinant() * V.determinant() < 0.0) 346 | { 347 | V.col(2) *= -1.0; 348 | } 349 | 350 | Eigen::Matrix R = V * U.transpose(); 351 | Eigen::Matrix t = c2 - R * c1; 352 | 353 | return homogeneousTransform(R, t); 354 | } 355 | 356 | template 357 | Eigen::Matrix 358 | estimate3DRigidSimilarityTransform(const std::vector, Eigen::aligned_allocator > >& points1, 359 | const std::vector, Eigen::aligned_allocator > >& points2) 360 | { 361 | // compute centroids 362 | Eigen::Matrix c1, c2; 363 | c1.setZero(); c2.setZero(); 364 | 365 | for (size_t i = 0; i < points1.size(); ++i) 366 | { 367 | c1 += points1.at(i); 368 | c2 += points2.at(i); 369 | } 370 | 371 | c1 /= points1.size(); 372 | c2 /= points1.size(); 373 | 374 | Eigen::Matrix X(3, points1.size()); 375 | Eigen::Matrix Y(3, points1.size()); 376 | for (size_t i = 0; i < points1.size(); ++i) 377 | { 378 | X.col(i) = points1.at(i) - c1; 379 | Y.col(i) = points2.at(i) - c2; 380 | } 381 | 382 | Eigen::Matrix H = X * Y.transpose(); 383 | 384 | Eigen::JacobiSVD< Eigen::Matrix > svd(H, Eigen::ComputeFullU | Eigen::ComputeFullV); 385 | 386 | Eigen::Matrix U = svd.matrixU(); 387 | Eigen::Matrix V = svd.matrixV(); 388 | if (U.determinant() * V.determinant() < 0.0) 389 | { 390 | V.col(2) *= -1.0; 391 | } 392 | 393 | Eigen::Matrix R = V * U.transpose(); 394 | 395 | std::vector, Eigen::aligned_allocator > > rotatedPoints1(points1.size()); 396 | for (size_t i = 0; i < points1.size(); ++i) 397 | { 398 | rotatedPoints1.at(i) = R * (points1.at(i) - c1); 399 | } 400 | 401 | double sum_ss = 0.0, sum_tt = 0.0; 402 | for (size_t i = 0; i < points1.size(); ++i) 403 | { 404 | sum_ss += (points1.at(i) - c1).squaredNorm(); 405 | sum_tt += (points2.at(i) - c2).dot(rotatedPoints1.at(i)); 406 | } 407 | 408 | double scale = sum_tt / sum_ss; 409 | 410 | Eigen::Matrix sR = scale * R; 
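// Note (added): the scale computed above is the closed-form least-squares
// estimate sum_tt / sum_ss (a Horn/Umeyama-style absolute-orientation
// solution); the translation computed next then maps the centroid of
// points1 onto the centroid of points2, i.e. t = c2 - s * R * c1.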
411 | Eigen::Matrix t = c2 - sR * c1; 412 | 413 | return homogeneousTransform(sR, t); 414 | } 415 | 416 | } 417 | 418 | #endif 419 | -------------------------------------------------------------------------------- /include/camodocal/gpl/gpl.h: -------------------------------------------------------------------------------- 1 | #ifndef GPL_H 2 | #define GPL_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | namespace camodocal 9 | { 10 | 11 | template 12 | const T clamp(const T& v, const T& a, const T& b) 13 | { 14 | return std::min(b, std::max(a, v)); 15 | } 16 | 17 | double hypot3(double x, double y, double z); 18 | float hypot3f(float x, float y, float z); 19 | 20 | template 21 | const T normalizeTheta(const T& theta) 22 | { 23 | T normTheta = theta; 24 | 25 | while (normTheta < - M_PI) 26 | { 27 | normTheta += 2.0 * M_PI; 28 | } 29 | while (normTheta > M_PI) 30 | { 31 | normTheta -= 2.0 * M_PI; 32 | } 33 | 34 | return normTheta; 35 | } 36 | 37 | double d2r(double deg); 38 | float d2r(float deg); 39 | double r2d(double rad); 40 | float r2d(float rad); 41 | 42 | double sinc(double theta); 43 | 44 | template 45 | const T square(const T& x) 46 | { 47 | return x * x; 48 | } 49 | 50 | template 51 | const T cube(const T& x) 52 | { 53 | return x * x * x; 54 | } 55 | 56 | template 57 | const T random(const T& a, const T& b) 58 | { 59 | return static_cast(rand()) / RAND_MAX * (b - a) + a; 60 | } 61 | 62 | template 63 | const T randomNormal(const T& sigma) 64 | { 65 | T x1, x2, w; 66 | 67 | do 68 | { 69 | x1 = 2.0 * random(0.0, 1.0) - 1.0; 70 | x2 = 2.0 * random(0.0, 1.0) - 1.0; 71 | w = x1 * x1 + x2 * x2; 72 | } 73 | while (w >= 1.0 || w == 0.0); 74 | 75 | w = sqrt((-2.0 * log(w)) / w); 76 | 77 | return x1 * w * sigma; 78 | } 79 | 80 | unsigned long long timeInMicroseconds(void); 81 | 82 | double timeInSeconds(void); 83 | 84 | void colorDepthImage(cv::Mat& imgDepth, 85 | cv::Mat& imgColoredDepth, 86 | float minRange, float maxRange); 87 | 88 | bool colormap(const std::string& name, unsigned char idx, 89 | float& r, float& g, float& b); 90 | 91 | std::vector bresLine(int x0, int y0, int x1, int y1); 92 | std::vector bresCircle(int x0, int y0, int r); 93 | 94 | void fitCircle(const std::vector& points, 95 | double& centerX, double& centerY, double& radius); 96 | 97 | std::vector intersectCircles(double x1, double y1, double r1, 98 | double x2, double y2, double r2); 99 | 100 | void LLtoUTM(double latitude, double longitude, 101 | double& utmNorthing, double& utmEasting, 102 | std::string& utmZone); 103 | void UTMtoLL(double utmNorthing, double utmEasting, 104 | const std::string& utmZone, 105 | double& latitude, double& longitude); 106 | 107 | long int timestampDiff(uint64_t t1, uint64_t t2); 108 | 109 | } 110 | 111 | #endif 112 | -------------------------------------------------------------------------------- /include/camodocal/sparse_graph/Transform.h: -------------------------------------------------------------------------------- 1 | #ifndef TRANSFORM_H 2 | #define TRANSFORM_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | namespace camodocal 9 | { 10 | 11 | class Transform 12 | { 13 | public: 14 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 15 | 16 | Transform(); 17 | Transform(const Eigen::Matrix4d& H); 18 | 19 | Eigen::Quaterniond& rotation(void); 20 | const Eigen::Quaterniond& rotation(void) const; 21 | double* rotationData(void); 22 | const double* const rotationData(void) const; 23 | 24 | Eigen::Vector3d& translation(void); 25 | const Eigen::Vector3d& translation(void) const; 26 | double* 
translationData(void); 27 | const double* const translationData(void) const; 28 | 29 | Eigen::Matrix4d toMatrix(void) const; 30 | 31 | private: 32 | Eigen::Quaterniond m_q; 33 | Eigen::Vector3d m_t; 34 | }; 35 | 36 | } 37 | 38 | #endif 39 | -------------------------------------------------------------------------------- /package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | camera_model 4 | 0.0.0 5 | The camera_model package 6 | 7 | 8 | 9 | 10 | dvorak 11 | 12 | 13 | 14 | 15 | 16 | TODO 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | catkin 43 | roscpp 44 | std_msgs 45 | roscpp 46 | std_msgs 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | part of [camodocal](https://github.com/hengli/camodocal) 2 | 3 | [Google Ceres](http://ceres-solver.org) is needed. 4 | 5 | # Calibration: 6 | 7 | Use [intrinsic_calib.cc](https://github.com/dvorak0/camera_model/blob/master/src/intrinsic_calib.cc) to calibrate your camera. 8 | 9 | # Undistortion: 10 | 11 | See [Camera.h](https://github.com/dvorak0/camera_model/blob/master/include/camodocal/camera_models/Camera.h) for general interface: 12 | 13 | - liftProjective: Lift points from the image plane to the projective space. 14 | - spaceToPlane: Projects 3D points to the image plane (Pi function) 15 | 16 | -------------------------------------------------------------------------------- /src/calib/CameraCalibration.cc: -------------------------------------------------------------------------------- 1 | #include "camodocal/calib/CameraCalibration.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include "camodocal/camera_models/CameraFactory.h" 15 | #include "camodocal/sparse_graph/Transform.h" 16 | #include "camodocal/gpl/EigenQuaternionParameterization.h" 17 | #include "camodocal/gpl/EigenUtils.h" 18 | #include "camodocal/camera_models/CostFunctionFactory.h" 19 | 20 | #include "ceres/ceres.h" 21 | namespace camodocal 22 | { 23 | 24 | CameraCalibration::CameraCalibration() 25 | : m_boardSize(cv::Size(0,0)) 26 | , m_squareSize(0.0f) 27 | , m_verbose(false) 28 | { 29 | 30 | } 31 | 32 | CameraCalibration::CameraCalibration(const Camera::ModelType modelType, 33 | const std::string& cameraName, 34 | const cv::Size& imageSize, 35 | const cv::Size& boardSize, 36 | float squareSize) 37 | : m_boardSize(boardSize) 38 | , m_squareSize(squareSize) 39 | , m_verbose(false) 40 | { 41 | m_camera = CameraFactory::instance()->generateCamera(modelType, cameraName, imageSize); 42 | } 43 | 44 | void 45 | CameraCalibration::clear(void) 46 | { 47 | m_imagePoints.clear(); 48 | m_scenePoints.clear(); 49 | } 50 | 51 | void 52 | CameraCalibration::addChessboardData(const std::vector& corners) 53 | { 54 | m_imagePoints.push_back(corners); 55 | 56 | std::vector scenePointsInView; 57 | for (int i = 0; i < m_boardSize.height; ++i) 58 | { 59 | for (int j = 0; j < m_boardSize.width; ++j) 60 | { 61 | scenePointsInView.push_back(cv::Point3f(i * m_squareSize, j * m_squareSize, 0.0)); 62 | } 63 | } 64 | m_scenePoints.push_back(scenePointsInView); 65 | } 66 | 67 | bool 68 | CameraCalibration::calibrate(void) 69 | { 70 | int imageCount = m_imagePoints.size(); 71 | 72 | // compute intrinsic camera 
parameters and extrinsic parameters for each of the views 73 | std::vector rvecs; 74 | std::vector tvecs; 75 | bool ret = calibrateHelper(m_camera, rvecs, tvecs); 76 | 77 | m_cameraPoses = cv::Mat(imageCount, 6, CV_64F); 78 | for (int i = 0; i < imageCount; ++i) 79 | { 80 | m_cameraPoses.at(i,0) = rvecs.at(i).at(0); 81 | m_cameraPoses.at(i,1) = rvecs.at(i).at(1); 82 | m_cameraPoses.at(i,2) = rvecs.at(i).at(2); 83 | m_cameraPoses.at(i,3) = tvecs.at(i).at(0); 84 | m_cameraPoses.at(i,4) = tvecs.at(i).at(1); 85 | m_cameraPoses.at(i,5) = tvecs.at(i).at(2); 86 | } 87 | 88 | // Compute measurement covariance. 89 | std::vector > errVec(m_imagePoints.size()); 90 | Eigen::Vector2d errSum = Eigen::Vector2d::Zero(); 91 | size_t errCount = 0; 92 | for (size_t i = 0; i < m_imagePoints.size(); ++i) 93 | { 94 | std::vector estImagePoints; 95 | m_camera->projectPoints(m_scenePoints.at(i), rvecs.at(i), tvecs.at(i), 96 | estImagePoints); 97 | 98 | for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) 99 | { 100 | cv::Point2f pObs = m_imagePoints.at(i).at(j); 101 | cv::Point2f pEst = estImagePoints.at(j); 102 | 103 | cv::Point2f err = pObs - pEst; 104 | 105 | errVec.at(i).push_back(err); 106 | 107 | errSum += Eigen::Vector2d(err.x, err.y); 108 | } 109 | 110 | errCount += m_imagePoints.at(i).size(); 111 | } 112 | 113 | Eigen::Vector2d errMean = errSum / static_cast(errCount); 114 | 115 | Eigen::Matrix2d measurementCovariance = Eigen::Matrix2d::Zero(); 116 | for (size_t i = 0; i < errVec.size(); ++i) 117 | { 118 | for (size_t j = 0; j < errVec.at(i).size(); ++j) 119 | { 120 | cv::Point2f err = errVec.at(i).at(j); 121 | double d0 = err.x - errMean(0); 122 | double d1 = err.y - errMean(1); 123 | 124 | measurementCovariance(0,0) += d0 * d0; 125 | measurementCovariance(0,1) += d0 * d1; 126 | measurementCovariance(1,1) += d1 * d1; 127 | } 128 | } 129 | measurementCovariance /= static_cast(errCount); 130 | measurementCovariance(1,0) = measurementCovariance(0,1); 131 | 132 | m_measurementCovariance = measurementCovariance; 133 | 134 | return ret; 135 | } 136 | 137 | int 138 | CameraCalibration::sampleCount(void) const 139 | { 140 | return m_imagePoints.size(); 141 | } 142 | 143 | std::vector >& 144 | CameraCalibration::imagePoints(void) 145 | { 146 | return m_imagePoints; 147 | } 148 | 149 | const std::vector >& 150 | CameraCalibration::imagePoints(void) const 151 | { 152 | return m_imagePoints; 153 | } 154 | 155 | std::vector >& 156 | CameraCalibration::scenePoints(void) 157 | { 158 | return m_scenePoints; 159 | } 160 | 161 | const std::vector >& 162 | CameraCalibration::scenePoints(void) const 163 | { 164 | return m_scenePoints; 165 | } 166 | 167 | CameraPtr& 168 | CameraCalibration::camera(void) 169 | { 170 | return m_camera; 171 | } 172 | 173 | const CameraConstPtr 174 | CameraCalibration::camera(void) const 175 | { 176 | return m_camera; 177 | } 178 | 179 | Eigen::Matrix2d& 180 | CameraCalibration::measurementCovariance(void) 181 | { 182 | return m_measurementCovariance; 183 | } 184 | 185 | const Eigen::Matrix2d& 186 | CameraCalibration::measurementCovariance(void) const 187 | { 188 | return m_measurementCovariance; 189 | } 190 | 191 | cv::Mat& 192 | CameraCalibration::cameraPoses(void) 193 | { 194 | return m_cameraPoses; 195 | } 196 | 197 | const cv::Mat& 198 | CameraCalibration::cameraPoses(void) const 199 | { 200 | return m_cameraPoses; 201 | } 202 | 203 | void 204 | CameraCalibration::drawResults(std::vector& images) const 205 | { 206 | std::vector rvecs, tvecs; 207 | 208 | for (size_t i = 0; i < 
images.size(); ++i) 209 | { 210 | cv::Mat rvec(3, 1, CV_64F); 211 | rvec.at(0) = m_cameraPoses.at(i,0); 212 | rvec.at(1) = m_cameraPoses.at(i,1); 213 | rvec.at(2) = m_cameraPoses.at(i,2); 214 | 215 | cv::Mat tvec(3, 1, CV_64F); 216 | tvec.at(0) = m_cameraPoses.at(i,3); 217 | tvec.at(1) = m_cameraPoses.at(i,4); 218 | tvec.at(2) = m_cameraPoses.at(i,5); 219 | 220 | rvecs.push_back(rvec); 221 | tvecs.push_back(tvec); 222 | } 223 | 224 | int drawShiftBits = 4; 225 | int drawMultiplier = 1 << drawShiftBits; 226 | 227 | cv::Scalar green(0, 255, 0); 228 | cv::Scalar red(0, 0, 255); 229 | 230 | for (size_t i = 0; i < images.size(); ++i) 231 | { 232 | cv::Mat& image = images.at(i); 233 | if (image.channels() == 1) 234 | { 235 | cv::cvtColor(image, image, CV_GRAY2RGB); 236 | } 237 | 238 | std::vector estImagePoints; 239 | m_camera->projectPoints(m_scenePoints.at(i), rvecs.at(i), tvecs.at(i), 240 | estImagePoints); 241 | 242 | float errorSum = 0.0f; 243 | float errorMax = std::numeric_limits::min(); 244 | 245 | for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) 246 | { 247 | cv::Point2f pObs = m_imagePoints.at(i).at(j); 248 | cv::Point2f pEst = estImagePoints.at(j); 249 | 250 | cv::circle(image, 251 | cv::Point(cvRound(pObs.x * drawMultiplier), 252 | cvRound(pObs.y * drawMultiplier)), 253 | 5, green, 2, CV_AA, drawShiftBits); 254 | 255 | cv::circle(image, 256 | cv::Point(cvRound(pEst.x * drawMultiplier), 257 | cvRound(pEst.y * drawMultiplier)), 258 | 5, red, 2, CV_AA, drawShiftBits); 259 | 260 | float error = cv::norm(pObs - pEst); 261 | 262 | errorSum += error; 263 | if (error > errorMax) 264 | { 265 | errorMax = error; 266 | } 267 | } 268 | 269 | std::ostringstream oss; 270 | oss << "Reprojection error: avg = " << errorSum / m_imagePoints.at(i).size() 271 | << " max = " << errorMax; 272 | 273 | cv::putText(image, oss.str(), cv::Point(10, image.rows - 10), 274 | cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(255, 255, 255), 275 | 1, CV_AA); 276 | } 277 | } 278 | 279 | void 280 | CameraCalibration::writeParams(const std::string& filename) const 281 | { 282 | m_camera->writeParametersToYamlFile(filename); 283 | } 284 | 285 | bool 286 | CameraCalibration::writeChessboardData(const std::string& filename) const 287 | { 288 | std::ofstream ofs(filename.c_str(), std::ios::out | std::ios::binary); 289 | if (!ofs.is_open()) 290 | { 291 | return false; 292 | } 293 | 294 | writeData(ofs, m_boardSize.width); 295 | writeData(ofs, m_boardSize.height); 296 | writeData(ofs, m_squareSize); 297 | 298 | writeData(ofs, m_measurementCovariance(0,0)); 299 | writeData(ofs, m_measurementCovariance(0,1)); 300 | writeData(ofs, m_measurementCovariance(1,0)); 301 | writeData(ofs, m_measurementCovariance(1,1)); 302 | 303 | writeData(ofs, m_cameraPoses.rows); 304 | writeData(ofs, m_cameraPoses.cols); 305 | writeData(ofs, m_cameraPoses.type()); 306 | for (int i = 0; i < m_cameraPoses.rows; ++i) 307 | { 308 | for (int j = 0; j < m_cameraPoses.cols; ++j) 309 | { 310 | writeData(ofs, m_cameraPoses.at(i,j)); 311 | } 312 | } 313 | 314 | writeData(ofs, m_imagePoints.size()); 315 | for (size_t i = 0; i < m_imagePoints.size(); ++i) 316 | { 317 | writeData(ofs, m_imagePoints.at(i).size()); 318 | for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) 319 | { 320 | const cv::Point2f& ipt = m_imagePoints.at(i).at(j); 321 | 322 | writeData(ofs, ipt.x); 323 | writeData(ofs, ipt.y); 324 | } 325 | } 326 | 327 | writeData(ofs, m_scenePoints.size()); 328 | for (size_t i = 0; i < m_scenePoints.size(); ++i) 329 | { 330 | writeData(ofs, 
m_scenePoints.at(i).size()); 331 | for (size_t j = 0; j < m_scenePoints.at(i).size(); ++j) 332 | { 333 | const cv::Point3f& spt = m_scenePoints.at(i).at(j); 334 | 335 | writeData(ofs, spt.x); 336 | writeData(ofs, spt.y); 337 | writeData(ofs, spt.z); 338 | } 339 | } 340 | 341 | return true; 342 | } 343 | 344 | bool 345 | CameraCalibration::readChessboardData(const std::string& filename) 346 | { 347 | std::ifstream ifs(filename.c_str(), std::ios::in | std::ios::binary); 348 | if (!ifs.is_open()) 349 | { 350 | return false; 351 | } 352 | 353 | readData(ifs, m_boardSize.width); 354 | readData(ifs, m_boardSize.height); 355 | readData(ifs, m_squareSize); 356 | 357 | readData(ifs, m_measurementCovariance(0,0)); 358 | readData(ifs, m_measurementCovariance(0,1)); 359 | readData(ifs, m_measurementCovariance(1,0)); 360 | readData(ifs, m_measurementCovariance(1,1)); 361 | 362 | int rows, cols, type; 363 | readData(ifs, rows); 364 | readData(ifs, cols); 365 | readData(ifs, type); 366 | m_cameraPoses = cv::Mat(rows, cols, type); 367 | 368 | for (int i = 0; i < m_cameraPoses.rows; ++i) 369 | { 370 | for (int j = 0; j < m_cameraPoses.cols; ++j) 371 | { 372 | readData(ifs, m_cameraPoses.at(i,j)); 373 | } 374 | } 375 | 376 | size_t nImagePointSets; 377 | readData(ifs, nImagePointSets); 378 | 379 | m_imagePoints.clear(); 380 | m_imagePoints.resize(nImagePointSets); 381 | for (size_t i = 0; i < m_imagePoints.size(); ++i) 382 | { 383 | size_t nImagePoints; 384 | readData(ifs, nImagePoints); 385 | m_imagePoints.at(i).resize(nImagePoints); 386 | 387 | for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) 388 | { 389 | cv::Point2f& ipt = m_imagePoints.at(i).at(j); 390 | readData(ifs, ipt.x); 391 | readData(ifs, ipt.y); 392 | } 393 | } 394 | 395 | size_t nScenePointSets; 396 | readData(ifs, nScenePointSets); 397 | 398 | m_scenePoints.clear(); 399 | m_scenePoints.resize(nScenePointSets); 400 | for (size_t i = 0; i < m_scenePoints.size(); ++i) 401 | { 402 | size_t nScenePoints; 403 | readData(ifs, nScenePoints); 404 | m_scenePoints.at(i).resize(nScenePoints); 405 | 406 | for (size_t j = 0; j < m_scenePoints.at(i).size(); ++j) 407 | { 408 | cv::Point3f& spt = m_scenePoints.at(i).at(j); 409 | readData(ifs, spt.x); 410 | readData(ifs, spt.y); 411 | readData(ifs, spt.z); 412 | } 413 | } 414 | 415 | return true; 416 | } 417 | 418 | void 419 | CameraCalibration::setVerbose(bool verbose) 420 | { 421 | m_verbose = verbose; 422 | } 423 | 424 | bool 425 | CameraCalibration::calibrateHelper(CameraPtr& camera, 426 | std::vector& rvecs, std::vector& tvecs) const 427 | { 428 | rvecs.assign(m_scenePoints.size(), cv::Mat()); 429 | tvecs.assign(m_scenePoints.size(), cv::Mat()); 430 | 431 | // STEP 1: Estimate intrinsics 432 | camera->estimateIntrinsics(m_boardSize, m_scenePoints, m_imagePoints); 433 | 434 | // STEP 2: Estimate extrinsics 435 | for (size_t i = 0; i < m_scenePoints.size(); ++i) 436 | { 437 | camera->estimateExtrinsics(m_scenePoints.at(i), m_imagePoints.at(i), rvecs.at(i), tvecs.at(i)); 438 | } 439 | 440 | if (m_verbose) 441 | { 442 | std::cout << "[" << camera->cameraName() << "] " 443 | << "# INFO: " << "Initial reprojection error: " 444 | << std::fixed << std::setprecision(3) 445 | << camera->reprojectionError(m_scenePoints, m_imagePoints, rvecs, tvecs) 446 | << " pixels" << std::endl; 447 | } 448 | 449 | // STEP 3: optimization using ceres 450 | optimize(camera, rvecs, tvecs); 451 | 452 | if (m_verbose) 453 | { 454 | double err = camera->reprojectionError(m_scenePoints, m_imagePoints, rvecs, tvecs); 455 | 
std::cout << "[" << camera->cameraName() << "] " << "# INFO: Final reprojection error: " 456 | << err << " pixels" << std::endl; 457 | std::cout << "[" << camera->cameraName() << "] " << "# INFO: " 458 | << camera->parametersToString() << std::endl; 459 | } 460 | 461 | return true; 462 | } 463 | 464 | void 465 | CameraCalibration::optimize(CameraPtr& camera, 466 | std::vector& rvecs, std::vector& tvecs) const 467 | { 468 | // Use ceres to do optimization 469 | ceres::Problem problem; 470 | 471 | std::vector > transformVec(rvecs.size()); 472 | for (size_t i = 0; i < rvecs.size(); ++i) 473 | { 474 | Eigen::Vector3d rvec; 475 | cv::cv2eigen(rvecs.at(i), rvec); 476 | 477 | transformVec.at(i).rotation() = Eigen::AngleAxisd(rvec.norm(), rvec.normalized()); 478 | transformVec.at(i).translation() << tvecs[i].at(0), 479 | tvecs[i].at(1), 480 | tvecs[i].at(2); 481 | } 482 | 483 | std::vector intrinsicCameraParams; 484 | m_camera->writeParameters(intrinsicCameraParams); 485 | 486 | // create residuals for each observation 487 | for (size_t i = 0; i < m_imagePoints.size(); ++i) 488 | { 489 | for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) 490 | { 491 | const cv::Point3f& spt = m_scenePoints.at(i).at(j); 492 | const cv::Point2f& ipt = m_imagePoints.at(i).at(j); 493 | 494 | ceres::CostFunction* costFunction = 495 | CostFunctionFactory::instance()->generateCostFunction(camera, 496 | Eigen::Vector3d(spt.x, spt.y, spt.z), 497 | Eigen::Vector2d(ipt.x, ipt.y), 498 | CAMERA_INTRINSICS | CAMERA_POSE); 499 | 500 | ceres::LossFunction* lossFunction = new ceres::CauchyLoss(1.0); 501 | problem.AddResidualBlock(costFunction, lossFunction, 502 | intrinsicCameraParams.data(), 503 | transformVec.at(i).rotationData(), 504 | transformVec.at(i).translationData()); 505 | } 506 | 507 | ceres::LocalParameterization* quaternionParameterization = 508 | new EigenQuaternionParameterization; 509 | 510 | problem.SetParameterization(transformVec.at(i).rotationData(), 511 | quaternionParameterization); 512 | } 513 | 514 | std::cout << "begin ceres" << std::endl; 515 | ceres::Solver::Options options; 516 | options.max_num_iterations = 1000; 517 | options.num_threads = 1; 518 | 519 | if (m_verbose) 520 | { 521 | options.minimizer_progress_to_stdout = true; 522 | } 523 | 524 | ceres::Solver::Summary summary; 525 | ceres::Solve(options, &problem, &summary); 526 | std::cout << "end ceres" << std::endl; 527 | 528 | if (m_verbose) 529 | { 530 | std::cout << summary.FullReport() << std::endl; 531 | } 532 | 533 | camera->readParameters(intrinsicCameraParams); 534 | 535 | for (size_t i = 0; i < rvecs.size(); ++i) 536 | { 537 | Eigen::AngleAxisd aa(transformVec.at(i).rotation()); 538 | 539 | Eigen::Vector3d rvec = aa.angle() * aa.axis(); 540 | cv::eigen2cv(rvec, rvecs.at(i)); 541 | 542 | cv::Mat& tvec = tvecs.at(i); 543 | tvec.at(0) = transformVec.at(i).translation()(0); 544 | tvec.at(1) = transformVec.at(i).translation()(1); 545 | tvec.at(2) = transformVec.at(i).translation()(2); 546 | } 547 | } 548 | 549 | template 550 | void 551 | CameraCalibration::readData(std::ifstream& ifs, T& data) const 552 | { 553 | char* buffer = new char[sizeof(T)]; 554 | 555 | ifs.read(buffer, sizeof(T)); 556 | 557 | data = *(reinterpret_cast(buffer)); 558 | 559 | delete buffer; 560 | } 561 | 562 | template 563 | void 564 | CameraCalibration::writeData(std::ofstream& ofs, T data) const 565 | { 566 | char* pData = reinterpret_cast(&data); 567 | 568 | ofs.write(pData, sizeof(T)); 569 | } 570 | 571 | } 572 | 
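The CameraCalibration class above exposes a small workflow: construct it with a model type, board geometry and image size, feed it one set of detected chessboard corners per view via addChessboardData(), then call calibrate() and writeParams(). Below is a minimal sketch of that workflow, not part of this repository; the image list, board dimensions, output file name and the use of cv::findChessboardCorners for corner detection are assumptions (intrinsic_calib.cc, referenced in the readme, is the full command-line tool).

#include <string>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "camodocal/calib/CameraCalibration.h"

int main()
{
    using namespace camodocal;

    cv::Size boardSize(9, 6);       // inner corners per row / column (assumed)
    cv::Size imageSize(640, 480);   // must match the input images (assumed)
    float squareSize = 0.03f;       // chessboard square size in metres (assumed)

    CameraCalibration calib(Camera::PINHOLE, "camera", imageSize, boardSize, squareSize);
    calib.setVerbose(true);

    std::vector<std::string> files; // fill with chessboard image paths
    for (size_t i = 0; i < files.size(); ++i)
    {
        cv::Mat image = cv::imread(files[i], CV_LOAD_IMAGE_GRAYSCALE);

        // Corner ordering must be consistent across views; addChessboardData()
        // generates the matching scene points row by row from boardSize.
        std::vector<cv::Point2f> corners;
        if (cv::findChessboardCorners(image, boardSize, corners))
        {
            calib.addChessboardData(corners);
        }
    }

    if (calib.sampleCount() > 0 && calib.calibrate())
    {
        // YAML that CameraFactory::generateCameraFromYamlFile can read back.
        calib.writeParams("camera_calib.yaml");
    }

    return 0;
}

drawResults() and writeChessboardData() from the same class can additionally be used to inspect reprojection errors visually and to cache detections between runs.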
-------------------------------------------------------------------------------- /src/camera_models/Camera.cc: -------------------------------------------------------------------------------- 1 | #include "camodocal/camera_models/Camera.h" 2 | #include "camodocal/camera_models/ScaramuzzaCamera.h" 3 | 4 | #include 5 | 6 | namespace camodocal 7 | { 8 | 9 | Camera::Parameters::Parameters(ModelType modelType) 10 | : m_modelType(modelType) 11 | , m_imageWidth(0) 12 | , m_imageHeight(0) 13 | { 14 | switch (modelType) 15 | { 16 | case KANNALA_BRANDT: 17 | m_nIntrinsics = 8; 18 | break; 19 | case PINHOLE: 20 | m_nIntrinsics = 8; 21 | break; 22 | case SCARAMUZZA: 23 | m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS; 24 | break; 25 | case MEI: 26 | default: 27 | m_nIntrinsics = 9; 28 | } 29 | } 30 | 31 | Camera::Parameters::Parameters(ModelType modelType, 32 | const std::string& cameraName, 33 | int w, int h) 34 | : m_modelType(modelType) 35 | , m_cameraName(cameraName) 36 | , m_imageWidth(w) 37 | , m_imageHeight(h) 38 | { 39 | switch (modelType) 40 | { 41 | case KANNALA_BRANDT: 42 | m_nIntrinsics = 8; 43 | break; 44 | case PINHOLE: 45 | m_nIntrinsics = 8; 46 | break; 47 | case SCARAMUZZA: 48 | m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS; 49 | break; 50 | case MEI: 51 | default: 52 | m_nIntrinsics = 9; 53 | } 54 | } 55 | 56 | Camera::ModelType& 57 | Camera::Parameters::modelType(void) 58 | { 59 | return m_modelType; 60 | } 61 | 62 | std::string& 63 | Camera::Parameters::cameraName(void) 64 | { 65 | return m_cameraName; 66 | } 67 | 68 | int& 69 | Camera::Parameters::imageWidth(void) 70 | { 71 | return m_imageWidth; 72 | } 73 | 74 | int& 75 | Camera::Parameters::imageHeight(void) 76 | { 77 | return m_imageHeight; 78 | } 79 | 80 | Camera::ModelType 81 | Camera::Parameters::modelType(void) const 82 | { 83 | return m_modelType; 84 | } 85 | 86 | const std::string& 87 | Camera::Parameters::cameraName(void) const 88 | { 89 | return m_cameraName; 90 | } 91 | 92 | int 93 | Camera::Parameters::imageWidth(void) const 94 | { 95 | return m_imageWidth; 96 | } 97 | 98 | int 99 | Camera::Parameters::imageHeight(void) const 100 | { 101 | return m_imageHeight; 102 | } 103 | 104 | int 105 | Camera::Parameters::nIntrinsics(void) const 106 | { 107 | return m_nIntrinsics; 108 | } 109 | 110 | cv::Mat& 111 | Camera::mask(void) 112 | { 113 | return m_mask; 114 | } 115 | 116 | const cv::Mat& 117 | Camera::mask(void) const 118 | { 119 | return m_mask; 120 | } 121 | 122 | void 123 | Camera::estimateExtrinsics(const std::vector& objectPoints, 124 | const std::vector& imagePoints, 125 | cv::Mat& rvec, cv::Mat& tvec) const 126 | { 127 | std::vector Ms(imagePoints.size()); 128 | for (size_t i = 0; i < Ms.size(); ++i) 129 | { 130 | Eigen::Vector3d P; 131 | liftProjective(Eigen::Vector2d(imagePoints.at(i).x, imagePoints.at(i).y), P); 132 | 133 | P /= P(2); 134 | 135 | Ms.at(i).x = P(0); 136 | Ms.at(i).y = P(1); 137 | } 138 | 139 | // assume unit focal length, zero principal point, and zero distortion 140 | cv::solvePnP(objectPoints, Ms, cv::Mat::eye(3, 3, CV_64F), cv::noArray(), rvec, tvec); 141 | } 142 | 143 | double 144 | Camera::reprojectionDist(const Eigen::Vector3d& P1, const Eigen::Vector3d& P2) const 145 | { 146 | Eigen::Vector2d p1, p2; 147 | 148 | spaceToPlane(P1, p1); 149 | spaceToPlane(P2, p2); 150 | 151 | return (p1 - p2).norm(); 152 | } 153 | 154 | double 155 | Camera::reprojectionError(const std::vector< std::vector >& objectPoints, 156 | const std::vector< std::vector >& imagePoints, 157 | const std::vector& rvecs, 158 | 
const std::vector& tvecs, 159 | cv::OutputArray _perViewErrors) const 160 | { 161 | int imageCount = objectPoints.size(); 162 | size_t pointsSoFar = 0; 163 | double totalErr = 0.0; 164 | 165 | bool computePerViewErrors = _perViewErrors.needed(); 166 | cv::Mat perViewErrors; 167 | if (computePerViewErrors) 168 | { 169 | _perViewErrors.create(imageCount, 1, CV_64F); 170 | perViewErrors = _perViewErrors.getMat(); 171 | } 172 | 173 | for (int i = 0; i < imageCount; ++i) 174 | { 175 | size_t pointCount = imagePoints.at(i).size(); 176 | 177 | pointsSoFar += pointCount; 178 | 179 | std::vector estImagePoints; 180 | projectPoints(objectPoints.at(i), rvecs.at(i), tvecs.at(i), 181 | estImagePoints); 182 | 183 | double err = 0.0; 184 | for (size_t j = 0; j < imagePoints.at(i).size(); ++j) 185 | { 186 | err += cv::norm(imagePoints.at(i).at(j) - estImagePoints.at(j)); 187 | } 188 | 189 | if (computePerViewErrors) 190 | { 191 | perViewErrors.at(i) = err / pointCount; 192 | } 193 | 194 | totalErr += err; 195 | } 196 | 197 | return totalErr / pointsSoFar; 198 | } 199 | 200 | double 201 | Camera::reprojectionError(const Eigen::Vector3d& P, 202 | const Eigen::Quaterniond& camera_q, 203 | const Eigen::Vector3d& camera_t, 204 | const Eigen::Vector2d& observed_p) const 205 | { 206 | Eigen::Vector3d P_cam = camera_q.toRotationMatrix() * P + camera_t; 207 | 208 | Eigen::Vector2d p; 209 | spaceToPlane(P_cam, p); 210 | 211 | return (p - observed_p).norm(); 212 | } 213 | 214 | void 215 | Camera::projectPoints(const std::vector& objectPoints, 216 | const cv::Mat& rvec, 217 | const cv::Mat& tvec, 218 | std::vector& imagePoints) const 219 | { 220 | // project 3D object points to the image plane 221 | imagePoints.reserve(objectPoints.size()); 222 | 223 | //double 224 | cv::Mat R0; 225 | cv::Rodrigues(rvec, R0); 226 | 227 | Eigen::MatrixXd R(3,3); 228 | R << R0.at(0,0), R0.at(0,1), R0.at(0,2), 229 | R0.at(1,0), R0.at(1,1), R0.at(1,2), 230 | R0.at(2,0), R0.at(2,1), R0.at(2,2); 231 | 232 | Eigen::Vector3d t; 233 | t << tvec.at(0), tvec.at(1), tvec.at(2); 234 | 235 | for (size_t i = 0; i < objectPoints.size(); ++i) 236 | { 237 | const cv::Point3f& objectPoint = objectPoints.at(i); 238 | 239 | // Rotate and translate 240 | Eigen::Vector3d P; 241 | P << objectPoint.x, objectPoint.y, objectPoint.z; 242 | 243 | P = R * P + t; 244 | 245 | Eigen::Vector2d p; 246 | spaceToPlane(P, p); 247 | 248 | imagePoints.push_back(cv::Point2f(p(0), p(1))); 249 | } 250 | } 251 | 252 | } 253 | -------------------------------------------------------------------------------- /src/camera_models/CameraFactory.cc: -------------------------------------------------------------------------------- 1 | #include "camodocal/camera_models/CameraFactory.h" 2 | 3 | #include 4 | 5 | 6 | #include "camodocal/camera_models/CataCamera.h" 7 | #include "camodocal/camera_models/EquidistantCamera.h" 8 | #include "camodocal/camera_models/PinholeCamera.h" 9 | #include "camodocal/camera_models/ScaramuzzaCamera.h" 10 | 11 | #include "ceres/ceres.h" 12 | 13 | namespace camodocal 14 | { 15 | 16 | boost::shared_ptr CameraFactory::m_instance; 17 | 18 | CameraFactory::CameraFactory() 19 | { 20 | 21 | } 22 | 23 | boost::shared_ptr 24 | CameraFactory::instance(void) 25 | { 26 | if (m_instance.get() == 0) 27 | { 28 | m_instance.reset(new CameraFactory); 29 | } 30 | 31 | return m_instance; 32 | } 33 | 34 | CameraPtr 35 | CameraFactory::generateCamera(Camera::ModelType modelType, 36 | const std::string& cameraName, 37 | cv::Size imageSize) const 38 | { 39 | switch (modelType) 40 | 
{ 41 | case Camera::KANNALA_BRANDT: 42 | { 43 | EquidistantCameraPtr camera(new EquidistantCamera); 44 | 45 | EquidistantCamera::Parameters params = camera->getParameters(); 46 | params.cameraName() = cameraName; 47 | params.imageWidth() = imageSize.width; 48 | params.imageHeight() = imageSize.height; 49 | camera->setParameters(params); 50 | return camera; 51 | } 52 | case Camera::PINHOLE: 53 | { 54 | PinholeCameraPtr camera(new PinholeCamera); 55 | 56 | PinholeCamera::Parameters params = camera->getParameters(); 57 | params.cameraName() = cameraName; 58 | params.imageWidth() = imageSize.width; 59 | params.imageHeight() = imageSize.height; 60 | camera->setParameters(params); 61 | return camera; 62 | } 63 | case Camera::SCARAMUZZA: 64 | { 65 | OCAMCameraPtr camera(new OCAMCamera); 66 | 67 | OCAMCamera::Parameters params = camera->getParameters(); 68 | params.cameraName() = cameraName; 69 | params.imageWidth() = imageSize.width; 70 | params.imageHeight() = imageSize.height; 71 | camera->setParameters(params); 72 | return camera; 73 | } 74 | case Camera::MEI: 75 | default: 76 | { 77 | CataCameraPtr camera(new CataCamera); 78 | 79 | CataCamera::Parameters params = camera->getParameters(); 80 | params.cameraName() = cameraName; 81 | params.imageWidth() = imageSize.width; 82 | params.imageHeight() = imageSize.height; 83 | camera->setParameters(params); 84 | return camera; 85 | } 86 | } 87 | } 88 | 89 | CameraPtr 90 | CameraFactory::generateCameraFromYamlFile(const std::string& filename) 91 | { 92 | cv::FileStorage fs(filename, cv::FileStorage::READ); 93 | 94 | if (!fs.isOpened()) 95 | { 96 | return CameraPtr(); 97 | } 98 | 99 | Camera::ModelType modelType = Camera::MEI; 100 | if (!fs["model_type"].isNone()) 101 | { 102 | std::string sModelType; 103 | fs["model_type"] >> sModelType; 104 | 105 | if (boost::iequals(sModelType, "kannala_brandt")) 106 | { 107 | modelType = Camera::KANNALA_BRANDT; 108 | } 109 | else if (boost::iequals(sModelType, "mei")) 110 | { 111 | modelType = Camera::MEI; 112 | } 113 | else if (boost::iequals(sModelType, "scaramuzza")) 114 | { 115 | modelType = Camera::SCARAMUZZA; 116 | } 117 | else if (boost::iequals(sModelType, "pinhole")) 118 | { 119 | modelType = Camera::PINHOLE; 120 | } 121 | else 122 | { 123 | std::cerr << "# ERROR: Unknown camera model: " << sModelType << std::endl; 124 | return CameraPtr(); 125 | } 126 | } 127 | 128 | switch (modelType) 129 | { 130 | case Camera::KANNALA_BRANDT: 131 | { 132 | EquidistantCameraPtr camera(new EquidistantCamera); 133 | 134 | EquidistantCamera::Parameters params = camera->getParameters(); 135 | params.readFromYamlFile(filename); 136 | camera->setParameters(params); 137 | return camera; 138 | } 139 | case Camera::PINHOLE: 140 | { 141 | PinholeCameraPtr camera(new PinholeCamera); 142 | 143 | PinholeCamera::Parameters params = camera->getParameters(); 144 | params.readFromYamlFile(filename); 145 | camera->setParameters(params); 146 | return camera; 147 | } 148 | case Camera::SCARAMUZZA: 149 | { 150 | OCAMCameraPtr camera(new OCAMCamera); 151 | 152 | OCAMCamera::Parameters params = camera->getParameters(); 153 | params.readFromYamlFile(filename); 154 | camera->setParameters(params); 155 | return camera; 156 | } 157 | case Camera::MEI: 158 | default: 159 | { 160 | CataCameraPtr camera(new CataCamera); 161 | 162 | CataCamera::Parameters params = camera->getParameters(); 163 | params.readFromYamlFile(filename); 164 | camera->setParameters(params); 165 | return camera; 166 | } 167 | } 168 | 169 | return CameraPtr(); 170 | } 171 | 172 
| } 173 | 174 | -------------------------------------------------------------------------------- /src/camera_models/EquidistantCamera.cc: -------------------------------------------------------------------------------- 1 | #include "camodocal/camera_models/EquidistantCamera.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include "camodocal/gpl/gpl.h" 13 | 14 | namespace camodocal 15 | { 16 | 17 | EquidistantCamera::Parameters::Parameters() 18 | : Camera::Parameters(KANNALA_BRANDT) 19 | , m_k2(0.0) 20 | , m_k3(0.0) 21 | , m_k4(0.0) 22 | , m_k5(0.0) 23 | , m_mu(0.0) 24 | , m_mv(0.0) 25 | , m_u0(0.0) 26 | , m_v0(0.0) 27 | { 28 | 29 | } 30 | 31 | EquidistantCamera::Parameters::Parameters(const std::string& cameraName, 32 | int w, int h, 33 | double k2, double k3, double k4, double k5, 34 | double mu, double mv, 35 | double u0, double v0) 36 | : Camera::Parameters(KANNALA_BRANDT, cameraName, w, h) 37 | , m_k2(k2) 38 | , m_k3(k3) 39 | , m_k4(k4) 40 | , m_k5(k5) 41 | , m_mu(mu) 42 | , m_mv(mv) 43 | , m_u0(u0) 44 | , m_v0(v0) 45 | { 46 | 47 | } 48 | 49 | double& 50 | EquidistantCamera::Parameters::k2(void) 51 | { 52 | return m_k2; 53 | } 54 | 55 | double& 56 | EquidistantCamera::Parameters::k3(void) 57 | { 58 | return m_k3; 59 | } 60 | 61 | double& 62 | EquidistantCamera::Parameters::k4(void) 63 | { 64 | return m_k4; 65 | } 66 | 67 | double& 68 | EquidistantCamera::Parameters::k5(void) 69 | { 70 | return m_k5; 71 | } 72 | 73 | double& 74 | EquidistantCamera::Parameters::mu(void) 75 | { 76 | return m_mu; 77 | } 78 | 79 | double& 80 | EquidistantCamera::Parameters::mv(void) 81 | { 82 | return m_mv; 83 | } 84 | 85 | double& 86 | EquidistantCamera::Parameters::u0(void) 87 | { 88 | return m_u0; 89 | } 90 | 91 | double& 92 | EquidistantCamera::Parameters::v0(void) 93 | { 94 | return m_v0; 95 | } 96 | 97 | double 98 | EquidistantCamera::Parameters::k2(void) const 99 | { 100 | return m_k2; 101 | } 102 | 103 | double 104 | EquidistantCamera::Parameters::k3(void) const 105 | { 106 | return m_k3; 107 | } 108 | 109 | double 110 | EquidistantCamera::Parameters::k4(void) const 111 | { 112 | return m_k4; 113 | } 114 | 115 | double 116 | EquidistantCamera::Parameters::k5(void) const 117 | { 118 | return m_k5; 119 | } 120 | 121 | double 122 | EquidistantCamera::Parameters::mu(void) const 123 | { 124 | return m_mu; 125 | } 126 | 127 | double 128 | EquidistantCamera::Parameters::mv(void) const 129 | { 130 | return m_mv; 131 | } 132 | 133 | double 134 | EquidistantCamera::Parameters::u0(void) const 135 | { 136 | return m_u0; 137 | } 138 | 139 | double 140 | EquidistantCamera::Parameters::v0(void) const 141 | { 142 | return m_v0; 143 | } 144 | 145 | bool 146 | EquidistantCamera::Parameters::readFromYamlFile(const std::string& filename) 147 | { 148 | cv::FileStorage fs(filename, cv::FileStorage::READ); 149 | 150 | if (!fs.isOpened()) 151 | { 152 | return false; 153 | } 154 | 155 | if (!fs["model_type"].isNone()) 156 | { 157 | std::string sModelType; 158 | fs["model_type"] >> sModelType; 159 | 160 | if (sModelType.compare("KANNALA_BRANDT") != 0) 161 | { 162 | return false; 163 | } 164 | } 165 | 166 | m_modelType = KANNALA_BRANDT; 167 | fs["camera_name"] >> m_cameraName; 168 | m_imageWidth = static_cast(fs["image_width"]); 169 | m_imageHeight = static_cast(fs["image_height"]); 170 | 171 | cv::FileNode n = fs["projection_parameters"]; 172 | m_k2 = static_cast(n["k2"]); 173 | m_k3 = static_cast(n["k3"]); 174 | m_k4 = static_cast(n["k4"]); 175 | m_k5 
= static_cast(n["k5"]); 176 | m_mu = static_cast(n["mu"]); 177 | m_mv = static_cast(n["mv"]); 178 | m_u0 = static_cast(n["u0"]); 179 | m_v0 = static_cast(n["v0"]); 180 | 181 | return true; 182 | } 183 | 184 | void 185 | EquidistantCamera::Parameters::writeToYamlFile(const std::string& filename) const 186 | { 187 | cv::FileStorage fs(filename, cv::FileStorage::WRITE); 188 | 189 | fs << "model_type" << "KANNALA_BRANDT"; 190 | fs << "camera_name" << m_cameraName; 191 | fs << "image_width" << m_imageWidth; 192 | fs << "image_height" << m_imageHeight; 193 | 194 | // projection: k2, k3, k4, k5, mu, mv, u0, v0 195 | fs << "projection_parameters"; 196 | fs << "{" << "k2" << m_k2 197 | << "k3" << m_k3 198 | << "k4" << m_k4 199 | << "k5" << m_k5 200 | << "mu" << m_mu 201 | << "mv" << m_mv 202 | << "u0" << m_u0 203 | << "v0" << m_v0 << "}"; 204 | 205 | fs.release(); 206 | } 207 | 208 | EquidistantCamera::Parameters& 209 | EquidistantCamera::Parameters::operator=(const EquidistantCamera::Parameters& other) 210 | { 211 | if (this != &other) 212 | { 213 | m_modelType = other.m_modelType; 214 | m_cameraName = other.m_cameraName; 215 | m_imageWidth = other.m_imageWidth; 216 | m_imageHeight = other.m_imageHeight; 217 | m_k2 = other.m_k2; 218 | m_k3 = other.m_k3; 219 | m_k4 = other.m_k4; 220 | m_k5 = other.m_k5; 221 | m_mu = other.m_mu; 222 | m_mv = other.m_mv; 223 | m_u0 = other.m_u0; 224 | m_v0 = other.m_v0; 225 | } 226 | 227 | return *this; 228 | } 229 | 230 | std::ostream& 231 | operator<< (std::ostream& out, const EquidistantCamera::Parameters& params) 232 | { 233 | out << "Camera Parameters:" << std::endl; 234 | out << " model_type " << "KANNALA_BRANDT" << std::endl; 235 | out << " camera_name " << params.m_cameraName << std::endl; 236 | out << " image_width " << params.m_imageWidth << std::endl; 237 | out << " image_height " << params.m_imageHeight << std::endl; 238 | 239 | // projection: k2, k3, k4, k5, mu, mv, u0, v0 240 | out << "Projection Parameters" << std::endl; 241 | out << " k2 " << params.m_k2 << std::endl 242 | << " k3 " << params.m_k3 << std::endl 243 | << " k4 " << params.m_k4 << std::endl 244 | << " k5 " << params.m_k5 << std::endl 245 | << " mu " << params.m_mu << std::endl 246 | << " mv " << params.m_mv << std::endl 247 | << " u0 " << params.m_u0 << std::endl 248 | << " v0 " << params.m_v0 << std::endl; 249 | 250 | return out; 251 | } 252 | 253 | EquidistantCamera::EquidistantCamera() 254 | : m_inv_K11(1.0) 255 | , m_inv_K13(0.0) 256 | , m_inv_K22(1.0) 257 | , m_inv_K23(0.0) 258 | { 259 | 260 | } 261 | 262 | EquidistantCamera::EquidistantCamera(const std::string& cameraName, 263 | int imageWidth, int imageHeight, 264 | double k2, double k3, double k4, double k5, 265 | double mu, double mv, 266 | double u0, double v0) 267 | : mParameters(cameraName, imageWidth, imageHeight, 268 | k2, k3, k4, k5, mu, mv, u0, v0) 269 | { 270 | // Inverse camera projection matrix parameters 271 | m_inv_K11 = 1.0 / mParameters.mu(); 272 | m_inv_K13 = -mParameters.u0() / mParameters.mu(); 273 | m_inv_K22 = 1.0 / mParameters.mv(); 274 | m_inv_K23 = -mParameters.v0() / mParameters.mv(); 275 | } 276 | 277 | EquidistantCamera::EquidistantCamera(const EquidistantCamera::Parameters& params) 278 | : mParameters(params) 279 | { 280 | // Inverse camera projection matrix parameters 281 | m_inv_K11 = 1.0 / mParameters.mu(); 282 | m_inv_K13 = -mParameters.u0() / mParameters.mu(); 283 | m_inv_K22 = 1.0 / mParameters.mv(); 284 | m_inv_K23 = -mParameters.v0() / mParameters.mv(); 285 | } 286 | 287 | Camera::ModelType 288 | 
EquidistantCamera::modelType(void) const 289 | { 290 | return mParameters.modelType(); 291 | } 292 | 293 | const std::string& 294 | EquidistantCamera::cameraName(void) const 295 | { 296 | return mParameters.cameraName(); 297 | } 298 | 299 | int 300 | EquidistantCamera::imageWidth(void) const 301 | { 302 | return mParameters.imageWidth(); 303 | } 304 | 305 | int 306 | EquidistantCamera::imageHeight(void) const 307 | { 308 | return mParameters.imageHeight(); 309 | } 310 | 311 | void 312 | EquidistantCamera::estimateIntrinsics(const cv::Size& boardSize, 313 | const std::vector< std::vector >& objectPoints, 314 | const std::vector< std::vector >& imagePoints) 315 | { 316 | Parameters params = getParameters(); 317 | 318 | double u0 = params.imageWidth() / 2.0; 319 | double v0 = params.imageHeight() / 2.0; 320 | 321 | double minReprojErr = std::numeric_limits::max(); 322 | 323 | std::vector rvecs, tvecs; 324 | rvecs.assign(objectPoints.size(), cv::Mat()); 325 | tvecs.assign(objectPoints.size(), cv::Mat()); 326 | 327 | params.k2() = 0.0; 328 | params.k3() = 0.0; 329 | params.k4() = 0.0; 330 | params.k5() = 0.0; 331 | params.u0() = u0; 332 | params.v0() = v0; 333 | 334 | // Initialize focal length 335 | // C. Hughes, P. Denny, M. Glavin, and E. Jones, 336 | // Equidistant Fish-Eye Calibration and Rectification by Vanishing Point 337 | // Extraction, PAMI 2010 338 | // Find circles from rows of chessboard corners, and for each pair 339 | // of circles, find vanishing points: v1 and v2. 340 | // f = ||v1 - v2|| / PI; 341 | double f0 = 0.0; 342 | for (size_t i = 0; i < imagePoints.size(); ++i) 343 | { 344 | std::vector center(boardSize.height); 345 | double radius[boardSize.height]; 346 | for (int r = 0; r < boardSize.height; ++r) 347 | { 348 | std::vector circle; 349 | for (int c = 0; c < boardSize.width; ++c) 350 | { 351 | circle.push_back(imagePoints.at(i).at(r * boardSize.width + c)); 352 | } 353 | 354 | fitCircle(circle, center[r](0), center[r](1), radius[r]); 355 | } 356 | 357 | for (int j = 0; j < boardSize.height; ++j) 358 | { 359 | for (int k = j + 1; k < boardSize.height; ++k) 360 | { 361 | // find distance between pair of vanishing points which 362 | // correspond to intersection points of 2 circles 363 | std::vector ipts; 364 | ipts = intersectCircles(center[j](0), center[j](1), radius[j], 365 | center[k](0), center[k](1), radius[k]); 366 | 367 | if (ipts.size() < 2) 368 | { 369 | continue; 370 | } 371 | 372 | double f = cv::norm(ipts.at(0) - ipts.at(1)) / M_PI; 373 | 374 | params.mu() = f; 375 | params.mv() = f; 376 | 377 | setParameters(params); 378 | 379 | for (size_t l = 0; l < objectPoints.size(); ++l) 380 | { 381 | estimateExtrinsics(objectPoints.at(l), imagePoints.at(l), rvecs.at(l), tvecs.at(l)); 382 | } 383 | 384 | double reprojErr = reprojectionError(objectPoints, imagePoints, rvecs, tvecs, cv::noArray()); 385 | 386 | if (reprojErr < minReprojErr) 387 | { 388 | minReprojErr = reprojErr; 389 | f0 = f; 390 | } 391 | } 392 | } 393 | } 394 | 395 | if (f0 <= 0.0 && minReprojErr >= std::numeric_limits::max()) 396 | { 397 | std::cout << "[" << params.cameraName() << "] " 398 | << "# INFO: kannala-Brandt model fails with given data. 
" << std::endl; 399 | 400 | return; 401 | } 402 | 403 | params.mu() = f0; 404 | params.mv() = f0; 405 | 406 | setParameters(params); 407 | } 408 | 409 | /** 410 | * \brief Lifts a point from the image plane to the unit sphere 411 | * 412 | * \param p image coordinates 413 | * \param P coordinates of the point on the sphere 414 | */ 415 | void 416 | EquidistantCamera::liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const 417 | { 418 | liftProjective(p, P); 419 | } 420 | 421 | /** 422 | * \brief Lifts a point from the image plane to its projective ray 423 | * 424 | * \param p image coordinates 425 | * \param P coordinates of the projective ray 426 | */ 427 | void 428 | EquidistantCamera::liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const 429 | { 430 | // Lift points to normalised plane 431 | Eigen::Vector2d p_u; 432 | p_u << m_inv_K11 * p(0) + m_inv_K13, 433 | m_inv_K22 * p(1) + m_inv_K23; 434 | 435 | // Obtain a projective ray 436 | double theta, phi; 437 | backprojectSymmetric(p_u, theta, phi); 438 | 439 | P(0) = sin(theta) * cos(phi); 440 | P(1) = sin(theta) * sin(phi); 441 | P(2) = cos(theta); 442 | } 443 | 444 | /** 445 | * \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v) 446 | * 447 | * \param P 3D point coordinates 448 | * \param p return value, contains the image point coordinates 449 | */ 450 | void 451 | EquidistantCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const 452 | { 453 | double theta = acos(P(2) / P.norm()); 454 | double phi = atan2(P(1), P(0)); 455 | 456 | Eigen::Vector2d p_u = r(mParameters.k2(), mParameters.k3(), mParameters.k4(), mParameters.k5(), theta) * Eigen::Vector2d(cos(phi), sin(phi)); 457 | 458 | // Apply generalised projection matrix 459 | p << mParameters.mu() * p_u(0) + mParameters.u0(), 460 | mParameters.mv() * p_u(1) + mParameters.v0(); 461 | } 462 | 463 | 464 | /** 465 | * \brief Project a 3D point to the image plane and calculate Jacobian 466 | * 467 | * \param P 3D point coordinates 468 | * \param p return value, contains the image point coordinates 469 | */ 470 | void 471 | EquidistantCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, 472 | Eigen::Matrix& J) const 473 | { 474 | double theta = acos(P(2) / P.norm()); 475 | double phi = atan2(P(1), P(0)); 476 | 477 | Eigen::Vector2d p_u = r(mParameters.k2(), mParameters.k3(), mParameters.k4(), mParameters.k5(), theta) * Eigen::Vector2d(cos(phi), sin(phi)); 478 | 479 | // Apply generalised projection matrix 480 | p << mParameters.mu() * p_u(0) + mParameters.u0(), 481 | mParameters.mv() * p_u(1) + mParameters.v0(); 482 | } 483 | 484 | /** 485 | * \brief Projects an undistorted 2D point p_u to the image plane 486 | * 487 | * \param p_u 2D point coordinates 488 | * \return image point coordinates 489 | */ 490 | void 491 | EquidistantCamera::undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const 492 | { 493 | // Eigen::Vector2d p_d; 494 | // 495 | // if (m_noDistortion) 496 | // { 497 | // p_d = p_u; 498 | // } 499 | // else 500 | // { 501 | // // Apply distortion 502 | // Eigen::Vector2d d_u; 503 | // distortion(p_u, d_u); 504 | // p_d = p_u + d_u; 505 | // } 506 | // 507 | // // Apply generalised projection matrix 508 | // p << mParameters.gamma1() * p_d(0) + mParameters.u0(), 509 | // mParameters.gamma2() * p_d(1) + mParameters.v0(); 510 | } 511 | 512 | void 513 | EquidistantCamera::initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale) const 514 | { 515 | cv::Size 
imageSize(mParameters.imageWidth(), mParameters.imageHeight()); 516 | 517 | cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F); 518 | cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F); 519 | 520 | for (int v = 0; v < imageSize.height; ++v) 521 | { 522 | for (int u = 0; u < imageSize.width; ++u) 523 | { 524 | double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale; 525 | double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale; 526 | 527 | double theta, phi; 528 | backprojectSymmetric(Eigen::Vector2d(mx_u, my_u), theta, phi); 529 | 530 | Eigen::Vector3d P; 531 | P << sin(theta) * cos(phi), sin(theta) * sin(phi), cos(theta); 532 | 533 | Eigen::Vector2d p; 534 | spaceToPlane(P, p); 535 | 536 | mapX.at(v,u) = p(0); 537 | mapY.at(v,u) = p(1); 538 | } 539 | } 540 | 541 | cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); 542 | } 543 | 544 | cv::Mat 545 | EquidistantCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, 546 | float fx, float fy, 547 | cv::Size imageSize, 548 | float cx, float cy, 549 | cv::Mat rmat) const 550 | { 551 | if (imageSize == cv::Size(0, 0)) 552 | { 553 | imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight()); 554 | } 555 | 556 | cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); 557 | cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); 558 | 559 | Eigen::Matrix3f K_rect; 560 | 561 | if (cx == -1.0f && cy == -1.0f) 562 | { 563 | K_rect << fx, 0, imageSize.width / 2, 564 | 0, fy, imageSize.height / 2, 565 | 0, 0, 1; 566 | } 567 | else 568 | { 569 | K_rect << fx, 0, cx, 570 | 0, fy, cy, 571 | 0, 0, 1; 572 | } 573 | 574 | if (fx == -1.0f || fy == -1.0f) 575 | { 576 | K_rect(0,0) = mParameters.mu(); 577 | K_rect(1,1) = mParameters.mv(); 578 | } 579 | 580 | Eigen::Matrix3f K_rect_inv = K_rect.inverse(); 581 | 582 | Eigen::Matrix3f R, R_inv; 583 | cv::cv2eigen(rmat, R); 584 | R_inv = R.inverse(); 585 | 586 | for (int v = 0; v < imageSize.height; ++v) 587 | { 588 | for (int u = 0; u < imageSize.width; ++u) 589 | { 590 | Eigen::Vector3f xo; 591 | xo << u, v, 1; 592 | 593 | Eigen::Vector3f uo = R_inv * K_rect_inv * xo; 594 | 595 | Eigen::Vector2d p; 596 | spaceToPlane(uo.cast(), p); 597 | 598 | mapX.at(v,u) = p(0); 599 | mapY.at(v,u) = p(1); 600 | } 601 | } 602 | 603 | cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); 604 | 605 | cv::Mat K_rect_cv; 606 | cv::eigen2cv(K_rect, K_rect_cv); 607 | return K_rect_cv; 608 | } 609 | 610 | int 611 | EquidistantCamera::parameterCount(void) const 612 | { 613 | return 8; 614 | } 615 | 616 | const EquidistantCamera::Parameters& 617 | EquidistantCamera::getParameters(void) const 618 | { 619 | return mParameters; 620 | } 621 | 622 | void 623 | EquidistantCamera::setParameters(const EquidistantCamera::Parameters& parameters) 624 | { 625 | mParameters = parameters; 626 | 627 | // Inverse camera projection matrix parameters 628 | m_inv_K11 = 1.0 / mParameters.mu(); 629 | m_inv_K13 = -mParameters.u0() / mParameters.mu(); 630 | m_inv_K22 = 1.0 / mParameters.mv(); 631 | m_inv_K23 = -mParameters.v0() / mParameters.mv(); 632 | } 633 | 634 | void 635 | EquidistantCamera::readParameters(const std::vector& parameterVec) 636 | { 637 | if (parameterVec.size() != parameterCount()) 638 | { 639 | return; 640 | } 641 | 642 | Parameters params = getParameters(); 643 | 644 | params.k2() = parameterVec.at(0); 645 | params.k3() = parameterVec.at(1); 646 | params.k4() = parameterVec.at(2); 647 | params.k5() = parameterVec.at(3); 648 | params.mu() = parameterVec.at(4); 649 | 
params.mv() = parameterVec.at(5); 650 | params.u0() = parameterVec.at(6); 651 | params.v0() = parameterVec.at(7); 652 | 653 | setParameters(params); 654 | } 655 | 656 | void 657 | EquidistantCamera::writeParameters(std::vector& parameterVec) const 658 | { 659 | parameterVec.resize(parameterCount()); 660 | parameterVec.at(0) = mParameters.k2(); 661 | parameterVec.at(1) = mParameters.k3(); 662 | parameterVec.at(2) = mParameters.k4(); 663 | parameterVec.at(3) = mParameters.k5(); 664 | parameterVec.at(4) = mParameters.mu(); 665 | parameterVec.at(5) = mParameters.mv(); 666 | parameterVec.at(6) = mParameters.u0(); 667 | parameterVec.at(7) = mParameters.v0(); 668 | } 669 | 670 | void 671 | EquidistantCamera::writeParametersToYamlFile(const std::string& filename) const 672 | { 673 | mParameters.writeToYamlFile(filename); 674 | } 675 | 676 | std::string 677 | EquidistantCamera::parametersToString(void) const 678 | { 679 | std::ostringstream oss; 680 | oss << mParameters; 681 | 682 | return oss.str(); 683 | } 684 | 685 | void 686 | EquidistantCamera::fitOddPoly(const std::vector& x, const std::vector& y, 687 | int n, std::vector& coeffs) const 688 | { 689 | std::vector pows; 690 | for (int i = 1; i <= n; i += 2) 691 | { 692 | pows.push_back(i); 693 | } 694 | 695 | Eigen::MatrixXd X(x.size(), pows.size()); 696 | Eigen::MatrixXd Y(y.size(), 1); 697 | for (size_t i = 0; i < x.size(); ++i) 698 | { 699 | for (size_t j = 0; j < pows.size(); ++j) 700 | { 701 | X(i,j) = pow(x.at(i), pows.at(j)); 702 | } 703 | Y(i,0) = y.at(i); 704 | } 705 | 706 | Eigen::MatrixXd A = (X.transpose() * X).inverse() * X.transpose() * Y; 707 | 708 | coeffs.resize(A.rows()); 709 | for (int i = 0; i < A.rows(); ++i) 710 | { 711 | coeffs.at(i) = A(i,0); 712 | } 713 | } 714 | 715 | void 716 | EquidistantCamera::backprojectSymmetric(const Eigen::Vector2d& p_u, 717 | double& theta, double& phi) const 718 | { 719 | double tol = 1e-10; 720 | double p_u_norm = p_u.norm(); 721 | 722 | if (p_u_norm < 1e-10) 723 | { 724 | phi = 0.0; 725 | } 726 | else 727 | { 728 | phi = atan2(p_u(1), p_u(0)); 729 | } 730 | 731 | int npow = 9; 732 | if (mParameters.k5() == 0.0) 733 | { 734 | npow -= 2; 735 | } 736 | if (mParameters.k4() == 0.0) 737 | { 738 | npow -= 2; 739 | } 740 | if (mParameters.k3() == 0.0) 741 | { 742 | npow -= 2; 743 | } 744 | if (mParameters.k2() == 0.0) 745 | { 746 | npow -= 2; 747 | } 748 | 749 | Eigen::MatrixXd coeffs(npow + 1, 1); 750 | coeffs.setZero(); 751 | coeffs(0) = -p_u_norm; 752 | coeffs(1) = 1.0; 753 | 754 | if (npow >= 3) 755 | { 756 | coeffs(3) = mParameters.k2(); 757 | } 758 | if (npow >= 5) 759 | { 760 | coeffs(5) = mParameters.k3(); 761 | } 762 | if (npow >= 7) 763 | { 764 | coeffs(7) = mParameters.k4(); 765 | } 766 | if (npow >= 9) 767 | { 768 | coeffs(9) = mParameters.k5(); 769 | } 770 | 771 | if (npow == 1) 772 | { 773 | theta = p_u_norm; 774 | } 775 | else 776 | { 777 | // Get eigenvalues of companion matrix corresponding to polynomial. 778 | // Eigenvalues correspond to roots of polynomial. 
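// Added explanation: the coefficients set above encode the odd polynomial
//     theta + k2*theta^3 + k3*theta^5 + k4*theta^7 + k5*theta^9 - ||p_u|| = 0,
// i.e. r(theta) = ||p_u|| for the projection used in spaceToPlane(). Its roots
// are obtained below as the eigenvalues of the companion matrix A (ones on the
// sub-diagonal, last column = -coeffs(0..npow-1)/coeffs(npow)); the smallest
// real, non-negative root is kept as theta.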
779 | Eigen::MatrixXd A(npow, npow); 780 | A.setZero(); 781 | A.block(1, 0, npow - 1, npow - 1).setIdentity(); 782 | A.col(npow - 1) = - coeffs.block(0, 0, npow, 1) / coeffs(npow); 783 | 784 | Eigen::EigenSolver es(A); 785 | Eigen::MatrixXcd eigval = es.eigenvalues(); 786 | 787 | std::vector thetas; 788 | for (int i = 0; i < eigval.rows(); ++i) 789 | { 790 | if (fabs(eigval(i).imag()) > tol) 791 | { 792 | continue; 793 | } 794 | 795 | double t = eigval(i).real(); 796 | 797 | if (t < -tol) 798 | { 799 | continue; 800 | } 801 | else if (t < 0.0) 802 | { 803 | t = 0.0; 804 | } 805 | 806 | thetas.push_back(t); 807 | } 808 | 809 | if (thetas.empty()) 810 | { 811 | theta = p_u_norm; 812 | } 813 | else 814 | { 815 | theta = *std::min_element(thetas.begin(), thetas.end()); 816 | } 817 | } 818 | } 819 | 820 | } 821 | -------------------------------------------------------------------------------- /src/camera_models/PinholeCamera.cc: -------------------------------------------------------------------------------- 1 | #include "camodocal/camera_models/PinholeCamera.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "camodocal/gpl/gpl.h" 12 | 13 | namespace camodocal 14 | { 15 | 16 | PinholeCamera::Parameters::Parameters() 17 | : Camera::Parameters(PINHOLE) 18 | , m_k1(0.0) 19 | , m_k2(0.0) 20 | , m_p1(0.0) 21 | , m_p2(0.0) 22 | , m_fx(0.0) 23 | , m_fy(0.0) 24 | , m_cx(0.0) 25 | , m_cy(0.0) 26 | { 27 | 28 | } 29 | 30 | PinholeCamera::Parameters::Parameters(const std::string& cameraName, 31 | int w, int h, 32 | double k1, double k2, 33 | double p1, double p2, 34 | double fx, double fy, 35 | double cx, double cy) 36 | : Camera::Parameters(PINHOLE, cameraName, w, h) 37 | , m_k1(k1) 38 | , m_k2(k2) 39 | , m_p1(p1) 40 | , m_p2(p2) 41 | , m_fx(fx) 42 | , m_fy(fy) 43 | , m_cx(cx) 44 | , m_cy(cy) 45 | { 46 | } 47 | 48 | double& 49 | PinholeCamera::Parameters::k1(void) 50 | { 51 | return m_k1; 52 | } 53 | 54 | double& 55 | PinholeCamera::Parameters::k2(void) 56 | { 57 | return m_k2; 58 | } 59 | 60 | double& 61 | PinholeCamera::Parameters::p1(void) 62 | { 63 | return m_p1; 64 | } 65 | 66 | double& 67 | PinholeCamera::Parameters::p2(void) 68 | { 69 | return m_p2; 70 | } 71 | 72 | double& 73 | PinholeCamera::Parameters::fx(void) 74 | { 75 | return m_fx; 76 | } 77 | 78 | double& 79 | PinholeCamera::Parameters::fy(void) 80 | { 81 | return m_fy; 82 | } 83 | 84 | double& 85 | PinholeCamera::Parameters::cx(void) 86 | { 87 | return m_cx; 88 | } 89 | 90 | double& 91 | PinholeCamera::Parameters::cy(void) 92 | { 93 | return m_cy; 94 | } 95 | 96 | double 97 | PinholeCamera::Parameters::k1(void) const 98 | { 99 | return m_k1; 100 | } 101 | 102 | double 103 | PinholeCamera::Parameters::k2(void) const 104 | { 105 | return m_k2; 106 | } 107 | 108 | double 109 | PinholeCamera::Parameters::p1(void) const 110 | { 111 | return m_p1; 112 | } 113 | 114 | double 115 | PinholeCamera::Parameters::p2(void) const 116 | { 117 | return m_p2; 118 | } 119 | 120 | double 121 | PinholeCamera::Parameters::fx(void) const 122 | { 123 | return m_fx; 124 | } 125 | 126 | double 127 | PinholeCamera::Parameters::fy(void) const 128 | { 129 | return m_fy; 130 | } 131 | 132 | double 133 | PinholeCamera::Parameters::cx(void) const 134 | { 135 | return m_cx; 136 | } 137 | 138 | double 139 | PinholeCamera::Parameters::cy(void) const 140 | { 141 | return m_cy; 142 | } 143 | 144 | bool 145 | PinholeCamera::Parameters::readFromYamlFile(const std::string& filename) 146 | { 147 | cv::FileStorage 
fs(filename, cv::FileStorage::READ); 148 | 149 | if (!fs.isOpened()) 150 | { 151 | return false; 152 | } 153 | 154 | if (!fs["model_type"].isNone()) 155 | { 156 | std::string sModelType; 157 | fs["model_type"] >> sModelType; 158 | 159 | if (sModelType.compare("PINHOLE") != 0) 160 | { 161 | return false; 162 | } 163 | } 164 | 165 | m_modelType = PINHOLE; 166 | fs["camera_name"] >> m_cameraName; 167 | m_imageWidth = static_cast(fs["image_width"]); 168 | m_imageHeight = static_cast(fs["image_height"]); 169 | 170 | cv::FileNode n = fs["distortion_parameters"]; 171 | m_k1 = static_cast(n["k1"]); 172 | m_k2 = static_cast(n["k2"]); 173 | m_p1 = static_cast(n["p1"]); 174 | m_p2 = static_cast(n["p2"]); 175 | 176 | n = fs["projection_parameters"]; 177 | m_fx = static_cast(n["fx"]); 178 | m_fy = static_cast(n["fy"]); 179 | m_cx = static_cast(n["cx"]); 180 | m_cy = static_cast(n["cy"]); 181 | 182 | return true; 183 | } 184 | 185 | void 186 | PinholeCamera::Parameters::writeToYamlFile(const std::string& filename) const 187 | { 188 | cv::FileStorage fs(filename, cv::FileStorage::WRITE); 189 | 190 | fs << "model_type" << "PINHOLE"; 191 | fs << "camera_name" << m_cameraName; 192 | fs << "image_width" << m_imageWidth; 193 | fs << "image_height" << m_imageHeight; 194 | 195 | // radial distortion: k1, k2 196 | // tangential distortion: p1, p2 197 | fs << "distortion_parameters"; 198 | fs << "{" << "k1" << m_k1 199 | << "k2" << m_k2 200 | << "p1" << m_p1 201 | << "p2" << m_p2 << "}"; 202 | 203 | // projection: fx, fy, cx, cy 204 | fs << "projection_parameters"; 205 | fs << "{" << "fx" << m_fx 206 | << "fy" << m_fy 207 | << "cx" << m_cx 208 | << "cy" << m_cy << "}"; 209 | 210 | fs.release(); 211 | } 212 | 213 | PinholeCamera::Parameters& 214 | PinholeCamera::Parameters::operator=(const PinholeCamera::Parameters& other) 215 | { 216 | if (this != &other) 217 | { 218 | m_modelType = other.m_modelType; 219 | m_cameraName = other.m_cameraName; 220 | m_imageWidth = other.m_imageWidth; 221 | m_imageHeight = other.m_imageHeight; 222 | m_k1 = other.m_k1; 223 | m_k2 = other.m_k2; 224 | m_p1 = other.m_p1; 225 | m_p2 = other.m_p2; 226 | m_fx = other.m_fx; 227 | m_fy = other.m_fy; 228 | m_cx = other.m_cx; 229 | m_cy = other.m_cy; 230 | } 231 | 232 | return *this; 233 | } 234 | 235 | std::ostream& 236 | operator<< (std::ostream& out, const PinholeCamera::Parameters& params) 237 | { 238 | out << "Camera Parameters:" << std::endl; 239 | out << " model_type " << "PINHOLE" << std::endl; 240 | out << " camera_name " << params.m_cameraName << std::endl; 241 | out << " image_width " << params.m_imageWidth << std::endl; 242 | out << " image_height " << params.m_imageHeight << std::endl; 243 | 244 | // radial distortion: k1, k2 245 | // tangential distortion: p1, p2 246 | out << "Distortion Parameters" << std::endl; 247 | out << " k1 " << params.m_k1 << std::endl 248 | << " k2 " << params.m_k2 << std::endl 249 | << " p1 " << params.m_p1 << std::endl 250 | << " p2 " << params.m_p2 << std::endl; 251 | 252 | // projection: fx, fy, cx, cy 253 | out << "Projection Parameters" << std::endl; 254 | out << " fx " << params.m_fx << std::endl 255 | << " fy " << params.m_fy << std::endl 256 | << " cx " << params.m_cx << std::endl 257 | << " cy " << params.m_cy << std::endl; 258 | 259 | return out; 260 | } 261 | 262 | PinholeCamera::PinholeCamera() 263 | : m_inv_K11(1.0) 264 | , m_inv_K13(0.0) 265 | , m_inv_K22(1.0) 266 | , m_inv_K23(0.0) 267 | , m_noDistortion(true) 268 | { 269 | 270 | } 271 | 272 | PinholeCamera::PinholeCamera(const 
std::string& cameraName, 273 | int imageWidth, int imageHeight, 274 | double k1, double k2, double p1, double p2, 275 | double fx, double fy, double cx, double cy) 276 | : mParameters(cameraName, imageWidth, imageHeight, 277 | k1, k2, p1, p2, fx, fy, cx, cy) 278 | { 279 | if ((mParameters.k1() == 0.0) && 280 | (mParameters.k2() == 0.0) && 281 | (mParameters.p1() == 0.0) && 282 | (mParameters.p2() == 0.0)) 283 | { 284 | m_noDistortion = true; 285 | } 286 | else 287 | { 288 | m_noDistortion = false; 289 | } 290 | 291 | // Inverse camera projection matrix parameters 292 | m_inv_K11 = 1.0 / mParameters.fx(); 293 | m_inv_K13 = -mParameters.cx() / mParameters.fx(); 294 | m_inv_K22 = 1.0 / mParameters.fy(); 295 | m_inv_K23 = -mParameters.cy() / mParameters.fy(); 296 | } 297 | 298 | PinholeCamera::PinholeCamera(const PinholeCamera::Parameters& params) 299 | : mParameters(params) 300 | { 301 | if ((mParameters.k1() == 0.0) && 302 | (mParameters.k2() == 0.0) && 303 | (mParameters.p1() == 0.0) && 304 | (mParameters.p2() == 0.0)) 305 | { 306 | m_noDistortion = true; 307 | } 308 | else 309 | { 310 | m_noDistortion = false; 311 | } 312 | 313 | // Inverse camera projection matrix parameters 314 | m_inv_K11 = 1.0 / mParameters.fx(); 315 | m_inv_K13 = -mParameters.cx() / mParameters.fx(); 316 | m_inv_K22 = 1.0 / mParameters.fy(); 317 | m_inv_K23 = -mParameters.cy() / mParameters.fy(); 318 | } 319 | 320 | Camera::ModelType 321 | PinholeCamera::modelType(void) const 322 | { 323 | return mParameters.modelType(); 324 | } 325 | 326 | const std::string& 327 | PinholeCamera::cameraName(void) const 328 | { 329 | return mParameters.cameraName(); 330 | } 331 | 332 | int 333 | PinholeCamera::imageWidth(void) const 334 | { 335 | return mParameters.imageWidth(); 336 | } 337 | 338 | int 339 | PinholeCamera::imageHeight(void) const 340 | { 341 | return mParameters.imageHeight(); 342 | } 343 | 344 | void 345 | PinholeCamera::estimateIntrinsics(const cv::Size& boardSize, 346 | const std::vector< std::vector >& objectPoints, 347 | const std::vector< std::vector >& imagePoints) 348 | { 349 | // Z. 
Zhang, A Flexible New Technique for Camera Calibration, PAMI 2000 350 | 351 | Parameters params = getParameters(); 352 | 353 | params.k1() = 0.0; 354 | params.k2() = 0.0; 355 | params.p1() = 0.0; 356 | params.p2() = 0.0; 357 | 358 | double cx = params.imageWidth() / 2.0; 359 | double cy = params.imageHeight() / 2.0; 360 | params.cx() = cx; 361 | params.cy() = cy; 362 | 363 | size_t nImages = imagePoints.size(); 364 | 365 | cv::Mat A(nImages * 2, 2, CV_64F); 366 | cv::Mat b(nImages * 2, 1, CV_64F); 367 | 368 | for (size_t i = 0; i < nImages; ++i) 369 | { 370 | const std::vector& oPoints = objectPoints.at(i); 371 | 372 | std::vector M(oPoints.size()); 373 | for (size_t j = 0; j < M.size(); ++j) 374 | { 375 | M.at(j) = cv::Point2f(oPoints.at(j).x, oPoints.at(j).y); 376 | } 377 | 378 | cv::Mat H = cv::findHomography(M, imagePoints.at(i)); 379 | 380 | H.at(0,0) -= H.at(2,0) * cx; 381 | H.at(0,1) -= H.at(2,1) * cx; 382 | H.at(0,2) -= H.at(2,2) * cx; 383 | H.at(1,0) -= H.at(2,0) * cy; 384 | H.at(1,1) -= H.at(2,1) * cy; 385 | H.at(1,2) -= H.at(2,2) * cy; 386 | 387 | double h[3], v[3], d1[3], d2[3]; 388 | double n[4] = {0,0,0,0}; 389 | 390 | for (int j = 0; j < 3; ++j) 391 | { 392 | double t0 = H.at(j,0); 393 | double t1 = H.at(j,1); 394 | h[j] = t0; v[j] = t1; 395 | d1[j] = (t0 + t1) * 0.5; 396 | d2[j] = (t0 - t1) * 0.5; 397 | n[0] += t0 * t0; n[1] += t1 * t1; 398 | n[2] += d1[j] * d1[j]; n[3] += d2[j] * d2[j]; 399 | } 400 | 401 | for (int j = 0; j < 4; ++j) 402 | { 403 | n[j] = 1.0 / sqrt(n[j]); 404 | } 405 | 406 | for (int j = 0; j < 3; ++j) 407 | { 408 | h[j] *= n[0]; v[j] *= n[1]; 409 | d1[j] *= n[2]; d2[j] *= n[3]; 410 | } 411 | 412 | A.at(i * 2, 0) = h[0] * v[0]; 413 | A.at(i * 2, 1) = h[1] * v[1]; 414 | A.at(i * 2 + 1, 0) = d1[0] * d2[0]; 415 | A.at(i * 2 + 1, 1) = d1[1] * d2[1]; 416 | b.at(i * 2, 0) = -h[2] * v[2]; 417 | b.at(i * 2 + 1, 0) = -d1[2] * d2[2]; 418 | } 419 | 420 | cv::Mat f(2, 1, CV_64F); 421 | cv::solve(A, b, f, cv::DECOMP_NORMAL | cv::DECOMP_LU); 422 | 423 | params.fx() = sqrt(fabs(1.0 / f.at(0))); 424 | params.fy() = sqrt(fabs(1.0 / f.at(1))); 425 | 426 | setParameters(params); 427 | } 428 | 429 | /** 430 | * \brief Lifts a point from the image plane to the unit sphere 431 | * 432 | * \param p image coordinates 433 | * \param P coordinates of the point on the sphere 434 | */ 435 | void 436 | PinholeCamera::liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const 437 | { 438 | liftProjective(p, P); 439 | 440 | P.normalize(); 441 | } 442 | 443 | /** 444 | * \brief Lifts a point from the image plane to its projective ray 445 | * 446 | * \param p image coordinates 447 | * \param P coordinates of the projective ray 448 | */ 449 | void 450 | PinholeCamera::liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const 451 | { 452 | double mx_d, my_d,mx2_d, mxy_d, my2_d, mx_u, my_u; 453 | double rho2_d, rho4_d, radDist_d, Dx_d, Dy_d, inv_denom_d; 454 | //double lambda; 455 | 456 | // Lift points to normalised plane 457 | mx_d = m_inv_K11 * p(0) + m_inv_K13; 458 | my_d = m_inv_K22 * p(1) + m_inv_K23; 459 | 460 | if (m_noDistortion) 461 | { 462 | mx_u = mx_d; 463 | my_u = my_d; 464 | } 465 | else 466 | { 467 | if (0) 468 | { 469 | double k1 = mParameters.k1(); 470 | double k2 = mParameters.k2(); 471 | double p1 = mParameters.p1(); 472 | double p2 = mParameters.p2(); 473 | 474 | // Apply inverse distortion model 475 | // proposed by Heikkila 476 | mx2_d = mx_d*mx_d; 477 | my2_d = my_d*my_d; 478 | mxy_d = mx_d*my_d; 479 | rho2_d = mx2_d+my2_d; 480 | rho4_d = rho2_d*rho2_d; 
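// Heikkila-style closed-form undistortion (this branch is disabled): the
// distortion offset (Dx_d, Dy_d) is removed in a single step, scaled by the
// first-order correction factor inv_denom_d, instead of the fixed-point
// iteration used in the active branch below.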
481 | radDist_d = k1*rho2_d+k2*rho4_d; 482 | Dx_d = mx_d*radDist_d + p2*(rho2_d+2*mx2_d) + 2*p1*mxy_d; 483 | Dy_d = my_d*radDist_d + p1*(rho2_d+2*my2_d) + 2*p2*mxy_d; 484 | inv_denom_d = 1/(1+4*k1*rho2_d+6*k2*rho4_d+8*p1*my_d+8*p2*mx_d); 485 | 486 | mx_u = mx_d - inv_denom_d*Dx_d; 487 | my_u = my_d - inv_denom_d*Dy_d; 488 | } 489 | else 490 | { 491 | // Recursive distortion model 492 | int n = 8; 493 | Eigen::Vector2d d_u; 494 | distortion(Eigen::Vector2d(mx_d, my_d), d_u); 495 | // Approximate value 496 | mx_u = mx_d - d_u(0); 497 | my_u = my_d - d_u(1); 498 | 499 | for (int i = 1; i < n; ++i) 500 | { 501 | distortion(Eigen::Vector2d(mx_u, my_u), d_u); 502 | mx_u = mx_d - d_u(0); 503 | my_u = my_d - d_u(1); 504 | } 505 | } 506 | } 507 | 508 | // Obtain a projective ray 509 | P << mx_u, my_u, 1.0; 510 | } 511 | 512 | 513 | /** 514 | * \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v) 515 | * 516 | * \param P 3D point coordinates 517 | * \param p return value, contains the image point coordinates 518 | */ 519 | void 520 | PinholeCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const 521 | { 522 | Eigen::Vector2d p_u, p_d; 523 | 524 | // Project points to the normalised plane 525 | p_u << P(0) / P(2), P(1) / P(2); 526 | 527 | if (m_noDistortion) 528 | { 529 | p_d = p_u; 530 | } 531 | else 532 | { 533 | // Apply distortion 534 | Eigen::Vector2d d_u; 535 | distortion(p_u, d_u); 536 | p_d = p_u + d_u; 537 | } 538 | 539 | // Apply generalised projection matrix 540 | p << mParameters.fx() * p_d(0) + mParameters.cx(), 541 | mParameters.fy() * p_d(1) + mParameters.cy(); 542 | } 543 | 544 | #if 0 545 | /** 546 | * \brief Project a 3D point to the image plane and calculate Jacobian 547 | * 548 | * \param P 3D point coordinates 549 | * \param p return value, contains the image point coordinates 550 | */ 551 | void 552 | PinholeCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, 553 | Eigen::Matrix& J) const 554 | { 555 | Eigen::Vector2d p_u, p_d; 556 | double norm, inv_denom; 557 | double dxdmx, dydmx, dxdmy, dydmy; 558 | 559 | norm = P.norm(); 560 | // Project points to the normalised plane 561 | inv_denom = 1.0 / P(2); 562 | p_u << inv_denom * P(0), inv_denom * P(1); 563 | 564 | // Calculate jacobian 565 | double dudx = inv_denom; 566 | double dvdx = 0.0; 567 | double dudy = 0.0; 568 | double dvdy = inv_denom; 569 | inv_denom = - inv_denom * inv_denom; 570 | double dudz = P(0) * inv_denom; 571 | double dvdz = P(1) * inv_denom; 572 | 573 | if (m_noDistortion) 574 | { 575 | p_d = p_u; 576 | } 577 | else 578 | { 579 | // Apply distortion 580 | Eigen::Vector2d d_u; 581 | distortion(p_u, d_u); 582 | p_d = p_u + d_u; 583 | } 584 | 585 | double fx = mParameters.fx(); 586 | double fy = mParameters.fy(); 587 | 588 | // Make the product of the jacobians 589 | // and add projection matrix jacobian 590 | inv_denom = fx * (dudx * dxdmx + dvdx * dxdmy); // reuse 591 | dvdx = fy * (dudx * dydmx + dvdx * dydmy); 592 | dudx = inv_denom; 593 | 594 | inv_denom = fx * (dudy * dxdmx + dvdy * dxdmy); // reuse 595 | dvdy = fy * (dudy * dydmx + dvdy * dydmy); 596 | dudy = inv_denom; 597 | 598 | inv_denom = fx * (dudz * dxdmx + dvdz * dxdmy); // reuse 599 | dvdz = fy * (dudz * dydmx + dvdz * dydmy); 600 | dudz = inv_denom; 601 | 602 | // Apply generalised projection matrix 603 | p << fx * p_d(0) + mParameters.cx(), 604 | fy * p_d(1) + mParameters.cy(); 605 | 606 | J << dudx, dudy, dudz, 607 | dvdx, dvdy, dvdz; 608 | } 609 | #endif 610 | 611 | /** 612 | * \brief 
Projects an undistorted 2D point p_u to the image plane 613 | * 614 | * \param p_u 2D point coordinates 615 | * \return image point coordinates 616 | */ 617 | void 618 | PinholeCamera::undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const 619 | { 620 | Eigen::Vector2d p_d; 621 | 622 | if (m_noDistortion) 623 | { 624 | p_d = p_u; 625 | } 626 | else 627 | { 628 | // Apply distortion 629 | Eigen::Vector2d d_u; 630 | distortion(p_u, d_u); 631 | p_d = p_u + d_u; 632 | } 633 | 634 | // Apply generalised projection matrix 635 | p << mParameters.fx() * p_d(0) + mParameters.cx(), 636 | mParameters.fy() * p_d(1) + mParameters.cy(); 637 | } 638 | 639 | /** 640 | * \brief Apply distortion to input point (from the normalised plane) 641 | * 642 | * \param p_u undistorted coordinates of point on the normalised plane 643 | * \return to obtain the distorted point: p_d = p_u + d_u 644 | */ 645 | void 646 | PinholeCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const 647 | { 648 | double k1 = mParameters.k1(); 649 | double k2 = mParameters.k2(); 650 | double p1 = mParameters.p1(); 651 | double p2 = mParameters.p2(); 652 | 653 | double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u; 654 | 655 | mx2_u = p_u(0) * p_u(0); 656 | my2_u = p_u(1) * p_u(1); 657 | mxy_u = p_u(0) * p_u(1); 658 | rho2_u = mx2_u + my2_u; 659 | rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u; 660 | d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u), 661 | p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u); 662 | } 663 | 664 | /** 665 | * \brief Apply distortion to input point (from the normalised plane) 666 | * and calculate Jacobian 667 | * 668 | * \param p_u undistorted coordinates of point on the normalised plane 669 | * \return to obtain the distorted point: p_d = p_u + d_u 670 | */ 671 | void 672 | PinholeCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, 673 | Eigen::Matrix2d& J) const 674 | { 675 | double k1 = mParameters.k1(); 676 | double k2 = mParameters.k2(); 677 | double p1 = mParameters.p1(); 678 | double p2 = mParameters.p2(); 679 | 680 | double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u; 681 | 682 | mx2_u = p_u(0) * p_u(0); 683 | my2_u = p_u(1) * p_u(1); 684 | mxy_u = p_u(0) * p_u(1); 685 | rho2_u = mx2_u + my2_u; 686 | rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u; 687 | d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u), 688 | p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u); 689 | 690 | double dxdmx = 1.0 + rad_dist_u + k1 * 2.0 * mx2_u + k2 * rho2_u * 4.0 * mx2_u + 2.0 * p1 * p_u(1) + 6.0 * p2 * p_u(0); 691 | double dydmx = k1 * 2.0 * p_u(0) * p_u(1) + k2 * 4.0 * rho2_u * p_u(0) * p_u(1) + p1 * 2.0 * p_u(0) + 2.0 * p2 * p_u(1); 692 | double dxdmy = dydmx; 693 | double dydmy = 1.0 + rad_dist_u + k1 * 2.0 * my2_u + k2 * rho2_u * 4.0 * my2_u + 6.0 * p1 * p_u(1) + 2.0 * p2 * p_u(0); 694 | 695 | J << dxdmx, dxdmy, 696 | dydmx, dydmy; 697 | } 698 | 699 | void 700 | PinholeCamera::initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale) const 701 | { 702 | cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight()); 703 | 704 | cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F); 705 | cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F); 706 | 707 | for (int v = 0; v < imageSize.height; ++v) 708 | { 709 | for (int u = 0; u < imageSize.width; ++u) 710 | { 711 | double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale; 712 | double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale; 713 
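// (mx_u, my_u) are the normalised coordinates of output pixel (u, v) under an
// ideal pinhole model whose focal length is scaled by fScale; projecting them
// through the full distorted model below gives the source pixel that cv::remap
// should sample for this destination pixel.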
| 714 | Eigen::Vector3d P; 715 | P << mx_u, my_u, 1.0; 716 | 717 | Eigen::Vector2d p; 718 | spaceToPlane(P, p); 719 | 720 | mapX.at(v,u) = p(0); 721 | mapY.at(v,u) = p(1); 722 | } 723 | } 724 | 725 | cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); 726 | } 727 | 728 | cv::Mat 729 | PinholeCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, 730 | float fx, float fy, 731 | cv::Size imageSize, 732 | float cx, float cy, 733 | cv::Mat rmat) const 734 | { 735 | if (imageSize == cv::Size(0, 0)) 736 | { 737 | imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight()); 738 | } 739 | 740 | cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); 741 | cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); 742 | 743 | Eigen::Matrix3f R, R_inv; 744 | cv::cv2eigen(rmat, R); 745 | R_inv = R.inverse(); 746 | 747 | // assume no skew 748 | Eigen::Matrix3f K_rect; 749 | 750 | if (cx == -1.0f || cy == -1.0f) 751 | { 752 | K_rect << fx, 0, imageSize.width / 2, 753 | 0, fy, imageSize.height / 2, 754 | 0, 0, 1; 755 | } 756 | else 757 | { 758 | K_rect << fx, 0, cx, 759 | 0, fy, cy, 760 | 0, 0, 1; 761 | } 762 | 763 | if (fx == -1.0f || fy == -1.0f) 764 | { 765 | K_rect(0,0) = mParameters.fx(); 766 | K_rect(1,1) = mParameters.fy(); 767 | } 768 | 769 | Eigen::Matrix3f K_rect_inv = K_rect.inverse(); 770 | 771 | for (int v = 0; v < imageSize.height; ++v) 772 | { 773 | for (int u = 0; u < imageSize.width; ++u) 774 | { 775 | Eigen::Vector3f xo; 776 | xo << u, v, 1; 777 | 778 | Eigen::Vector3f uo = R_inv * K_rect_inv * xo; 779 | 780 | Eigen::Vector2d p; 781 | spaceToPlane(uo.cast(), p); 782 | 783 | mapX.at(v,u) = p(0); 784 | mapY.at(v,u) = p(1); 785 | } 786 | } 787 | 788 | cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); 789 | 790 | cv::Mat K_rect_cv; 791 | cv::eigen2cv(K_rect, K_rect_cv); 792 | return K_rect_cv; 793 | } 794 | 795 | int 796 | PinholeCamera::parameterCount(void) const 797 | { 798 | return 8; 799 | } 800 | 801 | const PinholeCamera::Parameters& 802 | PinholeCamera::getParameters(void) const 803 | { 804 | return mParameters; 805 | } 806 | 807 | void 808 | PinholeCamera::setParameters(const PinholeCamera::Parameters& parameters) 809 | { 810 | mParameters = parameters; 811 | 812 | if ((mParameters.k1() == 0.0) && 813 | (mParameters.k2() == 0.0) && 814 | (mParameters.p1() == 0.0) && 815 | (mParameters.p2() == 0.0)) 816 | { 817 | m_noDistortion = true; 818 | } 819 | else 820 | { 821 | m_noDistortion = false; 822 | } 823 | 824 | m_inv_K11 = 1.0 / mParameters.fx(); 825 | m_inv_K13 = -mParameters.cx() / mParameters.fx(); 826 | m_inv_K22 = 1.0 / mParameters.fy(); 827 | m_inv_K23 = -mParameters.cy() / mParameters.fy(); 828 | } 829 | 830 | void 831 | PinholeCamera::readParameters(const std::vector& parameterVec) 832 | { 833 | if ((int)parameterVec.size() != parameterCount()) 834 | { 835 | return; 836 | } 837 | 838 | Parameters params = getParameters(); 839 | 840 | params.k1() = parameterVec.at(0); 841 | params.k2() = parameterVec.at(1); 842 | params.p1() = parameterVec.at(2); 843 | params.p2() = parameterVec.at(3); 844 | params.fx() = parameterVec.at(4); 845 | params.fy() = parameterVec.at(5); 846 | params.cx() = parameterVec.at(6); 847 | params.cy() = parameterVec.at(7); 848 | 849 | setParameters(params); 850 | } 851 | 852 | void 853 | PinholeCamera::writeParameters(std::vector& parameterVec) const 854 | { 855 | parameterVec.resize(parameterCount()); 856 | parameterVec.at(0) = mParameters.k1(); 857 | parameterVec.at(1) = 
mParameters.k2(); 858 | parameterVec.at(2) = mParameters.p1(); 859 | parameterVec.at(3) = mParameters.p2(); 860 | parameterVec.at(4) = mParameters.fx(); 861 | parameterVec.at(5) = mParameters.fy(); 862 | parameterVec.at(6) = mParameters.cx(); 863 | parameterVec.at(7) = mParameters.cy(); 864 | } 865 | 866 | void 867 | PinholeCamera::writeParametersToYamlFile(const std::string& filename) const 868 | { 869 | mParameters.writeToYamlFile(filename); 870 | } 871 | 872 | std::string 873 | PinholeCamera::parametersToString(void) const 874 | { 875 | std::ostringstream oss; 876 | oss << mParameters; 877 | 878 | return oss.str(); 879 | } 880 | 881 | } 882 | -------------------------------------------------------------------------------- /src/camera_models/ScaramuzzaCamera.cc: -------------------------------------------------------------------------------- 1 | #include "camodocal/camera_models/ScaramuzzaCamera.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include "camodocal/gpl/gpl.h" 15 | 16 | namespace camodocal 17 | { 18 | 19 | OCAMCamera::Parameters::Parameters() 20 | : Camera::Parameters(SCARAMUZZA) 21 | , m_C(0.0) 22 | , m_D(0.0) 23 | , m_E(0.0) 24 | , m_center_x(0.0) 25 | , m_center_y(0.0) 26 | { 27 | memset(m_poly, 0, sizeof(double) * SCARAMUZZA_POLY_SIZE); 28 | memset(m_inv_poly, 0, sizeof(double) * SCARAMUZZA_INV_POLY_SIZE); 29 | } 30 | 31 | 32 | 33 | bool 34 | OCAMCamera::Parameters::readFromYamlFile(const std::string& filename) 35 | { 36 | cv::FileStorage fs(filename, cv::FileStorage::READ); 37 | 38 | if (!fs.isOpened()) 39 | { 40 | return false; 41 | } 42 | 43 | if (!fs["model_type"].isNone()) 44 | { 45 | std::string sModelType; 46 | fs["model_type"] >> sModelType; 47 | 48 | if (!boost::iequals(sModelType, "scaramuzza")) 49 | { 50 | return false; 51 | } 52 | } 53 | 54 | m_modelType = SCARAMUZZA; 55 | fs["camera_name"] >> m_cameraName; 56 | m_imageWidth = static_cast(fs["image_width"]); 57 | m_imageHeight = static_cast(fs["image_height"]); 58 | 59 | cv::FileNode n = fs["poly_parameters"]; 60 | for(int i=0; i < SCARAMUZZA_POLY_SIZE; i++) 61 | m_poly[i] = static_cast(n[std::string("p") + boost::lexical_cast(i)]); 62 | 63 | n = fs["inv_poly_parameters"]; 64 | for(int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) 65 | m_inv_poly[i] = static_cast(n[std::string("p") + boost::lexical_cast(i)]); 66 | 67 | n = fs["affine_parameters"]; 68 | m_C = static_cast(n["ac"]); 69 | m_D = static_cast(n["ad"]); 70 | m_E = static_cast(n["ae"]); 71 | 72 | m_center_x = static_cast(n["cx"]); 73 | m_center_y = static_cast(n["cy"]); 74 | 75 | return true; 76 | } 77 | 78 | void 79 | OCAMCamera::Parameters::writeToYamlFile(const std::string& filename) const 80 | { 81 | cv::FileStorage fs(filename, cv::FileStorage::WRITE); 82 | 83 | fs << "model_type" << "scaramuzza"; 84 | fs << "camera_name" << m_cameraName; 85 | fs << "image_width" << m_imageWidth; 86 | fs << "image_height" << m_imageHeight; 87 | 88 | fs << "poly_parameters"; 89 | fs << "{"; 90 | for(int i=0; i < SCARAMUZZA_POLY_SIZE; i++) 91 | fs << std::string("p") + boost::lexical_cast(i) << m_poly[i]; 92 | fs << "}"; 93 | 94 | fs << "inv_poly_parameters"; 95 | fs << "{"; 96 | for(int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) 97 | fs << std::string("p") + boost::lexical_cast(i) << m_inv_poly[i]; 98 | fs << "}"; 99 | 100 | fs << "affine_parameters"; 101 | fs << "{" << "ac" << m_C 102 | << "ad" << m_D 103 | << "ae" << m_E 104 | << "cx" << m_center_x 105 | << "cy" << 
m_center_y << "}"; 106 | 107 | fs.release(); 108 | } 109 | 110 | OCAMCamera::Parameters& 111 | OCAMCamera::Parameters::operator=(const OCAMCamera::Parameters& other) 112 | { 113 | if (this != &other) 114 | { 115 | m_modelType = other.m_modelType; 116 | m_cameraName = other.m_cameraName; 117 | m_imageWidth = other.m_imageWidth; 118 | m_imageHeight = other.m_imageHeight; 119 | m_C = other.m_C; 120 | m_D = other.m_D; 121 | m_E = other.m_E; 122 | m_center_x = other.m_center_x; 123 | m_center_y = other.m_center_y; 124 | 125 | memcpy(m_poly, other.m_poly, sizeof(double) * SCARAMUZZA_POLY_SIZE); 126 | memcpy(m_inv_poly, other.m_inv_poly, sizeof(double) * SCARAMUZZA_INV_POLY_SIZE); 127 | } 128 | 129 | return *this; 130 | } 131 | 132 | std::ostream& 133 | operator<< (std::ostream& out, const OCAMCamera::Parameters& params) 134 | { 135 | out << "Camera Parameters:" << std::endl; 136 | out << " model_type " << "scaramuzza" << std::endl; 137 | out << " camera_name " << params.m_cameraName << std::endl; 138 | out << " image_width " << params.m_imageWidth << std::endl; 139 | out << " image_height " << params.m_imageHeight << std::endl; 140 | 141 | out << std::fixed << std::setprecision(10); 142 | 143 | out << "Poly Parameters" << std::endl; 144 | for(int i=0; i < SCARAMUZZA_POLY_SIZE; i++) 145 | out << std::string("p") + boost::lexical_cast(i) << params.m_poly[i] << std::endl; 146 | 147 | out << "Inverse Poly Parameters" << std::endl; 148 | for(int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) 149 | out << std::string("p") + boost::lexical_cast(i) << params.m_inv_poly[i] << std::endl; 150 | 151 | out << "Affine Parameters" << std::endl; 152 | out << " ac " << params.m_C << std::endl 153 | << " ad " << params.m_D << std::endl 154 | << " ae " << params.m_E << std::endl; 155 | out << " cx " << params.m_center_x << std::endl 156 | << " cy " << params.m_center_y << std::endl; 157 | 158 | return out; 159 | } 160 | 161 | OCAMCamera::OCAMCamera() 162 | : m_inv_scale(0.0) 163 | { 164 | 165 | } 166 | 167 | OCAMCamera::OCAMCamera(const OCAMCamera::Parameters& params) 168 | : mParameters(params) 169 | { 170 | m_inv_scale = 1.0 / (params.C() - params.D() * params.E()); 171 | } 172 | 173 | Camera::ModelType 174 | OCAMCamera::modelType(void) const 175 | { 176 | return mParameters.modelType(); 177 | } 178 | 179 | const std::string& 180 | OCAMCamera::cameraName(void) const 181 | { 182 | return mParameters.cameraName(); 183 | } 184 | 185 | int 186 | OCAMCamera::imageWidth(void) const 187 | { 188 | return mParameters.imageWidth(); 189 | } 190 | 191 | int 192 | OCAMCamera::imageHeight(void) const 193 | { 194 | return mParameters.imageHeight(); 195 | } 196 | 197 | void 198 | OCAMCamera::estimateIntrinsics(const cv::Size& /*boardSize*/, 199 | const std::vector< std::vector >& /*objectPoints*/, 200 | const std::vector< std::vector >& /*imagePoints*/) 201 | { 202 | std::cout << "OCAMCamera::estimateIntrinsics - NOT IMPLEMENTED" << std::endl; 203 | throw std::string("OCAMCamera::estimateIntrinsics - NOT IMPLEMENTED"); 204 | } 205 | 206 | /** 207 | * \brief Lifts a point from the image plane to the unit sphere 208 | * 209 | * \param p image coordinates 210 | * \param P coordinates of the point on the sphere 211 | */ 212 | void 213 | OCAMCamera::liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const 214 | { 215 | liftProjective(p, P); 216 | P.normalize(); 217 | } 218 | 219 | /** 220 | * \brief Lifts a point from the image plane to its projective ray 221 | * 222 | * \param p image coordinates 223 | * \param P coordinates of the 
projective ray 224 | */ 225 | void 226 | OCAMCamera::liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const 227 | { 228 | // Relative to Center 229 | Eigen::Vector2d xc(p[0] - mParameters.center_x(), p[1] - mParameters.center_y()); 230 | 231 | // Affine Transformation 232 | // xc_a = inv(A) * xc; 233 | Eigen::Vector2d xc_a( 234 | m_inv_scale * (xc[0] - mParameters.D() * xc[1]), 235 | m_inv_scale * (-mParameters.E() * xc[0] + mParameters.C() * xc[1]) 236 | ); 237 | 238 | double phi = std::sqrt(xc_a[0] * xc_a[0] + xc_a[1] * xc_a[1]); 239 | double phi_i = 1.0; 240 | double z = 0.0; 241 | 242 | for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++) 243 | { 244 | z += phi_i * mParameters.poly(i); 245 | phi_i *= phi; 246 | } 247 | 248 | P << xc[0], xc[1], -z; 249 | } 250 | 251 | 252 | /** 253 | * \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v) 254 | * 255 | * \param P 3D point coordinates 256 | * \param p return value, contains the image point coordinates 257 | */ 258 | void 259 | OCAMCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const 260 | { 261 | double norm = std::sqrt(P[0] * P[0] + P[1] * P[1]); 262 | double theta = std::atan2(-P[2], norm); 263 | double rho = 0.0; 264 | double theta_i = 1.0; 265 | 266 | for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++) 267 | { 268 | rho += theta_i * mParameters.inv_poly(i); 269 | theta_i *= theta; 270 | } 271 | 272 | double invNorm = 1.0 / norm; 273 | Eigen::Vector2d xn( 274 | P[0] * invNorm * rho, 275 | P[1] * invNorm * rho 276 | ); 277 | 278 | p << xn[0] * mParameters.C() + xn[1] * mParameters.D() + mParameters.center_x(), 279 | xn[0] * mParameters.E() + xn[1] + mParameters.center_y(); 280 | } 281 | 282 | 283 | /** 284 | * \brief Projects an undistorted 2D point p_u to the image plane 285 | * 286 | * \param p_u 2D point coordinates 287 | * \return image point coordinates 288 | */ 289 | void 290 | OCAMCamera::undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const 291 | { 292 | Eigen::Vector3d P(p_u[0], p_u[1], 1.0); 293 | spaceToPlane(P, p); 294 | } 295 | 296 | 297 | #if 0 298 | void 299 | OCAMCamera::initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale) const 300 | { 301 | cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight()); 302 | 303 | cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F); 304 | cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F); 305 | 306 | for (int v = 0; v < imageSize.height; ++v) 307 | { 308 | for (int u = 0; u < imageSize.width; ++u) 309 | { 310 | double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale; 311 | double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale; 312 | 313 | double xi = mParameters.xi(); 314 | double d2 = mx_u * mx_u + my_u * my_u; 315 | 316 | Eigen::Vector3d P; 317 | P << mx_u, my_u, 1.0 - xi * (d2 + 1.0) / (xi + sqrt(1.0 + (1.0 - xi * xi) * d2)); 318 | 319 | Eigen::Vector2d p; 320 | spaceToPlane(P, p); 321 | 322 | mapX.at(v,u) = p(0); 323 | mapY.at(v,u) = p(1); 324 | } 325 | } 326 | 327 | cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); 328 | } 329 | #endif 330 | 331 | cv::Mat 332 | OCAMCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, 333 | float fx, float fy, 334 | cv::Size imageSize, 335 | float cx, float cy, 336 | cv::Mat rmat) const 337 | { 338 | if (imageSize == cv::Size(0, 0)) 339 | { 340 | imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight()); 341 | } 342 | 343 | cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); 344 | cv::Mat mapY = 
cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); 345 | 346 | Eigen::Matrix3f K_rect; 347 | 348 | K_rect << fx, 0, cx < 0 ? imageSize.width / 2 : cx, 349 | 0, fy, cy < 0 ? imageSize.height / 2 : cy, 350 | 0, 0, 1; 351 | 352 | if (fx < 0 || fy < 0) 353 | { 354 | throw std::string(std::string(__FUNCTION__) + ": Focal length must be specified"); 355 | } 356 | 357 | Eigen::Matrix3f K_rect_inv = K_rect.inverse(); 358 | 359 | Eigen::Matrix3f R, R_inv; 360 | cv::cv2eigen(rmat, R); 361 | R_inv = R.inverse(); 362 | 363 | for (int v = 0; v < imageSize.height; ++v) 364 | { 365 | for (int u = 0; u < imageSize.width; ++u) 366 | { 367 | Eigen::Vector3f xo; 368 | xo << u, v, 1; 369 | 370 | Eigen::Vector3f uo = R_inv * K_rect_inv * xo; 371 | 372 | Eigen::Vector2d p; 373 | spaceToPlane(uo.cast(), p); 374 | 375 | mapX.at(v,u) = p(0); 376 | mapY.at(v,u) = p(1); 377 | } 378 | } 379 | 380 | cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); 381 | 382 | cv::Mat K_rect_cv; 383 | cv::eigen2cv(K_rect, K_rect_cv); 384 | return K_rect_cv; 385 | } 386 | 387 | int 388 | OCAMCamera::parameterCount(void) const 389 | { 390 | return SCARAMUZZA_CAMERA_NUM_PARAMS; 391 | } 392 | 393 | const OCAMCamera::Parameters& 394 | OCAMCamera::getParameters(void) const 395 | { 396 | return mParameters; 397 | } 398 | 399 | void 400 | OCAMCamera::setParameters(const OCAMCamera::Parameters& parameters) 401 | { 402 | mParameters = parameters; 403 | 404 | m_inv_scale = 1.0 / (parameters.C() - parameters.D() * parameters.E()); 405 | } 406 | 407 | void 408 | OCAMCamera::readParameters(const std::vector& parameterVec) 409 | { 410 | if ((int)parameterVec.size() != parameterCount()) 411 | { 412 | return; 413 | } 414 | 415 | Parameters params = getParameters(); 416 | 417 | params.C() = parameterVec.at(0); 418 | params.D() = parameterVec.at(1); 419 | params.E() = parameterVec.at(2); 420 | params.center_x() = parameterVec.at(3); 421 | params.center_y() = parameterVec.at(4); 422 | for (int i=0; i < SCARAMUZZA_POLY_SIZE; i++) 423 | params.poly(i) = parameterVec.at(5+i); 424 | for (int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) 425 | params.inv_poly(i) = parameterVec.at(5 + SCARAMUZZA_POLY_SIZE + i); 426 | 427 | setParameters(params); 428 | } 429 | 430 | void 431 | OCAMCamera::writeParameters(std::vector& parameterVec) const 432 | { 433 | parameterVec.resize(parameterCount()); 434 | parameterVec.at(0) = mParameters.C(); 435 | parameterVec.at(1) = mParameters.D(); 436 | parameterVec.at(2) = mParameters.E(); 437 | parameterVec.at(3) = mParameters.center_x(); 438 | parameterVec.at(4) = mParameters.center_y(); 439 | for (int i=0; i < SCARAMUZZA_POLY_SIZE; i++) 440 | parameterVec.at(5+i) = mParameters.poly(i); 441 | for (int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) 442 | parameterVec.at(5 + SCARAMUZZA_POLY_SIZE + i) = mParameters.inv_poly(i); 443 | } 444 | 445 | void 446 | OCAMCamera::writeParametersToYamlFile(const std::string& filename) const 447 | { 448 | mParameters.writeToYamlFile(filename); 449 | } 450 | 451 | std::string 452 | OCAMCamera::parametersToString(void) const 453 | { 454 | std::ostringstream oss; 455 | oss << mParameters; 456 | 457 | return oss.str(); 458 | } 459 | 460 | } 461 | -------------------------------------------------------------------------------- /src/gpl/EigenQuaternionParameterization.cc: -------------------------------------------------------------------------------- 1 | #include "camodocal/gpl/EigenQuaternionParameterization.h" 2 | 3 | #include 4 | 5 | namespace camodocal 6 | { 7 | 8 | bool 9 | 
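// Plus() performs a local update on the quaternion manifold: the 3-vector delta
// is mapped through the exponential map to the unit quaternion
//   q_delta = [sin(|delta|)/|delta| * delta, cos(|delta|)]
// stored in Eigen's (x, y, z, w) order, and composed with x by quaternion
// multiplication; a zero delta leaves x unchanged.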
EigenQuaternionParameterization::Plus(const double* x, 10 | const double* delta, 11 | double* x_plus_delta) const 12 | { 13 | const double norm_delta = 14 | sqrt(delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]); 15 | if (norm_delta > 0.0) 16 | { 17 | const double sin_delta_by_delta = (sin(norm_delta) / norm_delta); 18 | double q_delta[4]; 19 | q_delta[0] = sin_delta_by_delta * delta[0]; 20 | q_delta[1] = sin_delta_by_delta * delta[1]; 21 | q_delta[2] = sin_delta_by_delta * delta[2]; 22 | q_delta[3] = cos(norm_delta); 23 | EigenQuaternionProduct(q_delta, x, x_plus_delta); 24 | } 25 | else 26 | { 27 | for (int i = 0; i < 4; ++i) 28 | { 29 | x_plus_delta[i] = x[i]; 30 | } 31 | } 32 | return true; 33 | } 34 | 35 | bool 36 | EigenQuaternionParameterization::ComputeJacobian(const double* x, 37 | double* jacobian) const 38 | { 39 | jacobian[0] = x[3]; jacobian[1] = x[2]; jacobian[2] = -x[1]; // NOLINT 40 | jacobian[3] = -x[2]; jacobian[4] = x[3]; jacobian[5] = x[0]; // NOLINT 41 | jacobian[6] = x[1]; jacobian[7] = -x[0]; jacobian[8] = x[3]; // NOLINT 42 | jacobian[9] = -x[0]; jacobian[10] = -x[1]; jacobian[11] = -x[2]; // NOLINT 43 | return true; 44 | } 45 | 46 | } 47 | -------------------------------------------------------------------------------- /src/gpl/gpl.cc: -------------------------------------------------------------------------------- 1 | #include "camodocal/gpl/gpl.h" 2 | 3 | #include 4 | #ifdef _WIN32 5 | #include 6 | #else 7 | #include 8 | #endif 9 | 10 | 11 | // source: https://stackoverflow.com/questions/5167269/clock-gettime-alternative-in-mac-os-x 12 | #ifdef __APPLE__ 13 | #include 14 | #define ORWL_NANO (+1.0E-9) 15 | #define ORWL_GIGA UINT64_C(1000000000) 16 | 17 | static double orwl_timebase = 0.0; 18 | static uint64_t orwl_timestart = 0; 19 | 20 | struct timespec orwl_gettime(void) { 21 | // be more careful in a multithreaded environement 22 | if (!orwl_timestart) { 23 | mach_timebase_info_data_t tb = { 0 }; 24 | mach_timebase_info(&tb); 25 | orwl_timebase = tb.numer; 26 | orwl_timebase /= tb.denom; 27 | orwl_timestart = mach_absolute_time(); 28 | } 29 | struct timespec t; 30 | double diff = (mach_absolute_time() - orwl_timestart) * orwl_timebase; 31 | t.tv_sec = diff * ORWL_NANO; 32 | t.tv_nsec = diff - (t.tv_sec * ORWL_GIGA); 33 | return t; 34 | } 35 | #endif // __APPLE__ 36 | 37 | 38 | const double WGS84_A = 6378137.0; 39 | const double WGS84_ECCSQ = 0.00669437999013; 40 | 41 | // Windows lacks fminf 42 | #ifndef fminf 43 | #define fminf(x, y) (((x) < (y)) ? 
(x) : (y)) 44 | #endif 45 | 46 | namespace camodocal 47 | { 48 | 49 | double hypot3(double x, double y, double z) 50 | { 51 | return sqrt(square(x) + square(y) + square(z)); 52 | } 53 | 54 | float hypot3f(float x, float y, float z) 55 | { 56 | return sqrtf(square(x) + square(y) + square(z)); 57 | } 58 | 59 | double d2r(double deg) 60 | { 61 | return deg / 180.0 * M_PI; 62 | } 63 | 64 | float d2r(float deg) 65 | { 66 | return deg / 180.0f * M_PI; 67 | } 68 | 69 | double r2d(double rad) 70 | { 71 | return rad / M_PI * 180.0; 72 | } 73 | 74 | float r2d(float rad) 75 | { 76 | return rad / M_PI * 180.0f; 77 | } 78 | 79 | double sinc(double theta) 80 | { 81 | return sin(theta) / theta; 82 | } 83 | 84 | #ifdef _WIN32 85 | #include 86 | #include 87 | #include 88 | LARGE_INTEGER 89 | getFILETIMEoffset() 90 | { 91 | SYSTEMTIME s; 92 | FILETIME f; 93 | LARGE_INTEGER t; 94 | 95 | s.wYear = 1970; 96 | s.wMonth = 1; 97 | s.wDay = 1; 98 | s.wHour = 0; 99 | s.wMinute = 0; 100 | s.wSecond = 0; 101 | s.wMilliseconds = 0; 102 | SystemTimeToFileTime(&s, &f); 103 | t.QuadPart = f.dwHighDateTime; 104 | t.QuadPart <<= 32; 105 | t.QuadPart |= f.dwLowDateTime; 106 | return (t); 107 | } 108 | 109 | int 110 | clock_gettime(int X, struct timespec *tp) 111 | { 112 | LARGE_INTEGER t; 113 | FILETIME f; 114 | double microseconds; 115 | static LARGE_INTEGER offset; 116 | static double frequencyToMicroseconds; 117 | static int initialized = 0; 118 | static BOOL usePerformanceCounter = 0; 119 | 120 | if (!initialized) { 121 | LARGE_INTEGER performanceFrequency; 122 | initialized = 1; 123 | usePerformanceCounter = QueryPerformanceFrequency(&performanceFrequency); 124 | if (usePerformanceCounter) { 125 | QueryPerformanceCounter(&offset); 126 | frequencyToMicroseconds = (double)performanceFrequency.QuadPart / 1000000.; 127 | } else { 128 | offset = getFILETIMEoffset(); 129 | frequencyToMicroseconds = 10.; 130 | } 131 | } 132 | if (usePerformanceCounter) QueryPerformanceCounter(&t); 133 | else { 134 | GetSystemTimeAsFileTime(&f); 135 | t.QuadPart = f.dwHighDateTime; 136 | t.QuadPart <<= 32; 137 | t.QuadPart |= f.dwLowDateTime; 138 | } 139 | 140 | t.QuadPart -= offset.QuadPart; 141 | microseconds = (double)t.QuadPart / frequencyToMicroseconds; 142 | t.QuadPart = microseconds; 143 | tp->tv_sec = t.QuadPart / 1000000; 144 | tp->tv_nsec = (t.QuadPart % 1000000) * 1000; 145 | return (0); 146 | } 147 | #endif 148 | 149 | unsigned long long timeInMicroseconds(void) 150 | { 151 | struct timespec tp; 152 | #ifdef __APPLE__ 153 | tp = orwl_gettime(); 154 | #else 155 | clock_gettime(CLOCK_REALTIME, &tp); 156 | #endif 157 | 158 | return tp.tv_sec * 1000000 + tp.tv_nsec / 1000; 159 | } 160 | 161 | double timeInSeconds(void) 162 | { 163 | struct timespec tp; 164 | #ifdef __APPLE__ 165 | tp = orwl_gettime(); 166 | #else 167 | clock_gettime(CLOCK_REALTIME, &tp); 168 | #endif 169 | 170 | return static_cast(tp.tv_sec) + 171 | static_cast(tp.tv_nsec) / 1000000000.0; 172 | } 173 | 174 | float colormapAutumn[128][3] = 175 | { 176 | {1.0f,0.f,0.f}, 177 | {1.0f,0.007874f,0.f}, 178 | {1.0f,0.015748f,0.f}, 179 | {1.0f,0.023622f,0.f}, 180 | {1.0f,0.031496f,0.f}, 181 | {1.0f,0.03937f,0.f}, 182 | {1.0f,0.047244f,0.f}, 183 | {1.0f,0.055118f,0.f}, 184 | {1.0f,0.062992f,0.f}, 185 | {1.0f,0.070866f,0.f}, 186 | {1.0f,0.07874f,0.f}, 187 | {1.0f,0.086614f,0.f}, 188 | {1.0f,0.094488f,0.f}, 189 | {1.0f,0.10236f,0.f}, 190 | {1.0f,0.11024f,0.f}, 191 | {1.0f,0.11811f,0.f}, 192 | {1.0f,0.12598f,0.f}, 193 | {1.0f,0.13386f,0.f}, 194 | {1.0f,0.14173f,0.f}, 195 | 
{1.0f,0.14961f,0.f}, 196 | {1.0f,0.15748f,0.f}, 197 | {1.0f,0.16535f,0.f}, 198 | {1.0f,0.17323f,0.f}, 199 | {1.0f,0.1811f,0.f}, 200 | {1.0f,0.18898f,0.f}, 201 | {1.0f,0.19685f,0.f}, 202 | {1.0f,0.20472f,0.f}, 203 | {1.0f,0.2126f,0.f}, 204 | {1.0f,0.22047f,0.f}, 205 | {1.0f,0.22835f,0.f}, 206 | {1.0f,0.23622f,0.f}, 207 | {1.0f,0.24409f,0.f}, 208 | {1.0f,0.25197f,0.f}, 209 | {1.0f,0.25984f,0.f}, 210 | {1.0f,0.26772f,0.f}, 211 | {1.0f,0.27559f,0.f}, 212 | {1.0f,0.28346f,0.f}, 213 | {1.0f,0.29134f,0.f}, 214 | {1.0f,0.29921f,0.f}, 215 | {1.0f,0.30709f,0.f}, 216 | {1.0f,0.31496f,0.f}, 217 | {1.0f,0.32283f,0.f}, 218 | {1.0f,0.33071f,0.f}, 219 | {1.0f,0.33858f,0.f}, 220 | {1.0f,0.34646f,0.f}, 221 | {1.0f,0.35433f,0.f}, 222 | {1.0f,0.3622f,0.f}, 223 | {1.0f,0.37008f,0.f}, 224 | {1.0f,0.37795f,0.f}, 225 | {1.0f,0.38583f,0.f}, 226 | {1.0f,0.3937f,0.f}, 227 | {1.0f,0.40157f,0.f}, 228 | {1.0f,0.40945f,0.f}, 229 | {1.0f,0.41732f,0.f}, 230 | {1.0f,0.4252f,0.f}, 231 | {1.0f,0.43307f,0.f}, 232 | {1.0f,0.44094f,0.f}, 233 | {1.0f,0.44882f,0.f}, 234 | {1.0f,0.45669f,0.f}, 235 | {1.0f,0.46457f,0.f}, 236 | {1.0f,0.47244f,0.f}, 237 | {1.0f,0.48031f,0.f}, 238 | {1.0f,0.48819f,0.f}, 239 | {1.0f,0.49606f,0.f}, 240 | {1.0f,0.50394f,0.f}, 241 | {1.0f,0.51181f,0.f}, 242 | {1.0f,0.51969f,0.f}, 243 | {1.0f,0.52756f,0.f}, 244 | {1.0f,0.53543f,0.f}, 245 | {1.0f,0.54331f,0.f}, 246 | {1.0f,0.55118f,0.f}, 247 | {1.0f,0.55906f,0.f}, 248 | {1.0f,0.56693f,0.f}, 249 | {1.0f,0.5748f,0.f}, 250 | {1.0f,0.58268f,0.f}, 251 | {1.0f,0.59055f,0.f}, 252 | {1.0f,0.59843f,0.f}, 253 | {1.0f,0.6063f,0.f}, 254 | {1.0f,0.61417f,0.f}, 255 | {1.0f,0.62205f,0.f}, 256 | {1.0f,0.62992f,0.f}, 257 | {1.0f,0.6378f,0.f}, 258 | {1.0f,0.64567f,0.f}, 259 | {1.0f,0.65354f,0.f}, 260 | {1.0f,0.66142f,0.f}, 261 | {1.0f,0.66929f,0.f}, 262 | {1.0f,0.67717f,0.f}, 263 | {1.0f,0.68504f,0.f}, 264 | {1.0f,0.69291f,0.f}, 265 | {1.0f,0.70079f,0.f}, 266 | {1.0f,0.70866f,0.f}, 267 | {1.0f,0.71654f,0.f}, 268 | {1.0f,0.72441f,0.f}, 269 | {1.0f,0.73228f,0.f}, 270 | {1.0f,0.74016f,0.f}, 271 | {1.0f,0.74803f,0.f}, 272 | {1.0f,0.75591f,0.f}, 273 | {1.0f,0.76378f,0.f}, 274 | {1.0f,0.77165f,0.f}, 275 | {1.0f,0.77953f,0.f}, 276 | {1.0f,0.7874f,0.f}, 277 | {1.0f,0.79528f,0.f}, 278 | {1.0f,0.80315f,0.f}, 279 | {1.0f,0.81102f,0.f}, 280 | {1.0f,0.8189f,0.f}, 281 | {1.0f,0.82677f,0.f}, 282 | {1.0f,0.83465f,0.f}, 283 | {1.0f,0.84252f,0.f}, 284 | {1.0f,0.85039f,0.f}, 285 | {1.0f,0.85827f,0.f}, 286 | {1.0f,0.86614f,0.f}, 287 | {1.0f,0.87402f,0.f}, 288 | {1.0f,0.88189f,0.f}, 289 | {1.0f,0.88976f,0.f}, 290 | {1.0f,0.89764f,0.f}, 291 | {1.0f,0.90551f,0.f}, 292 | {1.0f,0.91339f,0.f}, 293 | {1.0f,0.92126f,0.f}, 294 | {1.0f,0.92913f,0.f}, 295 | {1.0f,0.93701f,0.f}, 296 | {1.0f,0.94488f,0.f}, 297 | {1.0f,0.95276f,0.f}, 298 | {1.0f,0.96063f,0.f}, 299 | {1.0f,0.9685f,0.f}, 300 | {1.0f,0.97638f,0.f}, 301 | {1.0f,0.98425f,0.f}, 302 | {1.0f,0.99213f,0.f}, 303 | {1.0f,1.0f,0.0f} 304 | }; 305 | 306 | 307 | float colormapJet[128][3] = 308 | { 309 | {0.0f,0.0f,0.53125f}, 310 | {0.0f,0.0f,0.5625f}, 311 | {0.0f,0.0f,0.59375f}, 312 | {0.0f,0.0f,0.625f}, 313 | {0.0f,0.0f,0.65625f}, 314 | {0.0f,0.0f,0.6875f}, 315 | {0.0f,0.0f,0.71875f}, 316 | {0.0f,0.0f,0.75f}, 317 | {0.0f,0.0f,0.78125f}, 318 | {0.0f,0.0f,0.8125f}, 319 | {0.0f,0.0f,0.84375f}, 320 | {0.0f,0.0f,0.875f}, 321 | {0.0f,0.0f,0.90625f}, 322 | {0.0f,0.0f,0.9375f}, 323 | {0.0f,0.0f,0.96875f}, 324 | {0.0f,0.0f,1.0f}, 325 | {0.0f,0.03125f,1.0f}, 326 | {0.0f,0.0625f,1.0f}, 327 | {0.0f,0.09375f,1.0f}, 328 | {0.0f,0.125f,1.0f}, 329 | {0.0f,0.15625f,1.0f}, 
330 | {0.0f,0.1875f,1.0f}, 331 | {0.0f,0.21875f,1.0f}, 332 | {0.0f,0.25f,1.0f}, 333 | {0.0f,0.28125f,1.0f}, 334 | {0.0f,0.3125f,1.0f}, 335 | {0.0f,0.34375f,1.0f}, 336 | {0.0f,0.375f,1.0f}, 337 | {0.0f,0.40625f,1.0f}, 338 | {0.0f,0.4375f,1.0f}, 339 | {0.0f,0.46875f,1.0f}, 340 | {0.0f,0.5f,1.0f}, 341 | {0.0f,0.53125f,1.0f}, 342 | {0.0f,0.5625f,1.0f}, 343 | {0.0f,0.59375f,1.0f}, 344 | {0.0f,0.625f,1.0f}, 345 | {0.0f,0.65625f,1.0f}, 346 | {0.0f,0.6875f,1.0f}, 347 | {0.0f,0.71875f,1.0f}, 348 | {0.0f,0.75f,1.0f}, 349 | {0.0f,0.78125f,1.0f}, 350 | {0.0f,0.8125f,1.0f}, 351 | {0.0f,0.84375f,1.0f}, 352 | {0.0f,0.875f,1.0f}, 353 | {0.0f,0.90625f,1.0f}, 354 | {0.0f,0.9375f,1.0f}, 355 | {0.0f,0.96875f,1.0f}, 356 | {0.0f,1.0f,1.0f}, 357 | {0.03125f,1.0f,0.96875f}, 358 | {0.0625f,1.0f,0.9375f}, 359 | {0.09375f,1.0f,0.90625f}, 360 | {0.125f,1.0f,0.875f}, 361 | {0.15625f,1.0f,0.84375f}, 362 | {0.1875f,1.0f,0.8125f}, 363 | {0.21875f,1.0f,0.78125f}, 364 | {0.25f,1.0f,0.75f}, 365 | {0.28125f,1.0f,0.71875f}, 366 | {0.3125f,1.0f,0.6875f}, 367 | {0.34375f,1.0f,0.65625f}, 368 | {0.375f,1.0f,0.625f}, 369 | {0.40625f,1.0f,0.59375f}, 370 | {0.4375f,1.0f,0.5625f}, 371 | {0.46875f,1.0f,0.53125f}, 372 | {0.5f,1.0f,0.5f}, 373 | {0.53125f,1.0f,0.46875f}, 374 | {0.5625f,1.0f,0.4375f}, 375 | {0.59375f,1.0f,0.40625f}, 376 | {0.625f,1.0f,0.375f}, 377 | {0.65625f,1.0f,0.34375f}, 378 | {0.6875f,1.0f,0.3125f}, 379 | {0.71875f,1.0f,0.28125f}, 380 | {0.75f,1.0f,0.25f}, 381 | {0.78125f,1.0f,0.21875f}, 382 | {0.8125f,1.0f,0.1875f}, 383 | {0.84375f,1.0f,0.15625f}, 384 | {0.875f,1.0f,0.125f}, 385 | {0.90625f,1.0f,0.09375f}, 386 | {0.9375f,1.0f,0.0625f}, 387 | {0.96875f,1.0f,0.03125f}, 388 | {1.0f,1.0f,0.0f}, 389 | {1.0f,0.96875f,0.0f}, 390 | {1.0f,0.9375f,0.0f}, 391 | {1.0f,0.90625f,0.0f}, 392 | {1.0f,0.875f,0.0f}, 393 | {1.0f,0.84375f,0.0f}, 394 | {1.0f,0.8125f,0.0f}, 395 | {1.0f,0.78125f,0.0f}, 396 | {1.0f,0.75f,0.0f}, 397 | {1.0f,0.71875f,0.0f}, 398 | {1.0f,0.6875f,0.0f}, 399 | {1.0f,0.65625f,0.0f}, 400 | {1.0f,0.625f,0.0f}, 401 | {1.0f,0.59375f,0.0f}, 402 | {1.0f,0.5625f,0.0f}, 403 | {1.0f,0.53125f,0.0f}, 404 | {1.0f,0.5f,0.0f}, 405 | {1.0f,0.46875f,0.0f}, 406 | {1.0f,0.4375f,0.0f}, 407 | {1.0f,0.40625f,0.0f}, 408 | {1.0f,0.375f,0.0f}, 409 | {1.0f,0.34375f,0.0f}, 410 | {1.0f,0.3125f,0.0f}, 411 | {1.0f,0.28125f,0.0f}, 412 | {1.0f,0.25f,0.0f}, 413 | {1.0f,0.21875f,0.0f}, 414 | {1.0f,0.1875f,0.0f}, 415 | {1.0f,0.15625f,0.0f}, 416 | {1.0f,0.125f,0.0f}, 417 | {1.0f,0.09375f,0.0f}, 418 | {1.0f,0.0625f,0.0f}, 419 | {1.0f,0.03125f,0.0f}, 420 | {1.0f,0.0f,0.0f}, 421 | {0.96875f,0.0f,0.0f}, 422 | {0.9375f,0.0f,0.0f}, 423 | {0.90625f,0.0f,0.0f}, 424 | {0.875f,0.0f,0.0f}, 425 | {0.84375f,0.0f,0.0f}, 426 | {0.8125f,0.0f,0.0f}, 427 | {0.78125f,0.0f,0.0f}, 428 | {0.75f,0.0f,0.0f}, 429 | {0.71875f,0.0f,0.0f}, 430 | {0.6875f,0.0f,0.0f}, 431 | {0.65625f,0.0f,0.0f}, 432 | {0.625f,0.0f,0.0f}, 433 | {0.59375f,0.0f,0.0f}, 434 | {0.5625f,0.0f,0.0f}, 435 | {0.53125f,0.0f,0.0f}, 436 | {0.5f,0.0f,0.0f} 437 | }; 438 | 439 | void colorDepthImage(cv::Mat& imgDepth, cv::Mat& imgColoredDepth, 440 | float minRange, float maxRange) 441 | { 442 | imgColoredDepth = cv::Mat::zeros(imgDepth.size(), CV_8UC3); 443 | 444 | for (int i = 0; i < imgColoredDepth.rows; ++i) 445 | { 446 | const float* depth = imgDepth.ptr(i); 447 | unsigned char* pixel = imgColoredDepth.ptr(i); 448 | for (int j = 0; j < imgColoredDepth.cols; ++j) 449 | { 450 | if (depth[j] != 0) 451 | { 452 | int idx = fminf(depth[j] - minRange, maxRange - minRange) / (maxRange - minRange) * 127.0f; 453 | 
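// The depth is capped at maxRange and normalised to a colormap index in
// [0, 127]; the inversion below makes near points map to the red end of the
// jet colormap and far points to the blue end.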
idx = 127 - idx; 454 | 455 | pixel[0] = colormapJet[idx][2] * 255.0f; 456 | pixel[1] = colormapJet[idx][1] * 255.0f; 457 | pixel[2] = colormapJet[idx][0] * 255.0f; 458 | } 459 | 460 | pixel += 3; 461 | } 462 | } 463 | } 464 | 465 | bool colormap(const std::string& name, unsigned char idx, 466 | float& r, float& g, float& b) 467 | { 468 | if (name.compare("jet") == 0) 469 | { 470 | float* color = colormapJet[idx]; 471 | 472 | r = color[0]; 473 | g = color[1]; 474 | b = color[2]; 475 | 476 | return true; 477 | } 478 | else if (name.compare("autumn") == 0) 479 | { 480 | float* color = colormapAutumn[idx]; 481 | 482 | r = color[0]; 483 | g = color[1]; 484 | b = color[2]; 485 | 486 | return true; 487 | } 488 | 489 | return false; 490 | } 491 | 492 | std::vector bresLine(int x0, int y0, int x1, int y1) 493 | { 494 | // Bresenham's line algorithm 495 | // Find cells intersected by line between (x0,y0) and (x1,y1) 496 | 497 | std::vector cells; 498 | 499 | int dx = std::abs(x1 - x0); 500 | int dy = std::abs(y1 - y0); 501 | 502 | int sx = (x0 < x1) ? 1 : -1; 503 | int sy = (y0 < y1) ? 1 : -1; 504 | 505 | int err = dx - dy; 506 | 507 | while (1) 508 | { 509 | cells.push_back(cv::Point2i(x0, y0)); 510 | 511 | if (x0 == x1 && y0 == y1) 512 | { 513 | break; 514 | } 515 | 516 | int e2 = 2 * err; 517 | if (e2 > -dy) 518 | { 519 | err -= dy; 520 | x0 += sx; 521 | } 522 | if (e2 < dx) 523 | { 524 | err += dx; 525 | y0 += sy; 526 | } 527 | } 528 | 529 | return cells; 530 | } 531 | 532 | std::vector bresCircle(int x0, int y0, int r) 533 | { 534 | // Bresenham's circle algorithm 535 | // Find cells intersected by circle with center (x0,y0) and radius r 536 | 537 | std::vector< std::vector > mask(2 * r + 1); 538 | 539 | for (int i = 0; i < 2 * r + 1; ++i) 540 | { 541 | mask[i].resize(2 * r + 1); 542 | for (int j = 0; j < 2 * r + 1; ++j) 543 | { 544 | mask[i][j] = false; 545 | } 546 | } 547 | 548 | int f = 1 - r; 549 | int ddF_x = 1; 550 | int ddF_y = -2 * r; 551 | int x = 0; 552 | int y = r; 553 | 554 | std::vector line; 555 | 556 | line = bresLine(x0, y0 - r, x0, y0 + r); 557 | for (std::vector::iterator it = line.begin(); it != line.end(); ++it) 558 | { 559 | mask[it->x - x0 + r][it->y - y0 + r] = true; 560 | } 561 | 562 | line = bresLine(x0 - r, y0, x0 + r, y0); 563 | for (std::vector::iterator it = line.begin(); it != line.end(); ++it) 564 | { 565 | mask[it->x - x0 + r][it->y - y0 + r] = true; 566 | } 567 | 568 | while (x < y) 569 | { 570 | if (f >= 0) 571 | { 572 | y--; 573 | ddF_y += 2; 574 | f += ddF_y; 575 | } 576 | 577 | x++; 578 | ddF_x += 2; 579 | f += ddF_x; 580 | 581 | line = bresLine(x0 - x, y0 + y, x0 + x, y0 + y); 582 | for (std::vector::iterator it = line.begin(); it != line.end(); ++it) 583 | { 584 | mask[it->x - x0 + r][it->y - y0 + r] = true; 585 | } 586 | 587 | line = bresLine(x0 - x, y0 - y, x0 + x, y0 - y); 588 | for (std::vector::iterator it = line.begin(); it != line.end(); ++it) 589 | { 590 | mask[it->x - x0 + r][it->y - y0 + r] = true; 591 | } 592 | 593 | line = bresLine(x0 - y, y0 + x, x0 + y, y0 + x); 594 | for (std::vector::iterator it = line.begin(); it != line.end(); ++it) 595 | { 596 | mask[it->x - x0 + r][it->y - y0 + r] = true; 597 | } 598 | 599 | line = bresLine(x0 - y, y0 - x, x0 + y, y0 - x); 600 | for (std::vector::iterator it = line.begin(); it != line.end(); ++it) 601 | { 602 | mask[it->x - x0 + r][it->y - y0 + r] = true; 603 | } 604 | } 605 | 606 | std::vector cells; 607 | for (int i = 0; i < 2 * r + 1; ++i) 608 | { 609 | for (int j = 0; j < 2 * r + 1; ++j) 610 | { 611 
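// Every cell marked in the (2r+1) x (2r+1) boolean mask belongs to the filled
// circle; shift it from mask coordinates back to image coordinates about (x0, y0).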
| if (mask[i][j]) 612 | { 613 | cells.push_back(cv::Point2i(i - r + x0, j - r + y0)); 614 | } 615 | } 616 | } 617 | 618 | return cells; 619 | } 620 | 621 | void 622 | fitCircle(const std::vector& points, 623 | double& centerX, double& centerY, double& radius) 624 | { 625 | // D. Umbach, and K. Jones, A Few Methods for Fitting Circles to Data, 626 | // IEEE Transactions on Instrumentation and Measurement, 2000 627 | // We use the modified least squares method. 628 | double sum_x = 0.0; 629 | double sum_y = 0.0; 630 | double sum_xx = 0.0; 631 | double sum_xy = 0.0; 632 | double sum_yy = 0.0; 633 | double sum_xxx = 0.0; 634 | double sum_xxy = 0.0; 635 | double sum_xyy = 0.0; 636 | double sum_yyy = 0.0; 637 | 638 | int n = points.size(); 639 | for (int i = 0; i < n; ++i) 640 | { 641 | double x = points.at(i).x; 642 | double y = points.at(i).y; 643 | 644 | sum_x += x; 645 | sum_y += y; 646 | sum_xx += x * x; 647 | sum_xy += x * y; 648 | sum_yy += y * y; 649 | sum_xxx += x * x * x; 650 | sum_xxy += x * x * y; 651 | sum_xyy += x * y * y; 652 | sum_yyy += y * y * y; 653 | } 654 | 655 | double A = n * sum_xx - square(sum_x); 656 | double B = n * sum_xy - sum_x * sum_y; 657 | double C = n * sum_yy - square(sum_y); 658 | double D = 0.5 * (n * sum_xyy - sum_x * sum_yy + n * sum_xxx - sum_x * sum_xx); 659 | double E = 0.5 * (n * sum_xxy - sum_y * sum_xx + n * sum_yyy - sum_y * sum_yy); 660 | 661 | centerX = (D * C - B * E) / (A * C - square(B)); 662 | centerY = (A * E - B * D) / (A * C - square(B)); 663 | 664 | double sum_r = 0.0; 665 | for (int i = 0; i < n; ++i) 666 | { 667 | double x = points.at(i).x; 668 | double y = points.at(i).y; 669 | 670 | sum_r += hypot(x - centerX, y - centerY); 671 | } 672 | 673 | radius = sum_r / n; 674 | } 675 | 676 | std::vector 677 | intersectCircles(double x1, double y1, double r1, 678 | double x2, double y2, double r2) 679 | { 680 | std::vector ipts; 681 | 682 | double d = hypot(x1 - x2, y1 - y2); 683 | if (d > r1 + r2) 684 | { 685 | // circles are separate 686 | return ipts; 687 | } 688 | if (d < fabs(r1 - r2)) 689 | { 690 | // one circle is contained within the other 691 | return ipts; 692 | } 693 | 694 | double a = (square(r1) - square(r2) + square(d)) / (2.0 * d); 695 | double h = sqrt(square(r1) - square(a)); 696 | 697 | double x3 = x1 + a * (x2 - x1) / d; 698 | double y3 = y1 + a * (y2 - y1) / d; 699 | 700 | if (h < 1e-10) 701 | { 702 | // two circles touch at one point 703 | ipts.push_back(cv::Point2d(x3, y3)); 704 | return ipts; 705 | } 706 | 707 | ipts.push_back(cv::Point2d(x3 + h * (y2 - y1) / d, 708 | y3 - h * (x2 - x1) / d)); 709 | ipts.push_back(cv::Point2d(x3 - h * (y2 - y1) / d, 710 | y3 + h * (x2 - x1) / d)); 711 | return ipts; 712 | } 713 | 714 | char 715 | UTMLetterDesignator(double latitude) 716 | { 717 | // This routine determines the correct UTM letter designator for the given latitude 718 | // returns 'Z' if latitude is outside the UTM limits of 84N to 80S 719 | // Written by Chuck Gantz- chuck.gantz@globalstar.com 720 | char letterDesignator; 721 | 722 | if ((84.0 >= latitude) && (latitude >= 72.0)) letterDesignator = 'X'; 723 | else if ((72.0 > latitude) && (latitude >= 64.0)) letterDesignator = 'W'; 724 | else if ((64.0 > latitude) && (latitude >= 56.0)) letterDesignator = 'V'; 725 | else if ((56.0 > latitude) && (latitude >= 48.0)) letterDesignator = 'U'; 726 | else if ((48.0 > latitude) && (latitude >= 40.0)) letterDesignator = 'T'; 727 | else if ((40.0 > latitude) && (latitude >= 32.0)) letterDesignator = 'S'; 728 | else if ((32.0 > 
latitude) && (latitude >= 24.0)) letterDesignator = 'R'; 729 | else if ((24.0 > latitude) && (latitude >= 16.0)) letterDesignator = 'Q'; 730 | else if ((16.0 > latitude) && (latitude >= 8.0)) letterDesignator = 'P'; 731 | else if (( 8.0 > latitude) && (latitude >= 0.0)) letterDesignator = 'N'; 732 | else if (( 0.0 > latitude) && (latitude >= -8.0)) letterDesignator = 'M'; 733 | else if ((-8.0 > latitude) && (latitude >= -16.0)) letterDesignator = 'L'; 734 | else if ((-16.0 > latitude) && (latitude >= -24.0)) letterDesignator = 'K'; 735 | else if ((-24.0 > latitude) && (latitude >= -32.0)) letterDesignator = 'J'; 736 | else if ((-32.0 > latitude) && (latitude >= -40.0)) letterDesignator = 'H'; 737 | else if ((-40.0 > latitude) && (latitude >= -48.0)) letterDesignator = 'G'; 738 | else if ((-48.0 > latitude) && (latitude >= -56.0)) letterDesignator = 'F'; 739 | else if ((-56.0 > latitude) && (latitude >= -64.0)) letterDesignator = 'E'; 740 | else if ((-64.0 > latitude) && (latitude >= -72.0)) letterDesignator = 'D'; 741 | else if ((-72.0 > latitude) && (latitude >= -80.0)) letterDesignator = 'C'; 742 | else letterDesignator = 'Z'; //This is here as an error flag to show that the Latitude is outside the UTM limits 743 | 744 | return letterDesignator; 745 | } 746 | 747 | void 748 | LLtoUTM(double latitude, double longitude, 749 | double& utmNorthing, double& utmEasting, std::string& utmZone) 750 | { 751 | // converts lat/long to UTM coords. Equations from USGS Bulletin 1532 752 | // East Longitudes are positive, West longitudes are negative. 753 | // North latitudes are positive, South latitudes are negative 754 | // Lat and Long are in decimal degrees 755 | // Written by Chuck Gantz- chuck.gantz@globalstar.com 756 | 757 | double k0 = 0.9996; 758 | 759 | double LongOrigin; 760 | double eccPrimeSquared; 761 | double N, T, C, A, M; 762 | 763 | double LatRad = latitude * M_PI / 180.0; 764 | double LongRad = longitude * M_PI / 180.0; 765 | double LongOriginRad; 766 | 767 | int ZoneNumber = static_cast((longitude + 180.0) / 6.0) + 1; 768 | 769 | if (latitude >= 56.0 && latitude < 64.0 && 770 | longitude >= 3.0 && longitude < 12.0) { 771 | ZoneNumber = 32; 772 | } 773 | 774 | // Special zones for Svalbard 775 | if (latitude >= 72.0 && latitude < 84.0) { 776 | if ( longitude >= 0.0 && longitude < 9.0) ZoneNumber = 31; 777 | else if (longitude >= 9.0 && longitude < 21.0) ZoneNumber = 33; 778 | else if (longitude >= 21.0 && longitude < 33.0) ZoneNumber = 35; 779 | else if (longitude >= 33.0 && longitude < 42.0) ZoneNumber = 37; 780 | } 781 | LongOrigin = static_cast((ZoneNumber - 1) * 6 - 180 + 3); //+3 puts origin in middle of zone 782 | LongOriginRad = LongOrigin * M_PI / 180.0; 783 | 784 | // compute the UTM Zone from the latitude and longitude 785 | std::ostringstream oss; 786 | oss << ZoneNumber << UTMLetterDesignator(latitude); 787 | utmZone = oss.str(); 788 | 789 | eccPrimeSquared = WGS84_ECCSQ / (1.0 - WGS84_ECCSQ); 790 | 791 | N = WGS84_A / sqrt(1.0 - WGS84_ECCSQ * sin(LatRad) * sin(LatRad)); 792 | T = tan(LatRad) * tan(LatRad); 793 | C = eccPrimeSquared * cos(LatRad) * cos(LatRad); 794 | A = cos(LatRad) * (LongRad - LongOriginRad); 795 | 796 | M = WGS84_A * ((1.0 - WGS84_ECCSQ / 4.0 797 | - 3.0 * WGS84_ECCSQ * WGS84_ECCSQ / 64.0 798 | - 5.0 * WGS84_ECCSQ * WGS84_ECCSQ * WGS84_ECCSQ / 256.0) 799 | * LatRad 800 | - (3.0 * WGS84_ECCSQ / 8.0 801 | + 3.0 * WGS84_ECCSQ * WGS84_ECCSQ / 32.0 802 | + 45.0 * WGS84_ECCSQ * WGS84_ECCSQ * WGS84_ECCSQ / 1024.0) 803 | * sin(2.0 * LatRad) 804 | + (15.0 * 
WGS84_ECCSQ * WGS84_ECCSQ / 256.0 805 | + 45.0 * WGS84_ECCSQ * WGS84_ECCSQ * WGS84_ECCSQ / 1024.0) 806 | * sin(4.0 * LatRad) 807 | - (35.0 * WGS84_ECCSQ * WGS84_ECCSQ * WGS84_ECCSQ / 3072.0) 808 | * sin(6.0 * LatRad)); 809 | 810 | utmEasting = k0 * N * (A + (1.0 - T + C) * A * A * A / 6.0 811 | + (5.0 - 18.0 * T + T * T + 72.0 * C 812 | - 58.0 * eccPrimeSquared) 813 | * A * A * A * A * A / 120.0) 814 | + 500000.0; 815 | 816 | utmNorthing = k0 * (M + N * tan(LatRad) * 817 | (A * A / 2.0 + 818 | (5.0 - T + 9.0 * C + 4.0 * C * C) * A * A * A * A / 24.0 819 | + (61.0 - 58.0 * T + T * T + 600.0 * C 820 | - 330.0 * eccPrimeSquared) 821 | * A * A * A * A * A * A / 720.0)); 822 | if (latitude < 0.0) { 823 | utmNorthing += 10000000.0; //10000000 meter offset for southern hemisphere 824 | } 825 | } 826 | 827 | void 828 | UTMtoLL(double utmNorthing, double utmEasting, const std::string& utmZone, 829 | double& latitude, double& longitude) 830 | { 831 | // converts UTM coords to lat/long. Equations from USGS Bulletin 1532 832 | // East Longitudes are positive, West longitudes are negative. 833 | // North latitudes are positive, South latitudes are negative 834 | // Lat and Long are in decimal degrees. 835 | // Written by Chuck Gantz- chuck.gantz@globalstar.com 836 | 837 | double k0 = 0.9996; 838 | double eccPrimeSquared; 839 | double e1 = (1.0 - sqrt(1.0 - WGS84_ECCSQ)) / (1.0 + sqrt(1.0 - WGS84_ECCSQ)); 840 | double N1, T1, C1, R1, D, M; 841 | double LongOrigin; 842 | double mu, phi1, phi1Rad; 843 | double x, y; 844 | int ZoneNumber; 845 | char ZoneLetter; 846 | bool NorthernHemisphere; 847 | 848 | x = utmEasting - 500000.0; //remove 500,000 meter offset for longitude 849 | y = utmNorthing; 850 | 851 | std::istringstream iss(utmZone); 852 | iss >> ZoneNumber >> ZoneLetter; 853 | if ((static_cast(ZoneLetter) - static_cast('N')) >= 0) { 854 | NorthernHemisphere = true;//point is in northern hemisphere 855 | } else { 856 | NorthernHemisphere = false;//point is in southern hemisphere 857 | y -= 10000000.0;//remove 10,000,000 meter offset used for southern hemisphere 858 | } 859 | 860 | LongOrigin = (ZoneNumber - 1.0) * 6.0 - 180.0 + 3.0; //+3 puts origin in middle of zone 861 | 862 | eccPrimeSquared = WGS84_ECCSQ / (1.0 - WGS84_ECCSQ); 863 | 864 | M = y / k0; 865 | mu = M / (WGS84_A * (1.0 - WGS84_ECCSQ / 4.0 866 | - 3.0 * WGS84_ECCSQ * WGS84_ECCSQ / 64.0 867 | - 5.0 * WGS84_ECCSQ * WGS84_ECCSQ * WGS84_ECCSQ / 256.0)); 868 | 869 | phi1Rad = mu + (3.0 * e1 / 2.0 - 27.0 * e1 * e1 * e1 / 32.0) * sin(2.0 * mu) 870 | + (21.0 * e1 * e1 / 16.0 - 55.0 * e1 * e1 * e1 * e1 / 32.0) 871 | * sin(4.0 * mu) 872 | + (151.0 * e1 * e1 * e1 / 96.0) * sin(6.0 * mu); 873 | phi1 = phi1Rad / M_PI * 180.0; 874 | 875 | N1 = WGS84_A / sqrt(1.0 - WGS84_ECCSQ * sin(phi1Rad) * sin(phi1Rad)); 876 | T1 = tan(phi1Rad) * tan(phi1Rad); 877 | C1 = eccPrimeSquared * cos(phi1Rad) * cos(phi1Rad); 878 | R1 = WGS84_A * (1.0 - WGS84_ECCSQ) / 879 | pow(1.0 - WGS84_ECCSQ * sin(phi1Rad) * sin(phi1Rad), 1.5); 880 | D = x / (N1 * k0); 881 | 882 | latitude = phi1Rad - (N1 * tan(phi1Rad) / R1) 883 | * (D * D / 2.0 - (5.0 + 3.0 * T1 + 10.0 * C1 - 4.0 * C1 * C1 884 | - 9.0 * eccPrimeSquared) * D * D * D * D / 24.0 885 | + (61.0 + 90.0 * T1 + 298.0 * C1 + 45.0 * T1 * T1 886 | - 252.0 * eccPrimeSquared - 3.0 * C1 * C1) 887 | * D * D * D * D * D * D / 720.0); 888 | latitude *= 180.0 / M_PI; 889 | 890 | longitude = (D - (1.0 + 2.0 * T1 + C1) * D * D * D / 6.0 891 | + (5.0 - 2.0 * C1 + 28.0 * T1 - 3.0 * C1 * C1 892 | + 8.0 * eccPrimeSquared + 24.0 * T1 * T1) 
893 | * D * D * D * D * D / 120.0) / cos(phi1Rad);
894 | longitude = LongOrigin + longitude / M_PI * 180.0;
895 | }
896 |
897 | long int
898 | timestampDiff(uint64_t t1, uint64_t t2)
899 | {
900 | if (t2 > t1)
901 | {
902 | uint64_t d = t2 - t1;
903 |
904 | if (d > std::numeric_limits<long int>::max())
905 | {
906 | return std::numeric_limits<long int>::max();
907 | }
908 | else
909 | {
910 | return d;
911 | }
912 | }
913 | else
914 | {
915 | uint64_t d = t1 - t2;
916 |
917 | if (d > std::numeric_limits<long int>::max())
918 | {
919 | return std::numeric_limits<long int>::min();
920 | }
921 | else
922 | {
923 | return - static_cast<long int>(d);
924 | }
925 | }
926 | }
927 |
928 | }
929 |
--------------------------------------------------------------------------------
/src/intrinsic_calib.cc:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
10 | #include "camodocal/chessboard/Chessboard.h"
11 | #include "camodocal/calib/CameraCalibration.h"
12 | #include "camodocal/gpl/gpl.h"
13 |
14 | int main(int argc, char** argv)
15 | {
16 | cv::Size boardSize;
17 | float squareSize;
18 | std::string inputDir;
19 | std::string cameraModel;
20 | std::string cameraName;
21 | std::string prefix;
22 | std::string fileExtension;
23 | bool useOpenCV;
24 | bool viewResults;
25 | bool verbose;
26 |
27 | //========= Handling Program options =========
28 | boost::program_options::options_description desc("Allowed options");
29 | desc.add_options()
30 | ("help", "produce help message")
31 | ("width,w", boost::program_options::value<int>(&boardSize.width)->default_value(8), "Number of inner corners on the chessboard pattern in x direction")
32 | ("height,h", boost::program_options::value<int>(&boardSize.height)->default_value(12), "Number of inner corners on the chessboard pattern in y direction")
33 | ("size,s", boost::program_options::value<float>(&squareSize)->default_value(7.f), "Size of one square in mm")
34 | ("input,i", boost::program_options::value<std::string>(&inputDir)->default_value("calibrationdata"), "Input directory containing chessboard images")
35 | ("prefix,p", boost::program_options::value<std::string>(&prefix)->default_value("left-"), "Prefix of images")
36 | ("file-extension,e", boost::program_options::value<std::string>(&fileExtension)->default_value(".png"), "File extension of images")
37 | ("camera-model", boost::program_options::value<std::string>(&cameraModel)->default_value("mei"), "Camera model: kannala-brandt | mei | pinhole")
38 | ("camera-name", boost::program_options::value<std::string>(&cameraName)->default_value("camera"), "Name of camera")
39 | ("opencv", boost::program_options::bool_switch(&useOpenCV)->default_value(true), "Use OpenCV to detect corners")
40 | ("view-results", boost::program_options::bool_switch(&viewResults)->default_value(false), "View results")
41 | ("verbose,v", boost::program_options::bool_switch(&verbose)->default_value(true), "Verbose output")
42 | ;
43 |
44 | boost::program_options::positional_options_description pdesc;
45 | pdesc.add("input", 1);
46 |
47 | boost::program_options::variables_map vm;
48 | boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(desc).positional(pdesc).run(), vm);
49 | boost::program_options::notify(vm);
50 |
51 | if (vm.count("help"))
52 | {
53 | std::cout << desc << std::endl;
54 | return 1;
55 | }
56 |
57 | if (!boost::filesystem::exists(inputDir) && !boost::filesystem::is_directory(inputDir))
58 | {
59 | std::cerr << "# ERROR: Cannot find input directory " << inputDir << "."
<< std::endl; 60 | return 1; 61 | } 62 | 63 | camodocal::Camera::ModelType modelType; 64 | if (boost::iequals(cameraModel, "kannala-brandt")) 65 | { 66 | modelType = camodocal::Camera::KANNALA_BRANDT; 67 | } 68 | else if (boost::iequals(cameraModel, "mei")) 69 | { 70 | modelType = camodocal::Camera::MEI; 71 | } 72 | else if (boost::iequals(cameraModel, "pinhole")) 73 | { 74 | modelType = camodocal::Camera::PINHOLE; 75 | } 76 | else if (boost::iequals(cameraModel, "scaramuzza")) 77 | { 78 | modelType = camodocal::Camera::SCARAMUZZA; 79 | } 80 | else 81 | { 82 | std::cerr << "# ERROR: Unknown camera model: " << cameraModel << std::endl; 83 | return 1; 84 | } 85 | 86 | switch (modelType) 87 | { 88 | case camodocal::Camera::KANNALA_BRANDT: 89 | std::cout << "# INFO: Camera model: Kannala-Brandt" << std::endl; 90 | break; 91 | case camodocal::Camera::MEI: 92 | std::cout << "# INFO: Camera model: Mei" << std::endl; 93 | break; 94 | case camodocal::Camera::PINHOLE: 95 | std::cout << "# INFO: Camera model: Pinhole" << std::endl; 96 | break; 97 | case camodocal::Camera::SCARAMUZZA: 98 | std::cout << "# INFO: Camera model: Scaramuzza-Omnidirect" << std::endl; 99 | break; 100 | } 101 | 102 | // look for images in input directory 103 | std::vector imageFilenames; 104 | boost::filesystem::directory_iterator itr; 105 | for (boost::filesystem::directory_iterator itr(inputDir); itr != boost::filesystem::directory_iterator(); ++itr) 106 | { 107 | if (!boost::filesystem::is_regular_file(itr->status())) 108 | { 109 | continue; 110 | } 111 | 112 | std::string filename = itr->path().filename().string(); 113 | 114 | // check if prefix matches 115 | if (!prefix.empty()) 116 | { 117 | if (filename.compare(0, prefix.length(), prefix) != 0) 118 | { 119 | continue; 120 | } 121 | } 122 | 123 | // check if file extension matches 124 | if (filename.compare(filename.length() - fileExtension.length(), fileExtension.length(), fileExtension) != 0) 125 | { 126 | continue; 127 | } 128 | 129 | imageFilenames.push_back(itr->path().string()); 130 | 131 | if (verbose) 132 | { 133 | std::cerr << "# INFO: Adding " << imageFilenames.back() << std::endl; 134 | } 135 | } 136 | 137 | if (imageFilenames.empty()) 138 | { 139 | std::cerr << "# ERROR: No chessboard images found." 
<< std::endl; 140 | return 1; 141 | } 142 | 143 | if (verbose) 144 | { 145 | std::cerr << "# INFO: # images: " << imageFilenames.size() << std::endl; 146 | } 147 | 148 | cv::Mat image = cv::imread(imageFilenames.front(), -1); 149 | const cv::Size frameSize = image.size(); 150 | 151 | camodocal::CameraCalibration calibration(modelType, cameraName, frameSize, boardSize, squareSize); 152 | calibration.setVerbose(verbose); 153 | 154 | std::vector chessboardFound(imageFilenames.size(), false); 155 | for (size_t i = 0; i < imageFilenames.size(); ++i) 156 | { 157 | image = cv::imread(imageFilenames.at(i), -1); 158 | 159 | camodocal::Chessboard chessboard(boardSize, image); 160 | 161 | chessboard.findCorners(useOpenCV); 162 | if (chessboard.cornersFound()) 163 | { 164 | if (verbose) 165 | { 166 | std::cerr << "# INFO: Detected chessboard in image " << i + 1 << ", " << imageFilenames.at(i) << std::endl; 167 | } 168 | 169 | calibration.addChessboardData(chessboard.getCorners()); 170 | 171 | cv::Mat sketch; 172 | chessboard.getSketch().copyTo(sketch); 173 | 174 | cv::imshow("Image", sketch); 175 | cv::waitKey(50); 176 | } 177 | else if (verbose) 178 | { 179 | std::cerr << "# INFO: Did not detect chessboard in image " << i + 1 << std::endl; 180 | } 181 | chessboardFound.at(i) = chessboard.cornersFound(); 182 | } 183 | cv::destroyWindow("Image"); 184 | 185 | if (calibration.sampleCount() < 10) 186 | { 187 | std::cerr << "# ERROR: Insufficient number of detected chessboards." << std::endl; 188 | return 1; 189 | } 190 | 191 | if (verbose) 192 | { 193 | std::cerr << "# INFO: Calibrating..." << std::endl; 194 | } 195 | 196 | double startTime = camodocal::timeInSeconds(); 197 | 198 | calibration.calibrate(); 199 | calibration.writeParams(cameraName + "_camera_calib.yaml"); 200 | calibration.writeChessboardData(cameraName + "_chessboard_data.dat"); 201 | 202 | if (verbose) 203 | { 204 | std::cout << "# INFO: Calibration took a total time of " 205 | << std::fixed << std::setprecision(3) << camodocal::timeInSeconds() - startTime 206 | << " sec.\n"; 207 | } 208 | 209 | if (verbose) 210 | { 211 | std::cerr << "# INFO: Wrote calibration file to " << cameraName + "_camera_calib.yaml" << std::endl; 212 | } 213 | 214 | if (viewResults) 215 | { 216 | std::vector cbImages; 217 | std::vector cbImageFilenames; 218 | 219 | for (size_t i = 0; i < imageFilenames.size(); ++i) 220 | { 221 | if (!chessboardFound.at(i)) 222 | { 223 | continue; 224 | } 225 | 226 | cbImages.push_back(cv::imread(imageFilenames.at(i), -1)); 227 | cbImageFilenames.push_back(imageFilenames.at(i)); 228 | } 229 | 230 | // visualize observed and reprojected points 231 | calibration.drawResults(cbImages); 232 | 233 | for (size_t i = 0; i < cbImages.size(); ++i) 234 | { 235 | cv::putText(cbImages.at(i), cbImageFilenames.at(i), cv::Point(10,20), 236 | cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(255, 255, 255), 237 | 1, CV_AA); 238 | cv::imshow("Image", cbImages.at(i)); 239 | cv::waitKey(0); 240 | } 241 | } 242 | 243 | return 0; 244 | } 245 | -------------------------------------------------------------------------------- /src/sparse_graph/Transform.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace camodocal 4 | { 5 | 6 | Transform::Transform() 7 | { 8 | m_q.setIdentity(); 9 | m_t.setZero(); 10 | } 11 | 12 | Transform::Transform(const Eigen::Matrix4d& H) 13 | { 14 | m_q = Eigen::Quaterniond(H.block<3,3>(0,0)); 15 | m_t = H.block<3,1>(0,3); 16 | } 17 | 18 | Eigen::Quaterniond& 19 | 
Transform::rotation(void) 20 | { 21 | return m_q; 22 | } 23 | 24 | const Eigen::Quaterniond& 25 | Transform::rotation(void) const 26 | { 27 | return m_q; 28 | } 29 | 30 | double* 31 | Transform::rotationData(void) 32 | { 33 | return m_q.coeffs().data(); 34 | } 35 | 36 | const double* const 37 | Transform::rotationData(void) const 38 | { 39 | return m_q.coeffs().data(); 40 | } 41 | 42 | Eigen::Vector3d& 43 | Transform::translation(void) 44 | { 45 | return m_t; 46 | } 47 | 48 | const Eigen::Vector3d& 49 | Transform::translation(void) const 50 | { 51 | return m_t; 52 | } 53 | 54 | double* 55 | Transform::translationData(void) 56 | { 57 | return m_t.data(); 58 | } 59 | 60 | const double* const 61 | Transform::translationData(void) const 62 | { 63 | return m_t.data(); 64 | } 65 | 66 | Eigen::Matrix4d 67 | Transform::toMatrix(void) const 68 | { 69 | Eigen::Matrix4d H; 70 | H.setIdentity(); 71 | H.block<3,3>(0,0) = m_q.toRotationMatrix(); 72 | H.block<3,1>(0,3) = m_t; 73 | 74 | return H; 75 | } 76 | 77 | } 78 | --------------------------------------------------------------------------------
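
The geometry helpers in src/gpl/gpl.cc above are easiest to see in use. The following is a minimal sketch, not part of the repository: it assumes fitCircle() and intersectCircles() are declared in camodocal/gpl/gpl.h (the header included by src/intrinsic_calib.cc) and that the example is compiled and linked together with src/gpl/gpl.cc.

    // Sketch: fit a circle to noise-free sample points and intersect two circles.
    #include <cmath>
    #include <iostream>
    #include <vector>

    #include <opencv2/core/core.hpp>

    #include "camodocal/gpl/gpl.h"

    int main(void)
    {
        // 36 points on a circle of radius 2 centred at (1, -1).
        std::vector<cv::Point2d> points;
        for (int i = 0; i < 36; ++i)
        {
            double theta = i * 10.0 * M_PI / 180.0;
            points.push_back(cv::Point2d(1.0 + 2.0 * cos(theta),
                                         -1.0 + 2.0 * sin(theta)));
        }

        // Modified least-squares fit (Umbach & Jones) as implemented above.
        double cx = 0.0, cy = 0.0, r = 0.0;
        camodocal::fitCircle(points, cx, cy, r);
        std::cout << "fitted circle: (" << cx << ", " << cy << "), r = " << r << std::endl;

        // Two unit circles whose centres are 1 apart intersect in two points.
        std::vector<cv::Point2d> ipts = camodocal::intersectCircles(0.0, 0.0, 1.0,
                                                                    1.0, 0.0, 1.0);
        std::cout << ipts.size() << " intersection point(s)" << std::endl;

        return 0;
    }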
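
The UTM conversion routines can be exercised in the same way. This round-trip sketch is illustrative only; it again assumes LLtoUTM() and UTMtoLL() are declared through camodocal/gpl/gpl.h, and the test coordinate is arbitrary.

    // Sketch: WGS-84 lat/long -> UTM -> lat/long round trip.
    #include <iostream>
    #include <string>

    #include "camodocal/gpl/gpl.h"

    int main(void)
    {
        double lat = 47.3769, lon = 8.5417; // arbitrary test coordinate

        double northing = 0.0, easting = 0.0;
        std::string zone;
        camodocal::LLtoUTM(lat, lon, northing, easting, zone);
        std::cout << "UTM zone " << zone << ": N " << northing
                  << ", E " << easting << std::endl;

        double lat2 = 0.0, lon2 = 0.0;
        camodocal::UTMtoLL(northing, easting, zone, lat2, lon2);
        std::cout << "back to lat/long: " << lat2 << ", " << lon2 << std::endl;

        return 0;
    }

The forward and inverse conversions use the same truncated series from USGS Bulletin 1532, so the round trip should reproduce the input closely.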
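
Transform (src/sparse_graph/Transform.cc above) is a thin wrapper around an Eigen quaternion and translation vector. A short usage sketch follows; the header path camodocal/sparse_graph/Transform.h is an assumption, since only the .cc file is listed here.

    // Sketch: build a Transform from a 4x4 homogeneous matrix and read it back.
    #include <cmath>
    #include <iostream>

    #include <Eigen/Dense>

    #include "camodocal/sparse_graph/Transform.h"

    int main(void)
    {
        // 90-degree rotation about z followed by a translation of (1, 2, 3).
        Eigen::Matrix4d H = Eigen::Matrix4d::Identity();
        H.block<3,3>(0,0) = Eigen::AngleAxisd(M_PI / 2.0,
                                              Eigen::Vector3d::UnitZ()).toRotationMatrix();
        H.block<3,1>(0,3) = Eigen::Vector3d(1.0, 2.0, 3.0);

        camodocal::Transform T(H);

        // rotation() returns the unit quaternion, translation() the 3-vector.
        std::cout << "q = " << T.rotation().coeffs().transpose() << std::endl;
        std::cout << "t = " << T.translation().transpose() << std::endl;

        // rotationData()/translationData() expose raw pointers (e.g. for Ceres
        // parameter blocks); toMatrix() reassembles the homogeneous matrix.
        std::cout << T.toMatrix() << std::endl;

        return 0;
    }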