├── docs
│   ├── logo.png
│   ├── mesh.gif
│   ├── roll.gif
│   ├── roll.png
│   └── pointCloud.gif
├── .gitIgnore
├── docker-compose.yml
├── src
│   ├── parametersHandler.h
│   ├── loadImages.h
│   ├── visualizer.h
│   ├── meshTexturing.h
│   ├── cameraCalibration.h
│   ├── structureFromMotion.h
│   ├── loadImages.cpp
│   ├── bundleAdjustment.h
│   ├── surfaceReconstruction.h
│   ├── parametersHandler.cpp
│   ├── meshTexturing.cpp
│   ├── main.cpp
│   ├── bundleAdjustment.cpp
│   ├── visualizer.cpp
│   ├── features.h
│   ├── surfaceReconstruction.cpp
│   ├── cameraCalibration.cpp
│   ├── structureFromMotion.cpp
│   └── features.cpp
├── CMakeLists.txt
├── LICENSE
├── Dockerfile
└── README.md

--------------------------------------------------------------------------------
/docs/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BennySobol/Sr/HEAD/docs/logo.png

--------------------------------------------------------------------------------
/docs/mesh.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BennySobol/Sr/HEAD/docs/mesh.gif

--------------------------------------------------------------------------------
/docs/roll.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BennySobol/Sr/HEAD/docs/roll.gif

--------------------------------------------------------------------------------
/docs/roll.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BennySobol/Sr/HEAD/docs/roll.png

--------------------------------------------------------------------------------
/docs/pointCloud.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BennySobol/Sr/HEAD/docs/pointCloud.gif

--------------------------------------------------------------------------------
/.gitIgnore:
--------------------------------------------------------------------------------
# CMake solution
build/*

.git/*
.vs/*

CMakeCache.txt
CMakeFiles/*
Makefile
cmake_install.cmake
project

--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
version: "3"

services:
  sr:
    image: bennysobol/sr:latest
    build: .
    environment:
      - DISPLAY=host.docker.internal:0.0
    volumes:
      - /tmp/.X11-unix:/tmp/.X11-unix
      - C:/Users/BennySobol/OneDrive/Desktop/dataset:/home/user/dataset
      - C:/Users/BennySobol/OneDrive/Desktop/Sr:/home/user/benny/Sr
    network_mode: host

--------------------------------------------------------------------------------
/src/parametersHandler.h:
--------------------------------------------------------------------------------
#pragma once
#include <iostream>
#include <string>
#include <vector>

class parametersHandler
{
private:
    void printHelp();
public:
    bool showMatch;
    bool isSorted;
    double downScaleFactor;
    double focalLength; // 0 if user has calib.xml
    std::string chessboardImagesPath; // chessboard images for camera calibration
    std::string objectImagesPath; // folder with images of an object - required

    parametersHandler(int argc, char* argv[]);
};

--------------------------------------------------------------------------------
/src/loadImages.h:
--------------------------------------------------------------------------------
#pragma once
#include <string>
#include <vector>

#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

#include "features.h"

#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;

// Singleton design
class loadImages
{
private:
    loadImages() = default;
    ~loadImages();
    std::vector<std::string> _images;

public:
    static loadImages* getInstance();
    std::vector<std::string> load(const fs::path& dirPath, const bool isSorted = true);
    std::vector<std::string> getImages();
};

--------------------------------------------------------------------------------
/src/visualizer.h:
--------------------------------------------------------------------------------
#pragma once
#include "features.h"

#include <thread>

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/common/common.h>
#include <pcl/io/ply_io.h>
#include <pcl/visualization/pcl_visualizer.h>


class visualizer
{
private:
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr _pclPointCloudPtr;
    double _cameraScale;
    bool _isUpdateRequired;
    int _currentCameraIdx;
    void addCamerasToViewer(pcl::visualization::PCLVisualizer::Ptr& viewer);
    pcl::PointXYZ toPointXYZ(cv::Mat point3d);
public:
    visualizer(pcl::PointCloud<pcl::PointXYZRGB>::Ptr& pclPointCloudPtr, double cameraScale);
    ~visualizer();
    void show();
    void update();
    void updateAll();
};

--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required(VERSION 3.10)
# Create Project
project( Sr_Project )

file(GLOB SRC_FILES "src/*.h" "src/*.cpp")
add_executable(project ${SRC_FILES})

set_property( DIRECTORY PROPERTY VS_STARTUP_PROJECT "project" )

# Find PCL
find_package( PCL REQUIRED )


set(CMAKE_BUILD_TYPE Release)
# Find OpenCV
set( OpenCV_DIR "C:/opencv/build" )
find_package( OpenCV REQUIRED )

set( Eigen3_DIR "C:/ceres/eigen-build" )

# Find CERES
find_package( Ceres REQUIRED )

# Find CGAL
find_package( CGAL REQUIRED )

# [Linker]>[Input]>[Additional Dependencies]
target_link_libraries( project ${PCL_LIBRARIES} ${OpenCV_LIBRARIES} ${CERES_LIBRARIES} ${CGAL_LIBRARIES} )

# install(TARGETS project DESTINATION bin)
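For orientation, here is a minimal sketch (not part of the repository; the command line shown in the comment is hypothetical) of how the two helpers above are typically driven together - `parametersHandler` parses the command line, and the `loadImages` singleton collects the image paths:

```cpp
#include "parametersHandler.h"
#include "loadImages.h"

int main(int argc, char* argv[])
{
    // e.g. invoked as: ./project -s -d 2 /home/user/dataset
    parametersHandler params(argc, argv);
    // throws std::runtime_error if the folder is invalid or holds fewer than 2 images
    std::vector<std::string> images =
        loadImages::getInstance()->load(params.objectImagesPath, params.isSorted);
    return 0;
}
```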
--------------------------------------------------------------------------------
/src/meshTexturing.h:
--------------------------------------------------------------------------------
#pragma once

#include <string>
#include <iostream>
#include "features.h"

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/PolygonMesh.h>
#include <pcl/TextureMesh.h>
#include <pcl/conversions.h>
#include <pcl/io/ply_io.h>
#include <pcl/io/obj_io.h>
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/search/kdtree.h>
#include <pcl/features/normal_3d.h>
#include <pcl/surface/texture_mapping.h>
#include <pcl/common/transforms.h>

class meshTexturing
{
private:
    // The texture mesh object that will contain our UV-mapped mesh
    pcl::TextureMesh _texturedMesh;
public:
    meshTexturing(std::string plyFilePath, std::string saveObjFilePath, pcl::PolygonMesh& triangles);
    ~meshTexturing();
    void saveTextureMesh(std::string filePath);
};

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2021 Benny Sobol

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/src/cameraCalibration.h:
--------------------------------------------------------------------------------
#pragma once

#include <iostream>
#include <vector>
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include <string>
#include "opencv2/highgui.hpp"

class cameraCalibration
{
private:
    std::vector<std::vector<cv::Point3f>> objectPoints; // store x,y,z points
    std::vector<std::vector<cv::Point2f>> imagePoints; // store x,y points

    static cv::Mat _cameraMatrix;
    static cv::Mat _distortionCoefficients;

    cv::Mat map1, map2;
    cv::Size imageSize;
public:
    cameraCalibration() = default;
    ~cameraCalibration();
    int addChessboardPoints(const std::vector<std::string>& filelist, cv::Size boardSize, const bool showMatchingFeature = false);
    double calibrate();
    cv::Mat remap(const cv::Mat& image);
    static cv::Mat& getCameraMatrix();
    static cv::Mat& getDistortionCoefficients();
    static cv::Point2d getPrinciplePoint();
    double getFocal();
    void save(std::string filePath);
    void load(std::string filePath);
    cv::Mat& estimateCameraMatrix(double focalLength, cv::Size imageSize);
    void downScale(double downScale = 2);
    friend std::ostream& operator<<(std::ostream& out, const cameraCalibration& calib);
};

--------------------------------------------------------------------------------
/src/structureFromMotion.h:
--------------------------------------------------------------------------------
#pragma once
#include "features.h"
#include "visualizer.h"

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/io/ply_io.h>
#include <pcl/filters/statistical_outlier_removal.h>
#include <future>
#include <chrono>

#include <opencv2/calib3d.hpp>

// a point in the point cloud - holds the 3D point, the index of its 2D origin keypoint, and its color
struct pointInCloud {
    int imageIndex;

    cv::Point3d point;
    int otherKeyPointsIdx;

    float color;
} typedef pointInCloud;

// camera motion and point cloud reconstruction
class structureFromMotion
{
private:
    std::vector<pointInCloud> _pointCloud;
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr _pclPointCloudPtr;
    std::vector<pointInCloud> _previous3dPoints;
    cv::Scalar _averageColor;
    visualizer* _visualizer;

    void obtainMatches(imageFeatures features, cv::Mat descriptors3d, std::vector<cv::Point2d>& imagePoints2d,
        std::vector<cv::Point3d>& objectPoints3d, std::vector<pointInCloud> previous3dPoints);
    void addPoints4DToPointCloud(cv::Mat points4D, imageFeatures feature, int index, std::vector<cv::Point2f> currentKeyPoints);
public:
    structureFromMotion(double cameraScale = 0.4);
    ~structureFromMotion();
    void savePointCloud(std::string filePath);
};

--------------------------------------------------------------------------------
/src/loadImages.cpp:
--------------------------------------------------------------------------------
#include "loadImages.h"

// get the loadImages instance - a singleton class
loadImages* loadImages::getInstance()
{
    static loadImages instance;

    return &instance;
}

// load the image paths into a vector
// returns a vector of strings containing the image paths
std::vector<std::string> loadImages::load(const fs::path& dirPath, const bool isSorted)
{
    _images.clear();
    // filename extensions to locate
    std::vector<std::string> exts{ ".jpg", ".JPG", ".jpeg", ".JPEG", ".png", ".PNG" };
    // check that the path is valid
    if (!fs::exists(dirPath) || !fs::is_directory(dirPath))
    {
        throw std::runtime_error(dirPath.string() + " is not a valid folder");
    }
    // load the image names from the paths
    for (auto const& entry : fs::directory_iterator(dirPath))
    {
        if (fs::is_regular_file(entry) && std::find(exts.begin(), exts.end(), entry.path().extension()) != exts.end())
        {
            _images.push_back(entry.path().string());
        }
    }
    if (!isSorted) // if images are not already sorted by name (like pic1, pic2 ...)
    {
        features::sortImages(_images);
    }

    if (_images.size() < 2)
    {
        throw std::runtime_error("There must be at least 2 images in the folder");
    }
    return _images;
}

// loadImages destructor
loadImages::~loadImages()
{
    _images.clear();
}

// get the image paths
std::vector<std::string> loadImages::getImages()
{
    return _images;
}

--------------------------------------------------------------------------------
/src/bundleAdjustment.h:
--------------------------------------------------------------------------------
#pragma once
#include "structureFromMotion.h"

#include "ceres/ceres.h"
#include "ceres/rotation.h"


struct ReprojectionError
{
    ReprojectionError(const cv::Point2f& imagePoint) : _imagePoint(imagePoint) {}

    template <typename T>
    bool operator()(const T* const camera, const T* const point3d, const T* const K, T* residuals) const
    {
        // camera[0,1,2] are the angle-axis rotation.
        T p[3];
        ceres::AngleAxisRotatePoint(camera, point3d, p);
        // camera[3,4,5] are the translation.
        p[0] += camera[3];
        p[1] += camera[4];
        p[2] += camera[5];
        // Perspective division onto the normalized image plane.
        // (Unlike the camera model Noah Snavely's Bundler assumes, no sign
        // change is needed here and no radial distortion is applied.)
        T xp = p[0] / p[2];
        T yp = p[1] / p[2];
        const T& fx = K[0];
        const T& fy = K[1];
        const T& cx = K[2];
        const T& cy = K[3];

        // Compute the final projected point position.

        T predicted_x = fx * xp + cx;
        T predicted_y = fy * yp + cy;

        // The error is the difference between the predicted and observed position.
        residuals[0] = predicted_x - T(_imagePoint.x);
        residuals[1] = predicted_y - T(_imagePoint.y);
        return true;
    }

    static ceres::CostFunction* create(const cv::Point2f& imagePoint)
    {
        // 2 residuals; parameter blocks: 6 (camera pose), 3 (3D point), 4 (intrinsics fx, fy, cx, cy)
        return (new ceres::AutoDiffCostFunction<ReprojectionError, 2, 6, 3, 4>(new ReprojectionError(imagePoint)));
    }

    const cv::Point2f& _imagePoint;
};

class bundleAdjustment
{
private:
    void solveProblem();
    ceres::Problem _problem;
    double* _points;
    double* _intrinsics;
    double* _cameraPose;
public:
    bundleAdjustment(std::vector<pointInCloud>& pointCloud, pcl::PointCloud<pcl::PointXYZRGB>& pclPointCloud);
    ~bundleAdjustment();
};

--------------------------------------------------------------------------------
/src/surfaceReconstruction.h:
--------------------------------------------------------------------------------
#pragma once
#include <iostream>
#include <string>
#include <fstream>

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/PolygonMesh.h>
#include <pcl/conversions.h>
#include <pcl/io/ply_io.h>

#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Point_set_3.h>
#include <CGAL/Point_set_3/IO.h>
#include <CGAL/compute_average_spacing.h>
#include <CGAL/remove_outliers.h>
#include <CGAL/grid_simplify_point_set.h>
#include <CGAL/jet_smooth_point_set.h>
#include <CGAL/Scale_space_surface_reconstruction_3.h>
#include <CGAL/Scale_space_reconstruction_3/Advancing_front_mesher.h>
#include <CGAL/Surface_mesh.h>
#include <CGAL/boost/graph/iterator.h>
#include <CGAL/Polygon_mesh_processing/border.h>
#include <CGAL/Polygon_mesh_processing/triangulate_hole.h>

typedef CGAL::Exact_predicates_inexact_constructions_kernel kernel;
typedef CGAL::Point_set_3<kernel::Point_3> pointSet;

typedef CGAL::Surface_mesh<kernel::Point_3> Mesh;
typedef boost::graph_traits<Mesh>::halfedge_descriptor halfedge_descriptor;


class surfaceReconstruction
{
private:
    pcl::PolygonMesh _polygonMesh;

    void closeHoles(Mesh& mesh);
    bool isHoleToClose(halfedge_descriptor h, Mesh& mesh);

public:
    surfaceReconstruction(std::string inputFile, std::string saveFile);
    ~surfaceReconstruction();

    void saveMesh(std::string filePath);
    pcl::PolygonMesh& getPolygonMesh();
};

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:20.04

MAINTAINER Benny Sobol

RUN apt-get update
RUN apt-get install -y --no-install-recommends software-properties-common
RUN add-apt-repository ppa:rock-core/qt4
RUN add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
RUN add-apt-repository main
RUN add-apt-repository universe
RUN add-apt-repository restricted
RUN add-apt-repository multiverse
RUN apt-get update --fix-missing

RUN DEBIAN_FRONTEND=noninteractive apt-get install -y \
    build-essential cmake \
    wget git pkg-config \
    libjpeg-dev libtiff-dev \
    libjasper1 libjasper-dev \
    libpng-dev libavcodec-dev \
    libopenblas-dev liblapacke-dev \
    libavformat-dev libswscale-dev \
    libv4l-dev libgtk2.0-dev \
    libatlas-base-dev gfortran \
    freeglut3 freeglut3-dev \
    libtbb-dev libqt4-dev \
    libcgal-dev libpcl-dev \
    libgoogle-glog-dev libgflags-dev \
    libatlas-base-dev libeigen3-dev \
    libusb-1.0-0-dev \
    && apt-get -y clean all \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir -p /home/user/benny
RUN mkdir -p /home/user/opencv
RUN mkdir -p /home/user/ceres

WORKDIR /home/user/opencv

RUN wget https://github.com/opencv/opencv_contrib/archive/4.4.0.tar.gz && \
    tar zxf 4.4.0.tar.gz && \
    rm 4.4.0.tar.gz && \
    mv opencv_contrib-4.4.0 opencv_contrib

RUN wget https://github.com/opencv/opencv/archive/4.4.0.tar.gz && \
    tar zxf 4.4.0.tar.gz && \
    rm 4.4.0.tar.gz && \
    mv opencv-4.4.0 opencv

WORKDIR /home/user/opencv/opencv
RUN cmake -Bbuild \
    -DCMAKE_BUILD_TYPE=RELEASE \
    -DOPENCV_EXTRA_MODULES_PATH=/home/user/opencv/opencv_contrib/modules \
    -DCMAKE_INSTALL_PREFIX=/usr/local \
    -DBUILD_TESTS=OFF \
    -DBUILD_PERF_TESTS=OFF \
    -DBUILD_EXAMPLES=OFF \
    -DBUILD_DOCS=OFF \
    -DWITH_TBB=ON \
    -DWITH_OPENMP=ON \
    -DWITH_IPP=ON

RUN cd build \
    && make install \
    && cd /home/user/ \
    && rm -r opencv \
    && bash -c 'echo "/usr/local/lib" > /etc/ld.so.conf.d/opencv.conf' \
    && ldconfig

# ceres solver
WORKDIR /home/user/ceres
RUN git clone https://ceres-solver.googlesource.com/ceres-solver
WORKDIR /home/user/ceres/ceres-bin/
RUN cmake ../ceres-solver
RUN make -j4
RUN make install


## Build and compile Sr
#RUN git clone https://github.com/BennySobol/Sr
#WORKDIR /home/user/benny/Sr
#RUN cmake build .
#RUN make

--------------------------------------------------------------------------------
/src/parametersHandler.cpp:
--------------------------------------------------------------------------------
#include "parametersHandler.h"

#define SHOW_HELP1 "-h"
#define SHOW_HELP2 "-H"
#define SHOW_MATCH1 "-v"
#define SHOW_MATCH2 "-V"
#define SORT_IMAGES1 "-s"
#define SORT_IMAGES2 "-S"
#define DOWN_SCALE1 "-d"
#define DOWN_SCALE2 "-D"
#define FOCAL_LEN1 "-f"
#define FOCAL_LEN2 "-F"
#define CALIB_DIR1 "-c"
#define CALIB_DIR2 "-C"

void parametersHandler::printHelp()
{
    std::cout << "Usage: project.exe [-h] [-v] [-s] [-d down_scale] [-f focal_length] [-c calib_directory] input_directory" << std::endl;
    std::cout << "Options:" << std::endl;
    std::cout << " -h (optional) Print help message" << std::endl;
    std::cout << " -v (optional) Show matches - default is inactive" << std::endl;
    std::cout << " -s (optional) Sort input images - default is no sort" << std::endl;
    std::cout << " -d down_scale (optional) Downscale factor for input images - default is 1 (decimal)" << std::endl;
    std::cout << " -f focal_length (option 1) Camera focal length (decimal)" << std::endl;
    std::cout << " -c calib_directory (option 2) Directory with chessboard images for camera calibration" << std::endl;
    std::cout << " input_directory (option 3) Directory with input images and calib.xml (optional)" << std::endl;
    std::cout << "PLEASE NOTE THAT YOU MUST PROVIDE EITHER A FOCAL LENGTH, OR A calib_directory, OR HAVE calib.xml IN THE input_directory ITSELF!"
        << std::endl;

}

// checks all params
parametersHandler::parametersHandler(int argc, char* argv[]) : showMatch(false), isSorted(true), focalLength(0), downScaleFactor(1), objectImagesPath(""), chessboardImagesPath("")
{
    if (argc < 2)
    {
        printHelp();
        throw std::runtime_error("Main params are invalid");
    }
    std::vector<std::string> paramList;
    for (int i = 0; i < argc; i++)
    {
        paramList.push_back(argv[i]);
    }

    std::vector<std::string>::iterator it;
    for (it = paramList.begin(); it != paramList.end(); it++)
    {

        if (*it == SHOW_HELP1 || *it == SHOW_HELP2)
        {
            printHelp();
        }
        else if (*it == SHOW_MATCH1 || *it == SHOW_MATCH2)
        {
            showMatch = true;
        }
        else if (*it == SORT_IMAGES1 || *it == SORT_IMAGES2)
        {
            isSorted = false;
        }
        else if (*it == DOWN_SCALE1 || *it == DOWN_SCALE2)
        {
            it++;
            if (it == paramList.end())
            {
                printHelp();
                throw std::runtime_error("Main params are invalid");
            }
            downScaleFactor = std::stod((*it), nullptr); // stod throws an error if it fails to convert
            if (downScaleFactor <= 1)
            {
                downScaleFactor = 1;
            }
        }
        else if (*it == FOCAL_LEN1 || *it == FOCAL_LEN2)
        {
            it++;
            if (it == paramList.end())
            {
                printHelp();
                throw std::runtime_error("Main params are invalid");
            }
            focalLength = std::stod((*it), nullptr);
        }
        else if (*it == CALIB_DIR1 || *it == CALIB_DIR2)
        {
            it++;
            if (it == paramList.end())
            {
                printHelp();
                throw std::runtime_error("Main params are invalid");
            }
            chessboardImagesPath = (*it);
        }
        else
        {
            objectImagesPath = (*it);
        }
    }
}

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
![](docs/logo.png)

# Sr. | Structure Reconstruction
Sr. is a Structure from Motion software:
the software provides 3D Reconstruction and Camera Tracking.

### For More Details: [Project Workflow Presentation](https://1drv.ms/p/s!AudVtA3cNIargYUkhE_9iAjjZmI8Yw)

# Examples
## Temple Ring (46 images)
![](docs/pointCloud.gif)
![](docs/mesh.gif)
## Wooden Spool (41 images)
The images were cropped to reduce computation time
![](docs/roll.png)
![](docs/roll.gif)


# Required tools and libraries
- [CMake](https://cmake.org) for easier building
- [OpenCV compiled with the contrib modules](https://github.com/opencv/opencv_contrib) (Apache 2 License)
- [Ceres Solver](http://ceres-solver.org/installation.html) (BSD license)
- [Point Cloud Library (PCL)](https://pointclouds.org/downloads) (BSD license)
- [CGAL](https://www.cgal.org/download.html) (Open Source license)

# Build
## Using Docker
#### See the [Sr image](https://hub.docker.com/repository/docker/bennysobol/sr) on Docker Hub

- Start an X server on your machine
- Deploy with Docker Compose
```
docker-compose run sr
```

## Without Docker
- Download the source code into some root folder "SomePath/Sr"
- Create a build folder within the Sr directory: SomePath/Sr/Build
- Set your own dependency directories in CMakeLists.txt
- Run the CMake configure step with SomePath/Sr/Build as the build directory and SomePath/Sr as the source directory
- Run generate
- Open the Visual Studio solution SomePath/Sr/Build/Sr_Project.sln and compile

# Usage
### Execute

```
Usage: ./project.exe [-h] [-v] [-s] [-d down_scale] [-c calib_directory] [-f focal_length] input_directory

Options:
 -h Print help message
 -v Show matches
 -s Sort input images
 -d down_scale Downscale factor for input images (decimal)
 -f focal_length Camera focal length (decimal)
 -c calib_directory Directory with chessboard images for camera calibration
 input_directory Directory with input images and calib.xml (optional)
```

### Input Data

The pipeline takes images of an object and the intrinsic camera parameters as input, creates a colored point cloud, and turns it into a textured mesh.

Sr. gets an input directory containing images of an object from multiple views.
First option - using calib.xml; second option - using a focal length.
Note that the intrinsic camera parameters file must be named calib.xml!
It must be in the format:
```
<?xml version="1.0"?>
<opencv_storage>
<cameraMatrix type_id="opencv-matrix">
  <rows>3</rows>
  <cols>3</cols>
  <dt>d</dt>
  <data>
    fx 0 cx 0 fy cy 0 0 1</data></cameraMatrix>
<distortionCoefficients type_id="opencv-matrix">
  <rows>1</rows>
  <cols>5</cols>
  <dt>d</dt>
  <data>
    k1 k2 p1 p2 k3</data></distortionCoefficients>
</opencv_storage>
```
where:
`fx, fy` are the focal lengths along the x and y axes,
`(cx, cy)` is the principal point,
and `k1, k2, p1, p2, k3` are the distortion coefficients.
### Output Data
Sr. will create a new folder named **output** in the input directory; in this folder the software will create several files:
- **PointCloud.ply** - Colored point cloud of the object
- **occluded.jpg** - This image is used to texture the occluded parts of the mesh
- **TextureMesh.mtl** - File containing information about the texture materials
- **TextureMesh.obj** - Textured mesh of the object

--------------------------------------------------------------------------------
/src/meshTexturing.cpp:
--------------------------------------------------------------------------------
#include "meshTexturing.h"
#include "loadImages.h"

// mesh texturing inspired by the main function at https://github.com/PointCloudLibrary/pcl/blob/master/gpu/kinfu_large_scale/tools/standalone_texture_mapping.cpp
meshTexturing::meshTexturing(std::string plyFilePath, std::string saveObjFilePath, pcl::PolygonMesh& triangles)
{
    std::vector<imageFeatures>& features = features::getFeatures();
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
    pcl::fromPCLPointCloud2(triangles.cloud, *cloud);

    std::cout << "Estimating normals" << std::endl;
    pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> n;
    pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
    pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZ>);
    tree->setInputCloud(cloud);
    n.setInputCloud(cloud);
    n.setSearchMethod(tree);
    n.setKSearch(20);
    n.compute(*normals);

    // concatenate the point cloud and normal fields
    pcl::PointCloud<pcl::PointNormal>::Ptr cloudWithNormals(new pcl::PointCloud<pcl::PointNormal>);
    pcl::concatenateFields(*cloud, *normals, *cloudWithNormals);


    _texturedMesh.cloud = triangles.cloud;
    std::vector<pcl::Vertices> polygon;

    for (size_t i = 0; i < triangles.polygons.size(); ++i)
    {
        polygon.push_back(triangles.polygons[i]);
    }
    _texturedMesh.tex_polygons.push_back(polygon);
    std::cout << "Mesh contains " << _texturedMesh.tex_polygons[0].size() << " faces" << std::endl;

    pcl::texture_mapping::CameraVector cameras;
    for (imageFeatures feature : features)
    {
        pcl::TextureMapping<pcl::PointXYZ>::Camera cam;
        cam.pose = feature.getCamPose();
        cv::Mat cameraMatrix = cameraCalibration::getCameraMatrix();
        cam.focal_length_w = cameraMatrix.at<double>(0, 0);
        cam.focal_length_h = cameraMatrix.at<double>(1, 1);
        cv::Point2d principlePoint = cameraCalibration::getPrinciplePoint();
        cam.center_w = principlePoint.x;
        cam.center_h = principlePoint.y;
        cam.height = feature.image.size().height;
        cam.width = feature.image.size().width;
        cam.texture_file = feature.path;

        cameras.push_back(cam);
    }
    // create materials for each texture and one extra for occluded faces
    for (int i = 0; i <= cameras.size(); i++)
    {
        pcl::TexMaterial meshMaterial;
        meshMaterial.tex_Ka.r = 0.2f;
        meshMaterial.tex_Ka.g = 0.2f;
        meshMaterial.tex_Ka.b = 0.2f;

        meshMaterial.tex_Kd.r = 0.8f;
        meshMaterial.tex_Kd.g = 0.8f;
        meshMaterial.tex_Kd.b = 0.8f;

        meshMaterial.tex_Ks.r = 1.0f;
        meshMaterial.tex_Ks.g = 1.0f;
        meshMaterial.tex_Ks.b = 1.0f;

        meshMaterial.tex_d = 1.0f;
        meshMaterial.tex_Ns = 75.0f;
        meshMaterial.tex_illum = 2;

        std::stringstream texName;
        texName << "material_" << i;
        texName >>
meshMaterial.tex_name;

        if (i < cameras.size())
        {
            meshMaterial.tex_file = "../" + getFileNameWithExtension(cameras[i].texture_file);
        }
        else
        {
            meshMaterial.tex_file = "occluded.jpg";
        }

        _texturedMesh.tex_materials.push_back(meshMaterial);
    }

    // segment and texture
    std::cout << "Finding occlusions" << std::endl;
    pcl::TextureMapping<pcl::PointXYZ> tm;
    tm.textureMeshwithMultipleCameras(_texturedMesh, cameras);

    for (int i = 0; i < cameras.size(); i++)
    {
        std::cout << "Camera " << getFileName(features[i].path) << " is texturing " << _texturedMesh.tex_polygons[i].size() << " faces" << std::endl;
    }

    // convert the cloud with normals back to the PCLPointCloud2 (ROS-style) format
    pcl::toPCLPointCloud2(*cloudWithNormals, _texturedMesh.cloud);

    saveTextureMesh(saveObjFilePath);
    std::cout << "Textured mesh has been saved" << std::endl;
}


// saves the texture mesh to a file
void meshTexturing::saveTextureMesh(std::string filePath)
{
    pcl::io::saveOBJFile(filePath, _texturedMesh);
}

meshTexturing::~meshTexturing()
{
    _texturedMesh.cloud.data.clear();
    _texturedMesh.tex_coordinates.clear();
    _texturedMesh.tex_materials.clear();
    _texturedMesh.tex_polygons.clear();
}

--------------------------------------------------------------------------------
/src/main.cpp:
--------------------------------------------------------------------------------
#include "structureFromMotion.h"
#include "surfaceReconstruction.h"
#include "meshTexturing.h"
#include "loadImages.h"
#include "parametersHandler.h"

#include <chrono>

#pragma warning(disable:6031)

std::chrono::high_resolution_clock::time_point timer;

void clockHandle()
{
    std::cout << "In " << (double)std::chrono::duration_cast<std::chrono::milliseconds>
        ((std::chrono::high_resolution_clock::now() - timer)).count() / 1000 << " seconds" << std::endl << std::endl;
    timer = std::chrono::high_resolution_clock::now();
}

void handleCalibrationParms(cameraCalibration& calib, const parametersHandler paramsHandler, const std::string firstImagePath)
{
    // handle calibration params
    if (fs::exists(paramsHandler.objectImagesPath + "\\calib.xml")) // load calibration - if it exists
    {
        std::cout << "Using calib.xml as calibration params" << std::endl;
        calib.load(paramsHandler.objectImagesPath + "\\calib.xml");
    }
    else if (paramsHandler.chessboardImagesPath != "") // calibrate camera
    {
        std::cout << "Camera calibration using chessboard images at " << paramsHandler.chessboardImagesPath << std::endl;
        std::vector<std::string> chessboardImages = loadImages::getInstance()->load(paramsHandler.chessboardImagesPath);
        calib.addChessboardPoints(chessboardImages, cv::Size(9, 6), paramsHandler.showMatch); // size of inner chessboard corners
        // print the reprojection error, and save the calibration file
        std::cout << "Error: " << calib.calibrate() << std::endl;
        calib.save(paramsHandler.objectImagesPath + "\\calib.xml");
    }
    else if (paramsHandler.focalLength != 0) // cameraMatrix from focal length
    {
        std::cout << "Using focal length as calibration params" << std::endl;
        calib.estimateCameraMatrix(paramsHandler.focalLength, cv::imread(firstImagePath).size());
    }
    else
    {
        throw std::runtime_error("There must be calibration params");
    }
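    // the intrinsics must match the working image resolution: since the input
    // images are resized by downScaleFactor, fx, fy, cx and cy are divided by
    // the same factor (see cameraCalibration::downScale)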
    calib.downScale(paramsHandler.downScaleFactor);
}

int main(int argc, char* argv[])
{
    std::chrono::high_resolution_clock::time_point startTime = std::chrono::high_resolution_clock::now();
    timer = startTime;
    try
    {
        parametersHandler paramsHandler(argc, argv);
        cameraCalibration calib;

        // load images
        std::vector<std::string> images = loadImages::getInstance()->load(paramsHandler.objectImagesPath, paramsHandler.isSorted);
        std::cout << "Image paths were loaded ";
        clockHandle();

        handleCalibrationParms(calib, paramsHandler, images[0]);
        std::cout << calib; // print camera params
        clockHandle();

        // create directories for output
        fs::create_directories(paramsHandler.objectImagesPath + "\\output");

        // extract features from every image
        features features(images, paramsHandler.downScaleFactor);

        std::cout << "Features were extracted ";
        clockHandle();

        // find matching features between every 2 neighboring images
        features.matchAllFeatures(paramsHandler.showMatch);
        std::cout << "Image features were matched ";
        clockHandle();

        std::cout << "Starting structure from motion" << std::endl;
        structureFromMotion structureFromMotion(0.35);
        structureFromMotion.savePointCloud(paramsHandler.objectImagesPath + "\\output\\PointCloud.ply");
        std::cout << "Structure from motion is finished ";
        clockHandle();

        std::cout << "Starting surface reconstruction" << std::endl;
        surfaceReconstruction surfaceReconstruction(paramsHandler.objectImagesPath + "\\output\\PointCloud.ply", paramsHandler.objectImagesPath + "\\output\\Mesh.ply");
        clockHandle();

        std::cout << "Texturing mesh" << std::endl;
        meshTexturing meshTexturing(paramsHandler.objectImagesPath + "\\output\\Mesh.ply", paramsHandler.objectImagesPath + "\\output\\TextureMesh.obj", surfaceReconstruction.getPolygonMesh());
        clockHandle();
        std::cout << "Total time is " << (double)std::chrono::duration_cast<std::chrono::milliseconds>
            ((std::chrono::high_resolution_clock::now() - startTime)).count() / 1000 << " seconds" << std::endl;
        std::cout << "Press any key to exit" << std::endl;
        getchar();
    }
    catch (const std::exception& e)
    {
        std::cout << "ERROR: " << e.what() << std::endl;
    }

    return 0;
}

--------------------------------------------------------------------------------
/src/bundleAdjustment.cpp:
--------------------------------------------------------------------------------
#include "bundleAdjustment.h"


// inspired by the tutorial at http://ceres-solver.org/nnls_tutorial.html#bundle-adjustment
bundleAdjustment::bundleAdjustment(std::vector<pointInCloud>& pointCloud, pcl::PointCloud<pcl::PointXYZRGB>& pclPointCloud)
{
    std::vector<imageFeatures>& features = features::getFeatures();
    // 6 parameters per camera: 3 for the angle-axis rotation, 3 for the translation
    _cameraPose = new double[6 * features.size()];
    for (int i = 0; i < features.size(); i++)
    {
        cv::Mat rotationVector;
        cv::Rodrigues(features[i].rotation, rotationVector);
        _cameraPose[6 * i + 0] = rotationVector.at<double>(0, 0);
        _cameraPose[6 * i + 1] = rotationVector.at<double>(1, 0);
        _cameraPose[6 * i + 2] = rotationVector.at<double>(2, 0);
        _cameraPose[6 * i + 3] = features[i].translation.at<double>(0, 0);
        _cameraPose[6 * i + 4] = features[i].translation.at<double>(1, 0);
        _cameraPose[6 * i + 5] = features[i].translation.at<double>(2, 0);
    }

    _points = new double[3 * pointCloud.size()];
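    // pack the 3D points into a flat array - one (x, y, z) triple per point -
    // so each point can be handed to ceres as its own parameter block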
    for (int i = 0; i < pointCloud.size(); i++)
    {
        _points[3 * i + 0] = pointCloud[i].point.x;
        _points[3 * i + 1] = pointCloud[i].point.y;
        _points[3 * i + 2] = pointCloud[i].point.z;
    }

    cv::Mat cameraMatrix = cameraCalibration::getCameraMatrix();

    _intrinsics = new double[4];
    _intrinsics[0] = cameraMatrix.at<double>(0, 0); // fx
    _intrinsics[1] = cameraMatrix.at<double>(1, 1); // fy
    _intrinsics[2] = cameraMatrix.at<double>(0, 2); // cx
    _intrinsics[3] = cameraMatrix.at<double>(1, 2); // cy

    ////// TO DELETE - it does not do anything //////
    // load extrinsics (rotations and translation)
    for (size_t i = 0; i < features.size(); ++i)
    {
        _problem.AddParameterBlock(&_cameraPose[i * 6], 6);
    }
    // fix the first camera.
    _problem.SetParameterBlockConstant(&_cameraPose[0]);

    // load intrinsics
    _problem.AddParameterBlock(_intrinsics, 4); // fx, fy, cx, cy
    ////// TO DELETE //////

    ceres::LossFunction* lossFunction = new ceres::HuberLoss(4);
    for (int i = 0; i < pointCloud.size(); i++)
    {
        ceres::CostFunction* costFunction = ReprojectionError::create(
            features[pointCloud[i].imageIndex].keyPoints[pointCloud[i].otherKeyPointsIdx].pt);
        _problem.AddResidualBlock(costFunction, lossFunction, &_cameraPose[6 * pointCloud[i].imageIndex], &_points[3 * i], _intrinsics);
    }

    solveProblem();

    // copy back the points
    for (int i = 0; i < pointCloud.size(); i++)
    {
        pointCloud[i].point.x = _points[3 * i];
        pointCloud[i].point.y = _points[3 * i + 1];
        pointCloud[i].point.z = _points[3 * i + 2];

        pclPointCloud[i].x = _points[3 * i];
        pclPointCloud[i].y = _points[3 * i + 1];
        pclPointCloud[i].z = _points[3 * i + 2];
    }

    for (int i = 0; i < features.size(); i++)
    {
        // copy back the rotation
        cv::Mat rotationVector = (cv::Mat_<double>(3, 1) << _cameraPose[6 * i + 0], _cameraPose[6 * i + 1], _cameraPose[6 * i + 2]);
        cv::Rodrigues(rotationVector, features[i].rotation);

        // copy back the translation
        cv::Mat translation = (cv::Mat_<double>(3, 1) << _cameraPose[6 * i + 3], _cameraPose[6 * i + 4], _cameraPose[6 * i + 5]);
        translation.copyTo(features[i].translation);
    }
    // copy back the intrinsics
    cameraMatrix.at<double>(0, 0) = _intrinsics[0]; // fx
    cameraMatrix.at<double>(1, 1) = _intrinsics[1]; // fy
    cameraMatrix.at<double>(0, 2) = _intrinsics[2]; // cx
    cameraMatrix.at<double>(1, 2) = _intrinsics[3]; // cy
}


void bundleAdjustment::solveProblem()
{
    ceres::Solver::Options options;
    options.linear_solver_type = ceres::DENSE_SCHUR;
    options.minimizer_progress_to_stdout = true;
    options.max_num_iterations = 500;
    options.eta = 1e-2;
    options.max_solver_time_in_seconds = 3500;
    options.logging_type = ceres::LoggingType::SILENT;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &_problem, &summary);

    std::cout << summary.BriefReport() << ", SolverTime = " << summary.total_time_in_seconds << std::endl;

    if (summary.termination_type != ceres::CONVERGENCE)
    {
        std::cout << "Bundle adjustment failed." << std::endl;
        std::cout << summary.FullReport();
        return;
    }
}

bundleAdjustment::~bundleAdjustment()
{
    delete[] _points;
    delete[] _intrinsics;
    delete[] _cameraPose;
}

--------------------------------------------------------------------------------
/src/visualizer.cpp:
--------------------------------------------------------------------------------
#include "visualizer.h"
#include "loadImages.h"
#include <mutex>

std::mutex mutex;

visualizer::visualizer(pcl::PointCloud<pcl::PointXYZRGB>::Ptr& pclPointCloudPtr, double cameraScale) : _pclPointCloudPtr(pclPointCloudPtr), _cameraScale(cameraScale), _isUpdateRequired(false), _currentCameraIdx(0)
{
    std::thread mythread([this] { show(); }); // show() thread - LIVE point cloud updates in the viewer
    mythread.detach();
}

visualizer::~visualizer()
{
    _pclPointCloudPtr->clear();
}

void visualizer::show()
{
    // initialize the pcl 3D viewer
    pcl::visualization::PCLVisualizer::Ptr viewer(new pcl::visualization::PCLVisualizer("3D Viewer"));
    // display the point cloud
    viewer->setBackgroundColor(0.04, 0.04, 0.12);
    viewer->setShowFPS(true);
    viewer->setWindowName("Point Cloud Viewer");
    // pos_x, pos_y, pos_z, view_x, view_y, view_z
    viewer->setCameraPosition(0.100329, -2.76665, -6.4343, 0.0162269, -0.967499, 0.252354);

    while (!viewer->wasStopped())
    {
        if (_isUpdateRequired)
        {
            mutex.lock();
            viewer->removeAllPointClouds();
            viewer->addPointCloud(_pclPointCloudPtr, "point cloud");
            viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 3, "point cloud");

            viewer->removeAllShapes();
            addCamerasToViewer(viewer);
            _isUpdateRequired = false;
            mutex.unlock();
        }
        viewer->spinOnce(100);
    }
}

void visualizer::addCamerasToViewer(pcl::visualization::PCLVisualizer::Ptr& viewer)
{
    std::vector<imageFeatures>& features = features::getFeatures();
    for (int i = 0; i < features.size(); i++)
    {
        if (features[i].rotation.rows == 0) // if not reconstructed yet
        {
            break;
        }
        _currentCameraIdx++;

        /* center formula from https://colmap.github.io/format.html#images-txt
        The coordinates of the projection/camera center are given by -R^t * T,
        where R^t is the inverse/transpose of the 3x3 rotation matrix composed from the quaternion and T is the translation vector */
        cv::Mat center = -features[i].rotation.t() * features[i].translation;
        // formulas from http://www.pcl-users.org/How-to-draw-camera-pyramids-in-Visualizer-tp4019124p4021310.html
        cv::Mat right = features[i].rotation.row(0).t() * _cameraScale;
        cv::Mat up = -features[i].rotation.row(1).t() * _cameraScale;
        cv::Mat forward = features[i].rotation.row(2).t() * _cameraScale;

        pcl::PointXYZ middle, rightUp, rightDown, leftUp, leftDown;
        middle = toPointXYZ(center);
        rightUp = toPointXYZ(center + forward + right / 1.5 + up / 2.0);
        rightDown = toPointXYZ(center + forward + right / 1.5 - up / 2.0);
        leftUp = toPointXYZ(center + forward - right / 1.5 + up / 2.0);
        leftDown = toPointXYZ(center + forward - right / 1.5 - up / 2.0);

        // lines idea from the showCameras function at https://github.com/PointCloudLibrary/pcl/blob/master/gpu/kinfu_large_scale/tools/standalone_texture_mapping.cpp
        std::string cameraName = getFileName(features[i].path);
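        // shape/text ids in the viewer must be unique, so the stale label is
        // removed before the camera name is drawn again on the next update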
        viewer->removeText3D(cameraName + std::to_string(i));
        viewer->addText3D(cameraName, middle, 0.1, 1.0, 1.0, 1.0, cameraName + std::to_string(i));

        viewer->addLine(middle, rightUp, 1, 1, 1, "camera" + std::to_string(i) + "line1");
        viewer->addLine(middle, rightDown, 1, 1, 1, "camera" + std::to_string(i) + "line2");
        viewer->addLine(middle, leftUp, 1, 1, 1, "camera" + std::to_string(i) + "line3");
        viewer->addLine(middle, leftDown, 1, 1, 1, "camera" + std::to_string(i) + "line4");
        viewer->addLine(rightDown, rightUp, 1, 1, 1, "camera" + std::to_string(i) + "line5");
        viewer->addLine(leftDown, leftUp, 1, 1, 1, "camera" + std::to_string(i) + "line6");
        viewer->addLine(leftUp, rightUp, 1, 1, 1, "camera" + std::to_string(i) + "line7");
        viewer->addLine(leftDown, rightDown, 1, 1, 1, "camera" + std::to_string(i) + "line8");
    }
}

// converts an opencv Mat to a pcl PointXYZ and returns it
pcl::PointXYZ visualizer::toPointXYZ(cv::Mat point3d)
{
    return pcl::PointXYZ(point3d.at<double>(0), point3d.at<double>(1), point3d.at<double>(2));
}

// this function will cause the thread to add new cameras to the viewer
void visualizer::update()
{
    _isUpdateRequired = true;
};

// this function resets _currentCameraIdx, and the thread will re-add all cameras to the viewer
void visualizer::updateAll()
{
    _isUpdateRequired = true;
    _currentCameraIdx = 0;
};

--------------------------------------------------------------------------------
/src/features.h:
--------------------------------------------------------------------------------
#pragma once
#include "cameraCalibration.h"

#include <Eigen/Core>

#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
#include <map>

#include "opencv2/features2d.hpp"


// function declarations
int getScreenWidth();
void resizeWithAspectRatio(cv::Mat& image, int width);
std::string getFileNameWithExtension(std::string path);
std::string getFileName(std::string path);


struct matchingKeyPoints
{
    std::vector<int> currentKeyPointsIdx;
    std::vector<int> otherKeyPointsIdx;
} typedef matchingKeyPoints;

struct imageFeatures
{
    std::string path;
    cv::Mat image;
    std::vector<cv::KeyPoint> keyPoints;
    cv::Mat descriptors;

    matchingKeyPoints _matchingKeyPoints;

    cv::Mat projection;
    cv::Mat rotation;
    cv::Mat translation;

    // based on the readCamPoseFile function at https://github.com/PointCloudLibrary/pcl/blob/master/gpu/kinfu_large_scale/tools/standalone_texture_mapping.cpp
    Eigen::Matrix4f getCamPose()
    {
        Eigen::Matrix4f cameraPose;
        cameraPose << rotation.at<double>(0, 0), rotation.at<double>(0, 1), rotation.at<double>(0, 2), translation.at<double>(0, 0)/*Tx*/,
            rotation.at<double>(1, 0), rotation.at<double>(1, 1), rotation.at<double>(1, 2), translation.at<double>(1, 0)/*Ty*/,
            rotation.at<double>(2, 0), rotation.at<double>(2, 1), rotation.at<double>(2, 2), translation.at<double>(2, 0)/*Tz*/,
            0, 0, 0, 1/*Scale*/;

        return cameraPose;
    }
} typedef imageFeatures;

class features
{
protected:
    static std::vector<imageFeatures> _features;
private:
    inline static cv::Mat smallExtractFeatures(std::string path, int size);
    inline static int widthFor800Features(std::string path);
    inline static imageFeatures extractFeatures(std::string path, double downScale = 1);
    static int matchFeaturesScore(cv::Mat firstImageDescriptors, cv::Mat secondImageDescriptors);
    static void matchFeatures(imageFeatures& firstImageFeatures, imageFeatures& secondImageFeatures,
        std::vector<cv::Point2f>& firstPoints, std::vector<cv::Point2f>& secondPoints);
    static std::vector<int> findMaxRoute(const std::vector<std::vector<int>> scoreMatrix);
    static cv::Ptr<cv::Feature2D> _detector;
    static cv::Ptr<cv::DescriptorMatcher> _matcher;
public:
    void matchAllFeatures(bool showMatchingFeature = false);
    features(std::vector<std::string> images, double downScaleFactor = 1);
    ~features();
    static std::vector<imageFeatures>& getFeatures();

    static void sortImages(std::vector<std::string>& images);
    static void getCurrentKeyPoints(std::vector<cv::Point2f>& currentKeyPoints, int featureIndex);
    static void getOtherKeyPoints(std::vector<cv::Point2f>& otherKeyPoints, int featureIndex);
};


int features::widthFor800Features(std::string path)
{
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat src = cv::imread(path, cv::IMREAD_GRAYSCALE);
    cv::Mat save = src;
    int size = 600;
    while (keypoints.size() < 800)
    {
        if (size > src.size().width)
        {
            return src.size().width;
        }
        resizeWithAspectRatio(save, size);
        _detector->detect(save, keypoints);
        save = src;
        size += 50;
    }
    return size;
}

cv::Mat features::smallExtractFeatures(std::string path, int size)
{
    cv::Mat src = cv::imread(path, cv::IMREAD_GRAYSCALE);

    resizeWithAspectRatio(src, size);

    if (!src.empty())
    {
        cv::Mat descriptors;
        std::vector<cv::KeyPoint> keypoints;
        _detector->detectAndCompute(src, cv::noArray(), keypoints, descriptors);
        std::cout << getFileNameWithExtension(path) << " " << keypoints.size() << " features were extracted\n";
        return descriptors;
    }
    throw std::runtime_error(path + " is not a valid image");
}

imageFeatures features::extractFeatures(std::string path, double downScale)
{
    cv::Mat src = cv::imread(path);
    cv::resize(src, src, src.size() / (int)downScale);
    if (!src.empty())
    {
        cv::Mat descriptors;
        std::vector<cv::KeyPoint> keypoints;
        _detector->detectAndCompute(src, cv::noArray(), keypoints, descriptors);
        std::cout << getFileNameWithExtension(path) << " " << keypoints.size() << " features were extracted\n";
        return imageFeatures{ path, src, keypoints, descriptors };
    }
    throw std::runtime_error(path + " is not a valid image");
}

--------------------------------------------------------------------------------
/src/surfaceReconstruction.cpp:
--------------------------------------------------------------------------------
#include "surfaceReconstruction.h"


// information from https://doc.cgal.org/latest/Manual/tuto_reconstruction.html
surfaceReconstruction::surfaceReconstruction(std::string inputFile, std::string saveFile)
{
    // read the point cloud
    pointSet points;

    std::ifstream stream(inputFile, std::ios_base::binary);
    if (!stream)
    {
        std::cerr << "Error: cannot read file " << inputFile << std::endl;
        return;
    }
    stream >> points;
    std::cout << "Read " << points.size() << " points" << std::endl;
    if (points.empty())
    {
        return;
    }

    // Compute the average spacing using a neighborhood of 6 points
    double averageSpacing = CGAL::compute_average_spacing<CGAL::Sequential_tag>(points, 6);
    // TOFIX!
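    // (a possible fix for the disabled call below - an untested assumption based
    // on the CGAL reconstruction tutorial cited above: restore the concurrency tag,
    // i.e. CGAL::remove_outliers<CGAL::Sequential_tag>(points, 16, ...),
    // and call points.collect_garbage() afterwards to drop the removed points)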
    // CGAL::remove_outliers(points, 16, points.parameters().threshold_percent(100).threshold_distance(2 * averageSpacing));

    std::cout << "Outliers were removed" << std::endl;

    CGAL::grid_simplify_point_set(points, 2 * averageSpacing);
    std::cout << "Points were simplified" << std::endl;
    points.collect_garbage();
    CGAL::jet_smooth_point_set<CGAL::Sequential_tag>(points, 64);

    CGAL::Scale_space_surface_reconstruction_3<kernel> reconstruct(points.points().begin(), points.points().end());
    // Smooth by increasing the scale over 24 iterations
    reconstruct.increase_scale(24);
    // Mesh with the Advancing Front mesher with a maximum facet length of 0.5
    reconstruct.reconstruct_surface(CGAL::Scale_space_reconstruction_3::Advancing_front_mesher<kernel>(0.5));
    std::cout << "Surface reconstruction is finished" << std::endl;

    // convert the reconstruction to a cgal mesh
    Mesh cgalMesh;

    for (pointSet::Index idx : points)
    {
        cgalMesh.add_vertex(points.point(idx));
    }
    for (const auto& face : CGAL::make_range(reconstruct.facets_begin(), reconstruct.facets_end()))
    {
        cgalMesh.add_face(Mesh::Vertex_index(face[0]), Mesh::Vertex_index(face[1]), Mesh::Vertex_index(face[2]));
    }

    std::cout << "Close holes" << std::endl;
    closeHoles(cgalMesh);

    // convert to a pcl PolygonMesh in order to save as a ply file
    pcl::PointCloud<pcl::PointXYZ>::Ptr mesh_cloud(new pcl::PointCloud<pcl::PointXYZ>);


    _polygonMesh.polygons.resize(cgalMesh.faces().size());

    for (const Mesh::Vertex_index vertexIdx : cgalMesh.vertices())
    {
        mesh_cloud->push_back(pcl::PointXYZ(cgalMesh.point(vertexIdx).x(), cgalMesh.point(vertexIdx).y(), cgalMesh.point(vertexIdx).z()));
    }
    int i = 0;
    for (const Mesh::Face_index& faceIdx : cgalMesh.faces())
    {
        CGAL::Vertex_around_face_circulator<Mesh> vcirc(cgalMesh.halfedge(faceIdx), cgalMesh), done(vcirc);
        do
        {
            _polygonMesh.polygons[i].vertices.push_back(*vcirc++);
        } while (vcirc != done);
        i++;
    }
    pcl::toPCLPointCloud2(*mesh_cloud, _polygonMesh.cloud);
}

// information from https://doc.cgal.org/latest/Polygon_mesh_processing/index.html#PMPHoleFilling
bool surfaceReconstruction::isHoleToClose(halfedge_descriptor h, Mesh& mesh)
{
    double maxHoleDiam = 850;
    int maxNumHoleEdges = 35;
    int num_hole_edges = 0;
    CGAL::Bbox_3 hole_bbox;
    for (halfedge_descriptor hc : CGAL::halfedges_around_face(h, mesh))
    {
        hole_bbox += mesh.point(target(hc, mesh)).bbox();
        //if (++num_hole_edges > maxNumHoleEdges) return false;
        if (hole_bbox.xmax() - hole_bbox.xmin() > maxHoleDiam) return false;
        if (hole_bbox.ymax() - hole_bbox.ymin() > maxHoleDiam) return false;
        if (hole_bbox.zmax() - hole_bbox.zmin() > maxHoleDiam) return false;
    }
    return true;
}

// Incrementally fill the holes that are no larger than a given diameter and have no more than a given number of edges
void surfaceReconstruction::closeHoles(Mesh& mesh)
{
    unsigned int nbHoles = 0;
    std::vector<halfedge_descriptor> border_cycles;
    // collect one halfedge per boundary cycle
    CGAL::Polygon_mesh_processing::extract_boundary_cycles(mesh, std::back_inserter(border_cycles));
    for (halfedge_descriptor h : border_cycles)
    {
        if (!isHoleToClose(h, mesh))
        {
            std::cout << "Skipping the hole" << std::endl;
            continue;
        }

        std::vector<boost::graph_traits<Mesh>::face_descriptor> patch_facets;
        std::vector<boost::graph_traits<Mesh>::vertex_descriptor> patch_vertices;
        bool success = std::get<0>(CGAL::Polygon_mesh_processing::triangulate_refine_and_fair_hole(mesh, h, std::back_inserter(patch_facets), std::back_inserter(patch_vertices)));
        std::cout << "Constructed patch: facets: " << patch_facets.size() << ", vertices: " << patch_vertices.size() << std::endl;
        nbHoles++;
    }
    std::cout << nbHoles << " holes have been filled" << std::endl;
}


// saves the mesh to a ply file
void surfaceReconstruction::saveMesh(std::string filePath)
{
    pcl::io::savePLYFile(filePath, _polygonMesh);
}

pcl::PolygonMesh& surfaceReconstruction::getPolygonMesh()
{
    return _polygonMesh;
}

surfaceReconstruction::~surfaceReconstruction()
{
    _polygonMesh.cloud.data.clear();
    _polygonMesh.polygons.clear();
}

--------------------------------------------------------------------------------
/src/cameraCalibration.cpp:
--------------------------------------------------------------------------------
#include "cameraCalibration.h"
#include "features.h"

cv::Mat cameraCalibration::_cameraMatrix;
cv::Mat cameraCalibration::_distortionCoefficients;

// extract corner points from chessboard images - helps to get parameters of the camera that took the images
int cameraCalibration::addChessboardPoints(const std::vector<std::string>& chessboardImages, cv::Size boardSize, const bool showMatchingFeature)
{
    std::vector<cv::Point2f> imageCorners;
    std::vector<cv::Point3f> objectCorners;

    // chessboard corners are at (i,j,0)
    for (int i = 0; i < boardSize.height; i++)
        for (int j = 0; j < boardSize.width; j++)
            objectCorners.push_back(cv::Point3f(i, j, 0.0f));

    cv::Mat image;
    int goodImages = 0;
    int screenWidth = getScreenWidth();

    for (int i = 0; i < chessboardImages.size(); i++) // for all chessboards - find the corners
    {
        image = cv::imread(chessboardImages[i], cv::IMREAD_GRAYSCALE);
        if (!cv::findChessboardCorners(image, boardSize, imageCorners, cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FILTER_QUADS))
        {
            std::cout << "find chessboard corners failed" << std::endl;
            continue;
        }
        // refine the imageCorners to subpixel accuracy
        cv::cornerSubPix(image, imageCorners, cv::Size(5, 5), cv::Size(-1, -1),
            cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS,
                30, // max number of iterations
                0.1)); // min accuracy

        if (imageCorners.size() == boardSize.area()) // if the chessboard image is valid
        {
            // add chessboard image and scene points
            // 2D points
            imagePoints.push_back(imageCorners);
            // corresponding 3D scene points
            objectPoints.push_back(objectCorners);
            goodImages++;
        }
        std::cout << "Chessboard image " << i + 1 << " / " << chessboardImages.size() << std::endl;

        if (showMatchingFeature) // draw the corners
        {
            cv::drawChessboardCorners(image, boardSize, imageCorners, true);
            resizeWithAspectRatio(image, screenWidth);
            cv::imshow("Corners on chessboard", image);
            cv::waitKey(100);
        }
    }
    imageSize = image.size();
    cv::destroyAllWindows();
    return goodImages;
}

// calibrate using the data gathered from the chessboard analysis
double cameraCalibration::calibrate()
{
    // calibrate the camera
    return cv::calibrateCamera(
        objectPoints,
        imagePoints,
imageSize, 68 | _cameraMatrix, 69 | _distortionCoefficients, 70 | cv::noArray(), 71 | cv::noArray(), 72 | cv::CALIB_ZERO_TANGENT_DIST | cv::CALIB_FIX_PRINCIPAL_POINT 73 | ); 74 | } 75 | 76 | //to get undistorted image ------ NOTE: Check** 77 | cv::Mat cameraCalibration::remap(const cv::Mat& image) 78 | { 79 | cv::Mat undistorted; 80 | if (map1.empty() || map2.empty()) 81 | cv::initUndistortRectifyMap(_cameraMatrix, // computed camera matrix 82 | _distortionCoefficients, // computed distortion matrix 83 | cv::Mat(), // optional rectification (none) 84 | cv::Mat(), // camera matrix to generate undistorted 85 | image.size(), // size of undistorted 86 | CV_32FC1, // type of output map 87 | map1, map2); // the x and y mapping functions 88 | 89 | cv::remap(image, undistorted, map1, map2, cv::INTER_LINEAR); 90 | return undistorted; 91 | } 92 | 93 | // save calibration data we analyzed to given file path 94 | void cameraCalibration::save(std::string filePath) 95 | { 96 | cv::FileStorage storage(filePath, cv::FileStorage::WRITE); 97 | storage << "cameraMatrix" << _cameraMatrix; 98 | storage << "distortionCoefficients" << _distortionCoefficients; 99 | storage.release(); 100 | } 101 | 102 | // load calibration data from the given file path 103 | void cameraCalibration::load(std::string filePath) 104 | { 105 | cv::FileStorage fs(filePath, cv::FileStorage::READ); 106 | fs["cameraMatrix"] >> _cameraMatrix; 107 | fs["distortionCoefficients"] >> _distortionCoefficients; 108 | fs.release(); 109 | } 110 | 111 | //get the fical from the camera metrix data 112 | double cameraCalibration::getFocal() 113 | { 114 | return _cameraMatrix.at(0, 0); 115 | } 116 | 117 | //get specific PARAM from the camera metrix - NOTE: Check* 118 | cv::Point2d cameraCalibration::getPrinciplePoint() 119 | { 120 | return cv::Point2d(_cameraMatrix.at(0, 2), _cameraMatrix.at(1, 2)); 121 | } 122 | 123 | //get camera matrix 124 | cv::Mat& cameraCalibration::getCameraMatrix() 125 | { 126 | /*| fx 0 cx | 127 | | 0 fy cy | 128 | | 0 0 1 |*/ 129 | return _cameraMatrix; 130 | } 131 | 132 | //get distortion coefficients 133 | cv::Mat& cameraCalibration::getDistortionCoefficients() 134 | { 135 | return _distortionCoefficients; 136 | } 137 | 138 | cv::Mat& cameraCalibration::estimateCameraMatrix(double focalLength, cv::Size imageSize) 139 | { 140 | double cx = imageSize.width / 2; 141 | double cy = imageSize.height / 2; 142 | 143 | cv::Point2d pp(cx, cy); 144 | 145 | _cameraMatrix = cv::Mat::eye(3, 3, CV_64F); 146 | 147 | _cameraMatrix.at(0, 0) = focalLength; 148 | _cameraMatrix.at(1, 1) = focalLength; 149 | _cameraMatrix.at(0, 2) = cx; 150 | _cameraMatrix.at(1, 2) = cy; 151 | 152 | return _cameraMatrix; 153 | } 154 | void cameraCalibration::downScale(double downScale) 155 | { 156 | _cameraMatrix.at(0, 0) /= downScale; //fx 157 | _cameraMatrix.at(1, 1) /= downScale; //fy 158 | _cameraMatrix.at(0, 2) /= downScale; //cx 159 | _cameraMatrix.at(1, 2) /= downScale; //cy 160 | } 161 | 162 | cameraCalibration::~cameraCalibration() 163 | { 164 | _cameraMatrix.release(); 165 | _distortionCoefficients.release(); 166 | map1.release(); 167 | map2.release(); 168 | for (int i = 0; i < objectPoints.size(); i++) 169 | { 170 | objectPoints[i].clear(); 171 | } 172 | objectPoints.clear(); 173 | for (int i = 0; i < imagePoints.size(); i++) 174 | { 175 | imagePoints[i].clear(); 176 | } 177 | imagePoints.clear(); 178 | } 179 | 180 | std::ostream& operator<<(std::ostream& out, const cameraCalibration& calib) 181 | { 182 | out << "Calibration parms:" << std::endl; 183 
| out << "Camera matrix:" << std::endl; 184 | out << calib.getCameraMatrix() << std::endl; 185 | out << "Distortion coefficients:" << std::endl; 186 | out << calib.getDistortionCoefficients() << std::endl; 187 | return out; 188 | } 189 | -------------------------------------------------------------------------------- /src/structureFromMotion.cpp: -------------------------------------------------------------------------------- 1 | #include "structureFromMotion.h" 2 | #include "bundleAdjustment.h" 3 | #include "loadImages.h" 4 | 5 | #define RATIO_THRESH 0.7 6 | 7 | // builds pcl and reconstract camera position 8 | // gets camera calib and vector of features 9 | structureFromMotion::structureFromMotion(double cameraScale) 10 | { 11 | std::vector& features = features::getFeatures(); 12 | cv::Mat cameraMatrix = cameraCalibration::getCameraMatrix(); 13 | // initialize the pcl point cloud ptr which contains points - each point has XYZ coordinates and also has a Red Green Blue params - RGB 14 | _pclPointCloudPtr.reset(new pcl::PointCloud()); 15 | 16 | _visualizer = new visualizer(_pclPointCloudPtr, cameraScale); 17 | 18 | cv::Mat E, rotation, translation, points4D; 19 | 20 | std::vector currentKeyPoints, otherKeyPoints; 21 | features::getCurrentKeyPoints(currentKeyPoints, 0); 22 | features::getOtherKeyPoints(otherKeyPoints, 0); 23 | 24 | // getting essential metrix and recover second camera position 25 | E = findEssentialMat(currentKeyPoints, otherKeyPoints, cameraMatrix, cv::RANSAC, 0.9999999999999999, 1.0); 26 | 27 | cv::recoverPose(E, currentKeyPoints, otherKeyPoints, cameraMatrix, rotation, translation); 28 | 29 | cv::Mat firstProjection, firstTranslation = (cv::Mat_(3, 1) << 0, 0, 0); 30 | hconcat(cameraMatrix, firstTranslation, firstProjection); 31 | features[0].projection = firstProjection; 32 | 33 | features[0].rotation = (cv::Mat_(3, 3) << 1, 0, 0, 0, 1, 0, 0, 0, 1); 34 | features[0].translation = firstTranslation; 35 | _visualizer->update(); 36 | 37 | _averageColor = 0; 38 | 39 | for (unsigned int i = 1; i < features.size(); i++) // go throw all iamges 40 | { 41 | features::getCurrentKeyPoints(currentKeyPoints, i - 1); 42 | features::getOtherKeyPoints(otherKeyPoints, i - 1); 43 | 44 | if (i > 1) // first pair will skip this 45 | { // recover rotation and translation between current image and previous image 46 | cv::Mat descriptors3d; 47 | 48 | // descriptors of previous image thet was added to the poincloud 49 | for (int j = 0; j < _previous3dPoints.size(); j++) 50 | { // add descriptors at pointInCloud position 51 | descriptors3d.push_back(features[i - 1].descriptors.row(_previous3dPoints[j].otherKeyPointsIdx)); 52 | } 53 | 54 | std::vector imagePoints2d; 55 | std::vector newObjIndexes; 56 | std::vector objectPoints3d; 57 | 58 | // obtain matches between previous 3d points and current image 2d points 59 | obtainMatches(features[i], descriptors3d, imagePoints2d, objectPoints3d, _previous3dPoints); 60 | 61 | double maxVal; 62 | cv::minMaxIdx(imagePoints2d, nullptr, &maxVal); 63 | 64 | std::future future = std::async(std::launch::async, 65 | [&]() // capturing all Local variables by Reference 66 | { 67 | cv::solvePnPRansac(objectPoints3d, imagePoints2d, cameraMatrix, cameraCalibration::getDistortionCoefficients(), 68 | rotation, translation, true, INT_MAX, maxVal * 0.0023, 0.9999999999999999); 69 | }); 70 | 71 | if (future.wait_for(std::chrono::seconds(15)) == std::future_status::timeout) 72 | { // if solvePnPRansac is in timeout we should break the images sequence 73 | 
76 | 
77 |             // convert from Rodrigues format to a rotation matrix (used to build the 3x4 transformation matrix below)
78 |             cv::Rodrigues(rotation, rotation);
79 | 
80 |             _previous3dPoints.clear();
81 |         }
82 |         features[i].rotation = rotation;
83 |         features[i].translation = translation;
84 | 
85 |         // triangulate the points
86 |         hconcat(rotation, translation, translation);
87 |         features[i].projection = cameraMatrix * translation;
88 |         triangulatePoints(features[i - 1].projection, features[i].projection, currentKeyPoints, otherKeyPoints, points4D);
89 | 
90 |         addPoints4DToPointCloud(points4D, features[i - 1], i, currentKeyPoints);
91 | 
92 |         cout << "Points cloud " << getFileName(features[i - 1].path) << " / " << getFileName(features[i].path) << endl;
93 |         _visualizer->update();
94 |     }
95 |     cout << "Number of points in the point cloud " << _pointCloud.size() << endl;
96 |     cout << "Starting Bundle Adjustment" << endl;
97 | 
98 |     // save occluded.jpg - an image filled with the average color of all points in the cloud, used later in the pipeline to texture the untextured parts
99 |     cv::Mat averageColorImage(features[0].image.size().height, features[0].image.size().width, CV_8UC3, _averageColor);
100 |     cv::imwrite(features[0].path.substr(0, features[0].path.find_last_of('/') + 1) + "output/occluded.jpg", averageColorImage);
101 | 
102 |     bundleAdjustment(_pointCloud, *_pclPointCloudPtr);
103 |     _visualizer->updateAll();
104 | 
105 |     // clean the cloud with statistical outlier removal
106 |     std::cout << "Bundle Adjustment finished. Starting Noise Clean" << endl;
107 |     pcl::StatisticalOutlierRemoval<pcl::PointXYZRGB> sor;
108 |     sor.setInputCloud(_pclPointCloudPtr);
109 |     sor.setMeanK(32);          // number of neighbors used to estimate each point's mean distance
110 |     sor.setStddevMulThresh(1); // points whose mean distance is more than 1 stddev above the global mean are removed
111 |     sor.filter(*_pclPointCloudPtr);
112 | 
113 |     _visualizer->update();
114 |     std::cout << _pointCloud.size() - _pclPointCloudPtr->size() << " points were filtered" << endl;
115 | }
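// addPoints4DToPointCloud below converts each homogeneous point (x, y, z, w)
// from triangulatePoints to Cartesian coordinates by dividing by w, then packs
// the pixel's BGR values into the single float field that pcl::PointXYZRGB
// expects. A minimal sketch of that packing; std::memcpy is a strict-aliasing-
// safe alternative to the reinterpret_cast used in the code:
//
//     uint32_t rgb = (uint32_t(r) << 16) | (uint32_t(g) << 8) | uint32_t(b);
//     float packed;
//     std::memcpy(&packed, &rgb, sizeof(packed));
//     pclPoint.rgb = packed;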
116 | 
117 | void structureFromMotion::addPoints4DToPointCloud(cv::Mat points4D, imageFeatures feature, int index, std::vector<cv::Point2f> currentKeyPoints)
118 | {
119 |     for (int j = 0; j < points4D.cols; j++)
120 |     {
121 |         pointInCloud point;
122 | 
123 |         // convert the point from homogeneous to 3d cartesian coordinates
124 |         point.point = cv::Point3d(points4D.at<float>(0, j) / points4D.at<float>(3, j),
125 |             points4D.at<float>(1, j) / points4D.at<float>(3, j), points4D.at<float>(2, j) / points4D.at<float>(3, j));
126 | 
127 |         point.otherKeyPointsIdx = feature._matchingKeyPoints.otherKeyPointsIdx[j];
128 |         point.imageIndex = index;
129 | 
130 |         cv::Vec3b color = feature.image.at<cv::Vec3b>(cv::Point(currentKeyPoints[j].x, currentKeyPoints[j].y));
131 |         // color.val[2] - red, color.val[1] - green, color.val[0] - blue
132 |         uint32_t rgb = (color.val[2] << 16 | color.val[1] << 8 | color.val[0]);
133 |         point.color = *reinterpret_cast<float*>(&rgb);
134 |         pcl::PointXYZRGB pclPoint(point.point.x, point.point.y, point.point.z);
135 |         pclPoint.rgb = *reinterpret_cast<float*>(&rgb);
136 | 
137 |         // running average color of all points in the point cloud - (blue green red)
138 |         _averageColor += (cv::Scalar(color.val[0], color.val[1], color.val[2]) - _averageColor) / (int)(_pointCloud.size() + 1);
139 | 
140 |         _pointCloud.push_back(point);
141 |         _previous3dPoints.push_back(point);
142 |         _pclPointCloudPtr->push_back(pclPoint);
143 |     }
144 | }
145 | 
146 | // obtain matches between the previous 3d points and the current image 2d points
147 | void structureFromMotion::obtainMatches(imageFeatures features, cv::Mat descriptors3d,
148 |     std::vector<cv::Point2f>& imagePoints2d, std::vector<cv::Point3d>& objectPoints3d, std::vector<pointInCloud> previous3dPoints)
149 | {
150 |     std::vector<std::vector<cv::DMatch>> matches;
151 |     cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create(cv::DescriptorMatcher::BRUTEFORCE);
152 | 
153 |     matcher->knnMatch(descriptors3d, features.descriptors, matches, 2);
154 | 
155 |     for (std::vector<cv::DMatch>& match : matches)
156 |     {
157 |         if (match[0].distance < RATIO_THRESH * match[1].distance)
158 |         {
159 |             imagePoints2d.push_back(features.keyPoints[match[0].trainIdx].pt);
160 |             objectPoints3d.push_back(previous3dPoints[match[0].queryIdx].point);
161 |         }
162 |     }
163 | }
164 | 
165 | // saves the point cloud to a file
166 | void structureFromMotion::savePointCloud(std::string filePath)
167 | {
168 |     pcl::io::savePLYFileBinary(filePath, *_pclPointCloudPtr);
169 | }
170 | 
171 | structureFromMotion::~structureFromMotion()
172 | {
173 |     _pointCloud.clear();
174 |     _previous3dPoints.clear();
175 |     _pclPointCloudPtr->clear();
176 |     delete _visualizer;
177 | }
178 | 
--------------------------------------------------------------------------------
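A minimal sketch of driving the class above (not part of the repository sources). It assumes the camera matrix and the feature vector have already been populated by the earlier pipeline stages - main.cpp is outside this excerpt - and the camera scale and output path are illustrative:

    structureFromMotion sfm(1.0);           // builds the cloud; bundle adjustment and
                                            // noise cleaning run inside the constructor
    sfm.savePointCloud("output/cloud.ply"); // binary PLY, viewable in MeshLab or pcl_viewer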
/src/features.cpp:
--------------------------------------------------------------------------------
1 | #include "features.h"
2 | //#include "wtypes.h"
3 | 
4 | 
5 | #pragma warning(disable:26451)
6 | 
7 | #define RATIO_THRESH 0.7
8 | 
9 | std::vector<imageFeatures> features::_features;
10 | cv::Ptr<cv::SIFT> features::_detector = cv::SIFT::create();
11 | cv::Ptr<cv::DescriptorMatcher> features::_matcher = cv::DescriptorMatcher::create(cv::DescriptorMatcher::BRUTEFORCE);
12 | 
13 | // detect and compute the features of all the images
14 | features::features(std::vector<std::string> images, double downScale)
15 | {
16 |     // find features using the SIFT feature detector:
17 |     // for every image in the vector, detectAndCompute locates the
18 |     // keypoints and computes their descriptors
19 |     for (std::string path : images)
20 |     {
21 |         _features.push_back(extractFeatures(path, downScale));
22 |     }
23 | }
24 | 
25 | void features::matchFeatures(imageFeatures& firstImageFeatures, imageFeatures& secondImageFeatures,
26 |     std::vector<cv::Point2f>& firstPoints, std::vector<cv::Point2f>& secondPoints)
27 | {
28 |     std::vector<std::vector<cv::DMatch>> matches;
29 |     std::vector<int>& firstPointsIdx = firstImageFeatures._matchingKeyPoints.currentKeyPointsIdx;
30 |     std::vector<int>& secondPointsIdx = firstImageFeatures._matchingKeyPoints.otherKeyPointsIdx;
31 |     // 2 nearest neighbour match - with k=2 there are two DMatch entries in each matches[i]
32 |     _matcher->knnMatch(firstImageFeatures.descriptors, secondImageFeatures.descriptors, matches, 2);
33 | 
34 |     for (std::vector<cv::DMatch>& match : matches) // the DMatch entries in each match vector are ordered best first
35 |     {
36 |         // a good match is one whose best distance is well below the second-best distance (Lowe's ratio test)
37 |         if (match[0].distance < RATIO_THRESH * match[1].distance)
38 |         {
39 |             // queryIdx refers to keypoints1 and trainIdx refers to keypoints2
40 |             firstPoints.push_back(firstImageFeatures.keyPoints[match[0].queryIdx].pt);
41 |             secondPoints.push_back(secondImageFeatures.keyPoints[match[0].trainIdx].pt);
42 |             firstPointsIdx.push_back(match[0].queryIdx);
43 |             secondPointsIdx.push_back(match[0].trainIdx);
44 |         }
45 |     }
46 | }
47 | 
48 | 
49 | // match the features between consecutive images
50 | void features::matchAllFeatures(bool showMatchingFeature)
51 | {
52 |     int screenWidth = getScreenWidth();
53 | 
54 |     // check all of the features that were found
55 |     for (unsigned int i = 0; i < _features.size() - 1; i++)
56 |     {
57 |         std::vector<cv::Point2f> firstPoints, secondPoints;
58 |         std::vector<int>& firstPointsIdx = _features[i]._matchingKeyPoints.currentKeyPointsIdx;
59 |         std::vector<int>& secondPointsIdx = _features[i]._matchingKeyPoints.otherKeyPointsIdx;
60 | 
61 |         cv::Mat image, mask;
62 |         matchFeatures(_features[i], _features[i + 1], firstPoints, secondPoints);
63 |         if (showMatchingFeature)
64 |         {
65 |             hconcat(_features[i].image, _features[i + 1].image, image); // put images i and i+1 side by side
66 |         }
67 | 
68 |         // erase bad matches using essential matrix constraints
69 |         cv::Mat E = findEssentialMat(firstPoints, secondPoints, cameraCalibration::getCameraMatrix(), cv::RANSAC, 0.9999999999999999, 1.0, mask);
70 | 
71 |         for (int j = mask.rows - 1; j >= 0; j--)
72 |         {
73 |             if (mask.at<uchar>(j, 0)) // the mask value is 1 if the point was used to solve the essential matrix, 0 otherwise
74 |             {
75 |                 if (showMatchingFeature)
76 |                 {
77 |                     // draw a line between the matched points on the image
78 |                     line(image, firstPoints[j], secondPoints[j] + cv::Point2f(_features[i].image.cols, 0), cv::Scalar(rand() % 255, rand() % 255, rand() % 255), 2);
79 |                 }
80 |             }
81 |             else
82 |             {
83 |                 firstPointsIdx.erase(firstPointsIdx.begin() + j);
84 |                 secondPointsIdx.erase(secondPointsIdx.begin() + j);
85 |             }
86 |         }
87 | 
88 |         if (_features[i]._matchingKeyPoints.currentKeyPointsIdx.size() < 20)
89 |         {
90 |             _features.erase(_features.begin() + i, _features.end());
91 |             break;
92 |         }
93 |         std::cout << "Feature matching " << getFileName(_features[i].path) << " / " << getFileName(_features[i + 1].path) << ", good matches " << firstPointsIdx.size() << std::endl;
94 |         if (showMatchingFeature)
95 |         {
96 |             resizeWithAspectRatio(image, screenWidth);
97 |             cv::imshow("Feature matching", image);
98 |             cv::waitKey(100);
99 |         }
100 |     }
101 |     cv::destroyAllWindows();
102 | }
103 | 
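// matchAllFeatures above walks the inlier mask from the last row down to row 0
// on purpose: erasing element j shifts every later element one slot left, so a
// forward loop would misindex the remaining outliers. The same idiom on a
// plain vector:
//
//     std::vector<int> v = { 10, 11, 12, 13 };
//     std::vector<bool> keep = { true, false, true, false };
//     for (int j = (int)v.size() - 1; j >= 0; j--)
//         if (!keep[j])
//             v.erase(v.begin() + j); // indices below j are unaffected
//     // v == { 10, 12 }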
104 | void features::getCurrentKeyPoints(std::vector<cv::Point2f>& currentKeyPoints, int featureIndex)
105 | {
106 |     currentKeyPoints.clear();
107 |     for (int index : _features[featureIndex]._matchingKeyPoints.currentKeyPointsIdx)
108 |     {
109 |         currentKeyPoints.push_back(_features[featureIndex].keyPoints[index].pt);
110 |     }
111 | }
112 | 
113 | void features::getOtherKeyPoints(std::vector<cv::Point2f>& otherKeyPoints, int featureIndex)
114 | {
115 |     otherKeyPoints.clear();
116 |     for (int index : _features[featureIndex]._matchingKeyPoints.otherKeyPointsIdx)
117 |     {
118 |         otherKeyPoints.push_back(_features[featureIndex + 1].keyPoints[index].pt);
119 |     }
120 | }
121 | 
122 | int features::matchFeaturesScore(cv::Mat firstImageDescriptors, cv::Mat secondImageDescriptors)
123 | {
124 |     int score = 0;
125 |     std::vector<std::vector<cv::DMatch>> matches;
126 |     // 2 nearest neighbour match - with k=2 there are two DMatch entries in each matches[i]
127 |     _matcher->knnMatch(firstImageDescriptors, secondImageDescriptors, matches, 2);
128 | 
129 |     // the DMatch entries in each match vector are ordered best first
130 |     for (std::vector<cv::DMatch>& match : matches)
131 |     {
132 |         // a good match is one whose best distance is well below the second-best distance
133 |         if (match[0].distance < RATIO_THRESH * match[1].distance)
134 |         {
135 |             score++;
136 |         }
137 |     }
138 |     return score;
139 | }
140 | 
141 | 
142 | // find a maximum cost route through the score matrix (greedy traversal)
143 | std::vector<int> features::findMaxRoute(const std::vector<std::vector<int>> scoreMatrix)
144 | {
145 |     int counter = 0;
146 |     int j = 0, i = 0;
147 |     int max = INT_MIN;
148 |     std::map<int, int> visitedRouteList;
149 | 
150 |     // starting from index 0
151 |     visitedRouteList[0] = 1;
152 |     std::vector<int> route(scoreMatrix.size());
153 | 
154 |     // traverse the adjacency matrix - scoreMatrix
155 |     while (i < scoreMatrix.size() && j < scoreMatrix[i].size())
156 |     {
157 |         // corner of the matrix
158 |         if (counter >= scoreMatrix[i].size() - 1)
159 |         {
160 |             break;
161 |         }
162 | 
163 |         // if this node is unvisited and its score is higher than the current best, update the best
164 |         if (j != i && (visitedRouteList[j] == 0))
165 |         {
166 |             if (scoreMatrix[i][j] > max)
167 |             {
168 |                 max = scoreMatrix[i][j];
169 |                 route[counter] = j + 1;
170 |             }
171 |         }
172 |         j++;
173 | 
174 |         // all candidates from the i-th image were checked - move to the best one
175 |         if (j == scoreMatrix[i].size())
176 |         {
177 |             max = INT_MIN;
178 |             visitedRouteList[route[counter] - 1] = 1;
179 |             j = 0;
180 |             i = route[counter] - 1;
181 |             counter++;
182 |         }
183 |     }
184 | 
185 |     // update the ending node in the route from the node which was visited last
186 |     i = route[counter - 1] - 1;
187 | 
188 |     for (j = 0; j < scoreMatrix.size(); j++)
189 |     {
190 | 
191 |         if ((i != j) && scoreMatrix[i][j] > max)
192 |         {
193 |             max = scoreMatrix[i][j];
194 |             route[counter] = j + 1;
195 |         }
196 |     }
197 |     return route;
198 | }
199 | 
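// A small worked example of the greedy traversal in findMaxRoute, with
// illustrative numbers. Scores are symmetric - score(i, j) is the number of
// good matches between images i and j, as computed by matchFeaturesScore:
//
//     scores = | 0 9 2 |
//              | 9 0 5 |
//              | 2 5 0 |
//
// Starting from image 0, the highest-scoring unvisited image is 1 (score 9);
// from image 1 it is image 2 (score 5), so the visiting order is 0 -> 1 -> 2.
// route stores each pick 1-based (route[0] == 2 means image index 1), and
// sortImages below subtracts 1 when it reorders the image paths.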
200 | void features::sortImages(std::vector<std::string>& images)
201 | {
202 |     std::cout << "Calculating image width for 800 features" << std::endl;
203 |     std::map<std::string, cv::Mat> descriptors;
204 |     int size = 0;
205 |     for (int i = 1; i < 4; i++) // incremental average over 3 sample images
206 |     {
207 |         size += (widthFor800Features(images[(images.size() / 4) + i - 1]) - size) / i;
208 |     }
209 |     std::cout << "Size is " << size << std::endl;
210 |     for (std::string path : images)
211 |     {
212 |         descriptors[path] = smallExtractFeatures(path, size);
213 |     }
214 | 
215 |     std::vector<std::vector<int>> scoreMatrix;
216 |     for (int i = 0; i < images.size(); i++)
217 |     {
218 |         scoreMatrix.push_back(std::vector<int>(images.size()));
219 |     }
220 |     for (int i = 0; i < images.size(); i++)
221 |     {
222 |         for (int j = i + 1; j < images.size(); j++)
223 |         {
224 |             int score = matchFeaturesScore(descriptors[images[i]], descriptors[images[j]]);
225 |             scoreMatrix[i][j] = score;
226 |             scoreMatrix[j][i] = score;
227 |         }
228 |         std::cout << "matching " << i << " / " << images.size() - 1 << std::endl;
229 |     }
230 | 
231 |     std::vector<int> sorted = findMaxRoute(scoreMatrix);
232 | 
233 |     // reorder the images according to the max route ordering
234 |     std::vector<std::string> sortedImages;
235 |     for (int i = 0; i < images.size(); i++)
236 |     {
237 |         sortedImages.push_back(images[sorted[i] - 1]);
238 |     }
239 |     images = sortedImages;
240 | }
241 | 
242 | // return the features vector
243 | std::vector<imageFeatures>& features::getFeatures()
244 | {
245 |     return _features;
246 | }
247 | 
248 | features::~features()
249 | {
250 |     for (int i = 0; i < _features.size(); i++)
251 |     {
252 |         _features[i].image.release();
253 |         _features[i].keyPoints.clear();
254 |         _features[i].descriptors.release();
255 |         _features[i]._matchingKeyPoints.currentKeyPointsIdx.clear();
256 |         _features[i]._matchingKeyPoints.otherKeyPointsIdx.clear();
257 |         _features[i].projection.release();
258 |         _features[i].rotation.release();
259 |         _features[i].translation.release();
260 |     }
261 |     _features.clear();
262 | }
263 | 
264 | // resize the image to the given width while keeping its aspect ratio
265 | void resizeWithAspectRatio(cv::Mat& image, int width)
266 | {
267 |     float ratio = width / float(image.size().width);
268 |     cv::Size dim = cv::Size(width, int(image.size().height * ratio));
269 | 
270 |     resize(image, image, dim);
271 | }
272 | 
273 | // get the screen width
274 | int getScreenWidth()
275 | {
276 |     // RECT desktop;
277 |     // // get a handle to the desktop window
278 |     // const HWND hDesktop = GetDesktopWindow();
279 |     // // get the size of the screen into the desktop variable
280 |     // GetWindowRect(hDesktop, &desktop);
281 | 
282 |     // return desktop.right; // screen width
283 |     return 1500; // fixed fallback width - the Windows-only query above is disabled
284 | }
285 | 
286 | std::string getFileNameWithExtension(std::string path)
287 | {
288 |     return path.substr(path.find_last_of("/") + 1, path.size() - 1);
289 | }
290 | 
291 | std::string getFileName(std::string path)
292 | {
293 |     std::string fileNameWithExtension = getFileNameWithExtension(path);
294 |     return fileNameWithExtension.substr(0, fileNameWithExtension.find_first_of('.'));
295 | }
--------------------------------------------------------------------------------
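A quick worked example of the filename helpers above (the path is illustrative): getFileNameWithExtension("dataset/roll.001.jpg") strips the directory and returns "roll.001.jpg", and getFileName then cuts at the first dot, returning "roll" - note that dotted filenames are truncated at the first dot, not the last:

    std::cout << getFileName("dataset/roll.001.jpg"); // prints "roll"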