├── CMakeLists.txt
├── README.md
├── good_matches.png
├── include
│   ├── RunSLAM.h
│   ├── VO.h
│   └── VisualOdometry.h
├── launch
│   └── VO.launch
├── lib
│   └── liblightslam.so
├── matches.png
├── package.xml
├── run_dataset.sh
└── src
    ├── RunSLAM.cc
    ├── VO.cc
    ├── VisualOdometry.cc
    ├── VisualOdometry.cc.bak
    ├── new.cc
    ├── ros_mono.cc
    ├── run_dataset.cc
    └── test_dataset.cc

/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(light_slam)
3 | 
4 | SET(CMAKE_EXPORT_COMPILE_COMMANDS "ON")
5 | 
6 | IF(NOT CMAKE_BUILD_TYPE)
7 |   SET(CMAKE_BUILD_TYPE Release)
8 | ENDIF()
9 | 
10 | # Print the selected build type
11 | MESSAGE("Build type: " ${CMAKE_BUILD_TYPE})
12 | 
13 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -O3 -march=native ")
14 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -O3 -march=native")
15 | 
16 | # Check C++11 or C++0x support
17 | include(CheckCXXCompilerFlag)
18 | CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11)
19 | CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X)
20 | if(COMPILER_SUPPORTS_CXX11)
21 |   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
22 |   add_definitions(-DCOMPILEDWITHC11)
23 |   message(STATUS "Using flag -std=c++11.")
24 | elseif(COMPILER_SUPPORTS_CXX0X)
25 |   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
26 |   add_definitions(-DCOMPILEDWITHC0X)
27 |   message(STATUS "Using flag -std=c++0x.")
28 | else()
29 |   message(FATAL_ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
30 | endif()
31 | 
32 | find_package(catkin REQUIRED COMPONENTS
33 |   cv_bridge
34 |   roscpp
35 |   std_msgs
36 | )
37 | find_package(OpenCV 3.0 QUIET)
38 | 
39 | if(NOT OpenCV_FOUND)
40 |   find_package(OpenCV 2.4.3 QUIET)
41 |   if(NOT OpenCV_FOUND)
42 |     message(FATAL_ERROR "OpenCV > 2.4.3 not found.")
43 |   endif()
44 | endif()
45 | 
46 | 
47 | catkin_package(
48 | #  INCLUDE_DIRS include
49 | #  LIBRARIES light_slam
50 |   CATKIN_DEPENDS
51 | #  DEPENDS system_lib
52 | )
53 | 
54 | ###########
55 | ## Build ##
56 | ###########
57 | 
58 | ## Specify additional locations of header files
59 | ## Your package locations should be listed before other locations
60 | include_directories(
61 |   ${PROJECT_SOURCE_DIR}/include
62 |   ${catkin_INCLUDE_DIRS}
63 |   ${OpenCV_INCLUDE_DIRS}
64 | )
65 | 
66 | set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_SOURCE_DIR}/lib)
67 | 
68 | add_library(lightslam SHARED
69 |   src/VisualOdometry.cc
70 |   src/RunSLAM.cc
71 |   src/VO.cc
72 | )
73 | 
74 | # add_executable(light_slam src/ros_mono.cc)
75 | # target_link_libraries(light_slam
76 | #   lightslam
77 | #   ${catkin_LIBRARIES}
78 | #   ${OpenCV_LIBRARIES}
79 | # )
80 | 
81 | add_executable(dataset src/run_dataset.cc)
82 | target_link_libraries(dataset
83 |   lightslam
84 |   ${catkin_LIBRARIES}
85 |   ${OpenCV_LIBRARIES}
86 | )
87 | 
88 | add_executable(vo src/test_dataset.cc)
89 | target_link_libraries(vo
90 |   lightslam
91 |   ${catkin_LIBRARIES}
92 |   ${OpenCV_LIBRARIES}
93 | )
94 | 
95 | add_executable(draw src/new.cc)
96 | target_link_libraries(draw ${OpenCV_LIBS})
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Light VO
2 | A lightweight SLAM system that can run on IoT devices
3 | 
4 | ---
5 | 
6 | ### Prerequisites
7 | 
8 | * ROS Kinetic
9 | * OpenCV 3.0+
10 | * In the real-time version: any monocular camera
11 | * In the dataset version: KITTI sequence 00
12 | ---
13 | 
14 | ### How to install
15 | 1. Create a ROS workspace
16 | ```
17 | mkdir -p slam_ws/src
18 | cd slam_ws/src
19 | catkin_init_workspace
20 | ```
21 | 
22 | 2. Clone the repository:
23 | ```
24 | git clone https://github.com/Jing-lun/LightSLAM.git
25 | ```
26 | 
27 | 3. Build
28 | ```
29 | cd ..
30 | catkin_make -j4
31 | ```
32 | ---
33 | 
34 | ### How to run in real time
35 | 1. Install your camera's driver in ROS
36 | 
37 | 2. Calibrate your camera
38 | 
39 | 3. Change the image topic and calibration file path to your own
40 | ```
41 | cd slam_ws/src/light_slam/launch
42 | gedit VO.launch
43 | ```
44 | 
45 | 4. Run it
46 | ```
47 | cd slam_ws
48 | source devel/setup.bash
49 | roslaunch light_slam VO.launch
50 | ```
51 | 
52 | 5. Don't forget to start your camera's node! Here's my camera node:
53 | ```
54 | roslaunch pointgrey_camera_driver camera.launch
55 | ```
56 | 
57 | ### How to run with a dataset (especially for mono SLAM)
58 | 1. Change the dataset paths in src/test_dataset.cc and run_dataset.sh
59 | 
60 | 2. Rebuild and run it
--------------------------------------------------------------------------------
/good_matches.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jing-lun/LightSLAM/dcb2da883291bca14b753dc1b882ceabd9a72b45/good_matches.png
--------------------------------------------------------------------------------
/include/RunSLAM.h:
--------------------------------------------------------------------------------
1 | #ifndef RUNSLAM_H
2 | #define RUNSLAM_H
3 | 
4 | #include "VisualOdometry.h"
5 | #include "VO.h"
6 | 
7 | #include <opencv2/opencv.hpp>
8 | #include <fstream>
9 | 
10 | namespace Light_SLAM
11 | {
12 | 
13 | class VisualOdometry;
14 | class VO;
15 | 
16 | class RunSLAM
17 | {
18 | 
19 | public:
20 | 
21 |     RunSLAM(VisualOdometry* pVO);
22 |     // RunSLAM(VO* pVO);
23 |     ~RunSLAM();
24 | 
25 |     void vo(const cv::Mat& img);
26 | 
27 | public:
28 | 
29 |     VisualOdometry* mpTest1;
30 |     // VO* mpTest1;
31 |     int mFontFace;
32 |     double mFontScale;
33 |     int mFontThickness;
34 |     cv::Point mText;
35 |     cv::Mat mTrajectory;
36 | 
37 |     std::string mstrFrameName;
38 |     std::string mstrTrajName;
39 |     char text[200];
40 |     std::fstream out;
41 | };
42 | 
43 | } // Light_SLAM
44 | #endif // RUNSLAM_H
--------------------------------------------------------------------------------
/include/VO.h:
--------------------------------------------------------------------------------
1 | #ifndef VO_H
2 | #define VO_H
3 | 
4 | #include <iostream>
5 | #include <fstream>
6 | #include <sstream>
7 | #include <string>
8 | #include <vector>
9 | #include <opencv2/opencv.hpp>
10 | #include <opencv2/core/core.hpp>
11 | #include <opencv2/features2d/features2d.hpp>
12 | #include <opencv2/calib3d/calib3d.hpp>
13 | 
14 | extern int frame_id;
15 | namespace Light_SLAM
16 | {
17 | 
18 | class VO
19 | {
20 | 
21 | public:
22 | 
23 |     enum FrameState
24 |     {
25 |         INIT_FRAME,
26 |         SECOND_FRAME,
27 |         REST_FRAME,
28 |         LOST_FRAME
29 |     };
30 | 
31 |     VO(const std::string &strSettingPath);
32 |     VO(const std::string &strSettingPath, const std::string &strGroundTruth);
33 |     ~VO(){}
34 | 
35 |     void ProcessFrames(const cv::Mat& img);
36 |     void ProcessFirstFrame();
37 |     void ProcessSecondFrame();
38 |     void ProcessRestFrames();
39 |     void FeatureExtraction();
40 |     void FeatureTracking();
41 |     double GetAbsoluteScale();
42 |     double getAbsoluteScale(int frame_id);
43 |     void GetGroundTruth();
44 |     bool SetGroundTruth();
45 |     void ShowFeature(const cv::Mat& img);
46 |     inline cv::Mat getTranslation()
47 |     {
48 |         return mT;
49 |     }
50 | 
51 | public:
52 | 
53 |     FrameState mFrameState;
54 |     std::vector<cv::KeyPoint> mvKeyPoints;
55 |     std::vector<cv::KeyPoint> mvLastKeyPoints;
56 |     std::vector<cv::Point2f> mvCurrentPoints;
57 |     std::vector<cv::Point2f> 
mvLastPoints; 58 | std::vector mvMatches; 59 | std::vector mvGoodMatches; 60 | 61 | cv::Mat mLastImage; 62 | cv::Mat mCurrentImage; 63 | 64 | cv::Mat mK; 65 | cv::Point2d mOpticalCenter; 66 | double mFocalLength; 67 | cv::Mat mR; 68 | cv::Mat mT; 69 | 70 | int mCount; 71 | double mScale; 72 | bool bUseDataset; 73 | std::string mstrGroundTruth; 74 | std::ifstream mfGroundTruthStream; 75 | 76 | double x_pre_, y_pre_, z_pre_; 77 | }; 78 | 79 | } //namespace Light_SLAM 80 | #endif // VO_H 81 | -------------------------------------------------------------------------------- /include/VisualOdometry.h: -------------------------------------------------------------------------------- 1 | #ifndef VISUALODOMETRY_H 2 | #define VISUALODOMETRY_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | extern int frame_id; 14 | namespace Light_SLAM 15 | { 16 | 17 | class VisualOdometry 18 | { 19 | 20 | public: 21 | 22 | enum FrameState 23 | { 24 | INIT_FRAME, 25 | SECOND_FRAME, 26 | REST_FRAME, 27 | LOST_FRAME 28 | }; 29 | 30 | VisualOdometry(const std::string &strSettingPath, const std::string &strGroundTruth); 31 | ~VisualOdometry(){} 32 | void ProcessFrames(const cv::Mat& img); 33 | void FeatureExtract(); 34 | void FeatureMatching(); 35 | void FeatureTracking(const std::vector& points1, const std::vector& points2); 36 | void ProcessFirstFrame(); 37 | void ProcessSecondFrame(); 38 | void ProcessRestFrames(); 39 | double GetAbsoluteScale(int frame_id); 40 | void ShowTrajectory(); 41 | inline cv::Mat getTranslation() 42 | { 43 | return mT; 44 | } 45 | 46 | 47 | public: 48 | std::string mstrGroundTruth; 49 | std::string mstrSettingPath; 50 | cv::Mat mK; 51 | cv::Point2d mOpticalCenter; 52 | double mFocalLength; 53 | FrameState mFrameState; 54 | bool bUseDataset; 55 | double mScale; 56 | 57 | cv::Mat mCurrentImage; 58 | cv::Mat mDescriptor; 59 | std::vector mvKeyPoints; 60 | std::vector mvCurrentPoints; 61 | 62 | cv::Mat mLastImage; 63 | cv::Mat mLastDescriptor; 64 | std::vector mvLastKeyPoints; 65 | std::vector mvLastPoints; 66 | 67 | std::vector mvMatches; 68 | cv::Ptr mpMatcher; 69 | cv::Ptr detector; 70 | cv::Ptr descriptor; 71 | std::vector mvGoodMatches; 72 | 73 | cv::Mat mT, mR; 74 | }; 75 | 76 | } //namespace Light_SLAM 77 | #endif // VISUALODOMETRY_H 78 | -------------------------------------------------------------------------------- /launch/VO.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /lib/liblightslam.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Jing-lun/LightSLAM/dcb2da883291bca14b753dc1b882ceabd9a72b45/lib/liblightslam.so -------------------------------------------------------------------------------- /matches.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Jing-lun/LightSLAM/dcb2da883291bca14b753dc1b882ceabd9a72b45/matches.png -------------------------------------------------------------------------------- /package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | light_slam 4 | 0.0.0 5 | The light_slam package 6 | 7 | 8 | 9 | 10 | jinglun 11 | 12 | 13 | 14 | 15 | 16 | TODO 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 
| 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | catkin 52 | cv_bridge 53 | roscpp 54 | std_msgs 55 | std_msgs 56 | roscpp 57 | cv_bridge 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /run_dataset.sh: -------------------------------------------------------------------------------- 1 | ./../../devel/lib/light_slam/dataset /home/jinglun/Downloads/data_odometry_gray/00/image_0/ /home/jinglun/Documents/dataset/poses/00.txt /home/jinglun/viorb_config/config/pointgray_mono.yaml 2 | -------------------------------------------------------------------------------- /src/RunSLAM.cc: -------------------------------------------------------------------------------- 1 | #include "RunSLAM.h" 2 | #include 3 | using namespace std; 4 | using namespace cv; 5 | 6 | namespace Light_SLAM 7 | { 8 | RunSLAM::RunSLAM(VisualOdometry* pVO):mpTest1(pVO), mFontFace(FONT_HERSHEY_PLAIN), mFontScale(1.0), 9 | mFontThickness(1), mText(10, 50) 10 | { 11 | namedWindow("Camera"); 12 | namedWindow("Trajectory"); 13 | mTrajectory = Mat::zeros(800, 800, CV_8UC3); 14 | out = fstream("position_orb.txt", ios::out); 15 | } 16 | 17 | // RunSLAM::RunSLAM(VO* pVO):mpTest1(pVO), mFontFace(FONT_HERSHEY_PLAIN), mFontScale(1.0), 18 | // mFontThickness(1), mText(10, 50) 19 | // { 20 | // namedWindow("Camera"); 21 | // namedWindow("Trajectory"); 22 | // mTrajectory = Mat::zeros(600, 600, CV_8UC3); 23 | // mpTest1->SetGroundTruth(); 24 | // } 25 | 26 | void RunSLAM::vo(const cv::Mat& img) 27 | { 28 | mpTest1->ProcessFrames(img); 29 | cv::Mat t = mpTest1->getTranslation(); 30 | double x = 0, y = 0, z = 0; 31 | if(t.rows != 0) 32 | { 33 | x = t.at(0); 34 | y = t.at(1); 35 | z = t.at(2); 36 | } 37 | out << x << " " << y << " " << z << endl; 38 | 39 | int draw_x = static_cast(x) + 400; 40 | int draw_y = static_cast(z) + 100; 41 | cv::circle(mTrajectory, cv::Point(draw_x, draw_y), 1, CV_RGB(255, 0, 220), 2); 42 | 43 | cv::rectangle(mTrajectory, cv::Point(10, 30), cv::Point(600, 60), CV_RGB(0, 0, 0), CV_FILLED); 44 | sprintf(text, "Coordinates: x = %02fm y = %02fm z = %02fm", x, y, z); 45 | cv::putText(mTrajectory, text, mText, mFontFace, mFontScale, cv::Scalar::all(255), mFontThickness, 8); 46 | 47 | cv::imshow("Camera", img); 48 | cv::imshow("Trajectory", mTrajectory); 49 | 50 | cv::waitKey(1); 51 | } 52 | 53 | RunSLAM::~RunSLAM() 54 | { 55 | if (this->out.is_open()) 56 | this->out.close(); 57 | try 58 | { 59 | cv::imwrite("Trajectory.png", mTrajectory); 60 | } 61 | catch(const std::exception& e) 62 | { 63 | std::cerr << e.what() << '\n'; 64 | } 65 | } 66 | } -------------------------------------------------------------------------------- /src/VO.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "VO.h" 4 | 5 | using namespace cv; 6 | using namespace std; 7 | 8 | int frame_id = 0; 9 | namespace Light_SLAM 10 | { 11 | VO::VO(const std::string &strSettingPath):mFrameState(INIT_FRAME), 12 | mCount(0), bUseDataset(false) 13 | { 14 | cv::FileStorage fSettings(strSettingPath, cv::FileStorage::READ); //Read Setting.yaml 15 | // |fx 0 cx| 16 | // K = |0 fy cy| 17 | // |0 0 1 | 18 | float fx = fSettings["Camera.fx"]; 19 | float fy = fSettings["Camera.fy"]; 20 | float cx = fSettings["Camera.cx"]; 21 | float cy = fSettings["Camera.cy"]; 22 | cv::Mat K = cv::Mat::eye(3,3,CV_32F);// initialized as diagnal matrix 23 | K.at(0,0) = fx; 24 | K.at(1,1) = fy; 25 | K.at(0,2) = cx; 26 | K.at(1,2) = cy; 27 | K.copyTo(mK); 28 
| mOpticalCenter = cv::Point2d(cx, cy); 29 | mFocalLength = static_cast(fx); 30 | } 31 | 32 | VO::VO(const std::string &strSettingPath, const std::string &strGroundTruth): 33 | mFrameState(INIT_FRAME), mCount(0), bUseDataset(true) 34 | { 35 | mstrGroundTruth = strGroundTruth; 36 | cv::FileStorage fSettings(strSettingPath, cv::FileStorage::READ); //Read Setting.yaml 37 | // |fx 0 cx| 38 | // K = |0 fy cy| 39 | // |0 0 1 | 40 | float fx = fSettings["Camera2.fx"]; 41 | float fy = fSettings["Camera2.fy"]; 42 | float cx = fSettings["Camera2.cx"]; 43 | float cy = fSettings["Camera2.cy"]; 44 | cv::Mat K = cv::Mat::eye(3,3,CV_32F);// initialized as diagnal matrix 45 | K.at(0,0) = fx; 46 | K.at(1,1) = fy; 47 | K.at(0,2) = cx; 48 | K.at(1,2) = cy; 49 | K.copyTo(mK); 50 | mOpticalCenter = cv::Point2d(cx, cy); 51 | mFocalLength = static_cast(fx); 52 | } 53 | 54 | void VO::ProcessFrames(const Mat& img) 55 | { 56 | mCurrentImage = img; 57 | switch(mFrameState) 58 | { 59 | case INIT_FRAME: 60 | ProcessFirstFrame(); 61 | break; 62 | case SECOND_FRAME: 63 | try 64 | { 65 | ProcessSecondFrame(); 66 | } 67 | catch(const std::exception& e) 68 | { 69 | std::cerr << e.what() << '\n'; 70 | } 71 | break; 72 | case REST_FRAME: 73 | try 74 | { 75 | ProcessRestFrames(); 76 | } 77 | catch(const std::exception& e) 78 | { 79 | std::cerr << e.what() << '\n'; 80 | } 81 | break; 82 | case LOST_FRAME: 83 | cerr << "VO Lost!!" << endl; 84 | break; 85 | } 86 | mLastImage = mCurrentImage; 87 | } 88 | 89 | void VO::ProcessFirstFrame() 90 | { 91 | FeatureExtraction(); 92 | mFrameState = FrameState::SECOND_FRAME; 93 | } 94 | 95 | void VO::ProcessSecondFrame() 96 | { 97 | // FeatureExtraction(); 98 | FeatureTracking(); 99 | Mat E, R, T, mask; 100 | 101 | E = findEssentialMat(mvLastPoints, mvCurrentPoints, mFocalLength, mOpticalCenter, RANSAC, 0.999, 1.0, mask); 102 | recoverPose(E, mvLastPoints, mvCurrentPoints, R, T, mFocalLength, mOpticalCenter); 103 | R.copyTo(mR); 104 | T.copyTo(mT); 105 | cout< 0.1) 123 | { 124 | mT = mT + mScale * (mR * T); 125 | mR = R * mR; 126 | } 127 | } 128 | else 129 | { 130 | mT = mT + mR * T; 131 | mR = R * mR; 132 | } 133 | if(mvLastKeyPoints.size() < 2000) 134 | { 135 | FeatureExtraction(); 136 | FeatureTracking(); 137 | } 138 | else 139 | { 140 | mvLastPoints = mvCurrentPoints; 141 | } 142 | } 143 | 144 | void VO::GetGroundTruth() 145 | { 146 | std::string line; 147 | getline(mfGroundTruthStream, line); 148 | std::stringstream ss; 149 | ss << line; 150 | double r1, r2, r3, r4, r5, r6, r7, r8, r9; 151 | ss >> r1 >> r2 >> r3 >> x_pre_ >> r4 >> r5 >> r6 >> y_pre_ >> r7 >> r8 >> r9 >> z_pre_; 152 | } 153 | 154 | double VO::GetAbsoluteScale() 155 | { 156 | std::string line; 157 | 158 | if (std::getline(this->mfGroundTruthStream, line)) 159 | { 160 | double r1, r2, r3, r4, r5, r6, r7, r8, r9; 161 | double x = 0, y = 0, z = 0; 162 | std::istringstream in(line); 163 | in >> r1 >> r2 >> r3 >> x >> r4 >> r5 >> r6 >> y >> r7 >> r8 >> r9 >> z; 164 | double scale = sqrt((x - x_pre_) * (x - x_pre_) + (y - y_pre_) * (y - y_pre_) + (z - z_pre_) * (z - z_pre_)); 165 | x_pre_ = x; 166 | y_pre_ = y; 167 | z_pre_ = z; 168 | return scale; 169 | } 170 | else 171 | { 172 | return 0; 173 | } 174 | } 175 | 176 | void VO::FeatureExtraction() 177 | { 178 | int fast_threshold = 20; 179 | bool non_max_suppression = true; 180 | FAST(mCurrentImage, mvKeyPoints, fast_threshold, non_max_suppression); 181 | KeyPoint::convert(mvKeyPoints, mvCurrentPoints); 182 | } 183 | 184 | void VO::FeatureTracking() 185 | { 186 | const double 
klt_win_size = 21.0; 187 | const int klt_max_iter = 30; 188 | const double kly_eps = 0.001; 189 | 190 | vector status; 191 | vector error; 192 | vector min_eig_vec; 193 | TermCriteria criteria(TermCriteria::COUNT + TermCriteria::EPS, klt_max_iter, kly_eps); 194 | calcOpticalFlowPyrLK(mLastImage, mCurrentImage, 195 | mvCurrentPoints, mvLastPoints, 196 | status, error, 197 | cv::Size2i(klt_win_size, klt_win_size), 198 | 4, criteria, 0); 199 | } 200 | 201 | bool VO::SetGroundTruth() 202 | { 203 | this->mfGroundTruthStream.open(mstrGroundTruth); 204 | if (!this->mfGroundTruthStream.is_open()) 205 | { 206 | return false; 207 | } else 208 | { 209 | return true; 210 | } 211 | } 212 | 213 | double VO::getAbsoluteScale(int frame_id) 214 | { 215 | std::string line; 216 | int i = 0; 217 | std::ifstream ground_truth(mstrGroundTruth); 218 | double x = 0, y = 0, z = 0; 219 | double x_prev = 0, y_prev = 0, z_prev = 0; 220 | if (ground_truth.is_open()) 221 | { 222 | while ((std::getline(ground_truth, line)) && (i <= frame_id)) 223 | { 224 | z_prev = z; 225 | x_prev = x; 226 | y_prev = y; 227 | std::istringstream in(line); 228 | for (int j = 0; j < 12; j++) { 229 | in >> z; 230 | if (j == 7) y = z; 231 | if (j == 3) x = z; 232 | } 233 | i++; 234 | } 235 | ground_truth.close(); 236 | } 237 | 238 | else { 239 | std::cerr<< "Unable to open file"; 240 | return 0; 241 | } 242 | 243 | return sqrt((x - x_prev)*(x - x_prev) + (y - y_prev)*(y - y_prev) + (z - z_prev)*(z - z_prev)); 244 | } 245 | 246 | } // namespace Light_SLAM -------------------------------------------------------------------------------- /src/VisualOdometry.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "VisualOdometry.h" 5 | 6 | using namespace cv; 7 | using namespace std; 8 | // int frame_id = 0; 9 | int iiii = 0; 10 | namespace Light_SLAM 11 | { 12 | VisualOdometry::VisualOdometry(const std::string &strSettingPath, const std::string &strGroundTruth): 13 | mstrGroundTruth(strGroundTruth), mstrSettingPath(strSettingPath), 14 | mFrameState(INIT_FRAME), detector(ORB::create(8000)), descriptor(ORB::create(8000)), 15 | mpMatcher(DescriptorMatcher::create ( "BruteForce-Hamming" )), bUseDataset(true) 16 | { 17 | cv::FileStorage fSettings(mstrSettingPath, cv::FileStorage::READ); //Read Setting.yaml 18 | // |fx 0 cx| 19 | // K = |0 fy cy| 20 | // |0 0 1 | 21 | float fx = fSettings["Camera2.fx"]; 22 | float fy = fSettings["Camera2.fy"]; 23 | float cx = fSettings["Camera2.cx"]; 24 | float cy = fSettings["Camera2.cy"]; 25 | Mat K = cv::Mat::eye(3,3,CV_32F); // initialized as diagnal matrix 26 | K.at(0,0) = fx; 27 | K.at(1,1) = fy; 28 | K.at(0,2) = cx; 29 | K.at(1,2) = cy; 30 | K.copyTo(mK); 31 | mOpticalCenter = cv::Point2d(cx, cy); 32 | mFocalLength = static_cast(fx); 33 | } 34 | 35 | void VisualOdometry::ProcessFrames(const Mat& img) 36 | { 37 | mCurrentImage = img; 38 | switch(mFrameState) 39 | { 40 | case INIT_FRAME: 41 | ProcessFirstFrame(); 42 | break; 43 | case SECOND_FRAME: 44 | try 45 | { 46 | ProcessSecondFrame(); 47 | } 48 | catch(const std::exception& e) 49 | { 50 | std::cerr << e.what() << '\n'; 51 | } 52 | break; 53 | case REST_FRAME: 54 | try 55 | { 56 | ProcessRestFrames(); 57 | } 58 | catch(const std::exception& e) 59 | { 60 | std::cerr << e.what() << '\n'; 61 | } 62 | break; 63 | case LOST_FRAME: 64 | cerr << "VO Lost!!" 
<< endl; 65 | break; 66 | } 67 | mLastImage = mCurrentImage; 68 | mLastDescriptor.release(); 69 | mvLastKeyPoints.clear(); 70 | mLastDescriptor = mDescriptor; 71 | mvLastKeyPoints = mvKeyPoints; 72 | mDescriptor.release(); 73 | mvKeyPoints.clear(); 74 | } 75 | 76 | void VisualOdometry::ProcessFirstFrame() 77 | { 78 | FeatureExtract(); 79 | mFrameState = FrameState::SECOND_FRAME; 80 | } 81 | 82 | void VisualOdometry::ProcessSecondFrame() 83 | { 84 | FeatureExtract(); 85 | FeatureMatching(); 86 | 87 | vector points1; 88 | vector points2; 89 | 90 | for ( int i = 0; i < ( int ) mvGoodMatches.size(); i++ ) 91 | { 92 | points1.push_back ( mvLastKeyPoints[mvGoodMatches[i].queryIdx].pt ); 93 | points2.push_back ( mvKeyPoints[mvGoodMatches[i].trainIdx].pt ); 94 | } 95 | 96 | // FeatureTracking(points1, points2); 97 | 98 | Mat E, R, T, mask; 99 | 100 | E = findEssentialMat(points2, points1, mFocalLength, mOpticalCenter, RANSAC, 0.999, 1.0, mask); 101 | recoverPose(E, points2, points1, R, T, mFocalLength, mOpticalCenter); 102 | R.copyTo(mR); 103 | T.copyTo(mT); 104 | mFrameState = FrameState::REST_FRAME; 105 | } 106 | 107 | void VisualOdometry::ProcessRestFrames() 108 | { 109 | FeatureExtract(); 110 | FeatureMatching(); 111 | 112 | vector points1; 113 | vector points2; 114 | 115 | for ( int i = 0; i < ( int ) mvGoodMatches.size(); i++ ) 116 | { 117 | points1.push_back ( mvLastKeyPoints[mvGoodMatches[i].queryIdx].pt ); 118 | points2.push_back ( mvKeyPoints[mvGoodMatches[i].trainIdx].pt ); 119 | } 120 | 121 | // FeatureTracking(points1, points2); 122 | 123 | Mat E, R, T, mask; 124 | 125 | E = findEssentialMat(points2, points1, mFocalLength, mOpticalCenter, RANSAC, 0.999, 1.0, mask); 126 | recoverPose(E, points2, points1, R, T, mFocalLength, mOpticalCenter); 127 | mScale = GetAbsoluteScale(frame_id); 128 | if(bUseDataset) 129 | { 130 | if(mScale > 0.1) 131 | { 132 | mT = mT + mScale * (mR * T); 133 | mR = R * mR; 134 | } 135 | } 136 | else 137 | { 138 | mT = mT + mR * T; 139 | mR = R * mR; 140 | } 141 | if(mvLastKeyPoints.size() < 1000) 142 | { 143 | FeatureExtract(); 144 | FeatureMatching(); 145 | } 146 | } 147 | 148 | void VisualOdometry::FeatureExtract() 149 | { 150 | detector->detect ( mCurrentImage, mvKeyPoints ); 151 | descriptor->compute ( mCurrentImage, mvKeyPoints, mDescriptor ); 152 | } 153 | 154 | void VisualOdometry::FeatureMatching() 155 | { 156 | mpMatcher->match ( mLastDescriptor, mDescriptor, mvMatches ); 157 | 158 | double min_dist=10000, max_dist=0; 159 | 160 | for ( int i = 0; i < mLastDescriptor.rows; i++ ) 161 | { 162 | double dist = mvMatches[i].distance; 163 | if ( dist < min_dist ) min_dist = dist; 164 | if ( dist > max_dist ) max_dist = dist; 165 | } 166 | 167 | min_dist = min_element( mvMatches.begin(), mvMatches.end(), [](const DMatch& m1, const DMatch& m2) {return m1.distancedistance; 168 | max_dist = max_element( mvMatches.begin(), mvMatches.end(), [](const DMatch& m1, const DMatch& m2) {return m1.distancedistance; 169 | 170 | mvGoodMatches.clear(); 171 | 172 | for ( int i = 0; i < mLastDescriptor.rows; i++ ) 173 | { 174 | if ( mvMatches[i].distance <= max ( 2*min_dist, 30.0 ) ) 175 | { 176 | mvGoodMatches.push_back ( mvMatches[i] ); 177 | } 178 | } 179 | } 180 | 181 | void VisualOdometry::FeatureTracking(const vector& points1, const vector& points2) 182 | { 183 | const double klt_win_size = 21.0; 184 | const int klt_max_iter = 30; 185 | const double kly_eps = 0.001; 186 | 187 | vector status; 188 | vector error; 189 | vector min_eig_vec; 190 | TermCriteria 
criteria(TermCriteria::COUNT + TermCriteria::EPS, klt_max_iter, kly_eps); 191 | calcOpticalFlowPyrLK(mLastImage, mCurrentImage, 192 | points1, points2, 193 | status, error, 194 | cv::Size2i(klt_win_size, klt_win_size), 195 | 4, criteria, 0); 196 | } 197 | 198 | double VisualOdometry::GetAbsoluteScale(int frame_id) 199 | { 200 | string line; 201 | int i = 0; 202 | ifstream ground_truth(mstrGroundTruth); 203 | double x = 0, y = 0, z = 0; 204 | double x_prev = 0, y_prev = 0, z_prev = 0; 205 | if (ground_truth.is_open()) 206 | { 207 | while ((getline(ground_truth, line)) && (i <= frame_id)) 208 | { 209 | z_prev = z; 210 | x_prev = x; 211 | y_prev = y; 212 | istringstream in(line); 213 | for (int j = 0; j < 12; j++) { 214 | in >> z; 215 | if (j == 7) y = z; 216 | if (j == 3) x = z; 217 | } 218 | i++; 219 | } 220 | ground_truth.close(); 221 | } 222 | 223 | else { 224 | cerr<< "Unable to open file"; 225 | return 0; 226 | } 227 | 228 | return sqrt((x - x_prev)*(x - x_prev) + (y - y_prev)*(y - y_prev) + (z - z_prev)*(z - z_prev)); 229 | } 230 | 231 | } //namespace Light_SLAM -------------------------------------------------------------------------------- /src/VisualOdometry.cc.bak: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "VisualOdometry.h" 5 | 6 | using namespace cv; 7 | using namespace std; 8 | // int frame_id = 0; 9 | int iiii = 0; 10 | namespace Light_SLAM 11 | { 12 | VisualOdometry::VisualOdometry(const std::string &strSettingPath, const std::string &strGroundTruth): 13 | mstrGroundTruth(strGroundTruth), mstrSettingPath(strSettingPath), 14 | mFrameState(INIT_FRAME), detector(ORB::create()), descriptor(ORB::create()), 15 | mpMatcher(DescriptorMatcher::create ( "BruteForce-Hamming" )) 16 | { 17 | cv::FileStorage fSettings(mstrSettingPath, cv::FileStorage::READ); //Read Setting.yaml 18 | // |fx 0 cx| 19 | // K = |0 fy cy| 20 | // |0 0 1 | 21 | float fx = fSettings["Camera2.fx"]; 22 | float fy = fSettings["Camera2.fy"]; 23 | float cx = fSettings["Camera2.cx"]; 24 | float cy = fSettings["Camera2.cy"]; 25 | Mat K = cv::Mat::eye(3,3,CV_32F); // initialized as diagnal matrix 26 | K.at(0,0) = fx; 27 | K.at(1,1) = fy; 28 | K.at(0,2) = cx; 29 | K.at(1,2) = cy; 30 | K.copyTo(mK); 31 | mOpticalCenter = cv::Point2d(cx, cy); 32 | mFocalLength = static_cast(fx); 33 | } 34 | 35 | void VisualOdometry::ProcessFrames(const Mat& img) 36 | { 37 | mCurrentImage = img; 38 | switch(mFrameState) 39 | { 40 | case INIT_FRAME: 41 | ProcessFirstFrame(); 42 | break; 43 | case SECOND_FRAME: 44 | try 45 | { 46 | ProcessSecondFrame(); 47 | } 48 | catch(const std::exception& e) 49 | { 50 | std::cerr << e.what() << '\n'; 51 | } 52 | break; 53 | case REST_FRAME: 54 | try 55 | { 56 | ProcessRestFrames(); 57 | } 58 | catch(const std::exception& e) 59 | { 60 | std::cerr << e.what() << '\n'; 61 | } 62 | break; 63 | case LOST_FRAME: 64 | cerr << "VO Lost!!" 
<< endl; 65 | break; 66 | } 67 | mLastImage = mCurrentImage; 68 | } 69 | 70 | void VisualOdometry::ProcessFirstFrame() 71 | { 72 | FeatureExtract(); 73 | mFrameState = FrameState::SECOND_FRAME; 74 | 75 | mLastDescriptor.release(); 76 | mvLastKeyPoints.clear(); 77 | mLastDescriptor = mDescriptor; 78 | mvLastKeyPoints = mvKeyPoints; 79 | mDescriptor.release(); 80 | mvKeyPoints.clear(); 81 | } 82 | 83 | void VisualOdometry::ProcessSecondFrame() 84 | { 85 | FeatureExtract(); 86 | FeatureTracking(); 87 | 88 | vector points1; 89 | vector points2; 90 | 91 | for ( int i = 0; i < ( int ) mvGoodMatches.size(); i++ ) 92 | { 93 | points1.push_back ( mvLastKeyPoints[mvGoodMatches[i].queryIdx].pt ); 94 | points2.push_back ( mvKeyPoints[mvGoodMatches[i].trainIdx].pt ); 95 | } 96 | cout< points1; 128 | // vector points2; 129 | 130 | // for ( int i = 0; i < ( int ) mvGoodMatches.size(); i++ ) 131 | // { 132 | // points1.push_back ( mvLastKeyPoints[mvGoodMatches[i].queryIdx].pt ); 133 | // points2.push_back ( mvKeyPoints[mvGoodMatches[i].trainIdx].pt ); 134 | // } 135 | // cout<detect ( mCurrentImage, mvKeyPoints ); 164 | descriptor->compute ( mCurrentImage, mvKeyPoints, mDescriptor ); 165 | } 166 | 167 | void VisualOdometry::FeatureTracking() 168 | { 169 | mpMatcher->match ( mLastDescriptor, mDescriptor, mvMatches ); 170 | 171 | double min_dist=10000, max_dist=0; 172 | 173 | for ( int i = 0; i < mLastDescriptor.rows; i++ ) 174 | { 175 | double dist = mvMatches[i].distance; 176 | if ( dist < min_dist ) min_dist = dist; 177 | if ( dist > max_dist ) max_dist = dist; 178 | } 179 | 180 | min_dist = min_element( mvMatches.begin(), mvMatches.end(), [](const DMatch& m1, const DMatch& m2) {return m1.distancedistance; 181 | max_dist = max_element( mvMatches.begin(), mvMatches.end(), [](const DMatch& m1, const DMatch& m2) {return m1.distancedistance; 182 | 183 | mvGoodMatches.clear(); 184 | 185 | for ( int i = 0; i < mLastDescriptor.rows; i++ ) 186 | { 187 | if ( mvMatches[i].distance <= max ( 2*min_dist, 30.0 ) )//最大距离 188 | { 189 | mvGoodMatches.push_back ( mvMatches[i] ); 190 | } 191 | } 192 | cout<> z; 213 | if (j == 7) y = z; 214 | if (j == 3) x = z; 215 | } 216 | i++; 217 | } 218 | ground_truth.close(); 219 | } 220 | 221 | else { 222 | cerr<< "Unable to open file"; 223 | return 0; 224 | } 225 | 226 | return sqrt((x - x_prev)*(x - x_prev) + (y - y_prev)*(y - y_prev) + (z - z_prev)*(z - z_prev)); 227 | } 228 | 229 | } //namespace Light_SLAM -------------------------------------------------------------------------------- /src/new.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | #include "VO.h" 15 | 16 | // #define RED cv::Scalar(0, 0, 255) 17 | // #define BLUE cv::Scalar(255, 0, 0) 18 | 19 | void load_pose(const std::string& path, std::vector>& poses); 20 | 21 | int main() 22 | { 23 | const std::string testing_path = "/home/jinglun/slam_ws/src/light_slam/position_orb.txt"; 24 | // const std::string testing_path = "/home/jinglun/slam_ws/devel/lib/light_slam/position.txt"; 25 | const std::string ground_truth_path = "/home/jinglun/Documents/dataset/poses/00.txt"; 26 | const char *frame_viewer_name = "Road facing camera"; 27 | const char *trajectory_viewer_name = "Trajectory"; 28 | 29 | char test_text[200]; 30 | char truth_text[200]; 31 | // Define trajectory params 32 | int font_face = cv::FONT_HERSHEY_PLAIN; 33 | 
double font_scale = 1; 34 | int thickness = 1; 35 | cv::Point test_org(10, 50); 36 | cv::Point truth_org(10, 70); 37 | 38 | std::vector> positions; 39 | load_pose(testing_path, positions); 40 | 41 | std::vector> poses; 42 | load_pose(ground_truth_path, poses); 43 | 44 | // Define show windows 45 | cv::namedWindow(frame_viewer_name, cv::WINDOW_AUTOSIZE); 46 | cv::namedWindow(trajectory_viewer_name, cv::WINDOW_AUTOSIZE); 47 | // Define traj map 48 | cv::Mat traj = cv::Mat::zeros(800, 800, CV_8UC3); 49 | 50 | double x = 0, y = 0, z = 0; 51 | double xt = 0, yt = 0, zt = 0; 52 | int size = positions.size(); 53 | 54 | for(int num=0; num position; 57 | position = positions[num]; 58 | x = position[0]; 59 | y = position[1]; 60 | z = position[2]; 61 | 62 | int draw_x = static_cast(x) + 300; 63 | int draw_y = static_cast(z) + 200; 64 | 65 | std::vector pose; 66 | pose = poses[num]; 67 | xt = pose[3]; 68 | yt = pose[7]; 69 | zt = pose[11]; 70 | 71 | int draw_xt = static_cast(xt) + 300; 72 | int draw_yt = static_cast(zt) + 200; 73 | 74 | cv::rectangle(traj, cv::Point(10, 30), cv::Point(600, 100), CV_RGB(0, 0, 0), CV_FILLED); 75 | 76 | cv::circle(traj, cv::Point(draw_x, draw_y), 0.6, cv::Scalar(255, 0, 255), 2); 77 | sprintf(test_text, "Test Position: x = %02fm y = %02fm z = %02fm", x, y, z); 78 | cv::putText(traj, test_text, test_org, font_face, font_scale, cv::Scalar(255, 0, 255), thickness, 8); 79 | 80 | cv::circle(traj, cv::Point(draw_xt, draw_yt), 0.6, cv::Scalar(255, 255, 0), 2); 81 | sprintf(truth_text, "Truth Position: x = %02fm y = %02fm z = %02fm", xt, yt, zt); 82 | cv::putText(traj, truth_text, truth_org, font_face, font_scale, cv::Scalar(255, 255, 0), thickness, 8); 83 | 84 | // cv::imshow("Camera", img); 85 | cv::imshow("Trajectory", traj); 86 | cv::waitKey(1); 87 | } 88 | 89 | // for(int num=0; num pose; 92 | // pose = poses[num]; 93 | // xt = pose[3]; 94 | // yt = pose[7]; 95 | // zt = pose[11]; 96 | 97 | // int draw_xt = static_cast(xt) + 300; 98 | // int draw_yt = static_cast(zt) + 100; 99 | // cv::circle(traj, cv::Point(draw_xt, draw_yt), 0.6, CV_RGB(0, 0, 255), 2); 100 | // sprintf(truth_text, "Coordinates: x = %02fm y = %02fm z = %02fm", xt, yt, zt); 101 | // cv::putText(traj, truth_text, truth_org, font_face, font_scale, cv::Scalar(0, 0, 255), thickness, 8); 102 | 103 | // // cv::imshow("Camera", img); 104 | // cv::imshow("Trajectory", traj); 105 | // cv::waitKey(1); 106 | // } 107 | 108 | try 109 | { 110 | cv::imwrite("Trajectory.png", traj); 111 | } 112 | catch(const std::exception& e) 113 | { 114 | std::cerr << e.what() << '\n'; 115 | } 116 | 117 | return 0; 118 | } 119 | 120 | void load_pose(const std::string& path, std::vector>& poses) 121 | { 122 | std::cout << "Loading path file:" << path << std::endl; 123 | std::ifstream myfile(path); 124 | std::string line; 125 | if(myfile.is_open()) 126 | { 127 | while(getline(myfile, line)) 128 | { 129 | std::stringstream ss; 130 | ss << line; 131 | std::vector v; 132 | v.resize(12); 133 | for(int i=0; i<12; i++) 134 | { 135 | double value; 136 | ss >> value; 137 | v[i] = value; 138 | } 139 | poses.push_back(v); 140 | } 141 | myfile.close(); 142 | } 143 | else 144 | { 145 | std::cout << "Unable to open file" << std::endl; 146 | } 147 | 148 | } -------------------------------------------------------------------------------- /src/ros_mono.cc: -------------------------------------------------------------------------------- 1 | /** 2 | * This file is part of ORB-SLAM2. 
3 | * 4 | * Copyright (C) 2014-2016 Raúl Mur-Artal (University of Zaragoza) 5 | * For more information see 6 | * 7 | * ORB-SLAM2 is free software: you can redistribute it and/or modify 8 | * it under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * ORB-SLAM2 is distributed in the hope that it will be useful, 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with ORB-SLAM2. If not, see . 19 | */ 20 | 21 | 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | 28 | #include 29 | #include 30 | 31 | #include 32 | #include "VisualOdometry.h" 33 | #include "VO.h" 34 | #include "RunSLAM.h" 35 | using namespace std; 36 | 37 | class ImageGrabber 38 | { 39 | public: 40 | ImageGrabber(Light_SLAM::RunSLAM* pTest):mpTest(pTest){} 41 | 42 | void GrabImage(const sensor_msgs::ImageConstPtr& msg); 43 | 44 | Light_SLAM::RunSLAM* mpTest; 45 | }; 46 | 47 | int main(int argc, char** argv) 48 | { 49 | if(argc != 4) 50 | { 51 | std::cerr << std::endl << "Usage: rosrun light_slam light_slam path_to_settings" << std::endl; 52 | ros::shutdown(); 53 | return 1; 54 | } 55 | const char *SetiingsFile = argv[1]; 56 | 57 | ros::init(argc, argv, "LightSLAM"); 58 | ros::start(); 59 | 60 | Light_SLAM::VisualOdometry* EF = new Light_SLAM::VisualOdometry(SetiingsFile); 61 | // Light_SLAM::VO* EF = new Light_SLAM::VO(SetiingsFile); 62 | Light_SLAM::RunSLAM Test(EF); 63 | 64 | ImageGrabber igb(&Test); 65 | 66 | ros::NodeHandle nodeHandler; 67 | ros::Subscriber sub = nodeHandler.subscribe("/camera/image_raw", 1, &ImageGrabber::GrabImage, &igb); 68 | 69 | ros::spin(); 70 | 71 | ros::shutdown(); 72 | 73 | return 0; 74 | } 75 | 76 | void ImageGrabber::GrabImage(const sensor_msgs::ImageConstPtr& msg) 77 | { 78 | // Copy the ros image message to cv::Mat. 
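// Note: cv_bridge::toCvShare can return a view that shares the image data with the ROS message when no encoding conversion is needed; use cv_bridge::toCvCopy instead if a mutable, independently owned cv::Mat is required.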
79 | cv_bridge::CvImageConstPtr cv_ptr; 80 | try 81 | { 82 | cv_ptr = cv_bridge::toCvShare(msg); 83 | } 84 | catch (cv_bridge::Exception& e) 85 | { 86 | ROS_ERROR("cv_bridge exception: %s", e.what()); 87 | return; 88 | } 89 | mpTest->vo(cv_ptr->image); 90 | } 91 | 92 | 93 | -------------------------------------------------------------------------------- /src/run_dataset.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include "VisualOdometry.h" 14 | // #include "VO.h" 15 | #include "RunSLAM.h" 16 | 17 | int main(int argc, char* argv[]) 18 | { 19 | if(argc != 4) 20 | { 21 | std::cout << argc; 22 | std::cerr << std::endl << "Usage: please add path_to_dataset_image path_to_dataset_groundtruth path_to_settings" << std::endl; 23 | return 1; 24 | } 25 | const std::string image_list_folder = argv[1]; 26 | const std::string ground_truth_path = argv[2]; 27 | const std::string SetiingsFile = argv[3]; 28 | 29 | Light_SLAM::VisualOdometry* EF = new Light_SLAM::VisualOdometry(SetiingsFile, ground_truth_path); 30 | // Light_SLAM::VO* EF = new Light_SLAM::VO(SetiingsFile, ground_truth_path); 31 | Light_SLAM::RunSLAM Test(EF); 32 | 33 | while (true) 34 | { 35 | std::stringstream ss; 36 | ss << image_list_folder << std::setw(6) << std::setfill('0') << frame_id << ".png"; 37 | 38 | cv::Mat img = cv::imread(ss.str().c_str(), 0); 39 | if(img.empty()) 40 | break; 41 | 42 | // EF->ProcessFrames(img); 43 | Test.vo(img); 44 | frame_id++; 45 | } 46 | 47 | return 0; 48 | } -------------------------------------------------------------------------------- /src/test_dataset.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include "VO.h" 14 | 15 | int main() 16 | { 17 | const char *position_file_name = "position.txt"; 18 | const char *frame_viewer_name = "Road facing camera"; 19 | const char *trajectory_viewer_name = "Trajectory"; 20 | const std::string SetiingsFile = "/home/jinglun/viorb_config/config/pointgray_mono.yaml"; 21 | const std::string image_list_folder = "/home/jinglun/Downloads/data_odometry_gray/00/image_0/"; 22 | const std::string ground_truth_path = "/home/jinglun/Documents/dataset/poses/00.txt"; 23 | 24 | char text[200]; 25 | 26 | // VO instance 27 | Light_SLAM::VO* EF = new Light_SLAM::VO(SetiingsFile, ground_truth_path); 28 | 29 | // Define trajectory params 30 | int font_face = cv::FONT_HERSHEY_PLAIN; 31 | double font_scale = 1; 32 | int thickness = 1; 33 | cv::Point text_org(10, 50); 34 | 35 | // Define show windows 36 | cv::namedWindow(frame_viewer_name, cv::WINDOW_AUTOSIZE); 37 | cv::namedWindow(trajectory_viewer_name, cv::WINDOW_AUTOSIZE); 38 | // Define traj map 39 | cv::Mat traj = cv::Mat::zeros(600, 600, CV_8UC3); 40 | std::fstream out(position_file_name, std::ios::out); 41 | 42 | double x = 0, y = 0, z = 0; 43 | 44 | while (true) 45 | { 46 | std::stringstream ss; 47 | ss << image_list_folder << std::setw(6) << std::setfill('0') << frame_id << ".png"; 48 | 49 | cv::Mat img = cv::imread(ss.str().c_str(), 0); 50 | if(img.empty()) 51 | break; 52 | 53 | EF->ProcessFrames(img); 54 | cv::Mat t = EF->getTranslation(); 55 | double x = 0, y = 0, z = 0; 56 | if(t.rows != 0) 57 | { 58 | x = t.at(0); 59 | y = t.at(1); 60 | z = 
t.at(2); 61 | } 62 | out << x << " " << y << " " << z << std::endl; 63 | 64 | int draw_x = static_cast(x) + 300; 65 | int draw_y = static_cast(z) + 100; 66 | cv::circle(traj, cv::Point(draw_x, draw_y), 0.6, CV_RGB(255, 0, 220), 2); 67 | 68 | cv::rectangle(traj, cv::Point(10, 30), cv::Point(590, 50), CV_RGB(0, 0, 0), CV_FILLED); 69 | sprintf(text, "Coordinates: x = %02fm y = %02fm z = %02fm", x, y, z); 70 | cv::putText(traj, text, text_org, font_face, font_scale, cv::Scalar::all(255), thickness, 8); 71 | 72 | // cv::imshow("Camera", img); 73 | cv::imshow("Trajectory", traj); 74 | cv::waitKey(1); 75 | frame_id++; 76 | } 77 | try 78 | { 79 | cv::imwrite("Trajectory.png", traj); 80 | } 81 | catch(const std::exception& e) 82 | { 83 | std::cerr << e.what() << '\n'; 84 | } 85 | out.close(); 86 | 87 | return 0; 88 | } --------------------------------------------------------------------------------
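Note on the settings file: run_dataset.sh and test_dataset.cc point at /home/jinglun/viorb_config/config/pointgray_mono.yaml, which is not included in this repository. The constructors in src/VO.cc and src/VisualOdometry.cc read the intrinsics through cv::FileStorage from the keys Camera.fx/fy/cx/cy (live camera) and Camera2.fx/fy/cx/cy (dataset camera), so a minimal settings file could look like the sketch below. The numeric values are placeholders (the commonly quoted intrinsics for the KITTI grayscale odometry camera, sequence 00); replace them with your own calibration.
```
%YAML:1.0

# Live monocular camera (used by the single-argument VO constructor)
Camera.fx: 718.856
Camera.fy: 718.856
Camera.cx: 607.1928
Camera.cy: 185.2157

# Dataset camera (used by the constructor that also takes a ground-truth path)
Camera2.fx: 718.856
Camera2.fy: 718.856
Camera2.cx: 607.1928
Camera2.cy: 185.2157
```
The ground-truth file passed alongside it is expected in the KITTI poses format: one line per frame containing the 12 row-major entries of the 3x4 pose matrix; GetAbsoluteScale() reads entries 3, 7, and 11 of each row as the x, y, z translation.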