├── .gitignore ├── example ├── run_example.sh └── download_dataset.sh ├── src ├── visualizer │ ├── shaders │ │ ├── render_points.frag │ │ ├── colored_vertices.frag │ │ ├── render_points.vert │ │ ├── draw_mesh.vert │ │ ├── colored_vertices.vert │ │ └── draw_mesh.frag │ ├── visualizer.h │ └── visualizer.cpp ├── fastcd │ ├── shaders │ │ ├── project_mesh.frag │ │ └── project_mesh.vert │ ├── image_region.h │ ├── image.cpp │ ├── regions_matcher.h │ ├── depth_projector.h │ ├── image_sequence.cpp │ ├── depth_projector.cpp │ ├── point_covariance2d.cpp │ ├── image.h │ ├── image_sequence.h │ ├── change_detector.h │ ├── regions_matcher_hist.h │ ├── point_covariance3d.h │ ├── point_covariance2d.h │ ├── point_covariance3d.cpp │ ├── regions3d_projector.h │ ├── change_detector.cpp │ ├── mesh.h │ ├── camera.h │ ├── processed_image.h │ ├── camera.cpp │ ├── mesh.cpp │ ├── regions3d_projector.cpp │ ├── regions_matcher_hist.cpp │ └── processed_image.cpp ├── utils │ ├── obj_reader.h │ └── obj_reader.cpp └── example.cpp ├── package.xml ├── .gitlab-ci.yml ├── LICENSE.txt ├── .clang-format ├── CMakeLists.txt └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | bin/ 2 | docs/ -------------------------------------------------------------------------------- /example/run_example.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ../bin/fastcd_example dataset/statue -------------------------------------------------------------------------------- /src/visualizer/shaders/render_points.frag: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | out vec4 color; 4 | 5 | void main() { 6 | color = vec4(1.0, 0.0, 0.0, 1.0); 7 | } -------------------------------------------------------------------------------- /src/visualizer/shaders/colored_vertices.frag: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | in vec4 vertexColor; 4 | 5 | out vec4 color; 6 | 7 | void main() { 8 | color = vertexColor; 9 | } -------------------------------------------------------------------------------- /src/fastcd/shaders/project_mesh.frag: -------------------------------------------------------------------------------- 1 | #version 330 2 | 3 | in vec4 pos; 4 | out vec4 pos_out; 5 | 6 | void main() { 7 | pos_out = vec4(pos.xyz, 1.0); // that's it! position and valid flag. 
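  // Note (added comment): texels never covered by the mesh keep the framebuffer clear value
  // (w == 0 by default), so the w component acts as the validity flag that
  // render_points.vert later tests with point.w > 0.5.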
8 | } 9 | -------------------------------------------------------------------------------- /example/download_dataset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir dataset 4 | 5 | wget http://www.ipb.uni-bonn.de/html/projects/changedetection2017/changedetection2017.zip -O dataset/changedetection2017.zip 6 | 7 | unzip dataset/changedetection2017 -d dataset 8 | rm dataset/changedetection2017.zip -------------------------------------------------------------------------------- /src/visualizer/shaders/render_points.vert: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | layout (location = 0) in vec2 texCoords; 4 | 5 | uniform sampler2D texOutput; 6 | uniform mat4 mvp; 7 | 8 | void main() { 9 | gl_Position = vec4(10,10,10,1.0); // invalid NDC 10 | 11 | vec4 point = texture(texOutput, texCoords); 12 | if(point.w > 0.5) { 13 | gl_Position = mvp * point; 14 | } 15 | } -------------------------------------------------------------------------------- /src/fastcd/shaders/project_mesh.vert: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | layout (location = 0) in vec4 position_in; 4 | 5 | uniform mat4 mvp; // model-view-projection matrix. (applied p*v*m) 6 | 7 | out vec4 pos; // this will be linearly interpolated between the three triangle corners. 8 | 9 | void main() { 10 | gl_Position = mvp * position_in; // this must be in NDC. 11 | pos = position_in; 12 | } 13 | -------------------------------------------------------------------------------- /src/visualizer/shaders/draw_mesh.vert: -------------------------------------------------------------------------------- 1 | #version 330 2 | 3 | layout (location = 0) in vec4 position_in; 4 | layout (location = 1) in vec4 normal_in; 5 | 6 | uniform mat4 model_mat; // model matrix. 7 | uniform mat4 normal_mat; // transformation of normals to world coordinates. 8 | uniform mat4 mvp; // model-view-projection matrix. 
(applied p*v*m) 9 | 10 | out vec4 pos; 11 | out vec4 normal; 12 | 13 | void main() { 14 | gl_Position = mvp * position_in; 15 | pos = model_mat * position_in; 16 | normal = normal_mat * normal_in; 17 | } -------------------------------------------------------------------------------- /package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | fast_change_detection 4 | 0.0.1 5 | The fast_change_detection package 6 | 7 | Emanuele Palazzolo 8 | 9 | FreeBSD 10 | 11 | catkin 12 | glow 13 | glow 14 | 15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /src/visualizer/shaders/colored_vertices.vert: -------------------------------------------------------------------------------- 1 | #version 330 core 2 | 3 | layout (location = 0) in vec4 position_color; 4 | 5 | uniform mat4 mvp; 6 | 7 | out vec4 vertexColor; 8 | 9 | uniform bool useCustomColor; 10 | uniform vec4 customColor; 11 | 12 | vec4 decodeColor(float c) { 13 | vec4 col; 14 | col.x = float(int(c) >> 16 & 0xFF) / 255.0f; 15 | col.y = float(int(c) >> 8 & 0xFF) / 255.0f; 16 | col.z = float(int(c) & 0xFF) / 255.0f; 17 | col.w = 1.0f; 18 | return col; 19 | } 20 | 21 | void main() 22 | { 23 | gl_Position = mvp * vec4(position_color.xyz, 1.0); 24 | vertexColor = decodeColor(position_color.w); 25 | if(useCustomColor) vertexColor = customColor; 26 | } -------------------------------------------------------------------------------- /src/fastcd/image_region.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | #include 4 | #include 5 | 6 | namespace fastcd { 7 | 8 | /** 9 | * @brief Class that represents a labeled region. 10 | */ 11 | class ImageRegion { 12 | public: 13 | /** 14 | * @brief Constructor. 15 | * 16 | * @param[in] contour The contour of the region 17 | */ 18 | explicit ImageRegion(std::vector contour) 19 | : contour_(contour) {} 20 | 21 | /** The points describing the contour of the region */ 22 | std::vector contour_; 23 | 24 | /** The label of the region */ 25 | int label_ = -1; 26 | }; 27 | 28 | } // namespace fastcd 29 | -------------------------------------------------------------------------------- /src/utils/obj_reader.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Jens Behley (jens.behley@igg.uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | 4 | #include 5 | #include "fastcd/mesh.h" 6 | 7 | /** 8 | * @brief Class for loading a mesh from a Wavefront .obj file. 9 | */ 10 | class ObjReader { 11 | public: 12 | /** 13 | * @brief Read a mesh from a file with the given filename. 14 | * 15 | * @param[in] filename The filename 16 | * 17 | * @return The mesh. 
18 | */ 19 | static Mesh FromFile(const std::string& filename); 20 | 21 | protected: 22 | /** 23 | * @brief Parse the given material file 24 | * 25 | * @param[in] filename The filename 26 | * @param[out] materials The materials 27 | */ 28 | static void ParseMaterials(const std::string& filename, 29 | std::map& materials); 30 | }; 31 | -------------------------------------------------------------------------------- /src/fastcd/image.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include "fastcd/image.h" 3 | #include 4 | #include 5 | 6 | namespace fastcd { 7 | 8 | Image::Image() {} 9 | 10 | Image::Image(const cv::Mat &img, const Camera &camera) 11 | : camera_(camera), raw_image_(img) {} 12 | 13 | Image::Image(const Image &img) 14 | : camera_(img.camera_), raw_image_(img.raw_image_) {} 15 | 16 | Camera Image::GetCamera() const { return camera_; } 17 | 18 | cv::Mat Image::GetRawImage() const { return raw_image_; } 19 | 20 | bool Image::LoadImage(std::string filepath, const Camera &camera) { 21 | raw_image_ = cv::imread(filepath, cv::IMREAD_COLOR); 22 | if (!raw_image_.data) { 23 | return false; 24 | } else { 25 | camera_ = camera; 26 | return true; 27 | } 28 | } 29 | 30 | void Image::Scale(double factor) { 31 | camera_.ScaleCalibration(factor); 32 | cv::resize(raw_image_, raw_image_, 33 | cv::Size(Width() * factor, Height() * factor)); 34 | } 35 | 36 | int Image::Width() const { return raw_image_.cols; } 37 | int Image::Height() const { return raw_image_.rows; } 38 | 39 | } // namespace fastcd 40 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | # specify the image for docker to run 2 | image: gitlab.ipb.uni-bonn.de:4567/global/docker-images:ipb_opencv 3 | 4 | build: 5 | script: 6 | # set proper bash 7 | - export SHELL=/bin/bash 8 | # install any external software you may need 9 | - apt-get update -yq 10 | - apt-get install -yq rsync python-pip libopencv-dev qtbase5-dev libglew-dev 11 | - pip install catkin_tools catkin_tools_fetch --upgrade 12 | # update submodules 13 | - git submodule update --init 14 | # set ssh key. 15 | - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )' 16 | - eval $(ssh-agent -s) 17 | - ssh-add <(echo "$SSH_PRIVATE_KEY") 18 | - mkdir -p ~/.ssh 19 | - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" > ~/.ssh/config' 20 | # we create a directory and implicitly initialize a catkin workspace there 21 | - mkdir all_pkgs 22 | - rsync -ar --exclude={all_pkgs} . all_pkgs 23 | - rm -r src/ 24 | - mkdir src 25 | - cd src 26 | - git clone https://github.com/ros/catkin.git 27 | - cd .. 28 | - catkin build --no-status 29 | - mv all_pkgs src/ 30 | - catkin deps fetch 31 | - catkin build -j2 --no-status --verbose 32 | 33 | tags: 34 | - docker 35 | except: 36 | - tags 37 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018, Emanuele Palazzolo, Jens Behley, Cyrill Stachniss, University of Bonn 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | 7 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | 9 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /src/fastcd/regions_matcher.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | 4 | #include 5 | #include "fastcd/processed_image.h" 6 | 7 | namespace fastcd { 8 | 9 | /** 10 | * @brief Pure abstract class for a regions matcher. A region matcher 11 | * performs the data association between the segmented regions of 12 | * two images and returns the labels assigned to the different regions. 13 | */ 14 | class RegionsMatcher { 15 | public: 16 | /** 17 | * @brief Matches the regions of the two images and computes the relative 18 | * labels. 19 | * 20 | * @param[in] img1 The first image 21 | * @param[in] img2 The second image 22 | * @param[in] min_label The minimum label to assign 23 | */ 24 | virtual void Match(const ProcessedImage &img1, const ProcessedImage &img2, 25 | int min_label) = 0; 26 | /** 27 | * @brief Gets the computed labels for the regions of the first image. 28 | * 29 | * @return The vector of labels. 30 | */ 31 | virtual std::vector GetLabelsImg1() = 0; 32 | 33 | /** 34 | * @brief Gets the computed labels for the regions of the second image. 35 | * 36 | * @return The vector of labels. 37 | */ 38 | virtual std::vector GetLabelsImg2() = 0; 39 | 40 | /** 41 | * @brief Gets the maximum assigned label. 42 | * 43 | * @return The maximum assigned label. 44 | */ 45 | virtual int GetMaxLabel() = 0; 46 | }; 47 | 48 | } // namespace fastcd 49 | -------------------------------------------------------------------------------- /src/fastcd/depth_projector.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Jens Behley (jens.behley@igg.uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | #include 10 | #include "fastcd/mesh.h" 11 | 12 | namespace fastcd { 13 | 14 | /** 15 | * @brief Basic projection into a framebuffer texture. 16 | */ 17 | class DepthProjector { 18 | public: 19 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 20 | 21 | /** 22 | * @brief Constructor.
23 | * 24 | * @param[in] width The image width 25 | * @param[in] height The image height 26 | * @param[in] projection The camera projection matrix 27 | */ 28 | DepthProjector(uint32_t width, uint32_t height, 29 | const Eigen::Matrix4f& projection); 30 | 31 | /** 32 | * @brief Renders the given mesh from the given view relative to the 33 | * mesh. 34 | * 35 | * @param[in] mesh The mesh 36 | * @param[in] view The view 37 | */ 38 | void render(const Mesh& mesh, const Eigen::Matrix4f& view); 39 | 40 | /** 41 | * @brief Gets the depth projection. 42 | * 43 | * @return The depth image. 44 | */ 45 | glow::GlTexture& texture(); 46 | 47 | protected: 48 | /** The framebuffer used to render the mesh */ 49 | glow::GlFramebuffer framebuffer_; 50 | 51 | /** The output texture */ 52 | glow::GlTexture output_; 53 | 54 | /** The program used to render the mesh */ 55 | glow::GlProgram program_; 56 | 57 | /** The projection matrix of the cameras*/ 58 | Eigen::Matrix4f projection_; 59 | }; 60 | 61 | } // namespace fastcd 62 | -------------------------------------------------------------------------------- /src/visualizer/shaders/draw_mesh.frag: -------------------------------------------------------------------------------- 1 | #version 330 2 | 3 | in vec4 pos; 4 | in vec4 normal; 5 | 6 | struct Light { 7 | vec4 position; // directional lights have w == 0. 8 | 9 | vec3 ambient; 10 | vec3 diffuse; 11 | vec3 specular; 12 | }; 13 | 14 | uniform int num_lights; 15 | uniform Light lights[10]; 16 | 17 | uniform vec4 view_pos; 18 | 19 | struct Material { 20 | vec3 ambient; 21 | vec3 diffuse; 22 | vec3 specular; 23 | vec3 emission; 24 | 25 | float shininess; 26 | float alpha; 27 | }; 28 | 29 | uniform Material material; 30 | 31 | out vec4 color; 32 | 33 | // see also http://learnopengl.com/#!Lighting/Materials 34 | 35 | void main() { 36 | vec3 norm = normalize(vec3(normal)); 37 | vec3 view_dir = normalize((view_pos - pos).xyz); 38 | 39 | vec3 result = vec3(0); 40 | 41 | for(int i = 0; i < num_lights; ++i) 42 | { 43 | // ambient: 44 | vec3 ambient = lights[i].ambient * material.ambient; 45 | 46 | // diffuse: 47 | vec3 light_dir = normalize(lights[i].position.xyz - pos.xyz); 48 | if(lights[i].position.w < 0.0001) light_dir = normalize(-lights[i].position.xyz); // directional light. 
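    // Note (added comment): the diffuse term below uses abs() instead of max(), i.e. the
    // mesh is lit from both sides, so triangles whose normals point away from
    // the light are not rendered black.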
49 | float diff = abs(dot(norm, light_dir)); 50 | vec3 diffuse = lights[i].diffuse * (diff * material.diffuse); 51 | 52 | // specular: 53 | vec3 reflect_dir = reflect(-light_dir, norm); 54 | float spec = pow(max(dot(view_dir, reflect_dir), 0.0), material.shininess); 55 | vec3 specular = lights[i].specular * (spec * material.specular); 56 | 57 | // emission (it seems good to apply the emission for each light; this way the emission part doesn't just depend 58 | // on the number of lights): 59 | result += ambient + diffuse + specular + material.emission; 60 | } 61 | 62 | color = vec4(result, material.alpha); 63 | } -------------------------------------------------------------------------------- /src/fastcd/image_sequence.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include "fastcd/image_sequence.h" 3 | 4 | #include 5 | #include 6 | #include 7 | #include "fastcd/regions_matcher_hist.h" 8 | 9 | namespace fastcd { 10 | 11 | ImageSequence::ImageSequence(int cache_size, int max_comparison) 12 | : cache_size_(cache_size), max_comparisons_(max_comparison) {} 13 | 14 | void ImageSequence::AddImage(std::shared_ptr image, 15 | int kernel_size) { 16 | if (sequence_.size() == cache_size_) { 17 | sequence_.pop_front(); 18 | } 19 | if (!sequence_.empty()) { 20 | for (int i = sequence_.size() - 1; i >= 0; i--) { 21 | if (sequence_[i].ImageCompared() < max_comparisons_) { 22 | sequence_[i].UpdateInconsistencies( 23 | image->Warp(sequence_[i]),kernel_size); 24 | } 25 | if (image->ImageCompared() < max_comparisons_) { 26 | image->UpdateInconsistencies(sequence_[i].Warp(*image),kernel_size); 27 | } 28 | } 29 | } 30 | sequence_.push_back(*image); 31 | } 32 | 33 | void ImageSequence::ComputeAndMatchRegions(int threshold_change_area) { 34 | for (size_t i = 0; i < sequence_.size(); i++) { 35 | sequence_[i].ComputeRegions(threshold_change_area); 36 | } 37 | 38 | RegionsMatcherHist regions_matcher; 39 | int label = 0; 40 | for (size_t i = 0; i < sequence_.size(); i++) { 41 | for (size_t j = i + 1; j < sequence_.size(); j++) { 42 | regions_matcher.Match(sequence_[i], sequence_[j], label); 43 | sequence_[i].UpdateLabels(regions_matcher.GetLabelsImg1()); 44 | sequence_[j].UpdateLabels(regions_matcher.GetLabelsImg2()); 45 | label = regions_matcher.GetMaxLabel(); 46 | } 47 | } 48 | } 49 | 50 | } // namespace fastcd 51 | -------------------------------------------------------------------------------- /src/fastcd/depth_projector.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Jens Behley (jens.behley@igg.uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include "fastcd/depth_projector.h" 3 | #include "fastcd/mesh.h" 4 | 5 | namespace fastcd { 6 | 7 | DepthProjector::DepthProjector(uint32_t width, uint32_t height, 8 | const Eigen::Matrix4f& projection) 9 | : framebuffer_(width, height), 10 | output_(width, height, glow::TextureFormat::RGBA_FLOAT), 11 | projection_(projection) { 12 | framebuffer_.attach(glow::FramebufferAttachment::COLOR0, output_); 13 | glow::GlRenderbuffer rbo(width, height, 14 | glow::RenderbufferFormat::DEPTH_STENCIL); 15 | framebuffer_.attach(glow::FramebufferAttachment::DEPTH_STENCIL, 16 | rbo); // framebuffers need a depth/stencil buffer.
17 | 18 | program_.attach(glow::GlShader::fromCache( 19 | glow::ShaderType::VERTEX_SHADER, "fastcd/shaders/project_mesh.vert")); 20 | program_.attach(glow::GlShader::fromCache( 21 | glow::ShaderType::FRAGMENT_SHADER, "fastcd/shaders/project_mesh.frag")); 22 | program_.link(); 23 | } 24 | 25 | /** \brief render given mesh from given view relative to the mesh. **/ 26 | void DepthProjector::render(const Mesh& mesh, const Eigen::Matrix4f& view) { 27 | GLint ov[4]; 28 | glGetIntegerv(GL_VIEWPORT, ov); 29 | 30 | framebuffer_.bind(); 31 | glEnable(GL_DEPTH_TEST); 32 | glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); 33 | glViewport(0, 0, framebuffer_.width(), framebuffer_.height()); 34 | 35 | program_.bind(); 36 | program_.setUniform( 37 | glow::GlUniform("mvp", projection_ * view)); 38 | 39 | mesh.draw(program_); 40 | program_.release(); 41 | 42 | framebuffer_.release(); 43 | 44 | glViewport(ov[0], ov[1], ov[2], ov[3]); 45 | } 46 | 47 | /** \brief get depth projection. **/ 48 | glow::GlTexture& DepthProjector::texture() { return output_; } 49 | 50 | } // namespace fastcd 51 | -------------------------------------------------------------------------------- /src/fastcd/point_covariance2d.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include "fastcd/point_covariance2d.h" 3 | #include 4 | 5 | namespace fastcd { 6 | 7 | PointCovariance2d::PointCovariance2d() {} 8 | 9 | PointCovariance2d::PointCovariance2d(const Eigen::Vector2d &point, 10 | const Eigen::Matrix2d &covariance, double chi_square) 11 | : point_(point), covariance_(covariance) { 12 | Eigen::SelfAdjointEigenSolver es(covariance_); 13 | eigenvalues_ = es.eigenvalues(); 14 | eigenvectors_ = es.eigenvectors(); 15 | angle_ = atan2(eigenvectors_(1, 1), eigenvectors_(1, 0)); 16 | if (angle_ < 0) angle_ += 2 * M_PI; 17 | half_major_axis_size_ = sqrt(chi_square) * sqrt(eigenvalues_(1)); 18 | half_minor_axis_size_ = sqrt(chi_square) * sqrt(eigenvalues_(0)); 19 | Eigen::Matrix2d R; 20 | R << cos(angle_), -sin(angle_), sin(angle_), cos(angle_); 21 | Eigen::Vector2d xaxis, yaxis, vertex; 22 | yaxis << 0, 1; 23 | xaxis << 1, 0; 24 | vertex = R * half_major_axis_size_ * xaxis + point; 25 | vertices_.push_back(vertex); 26 | vertex = -R * half_major_axis_size_ * xaxis + point; 27 | vertices_.push_back(vertex); 28 | vertex = R * half_minor_axis_size_ * yaxis + point; 29 | vertices_.push_back(vertex); 30 | vertex = -R * half_minor_axis_size_ * yaxis + point; 31 | vertices_.push_back(vertex); 32 | } 33 | 34 | Eigen::Vector2d PointCovariance2d::Point() { return point_; } 35 | 36 | Eigen::Matrix2d PointCovariance2d::Covariance() { return covariance_; } 37 | 38 | Eigen::Vector2d PointCovariance2d::Eigenvalues() { return eigenvalues_; } 39 | 40 | Eigen::Matrix2d PointCovariance2d::Eigenvectors() { return eigenvectors_; } 41 | 42 | double PointCovariance2d::Angle() { return angle_; } 43 | 44 | double PointCovariance2d::HalfMajorAxisSize() { return half_major_axis_size_; } 45 | 46 | double PointCovariance2d::HalfMinorAxisSize() { return half_minor_axis_size_; } 47 | 48 | std::vector PointCovariance2d::Vertices() { return vertices_; } 49 | 50 | } // namespace fastcd 51 | -------------------------------------------------------------------------------- /src/fastcd/image.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo 
(emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | 4 | #include 5 | #include 6 | #include "fastcd/camera.h" 7 | 8 | namespace fastcd { 9 | 10 | /** 11 | * @brief Class that stores an image and its calibration. 12 | */ 13 | class Image { 14 | public: 15 | /** 16 | * @brief Empty constructor. 17 | */ 18 | Image(); 19 | 20 | /** 21 | * @brief Constructor from OpenCV image. 22 | * 23 | * @param[in] img The OpenCV image 24 | * @param[in] camera The camera calibration 25 | */ 26 | explicit Image(const cv::Mat &img, const Camera &camera); 27 | 28 | /** 29 | * @brief Copy constructor. 30 | * 31 | * @param[in] img The image 32 | */ 33 | Image(const Image &img); 34 | 35 | /** 36 | * @brief Loads an image from a file, with a passed camera calibration. 37 | * 38 | * @param[in] filepath The image file path 39 | * @param[in] camera The camera calibration 40 | * 41 | * @return true if the load was successful, false otherwise. 42 | */ 43 | bool LoadImage(std::string filepath, const Camera &camera); 44 | 45 | /** 46 | * @brief Scales the image and its calibration by the specified factor. 47 | * 48 | * @param[in] factor The scaling factor 49 | */ 50 | void Scale(double factor); 51 | 52 | /** 53 | * @brief Gets the camera calibration. 54 | * 55 | * @return The camera calibration. 56 | */ 57 | Camera GetCamera() const; 58 | 59 | /** 60 | * @brief Gets the raw OpenCV image. 61 | * 62 | * @return The raw image. 63 | */ 64 | cv::Mat GetRawImage() const; 65 | 66 | /** 67 | * @brief Gets the width of the image in pixel. 68 | * 69 | * @return The width of the image. 70 | */ 71 | int Width() const; 72 | 73 | /** 74 | * @brief Gets the height of the image in pixel. 75 | * 76 | * @return The height of the image. 77 | */ 78 | int Height() const; 79 | 80 | protected: 81 | /** The camera calibration */ 82 | Camera camera_; 83 | 84 | /** The OpenCV image*/ 85 | cv::Mat raw_image_; 86 | }; 87 | 88 | } // namespace fastcd 89 | -------------------------------------------------------------------------------- /src/fastcd/image_sequence.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | #include 4 | #include 5 | #include 6 | #include "fastcd/processed_image.h" 7 | 8 | namespace fastcd { 9 | 10 | /** 11 | * @brief Class representing a sequence of images. 12 | */ 13 | class ImageSequence { 14 | public: 15 | /** 16 | * @brief Constructor. 17 | * 18 | * @param[in] cache_size The maximum number of images stored in the 19 | * sequence 20 | * @param[in] max_comparisons The maximum number of comparisons of a single 21 | * image with other images 22 | */ 23 | explicit ImageSequence(int cache_size, int max_comparisons); 24 | 25 | /** 26 | * @brief Adds an image to the sequence and update all the relevant 27 | * inconsistency images. 28 | * 29 | * @param[in] image The image to add 30 | * 31 | * @param[in] kernel_size The size of the window over which the image is 32 | * compared with the others. It represents the uncertainty of the camera pose 33 | */ 34 | void AddImage(std::shared_ptr image, int kernel_size); 35 | 36 | /** 37 | * @brief Segments the regions of inconsistencies in each image and 38 | * matches them. 
39 | * 40 | * @param[in] threshold_change_area Threshold area under which regions are 41 | * not considered 42 | */ 43 | void ComputeAndMatchRegions(int threshold_change_area); 44 | 45 | /** 46 | * @brief Access the i-th element of the sequence. 47 | * 48 | * @param[in] i Element of the sequence to access 49 | * 50 | * @return The selected element. 51 | */ 52 | ProcessedImage operator[](int i) const { return sequence_[i]; } 53 | 54 | /** 55 | * @brief Access the i-th element of the sequence. 56 | * 57 | * @param[in] i Element of the sequence to access 58 | * 59 | * @return The selected element. 60 | */ 61 | ProcessedImage &operator[](int i) { return sequence_[i]; } 62 | 63 | /** 64 | * @brief Gets the size of the sequence. 65 | * 66 | * @return The size of the sequence. 67 | */ 68 | std::size_t size() { return sequence_.size(); } 69 | 70 | protected: 71 | /** The maximum number of images stored in the sequence */ 72 | int cache_size_; 73 | 74 | /** The maximum number of comparisons of a single image with other images */ 75 | int max_comparisons_; 76 | 77 | /** The sequence of images */ 78 | std::deque sequence_; 79 | }; 80 | 81 | } // namespace fastcd 82 | -------------------------------------------------------------------------------- /src/fastcd/change_detector.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | 4 | #include 5 | #include 6 | #include "fastcd/point_covariance3d.h" 7 | #include "fastcd/image.h" 8 | #include "fastcd/image_sequence.h" 9 | #include "fastcd/depth_projector.h" 10 | #include "fastcd/mesh.h" 11 | 12 | namespace fastcd { 13 | 14 | /** 15 | * @brief Main class of the change detector algorithm. 16 | */ 17 | class ChangeDetector { 18 | public: 19 | /** 20 | * @brief Options for the change detector algorithm (see ChangeDetector). 21 | */ 22 | struct ChangeDetectorOptions { 23 | /** The maximum amount of images stored in the queue. */ 24 | int cache_size; 25 | 26 | /** The maximum amount of comparisons per image. */ 27 | int max_comparisons; 28 | 29 | /** Chi square value for 2D confidence ellipses (95% of the variance 30 | * retained by default). 31 | */ 32 | double chi_square2d = 5.991; 33 | 34 | /** Chi square value for 3D confidence ellipsoids (95% of the variance 35 | * retained by default). 36 | */ 37 | double chi_square3d = 7.815; 38 | 39 | /** Width to which rescale the images */ 40 | int rescale_width; 41 | 42 | /** The threshold area under which a 2D change is discarded */ 43 | int threshold_change_area; 44 | }; 45 | 46 | /** 47 | * @brief Constructor. 48 | * 49 | * @param[in] mesh The 3D model of the environment 50 | * @param[in] options The options of the algorithm (see ChangeDetectorOptions) 51 | */ 52 | explicit ChangeDetector(const Mesh &mesh, 53 | const ChangeDetectorOptions &options); 54 | 55 | /** 56 | * @brief Adds an image to the sequence and process it. 57 | * 58 | * @param[in] image The image 59 | * @param[in] kernel_size The size of the window over which the image is 60 | * compared with the others. It represents the uncertainty of the camera pose 61 | */ 62 | void AddImage(Image &image, int kernel_size); 63 | 64 | /** 65 | * @brief Gets the detected changes. 66 | * 67 | * @return The changes. 
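   *         Each change is the mean 3D position of a changed region together
   *         with its covariance (a PointCovariance3d).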
68 | */ 69 | std::vector GetChanges(); 70 | 71 | protected: 72 | /** The 3D model of the environment */ 73 | Mesh mesh_; 74 | 75 | /** The struct containing the options of the algorithm */ 76 | ChangeDetectorOptions options_; 77 | 78 | /** It stores whether the depth projector has been initialized or not */ 79 | bool projector_init_ = false; 80 | 81 | /** The sequence of images */ 82 | std::shared_ptr image_sequence_; 83 | 84 | /** The depth projector */ 85 | std::unique_ptr depth_projector_; 86 | }; 87 | 88 | } // namespace fastcd 89 | -------------------------------------------------------------------------------- /src/fastcd/regions_matcher_hist.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | 4 | #include 5 | #include "fastcd/processed_image.h" 6 | #include "fastcd/regions_matcher.h" 7 | 8 | namespace fastcd { 9 | 10 | /** 11 | * @brief Class for matching regions using histograms of their hue and 12 | * saturation values. 13 | */ 14 | class RegionsMatcherHist : RegionsMatcher { 15 | public: 16 | /** 17 | * @brief Matches the regions of the two images and computes the relative 18 | * labels. 19 | * 20 | * @param[in] img1 The first image 21 | * @param[in] img2 The second image 22 | * @param[in] min_label The minimum label to assign 23 | */ 24 | void Match(const ProcessedImage &img1, const ProcessedImage &img2, 25 | int min_label) override; 26 | 27 | /** 28 | * @brief Gets the computed labels for the regions of the first image. 29 | * 30 | * @return The vector of labels. 31 | */ 32 | std::vector GetLabelsImg1() override; 33 | 34 | /** 35 | * @brief Gets the computed labels for the regions of the second image. 36 | * 37 | * @return The vector of labels. 38 | */ 39 | std::vector GetLabelsImg2() override; 40 | 41 | /** 42 | * @brief Gets the maximum assigned label. 43 | * 44 | * @return The maximum assigned label. 45 | */ 46 | int GetMaxLabel() override; 47 | 48 | protected: 49 | /** 50 | * @brief Computes a mask that represents the epipolar constraints of the 51 | * region idx of img1 on img2. 52 | * 53 | * @param[in] img1 The first image 54 | * @param[in] img2 The second image 55 | * @param[in] idx The index of the region of the first image for which the 56 | * mask is created 57 | * 58 | * @return The mask. 59 | */ 60 | cv::Mat EpilineMask(const ProcessedImage &img1, const ProcessedImage &img2, 61 | int idx); 62 | 63 | /** 64 | * @brief Calculates the fundamental matrix given the projection matrices 65 | * of two cameras. 66 | * 67 | * @param[in] P1 The projection matrix of the first camera 68 | * @param[in] P2 The projection matrix of the second camera 69 | * 70 | * @return The fundamental matrix.
71 | */ 72 | Eigen::Matrix3d ComputeF(const Eigen::Matrix &P1, 73 | const Eigen::Matrix &P2); 74 | 75 | /** The labels of the regions of the first image */ 76 | std::vector labels1_; 77 | 78 | /** The labels of the regions of the second image */ 79 | std::vector labels2_; 80 | 81 | /** The maximum assigned label */ 82 | int max_label_; 83 | }; 84 | 85 | } // namespace fastcd 86 | -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | Language: Cpp 3 | # BasedOnStyle: Google 4 | AccessModifierOffset: -1 5 | AlignAfterOpenBracket: Align 6 | AlignConsecutiveAssignments: false 7 | AlignConsecutiveDeclarations: false 8 | AlignEscapedNewlinesLeft: true 9 | AlignOperands: true 10 | AlignTrailingComments: true 11 | AllowAllParametersOfDeclarationOnNextLine: true 12 | AllowShortBlocksOnASingleLine: false 13 | AllowShortCaseLabelsOnASingleLine: false 14 | AllowShortFunctionsOnASingleLine: All 15 | AllowShortIfStatementsOnASingleLine: true 16 | AllowShortLoopsOnASingleLine: true 17 | AlwaysBreakAfterDefinitionReturnType: None 18 | AlwaysBreakAfterReturnType: None 19 | AlwaysBreakBeforeMultilineStrings: true 20 | AlwaysBreakTemplateDeclarations: true 21 | BinPackArguments: true 22 | BinPackParameters: true 23 | BraceWrapping: 24 | AfterClass: false 25 | AfterControlStatement: false 26 | AfterEnum: false 27 | AfterFunction: false 28 | AfterNamespace: false 29 | AfterObjCDeclaration: false 30 | AfterStruct: false 31 | AfterUnion: false 32 | BeforeCatch: false 33 | BeforeElse: false 34 | IndentBraces: false 35 | BreakBeforeBinaryOperators: None 36 | BreakBeforeBraces: Attach 37 | BreakBeforeTernaryOperators: true 38 | BreakConstructorInitializersBeforeComma: false 39 | ColumnLimit: 80 40 | CommentPragmas: '^ IWYU pragma:' 41 | ConstructorInitializerAllOnOneLineOrOnePerLine: true 42 | ConstructorInitializerIndentWidth: 4 43 | ContinuationIndentWidth: 4 44 | Cpp11BracedListStyle: true 45 | DerivePointerAlignment: true 46 | DisableFormat: false 47 | ExperimentalAutoDetectBinPacking: false 48 | ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] 49 | IncludeCategories: 50 | - Regex: '^<.*\.h>' 51 | Priority: 1 52 | - Regex: '^<.*' 53 | Priority: 2 54 | - Regex: '.*' 55 | Priority: 3 56 | IndentCaseLabels: true 57 | IndentWidth: 2 58 | IndentWrappedFunctionNames: false 59 | KeepEmptyLinesAtTheStartOfBlocks: false 60 | MacroBlockBegin: '' 61 | MacroBlockEnd: '' 62 | MaxEmptyLinesToKeep: 1 63 | NamespaceIndentation: None 64 | ObjCBlockIndentWidth: 2 65 | ObjCSpaceAfterProperty: false 66 | ObjCSpaceBeforeProtocolList: false 67 | PenaltyBreakBeforeFirstCallParameter: 1 68 | PenaltyBreakComment: 300 69 | PenaltyBreakFirstLessLess: 120 70 | PenaltyBreakString: 1000 71 | PenaltyExcessCharacter: 1000000 72 | PenaltyReturnTypeOnItsOwnLine: 200 73 | PointerAlignment: Left 74 | ReflowComments: true 75 | SortIncludes: true 76 | SpaceAfterCStyleCast: false 77 | SpaceBeforeAssignmentOperators: true 78 | SpaceBeforeParens: ControlStatements 79 | SpaceInEmptyParentheses: false 80 | SpacesBeforeTrailingComments: 2 81 | SpacesInAngles: false 82 | SpacesInContainerLiterals: true 83 | SpacesInCStyleCastParentheses: false 84 | SpacesInParentheses: false 85 | SpacesInSquareBrackets: false 86 | Standard: Auto 87 | TabWidth: 8 88 | UseTab: Never 89 | ... 
90 | 91 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(fast_change_detection) 3 | 4 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_RELEASE} -std=c++11 -DNDEBUG -O3") 5 | set(CMAKE_BUILD_TYPE Release) 6 | 7 | find_package(catkin REQUIRED COMPONENTS glow) 8 | find_package(OpenCV REQUIRED) 9 | find_package(OpenGL REQUIRED) 10 | find_package(Qt5Gui REQUIRED) 11 | find_package(Qt5OpenGL REQUIRED) 12 | find_package(GLEW REQUIRED) 13 | find_package(Boost REQUIRED COMPONENTS filesystem system) 14 | 15 | set(CMAKE_INCLUDE_CURRENT_DIR ON) # needs to be activated for qt generated files in build directory. 16 | set(CMAKE_AUTOMOC ON) 17 | 18 | option(USE_OpenMP "Use OpenMP" OFF) 19 | if(USE_OpenMP) 20 | find_package(OpenMP) 21 | if(OPENMP_FOUND) 22 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") 23 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") 24 | endif() 25 | endif() 26 | 27 | catkin_package( 28 | INCLUDE_DIRS src/ 29 | LIBRARIES fastcd 30 | CATKIN_DEPENDS glow 31 | ) 32 | 33 | SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_SOURCE_DIR}/bin) 34 | 35 | include_directories(${catkin_INCLUDE_DIRS}) 36 | include_directories(${QT5_INCLUDE_DIRS} 37 | ${GLEW_INCLUDE_DIRS} 38 | ${OpenCV_INCLUDE_DIRS} 39 | /usr/include/eigen3 40 | src) 41 | 42 | set(FASTCD_SHADER_SRC ${CMAKE_BINARY_DIR}/fastcd_shaders.cpp) 43 | set(VISUALIZER_SHADER_SRC ${CMAKE_BINARY_DIR}/visualizer_shaders.cpp) 44 | 45 | compile_shaders(${FASTCD_SHADER_SRC} 46 | src/fastcd/shaders/project_mesh.vert 47 | src/fastcd/shaders/project_mesh.frag 48 | ) 49 | 50 | compile_shaders(${VISUALIZER_SHADER_SRC} 51 | src/visualizer/shaders/colored_vertices.vert 52 | src/visualizer/shaders/colored_vertices.frag 53 | src/visualizer/shaders/draw_mesh.vert 54 | src/visualizer/shaders/draw_mesh.frag 55 | src/visualizer/shaders/render_points.vert 56 | src/visualizer/shaders/render_points.frag 57 | ) 58 | 59 | add_library(fastcd 60 | src/fastcd/camera.cpp 61 | src/fastcd/image.cpp 62 | src/fastcd/processed_image.cpp 63 | src/fastcd/image_sequence.cpp 64 | src/fastcd/point_covariance2d.cpp 65 | src/fastcd/point_covariance3d.cpp 66 | src/fastcd/depth_projector.cpp 67 | src/fastcd/mesh.cpp 68 | src/fastcd/regions3d_projector.cpp 69 | src/fastcd/change_detector.cpp 70 | src/fastcd/regions_matcher_hist.cpp 71 | 72 | ${FASTCD_SHADER_SRC} 73 | ) 74 | 75 | target_link_libraries(fastcd ${catkin_LIBRARIES} ${OpenCV_LIBS} ${Boost_LIBRARIES}) 76 | 77 | add_executable(fastcd_example src/example.cpp 78 | src/utils/obj_reader.cpp 79 | src/visualizer/visualizer.cpp 80 | ${VISUALIZER_SHADER_SRC} 81 | ) 82 | 83 | target_link_libraries(fastcd_example fastcd ${OPENGL_LIBRARIES} ${catkin_LIBRARIES} ${GLEW_LIBRARIES} Qt5::Widgets Qt5::OpenGL) 84 | 85 | # Generate API documentation with Doxygen 86 | find_package(Doxygen) 87 | if(DOXYGEN_FOUND) 88 | configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in ${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile @ONLY) 89 | add_custom_target(doc ALL 90 | ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile 91 | WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} 92 | COMMENT "Generating API documentation with Doxygen" VERBATIM 93 | ) 94 | endif(DOXYGEN_FOUND) -------------------------------------------------------------------------------- /src/fastcd/point_covariance3d.h: 
-------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | #include 4 | #include "fastcd/mesh.h" 5 | 6 | namespace fastcd { 7 | 8 | /** 9 | * @brief Class for storing a 3D point, its covariance and the 10 | * corresponding confidence ellipsoid. 11 | */ 12 | class PointCovariance3d { 13 | public: 14 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 15 | 16 | /** 17 | * @brief Constructor. It computes all the parameters of the ellipsoid 18 | * from point, covariance and chi square; 19 | * 20 | * @param[in] point The point 21 | * @param[in] covariance The covariance 22 | * @param[in] chi_square The chi square. 95% of the variance retained by 23 | * default 24 | */ 25 | PointCovariance3d(const Eigen::Vector3d &point, 26 | const Eigen::Matrix3d &covariance, 27 | double chi_square = 7.815); 28 | 29 | /** 30 | * @brief Creates and returns a Mesh representing the confidence 31 | * ellipsoid. 32 | * 33 | * @param[in] stacks The number of stacks 34 | * @param[in] slices The number of slices 35 | * @param[in] r The red color value 36 | * @param[in] g The green color value 37 | * @param[in] b The blue color value 38 | * @param[in] a The alpha value 39 | * 40 | * @return The mesh. 41 | */ 42 | Mesh ToMesh(int stacks, int slices, float r, float g, float b, float a) const; 43 | 44 | /** 45 | * @brief Gets the point. 46 | * 47 | * @return The point. 48 | */ 49 | Eigen::Vector3d Point() const; 50 | 51 | /** 52 | * @brief Gets the covariance matrix. 53 | * 54 | * @return The covariance matrix. 55 | */ 56 | Eigen::Matrix3d Covariance() const; 57 | 58 | /** 59 | * @brief Gets the eigenvalues of the covariance matrix. 60 | * 61 | * @return The eigenvalues of the covariance matrix. 62 | */ 63 | Eigen::Vector3d Eigenvalues() const; 64 | 65 | /** 66 | * @brief Gets the eigenvectors of the covariance matrix. 67 | * 68 | * @return The eigenvectors of the covariance matrix. 69 | */ 70 | Eigen::Matrix3d Eigenvectors() const; 71 | 72 | /** 73 | * @brief Gets the transformation matrix to scale the confidence 74 | * ellipsoid. 75 | * 76 | * @return The transformation matrix to scale the confidence ellipsoid. 77 | */ 78 | Eigen::Matrix3d Scaling() const; 79 | 80 | /** 81 | * @brief Gets the vertices of the confidence ellipsoid. 82 | * 83 | * @return The 6 vertices of the confidence ellipsoid. 
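   *         These are the endpoints of the three principal axes of the
   *         ellipsoid (see the constructor in point_covariance3d.cpp).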
84 | */ 85 | std::vector Vertices() const; 86 | 87 | protected: 88 | /** The point */ 89 | Eigen::Vector3d point_; 90 | 91 | /** The covariance matrix */ 92 | Eigen::Matrix3d covariance_; 93 | 94 | /** The eigenvalues of the covariance matrix */ 95 | Eigen::Vector3d eigenvalues_; 96 | 97 | /** The eigenvectors of the covariance matrix */ 98 | Eigen::Matrix3d eigenvectors_; 99 | 100 | /** The transformation matrix to scale the confidence ellipsoid */ 101 | Eigen::Matrix3d scaling_; 102 | 103 | /** The 6 vertices of the confidence ellipsoid */ 104 | std::vector vertices_; 105 | }; 106 | 107 | } // namespace fastcd 108 | -------------------------------------------------------------------------------- /src/fastcd/point_covariance2d.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | #include 4 | #include 5 | 6 | namespace fastcd { 7 | 8 | /** 9 | * @brief Class for storing a 2D point, its covariance and the 10 | * corresponding confidence ellipse. 11 | */ 12 | class PointCovariance2d { 13 | public: 14 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 15 | 16 | /** 17 | * @brief Empty constructor. 18 | */ 19 | PointCovariance2d(); 20 | 21 | /** 22 | * @brief Constructor. It computes all the parameters of the ellipse from 23 | * point, covariance and chi square; 24 | * 25 | * @param[in] point The point 26 | * @param[in] covariance The covariance 27 | * @param[in] chi_square The chi square. 95% of the variance retained by 28 | * default 29 | */ 30 | PointCovariance2d(const Eigen::Vector2d &point, 31 | const Eigen::Matrix2d &covariance, 32 | double chi_square = 5.991); 33 | 34 | /** 35 | * @brief Gets the point. 36 | * 37 | * @return The point. 38 | */ 39 | Eigen::Vector2d Point(); 40 | 41 | /** 42 | * @brief Gets the covariance matrix. 43 | * 44 | * @return The covariance matrix. 45 | */ 46 | Eigen::Matrix2d Covariance(); 47 | 48 | /** 49 | * @brief Gets the eigenvalues of the covariance matrix. 50 | * 51 | * @return The eigenvalues of the covariance matrix. 52 | */ 53 | Eigen::Vector2d Eigenvalues(); 54 | 55 | /** 56 | * @brief Gets the eigenvectors of the covariance matrix. 57 | * 58 | * @return The eigenvectors of the covariance matrix. 59 | */ 60 | Eigen::Matrix2d Eigenvectors(); 61 | 62 | /** 63 | * @brief Gets the angle of the confidence ellipse w.r.t. the x-axis. 64 | * 65 | * @return The angle of the confidence ellipse w.r.t. the x-axis. 66 | */ 67 | double Angle(); 68 | 69 | /** 70 | * @brief Gets the length of the major axis of the confidence ellipse. 71 | * 72 | * @return The length of the major axis of the confidence ellipse. 73 | */ 74 | double HalfMajorAxisSize(); 75 | 76 | /** 77 | * @brief Gets the length of the minor axis of the confidence ellipse. 78 | * 79 | * @return The length of the minor axis of the confidence ellipse. 80 | */ 81 | double HalfMinorAxisSize(); 82 | 83 | /** 84 | * @brief Gets the vertices of the confidence ellipse. 85 | * 86 | * @return The 4 vertices of the confidence ellipse. 87 | */ 88 | std::vector Vertices(); 89 | 90 | protected: 91 | /** The point */ 92 | Eigen::Vector2d point_; 93 | 94 | /** The covariance matrix */ 95 | Eigen::Matrix2d covariance_; 96 | 97 | /** The eigenvalues of the covariance matrix */ 98 | Eigen::Vector2d eigenvalues_; 99 | 100 | /** The eigenvectors of the covariance matrix */ 101 | Eigen::Matrix2d eigenvectors_; 102 | 103 | /** The angle of the confidence ellipse w.r.t. 
the x-axis */ 104 | double angle_; 105 | 106 | /** The length of the major axis of the confidence ellipse */ 107 | double half_major_axis_size_; 108 | 109 | /** The length of the minor axis of the confidence ellipse */ 110 | double half_minor_axis_size_; 111 | 112 | /** The 4 vertices of the confidence ellipse */ 113 | std::vector vertices_; 114 | }; 115 | 116 | } // namespace fastcd 117 | -------------------------------------------------------------------------------- /src/example.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "fastcd/change_detector.h" 7 | #include "visualizer/visualizer.h" 8 | #include "utils/obj_reader.h" 9 | 10 | struct Dataset { 11 | bool loaded = false; 12 | Mesh model; 13 | std::vector images; 14 | }; 15 | 16 | Dataset LoadDataset(char *path, int n_images) { 17 | Dataset data; 18 | std::ostringstream filename; 19 | if (path[strlen(path) - 1] != '/') { 20 | filename << path << "/"; 21 | } else { 22 | filename << path; 23 | } 24 | std::string basepath = filename.str(); 25 | 26 | std::cout << "Loading model... " << std::flush; 27 | filename << "model.obj"; 28 | try { 29 | data.model = ObjReader::FromFile(filename.str()); 30 | } catch (...) { 31 | std::cout << "Error reading Obj file!" << std::endl; 32 | return Dataset(); 33 | } 34 | std::cout << "Done." << std::endl; 35 | 36 | std::cout << "Loading images... " << std::flush; 37 | for (int i = 0; i < n_images; i++) { 38 | fastcd::Camera cam; 39 | filename.str(""); 40 | filename.clear(); 41 | filename << basepath << "cameras.xml"; 42 | try { 43 | cam.ReadCalibration(filename.str(), i); 44 | } catch (...) { 45 | std::cout << "Error reading calibration!" << std::endl; 46 | return Dataset(); 47 | } 48 | 49 | fastcd::Image img; 50 | filename.str(""); 51 | filename.clear(); 52 | filename << basepath << "images/Image" << i << ".JPG"; 53 | if (!img.LoadImage(filename.str(), cam)) { 54 | std::cout << "Error reading images!" << std::endl; 55 | return Dataset(); 56 | } 57 | data.images.push_back(img); 58 | } 59 | std::cout << "Done." << std::endl; 60 | data.loaded = true; 61 | return data; 62 | } 63 | 64 | int main(int argc, char **argv) { 65 | if (argc < 2) { 66 | std::cout << "Usage: fastcd_example DATASET_PATH" << std::endl; 67 | return -1; 68 | } 69 | 70 | // Initialize the viewer 71 | QApplication app(argc, argv); 72 | fastcd::Visualizer visualizer; 73 | 74 | // Load data 75 | Dataset data = LoadDataset(argv[1], 5); 76 | if (!data.loaded) return -1; 77 | 78 | // Initialize Change Detector 79 | fastcd::ChangeDetector::ChangeDetectorOptions cd_opts; 80 | cd_opts.cache_size = 10; 81 | cd_opts.max_comparisons = 4; 82 | cd_opts.threshold_change_area = 50; 83 | cd_opts.rescale_width = 500; 84 | cd_opts.chi_square2d = 3.219; 85 | cd_opts.chi_square3d = 4.642; 86 | fastcd::ChangeDetector change_detector(data.model, cd_opts); 87 | 88 | // Add the images to the sequence. Each new image is compared with the others 89 | // and the inconsistencies are updated. 
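  // The second argument of AddImage is the comparison kernel size in pixels,
  // i.e. the window over which an image is compared with the others; it
  // accounts for the uncertainty of the camera poses (see ImageSequence::AddImage).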
90 | for (auto &img : data.images) { 91 | change_detector.AddImage(img, 3); 92 | } 93 | 94 | // Compute and get the changes in the environment in the form of mean position 95 | // and covariance of the points of the regions 96 | std::vector changes = change_detector.GetChanges(); 97 | 98 | // Visualize the result 99 | for (auto &img : data.images) { 100 | visualizer.addCamera(img.GetCamera(), 101 | fastcd::Visualizer::rgb2float(0.0f, 1.0f, 0.0f), 1.0f); 102 | } 103 | visualizer.addMesh(data.model); 104 | for (auto change : changes) { 105 | visualizer.addPoint(change.Point().cast(), 0.2, 106 | fastcd::Visualizer::rgb2float(1.0f, 1.0f, 1.0f)); 107 | Mesh ellipsoid = change.ToMesh(100, 100, 0.0f, 0.0f, 1.0f, 0.5f); 108 | visualizer.addMesh(ellipsoid); 109 | } 110 | visualizer.show(); 111 | visualizer.resize(1504, 1000); 112 | 113 | int32_t ret = app.exec(); 114 | return ret; 115 | } 116 | -------------------------------------------------------------------------------- /src/fastcd/point_covariance3d.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include "fastcd/point_covariance3d.h" 3 | #include 4 | #include "fastcd/mesh.h" 5 | 6 | namespace fastcd { 7 | 8 | PointCovariance3d::PointCovariance3d(const Eigen::Vector3d &point, 9 | const Eigen::Matrix3d &covariance, double chi_square) 10 | : point_(point), covariance_(covariance) { 11 | Eigen::SelfAdjointEigenSolver es(covariance_); 12 | eigenvalues_ = es.eigenvalues(); 13 | eigenvectors_ = es.eigenvectors(); 14 | scaling_.setZero(); 15 | for (int i = 0; i < 3; i++) { 16 | scaling_(i, i) = sqrt(chi_square) * sqrt(eigenvalues_(i)); 17 | } 18 | Eigen::Vector3d vertex; 19 | vertex << 1, 0, 0; 20 | vertex = eigenvectors_ * scaling_ * vertex + point_; 21 | vertices_.push_back(vertex); 22 | vertex << -1, 0, 0; 23 | vertex = eigenvectors_ * scaling_ * vertex + point_; 24 | vertices_.push_back(vertex); 25 | vertex << 0, 1, 0; 26 | vertex = eigenvectors_ * scaling_ * vertex + point_; 27 | vertices_.push_back(vertex); 28 | vertex << 0, -1, 0; 29 | vertex = eigenvectors_ * scaling_ * vertex + point_; 30 | vertices_.push_back(vertex); 31 | vertex << 0, 0, 1; 32 | vertex = eigenvectors_ * scaling_ * vertex + point_; 33 | vertices_.push_back(vertex); 34 | vertex << 0, 0, -1; 35 | vertex = eigenvectors_ * scaling_ * vertex + point_; 36 | vertices_.push_back(vertex); 37 | } 38 | 39 | Mesh PointCovariance3d::ToMesh(int stacks, int slices, float r, float g, 40 | float b, float a) const { 41 | Mesh::Material material; 42 | material.ambient = glow::vec3(0.1, 0.1, 0.1); 43 | material.diffuse = glow::vec3(r, g, b); 44 | material.specular = glow::vec3(0, 0, 0); 45 | material.emission = glow::vec3(0, 0, 0); 46 | material.alpha = a; 47 | std::vector materials; 48 | materials.push_back(material); 49 | 50 | std::vector vertices; 51 | float t_step = M_PI / static_cast(slices); 52 | float s_step = M_PI / static_cast(stacks); 53 | for (float t = -M_PI / 2; t <= (M_PI / 2) + .0001; t += t_step) { 54 | for (float s = -M_PI; s <= M_PI + .0001; s += s_step) { 55 | Eigen::Vector3d vertex; 56 | vertex << cos(t) * cos(s), cos(t) * sin(s), sin(t); 57 | vertex = eigenvectors_ * scaling_ * vertex + point_; 58 | Mesh::Vertex v1; 59 | v1.position = glow::vec4(vertex(0), vertex(1), vertex(2), 1); 60 | vertices.push_back(v1); 61 | 62 | Mesh::Vertex v2; 63 | vertex << cos(t + t_step) * cos(s), cos(t + t_step) * sin(s), 64 | sin(t + 
t_step); 65 | vertex = eigenvectors_ * scaling_ * vertex + point_; 66 | v2.position = glow::vec4(vertex(0), vertex(1), vertex(2), 1); 67 | vertices.push_back(v2); 68 | } 69 | } 70 | 71 | std::vector triangles; 72 | for (size_t i = 2; i < vertices.size(); i++) { 73 | Mesh::Triangle t; 74 | t.vertices[0] = i-2; 75 | t.vertices[1] = i % 2 == 0 ? i - 1 : i; 76 | t.vertices[2] = i % 2 == 0 ? i : i - 1; 77 | 78 | if (t.vertices[0] != t.vertices[1] && t.vertices[1] != t.vertices[2] && 79 | t.vertices[2] != t.vertices[0]) { 80 | triangles.push_back(t); 81 | } 82 | } 83 | 84 | Mesh mesh(vertices, triangles, materials); 85 | return mesh; 86 | } 87 | 88 | Eigen::Vector3d PointCovariance3d::Point() const { return point_; } 89 | 90 | Eigen::Matrix3d PointCovariance3d::Covariance() const { return covariance_; } 91 | 92 | Eigen::Vector3d PointCovariance3d::Eigenvalues() const { return eigenvalues_; } 93 | 94 | Eigen::Matrix3d PointCovariance3d::Eigenvectors() const { 95 | return eigenvectors_; 96 | } 97 | 98 | Eigen::Matrix3d PointCovariance3d::Scaling() const { return scaling_; } 99 | 100 | std::vector PointCovariance3d::Vertices() const { 101 | return vertices_; 102 | } 103 | 104 | } // namespace fastcd 105 | -------------------------------------------------------------------------------- /src/fastcd/regions3d_projector.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "fastcd/image_sequence.h" 8 | #include "fastcd/processed_image.h" 9 | #include "fastcd/point_covariance3d.h" 10 | 11 | namespace fastcd { 12 | 13 | /** 14 | * @brief Class that backprojects in 3D the changes identified in a 15 | * sequence of images. 16 | */ 17 | class Regions3dProjector { 18 | public: 19 | /** 20 | * @brief Constructor 21 | * 22 | * @param[in] img_sequence The image sequence 23 | * @param[in] chi_square2d The chi square for the ellipses in the images 24 | * (95% of the variance retained by default) 25 | * @param[in] chi_square3d The chi square for the 3D ellipsoids 26 | * (95% of the variance retained by default) 27 | * @param[in] threshold_change_area Threshold area under which regions of 28 | * change in 2D are not considered 29 | */ 30 | Regions3dProjector(std::shared_ptr img_sequence, 31 | int threshold_change_area, double chi_square2d = 5.991, 32 | double chi_square3d = 7.815); 33 | 34 | /** 35 | * @brief Gets the mean points of the changed 3D regions with their 36 | * covariance. 37 | * 38 | * @return The mean points of the changed 3D regions with their 39 | * covariance. 40 | */ 41 | std::vector GetProjectedRegions(); 42 | 43 | protected: 44 | /** 45 | * @brief Triangulates the mean points of the 2D regions in the image 46 | * sequence with a certain label 47 | * 48 | * @param[in] img_deque The deque of images 49 | * @param[in] label The label of the regions to triangulate 50 | * 51 | * @return The triangulated 3D point. 52 | */ 53 | Eigen::Vector3d TriangulateMeanRay( 54 | const std::shared_ptr &img_deque, 55 | const std::pair> &label); 56 | 57 | /** 58 | * @brief Projects the vertices of the ellipses corresponding to a 59 | * certain label onto a plane passing through the triangulated 60 | * mean and parallel to the image plane. 
61 | * 62 | * @param[in] img_deque The deque of images 63 | * @param[in] triangulated_mean The triangulated mean 64 | * @param[in] label The label of the regions to consider 65 | * 66 | * @return The 3D positions of the projected vertices. 67 | */ 68 | std::vector ProjectVertices( 69 | const std::shared_ptr &img_deque, 70 | const Eigen::Vector3d &triangulated_mean, 71 | const std::pair> &label); 72 | 73 | /** 74 | * @brief Compute the skew-symmetric matrix corresponding to the input 75 | * vector. 76 | * 77 | * @param[in] vec The vector 78 | * 79 | * @return The skew-symmetric matrix 80 | */ 81 | Eigen::Matrix3d SkewSymmetric3d(const Eigen::Vector3d &vec); 82 | 83 | /** 84 | * @brief Calculates the mean position of the given 3D points. 85 | * 86 | * @param[in] points The vector of 3D points 87 | * 88 | * @return The mean position. 89 | */ 90 | Eigen::Vector3d ComputeMean(const std::vector &points); 91 | 92 | /** 93 | * @brief Calculates the covariance corresponding to the mean position of 94 | * the given 3D points. 95 | * 96 | * @param[in] points The vector of 3D points 97 | * @param[in] mean The mean position 98 | * 99 | * @return The covariance. 100 | */ 101 | Eigen::Matrix3d ComputeCovariance(const std::vector &points, 102 | const Eigen::Vector3d &mean); 103 | 104 | /** 105 | * The mean of the points of the 3D regions with the corresponding covariance 106 | */ 107 | std::vector projected_regions_; 108 | }; 109 | 110 | } // namespace fastcd 111 | -------------------------------------------------------------------------------- /src/fastcd/change_detector.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include 3 | #include 4 | #include "fastcd/change_detector.h" 5 | #include "fastcd/regions3d_projector.h" 6 | #include "fastcd/processed_image.h" 7 | 8 | namespace fastcd { 9 | 10 | ChangeDetector::ChangeDetector(const Mesh &mesh, 11 | const ChangeDetectorOptions &options) 12 | : mesh_(mesh), options_(options) { 13 | image_sequence_ = std::shared_ptr( 14 | new ImageSequence(options_.cache_size, options_.max_comparisons)); 15 | } 16 | 17 | void ChangeDetector::AddImage(Image &image, int kernel_size) { 18 | // Scale the image to a fixed resolution 19 | double scaling = static_cast(options_.rescale_width) / 20 | static_cast(image.GetCamera().GetWidth()); 21 | image.Scale(scaling); 22 | if (!projector_init_) { 23 | uint32_t width = static_cast(image.GetCamera().GetWidth()); 24 | uint32_t height = static_cast(image.GetCamera().GetHeight()); 25 | Eigen::Matrix4f projection = image.GetCamera().GetGlProjection(0.1f, 50.0f); 26 | depth_projector_ = std::unique_ptr( 27 | new DepthProjector(width, height, projection)); 28 | projector_init_ = true; 29 | } 30 | Eigen::Matrix4f view = image.GetCamera().GetGlView(); 31 | depth_projector_->render(mesh_, view); 32 | std::vector data(static_cast( 33 | image.GetCamera().GetWidth() * image.GetCamera().GetHeight())); 34 | depth_projector_->texture().download(data); 35 | std::shared_ptr processed_img( 36 | new ProcessedImage(image, data)); 37 | image_sequence_->AddImage(processed_img, kernel_size); 38 | } 39 | 40 | std::vector ChangeDetector::GetChanges() { 41 | Regions3dProjector projector(image_sequence_, options_.threshold_change_area, 42 | options_.chi_square2d, options_.chi_square3d); 43 | // Get the bounding box from the mesh and the cameras and discard the regions 44 | // outside it 45 | std::pair 
bounding_box = 46 | mesh_.GetBoundingBox(); 47 | for (int i = 0; i < image_sequence_->size(); i++) { 48 | if ((*image_sequence_)[i].GetCamera().GetPosition()(0) < 49 | bounding_box.first(0)) 50 | bounding_box.first(0) = 51 | (*image_sequence_)[i].GetCamera().GetPosition()(0); 52 | if ((*image_sequence_)[i].GetCamera().GetPosition()(1) < 53 | bounding_box.first(1)) 54 | bounding_box.first(1) = 55 | (*image_sequence_)[i].GetCamera().GetPosition()(1); 56 | if ((*image_sequence_)[i].GetCamera().GetPosition()(2) < 57 | bounding_box.first(2)) 58 | bounding_box.first(2) = 59 | (*image_sequence_)[i].GetCamera().GetPosition()(2); 60 | if ((*image_sequence_)[i].GetCamera().GetPosition()(0) > 61 | bounding_box.second(0)) 62 | bounding_box.second(0) = 63 | (*image_sequence_)[i].GetCamera().GetPosition()(0); 64 | if ((*image_sequence_)[i].GetCamera().GetPosition()(1) > 65 | bounding_box.second(1)) 66 | bounding_box.second(1) = 67 | (*image_sequence_)[i].GetCamera().GetPosition()(1); 68 | if ((*image_sequence_)[i].GetCamera().GetPosition()(2) > 69 | bounding_box.second(2)) 70 | bounding_box.second(2) = 71 | (*image_sequence_)[i].GetCamera().GetPosition()(2); 72 | } 73 | std::vector regions3d = projector.GetProjectedRegions(); 74 | std::vector regions3d_in_model; 75 | for (auto &region : regions3d) { 76 | if (region.Point()(0) > bounding_box.first(0) && 77 | region.Point()(1) > bounding_box.first(1) && 78 | region.Point()(2) > bounding_box.first(2) && 79 | region.Point()(0) < bounding_box.second(0) && 80 | region.Point()(1) < bounding_box.second(1) && 81 | region.Point()(2) < bounding_box.second(2)) { 82 | std::vector vertices = region.Vertices(); 83 | bool out = false; 84 | for (auto &v : vertices){ 85 | if(v(0) < bounding_box.first(0) || 86 | v(1) < bounding_box.first(1) || 87 | v(2) < bounding_box.first(2) || 88 | v(0) > bounding_box.second(0) || 89 | v(1) > bounding_box.second(1) || 90 | v(2) > bounding_box.second(2)){ 91 | out = true; break; 92 | } 93 | } 94 | if (!out) { 95 | regions3d_in_model.push_back(region); 96 | } 97 | } 98 | } 99 | return regions3d_in_model; 100 | } 101 | 102 | } // namespace fastcd 103 | -------------------------------------------------------------------------------- /src/fastcd/mesh.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Jens Behley (jens.behley@igg.uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | /** 12 | * @brief Class that represents a mesh with possibly multiple materials. 13 | * It contains all the vertex buffers for drawing.
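 *        A minimal usage sketch (the .obj path is a placeholder; see
 *        src/example.cpp and the visualizer for the project's actual setup):
 *
 *          Mesh mesh = ObjReader::FromFile("path/to/model.obj"); // needs a current OpenGL context:
 *                                                                // buffers and VAOs are allocated
 *                                                                // on construction
 *          mesh.draw(program);  // program: a compatible glow::GlProgram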
14 | */ 15 | class Mesh { 16 | public: 17 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 18 | /** 19 | * @brief Struct that represents a material 20 | */ 21 | struct Material { 22 | public: 23 | Material() 24 | : ambient(0.1, 0.1, 0.1), 25 | diffuse(0.5, 0.5, 0.8), 26 | specular(0, 0, 0), 27 | emission(0, 0, 0) {} 28 | 29 | /** The ambient component of the material */ 30 | glow::vec3 ambient; 31 | 32 | /** The diffuse component of the material */ 33 | glow::vec3 diffuse; 34 | 35 | /** The specular component of the material */ 36 | glow::vec3 specular; 37 | 38 | /** The emission component of the material */ 39 | glow::vec3 emission; 40 | 41 | /** The specular exponent of the material */ 42 | float shininess{1.0f}; 43 | 44 | /** The alpha value of the material */ 45 | float alpha{1.0f}; 46 | }; 47 | 48 | 49 | /** 50 | * @brief Struct that represents a vertex 51 | */ 52 | struct Vertex { 53 | public: 54 | /** The position of the vertex */ 55 | glow::vec4 position; 56 | 57 | /** The normal of the vertex */ 58 | glow::vec4 normal; 59 | 60 | /** The texture coordinate of the vertex */ 61 | glow::vec2 texture; 62 | }; 63 | 64 | /** 65 | * @brief Struct that represents a triangle 66 | */ 67 | struct Triangle { 68 | public: 69 | /** The indices of the vertices of the triangle */ 70 | uint32_t vertices[3]; 71 | 72 | /** The material of the triangle */ 73 | uint32_t material{0}; 74 | }; 75 | 76 | /** 77 | * @brief Empty constructor. 78 | */ 79 | Mesh(); 80 | 81 | /** 82 | * @brief Constructor. Initializes the mesh with a default material. 83 | * 84 | * @param[in] vertices The vertices 85 | * @param[in] faces The faces 86 | */ 87 | Mesh(const std::vector& vertices, const std::vector& faces); 88 | 89 | /** 90 | * @brief Constructor. 91 | * 92 | * @param[in] vertices The vertices 93 | * @param[in] faces The faces 94 | * @param[in] materials The materials 95 | */ 96 | Mesh(const std::vector& vertices, const std::vector& faces, 97 | const std::vector& materials); 98 | 99 | 100 | /** 101 | * @brief Draws the mesh using the specified program. 102 | * 103 | * @param program The program 104 | */ 105 | void draw(glow::GlProgram& program) const; 106 | 107 | /** 108 | * @brief Gets the material with index idx. 109 | * 110 | * @param[in] idx The index of the material 111 | * 112 | * @return The material. 113 | */ 114 | const Material& material(uint32_t idx) const { 115 | if (idx >= materials_.size()) 116 | throw std::runtime_error("non-existent material."); 117 | return materials_[idx].value(); 118 | } 119 | 120 | /** 121 | * @brief Gets the bounding box of the mesh in the form of a pair of 122 | * points representing the rectangle 123 | * 124 | * @return The bounding box. 125 | */ 126 | std::pair GetBoundingBox(); 127 | 128 | protected: 129 | /** 130 | * @brief Initializes the mesh. 131 | * 132 | * @param[in] vertices The vertices 133 | * @param[in] faces The faces 134 | * @param[in] materials The materials 135 | */ 136 | void initialize(const std::vector& vertices, 137 | const std::vector& faces, 138 | const std::vector& materials); 139 | 140 | /** 141 | * @brief Compute the length of a vector. 142 | * 143 | * @param[in] v The vector 144 | * 145 | * @return The lenght of the vector. 
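 *         (computed from the x, y and z components only, i.e.
 *         sqrt(v.x * v.x + v.y * v.y + v.z * v.z); the w component is ignored)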
146 | */ 147 | float length(const glow::vec4& v); 148 | 149 | /** The vertices of the mesh */ 150 | glow::GlBuffer vertices_{glow::BufferTarget::ARRAY_BUFFER, 151 | glow::BufferUsage::STATIC_DRAW}; 152 | 153 | /** The VAOs used to draw the mesh */ 154 | std::vector vaos_; 155 | 156 | /** The triangles of the mesh per material */ 157 | std::vector > triangles_; 158 | 159 | /** The materials of the mesh */ 160 | std::vector > materials_; 161 | 162 | /** The vertex of the mesh with the maximum coordinates */ 163 | Eigen::Vector3d max_ = Eigen::Vector3d::Zero(); 164 | 165 | /** The vertex of the mesh with the minimum coordinates */ 166 | Eigen::Vector3d min_ = Eigen::Vector3d::Zero(); 167 | }; 168 | -------------------------------------------------------------------------------- /src/fastcd/camera.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | namespace fastcd { 10 | 11 | /** 12 | * @brief Class that represents a camera. It stores its calibration and 13 | * allows the projection and back-projection of points. 14 | */ 15 | class Camera { 16 | public: 17 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 18 | 19 | /** 20 | * @brief Reads the camera calibration from an XML file with Agisoft 21 | * format. 22 | * 23 | * @param[in] filename The file path. 24 | * @param[in] id The camera id in the XML file. 25 | */ 26 | void ReadCalibration(const std::string& filename, int id); 27 | 28 | /** 29 | * @brief Scales the camera calibration by the specified scaling factor. 30 | * 31 | * @param[in] scaling The scaling factor 32 | */ 33 | void ScaleCalibration(double scaling); 34 | 35 | /** 36 | * @brief Gets the calibration matrix. 37 | * 38 | * @return The calibration matrix. 39 | */ 40 | Eigen::Matrix3d GetK() const; 41 | 42 | /** 43 | * @brief Gets the inverse calibration matrix. 44 | * 45 | * @return The inverse calibration matrix. 46 | */ 47 | Eigen::Matrix3d GetInvK() const; 48 | 49 | /** 50 | * @brief Gets the pose of the camera. 51 | * 52 | * @return The pose of the camera. 53 | */ 54 | Eigen::Matrix4d GetPose() const; 55 | 56 | /** 57 | * @brief Gets the position of the camera. 58 | * 59 | * @return The position of the camera. 60 | */ 61 | Eigen::Vector3d GetPosition() const; 62 | 63 | /** 64 | * @brief Compute the OpenGL projection matrix relative to the camera. 65 | * 66 | * @return The OpenGL projection matrix. 67 | */ 68 | Eigen::Matrix4f GetGlProjection(float near, float far); 69 | 70 | /** 71 | * @brief Compute the OpenGL view matrix relative to the camera. 72 | * 73 | * @return The OpenGL view matrix. 74 | */ 75 | Eigen::Matrix4f GetGlView(); 76 | 77 | /** 78 | * @brief Return a vector of vertices for an OpenGL visualization of the 79 | * camera in the form of a line strip. 80 | * 81 | * @param[in] color The color of the line strip 82 | * @param[in] length The length of the four edges of the camera 83 | * 84 | * @return The vector of vertices. 85 | */ 86 | std::vector GetGlFovVertices(float color, float length); 87 | 88 | /** 89 | * @brief Gets the width of the image plane in pixels. 90 | * 91 | * @return The width of the image plane in pixels. 92 | */ 93 | int GetWidth(); 94 | 95 | /** 96 | * @brief Gets the height of the image plane in pixels. 97 | * 98 | * @return The height of the image plane in pixels. 
99 | */ 100 | int GetHeight(); 101 | 102 | /** 103 | * @brief Gets the camera projection matrix. 104 | * 105 | * @return The camera projection matrix. 106 | */ 107 | Eigen::Matrix GetP() const; 108 | 109 | /** 110 | * @brief Projects a point on the image plane. 111 | * 112 | * @param[in] x The x coordinate 113 | * @param[in] y The y coordinate 114 | * @param[in] z The z coordinate 115 | * 116 | * @return The (u,v) coordinates of the point. 117 | */ 118 | Eigen::Vector2i Project(double x, double y, double z) const; 119 | 120 | /** 121 | * @brief Projects a point on the image plane. 122 | * 123 | * @param[in] point The 3D point 124 | * 125 | * @return The (u,v) coordinates of the point. 126 | */ 127 | Eigen::Vector2i Project(const Eigen::Vector3d& point) const; 128 | 129 | /** 130 | * @brief Back-projects a pixel. 131 | * 132 | * @param[in] u The u coordinate 133 | * @param[in] v The v coordinate 134 | * 135 | * @return The 3D coordinate of a point in the direction of the ray. 136 | */ 137 | Eigen::Vector3d BackProject(int u, int v); 138 | 139 | /** 140 | * @brief Back-projects a pixel. 141 | * 142 | * @param[in] point The 2D point 143 | * 144 | * @return The 3D coordinate of a point in the direction of the ray 145 | */ 146 | Eigen::Vector3d BackProject(const Eigen::Vector2i &point); 147 | 148 | /** 149 | * @brief Prints the camera parameters on the output stream. 150 | */ 151 | friend std::ostream& operator<<(std::ostream& os, const Camera& c); 152 | 153 | protected: 154 | /** Width of the image plane (in pixels) */ 155 | int width_ = 0; 156 | 157 | /** Height of the image plane (in pixels) */ 158 | int height_ = 0; 159 | 160 | /** Calibration matrix */ 161 | Eigen::Matrix3d calibration_ = Eigen::Matrix3d::Identity(); 162 | 163 | /** Inverse of calibration matrix */ 164 | Eigen::Matrix3d inverse_calibration_ = Eigen::Matrix3d::Identity(); 165 | 166 | /** Pose of the camera */ 167 | Eigen::Matrix4d pose_ = Eigen::Matrix4d::Identity(); 168 | }; 169 | 170 | } // namespace fastcd 171 | -------------------------------------------------------------------------------- /src/visualizer/visualizer.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Jens Behley (jens.behley@igg.uni-bonn.de), Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), , Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | 4 | #include 5 | 6 | #include 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include "fastcd/mesh.h" 16 | #include "fastcd/camera.h" 17 | 18 | namespace fastcd { 19 | 20 | /** 21 | * @brief Class for an OpenGL visualization. 22 | */ 23 | class Visualizer : public QGLWidget { 24 | Q_OBJECT 25 | 26 | public: 27 | /** 28 | * @brief Constructor. 29 | */ 30 | Visualizer(QWidget* parent = 0, const QGLWidget* shareWidget = 0, 31 | Qt::WindowFlags f = 0); 32 | 33 | /** 34 | * @brief Adds a mesh to the 3D world. 35 | * 36 | * @param mesh The mesh 37 | */ 38 | void addMesh(Mesh& mesh); 39 | 40 | /** 41 | * @brief Sets the background color. 42 | * 43 | * @param[in] r The red value 44 | * @param[in] g The green value 45 | * @param[in] b The blue value 46 | * @param[in] a The alpha value 47 | */ 48 | void setBackgroundColor(float r, float g, float b, float a); 49 | 50 | /** 51 | * @brief Sets the texture to be painted on the 3D model. 
(for debug 52 | * purpose) 53 | * 54 | * @param texture The texture 55 | */ 56 | void setOutput(glow::GlTexture& texture); 57 | 58 | /** 59 | * @brief Adds a camera do be drawn in the 3D world. Its position is also 60 | * a teleport point of the user camera. 61 | * 62 | * @param[in] camera The camera 63 | * @param[in] color The color of the visualized frustum 64 | * @param[in] length The length of the visualized frustum 65 | */ 66 | void addCamera(Camera camera, float color, float length); 67 | 68 | /** 69 | * @brief Adds a point to be drawn in the 3D world (as a cross). 70 | * 71 | * @param[in] position The position of the point 72 | * @param[in] size The size of the point 73 | * @param[in] color The color of the point 74 | */ 75 | void addPoint(const Eigen::Vector3f& position, float size, float color); 76 | 77 | /** 78 | * @brief Adds a line to be drawn in the 3D world. 79 | * 80 | * @param[in] start The starting point of the line 81 | * @param[in] end The ending point of the line 82 | * @param[in] color The color of the line 83 | */ 84 | void addLine(const Eigen::Vector3f& start, const Eigen::Vector3f& end, 85 | float color); 86 | 87 | /** 88 | * @brief Helper fuction that packs the red, green and blue values of a 89 | * color in a single float. 90 | * 91 | * @param[in] r The red value 92 | * @param[in] g The green value 93 | * @param[in] b The blue value 94 | * 95 | * @return The color packed in a single float. 96 | */ 97 | static float rgb2float(float r, float g, float b); 98 | 99 | void ShowAxis(bool option); 100 | 101 | protected: 102 | bool initContext(); 103 | 104 | void initializeGL(); 105 | void resizeGL(int w, int h); 106 | void paintGL(); 107 | 108 | void mousePressEvent(QMouseEvent* event); 109 | void mouseReleaseEvent(QMouseEvent* event); 110 | void mouseMoveEvent(QMouseEvent* event); 111 | void keyPressEvent(QKeyEvent* event); 112 | void keyReleaseEvent(QKeyEvent* event); 113 | 114 | glow::GlCamera::KeyboardModifier resolveKeyboardModifier( 115 | Qt::KeyboardModifiers modifiers); 116 | glow::GlCamera::MouseButton resolveMouseButton(Qt::MouseButtons button); 117 | glow::GlCamera::KeyboardKey resolveKeyboardKey(Qt::Key key); 118 | // hack to have GLEW initalized before anything else. 119 | bool contextInitialized_; 120 | 121 | glow::GlUniform mvp_{"mvp", Eigen::Matrix4f::Identity()}; 122 | glow::GlUniform mvp_inv_t_{"mvp_inv_t", 123 | Eigen::Matrix4f::Identity()}; 124 | 125 | QTimer timer_; 126 | 127 | glow::FpsCamera camera_; 128 | 129 | Eigen::Matrix4f model_{Eigen::Matrix4f::Identity()}; 130 | Eigen::Matrix4f view_{Eigen::Matrix4f::Identity()}; 131 | Eigen::Matrix4f projection_{Eigen::Matrix4f::Identity()}; 132 | 133 | std::vector meshes_; 134 | glow::GlProgram prgDrawMesh_; 135 | glow::GlProgram coloredPointProgram_; 136 | 137 | glow::GlBuffer coordAxisVBO_{glow::BufferTarget::ARRAY_BUFFER, 138 | glow::BufferUsage::STATIC_DRAW}; // x, y, z, color(float) 139 | glow::GlVertexArray coordAxisVAO_; 140 | std::vector> linesVBOs_; 141 | std::vector linesVAOs_; 142 | 143 | // visualization of the image points. 
144 | bool show_output_{false}; 145 | glow::GlTexture output_{10, 10, glow::TextureFormat::RGBA_FLOAT}; 146 | glow::GlVertexArray vao_img_coords_; 147 | glow::GlBuffer vbo_img_coords_{glow::BufferTarget::ARRAY_BUFFER, 148 | glow::BufferUsage::STATIC_DRAW}; 149 | glow::GlSampler sampler_; 150 | glow::GlProgram renderPoints_; 151 | 152 | std::vector bg_color_{0.0f, 0.0f, 0.0f, 1.0f}; 153 | std::vector cameras_; 154 | 155 | bool show_axis_{true}; 156 | }; 157 | 158 | } // namespace fastcd 159 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > [!IMPORTANT] 2 | > **Repository Archived** 3 | > This repository has been archived and is no longer actively maintained. You are welcome to explore the code, but please note that no further updates, issues, or pull requests will be accepted. 4 | > 5 | > Thank you for your interest and contributions. 6 | 7 | # Fast Change Detection 8 | 9 | ## Description 10 | 11 | The program allows to identify, in real-time, changes on a 3D model from a sequence of images. 12 | 13 | The idea is to first detect inconsistencies between pairs of images by reprojecting an image onto another one by passing through the 3D model. Ambiguities about possible inconsistencies resulting from this process are then resolved by combining multiple images. Finally, the 3D location of the change is estimated by projecting in 3D these inconsistencies. 14 | 15 | Check out the video: 16 | 17 | [![Fast Change Detection Video](http://img.youtube.com/vi/DEkOYf4Zzh4/0.jpg)](https://www.youtube.com/watch?v=DEkOYf4Zzh4&feature=youtu.be "Fast Change Detection Video") 18 | 19 | For further details, see the paper ["Fast Image-Based Geometric Change Detection Given a 3D Model"](http://www.ipb.uni-bonn.de/pdfs/palazzolo2018icra.pdf). 20 | 21 | ## Key contributors 22 | 23 | Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de) 24 | 25 | ## Key assumptions 26 | 27 | * The input images are calibrated and registered w.r.t. the 3D model 28 | 29 | ## Related publications 30 | 31 | If you use this code for your research, please cite: 32 | 33 | E. Palazzolo and C. Stachniss, “Fast Image-Based Geometric Change Detection Given a 3D Model”, in _Proceedings of the IEEE Int. Conf. on Robotics & Automation (ICRA)_, 2018. [PDF](http://www.ipb.uni-bonn.de/pdfs/palazzolo2018icra.pdf) 34 | 35 | BibTeX: 36 | ``` 37 | @InProceedings{palazzolo2018icra, 38 | Title = {{Fast Image-Based Geometric Change Detection Given a 3D Model}}, 39 | Author = {E. Palazzolo and C. Stachniss}, 40 | Booktitle = {Proceedings of the IEEE Int. Conf. 
on Robotics & Automation (ICRA)}, 41 | Year = {2018} 42 | } 43 | ``` 44 | 45 | ## Dependencies 46 | 47 | * catkin 48 | * Eigen >= 3.2 49 | * boost >= 1.54 50 | * OpenCV >= 2.4 51 | * QT >= 5.2 52 | * OpenGL >= 3.3 53 | * [glow](https://github.com/jbehley/glow) (catkin package) 54 | * (optional) Doxygen >= 1.8.11 55 | 56 | ## Installation guide 57 | 58 | On Ubuntu 16.04, most of the dependencies can be installed from the package manager: 59 | ```bash 60 | sudo apt install git libeigen3-dev libboost-all-dev qtbase5-dev libglew-dev libopencv-dev catkin 61 | ``` 62 | 63 | Additionally, make sure you have [catkin-tools](https://catkin-tools.readthedocs.io/en/latest/) and the [fetch](https://github.com/Photogrammetry-Robotics-Bonn/catkin_tools_fetch) verb installed: 64 | ```bash 65 | sudo apt install python-pip 66 | sudo pip install catkin_tools catkin_tools_fetch empy 67 | ``` 68 | 69 | Finally, if you also want to build the documentation you need Doxygen installed (tested only with Doxygen 1.8.11): 70 | ```bash 71 | sudo apt install doxygen 72 | ``` 73 | 74 | If you do not have a catkin workspace already, create one: 75 | ```bash 76 | cd 77 | mkdir catkin_ws 78 | cd catkin_ws 79 | mkdir src 80 | catkin init 81 | cd src 82 | git clone https://github.com/ros/catkin.git 83 | ``` 84 | Clone the repository in your catkin workspace: 85 | ```bash 86 | cd ~/catkin_ws/src 87 | git clone https://github.com/Photogrammetry-Robotics-Bonn/fast_change_detection.git 88 | ``` 89 | Download the additional dependencies: 90 | ```bash 91 | catkin deps fetch 92 | ``` 93 | Then, build the project: 94 | ```bash 95 | catkin build fast_change_detection 96 | ``` 97 | Now the project root directory (e.g. `~/catkin_ws/src/fast_change_detection`) should contain a `bin` directory containing an example binary and, if Doxygen is installed, a `docs` directory containing the documentation. 98 | 99 | ## How to use it 100 | 101 | The `ChangeDetector` class is the core of the program. Its constructor requires 102 | the 3D model of the environment and the options. Use the `AddImage` member function to 103 | add an image and compute the inconsistencies with the others. Use the `GetChanges` 104 | member function to compute and get the changes in the form of mean position and 105 | covariance of the points of the 3D regions. 106 | 107 | Refer to the documentation and to the source code for further details. An example 108 | that illustrates how to use the library is located in `src/example.cpp`. 109 | 110 | ## Examples / datafiles 111 | 112 | After the build process, the `bin` directory in the project root directory (e.g. `~/catkin_ws/src/fast_change_detection`) will contain an example binary. 113 | To run it execute from the command line: 114 | ```bash 115 | cd ~/catkin_ws/src/fast_change_detection/bin 116 | ./fastcd_example DATASET_PATH 117 | ``` 118 | where `DATASET_PATH` is the path to the directory of a dataset (e.g. `~/changedetection2017/statue`). 119 | Some example datasets can be found [here](http://www.ipb.uni-bonn.de/data/changedetection2017/). 120 | 121 | Alternatively, the `example` directory contains some scripts to immediately test the library. 
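In code, the pipeline run by the example binary boils down to a sketch like the one below. The types and member functions are the ones declared in this repository's headers, but the option values, the model path, and the image-loading step are illustrative placeholders rather than the exact contents of `src/example.cpp`:

```cpp
#include <vector>
#include "fastcd/change_detector.h"
#include "fastcd/point_covariance3d.h"
#include "utils/obj_reader.h"

int main() {
  // Mesh construction allocates OpenGL buffers, so a valid GL context must already exist here.
  Mesh mesh = ObjReader::FromFile("dataset/statue/model.obj");  // placeholder path

  fastcd::ChangeDetectorOptions options;   // illustrative values
  options.cache_size = 10;                 // forwarded to the internal ImageSequence
  options.max_comparisons = 4;             // forwarded to the internal ImageSequence
  options.rescale_width = 500;             // input images are rescaled to this width
  options.threshold_change_area = 50;      // 2D change regions smaller than this are ignored
  options.chi_square2d = 5.991;            // 95% ellipse in 2D (default used in the headers)
  options.chi_square3d = 7.815;            // 95% ellipsoid in 3D (default used in the headers)
  fastcd::ChangeDetector detector(mesh, options);

  // Calibrated images registered to the model, loaded elsewhere (see src/example.cpp).
  std::vector<fastcd::Image> images;
  for (fastcd::Image& img : images) {
    detector.AddImage(img, /*kernel_size=*/3);  // kernel_size: window for the inconsistency check
  }

  // Mean position and covariance (PointCovariance3d) of each detected 3D change region.
  std::vector<fastcd::PointCovariance3d> changes = detector.GetChanges();
  (void)changes;
  return 0;
}
```

To run the ready-made scripts instead: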
122 | Make sure you have `wget` installed to download the dataset and `zip` to uncompress it: 123 | ```bash 124 | sudo apt install wget zip 125 | ``` 126 | Download and uncompress the dataset: 127 | ```bash 128 | cd ~/catkin_ws/src/fast_change_detection/example 129 | ./download_dataset.sh 130 | ``` 131 | Execute the example program: 132 | ```bash 133 | ./run_example.sh 134 | ``` 135 | 136 | A 3D visualization of the model will appear. The changes are represented by the blue ellipsoids. 137 | Rotate the view with the mouse while the right button is pressed. The keyboard is mapped as following: 138 | * W/A/S/D -> Move around 139 | * Space -> Move up vertically 140 | * C -> Move down vertically 141 | * Shift -> Keep pressed to move faster 142 | * 1-0 -> Teleport the camera to the pose of one of the first 10 cameras 143 | 144 | ## License 145 | 146 | This project is licensed under the FreeBSD License. See the LICENSE.txt file for details. 147 | 148 | ## Acknowledgments 149 | 150 | This work has partly been supported by the DFG under the grant number FOR~1505: Mapping on Demand. 151 | -------------------------------------------------------------------------------- /src/fastcd/processed_image.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #pragma once 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "fastcd/camera.h" 9 | #include "fastcd/point_covariance2d.h" 10 | #include "fastcd/image.h" 11 | #include "fastcd/image_region.h" 12 | 13 | namespace fastcd { 14 | 15 | /** 16 | * @brief Class that stores an image, its camera calibration, and all the 17 | * necessary information for the change detection algorithm. It also 18 | * includes the algorithms to generate such information. 19 | */ 20 | class ProcessedImage : public Image { 21 | public: 22 | /** 23 | * @brief Constructor. 24 | * 25 | * @param[in] img The image 26 | * @param[in] depth The depth image w.r.t. the 3D model 27 | */ 28 | explicit ProcessedImage(const Image &img, 29 | const std::vector &depth); 30 | /** 31 | * @brief Warps the image using camera calibration of the provided image 32 | * and the depth. 33 | * 34 | * @param[in] target_image The target image 35 | * 36 | * @return The warped image 37 | */ 38 | Image Warp(const ProcessedImage &target_image) const; 39 | 40 | /** 41 | * @brief Update the inconsistency image by comparing this image with the 42 | * one passed as input (it computes the minimum difference over a 43 | * window of size kernel_size). 44 | * 45 | * @param[in] image The new image to be compared 46 | * @param[in] kernel_size The size of the window over which the minimum 47 | * difference is computed 48 | */ 49 | void UpdateInconsistencies(const Image &image, int kernel_size); 50 | 51 | /** 52 | * @brief Gets the number of images compared with this one. 53 | * 54 | * @return The number of images compared with this one. 55 | */ 56 | int ImageCompared(); 57 | 58 | /** 59 | * @brief Calculates the mean position and covariance of the points of 60 | * each region with the same label. 61 | * 62 | * @param[in] chi_square The chi square value (95% of the variance retained 63 | * by default) 64 | * 65 | * @return The labels of the processed regions. 66 | */ 67 | std::vector ComputeMeansCovariances(double chi_square = 5.991); 68 | 69 | /** 70 | * @brief Extract the regions where changes occur from the inconsistency 71 | * image. 
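 *        (In broad terms, inferred from the helper methods below: the
 *        accumulated inconsistency image is segmented into regions, see
 *        AverageSegmentedRegions(), regions whose area falls below
 *        threshold_change_area are dropped, and the surviving regions are
 *        stored with their contours and labels, see GetRegions().)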
72 | * 73 | * @param[in] threshold_change_area Threshold area under which regions are 74 | * not considered 75 | */ 76 | void ComputeRegions(int threshold_change_area); 77 | 78 | /** 79 | * @brief Gets the mean position and covariance of the points of each 80 | * region with the same label. 81 | * 82 | * @return The map containing the pairs [label, XXX]. 83 | */ 84 | std::unordered_map GetMeansCovariances() const; 85 | 86 | /** 87 | * @brief Gets the inconsistency image. 88 | * 89 | * @return The inconsistency image. 90 | */ 91 | cv::Mat GetInconsistencies(); 92 | 93 | /** 94 | * @brief Gets the regions where changes occur. 95 | * 96 | * @return The regions. 97 | */ 98 | const std::vector& GetRegions() const; 99 | 100 | /** 101 | * @brief Gets the depth image. 102 | * 103 | * @return The depth image. 104 | */ 105 | const std::vector& GetDepth() const; 106 | 107 | /** 108 | * @brief Update the label of the regions with the provided values. 109 | * 110 | * @param[in] labels The new labels 111 | */ 112 | void UpdateLabels(const std::vector &labels); 113 | 114 | protected: 115 | /** 116 | * @brief For every pixel of img1 subtracts the color of img2 in a window 117 | * of kernel_size pixel and stores the smallest L2 norm of the 118 | * result in a OpenCV matrix of doubles. 119 | * 120 | * @param[in] img1 The first image 121 | * @param[in] img2 The second image 122 | * @param[in] kernel_size The size in pixel of the window in img2 over which 123 | * the difference is computed 124 | * 125 | * @return The resulting OpenCV matrix. 126 | */ 127 | cv::Mat Subtract(const cv::Mat &img1, const cv::Mat &img2, int kernel_size); 128 | 129 | /** 130 | * @brief Computes the minimum element-wise of two OpenCV matrices of 131 | * doubles. 132 | * 133 | * @param[in] img1 The first matrix 134 | * @param[in] img2 The second matrix 135 | * 136 | * @return The resulting matrix 137 | */ 138 | cv::Mat Minimum(const cv::Mat &img1, const cv::Mat &img2); 139 | 140 | /** 141 | * @brief Find non-zero elements in the matrix 142 | * 143 | * @param[in] img The matrix 144 | * 145 | * @return The (rows*cols) by 2 matrix containing all the non-zero 146 | * elements. 147 | */ 148 | cv::Mat FindNonZero(const cv::Mat &img); 149 | 150 | /** 151 | * @brief Segments the inconsistency image into region and fills those 152 | * regions with their average intensity. 153 | * 154 | * @param[in] img The inconsistency image 155 | * @param[in] threshold_change_area Threshold area under which regions are 156 | * not considered 157 | * 158 | * @return The inconsistency image with the segmented regions. 159 | */ 160 | cv::Mat AverageSegmentedRegions(const cv::Mat &img, int threshold_change_area); 161 | 162 | /** 163 | * The depth image w.r.t. the 3D model, in the form of a vector of 4D points 164 | * ordered row-wise from the bottom left. 
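(Assuming a top-left image origin, the entry for pixel (u, v) therefore sits at index (height - 1 - v) * width + u; the bottom-left, row-major ordering simply reflects how the OpenGL depth texture is downloaded.)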
The 4th coordinate is used as a 165 | * boolean: it has value 0 if the back-projected ray does not hit the model, 166 | * 1 otherwise 167 | */ 168 | std::vector depth_; 169 | 170 | /** The inconsistency image (doubles from 0 to 1) */ 171 | cv::Mat inconsistencies_; 172 | 173 | /** The segmented regions */ 174 | std::vector regions_; 175 | 176 | /** 177 | * The mean position and covariance of the points of each region with the same 178 | * label 179 | */ 180 | std::unordered_map regions_mean_cov_; 181 | 182 | /** The highest label assigned to a region */ 183 | int max_region_label_ = 0; 184 | 185 | /** The number of images this one has been compared to*/ 186 | int num_img_compared_ = 0; 187 | }; 188 | 189 | } // namespace fastcd 190 | -------------------------------------------------------------------------------- /src/fastcd/camera.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include "fastcd/camera.h" 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | namespace fastcd { 9 | 10 | void Camera::ReadCalibration(const std::string& filename, int id) { 11 | boost::property_tree::ptree pt, resolution, camera; 12 | read_xml(filename, pt); 13 | 14 | double cx = pt.get("document.chunk.sensors.sensor.calibration.cx"); 15 | double cy = pt.get("document.chunk.sensors.sensor.calibration.cy"); 16 | double fx = pt.get("document.chunk.sensors.sensor.calibration.fx"); 17 | double fy = pt.get("document.chunk.sensors.sensor.calibration.fy"); 18 | resolution = 19 | pt.get_child("document.chunk.sensors.sensor.calibration.resolution"); 20 | width_ = resolution.get(".width"); 21 | height_ = resolution.get(".height"); 22 | 23 | BOOST_FOREACH(boost::property_tree::ptree::value_type const& v, 24 | pt.get_child("document.chunk.cameras")) { 25 | if (v.first == "camera" && v.second.get(".id") == id) { 26 | std::string s = v.second.get("transform"); 27 | std::stringstream sstream(s); 28 | for (size_t i = 0; i < 4; i++) 29 | for (size_t j = 0; j < 4; j++) sstream >> pose_(i, j); 30 | } 31 | } 32 | 33 | calibration_.setIdentity(); 34 | calibration_(0, 0) = fx; 35 | calibration_(1, 1) = fy; 36 | calibration_(0, 2) = cx; 37 | calibration_(1, 2) = cy; 38 | inverse_calibration_ = calibration_.inverse(); 39 | } 40 | 41 | void Camera::ScaleCalibration(double scaling) { 42 | Eigen::Matrix3d k_transform; 43 | k_transform << scaling, 0, scaling/2-0.5, 44 | 0, scaling, scaling/2-0.5, 45 | 0, 0, 1; 46 | width_ *= scaling; 47 | height_ *= scaling; 48 | calibration_ = k_transform * calibration_; 49 | inverse_calibration_ = calibration_.inverse(); 50 | } 51 | 52 | 53 | Eigen::Vector2i Camera::Project(double x, double y, double z) const { 54 | Eigen::Vector4d world_point; 55 | Eigen::Vector3d camera_point; 56 | Eigen::Matrix P; 57 | Eigen::Matrix3d R = pose_.block<3, 3>(0, 0).transpose(); 58 | world_point << x, y, z, 1; 59 | P.block<3, 3>(0, 0) = calibration_ * R; 60 | P.block<3, 1>(0, 3) = -calibration_ * R * pose_.block<3, 1>(0, 3); 61 | camera_point = P * world_point; 62 | Eigen::Vector2i uv_coord; 63 | uv_coord << static_cast(camera_point(0) / camera_point(2)), 64 | static_cast(camera_point(1) / camera_point(2)); 65 | return uv_coord; 66 | } 67 | 68 | Eigen::Vector2i Camera::Project(const Eigen::Vector3d& point) const { 69 | return Project(point(0), point(1), point(2)); 70 | } 71 | 72 | Eigen::Vector3d Camera::BackProject(int u, int v) { 73 | Eigen::Vector3d 
camera_point; 74 | camera_point << u, v, 1; 75 | Eigen::Vector4d world_point, hom_direction; 76 | Eigen::Vector3d direction; 77 | direction = inverse_calibration_ * camera_point; 78 | hom_direction << direction(0), direction(1), direction(2), 1; 79 | world_point = pose_ * hom_direction; 80 | direction << world_point(0) / world_point(3), world_point(1) / world_point(3), 81 | world_point(2) / world_point(3); 82 | return direction; 83 | } 84 | 85 | Eigen::Vector3d Camera::BackProject(const Eigen::Vector2i& point) { 86 | return BackProject(point(0), point(1)); 87 | } 88 | 89 | std::vector Camera::GetGlFovVertices(float color, float length) { 90 | Eigen::Vector3f position = GetPosition().cast(); 91 | 92 | Eigen::Vector3f direction_tl = BackProject(0, 0).cast(); 93 | Eigen::Vector3f direction_bl = BackProject(0, height_).cast(); 94 | Eigen::Vector3f direction_tr = BackProject(width_, 0).cast(); 95 | Eigen::Vector3f direction_br = BackProject(width_, height_).cast(); 96 | Eigen::Vector3f corner_tl = (direction_tl - position) * length + position; 97 | Eigen::Vector3f corner_bl = (direction_bl - position) * length + position; 98 | Eigen::Vector3f corner_tr = (direction_tr - position) * length + position; 99 | Eigen::Vector3f corner_br = (direction_br - position) * length + position; 100 | 101 | std::vector verts({position(0), position(1), position(2), color, 102 | corner_tl(0), corner_tl(1), corner_tl(2), color, 103 | position(0), position(1), position(2), color, 104 | corner_bl(0), corner_bl(1), corner_bl(2), color, 105 | position(0), position(1), position(2), color, 106 | corner_tr(0), corner_tr(1), corner_tr(2), color, 107 | position(0), position(1), position(2), color, 108 | corner_br(0), corner_br(1), corner_br(2), color}); 109 | return verts; 110 | } 111 | 112 | std::ostream& operator<<(std::ostream& os, const Camera& c) { 113 | os << "Resolution: " << c.width_ << "x" << c.height_ << std::endl; 114 | os << "K:\n" << c.calibration_ << std::endl; 115 | os << "\nPose:\n" << c.pose_ << std::endl; 116 | return os; 117 | } 118 | 119 | Eigen::Matrix3d Camera::GetK() const { return calibration_; } 120 | 121 | Eigen::Matrix3d Camera::GetInvK() const { return inverse_calibration_; } 122 | 123 | Eigen::Matrix4d Camera::GetPose() const { return pose_; } 124 | 125 | Eigen::Vector3d Camera::GetPosition() const { 126 | return pose_.block<3, 1>(0, 3); 127 | } 128 | 129 | Eigen::Matrix Camera::GetP() const { 130 | Eigen::Matrix P; 131 | Eigen::Matrix3d R = pose_.block<3, 3>(0, 0).transpose(); 132 | P.block<3, 3>(0, 0) = calibration_ * R; 133 | P.block<3, 1>(0, 3) = -calibration_ * R * pose_.block<3, 1>(0, 3); 134 | return P; 135 | } 136 | 137 | Eigen::Matrix4f Camera::GetGlProjection(float near, float far) { 138 | float left = 0.0f; 139 | float right = static_cast(width_); 140 | float bottom = 0.0f; 141 | float top = static_cast(height_); 142 | Eigen::Matrix4f gl_ortho, gl_persp, gl_camera; 143 | gl_persp.setZero(); 144 | gl_persp.block<3, 3>(0, 0) = calibration_.cast(); 145 | gl_persp.block<2, 1>(0, 2) *= -1; 146 | gl_persp(2, 2) = near + far; 147 | gl_persp(2, 3) = near * far; 148 | gl_persp(3, 2) = -1; 149 | 150 | gl_ortho << 2 / (right - left), 0, 0, -(right + left) / (right - left), 151 | 0, 2 / (top - bottom), 0, -(top + bottom) / (top - bottom), 152 | 0, 0, -2 / (far - near), -(far + near) / (far - near), 153 | 0, 0, 0, 1; 154 | 155 | gl_camera = gl_ortho * gl_persp; 156 | 157 | return gl_camera; 158 | } 159 | 160 | Eigen::Matrix4f Camera::GetGlView() { 161 | return glow::glRotateY(M_PI) * 
glow::glRotateZ(M_PI) * 162 | GetPose().inverse().cast(); 163 | } 164 | 165 | int Camera::GetWidth() { return width_; } 166 | 167 | int Camera::GetHeight() { return height_; } 168 | 169 | } // namespace fastcd 170 | -------------------------------------------------------------------------------- /src/fastcd/mesh.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Jens Behley (jens.behley@igg.uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include "fastcd/mesh.h" 3 | 4 | #include 5 | 6 | namespace glow { 7 | template <> 8 | void GlUniform::bind(GLuint program_id) const { 9 | GLint loc; 10 | loc = glGetUniformLocation(program_id, (name_ + ".ambient").c_str()); 11 | glUniform3fv(loc, 1, &data_.ambient.x); 12 | loc = glGetUniformLocation(program_id, (name_ + ".diffuse").c_str()); 13 | glUniform3fv(loc, 1, &data_.diffuse.x); 14 | loc = glGetUniformLocation(program_id, (name_ + ".specular").c_str()); 15 | glUniform3fv(loc, 1, &data_.specular.x); 16 | loc = glGetUniformLocation(program_id, (name_ + ".emission").c_str()); 17 | glUniform3fv(loc, 1, &data_.emission.x); 18 | loc = glGetUniformLocation(program_id, (name_ + ".shininess").c_str()); 19 | glUniform1f(loc, data_.shininess); 20 | loc = glGetUniformLocation(program_id, (name_ + ".alpha").c_str()); 21 | glUniform1f(loc, data_.alpha); 22 | } 23 | 24 | } // namespace glow 25 | 26 | Mesh::Mesh() { 27 | } 28 | 29 | Mesh::Mesh(const std::vector& vertices, 30 | const std::vector& faces) { 31 | std::vector dummy(1); 32 | initialize(vertices, faces, dummy); 33 | } 34 | 35 | Mesh::Mesh(const std::vector& vertices, 36 | const std::vector& faces, 37 | const std::vector& materials) { 38 | initialize(vertices, faces, materials); 39 | } 40 | 41 | float Mesh::length(const glow::vec4& v) { 42 | return std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z); 43 | } 44 | 45 | Eigen::Vector3f toEigen(const glow::vec4& v) { 46 | return Eigen::Vector3f(v.x, v.y, v.z); 47 | } 48 | 49 | void Mesh::initialize(const std::vector& verts, 50 | const std::vector& faces, 51 | const std::vector& materials) { 52 | if (verts.size() == 0) return; 53 | 54 | std::vector vertices = verts; 55 | 56 | if (std::abs(length(vertices[0].normal) - 1.0f) > 0.01) { 57 | // assume, no normals given: 58 | for (auto face : faces) { 59 | Eigen::Vector3f v0 = toEigen(vertices[face.vertices[0]].position); 60 | Eigen::Vector3f v1 = toEigen(vertices[face.vertices[1]].position); 61 | Eigen::Vector3f v2 = toEigen(vertices[face.vertices[2]].position); 62 | 63 | Eigen::Vector3f n = ((v1 - v0).cross(v2 - v0)).normalized(); 64 | for (uint32_t i = 0; i < 3; ++i) { 65 | vertices[face.vertices[i]].normal.x = n.x(); 66 | vertices[face.vertices[i]].normal.y = n.y(); 67 | vertices[face.vertices[i]].normal.z = n.z(); 68 | } 69 | } 70 | } 71 | std::vector > tris(std::max((int)materials.size(), 1)); 72 | for (uint32_t i = 0; i < faces.size(); ++i) { 73 | assert(faces[i].material < tris.size()); 74 | tris[faces[i].material].push_back(faces[i].vertices[0]); 75 | tris[faces[i].material].push_back(faces[i].vertices[1]); 76 | tris[faces[i].material].push_back(faces[i].vertices[2]); 77 | } 78 | 79 | // reorder triangles & matrials s.t. transparent materials are drawn last. 80 | std::vector reordered_materials(materials.size()); 81 | std::vector > reordered_tris(tris.size()); 82 | uint32_t opaque_idx = 0; 83 | uint32_t transparent_idx = materials.size() - 1; 84 | 85 | for (uint32_t i = 0; i < tris.size(); ++i) { 86 | if (materials[i].alpha < 1.) 
{ 87 | reordered_tris[transparent_idx] = tris[i]; 88 | reordered_materials[transparent_idx] = materials[i]; 89 | tris[i].clear(); // no need to keep data. 90 | transparent_idx -= 1; 91 | } else { 92 | reordered_tris[opaque_idx] = tris[i]; 93 | reordered_materials[opaque_idx] = materials[i]; 94 | tris[i].clear(); // no need to keep data. 95 | opaque_idx += 1; 96 | } 97 | } 98 | 99 | for (uint32_t i = 0; i < reordered_materials.size(); ++i) { 100 | materials_.push_back( 101 | glow::GlUniform("material", reordered_materials[i])); 102 | } 103 | 104 | // copying the data. 105 | vertices_.assign(vertices); 106 | for (uint32_t i = 0; i < reordered_tris.size(); ++i) { 107 | glow::GlBuffer buf(glow::BufferTarget::ELEMENT_ARRAY_BUFFER, 108 | glow::BufferUsage::STATIC_DRAW); 109 | buf.assign(reordered_tris[i]); 110 | triangles_.push_back(buf); 111 | } 112 | 113 | // computing boundaries 114 | max_ << -10000, -10000, -10000; 115 | min_ << 10000, 10000, 10000; 116 | for (uint32_t i = 0; i < vertices.size(); ++i) { 117 | if (vertices[i].position.x > max_(0)) max_(0) = vertices[i].position.x; 118 | if (vertices[i].position.y > max_(1)) max_(1) = vertices[i].position.y; 119 | if (vertices[i].position.z > max_(2)) max_(2) = vertices[i].position.z; 120 | if (vertices[i].position.x < min_(0)) min_(0) = vertices[i].position.x; 121 | if (vertices[i].position.y < min_(1)) min_(1) = vertices[i].position.y; 122 | if (vertices[i].position.z < min_(2)) min_(2) = vertices[i].position.z; 123 | } 124 | 125 | // setting the vaos. 126 | for (uint32_t i = 0; i < reordered_tris.size(); ++i) { 127 | vaos_.push_back(glow::GlVertexArray()); 128 | 129 | vaos_[i].bind(); 130 | vaos_[i].setVertexAttribute(0, vertices_, 4, glow::AttributeType::FLOAT, 131 | false, sizeof(Vertex), (GLvoid*)0); 132 | vaos_[i].setVertexAttribute(1, vertices_, 4, glow::AttributeType::FLOAT, 133 | false, sizeof(Vertex), 134 | (GLvoid*)offsetof(Vertex, normal)); 135 | vaos_[i].setVertexAttribute(2, vertices_, 2, glow::AttributeType::FLOAT, 136 | false, sizeof(Vertex), 137 | (GLvoid*)offsetof(Vertex, texture)); 138 | vaos_[i].enableVertexAttribute(0); 139 | vaos_[i].enableVertexAttribute(1); 140 | vaos_[i].enableVertexAttribute(2); 141 | triangles_[i].bind(); 142 | vaos_[i].release(); 143 | 144 | triangles_[i].release(); // release only afterwards 145 | 146 | CheckGlError(); 147 | } 148 | } 149 | 150 | void Mesh::draw(glow::GlProgram& program) const { 151 | bool blending_enabled = false; 152 | // draw each object for each material. 153 | for (uint32_t i = 0; i < vaos_.size(); ++i) { 154 | if (materials_.size() > i) { 155 | // enable belending for tranparent indexes. 156 | if (materials_[i].value().alpha < 1. && !blending_enabled) { 157 | blending_enabled = true; 158 | glEnable(GL_BLEND); 159 | glBlendFunc(GL_SRC_ALPHA, 160 | GL_ONE_MINUS_SRC_ALPHA); // FIXME: state changed. 
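// Standard "over" blending: out = src.rgb * src.a + dst.rgb * (1 - src.a).
// This is also why initialize() reorders the per-material index buffers so that
// all opaque materials are drawn before any transparent one: the blend only
// looks right once the opaque geometry is already in the framebuffer.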
161 | // glBlendColor(1., 1., 1., materials_[i].value().alpha); 162 | } 163 | } 164 | 165 | glow::ScopedBinder vao_bind(vaos_[i]); 166 | if (materials_.size() > i) program.setUniform(materials_[i]); 167 | glDrawElements(GL_TRIANGLES, triangles_[i].size(), GL_UNSIGNED_INT, 0); 168 | } 169 | 170 | if (blending_enabled) glDisable(GL_BLEND); 171 | } 172 | 173 | std::pair Mesh::GetBoundingBox() { 174 | return std::pair(min_, max_); 175 | } 176 | -------------------------------------------------------------------------------- /src/fastcd/regions3d_projector.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include "fastcd/regions3d_projector.h" 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "fastcd/point_covariance2d.h" 11 | 12 | namespace fastcd { 13 | 14 | Regions3dProjector::Regions3dProjector( 15 | std::shared_ptr img_sequence, int threshold_change_area, 16 | double chi_square2d, double chi_square3d) { 17 | img_sequence->ComputeAndMatchRegions(threshold_change_area); 18 | 19 | std::map> labels; 20 | for (size_t i = 0; i < img_sequence->size(); i++) { 21 | std::vector l = (*img_sequence)[i].ComputeMeansCovariances(chi_square2d); 22 | for (int idx : l) { 23 | if (labels.count(idx) > 0) { 24 | labels[idx].insert(i); 25 | } else { 26 | std::set tmp; 27 | tmp.insert(i); 28 | labels.insert(std::pair>(idx, tmp)); 29 | } 30 | } 31 | } 32 | for (auto l : labels) { 33 | if (l.second.size() > 1) { 34 | // Point triangulation (forward intersection) 35 | Eigen::Vector3d triangulated_mean = TriangulateMeanRay(img_sequence, l); 36 | std::vector projected_vertices = 37 | ProjectVertices(img_sequence, triangulated_mean, l); 38 | Eigen::Vector3d mean = ComputeMean(projected_vertices); 39 | Eigen::Matrix3d covariance = ComputeCovariance(projected_vertices, mean); 40 | PointCovariance3d cov(mean, covariance, chi_square3d); 41 | projected_regions_.push_back(cov); 42 | } 43 | } 44 | } 45 | 46 | Eigen::Vector3d Regions3dProjector::TriangulateMeanRay( 47 | const std::shared_ptr &img_sequence, 48 | const std::pair> &label) { 49 | Eigen::Matrix A; 50 | int size = 0; 51 | for (uint32_t i = 0; i < img_sequence->size(); i++) { 52 | if (label.second.find(i) != label.second.end()) { 53 | size++; 54 | } 55 | } 56 | A.resize(size * 3, 4); 57 | Eigen::Vector3d point, ray; 58 | int idx = 0; 59 | for (uint32_t i = 0; i < img_sequence->size(); i++) { 60 | if (label.second.find(i) != label.second.end()) { 61 | PointCovariance2d point_covariance = 62 | (*img_sequence)[i].GetMeansCovariances()[label.first]; 63 | point << point_covariance.Point()(0), point_covariance.Point()(1), 1; 64 | ray = (*img_sequence)[i].GetCamera().GetInvK() * point; 65 | Eigen::Matrix3d skew_ray = SkewSymmetric3d(ray); 66 | Eigen::Matrix P; 67 | P.block<3, 3>(0, 0) = 68 | (*img_sequence)[i].GetCamera().GetPose().block<3, 3>(0, 0).transpose(); 69 | P.block<3, 1>(0, 3) = 70 | -P.block<3, 3>(0, 0) * 71 | (*img_sequence)[i].GetCamera().GetPose().block<3, 1>(0, 3); 72 | A.block<3, 4>(idx++ * 3, 0) = skew_ray * P; 73 | } 74 | } 75 | Eigen::JacobiSVD> svd( 76 | A, Eigen::ComputeThinU | Eigen::ComputeThinV); 77 | Eigen::Vector4d triangulated_mean = 78 | svd.matrixV().block<4, 1>(0, 3).transpose() / svd.matrixV()(3, 3); 79 | return triangulated_mean.block<3, 1>(0, 0); 80 | } 81 | 82 | std::vector Regions3dProjector::ProjectVertices( 83 | const 
std::shared_ptr &img_sequence, 84 | const Eigen::Vector3d &triangulated_mean, 85 | const std::pair> &label) { 86 | std::vector vertices3d; 87 | for (uint32_t i = 0; i < img_sequence->size(); i++) { 88 | if (label.second.find(i) != label.second.end()) { 89 | PointCovariance2d point_covariance = 90 | (*img_sequence)[i].GetMeansCovariances()[label.first]; 91 | // Distance camera/triangulated_mean 92 | Eigen::Vector3d hom_mean; 93 | hom_mean << point_covariance.Point()(0), point_covariance.Point()(1), 1; 94 | Eigen::Vector3d mean_ray = 95 | ((*img_sequence)[i].GetCamera().GetPose().block<3, 3>(0, 0) * 96 | (*img_sequence)[i].GetCamera().GetInvK() * hom_mean) 97 | .normalized(); 98 | double distance = mean_ray.transpose() * triangulated_mean; 99 | // Plane on 3d point with normal direction = ray direction 100 | Eigen::Vector4d plane; 101 | plane << mean_ray(0), mean_ray(1), mean_ray(2), -distance; 102 | for (auto vertex : point_covariance.Vertices()) { 103 | // Convert to Pluecker line 104 | Eigen::Vector3d hom_vertex; 105 | hom_vertex << vertex(0), vertex(1), 1; 106 | Eigen::Vector3d ray = 107 | ((*img_sequence)[i].GetCamera().GetPose().block<3, 3>(0, 0) * 108 | (*img_sequence)[i].GetCamera().GetInvK() * hom_vertex) 109 | .normalized(); 110 | Eigen::Matrix pluecker_line; 111 | // The line passes through 2 points 112 | Eigen::Vector3d p1 = (*img_sequence)[i].GetCamera().GetPosition(); 113 | Eigen::Vector3d p2 = (*img_sequence)[i].GetCamera().GetPosition() + ray; 114 | pluecker_line.block<3, 1>(0, 0) = ray; 115 | pluecker_line.block<3, 1>(3, 0) = p1.cross(p2); 116 | // Intersection line/plane 117 | Eigen::Matrix3d skew_pluecker = 118 | SkewSymmetric3d(pluecker_line.block<3, 1>(3, 0)); 119 | Eigen::Matrix4d pluecker_mat; // transposed Pluecker matrix 120 | pluecker_mat.block<3, 3>(0, 0) = skew_pluecker; 121 | pluecker_mat.block<3, 1>(0, 3) = pluecker_line.block<3, 1>(0, 0); 122 | pluecker_mat.block<1, 3>(3, 0) = 123 | -pluecker_line.block<3, 1>(0, 0).transpose(); 124 | pluecker_mat(3, 3) = 0; 125 | Eigen::Vector4d vertex_hom = pluecker_mat * plane; 126 | Eigen::Vector3d vertex3d = vertex_hom.block<3, 1>(0, 0) / vertex_hom(3); 127 | vertices3d.push_back(vertex3d); 128 | } 129 | } 130 | } 131 | return vertices3d; 132 | } 133 | 134 | Eigen::Matrix3d Regions3dProjector::SkewSymmetric3d( 135 | const Eigen::Vector3d &vec) { 136 | Eigen::Matrix3d skew_symmetric; 137 | skew_symmetric << 0, -vec(2), vec(1), 138 | vec(2), 0, -vec(0), 139 | -vec(1), vec(0), 0; 140 | return skew_symmetric; 141 | } 142 | 143 | Eigen::Vector3d Regions3dProjector::ComputeMean( 144 | const std::vector &points) { 145 | Eigen::Vector3d mean = Eigen::Vector3d::Zero(); 146 | for (auto point : points) mean += point; 147 | mean /= points.size(); 148 | return mean; 149 | } 150 | 151 | Eigen::Matrix3d Regions3dProjector::ComputeCovariance( 152 | const std::vector &points_list, 153 | const Eigen::Vector3d &mean) { 154 | Eigen::Matrix points; 155 | points.resize(points_list.size(), 3); 156 | int i = 0; 157 | for (auto point : points_list) { 158 | points.block<1, 3>(i++, 0) = (point-mean).transpose(); 159 | } 160 | Eigen::Matrix3d covariance; 161 | covariance = points.transpose() * points; 162 | covariance /= points_list.size(); 163 | return covariance; 164 | } 165 | 166 | std::vector Regions3dProjector::GetProjectedRegions() { 167 | return projected_regions_; 168 | } 169 | 170 | } // namespace fastcd 171 | -------------------------------------------------------------------------------- /src/fastcd/regions_matcher_hist.cpp: 
-------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include "fastcd/regions_matcher_hist.h" 3 | #include 4 | #include 5 | #include 6 | 7 | namespace fastcd { 8 | 9 | void RegionsMatcherHist::Match(const ProcessedImage &img1, 10 | const ProcessedImage &img2, int min_label) { 11 | max_label_ = min_label; 12 | std::vector regions_img1 = img1.GetRegions(); 13 | std::vector regions_img2 = img2.GetRegions(); 14 | labels1_.resize(regions_img1.size()); 15 | labels2_.resize(regions_img2.size()); 16 | std::vector> contours(1); 17 | std::unordered_map epimasks1; 18 | std::vector img1_hists(regions_img1.size()); 19 | std::vector img2_hists(regions_img2.size()); 20 | for (size_t i = 0; i < regions_img1.size(); i++) { 21 | labels1_[i] = regions_img1[i].label_; 22 | cv::Mat epimask = EpilineMask(img1, img2, i); 23 | epimasks1[i] = epimask; 24 | contours[0] = regions_img1[i].contour_; 25 | cv::Mat mask(img1.Height(), img1.Width(), CV_8UC1, cv::Scalar(0)); 26 | cv::drawContours(mask, contours, -1, cv::Scalar(255), CV_FILLED); 27 | cv::Mat hsv; 28 | cv::cvtColor(img1.GetRawImage(), hsv, cv::COLOR_BGR2HSV); 29 | int h_bins = 50; int s_bins = 60; 30 | int histSize[] = { h_bins, s_bins }; 31 | float h_ranges[] = { 0, 180 }; 32 | float s_ranges[] = { 0, 256 }; 33 | const float* ranges[] = { h_ranges, s_ranges }; 34 | int channels[] = { 0, 1 }; 35 | calcHist(&hsv, 1, channels, mask, img1_hists[i], 2, histSize, ranges, true, 36 | false); 37 | normalize(img1_hists[i], img1_hists[i], 0, 1, cv::NORM_MINMAX, -1, 38 | cv::Mat()); 39 | } 40 | std::unordered_map epimasks2; 41 | for (size_t i = 0; i < regions_img2.size(); i++) { 42 | labels2_[i] = regions_img2[i].label_; 43 | cv::Mat epimask = EpilineMask(img2, img1, i); 44 | epimasks2[i] = epimask; 45 | contours[0] = regions_img2[i].contour_; 46 | cv::Mat mask(img2.Height(), img2.Width(), CV_8UC1, cv::Scalar(0)); 47 | cv::drawContours(mask, contours, -1, cv::Scalar(255), CV_FILLED); 48 | cv::Mat hsv; 49 | cv::cvtColor(img2.GetRawImage(), hsv, cv::COLOR_BGR2HSV); 50 | int h_bins = 50; int s_bins = 60; 51 | int histSize[] = { h_bins, s_bins }; 52 | float h_ranges[] = { 0, 180 }; 53 | float s_ranges[] = { 0, 256 }; 54 | const float* ranges[] = { h_ranges, s_ranges }; 55 | int channels[] = { 0, 1 }; 56 | calcHist(&hsv, 1, channels, mask, img2_hists[i], 2, histSize, 57 | ranges, true, false); 58 | normalize(img2_hists[i], img2_hists[i], 0, 1, 59 | cv::NORM_MINMAX, -1, cv::Mat()); 60 | } 61 | std::vector idxs; 62 | std::vector corrs; 63 | for (size_t i = 0; i < regions_img1.size(); i++) { 64 | double max = 0; 65 | int maxj; 66 | bool geomeric_check1, geomeric_check2; 67 | for (size_t j = 0; j < regions_img2.size(); j++) { 68 | geomeric_check1 = false; 69 | geomeric_check2 = false; 70 | std::vector contour = regions_img2[j].contour_; 71 | for (auto &pt : contour) { 72 | if (epimasks1[i].at(pt.y, pt.x) == 0) { 73 | geomeric_check1 = true; 74 | break; 75 | } 76 | } 77 | contour = regions_img1[i].contour_; 78 | for (auto &pt : contour) { 79 | if (epimasks2[j].at(pt.y, pt.x) == 0) { 80 | geomeric_check2 = true; 81 | break; 82 | } 83 | } 84 | 85 | double corr; 86 | if (geomeric_check1 || geomeric_check2) { 87 | corr = 0; 88 | } else { 89 | corr = cv::compareHist(img1_hists[i], 90 | img2_hists[j], CV_COMP_CORREL); 91 | } 92 | if (corr > max) { 93 | max = corr; 94 | maxj = j; 95 | } 96 | } 97 | idxs.push_back(maxj); 98 | 
corrs.push_back(max); 99 | } 100 | 101 | double threshold, max = 0, min = 1; 102 | for (size_t i = 0; i < corrs.size(); i++) { 103 | if (corrs[i] > max) max = corrs[i]; 104 | if (corrs[i] < min) min = corrs[i]; 105 | } 106 | threshold = std::max((max - min) * 0.7 + min, 0.5); 107 | for (size_t i = 0; i < regions_img1.size(); i++) { 108 | if (corrs[i] >= threshold) { 109 | if (labels2_[idxs[i]] < 0) { 110 | labels2_[idxs[i]] = labels1_[i] = max_label_++; 111 | } else { 112 | labels1_[i] = labels2_[idxs[i]]; 113 | } 114 | } 115 | } 116 | } 117 | 118 | std::vector RegionsMatcherHist::GetLabelsImg1() { 119 | return labels1_; 120 | } 121 | 122 | std::vector RegionsMatcherHist::GetLabelsImg2() { 123 | return labels2_; 124 | } 125 | 126 | int RegionsMatcherHist::GetMaxLabel() { return max_label_; } 127 | 128 | cv::Mat RegionsMatcherHist::EpilineMask(const ProcessedImage &img1, 129 | const ProcessedImage &img2, int idx) { 130 | Eigen::Matrix3d F = ComputeF(img1.GetCamera().GetP(), 131 | img2.GetCamera().GetP()); 132 | cv::Mat epimask(img1.Height(), img1.Width(), CV_8UC1, 133 | cv::Scalar(0)); 134 | std::vector regions_img1 = img1.GetRegions(); 135 | for (size_t i = 0; i < regions_img1[idx].contour_.size(); i++) { 136 | Eigen::Vector3d pt; 137 | pt << regions_img1[idx].contour_[i].x, 138 | regions_img1[idx].contour_[i].y, 1; 139 | Eigen::Vector3d line = F * pt; 140 | cv::line( 141 | epimask, cv::Point(0, -line(2) / line(1)), 142 | cv::Point(epimask.cols, -(line(2) + line(0) * epimask.cols) / line(1)), 143 | cv::Scalar(255)); 144 | } 145 | cv::Mat dilation_kernel = 146 | cv::getStructuringElement(cv::MORPH_RECT, cv::Size(51, 51)); 147 | cv::dilate(epimask, epimask, dilation_kernel); 148 | return epimask; 149 | } 150 | 151 | Eigen::Matrix3d RegionsMatcherHist::ComputeF( 152 | const Eigen::Matrix &P1, 153 | const Eigen::Matrix &P2) { 154 | Eigen::Matrix X1, X2, X3, Y1, Y2, Y3; 155 | 156 | X1.block<1, 4>(0, 0) = P1.block<1, 4>(1, 0); 157 | X1.block<1, 4>(1, 0) = P1.block<1, 4>(2, 0); 158 | 159 | X2.block<1, 4>(0, 0) = P1.block<1, 4>(2, 0); 160 | X2.block<1, 4>(1, 0) = P1.block<1, 4>(0, 0); 161 | 162 | X3.block<1, 4>(0, 0) = P1.block<1, 4>(0, 0); 163 | X3.block<1, 4>(1, 0) = P1.block<1, 4>(1, 0); 164 | 165 | Y1.block<1, 4>(0, 0) = P2.block<1, 4>(1, 0); 166 | Y1.block<1, 4>(1, 0) = P2.block<1, 4>(2, 0); 167 | 168 | Y2.block<1, 4>(0, 0) = P2.block<1, 4>(2, 0); 169 | Y2.block<1, 4>(1, 0) = P2.block<1, 4>(0, 0); 170 | 171 | Y3.block<1, 4>(0, 0) = P2.block<1, 4>(0, 0); 172 | Y3.block<1, 4>(1, 0) = P2.block<1, 4>(1, 0); 173 | 174 | Eigen::Matrix4d XY11, XY21, XY31, XY12, XY22, XY32, XY13, XY23, XY33; 175 | 176 | XY11.block<2, 4>(0, 0) = X1; 177 | XY11.block<2, 4>(2, 0) = Y1; 178 | 179 | XY21.block<2, 4>(0, 0) = X2; 180 | XY21.block<2, 4>(2, 0) = Y1; 181 | 182 | XY31.block<2, 4>(0, 0) = X3; 183 | XY31.block<2, 4>(2, 0) = Y1; 184 | 185 | XY12.block<2, 4>(0, 0) = X1; 186 | XY12.block<2, 4>(2, 0) = Y2; 187 | 188 | XY22.block<2, 4>(0, 0) = X2; 189 | XY22.block<2, 4>(2, 0) = Y2; 190 | 191 | XY32.block<2, 4>(0, 0) = X3; 192 | XY32.block<2, 4>(2, 0) = Y2; 193 | 194 | XY13.block<2, 4>(0, 0) = X1; 195 | XY13.block<2, 4>(2, 0) = Y3; 196 | 197 | XY23.block<2, 4>(0, 0) = X2; 198 | XY23.block<2, 4>(2, 0) = Y3; 199 | 200 | XY33.block<2, 4>(0, 0) = X3; 201 | XY33.block<2, 4>(2, 0) = Y3; 202 | 203 | Eigen::Matrix3d F; 204 | F << XY11.determinant(), XY21.determinant(), XY31.determinant(), 205 | XY12.determinant(), XY22.determinant(), XY32.determinant(), 206 | XY13.determinant(), XY23.determinant(), XY33.determinant(); 207 | 
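// Closed-form fundamental matrix from two projection matrices: each entry of F is
// the determinant of the 4x4 matrix obtained by stacking two rows of P1 (X1..X3)
// on top of two rows of P2 (Y1..Y3); the cyclic row orderings above absorb the
// alternating signs of the cofactor formula (cf. Hartley & Zisserman, "Multiple
// View Geometry in Computer Vision").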
return F; 208 | } 209 | 210 | 211 | 212 | } // namespace fastcd 213 | -------------------------------------------------------------------------------- /src/utils/obj_reader.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Jens Behley (jens.behley@igg.uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include "utils/obj_reader.h" 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | 9 | std::string trim(const std::string& str, 10 | const std::string& whitespaces = " \0\t\n\r\x0B") { 11 | int32_t beg = 0; 12 | int32_t end = 0; 13 | 14 | /** find the beginning **/ 15 | for (beg = 0; beg < (int32_t)str.size(); ++beg) { 16 | bool found = false; 17 | for (uint32_t i = 0; i < whitespaces.size(); ++i) { 18 | if (str[beg] == whitespaces[i]) { 19 | found = true; 20 | break; 21 | } 22 | } 23 | if (!found) break; 24 | } 25 | 26 | /** find the end **/ 27 | for (end = int32_t(str.size()) - 1; end > beg; --end) { 28 | bool found = false; 29 | for (uint32_t i = 0; i < whitespaces.size(); ++i) { 30 | if (str[end] == whitespaces[i]) { 31 | found = true; 32 | break; 33 | } 34 | } 35 | if (!found) break; 36 | } 37 | 38 | return str.substr(beg, end - beg + 1); 39 | } 40 | 41 | std::vector split(const std::string& line, 42 | const std::string& delim, 43 | bool skipEmpty = false) { 44 | std::vector tokens; 45 | 46 | boost::char_separator sep( 47 | delim.c_str(), "", 48 | (skipEmpty ? boost::drop_empty_tokens : boost::keep_empty_tokens)); 49 | boost::tokenizer > tokenizer(line, sep); 50 | 51 | for (auto it = tokenizer.begin(); it != tokenizer.end(); ++it) 52 | tokens.push_back(*it); 53 | 54 | return tokens; 55 | } 56 | 57 | std::string dirName(const std::string& path) { 58 | boost::filesystem::path p(path); 59 | 60 | return p.parent_path().string(); 61 | } 62 | 63 | Mesh ObjReader::FromFile(const std::string& filename) { 64 | // std::cout << "ObjReader: " << filename << std::flush; 65 | 66 | struct ObjFace { 67 | int32_t vertices[3]; 68 | int32_t normals[3]; 69 | int32_t texcoords[3]; 70 | int32_t material; 71 | }; 72 | 73 | std::vector vertices; 74 | std::vector triangles; 75 | std::vector materials; 76 | 77 | std::ifstream in(filename.c_str()); 78 | if (!in.is_open()) throw std::runtime_error("Failed to open obj-file."); 79 | std::vector tokens; 80 | std::string line; 81 | 82 | std::vector obj_verts; 83 | std::vector obj_normals; 84 | std::vector obj_texcoords; 85 | std::vector obj_faces; 86 | std::map material_map; 87 | 88 | int32_t current_material = -1; 89 | // 1. read first all information. 90 | // 2. assembly then the vertices with help of vert_normal. 91 | // 3. construct mesh primitives. 92 | 93 | in.peek(); 94 | while (!in.eof()) { 95 | std::getline(in, line); 96 | tokens = split(line, " "); 97 | in.peek(); 98 | if (tokens.size() < 2) continue; 99 | if (tokens[0] == "#") continue; 100 | 101 | if (tokens[0] == "mtllib") { 102 | std::map mats; 103 | std::string mtl_filename = dirName(filename); 104 | mtl_filename += "/" + tokens[1]; 105 | std::cout << "MATERIAL: " << filename << " " << mtl_filename << std::endl; 106 | ParseMaterials(mtl_filename, mats); 107 | for (auto it = mats.begin(); it != mats.end(); ++it) { 108 | material_map[it->first] = materials.size(); 109 | materials.push_back(it->second); 110 | } 111 | } else if (tokens[0] == "v") { // vertices. 
112 |       if (tokens.size() < 4)
113 |         throw std::runtime_error("Failure parsing vertex: not enough tokens.");
114 |       float x = boost::lexical_cast<float>(tokens[1]);
115 |       float y = boost::lexical_cast<float>(tokens[2]);
116 |       float z = boost::lexical_cast<float>(tokens[3]);
117 | 
118 |       // FIXME: interpret also w coordinate.
119 | 
120 |       obj_verts.push_back(glow::vec4(x, y, z, 1.0));
121 |     } else if (tokens[0] == "vn") { // normals
122 |       if (tokens.size() < 4)
123 |         throw std::runtime_error("Failure parsing normal: not enough tokens.");
124 |       float x = boost::lexical_cast<float>(tokens[1]);
125 |       float y = boost::lexical_cast<float>(tokens[2]);
126 |       float z = boost::lexical_cast<float>(tokens[3]);
127 | 
128 |       obj_normals.push_back(glow::vec4(x, y, z, 0.0));
129 | 
130 |       // FIXME: interpret also w coordinate.
131 |     } else if (tokens[0] == "usemtl") { // material
132 |       std::string name = trim(tokens[1]);
133 |       if (material_map.find(name) == material_map.end())
134 |         throw std::runtime_error("Unknown material.");
135 |       current_material = material_map[name];
136 |     } else if (tokens[0] == "f") {
137 |       if (tokens.size() < 4)
138 |         throw std::runtime_error("Failure parsing face: not enough tokens.");
139 |       std::vector<std::string> face_tokens;
140 |       ObjFace face;
141 |       face.normals[0] = -1;
142 |       face.normals[1] = -1;
143 |       face.normals[2] = -1;
144 |       face.material = current_material;
145 | 
146 |       for (uint32_t j = 1; j < 4; ++j) {
147 |         face_tokens = split(tokens[j], "/");
148 |         if (face_tokens.size() == 0) {
149 |           // std::cout << stringify(tokens) << std::endl;
150 |           std::cout << tokens[j] << std::endl;
151 |           throw std::runtime_error(
152 |               "Failure parsing face: insufficient vertex params");
153 |         }
154 |         if (face_tokens.size() > 0) { // vertex
155 |           face.vertices[j - 1] =
156 |               boost::lexical_cast<int32_t>(face_tokens[0]) - 1;
157 |         }
158 | 
159 |         if (face_tokens.size() > 1) { // texture
160 |           if (face_tokens[1].size() > 0) { // non-empty
161 |             face.texcoords[j - 1] =
162 |                 boost::lexical_cast<int32_t>(face_tokens[1]) - 1;
163 |           }
164 |         }
165 | 
166 |         if (face_tokens.size() > 2) { // normal
167 |           if (face_tokens[2].size() > 0) { // non-empty
168 |             face.normals[j - 1] =
169 |                 boost::lexical_cast<int32_t>(face_tokens[2]) - 1;
170 |           }
171 |         }
172 |       }
173 | 
174 |       obj_faces.push_back(face);
175 |     }
176 | 
177 |     in.peek();
178 |   }
179 |   in.close();
180 | 
181 |   // getting normal; vertices together (map v * (N+1) + n -> index, where N is
182 |   // normals size)
183 |   std::map<uint32_t, uint32_t> vert_normal;
184 |   // generate for all faces the corresponding vertices:
185 |   for (uint32_t f = 0; f < obj_faces.size(); ++f) {
186 |     const ObjFace& face = obj_faces[f];
187 |     Mesh::Triangle triangle;
188 |     if (face.material >= 0) triangle.material = face.material;
189 |     for (uint32_t i = 0; i < 3; ++i) {
190 |       uint32_t key =
191 |           face.vertices[i] * (obj_normals.size() + 1) + (face.normals[i] + 1);
192 |       if (vert_normal.find(key) == vert_normal.end()) {
193 |         Mesh::Vertex vertex;
194 |         vertex.position = obj_verts[face.vertices[i]];
195 |         if (face.normals[i] >= 0) vertex.normal = obj_normals[face.normals[i]];
196 |         // FIXME: texture coordinates.
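// Illustration: the key packs the (vertex index, normal index) pair into a single
// integer so that every unique combination is emitted as exactly one Mesh::Vertex.
// With, say, 100 normals in the file, vertex 5 paired with normal 2 maps to
// key = 5 * 101 + (2 + 1) = 508; later faces reusing that pair look up the already
// stored vertex index instead of creating a duplicate.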
197 | 198 | triangle.vertices[i] = vertices.size(); 199 | vert_normal[key] = vertices.size(); 200 | vertices.push_back(vertex); 201 | } else { 202 | triangle.vertices[i] = vert_normal[key]; 203 | } 204 | } 205 | triangles.push_back(triangle); 206 | } 207 | 208 | if (materials.size() == 0) 209 | return Mesh(vertices, triangles); 210 | else 211 | return Mesh(vertices, triangles, materials); 212 | } 213 | 214 | void ObjReader::ParseMaterials( 215 | const std::string& filename, 216 | std::map& materials) { 217 | std::ifstream in(filename.c_str()); 218 | if (!in.is_open()) throw std::runtime_error("Failed to open material file."); 219 | std::vector tokens; 220 | std::string line; 221 | 222 | Mesh::Material* material = nullptr; 223 | 224 | in.peek(); 225 | while (!in.eof()) { 226 | std::getline(in, line); 227 | tokens = split(line, " "); 228 | if (tokens.size() < 2) continue; 229 | if (tokens[0] == "#") continue; 230 | 231 | if (tokens[0] == "newmtl") { 232 | std::string name = trim(tokens[1]); 233 | materials[name] = Mesh::Material(); 234 | material = &materials[name]; 235 | } else if (material != nullptr) { 236 | if (tokens[0] == "Ka") { 237 | if (tokens.size() < 4) 238 | throw std::runtime_error("Failure parsing material file."); 239 | float r = boost::lexical_cast(tokens[1]); 240 | float g = boost::lexical_cast(tokens[2]); 241 | float b = boost::lexical_cast(tokens[3]); 242 | material->ambient = glow::vec3(r, g, b); 243 | } else if (tokens[0] == "Kd") { 244 | if (tokens.size() < 4) 245 | throw std::runtime_error("Failure parsing material file."); 246 | float r = boost::lexical_cast(tokens[1]); 247 | float g = boost::lexical_cast(tokens[2]); 248 | float b = boost::lexical_cast(tokens[3]); 249 | material->diffuse = glow::vec3(r, g, b); 250 | } else if (tokens[0] == "Ks") { 251 | if (tokens.size() < 4) 252 | throw std::runtime_error("Failure parsing material file."); 253 | float r = boost::lexical_cast(tokens[1]); 254 | float g = boost::lexical_cast(tokens[2]); 255 | float b = boost::lexical_cast(tokens[3]); 256 | material->specular = glow::vec3(r, g, b); 257 | } else if (tokens[0] == "Ke") { 258 | if (tokens.size() < 4) 259 | throw std::runtime_error("Failure parsing material file."); 260 | float r = boost::lexical_cast(tokens[1]); 261 | float g = boost::lexical_cast(tokens[2]); 262 | float b = boost::lexical_cast(tokens[3]); 263 | material->emission = glow::vec3(r, g, b); 264 | } else if (tokens[0] == "Ns") { 265 | material->shininess = boost::lexical_cast(tokens[1]); 266 | } else if (tokens[0] == "d") { 267 | material->alpha = boost::lexical_cast(tokens[1]); 268 | } 269 | } 270 | 271 | in.peek(); 272 | } 273 | 274 | in.close(); 275 | } 276 | -------------------------------------------------------------------------------- /src/fastcd/processed_image.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), Cyrill Stachniss, University of Bonn 2 | #include "fastcd/processed_image.h" 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "fastcd/camera.h" 11 | 12 | namespace fastcd { 13 | 14 | ProcessedImage::ProcessedImage(const Image &img, 15 | const std::vector &depth) 16 | : Image(img), depth_(depth) { 17 | inconsistencies_ = 18 | cv::Mat(raw_image_.rows, raw_image_.cols, CV_32FC1, cv::Scalar(0)); 19 | } 20 | 21 | Image ProcessedImage::Warp(const ProcessedImage &target_image) const { 22 | cv::Mat result(raw_image_.rows, raw_image_.cols, 
raw_image_.type(), 23 | cv::Scalar(0, 0, 0)); 24 | cv::Mat zbuffer(raw_image_.rows / 2, raw_image_.cols / 2, CV_64FC1, 25 | cv::Scalar(1000)); 26 | Eigen::Vector2i coord; 27 | #pragma omp parallel for collapse(2) 28 | for (int i = 0; i < raw_image_.rows; ++i) { 29 | for (int j = 0; j < raw_image_.cols; j++) { 30 | int idx = i * raw_image_.cols + j; 31 | if (depth_[idx].w > 0.5) { 32 | coord = target_image.GetCamera().Project( 33 | static_cast(depth_[idx].x), 34 | static_cast(depth_[idx].y), 35 | static_cast(depth_[idx].z)); 36 | // Check if the point is seen in both images 37 | // (taking into account occlusions) 38 | if (coord(1) > 0 && coord(1) < raw_image_.rows && coord(0) > 0 && 39 | coord(0) < raw_image_.cols) { 40 | double distance = 41 | sqrt(pow(static_cast(depth_[idx].x) - 42 | target_image.GetCamera().GetPosition()(0), 2) + 43 | pow(static_cast(depth_[idx].y) - 44 | target_image.GetCamera().GetPosition()(1), 2) + 45 | pow(static_cast(depth_[idx].z) - 46 | target_image.GetCamera().GetPosition()(2), 2)); 47 | if (distance < zbuffer.at(coord(1) / 2, coord(0) / 2)) { 48 | zbuffer.at(coord(1) / 2, coord(0) / 2) = distance; 49 | int depth_idx = 50 | (raw_image_.rows - coord(1) - 1) * target_image.Width() + 51 | coord(0); 52 | double dist_target = 53 | sqrt(pow(target_image.GetCamera().GetPosition()(0) - 54 | target_image.GetDepth()[depth_idx].x, 2) + 55 | pow(target_image.GetCamera().GetPosition()(1) - 56 | target_image.GetDepth()[depth_idx].y, 2) + 57 | pow(target_image.GetCamera().GetPosition()(2) - 58 | target_image.GetDepth()[depth_idx].z, 2)); 59 | if (dist_target >= distance - 0.5) { 60 | result.at(coord(1), coord(0)) = 61 | raw_image_.at(raw_image_.rows - i - 1, j); 62 | } 63 | } 64 | } 65 | } 66 | } 67 | } 68 | 69 | // Interpolation 70 | cv::Mat interpolation(result.rows, result.cols, result.type()); 71 | int dilation_size = 1; 72 | cv::Mat dilation_kernel = cv::getStructuringElement( 73 | cv::MORPH_RECT, cv::Size(2 * dilation_size + 1, 2 * dilation_size + 1), 74 | cv::Point(dilation_size, dilation_size)); 75 | cv::dilate(result, interpolation, dilation_kernel); 76 | for (int i = 0; i < result.rows; i++) { 77 | for (int j = 0; j < result.cols; j++) { 78 | if (result.at(i, j) == cv::Vec3b(0, 0, 0)) { 79 | result.at(i, j) = interpolation.at(i, j); 80 | } 81 | } 82 | } 83 | return Image(result, target_image.GetCamera()); 84 | } 85 | 86 | void ProcessedImage::UpdateInconsistencies(const Image &image, 87 | int kernel_size) { 88 | cv::Mat inconsistency = 89 | Subtract(raw_image_, image.GetRawImage(), kernel_size); 90 | 91 | cv::normalize(inconsistency, inconsistency, 0.0f, 1.0f, cv::NORM_MINMAX); 92 | if (num_img_compared_ > 0) 93 | inconsistencies_ = Minimum(inconsistencies_, inconsistency); 94 | else 95 | inconsistencies_ = inconsistency; 96 | cv::medianBlur(inconsistencies_, inconsistencies_, 3); 97 | cv::normalize(inconsistencies_, inconsistencies_, 0.0f, 1.0f, 98 | cv::NORM_MINMAX); 99 | 100 | num_img_compared_++; 101 | } 102 | 103 | cv::Mat ProcessedImage::GetInconsistencies() { return inconsistencies_; } 104 | 105 | std::vector ProcessedImage::ComputeMeansCovariances(double chi_square) { 106 | std::vector found_labels; 107 | for (int i = 0; i <= max_region_label_; i++) { 108 | std::vector idx; 109 | for (size_t j = 0; j < regions_.size(); j++) { 110 | if (regions_[j].label_ == i) { 111 | idx.push_back(j); 112 | } 113 | } 114 | if (!idx.empty()) { 115 | found_labels.push_back(i); 116 | cv::Mat region(Height(), Width(), CV_8UC1, cv::Scalar(0)); 117 | std::vector> contours; 118 | 
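// Illustration: all contours carrying the current label are rasterized into one
// filled mask; the (x, y) coordinates of its non-zero pixels are then passed to
// cv::calcCovarMatrix, yielding the 2D centroid and 2x2 covariance of the changed
// region, stored as a PointCovariance2d (chi_square presumably scales the
// confidence region). Roughly, the data handed on looks like (values made up):
//   mean       ~ (412.3, 268.9)              // pixel centroid of the region
//   covariance ~ [930.1 120.4; 120.4 515.7]  // spread of the region in pixels^2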
for (int j : idx) { 119 | contours.push_back(regions_[j].contour_); 120 | } 121 | cv::drawContours(region, contours, -1, cv::Scalar(255), CV_FILLED); 122 | cv::Mat non_zero = FindNonZero(region); 123 | cv::Mat mean; 124 | cv::Mat covariance(2, 2, CV_64F); 125 | cv::calcCovarMatrix(non_zero, covariance, mean, 126 | CV_COVAR_NORMAL | CV_COVAR_ROWS | CV_COVAR_SCALE); 127 | Eigen::Vector2d emean; 128 | Eigen::Matrix2d ecovariance; 129 | emean << mean.at(0, 0), mean.at(0, 1); 130 | ecovariance << covariance.at(0, 0), covariance.at(0, 1), 131 | covariance.at(1, 0), covariance.at(1, 1); 132 | PointCovariance2d mean_covariance(emean, ecovariance, chi_square); 133 | regions_mean_cov_[i] = mean_covariance; 134 | } 135 | } 136 | return found_labels; 137 | } 138 | 139 | void ProcessedImage::ComputeRegions(int threshold_change_area) { 140 | cv::Mat image(Height(), Width(), CV_8UC1); 141 | inconsistencies_.convertTo(image, CV_8UC1, 255); 142 | cv::threshold(image, image, 30, 255.0f, cv::THRESH_TOZERO); 143 | 144 | cv::Mat dilation_kernel = 145 | cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(11, 11)); 146 | cv::Mat erosion_kernel = 147 | cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5)); 148 | cv::erode(image, image, erosion_kernel); 149 | cv::dilate(image, image, dilation_kernel); 150 | image = AverageSegmentedRegions(image, threshold_change_area); 151 | 152 | cv::equalizeHist(image, image); 153 | cv::normalize(image, image, 0, 255, cv::NORM_MINMAX); 154 | cv::threshold(image, image, 50, 255.0f, cv::THRESH_TOZERO); 155 | 156 | std::vector> contours; 157 | cv::findContours(image, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); 158 | for (size_t i = 0; i < contours.size(); i++) { 159 | ImageRegion region(contours[i]); 160 | regions_.push_back(region); 161 | } 162 | } 163 | 164 | const std::vector& ProcessedImage::GetRegions() const { 165 | return regions_; 166 | } 167 | 168 | const std::vector& ProcessedImage::GetDepth() const { 169 | return depth_; 170 | } 171 | 172 | void ProcessedImage::UpdateLabels(const std::vector &labels) { 173 | for (size_t i = 0; i < regions_.size(); i++) { 174 | if (labels[i] > max_region_label_) max_region_label_ = labels[i]; 175 | regions_[i].label_ = labels[i]; 176 | } 177 | } 178 | 179 | cv::Mat ProcessedImage::AverageSegmentedRegions(const cv::Mat &img, 180 | int threshold_change_area) { 181 | cv::Mat labels(img.rows, img.cols, img.type(), cv::Scalar(0)); 182 | cv::Mat dst(img.rows, img.cols, img.type(), cv::Scalar(0)); 183 | cv::Mat tmp = img.clone(); 184 | std::vector> contours; 185 | cv::findContours(tmp, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); 186 | std::vector cont_avgs(contours.size(), 0.f); 187 | for (size_t i = 0; i < contours.size(); ++i) { 188 | cv::drawContours(labels, contours, i, cv::Scalar(i + 1), CV_FILLED); 189 | cv::Rect roi = cv::boundingRect(contours[i]); 190 | cv::Mat mask; 191 | cv::bitwise_and(labels(roi) == (i + 1), img(roi) != 0, mask); 192 | cv::Scalar mean = cv::mean(img(roi), mask); 193 | cont_avgs[i] = mean[0]; 194 | } 195 | for (size_t i = 0; i < contours.size(); ++i) { 196 | if (cv::contourArea(contours[i]) > threshold_change_area) 197 | cv::drawContours(dst, contours, i, cv::Scalar(cont_avgs[i]), CV_FILLED); 198 | } 199 | return dst; 200 | } 201 | 202 | std::unordered_map 203 | ProcessedImage::GetMeansCovariances() const { 204 | return regions_mean_cov_; 205 | } 206 | 207 | cv::Mat ProcessedImage::Subtract(const cv::Mat &img1, const cv::Mat &img2, 208 | int kernel_size) { 209 | cv::Mat result(img1.rows, 
img1.cols, CV_32FC1, 0.0f); 210 | 211 | for (int i = 0; i < img1.rows; ++i) { 212 | for (int j = 0; j < img1.cols; ++j) { 213 | Eigen::Vector3f img1_color, img2_color; 214 | img1_color << static_cast(img1.at(i, j)[0]), 215 | static_cast(img1.at(i, j)[1]), 216 | static_cast(img1.at(i, j)[2]); 217 | if (img1_color.norm() > 0.01) { 218 | if (j - kernel_size / 2 > 0 && 219 | i - kernel_size / 2 > 0 && 220 | j + kernel_size / 2 < img2.cols && 221 | i + kernel_size / 2 < img2.rows) { 222 | cv::Rect window(j - kernel_size / 2, 223 | i - kernel_size / 2, kernel_size, 224 | kernel_size); 225 | // Loop on the window 226 | float min = 10000; 227 | for (int subi = 0; subi < window.height; subi++) { 228 | for (int subj = 0; subj < window.width; subj++) { 229 | cv::Mat area = img2(window); 230 | img2_color << static_cast( 231 | area.at(subi, subj)[0]), 232 | static_cast(area.at(subi, subj)[1]), 233 | static_cast(area.at(subi, subj)[2]); 234 | // Compare the values here and keep track of the minimum difference 235 | if (img2_color.norm() > 0.01) { 236 | float difference = (img1_color - img2_color).norm(); 237 | if (difference < min) min = difference; 238 | } 239 | } 240 | } 241 | if (min < 1000) result.at(i, j) = min; 242 | } 243 | } 244 | } 245 | } 246 | return result; 247 | } 248 | 249 | cv::Mat ProcessedImage::Minimum(const cv::Mat &img1, const cv::Mat &img2) { 250 | cv::Mat result(img1.rows, img1.cols, CV_32FC1, 0.0f); 251 | #pragma omp parallel for collapse(2) 252 | for (int i = 0; i < img1.rows; ++i) { 253 | for (int j = 0; j < img1.cols; ++j) { 254 | result.at(i, j) = 255 | std::min(img1.at(i, j), img2.at(i, j)); 256 | } 257 | } 258 | return result; 259 | } 260 | 261 | cv::Mat ProcessedImage::FindNonZero(const cv::Mat &img) { 262 | cv::Mat result(img.rows * img.cols, 2, CV_64F); 263 | int idx = 0; 264 | for (int i = 0; i < img.rows; i++) { 265 | for (int j = 0; j < img.cols; j++) { 266 | if (img.at(i, j) > 0) { 267 | result.at(idx, 0) = j; 268 | result.at(idx, 1) = i; 269 | idx++; 270 | } 271 | } 272 | } 273 | return result(cv::Rect(0, 0, 2, idx)); 274 | } 275 | 276 | int ProcessedImage::ImageCompared() { return num_img_compared_; } 277 | 278 | } // namespace fastcd 279 | -------------------------------------------------------------------------------- /src/visualizer/visualizer.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Jens Behley (jens.behley@igg.uni-bonn.de), Emanuele Palazzolo (emanuele.palazzolo@uni-bonn.de), , Cyrill Stachniss, University of Bonn 2 | #include "visualizer.h" 3 | 4 | #include 5 | #include 6 | #include 7 | #include "fastcd/camera.h" 8 | 9 | namespace fastcd { 10 | 11 | Visualizer::Visualizer(QWidget* parent, const QGLWidget* shareWidget, Qt::WindowFlags f) 12 | : QGLWidget(parent, shareWidget, f), contextInitialized_(initContext()) { 13 | if (!contextInitialized_) throw std::runtime_error("OpenGL context initialization failed!"); 14 | setFocusPolicy(Qt::StrongFocus); 15 | makeCurrent(); 16 | prgDrawMesh_.attach(glow::GlShader::fromCache( 17 | glow::ShaderType::VERTEX_SHADER, "visualizer/shaders/draw_mesh.vert")); 18 | prgDrawMesh_.attach(glow::GlShader::fromCache( 19 | glow::ShaderType::FRAGMENT_SHADER, "visualizer/shaders/draw_mesh.frag")); 20 | prgDrawMesh_.link(); 21 | 22 | // sun comes roughly from above... 
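// Illustration: five light sources are configured below -- lights[0] is the
// dominant "sun" (direction (0, 0, -1); w = 0 marks it as directional) with
// intensity 0.9, and lights[1..4] are four dim diagonal fill lights with
// intensity 0.1 so that faces turned away from the sun remain visible. The
// ambient/diffuse/specular uniforms are presumably consumed by the lighting
// model in draw_mesh.frag.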
23 | prgDrawMesh_.setUniform(glow::GlUniform("lights[0].position", glow::vec4(0, 0, -1, 0))); 24 | prgDrawMesh_.setUniform(glow::GlUniform("lights[0].ambient", glow::vec3(.9, .9, .9))); 25 | prgDrawMesh_.setUniform(glow::GlUniform("lights[0].diffuse", glow::vec3(.9, .9, .9))); 26 | prgDrawMesh_.setUniform(glow::GlUniform("lights[0].specular", glow::vec3(.9, .9, .9))); 27 | 28 | // more evenly distributed sun light... 29 | std::vector dirs = {glow::vec4(1, -1, 1, 0), glow::vec4(-1, -1, 1, 0), glow::vec4(1, -1, -1, 0), glow::vec4(-1, -1, -1, 0)}; 30 | glow::vec3 indirect_intensity = glow::vec3(.1, .1, .1); 31 | 32 | for (uint32_t i = 0; i < 4; ++i) { 33 | std::stringstream light_name; 34 | light_name << "lights[" << (i + 1) << "]"; 35 | 36 | prgDrawMesh_.setUniform(glow::GlUniform(light_name.str() + ".position", dirs[i])); 37 | prgDrawMesh_.setUniform(glow::GlUniform(light_name.str() + ".ambient", indirect_intensity)); 38 | prgDrawMesh_.setUniform(glow::GlUniform(light_name.str() + ".diffuse", indirect_intensity)); 39 | prgDrawMesh_.setUniform(glow::GlUniform(light_name.str() + ".specular", indirect_intensity)); 40 | } 41 | 42 | prgDrawMesh_.setUniform(glow::GlUniform("num_lights", 5)); 43 | 44 | prgDrawMesh_.setUniform(glow::GlUniform("model_mat", Eigen::Matrix4f::Identity())); 45 | prgDrawMesh_.setUniform(glow::GlUniform("normal_mat", Eigen::Matrix4f::Identity())); 46 | 47 | connect(&timer_, SIGNAL(timeout()), this, SLOT(updateGL())); 48 | 49 | coloredPointProgram_.attach( 50 | glow::GlShader::fromCache(glow::ShaderType::VERTEX_SHADER, 51 | "visualizer/shaders/colored_vertices.vert")); 52 | coloredPointProgram_.attach( 53 | glow::GlShader::fromCache(glow::ShaderType::FRAGMENT_SHADER, 54 | "visualizer/shaders/colored_vertices.frag")); 55 | coloredPointProgram_.link(); 56 | 57 | { 58 | // initialize coordinate axis buffer 59 | coordAxisVAO_.bind(); 60 | 61 | float red = rgb2float(1.0f, 0.0f, 0.0f); 62 | float green = rgb2float(0.0f, 1.0f, 0.0f); 63 | float blue = rgb2float(0.0f, 0.0f, 1.0f); 64 | 65 | std::vector cverts( 66 | {0, 0, 0, red, 1, 0, 0, red, 0, 0, 0, green, 0, 1, 0, green, 0, 0, 0, blue, 0, 0, 1, blue}); 67 | coordAxisVBO_.assign(cverts); 68 | coordAxisVAO_.setVertexAttribute(0, coordAxisVBO_, 4, glow::AttributeType::FLOAT, false, 4 * sizeof(GLfloat), 0); 69 | coordAxisVAO_.enableVertexAttribute(0); 70 | 71 | coordAxisVAO_.release(); 72 | } 73 | 74 | renderPoints_.attach( 75 | glow::GlShader::fromCache(glow::ShaderType::VERTEX_SHADER, 76 | "visualizer/shaders/render_points.vert")); 77 | renderPoints_.attach( 78 | glow::GlShader::fromCache(glow::ShaderType::FRAGMENT_SHADER, 79 | "visualizer/shaders/render_points.frag")); 80 | renderPoints_.link(); 81 | 82 | renderPoints_.setUniform(glow::GlUniform("texOutput", 0)); // set to texture 0. 83 | 84 | sampler_.setMagnifyingOperation(glow::TexMagOp::NEAREST); 85 | sampler_.setMinifyingOperation(glow::TexMinOp::NEAREST); 86 | sampler_.setWrapOperation(glow::TexWrapOp::CLAMP_TO_BORDER, glow::TexWrapOp::CLAMP_TO_BORDER); 87 | timer_.start(1. 
/ 30.); 88 | } 89 | 90 | float Visualizer::rgb2float(float r, float g, float b) { 91 | int32_t rgb = int32_t(round(r * 255.0f)); 92 | rgb = (rgb << 8) + int32_t(round(g * 255.0f)); 93 | rgb = (rgb << 8) + int32_t(round(b * 255.0f)); 94 | 95 | return float(rgb); 96 | } 97 | 98 | void Visualizer::initializeGL() { 99 | glClearColor(0.0f, 0.0f, 0.0f, 1.0f); 100 | glEnable(GL_DEPTH_TEST); 101 | glDepthFunc(GL_LEQUAL); 102 | glEnable(GL_LINE_SMOOTH); 103 | 104 | camera_.lookAt(0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f); 105 | } 106 | 107 | void Visualizer::resizeGL(int w, int h) { 108 | glViewport(0, 0, w, h); 109 | // set projection matrix 110 | float fov = glow::radians(45.0f); 111 | float aspect = float(w) / float(h); 112 | 113 | projection_ = glow::glPerspective(fov, aspect, 0.1f, 1000.0f); 114 | } 115 | 116 | glow::GlCamera::KeyboardModifier Visualizer::resolveKeyboardModifier(Qt::KeyboardModifiers modifiers) { 117 | // currently only single button presses are supported. 118 | glow::GlCamera::KeyboardModifier modifier = glow::GlCamera::KeyboardModifier::None; 119 | 120 | if (modifiers & Qt::ControlModifier) 121 | modifier = glow::GlCamera::KeyboardModifier::CtrlDown; 122 | else if (modifiers & Qt::ShiftModifier) 123 | modifier = glow::GlCamera::KeyboardModifier::ShiftDown; 124 | else if (modifiers & Qt::AltModifier) 125 | modifier = glow::GlCamera::KeyboardModifier::AltDown; 126 | 127 | return modifier; 128 | } 129 | 130 | glow::GlCamera::MouseButton Visualizer::resolveMouseButton(Qt::MouseButtons button) { 131 | // currently only single button presses are supported. 132 | glow::GlCamera::MouseButton btn = glow::GlCamera::MouseButton::NoButton; 133 | 134 | if (button & Qt::LeftButton) 135 | btn = glow::GlCamera::MouseButton::LeftButton; 136 | else if (button & Qt::RightButton) 137 | btn = glow::GlCamera::MouseButton::RightButton; 138 | else if (button & Qt::MiddleButton) 139 | btn = glow::GlCamera::MouseButton::MiddleButton; 140 | 141 | return btn; 142 | } 143 | 144 | glow::GlCamera::KeyboardKey Visualizer::resolveKeyboardKey(Qt::Key key) { 145 | switch(key) { 146 | case Qt::Key::Key_W: return glow::GlCamera::KeyboardKey::KeyW; 147 | case Qt::Key::Key_A: return glow::GlCamera::KeyboardKey::KeyA; 148 | case Qt::Key::Key_S: return glow::GlCamera::KeyboardKey::KeyS; 149 | case Qt::Key::Key_D: return glow::GlCamera::KeyboardKey::KeyD; 150 | case Qt::Key::Key_C: return glow::GlCamera::KeyboardKey::KeyC; 151 | case Qt::Key::Key_Space: return glow::GlCamera::KeyboardKey::KeySpace; 152 | default: return glow::GlCamera::KeyboardKey::KeyNotSupported; 153 | } 154 | } 155 | 156 | void Visualizer::mousePressEvent(QMouseEvent* event) { 157 | // if camera consumes the signal, simply return. // here we could also include some remapping. 158 | if (camera_.mousePressed(event->windowPos().x(), event->windowPos().y(), resolveMouseButton(event->buttons()), 159 | resolveKeyboardModifier(event->modifiers()))) { 160 | return; 161 | } 162 | } 163 | 164 | void Visualizer::mouseReleaseEvent(QMouseEvent* event) { 165 | // if camera consumes the signal, simply return. // here we could also include some remapping. 166 | if (camera_.mouseReleased(event->windowPos().x(), event->windowPos().y(), resolveMouseButton(event->buttons()), 167 | resolveKeyboardModifier(event->modifiers()))) { 168 | return; 169 | } 170 | } 171 | 172 | void Visualizer::mouseMoveEvent(QMouseEvent* event) { 173 | // if camera consumes the signal, simply return. // here we could also include some remapping. 
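// Illustration: every Qt input event is translated into its glow::GlCamera
// equivalent via the resolve* helpers above and forwarded to camera_; when the
// camera consumes the event, the handler returns without further processing.
// Shape of such a forwarded call (coordinates and button are hypothetical):
//   camera_.mouseMoved(640.0, 360.0, glow::GlCamera::MouseButton::LeftButton,
//                      glow::GlCamera::KeyboardModifier::None);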
174 | if (camera_.mouseMoved(event->windowPos().x(), event->windowPos().y(), resolveMouseButton(event->buttons()), 175 | resolveKeyboardModifier(event->modifiers()))) 176 | return; 177 | } 178 | 179 | void Visualizer::keyPressEvent(QKeyEvent* event) { 180 | try { 181 | int idx = std::stoi(event->text().toStdString()); 182 | if (idx == 0) idx = 10; 183 | if (idx - 1 < cameras_.size()) { 184 | Eigen::Matrix3f rotation = cameras_[idx - 1] 185 | .GetPose() 186 | .cast() 187 | .block<3, 3>(0, 0) 188 | .transpose(); 189 | Eigen::Vector3f forward = Eigen::Vector3f(0, 0, 1); 190 | forward = (rotation * forward).normalized(); 191 | forward(0) -= cameras_[idx - 1].GetPosition().cast()(0); 192 | forward(1) -= cameras_[idx - 1].GetPosition().cast()(1); 193 | forward(2) += cameras_[idx - 1].GetPosition().cast()(2); 194 | 195 | camera_.lookAt(-cameras_[idx - 1].GetPosition()(0), 196 | -cameras_[idx - 1].GetPosition()(1), 197 | cameras_[idx - 1].GetPosition()(2), forward(0), forward(1), 198 | forward(2)); 199 | } 200 | } catch (...) { 201 | } 202 | if (camera_.keyPressed(resolveKeyboardKey(static_cast(event->key())), 203 | resolveKeyboardModifier(event->modifiers()))) { 204 | return; 205 | } 206 | } 207 | 208 | void Visualizer::keyReleaseEvent(QKeyEvent* event) { 209 | if (camera_.keyReleased( 210 | resolveKeyboardKey(static_cast(event->key())), 211 | resolveKeyboardModifier(event->modifiers()))) { 212 | return; 213 | } 214 | } 215 | 216 | bool Visualizer::initContext() { 217 | // enabling core profile 218 | QGLFormat corefmt; 219 | corefmt.setVersion(5, 0); // getting highest compatible format... 220 | corefmt.setProfile(QGLFormat::CoreProfile); 221 | setFormat(corefmt); 222 | 223 | // version info. 224 | QGLFormat fmt = this->format(); 225 | std::cout << "OpenGL Context Version " << fmt.majorVersion() << "." << fmt.minorVersion() << " " 226 | << ((fmt.profile() == QGLFormat::CoreProfile) ? 
"core profile" : "compatibility profile") << std::endl; 227 | 228 | makeCurrent(); 229 | glow::inititializeGLEW(); 230 | 231 | return true; 232 | } 233 | void Visualizer::paintGL() { 234 | makeCurrent(); 235 | 236 | glEnable(GL_DEPTH_TEST); 237 | glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); 238 | glClearColor(bg_color_[0], bg_color_[1], bg_color_[2], 1.0f); 239 | view_ = camera_.matrix(); 240 | //gl to model coordinates change 241 | view_ = view_ * glow::glRotateZ(M_PI); 242 | //gl to ScanNet dataset coordinates change 243 | // view_ = view_ * glow::glRotateX(-M_PI/2); 244 | mvp_ = projection_ * view_ * model_; 245 | if (show_axis_) { 246 | glow::ScopedBinder vao_binder(coordAxisVAO_); 247 | glow::ScopedBinder program_binder(coloredPointProgram_); 248 | 249 | coloredPointProgram_.setUniform(mvp_); 250 | 251 | glDrawArrays(GL_LINES, 0, 6); 252 | } 253 | 254 | //lines drawing 255 | if(!linesVAOs_.empty()){ 256 | for (auto cameraVAO : linesVAOs_) { 257 | glow::ScopedBinder vao_binder(cameraVAO); 258 | glow::ScopedBinder program_binder(coloredPointProgram_); 259 | 260 | coloredPointProgram_.setUniform(mvp_); 261 | 262 | glDrawArrays(GL_LINES, 0, 8); 263 | } 264 | 265 | } 266 | prgDrawMesh_.bind(); 267 | prgDrawMesh_.setUniform(mvp_); 268 | prgDrawMesh_.setUniform(glow::GlUniform("view_pos", camera_.getPosition())); 269 | for (auto mesh : meshes_) { 270 | mesh.draw(prgDrawMesh_); 271 | } 272 | prgDrawMesh_.release(); 273 | 274 | if (show_output_) { 275 | vao_img_coords_.bind(); 276 | renderPoints_.bind(); 277 | renderPoints_.setUniform(mvp_); 278 | 279 | glActiveTexture(GL_TEXTURE0); 280 | output_.bind(); 281 | sampler_.bind(0); // ensure nearest neighbor interp. 282 | 283 | glDrawArrays(GL_POINTS, 0, output_.width() * output_.height()); 284 | 285 | output_.release(); 286 | sampler_.release(0); 287 | renderPoints_.release(); 288 | vao_img_coords_.release(); 289 | } 290 | } 291 | 292 | void Visualizer::ShowAxis(bool option) { show_axis_ = option; } 293 | 294 | void Visualizer::addMesh(Mesh& mesh) { 295 | meshes_.push_back(mesh); 296 | } 297 | 298 | void Visualizer::setBackgroundColor(float r, float g, float b, float a){ 299 | bg_color_ = {r, g, b, a}; 300 | } 301 | 302 | void Visualizer::addCamera(Camera camera, float color, float length) { 303 | cameras_.push_back(camera); 304 | std::vector vertices=camera.GetGlFovVertices(color, length); 305 | glow::GlVertexArray cameraVAO; 306 | glow::GlBuffer cameraVBO{glow::BufferTarget::ARRAY_BUFFER, 307 | glow::BufferUsage::STATIC_DRAW}; 308 | cameraVAO.bind(); 309 | cameraVBO.assign(vertices); 310 | cameraVAO.setVertexAttribute(0, cameraVBO, 4, glow::AttributeType::FLOAT, false, 4 * sizeof(GLfloat), 0); 311 | cameraVAO.enableVertexAttribute(0); 312 | cameraVAO.release(); 313 | linesVAOs_.push_back(cameraVAO); 314 | linesVBOs_.push_back(cameraVBO); 315 | } 316 | 317 | void Visualizer::addPoint(const Eigen::Vector3f& position, float size, 318 | float color) { 319 | std::vector vertices({position(0) + size / 2, 320 | position(1), 321 | position(2), 322 | color, 323 | 324 | position(0) - size / 2, 325 | position(1), 326 | position(2), 327 | color, 328 | 329 | position(0), 330 | position(1), 331 | position(2), 332 | color, 333 | 334 | position(0), 335 | position(1) + size / 2, 336 | position(2), 337 | color, 338 | 339 | position(0), 340 | position(1) - size / 2, 341 | position(2), 342 | color, 343 | 344 | position(0), 345 | position(1), 346 | position(2), 347 | color, 348 | 349 | position(0), 350 | position(1), 351 | position(2) + size / 2, 352 | 
color, 353 | 354 | position(0), 355 | position(1), 356 | position(2) - size / 2, 357 | color}); 358 | 359 | glow::GlVertexArray cameraVAO; 360 | glow::GlBuffer cameraVBO{glow::BufferTarget::ARRAY_BUFFER, 361 | glow::BufferUsage::STATIC_DRAW}; 362 | cameraVAO.bind(); 363 | cameraVBO.assign(vertices); 364 | cameraVAO.setVertexAttribute(0, cameraVBO, 4, glow::AttributeType::FLOAT, false, 4 * sizeof(GLfloat), 0); 365 | cameraVAO.enableVertexAttribute(0); 366 | cameraVAO.release(); 367 | linesVAOs_.push_back(cameraVAO); 368 | linesVBOs_.push_back(cameraVBO); 369 | } 370 | 371 | void Visualizer::addLine(const Eigen::Vector3f& start, const Eigen::Vector3f& end, 372 | float color) { 373 | std::vector vertices({start(0), start(1), start(2), color, 374 | end(0), end(1), end(2), color}); 375 | glow::GlVertexArray cameraVAO; 376 | glow::GlBuffer cameraVBO{glow::BufferTarget::ARRAY_BUFFER, 377 | glow::BufferUsage::STATIC_DRAW}; 378 | cameraVAO.bind(); 379 | cameraVBO.assign(vertices); 380 | cameraVAO.setVertexAttribute(0, cameraVBO, 4, glow::AttributeType::FLOAT, false, 381 | 4 * sizeof(GLfloat), 0); 382 | cameraVAO.enableVertexAttribute(0); 383 | cameraVAO.release(); 384 | linesVAOs_.push_back(cameraVAO); 385 | linesVBOs_.push_back(cameraVBO); 386 | } 387 | 388 | 389 | void Visualizer::setOutput(glow::GlTexture& texture) { 390 | std::cout << "Should render now points on the mesh surface!" << std::endl; 391 | show_output_ = true; 392 | output_ = texture; 393 | uint32_t width = texture.width(); 394 | uint32_t height = texture.height(); 395 | 396 | // initialize vbo for image coordinates. (needed for the vertex shader) 397 | std::vector img_coords; 398 | img_coords.reserve(width * height); 399 | float inv_width = 1.0f / float(width); 400 | float inv_height = 1.0f / float(height); 401 | 402 | // for each pixel of the texture, we are generating a "virtual vertex" 403 | for (uint32_t x = 0; x < width; ++x) { 404 | for (uint32_t y = 0; y < height; ++y) { 405 | img_coords.push_back(glow::vec2(inv_width * float(x + 0.5f), inv_height * float(y + 0.5f))); 406 | } 407 | } 408 | 409 | vbo_img_coords_.assign(img_coords); 410 | vao_img_coords_.bind(); 411 | vao_img_coords_.setVertexAttribute(0, vbo_img_coords_, 2, glow::AttributeType::FLOAT, false, 2 * sizeof(float), 412 | 0); // Note: does also the binding of the vbo! 413 | vao_img_coords_.enableVertexAttribute(0); 414 | vao_img_coords_.release(); 415 | 416 | updateGL(); 417 | } 418 | 419 | } // namespace fastcd 420 | --------------------------------------------------------------------------------