├── test ├── CMakeLists.txt ├── gtest.h ├── benchmark.CMakeLists.txt ├── normal_computation.CMakeLists.txt ├── utils.CMakeLists.txt ├── test.CMakeLists.txt ├── utils.h ├── utils.cpp ├── benchmark_normal_computation.cpp └── normalComputationTest.cpp ├── 3rdparty ├── CMakeLists.txt ├── opencl.CMakeLists.txt ├── vtk.CMakeLists.txt ├── boost.CMakeLists.txt ├── pcl.CMakeLists.txt ├── benchmark.CMakeLists.txt └── gtest.CMakeLists.txt ├── src ├── knn.h ├── CMakeLists.txt ├── normalComputation.h ├── covariance_simd.cpp └── normalComputation.cpp ├── CMakeLists.txt ├── cmake └── modules │ ├── FindGtest.cmake │ └── FindBenchmark.cmake ├── .gitlab-ci.yml ├── Readme.md ├── .exec-helper └── LICENSE /test/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include(utils.CMakeLists.txt) 2 | include(test.CMakeLists.txt) 3 | include(benchmark.CMakeLists.txt) 4 | -------------------------------------------------------------------------------- /3rdparty/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules) 2 | 3 | include(benchmark.CMakeLists.txt) 4 | include(boost.CMakeLists.txt) 5 | include(opencl.CMakeLists.txt) 6 | include(vtk.CMakeLists.txt) 7 | include(pcl.CMakeLists.txt) 8 | include(gtest.CMakeLists.txt) 9 | -------------------------------------------------------------------------------- /3rdparty/opencl.CMakeLists.txt: -------------------------------------------------------------------------------- 1 | find_package(OpenCL REQUIRED) 2 | 3 | set(LIBRARY opencl) 4 | add_library(${LIBRARY} INTERFACE) 5 | target_include_directories(${LIBRARY} SYSTEM INTERFACE ${OpenCL_INCLUDE_DIRS}) 6 | target_link_libraries(${LIBRARY} INTERFACE ${OpenCL_LIBRARIES}) 7 | link_directories(${OpenCL_LIBRARY_DIRS}) 8 | -------------------------------------------------------------------------------- /3rdparty/vtk.CMakeLists.txt: -------------------------------------------------------------------------------- 1 | find_package(VTK REQUIRED) 2 | include(${VTK_USE_FILE}) 3 | 4 | set(LIBRARY VTK) 5 | add_library(${LIBRARY} INTERFACE) 6 | target_include_directories(${LIBRARY} SYSTEM INTERFACE ${VTK_INCLUDE_DIRS}) 7 | target_link_libraries(${LIBRARY} INTERFACE ${VTK_LIBRARIES}) 8 | link_directories(${VTK_LIBRARY_DIRS}) 9 | -------------------------------------------------------------------------------- /src/knn.h: -------------------------------------------------------------------------------- 1 | #ifndef _KNN_INCLUDE 2 | #define _KNN_INCLUDE 3 | 4 | /** 5 | * @file Contains KNN specific variables 6 | */ 7 | 8 | #include 9 | 10 | namespace kapernikov { 11 | // Defines the number of neirest neighbours to use for the covariance calculation 12 | const uint32_t KNN_SIZE = 90U; 13 | } // namespace kapernikov 14 | 15 | #endif /* _KNN_INCLUDE */ 16 | -------------------------------------------------------------------------------- /test/gtest.h: -------------------------------------------------------------------------------- 1 | #ifndef GTEST_INCLUDE 2 | #define GTEST_INCLUDE 3 | 4 | /** 5 | * @file Convenience wrapper for Google Test 6 | */ 7 | 8 | #include 9 | 10 | // Convert BDD-style macros to Google Test macros 11 | #define SCENARIO(test, subtest, scenario_message) TEST(test, subtest) 12 | #define GIVEN(message) 13 | #define WHEN(message) 14 | #define THEN(message) 15 | 16 | #endif /* GTEST_INCLUDE */ 17 | 
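The gtest.h wrapper above maps the BDD-style test structure onto plain Google Test: SCENARIO(test, subtest, message) expands to TEST(test, subtest), while GIVEN/WHEN/THEN expand to nothing, so their braces merely scope the test body (normalComputationTest.cpp below uses it this way). A minimal sketch of a test written against this wrapper; the test name and values are illustrative and not part of the repository:

```
#include "gtest.h"  // the wrapper above; it pulls in Google Test

// Expands to TEST(WrapperExample, expansion); the message string is documentation only.
SCENARIO(WrapperExample, expansion, "Illustrate the BDD-style wrapper") {
    GIVEN("a point count") {               // GIVEN/WHEN/THEN expand to nothing: plain scopes
        const unsigned int length = 10U;
        WHEN("the count is doubled") {
            const unsigned int doubled = 2U * length;
            THEN("the result is twice the input") {
                ASSERT_EQ(doubled, 2U * length);
            }
        }
    }
}
```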
-------------------------------------------------------------------------------- /test/benchmark.CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(EXE_NAME benchmark_normal_computation) 2 | add_executable(${EXE_NAME} 3 | benchmark_normal_computation.cpp 4 | ) 5 | 6 | set(DEPENDENCIES 7 | normal-computation 8 | test-utils 9 | benchmark 10 | ) 11 | 12 | target_include_directories(${EXE_NAME} PRIVATE ${DEPENDENCIES}) 13 | target_link_libraries(${EXE_NAME} PRIVATE ${DEPENDENCIES}) 14 | install(TARGETS ${EXE_NAME} DESTINATION bin) 15 | -------------------------------------------------------------------------------- /test/normal_computation.CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(EXE_NAME normal_computation_test) 2 | add_executable(${EXE_NAME} 3 | normalComputationTest.cpp 4 | ) 5 | 6 | set(DEPENDENCIES 7 | normal-computation 8 | test-utils 9 | gtest 10 | ) 11 | 12 | target_include_directories(${EXE_NAME} PRIVATE ${DEPENDENCIES}) 13 | target_link_libraries(${EXE_NAME} PRIVATE ${DEPENDENCIES}) 14 | link_directories(${Gtest_LIBRARY_DIRS}) 15 | 16 | add_test(normal-computation ${EXE_NAME}) 17 | -------------------------------------------------------------------------------- /test/utils.CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(MODULE_NAME test-utils) 2 | set(LIBRARY_NAME ${PROJECT_NAME}-${MODULE_NAME}) 3 | 4 | set(SRCS 5 | utils.cpp 6 | ) 7 | 8 | set(DEPENDENCIES 9 | normal-computation 10 | pcl 11 | ) 12 | 13 | add_library(${LIBRARY_NAME} ${SRCS}) 14 | target_include_directories(${LIBRARY_NAME} PUBLIC . ${DEPENDENCIES}) 15 | target_link_libraries(${LIBRARY_NAME} PUBLIC ${DEPENDENCIES}) 16 | 17 | add_library(${MODULE_NAME} ALIAS ${LIBRARY_NAME}) 18 | -------------------------------------------------------------------------------- /src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(MODULE_NAME normal-computation) 2 | set(LIBRARY_NAME ${PROJECT_NAME}-${MODULE_NAME}) 3 | 4 | set(SRCS 5 | normalComputation.cpp 6 | covariance_simd.cpp 7 | ) 8 | 9 | set(DEPENDENCIES 10 | pcl 11 | boost 12 | opencl 13 | ) 14 | 15 | add_library(${LIBRARY_NAME} ${SRCS}) 16 | target_include_directories(${LIBRARY_NAME} PUBLIC . 
${DEPENDENCIES}) 17 | target_link_libraries(${LIBRARY_NAME} PUBLIC ${DEPENDENCIES}) 18 | 19 | add_library(${MODULE_NAME} ALIAS ${LIBRARY_NAME}) 20 | -------------------------------------------------------------------------------- /test/test.CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(EXE_NAME test_normal_computation) 2 | 3 | add_executable(${EXE_NAME} 4 | normalComputationTest.cpp 5 | ) 6 | 7 | set(DEPENDENCIES 8 | normal-computation 9 | test-utils 10 | gtest 11 | ) 12 | 13 | target_include_directories(${EXE_NAME} PRIVATE ${DEPENDENCIES}) 14 | target_link_libraries(${EXE_NAME} PRIVATE ${DEPENDENCIES}) 15 | link_directories(${Gtest_LIBRARY_DIRS}) 16 | 17 | add_test(normal-computation ${EXE_NAME}) 18 | install(TARGETS ${EXE_NAME} DESTINATION bin) 19 | -------------------------------------------------------------------------------- /3rdparty/boost.CMakeLists.txt: -------------------------------------------------------------------------------- 1 | find_package(Boost REQUIRED program_options) 2 | 3 | set(BOOST_LIBRARY boost) 4 | add_library(${BOOST_LIBRARY} INTERFACE) 5 | target_include_directories(${BOOST_LIBRARY} SYSTEM INTERFACE ${Boost_INCLUDE_DIRS}) 6 | target_link_libraries(${BOOST_LIBRARY} INTERFACE ${Boost_LIBRARIES}) 7 | link_directories(${Boost_LIBRARY_DIRS}) 8 | 9 | # boost program-options 10 | set(BOOST_LIBRARY boost-program-options) 11 | add_library(${BOOST_LIBRARY} INTERFACE) 12 | target_include_directories(${BOOST_LIBRARY} SYSTEM INTERFACE ${Boost_INCLUDE_DIRS}) 13 | target_link_libraries(${BOOST_LIBRARY} INTERFACE ${Boost_PROGRAM_OPTIONS_LIBRARY}) 14 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.1) 2 | 3 | project(normal-computation-in-opencl LANGUAGES CXX) 4 | 5 | set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/") 6 | 7 | # Set compiler options. 8 | set(CMAKE_CXX_STANDARD 14) 9 | set(CMAKE_CXX_STANDARD_REQUIRED 14) 10 | 11 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra") 12 | 13 | include(FindOpenMP) 14 | if(OPENMP_FOUND) 15 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") 16 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") 17 | set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}") 18 | endif(OPENMP_FOUND) 19 | 20 | add_subdirectory(3rdparty) 21 | add_subdirectory(src) 22 | 23 | enable_testing() 24 | add_subdirectory(test) 25 | -------------------------------------------------------------------------------- /3rdparty/pcl.CMakeLists.txt: -------------------------------------------------------------------------------- 1 | find_package(PCL 1.7 REQUIRED COMPONENTS common io segmentation visualization surface) 2 | 3 | set(LIBRARY pcl) 4 | add_library(${LIBRARY} INTERFACE) 5 | 6 | # For some reason, PCL puts multiple compiler arguments as a one item string. It is the first item in the list. 
We will handle it separately 7 | list(GET PCL_DEFINITIONS 0 UGLY_PCL_DEFINITIONS_HACK) 8 | 9 | # Convert the string to a proper list 10 | string(REPLACE " " ";" UGLY_DEFINITIONS_HACK_LIST ${UGLY_PCL_DEFINITIONS_HACK}) 11 | target_compile_options(${LIBRARY} INTERFACE ${UGLY_DEFINITIONS_HACK_LIST}) 12 | 13 | # Remove the weird item from the list 14 | list(REMOVE_AT PCL_DEFINITIONS 0) 15 | target_compile_definitions(${LIBRARY} INTERFACE ${PCL_DEFINITIONS}) 16 | 17 | target_include_directories(${LIBRARY} SYSTEM INTERFACE ${PCL_INCLUDE_DIRS}) 18 | target_link_libraries(${LIBRARY} INTERFACE ${PCL_LIBRARIES}) 19 | target_link_libraries(${LIBRARY} INTERFACE ${PCL_LIBRARIES} VTK) 20 | 21 | link_directories(${PCL_LIBRARY_DIRS}) 22 | -------------------------------------------------------------------------------- /cmake/modules/FindGtest.cmake: -------------------------------------------------------------------------------- 1 | set(MODULE_NAME Gtest) 2 | 3 | find_package(PkgConfig QUIET) 4 | if(${PKG_CONFIG_FOUND}) 5 | pkg_check_modules(${MODULE_NAME} gtest) 6 | endif() 7 | 8 | # Attempt to find it if not configured in pkgconfig 9 | if(NOT ${MODULE_NAME}_FOUND) 10 | MESSAGE(STATUS "Looking manually") 11 | set(${MODULE_NAME}_LIBRARIES gtest) 12 | find_path(${MODULE_NAME}_INCLUDE_DIRS NAMES gtest.h PATH_SUFFIXES gtest) 13 | find_library(${MODULE_NAME}_LIBRARY_DIRS NAMES ${${MODULE_NAME}_LIBRARIES}) 14 | 15 | include(FindPackageHandleStandardArgs) 16 | 17 | find_package_handle_standard_args(${MODULE_NAME} 18 | FOUND_VAR ${MODULE_NAME}_FOUND 19 | REQUIRED_VARS ${MODULE_NAME}_INCLUDE_DIRS ${MODULE_NAME}_LIBRARY_DIRS 20 | ) 21 | 22 | mark_as_advanced(${MODULE_NAME}_INCLUDE_DIRS) 23 | mark_as_advanced(${MODULE_NAME}_LIBRARIES) 24 | mark_as_advanced(${MODULE_NAME}_LIBRARY_DIRS) 25 | endif() 26 | -------------------------------------------------------------------------------- /cmake/modules/FindBenchmark.cmake: -------------------------------------------------------------------------------- 1 | set(MODULE_NAME Benchmark) 2 | 3 | find_package(PkgConfig QUIET) 4 | if(${PKG_CONFIG_FOUND}) 5 | pkg_check_modules(${MODULE_NAME} benchmark) 6 | endif() 7 | 8 | # Attempt to find it if not configured in pkgconfig 9 | if(NOT ${MODULE_NAME}_FOUND) 10 | MESSAGE(STATUS "Looking manually") 11 | set(${MODULE_NAME}_LIBRARIES benchmark) 12 | find_path(${MODULE_NAME}_INCLUDE_DIRS NAMES benchmark.h PATH_SUFFIXES benchmark) 13 | find_library(${MODULE_NAME}_LIBRARY_DIRS NAMES ${${MODULE_NAME}_LIBRARIES}) 14 | 15 | include(FindPackageHandleStandardArgs) 16 | 17 | find_package_handle_standard_args(${MODULE_NAME} 18 | FOUND_VAR ${MODULE_NAME}_FOUND 19 | REQUIRED_VARS ${MODULE_NAME}_INCLUDE_DIRS ${MODULE_NAME}_LIBRARY_DIRS 20 | ) 21 | 22 | mark_as_advanced(${MODULE_NAME}_INCLUDE_DIRS) 23 | mark_as_advanced(${MODULE_NAME}_LIBRARIES) 24 | mark_as_advanced(${MODULE_NAME}_LIBRARY_DIRS) 25 | endif() 26 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | image: ubuntu:artful 2 | variables: 3 | INSTALL_DIR: install 4 | 5 | stages: 6 | - build 7 | - run 8 | 9 | .run_binary: &run_binary 10 | stage: run 11 | variables: 12 | BINARY: "UNKNOWN" 13 | dependencies: 14 | - build 15 | before_script: 16 | - apt-get update 17 | - apt-get install --assume-yes libpcl-dev libvtk6-dev libboost-dev ocl-icd-opencl-dev libpocl-dev # Use *-dev versions of the required packaged so that we do not have to list the version explicitly 
18 | script: 19 | - echo "Running binary = ${BINARY}" 20 | - "${BINARY}" 21 | 22 | build: 23 | stage: build 24 | variables: 25 | MODE: Release 26 | before_script: 27 | - apt-get update 28 | - apt-get install --assume-yes git cmake g++ make libpcl-dev libvtk6-dev libboost-dev ocl-icd-opencl-dev 29 | script: 30 | - cmake -H. -Bbuild/${MODE} -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=${MODE} 31 | - make -C build/${MODE} --jobs 4 32 | - make -C build/${MODE} install 33 | artifacts: 34 | expire_in: 10 days 35 | paths: 36 | - ${INSTALL_DIR}/* 37 | 38 | test: 39 | <<: *run_binary 40 | variables: 41 | BINARY: "${INSTALL_DIR}/bin/test_normal_computation" 42 | 43 | benchmark: 44 | <<: *run_binary 45 | variables: 46 | BINARY: "${INSTALL_DIR}/bin/benchmark_normal_computation" 47 | -------------------------------------------------------------------------------- /3rdparty/benchmark.CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(MODULE_NAME benchmark) 2 | set(PACKAGE_NAME Benchmark) 3 | 4 | find_package(${PACKAGE_NAME}) 5 | option(USE_SYSTEM_BENCHMARK "Use the ${MODULE_NAME} library provided by the system" ${${PACKAGE_NAME}_FOUND}) 6 | 7 | if(${USE_SYSTEM_BENCHMARK}) 8 | if(NOT ${${PACKAGE_NAME}_FOUND}) 9 | MESSAGE(FATAL_ERROR "Could NOT find ${MODULE_NAME}") 10 | endif() 11 | else() 12 | MESSAGE(STATUS " -> Using latest upstream version of ${PACKAGE_NAME} instead") 13 | 14 | include(ExternalProject) 15 | 16 | set(BENCHMARK_INSTALL_DIR ${CMAKE_CURRENT_BINARY_DIR}/${MODULE_NAME}) 17 | 18 | ExternalProject_Add(Benchmark 19 | PREFIX benchmark 20 | GIT_REPOSITORY https://github.com/google/benchmark.git 21 | CMAKE_ARGS -DBENCHMARK_ENABLE_GTEST_TESTS=OFF -DBENCHMARK_ENABLE_TESTING=OFF -DBENCHMARK_ENABLE_INSTALL=ON -DCMAKE_INSTALL_PREFIX=${BENCHMARK_INSTALL_DIR} -DCMAKE_BUILD_TYPE=Release 22 | INSTALL_DIR ${BENCHMARK_INSTALL_DIR} 23 | UPDATE_COMMAND "" 24 | ) 25 | 26 | set(${PACKAGE_NAME}_INCLUDE_DIRS ${BENCHMARK_INSTALL_DIR}/include) 27 | set(${PACKAGE_NAME}_LIBRARIES ${BENCHMARK_INSTALL_DIR}/lib/libbenchmark.a) 28 | set(${PACKAGE_NAME}_LIBRARY_DIRS ${BENCHMARK_INSTALL_DIR}/lib) 29 | endif() 30 | 31 | add_library(benchmark INTERFACE) 32 | target_include_directories(benchmark SYSTEM INTERFACE ${${PACKAGE_NAME}_INCLUDE_DIRS}) 33 | target_link_libraries(benchmark INTERFACE ${${PACKAGE_NAME}_LIBRARIES}) 34 | 35 | if(NOT ${USE_SYSTEM_BENCHMARK}) 36 | add_dependencies(benchmark ${PACKAGE_NAME}) 37 | endif() 38 | -------------------------------------------------------------------------------- /3rdparty/gtest.CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(MODULE_NAME gtest) 2 | set(PACKAGE_NAME Gtest) 3 | 4 | find_package(${PACKAGE_NAME}) 5 | option(USE_SYSTEM_GTEST "Use the ${MODULE_NAME} library provided by the system" ${${PACKAGE_NAME}_FOUND}) 6 | 7 | if(${USE_SYSTEM_GTEST}) 8 | if(NOT ${${PACKAGE_NAME}_FOUND}) 9 | MESSAGE(FATAL_ERROR "Could NOT find ${MODULE_NAME}") 10 | endif() 11 | else() 12 | MESSAGE(STATUS " -> Using latest upstream version of ${PACKAGE_NAME} instead") 13 | 14 | include(ExternalProject) 15 | 16 | set(INSTALL_DIR ${CMAKE_CURRENT_BINARY_DIR}/${MODULE_NAME}) 17 | 18 | ExternalProject_Add(${PACKAGE_NAME} 19 | PREFIX ${MODULE_NAME} 20 | GIT_REPOSITORY https://github.com/google/googletest.git 21 | CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DCMAKE_BUILD_TYPE=Release -DBUILD_GMOCK=OFF -DBUILD_GTEST=ON 
-DBUILD_SHARED_LIBS=OFF -DINSTALL_GMOCK=OFF -DINSTALL_GTEST=ON -Dgmock_build_tests=OFF -Dgtest_build_samples=OFF -Dgtest_build_tests=OFF -Dgtest_disable_pthreads=OFF -Dgtest_force_shared_crt=OFF -Dgtest_hide_internal_symbols=OFF 22 | INSTALL_DIR ${INSTALL_DIR} 23 | UPDATE_COMMAND "" 24 | ) 25 | 26 | set(${PACKAGE_NAME}_INCLUDE_DIRS ${INSTALL_DIR}/include) 27 | set(${PACKAGE_NAME}_LIBRARIES ${INSTALL_DIR}/lib/libgtest.a ${INSTALL_DIR}/lib/libgtest_main.a) 28 | set(${PACKAGE_NAME}_LIBRARY_DIRS ${INSTALL_DIR}/lib) 29 | endif() 30 | 31 | add_library(gtest INTERFACE) 32 | target_include_directories(gtest SYSTEM INTERFACE ${${PACKAGE_NAME}_INCLUDE_DIRS}) 33 | target_link_libraries(gtest INTERFACE ${${PACKAGE_NAME}_LIBRARIES}) 34 | 35 | if(NOT ${USE_SYSTEM_GTEST}) 36 | add_dependencies(gtest ${PACKAGE_NAME}) 37 | endif() 38 | -------------------------------------------------------------------------------- /test/utils.h: -------------------------------------------------------------------------------- 1 | #ifndef UTILS_INCLUDE 2 | #define UTILS_INCLUDE 3 | 4 | /** 5 | * @file Utilities for generating (test) inputs 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | namespace kapernikov { 12 | namespace test { 13 | /** 14 | * Generate a collection of random unique indices. 15 | * 16 | * @param[in] length The desired length of the random indices collection 17 | * @param[in] max_index The maximum index value to use in the generated collection 18 | * @returns A collection of unique random indices 19 | */ 20 | std::vector generateIndices(unsigned int length, unsigned int max_index) noexcept; 21 | 22 | /** 23 | * Generate a cloud of random points 24 | * 25 | * @param[in] length The desired number of points in the resulting cloud 26 | * @returns A cloud of the given length 27 | */ 28 | ::pcl::PointCloud<::pcl::PointXYZ> generateCloud(unsigned int length) noexcept; 29 | 30 | /** 31 | * Generate a point cloud with random points and associated indices 32 | * 33 | * @param[in] length The desired number of points in the point cloud. 
The number of unique indices will be equal to the number of points in the cloud 34 | * @param[out] cloud The generated point cloud 35 | * @param[out] indices The generated indices associated with the generated point cloud 36 | */ 37 | void generatePointCloud(unsigned int length, ::pcl::PointCloud<::pcl::PointXYZ>& cloud, std::vector& indices) noexcept; 38 | } // namespace test 39 | } // namespace kapernikov 40 | 41 | #endif /* UTILS_INCLUDE */ 42 | -------------------------------------------------------------------------------- /test/utils.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * @file Utilities for generating (test) inputs 3 | */ 4 | #include "utils.h" 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | namespace kapernikov { 11 | namespace test { 12 | std::vector generateIndices(unsigned int length, unsigned int max_index) noexcept { 13 | std::vector indices; 14 | 15 | std::default_random_engine engine_int; 16 | std::uniform_int_distribution dist_int(0,max_index); 17 | auto gen_int = std::bind(dist_int,engine_int); 18 | 19 | indices.reserve(length); 20 | while(indices.size() < length) { 21 | auto random_number = gen_int(); 22 | 23 | // Check whether the random number is unique 24 | if(std::find(indices.begin(), indices.end(), random_number) == indices.end()) { 25 | indices.emplace_back(random_number); 26 | } 27 | } 28 | return indices; 29 | } 30 | 31 | pcl::PointCloud generateCloud(unsigned int length) noexcept { 32 | pcl::PointCloud cloud; 33 | 34 | std::default_random_engine engine_float; 35 | std::uniform_real_distribution dist_float; 36 | auto gen_float = std::bind(dist_float,engine_float); 37 | 38 | cloud.reserve(length); 39 | for(size_t i = 0; i < length; ++i) { 40 | cloud.push_back(pcl::PointXYZ(gen_float(), gen_float(), gen_float())); 41 | } 42 | return cloud; 43 | } 44 | 45 | void generatePointCloud(unsigned int length, pcl::PointCloud& cloud, std::vector& indices) noexcept { 46 | indices = generateIndices(length, length); 47 | cloud = generateCloud(length); 48 | } 49 | } // namespace test 50 | } // namespace kapernikov 51 | -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | # gpu-normal-computation 2 | Repository accompanying the associated Kapernikov blog post. 3 | 4 | ## Build and install 5 | ### Prerequisites 6 | - OpenCL development files 7 | 8 | ### Instructions 9 | The build uses cmake. Use the usual cmake incantations with an appropriate generator. Make sure to build a _Release_ build when running the benchmarks (add `-DCMAKE_BUILD_TYPE=Release` to the cmake command line). In addition, install the binaries. A custom install folder can be set by passing `-DCMAKE_INSTALL_PREFIX=` to the cmake command line. The default installation directory is `/usr/local`. See below for some examples on specific platforms. 10 | 11 | _Note_: Check the .gitlab-ci.yml file for an example system installation and configuration. 12 | 13 | ### Examples 14 | #### Linux 15 | The example uses the `make` generator. 16 | 17 | ``` 18 | $ cmake -H. -Bbuild -DCMAKE_INSTALL_PREFIX=install -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release 19 | $ make -C build --jobs 4 20 | $ make -C build install 21 | ``` 22 | The required binaries will then be present in the _install_ directory. 23 | 24 | #### Windows 25 | The example uses the `ninja` generator. 26 | 27 | Prepare the build environment: 28 | 1. 
Open the appropriate build environment shell 29 | 1. Make sure that cmake.exe and ninja.exe are in your path 30 | 31 | Then execute: 32 | ``` 33 | $ cmake -G "Ninja" -H. -Bbuild -DCMAKE_INSTALL_PREFIX=install -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release 34 | $ ninja -C build --jobs 4 35 | $ ninja -C build install 36 | ``` 37 | The required binaries will then be present in the _install_ directory. 38 | 39 | ## Run 40 | ### Prerequisites 41 | - A properly functioning OpenCL runtime 42 | - _For the benchmarks:_ A tool to fix your CPU to a specific frequency (e.g. cpupower on \*NIX systems) 43 | - _For the benchmarks:_ A binary built in release mode 44 | 45 | ### Benchmarks 46 | #### Linux 47 | ``` 48 | $ /bin/benchmark_normal_computation 49 | ``` 50 | 51 | #### Windows 52 | ``` 53 | $ \bin\benchmark_normal_computation.exe 54 | ``` 55 | 56 | ### Unittests 57 | #### Linux 58 | ``` 59 | $ /bin/test_normal_computation 60 | ``` 61 | 62 | #### Windows 63 | ``` 64 | $ \bin\test_normal_computation.exe 65 | ``` 66 | -------------------------------------------------------------------------------- /src/normalComputation.h: -------------------------------------------------------------------------------- 1 | #ifndef _NORMAL_COMPUTATION_INCLUDE 2 | #define _NORMAL_COMPUTATION_INCLUDE 3 | 4 | /** 5 | * @file Declares the various implementations of normal computation. 6 | */ 7 | 8 | #include 9 | 10 | #include 11 | #include 12 | #include 13 | 14 | namespace kapernikov { 15 | using SigmaMatrix = Eigen::MatrixXf; 16 | using SigmaMatrixPtr = std::shared_ptr; 17 | 18 | /** 19 | * Functions in this namespace use the pcl implementation 20 | */ 21 | namespace pcl { 22 | /** 23 | * Compute the normals of the given cloud. The normal calculation will be based on the given neighbourhood indices. 24 | * 25 | * @param[in] cloud The point cloud to be used 26 | * @param[in] neighbour_indices For each point in the given cloud this parameter contains a collection of the indices of the neighbouring points on the same index 27 | * @returns A matrix denoting a vector of 3 dimensional normals. Each row in the matrix is the normal associated with the neighbours of the respective point in the given cloud. 28 | */ 29 | SigmaMatrixPtr compute_normals(const ::pcl::PointCloud<::pcl::PointXYZ>& cloud, const std::vector>& neighbour_indices) noexcept; 30 | } // namespace pcl 31 | 32 | /** 33 | * Functions in this namespace use the openCL implementation 34 | */ 35 | namespace opencl { 36 | /** 37 | * Builds and initializes the opencl kernel for the currently running system. 38 | */ 39 | bool buildKernel() noexcept; 40 | 41 | /** 42 | * Compute the normals of the given cloud. The normal calculation will be based on the given neighbourhood indices. 43 | * 44 | * @pre The buildKernel() function must have been called in order to initialize the kernel. Initializing once suffices for multiple calls to this function. 45 | * @param[in] cloud The point cloud to be used 46 | * @param[in] neighbour_indices A matrix where each column is associated with the point with the same index in the given cloud. Each column itself consists of the indices of the neighbouring points of the associated point. 47 | * @returns A matrix denoting a vector of 3 dimensional normals. Each row in the matrix is the normal associated with the neighbours of the respective point in the given cloud. 
48 | */ 49 | SigmaMatrixPtr compute_normals(const ::pcl::PointCloud<::pcl::PointXYZ>& cloud, const Eigen::MatrixXi& neighbour_indices) noexcept; 50 | } // namespace opencl 51 | } // namespace kapernikov 52 | 53 | #endif /* _NORMAL_COMPUTATION_INCLUDE */ 54 | -------------------------------------------------------------------------------- /test/benchmark_normal_computation.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * @file Benchmarks for the normal computation implementations 3 | */ 4 | #include 5 | 6 | #include 7 | 8 | #include "normalComputation.h" 9 | #include "knn.h" 10 | #include "utils.h" 11 | 12 | using kapernikov::test::generateCloud; 13 | using kapernikov::test::generateIndices; 14 | using kapernikov::KNN_SIZE; 15 | 16 | namespace { 17 | const uint64_t MIN_RANGE = 2U << 13; 18 | const uint64_t MAX_RANGE = 2U << 20; 19 | } // namespace 20 | 21 | static void normal_computation_cpu(benchmark::State& state) { 22 | for (auto _ : state) { 23 | auto cloud = generateCloud(state.range(0)); 24 | std::vector> indices; 25 | indices.reserve(cloud.size()); 26 | for(size_t i = 0U; i < cloud.size(); ++i) { 27 | std::vector new_indices = generateIndices(KNN_SIZE, state.range(0)-1U); 28 | indices.emplace_back(new_indices); 29 | } 30 | 31 | // In order to be able to do an honest benchmark measurement, this benchmark is measured 32 | // in the same way as the GPU benchmark(s) are measured. 33 | auto start = std::chrono::high_resolution_clock::now(); 34 | auto result = kapernikov::pcl::compute_normals(cloud, indices); 35 | auto end = std::chrono::high_resolution_clock::now(); 36 | 37 | auto elapsed_seconds = std::chrono::duration_cast>(end - start); 38 | 39 | state.SetIterationTime(elapsed_seconds.count()); 40 | 41 | benchmark::DoNotOptimize(result); 42 | } 43 | } 44 | BENCHMARK(normal_computation_cpu)->RangeMultiplier(2)->Range(MIN_RANGE, MAX_RANGE)->UseManualTime()->Unit(benchmark::kMillisecond); 45 | 46 | 47 | static void normal_computation_opencl(benchmark::State& state) { 48 | kapernikov::opencl::buildKernel(); 49 | 50 | for (auto _ : state) { 51 | auto cloud = generateCloud(state.range(0)); 52 | Eigen::MatrixXi indices(KNN_SIZE, cloud.size()); 53 | for(size_t i = 0U; i < cloud.size(); ++i) { 54 | std::vector new_indices = generateIndices(KNN_SIZE, state.range(0)-1U); 55 | for(size_t j = 0U; j < new_indices.size(); ++j) { 56 | indices(j, i) = new_indices.at(j); 57 | } 58 | } 59 | 60 | // Since this is a GPU calculation, do a manual timing 61 | auto start = std::chrono::high_resolution_clock::now(); 62 | auto result = kapernikov::opencl::compute_normals(cloud, indices); 63 | auto end = std::chrono::high_resolution_clock::now(); 64 | 65 | auto elapsed_seconds = std::chrono::duration_cast>(end - start); 66 | 67 | state.SetIterationTime(elapsed_seconds.count()); 68 | 69 | benchmark::DoNotOptimize(result); 70 | } 71 | } 72 | BENCHMARK(normal_computation_opencl)->RangeMultiplier(2)->Range(MIN_RANGE, MAX_RANGE)->UseManualTime()->Unit(benchmark::kMillisecond); 73 | 74 | BENCHMARK_MAIN(); 75 | -------------------------------------------------------------------------------- /test/normalComputationTest.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * @file Unit tests for normal computation 3 | */ 4 | #include 5 | 6 | #include 7 | #include 8 | 9 | #include "gtest.h" 10 | #include "utils.h" 11 | 12 | #include "normalComputation.h" 13 | #include "knn.h" 14 | 15 | using pcl::PointCloud; 16 | using 
pcl::PointXYZ; 17 | 18 | using kapernikov::test::generateCloud; 19 | using kapernikov::test::generateIndices; 20 | 21 | namespace { 22 | const uint64_t MIN_RANGE = 2U << 13; 23 | const uint64_t MAX_RANGE = 2U << 20; 24 | } // namespace 25 | 26 | namespace kapernikov { 27 | namespace test { 28 | SCENARIO(NormalComputationTest, comparisonTest, "Compare the CPU normal computation with the opencl version") { 29 | opencl::buildKernel(); 30 | 31 | GIVEN("A knn cache") { 32 | for(unsigned int i = MIN_RANGE; i <= MAX_RANGE; i = i << 1U) { 33 | const uint32_t TEST_SIZE = i; 34 | auto cloud = generateCloud(TEST_SIZE); 35 | 36 | std::vector> indices; 37 | indices.reserve(cloud.size()); 38 | for(size_t i = 0U; i < cloud.size(); ++i) { 39 | std::vector new_indices = generateIndices(KNN_SIZE, TEST_SIZE-1U); 40 | indices.emplace_back(new_indices); 41 | } 42 | 43 | Eigen::MatrixXi indices_as_matrix(indices.front().size(), indices.size()); 44 | for(size_t i = 0; i < indices.size(); ++i) { 45 | for(size_t j = 0; j < indices.at(j).size(); ++j) { 46 | indices_as_matrix(j, i) = indices.at(i).at(j); 47 | } 48 | } 49 | 50 | ASSERT_GT(TEST_SIZE, KNN_SIZE); 51 | 52 | WHEN("We compute the normals") { 53 | auto expected_result = pcl::compute_normals(cloud, indices); 54 | auto opencl_result = opencl::compute_normals(cloud, indices_as_matrix); 55 | 56 | THEN("The size of the result should be bigger than 0") { 57 | ASSERT_GT(expected_result->size(), 0U); 58 | ASSERT_GT(opencl_result->size(), 0U); 59 | } 60 | 61 | THEN("The result must have the same dimensions") { 62 | ASSERT_EQ(expected_result->size(), opencl_result->size()); 63 | ASSERT_EQ(expected_result->rows(), opencl_result->rows()); 64 | ASSERT_EQ(expected_result->cols(), opencl_result->cols()); 65 | } 66 | 67 | THEN("The results should be equal") { 68 | for(uint32_t row = 0U; row < expected_result->rows(); ++row) { 69 | for(uint32_t col = 0U; col < expected_result->cols(); ++col) { 70 | //std::cout << "Row = " << row << " col = " << col << std::endl; 71 | ASSERT_NEAR((*expected_result)(row, col), (*opencl_result)(row, col), 14.0E-4); 72 | } 73 | } 74 | } 75 | } 76 | } 77 | } 78 | } 79 | } // namespace test 80 | } // namespace kapernikov 81 | -------------------------------------------------------------------------------- /.exec-helper: -------------------------------------------------------------------------------- 1 | commands: 2 | setup: Setup the project infrastructure (compilation database etc.) 
3 | init: Initialize the project build dir 4 | build: Build-only + install-only 5 | build-only: Build compontents 6 | install-only: Install the built components 7 | run: Run a binary 8 | clean: Clean the build directory 9 | distclean: Distclean the build directory 10 | cp-source: Copy the source to the container 11 | start-container: Start the container 12 | docker: Execute the command in Docker 13 | 14 | patterns: 15 | TARGET: 16 | default-values: 17 | - test_normal_computation 18 | - benchmark_normal_computation 19 | short-option: t 20 | long-option: target 21 | MODE: 22 | default-values: 23 | - debug 24 | - release 25 | short-option: m 26 | long-option: mode 27 | CONTAINER: 28 | default-values: 29 | - normal-computation-opencl 30 | short-option: d 31 | long-option: docker 32 | COMMAND: 33 | default-values: 34 | - init 35 | - build 36 | - run 37 | short-option: c 38 | long-option: cmd 39 | 40 | setup: 41 | - command-line-command 42 | 43 | init: 44 | - command-line-command 45 | 46 | cp-source: 47 | - command-line-command 48 | 49 | build: 50 | - build-only 51 | - install-only 52 | 53 | build-only: 54 | - make 55 | 56 | install-only: 57 | - make 58 | 59 | clean: 60 | - make 61 | 62 | distclean: 63 | - command-line-command 64 | 65 | run: 66 | - command-line-command 67 | 68 | start-container: 69 | - command-line-command 70 | 71 | docker: 72 | - cp-source 73 | - command-line-command 74 | 75 | command-line-command: 76 | patterns: 77 | - EH_WORKING_DIR 78 | working-dir: "{EH_WORKING_DIR}" 79 | 80 | setup: 81 | patterns: 82 | - MODE 83 | - EH_WORKING_DIR 84 | command-line: [ ln, -fs, "build/{MODE}/build/compile_commands.json"] 85 | 86 | init: 87 | patterns: 88 | - MODE 89 | - EH_WORKING_DIR 90 | command-line: [cmake, -H., "-Bbuild/{MODE}/build", "-DCMAKE_INSTALL_PREFIX=build/{MODE}/install", -DCMAKE_EXPORT_COMPILE_COMMANDS=ON, "-DCMAKE_BUILD_TYPE={MODE}" ] 91 | distclean: 92 | patterns: 93 | - MODE 94 | - EH_WORKING_DIR 95 | command-line: [ rm, -rf, "build/{MODE}/build"] 96 | 97 | run: 98 | patterns: 99 | - TARGET 100 | - MODE 101 | - EH_WORKING_DIR 102 | working-dir: "{EH_WORKING_DIR}" 103 | command-line: [nice, -n, -10, "build/{MODE}/install/bin/{TARGET}"] 104 | 105 | 106 | cp-source: 107 | patterns: 108 | - CONTAINER 109 | - EH_WORKING_DIR 110 | command-line: 111 | - create-source-dir: [ docker, exec, "{CONTAINER}", "mkdir -p {EH_WORKING_DIR}"] 112 | - copy-sources: [docker, cp, ., "{CONTAINER}:{EH_WORKING_DIR}/"] 113 | 114 | start-container: 115 | patterns: 116 | - CONTAINER 117 | command-line: [ docker, start, "{CONTAINER}"] 118 | 119 | docker: 120 | patterns: 121 | - CONTAINER 122 | - COMMAND 123 | - MODE 124 | - TARGET 125 | - EH_WORKING_DIR 126 | command-line: [ docker, exec, "--workdir={EH_WORKING_DIR}", "{CONTAINER}", "exec-helper {COMMAND} --mode {MODE} --target {TARGET}"] 127 | 128 | make: 129 | patterns: 130 | - TARGET 131 | - MODE 132 | - EH_WORKING_DIR 133 | working-dir: "{EH_WORKING_DIR}" 134 | build-dir: build/{MODE}/build 135 | 136 | build-only: 137 | command-line: 138 | - "{TARGET}" 139 | 140 | clean: 141 | command-line: 142 | - clean 143 | 144 | install-only: 145 | command-line: 146 | - install 147 | -------------------------------------------------------------------------------- /src/covariance_simd.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * @file File containing a vectorized version of the computeCovarianceMatrix() function of pcl for dense point clouds. 
Interface and implementation based on \see http://docs.pointclouds.org/1.8.1/centroid_8hpp_source.html#l00263 3 | */ 4 | #include 5 | #include 6 | #include 7 | 8 | namespace compute = boost::compute; 9 | 10 | namespace { 11 | compute::device device = compute::system::default_device(); 12 | compute::context context(device); 13 | compute::command_queue queue(context, device); 14 | } // namespace 15 | 16 | unsigned int 17 | computeCovarianceMatrix(const pcl::PointCloud& cloud, 18 | const std::vector& indices, 19 | const Eigen::Vector4f& centroid, 20 | Eigen::Matrix3f& covariances) noexcept { 21 | if(!cloud.is_dense) { 22 | std::cout << "Currently only dense point clouds are supported" 23 | << std::endl; 24 | return 0; 25 | } 26 | 27 | std::vector host_x; 28 | host_x.reserve(indices.size()); 29 | std::vector host_y; 30 | host_y.reserve(indices.size()); 31 | std::vector host_z; 32 | host_z.reserve(indices.size()); 33 | 34 | for(auto index : indices) { 35 | host_x.emplace_back(cloud[index].x); 36 | host_y.emplace_back(cloud[index].y); 37 | host_z.emplace_back(cloud[index].z); 38 | } 39 | 40 | // create a vector of each dimension on the device 41 | compute::vector x(host_x.begin(), host_x.end(), queue); 42 | compute::vector y(host_y.begin(), host_y.end(), queue); 43 | compute::vector z(host_z.begin(), host_z.end(), queue); 44 | 45 | // Create the centroid scalar as a vector repeating the scalar 46 | compute::vector centroid_x(host_x.size(), centroid[0], queue); 47 | compute::vector centroid_y(host_y.size(), centroid[1], queue); 48 | compute::vector centroid_z(host_z.size(), centroid[2], queue); 49 | 50 | // Create the intermediate pt 51 | compute::vector pt_x(x.size(), context); 52 | compute::vector pt_y(y.size(), context); 53 | compute::vector pt_z(z.size(), context); 54 | 55 | // Create pt 56 | compute::transform(x.begin(), x.end(), centroid_x.begin(), pt_x.begin(), 57 | compute::minus(), queue); 58 | compute::transform(y.begin(), y.end(), centroid_y.begin(), pt_y.begin(), 59 | compute::minus(), queue); 60 | compute::transform(z.begin(), z.end(), centroid_z.begin(), pt_z.begin(), 61 | compute::minus(), queue); 62 | 63 | // Calculate the covariance values 64 | float covariance_xx; 65 | compute::transform_reduce(pt_x.begin(), pt_x.end(), pt_x.begin(), 66 | &covariance_xx, compute::multiplies(), 67 | compute::plus(), queue); 68 | 69 | float covariance_xy; 70 | compute::transform_reduce(pt_x.begin(), pt_x.end(), pt_y.begin(), 71 | &covariance_xy, compute::multiplies(), 72 | compute::plus(), queue); 73 | 74 | float covariance_xz; 75 | compute::transform_reduce(pt_x.begin(), pt_x.end(), pt_z.begin(), 76 | &covariance_xz, compute::multiplies(), 77 | compute::plus(), queue); 78 | 79 | float covariance_yy; 80 | compute::transform_reduce(pt_y.begin(), pt_y.end(), pt_y.begin(), 81 | &covariance_yy, compute::multiplies(), 82 | compute::plus(), queue); 83 | 84 | float covariance_yz; 85 | compute::transform_reduce(pt_y.begin(), pt_y.end(), pt_z.begin(), 86 | &covariance_yz, compute::multiplies(), 87 | compute::plus(), queue); 88 | 89 | float covariance_zz; 90 | compute::transform_reduce(pt_z.begin(), pt_z.end(), pt_z.begin(), 91 | &covariance_zz, compute::multiplies(), 92 | compute::plus(), queue); 93 | 94 | covariances(0, 0) = covariance_xx; 95 | covariances(0, 1) = covariance_xy; 96 | covariances(0, 2) = covariance_xz; 97 | covariances(1, 1) = covariance_yy; 98 | covariances(1, 2) = covariance_yz; 99 | covariances(2, 2) = covariance_zz; 100 | 101 | // The covariance matrix is symmetric: mirror the elements 
over the diagonal 102 | covariances(1, 0) = covariances(0, 1); 103 | covariances(2, 0) = covariances(0, 2); 104 | covariances(2, 1) = covariances(1, 2); 105 | return indices.size(); 106 | } 107 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. 
You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 
129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 166 | -------------------------------------------------------------------------------- /src/normalComputation.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * @file Implementations of normal computation. The implementations are based on the PCL 1.8.1 implementation. 3 | */ 4 | #include "normalComputation.h" 5 | 6 | #include 7 | #include 8 | 9 | #include 10 | #include "knn.h" 11 | 12 | namespace compute = boost::compute; 13 | 14 | using std::vector; 15 | 16 | using pcl::PointCloud; 17 | using pcl::PointXYZ; 18 | 19 | namespace { 20 | compute::device device = compute::system::default_device(); 21 | compute::context context(device); 22 | compute::command_queue queue(context, device); 23 | 24 | const char source[] = BOOST_COMPUTE_STRINGIZE_SOURCE( 25 | // NOTE: Define KNN_SIZE as a build parameter using "-D KNN_SIZE=" 26 | __constant unsigned int KNN_BATCH_SIZE = KNN_SIZE; 27 | __constant float EPSILON = 1.19209e-07f; 28 | 29 | inline float4 computeRoots2(const float b, const float c) { 30 | //adapted from PCL source 31 | float x = (float)(0.0f); 32 | float d = b * b - 4.0f * c; 33 | d = select( 34 | d, 0.0f, 35 | d < 0.0f); // Use select instead of an if to prevent wavefront divergence. 
36 | // This condition should always evaluate to false: in the other case, this function is not used properly 37 | 38 | float sd = sqrt(d); 39 | 40 | float z = (float)(0.5f * (b + sd)); 41 | float y = (float)(0.5f * (b - sd)); 42 | return (float4)(x, y, z, 0); 43 | } 44 | 45 | inline float4 computeRoots(const float4 row1, const float4 row2, const float4 row3) { 46 | //adapted from PCL source 47 | // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The 48 | // eigenvalues are the roots to this equation, all guaranteed to be 49 | // real-valued, because the matrix is symmetric. 50 | float c0 = row1.x * row2.y * row3.z + 2.0f * row1.y * row1.z * row2.z - 51 | row1.x * row2.z * row2.z - row2.y * row1.z * row1.z - 52 | row3.z * row1.y * row1.y; 53 | 54 | float c1 = row1.x * row2.y - row1.y * row1.y + row1.x * row3.z - 55 | row1.z * row1.z + row2.y * row3.z - row2.z * row2.z; 56 | 57 | float c2 = row1.x + row2.y + row3.z; 58 | 59 | 60 | float4 roots; 61 | if(fabs(c0) < EPSILON) { // one root is 0 -> quadratic equation 62 | roots = computeRoots2(c2, c1); 63 | } 64 | else { 65 | const float s_inv3 = (1.0f / 3.0f); 66 | const float s_sqrt3 = sqrt(3.0f); 67 | // Construct the parameters used in classifying the roots of the equation 68 | // and in solving the equation for the roots in closed form. 69 | float c2_over_3 = c2 * s_inv3; 70 | float a_over_3 = (c1 - c2 * c2_over_3) * s_inv3; 71 | if(a_over_3 > 0.0f) { 72 | a_over_3 = 0.0f; 73 | } 74 | 75 | float half_b = 76 | 0.5f * (c0 + c2_over_3 * (2.0f * c2_over_3 * c2_over_3 - c1)); 77 | 78 | float q = half_b * half_b + a_over_3 * a_over_3 * a_over_3; 79 | if(q > 0.0f) { 80 | q = 0.0f; 81 | } 82 | 83 | // Compute the eigenvalues by solving for the roots of the polynomial. 84 | float rho = sqrt(-a_over_3); 85 | float theta = atan2(sqrt(-q), half_b) * s_inv3; 86 | float cos_theta = cos(theta); 87 | float sin_theta = sin(theta); 88 | 89 | roots.x = c2_over_3 + 2.0f * rho * cos_theta; 90 | roots.y = c2_over_3 - rho * (cos_theta + s_sqrt3 * sin_theta); 91 | roots.z = c2_over_3 - rho * (cos_theta - s_sqrt3 * sin_theta); 92 | 93 | // Sort in increasing order. 94 | if(roots.x >= roots.y) { 95 | roots.xy = roots.yx; 96 | } 97 | if(roots.y >= roots.z) { 98 | roots.yz = roots.zy; 99 | if(roots.x >= roots.y) 100 | roots.xy = roots.yx; 101 | } 102 | 103 | if(roots.x <= 104 | 0.0f) // eigenval for symetric positive semi-definite matrix can not be negative! Set it to 0 105 | roots = computeRoots2(c2, c1); 106 | } 107 | return roots; 108 | } 109 | 110 | // Note: the returned eigenvalues are not scaled back to the actual scale 111 | inline float4 eigen33(const float4 row1, const float4 row2, const float4 row3) { 112 | // Calculate the scale by determing the largest value of all rows 113 | float4 abs1 = fabs(row1); 114 | float4 abs2 = fabs(row2); 115 | float4 abs3 = fabs(row3); 116 | float4 max = fmax(abs1, fmax(abs2, abs3)); 117 | float scale = fmax(max.x, fmax(max.y, max.z)); 118 | 119 | if(scale == FLT_MIN) { 120 | scale = 1.0f; 121 | } 122 | 123 | float4 scaled_row1 = row1/scale; 124 | float4 scaled_row2 = row2/scale; 125 | float4 scaled_row3 = row3/scale; 126 | 127 | float4 roots = computeRoots(scaled_row1, scaled_row2, scaled_row3); 128 | return roots; 129 | } 130 | 131 | inline float4 centroid(const float4* indexed_points) { 132 | float4 centroid = (float4)(0, 0, 0, 0); 133 | 134 | // NOTE: We are using a numerically more stable algorithm for calculating the centroid. 
135 | // See https://diego.assencio.com/?index=c34d06f4f4de2375658ed41f70177d59 136 | for(unsigned int i = 0; i < KNN_BATCH_SIZE; ++i) { 137 | float4 additional_point = indexed_points[i]; 138 | centroid += (additional_point - centroid) / (i + 1); 139 | } 140 | return centroid; 141 | } 142 | 143 | inline void covariance(const float4* indexed_points, const float4 centroid, float4* cov_matrix_row1, float4* cov_matrix_row2, float4* cov_matrix_row3) { 144 | *cov_matrix_row1 = (float4)(0.0f, 0.0f, 0.0f, 0.0f); 145 | *cov_matrix_row2 = (float4)(0.0f, 0.0f, 0.0f, 0.0f); 146 | *cov_matrix_row3 = (float4)(0.0f, 0.0f, 0.0f, 0.0f); 147 | 148 | for(unsigned int i = 0; i < KNN_BATCH_SIZE; ++i) { 149 | float4 pt = indexed_points[i] - centroid; 150 | cov_matrix_row2->y += pt.y * pt.y; 151 | cov_matrix_row2->z += pt.y * pt.z; 152 | cov_matrix_row3->z += pt.z * pt.z; 153 | pt = pt * pt.x; 154 | cov_matrix_row1->x += pt.x; 155 | cov_matrix_row1->y += pt.y; 156 | cov_matrix_row1->z += pt.z; 157 | } 158 | 159 | // fill in the other half of the matrix 160 | cov_matrix_row3->y = cov_matrix_row2->z; 161 | cov_matrix_row3->x = cov_matrix_row1->z; 162 | cov_matrix_row2->x = cov_matrix_row1->y; 163 | 164 | // Normalize the covariance matrix 165 | *cov_matrix_row1 /= KNN_BATCH_SIZE; 166 | *cov_matrix_row2 /= KNN_BATCH_SIZE; 167 | *cov_matrix_row3 /= KNN_BATCH_SIZE; 168 | } 169 | 170 | inline float squaredNorm(const float4 input) { 171 | return input.x * input.x + input.y * input.y + input.z * input.z; 172 | } 173 | 174 | inline bool isMuchSmallerThan(const float x, const float z) { 175 | return x * x <= EPSILON * EPSILON * z * z; 176 | } 177 | 178 | inline float4 unitOrthogonal(const float4 vector) { 179 | // Take (-y, z, 0) and normalize it, unless x and y are both close to zero. Take (0, -z, y) and normalize instead 180 | if(isMuchSmallerThan(vector.x, vector.z) && isMuchSmallerThan(vector.y, vector.z)) { 181 | float size = rsqrt(vector.y*vector.y + vector.z * vector.z); 182 | return (float4)(0.0f, -vector.z, vector.y, 0.0f)/size; 183 | } 184 | float size = rsqrt(vector.x*vector.x + vector.y * vector.y); 185 | return (float4)(-vector.y, vector.x, 0.0f, 0.0f)/size; 186 | } 187 | 188 | inline void index(__global const float4* cloud, __global const int* indices, float4* indexed_points) { 189 | const unsigned int start_offset = get_global_id(0) * KNN_BATCH_SIZE; 190 | for(unsigned int i = 0; i < KNN_BATCH_SIZE; ++i) { 191 | indexed_points[i] = cloud[indices[start_offset + i]]; 192 | } 193 | } 194 | 195 | inline float4 associated_eigenvector(const float4 scaled_row1, const float4 scaled_row2, const float4 scaled_row3, const float eigenvalue) { 196 | scaled_row1.x -= eigenvalue; 197 | scaled_row2.y -= eigenvalue; 198 | scaled_row3.z -= eigenvalue; 199 | 200 | float4 vec1 = cross(scaled_row1, scaled_row2); 201 | float4 vec2 = cross(scaled_row1, scaled_row3); 202 | float4 vec3 = cross(scaled_row2, scaled_row3); 203 | 204 | float len1 = squaredNorm(vec1); 205 | float len2 = squaredNorm(vec2); 206 | float len3 = squaredNorm(vec3); 207 | 208 | float4 largest_eigenvector = len1 >= len2 ? vec1 : vec2; 209 | float largest_length = len1 >= len2 ? len1 : len2; 210 | largest_eigenvector = largest_length >= len3 ? largest_eigenvector : vec3; 211 | largest_length = largest_length >= len3 ? 
largest_length : len3; 212 | 213 | return largest_eigenvector/sqrt(largest_length); 214 | } 215 | 216 | inline float4 normal_from_covariance(const float4 row1, const float4 row2, const float4 row3) { 217 | // Note: Calculating the normal is eigenvector associated with the lowest eigenvalue. However, if the original shape was planer, the lowest eigenvalue will be very low, causing an inaccurate normal. 218 | // Therefore, the normal is calculated from the (normalized) cross product of the eigenvectors associated with the two largest eigenvalues 219 | 220 | // Calculate the scale by determing the largest value of all rows 221 | float4 abs1 = fabs(row1); 222 | float4 abs2 = fabs(row2); 223 | float4 abs3 = fabs(row3); 224 | float4 max = fmax(abs1, fmax(abs2, abs3)); 225 | float scale = fmax(max.x, fmax(max.y, max.z)); 226 | 227 | if(scale == FLT_MIN) { 228 | scale = 1.0f; 229 | } 230 | 231 | float4 scaled_row1 = row1/scale; 232 | float4 scaled_row2 = row2/scale; 233 | float4 scaled_row3 = row3/scale; 234 | 235 | // Note: computeRoots will output the eigenvalues in ascending order 236 | float4 eigenvalues = computeRoots(scaled_row1, scaled_row2, scaled_row3); 237 | 238 | if((eigenvalues.z - eigenvalues.x) <= EPSILON) { 239 | // All three are equal => the eigenvectors are the identity matrix 240 | // Return the first eigenvector as the normal 241 | return (float4)(-1.0f, 0.0f, 0.0f, 0.0f); 242 | } 243 | 244 | if((eigenvalues.z - eigenvalues.y) <= EPSILON) { 245 | // second and third equal 246 | float4 eigenvector1 = associated_eigenvector(scaled_row1, scaled_row2, scaled_row3, eigenvalues.x); 247 | 248 | // In this case, it makes little sense to calculate the third eigenvector from the first eigenvalue, only to calculate the normal based on this result. So in this case, just return the first eigenvector 249 | return -eigenvector1; 250 | } 251 | 252 | float4 eigenvector2; 253 | float4 eigenvector3 = associated_eigenvector(scaled_row1, scaled_row2, scaled_row3, eigenvalues.z); 254 | if((eigenvalues.y - eigenvalues.x) <= EPSILON) { 255 | // first and second equal 256 | eigenvector2 = unitOrthogonal(eigenvector3); 257 | } else { 258 | eigenvector2 = associated_eigenvector(scaled_row1, scaled_row2, scaled_row3, eigenvalues.y); 259 | 260 | // In order to improve unicity of the solution, we use the convention of making sure that the first coordinate value 261 | // of the eigenvector is positive 262 | eigenvector2 = eigenvector2.x >= 0 ? 
eigenvector2 : -eigenvector2; 263 | } 264 | return cross(eigenvector3, eigenvector2); 265 | } 266 | 267 | __kernel void normals(__global const float4* points, 268 | __global const int* indices, 269 | __global float4* eigenvalues) { 270 | // Convert indices to an actual array of ordered points 271 | float4 indexed_points[KNN_SIZE]; 272 | index(points, indices, indexed_points); 273 | 274 | // Calculate centroid 275 | float4 element_centroid = centroid(indexed_points); 276 | 277 | // Calculate the covariance of the neighbourhood 278 | float4 cov_matrix_row1 = (float4)(0.0f, 0.0f, 0.0f, 0.0f); 279 | float4 cov_matrix_row2 = (float4)(0.0f, 0.0f, 0.0f, 0.0f); 280 | float4 cov_matrix_row3 = (float4)(0.0f, 0.0f, 0.0f, 0.0f); 281 | 282 | covariance(indexed_points, element_centroid, &cov_matrix_row1, &cov_matrix_row2, &cov_matrix_row3); 283 | 284 | // Calculate the eigen values based on the covariance matrix 285 | eigenvalues[get_global_id(0)] = normal_from_covariance(cov_matrix_row1, cov_matrix_row2, cov_matrix_row3); 286 | } 287 | ); 288 | 289 | compute::program filter_program = 290 | compute::program::create_with_source(source, context); 291 | } // namespace 292 | 293 | namespace kapernikov { 294 | namespace pcl { 295 | SigmaMatrixPtr compute_normals(const PointCloud& cloud, const vector>& neighbour_indices) noexcept { 296 | assert(neighbour_indices.size() == cloud.size()); 297 | 298 | SigmaMatrixPtr normals(new SigmaMatrix(cloud.size(), 3)); 299 | #pragma omp parallel for 300 | for(size_t idx = 0U; idx < cloud.size(); idx++) { 301 | const vector& indices = neighbour_indices.at(idx); 302 | 303 | Eigen::Vector4f centroid; 304 | ::pcl::compute3DCentroid(cloud, indices, centroid); 305 | 306 | // warning, PCL is INACCURATE in computing the covariance matrix! 307 | // see https://github.com/PointCloudLibrary/pcl/issues/560 308 | // this is enough to completely break the algorithm. 309 | // even with the workaround below, the accuracy is not very good (compared to numpy) but good enough. 310 | EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix(3, 3); 311 | if(::pcl::computeCovarianceMatrixNormalized(cloud, indices, 312 | centroid, 313 | covariance_matrix) != 0) { 314 | // now do PCA! 
315 | EIGEN_ALIGN16 Eigen::Vector3f eigen_value{0, 0, 0}; 316 | EIGEN_ALIGN16 Eigen::Matrix3f eigen_vectors; 317 | ::pcl::eigen33(covariance_matrix, eigen_vectors, eigen_value); 318 | 319 | assert(eigen_value(2) >= eigen_value(0)); 320 | assert(eigen_value(2) >= eigen_value(1)); 321 | 322 | auto eigenVector1 = eigen_vectors.col(2); 323 | auto eigenVector2 = eigen_vectors.col(1); 324 | 325 | // Apply the convention where the first coordinate of the second eigenvector is positive 326 | if(eigenVector2(0) < 0) { 327 | eigenVector2 = -eigenVector2; 328 | } 329 | auto normal = eigenVector1.cross(eigenVector2); 330 | (*normals)(idx, 0) = normal(0); 331 | (*normals)(idx, 1) = normal(1); 332 | (*normals)(idx, 2) = normal(2); 333 | } else { 334 | (*normals)(idx, 0) = std::numeric_limits::quiet_NaN(); 335 | (*normals)(idx, 1) = std::numeric_limits::quiet_NaN(); 336 | (*normals)(idx, 2) = std::numeric_limits::quiet_NaN(); 337 | } 338 | } 339 | return normals; 340 | } 341 | } 342 | 343 | namespace opencl { 344 | bool buildKernel() noexcept { 345 | try { 346 | filter_program.build("-D KNN_SIZE=" + std::to_string(KNN_SIZE)); 347 | return true; 348 | } catch (compute::opencl_error e) { 349 | std::cout << "Build Error: " << std::endl << filter_program.build_log(); 350 | } 351 | return false; 352 | } 353 | 354 | SigmaMatrixPtr compute_normals(const PointCloud& cloud, const Eigen::MatrixXi& neighbour_indices) noexcept { 355 | assert(static_cast(neighbour_indices.cols()) == cloud.size()); 356 | 357 | unsigned int total_points = neighbour_indices.cols(); 358 | 359 | // Copy the indices to the gpu 360 | auto indices_bufsize = neighbour_indices.size() * sizeof(int); 361 | compute::buffer indices_device(context, indices_bufsize, CL_MEM_READ_ONLY); 362 | queue.enqueue_write_buffer(indices_device, 0, indices_bufsize, neighbour_indices.data()); 363 | 364 | // Copy the cloud to the gpu 365 | auto bufsize = cloud.size() * sizeof(PointXYZ); 366 | compute::buffer cloud_device(context, bufsize, CL_MEM_READ_ONLY); 367 | queue.enqueue_write_buffer(cloud_device, 0, bufsize, cloud.points.data()); 368 | 369 | // Allocate output 370 | compute::vector output_device(total_points, context); 371 | 372 | // create filter kernel and set arguments 373 | compute::kernel filter_kernel(filter_program, "normals"); 374 | 375 | filter_kernel.set_arg(0, cloud_device); 376 | filter_kernel.set_arg(1, indices_device); 377 | filter_kernel.set_arg(2, output_device); 378 | 379 | // Do the openCL calculation 380 | queue.enqueue_1d_range_kernel(filter_kernel, 0, total_points, 0); 381 | 382 | // Copy back the output 383 | vector normal_collection(output_device.size(), compute::float4_(0.0f, 0.0f, 0.0f, 0.0f)); 384 | compute::copy(output_device.begin(), output_device.end(), normal_collection.begin(), queue); 385 | 386 | SigmaMatrixPtr results(new SigmaMatrix(total_points, 3)); 387 | for(unsigned int i = 0U; i < total_points; ++i) { 388 | const compute::float4_ normal = normal_collection[i]; 389 | for(unsigned int j = 0U; j < 3U; ++j) { 390 | (*results)(i, j) = normal[j]; 391 | } 392 | } 393 | return results; 394 | } 395 | } // namespace opencl 396 | } // namespace kapernikov 397 | --------------------------------------------------------------------------------
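For reference, a minimal sketch of how the pieces above fit together, closely following what benchmark_normal_computation.cpp already does: generate a random cloud and a KNN index matrix with the test utilities, build the OpenCL kernel once (the precondition documented in normalComputation.h), then call the OpenCL implementation of compute_normals(). The main() function and the cloud size of 1024 (any value larger than KNN_SIZE works) are illustrative and not part of the repository:

```
#include <iostream>

#include <Eigen/Dense>

#include "knn.h"
#include "normalComputation.h"
#include "utils.h"  // test helpers: generateCloud(), generateIndices()

int main() {
    const unsigned int cloud_size = 1024U;  // illustrative; must exceed kapernikov::KNN_SIZE

    // Random cloud plus, for every point, KNN_SIZE random neighbour indices (one column per point)
    auto cloud = kapernikov::test::generateCloud(cloud_size);
    Eigen::MatrixXi neighbours(kapernikov::KNN_SIZE, cloud.size());
    for(unsigned int i = 0U; i < cloud.size(); ++i) {
        auto indices = kapernikov::test::generateIndices(kapernikov::KNN_SIZE, cloud_size - 1U);
        for(unsigned int j = 0U; j < indices.size(); ++j) {
            neighbours(j, i) = indices[j];
        }
    }

    // The kernel must be built once before calling the OpenCL implementation
    if(!kapernikov::opencl::buildKernel()) {
        std::cerr << "Failed to build the OpenCL kernel" << std::endl;
        return 1;
    }

    auto normals = kapernikov::opencl::compute_normals(cloud, neighbours);
    std::cout << "Computed " << normals->rows() << " normals" << std::endl;
    return 0;
}
```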