├── CMakeLists.txt ├── Python ├── NumPy_and_Open3D.ipynb ├── kdtree.py ├── keypoint_matching.py ├── normal_estimation.py ├── pv.py └── rgbd_and_pcd.py ├── README.md ├── data ├── bun.pcd ├── bun000.pcd ├── bun045.pcd ├── d415.json ├── d435.json ├── depth00100.png ├── image00100.png └── sr300.json ├── doc └── reconstraction.png └── src └── rs-capture.cpp /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | #CMakeのバージョンの設定 2 | cmake_minimum_required(VERSION 3.1.0) 3 | 4 | #プロジェクト名 5 | project(rs-capture) 6 | 7 | 8 | include(CheckCXXCompilerFlag) 9 | CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11) 10 | CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X) 11 | if(COMPILER_SUPPORTS_CXX11) 12 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11") 13 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") 14 | elseif(COMPILER_SUPPORTS_CXX0X) 15 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") 16 | endif() 17 | 18 | find_package(OpenCV REQUIRED) 19 | 20 | set(DEPENDENCIES realsense2 ${OpenCV_LIBS} ) 21 | 22 | add_executable(rs-capture ../src/rs-capture.cpp) 23 | target_link_libraries(rs-capture ${DEPENDENCIES}) -------------------------------------------------------------------------------- /Python/NumPy_and_Open3D.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Open3D点群とNumPy行列の相互変換" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": { 14 | "collapsed": true 15 | }, 16 | "outputs": [], 17 | "source": [ 18 | "import numpy as np\n", 19 | "from open3d import *" 20 | ] 21 | }, 22 | { 23 | "cell_type": "markdown", 24 | "metadata": {}, 25 | "source": [ 26 | "### 10x3のNumPy行列の生成(値はランダム)" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 6, 32 | "metadata": { 33 | "collapsed": true 34 | }, 35 | "outputs": [], 36 | "source": [ 37 | "data = np.random.rand(10,3)" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 18, 43 | "metadata": {}, 44 | "outputs": [ 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "[[ 0.09249111 0.57397975 0.55690015]\n", 50 | " [ 0.50281529 0.09562299 0.24029229]\n", 51 | " [ 0.28317499 0.75393479 0.72160014]\n", 52 | " [ 0.96927297 0.08838792 0.1848289 ]\n", 53 | " [ 0.60801702 0.95011524 0.9681312 ]\n", 54 | " [ 0.40435173 0.11033132 0.93538947]\n", 55 | " [ 0.08502744 0.85777887 0.36894673]\n", 56 | " [ 0.9156053 0.40921759 0.69935851]\n", 57 | " [ 0.47280768 0.43104577 0.90596331]\n", 58 | " [ 0.99682753 0.67177541 0.26291334]]\n" 59 | ] 60 | } 61 | ], 62 | "source": [ 63 | "print(data)" 64 | ] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "metadata": {}, 69 | "source": [ 70 | "### NumPy行列をOpen3D点群に変換" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 10, 76 | "metadata": { 77 | "collapsed": true 78 | }, 79 | "outputs": [], 80 | "source": [ 81 | "pcd = PointCloud()\n", 82 | "pcd.points = Vector3dVector(data)" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "### 点群に色を設定する(値の範囲は[0,1])" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 13, 95 | "metadata": { 96 | "collapsed": true 97 | }, 98 | "outputs": [], 99 | "source": [ 100 | "pcd.paint_uniform_color([1, 0.706, 0])" 101 | ] 102 | }, 103 | { 104 | "cell_type": "markdown", 105 | "metadata": {}, 106 | "source": [ 107 | "### 点群の概要の表示(1行目)と実際のデータの表示(2,3行目)" 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": 14, 113 | "metadata": {}, 114 | "outputs": [ 115 | { 116 | "name": "stdout", 117 | "output_type": "stream", 118 | "text": [ 119 | "PointCloud with 10 points.\n", 120 | "[[ 0.09249111 0.57397975 0.55690015]\n", 121 | " [ 0.50281529 0.09562299 0.24029229]\n", 122 | " [ 0.28317499 0.75393479 0.72160014]\n", 123 | " [ 0.96927297 0.08838792 0.1848289 ]\n", 124 | " [ 0.60801702 0.95011524 0.9681312 ]\n", 125 | " [ 0.40435173 0.11033132 0.93538947]\n", 126 | " [ 0.08502744 0.85777887 0.36894673]\n", 127 | " [ 0.9156053 0.40921759 0.69935851]\n", 128 | " [ 0.47280768 0.43104577 0.90596331]\n", 129 | " [ 0.99682753 0.67177541 0.26291334]]\n", 130 | "[[ 1. 0.706 0. ]\n", 131 | " [ 1. 0.706 0. ]\n", 132 | " [ 1. 0.706 0. ]\n", 133 | " [ 1. 0.706 0. ]\n", 134 | " [ 1. 0.706 0. ]\n", 135 | " [ 1. 0.706 0. ]\n", 136 | " [ 1. 0.706 0. ]\n", 137 | " [ 1. 0.706 0. ]\n", 138 | " [ 1. 0.706 0. ]\n", 139 | " [ 1. 0.706 0. ]]\n" 140 | ] 141 | } 142 | ], 143 | "source": [ 144 | "print(pcd)\n", 145 | "print(np.asarray(pcd.points))\n", 146 | "print(np.asarray(pcd.colors))" 147 | ] 148 | }, 149 | { 150 | "cell_type": "markdown", 151 | "metadata": {}, 152 | "source": [ 153 | "### Open3D点群をNumPy行列に変換" 154 | ] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": 15, 159 | "metadata": { 160 | "collapsed": true 161 | }, 162 | "outputs": [], 163 | "source": [ 164 | "xyz = np.asarray(pcd.points)\n", 165 | "color = np.asarray(pcd.colors)" 166 | ] 167 | }, 168 | { 169 | "cell_type": "code", 170 | "execution_count": 16, 171 | "metadata": {}, 172 | "outputs": [ 173 | { 174 | "name": "stdout", 175 | "output_type": "stream", 176 | "text": [ 177 | "[[ 0.09249111 0.57397975 0.55690015]\n", 178 | " [ 0.50281529 0.09562299 0.24029229]\n", 179 | " [ 0.28317499 0.75393479 0.72160014]\n", 180 | " [ 0.96927297 0.08838792 0.1848289 ]\n", 181 | " [ 0.60801702 0.95011524 0.9681312 ]\n", 182 | " [ 0.40435173 0.11033132 0.93538947]\n", 183 | " [ 0.08502744 0.85777887 0.36894673]\n", 184 | " [ 0.9156053 0.40921759 0.69935851]\n", 185 | " [ 0.47280768 0.43104577 0.90596331]\n", 186 | " [ 0.99682753 0.67177541 0.26291334]]\n", 187 | "[[ 1. 0.706 0. ]\n", 188 | " [ 1. 0.706 0. ]\n", 189 | " [ 1. 0.706 0. ]\n", 190 | " [ 1. 0.706 0. ]\n", 191 | " [ 1. 0.706 0. ]\n", 192 | " [ 1. 0.706 0. ]\n", 193 | " [ 1. 0.706 0. ]\n", 194 | " [ 1. 0.706 0. ]\n", 195 | " [ 1. 0.706 0. ]\n", 196 | " [ 1. 0.706 0. ]]\n" 197 | ] 198 | } 199 | ], 200 | "source": [ 201 | "print(xyz)\n", 202 | "print(color)" 203 | ] 204 | }, 205 | { 206 | "cell_type": "markdown", 207 | "metadata": {}, 208 | "source": [ 209 | "### 点群のレンダリング" 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": 17, 215 | "metadata": { 216 | "collapsed": true 217 | }, 218 | "outputs": [], 219 | "source": [ 220 | "draw_geometries([pcd])" 221 | ] 222 | } 223 | ], 224 | "metadata": { 225 | "kernelspec": { 226 | "display_name": "Python 3", 227 | "language": "python", 228 | "name": "python3" 229 | }, 230 | "language_info": { 231 | "codemirror_mode": { 232 | "name": "ipython", 233 | "version": 3 234 | }, 235 | "file_extension": ".py", 236 | "mimetype": "text/x-python", 237 | "name": "python", 238 | "nbconvert_exporter": "python", 239 | "pygments_lexer": "ipython3", 240 | "version": "3.6.2" 241 | } 242 | }, 243 | "nbformat": 4, 244 | "nbformat_minor": 2 245 | } 246 | -------------------------------------------------------------------------------- /Python/kdtree.py: -------------------------------------------------------------------------------- 1 | # kd-treeによる近傍点探索 2 | 3 | import numpy as np 4 | from open3d import * 5 | 6 | if __name__ == "__main__": 7 | 8 | # 点群の読み込み 9 | pcd = read_point_cloud("../data/bun.pcd") 10 | 11 | pcd.paint_uniform_color([0.5, 0.5, 0.5]) #元の点群をグレーにする 12 | pcd_tree = KDTreeFlann(pcd) 13 | 14 | # クエリの点を赤にする 15 | query_id = 20773 16 | pcd.colors[query_id] = [1, 0, 0] 17 | 18 | # 以下は用意された3種類のサーチ方法の例 19 | # 1.RNNの方法 20 | [k, idx, dist] = pcd_tree.search_radius_vector_3d(pcd.points[query_id], 0.01) 21 | # 検出された点を緑にする. 22 | np.asarray(pcd.colors)[idx[1:], :] = [0, 1, 0] 23 | 24 | # 2.KNNの方法 25 | [k, idx, dist] = pcd_tree.search_knn_vector_3d(pcd.points[query_id], 100) 26 | np.asarray(pcd.colors)[idx[1:], :] = [0, 0, 1] 27 | 28 | # 3.RKNNの方法 29 | [k, idx, dist] = pcd_tree.search_hybrid_vector_3d(pcd.points[query_id], radius=0.01, max_nn=100 ) 30 | np.asarray(pcd.colors)[idx[1:], :] = [0, 1, 1] 31 | 32 | 33 | # サーチ結果の解説 34 | # k : 近傍点数 35 | # idx : 見つかった点群のid 36 | # dist: 見つかった点までの二乗距離 37 | print('近傍点数 k=', k) 38 | id = 50 39 | d = np.asarray(pcd.points)[query_id] - np.asarray(pcd.points)[idx[50]] 40 | norm = np.linalg.norm(d) 41 | print('クエリと',id, '番目に近い点の二乗距離',norm*norm) 42 | print('dist[',id, '] =', dist[id]) 43 | 44 | print('点群の可視化') 45 | draw_geometries([pcd]) 46 | -------------------------------------------------------------------------------- /Python/keypoint_matching.py: -------------------------------------------------------------------------------- 1 | # キーポイントマッチングによる位置姿勢認識 2 | 3 | from open3d import * 4 | import numpy as np 5 | import copy 6 | 7 | #sourceをtransformationによって剛体変換してtargetと一緒に表示 8 | def draw_registration_result(source, target, transformation): 9 | source_temp = copy.deepcopy(source) 10 | target_temp = copy.deepcopy(target) 11 | source_temp.paint_uniform_color([1, 0.706, 0]) 12 | target_temp.paint_uniform_color([0, 0.651, 0.929]) 13 | source_temp.transform(transformation) 14 | draw_geometries([source_temp, target_temp]) 15 | 16 | #キーポイント検出,法線推定,特徴記述 17 | # orientをFalseにすることで,法線方向の修正をオフにできます. 18 | def preprocess_point_cloud(pcd, voxel_size, orient=True): 19 | print(":: Downsample with a voxel size %.3f." % voxel_size) 20 | pcd_kp = voxel_down_sample(pcd, voxel_size) 21 | 22 | radius_normal = voxel_size * 2 23 | viewpoint = np.array([0.,0.,100.], dtype='float64') 24 | estimate_normals(pcd_kp, KDTreeSearchParamHybrid(radius = radius_normal, max_nn = 30)) 25 | if orient == True: 26 | orient_normals_towards_camera_location( pcd_kp, camera_location = viewpoint ) 27 | else: 28 | print("Orient is not applied.") 29 | 30 | radius_feature = voxel_size * 5 31 | print(":: Compute FPFH feature with search radius %.3f." % radius_feature) 32 | pcd_fpfh = compute_fpfh_feature(pcd_kp, 33 | KDTreeSearchParamHybrid(radius = radius_feature, max_nn = 200)) 34 | return pcd_kp, pcd_fpfh 35 | 36 | #RANSACによるレジストレーション 37 | def execute_global_registration( 38 | source_kp, target_kp, source_fpfh, target_fpfh, voxel_size): 39 | distance_threshold = voxel_size * 1.5 40 | print(":: RANSAC registration on downsampled point clouds.") 41 | print(" Since the downsampling voxel size is %.3f," % voxel_size) 42 | print(" we use a liberal distance threshold %.3f." % distance_threshold) 43 | result = registration_ransac_based_on_feature_matching( 44 | source_kp, target_kp, source_fpfh, target_fpfh, 45 | distance_threshold, 46 | TransformationEstimationPointToPoint(False), 4, 47 | [CorrespondenceCheckerBasedOnEdgeLength(0.9), 48 | CorrespondenceCheckerBasedOnDistance(distance_threshold)], 49 | RANSACConvergenceCriteria(40000, 500)) 50 | return result 51 | 52 | #ICPによるリファイン 53 | def refine_registration(source, target, trans, voxel_size): 54 | distance_threshold = voxel_size * 0.4 55 | print(":: Point-to-plane ICP registration is applied on original point") 56 | print(" clouds to refine the alignment. This time we use a strict") 57 | print(" distance threshold %.3f." % distance_threshold) 58 | result = registration_icp(source, target, 59 | distance_threshold, trans, 60 | TransformationEstimationPointToPlane()) 61 | return result 62 | 63 | if __name__ == "__main__": 64 | #データ読み込み 65 | print(":: Load two point clouds to be matched.") 66 | source = read_point_cloud("../data/bun000.pcd") 67 | target = read_point_cloud("../data/bun045.pcd") 68 | 69 | draw_registration_result(source, target, np.identity(4)) 70 | 71 | #キーポイント検出と特徴量抽出 72 | voxel_size = 0.01 73 | source_kp, source_fpfh = preprocess_point_cloud(source, voxel_size) 74 | target_kp, target_fpfh = preprocess_point_cloud(target, voxel_size) 75 | draw_registration_result(source_kp, target_kp, np.identity(4)) 76 | 77 | #RANSACによる姿勢推定 78 | result_ransac = execute_global_registration(source_kp, target_kp, 79 | source_fpfh, target_fpfh, voxel_size) 80 | 81 | print(result_ransac) 82 | draw_registration_result(source_kp, target_kp, result_ransac.transformation) 83 | 84 | #ICPによる微修正 85 | result_icp = refine_registration(source, target, result_ransac.transformation, voxel_size) 86 | print(result_icp) 87 | draw_registration_result(source, target, result_icp.transformation) 88 | -------------------------------------------------------------------------------- /Python/normal_estimation.py: -------------------------------------------------------------------------------- 1 | # 法線推定 2 | 3 | import numpy as np 4 | from open3d import * 5 | import copy 6 | 7 | if __name__ == "__main__": 8 | 9 | #データ読み込み 10 | pcd_ = read_point_cloud("../data/bun.pcd") 11 | print(":: 元の点群の可視化") 12 | draw_geometries([pcd_]) # まず,そのままの点群を表示する 13 | 14 | pcd = voxel_down_sample( pcd_, 0.002 ) 15 | draw_geometries([pcd]) 16 | 17 | radius = 0.02 #法線計算のための範囲指定 18 | viewpoint = np.array([0.,0.,100.], dtype='float64') # 法線向き修正のための視点位置 19 | 20 | 21 | #法線推定 22 | estimate_normals( pcd, KDTreeSearchParamHybrid(radius = radius, max_nn = 30) ) 23 | 24 | pcd2 = copy.deepcopy(pcd) #法線向きの修正前後の点群の用意(比較用) 25 | np.asarray(pcd2.points)[:,0] += 0.2 # x軸方向に0.2移動する 26 | 27 | #向きの修正 28 | orient_normals_towards_camera_location( pcd2, camera_location = viewpoint ) 29 | 30 | print(":: 法線推定と向きの修正(nキーで法線を表示できます.)") 31 | draw_geometries([pcd, pcd2]) # 法線推定前後の点群を表示する 32 | 33 | -------------------------------------------------------------------------------- /Python/pv.py: -------------------------------------------------------------------------------- 1 | # 点群ビューワ 2 | 3 | import numpy as np 4 | from open3d import * 5 | import argparse 6 | 7 | if __name__ == "__main__": 8 | 9 | parser = argparse.ArgumentParser( 10 | description="point cloud viewer") 11 | parser.add_argument("input", help="input data (.ply or .pcd)") 12 | args = parser.parse_args() 13 | print(args.input) 14 | pcd = read_point_cloud(args.input) 15 | print(pcd) 16 | print(np.asarray(pcd.points)) 17 | draw_geometries([pcd]) -------------------------------------------------------------------------------- /Python/rgbd_and_pcd.py: -------------------------------------------------------------------------------- 1 | # RGBD画像の表示 2 | 3 | from open3d import * 4 | import matplotlib.pyplot as plt 5 | import numpy as np 6 | 7 | if __name__ == "__main__": 8 | color_raw = read_image("../data/image00100.png") 9 | depth_raw = read_image("../data/depth00100.png") 10 | camera_intrinsic = read_pinhole_camera_intrinsic("../data/d435.json") 11 | print( camera_intrinsic ) 12 | rgbd_image = create_rgbd_image_from_color_and_depth( color_raw, depth_raw ) 13 | print(rgbd_image) 14 | plt.subplot(1, 2, 1) 15 | plt.title('Grayscale image') 16 | plt.imshow(rgbd_image.color) 17 | plt.subplot(1, 2, 2) 18 | plt.title('Depth image') 19 | plt.imshow(rgbd_image.depth) 20 | plt.show() 21 | pcd = create_point_cloud_from_rgbd_image(rgbd_image, camera_intrinsic) 22 | print(np.asarray(pcd.points)) 23 | print("\n") 24 | draw_geometries([pcd]) 25 | write_point_cloud( "out.ply", pcd ) 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Open3Dを利用した3次元点群処理 2 | ## 概要 3 | 本レポジトリは,第24回画像センシングシンポジウム(SSII2018)の 4 | チュートリアル講演「3D物体検出とロボットビジョンへの応用 5 | -3D点群処理の基礎と位置姿勢推定のしくみ-」のために用意した資料です. 6 | 講演中で解説した点群処理アルゴリズムのコードを紹介します. 7 | 8 | 本レポジトリのコードを実行することによって,以下を試すことができます. 9 | - Open3Dを用いた基本的な点群処理 10 | - Realsenseセンサを使ったRGBD連続画像の取得 11 | - 上記を用いた3Dシーンの再構成 12 | 13 | 講演資料は[こちら](https://www.slideshare.net/SSII_Slides/3d-101077557)で公開していますので,併せてご覧いただければ幸いです. 14 | 15 | ※本レポジトリはUbuntu16.04での動作確認をおこなっています. 16 | 17 | ## ファイル構成 18 | 19 | - [src] ・・・RealsenseセンサによってRGBD画像を取得するプログラムが格納されています. 20 | - [data] ・・・サンプルデータが格納されています. 21 | - [Python]・・・上記の講演で使用したOpen3Dのコードが格納されています. 22 | - CMakeLists.txt 23 | 24 | ## 準備 25 | ### 依存環境の構築 26 | このレポジトリで紹介するすべてのプログラムの実行のためには,下記のライブラリのインストールが必要です. 27 | 1. [Open3D](https://github.com/IntelVCL/Open3D) 点群処理用 28 | 2. [librealsense](https://github.com/IntelRealSense/librealsense) RealSenseセンサからのRGBD画像取得用 29 | 30 | インストール方法はそれぞれのレポジトリで確認してください. 31 | 32 | ### プログラムのビルド 33 | ディレクトリ「src」内のデータ取得のためのプログラム(rs-capture)のみビルドが必要です. 34 | 35 | ``` 36 | mkdir build 37 | cd build 38 | cmake ../ 39 | make 40 | ``` 41 | 42 | ## 各プログラムの説明 43 | ### rs-capture 44 | RealSense SR300, D415, D435でRGBD画像群を取得するコードサンプルです. 45 | 実行時は「build」ディレクトリにて下記のコマンドを実行してください. 46 | ``` 47 | cd /build/ 48 | ./rs-capture 49 | ``` 50 | 「image」と「depth」というディレクトリが作成され,その中にセンサで撮影されたRGB画像と距離画像を保存します. 51 | ここで撮影したデータは,Open3Dが提供する[Reconstruction System](http://www.open3d.org/docs/tutorial/ReconstructionSystem/index.html)によって3次元復元することができます. 52 | 53 | 54 | ### kdtree.py 55 | kd treeによる近傍点探索のコードサンプルです. 56 | 三種類の探索方法(RNN, KNN, RKNN)を試すことができます. 57 | 58 | ### keypoint_matching.py 59 | キーポイントマッチングによる位置姿勢推定のためのコードサンプルです. 60 | 61 | ### normal_estimation.py 62 | 法線推定のコードサンプルです.法線向き修正の効果を確認するために修正前後のデータを同時に可視化します. 63 | 64 | ### NumPy_and_Open3D.ipynb 65 | NumPyとOpen3Dのデータの相互変換の方法の説明です. 66 | 67 | ### pv.py 68 | 点群データ(.pcd, .ply)を可視化するためのビューワです. 69 | ``` 70 | python pv.pcd [可視化したいファイル名] 71 | ``` 72 | で実行できます. 73 | 74 | ### rgbd_and_pcd.py 75 | RGB画像と距離画像を読み込んで,点群に変換するためのサンプルコードです. 76 | このスクリプトでは距離画像がRGBカメラのカメラの座標系でレンダリングされていることを想定しています.(RGB画像を基準として距離画像とピクセル単位で位置合わせ) 77 | したがって,それぞれのセンサのRGBカメラの内部パラメータを用いて点群に変換しています. 78 | 点群データは「out.ply」として保存します. 79 | 80 | 81 | ## RealSenseセンサで取得したRGBD画像を使ったシーンの再構成 82 | 83 | rs-captureで取得したRGBD画像群(「image」と「depth」に格納されています)を用いて,Open3DのReconstruction systemによる3次元シーンの再構成を実行します. 84 | 下記のコマンドで実行できます. 85 | 詳しい方法は[Open3Dのドキュメント](http://www.open3d.org/docs/tutorial/ReconstructionSystem/index.html)を参照してください. 86 | 87 | ``` 88 | cd /Tutorial/ReconstructionSystem/ 89 | python make_fragments.py [imageとdepthのまでのパス] [-path_intrinsic (optional)] 90 | python register_fragments.py [imageとdepthのまでのパス] 91 | python integrate_scene.py [imageとdepthのまでのパス] [-path_intrinsic (optional)] 92 | ``` 93 | 94 | 下記の例のような結果が得られます. 95 | 96 | ![Reconstruction](https://github.com/sakizuki/SSII2018_Tutorial_Open3D/blob/master/doc/reconstraction.png) -------------------------------------------------------------------------------- /data/bun.pcd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakizuki/SSII2018_Tutorial_Open3D/6e653ebfc0c290d42c871ccb098530f4cbd1439a/data/bun.pcd -------------------------------------------------------------------------------- /data/bun000.pcd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakizuki/SSII2018_Tutorial_Open3D/6e653ebfc0c290d42c871ccb098530f4cbd1439a/data/bun000.pcd -------------------------------------------------------------------------------- /data/bun045.pcd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakizuki/SSII2018_Tutorial_Open3D/6e653ebfc0c290d42c871ccb098530f4cbd1439a/data/bun045.pcd -------------------------------------------------------------------------------- /data/d415.json: -------------------------------------------------------------------------------- 1 | { 2 | "width" : 640, 3 | "height" : 480, 4 | "intrinsic_matrix" : 5 | [ 6 | 627.256, 7 | 0, 8 | 0, 9 | 0, 10 | 616.882, 11 | 0, 12 | 312.822, 13 | 227.992, 14 | 1 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /data/d435.json: -------------------------------------------------------------------------------- 1 | { 2 | "width" : 640, 3 | "height" : 480, 4 | "intrinsic_matrix" : 5 | [ 6 | 616.945, 7 | 0, 8 | 0, 9 | 0, 10 | 617.134, 11 | 0, 12 | 325.16, 13 | 238.754, 14 | 1 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /data/depth00100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakizuki/SSII2018_Tutorial_Open3D/6e653ebfc0c290d42c871ccb098530f4cbd1439a/data/depth00100.png -------------------------------------------------------------------------------- /data/image00100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakizuki/SSII2018_Tutorial_Open3D/6e653ebfc0c290d42c871ccb098530f4cbd1439a/data/image00100.png -------------------------------------------------------------------------------- /data/sr300.json: -------------------------------------------------------------------------------- 1 | { 2 | "width" : 640, 3 | "height" : 480, 4 | "intrinsic_matrix" : 5 | [ 6 | 622.439, 7 | 0, 8 | 0, 9 | 0, 10 | 622.439, 11 | 0, 12 | 315.746, 13 | 250.804, 14 | 1 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /doc/reconstraction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakizuki/SSII2018_Tutorial_Open3D/6e653ebfc0c290d42c871ccb098530f4cbd1439a/doc/reconstraction.png -------------------------------------------------------------------------------- /src/rs-capture.cpp: -------------------------------------------------------------------------------- 1 | //====================================================================== 2 | // 3 | // RealsenseでRGB画像,距離画像を取得するプログラム 4 | // 5 | // Shuichi Akizuki, Keio Univ. 6 | //====================================================================== 7 | #include 8 | #include 9 | #include // Include RealSense Cross Platform API 10 | #include // Include OpenCV API 11 | 12 | using namespace cv; 13 | using namespace std; 14 | 15 | Mat frame_to_mat(const rs2::frame& f); 16 | Mat depth_frame_to_meters(const rs2::pipeline& pipe, const rs2::depth_frame& f); 17 | 18 | 19 | int main(int argc, char * argv[]) 20 | { 21 | system("rm -fr image"); 22 | system("rm -fr depth"); 23 | system("mkdir image"); 24 | system("mkdir depth"); 25 | 26 | // Declare depth colorizer for pretty visualization of depth data 27 | rs2::colorizer color_map; 28 | 29 | 30 | rs2::config cfg; //カメラの設定の定義 31 | cfg.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_BGR8, 30); 32 | cfg.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); 33 | 34 | // Declare RealSense pipeline, encapsulating the actual device and sensors 35 | rs2::pipeline pipe; 36 | rs2::pipeline_profile selection = pipe.start(cfg); 37 | auto sensor = selection.get_device().first(); 38 | auto scale = sensor.get_depth_scale(); 39 | 40 | cerr<<"Scale: "<(); 90 | const int w = vf.get_width(); 91 | const int h = vf.get_height(); 92 | 93 | if (f.get_profile().format() == RS2_FORMAT_BGR8) 94 | { 95 | //cerr<<"RS2_FORMAT_BGR8"<