├── .gitignore
├── script.txt
├── Calibration
│   ├── 01_collect_data.py
│   ├── 04_camera_config.py
│   ├── 05_gen_dataset.py
│   ├── 02_search_chessboard.py
│   ├── 04_camera_config.ipynb
│   ├── 03_calibration.py
│   ├── 02_search_chessboard.ipynb
│   ├── 01_collect_data.ipynb
│   ├── 05_gen_dataset.ipynb
│   └── 03_calibration.ipynb
├── boost_modify.txt
├── orbslam_jetbot_live.py
├── orbslam_mono_tum.py
├── README.md
└── orbslam_dataset_demo.ipynb
/.gitignore: -------------------------------------------------------------------------------- 1 | datasets/ 2 | out/ 3 | __pycache__ 4 | save/ -------------------------------------------------------------------------------- /script.txt: -------------------------------------------------------------------------------- 1 | python3 orbslam_jetbot_live.py Vocabulary/ORBvoc.txt JetBot960.yaml 2 | 3 | python3 orbslam_mono_tum.py Vocabulary/ORBvoc.txt TUM3.yaml rgbd_dataset_freiburg3_large_cabinet_validation 4 | -------------------------------------------------------------------------------- /Calibration/01_collect_data.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | from jetbot import Camera 4 | 5 | image_folder = "Images/" 6 | if not os.path.exists(image_folder): 7 | os.makedirs(image_folder) 8 | camera = Camera.instance(width=960, height=540, capture_width=1280, capture_height=720) 9 | iid = 0 10 | while(True): 11 | img = camera.value 12 | cv2.imshow("test", img) 13 | fname = str(iid).zfill(4) + ".jpg" 14 | cv2.imwrite(image_folder + fname, img) 15 | iid += 1 16 | k = cv2.waitKey(200) 17 | if k == ord('q'): 18 | camera.stop() # release the camera so nvargus-daemon does not need a restart 19 | break 20 | -------------------------------------------------------------------------------- /Calibration/04_camera_config.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | 4 | npz_file = np.load('camera.npz') 5 | ret, mtx, dist, rvecs, tvecs = npz_file['ret'], npz_file['mtx'], npz_file['dist'], npz_file['rvecs'], npz_file['tvecs'] 6 | print(mtx, dist) 7 | print("==============================") 8 | print("Camera.fx: " + str(format(mtx[0,0], '.8f'))) 9 | print("Camera.fy: " + str(format(mtx[1,1], '.8f'))) 10 | print("Camera.cx: " + str(format(mtx[0,2], '.8f'))) 11 | print("Camera.cy: " + str(format(mtx[1,2], '.8f'))) 12 | print() 13 | print("Camera.k1: " + str(format(dist[0,0], '.8f'))) 14 | print("Camera.k2: " + str(format(dist[0,1], '.8f'))) 15 | print("Camera.p1: " + str(format(dist[0,2], '.8f'))) 16 | print("Camera.p2: " + str(format(dist[0,3], '.8f'))) 17 | print("Camera.k3: " + str(format(dist[0,4], '.8f'))) -------------------------------------------------------------------------------- /Calibration/05_gen_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | from jetbot import Camera 4 | 5 | dataset_path = "jetbot_dataset_960test2/" 6 | image_folder = "rgb/" 7 | if not os.path.exists(dataset_path+image_folder): 8 | os.makedirs(dataset_path+image_folder) 9 | file = open(dataset_path+"rgb.txt", "w") 10 | 11 | camera = Camera.instance(width=960, height=540, capture_width=1280, capture_height=720) 12 | iid = 0 13 | while(True): 14 | img = camera.value 15 | cv2.imshow("test", img) 16 | 17 | fname = str(iid).zfill(4) + ".jpg" 18 | cv2.imwrite(dataset_path + image_folder + fname, img) 19 | # Write file 20 | file.write(str(iid) + " " + image_folder + fname + "\n") 21 | 22 | iid += 1 23 | k = cv2.waitKey(100) 24 | if k == ord('q'): 25 | camera.stop() 26 | break 27 | 28 | file.close() # close the rgb.txt index so every entry is flushed to disk
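For reference, ```05_gen_dataset.py``` writes ```rgb.txt``` in the TUM association format that ```load_images()``` in ```orbslam_mono_tum.py``` parses: one ```<timestamp> <path>``` pair per line, with the frame id standing in for the timestamp. The first entries would look like this (illustrative, assuming the default folder names):
```
0 rgb/0000.jpg
1 rgb/0001.jpg
2 rgb/0002.jpg
```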
-------------------------------------------------------------------------------- /boost_modify.txt: -------------------------------------------------------------------------------- 1 | boost::python::dict ORBSlamPython::getTrackedMappoints() const 2 | { 3 | if (!system) 4 | { 5 | return boost::python::dict(); 6 | } 7 | 8 | // This is adapted from the ORB_SLAM2 System.SaveTrajectoryKITTI function, with some changes to output a Python dict of (x, y, z) tuples keyed by map-point id. 9 | vector<ORB_SLAM2::MapPoint*> Mps = system->GetTrackedMapPoints(); 10 | 11 | //boost::python::list map_points; 12 | boost::python::dict map_points; 13 | for(size_t i=0; i<Mps.size(); i++) { 14 | if (Mps[i]) { 15 | cv::Mat wp = Mps[i]->GetWorldPos(); 16 | long unsigned int mid = Mps[i]->mnId; 17 | map_points[int(mid)] = boost::python::make_tuple( 18 | wp.at<float>(0,0), 19 | wp.at<float>(1,0), 20 | wp.at<float>(2,0)); 21 | } 22 | } 23 | 24 | return map_points; 25 | } 26 | -------------------------------------------------------------------------------- /Calibration/02_search_chessboard.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | import os 4 | 5 | cali_folder = "Images_cali/" 6 | if not os.path.exists(cali_folder): 7 | os.makedirs(cali_folder) 8 | 9 | # termination criteria 10 | criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) 11 | 12 | # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) 13 | objp = np.zeros((6*7,3), np.float32) 14 | objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2) 15 | 16 | # Arrays to store object points and image points from all the images. 17 | objpoints = [] # 3d point in real world space 18 | imgpoints = [] # 2d points in image plane. 19 | 20 | images = os.listdir("Images/") 21 | images.sort() 22 | print(images) 23 | 24 | for fname in images: 25 | print(fname) 26 | img = cv2.imread("Images/" + fname) 27 | gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 28 | 29 | # Find the chess board corners 30 | ret, corners = cv2.findChessboardCorners(gray, (7,6),None) 31 | 32 | # If found, add object points, image points (after refining them) 33 | if ret == True: 34 | objpoints.append(objp) 35 | 36 | corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria) 37 | imgpoints.append(corners2) 38 | 39 | # Draw and display the corners 40 | img_draw = cv2.drawChessboardCorners(img.copy(), (7,6), corners2,ret) 41 | cv2.imshow('img',img_draw) 42 | cv2.waitKey(500) 43 | 44 | cv2.imwrite(cali_folder + fname, img) 45 | 46 | cv2.destroyAllWindows() -------------------------------------------------------------------------------- /orbslam_jetbot_live.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import sys 3 | import os.path 4 | import orbslam2 5 | import time 6 | import cv2 7 | from jetbot import Camera 8 | 9 | def main(vocab_path, settings_path): 10 | slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR) 11 | slam.set_use_viewer(True) 12 | slam.initialize() 13 | 14 | camera = Camera.instance(width=960, height=540, capture_width=1280, capture_height=720) 15 | print('-----') 16 | print('Start processing sequence ...') 17 | ts = 0 18 | #cv2.namedWindow("test") 19 | try: 20 | while True: 21 | img = camera.value.copy() 22 | t1 = time.time() 23 | slam.process_image_mono(img, float(ts)) 24 | t2 = time.time() 25 | ttrack = t2 - t1 26 | print("frame id:" + str(ts), ttrack) 27 | time.sleep(0.01) 28 | ts += 1 29 | except KeyboardInterrupt: 30 | pass # stop the endless capture loop with Ctrl-C; the trajectory is saved below 31 | save_trajectory(slam.get_trajectory_points(), 'trajectory.txt') 32 | slam.shutdown() 33 | return 0 34 | 35 | def 
save_trajectory(trajectory, filename): 36 | with open(filename, 'w') as traj_file: 37 | traj_file.writelines('{time} {r00} {r01} {r02} {t0} {r10} {r11} {r12} {t1} {r20} {r21} {r22} {t2}\n'.format( 38 | time=repr(stamp), 39 | r00=repr(r00), 40 | r01=repr(r01), 41 | r02=repr(r02), 42 | t0=repr(t0), 43 | r10=repr(r10), 44 | r11=repr(r11), 45 | r12=repr(r12), 46 | t1=repr(t1), 47 | r20=repr(r20), 48 | r21=repr(r21), 49 | r22=repr(r22), 50 | t2=repr(t2) 51 | ) for stamp, r00, r01, r02, t0, r10, r11, r12, t1, r20, r21, r22, t2 in trajectory) 52 | 53 | 54 | if __name__ == '__main__': 55 | main(sys.argv[1], sys.argv[2]) 56 | -------------------------------------------------------------------------------- /Calibration/04_camera_config.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Extract camera config" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np \n", 17 | "\n", 18 | "npz_file = np.load('camera.npz')\n", 19 | "ret, mtx, dist, rvecs, tvecs = npz_file['ret'], npz_file['mtx'], npz_file['dist'], npz_file['rvecs'], npz_file['tvecs']\n", 20 | "print(mtx, dist)\n", 21 | "print(\"==============================\")\n", 22 | "print(\"Camera.fx: \" + str(format(mtx[0,0], '.8f')))\n", 23 | "print(\"Camera.fy: \" + str(format(mtx[1,1], '.8f')))\n", 24 | "print(\"Camera.cx: \" + str(format(mtx[0,2], '.8f')))\n", 25 | "print(\"Camera.cy: \" + str(format(mtx[1,2], '.8f')))\n", 26 | "print()\n", 27 | "print(\"Camera.k1: \" + str(format(dist[0,0], '.8f')))\n", 28 | "print(\"Camera.k2: \" + str(format(dist[0,1], '.8f')))\n", 29 | "print(\"Camera.p1: \" + str(format(dist[0,2], '.8f')))\n", 30 | "print(\"Camera.p2: \" + str(format(dist[0,3], '.8f')))\n", 31 | "print(\"Camera.k3: \" + str(format(dist[0,4], '.8f')))" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "Copy the above information to ORB-SLAM's yaml file." 39 | ] 40 | } 41 | ], 42 | "metadata": { 43 | "kernelspec": { 44 | "display_name": "Python 3", 45 | "language": "python", 46 | "name": "python3" 47 | }, 48 | "language_info": { 49 | "codemirror_mode": { 50 | "name": "ipython", 51 | "version": 3 52 | }, 53 | "file_extension": ".py", 54 | "mimetype": "text/x-python", 55 | "name": "python", 56 | "nbconvert_exporter": "python", 57 | "pygments_lexer": "ipython3", 58 | "version": "3.6.9" 59 | } 60 | }, 61 | "nbformat": 4, 62 | "nbformat_minor": 2 63 | } 64 | -------------------------------------------------------------------------------- /Calibration/03_calibration.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | import os 4 | 5 | images = os.listdir("Images_cali/") 6 | 7 | # termination criteria 8 | criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) 9 | 10 | # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) 11 | objp = np.zeros((6*7,3), np.float32) 12 | objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2) 13 | 14 | # Arrays to store object points and image points from all the images. 15 | objpoints = [] # 3d point in real world space 16 | imgpoints = [] # 2d points in image plane. 
17 | 18 | for fname in images: 19 | print(fname) 20 | img = cv2.imread("Images_cali/" + fname) 21 | gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 22 | 23 | # Find the chess board corners 24 | ret, corners = cv2.findChessboardCorners(gray, (7,6),None) 25 | 26 | # If found, add object points, image points (after refining them) 27 | if ret == True: 28 | objpoints.append(objp) 29 | 30 | corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria) 31 | imgpoints.append(corners2) 32 | 33 | # Draw and display the corners 34 | img = cv2.drawChessboardCorners(img, (7,6), corners2,ret) 35 | cv2.imshow('img',img) 36 | cv2.waitKey(500) 37 | 38 | 39 | cv2.destroyAllWindows() 40 | ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None) 41 | np.savez_compressed('camera.npz', ret=ret, mtx=mtx, dist=dist, rvecs=rvecs, tvecs=tvecs) 42 | image_folder = "Images_undist/" 43 | if not os.path.exists(image_folder): 44 | os.makedirs(image_folder) 45 | 46 | for fname in images: 47 | img = cv2.imread("Images_cali/" + fname) 48 | h, w = img.shape[:2] 49 | newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h)) 50 | 51 | # undistort 52 | dst = cv2.undistort(img, mtx, dist, None, newcameramtx) 53 | 54 | # crop the image 55 | x,y,w,h = roi 56 | print(roi) 57 | dst = dst[y:y+h, x:x+w] 58 | cv2.imwrite(image_folder+fname,dst) 59 | ''' 60 | # undistort 61 | mapx,mapy = cv2.initUndistortRectifyMap(mtx,dist,None,newcameramtx,(w,h),5) 62 | dst = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR) 63 | print(dst.shape) 64 | # crop the image 65 | x,y,w,h = roi 66 | dst = dst[y:y+h, x:x+w] 67 | cv2.imwrite('calibresult.png',dst) 68 | ''' 69 | -------------------------------------------------------------------------------- /orbslam_mono_tum.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import sys 3 | import os.path 4 | import orbslam2 5 | import time 6 | import cv2 7 | 8 | 9 | def main(vocab_path, settings_path, sequence_path): 10 | 11 | rgb_filenames, timestamps = load_images(sequence_path) 12 | num_images = len(timestamps) 13 | 14 | slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR) 15 | slam.set_use_viewer(True) 16 | slam.initialize() 17 | 18 | times_track = [0 for _ in range(num_images)] 19 | print('-----') 20 | print('Start processing sequence ...') 21 | print('Images in the sequence: {0}'.format(num_images)) 22 | map_pts = {} 23 | 24 | for idx in range(num_images): 25 | image = cv2.imread(os.path.join(sequence_path, rgb_filenames[idx]), cv2.IMREAD_UNCHANGED) 26 | tframe = timestamps[idx] 27 | 28 | if image is None: 29 | print("failed to load image at {0}".format(os.path.join(sequence_path, rgb_filenames[idx]))) 30 | return 1 31 | 32 | t1 = time.time() 33 | slam.process_image_mono(image, tframe) 34 | tracked_pts = slam.get_tracked_mappoints() 35 | map_pts.update(tracked_pts) 36 | t2 = time.time() 37 | print(len(map_pts)) 38 | 39 | ttrack = t2 - t1 40 | times_track[idx] = ttrack 41 | 42 | t = 0 43 | if idx < num_images - 1: 44 | t = timestamps[idx + 1] - tframe 45 | elif idx > 0: 46 | t = tframe - timestamps[idx - 1] 47 | 48 | if ttrack < t: 49 | time.sleep(t - ttrack) 50 | 51 | save_trajectory(slam.get_trajectory_points(), 'trajectory.txt') 52 | 53 | slam.shutdown() 54 | 55 | times_track = sorted(times_track) 56 | total_time = sum(times_track) 57 | print('-----') 58 | print('median tracking time: {0}'.format(times_track[num_images // 2])) 59 | print('mean tracking time: 
{0}'.format(total_time / num_images)) 60 | 61 | return 0 62 | 63 | 64 | def load_images(path_to_association): 65 | rgb_filenames = [] 66 | timestamps = [] 67 | with open(os.path.join(path_to_association, 'rgb.txt')) as times_file: 68 | for line in times_file: 69 | if len(line) > 0 and not line.startswith('#'): 70 | t, rgb = line.rstrip().split(' ')[0:2] 71 | rgb_filenames.append(rgb) 72 | timestamps.append(float(t)) 73 | return rgb_filenames, timestamps 74 | 75 | 76 | def save_trajectory(trajectory, filename): 77 | with open(filename, 'w') as traj_file: 78 | traj_file.writelines('{time} {r00} {r01} {r02} {t0} {r10} {r11} {r12} {t1} {r20} {r21} {r22} {t2}\n'.format( 79 | time=repr(stamp), 80 | r00=repr(r00), 81 | r01=repr(r01), 82 | r02=repr(r02), 83 | t0=repr(t0), 84 | r10=repr(r10), 85 | r11=repr(r11), 86 | r12=repr(r12), 87 | t1=repr(t1), 88 | r20=repr(r20), 89 | r21=repr(r21), 90 | r22=repr(r22), 91 | t2=repr(t2) 92 | ) for stamp, r00, r01, r02, t0, r10, r11, r12, t1, r20, r21, r22, t2 in trajectory) 93 | 94 | 95 | if __name__ == '__main__': 96 | if len(sys.argv) != 4: 97 | print('Usage: ./orbslam_mono_tum path_to_vocabulary path_to_settings path_to_sequence') 98 | sys.exit(1) 99 | main(sys.argv[1], sys.argv[2], sys.argv[3])
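A note on the binding used above: ```get_tracked_mappoints()``` is the method added by the patch in ```boost_modify.txt```. It returns a Python dict keyed by map-point id (```mnId```) whose values are ```(x, y, z)``` world-coordinate tuples, which is why ```map_pts.update(tracked_pts)``` accumulates a global point cloud across frames. A minimal sketch of consuming it, assuming ```slam``` has already processed at least one frame:
```python
# tracked is {mnId: (x, y, z)} in world coordinates (see boost_modify.txt)
tracked = slam.get_tracked_mappoints()
for mid, (x, y, z) in tracked.items():
    print("map point", mid, "at", round(x, 3), round(y, 3), round(z, 3))
```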
-------------------------------------------------------------------------------- /Calibration/02_search_chessboard.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import numpy as np\n", 10 | "import cv2\n", 11 | "import os\n", 12 | "import IPython\n", 13 | "\n", 14 | "cali_folder = \"Images_cali/\"\n", 15 | "if not os.path.exists(cali_folder):\n", 16 | " os.makedirs(cali_folder)" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "# termination criteria\n", 26 | "criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n", 27 | "\n", 28 | "# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n", 29 | "objp = np.zeros((6*7,3), np.float32)\n", 30 | "objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)\n", 31 | "\n", 32 | "# Arrays to store object points and image points from all the images.\n", 33 | "objpoints = [] # 3d point in real world space\n", 34 | "imgpoints = [] # 2d points in image plane.\n", 35 | "\n", 36 | "images = os.listdir(\"Images/\")\n", 37 | "images.sort()\n", 38 | "print(images)" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "def show_image(image):\n", 48 | " _,ret = cv2.imencode('.jpg', image) \n", 49 | " i = IPython.display.Image(data=ret)\n", 50 | " IPython.display.display(i)" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": null, 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "for fname in images:\n", 60 | " if fname.split(\".\")[-1] != \"jpg\":\n", 61 | " continue\n", 62 | " print(\"\\r\" + fname, end=\"\")\n", 63 | " img = cv2.imread(\"Images/\" + fname)\n", 64 | " gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n", 65 | " \n", 66 | " # Find the chess board corners\n", 67 | " ret, corners = cv2.findChessboardCorners(gray, (7,6),None)\n", 68 | "\n", 69 | " # If found, add object points, image points (after refining them)\n", 70 | " if ret == True:\n", 71 | " objpoints.append(objp)\n", 72 | "\n", 73 | " corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\n", 74 | " imgpoints.append(corners2)\n", 75 | "\n", 76 | " # Draw and display the corners\n", 77 | " img_draw = cv2.drawChessboardCorners(img.copy(), (7,6), corners2,ret)\n", 78 | " show_image(img_draw)\n", 79 | " cv2.imwrite(cali_folder + fname, img)\n", 80 | "print(\"\\rFinished !!\")" 81 | ] 82 | } 83 | ], 84 | "metadata": { 85 | "kernelspec": { 86 | "display_name": "Python 3", 87 | "language": "python", 88 | "name": "python3" 89 | }, 90 | "language_info": { 91 | "codemirror_mode": { 92 | "name": "ipython", 93 | "version": 3 94 | }, 95 | "file_extension": ".py", 96 | "mimetype": "text/x-python", 97 | "name": "python", 98 | "nbconvert_exporter": "python", 99 | "pygments_lexer": "ipython3", 100 | "version": "3.6.9" 101 | } 102 | }, 103 | "nbformat": 4, 104 | "nbformat_minor": 2 105 | } 106 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ORB_SLAM2-Python-Jetbot 2 | 3 | ## Part I: ORB-SLAM for Python 3.6 Setup 4 | Clone the repositories into the same folder. 5 | 6 | ### Pangolin 7 | https://github.com/stevenlovegrove/Pangolin 8 | - Dependency 9 | ``` 10 | sudo apt install libgl1-mesa-dev 11 | sudo apt install libglew-dev 12 | sudo apt install cmake 13 | sudo apt-get install libxkbcommon-x11-dev 14 | ``` 15 | - Build 16 | ``` 17 | git clone https://github.com/stevenlovegrove/Pangolin.git 18 | cd Pangolin 19 | mkdir build 20 | cd build 21 | cmake .. 22 | cmake --build . 23 | ``` 24 | 25 | ### ORB-SLAM2 26 | https://github.com/raulmur/ORB_SLAM2 27 | - Dependency: ```sudo apt-get install libeigen3-dev``` and ```sudo apt install libwayland-dev``` 28 | 29 | 1. Add ```#include <unistd.h>``` into ```include/System.h```. 30 | 2. Patch the repository (see Python Binding). 31 | 3. Build the project 32 | ``` 33 | chmod +x build.sh 34 | ./build.sh 35 | ``` 36 | 4. The Python binding requires ```sudo make install```. 37 | 38 | ### Python Binding 39 | https://github.com/jskinn/ORB_SLAM2-PythonBindings 40 | 1. Patch the orb-slam repository. 41 | ``` 42 | git apply ../ORB_SLAM2-PythonBindings/orbslam-changes.diff 43 | ``` 44 | 2. Build ORB-SLAM2 (see ORB-SLAM2). 45 | 3. Modify the python version from 3.5 to 3.6 in ```CMakeLists.txt``` 46 | ``` 47 | 31 find_package(PythonLibs 3.5 REQUIRED) -> 3.6 48 | 33 find_package(Boost 1.45.0 REQUIRED COMPONENTS python-py35) -> 36 49 | 72 install(TARGETS ${TARGET_MODULE_NAME} DESTINATION lib/python3.5/dist-packages) -> 3.6 50 | ``` 51 | 4. Install the dependencies. 52 | ``` 53 | sudo apt-get install libboost-all-dev 54 | ``` 55 | 5. Build the project. 56 | ``` 57 | mkdir build 58 | cd build 59 | cmake .. 60 | make 61 | sudo make install 62 | ``` 63 | 64 | 6. Test the python import 65 | ``` 66 | python3 67 | >>> import orbslam2 68 | ``` 69 | 70 | ### Test on TUM Dataset 71 | #### TUM Datasets 72 | https://vision.in.tum.de/data/datasets/rgbd-dataset/download 73 | 74 | Recommended sequence: 'freiburg3_long_office_household' 75 | https://vision.in.tum.de/rgbd/dataset/freiburg3/rgbd_dataset_freiburg3_long_office_household.tgz 76 | 77 | #### Run Code 78 | 1. Copy ```ORB_SLAM2-PythonBindings/examples/orbslam_mono_tum.py``` to ```ORB-SLAM2/```. 79 | 2. Make a folder ```Datasets/``` and extract the dataset into it. 80 | 3. 
Run ORB-SLAM: 81 | ``` 82 | python3 orbslam_mono_tum.py Vocabulary/ORBvoc.txt Examples/Monocular/TUM3.yaml Datasets/rgbd_dataset_freiburg3_long_office_household 83 | ``` 84 | 85 | ## Part II: Camera Calibration & Jetbot SLAM Live Demo 86 | ### Collect Data 87 | See ```Calibration/01_collect_data.ipynb``` (or ```01_collect_data.py```). 88 | ### Search Chessboard 89 | See ```Calibration/02_search_chessboard.ipynb```. 90 | ### Calibration 91 | See ```Calibration/03_calibration.ipynb```. 92 | ### Extract Camera Config 93 | See ```Calibration/04_camera_config.ipynb```. 94 | ### Record Dataset 95 | See ```Calibration/05_gen_dataset.ipynb```. 96 | 97 | ### Some Problems 98 | - Restart the camera 99 | ``` 100 | sudo systemctl restart nvargus-daemon 101 | ``` 102 | - Failed to load module “canberra-gtk-module” 103 | ``` 104 | sudo apt-get install libcanberra-gtk-module 105 | ``` 106 | 107 | ### OpenCV Calibration Tutorial 108 | https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html 109 | 110 | ### Matplotlib 111 | - FreeType 112 | http://www.linuxfromscratch.org/blfs/view/svn/general/freetype2.html 113 | ``` 114 | ./configure 115 | sudo make install 116 | ``` 117 | - libpng 118 | ``` 119 | sudo apt-get install -y libpng-dev 120 | ``` 121 | 122 | ### Plotly 123 | 1. Install the Python Plotly library 124 | ``` 125 | pip3 install plotly 126 | ``` 127 | 2. Install the Jupyter extension and rebuild JupyterLab. 128 | ``` 129 | sudo jupyter labextension install @jupyterlab/plotly-extension 130 | jupyter lab build 131 | ``` 132 | 3. Restart your Jetbot.
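Note that ```script.txt``` and the live demo reference a ```JetBot960.yaml``` settings file that is not included in this listing. A minimal sketch of what it would contain, using the standard ORB-SLAM2 monocular settings keys; every numeric value below is a placeholder to be replaced with the output of ```04_camera_config.py```:
```yaml
%YAML:1.0

# Camera intrinsics and distortion from Calibration/camera.npz (placeholder values)
Camera.fx: 600.00000000
Camera.fy: 600.00000000
Camera.cx: 480.00000000
Camera.cy: 270.00000000

Camera.k1: -0.30000000
Camera.k2: 0.10000000
Camera.p1: 0.00100000
Camera.p2: 0.00100000
Camera.k3: 0.00000000

Camera.fps: 30.0
# Color order of the images (0: BGR, 1: RGB)
Camera.RGB: 1

# ORB extractor parameters (ORB-SLAM2 defaults)
ORBextractor.nFeatures: 1000
ORBextractor.scaleFactor: 1.2
ORBextractor.nLevels: 8
ORBextractor.iniThFAST: 20
ORBextractor.minThFAST: 7
```
The viewer parameters (```Viewer.KeyFrameSize``` and friends) can be appended unchanged from the stock TUM yaml files.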
-------------------------------------------------------------------------------- /Calibration/01_collect_data.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Collect Calibration Data" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import traitlets\n", 17 | "import ipywidgets.widgets as widgets\n", 18 | "from IPython.display import display\n", 19 | "from jetbot import Camera, bgr8_to_jpeg\n", 20 | "import os\n", 21 | "import time" 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "Create data folder." 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "image_folder = \"Images/\"\n", 38 | "if not os.path.exists(image_folder):\n", 39 | " os.makedirs(image_folder)" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | "Save an image whenever the camera updates its value." 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "def update(change):\n", 56 | " global frame_id, image_folder, image\n", 57 | " fname = str(frame_id).zfill(4) + \".jpg\"\n", 58 | " with open(image_folder+fname, 'wb') as f:\n", 59 | " f.write(image.value)\n", 60 | " print(\"\\rsave data \" + fname, end=\"\")\n", 61 | " frame_id += 1\n", 62 | " time.sleep(0.05)" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "Activate the camera." 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": null, 75 | "metadata": {}, 76 | "outputs": [], 77 | "source": [ 78 | "camera = Camera.instance(width=960, height=540, capture_width=1280, capture_height=720)\n", 79 | "image = widgets.Image(format='jpeg', width=480, height=270) # this width and height doesn't necessarily have to match the camera\n", 80 | "camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)\n", 81 | "display(image)" 82 | ] 83 | }, 84 | { 85 | "cell_type": "markdown", 86 | "metadata": {}, 87 | "source": [ 88 | "Start collecting data." 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "frame_id = 0\n", 98 | "camera.observe(update, names='value')" 99 | ] 100 | }, 101 | { 102 | "cell_type": "markdown", 103 | "metadata": {}, 104 | "source": [ 105 | "Unlink the camera." 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": null, 111 | "metadata": {}, 112 | "outputs": [], 113 | "source": [ 114 | "camera.unobserve(update, names='value')\n", 115 | "camera_link.unlink()" 116 | ] 117 | } 118 | ], 119 | "metadata": { 120 | "kernelspec": { 121 | "display_name": "Python 3", 122 | "language": "python", 123 | "name": "python3" 124 | }, 125 | "language_info": { 126 | "codemirror_mode": { 127 | "name": "ipython", 128 | "version": 3 129 | }, 130 | "file_extension": ".py", 131 | "mimetype": "text/x-python", 132 | "name": "python", 133 | "nbconvert_exporter": "python", 134 | "pygments_lexer": "ipython3", 135 | "version": "3.6.9" 136 | } 137 | }, 138 | "nbformat": 4, 139 | "nbformat_minor": 2 140 | } 141 | -------------------------------------------------------------------------------- /Calibration/05_gen_dataset.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Record Dataset" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import traitlets\n", 17 | "import ipywidgets.widgets as widgets\n", 18 | "from IPython.display import display\n", 19 | "from jetbot import Camera, bgr8_to_jpeg\n", 20 | "import os\n", 21 | "import time" 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "Create data folder." 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "dataset_path = \"jetbot_dataset_960/\"\n", 38 | "image_folder = \"rgb/\"\n", 39 | "if not os.path.exists(dataset_path+image_folder):\n", 40 | " os.makedirs(dataset_path+image_folder)\n", 41 | "file = open(dataset_path+\"rgb.txt\", \"w\")" 42 | ] 43 | }, 44 | { 45 | "cell_type": "markdown", 46 | "metadata": {}, 47 | "source": [ 48 | "Save an image whenever the camera updates its value." 
49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": null, 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "def update(change):\n", 58 | " global file, dataset_path, image_folder, frame_id\n", 59 | " # Write image\n", 60 | " fname = str(frame_id).zfill(4) + \".jpg\"\n", 61 | " print(frame_id, fname)\n", 62 | " with open(dataset_path + image_folder + fname, 'wb') as f:\n", 63 | " f.write(image.value)\n", 64 | " \n", 65 | " # Write file\n", 66 | " file.write(str(frame_id) + \" \" + image_folder + fname + \"\\n\")\n", 67 | " frame_id += 1\n", 68 | " time.sleep(0.02)" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | "Activate the camera." 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "camera = Camera.instance(width=960, height=540, capture_width=1280, capture_height=720)\n", 85 | "image = widgets.Image(format='jpeg', width=480, height=270) # this width and height doesn't necessarily have to match the camera\n", 86 | "camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)\n", 87 | "display(image)" 88 | ] 89 | }, 90 | { 91 | "cell_type": "markdown", 92 | "metadata": {}, 93 | "source": [ 94 | "Start collecting data." 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "frame_id = 0\n", 104 | "camera.observe(update, names='value')" 105 | ] 106 | }, 107 | { 108 | "cell_type": "markdown", 109 | "metadata": {}, 110 | "source": [ 111 | "Unlink the camera." 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": null, 117 | "metadata": {}, 118 | "outputs": [], 119 | "source": [ 120 | "camera.unobserve(update, names='value')\n", 121 | "camera_link.unlink()\n", 122 | "file.close()" 123 | ] 124 | } 125 | ], 126 | "metadata": { 127 | "kernelspec": { 128 | "display_name": "Python 3", 129 | "language": "python", 130 | "name": "python3" 131 | }, 132 | "language_info": { 133 | "codemirror_mode": { 134 | "name": "ipython", 135 | "version": 3 136 | }, 137 | "file_extension": ".py", 138 | "mimetype": "text/x-python", 139 | "name": "python", 140 | "nbconvert_exporter": "python", 141 | "pygments_lexer": "ipython3", 142 | "version": "3.6.9" 143 | } 144 | }, 145 | "nbformat": 4, 146 | "nbformat_minor": 2 147 | } 148 | -------------------------------------------------------------------------------- /Calibration/03_calibration.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import numpy as np\n", 10 | "import cv2\n", 11 | "import os\n", 12 | "import IPython\n", 13 | "\n", 14 | "images = os.listdir(\"Images_cali/\")" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "def show_image(image):\n", 24 | " _,ret = cv2.imencode('.jpg', image) \n", 25 | " i = IPython.display.Image(data=ret)\n", 26 | " IPython.display.display(i)" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "# termination criteria\n", 36 | "criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n", 37 | "\n", 38 | "# prepare object points, like (0,0,0), (1,0,0), 
(2,0,0) ....,(6,5,0)\n", 39 | "objp = np.zeros((6*7,3), np.float32)\n", 40 | "objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)\n", 41 | "\n", 42 | "# Arrays to store object points and image points from all the images.\n", 43 | "objpoints = [] # 3d point in real world space\n", 44 | "imgpoints = [] # 2d points in image plane.\n", 45 | "\n", 46 | "for fname in images:\n", 47 | " if fname.split(\".\")[-1] != \"jpg\":\n", 48 | " continue\n", 49 | " print(\"\\r\" + fname, end=\"\")\n", 50 | " img = cv2.imread(\"Images_cali/\" + fname)\n", 51 | " gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n", 52 | "\n", 53 | " # Find the chess board corners\n", 54 | " ret, corners = cv2.findChessboardCorners(gray, (7,6),None)\n", 55 | "\n", 56 | " # If found, add object points, image points (after refining them)\n", 57 | " if ret == True:\n", 58 | " objpoints.append(objp)\n", 59 | "\n", 60 | " corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\n", 61 | " imgpoints.append(corners2)\n", 62 | "\n", 63 | " # Draw and display the corners\n", 64 | " img = cv2.drawChessboardCorners(img, (7,6), corners2,ret)\n", 65 | " show_image(img)" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": null, 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | "ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)\n", 75 | "np.savez_compressed('camera.npz', ret=ret, mtx=mtx, dist=dist, rvecs=rvecs, tvecs=tvecs)\n", 76 | "image_folder = \"Images_undist/\"\n", 77 | "if not os.path.exists(image_folder):\n", 78 | " os.makedirs(image_folder)\n", 79 | "\n", 80 | "for fname in images:\n", 81 | " if fname.split(\".\")[-1] != \"jpg\":\n", 82 | " continue\n", 83 | " \n", 84 | " img = cv2.imread(\"Images_cali/\" + fname)\n", 85 | " h, w = img.shape[:2]\n", 86 | " newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))\n", 87 | " print(fname, \", ROI:\", roi)\n", 88 | " # undistort\n", 89 | " dst = cv2.undistort(img, mtx, dist, None, newcameramtx)\n", 90 | "\n", 91 | " # crop the image\n", 92 | " x,y,w,h = roi\n", 93 | " #dst = dst[y:y+h, x:x+w]\n", 94 | " cv2.imwrite(image_folder+fname,dst)" 95 | ] 96 | } 97 | ], 98 | "metadata": { 99 | "kernelspec": { 100 | "display_name": "Python 3", 101 | "language": "python", 102 | "name": "python3" 103 | }, 104 | "language_info": { 105 | "codemirror_mode": { 106 | "name": "ipython", 107 | "version": 3 108 | }, 109 | "file_extension": ".py", 110 | "mimetype": "text/x-python", 111 | "name": "python", 112 | "nbconvert_exporter": "python", 113 | "pygments_lexer": "ipython3", 114 | "version": "3.6.9" 115 | } 116 | }, 117 | "nbformat": 4, 118 | "nbformat_minor": 2 119 | } 120 | -------------------------------------------------------------------------------- /orbslam_dataset_demo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import orbslam2\n", 10 | "import time\n", 11 | "\n", 12 | "vocab_path = \"Vocabulary/ORBvoc.txt\" \n", 13 | "#settings_path = \"Calibration/TUM3.yaml\"\n", 14 | "#dataset_path = \"rgbd_dataset_freiburg3_long_office_household_validation/rgb/\"\n", 15 | "settings_path = \"CameraParams/JetBot960.yaml\"\n", 16 | "dataset_path = \"Datasets/jetbot_dataset_960/rgb/\"" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "slam = 
orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR)\n", 26 | "slam.set_use_viewer(False)\n", 27 | "slam.initialize()\n", 28 | "print(\"Successfully initialized ORB-SLAM.\")" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "def update(img):\n", 38 | " global slam, starttime, map_points \n", 39 | " timestamp = time.time() - starttime\n", 40 | " slam.process_image_mono(img, float(timestamp))\n", 41 | " \n", 42 | " # Get Tracking State\n", 43 | " track_state = slam.get_tracking_state()\n", 44 | " print('\\rTimestamp:', str(timestamp)[0:5], \"| State:\", track_state, end='')\n", 45 | " \n", 46 | " # Get Pose and Map Information\n", 47 | " trajectory = slam.get_trajectory_points()\n", 48 | " if len(trajectory) == 0:\n", 49 | " loc = None\n", 50 | " tracked_points = {}\n", 51 | " else:\n", 52 | " tracked_points = slam.get_tracked_mappoints()\n", 53 | " map_points.update(tracked_points)\n", 54 | " trans = slam.get_trajectory_points()[-1]\n", 55 | " loc = (round(trans[4], 4), round(trans[8], 4), round(trans[12], 4))\n", 56 | " \n", 57 | " print(\" | Location:\", loc, \"| Tracked Map Size:\", len(tracked_points), \"| Map Size:\", len(map_points), end='')" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": null, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "import ipywidgets.widgets as widgets\n", 67 | "from IPython.display import display\n", 68 | "import os\n", 69 | "import cv2\n", 70 | "\n", 71 | "# Display widget for the dataset frames (no live camera is used in this demo)\n", 72 | "image = widgets.Image(format='jpeg', width=320, height=240) \n", 73 | "display(image)\n", 74 | "\n", 75 | "map_points = {}\n", 76 | "starttime = time.time()\n", 77 | "slam.reset()\n", 78 | "\n", 79 | "images = sorted(os.listdir(dataset_path))\n", 80 | "for i, fname in enumerate(images):\n", 81 | " if fname.split(\".\")[-1] != \"jpg\" and fname.split(\".\")[-1] != \"png\":\n", 82 | " continue\n", 83 | " img_file = open(dataset_path + fname, \"rb\")\n", 84 | " image.value = img_file.read()\n", 85 | " img = cv2.imread(dataset_path+fname)\n", 86 | " update(img)\n", 87 | " time.sleep(0.01)\n", 88 | " if i > 500:\n", 89 | " break" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "import numpy as np\n", 99 | "import json\n", 100 | "\n", 101 | "# Save Trajectory\n", 102 | "traj_pts = slam.get_trajectory_points()\n", 103 | "timestamps = np.array([t[0] for t in traj_pts])\n", 104 | "transforms = np.array([t[1:13] for t in traj_pts]).reshape(-1,3,4)\n", 105 | "np.savez(\"dataset_poses.npz\", timestamps=timestamps, transforms=transforms)\n", 106 | "\n", 107 | "# Save Map Points\n", 108 | "file = open(\"dataset_map_points.json\", \"w\")\n", 109 | "json.dump(map_points, file)\n", 110 | "file.close()" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": null, 116 | "metadata": {}, 117 | "outputs": [], 118 | "source": [ 119 | "import numpy as np\n", 120 | "import json\n", 121 | "\n", 122 | "# Load Poses\n", 123 | "rec = np.load('dataset_poses.npz')\n", 124 | "timestamps, transforms = rec['timestamps'], rec['transforms']\n", 125 | "\n", 126 | "# Load Map Points\n", 127 | "file = open(\"dataset_map_points.json\", \"r\")\n", 128 | "map_points = json.load(file)\n", 129 | "file.close()" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "metadata": {}, 136 | 
"outputs": [], 137 | "source": [ 138 | "import plotly\n", 139 | "import plotly.graph_objs as go\n", 140 | "\n", 141 | "# Configure Plotly to be rendered inline in the notebook.\n", 142 | "plotly.offline.init_notebook_mode()\n", 143 | "\n", 144 | "print(\"Plot Map Points ...\")\n", 145 | "x_mpts, y_mpts, z_mpts = [], [], []\n", 146 | "for mid in map_points:\n", 147 | " x_mpts.append(map_points[mid][0])\n", 148 | " y_mpts.append(map_points[mid][1])\n", 149 | " z_mpts.append(map_points[mid][2])\n", 150 | "\n", 151 | "mpts = go.Scatter3d(\n", 152 | " x=x_mpts, y=y_mpts, z=z_mpts, \n", 153 | " mode='markers', #'markers','lines','lines+markers'\n", 154 | " marker={'size': 2, 'opacity': 0.8, 'color':'rgb(100,100,100)'},\n", 155 | ")\n", 156 | "\n", 157 | "print(\"Plot Location ...\")\n", 158 | "x_traj, y_traj, z_traj = [], [], []\n", 159 | "for i in range(transforms.shape[0]):\n", 160 | " x_traj.append(transforms[i,0,3])\n", 161 | " y_traj.append(transforms[i,1,3])\n", 162 | " z_traj.append(transforms[i,2,3])\n", 163 | " \n", 164 | "loc = go.Scatter3d(\n", 165 | " x=x_traj, y=y_traj, z=z_traj, \n", 166 | " mode='markers', #'markers','lines','lines+markers'\n", 167 | " marker={'size': 3, 'opacity': 0.8},\n", 168 | ")\n", 169 | "\n", 170 | "print(\"Plot Trajectory ...\")\n", 171 | "traj = go.Scatter3d(\n", 172 | " x=x_traj, y=y_traj, z=z_traj, \n", 173 | " mode='lines', #'markers','lines','lines+markers'\n", 174 | " line=dict(color='rgb(255, 100, 255)',width=3),\n", 175 | ")\n", 176 | "data = [loc, traj, mpts]\n", 177 | "\n", 178 | "# Frame Axis\n", 179 | "print(\"Plot Axis ...\")\n", 180 | "scale = 0.02\n", 181 | "for i in range(0,transforms.shape[0],1):\n", 182 | " print(\"\\rPlot axis of frame\", i, end=\"\")\n", 183 | " trans = transforms[i]\n", 184 | " x, y, z = trans[0,3], trans[1,3], trans[2,3]\n", 185 | " axis_x = go.Scatter3d(x=[x,x+scale*trans[0,0]],y=[y,y+scale*trans[1,0]],z=[z,z+scale*trans[2,0]],mode='lines',line=dict(color='#ff0000',width=4))\n", 186 | " axis_y = go.Scatter3d(x=[x,x+scale*trans[0,1]],y=[y,y+scale*trans[1,1]],z=[z,z+scale*trans[2,1]],mode='lines',line=dict(color='#00ff00',width=4))\n", 187 | " axis_z = go.Scatter3d(x=[x,x+scale*trans[0,2]],y=[y,y+scale*trans[1,2]],z=[z,z+scale*trans[2,2]],mode='lines',line=dict(color='#0000ff',width=4))\n", 188 | " data += [axis_x, axis_y, axis_z]\n", 189 | "\n", 190 | "# Configure the layout.\n", 191 | "print(\"\\nBuild Figure ...\")\n", 192 | "layout = go.Layout(margin={'l': 0, 'r': 0, 'b': 0, 't': 0}, scene=dict(aspectmode=\"data\"))\n", 193 | "plot_figure = go.Figure(data=data, layout=layout)\n", 194 | "\n", 195 | "# Render the plot.\n", 196 | "print(\"Rendering ...\")\n", 197 | "plotly.offline.iplot(plot_figure)" 198 | ] 199 | } 200 | ], 201 | "metadata": { 202 | "kernelspec": { 203 | "display_name": "Python 3", 204 | "language": "python", 205 | "name": "python3" 206 | }, 207 | "language_info": { 208 | "codemirror_mode": { 209 | "name": "ipython", 210 | "version": 3 211 | }, 212 | "file_extension": ".py", 213 | "mimetype": "text/x-python", 214 | "name": "python", 215 | "nbconvert_exporter": "python", 216 | "pygments_lexer": "ipython3", 217 | "version": "3.6.9" 218 | } 219 | }, 220 | "nbformat": 4, 221 | "nbformat_minor": 2 222 | } 223 | --------------------------------------------------------------------------------