├── .gitignore ├── 2d_lidar_slam ├── README.md ├── data │ ├── fr079.clf │ ├── input_INTEL_g2o.g2o │ ├── input_M3500b_g2o.g2o │ ├── input_MITb_g2o.g2o │ ├── intel.clf │ ├── mit-csail.clf │ └── mit-killian.clf ├── literature │ ├── g2o_format.pdf │ └── graphSLAM_formulation.pdf ├── requirements.txt ├── res ├── results │ ├── After_INTEL.png │ ├── Before_INTEL.png │ ├── custom-frontend │ │ ├── After_intel_once.png │ │ └── Before_intel_once.png │ ├── graph_optimization │ │ ├── After_INTEL.png │ │ ├── After_MITb.png │ │ ├── After_intel_3700.png │ │ ├── After_intel_3800.png │ │ ├── Before_INTEL.png │ │ └── Before_MITb.png │ ├── slam_intel_3700.gif │ └── slam_intel_3800.gif └── src │ ├── chi2_grad_hess.py │ ├── edge_odometry.py │ ├── frontend.py │ ├── graph.py │ ├── icp.py │ ├── load.py │ ├── loop_closure.py │ ├── main_clf.py │ ├── main_g2o.py │ ├── pose_se2.py │ ├── util.py │ └── vertex.py ├── Graph_SLAM_Report.pdf ├── README.md └── visual_slam ├── =1.18.3 ├── README.md ├── bash_utils.sh ├── core ├── dataset.py ├── display2D.py ├── display3D.py ├── geocom │ ├── __init__.py │ └── features.py ├── loop_closure.py ├── model.py ├── optimizer.py └── utils.py ├── data ├── 00Gt.npy ├── 00Poses.npy ├── 00Raw.npy ├── 05Gt.npy ├── 05Poses.npy ├── 05Raw.npy ├── 06Gt.npy ├── 06Poses.npy ├── 06Raw.npy ├── 07Gt.npy ├── 07Poses.npy ├── 07Raw.npy ├── out.g2o ├── s00.png ├── s05.png ├── s06.png ├── s07.png ├── sequence00.gif ├── sequence05.gif ├── sequence06.gif └── sequence07.gif ├── install_all.sh ├── install_basic.sh ├── install_pip3_packages.sh ├── install_system_packages.sh ├── install_thirdparty.sh ├── plot.py ├── run_slam.py └── thirdparty ├── g2opy_changes ├── python_CMakeLists.txt ├── sparse_optimizer.h └── types_six_dof_expmap.h └── pangolin_changes └── python_CMakeLists.txt /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | # Byte-compiled / optimized / DLL files 3 | __pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | pip-wheel-metadata/ 25 | share/python-wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | db.sqlite3-journal 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | .python-version 87 | 88 | # pipenv 89 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
90 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
91 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
92 | # install all needed dependencies.
93 | #Pipfile.lock
94 | 
95 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
96 | __pypackages__/
97 | 
98 | # Celery stuff
99 | celerybeat-schedule
100 | celerybeat.pid
101 | 
102 | # SageMath parsed files
103 | *.sage.py
104 | 
105 | # Environments
106 | .env
107 | .venv
108 | env/
109 | venv/
110 | ENV/
111 | env.bak/
112 | venv.bak/
113 | 
114 | # Spyder project settings
115 | .spyderproject
116 | .spyproject
117 | 
118 | # Rope project settings
119 | .ropeproject
120 | 
121 | # mkdocs documentation
122 | /site
123 | 
124 | # mypy
125 | .mypy_cache/
126 | .dmypy.json
127 | dmypy.json
128 | 
129 | # Pyre type checker
130 | .pyre/
131 | 
--------------------------------------------------------------------------------
/2d_lidar_slam/README.md:
--------------------------------------------------------------------------------
1 | # 2D Lidar SLAM
2 | 
3 | In this module we implement custom frontend and backend components. The backend consists of a custom 2D pose graph construction and optimization implementation. The frontend uses odometry, lidar scans, and scan matching to compute graph poses. Loop closure constraints are added using ICP. We first implemented the backend graph optimization (`main_g2o.py`) using the processed g2o data from the [Luca Carlone datasets](https://lucacarlone.mit.edu/datasets/). Once this was achieved, we moved on to the raw data provided by the [Intel Research Lab](http://ais.informatik.uni-freiburg.de/slamevaluation/datasets.php) and built our own pose graph from the laser and odometry information.
4 | 
5 | ## Setup and Usage
6 | 
7 | ```
8 | conda create -n graph_slam python=3.6
9 | conda activate graph_slam
10 | pip install -r requirements.txt
11 | 
12 | python main_clf.py ../data/intel.clf
13 | ```
14 | 
15 | Pose Graph before optimization | Pose Graph after optimization
16 | :-------------------------:|:-------------------------:
17 | ![](results/graph_optimization/Before_INTEL.png) | ![](results/graph_optimization/After_INTEL.png)
18 | 
19 | Real-Time Pose Graph Generation and Optimization after Loop Closures
20 | :-------------------------:|
21 | ![](results/slam_intel_3700.gif)
--------------------------------------------------------------------------------
/2d_lidar_slam/literature/g2o_format.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/literature/g2o_format.pdf
--------------------------------------------------------------------------------
/2d_lidar_slam/literature/graphSLAM_formulation.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/literature/graphSLAM_formulation.pdf
--------------------------------------------------------------------------------
/2d_lidar_slam/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | matplotlib
3 | scipy
4 | scikit-learn
5 | imageio
6 | tqdm
--------------------------------------------------------------------------------
/2d_lidar_slam/res:
--------------------------------------------------------------------------------
1 | python main.py 
../data/input_INTEL_g2o.g2o 2 | Error before optimization: 7191686.382493543 3 | 4 | Iteration chi^2 rel. change 5 | --------- ----- ----------- 6 | 0 7191686.3825 7 | 1 16148867176.4848 2244.491018 8 | 2 1858828534.8563 -0.884894 9 | 3 36767017.2831 -0.980220 10 | 4 2768817.9312 -0.924693 11 | 5 225.2847 -0.999919 12 | 6 215.8409 -0.041919 13 | 7 215.8405 -0.000002 14 | 15 | 16 | 17 | python main.py ../data/input_MITb_g2o.g2o 18 | Error before optimization: 3411019102.211544 19 | 20 | Iteration chi^2 rel. change 21 | --------- ----- ----------- 22 | 1 17694076618.8422 4.187329 23 | 2 2811800240273.2725 157.911951 24 | 3 395556549997.4802 -0.859323 25 | 4 158129022042.0200 -0.600237 26 | 5 34596302635.9224 -0.781215 27 | 6 11738745023.4952 -0.660694 28 | 7 2071584714.2696 -0.823526 29 | 8 3784987054.1789 0.827097 30 | 9 14257760.1444 -0.996233 31 | 10 44117752.6420 2.094298 32 | 11 103842376.0509 1.353755 33 | 12 3834214.6830 -0.963077 34 | 13 27034995.3542 6.050986 35 | 14 3356256.7994 -0.875855 36 | 15 51850.4262 -0.984551 37 | 16 1462.9355 -0.971785 38 | 17 1829.5405 0.250595 39 | 18 779.5402 -0.573915 40 | 19 768.5112 -0.014148 41 | 20 768.4596 -0.000067 -------------------------------------------------------------------------------- /2d_lidar_slam/results/After_INTEL.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/After_INTEL.png -------------------------------------------------------------------------------- /2d_lidar_slam/results/Before_INTEL.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/Before_INTEL.png -------------------------------------------------------------------------------- /2d_lidar_slam/results/custom-frontend/After_intel_once.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/custom-frontend/After_intel_once.png -------------------------------------------------------------------------------- /2d_lidar_slam/results/custom-frontend/Before_intel_once.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/custom-frontend/Before_intel_once.png -------------------------------------------------------------------------------- /2d_lidar_slam/results/graph_optimization/After_INTEL.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/graph_optimization/After_INTEL.png -------------------------------------------------------------------------------- /2d_lidar_slam/results/graph_optimization/After_MITb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/graph_optimization/After_MITb.png -------------------------------------------------------------------------------- /2d_lidar_slam/results/graph_optimization/After_intel_3700.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/graph_optimization/After_intel_3700.png -------------------------------------------------------------------------------- /2d_lidar_slam/results/graph_optimization/After_intel_3800.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/graph_optimization/After_intel_3800.png -------------------------------------------------------------------------------- /2d_lidar_slam/results/graph_optimization/Before_INTEL.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/graph_optimization/Before_INTEL.png -------------------------------------------------------------------------------- /2d_lidar_slam/results/graph_optimization/Before_MITb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/graph_optimization/Before_MITb.png -------------------------------------------------------------------------------- /2d_lidar_slam/results/slam_intel_3700.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/slam_intel_3700.gif -------------------------------------------------------------------------------- /2d_lidar_slam/results/slam_intel_3800.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/2d_lidar_slam/results/slam_intel_3800.gif -------------------------------------------------------------------------------- /2d_lidar_slam/src/chi2_grad_hess.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | import numpy as np 3 | 4 | 5 | class _Chi2GradientHessian: 6 | r"""A class that is used to aggregate the :math:`\chi^2` error, gradient, and Hessian. 7 | 8 | Parameters 9 | ---------- 10 | dim : int 11 | The compact dimensionality of the poses 12 | 13 | Attributes 14 | ---------- 15 | chi2 : float 16 | The :math:`\chi^2` error 17 | dim : int 18 | The compact dimensionality of the poses 19 | gradient : defaultdict 20 | The contributions to the gradient vector 21 | hessian : defaultdict 22 | The contributions to the Hessian matrix 23 | 24 | """ 25 | 26 | def __init__(self, dim): 27 | self.chi2 = 0.0 28 | self.dim = dim 29 | self.gradient = defaultdict(lambda: np.zeros(dim)) 30 | self.hessian = defaultdict(lambda: np.zeros((dim, dim))) 31 | 32 | @staticmethod 33 | def update(chi2_grad_hess, incoming): 34 | r"""Update the :math:`\chi^2` error and the gradient and Hessian dictionaries. 
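        This static method is meant to be folded over the per-edge
        ``(chi2, gradient, hessian)`` tuples produced by
        ``EdgeOdometry.calc_chi2_gradient_hessian`` (see the
        ``functools.reduce`` call in ``Graph._calc_chi2_grad_hess``).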
35 | 36 | Parameters 37 | ---------- 38 | chi2_grad_hess : _Chi2GradientHessian 39 | The ``_Chi2GradientHessian`` that will be updated 40 | incoming : tuple 41 | """ 42 | chi2_grad_hess.chi2 += incoming[0] 43 | 44 | for idx, contrib in incoming[1].items(): 45 | chi2_grad_hess.gradient[idx] += contrib 46 | 47 | for (idx1, idx2), contrib in incoming[2].items(): 48 | if idx1 <= idx2: 49 | chi2_grad_hess.hessian[idx1, idx2] += contrib 50 | else: 51 | chi2_grad_hess.hessian[idx2, idx1] += np.transpose(contrib) 52 | 53 | return chi2_grad_hess 54 | -------------------------------------------------------------------------------- /2d_lidar_slam/src/edge_odometry.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from pose_se2 import PoseSE2 4 | import matplotlib.pyplot as plt 5 | 6 | #: The difference that will be used for numerical differentiation 7 | EPSILON = 1e-6 8 | 9 | 10 | class EdgeOdometry: 11 | def __init__(self, vertex_ids, information, estimate, vertices=None): 12 | """ 13 | A class for representing odometry edges in Graph SLAM 14 | 15 | Args: 16 | vertex_ids (list[int]): The ids of all vertices constrained by this edge 17 | information (np.ndarray): The information matrix associated with the edge 18 | estimate (PoseSE2): The expected measurement 19 | vertices (list[graphslam.vertex.Vertex], optional): A list of vertices constrained by the edge 20 | """ 21 | 22 | self.vertex_ids = vertex_ids 23 | self.information = information 24 | self.estimate = estimate 25 | self.vertices = vertices 26 | 27 | def calc_error(self): 28 | 29 | err = self.estimate - (self.vertices[1].pose - self.vertices[0].pose) 30 | # print(err) 31 | return err 32 | 33 | def calc_chi2(self): 34 | 35 | err = self.calc_error() 36 | return np.transpose(err.arr) @ self.information @ err.arr 37 | 38 | def plot(self, color="b"): 39 | 40 | # import ipdb; ipdb.set_trace() 41 | xy = np.array([v.pose.position for v in self.vertices]) 42 | plt.plot(xy[:, 0], xy[:, 1], color=color) 43 | 44 | def calc_chi2_gradient_hessian(self): 45 | r"""Calculate the edge's contributions to the graph's :math:`\chi^2` error, gradient (:math:`\mathbf{b}`), and Hessian (:math:`H`). 46 | 47 | Returns 48 | ------- 49 | float 50 | The :math:`\chi^2` error for the edge 51 | dict 52 | The edge's contribution(s) to the gradient 53 | dict 54 | The edge's contribution(s) to the Hessian 55 | 56 | """ 57 | chi2 = self.calc_chi2() 58 | 59 | err = self.calc_error() 60 | 61 | jacobians = self.calc_jacobians() 62 | 63 | return ( 64 | chi2, 65 | { 66 | v.index: np.dot( 67 | np.dot(np.transpose(err.arr.squeeze()), self.information), jacobian 68 | ) 69 | for v, jacobian in zip(self.vertices, jacobians) 70 | }, 71 | { 72 | (self.vertices[i].index, self.vertices[j].index): np.dot( 73 | np.dot(np.transpose(jacobians[i]), self.information), jacobians[j] 74 | ) 75 | for i in range(len(jacobians)) 76 | for j in range(i, len(jacobians)) 77 | }, 78 | ) 79 | 80 | def calc_jacobians(self): 81 | r"""Calculate the Jacobian of the edge's error with respect to each constrained pose. 82 | 83 | .. 
math::
84 | 
85 |             \frac{\partial}{\partial \Delta \mathbf{x}^k} \left[ \mathbf{e}_j(\mathbf{x}^k \boxplus \Delta \mathbf{x}^k) \right]
86 | 
87 | 
88 |         Returns
89 |         -------
90 |         list[np.ndarray]
91 |             The Jacobian matrices for the edge with respect to each constrained pose
92 | 
93 |         """
94 |         err = self.calc_error()
95 | 
96 |         # The dimensionality of the compact pose representation
97 |         dim = len(self.vertices[0].pose.arr)
98 | 
99 |         return [self._calc_jacobian(err, dim, i) for i in range(len(self.vertices))]
100 | 
101 |     def _calc_jacobian(self, err, dim, vertex_index):
102 |         r"""Calculate the Jacobian of the edge with respect to the specified vertex's pose.
103 | 
104 |         Parameters
105 |         ----------
106 |         err : np.ndarray
107 |             The current error for the edge (see :meth:`BaseEdge.calc_error`)
108 |         dim : int
109 |             The dimensionality of the compact pose representation
110 |         vertex_index : int
111 |             The index of the vertex (pose) for which we are computing the Jacobian
112 | 
113 |         Returns
114 |         -------
115 |         np.ndarray
116 |             The Jacobian of the edge with respect to the specified vertex's pose
117 | 
118 |         """
119 |         jacobian = np.zeros(err.arr.squeeze().shape + (dim,))
120 |         p0 = self.vertices[vertex_index].pose.copy()
121 |         # breakpoint()
122 |         for d in range(dim):
123 |             # update the pose
124 |             delta_pose = np.zeros(dim)
125 |             delta_pose[d] = EPSILON
126 |             self.vertices[vertex_index].pose += PoseSE2.from_array(delta_pose)
127 | 
128 |             # compute the numerical derivative
129 |             jacobian[:, d] = (self.calc_error().arr - err.arr).squeeze() / EPSILON
130 | 
131 |             # restore the pose
132 |             self.vertices[vertex_index].pose = p0.copy()
133 | 
134 |         return jacobian
135 | 
136 | 
--------------------------------------------------------------------------------
/2d_lidar_slam/src/frontend.py:
--------------------------------------------------------------------------------
1 | import time
2 | 
3 | import imageio
4 | import numpy as np
5 | import matplotlib.pyplot as plt
6 | from tqdm import tqdm
7 | 
8 | import icp
9 | from graph import Graph
10 | from loop_closure import find_loop_closure
11 | from pose_se2 import PoseSE2
12 | 
13 | 
14 | def run(clf_file, name, save_gif=True, plot_every=1000):
15 |     if save_gif:
16 |         import atexit
17 | 
18 |         images = []
19 | 
20 |         def fn():
21 |             print("Saving GIF")
22 |             imageio.mimsave(f"./../results/slam_{int(time.time())}.gif", images, fps=20)
23 |             print("Done")
24 | 
25 |         atexit.register(fn)
26 | 
27 |     with open(clf_file, "r") as f:
28 |         lasers = []
29 |         odoms = []
30 |         for line in f:
31 |             tokens = line.split(" ")
32 |             if tokens[0] == "FLASER":
33 |                 num_readings = int(tokens[1])
34 |                 scans = np.array(tokens[2 : 2 + num_readings], dtype=np.float64)
35 |                 scan_time = float(tokens[2 + num_readings + 6])
36 |                 index = np.arange(-90, 90 + 180 / num_readings, 180 / num_readings)
37 |                 index = np.delete(index, num_readings // 2)
38 |                 converted_scans = []
39 |                 angles = np.radians(index)
40 |                 converted_scans = (
41 |                     np.array([np.cos(angles), np.sin(angles)]).T * scans[:, np.newaxis]
42 |                 )
43 |                 lasers.append(np.array(converted_scans))
44 |                 x = float(tokens[2 + num_readings])
45 |                 y = float(tokens[3 + num_readings])
46 |                 theta = float(tokens[4 + num_readings])
47 |                 odoms.append([x, y, theta])
48 | 
49 |     odoms = np.array(odoms)
50 |     lasers = np.array(lasers)
51 | 
52 |     pose_graph = Graph([], [])
53 |     pose = np.eye(3)
54 |     pose_graph.add_vertex(0, PoseSE2.from_rt_matrix(pose))
55 | 
56 |     init_pose = np.eye(3)
57 |     vertex_idx = 1
58 |     all_lasers = []
59 | 
60 |     max_x = -float("inf")
61 |     max_y = -float("inf")
62 
| min_x = float("inf") 63 | min_y = float("inf") 64 | 65 | for od_idx, odom in tqdm(enumerate(odoms[:3700]), total=len(odoms)): 66 | # Initialize 67 | if od_idx == 0: 68 | prev_odom = odom.copy() 69 | prev_idx = 0 70 | B = lasers[od_idx] 71 | all_lasers.append(B) 72 | continue 73 | 74 | dx = odom - prev_odom 75 | 76 | # IF the robot has moved significantly 77 | if np.linalg.norm(dx[0:2]) > 0.4 or abs(dx[2]) > 0.2: 78 | # Scan Matching 79 | A = lasers[prev_idx] 80 | B = lasers[od_idx] 81 | 82 | x, y, yaw = dx[0], dx[1], dx[2] 83 | init_pose = np.array( 84 | [ 85 | [np.cos(yaw), -np.sin(yaw), x], 86 | [np.sin(yaw), np.cos(yaw), y], 87 | [0, 0, 1], 88 | ] 89 | ) 90 | 91 | with np.errstate(all="raise"): 92 | try: 93 | tran, distances, iter, cov = icp.icp( 94 | B, A, init_pose, max_iterations=80, tolerance=0.0001 95 | ) 96 | except Exception as e: 97 | continue 98 | init_pose = tran 99 | pose = pose @ tran 100 | # breakpoint() 101 | pose_graph.add_vertex(vertex_idx, PoseSE2.from_rt_matrix(pose)) 102 | information = np.linalg.inv(cov) 103 | pose_graph.add_edge( 104 | [vertex_idx - 1, vertex_idx], PoseSE2.from_rt_matrix(tran), information 105 | ) 106 | 107 | prev_odom = odom 108 | prev_idx = od_idx 109 | 110 | all_lasers.append(B) 111 | 112 | if vertex_idx > 10 and vertex_idx % 10 == 0: 113 | find_loop_closure( 114 | curr_pose=PoseSE2.from_rt_matrix(pose), 115 | curr_idx=vertex_idx, 116 | laser=all_lasers, 117 | g=pose_graph, 118 | ) 119 | 120 | pose_graph.optimize() 121 | # print(vertex_idx, pose) 122 | pose = pose_graph.get_rt_matrix(vertex_idx) 123 | # print(vertex_idx, pose) 124 | 125 | # Draw trajectory and map 126 | traj = [] 127 | point_cloud = [] 128 | for idx in range(0, vertex_idx): 129 | # x, y, yaw = pose_graph.get_pose(idx).arr 130 | # r = np.array([[np.cos(yaw), -np.sin(yaw)], [np.sin(yaw), np.cos(yaw)]]) 131 | # t = np.array([x, y]).T 132 | mat = pose_graph.get_rt_matrix(idx) 133 | r = mat[:2, :2] 134 | t = mat[:2, 2] 135 | filtered = all_lasers[idx] 136 | filtered = filtered[np.linalg.norm(filtered, axis=1) < 80] 137 | point_cloud.append((r @ filtered.T + t[:, np.newaxis]).T) 138 | traj.append(t[0:2]) 139 | point_cloud = np.vstack(point_cloud) 140 | 141 | xyreso = 0.01 # Map resolution (m) 142 | point_cloud = (point_cloud / xyreso).astype("int") 143 | point_cloud = np.unique(point_cloud, axis=0) 144 | point_cloud = point_cloud * xyreso 145 | 146 | current_max = np.max(point_cloud, axis=0) 147 | current_min = np.min(point_cloud, axis=0) 148 | max_x = max(max_x, current_max[0]) 149 | max_y = max(max_y, current_max[1]) 150 | min_x = min(min_x, current_min[0]) 151 | min_y = min(min_y, current_min[1]) 152 | 153 | plt.cla() 154 | plt.axis([min_x, max_x, min_y, max_y]) 155 | # fig = plt.gcf() 156 | # fig.set_size_inches((9, 9), forward=False) 157 | traj = np.array(traj) 158 | plt.plot(traj[:, 0], traj[:, 1], "-g") 159 | plt.plot(point_cloud[:, 0], point_cloud[:, 1], ".b", markersize=0.1) 160 | plt.pause(0.0001) 161 | 162 | if save_gif: 163 | plt.gcf().canvas.draw() 164 | image = np.frombuffer(plt.gcf().canvas.tostring_rgb(), dtype="uint8") 165 | image = image.reshape(plt.gcf().canvas.get_width_height()[::-1] + (3,)) 166 | images.append(image) 167 | 168 | vertex_idx += 1 169 | 170 | # if vertex_idx > 10 and vertex_idx % plot_every == 0: 171 | # pose_graph.plot(title=f"{name}_{vertex_idx}") 172 | 173 | return pose_graph 174 | -------------------------------------------------------------------------------- /2d_lidar_slam/src/graph.py: 
-------------------------------------------------------------------------------- 1 | from functools import reduce 2 | 3 | import matplotlib.pyplot as plt 4 | import mpl_toolkits.mplot3d as Axes3D 5 | import numpy as np 6 | from scipy.sparse import SparseEfficiencyWarning, lil_matrix 7 | from scipy.sparse.linalg import spsolve 8 | 9 | from chi2_grad_hess import _Chi2GradientHessian 10 | from edge_odometry import EdgeOdometry 11 | from pose_se2 import PoseSE2 12 | from vertex import Vertex 13 | 14 | EPS = np.finfo(float).eps 15 | 16 | 17 | class Graph: 18 | def __init__(self, edges, vertices): 19 | 20 | self._edges = edges 21 | self._vertices = vertices 22 | 23 | self._chi2 = None 24 | self._gradient = None 25 | self._hessian = None 26 | 27 | self._link_edges() 28 | 29 | def _link_edges(self): 30 | 31 | index_id_dict = {i: v.id for i, v in enumerate(self._vertices)} 32 | id_index_dict = {v_id: v_index for v_index, v_id in index_id_dict.items()} 33 | 34 | for v in self._vertices: 35 | v.index = id_index_dict[v.id] 36 | 37 | for e in self._edges: 38 | e.vertices = [self._vertices[id_index_dict[v_id]] for v_id in e.vertex_ids] 39 | 40 | def add_edge(self, vertices, measurement, information): 41 | edge = EdgeOdometry( 42 | vertex_ids=vertices, information=information, estimate=measurement 43 | ) 44 | self._edges.append(edge) 45 | self._link_edges() 46 | 47 | def add_vertex(self, id, pose): 48 | vertex = Vertex(vertex_id=id, pose=pose) 49 | self._vertices.append(vertex) 50 | self._link_edges() 51 | 52 | def get_pose(self, idx): 53 | vert = self._vertices[idx] 54 | return vert.pose 55 | 56 | def get_rt_matrix(self, idx): 57 | pose = self.get_pose(idx) 58 | return pose.get_rt_matrix() 59 | 60 | def calc_chi2(self): 61 | self._chi2 = sum((e.calc_chi2() for e in self._edges))[0, 0] 62 | return self._chi2 63 | 64 | def _calc_chi2_grad_hess(self): 65 | n = len(self._vertices) 66 | dim = len(self._vertices[0].pose.arr.squeeze()) 67 | chi2_gradient_hessian = reduce( 68 | _Chi2GradientHessian.update, 69 | (e.calc_chi2_gradient_hessian() for e in self._edges), 70 | _Chi2GradientHessian(dim), 71 | ) 72 | # breakpoint() 73 | self._chi2 = chi2_gradient_hessian.chi2[0, 0] 74 | 75 | # Populate the gradient vector 76 | self._gradient = np.zeros(n * dim, dtype=np.float64) 77 | for idx, contrib in chi2_gradient_hessian.gradient.items(): 78 | self._gradient[idx * dim : (idx + 1) * dim] += contrib 79 | 80 | # Populate the Hessian matrix 81 | self._hessian = lil_matrix((n * dim, n * dim), dtype=np.float64) 82 | for (row_idx, col_idx), contrib in chi2_gradient_hessian.hessian.items(): 83 | self._hessian[ 84 | row_idx * dim : (row_idx + 1) * dim, col_idx * dim : (col_idx + 1) * dim 85 | ] = contrib 86 | 87 | if row_idx != col_idx: 88 | # mirror the hessian along diagonal 89 | self._hessian[ 90 | col_idx * dim : (col_idx + 1) * dim, 91 | row_idx * dim : (row_idx + 1) * dim, 92 | ] = np.transpose(contrib) 93 | 94 | def optimize(self, tol=1e-4, max_iter=40, fix_first_pose=True): 95 | n = len(self._vertices) 96 | dim = len(self._vertices[0].pose.arr.squeeze()) 97 | 98 | prev_chi2_err = -1 99 | 100 | # For displaying the optimization progress 101 | print("\nIteration chi^2 rel. 
change") 102 | print("--------- ----- -----------") 103 | 104 | for i in range(max_iter): 105 | self._calc_chi2_grad_hess() 106 | 107 | # only do this for the 2nd iteration onwards 108 | if i > 0: 109 | rel_diff = (prev_chi2_err - self._chi2) / (prev_chi2_err + EPS) 110 | print(f"{i:9} {self._chi2:20.4f} {-rel_diff:18.6f}") 111 | if (self._chi2 < prev_chi2_err) and rel_diff < tol: 112 | return 113 | else: 114 | print(f"{i:9} {self._chi2:20.4f}") 115 | 116 | # Store the prev chi2 error 117 | prev_chi2_err = self._chi2 118 | if fix_first_pose: 119 | self._hessian[:dim, :] = 0.0 120 | self._hessian[:, :dim] = 0.0 121 | self._hessian[:dim, :dim] += np.eye(dim) 122 | self._gradient[:dim] = 0.0 123 | 124 | # run solver 125 | dx = spsolve(self._hessian.tocsr(), -self._gradient) 126 | 127 | # apply 128 | for v, dxi in zip(self._vertices, np.split(dx, n)): 129 | v.pose += PoseSE2.from_array(dxi) 130 | self.calc_chi2() 131 | rel_diff = (prev_chi2_err - self._chi2) / (prev_chi2_err + EPS) 132 | # breakpoint() 133 | print(f"{i:9} {self._chi2:20.4f} {-rel_diff:18.6f}") 134 | 135 | def plot( 136 | self, 137 | vertex_color="r", 138 | vertex_maker="o", 139 | vertex_markersize=3, 140 | edge_color="b", 141 | title=None, 142 | ): 143 | 144 | fig = plt.figure() 145 | 146 | for e in self._edges: 147 | e.plot(edge_color) 148 | # xy = np.array([self._vertices[v_indx].pose.position for v_indx in e.vertex_ids]) 149 | # plt.plot(xy[:, 0], xy[:, 1], color=edge_color) 150 | 151 | for v in self._vertices: 152 | v.plot(vertex_color, vertex_maker, vertex_markersize) 153 | plt.savefig(f"{title}.png") 154 | plt.show() 155 | -------------------------------------------------------------------------------- /2d_lidar_slam/src/icp.py: -------------------------------------------------------------------------------- 1 | # Original code: https://github.com/ClayFlannigan/icp 2 | # Modified to reject pairs that have greater distance than the specified threshold 3 | # Add covariance check 4 | 5 | import numpy as np 6 | from sklearn.neighbors import NearestNeighbors 7 | 8 | def compute_C_k(point1, point2): 9 | d = point1 - point2 10 | alpha = np.pi/2 + np.arctan2(d[1], d[0]) 11 | c = np.cos(alpha) 12 | s = np.sin(alpha) 13 | m = np.array([[c*c, c*s], 14 | [c*s, s*s]]) 15 | return m 16 | 17 | def dC_drho(point1, point2): 18 | eps = 0.001 19 | C_k = compute_C_k(point1, point2) 20 | point1b = point1 + point1 * (eps/np.linalg.norm(point1)) 21 | C_k_eps = compute_C_k(point1b, point2) 22 | return (1/eps) * (C_k_eps - C_k) 23 | 24 | def rotation_matrix(theta): 25 | return np.array([[np.cos(theta), -np.sin(theta)], 26 | [np.sin(theta), np.cos(theta)]]) 27 | 28 | def vers(theta): 29 | return np.array([np.cos(theta), np.sin(theta)]) 30 | 31 | def compute_covariance(laser_ref, laser_sens, t, theta, angles): 32 | # Reference: https://censi.science/research/robot-perception/icpcov/ 33 | d2J_dxdy1 = np.zeros((3, laser_ref.shape[0])) 34 | d2J_dxdy2 = np.zeros((3, laser_sens.shape[0])) 35 | 36 | d2J_dt2 = np.zeros((2, 2)) 37 | d2J_dt_dtheta = np.zeros((2,1)); 38 | d2J_dtheta2 = np.zeros((1, 1)) 39 | 40 | for i in range(laser_ref.shape[0]-1): 41 | p_i = laser_sens[i] 42 | p_j1 = laser_ref[i] 43 | p_j2 = laser_ref[i+1] 44 | v1 = rotation_matrix(theta + np.pi/2) @ p_i 45 | v2 = rotation_matrix(theta) @ p_i + t - p_j1 46 | v3 = vers(theta + angles[i]) 47 | v4 = vers(theta + angles[i] + np.pi/2) 48 | 49 | C_k = compute_C_k(p_j1, p_j2) 50 | d2J_dt2_k = 2 * C_k 51 | d2J_dt_dtheta_k = 2 * (C_k @ v1) 52 | 53 | v_new = rotation_matrix(theta+np.pi) @ p_i 54 | 
55 | d2J_dtheta2_k = 2 * ((C_k @ v_new @ v2.T) + (C_k @ v1 @ v1.T)) 56 | 57 | d2J_dt2 += d2J_dt2_k 58 | d2J_dt_dtheta += d2J_dt_dtheta_k[:, np.newaxis] 59 | d2J_dtheta2 += d2J_dtheta2_k 60 | d2Jk_dtdrho_i = 2 * C_k @ v3 61 | d2Jk_dtheta_drho_i = 2 * ((C_k @ v4 @ v2.T) + (C_k @ v1 @ v3.T)) 62 | d2J_dxdy2[:, i] += np.hstack([d2Jk_dtdrho_i, d2Jk_dtheta_drho_i]) 63 | dC_drho_j1 = dC_drho(p_j1, p_j2) 64 | dC_drho_j2 = dC_drho(p_j2, p_j1) 65 | v_j1 = vers(angles[i]) # Probably wrong 66 | d2Jk_dt_drho_j1 = (-2 * (C_k @ v_j1)) + (2 * (dC_drho_j1 @ v2)) 67 | d2Jk_dtheta_drho_j1 = (-2 * (C_k @ v1 @ v_j1.T)) + (dC_drho_j1 @ v1 @ v2.T) 68 | d2J_dxdy1[:, i] += np.hstack([d2Jk_dt_drho_j1, d2Jk_dtheta_drho_j1]) 69 | d2Jk_dt_drho_j2 = 2 * (dC_drho_j2 @ v2) 70 | d2Jk_dtheta_drho_j2 = 2 * (dC_drho_j2 @ v1 @ v2.T) 71 | d2J_dxdy1[:, i+1] += np.hstack([d2Jk_dt_drho_j2, d2Jk_dtheta_drho_j2]) 72 | 73 | d2J_dx2 = np.vstack([np.hstack([d2J_dt2, d2J_dt_dtheta]), 74 | np.hstack([d2J_dt_dtheta.T, d2J_dtheta2])]) 75 | 76 | edx_dy1 = -np.linalg.inv(d2J_dx2) @ d2J_dxdy1 77 | edx_dy2 = -np.linalg.inv(d2J_dx2) @ d2J_dxdy2 78 | 79 | ecov0_x = edx_dy1 @ edx_dy1.T + edx_dy2 @ edx_dy2.T 80 | 81 | return ecov0_x, edx_dy1, edx_dy2 82 | 83 | def best_fit_transform(A, B): 84 | ''' 85 | Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions 86 | Input: 87 | A: Nxm numpy array of corresponding points 88 | B: Nxm numpy array of corresponding points 89 | Returns: 90 | T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B 91 | R: mxm rotation matrix 92 | t: mx1 translation vector 93 | ''' 94 | 95 | assert A.shape == B.shape 96 | 97 | # get number of dimensions 98 | m = A.shape[1] 99 | 100 | # translate points to their centroids 101 | centroid_A = np.mean(A, axis=0) 102 | centroid_B = np.mean(B, axis=0) 103 | AA = A - centroid_A 104 | BB = B - centroid_B 105 | 106 | # rotation matrix 107 | H = np.dot(AA.T, BB) 108 | U, S, Vt = np.linalg.svd(H) 109 | R = np.dot(Vt.T, U.T) 110 | 111 | # special reflection case 112 | if np.linalg.det(R) < 0: 113 | Vt[m-1,:] *= -1 114 | R = np.dot(Vt.T, U.T) 115 | 116 | # translation 117 | t = centroid_B.T - np.dot(R,centroid_A.T) 118 | 119 | # homogeneous transformation 120 | T = np.identity(m+1) 121 | T[:m, :m] = R 122 | T[:m, m] = t 123 | 124 | return T, R, t 125 | 126 | 127 | def nearest_neighbor(src, dst): 128 | ''' 129 | Find the nearest (Euclidean) neighbor in dst for each point in src 130 | Input: 131 | src: Nxm array of points 132 | dst: Nxm array of points 133 | Output: 134 | distances: Euclidean distances of the nearest neighbor 135 | indices: dst indices of the nearest neighbor 136 | ''' 137 | 138 | assert src.shape == dst.shape 139 | 140 | neigh = NearestNeighbors(n_neighbors=1) 141 | neigh.fit(dst) 142 | distances, indices = neigh.kneighbors(src, return_distance=True) 143 | return distances.ravel(), indices.ravel() 144 | 145 | 146 | def icp(A, B, init_pose=None, max_iterations=20, tolerance=0.001): 147 | ''' 148 | The Iterative Closest Point method: finds best-fit transform that maps points A on to points B 149 | Input: 150 | A: Nxm numpy array of source mD points 151 | B: Nxm numpy array of destination mD point 152 | init_pose: (m+1)x(m+1) homogeneous transformation 153 | max_iterations: exit algorithm after max_iterations 154 | tolerance: convergence criteria 155 | Output: 156 | T: final homogeneous transformation that maps A on to B 157 | distances: Euclidean distances (errors) of the nearest neighbor 158 | i: number of 
iterations to converge
        cov: covariance of the estimated transform (currently a placeholder identity; see the FIXME below)
159 |     '''
160 | 
161 |     assert A.shape == B.shape
162 | 
163 |     # get number of dimensions
164 |     m = A.shape[1]
165 | 
166 |     # make points homogeneous, copy them to maintain the originals
167 |     src = np.ones((m+1,A.shape[0]))
168 |     dst = np.ones((m+1,B.shape[0]))
169 |     src[:m,:] = np.copy(A.T)
170 |     dst[:m,:] = np.copy(B.T)
171 | 
172 |     # apply the initial pose estimation
173 |     if init_pose is not None:
174 |         src = np.dot(init_pose, src)
175 | 
176 |     prev_error = 0
177 | 
178 |     for i in range(max_iterations):
179 |         # find the nearest neighbors between the current source and destination points
180 |         distances, indices = nearest_neighbor(src[:m,:].T, dst[:m,:].T)
181 | 
182 |         # Drop source points beyond the 80 m sensor range, then reject pairs more than 1 meter apart
183 |         indices = indices[np.linalg.norm(src[:m, :], axis=0) < 80]
184 |         distances = distances[np.linalg.norm(src[:m, :], axis=0) < 80]
185 |         filtered_src = src[:, np.linalg.norm(src[:m, :], axis=0) < 80]
186 |         indices = indices[distances < 1.0]
187 | 
188 |         # compute the transformation between
189 |         # the current source and nearest destination points
190 |         T,_,_ = best_fit_transform(filtered_src[:m, distances < 1.0].T, dst[:m,indices].T)
191 | 
192 |         # distances = distances[distances <= 1.0]
193 | 
194 |         # update the current source
195 |         src = np.dot(T, src)
196 | 
197 |         # check error
198 |         mean_error = np.mean(distances)
199 |         if np.abs(prev_error - mean_error) < tolerance:
200 |             break
201 |         prev_error = mean_error
202 | 
203 |     # calculate final transformation
204 |     T,_,_ = best_fit_transform(A, src[:m,:].T)
205 | 
206 | 
207 |     # FIXME: BROKEN
208 |     # theta = np.arctan2(T[1,0], T[0,0])
209 |     # t = T[0:2, 2]
210 |     # angle_res = 1
211 |     # angles = np.arange(-90, 91, angle_res)
212 |     # cov, a, b = compute_covariance(A, src[:m, :].T, t, theta, np.radians(angles))
213 |     cov = np.eye(3)
214 | 
215 |     return T, distances, i, cov
216 | 
--------------------------------------------------------------------------------
/2d_lidar_slam/src/load.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | from edge_odometry import EdgeOdometry
4 | from graph import Graph
5 | from pose_se2 import PoseSE2
6 | from util import upper_triangular_matrix_to_full_matrix
7 | from vertex import Vertex
8 | 
9 | 
10 | def data_loader(data_file):
11 |     """
12 |     Generate SE(2) graph from .g2o file
13 | 
14 |     VERTEX_SE2: ID x y th
15 |     EDGE_SE2: IDout IDin dx dy dth I11 I12 I13 I22 I23 I33
16 | 
17 |     Args:
18 |         data_file (str): Path to .g2o file
19 | 
20 |     Returns:
21 |         graph (Graph): The g2o edges and vertices loaded into our pose graph
22 |     """
23 | 
24 |     edges = []
25 |     vertices = []
26 | 
27 |     with open(data_file) as file:
28 | 
29 |         for line in file.readlines():
30 |             line = line.split()
31 | 
32 |             if line[0] == "VERTEX_SE2":
33 |                 vertex_id = int(line[1])
34 |                 arr = np.array([float(number) for number in line[2:]], dtype=np.float64)
35 |                 p = PoseSE2(arr[:2], arr[2])
36 |                 v = Vertex(vertex_id, p)
37 |                 vertices.append(v)
38 |                 continue
39 | 
40 |             if line[0] == "EDGE_SE2":
41 | 
42 |                 vertex_ids = [int(line[1]), int(line[2])]
43 |                 arr = np.array([float(number) for number in line[3:]], dtype=np.float64)
44 | 
45 |                 estimate = PoseSE2(arr[:2], arr[2])
46 |                 information = upper_triangular_matrix_to_full_matrix(arr[3:], 3)
47 |                 e = EdgeOdometry(vertex_ids, information, estimate)
48 | 
49 |                 edges.append(e)
50 |                 continue
51 | 
52 |     return Graph(edges, vertices)
53 | 
54 | 
--------------------------------------------------------------------------------
/2d_lidar_slam/src/loop_closure.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.spatial  # needed for scipy.spatial.cKDTree below
3 | 
4 | import icp
5 | from pose_se2 import PoseSE2
6 | 
7 | """
8 | Detects loop closures and adds the corresponding edge between the current
9 | pose and detected loop closure pose
10 | Input:
11 | (earlier poses are looked up from the graph object g itself)
12 | @param curr_pose pose of the vertex being added to the graph
13 | @param curr_idx index of current pose
14 | @param laser laser readings at every pose
15 | @param g pose graph optimizer class object
16 | """
17 | 
18 | 
19 | def find_loop_closure(curr_pose, curr_idx, laser, g):
20 |     # print("Attempt: Loop Closure")
21 |     kdTreeR = 4.25
22 |     poses = np.array(
23 |         [g.get_pose(idx).arr[0:2] for idx in range(curr_idx - 1)]
24 |     ).squeeze()
25 |     # breakpoint()
26 |     kdTree = scipy.spatial.cKDTree(poses)
27 |     # breakpoint()
28 |     idxs = kdTree.query_ball_point(curr_pose.arr[0:2].T, kdTreeR)[0]
29 | 
30 |     loopThresh = 0.15
31 |     for i in idxs:
32 |         # breakpoint()
33 |         with np.errstate(all="raise"):
34 |             try:
35 |                 tf, dist, _, cov = icp.icp(
36 |                     laser[i],
37 |                     laser[curr_idx],
38 |                     np.eye(3),
39 |                     max_iterations=80,
40 |                     tolerance=0.0001,
41 |                 )
42 |             except Exception as e:
43 |                 print("ICP Exception", e)
44 |                 continue
45 | 
46 |         if np.mean(dist) < loopThresh:
47 |             # print("Success: Loop Closure")
48 |             g.add_edge(
49 |                 [curr_idx, i], PoseSE2.from_rt_matrix(tf), np.linalg.inv(cov)
50 |             )
51 | 
--------------------------------------------------------------------------------
/2d_lidar_slam/src/main_clf.py:
--------------------------------------------------------------------------------
1 | from load import data_loader
2 | import sys
3 | import os
4 | 
5 | import frontend
6 | 
7 | if __name__ == "__main__":
8 |     if len(sys.argv) > 1:
9 |         data_file = sys.argv[1]
10 |     else:
11 |         data_file = "../data/intel.clf"
12 |     name = os.path.splitext(os.path.split(data_file)[-1])[0]
13 |     if "_" in name:
14 |         # breakpoint()
15 |         name = name.split("_")[-2]
16 | 
17 |     final_graph = frontend.run(data_file, name, save_gif=True, plot_every=100)
18 |     # g = data_loader(data_file)
19 | 
20 |     # g.plot(title=f"Before_{name}")
21 | 
22 |     # print("Error before optimization: ", g.calc_chi2())
23 |     # g.optimize()
24 |     final_graph.plot(title=f"After_{name}")
--------------------------------------------------------------------------------
/2d_lidar_slam/src/main_g2o.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | 
4 | from load import data_loader
5 | 
6 | if __name__ == "__main__":
7 |     if len(sys.argv) > 1:
8 |         data_file = sys.argv[1]
9 |     else:
10 |         data_file = "../data/input_INTEL_g2o.g2o"
11 |     name = os.path.splitext(os.path.split(data_file)[-1])[0]
12 |     if "_" in name:
13 |         # breakpoint()
14 |         name = name.split("_")[-2]
15 | 
16 |     g = data_loader(data_file)
17 | 
18 |     g.plot(title=f"./../results/Before_{name}")
19 | 
20 |     print("Error before optimization: ", g.calc_chi2())
21 |     g.optimize()
22 |     g.plot(title=f"./../results/After_{name}")
23 | 
--------------------------------------------------------------------------------
/2d_lidar_slam/src/pose_se2.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy as np
3 | from util import warp2pi
4 | 
5 | 
6 | class PoseSE2:
7 |     def __init__(self, position, orientation):
8 |         self.position = position
9 |         self.orientation = warp2pi(orientation)
10 |         self.arr = np.array(
11 |             [[position[0]], [position[1]], [warp2pi(orientation)]], dtype=np.float64
12 |         )
13 | 
14 |     @staticmethod
15 |     def from_array(arr):
16 |         return PoseSE2((arr[0], arr[1]), warp2pi(arr[2]))
17 | 
18 |     @staticmethod
19 |     def from_rt_matrix(mat):
20 |         # mat: 3x3 2D [R,t]
21 |         R = mat[:2, :2]
22 |         x, y = mat[:2, 2]
23 |         theta = np.arctan2(R[1, 0], R[0, 0])
24 |         return PoseSE2.from_array([x, y, theta])
25 | 
26 |     def get_rt_matrix(self):
27 |         x, y, yaw = self.arr
28 |         return np.array(
29 |             [[np.cos(yaw), -np.sin(yaw), x], [np.sin(yaw), np.cos(yaw), y], [0, 0, 1],],
30 |             dtype="float64",
31 |         )
32 | 
33 |     def __sub__(self, other):
34 | 
35 |         x = (self.position[0] - other.position[0]) * np.cos(other.orientation) + (
36 |             self.position[1] - other.position[1]
37 |         ) * np.sin(other.orientation)
38 |         y = (other.position[0] - self.position[0]) * np.sin(other.orientation) + (
39 |             self.position[1] - other.position[1]
40 |         ) * np.cos(other.orientation)
41 |         theta = warp2pi(self.orientation - other.orientation)
42 | 
43 |         return PoseSE2([x, y], theta)
44 | 
45 |     def __add__(self, other):
46 | 
47 |         # SE(2) pose composition (self [+] other): rotate the increment into
48 |         # self's frame; this is the exact inverse of the relative pose in __sub__
49 |         x = self.position[0] + other.position[0] * np.cos(
50 |             self.orientation
51 |         ) - other.position[1] * np.sin(self.orientation)
52 |         y = self.position[1] + other.position[0] * np.sin(
53 |             self.orientation
54 |         ) + other.position[1] * np.cos(self.orientation)
55 |         theta = warp2pi(self.orientation + other.orientation)
56 | 
57 |         return PoseSE2([x, y], theta)
58 | 
59 |     def copy(self):
60 |         return PoseSE2(self.position, self.orientation)
61 | 
62 |     def __str__(self):
63 |         return str(self.arr)
--------------------------------------------------------------------------------
/2d_lidar_slam/src/util.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | def warp2pi(angle_rad):
4 |     """
5 |     Warps an angle to the range [-pi, pi]
6 | 
7 |     Args:
8 |         angle_rad (float): An angle (in radians)
9 | 
10 |     Returns:
11 |         float: The angle normalized to [-pi, pi]
12 |     """
13 |     if(angle_rad > np.pi):
14 |         while(angle_rad > np.pi):
15 |             angle_rad -= 2*np.pi
16 |     elif (angle_rad < -np.pi):
17 |         while(angle_rad < -np.pi):
18 |             angle_rad += 2*np.pi
19 |     return angle_rad
20 | 
21 | def upper_triangular_matrix_to_full_matrix(arr, n):
22 | 
23 |     triu0 = np.triu_indices(n, 0)
24 |     triu1 = np.triu_indices(n, 1)
25 |     tril1 = np.tril_indices(n, -1)
26 | 
27 |     mat = np.zeros((n, n), dtype=np.float64)
28 |     mat[triu0] = arr
29 |     # filling the lower left-half with same values as right-half
30 |     mat[tril1] = mat[triu1]
31 |     return mat
--------------------------------------------------------------------------------
/2d_lidar_slam/src/vertex.py:
--------------------------------------------------------------------------------
1 | 
2 | from pose_se2 import PoseSE2
3 | import matplotlib.pyplot as plt
4 | class Vertex:
5 | 
6 |     def __init__(self, vertex_id, pose, vertex_index=None):
7 |         """
8 |         A class for representing a vertex in Graph SLAM
9 | 
10 |         Args:
11 |             vertex_id (int): The vertex's unique id
12 |             pose (PoseSE2): The pose associated with the vertex
13 |             vertex_index (int, optional): The vertex's index in the graph's vertices list. Defaults to None.
14 |         """
15 |         self.id = vertex_id
16 |         self.pose = pose
17 |         self.index = vertex_index
18 | 
19 |     def plot(self, color='r', marker='o', marker_size=3):
20 | 
21 |         x, y = self.pose.position
22 |         plt.plot(x, y, color=color, marker=marker, markersize=marker_size)
--------------------------------------------------------------------------------
/Graph_SLAM_Report.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/Graph_SLAM_Report.pdf
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Graph-SLAM
2 | 
3 | In this project we have implemented a graph-based SLAM pipeline using 2D laser data and odometry information. We also extended our implementation to accommodate visual data in our SLAM frontend.
4 | 
5 | These are two separate Graph-SLAM pipelines. The individual setup and execution instructions are provided in the READMEs of the respective implementation directories. Please refer to the [project report](Graph_SLAM_Report.pdf) for a detailed explanation.
6 | 
7 | ## 2D Lidar SLAM
8 | In this module we implement custom frontend and backend components. The backend consists of a custom 2D pose graph construction and optimization implementation. The frontend uses odometry, lidar scans, and scan matching to compute graph poses. Loop closure constraints are added using ICP. We first implemented the backend graph optimization (`main_g2o.py`) using the processed g2o data from the [Luca Carlone datasets](https://lucacarlone.mit.edu/datasets/). Once this was achieved, we moved on to the raw data provided by the [Intel Research Lab](http://ais.informatik.uni-freiburg.de/slamevaluation/datasets.php) and built our own pose graph from the laser and odometry information.
9 | 
10 | Pose Graph before optimization | Pose Graph after optimization
11 | :-------------------------:|:-------------------------:
12 | ![](2d_lidar_slam/results/graph_optimization/Before_INTEL.png) | ![](2d_lidar_slam/results/graph_optimization/After_INTEL.png)
13 | 
14 | Real-Time Pose Graph Generation and Optimization after Loop Closures
15 | :-------------------------:|
16 | ![](2d_lidar_slam/results/slam_intel_3700.gif)
17 | 
18 | ## Visual SLAM
19 | 
20 | In this module we implemented a monocular visual odometry pipeline for the frontend. For the backend we create a pose graph structure based on the [g2o library's python wrapper](https://github.com/uoip/g2opy), and optimize it with the Sparse Optimizer from the g2o library. The visual odometry frontend is built on 2D-2D feature correspondences between consecutive frames. We use only the left camera images from the stereo pair provided in the [KITTI dataset](http://www.cvlibs.net/datasets/kitti/eval_odometry.php) and perform monocular visual odometry. We use a SIFT feature detector and a Lucas-Kanade tracker to track features across frames, and compute the relative pose between frames from the Essential Matrix obtained after RANSAC. Using the computed poses we construct our pose graph. To simplify the scope of the project we use [ground truth poses](http://www.cvlibs.net/datasets/kitti/eval_odometry.php) to detect loop closures. A sketch of this per-frame frontend step is shown below.
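The following is a minimal illustration of one visual odometry step using OpenCV's `cv2.SIFT_create`, `cv2.calcOpticalFlowPyrLK`, `cv2.findEssentialMat`, and `cv2.recoverPose`; the helper name `relative_pose` is illustrative, and the actual frontend in `visual_slam/run_slam.py` may differ in its details. Note that a monocular pipeline recovers translation only up to scale.

```python
import cv2
import numpy as np

def relative_pose(prev_img, cur_img, K):
    """Sketch of one VO step: SIFT detection, LK tracking, essential-matrix pose."""
    # Detect SIFT keypoints in the previous (grayscale) frame
    sift = cv2.SIFT_create()
    kps = sift.detect(prev_img, None)
    p_prev = np.float32([kp.pt for kp in kps]).reshape(-1, 1, 2)

    # Track the keypoints into the current frame with pyramidal Lucas-Kanade
    p_cur, status, _err = cv2.calcOpticalFlowPyrLK(prev_img, cur_img, p_prev, None)
    good_prev = p_prev[status.ravel() == 1]
    good_cur = p_cur[status.ravel() == 1]

    # Estimate the essential matrix with RANSAC, then decompose it into R, t
    E, inliers = cv2.findEssentialMat(good_prev, good_cur, K,
                                      method=cv2.RANSAC, prob=0.999, threshold=1.0)
    _, R, t, _ = cv2.recoverPose(E, good_prev, good_cur, K, mask=inliers)
    return R, t  # rotation and unit-norm translation direction (scale unknown)
```

Chaining these relative transforms yields the vertices of the pose graph; the loop-closure edges (obtained here from the ground truth poses) then let the g2o optimizer correct the accumulated drift.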
21 | 
22 | Sequence 00 | Sequence 05 | Sequence 07
23 | :----------:|:-----------:|:-----------:
24 | ![](visual_slam/data/sequence00.gif) | ![](visual_slam/data/sequence05.gif)| ![](visual_slam/data/sequence07.gif)
25 | - white represents the ground truth poses.
26 | - red represents the pose graph without optimization.
27 | - green represents the pose graph after optimization as new loop closures are detected.
28 | 
29 | ## References
30 | 
31 | [1] G. Grisetti, R. Kümmerle, C. Stachniss, and W. Burgard, “A tutorial on graph-based SLAM,” IEEE Intelligent Transportation Systems Magazine, vol. 2, no. 4, pp. 31–43, 2010.
32 | 
33 | [2] “Python graph SLAM reference repository (backend).” https://github.com/JeffLIrion/python-graphslam.
34 | 
35 | [3] “g2o python wrapper.” https://github.com/uoip/g2opy.
36 | 
37 | [4] “Graph SLAM formulation.” https://github.com/AtsushiSakai/PythonRobotics/blob/master/SLAM/GraphBasedSLAM/graphSLAM_formulation.pdf.
38 | 
39 | [5] “Python graph SLAM reference repository (frontend).” https://github.com/goktug97/PyGraphSLAM.
40 | 
41 | [6] “ICP implementation for frontend.” https://github.com/ClayFlannigan/icp.
42 | 
43 | [7] “Monocular visual SLAM.” https://github.com/sakshamjindal/Monocular-MiniSLAM.
44 | 
45 | [8] “Repository of laser and odometry data in CARMEN format at Uni Freiburg.” http://ais.informatik.uni-freiburg.de/slamevaluation/index.php.
46 | 
47 | [9] “g2o format datasets by Luca Carlone.” https://lucacarlone.mit.edu/datasets/
48 | 
--------------------------------------------------------------------------------
/visual_slam/=1.18.3:
--------------------------------------------------------------------------------
1 | Requirement already satisfied: numpy in /home/mandeep/.local/lib/python3.6/site-packages (1.19.5)
--------------------------------------------------------------------------------
/visual_slam/README.md:
--------------------------------------------------------------------------------
1 | # VISUAL SLAM
2 | 
3 | In this module we implemented a monocular visual odometry pipeline for the frontend. For the backend we create a pose graph structure based on the [g2o library's python wrapper](https://github.com/uoip/g2opy), and optimize it with the Sparse Optimizer from the g2o library. The visual odometry frontend is built on 2D-2D feature correspondences between consecutive frames. We use only the left camera images from the stereo pair provided in the [KITTI dataset](http://www.cvlibs.net/datasets/kitti/eval_odometry.php) and perform monocular visual odometry. We use a SIFT feature detector and a Lucas-Kanade tracker to track features across frames, and compute the relative pose between frames from the Essential Matrix obtained after RANSAC. Using the computed poses we construct our pose graph. To simplify the scope of the project we use [ground truth poses](http://www.cvlibs.net/datasets/kitti/eval_odometry.php) to detect loop closures.
4 | 
5 | ## Setup and Usage
6 | 
7 | ```
8 | conda create -n graph_slam python=3.6
9 | conda activate graph_slam
10 | 
11 | . install_all.sh
12 | 
13 | cd visual_slam
14 | python run_slam.py --path ../KITTI_dataset/sequences/00/ --gt_loops ../KITTI_GroundTruth/gnd_kitti00.mat
15 | ```
16 | 
17 | ## Results
18 | 
19 | ### Optimized Pose Graphs
20 | Sequence 00 | Sequence 05 | Sequence 07
21 | :----------:|:-----------:|:-----------:
22 | ![](data/sequence00.gif) | ![](data/sequence05.gif)| ![](data/sequence07.gif)
23 | - white represents the ground truth poses.
24 | - red represents the pose graph without optimization.
25 | - green represents the pose graph after optimization as new loop closures are detected.
26 | 
27 | ### Translation Errors
28 | Sequence 00 | Sequence 05 | Sequence 07
29 | :----------:|:-----------:|:-----------:
30 | ![](data/s00.png) | ![](data/s05.png)| ![](data/s07.png)
31 | 
32 | ## References
33 | [1] https://github.com/luigifreda/pyslam
34 | 
--------------------------------------------------------------------------------
/visual_slam/bash_utils.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # a collection of bash utils
4 | 
5 | # ====================================================
6 | function get_after_last_slash(){
7 |     ret=$(echo $1 | sed 's:.*/::')
8 |     echo $ret
9 | }
10 | function get_virtualenv_name(){
11 |     cmd_out=$(printenv | grep VIRTUAL_ENV)
12 |     virtual_env_name=$(get_after_last_slash $cmd_out)
13 |     echo $virtual_env_name
14 | }
15 | 
16 | function print_blue(){
17 |     printf "\033[34;1m"
18 |     printf "$@ \n"
19 |     printf "\033[0m"
20 | }
21 | 
22 | function make_dir(){
23 |     if [ ! -d $1 ]; then
24 |         mkdir -p $1
25 |     fi
26 | }
27 | function make_buid_dir(){
28 |     make_dir build
29 | }
30 | 
31 | function check_package(){
32 |     package_name=$1
33 |     PKG_OK=$(dpkg-query -W --showformat='${Status}\n' $package_name |grep "install ok installed")
34 |     #echo "checking for $package_name: $PKG_OK"
35 |     if [ "" == "$PKG_OK" ]; then
36 |         #echo "$package_name is not installed"
37 |         echo 1
38 |     else
39 |         echo 0
40 |     fi
41 | }
42 | function install_package(){
43 |     do_install=$(check_package $1)
44 |     if [ $do_install -eq 1 ] ; then
45 |         sudo apt-get install -y $1
46 |     fi
47 | }
48 | 
49 | function check_pip_package(){
50 |     package_name=$1
51 |     PKG_OK=$(pip3 list --format=legacy |grep $package_name)
52 |     #echo "checking for $package_name: $PKG_OK"
53 |     if [ "" == "$PKG_OK" ]; then
54 |         #echo "$package_name is not installed"
55 |         echo 1
56 |     else
57 |         echo 0
58 |     fi
59 | }
60 | function check_pip_package2(){
61 |     package_name=$1
62 |     if python3 -c "import "$package_name &> /dev/null; then
63 |         echo 0
64 |     else
65 |         #echo "$package_name is not installed"
66 |         echo 1
67 |     fi
68 | }
69 | function install_pip_package(){
70 |     do_install=$(check_pip_package $1)
71 |     virtual_env=$(get_virtualenv_name)
72 |     if [ $do_install -eq 1 ] ; then
73 |         if [ "" == "$virtual_env" ]; then
74 |             pip3 install --user $1
75 |         else
76 |             pip3 install $1 # inside a virtual environment the option `--user` would make pip3 install things outside the env
77 |         fi
78 |     fi
79 | }
80 | function install_pip_packages(){
81 |     for var in "$@"
82 |     do
83 |         install_pip_package "$var"
84 |     done
85 | }
86 | 
87 | function set_git_modules() {
88 |     #print_blue "setting up git submodules"
89 |     git submodule init --
90 |     git submodule sync --recursive
91 |     git submodule update --init --recursive
92 | }
93 | 
94 | function update_git_modules() {
95 |     git submodule update --recursive --remote
96 | }
97 | 
98 | function pause(){
99 |     read -s -n 1 -p "Press any key to continue . . ."
100 |     echo ""
101 | }
102 | 
103 | # ====================================================
104 | 
105 | 
--------------------------------------------------------------------------------
/visual_slam/core/dataset.py:
--------------------------------------------------------------------------------
1 | # Added from Stereo Visual Odometry repository
2 | # Originally present at : https://github.com/sakshamjindal/Stereo-Visual-Odometry-SFM/blob/master/stereoVO/datasets/KITTI_Dataset.py
3 | 
4 | import cv2
5 | import os
6 | import numpy as np
7 | from pathlib import Path
8 | 
9 | __all__ = ['CameraParameters', 'KittiDataset']
10 | 
11 | 
12 | class CameraParameters():
13 |     def __init__(self, fx, fy, cx, cy):
14 | 
15 |         self.fx = fx
16 |         self.fy = fy
17 |         self.cx = cx
18 |         self.cy = cy
19 | 
20 |     @property
21 |     def camera_matrix(self):
22 | 
23 |         matrix = np.array([[self.fx, 0.0, self.cx],
24 |                            [0.0, self.fy, self.cy],
25 |                            [0.0, 0.0, 1.0]])
26 |         return matrix
27 | 
28 |     def __call__(self):
29 | 
30 |         return self.camera_matrix
31 | 
32 | 
33 | class KittiDataset():
34 | 
35 |     """
36 |     Dataloader for the KITTI dataset: (http://www.cvlibs.net/datasets/kitti/)
37 |     The KITTI dataset has been recorded from a moving platform while driving in and around Karlsruhe,
38 |     Germany. It includes camera images, laser scans, high-precision GPS measurements
39 |     and IMU accelerations from a combined GPS/IMU system. The main purpose of this dataset is to
40 |     push forward the development of computer vision and robotic algorithms for autonomous driving.
41 |     """
42 | 
43 |     def __init__(self, path):
44 | 
45 |         """
46 |         :param path (str): path to KITTI sequence (ending with sequence number)
47 |         """
48 | 
49 |         self.path = Path(path)
50 | 
51 |         self.left_images_path = str(self.path.joinpath('image_0'))
52 |         self.right_images_path = str(self.path.joinpath('image_1'))
53 |         self.sequence_count = str(self.path.name)
54 |         self.calibfile = str(self.path.joinpath('calib.txt'))
55 |         self.gt_path = str(self.path.parents[1].joinpath('poses', self.sequence_count + '.txt'))
56 |         # import ipdb
57 |         # ipdb.set_trace()
58 | 
59 |         self.ground_truth = self.load_ground_truth_pose(self.gt_path)
60 |         self.left_image_paths = self.load_image_paths(self.left_images_path)
61 |         self.right_image_paths = self.load_image_paths(self.right_images_path)
62 | 
63 |         self.camera_intrinsic, self.PL, self.PR = self.load_camera_parameters(self.calibfile)
64 |         self.intrinsic = self.camera_intrinsic()
65 | 
66 |         assert len(self.ground_truth) == len(self.left_image_paths)
67 | 
68 |     def convert_text_to_ground_truth(self, gt_line):
69 | 
70 |         """
71 |         Converts a text line to a projection matrix
72 |         :param gt_line (str) : flattened pose as a string
73 |         :return matrix (np.array) : size (3,4) camera pose
74 |         """
75 | 
76 |         matrix = np.array(gt_line.split()).reshape((3, 4)).astype(np.float32)
77 | 
78 |         return matrix
79 | 
80 |     def load_ground_truth_pose(self, gt_path):
81 | 
82 |         """
83 |         Reads the ground truth file and aggregates the poses into a list
84 |         :param gt_path (str) : path to ground truth file with each text line containing true pose of camera
85 |         :return ground_truth (list(np.array)): aggregated ground truth for each frame in sequence
86 |         """
87 | 
88 |         ground_truth = None
89 |         if not os.path.exists(gt_path):
90 |             print("ground truth path is not found.")
91 |             return None
92 | 
93 |         ground_truth = []
94 | 
95 |         with open(gt_path) as gt_file:
96 |             gt_lines = gt_file.readlines()
97 | 
98 |             for gt_line in gt_lines:
99 |                 pose = self.convert_text_to_ground_truth(gt_line)
100 |                 ground_truth.append(pose)
101 | 
102 |         return ground_truth
103 | 
104 | 
105 |     def load_image_paths(self, image_dir):
106 | 
107 |         """
108 |         Returns images in path sorted by the frame number
109 |         :param image_dir (str) : path to image dir
110 |         :return img_paths (list) : image paths sorted by frame number
111 |         """
112 |         img_paths = [os.path.join(image_dir, img_id) for img_id in os.listdir(image_dir) if os.path.isfile(os.path.join(image_dir, img_id))]
113 |         img_paths.sort()
114 | 
115 |         return img_paths
116 | 
117 |     def load_camera_parameters(self, calibfile):
118 | 
119 |         """
120 |         Loads left projection matrix and right projection matrix (both in the left image coordinate frame)
121 |         to project 3D coordinates (left coordinate frame) to left frame and right frame.
122 |         :param calibfile (str) : path to text file containing calibration parameters
123 |         Returns:
124 |             :param (np.array): size (3,3) intrinsic camera matrix
125 |             :projL (np.array): size (3x4) left projection matrix such that x_L = PL * X_w
126 |             :projR (np.array): size (3x4) right projection matrix such that x_R = PR * X_w
127 |                 (where X_w is in the frame of the left camera)
128 |         """
129 | 
130 |         if not os.path.exists(calibfile):
131 |             print("camera parameter file path is not found.")
132 |             return None
133 | 
134 |         calibParams = open(calibfile, 'r').readlines()
135 | 
136 |         P1Vals = calibParams[0].split()
137 |         P2Vals = calibParams[1].split()
138 | 
139 |         projL = np.array([[float(P1Vals[row*4 + column + 1]) for column in range(4)] for row in range(3)])
140 |         projR = np.array([[float(P2Vals[row*4 + column + 1]) for column in range(4)] for row in range(3)])
141 | 
142 |         param = CameraParameters(float(P1Vals[1]), float(P1Vals[6]),
143 |                                  float(P1Vals[3]), float(P1Vals[7]))
144 | 
145 |         return param, projL, projR
146 | 
147 |     def __len__(self):
148 | 
149 |         """
150 |         Fetches the number of images returned by the dataloader
151 |         """
152 | 
153 |         return len(self.left_image_paths)
154 | 
155 |     def __getitem__(self, index):
156 | 
157 |         """
158 |         Fetches the left frame, right frame and ground truth pose for a particular frame number (or time instant)
159 |         :param index (int): frame number (index of stereo state)
160 |         Returns
161 |             :img_left (np.array): size(H,W) left frame of a stereo configuration for a particular frame number
162 |             :img_right (np.array): size(H,W) right frame of a stereo configuration for a particular frame number
163 |             :true_pose (np.array): size(3,4) true pose of left camera of stereo state relative to initial state
164 |         """
165 | 
166 |         img_left = cv2.imread(self.left_image_paths[index])
167 |         img_right = cv2.imread(self.right_image_paths[index])
168 | 
169 |         img_left = cv2.cvtColor(img_left, cv2.COLOR_BGR2GRAY)
170 |         img_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2GRAY)
171 | 
172 |         true_pose = self.ground_truth[index]
173 | 
174 |         return img_left, img_right, true_pose
--------------------------------------------------------------------------------
/visual_slam/core/display2D.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | 
3 | class Displayer:
4 |     def __init__(self, win_name):
5 |         self.win_name = win_name
6 |         cv.namedWindow(self.win_name, cv.WINDOW_AUTOSIZE)
7 |     def __del__(self):
8 |         cv.destroyWindow(self.win_name)
9 |     def display(self, image, delay):
10 |         cv.imshow(self.win_name, image)
11 |         cv.waitKey(delay)
12 | 
13 | def draw_keypoints(img, kps):
14 |     for kp in kps:
15 |         u, v = int(round(kp.pt[0])), int(round(kp.pt[1]))
16 |         cv.circle(img, (u,v), radius=2, color=(0, 255, 0), 
thickness=1) 17 | 18 | def draw_relative_movements(img, matched_uvs): 19 | for match in matched_uvs: 20 | prev_kp = match[0] 21 | cur_kp = match[1] 22 | cv.line(img, prev_kp, cur_kp, (255, 0, 0), thickness = 1) -------------------------------------------------------------------------------- /visual_slam/core/display3D.py: -------------------------------------------------------------------------------- 1 | import pangolin 2 | import cv2 3 | import numpy as np 4 | import OpenGL.GL as gl 5 | from multiprocessing import Process, Queue, Event 6 | 7 | vt_done = Event() 8 | 9 | class Viewer3D(object): 10 | 11 | w_i, h_i = (600, 200) 12 | 13 | def __init__(self): 14 | 15 | self.state = None 16 | self.state_gt = None 17 | self.state_optimized = None 18 | self.q_poses = Queue() 19 | self.q_poses_optimized = Queue() 20 | self.q_gt = Queue() 21 | self.q_img = Queue() 22 | self.q_errors = Queue() 23 | 24 | self.poses = [] 25 | self.gt = [] 26 | self.poses.append(np.eye(4)) 27 | self.gt.append(np.eye(4)) 28 | 29 | self.vt = Process(target=self.viewer_thread, args=(self.q_poses,self.q_gt,self.q_img,self.q_errors, self.q_poses_optimized)) 30 | self.vt.daemon = True 31 | self.vt.start() 32 | 33 | def viewer_thread(self, q_poses, q_gt, q_img, q_errors, q_poses_optimized): 34 | self.viewer_init() 35 | 36 | while not pangolin.ShouldQuit():#True: 37 | # print('refresh') 38 | self.viewer_refresh(q_poses,q_gt, q_img, q_errors, q_poses_optimized) 39 | print("you hit quit") 40 | vt_done.set() 41 | #return None 42 | #self.stop() 43 | 44 | def viewer_init(self): 45 | w, h = (1024,768) 46 | f = 2000 #420 47 | 48 | pangolin.CreateWindowAndBind("Raw vs Optimized Trajectory", w, h) 49 | gl.glEnable(gl.GL_DEPTH_TEST) #prevents point overlapping issue, check out fake-stereo's issues for more info 50 | 51 | # Projection and ModelView Matrices 52 | self.scam = pangolin.OpenGlRenderState( 53 | pangolin.ProjectionMatrix(w, h, f, f, w //2, h//2, 0.1, 100000), 54 | pangolin.ModelViewLookAt(0, -50.0, -10.0, 55 | 0.0, 0.0, 0.0, 56 | 0.0, -1.0, 0.0))#pangolin.AxisDirection.AxisY)) 57 | self.handler = pangolin.Handler3D(self.scam) 58 | 59 | # Interactive View in Window 60 | self.dcam = pangolin.CreateDisplay() 61 | self.dcam.SetBounds(0.0, 1.0, 0.0, 1.0, -w/h) 62 | self.dcam.SetHandler(self.handler) 63 | self.dcam.Activate() 64 | 65 | 66 | #Image viewport 67 | 68 | self.dimg = pangolin.Display('image') 69 | self.dimg.SetBounds(0, self.h_i/h, 1-self.w_i/w, 1.0, -w/h) 70 | self.dimg.SetLock(pangolin.Lock.LockLeft, pangolin.Lock.LockTop) 71 | 72 | self.texture = pangolin.GlTexture(self.w_i, self.h_i, gl.GL_RGB, False, 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE) 73 | self.img = np.ones((self.h_i, self.w_i, 3),'uint8')*255 74 | 75 | # Translation error graph 76 | self.log = pangolin.DataLog() 77 | self.labels = ['error_t', 'error_r']#, "error_euclidean"] 78 | self.log.SetLabels(self.labels) 79 | 80 | self.plotter = pangolin.Plotter(self.log,0.0, 1500, -1500, 2500,10, 0.5) 81 | self.plotter.SetBounds(0.0, self.h_i/h, 0.0, 1-self.w_i/w, -w/h) 82 | 83 | self.plotter.Track("$i", "") 84 | 85 | # pangolin.DisplayBase().AddDisplay(self.plotter) 86 | self.errorlog_r, self.errorlog_t = [], [] 87 | 88 | def viewer_refresh(self, q_poses, q_gt, q_img, q_errors, q_poses_optimized): 89 | while not q_poses.empty(): 90 | self.state = q_poses.get() 91 | if True: 92 | while not q_poses_optimized.empty(): 93 | self.state_optimized = q_poses_optimized.get() 94 | if not q_img.empty(): 95 | self.img = q_img.get() 96 | self.img = self.img[::-1, :] 97 | self.img = 
cv2.resize(self.img, (self.w_i, self.h_i)) 98 | 99 | while not q_gt.empty(): 100 | self.state_gt = q_gt.get() 101 | 102 | while not q_errors.empty(): 103 | error_r, error_t = q_errors.get()[-1] 104 | #self.log.Log(errors[0], errors[1], errors[2]) 105 | 106 | self.errorlog_t.append(error_t) 107 | self.errorlog_r.append(error_r) 108 | #print(np.shape(self.errorlog)) 109 | self.log.Log(np.sum(self.errorlog_t), np.sum(self.errorlog_r)) 110 | 111 | # Clear and Activate Screen (we got a real nice shade of gray 112 | gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) 113 | gl.glClearColor(0.15, 0.15, 0.15, 0.0) 114 | #gl.glClearColor(0.0,0.0, 0.0, 0.0) 115 | self.dcam.Activate(self.scam) 116 | 117 | # Render 118 | if self.state is not None: 119 | gl.glLineWidth(1) 120 | # Render current pose 121 | if self.state_gt[0].shape[0] >= 1: 122 | gl.glColor3f(1.0, 1.0, 1.0) 123 | pangolin.DrawCameras(self.state_gt[1:]) 124 | # Render previous keyframes 125 | if self.state[0].shape[0] >= 2: 126 | gl.glColor3f(1.0, 0.0, 0.0) 127 | pangolin.DrawCameras(self.state[:-1]) 128 | 129 | # Render current pose 130 | if self.state[0].shape[0] >= 1: 131 | gl.glColor3f(0.0, 0.0, 1.0) 132 | pangolin.DrawCameras(self.state[-1:]) 133 | if True: 134 | if self.state_optimized is not None: 135 | if self.state_optimized[0].shape[0] >=1: 136 | gl.glColor3f(0.0, 1.0, 0.0) 137 | pangolin.DrawCameras(self.state_optimized) 138 | 139 | 140 | # print(self.img.shape) 141 | # cv2.imshow("test", self.img) 142 | # cv2.waitKey() 143 | 144 | # self.texture.Upload(self.img, gl.GL_RGB, gl.GL_UNSIGNED_BYTE) 145 | 146 | self.dimg.Activate() 147 | gl.glColor3f(1.0, 1.0, 1.0) 148 | 149 | # self.texture.RenderToViewport() 150 | 151 | pangolin.FinishFrame() 152 | 153 | def update(self, vo): 154 | ''' 155 | Add new data to queue 156 | ''' 157 | 158 | if self.q_img is None or self.q_poses is None: 159 | return 160 | # import ipdb; ipdb.set_trace() 161 | self.q_gt.put(np.array(vo.gt)) 162 | self.q_poses.put(vo.raw) 163 | self.q_poses_optimized.put(vo.poses) 164 | 165 | # if len(vo.errors) > 0: 166 | # self.q_errors.put(vo.errors) 167 | # if len(vo.pose_graph.nodes_optimized) > 1: 168 | # self.q_poses_optimized.put(vo.pose_graph.nodes_optimized) 169 | # else: 170 | # self.q_img.put(vo.annotate_frames()) 171 | 172 | def stop(self): 173 | self.vt.terminate() 174 | self.vt.join() 175 | #qtype = type(Queue()) 176 | for x in self.__dict__.values(): 177 | if isinstance(x, type(Queue())): 178 | while not x.empty(): 179 | _ = x.get() 180 | print("viewer stopped") -------------------------------------------------------------------------------- /visual_slam/core/geocom/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/core/geocom/__init__.py -------------------------------------------------------------------------------- /visual_slam/core/geocom/features.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | 4 | lk_params = dict(winSize = (21, 21), 5 | #maxLevel = 3, 6 | criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01)) 7 | 8 | def featureTracking(image_ref, image_cur, px_ref): 9 | 10 | px_ref = px_ref.reshape(-1,1,2).astype('float32') 11 | kp2, st, err = cv2.calcOpticalFlowPyrLK(image_ref, image_cur, px_ref, None, **lk_params) #shape: [k,2] [k,1] [k,1] 12 | 13 | st = st.reshape(st.shape[0]) 14 | kp1 = 
px_ref[st == 1] 15 | kp2 = kp2[st == 1] 16 | 17 | return kp1, kp2 -------------------------------------------------------------------------------- /visual_slam/core/loop_closure.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.io 3 | import cv2 4 | import copy 5 | from .utils import * 6 | 7 | class LoopClosure(): 8 | def __init__(self, path, dataset, intrinsic_mat): 9 | gt_loop_data = scipy.io.loadmat(path) 10 | self.neighbours = gt_loop_data['gnd'] # This is a numpy array of shape (num_images, 1) 11 | self.dataset = dataset 12 | self.K = intrinsic_mat 13 | return 14 | 15 | def check_loop_closure(self, idx, frame_new): 16 | loop_closure_flag = False 17 | pose, matched_idx = None, None 18 | best_kp1, best_kp2, best_matches = None, None, None 19 | local_neighbours = self.neighbours[idx][0][0] # numpy array of neighbours 20 | valid_neighbours = local_neighbours[local_neighbours < idx] 21 | 22 | # Check similarity with all valid neighbours and choose 1 or 2 to create edges 23 | max_num_matches = 0 24 | for img_idx in valid_neighbours: 25 | frame_old, _, _ = self.dataset[img_idx] 26 | # Check similarity using keypoint matches 27 | 28 | kp1, kp2, matches = self.find_matches(frame_new, frame_old) 29 | # Can also set min number of required matches 30 | 31 | if len(matches) > max_num_matches: 32 | max_num_matches = len(matches) 33 | target_frame = frame_old.copy() 34 | best_kp1 = kp1.copy() 35 | best_kp2 = kp2.copy() 36 | best_matches = matches.copy() 37 | matched_idx = img_idx 38 | # if(max_num_matches > 300): 39 | # print("Length of best matches: ", max_num_matches) 40 | # break 41 | 42 | # Compute R and t for maximally matching neighbours 43 | if max_num_matches >= 100: 44 | # import ipdb; ipdb.set_trace() 45 | matched_kp1 = [] 46 | matched_kp2 = [] 47 | self.DrawMatches(frame_new, target_frame, best_kp1, best_kp2, best_matches) 48 | # import ipdb; ipdb.set_trace() 49 | for mat in best_matches[:100]: 50 | matched_kp1.append(best_kp1[mat.queryIdx].pt) 51 | matched_kp2.append(best_kp2[mat.trainIdx].pt) 52 | 53 | matched_kp1 = np.array(matched_kp1) 54 | matched_kp2 = np.array(matched_kp2) 55 | E, _ = cv2.findEssentialMat(matched_kp1, matched_kp2, self.K, method=cv2.RANSAC, prob=0.999, threshold=1.0) 56 | _, R, t, mask = cv2.recoverPose(E, matched_kp1, matched_kp2, self.K) 57 | num_inliers = mask[mask > 0].shape[0] 58 | # num_outliers = mask.shape[0] - num_inliers 59 | inlier_ratio = num_inliers/mask.shape[0] 60 | # import ipdb; ipdb.set_trace() 61 | if inlier_ratio < 0.6: 62 | print("Found a matrix with very poor inlier ratio: ", inlier_ratio) 63 | return False, pose, matched_idx 64 | if (abs(t[1]) > abs(t[2]) or abs(t[0]) > abs(t[2])): 65 | print("Found unexpected translation ", t) 66 | return False, pose, matched_idx 67 | pose = convert_to_4_by_4(convert_to_Rt(R,t)) 68 | print("New index: ", idx) 69 | print("Matched Index: ", matched_idx) 70 | print("Total Matches: ", len(best_matches)) 71 | loop_closure_flag = True 72 | # cv2.imshow('Current', frame_new) 73 | # cv2.waitKey(0) 74 | # cv2.imshow('Target', target_frame) 75 | # cv2.waitKey(0) 76 | return loop_closure_flag, pose, matched_idx 77 | 78 | def DrawMatches(self, img1, img2, kp1, kp2, matches): 79 | # matches = sorted(matches, key = lambda x:x.distance) 80 | img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:],None, flags=2) 81 | cv2.namedWindow('Matches', cv2.WINDOW_NORMAL) 82 | cv2.imshow('Matches', img3) 83 | cv2.waitKey(0) 84 | return 85 | 86 | def 
find_matches(self, img1, img2, return_ratio = 1): 87 | sift = cv2.SIFT_create() 88 | 89 | kp1, descriptors_1 = sift.detectAndCompute(img1,None) 90 | kp2, descriptors_2 = sift.detectAndCompute(img2,None) 91 | 92 | # Nearest matches for lowe's ratio test 93 | bf = cv2.BFMatcher() 94 | matches = bf.knnMatch(descriptors_1,descriptors_2,k=2) 95 | good = [] 96 | for m,n in matches: 97 | if m.distance < 0.8*n.distance: 98 | good.append(m) 99 | matches = sorted(good, key = lambda x:x.distance) 100 | return kp1, kp2, matches 101 | -------------------------------------------------------------------------------- /visual_slam/core/model.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | 4 | from .optimizer import PoseGraph 5 | from .geocom.features import featureTracking 6 | 7 | from .utils import * 8 | import pdb 9 | from .loop_closure import LoopClosure 10 | 11 | KMIN_NUM_FEATURE = 1500 12 | optimize = False 13 | 14 | class VisualSLAM(): 15 | 16 | def __init__(self, camera_intrinsics, ground_pose, dataset, args): 17 | 18 | self.K = camera_intrinsics 19 | self.ground_pose = ground_pose 20 | self.args = args 21 | self.feature_tracker = featureTracking 22 | self.detector = cv2.FastFeatureDetector_create(threshold=25, nonmaxSuppression=True) 23 | self.cur_R = None 24 | self.cur_t = None 25 | self.curRaw_R = None 26 | self.curRaw_t = None 27 | self.prev_frame = None 28 | self.trueX, self.trueY, self.trueZ = 0, 0, 0 29 | self.poses = [] 30 | self.raw = [] 31 | self.gt = [] 32 | # np_gt = np.load('Gt.npy') 33 | # np_poses = np.load('Poses.npy') 34 | # self.gt = [np_gt[i] for i in range(np_gt.shape[0])] 35 | # self.poses = [np_poses[i] for i in range(np_poses.shape[0])] 36 | self.errors = [] 37 | self.loop_closure_count = 0 38 | self.pose_graph = PoseGraph(verbose = True) 39 | self.loop_closure = LoopClosure(args.gt_loops, dataset, self.K) 40 | 41 | def getAbsoluteScale(self, frame_id): 42 | """ 43 | specialized for KITTI odometry dataset 44 | """ 45 | 46 | gr_pose = self.ground_pose[frame_id-1] 47 | x_prev = float(gr_pose[0][-1]) 48 | y_prev = float(gr_pose[1][-1]) 49 | z_prev = float(gr_pose[2][-1]) 50 | gr_pose = self.ground_pose[frame_id] 51 | x = float(gr_pose[0][-1]) 52 | y = float(gr_pose[1][-1]) 53 | z = float(gr_pose[2][-1]) 54 | self.trueX, self.trueY, self.trueZ = x, y, z 55 | return np.sqrt((x - x_prev)*(x - x_prev) + (y - y_prev)*(y - y_prev) + (z - z_prev)*(z - z_prev)) 56 | 57 | def getAbsoluteScaleLoop(self, i, j): 58 | """ 59 | specialized for KITTI odometry dataset 60 | """ 61 | 62 | gr_pose_i = self.ground_pose[i] 63 | x_prev = float(gr_pose_i[0][-1]) 64 | y_prev = float(gr_pose_i[1][-1]) 65 | z_prev = float(gr_pose_i[2][-1]) 66 | 67 | gr_pose_j = self.ground_pose[j] 68 | x = float(gr_pose_j[0][-1]) 69 | y = float(gr_pose_j[1][-1]) 70 | z = float(gr_pose_j[2][-1]) 71 | 72 | self.trueX, self.trueY, self.trueZ = x, y, z 73 | return np.sqrt((x - x_prev)*(x - x_prev) + (y - y_prev)*(y - y_prev) + (z - z_prev)*(z - z_prev)) 74 | 75 | 76 | # def run_optimizer(self, local_window=10): 77 | 78 | # """ 79 | # Add poses to the optimizer graph 80 | # """ 81 | # if len(self.poses) 0: 154 | self.cur_t = self.prev_t + absolute_scale*self.prev_R@t 155 | self.cur_R = self.prev_R@R 156 | 157 | self.curRaw_R = self.cur_R 158 | self.curRaw_t = self.cur_t 159 | 160 | self.cur_Rt = convert_to_Rt(self.cur_R, self.cur_t) 161 | self.poses.append(convert_to_4_by_4(self.cur_Rt)) 162 | self.raw.append(convert_to_4_by_4(self.cur_Rt)) 163 | 
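# (editor's sketch, hedged) graph_extend below is defined in a stretch of model.py that did not survive this dump (roughly lines 82-153).
# Judging only from its call sites here and the PoseGraph API in core/optimizer.py, a minimal version consistent with this code would be:
#
#   def graph_extend(self, cur_pose, prev_Rt, i, j):
#       # anchor the gauge by fixing the very first vertex
#       if len(self.pose_graph.nodes) == 0:
#           self.pose_graph.add_vertex(j, convert_to_4_by_4(prev_Rt), is_fixed=True)
#       self.pose_graph.add_vertex(i, cur_pose)
#       # odometry edge carrying the relative motion j -> i
#       measurement = getTransform(cur_pose, convert_to_4_by_4(prev_Rt))
#       self.pose_graph.add_edge((j, i), measurement)
#
# this is an inference from the surviving code, not recovered source.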
self.graph_extend(convert_to_4_by_4(self.cur_Rt), self.prev_Rt, stage, stage-1) 164 | else: 165 | """ process subsequent frames after first 2 frames """ 166 | 167 | self.points_ref, points_cur = self.feature_tracker(self.prev_frame, current_frame, self.points_ref) 168 | E, mask = cv2.findEssentialMat(points_cur, self.points_ref, self.K, method=cv2.RANSAC, prob=0.999, threshold=1.0) 169 | _, R, t, mask = cv2.recoverPose(E, points_cur, self.points_ref, self.K) 170 | num_inliers = mask[mask > 0].shape[0] 171 | # num_outliers = mask.shape[0] - num_inliers 172 | inlier_ratio = num_inliers/mask.shape[0] 173 | print("Tracker inlier ratio: ", inlier_ratio) 174 | 175 | if (abs(t[1]) > 0.001): 176 | t[1] = np.random.uniform(-0.0001, 0.0001, size = 1) 177 | # print('true') 178 | 179 | absolute_scale = self.getAbsoluteScale(stage) 180 | # print(absolute_scale) 181 | if absolute_scale > 0.1: 182 | self.cur_t = self.prev_t + absolute_scale*self.prev_R@t 183 | self.cur_R = self.prev_R@R 184 | 185 | self.curRaw_t = self.prevRaw_t + absolute_scale*self.prevRaw_R@t 186 | self.curRaw_R = self.prevRaw_R@R 187 | 188 | if(self.points_ref.shape[0] 0.1 and abs(rel_pose[0,3]*abs_dist) < 1 ): 213 | rel_pose[:3,3] = rel_pose[:3,3]*abs_dist 214 | self.add_loop_constraint(rel_pose, stage, int(idx_j)) 215 | print("Found Loop Closure: ", self.loop_closure_count) 216 | 217 | # if(self.loop_closure_count%10 == 0): 218 | self.pose_graph.optimize(self.args.num_iter) 219 | self.poses = self.pose_graph.nodes_optimized 220 | global_flag = True 221 | 222 | # if self.args.optimize: 223 | # self.run_optimizer(self.args.local_window) 224 | #self.calculate_errors() 225 | ##################################################################################################### 226 | 227 | if global_flag: 228 | last_pose = self.pose_graph.optimizer.vertex(len(self.poses)-1).estimate().matrix() 229 | self.prev_t = last_pose[:3,3].reshape(-1, 1) 230 | self.prev_R = last_pose[:3,:3] 231 | else: 232 | self.prev_t = self.cur_t 233 | self.prev_R = self.cur_R 234 | 235 | self.prevRaw_t = self.curRaw_t 236 | self.prevRaw_R = self.curRaw_R 237 | 238 | self.prev_Rt = convert_to_Rt(self.prev_R, self.prev_t) 239 | if global_flag: 240 | print("opimized_Rt ", self.prev_Rt) 241 | self.prev_frame = current_frame -------------------------------------------------------------------------------- /visual_slam/core/optimizer.py: -------------------------------------------------------------------------------- 1 | import g2o 2 | import numpy as np 3 | import pdb 4 | 5 | class PoseGraph(object): 6 | nodes = [] 7 | edges = [] 8 | nodes_optimized = [] 9 | edges_optimized = [] 10 | 11 | def __init__(self, verbose=False): 12 | 13 | """ 14 | Initialise the pose graph optimizer (G2O) 15 | """ 16 | self.solver = g2o.BlockSolverSE3(g2o.LinearSolverEigenSE3()) 17 | self.solver= g2o.OptimizationAlgorithmLevenberg(self.solver) 18 | self.optimizer = g2o.SparseOptimizer() 19 | self.optimizer.set_verbose(verbose) 20 | self.optimizer.set_algorithm(self.solver) 21 | 22 | def add_vertex(self, id, pose, is_fixed=False): 23 | 24 | # Rt (pose) matrix, absolute 25 | v = g2o.VertexSE3() 26 | v.set_id(id) 27 | v.set_estimate(g2o.Isometry3d(pose)) 28 | v.set_fixed(is_fixed) 29 | 30 | self.optimizer.add_vertex(v) 31 | self.nodes.append(v) 32 | 33 | def add_edge(self, vertices, measurement=None, information=np.eye(6), robust_kernel=None): 34 | 35 | edge = g2o.EdgeSE3() 36 | for i, vertex in enumerate(vertices): # vertices - (j, i) 37 | # check to see if we're passing in actual vertices or 
just the vertex ids 38 | 39 | if isinstance(vertex, int): 40 | vertex = self.optimizer.vertex(vertex) 41 | 42 | edge.set_vertex(i, vertex) 43 | 44 | edge.set_measurement(g2o.Isometry3d(measurement)) # relative pose transformation between frames 45 | 46 | # information matrix/precision matrix 47 | # represents uncertainty of measurement error 48 | # inverse of covariance matrix 49 | edge.set_information(information) 50 | if robust_kernel is not None: 51 | edge.set_robust_kernel(robust_kernel) 52 | 53 | self.optimizer.add_edge(edge) 54 | 55 | def optimize(self, max_iter=15): 56 | self.optimizer.initialize_optimization() 57 | self.optimizer.optimize(max_iter) 58 | 59 | self.optimizer.save("data/out.g2o") 60 | 61 | # collecting the optimized edge endpoint poses is disabled; only the optimized nodes are consumed downstream 62 | self.edges_optimized = [] 63 | # self.edges_optimized = [(edge.vertices()[0].estimate().matrix(), edge.vertices()[1].estimate().matrix()) for edge in self.optimizer.edges()] 64 | 65 | self.nodes_optimized = [v.estimate().matrix() for v in self.optimizer.vertices().values()] 66 | self.edges_optimized = np.array(self.edges_optimized) 67 | -------------------------------------------------------------------------------- /visual_slam/core/utils.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from scipy.spatial.transform import Rotation as R 4 | 5 | 6 | def draw_trajectory(traj, frame_id, x, y, z, draw_x, draw_y, true_x, true_y): 7 | 8 | cv2.circle(traj, (draw_x,draw_y), 1, (frame_id*255/4540,255-frame_id*255/4540,0), 1) 9 | cv2.circle(traj, (true_x,true_y), 1, (0,0,255), 2) 10 | cv2.rectangle(traj, (10, 20), (600, 60), (0,0,0), -1) 11 | text = "Coordinates: x=%.2fm y=%.2fm z=%.2fm"%(x,y,z) 12 | cv2.putText(traj, text, (20,40), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,255), 1, 8) 13 | cv2.imshow('Trajectory', traj) 14 | 15 | 16 | def convert_to_Rt(R,t): 17 | 18 | """ 19 | converts to a 3x4 transformation matrix 20 | """ 21 | 22 | return np.hstack((R, t.reshape(-1,1))) 23 | 24 | def rotation_to_quaternions(rotation: np.ndarray): 25 | 26 | """ 27 | convert a rotation matrix to a quaternion vector 28 | """ 29 | 30 | assert rotation.shape == (3,3) 31 | 32 | return R.from_matrix(rotation).as_quat() 33 | 34 | def convert_to_4_by_4(Rt): 35 | 36 | try: 37 | assert Rt.shape==(3,4) 38 | except: 39 | print(Rt.shape) 40 | raise AssertionError("Input Matrix form should be of 3x4") 41 | 42 | return np.vstack((Rt, np.array([0,0,0,1]))) 43 | 44 | 45 | def getTransform(cur_pose, prev_pose): 46 | 47 | """ 48 | Computes the relative transformation between 2 poses 49 | """ 50 | 51 | Rt = np.linalg.inv(prev_pose)@cur_pose 52 | return Rt 53 | 54 | def getError(cur_pose, prev_pose, cur_gt, prev_gt): 55 | 56 | """ 57 | Computes the translation and rotation error of the estimated relative motion against ground truth 58 | """ 59 | 60 | error_t = np.linalg.norm((prev_pose[:3, -1] - cur_pose[:3,-1]) - (cur_gt[:3,-1] - prev_gt[:3,-1])) 61 | 62 | # relative rotations (prev -> cur) for ground truth and estimate 63 | gt_rel = R.from_matrix(prev_gt[:3, :3]).inv() * R.from_matrix(cur_gt[:3, :3]) 64 | est_rel = R.from_matrix(prev_pose[:3, :3]).inv() * R.from_matrix(cur_pose[:3, :3]) 65 | 66 | # angle of the residual rotation between the two relative motions, in degrees 67 | error_r = np.rad2deg(np.linalg.norm((gt_rel.inv() * est_rel).as_rotvec())) 68 | 69 | return error_r, error_t 70 | -------------------------------------------------------------------------------- /visual_slam/data/00Gt.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/00Gt.npy -------------------------------------------------------------------------------- /visual_slam/data/00Poses.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/00Poses.npy -------------------------------------------------------------------------------- /visual_slam/data/00Raw.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/00Raw.npy -------------------------------------------------------------------------------- /visual_slam/data/05Gt.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/05Gt.npy -------------------------------------------------------------------------------- /visual_slam/data/05Poses.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/05Poses.npy -------------------------------------------------------------------------------- /visual_slam/data/05Raw.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/05Raw.npy -------------------------------------------------------------------------------- /visual_slam/data/06Gt.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/06Gt.npy -------------------------------------------------------------------------------- /visual_slam/data/06Poses.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/06Poses.npy -------------------------------------------------------------------------------- /visual_slam/data/06Raw.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/06Raw.npy -------------------------------------------------------------------------------- /visual_slam/data/07Gt.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/07Gt.npy 
-------------------------------------------------------------------------------- /visual_slam/data/07Poses.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/07Poses.npy -------------------------------------------------------------------------------- /visual_slam/data/07Raw.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/07Raw.npy -------------------------------------------------------------------------------- /visual_slam/data/s00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/s00.png -------------------------------------------------------------------------------- /visual_slam/data/s05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/s05.png -------------------------------------------------------------------------------- /visual_slam/data/s06.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/s06.png -------------------------------------------------------------------------------- /visual_slam/data/s07.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/s07.png -------------------------------------------------------------------------------- /visual_slam/data/sequence00.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/sequence00.gif -------------------------------------------------------------------------------- /visual_slam/data/sequence05.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/sequence05.gif -------------------------------------------------------------------------------- /visual_slam/data/sequence06.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/sequence06.gif -------------------------------------------------------------------------------- /visual_slam/data/sequence07.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HobbySingh/Graph-SLAM/dc2a2c15f6c164d90b3782dd82a35bae2a15cee3/visual_slam/data/sequence07.gif -------------------------------------------------------------------------------- /visual_slam/install_all.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | #N.B: this install script allows you to run run_slam.py and all the scripts 4 | 5 | set -e 6 | 7 | # 
==================================================== 8 | # import the utils 9 | . bash_utils.sh 10 | 11 | # ==================================================== 12 | 13 | # install basic modules and set up git submodules 14 | . install_basic.sh # use . in order to inherit python env configuration and possible other env vars 15 | 16 | # build and install thirdparty 17 | . install_thirdparty.sh # use . in order to inherit python env configuration and possible other env vars -------------------------------------------------------------------------------- /visual_slam/install_basic.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | #N.B: this install script allows you to run run_slam.py and the other scripts 4 | # echo "usage: ./${0##*/} " # the arguments are optional 5 | 6 | #set -e 7 | 8 | # ==================================================== 9 | # import the utils 10 | . bash_utils.sh 11 | 12 | # ==================================================== 13 | 14 | export INSTALL_PIP3_PACKAGES=1 # install pip3 packages by default 15 | if [ $# -ge 1 ]; then 16 | # check optional argument 17 | INSTALL_PIP3_PACKAGES=$1 18 | echo INSTALL_PIP3_PACKAGES: $INSTALL_PIP3_PACKAGES 19 | fi 20 | 21 | export INSTALL_CPP=1 # install cpp by default 22 | if [ $# -ge 2 ]; then 23 | # check optional argument 24 | INSTALL_CPP=$2 25 | echo INSTALL_CPP: $INSTALL_CPP 26 | fi 27 | 28 | # ==================================================== 29 | 30 | echo `pwd` 31 | 32 | # install system packages 33 | . install_system_packages.sh # use . in order to inherit python env configuration 34 | 35 | # install pip3 packages 36 | # N.B.: install_pip3_packages script can be skipped if you intend to use a virtual python environment 37 | if [ $INSTALL_PIP3_PACKAGES -eq 1 ]; then 38 | echo 'installing pip3 packages' 39 | ./install_pip3_packages.sh 40 | fi 41 | -------------------------------------------------------------------------------- /visual_slam/install_pip3_packages.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | # ==================================================== 5 | # import the bash utils 6 | . bash_utils.sh 7 | 8 | # ==================================================== 9 | 10 | #set -e 11 | 12 | print_blue '================================================' 13 | print_blue "Configuring and installing python packages ..." 14 | 15 | # N.B.: python3 is required!
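# (editor's sketch, not part of the original script) a minimal guard for the python3 requirement noted above, run before anything is installed:
#
#   command -v python3 >/dev/null 2>&1 || { echo "python3 is required but was not found"; exit 1; }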
16 | 17 | pip3 install --upgrade pip 18 | 19 | # pip3 packages 20 | install_pip_packages pygame numpy matplotlib pyopengl Pillow pybind11 21 | install_pip_package "numpy>=1.18.3" # quoted so bash does not parse >= as an output redirection 22 | install_pip_package scipy==1.4.1 23 | install_pip_package scikit-image==0.16.2 24 | install_pip_packages pyyaml termcolor tqdm yacs 25 | install_pip_package opencv-python 26 | 27 | #pip3 uninstall opencv-contrib-python # better to clean it before installing the right version 28 | install_pip_package opencv-contrib-python #==3.4.2.16 29 | 30 | # install_pip_package torch 31 | # install_pip_package torchvision 32 | install_pip_package ordered-set # from https://pypi.org/project/ordered-set/ 33 | # install_pip_package tensorflow-gpu==1.14.0 # 1.14.0 works with all the modules contained in pyslam2 34 | 35 | # it may be required if you have errors with pillow 36 | #pip3 uninstall pillow 37 | #pip3 install pillow==6.2.2 38 | 39 | 40 | -------------------------------------------------------------------------------- /visual_slam/install_system_packages.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | # ==================================================== 5 | # import the bash utils 6 | . bash_utils.sh 7 | 8 | # ==================================================== 9 | 10 | #set -e 11 | 12 | print_blue '================================================' 13 | print_blue "Configuring and installing system packages ..." 14 | 15 | 16 | install_package rsync 17 | 18 | # N.B.: python3 is required! 19 | 20 | # system packages 21 | install_package python3-sdl2 22 | install_package python3-tk 23 | 24 | install_package libprotobuf-dev 25 | 26 | install_package libeigen3-dev # pangolin installation 27 | install_package libopencv-dev # orbslam2_features compilation 28 | -------------------------------------------------------------------------------- /visual_slam/install_thirdparty.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | #set -e 4 | 5 | # ==================================================== 6 | # import the utils 7 | . bash_utils.sh 8 | 9 | # ==================================================== 10 | 11 | print_blue '================================================' 12 | print_blue "Building Thirdparty" 13 | print_blue '================================================' 14 | 15 | STARTING_DIR=`pwd` # this should be the main folder directory of the repo 16 | 17 | # ==================================================== 18 | # N.B.: this script requires that you have first run: 19 | #./install_basic.sh 20 | # ==================================================== 21 | if [[ -z "${USE_PYSLAM_ENV}" ]]; then 22 | USE_PYSLAM_ENV=0 23 | fi 24 | if [ $USE_PYSLAM_ENV -eq 1 ]; then 25 | . 
pyenv-activate.sh 26 | fi 27 | 28 | # ==================================================== 29 | # check if we have external options 30 | EXTERNAL_OPTION=$1 31 | if [[ -n "$EXTERNAL_OPTION" ]]; then 32 | echo "external option: $EXTERNAL_OPTION" 33 | fi 34 | 35 | # check if we want to add a python interpreter check 36 | if [[ -n "$WITH_PYTHON_INTERP_CHECK" ]]; then 37 | echo "WITH_PYTHON_INTERP_CHECK: $WITH_PYTHON_INTERP_CHECK " 38 | EXTERNAL_OPTION="$EXTERNAL_OPTION -DWITH_PYTHON_INTERP_CHECK=$WITH_PYTHON_INTERP_CHECK" 39 | fi 40 | # ==================================================== 41 | 42 | CURRENT_USED_PYENV=$(get_virtualenv_name) 43 | print_blue "currently used pyenv: $CURRENT_USED_PYENV" 44 | 45 | # print_blue "==================================================================" 46 | # print_blue "Configuring and building thirdparty/orbslam2_features ..." 47 | # cd thirdparty/orbslam2_features 48 | # . build.sh $EXTERNAL_OPTION 49 | # cd $STARTING_DIR 50 | 51 | 52 | print_blue '================================================' 53 | print_blue "Configuring and building thirdparty/Pangolin ..." 54 | 55 | make_dir thirdparty 56 | 57 | INSTALL_PANGOLIN_ORIGINAL=0 58 | cd thirdparty 59 | if [ $INSTALL_PANGOLIN_ORIGINAL -eq 1 ] ; then 60 | # N.B.: pay attention this will generate a module 'pypangolin' ( it does not have the methods dcam.SetBounds(...) and pangolin.DrawPoints(points, colors) ) 61 | if [ ! -d pangolin ]; then 62 | if [[ "$OSTYPE" == "linux-gnu"* ]]; then 63 | sudo apt-get install -y libglew-dev 64 | fi 65 | git clone https://github.com/stevenlovegrove/Pangolin.git pangolin 66 | cd pangolin 67 | git submodule init && git submodule update 68 | cd .. 69 | fi 70 | cd pangolin 71 | make_dir build 72 | if [ ! -f build/src/libpangolin.so ]; then 73 | cd build 74 | cmake ../ -DAVFORMAT_INCLUDE_DIR="" -DCPP11_NO_BOOST=ON $EXTERNAL_OPTION 75 | make -j8 76 | cd build/src 77 | ln -s pypangolin.*-linux-gnu.so pangolin.linux-gnu.so 78 | fi 79 | else 80 | # N.B.: pay attention this will generate a module 'pangolin' 81 | if [ ! -d pangolin ]; then 82 | if [[ "$OSTYPE" == "linux-gnu"* ]]; then 83 | sudo apt-get install -y libglew-dev 84 | git clone https://github.com/uoip/pangolin.git 85 | cd pangolin 86 | PANGOLIN_UOIP_REVISION=3ac794a 87 | git checkout $PANGOLIN_UOIP_REVISION 88 | cd .. 89 | # copy local changes 90 | rsync ./pangolin_changes/python_CMakeLists.txt ./pangolin/python/CMakeLists.txt 91 | fi 92 | if [[ "$OSTYPE" == "darwin"* ]]; then 93 | git clone --recursive https://gitlab.com/luigifreda/pypangolin.git pangolin 94 | fi 95 | fi 96 | cd pangolin 97 | if [ ! -f pangolin.cpython-*.so ]; then 98 | make_dir build 99 | cd build 100 | cmake .. -DBUILD_PANGOLIN_LIBREALSENSE=OFF $EXTERNAL_OPTION # disable realsense 101 | make -j8 102 | cd .. 103 | #python setup.py install 104 | fi 105 | fi 106 | cd $STARTING_DIR 107 | 108 | 109 | print_blue "==================================================================" 110 | print_blue "Configuring and building thirdparty/g2o ..." 111 | 112 | cd thirdparty 113 | if [ ! -d g2opy ]; then 114 | if [[ "$OSTYPE" == "linux-gnu"* ]]; then 115 | sudo apt-get install -y libsuitesparse-dev libeigen3-dev 116 | fi 117 | git clone https://github.com/uoip/g2opy.git 118 | cd g2opy 119 | G2OPY_REVISION=5587024 120 | git checkout $G2OPY_REVISION 121 | cd .. 
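# the rsync calls below overwrite files in the freshly checked-out uoip/g2opy tree with the patched copies kept in
# thirdparty/g2opy_changes/ (the pybind11 CMakeLists plus the sparse_optimizer.h and types_six_dof_expmap.h binding
# tweaks reproduced later in this repo), so the resulting g2o Python module exposes what core/optimizer.py expects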
122 | # copy local changes 123 | rsync ./g2opy_changes/types_six_dof_expmap.h ./g2opy/python/types/sba/types_six_dof_expmap.h 124 | rsync ./g2opy_changes/sparse_optimizer.h ./g2opy/python/core/sparse_optimizer.h 125 | rsync ./g2opy_changes/python_CMakeLists.txt ./g2opy/python/CMakeLists.txt 126 | #rsync ./g2opy_changes/eigen_types.h ./g2opy/python/core/eigen_types.h 127 | fi 128 | cd g2opy 129 | if [ ! -f lib/g2o.cpython-*.so ]; then 130 | make_build_dir 131 | cd build 132 | cmake .. $EXTERNAL_OPTION 133 | make -j8 134 | cd .. 135 | #python3 setup.py install --user 136 | fi 137 | cd $STARTING_DIR 138 | 139 | 140 | -------------------------------------------------------------------------------- /visual_slam/plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from matplotlib import pyplot as plt 3 | import argparse 4 | import cv2 5 | def parse_argument(): 6 | 7 | parser=argparse.ArgumentParser() 8 | parser.add_argument('--path', required=True, help='sequence id, e.g. 00') 9 | return parser.parse_args() 10 | 11 | def compute_trans_err(gt, pose): 12 | t1 = gt[:3,3] 13 | t2 = pose[:3,3] 14 | err = np.linalg.norm(t1-t2) 15 | return err 16 | 17 | def compute_rot_err(gt, pose): 18 | mat = gt[:3,:3]@pose[:3,:3].T 19 | r, _ = cv2.Rodrigues(mat) 20 | err = np.linalg.norm(r) 21 | return err 22 | 23 | if __name__ == '__main__': 24 | args = parse_argument() 25 | sequence = 'data/' + args.path 26 | optimized_poses = np.load(sequence+'Poses.npy') 27 | gt_poses = np.load(sequence+'Gt.npy') 28 | raw_poses = np.load(sequence+'Raw.npy') 29 | 30 | 31 | 32 | t_err_raw = [compute_trans_err(gt_poses[i], raw_poses[i]) for i in range(gt_poses.shape[0])] 33 | t_err_optimized = [compute_trans_err(gt_poses[i], optimized_poses[i]) for i in range(gt_poses.shape[0])] 34 | 35 | rot_err_raw = [compute_rot_err(gt_poses[i], raw_poses[i]) for i in range(gt_poses.shape[0])] 36 | rot_err_optimized = [compute_rot_err(gt_poses[i], optimized_poses[i]) for i in range(gt_poses.shape[0])] 37 | 38 | # figure, axis = plt.subplots(1, 2) 39 | # plt.set_title("Errors") 40 | 41 | t = np.arange(len(t_err_raw)) 42 | plt.plot(t, t_err_raw, 'r', label='raw') 43 | plt.plot(t, t_err_optimized, 'b', label='optimized') 44 | plt.legend() 45 | # plt.set_title("Translation Error") 46 | plt.title("Translation Error") 47 | plt.xlabel('Frame') 48 | plt.ylabel('Error') 49 | plt.grid() 50 | 51 | # axis[0].plot(t, t_err_raw, 'r', label='raw') 52 | # axis[0].plot(t, t_err_optimized, 'b', label='optimized') 53 | # axis[0].legend() 54 | # axis[0].set_title("Translation Error") 55 | # axis[0].set_xlabel('Frame') 56 | # axis[0].set_ylabel('Error') 57 | # axis[0].grid() 58 | 59 | # axis[1].plot(t, rot_err_raw, 'r', label='raw') 60 | # axis[1].plot(t, rot_err_optimized, 'b', label='optimized') 61 | # axis[1].legend() 62 | # axis[1].set_title("Rotational Error") 63 | # axis[1].set_xlabel('Frame') 64 | # axis[1].set_ylabel('Error') 65 | # axis[1].grid() 66 | 67 | # Combine all the operations and display 68 | plt.show() -------------------------------------------------------------------------------- /visual_slam/run_slam.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import argparse 3 | import sys 4 | sys.path.append("./thirdparty/g2opy/lib/") 5 | sys.path.append("./thirdparty/pangolin/") 6 | 7 | from core.model import VisualSLAM 8 | from core.dataset import KittiDataset 9 | from core.utils import draw_trajectory 10 | from core.display2D import Displayer 11 | from 
core.display3D import Viewer3D 12 | 13 | def parse_argument(): 14 | 15 | parser=argparse.ArgumentParser() 16 | parser.add_argument('--dataset', default='kitti') 17 | parser.add_argument('--path', required=True) 18 | parser.add_argument('--gt_loops', required=True) 19 | parser.add_argument('--optimize', action='store_true', help='enable pose graph optimization') 20 | parser.add_argument('--local_window', default=10, type=int, help='number of frames to run the optimization') 21 | parser.add_argument('--num_iter', default=300, type=int, help='number of max iterations to run the optimization') 22 | 23 | return parser.parse_args() 24 | 25 | def main(): 26 | 27 | args = parse_argument() 28 | 29 | # Get data params using the dataloader 30 | dataset = KittiDataset(args.path) 31 | camera_matrix = dataset.intrinsic 32 | ground_truth_poses = dataset.ground_truth 33 | num_frames = len(dataset) 34 | 35 | # Initialise the mono-VO model 36 | model = VisualSLAM(camera_matrix, ground_truth_poses, dataset, args) 37 | 38 | # Initialie the viewer object 39 | viewer = Viewer3D() 40 | 41 | # Iterate over the frames and update the rotation and translation vectors 42 | for index in range(0, num_frames): 43 | 44 | print("Number of frames remaining: ", num_frames-index) 45 | 46 | frame, _ , _ = dataset[index] 47 | model(index, frame) 48 | 49 | if(index == int(num_frames)-10): 50 | model.model_optimize() 51 | 52 | if index>2: 53 | viewer.update(model) 54 | 55 | 56 | viewer.update(model) 57 | viewer.stop() 58 | 59 | optimized_poses = [model.pose_graph.optimizer.vertex(i).estimate().matrix() for i in range(len(model.poses))] 60 | np.save('./data/'+args.path[-3:-1]+'Poses', optimized_poses) 61 | np.save('./data/'+args.path[-3:-1]+'Gt', np.array(model.gt)) 62 | np.save('./data/'+args.path[-3:-1]+'Raw', np.array(model.raw)) 63 | 64 | 65 | if __name__ == "__main__": 66 | main() -------------------------------------------------------------------------------- /visual_slam/thirdparty/g2opy_changes/python_CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(WITH_PYTHON_INTERP_CHECK OFF CACHE BOOL "Checking python interpreter") # to be activated when called within virtual python environment 2 | 3 | include_directories(${PROJECT_SOURCE_DIR}) 4 | 5 | include_directories(${EIGEN3_INCLUDE_DIR}) 6 | include_directories(${CHOLMOD_INCLUDE_DIR}) 7 | include_directories(${CSPARSE_INCLUDE_DIR}) 8 | 9 | # the following 2 lines are added to correctly detect the python version 10 | if(WITH_PYTHON_INTERP_CHECK) 11 | message(STATUS "WITH_PYTHON_INTERP_CHECK: ${WITH_PYTHON_INTERP_CHECK}") 12 | find_package(PythonInterp) 13 | find_package(PythonLibs) 14 | message(STATUS "PythonInterp: ${PythonInterp}") 15 | message(STATUS "PythonLibs: ${PythonLibs}") 16 | endif() 17 | 18 | # pybind11 (version 2.2.1) 19 | LIST(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/EXTERNAL/pybind11/tools) 20 | include_directories(${PROJECT_SOURCE_DIR}/EXTERNAL/pybind11/include) 21 | include(pybind11Tools) 22 | 23 | 24 | pybind11_add_module(g2o g2o.cpp) 25 | target_link_libraries(g2o PRIVATE 26 | core 27 | solver_cholmod 28 | solver_csparse 29 | solver_eigen 30 | solver_dense 31 | solver_pcg 32 | solver_slam2d_linear 33 | solver_structure_only 34 | types_data 35 | types_icp 36 | types_sba 37 | types_sclam2d 38 | types_sim3 39 | types_slam2d 40 | types_slam2d_addons 41 | types_slam3d 42 | types_slam3d_addons 43 | contrib 44 | ) 45 | -------------------------------------------------------------------------------- 
/visual_slam/thirdparty/g2opy_changes/sparse_optimizer.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | 16 | namespace py = pybind11; 17 | using namespace pybind11::literals; 18 | 19 | namespace g2o { 20 | 21 | class Flag 22 | { 23 | public: 24 | Flag(bool init=false):value(init){} 25 | bool value; 26 | }; 27 | 28 | void declareSparseOptimizer(py::module & m) { 29 | using CLS = SparseOptimizer; 30 | 31 | py::class_(m, "Flag") 32 | .def(py::init<>()) 33 | .def(py::init(),"init"_a) 34 | .def_readwrite("value", &Flag::value); 35 | 36 | 37 | py::class_(m, "SparseOptimizer") 38 | // ATTENTION: _solver & _statistics is own by SparseOptimizer and will be 39 | // deleted in its destructor. 40 | .def(py::init<>()) 41 | 42 | .def("initialize_optimization", (bool (CLS::*) (HyperGraph::EdgeSet&)) &CLS::initializeOptimization, 43 | "eset"_a) // virtual 44 | .def("initialize_optimization", (bool (CLS::*) (HyperGraph::VertexSet&, int)) &CLS::initializeOptimization, 45 | "vset"_a, "level"_a=0) // virtual 46 | .def("initialize_optimization", (bool (CLS::*) (int)) &CLS::initializeOptimization, 47 | "level"_a=0) // virtual 48 | 49 | .def("update_initialization", &CLS::updateInitialization, 50 | "vset"_a, "eset"_a) // virtual, ->bool 51 | .def("compute_initial_guess", (void (CLS::*) ()) &CLS::computeInitialGuess) // virtual 52 | .def("compute_initial_guess", (void (CLS::*) (EstimatePropagatorCost&)) &CLS::computeInitialGuess, 53 | "propagator"_a) // virtual 54 | .def("set_to_origin", &CLS::setToOrigin) // virtual 55 | 56 | .def("optimize", &CLS::optimize, 57 | "iterations"_a, "online"_a=false) // -> int 58 | 59 | // .def("compute_marginals", (bool (CLS::*) (SparseBlockMatrix&, const std::vector >&)) 60 | // &CLS::computeMarginals, "spinv"_a, "block_indices"_a) 61 | // .def("compute_marginals", (bool (CLS::*) (SparseBlockMatrix&, const OptimizableGraph::Vertex*)) 62 | // &CLS::computeMarginals, "spinv"_a, "vertex"_a) 63 | // .def("compute_marginals", (bool (CLS::*) (SparseBlockMatrix&, const OptimizableGraph::VertexContainer&)) 64 | // &CLS::computeMarginals, "spinv"_a, "vertices"_a) 65 | 66 | // segfault (一︿一) 67 | // .def("compute_marginals", [](CLS& optimizer, const std::vector >& block_indices){ 68 | // SparseBlockMatrix spinv; 69 | // optimizer.computeMarginals(spinv, block_indices); 70 | // return spinv;}, 71 | // py::return_value_policy::copy) 72 | // .def("compute_marginals", [](CLS& optimizer, const OptimizableGraph::Vertex* vertex){ 73 | // SparseBlockMatrix spinv; 74 | // optimizer.computeMarginals(spinv, vertex); 75 | // return spinv;}, 76 | // py::return_value_policy::copy) 77 | // .def("compute_marginals", [](CLS& optimizer, size_t vertex_id){ 78 | // SparseBlockMatrix spinv; 79 | // const g2o::OptimizableGraph::Vertex* vertex = optimizer.vertex(vertex_id); 80 | // optimizer.computeMarginals(spinv, vertex); 81 | // return spinv;}, 82 | // py::return_value_policy::copy) 83 | // .def("compute_marginals", [](CLS& optimizer, const OptimizableGraph::VertexContainer& vertices){ 84 | // SparseBlockMatrix spinv; 85 | // optimizer.computeMarginals(spinv, vertices); 86 | // return spinv;}, 87 | // py::return_value_policy::copy) 88 | 89 | // The gauge should be fixed() and then the optimization can work (if no additional dof are in 90 | // the system. 
The default implementation returns a node with maximum dimension. 91 | .def("find_gauge", &CLS::findGauge) // virtual, -> OptimizableGraph::Vertex* 92 | .def("gauge_freedom", &CLS::gaugeFreedom) // -> bool 93 | .def("active_chi2", &CLS::activeChi2) // -> double 94 | .def("active_robust_chi2", &CLS::activeRobustChi2) // -> double 95 | .def("verbose", &CLS::verbose) // -> bool 96 | .def("set_verbose", &CLS::setVerbose, 97 | "verbose"_a) // -> void 98 | 99 | .def("set_force_stop_flag_old", &CLS::setForceStopFlag, 100 | "flag"_a) // -> void 101 | .def("set_force_stop_flag", [](CLS& optimizer, Flag* flag){ optimizer.setForceStopFlag(&(flag->value)); }, 102 | py::keep_alive<1, 2>()) 103 | .def("force_stop_flag", &CLS::forceStopFlag) // -> bool* 104 | .def("terminate", &CLS::terminate) // -> bool 105 | 106 | .def("index_mapping", &CLS::indexMapping) // -> const VertexContainer& 107 | .def("active_vertices", &CLS::activeVertices) // -> const VertexContainer& 108 | .def("active_edges", &CLS::activeEdges) // -> const EdgeContainer& 109 | .def("remove_vertex", &CLS::removeVertex, 110 | "v"_a, "detach"_a=false) // virtual, -> bool 111 | 112 | .def("find_active_vertex", &CLS::findActiveVertex, 113 | "v"_a) // -> VertexContainer::const_iterator, const 114 | .def("find_active_edge", &CLS::findActiveEdge, 115 | "e"_a) // -> EdgeContainer::const_iterator, const 116 | 117 | .def("algorithm", &CLS::algorithm) // -> const OptimizationAlgorithm*, const 118 | .def("solver", &CLS::solver) // -> OptimizationAlgorithm* 119 | //.def("set_algorithm", &CLS::setAlgorithm, 120 | // "algorithm"_a, py::keep_alive<1, 2>()) // -> void 121 | 122 | .def("set_algorithm", [](CLS& optimizer, OptimizationAlgorithm* algorithm) {optimizer.setAlgorithm(algorithm);}, 123 | py::keep_alive<1, 2>()) 124 | .def("set_algorithm", [](CLS& optimizer, OptimizationAlgorithmWithHessian* algorithm) {optimizer.setAlgorithm(algorithm);}, 125 | py::keep_alive<1, 2>()) 126 | .def("set_algorithm", [](CLS& optimizer, OptimizationAlgorithmGaussNewton* algorithm) {optimizer.setAlgorithm(algorithm);}, 127 | py::keep_alive<1, 2>()) 128 | .def("set_algorithm", [](CLS& optimizer, OptimizationAlgorithmLevenberg* algorithm) {optimizer.setAlgorithm(algorithm);}, 129 | py::keep_alive<1, 2>()) 130 | .def("set_algorithm", [](CLS& optimizer, OptimizationAlgorithmDogleg* algorithm) {optimizer.setAlgorithm(algorithm);}, 131 | py::keep_alive<1, 2>()) 132 | 133 | 134 | 135 | .def("push", (void (CLS::*) (SparseOptimizer::VertexContainer&)) &CLS::push, 136 | "vlist"_a, py::keep_alive<1, 2>()) 137 | .def("push", (void (CLS::*) (HyperGraph::VertexSet&)) &CLS::push, 138 | "vlist"_a, py::keep_alive<1, 2>()) 139 | .def("push", (void (CLS::*) ()) &CLS::push) 140 | 141 | .def("pop", (void (CLS::*) (SparseOptimizer::VertexContainer&)) &CLS::pop, 142 | "vlist"_a, py::keep_alive<1, 2>()) 143 | .def("pop", (void (CLS::*) (HyperGraph::VertexSet&)) &CLS::pop, 144 | "vlist"_a, py::keep_alive<1, 2>()) 145 | .def("pop", (void (CLS::*) ()) &CLS::pop) 146 | 147 | .def("discard_top", (void (CLS::*) (SparseOptimizer::VertexContainer&)) &CLS::discardTop, 148 | "vlist"_a, py::keep_alive<1, 2>()) // -> void 149 | .def("discard_top", (void (CLS::*) ()) &CLS::discardTop) 150 | 151 | .def("clear", &CLS::clear) // virtual, -> void 152 | .def("compute_active_errors", &CLS::computeActiveErrors) // virtual, -> void 153 | .def("update", &CLS::update, 154 | "update"_a) // -> void 155 | 156 | .def("batch_statistics", (const BatchStatisticsContainer& (CLS::*) () const) &CLS::batchStatistics) 157 | 
.def("set_compute_batch_statistics", &CLS::setComputeBatchStatistics) // -> void 158 | .def("compute_batch_statistics", &CLS::computeBatchStatistics) 159 | 160 | // callbacks 161 | .def("add_compute_error_action", &CLS::addComputeErrorAction, 162 | "action"_a) // -> bool 163 | .def("remove_compute_error_action", &CLS::removeComputeErrorAction, 164 | "action"_a) // -> bool 165 | 166 | ; 167 | 168 | 169 | } 170 | 171 | } // end namespace g2o 172 | -------------------------------------------------------------------------------- /visual_slam/thirdparty/g2opy_changes/types_six_dof_expmap.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include 5 | 6 | #include 7 | //#include "python/core/base_vertex.h" 8 | #include "python/core/base_unary_edge.h" 9 | #include "python/core/base_binary_edge.h" 10 | #include "python/core/base_multi_edge.h" 11 | 12 | 13 | namespace py = pybind11; 14 | using namespace pybind11::literals; 15 | 16 | 17 | namespace g2o { 18 | 19 | void declareTypesSixDofExpmap(py::module & m) { 20 | 21 | py::class_(m, "CameraParameters") 22 | .def(py::init<>()) 23 | .def(py::init(), 24 | "focal_length"_a, "principle_point"_a, "baseline"_a) 25 | 26 | .def("cam_map", &CameraParameters::cam_map, 27 | "trans_xyz") 28 | .def("stereocam_uvu_map", &CameraParameters::stereocam_uvu_map, 29 | "trans_xyz") 30 | .def_readwrite("focal_length", &CameraParameters::focal_length) 31 | .def_readwrite("principal_point", &CameraParameters::principle_point) 32 | .def_readwrite("principle_point", &CameraParameters::principle_point) 33 | .def_readwrite("baseline", &CameraParameters::baseline) 34 | // read 35 | // write 36 | ; 37 | 38 | 39 | py::class_>(m, "VertexSE3Expmap") 40 | .def(py::init<>()) 41 | //.def(py::init([]() {return new VertexSE3Expmap();})) 42 | .def("set_to_origin_impl", &VertexSE3Expmap::setToOriginImpl) 43 | .def("oplus_impl", &VertexSE3Expmap::oplusImpl) // double* -> void 44 | // read 45 | // write 46 | ; 47 | 48 | 49 | templatedBaseBinaryEdge<6, SE3Quat, VertexSE3Expmap, VertexSE3Expmap>(m, "_6_SE3Quat_VertexSE3Expmap_VertexSE3Expmap"); 50 | py::class_>(m, "EdgeSE3Expmap") 51 | .def(py::init<>()) 52 | .def("compute_error", &EdgeSE3Expmap::computeError) 53 | .def("linearize_oplus", &EdgeSE3Expmap::linearizeOplus) 54 | ; 55 | 56 | 57 | templatedBaseBinaryEdge<2, Vector2D, VertexSBAPointXYZ, VertexSE3Expmap>(m, "_2_Vector2D_VertexSBAPointXYZ_VertexSE3Expmap"); 58 | py::class_>(m, "EdgeProjectXYZ2UV") 59 | .def(py::init<>()) 60 | .def("compute_error", &EdgeProjectXYZ2UV::computeError) 61 | .def("linearize_oplus", &EdgeProjectXYZ2UV::linearizeOplus) 62 | ; 63 | 64 | 65 | py::class_>(m, "EdgeProjectPSI2UV") 66 | .def(py::init()) 67 | .def("compute_error", &EdgeProjectPSI2UV::computeError) 68 | .def("linearize_oplus", &EdgeProjectPSI2UV::linearizeOplus) 69 | ; 70 | 71 | 72 | //Stereo Observations 73 | templatedBaseBinaryEdge<3, Vector3D, VertexSBAPointXYZ, VertexSE3Expmap>(m, "_3_Vector3D_VertexSBAPointXYZ_VertexSE3Expmap"); 74 | py::class_>(m, "EdgeProjectXYZ2UVU") 75 | .def(py::init<>()) 76 | .def("compute_error", &EdgeProjectXYZ2UVU::computeError) 77 | ; 78 | 79 | 80 | // Projection using focal_length in x and y directions 81 | py::class_>(m, "EdgeSE3ProjectXYZ") 82 | .def(py::init<>()) 83 | .def("compute_error", &EdgeSE3ProjectXYZ::computeError) 84 | .def("is_depth_positive", &EdgeSE3ProjectXYZ::isDepthPositive) 85 | .def("linearize_oplus", &EdgeSE3ProjectXYZ::linearizeOplus) 86 | .def("cam_project", 
&EdgeSE3ProjectXYZ::cam_project) 87 | .def_readwrite("fx", &EdgeSE3ProjectXYZ::fx) 88 | .def_readwrite("fy", &EdgeSE3ProjectXYZ::fy) 89 | .def_readwrite("cx", &EdgeSE3ProjectXYZ::cx) 90 | .def_readwrite("cy", &EdgeSE3ProjectXYZ::cy) 91 | ; 92 | 93 | 94 | // Edge to optimize only the camera pose 95 | templatedBaseUnaryEdge<2, Vector2D, VertexSE3Expmap>(m, "BaseUnaryEdge_2_Vector2D_VertexSE3Expmap"); 96 | py::class_>(m, "EdgeSE3ProjectXYZOnlyPose") 97 | .def(py::init<>()) 98 | .def("compute_error", &EdgeSE3ProjectXYZOnlyPose::computeError) 99 | .def("is_depth_positive", &EdgeSE3ProjectXYZOnlyPose::isDepthPositive) 100 | .def("linearize_oplus", &EdgeSE3ProjectXYZOnlyPose::linearizeOplus) 101 | .def("cam_project", &EdgeSE3ProjectXYZOnlyPose::cam_project) 102 | .def_readwrite("fx", &EdgeSE3ProjectXYZOnlyPose::fx) 103 | .def_readwrite("fy", &EdgeSE3ProjectXYZOnlyPose::fy) 104 | .def_readwrite("cx", &EdgeSE3ProjectXYZOnlyPose::cx) 105 | .def_readwrite("cy", &EdgeSE3ProjectXYZOnlyPose::cy) 106 | .def_readwrite("Xw", &EdgeSE3ProjectXYZOnlyPose::Xw) 107 | ; 108 | 109 | 110 | // Projection using focal_length in x and y directions stereo 111 | py::class_>(m, "EdgeStereoSE3ProjectXYZ") 112 | .def(py::init<>()) 113 | .def("compute_error", &EdgeStereoSE3ProjectXYZ::computeError) 114 | .def("is_depth_positive", &EdgeStereoSE3ProjectXYZ::isDepthPositive) 115 | .def("linearize_oplus", &EdgeStereoSE3ProjectXYZ::linearizeOplus) 116 | .def("cam_project", &EdgeStereoSE3ProjectXYZ::cam_project) 117 | ; 118 | 119 | 120 | // Edge to optimize only the camera pose stereo 121 | templatedBaseUnaryEdge<3, Vector3D, VertexSE3Expmap>(m, "BaseUnaryEdge_3_Vector3D_VertexSE3Expmap"); 122 | py::class_>(m, "EdgeStereoSE3ProjectXYZOnlyPose") 123 | .def(py::init<>()) 124 | .def("compute_error", &EdgeStereoSE3ProjectXYZOnlyPose::computeError) 125 | .def("is_depth_positive", &EdgeStereoSE3ProjectXYZOnlyPose::isDepthPositive) 126 | .def("linearize_oplus", &EdgeStereoSE3ProjectXYZOnlyPose::linearizeOplus) 127 | .def("cam_project", &EdgeStereoSE3ProjectXYZOnlyPose::cam_project) 128 | ; 129 | 130 | 131 | // class G2O_TYPES_SBA_API EdgeProjectPSI2UV : public g2o::BaseMultiEdge<2, Vector2D> 132 | // class G2O_TYPES_SBA_API EdgeProjectXYZ2UVU : public BaseBinaryEdge<3, Vector3D, VertexSBAPointXYZ, VertexSE3Expmap> 133 | // class EdgeSE3ProjectXYZ : public BaseBinaryEdge<2, Vector2D, VertexSBAPointXYZ, VertexSE3Expmap> 134 | // class EdgeSE3ProjectXYZOnlyPose : public BaseUnaryEdge<2, Vector2D, VertexSE3Expmap> 135 | // class EdgeStereoSE3ProjectXYZ : public BaseBinaryEdge<3, Vector3D, VertexSBAPointXYZ, VertexSE3Expmap> 136 | // class EdgeStereoSE3ProjectXYZOnlyPose : public BaseUnaryEdge<3, Vector3D, VertexSE3Expmap> 137 | 138 | } 139 | 140 | } // end namespace g2o -------------------------------------------------------------------------------- /visual_slam/thirdparty/pangolin_changes/python_CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(WITH_PYTHON_INTERP_CHECK OFF CACHE BOOL "Checking python interpreter") # to be activated when called within virtual python environment 2 | 3 | include_directories(${PROJECT_SOURCE_DIR}/include) 4 | 5 | # Find Eigen3 6 | find_package(Eigen3 REQUIRED) 7 | include_directories(${EIGEN3_INCLUDE_DIR}) 8 | 9 | # the following 2 lines are added to correctly detect the python version 10 | if(WITH_PYTHON_INTERP_CHECK) 11 | message(STATUS "WITH_PYTHON_INTERP_CHECK: ${WITH_PYTHON_INTERP_CHECK}") 12 | find_package(PythonInterp) 13 | find_package(PythonLibs) 
--------------------------------------------------------------------------------