├── .gitignore ├── LICENSE ├── README.md ├── examples ├── Fitting a cubic.ipynb ├── Pose graph relaxation in SE(2).ipynb ├── dense_stereo_vo_kitti.py ├── posegraph_relax.py ├── posegraph_relax_2d.py ├── stereo_ba.py ├── stereo_ba_frame_to_frame.py └── stereo_image_align.py ├── pyslam ├── __init__.py ├── losses.py ├── metrics.py ├── pipelines │ ├── __init__.py │ ├── dense.py │ ├── keyframes.py │ ├── ransac.py │ └── sparse.py ├── problem.py ├── residuals │ ├── __init__.py │ ├── photometric_residual.py │ ├── pose_residual.py │ ├── pose_to_pose_orientation_residual.py │ ├── pose_to_pose_residual.py │ ├── quadratic_residual.py │ ├── reprojection_motion_only_residual.py │ └── reprojection_residual.py ├── sensors │ ├── __init__.py │ ├── rgbd_camera.py │ └── stereo_camera.py ├── utils.py └── visualizers.py ├── setup.py └── tests ├── test_costs.py ├── test_problem.py ├── test_sensors.py └── test_utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | __pycache__ 3 | .cache 4 | .vscode 5 | .ipynb_checkpoints 6 | *.egg-info 7 | *.profile 8 | *.pyc 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Lee Clement 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pyslam 2 | Non-linear least-squares SLAM in Python using scipy and numpy. Modelled after Google's Ceres solver. 3 | 4 | Dependencies: 5 | * numpy (for most things) 6 | * scipy (for sparse linear algebra) 7 | * numba (for vectorization speedups) 8 | * [liegroups](https://github.com/utiasSTARS/liegroups) 9 | 10 | ### Installation 11 | To install, `cd` into the repository directory (the one with `setup.py`) and run: 12 | ``` 13 | pip install -e . 14 | ``` 15 | The `-e` flag tells pip to install the package in-place, which lets you make changes to the code without having to reinstall every time. 16 | 17 | ### Testing 18 | Ensure you have `pytest` installed on your system, or install it using `conda install pytest` or `pip install pytest`. Then run `pytest` in the repository directory. 
19 | -------------------------------------------------------------------------------- /examples/Fitting a cubic.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%matplotlib inline\n", 10 | "\n", 11 | "import numpy as np\n", 12 | "import matplotlib.pyplot as plt" 13 | ] 14 | }, 15 | { 16 | "cell_type": "markdown", 17 | "metadata": {}, 18 | "source": [ 19 | "# Define your model and create a residual function\n", 20 | "In this example, we want to fit a cubic polynomial of the form $y = ax^3 + bx^2 + cx + d$ to data. For later convenience, we'll create a simple method to evaluate the polynomial, although this isn't really necessary." 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 2, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "def evaluate_cubic(a,b,c,d,x):\n", 30 | " return a * x**3 + b * x**2 + c * x + d" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "Now let's define a new class for our residual function called `CubicResidual`. Any residual function object must store the input and expected output data ($x$ and $y$ in this case), and implement a method called `evaluate` that computes the estimation error and its jacobian based on a set of model parameters." 
38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 3, 43 | "metadata": { 44 | "collapsed": true 45 | }, 46 | "outputs": [], 47 | "source": [ 48 | "class CubicResidual:\n", 49 | " def __init__(self, x, y):\n", 50 | " # The residuals and jacobians need to be np.arrays\n", 51 | " # The easiest thing is to turn our scalar x,y into 1d arrays\n", 52 | " self.x = np.atleast_1d(x)\n", 53 | " self.y = np.atleast_1d(y)\n", 54 | " \n", 55 | " def evaluate(self, params, compute_jacobians=None):\n", 56 | " # Evaluate the model using the given parameters\n", 57 | " a, b, c, d = params\n", 58 | " y_predicted = evaluate_cubic(a, b, c, d, self.x)\n", 59 | " \n", 60 | " # The residual or error is the difference between the predicted and measured output variable\n", 61 | " residual = y_predicted - self.y\n", 62 | " \n", 63 | " # Compute the jacobian of the residual w.r.t. the model parameters if requested\n", 64 | " # The optimizer can request jacobians w.r.t. only a subset of parameters, if, \n", 65 | " # for example, some parameters are held constant\n", 66 | " if compute_jacobians:\n", 67 | " jacobians = [None for _ in enumerate(params)]\n", 68 | " \n", 69 | " if compute_jacobians[0]:\n", 70 | " jacobians[0] = self.x**3 # d residual / d a\n", 71 | " if compute_jacobians[1]:\n", 72 | " jacobians[1] = self.x**2 # d residual / d b\n", 73 | " if compute_jacobians[2]:\n", 74 | " jacobians[2] = self.x # d residual / d c\n", 75 | " if compute_jacobians[3]:\n", 76 | " jacobians[3] = np.atleast_1d(1.) # d residual / d d\n", 77 | "\n", 78 | " return residual, np.squeeze(jacobians)\n", 79 | " \n", 80 | " return residual" 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "metadata": {}, 86 | "source": [ 87 | "# Generate some test data\n", 88 | "\n", 89 | "Note that `pyslam` deals with parameters as dictionaries, so we will create a `params_true` dictionary for later convenience." 
90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 4, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "params_true = {'a': 2., 'b': 4., 'c': -4., 'd': 0.}\n", 99 | "x_data = np.linspace(-5, 5, 10)\n", 100 | "y_data = evaluate_cubic(params_true['a'], params_true['b'], params_true['c'], params_true['d'], x_data)" 101 | ] 102 | }, 103 | { 104 | "cell_type": "markdown", 105 | "metadata": {}, 106 | "source": [ 107 | "# Set up the optimization problem\n", 108 | "\n", 109 | "There are five steps to solving an optimization problem using `pyslam`:\n", 110 | "\n", 111 | "1. Initialize the `Problem` object. The optimizer's behaviour can be customized by creating, modifying, and passing an `Options` object to the `Problem` constructor. Among other things, `Options` sets various thresholds and controls the optimizer's verbosity.\n", 112 | "2. Create one or more residual function objects for your data. This will vary depending on how you define your residual function classes. To keep it simple, we'll create a `CubicResidual` object for each $(x,y)$ pair.\n", 113 | "3. Add each residual function object to the `Problem` and specify the names of the parameters it depends on. `pyslam` deals with parameters as dictionaries, so each parameter must have a unique identifier.\n", 114 | "4. Give the `Problem` a dictionary of initial guesses for each parameter.\n", 115 | "5. 
Call `Problem.solve`" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 5, 121 | "metadata": {}, 122 | "outputs": [ 123 | { 124 | "name": "stdout", 125 | "output_type": "stream", 126 | "text": [ 127 | "Iterations: 2 | Cost: 3.735817e+05 --> 5.045051e-26\n" 128 | ] 129 | } 130 | ], 131 | "source": [ 132 | "from pyslam.problem import Problem, Options\n", 133 | "\n", 134 | "options = Options()\n", 135 | "options.print_summary = True\n", 136 | "\n", 137 | "problem = Problem(options)\n", 138 | "\n", 139 | "for x, y in zip(x_data, y_data):\n", 140 | " residual = CubicResidual(x, y)\n", 141 | " problem.add_residual_block(residual, ['a','b','c','d'])\n", 142 | " \n", 143 | "params_init = {'a': -2., 'b': 10., 'c': -6., 'd': -140.}\n", 144 | "problem.initialize_params(params_init)\n", 145 | "\n", 146 | "params_final = problem.solve()\n", 147 | "print(problem.summary())" 148 | ] 149 | }, 150 | { 151 | "cell_type": "markdown", 152 | "metadata": {}, 153 | "source": [ 154 | "# Check the results" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 6, 160 | "metadata": {}, 161 | "outputs": [ 162 | { 163 | "data": { 164 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYIAAAD8CAYAAAB6paOMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xd4VGX2wPHvOyEQSIYiTSAoaBBEuoA0C1UILojyMyoi\nCi7iqisWFNtaV1GsiBWwoKwbxQJqFhcFhYCKUVBERALCAkFKaMMQIOX9/XEmkEACCZmZO+V8nuc+\nydy5yZyEcM+87bzGWotSSqno5XI6AKWUUs7SRKCUUlFOE4FSSkU5TQRKKRXlNBEopVSU00SglFJR\nThOBUkpFOU0ESikV5TQRKKVUlKvkdABlUadOHdukSROnw1BKqbDyww8/bLfW1j3edWGRCJo0aUJG\nRobTYSilVFgxxqwvy3XaNaSUUlHOb4nAGBNjjFlqjPnU97ipMeY7Y0ymMSbVGFPZd76K73Gm7/km\n/opBKaVU+fmzRXALsLLI4yeAZ621ScBOYJTv/Chgp+/8s77rlFJKOcQvYwTGmERgIPBP4DZjjAF6\nAVf6LnkLeBB4GRjs+xxgJjDZGGOs1sOOerm5uWzcuJH9+/c7HUrEi4uLIzExkdjYWKdDUSHAX4PF\nzwF3Am7f49rALmttnu/xRqCR7/NGwAYAa22eMWa37/rtfopFhamNGzfidrtp0qQJ8l5CBYK1luzs\nbDZu3EjTpk2dDkeFgAp3DRljLgK2Wmt/8EM8Rb/vaGNMhjEmY9u2bf781ipE7d+/n9q1a2sSCDBj\nDLVr19aWlzrEH2ME3YFBxph1wL+RLqHngZrGmMIWRyKwyff5JqAxgO/5GkD2kd/UWvuatbajtbZj\n3brHnQarIoQmgeDQ37MqqsKJwFp7t7U20VrbBLgcmGetHQbMB4b6LhsBzPJ9Ptv3GN/z8wI6PjB9\nOrz5ZsC+vVJKhbtAriO4Cxk4zkTGAKb5zk8DavvO3waMD2AMMGMGPPww6Fi0KoNu3bod95rrrruO\nX3/9FYDHHnus3F+fkJBQ4vlJkyZx5plnMmzYMGbPns2ECRMA+Pjjjw+9nlKBYMJhsk7Hjh3tCa8s\nfuMNGDkSvvsOOnf2b2DKr1auXMmZZ57pdBjlkpCQwN69e/3yNS1atOCLL74gMTGx2PlrrrmGiy66\niKFDhx71NRURjr9vVT7GmB+stR2Pd13kryy++GKIjYXUVKcjUWGg8N36V199xQUXXMDQoUNp0aIF\nw4YNo/BN0wUXXEBGRgbjx48nJyeHdu3aMWzYsGJfv3fvXnr37k2HDh1o3bo1s2bNKvkFfcaMGcPa\ntWsZMGAAzz77LG+++SY33XQTixcvZvbs2YwbN4527dqxZs2aAP70KlqFRa2hCqlVC/r3l0QwcSK4\nIj/3RYSxY2HZMv9+z3bt4Lnnynz50qVLWbFiBQ0bNqR79+4sWrSIHj16HHp+woQJTJ48mWUlxBkX\nF8dHH31E9erV2b59O126dGHQoEGlDtK+8sorzJkzh/nz51OnTh3e9I1rdevWjUGDBgWkRaBUoei4\nK6akwKZNsHix05GoMNK5c2cSExNxuVy0a9eOdevWlflrrbXcc889tGnThj59+rBp0ya2bNkSuGCV\nqoDIbxEADBoEcXHSKijyjk6FsHK8cw+UKlWqHPo8JiaGvLy8Y1xd3IwZM9i2bRs//PADsbGxNGnS\nROftq5AVHS0CtxsGDoT334f8fKejUREkNjaW3Nzco87v3r2bevXqERsby/z581m/vkzVgEvkdrvx\neDwVCVOpY4qORABw+eWwZQt8/bXTkagIMnr0aNq0aXNosLjQsGHDyMjIoHXr1kyfPp0WLVqc8Gtc\nfvnlTJw4kfbt2+tgsQqIyJ8+WmjfPqhXD668El57zT+BKb/S6YzBpb/vyKfTR49UrRoMHgwffAAH\nDzodjVJKhYzoSQQgrYEdO+Dzz52ORCmlQkZ0JYJ+/aB2bfjXv
5yORCmlQkZ0JYLYWLjsMpg1C8pZ\nFkAppSJVdCUCkO6hnBz4+GOnI1FKqZAQfYmgWzc45RTtHlJKKZ/oSwQuF1xxBfz3v6A7n6kjlFYK\n+kRouWkVLqKjxMSRrrwSnnhCVhr/7W9OR6NCyEsvvVSsFPSgQYOC9hoff/wxF110ES1btvT7ayp1\nLNHXIgBo0wZatZJNa5TyKa0UNMieAH//+9/p1q0bp512GjNnzgS03LQKHI/Hw9SpU7nrrruYOnVq\nQMuMRGeLAGDYMLj7blizBk4/3elo1BGcqEJdWinoQps3byY9PZ3ffvuNQYMGMXToUC03rQIiPT2d\n5ORkCgoK8Hq9xMfHc9ttt5GWllasFLq/RGeLACQRGAPvvON0JCpMXHzxxbhcLlq2bHmopLSWm1b+\n5vF4SE5OxuPx4PV6AfB6vYfOl3dHvLKI3hZB48ZwwQXw9tvwj39IUlAhIwSqUB+laFnqwhpdWm5a\n+VtqaioFBQW+R1cC/wW2A1BQUEBqaiqjRo3y62tGb4sA4OqrpWvo22+djkSFKS03rfxt9erVvpbA\nmcAM4MZDz3m9XjIzM/3+mtGdCC69FKpWlVaBUidAy00rf2vWrBnx8fHAXYAXmHzoufj4eJKSkvz+\nmtFThro0V14Jc+bA5s1QpOmvgk/LIgeX/r5Dk8fjoUGDLni9y4AXgVsPPed2u8nKyip1jcqRtAx1\nWQ0fDjt3Qlqa05EopRRut5v+/ecCULXqy4C0BNxuN2lpaWVOAuURvYPFhfr2hfr1pXtoyBCno1FK\nRblt2yAtrSHDhuXSs+edZGZmkpSUREpKSkCSAGgigEqVpHto8mTIzpYy1Uop5ZBJk2D/frjvvlha\ntPDv7KDSaNcQwDXXQG6uFqJTSjlqzx55TzpkCFRg3kG5VTgRGGPijDFLjDE/GWNWGGMe8p1vaoz5\nzhiTaYxJNcZU9p2v4nuc6Xu+SUVjqLA2baBDBzhiJalSSgXTiy/Crl1S9CCY/NEiOAD0sta2BdoB\n/Y0xXYAngGettUnATqCwjTMK2Ok7/6zvOuddcw38+CP8/LPTkSilopDXC888AwMGQMfjzvPxrwon\nAisK1zzH+g4L9AJm+s6/BVzs+3yw7zG+53ub0gqzBNMVV8gOZtoqUD4PPvggTz311DGvCUbp6Kys\nrDLVH3rssccCGocKrFdege3b4f77g//afhkjMMbEGGOWAVuBucAaYJe1Ns93yUagke/zRsAGAN/z\nuwHnR2jr1IFBg6T2UG6u09GoMghmdcbSBCMRNGzY8FC102PRRBC+cnJg4kTo3Ru6dg3+6/slEVhr\n86217YBEoDNQ4WEOY8xoY0yGMSZjW7A2kLnmmsK5W8F5PXXC0tPTadSoEWPHjuXJJ59k7NixNGrU\niPT09Ap933/+85+cccYZ9OjRg1WrVh06P2XKFDp16kTbtm259NJL2bdvX4mlo0u67kgPPvggw4cP\np2vXrjRr1owpU6YAUr9o3LhxtGrVitatW5OamgrAunXraNWqFQBvvvkml1xyCf3796dZs2bceeed\nAIwfP56cnBzatWvHsGHD8Hq9DBw4kLZt29KqVatD30uFpilTYMsWKXvmCGutXw/gH8A4pEpSJd+5\nrsDnvs8/B7r6Pq/ku84c63ueffbZNihyc62tX9/awYOD83qqmF9//bVM1+3Zs8e63W6LdEEWO9xu\nt/V4PCf0+hkZGbZVq1bW6/Xa3bt329NPP91OnDjRWmvt9u3bD11377332kmTJllrrR0xYoR9//33\nDz1X2nVFPfDAA7ZNmzZ23759dtu2bTYxMdFu2rTJzpw50/bp08fm5eXZP//80zZu3NhmZWXZP/74\nw5511lnWWmvfeOMN27RpU7tr1y6bk5NjTznlFPu///3PWmttfHz8odeYOXOmve666w493rVr11Fx\nlPX3rQIrJ8fahg2tPe88/
39vIMOW4b7tj1lDdY0xNX2fVwX6AiuB+UBhx+YIoHDHjtm+x/ien+cL\n2HmVKslK488+k/SsQlLx6ozFFVZnPBELFy5kyJAhVKtWjerVqxfbneyXX37h3HPPpXXr1syYMYMV\nK1aU+D3Ket3gwYOpWrUqderUoWfPnixZsoT09HSuuOIKYmJiqF+/Pueffz7ff//9UV/bu3dvatSo\nQVxcHC1btiyx0F3r1q2ZO3cud911FwsXLqRGjRon9DtRgTd1KmRlOdgawD9dQw2A+caYn4HvgbnW\n2k+Rikm3GWMykTGAab7rpwG1fedvA8b7IQb/GTkS8vJg+nSnI1GlOFyd8WiBqs54zTXXMHnyZJYv\nX84DDzxQaqnpsl535PyI8syXKFoOOyYmhry8vKOuOeOMM/jxxx9p3bo19913Hw8//HCZv78Knpwc\neOwxOO886NXLuTj8MWvoZ2tte2ttG2ttK2vtw77za621na21Sdba/7PWHvCd3+97nOR7fm1FY/Cr\nM8+E7t1h2jQIkYaKKu5wdcajVaQ643nnncfHH39MTk4OHo+HTz755NBzUgisAbm5ucwossXpkaWj\nS7vuSLNmzWL//v1kZ2fz1Vdf0alTJ84991xSU1PJz89n27ZtLFiwgM6dO5c5/tjYWHJ9Ex2ysrKo\nVq0aV111FePGjePHH38sz69CBcmrr0q9y4cfdnZLFF1ZXJJRo2DVKli0yOlIVAlSUlJwuUr+03W5\nXKSkpJzQ9+3QoQMpKSm0bduWAQMG0KlTp0PPPfLII5xzzjl07969WKnpI0tHl3bdkdq0aUPPnj3p\n0qUL999/Pw0bNmTIkCG0adOGtm3b0qtXL5588klOPvnkMsc/evRo2rRpw7Bhw1i+fDmdO3emXbt2\nPPTQQ9x3330n9DtRgeP1wuOPS0vg/POdjUXLUJdk715o2BAuuUTXFQRRecoil7Snq8vlCtierv70\n4IMPkpCQwB133OFoHFqG2lkTJ8Kdd8LChRCoP9mylqHWonMlSUiQBWZvvw3PPw860BZyevToQVZW\nFqmpqUGpzqiUP+3dC08+Cf36BS4JlIcmgtJcdx289hq8+y6MGeN0NKoECQkJft+7NRgefPBBp0NQ\nDnvuOVlFHCpj+DpGUJqOHaUY3bRpx79W+U04dFVGAv09Oyc7W7qFBg+Gc85xOhqhiaA0xkirICMD\nli51OpqoEBcXR3Z2tt6kAsxaS3Z2NnFxcU6HEpUmTACPB/75T6cjOUwHi49l504ZNB4xQipCqYDK\nzc1l48aNpc69V/4TFxdHYmIisbGxTocSVTZtgqQkuOwyeOut419fUTpY7A+1akFKCsyYIW05t9vp\niCJabGwsTZs2dToMpQLm4YchPx8eesjpSIrTrqHjuf56GeJ/912nI1FKhbHff5chxzFjoEkTp6Mp\nThPB8XTpIoPGr7yiK42VUifsnnugalW4916nIzmaJoLjMUZaBUuXysCxUkqV0+LF8MEHsoCsfn2n\nozmaJoKyGDYMqlWTwiBKKVUO1sIdd0CDBnDbbU5HUzJNBGVRo4asNH73Xdi92+lolFJh5MMP4Ztv\nZKC4lFqJjtNEUFZjxsC+fVqeWilVZgcPwvjxcNZZcO21TkdTOk0EZdWxI3TuDC+9pIPGSqkyeeUV\nyMyEJ56AmBinoymdJoLyuPFG+O03mDfP6UiUUiEuOxsefBD69IHkZKejOTZNBOVx2WVQpw68+KLT\nkSilQtwDD8iQ4rPPOrvpTFloIiiPuDjZtGbWLNiwwelolFIhasUK6RYaMwZatXI6muPTRFBe118v\nYwQ6lVQpVQJr4dZbpSJNqJWSKI0mgvJq2hQGDoQpU+DAAaejUUqFmE8/hblzZXygTh2noykbTQQn\n4qabYOtWeP99pyNRSoWQ/fth7Fg480z429+cjqbsNBGciL59oXlzmDRJp5IqpQ558klYuxY
mT4Zw\nqvCtieBEuFxw883w/ffw7bdOR6OUCgF//AGPPy6TC3v1cjqa8tFEcKJGjJDSE5MmOR2JUioEjB0r\ni8aeftrpSMpPE8GJSkiQqaQzZ8q2Q0qpqJWWBrNnw/33Q2Ki09GUnyaCirjpJigokLITSqmotG+f\nFB1o0UKmjYYjTQQV0bQpDBokawpycpyORinlgEcegXXrZAFZ5cpOR3NiKpwIjDGNjTHzjTG/GmNW\nGGNu8Z0/yRgz1xiz2vexlu+8McZMMsZkGmN+NsZ0qGgMjrrlFikqMmOG05EopYJs+XJ46impLHr+\n+U5Hc+L80SLIA2631rYEugA3GmNaAuOBL621zYAvfY8BBgDNfMdo4GU/xOCc88+H9u3hmWekm0gp\nFRUKCqTQQI0aMm00nFU4EVhrN1trf/R97gFWAo2AwcBbvsveAi72fT4YmG7Ft0BNY0yDisbhGGPg\n9tth5UqYM8fpaJRSQTJlimw48/TT4bOCuDR+HSMwxjQB2gPfAfWttZt9T/0JFO7U2QgoWrFto+/c\nkd9rtDEmwxiTsW3bNn+G6X+XXQaNGoXnvDGlVLlt3Cj7D/fsCVdf7XQ0Fee3RGCMSQA+AMZaa/cU\nfc5aa4FyLcG11r5mre1ore1Yt25df4UZGLGx8Pe/yz4Fy5Y5HY1SKoCshRtugNxcaRWEeonpsvBL\nIjDGxCJJYIa19kPf6S2FXT6+j1t95zcBjYt8eaLvXHgbPVrWFjzzjNORKKUC6N13pbDcP/8Jp5/u\ndDT+4Y9ZQwaYBqy01ha9C84GRvg+HwHMKnL+at/soS7A7iJdSOGrZk1ZYPbuu7rATKkItXWrNP67\ndJGPkcIfLYLuwHCglzFmme9IBiYAfY0xq4E+vscAacBaIBOYAoRRjb7juOUWmUrw/PNOR6KUCoCb\nbwaPB6ZNC+09iMurUkW/gbU2HSitl6x3Cddb4MaKvm5IatoUUlJkZck990grQSkVEVJT4b33pEuo\nZUuno/EvXVnsb3feKW8ZXg7v5RFKqcM2b5b9Bc45R/6LRxpNBP7Wrh1ceKF0D+3f73Q0SqkKshb+\n+lepIvPWW1Cpwv0ooUcTQSDcdRds2SJ/NUqpsPbGG/DZZzBhguxHFYmMDYMdtjp27GgzMjKcDqPs\nrJVpBdnZsGpVZI0qKRVFMjOlgkzHjvDll7InVTgxxvxgre14vOvC7McKE8ZIq2DNGtmvQCkVNjwe\nD1OnTuWOO+6mX7+tVKpkmT49/JJAeURgb1eIuPhiKVD+2GNSgiISlh8qFeHS09NJTk6moKAAr3c8\nUI+4uKtZv340jRv3cDq8gIngHOcwl0umkP78syxDVEqFNI/HQ3JyMh6PB6+3A3AP8Dr7979NcnIy\ne/fudTrEgNFEEEhXXCFrCx59VMYNlFIhKzU1lYKCAuAk4B1k3assHy4oKCA1NdXB6AJLE0EgVaoE\n48fDkiXwxRdOR6OUOobVq1fj9e5DqubXB1IALwBer5fMzEwHowssTQSBNmKElKh+9FGnI1FKHUOz\nZs2oXHk8cBFwB/Djoefi4+NJSkpyKrSA00QQaFWqyFLEBQtg4UKno1FKlaJJkys4ePAhpJDy5GLP\nuVwuUlJSHIkrGDQRBMN110G9evDQQ05HopQqwbZtMHJkPA0a5JOQMJb4+HhAWgJut5u0tDQSEhIc\njjJwdPpoMFSrJusKbr9dWgXnnut0REopn7w8mdexdSssWhRH8+YrSU1NJTMzk6SkJFJSUiI6CYCu\nLA6effvgtNPgrLNkiaJSKiSMHw9PPCGlpUeOdDoa/9KVxaGmsFUwb56MFyilHPfhh5IERo+OvCRQ\nHpoIgmnMGDj5ZHjgAacjUSrqrVghk/o6dYJJk5yOxlmaCIKpalVpFXz1lRxKKUdkZ8OgQbLN+Icf\nyuS+aKaJINiuvx4aNID779fVxko5IDcXhg6VrcU/+gg
SE52OyHmaCIKtalVJAunpMGeO09EoFVWs\nla3Fv/oKpkyRavFKE4EzRo2SGkT33iub3SulguL552UX2XHjYPhwp6MJHZoInFC5siwuW7pUOiiV\nUgH30Udw221w6aWy25g6TNcROCU/H9q0kRbB8uWRuRGqUiHiu++gZ0/5Lzd/vvTQRgNdRxDqYmLg\nkUfgt9/g7bedjkapiJWZKTOETj4ZZs+OniRQHpoInDRkiExifuAB2L/f6WiUijibN0O/ftIA/89/\npOSXOpomAicZI8saN2yAF15wOhqlIsquXdC/v9QQSkuD5s2djih0aSJwWs+ekJwsexvv2OF0NEpF\nhJwcGDwYVq6UQeLOnZ2OKLT5JREYY143xmw1xvxS5NxJxpi5xpjVvo+1fOeNMWaSMSbTGPOzMaaD\nP2IIaxMmwO7d8PjjTkeiVNg7cEB6XRcuhOnToW9fpyMKff5qEbwJ9D/i3HjgS2ttM+BL32OAAUAz\n3zEaeNlPMYSv1q2l6MkLL8D69U5Ho1TYys2FlBT4/HNZMHb55U5HFB78kgistQuAI/s1BiObf+L7\neHGR89Ot+BaoaYxp4I84wtrDD8uYwX33OR2JUmEpL08Wic2aBZMny7pNVTaBHCOob63d7Pv8T2Q3\naIBGwIYi1230nYtujRvDrbfCO+/A9987HY1SYaUwCaSmwpNPwo03Oh1ReAnKYLGVVWvlWrlmjBlt\njMkwxmRs27YtQJGFmLvvhvr1JSGEwUI/pUJBbq7sMPbvf0sSGDfO6YjCTyATwZbCLh/fx62+85uA\nxkWuS/SdK8Za+5q1tqO1tmPdunUDGGYIcbvh0Udh0SJ4/32no1Eq5B08KGMCM2fCM89oEjhRgUwE\ns4ERvs9HALOKnL/aN3uoC7C7SBeSuvZaaNsW7rxTF5kpdQxer6wY/ugj2Vjm1ludjih8+Wv66LvA\nN0BzY8xGY8woYALQ1xizGujjewyQBqwFMoEpwN/8EUPEiImBZ5+V2UPPPut0NEqFpJ07ZVro3Lmy\n1/DNNzsdUXjTonOhasgQ+StftQoa6Vi6UoWysmTF8KpV8O67cMklTkcUurToXLh7+mmZCqGdnkod\nsmKFbCbzxx9SNkKTgH9oIghVp50m4wTvvgsLFjgdjVKOmzcPuneX90cLFkDv3k5HFDk0EYSy8ePh\nlFPgppvkr1+pKPXmm9IdlJgI334L7ds7HVFk0UQQyqpVkzlxy5fDK684HY1SQZefD7ffLpPpzj1X\ntvo+5RSno4o8ui1WqLvkEujTR0pPDB0qu2soFYE8Hg+pqamsXr2aZs2a0b9/Cn/9q5s5c6RR/Mwz\nEBvrdJSRSRNBqDNGCqe0aQN33CElKJSKMOnp6SQnJ1NQUIDX66Vq1c7s398TlyueV191MXq00xFG\nNu0aCgfNm8t4wYwZ8OWXTkejlF95PB6Sk5PxeDx4vV5gODk587G2KpUr9+fKK/c6HWLE00QQLu6+\nG04/HW64QVccq4iSmppKQUEBUA1ZYzod+A5oj8u1mNTUVEfjiwaaCMJFXBy89BKsXi3bWyoVIVav\nXo3XezqQAYwEHgf6Alvxer1kZmY6Gl800EQQTvr1k502HntM9uBTKswVFMD69RcDS4AaSAK4B8gH\nID4+nqSkJOcCjBKaCMLNc89BfDz89a/yv0ipMLV2rWzZnZralZiYL4G2wLxi17hcLlJSUhyJL5po\nIgg39etLMbpFi+Bl3eVThZ+CAunlbNMGli2DN96A+fOr43YfID4+HpCWgNvtJi0tjYSEBIcjjnxa\ndC4cWSvLLBcvluIrusJGhYkVK2D0aPnT7dNHKocW/vnu3buX1NRUMjMzSUpKIiUlRZNABZW16Jwm\ngnC1bh20agXnnQeffSbrDZQKUV6vDG1NnAjVq8visOHD9c820LT6aKRr0kT+Z/3nP1KIRakQZK3s\nI9yihfy5Xn65zHO
4+mpNAqFEE0E4u+kmaRGMHQsbNjgdjVLFLFkCF1wgN/86daRO0PTpEC07z4YT\nTQThzOWSkbb8fLjuOt3wXoWEzEzZR/icc+Td/8svQ0aGlJBWoUkTQbg77TTpeP3vf+G115yORkWx\nP/6AUaOkG+jTT+Ef/4A1a2DMGNmBVYUuTQSRYMwYmYJx++3yP0+pIPr9d1nWcsYZUg7rxhulVfDQ\nQ+B2Ox2dKgtNBJHAGHj9danRe9VVuomNCoolS6QyeosW8Pbb8n5kzRp4/nlo0MDp6FR5aCKIFI0b\ny+Y1334Ljz7qdDQqQh08KO/6u3SRMYAvv5R6iOvXwwsvQKNGTkeoToTuRxBJUlJkTcEjj8CFF0LX\nrk5HpMLAkRvCpKSk4D6iT2fVKml0vvkmbN0KzZrJO/9rr9Xun0igC8oizZ490LatzChaulRW7yhV\niiM3hImPj8flcpGWlkbLlj2YOVO6fdLTZcB34ECphN6vn/yJqdBW1gVl2iKINNWrS9v9vPNkLf+7\n74bUyp2yvPtUwVF0Q5hCXm8s8Bd69tyNMZbcXEPz5jBhgiwC077/yKSJIBJ16ybdQ/fcA717y5SO\nEFDSu8/bbruNtLQ0evTo4XR4USc1NZX8/ALgDGAAMBC4AIglP38Tffv+wuOPt6Z9+5B6L6ECQBNB\npLrrLpg/H/7+dxnZa93a0XBKfvfpBSA5OZmsrKygFhiL5pbJn3/CV1/BCy+0Zt++n4DTfc/8BjwF\nfIS1GXTocBcdOjzuWJwqeBxLBMaY/sDzQAww1Vo7walYIpLLJZ277drBZZfhmTeP1M8+q9CN78AB\nuYn8+Sds2QLbt8OOHXLs3g0ejxz79slumjk5MsskLw+ys/PZu/cHoHBMygJ5QD5ebz7t2nlp3DiB\nKlWgWrXDR0KCHG639HrVqCFHzZqHj1q1ZAO3sgqllkmgE9KBA/DLL/D99/DNN1L1s3DDr6pV2xMT\nM5f8/KeB/wDrDn2dbggTXRwZLDbGxAC/I9sRbQS+B66w1v5a0vU6WFwB8+Zh+/ZlpsvFtZUr4923\nr9iAYNEbn7WwebOUBVi9WuaEr1kD//ufHNu2lfwSMTFyc65eXW7Y1apB1apyc65cWZY3rFjxE7/9\ntqLIV7mQ9wCVgFiaNGlO48bNDiWQffukYqXXC3vLsHd51apw0kly1K5d/KhT5/DHqlW9DBrUDa93\nHbCn2Pdwu91BbZkca6C2vAkpL0/+jX77TUo9r1gBP/0kSaBwWUm9ejKRrFs32RAmKcnDqac2KtZK\nKxTs34W0rWo7AAAUKUlEQVQKjJAuQ22M6Qo8aK290Pf4bgBrbYntUE0EJ87j8fBMnTo8cPAgfwde\nOPRMLNWqdWXChM9ZuTKOZcvg11/lnX2hKlWkgsWpp8oyhcaNoWFDOPlk2R+nbl258SYkHL8PeerU\nqYwdO/ZQd1BR8fHxPP/884waNarEry0okMSwZ4/Et3s37Nolx86dh4/sbGmdZGcXP/LzS4sqF8j2\nHduJidlN167N6dat+aGkUquWfCxshRS2SGJjj/3zHo/H46FRo7LdhHNz5efaulUS9ebNsHGj3PjX\nr5fSDn/8IdcVatBAegM7dJDj7LOhadOj/538mYxU6An1RDAU6G+tvc73eDhwjrX2ppKu10Rw4qZO\nncqtt9zCK/uqEMe53Mx5bKYLcDYg/SnVq8uM09atoWVLOPNMKRfQsKH/pgiW58bnT9ZK4sjOlq6s\niRPf4IMP5gN1gLpAbd9RBziJhIRTOXiwOgcPHvv7xsXJ762w6yo+/nBLqGpVSaKVK8sRE3P4KIxp\n+fJf+PrrReTlWSAW+beoClTD5apOgwYtiI09iR07JAGWpG5d2dSlSROZ19+smfy7nXWWJLCy0g1h\nIlfYTx81xowGRgOcojtwlduBAzL3e8qUJPbuS+cq2gIuqrCfWDLI5UXgO66/vhMvv
TQu4HPCC7cd\nLO3dZ6BuPMYcHks4/XTo3z+fOXM+LLVl8txzzzNy5Cj27ZNWxo4d8rFoK2TPnsNHYdfV3r3SpbVz\np3w8cEDGRw4ckBZJ4VH4jvzAgSTy8hogrZI8YD+QA+RQUOChWrXNdOlyErVqHe7aqldPWmMnnyxJ\nulo1//yOEhISSm2NqShhrQ36AXQFPi/y+G7g7tKuP/vss606vu3brX3rLWuHDLE2Pt5asDYmJs+6\nXF9ZuM+eRg+bTWW7CGxlsPHx8Xbq1KlBjdHj8dipU6fa8ePH26lTp1qPxxPU19+zZ491u90WGa0u\ndrjd7qDFM2XKFBsfH19iHE78u6jIBGTYstyTy3KRvw+kJbIWaApUBn4Czirtek0Epdu509rXX7e2\nb19rY2LkX7RRI2tvuMHaTz6xNiur+I1vqPRM2Glg3QkJQb8Rh4KFCxdat9t96EYcHx9v3W63Xbhw\nYdBiCJWEpCJbWROBI11D1to8Y8xNwOfI1JHXrbUrjvNlyicvDz7/XPak+eQT6YI47TRZOjBkiAwM\nHh4ULN4lM9PrZUJsLONzc7lg5Mio7Avu0aMHWVlZjvaLO9VVplRJtNZQGNmwAV59VYp/bd4sg4XD\nhsGVV0LHjseeuVNsQPC00xgxezaV0tIgLU0K1ClH6ECtCqSQnjVUXtGcCKyFr7+GSZNg1ix5nJws\nO0ENHCizUk7I3r3QowesXSujym3a+DVupZTzypoItH5giMrLg/feg86dZfHPggUwbpzctz/9VLqA\nTjgJgMx5/PRTWQE2cCBkZfktdqVUeNFEEGLy8qTm+5lnyvYCu3fLfjMbNkgFyCZN/PhiiYmSDHbu\nhL/8pWxLeJVSEUcTQYjIz5cE0Lz54c0+PvhAyj1cf70sUgqI9u0hNRWWLYP/+7/iy1OVUlFBE4HD\nrIXZs2Vl77XXyorQ2bPhhx/gkksOr0YNqIEDZRR6zhwYOVJqOiilokbIriyOBj/+CLfcImO1zZrB\n++/DpZc6VPv9uuukpOh990khoaeeciAIpZQTNBEEwZGlhnv1upzHH09g2jQpH/DSS3Ifrmghswq7\n5x6pMf3001LT4O67HQ5IKRUMmggCrHh1x31UrnwzBw8OJSamgFtvdXH//VIHJyQYIzuS79ghSaFa\nNWmyKKUimiaCACq+K1cr4FUOHuwGzKNKlXE89NDXobd4yOWCt96Symljx0pZzeuuczoqpVQA6WBx\nAMmesC7gH8CPyN6wVwO9MWYVqampjsZXqkqVZNP7AQNg9GjZ6UwpFbG0RRBAixZ52LdvHtABmAHc\ngmyCIvv1ZhbuGRiKqlSR+at/+QuMGCEziUaMcDoqpVQAaCIIgIICePZZePvtvyM3/ouBWcWuCYs9\nYatWlbmsF18sc1vz82V6qVIqomjXkJ9lZUH//nDHHZCcXEB8fGeOTAIALpeLlJSU4AdYXtWqSZGj\nCy+UAkevvOJ0REopP9NE4Edz5sjCsEWL4LXXYNasWObMeQe32018fDwgLYHCEsQhN1BcmqpV4aOP\n4KKL4IYbpNaFUipiaNeQH+Tnw0MPwaOPQqtWUiyuRQt5LhRq3/tFXBx8+CFcc42sL9ixA554wqHV\nb0opf9JEUEHbt8MVV8AXX0g3+uTJR+8lGzF7wsbGygyimjVh4kT54V99NQRWwimlKkITQQX89JOM\no27eDNOmRck4qssl2a5OHXj4Ydi0SWpjVK/udGRKqROkYwQn6IMPoFs32SZy4cIoSQKFjJG+sKlT\n4csv4bzzJCEopcKSJoJyslbGAoYOlYHhjAzo1MnpqBwyahR89hmsWQPnnCO/DKVU2NFEUA65uXLv\nu/9+uOoqmD8fGjRwOiqHXXihTJOqVAnOPRf+/W+nI1JKlZMmgjLavVv2Cn7jDfjHP2D6dFl8q5D9\njpcskabRFVdIwbr8fKejUkqVkSaCMtiyBS64A
L76SnYRe+ghnTV5lHr1ZOrUX/8Kjz8uLYVt25yO\nSilVBpoIjmPtWujeHX7/Xbb31XI7x1C5sqykmzZNuos6dIBvvnE6KqXUcWgiOIZffpEksHOnTI65\n8EKnIwoTI0fC4sWSGM49Fx57TLuKlAphmghKsXSpdAe5XLBgAXTp4nREYaZ9e9mLc+hQuPde6NtX\np5gqFaI0EZRgyRLo1Uv2ZFmwAM46y+mIwlSNGrKvweuvw3ffQevW8K9/yRxcpVTI0ERwhG++gT59\n4KSTJAmcfrrTEYU5Y6T2xtKl0Lw5DBsmrYStW52OTCnlU6FEYIz5P2PMCmNMgTGm4xHP3W2MyTTG\nrDLGXFjkfH/fuUxjzPiKvL6/ff+9lJA++WT4+ms49VSnI4ogZ5wB6elSqO7TT6FlS9kSU1sHSjmu\noi2CX4BLgAVFTxpjWgKXA2cB/YGXjDExxpgY4EVgANASuMJ3reOWLoV+/aSEzrx5kJjodEQRKCYG\n7rxTxg7OOEMqmfbpA6tXOx2ZUlGtQonAWrvSWruqhKcGA/+21h6w1v4BZAKdfUemtXattfYg8G/f\ntY5avlzGMqtX1yQQFGedJa2Dl1+GH36Q2t333gt79zodmVJRKVBjBI2ADUUeb/SdK+38UYwxo40x\nGcaYjG0BXJi0dq20BKpUkSSg3UFB4nLBmDGwciWkpMgU0+bNYcYM2etTKRU0x00ExpgvjDG/lHAE\n9J28tfY1a21Ha23HunXrBuQ1/vxTWgIHD8LcuTow7IgGDaRex+LF0LChFHHq2FH+QZRSQXHcRGCt\n7WOtbVXCcfRGvIdtAhoXeZzoO1fa+aDbtUsWiG3ZAmlpMnapHNS1q0wxffttWcHXr5+MHyxe7HRk\nSkW8QHUNzQYuN8ZUMcY0BZoBS4DvgWbGmKbGmMrIgPLsAMVQqgMHYPBg6ZX46COpoKxCgMslLYLf\nfoPnnoOff5al3X36yKYPSqmAqOj00SHGmI1AV+AzY8znANbaFcB7wK/AHOBGa22+tTYPuAn4HFgJ\nvOe7NmgKCmSyyoIFMnuxb99gvroqkypV4JZb4I8/4KmnZDT/vPMkKXz8sZarUMrPjA2DedwdO3a0\nGX7a9GT8eJnKPmEC3HWXX76lCrR9+6SQ3TPPwLp10KwZ3HQTXH217J+slCqRMeYHa23H410XVSuL\nX31VksANN8h0dhUmqlWDm2+W9QbvvQe1a0uLoVEjGD1aaoKEwRsapUJVRLcIPB4PqamprF69mtzc\n85k0aQAXXmiYNUs21FJh7McfZR3CjBmQkwNnnil9fldeqQtBQpG1srvT9u2wYwdkZ8ukgN27ZeaG\nxyPrSPbulRbg/v1yHDwIeXnSHVhQICVLXC45KleWbsQqVaQwWEKCHLVqSY2Yk06C+vWlVECDBrJQ\nKMqUtUUQsYkgPT2d5ORkCgoK8HobAt/hcmWRlraHCy/sGphAVfDt3g3vvy87Bi1aJOe6d5e1CUOG\naFIItPx8mYe9caNUl83KkuPPPw8fW7bIJkW5uaV/n0qVDt/Iq1WDuDg5KleW52Ji5OZvrRz5+ZIk\nDh6UhOH1ShLZs0dmg5SkenU45RQ5Tj9dVrc3ayZTBhMTI3K3qahOBB6Ph0aNGuHxeICawLfASUAn\n3O4dZGVlkZCQEKBolWMyMyE1VbqPfv5ZzrVvD4MGwYABsj4hJsbZGMPN7t2wfv3h43//k2PDBjmy\nso4evK9USd6FFx716slRt67UcKldW96t16olYzw1a8pN319ycqS1kZ0txQ03b5ZjwwaJfd06WLOm\n+Er2mjVlhXv79vJ30qmTLHB0hXfveVQngqlTpzJ27Fi83n3IpKULgF7AIuLj43n++ecZNWpUYIJV\noeG332D2bDkWL5Z3kTVrSn3xXr2gRw/5jx/NicFa6aZZv15ujuvWHf688OPu3cW/pnJlaNxY3lU3\nbixHYqIcj
RrJUadO6N9ArZXWyu+/w6+/ysy0n3+GZcukdQFSRr17d/lbOf98SQ6xsc7GXU5lTQQR\n2VO+evVqvIX/mLwDzACk28Dr9ZKZmelUaCpYWrSQ4847pV/6iy/kmDsXPvxQrqlRQxaRdOok7wI7\ndJAbW6R0EeTlHX4nXPhuvui7+/Xrj67v5HZDkyZSa6VHj8Ofn3qq3Pzr1Qv9m3xZGCPjBg0ayE2+\nUH4+rFolpYgXL5aaWGlp8pzbDT17ykrUiy6S30eEiMhE0KxZM+Lj433J4O1iz8XHx5OUlORMYMoZ\nderA5ZfLYa3cANPTZZHad9/JXOLC7o0aNaSl0LKl9CGfcQYkJcl/+lDpTrRWuj4K++A3bz7cP79p\nk/TXb9wo54/stqlVS36WpCTo3Vtu8E2bHr7h16oVOYnwRMTEyL99y5aHNyjfvh2++kreRMydK63M\nG2+Edu1kZepll4V9aYKI7BoqPkZQnNvt1jECVVxOjnQJLFsmXQTLl8uy8+zs4teddJK0GBo0kL7v\n+vUPz06pVUsShdstM1iqVJFulMqVD89yMUZuzIXHgQMy0HnggHRHFB3w3LVLumV27Dg8y2b7dunz\n3rZN3u0fye2Wek2F3TWNGhXvwjn1VLlGnThrpcXwySeSEBYtknOtWskbjeHDQ6qlENVjBHDkrCEv\n8fHxuFwu0tLS6NGjR4AiVRFlxw5Zu7BmTfFB0i1bDs+GOXgwcK8fEyMJpnBwtU6dw4Ou9eodTkgn\nnyw3fb3JB9/mzfDBBzJJIT1dkn3v3rIr3yWX+HcQ/AREfSIA2Lt3L6mpqWRmZpKUlERKSoq2BJT/\nWCtz3nfskK6awnnwe/fKu/zC6Y0FBXJtQYHc3AuPKlXkRlF0Hnx8vExzrFlTplFGczdNuFm3TurW\nvPWWlEepUwdGjpRy602bOhKSJgKllHJCQQF8+aUseJw1S94EXHIJjBsX9AqXWmJCKaWc4HJJNcsP\nP5RWwl13SWLo0gXOPRc+/zzkSqJoIlBKqUBp3Bgef1zGl557Tmas9e8v+2+kpYVMQtBEoJRSgeZ2\nS6HE1aul+uWff8LAgdJCKCyN4iBNBEopFSxVqkjF3N9/lzGENWtk4d7FF8u0VIdoIlBKqWCrXFlm\nE2VmwqOPwrx50Lq1DCjv2RP0cDQRKKWUU+Lj4d57pcto+HB4+mmpiPrOO0EdP9BEoJRSTqtfX3bh\nW7JE1hwMHw79+kmLIQg0ESilVKjo2FEGj198UZJC69bw+usBf1lNBEopFUpiYuBvf5N6V3/5i3QV\nBVhEVh9VSqmw17ChbLIUBNoiUEqpKKeJQCmlopwmAqWUinKaCJRSKsppIlBKqSiniUAppaKcJgKl\nlIpymgiUUirKhcVWlcaYbcB6p+M4AXWA7U4HEWT6M0eHaPuZw/XnPdVaW/d4F4VFIghXxpiMsuwX\nGkn0Z44O0fYzR/rPq11DSikV5TQRKKVUlNNEEFivOR2AA/Rnjg7R9jNH9M+rYwRKKRXltEWglFJR\nThNBkBhjbjfGWGNMHadjCTRjzERjzG/GmJ+NMR8ZY2o6HVMgGGP6G2NWGWMyjTHjnY4n0IwxjY0x\n840xvxpjVhhjbnE6pmAxxsQYY5YaYz51OpZA0EQQBMaYxkA/4H9OxxIkc4FW1to2wO/A3Q7H43fG\nmBjgRWAA0BK4whjT0tmoAi4PuN1a2xLoAtwYBT9zoVuAlU4HESiaCILjWeBOICoGZKy1/7XW5vke\nfgskOhlPgHQGMq21a621B4F/A4MdjimgrLWbrbU/+j73IDfGRs5GFXjGmERgIDDV6VgCRRNBgBlj\nBgObrLU/OR2LQ0YC/3E6iABoBGwo8ngjUXBTLGSMaQK0B75zNpKgeA55I1fgdCCBonsW+4Ex5gvg\n5BKeuhe4B+kWiijH+pmttbN819yLdCfMCGZsKrCMMQnAB8BYa+0ep+MJJGP
MRcBWa+0PxpgLnI4n\nUDQR+IG1tk9J540xrYGmwE/GGJAukh+NMZ2ttX8GMUS/K+1nLmSMuQa4COhtI3OO8iagcZHHib5z\nEc0YE4skgRnW2g+djicIugODjDHJQBxQ3RjzjrX2Kofj8itdRxBExph1QEdrbTgWryozY0x/4Bng\nfGvtNqfjCQRjTCVkILw3kgC+B6601q5wNLAAMvJu5i1gh7V2rNPxBJuvRXCHtfYip2PxNx0jUIEw\nGXADc40xy4wxrzgdkL/5BsNvAj5HBk3fi+Qk4NMdGA708v27LvO9U1ZhTlsESikV5bRFoJRSUU4T\ngVJKRTlNBEopFeU0ESilVJTTRKCUUlFOE4FSSkU5TQRKKRXlNBEopVSU+3/IMuZGrk1Y9wAAAABJ\nRU5ErkJggg==\n", 165 | "text/plain": [ 166 | "" 167 | ] 168 | }, 169 | "metadata": {}, 170 | "output_type": "display_data" 171 | }, 172 | { 173 | "name": "stdout", 174 | "output_type": "stream", 175 | "text": [ 176 | "a error: [-0.]\n", 177 | "b error: [-0.]\n", 178 | "c error: [ 0.]\n", 179 | "d error: [ 0.]\n" 180 | ] 181 | } 182 | ], 183 | "source": [ 184 | "x_plot = np.linspace(-5,5,100)\n", 185 | "y_est_init = evaluate_cubic(params_init['a'],params_init['b'],params_init['c'],params_init['d'],x_plot)\n", 186 | "y_est_final = evaluate_cubic(params_final['a'],params_final['b'],params_final['c'],params_final['d'],x_plot)\n", 187 | "\n", 188 | "plt.scatter(x_data, y_data, 50., 'k')\n", 189 | "plt.plot(x_plot, y_est_init, 'r')\n", 190 | "plt.plot(x_plot, y_est_final, 'b')\n", 191 | "plt.legend(('initial fit', 'final fit', 'data points'))\n", 192 | "plt.show()\n", 193 | "\n", 194 | "for key in params_true.keys():\n", 195 | " print('{} error: {}'.format(key, params_true[key] - params_final[key]))" 196 | ] 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "metadata": {}, 201 | "source": [ 202 | "# Optional: Compute the covariance of the final parameter estimates\n", 203 | "\n", 204 | "You can compute the covariance of the parameter estimates using `Problem.compute_covariance`. Once you've done that, you can obtain the covariance of any pair of parameters using the convenience method `Problem.get_covariance_block`. If need be, you can access the full covariance matrix `Problem._covariance_matrix`." 
205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": 7, 210 | "metadata": {}, 211 | "outputs": [ 212 | { 213 | "name": "stdout", 214 | "output_type": "stream", 215 | "text": [ 216 | "variance of a: 0.00017205419580419603\n", 217 | "covariance of a and b: -2.2655492231125876e-20\n", 218 | "\n", 219 | "Full covariance matrix:\n", 220 | "[[ 0.00017205 -0. -0.00311184 -0. ]\n", 221 | " [-0. 0.00124261 0. -0.01265625]\n", 222 | " [-0.00311184 0. 0.06610031 0. ]\n", 223 | " [-0. -0.01265625 0. 0.22890625]]\n" 224 | ] 225 | } 226 | ], 227 | "source": [ 228 | "problem.compute_covariance()\n", 229 | "print('variance of a: {}'.format( problem.get_covariance_block('a','a') ))\n", 230 | "print('covariance of a and b: {}'.format( problem.get_covariance_block('a','b') ))\n", 231 | "print('\\nFull covariance matrix:\\n{}'.format( problem._covariance_matrix ))" 232 | ] 233 | } 234 | ], 235 | "metadata": { 236 | "kernelspec": { 237 | "display_name": "Python 3", 238 | "language": "python", 239 | "name": "python3" 240 | }, 241 | "language_info": { 242 | "codemirror_mode": { 243 | "name": "ipython", 244 | "version": 3 245 | }, 246 | "file_extension": ".py", 247 | "mimetype": "text/x-python", 248 | "name": "python", 249 | "nbconvert_exporter": "python", 250 | "pygments_lexer": "ipython3", 251 | "version": "3.6.1" 252 | } 253 | }, 254 | "nbformat": 4, 255 | "nbformat_minor": 2 256 | } 257 | -------------------------------------------------------------------------------- /examples/Pose graph relaxation in SE(2).ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "%matplotlib inline\n", 12 | "\n", 13 | "import numpy as np\n", 14 | "import matplotlib.pyplot as plt" 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "metadata": {}, 20 | "source": [ 21 | "# Create 
data" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": 2, 27 | "metadata": { 28 | "collapsed": true 29 | }, 30 | "outputs": [], 31 | "source": [ 32 | "import copy\n", 33 | "from liegroups import SE2, SO2\n", 34 | "\n", 35 | "params_true = {'T_1_0': SE2.identity(),\n", 36 | " 'T_2_0': SE2(SO2.identity(), -np.array([0.5, 0])),\n", 37 | " 'T_3_0': SE2(SO2.identity(), -np.array([1, 0])),\n", 38 | " 'T_4_0': SE2(SO2.from_angle(np.pi / 2),\n", 39 | " -(SO2.from_angle(np.pi / 2).dot(np.array([1, 0.5])))),\n", 40 | " 'T_5_0': SE2(SO2.from_angle(np.pi),\n", 41 | " -(SO2.from_angle(np.pi).dot(np.array([0.5, 0.5])))),\n", 42 | " 'T_6_0': SE2(SO2.from_angle(-np.pi / 2),\n", 43 | " -(SO2.from_angle(-np.pi / 2).dot(np.array([0.5, 0]))))}\n", 44 | "\n", 45 | "obs = {'T_1_0': params_true['T_1_0'],\n", 46 | " 'T_2_1': params_true['T_2_0'].dot(params_true['T_1_0'].inv()),\n", 47 | " 'T_3_2': params_true['T_3_0'].dot(params_true['T_2_0'].inv()),\n", 48 | " 'T_4_3': params_true['T_4_0'].dot(params_true['T_3_0'].inv()),\n", 49 | " 'T_5_4': params_true['T_5_0'].dot(params_true['T_4_0'].inv()),\n", 50 | " 'T_6_5': params_true['T_6_0'].dot(params_true['T_5_0'].inv()),\n", 51 | " 'T_6_2': params_true['T_6_0'].dot(params_true['T_2_0'].inv())}\n", 52 | "\n", 53 | "params_init = copy.deepcopy(params_true)\n", 54 | "for key in params_init.keys():\n", 55 | " params_init[key] = SE2.exp(5 * np.random.rand(3)).dot(params_init[key])" 56 | ] 57 | }, 58 | { 59 | "cell_type": "markdown", 60 | "metadata": {}, 61 | "source": [ 62 | "# Create residual functions" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": 3, 68 | "metadata": { 69 | "collapsed": true 70 | }, 71 | "outputs": [], 72 | "source": [ 73 | "from pyslam.residuals import PoseResidual, PoseToPoseResidual\n", 74 | "from pyslam.utils import invsqrt\n", 75 | "\n", 76 | "prior_stiffness = invsqrt(1e-12 * np.identity(3))\n", 77 | "odom_stiffness = invsqrt(1e-3 * np.identity(3))\n", 78 | "loop_stiffness 
= invsqrt(1e-3 * np.identity(3))\n", 79 | "\n", 80 | "residual0 = PoseResidual(obs['T_1_0'], prior_stiffness)\n", 81 | "residual0_params = ['T_1_0']\n", 82 | "\n", 83 | "residual1 = PoseToPoseResidual(obs['T_2_1'], odom_stiffness)\n", 84 | "residual1_params = ['T_1_0', 'T_2_0']\n", 85 | "\n", 86 | "residual2 = PoseToPoseResidual(obs['T_3_2'], odom_stiffness)\n", 87 | "residual2_params = ['T_2_0', 'T_3_0']\n", 88 | "\n", 89 | "residual3 = PoseToPoseResidual(obs['T_4_3'], odom_stiffness)\n", 90 | "residual3_params = ['T_3_0', 'T_4_0']\n", 91 | "\n", 92 | "residual4 = PoseToPoseResidual(obs['T_5_4'], odom_stiffness)\n", 93 | "residual4_params = ['T_4_0', 'T_5_0']\n", 94 | "\n", 95 | "residual5 = PoseToPoseResidual(obs['T_6_5'], odom_stiffness)\n", 96 | "residual5_params = ['T_5_0', 'T_6_0']\n", 97 | "\n", 98 | "residual6 = PoseToPoseResidual(obs['T_6_2'], loop_stiffness)\n", 99 | "residual6_params = ['T_2_0', 'T_6_0']" 100 | ] 101 | }, 102 | { 103 | "cell_type": "markdown", 104 | "metadata": {}, 105 | "source": [ 106 | "# Set up and solve the problem" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": 4, 112 | "metadata": {}, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | "output_type": "stream", 117 | "text": [ 118 | " Iter | Initial cost --> Final cost | Rel change\n", 119 | "---------------------------------------------------\n", 120 | " 0 | 9.121549e+12 --> 5.589831e+05 | -1.000000\n", 121 | " 1 | 5.589831e+05 --> 1.025884e+06 | +0.835269\n", 122 | " 2 | 1.025884e+06 --> 1.873103e+05 | -0.817416\n", 123 | " 3 | 1.873103e+05 --> 1.382122e-20 | -1.000000\n", 124 | "\n" 125 | ] 126 | } 127 | ], 128 | "source": [ 129 | "from pyslam.problem import Problem, Options\n", 130 | "\n", 131 | "options = Options()\n", 132 | "options.allow_nondecreasing_steps = True\n", 133 | "options.max_nondecreasing_steps = 3\n", 134 | "\n", 135 | "problem = Problem(options)\n", 136 | "\n", 137 | "problem.add_residual_block(residual0, residual0_params)\n", 
138 | "problem.add_residual_block(residual1, residual1_params)\n", 139 | "problem.add_residual_block(residual2, residual2_params)\n", 140 | "problem.add_residual_block(residual3, residual3_params)\n", 141 | "problem.add_residual_block(residual4, residual4_params)\n", 142 | "problem.add_residual_block(residual5, residual5_params)\n", 143 | "# problem.add_residual_block(residual6, residual6_params)\n", 144 | "\n", 145 | "problem.initialize_params(params_init)\n", 146 | "\n", 147 | "params_final = problem.solve()\n", 148 | "print(problem.summary(format='full'))" 149 | ] 150 | }, 151 | { 152 | "cell_type": "markdown", 153 | "metadata": {}, 154 | "source": [ 155 | "# Check results" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": 5, 161 | "metadata": {}, 162 | "outputs": [ 163 | { 164 | "name": "stdout", 165 | "output_type": "stream", 166 | "text": [ 167 | "Initial Error:\n", 168 | "T_1_0: [-3.57281388 -1.32609006 -1.92862229]\n", 169 | "T_2_0: [-0.20933454 -1.49852304 -0.27623214]\n", 170 | "T_3_0: [ 1.60030389 -1.67503658 2.31333315]\n", 171 | "T_4_0: [ 1.9892008 -3.77873024 1.98757343]\n", 172 | "T_5_0: [ 3.38330382 3.57822363 -0.64490654]\n", 173 | "T_6_0: [-0.9920332 0.54628132 2.27827381]\n", 174 | "\n", 175 | "Final Error:\n", 176 | "T_1_0: [ 0. -0. -0.]\n", 177 | "T_2_0: [ 0. 0. -0.]\n", 178 | "T_3_0: [ 0. -0. -0.]\n", 179 | "T_4_0: [-0. -0. -0.]\n", 180 | "T_5_0: [ 0. -0. -0.]\n", 181 | "T_6_0: [ 0. 0. 
-0.]\n" 182 | ] 183 | } 184 | ], 185 | "source": [ 186 | "print(\"Initial Error:\")\n", 187 | "for key in params_true.keys():\n", 188 | " print('{}: {}'.format(key, SE2.log(params_init[key].inv().dot(params_true[key]))))\n", 189 | "\n", 190 | "print()\n", 191 | "\n", 192 | "print(\"Final Error:\")\n", 193 | "for key in params_true.keys():\n", 194 | " print('{}: {}'.format(key, SE2.log(params_final[key].inv().dot(params_true[key]))))" 195 | ] 196 | }, 197 | { 198 | "cell_type": "markdown", 199 | "metadata": {}, 200 | "source": [ 201 | "# Optional: Compute the covariance of the final parameter estimates" 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": 6, 207 | "metadata": {}, 208 | "outputs": [ 209 | { 210 | "name": "stdout", 211 | "output_type": "stream", 212 | "text": [ 213 | "covariance of T_5_0:\n", 214 | "[[ 0.01275 -0.00025 0.0045 ]\n", 215 | " [-0.00025 0.00425 -0.0005 ]\n", 216 | " [ 0.0045 -0.0005 0.004 ]]\n" 217 | ] 218 | } 219 | ], 220 | "source": [ 221 | "problem.compute_covariance()\n", 222 | "print('covariance of T_5_0:\\n{}'.format( problem.get_covariance_block('T_5_0','T_5_0') ))" 223 | ] 224 | } 225 | ], 226 | "metadata": { 227 | "kernelspec": { 228 | "display_name": "Python 3", 229 | "language": "python", 230 | "name": "python3" 231 | }, 232 | "language_info": { 233 | "codemirror_mode": { 234 | "name": "ipython", 235 | "version": 3 236 | }, 237 | "file_extension": ".py", 238 | "mimetype": "text/x-python", 239 | "name": "python", 240 | "nbconvert_exporter": "python", 241 | "pygments_lexer": "ipython3", 242 | "version": "3.6.2" 243 | } 244 | }, 245 | "nbformat": 4, 246 | "nbformat_minor": 2 247 | } 248 | -------------------------------------------------------------------------------- /examples/dense_stereo_vo_kitti.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import time 4 | from pyslam.visualizers import TrajectoryVisualizer 5 | from pyslam.metrics import 
TrajectoryMetrics
from pyslam.sensors import StereoCamera
from pyslam.pipelines import DenseStereoPipeline
from liegroups import SE3
import pykitti
import numpy as np
import matplotlib
matplotlib.use('Agg')


def run_vo_kitti(basedir, date, drive, frames, outfile=None):
    """Run dense stereo VO on one KITTI raw sequence.

    Returns a TrajectoryMetrics comparing the estimated trajectory against
    the OXTS ground truth; optionally saves it to `outfile` (.mat).
    """
    dataset = pykitti.raw(basedir, date, drive, frames=frames, imformat='cv2')

    # Initial pose: world -> cam0 at the first OXTS packet
    first_oxts = dataset.oxts[0]
    T_cam0_imu = SE3.from_matrix(dataset.calib.T_cam0_imu)
    T_cam0_imu.normalize()
    T_0_w = T_cam0_imu.dot(SE3.from_matrix(first_oxts.T_w_imu).inv())
    T_0_w.normalize()

    # Build the stereo camera model from the grayscale rig calibration
    test_im = np.array(next(dataset.cam0))
    fu = dataset.calib.K_cam0[0, 0]
    fv = dataset.calib.K_cam0[1, 1]
    cu = dataset.calib.K_cam0[0, 2]
    cv = dataset.calib.K_cam0[1, 2]
    b = dataset.calib.b_gray
    h, w = test_im.shape
    camera = StereoCamera(cu, cv, fu, fv, b, w, h)

    # Ground-truth camera trajectory in the world frame
    T_w_c_gt = [SE3.from_matrix(o.T_w_imu).dot(T_cam0_imu.inv())
                for o in dataset.oxts]

    # VO pipeline; drop the highest-resolution pyramid level for speed
    vo = DenseStereoPipeline(camera, first_pose=T_0_w)
    vo.pyrlevel_sequence.pop()
    vo.pyr_cameras.pop()

    start = time.perf_counter()
    for c_idx, impair in enumerate(dataset.gray):
        vo.track(np.array(impair[0]), np.array(impair[1]))
        end = time.perf_counter()
        print('Image {}/{} | {:.3f} s'.format(c_idx, len(dataset), end - start))
        start = end

    # Compare estimated trajectory (camera-to-world) against ground truth
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    # Free the pipeline's image buffers before the next sequence
    del vo

    return tm


def main():
    # KITTI odometry sequences and their raw-data equivalents:
    # Nr.  Sequence name          Start   End
    # ---------------------------------------
    # 00:  2011_10_03_drive_0027  000000  004540
    # 01:  2011_10_03_drive_0042  000000  001100
    # 02:  2011_10_03_drive_0034  000000  004660
    # 03:  2011_09_26_drive_0067  000000  000800
    # 04:  2011_09_30_drive_0016  000000  000270
    # 05:  2011_09_30_drive_0018  000000  002760
    # 06:  2011_09_30_drive_0020  000000  001100
    # 07:  2011_09_30_drive_0027  000000  001100
    # 08:  2011_09_30_drive_0028  001100  005170
    # 09:  2011_09_30_drive_0033  000000  001590
    # 10:  2011_09_30_drive_0034  000000  001200

    basedir = '/path/to/KITTI/raw/'
    outdir = '/path_to/output/'
    os.makedirs(outdir, exist_ok=True)

    seqs = {
        '00': {'date': '2011_10_03', 'drive': '0027', 'frames': range(0, 4541)},
        '01': {'date': '2011_10_03', 'drive': '0042', 'frames': range(0, 1101)},
        '02': {'date': '2011_10_03', 'drive': '0034', 'frames': range(0, 4661)},
        '04': {'date': '2011_09_30', 'drive': '0016', 'frames': range(0, 271)},
        '05': {'date': '2011_09_30', 'drive': '0018', 'frames': range(0, 2761)},
        '06': {'date': '2011_09_30', 'drive': '0020', 'frames': range(0, 1101)},
        '07': {'date': '2011_09_30', 'drive': '0027', 'frames': range(0, 1101)},
        '08': {'date': '2011_09_30', 'drive': '0028', 'frames': range(1100, 5171)},
        '09': {'date': '2011_09_30', 'drive': '0033', 'frames': range(0, 1591)},
        '10': {'date': '2011_09_30', 'drive': '0034', 'frames': range(0, 1201)},
    }

    for seq, cfg in seqs.items():
        date, drive, frames = cfg['date'], cfg['drive'], cfg['frames']

        print('Odometry sequence {} | {} {}'.format(seq, date, drive))
        outfile = os.path.join(outdir, seq + '.mat')
        tm = run_vo_kitti(basedir, date, drive, frames, outfile)

        # Report mean translational/rotational errors
        trans_mean_err, rot_mean_err = tm.mean_err()
        print('trans mean err: {} meters'.format(trans_mean_err))
        print('rot mean err: {} deg'.format(rot_mean_err * 180. / np.pi))

        # Segment-error and top-down trajectory plots
        visualizer = TrajectoryVisualizer({'VO': tm})
        visualizer.plot_segment_errors(
            list(range(100, 801, 100)),
            outfile=os.path.join(outdir, seq + '_err.pdf'))
        visualizer.plot_topdown(
            which_plane='xy', outfile=os.path.join(outdir, seq + '_traj.pdf'))

        # Free large objects before the next sequence
        del visualizer, tm


# Do the thing
main()

# --- examples/posegraph_relax.py ---
import numpy as np

from liegroups import SE3, SO3

from pyslam.residuals import PoseResidual, PoseToPoseResidual
from pyslam.problem import Options, Problem
from pyslam.utils import invsqrt

# Ground-truth poses around a small loop
T_1_0_true = SE3.identity()
T_2_0_true = SE3(SO3.identity(), -np.array([0.5, 0, 0]))
T_3_0_true = SE3(SO3.identity(), -np.array([1, 0, 0]))
T_4_0_true = SE3(SO3.rotz(np.pi / 2),
                 -(SO3.rotz(np.pi / 2).dot(np.array([1, 0.5, 0]))))
T_5_0_true = SE3(SO3.rotz(np.pi),
                 -(SO3.rotz(np.pi).dot(np.array([0.5, 0.5, 0]))))
T_6_0_true = SE3(SO3.rotz(-np.pi / 2),
                 -(SO3.rotz(-np.pi / 2).dot(np.array([0.5, 0, 0]))))

# Odometry measurements (relative transforms between consecutive poses)
T_1_0_obs = SE3.identity()
T_2_1_obs = T_2_0_true.dot(T_1_0_true.inv())
T_3_2_obs = T_3_0_true.dot(T_2_0_true.inv())
T_4_3_obs = T_4_0_true.dot(T_3_0_true.inv())
T_5_4_obs = T_5_0_true.dot(T_4_0_true.inv())
T_6_5_obs = T_6_0_true.dot(T_5_0_true.inv())

# Loop closure measurement
T_6_2_obs = T_6_0_true.dot(T_2_0_true.inv())

# Random start (disabled)
# T_1_0_init = SE3.exp(0.1 * 2
# (continuation of the disabled random initialization)
# T_2_0_init = SE3.exp(0.1 * 2 * (np.random.rand(6) - 0.5)) * T_2_0_true
# T_3_0_init = SE3.exp(0.1 * 2 * (np.random.rand(6) - 0.5)) * T_3_0_true
# T_4_0_init = SE3.exp(0.1 * 2 * (np.random.rand(6) - 0.5)) * T_4_0_true
# T_5_0_init = SE3.exp(0.1 * 2 * (np.random.rand(6) - 0.5)) * T_5_0_true
# T_6_0_init = SE3.exp(0.1 * 2 * (np.random.rand(6) - 0.5)) * T_6_0_true

# Deterministic wrong start: alternate two fixed tangent-space offsets
offset1 = np.array([-0.1, 0.1, -0.1, 0.1, -0.1, 0.1])
offset2 = np.array([0.1, -0.1, 0.1, -0.1, 0.1, -0.1])
T_1_0_init = SE3.exp(offset2).dot(T_1_0_true)
T_2_0_init = SE3.exp(offset1).dot(T_2_0_true)
T_3_0_init = SE3.exp(offset2).dot(T_3_0_true)
T_4_0_init = SE3.exp(offset1).dot(T_4_0_true)
T_5_0_init = SE3.exp(offset2).dot(T_5_0_true)
T_6_0_init = SE3.exp(offset1).dot(T_6_0_true)


# Either a prior on the first pose or holding it constant is needed for
# the resulting linear system to be solvable
prior_stiffness = invsqrt(1e-12 * np.identity(6))
odom_stiffness = invsqrt(1e-3 * np.identity(6))
loop_stiffness = invsqrt(1. * np.identity(6))

options = Options()
options.allow_nondecreasing_steps = True
options.max_nondecreasing_steps = 3

problem = Problem(options)

# Prior on the first pose
problem.add_residual_block(
    PoseResidual(T_1_0_obs, prior_stiffness), ['T_1_0'])

# Odometry chain (same residual ordering as before)
for meas, key_from, key_to in [(T_2_1_obs, 'T_1_0', 'T_2_0'),
                               (T_3_2_obs, 'T_2_0', 'T_3_0'),
                               (T_4_3_obs, 'T_3_0', 'T_4_0'),
                               (T_5_4_obs, 'T_4_0', 'T_5_0'),
                               (T_6_5_obs, 'T_5_0', 'T_6_0')]:
    problem.add_residual_block(
        PoseToPoseResidual(meas, odom_stiffness), [key_from, key_to])

# Loop closure
problem.add_residual_block(
    PoseToPoseResidual(T_6_2_obs, loop_stiffness), ['T_2_0', 'T_6_0'])

params_init = {'T_1_0': T_1_0_init, 'T_2_0': T_2_0_init,
               'T_3_0': T_3_0_init, 'T_4_0': T_4_0_init,
               'T_5_0': T_5_0_init, 'T_6_0': T_6_0_init}
params_true = {'T_1_0': T_1_0_true, 'T_2_0': T_2_0_true,
               'T_3_0': T_3_0_true, 'T_4_0': T_4_0_true,
               'T_5_0': T_5_0_true, 'T_6_0': T_6_0_true}

problem.initialize_params(params_init)

params_final = problem.solve()
print(problem.summary(format='full'))

# Tangent-space errors before and after optimization
print("Initial Error:")
for key in params_true.keys():
    print('{}: {}'.format(
        key, SE3.log(params_init[key].inv().dot(params_true[key]))))

print()

print("Final Error:")
for key in params_true.keys():
    print('{}: {}'.format(
        key, SE3.log(params_final[key].inv().dot(params_true[key]))))

# --- examples/posegraph_relax_2d.py ---
import numpy as np

from liegroups import SE2, SO2

from pyslam.residuals import PoseResidual, PoseToPoseResidual
from pyslam.problem import Options, Problem
from pyslam.utils import invsqrt

# Ground-truth poses around a small loop
T_1_0_true = SE2.identity()
T_2_0_true = SE2(SO2.identity(), -np.array([0.5, 0]))
T_3_0_true = SE2(SO2.identity(), -np.array([1, 0]))
T_4_0_true = SE2(SO2.from_angle(np.pi / 2),
                 -(SO2.from_angle(np.pi / 2).dot(np.array([1, 0.5]))))
T_5_0_true = SE2(SO2.from_angle(np.pi),
                 -(SO2.from_angle(np.pi).dot(np.array([0.5, 0.5]))))
T_6_0_true = SE2(SO2.from_angle(-np.pi / 2),
                 -(SO2.from_angle(-np.pi / 2).dot(np.array([0.5, 0]))))

# Odometry measurements
T_1_0_obs = SE2.identity()
T_2_1_obs = T_2_0_true.dot(T_1_0_true.inv())
T_3_2_obs = T_3_0_true.dot(T_2_0_true.inv())
T_4_3_obs = T_4_0_true.dot(T_3_0_true.inv())
T_5_4_obs = T_5_0_true.dot(T_4_0_true.inv())
T_6_5_obs = T_6_0_true.dot(T_5_0_true.inv())

# Loop closure measurement
T_6_2_obs = T_6_0_true.dot(T_2_0_true.inv())

# Random start (disabled)
# T_1_0_init = SE2.exp(0.1 * 2 * (np.random.rand(3) - 0.5)) * T_1_0_true
# T_2_0_init = SE2.exp(0.1 * 2 * (np.random.rand(3) - 0.5)) * T_2_0_true
# T_3_0_init = SE2.exp(0.1 * 2 * (np.random.rand(3) - 0.5)) * T_3_0_true
# T_4_0_init = SE2.exp(0.1 * 2 * (np.random.rand(3) - 0.5)) * T_4_0_true
# T_5_0_init =
SE2.exp(0.1 * 2 * (np.random.rand(3) - 0.5)) * T_5_0_true 39 | # T_6_0_init = SE2.exp(0.1 * 2 * (np.random.rand(3) - 0.5)) * T_6_0_true 40 | 41 | # Constant wrong start 42 | offset1 = np.array([-0.1, 0.1, -0.1]) 43 | offset2 = np.array([0.1, -0.1, 0.1]) 44 | T_1_0_init = T_1_0_true 45 | T_2_0_init = SE2.exp(offset1).dot(T_2_0_true) 46 | T_3_0_init = SE2.exp(offset2).dot(T_3_0_true) 47 | T_4_0_init = SE2.exp(offset1).dot(T_4_0_true) 48 | T_5_0_init = SE2.exp(offset2).dot(T_5_0_true) 49 | T_6_0_init = SE2.exp(offset1).dot(T_6_0_true) 50 | 51 | 52 | # Either we need a prior on the first pose, or it needs to be held constant 53 | # so that the resulting system of linear equations is solveable 54 | prior_stiffness = invsqrt(1e-12 * np.identity(3)) 55 | odom_stiffness = invsqrt(1e-3 * np.identity(3)) 56 | loop_stiffness = invsqrt(1. * np.identity(3)) 57 | 58 | residual0 = PoseResidual(T_1_0_obs, prior_stiffness) 59 | residual0_params = ['T_1_0'] 60 | 61 | residual1 = PoseToPoseResidual(T_2_1_obs, odom_stiffness) 62 | residual1_params = ['T_1_0', 'T_2_0'] 63 | 64 | residual2 = PoseToPoseResidual(T_3_2_obs, odom_stiffness) 65 | residual2_params = ['T_2_0', 'T_3_0'] 66 | 67 | residual3 = PoseToPoseResidual(T_4_3_obs, odom_stiffness) 68 | residual3_params = ['T_3_0', 'T_4_0'] 69 | 70 | residual4 = PoseToPoseResidual(T_5_4_obs, odom_stiffness) 71 | residual4_params = ['T_4_0', 'T_5_0'] 72 | 73 | residual5 = PoseToPoseResidual(T_6_5_obs, odom_stiffness) 74 | residual5_params = ['T_5_0', 'T_6_0'] 75 | 76 | residual6 = PoseToPoseResidual(T_6_2_obs, loop_stiffness) 77 | residual6_params = ['T_2_0', 'T_6_0'] 78 | 79 | options = Options() 80 | options.allow_nondecreasing_steps = True 81 | options.max_nondecreasing_steps = 3 82 | 83 | problem = Problem(options) 84 | problem.add_residual_block(residual0, residual0_params) 85 | problem.add_residual_block(residual1, residual1_params) 86 | problem.add_residual_block(residual2, residual2_params) 87 | problem.add_residual_block(residual3, 
residual3_params) 88 | problem.add_residual_block(residual4, residual4_params) 89 | problem.add_residual_block(residual5, residual5_params) 90 | problem.add_residual_block(residual6, residual6_params) 91 | 92 | # problem.set_parameters_constant('T_1_0') 93 | # problem.set_parameters_constant('T_3_0') 94 | 95 | params_init = {'T_1_0': T_1_0_init, 'T_2_0': T_2_0_init, 96 | 'T_3_0': T_3_0_init, 'T_4_0': T_4_0_init, 97 | 'T_5_0': T_5_0_init, 'T_6_0': T_6_0_init} 98 | params_true = {'T_1_0': T_1_0_true, 'T_2_0': T_2_0_true, 99 | 'T_3_0': T_3_0_true, 'T_4_0': T_4_0_true, 100 | 'T_5_0': T_5_0_true, 'T_6_0': T_6_0_true} 101 | 102 | problem.initialize_params(params_init) 103 | 104 | params_final = problem.solve() 105 | print(problem.summary(format='full')) 106 | 107 | print("Initial Error:") 108 | for key in params_true.keys(): 109 | print('{}: {}'.format(key, SE2.log( 110 | params_init[key].inv().dot(params_true[key])))) 111 | 112 | print() 113 | 114 | print("Final Error:") 115 | for key in params_true.keys(): 116 | print('{}: {}'.format(key, SE2.log( 117 | params_final[key].inv().dot(params_true[key])))) 118 | -------------------------------------------------------------------------------- /examples/stereo_ba.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from liegroups import SE3 4 | 5 | from pyslam.sensors import StereoCamera 6 | from pyslam.residuals import ReprojectionResidual 7 | from pyslam.problem import Options, Problem 8 | from pyslam.utils import invsqrt 9 | 10 | # Reproducibility 11 | np.random.seed(42) 12 | 13 | # Landmarks (world frame is first camera frame) 14 | pts_w_GT = [ 15 | np.array([0., -1., 10.]), 16 | np.array([1., 1., 5.]), 17 | np.array([-1., 1., 15.]) 18 | ] 19 | 20 | # Trajectory 21 | T_cam_w_GT = [ 22 | SE3.identity(), 23 | SE3.exp(0.1 * np.ones(6)), 24 | SE3.exp(0.2 * np.ones(6)), 25 | SE3.exp(0.3 * np.ones(6)) 26 | ] 27 | 28 | # Camera 29 | camera = StereoCamera(640, 480, 
1000, 1000, 0.25, 1280, 960) 30 | 31 | # Observations 32 | obs_var = [1, 1, 2] # [u,v,d] 33 | obs_stiffness = invsqrt(np.diagflat(obs_var)) 34 | 35 | obs = [[camera.project(T.dot(p)) 36 | for p in pts_w_GT] for T in T_cam_w_GT] 37 | 38 | # Optimize 39 | options = Options() 40 | options.allow_nondecreasing_steps = True 41 | options.max_nondecreasing_steps = 3 42 | 43 | problem = Problem(options) 44 | for i, this_pose_obs in enumerate(obs): 45 | for j, o in enumerate(this_pose_obs): 46 | residual = ReprojectionResidual(camera, o, obs_stiffness) 47 | problem.add_residual_block( 48 | residual, ['T_cam{}_w'.format(i), 'pt{}_w'.format(j)]) 49 | 50 | params_true = {} 51 | params_init = {} 52 | 53 | for i, pt in enumerate(pts_w_GT): 54 | pid = 'pt{}_w'.format(i) 55 | params_true.update({pid: pt}) 56 | params_init.update({pid: camera.triangulate( 57 | obs[0][i] + 10. * np.random.rand(3))}) 58 | 59 | for i, pose in enumerate(T_cam_w_GT): 60 | pid = 'T_cam{}_w'.format(i) 61 | params_true.update({pid: pose}) 62 | params_init.update({pid: SE3.identity()}) 63 | 64 | problem.initialize_params(params_init) 65 | problem.set_parameters_constant('T_cam0_w') 66 | 67 | # import ipdb 68 | # ipdb.set_trace() 69 | params_final = problem.solve() 70 | print(problem.summary(format='full')) 71 | 72 | # Compute errors 73 | print("Initial Error:") 74 | for key in params_true.keys(): 75 | p_est = params_init[key] 76 | p_true = params_true[key] 77 | 78 | if isinstance(p_est, SE3): 79 | err = SE3.log(p_est.inv().dot(p_true)) 80 | else: 81 | err = p_est - p_true 82 | 83 | print('{}: {}'.format(key, err)) 84 | 85 | print() 86 | 87 | print("Final Error:") 88 | for key in params_true.keys(): 89 | p_est = params_final[key] 90 | p_true = params_true[key] 91 | 92 | if isinstance(p_est, SE3): 93 | err = SE3.log(p_est.inv().dot(p_true)) 94 | else: 95 | err = p_est - p_true 96 | 97 | print('{}: {}'.format(key, err)) 98 | -------------------------------------------------------------------------------- 
# --- examples/stereo_ba_frame_to_frame.py ---
import numpy as np

from liegroups import SE3

from pyslam.sensors import StereoCamera
from pyslam.residuals import ReprojectionResidualFrameToFrame
from pyslam.problem import Options, Problem
from pyslam.utils import invsqrt
from collections import OrderedDict

# This example performs frame-to-frame bundle adjustment, optimizing only
# for the relative poses (and not the 3D landmarks)

# Reproducibility
np.random.seed(42)

# Landmarks (world frame is first camera frame)
pts_w_GT = [
    np.array([0., -1., 10.]),
    np.array([1., 1., 5.]),
    np.array([-1., 1., 15.])
]

# Trajectory
T_cam_w_GT = [
    SE3.identity(),
    SE3.exp(0.1 * np.ones(6)),
    SE3.exp(0.2 * np.ones(6)),
    SE3.exp(0.3 * np.ones(6))
]

# Camera
camera = StereoCamera(640, 480, 1000, 1000, 0.25, 1280, 960)

# Observations
obs_var = [1, 1, 2]  # [u,v,d]
obs_stiffness = invsqrt(np.diagflat(obs_var))

# Collect all observations of the landmarks.
# FIX: compose with .dot() as in the other examples — the `*` operator on
# liegroups transforms was deprecated/removed in favor of .dot().
obs = [[camera.project(T.dot(p))
        for p in pts_w_GT] for T in T_cam_w_GT]

# Options
options = Options()
options.allow_nondecreasing_steps = True
options.max_nondecreasing_steps = 3

problem = Problem(options)

# Collect observations in pairs of poses (i.e.
# 0-1, 1-2, 2-3) and add residuals to problem
for i in range(len(obs) - 1):
    pose_1_obs = obs[i]
    pose_2_obs = obs[i + 1]
    for j, o_1 in enumerate(pose_1_obs):
        o_2 = pose_2_obs[j]
        # Each residual constrains only the relative pose between frames
        residual = ReprojectionResidualFrameToFrame(
            camera, o_1, o_2, obs_stiffness)
        problem.add_residual_block(
            residual, ['T_cam{}_cam{}'.format(i + 1, i)])

params_true = OrderedDict({})
params_init = OrderedDict({})

# Initialize the relative SE(3) transforms.
# FIX: compose with .dot() as in the other examples — the `*` operator on
# liegroups transforms was deprecated/removed in favor of .dot().
for i in range(len(obs) - 1):
    T_c1_w = T_cam_w_GT[i]
    T_c2_w = T_cam_w_GT[i + 1]
    pid = 'T_cam{}_cam{}'.format(i + 1, i)
    params_true.update({pid: T_c2_w.dot(T_c1_w.inv())})
    params_init.update({pid: SE3.identity()})

problem.initialize_params(params_init)
params_final = problem.solve()

print()

# Compute errors (tangent-space error of each relative pose)
print("Initial Error:")
for key in params_true.keys():
    p_est = params_init[key]
    p_true = params_true[key]

    err = SE3.log(p_est.inv().dot(p_true))
    print('{}: {}'.format(key, err))

print()

print("Final Error:")
for key in params_true.keys():
    p_est = params_final[key]
    p_true = params_true[key]

    err = SE3.log(p_est.inv().dot(p_true))
    print('{}: {}'.format(key, err))

# --- examples/stereo_image_align.py ---
import numpy as np
import matplotlib.pyplot as plt

import cv2
import pykitti

from liegroups import SE3
from pyslam.problem import Options, Problem
from pyslam.sensors import StereoCamera
from pyslam.residuals import PhotometricResidualSE3
from pyslam.losses import HuberLoss
import time

# Load KITTI data
basedir = '/path/to/KITTI/raw/'
date = '2011_09_30'
drive = '0016'
frame_range = range(0, 2)
dataset = pykitti.raw(basedir, date, drive, frames=frame_range)

# Parameters to estimate: the relative pose between the first two frames
T_cam0_imu = SE3.from_matrix(dataset.calib.T_cam0_imu)
T_cam0_imu.normalize()
T_0_w = T_cam0_imu.dot(SE3.from_matrix(dataset.oxts[0].T_w_imu).inv())
T_0_w.normalize()
T_1_w = T_cam0_imu.dot(SE3.from_matrix(dataset.oxts[1].T_w_imu).inv())
T_1_w.normalize()
T_1_0_true = T_1_w.dot(T_0_w.inv())

# params_init = {'T_1_0': T_1_0_true}
params_init = {'T_1_0': SE3.identity()}

# Coarse-to-fine pyramid levels (coarsest first)
pyrlevels = [3, 2, 1]

params = params_init

options = Options()
options.allow_nondecreasing_steps = True
options.max_nondecreasing_steps = 3
options.min_cost_decrease = 0.99

for pyrlevel in pyrlevels:
    pyrfactor = 1. / 2**pyrlevel

    # Disparity computation parameters.
    # FIX: np.int was an alias of the builtin int and was removed in
    # NumPy 1.24; use the builtin int and max() instead.
    window_size = 5
    min_disp = 1
    max_disp = max(16, int(64 * pyrfactor)) + min_disp

    # Use semi-global block matching
    stereo = cv2.StereoSGBM_create(
        minDisparity=min_disp,
        numDisparities=max_disp - min_disp,
        blockSize=window_size)

    # Downsample both images of each stereo pair to the current level
    impairs = []
    for imL, imR in dataset.gray:
        imL = np.array(imL)
        imR = np.array(imR)
        for _ in range(pyrlevel):
            imL = cv2.pyrDown(imL)
            imR = cv2.pyrDown(imR)
        impairs.append([imL, imR])

    # SGBM returns fixed-point disparities scaled by 16
    disp = []
    for ims in impairs:
        d = stereo.compute(ims[0], ims[1])
        disp.append(d.astype(float) / 16.)
77 | 78 | for i, d in enumerate(disp): 79 | disp[i][d < min_disp + 1] = np.nan 80 | disp[i][d > max_disp] = np.nan 81 | 82 | # Compute image jacobians 83 | im_jac = [] 84 | for ims in impairs: 85 | gradx = cv2.Sobel(ims[0], -1, 1, 0) 86 | grady = cv2.Sobel(ims[0], -1, 0, 1) 87 | # gradx = cv2.Scharr(ims[0], -1, 1, 0) 88 | # grady = cv2.Scharr(ims[0], -1, 0, 1) 89 | im_jac.append(np.array([gradx.astype(float) / 255., 90 | grady.astype(float) / 255.])) 91 | 92 | # Create the camera 93 | fu = dataset.calib.K_cam0[0, 0] * pyrfactor 94 | fv = dataset.calib.K_cam0[1, 1] * pyrfactor 95 | cu = dataset.calib.K_cam0[0, 2] * pyrfactor 96 | cv = dataset.calib.K_cam0[1, 2] * pyrfactor 97 | b = dataset.calib.b_gray 98 | w = impairs[0][0].shape[1] 99 | h = impairs[0][0].shape[0] 100 | camera = StereoCamera(cu, cv, fu, fv, b, w, h) 101 | camera.compute_pixel_grid() 102 | # Create the cost function 103 | im_ref = impairs[0][0].astype(float) / 255. #left image of first frame 104 | disp_ref = disp[0] 105 | im_track = impairs[1][0].astype(float) / 255. #left image of second frame 106 | jac_ref = im_jac[0] 107 | residual = PhotometricResidualSE3(camera, im_ref, disp_ref, im_track, jac_ref, 1., 1.) 
108 | 109 | # Timing debug 110 | # niters = 100 111 | # start = time.perf_counter() 112 | # for _ in range(niters): 113 | # cost.evaluate([params_init['T_1_0']]) 114 | # end = time.perf_counter() 115 | # print('cost.evaluate avg {} s', (end - start) / niters) 116 | 117 | # start = time.perf_counter() 118 | # for _ in range(niters): 119 | # cost.evaluate([params_init['T_1_0']], [True]) 120 | # end = time.perf_counter() 121 | # print('cost.evaluate w jac avg {} s', (end - start) / niters) 122 | 123 | # Optimize 124 | # start = time.perf_counter() 125 | 126 | problem = Problem(options) 127 | problem.add_residual_block(residual, ['T_1_0']) 128 | problem.initialize_params(params) 129 | params = problem.solve() 130 | 131 | # end = time.perf_counter() 132 | # print('Elapsed time: {} s'.format(end - start)) 133 | 134 | print('Error in T_1_w: {}'.format( 135 | SE3.log(T_1_w.dot((params['T_1_0'].dot(T_0_w)).inv())))) 136 | -------------------------------------------------------------------------------- /pyslam/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utiasSTARS/pyslam/5c0de9eaed4ebc7687b6d43079481c9a9336145b/pyslam/__init__.py -------------------------------------------------------------------------------- /pyslam/losses.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numba import vectorize, float32, float64 3 | 4 | # Numba-vectorized is faster than boolean indexing 5 | NUMBA_COMPILATION_TARGET = 'parallel' 6 | 7 | 8 | class L2Loss: 9 | 10 | def loss(self, x): 11 | return 0.5 * x * x 12 | 13 | def influence(self, x): 14 | return x 15 | 16 | def weight(self, x): 17 | return np.ones(x.size) 18 | 19 | 20 | class L1Loss: 21 | 22 | def loss(self, x): 23 | return np.abs(x) 24 | 25 | def influence(self, x): 26 | infl = np.sign(x) 27 | infl[np.isclose(np.abs(x), 0.)] = np.nan 28 | return infl 29 | 30 | def weight(self, x): 31 | 
wght = 1. / np.abs(x) 32 | wght[np.isclose(np.abs(x), 0.)] = np.nan 33 | return wght 34 | 35 | 36 | class CauchyLoss: 37 | 38 | def __init__(self, k): 39 | self.k = k 40 | 41 | def loss(self, x): 42 | return _cauchy_loss(self.k, x) 43 | 44 | def influence(self, x): 45 | return _cauchy_infl(self.k, x) 46 | 47 | def weight(self, x): 48 | return _cauchy_wght(self.k, x) 49 | 50 | 51 | @vectorize([float32(float32, float32), 52 | float64(float64, float64)], 53 | nopython=True, cache=True, 54 | target=NUMBA_COMPILATION_TARGET) 55 | def _cauchy_loss(k, x): 56 | return (0.5 * k**2) * np.log(1. + (x / k)**2) 57 | 58 | 59 | @vectorize([float32(float32, float32), 60 | float64(float64, float64)], 61 | nopython=True, cache=True, 62 | target=NUMBA_COMPILATION_TARGET) 63 | def _cauchy_infl(k, x): 64 | return x / (1. + (x / k)**2) 65 | 66 | 67 | @vectorize([float32(float32, float32), 68 | float64(float64, float64)], 69 | nopython=True, cache=True, 70 | target=NUMBA_COMPILATION_TARGET) 71 | def _cauchy_wght(k, x): 72 | return 1. / (1. 
class HuberLoss:
    """Huber robust loss: quadratic for |x| <= k, linear beyond."""

    def __init__(self, k):
        # Transition point between the quadratic and linear regimes
        self.k = k

    def loss(self, x):
        """Evaluate the loss (numba-vectorized)."""
        return _huber_loss(self.k, x)

    def influence(self, x):
        """Evaluate the influence function (derivative of the loss).

        Bug fix: previously returned the vectorized function object
        `_huber_infl` itself instead of calling it.
        """
        return _huber_infl(self.k, x)

    def weight(self, x):
        """Evaluate the IRLS weight, influence(x) / x."""
        return _huber_wght(self.k, x)
class TDistributionLoss:
    """Robust loss derived from the Student's t-distribution.

    The parameter k is the number of degrees of freedom of the
    distribution.
    """

    def __init__(self, k):
        # t-distribution degrees of freedom
        self.k = k

    def loss(self, x):
        """Evaluate the loss (numba-vectorized)."""
        return _tdist_loss(self.k, x)

    def influence(self, x):
        """Evaluate the influence function (derivative of the loss)."""
        return _tdist_infl(self.k, x)

    def weight(self, x):
        """Evaluate the IRLS weight, influence(x) / x."""
        return _tdist_wght(self.k, x)
/ (k + x * x) 215 | -------------------------------------------------------------------------------- /pyslam/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.io 3 | 4 | from liegroups import SE2, SE3 5 | 6 | 7 | class TrajectoryMetrics: 8 | """Class for computing metrics on SE2/SE3 trajectories. 9 | 10 | convention='Twv' -- poses are vehicle-to-world transforms 11 | convention='Tvw' -- poses are world-to-vehicle transforms 12 | (will be converted to Twv internally) 13 | """ 14 | 15 | def __init__(self, poses_gt, poses_est, convention='Twv'): 16 | if convention == 'Twv': 17 | Twv_gt = poses_gt 18 | Twv_est = poses_est 19 | elif convention == 'Tvw': 20 | Twv_gt = [T.inv() for T in poses_gt] 21 | Twv_est = [T.inv() for T in poses_est] 22 | else: 23 | raise ValueError('convention must be \'Tvw\' or \'Twv\'') 24 | 25 | if len(Twv_gt) != len(Twv_est): 26 | valid_length = min((len(Twv_gt), len(Twv_est))) 27 | 28 | print('WARNING: poses_gt has length {} but poses_est has length {}. 
Truncating to {}.'.format( 29 | len(Twv_gt), len(Twv_est), valid_length)) 30 | Twv_gt = Twv_gt[:valid_length] 31 | Twv_est = Twv_est[:valid_length] 32 | 33 | self.convention = convention 34 | """Were the input poses Twv or Tvw?""" 35 | self.Twv_gt = Twv_gt 36 | """List of ground truth vehicle-to-world poses.""" 37 | self.Twv_est = Twv_est 38 | """List of estimated vehicle-to-world poses.""" 39 | self.pose_type = type(Twv_gt[0]) 40 | """SE2 or SE3?""" 41 | 42 | self.num_poses = len(self.Twv_gt) 43 | """Number of poses""" 44 | 45 | self.rel_dists, self.cum_dists = self._compute_distances() 46 | """Relative and cumulative distances traveled at each pose""" 47 | 48 | def _compute_distances(self): 49 | """Returns relative and cumulative distances traveled at each pose""" 50 | pos_gt = np.empty([len(self.Twv_gt), 3]) 51 | for p_idx, Twv in enumerate(self.Twv_gt): 52 | pos_gt[p_idx, :] = Twv.trans 53 | 54 | rel_dist_gt = np.linalg.norm(np.diff(pos_gt[:, 0:3], axis=0), axis=1) 55 | rel_dist_gt = np.append([0.], rel_dist_gt) 56 | cum_dist_gt = np.cumsum(rel_dist_gt) 57 | return rel_dist_gt, cum_dist_gt 58 | 59 | def _convert_meters(self, meters, unit): 60 | """Convert meters to unit ['m', 'dm', 'cm', 'mm']""" 61 | scale = { 62 | 'm': 1., 63 | 'dm': 10., 64 | 'cm': 100., 65 | 'mm': 1000. 66 | }[unit] 67 | 68 | return scale * meters 69 | 70 | def _convert_radians(self, radians, unit): 71 | """Convert radians to unit ['rad', 'deg']""" 72 | scale = { 73 | 'rad': 1, 74 | 'deg': 180. / np.pi 75 | }[unit] 76 | 77 | return scale * radians 78 | 79 | def savemat(self, filename, extras=None): 80 | """Save trajectory data to a .mat file. 81 | Saved poses will use the same convention as the input poses. 
82 | 83 | Args: 84 | filename : path of file to save 85 | extras : optional dictionary of extra arrays to save 86 | """ 87 | # Convert poses to matrices using original convention 88 | if self.convention == 'Twv': 89 | poses_gt = [T.as_matrix() for T in self.Twv_gt] 90 | poses_est = [T.as_matrix() for T in self.Twv_est] 91 | elif self.convention == 'Tvw': 92 | poses_gt = [T.inv().as_matrix() for T in self.Twv_gt] 93 | poses_est = [T.inv().as_matrix() for T in self.Twv_est] 94 | 95 | # Permute the dimensions to native MATLAB ordering (NxMxM --> MxMxN) 96 | poses_gt = np.transpose(np.array(poses_gt), [1, 2, 0]) 97 | poses_est = np.transpose(np.array(poses_est), [1, 2, 0]) 98 | 99 | # Save to file 100 | mdict = {'poses_gt': poses_gt, 101 | 'poses_est': poses_est, 102 | 'convention': self.convention, 103 | 'pose_type': self.pose_type.__name__, 104 | 'num_poses': self.num_poses, 105 | 'rel_dists': self.rel_dists, 106 | 'cum_dists': self.cum_dists} 107 | if extras is not None: 108 | mdict.update(extras) 109 | 110 | scipy.io.savemat(filename, mdict, do_compression=True) 111 | 112 | @classmethod 113 | def loadmat(cls, filename): 114 | """Load trajectory data from a .mat file. 
115 | 116 | Args: 117 | filename : path of file to load 118 | """ 119 | # Load from file 120 | mdict = scipy.io.loadmat( 121 | filename, verify_compressed_data_integrity=True) 122 | 123 | # Unpack data 124 | if mdict['pose_type'] == 'SE2': 125 | pose_type = SE2 126 | elif mdict['pose_type'] == 'SE3': 127 | pose_type = SE3 128 | else: 129 | raise ValueError( 130 | 'Got invalid pose type: {}'.format(mdict['pose_type'])) 131 | 132 | num_poses = int(mdict['num_poses']) 133 | poses_gt = mdict['poses_gt'] 134 | poses_est = mdict['poses_est'] 135 | 136 | # Convert to liegroups objects and create TrajectoryMetrics object 137 | poses_gt = [pose_type.from_matrix(poses_gt[:, :, i], normalize=True) 138 | for i in range(num_poses)] 139 | poses_est = [pose_type.from_matrix(poses_est[:, :, i], normalize=True) 140 | for i in range(num_poses)] 141 | 142 | tm = cls(poses_gt, poses_est, convention=mdict['convention']) 143 | tm.mdict = mdict 144 | 145 | return tm 146 | 147 | def endpoint_error(self, segment_range=None, trans_unit='m', rot_unit='rad'): 148 | """Returns translational and rotational error at the endpoint of a segment""" 149 | if segment_range is None: 150 | segment_range = range(len(self.Twv_gt)) 151 | 152 | pose_delta_gt = self.Twv_gt[segment_range[0]].inv().dot( 153 | self.Twv_gt[segment_range[-1]]) 154 | pose_delta_est = self.Twv_est[segment_range[0]].inv().dot( 155 | self.Twv_est[segment_range[-1]]) 156 | 157 | pose_err = pose_delta_est.inv().dot(pose_delta_gt) 158 | trans_err = np.linalg.norm(pose_err.trans) 159 | rot_err = np.linalg.norm(pose_err.rot.log()) 160 | 161 | return self._convert_meters(trans_err, trans_unit), self._convert_radians(rot_err, rot_unit) 162 | 163 | def segment_errors(self, segment_lengths, trans_unit='m', rot_unit='rad'): 164 | """Compute endpoint errors and average endpoint errors 165 | all possible segments of specified lengths in meters. 
    def traj_errors(self, segment_range=None, trans_unit='m', rot_unit='rad'):
        """Returns translational (m) and rotational (rad) errors
        in all degrees of freedom
        """
        # Default: evaluate over the full trajectory
        if segment_range is None:
            segment_range = range(len(self.Twv_gt))

        trans_err = []
        rot_err = []

        for p_idx in segment_range:
            # Pose of frame p_idx relative to the segment's first GT pose
            pose_delta_gt = self.Twv_gt[segment_range[0]].inv().dot(
                self.Twv_gt[p_idx])
            # NOTE(review): the estimated delta is anchored to the
            # ground-truth start pose (Twv_gt), whereas endpoint_error()
            # anchors the estimate to Twv_est -- confirm this asymmetry
            # is intentional.
            pose_delta_est = self.Twv_gt[segment_range[0]].inv().dot(
                self.Twv_est[p_idx])

            # Per-DOF error between estimated and ground truth deltas
            pose_err = pose_delta_est.inv().dot(pose_delta_gt)
            trans_err.append(pose_err.trans)
            rot_err.append(pose_err.rot.log())

        trans_err = np.array(trans_err)
        rot_err = np.array(rot_err)

        return self._convert_meters(trans_err, trans_unit), self._convert_radians(rot_err, rot_unit)
270 | 271 | Notes: 272 | error_type='traj' computes errors relative to ground truth for N T_wv poses (with respect to T_wv[0]) 273 | error_type='rel' computes errors relative to ground truth over N-1 consecutive frame-to-frame transforms 274 | 275 | """ 276 | trans_norms, rot_norms = self.error_norms( 277 | segment_range, trans_unit, rot_unit, error_type) 278 | return np.mean(trans_norms), np.mean(rot_norms) 279 | 280 | def cum_err(self, segment_range=None, trans_unit='m', rot_unit='rad', error_type='traj'): 281 | """Cumulative sum of the rotation and translation error magnitudes over the entire trajectory. 282 | 283 | Notes: 284 | error_type='traj' computes errors relative to ground truth for N T_wv poses (with respect to T_wv[0]) 285 | error_type='rel' computes errors relative to ground truth over N-1 consecutive frame-to-frame transforms 286 | """ 287 | trans_norms, rot_norms = self.error_norms( 288 | segment_range, trans_unit, rot_unit, error_type) 289 | return np.cumsum(trans_norms), np.cumsum(rot_norms) 290 | 291 | def rms_err(self, segment_range=None, trans_unit='m', rot_unit='rad', error_type='traj', delta=1): 292 | """RMS of the rotation and translation error magnitudes over the entire trajectory. 
__all__ = []

import importlib
import inspect
import pkgutil

# Re-export every public member of every module in this package, so that
# e.g. pipeline classes are reachable directly from pyslam.pipelines
# without knowing the submodule name.
for _loader, _modname, _is_pkg in pkgutil.walk_packages(__path__):
    # importlib.import_module replaces the deprecated
    # loader.find_module(name).load_module(name) API (removed in
    # Python 3.12). Also avoids shadowing the member-name loop variable.
    _module = importlib.import_module('.' + _modname, __name__)

    for _member_name, _member in inspect.getmembers(_module):
        if _member_name.startswith('__'):
            continue

        globals()[_member_name] = _member
        __all__.append(_member_name)
"""Optimizer parameters for motion estimation""" 29 | 30 | # Default optimizer parameters for motion estimation 31 | self.motion_options.allow_nondecreasing_steps = True 32 | self.motion_options.max_nondecreasing_steps = 5 33 | self.motion_options.min_cost_decrease = 0.99 34 | self.motion_options.max_iters = 30 35 | self.motion_options.num_threads = 1 36 | self.motion_options.linesearch_max_iters = 0 37 | 38 | self.pyrlevels = 4 39 | """Number of image pyramid levels for coarse-to-fine optimization""" 40 | self.pyrlevel_sequence = list(range(self.pyrlevels)) 41 | self.pyrlevel_sequence.reverse() 42 | 43 | self.keyframe_trans_thresh = 3.0 # meters 44 | """Translational distance threshold to drop new keyframes""" 45 | self.keyframe_rot_thresh = 0.3 # rad 46 | """Rotational distance threshold to drop new keyframes""" 47 | 48 | self.intensity_stiffness = 1. / 0.01 49 | """Photometric measurement stiffness""" 50 | self.depth_stiffness = 1. / 0.01 51 | """Depth or disparity measurement stiffness""" 52 | self.min_grad = 0.1 53 | """Minimum image gradient magnitude to use a given pixel""" 54 | self.depth_map_type = 'depth' 55 | """Is the depth map depth, inverse depth, disparity? ['depth','disparity'] supported""" 56 | self.mode = 'map' 57 | """Create new keyframes or localize against existing ones? ['map'|'track']""" 58 | 59 | self.use_motion_model_guess = True 60 | """Use constant motion model for initial guess.""" 61 | 62 | # self.loss = L2Loss() 63 | self.loss = HuberLoss(10.0) 64 | # self.loss = TukeyLoss(5.0) 65 | # self.loss = CauchyLoss(5.0) 66 | # self.loss = TDistributionLoss(5.0) # Kerl et al. 
ICRA 2013 67 | # self.loss = TDistributionLoss(3.0) 68 | """Loss function""" 69 | 70 | self._make_pyramid_cameras() 71 | 72 | def _make_pyramid_cameras(self): 73 | self.pyr_cameras = [] 74 | 75 | for pyrlevel in self.pyrlevel_sequence: 76 | pyrfactor = 2**-pyrlevel 77 | 78 | pyr_camera = self.camera.clone() 79 | pyr_camera.fu *= pyrfactor 80 | pyr_camera.fv *= pyrfactor 81 | pyr_camera.cu *= pyrfactor 82 | pyr_camera.cv *= pyrfactor 83 | pyr_camera.h = int(np.ceil(pyr_camera.h * pyrfactor)) 84 | pyr_camera.w = int(np.ceil(pyr_camera.w * pyrfactor)) 85 | pyr_camera.compute_pixel_grid() 86 | 87 | self.pyr_cameras.append(pyr_camera) 88 | 89 | def set_mode(self, mode): 90 | """Set the localization mode to ['map'|'track']""" 91 | self.mode = mode 92 | if self.mode == 'track': 93 | self.active_keyframe_idx = 0 94 | self.T_c_w = [] 95 | 96 | def track(self, trackframe, guess=None): 97 | """ Track an image. 98 | 99 | Args: 100 | trackframe : frame to track 101 | guess : optional initial guess of the motion 102 | """ 103 | if len(self.keyframes) == 0: 104 | # First frame, so don't track anything yet 105 | trackframe.compute_pyramids() 106 | self.keyframes.append(trackframe) 107 | self.active_keyframe_idx = 0 108 | active_keyframe = self.keyframes[0] 109 | else: 110 | # Default behaviour for second frame and beyond 111 | active_keyframe = self.keyframes[self.active_keyframe_idx] 112 | 113 | if guess is None: 114 | # Default initial guess is previous pose relative to keyframe 115 | if len(self.T_c_w) == 0: 116 | # We just started relocalizing 117 | guess = SE3.identity() 118 | else: 119 | guess = self.T_c_w[-1].dot(active_keyframe.T_c_w.inv()) 120 | # Better initial guess is previous pose + previous motion 121 | if self.use_motion_model_guess and len(self.T_c_w) > 1: 122 | guess = self.T_c_w[-1].dot(self.T_c_w[-2].inv().dot(guess)) 123 | else: 124 | guess = guess.dot(active_keyframe.T_c_w.inv()) 125 | 126 | # Estimate pose change from keyframe to tracking frame 127 | 
T_track_ref = self._compute_frame_to_frame_motion( 128 | active_keyframe, trackframe, guess) 129 | T_track_ref.normalize() # Numerical instability problems otherwise 130 | self.T_c_w.append(T_track_ref.dot(active_keyframe.T_c_w)) 131 | 132 | # Threshold the distance from the active keyframe to drop a new one 133 | se3_vec = SE3.log(T_track_ref) 134 | trans_dist = np.linalg.norm(se3_vec[0:3]) 135 | rot_dist = np.linalg.norm(se3_vec[3:6]) 136 | 137 | if trans_dist > self.keyframe_trans_thresh or \ 138 | rot_dist > self.keyframe_rot_thresh: 139 | if self.mode is 'map': 140 | trackframe.T_c_w = self.T_c_w[-1] 141 | trackframe.compute_pyramids() 142 | self.keyframes.append(trackframe) 143 | 144 | print('Dropped new keyframe. ' 145 | 'Trans dist was {:.3f}. Rot dist was {:.3f}.'.format( 146 | trans_dist, rot_dist)) 147 | 148 | self.active_keyframe_idx += 1 149 | print('Active keyframe idx: {}'.format( 150 | self.active_keyframe_idx)) 151 | 152 | def _compute_frame_to_frame_motion(self, ref_frame, track_frame, 153 | guess=SE3.identity()): 154 | # params = {'T_1_0': guess} 155 | params = {'R_1_0': guess.rot, 't_1_0_1': guess.trans} 156 | 157 | for (pyrlevel, pyr_camera) in zip( 158 | self.pyrlevel_sequence, self.pyr_cameras): 159 | pyrfactor = 2**-pyrlevel 160 | 161 | im_jacobian = ref_frame.jacobian[pyrlevel] 162 | # ESM 163 | # im_jacobian = 0.5 * (ref_frame.jacobian[pyrlevel] + 164 | # track_frame.jacobian[pyrlevel]) 165 | 166 | if self.depth_map_type is 'disparity': 167 | depth_ref = ref_frame.disparity[pyrlevel] 168 | # Disparity is in pixels, so we need to scale it according to the pyramid level 169 | depth_stiffness = self.depth_stiffness / pyrfactor 170 | else: 171 | depth_ref = ref_frame.depth[pyrlevel] 172 | depth_stiffness = self.depth_stiffness 173 | 174 | residual = PhotometricResidualSE3(pyr_camera, 175 | ref_frame.im_pyr[pyrlevel], 176 | depth_ref, 177 | track_frame.im_pyr[pyrlevel], 178 | im_jacobian, 179 | self.intensity_stiffness, 180 | depth_stiffness, 
181 | self.min_grad) 182 | 183 | problem = Problem(self.motion_options) 184 | # problem.add_residual_block(residual, ['T_1_0'], loss=self.loss) 185 | problem.add_residual_block( 186 | residual, ['R_1_0', 't_1_0_1'], loss=self.loss) 187 | problem.initialize_params(params) 188 | 189 | if pyrlevel > 2: 190 | problem.set_parameters_constant('t_1_0_1') 191 | # else: 192 | # problem.set_parameters_constant('R_1_0') 193 | 194 | params = problem.solve() 195 | # print(problem.summary(format='brief')) 196 | 197 | # # DEBUG: Store residuals for later 198 | # try: 199 | # self.residuals = np.hstack( 200 | # [self.residuals, residual.evaluate([guess])]) 201 | # except AttributeError: 202 | # self.residuals = residual.evaluate([guess]) 203 | 204 | # return params['T_1_0'] 205 | return SE3(params['R_1_0'], params['t_1_0_1']) 206 | 207 | 208 | class DenseStereoPipeline(DenseVOPipeline): 209 | """Dense stereo VO pipeine""" 210 | 211 | def __init__(self, camera, first_pose=SE3.identity()): 212 | super().__init__(camera, first_pose) 213 | self.depth_map_type = 'disparity' 214 | self.depth_stiffness = 1 / 0.5 215 | 216 | def track(self, im_left, im_right, guess=None): 217 | if len(self.keyframes) == 0: 218 | # First frame, so create first keyframe with given initial pose 219 | trackframe = DenseStereoKeyframe(im_left, im_right, self.pyrlevels, 220 | self.T_c_w[0]) 221 | else: 222 | # Default behaviour for second frame and beyond 223 | trackframe = DenseStereoKeyframe(im_left, im_right, self.pyrlevels) 224 | 225 | super().track(trackframe, guess) 226 | 227 | 228 | class DenseRGBDPipeline(DenseVOPipeline): 229 | """Dense RGBD VO pipeline""" 230 | 231 | def __init__(self, camera, first_pose=SE3.identity()): 232 | super().__init__(camera, first_pose) 233 | self.depth_map_type = 'depth' 234 | self.depth_stiffness = 1 / 0.01 235 | 236 | def track(self, image, depth, guess=None): 237 | if len(self.keyframes) == 0: 238 | # First frame, so create first keyframe with given initial pose 239 | 
class Keyframe:
    """Keyframe base class.

    Bundles raw frame data with the keyframe's world-to-camera pose.
    """

    def __init__(self, data, T_c_w=SE3.identity()):
        # Pose is stored world-to-camera
        self.T_c_w = T_c_w
        """Keyframe pose, world-to-camera."""
        # Raw frame data; subclasses pack a tuple such as (im_left, im_right)
        self.data = data
        """Image data (tuple or list)"""
class DenseRGBDKeyframe(DenseKeyframe):
    """Dense RGBD keyframe.

    Stores data = (intensity image, depth map) and builds the per-level
    pyramids needed for direct (photometric) alignment.
    """

    def __init__(self, image, depth, pyrlevels=0, T_c_w=SE3.identity()):
        super().__init__((image, depth), image, pyrlevels, T_c_w)

    def compute_depth_pyramid(self):
        """Build a depth pyramid by 2x decimation of the full-resolution map.

        Depth values are not rescaled across levels (unlike disparity in
        DenseStereoKeyframe, which is measured in pixels).
        """
        # Fix: removed an unused cv2.StereoBM_create() call (leftover from
        # the stereo keyframe implementation) and an unused pyr_factor local.
        self.depth = []

        # Decimate the full-resolution depth map once per pyramid level
        depth = self.data[1]

        for pyrlevel in range(self.pyrlevels):
            if pyrlevel == 0:
                self.depth = [depth]
            else:
                depth = depth[0::2, 0::2]
                self.depth.append(depth)

    def compute_pyramids(self):
        """Compute the image-gradient and depth pyramids."""
        self.compute_jacobian_pyramid()
        self.compute_depth_pyramid()
@guvectorize([(float64[:, :], float64[:, :], float64[:], float64[:, :])],
             '(n,m),(n,m), (p) ->(p,p)', nopython=True, cache=True,
             target='parallel')
def compute_transform_fast(pts_1, pts_2, dummy, out):
    """Computes SE(3) transformation from a set of rigid 3D point clouds using SVD (See Barfoot's Textbook)

    Args:
        pts_1 : (n, 3) points expressed in frame 1
        pts_2 : (n, 3) corresponding points expressed in frame 2
        dummy : shape-only argument of length 4; guvectorize cannot
                allocate an output whose dimension does not appear in
                the inputs, so the caller passes SE3_SHAPE
        out   : (4, 4) homogeneous transform T_21 written in place
    """

    numPts = pts_1.shape[0]

    # This form of np.mean is required for guvectorize (the optional axis parameter is not supported)
    centroid_1 = np.array(
        [np.mean(pts_1[:, 0]), np.mean(pts_1[:, 1]), np.mean(pts_1[:, 2])])
    centroid_2 = np.array(
        [np.mean(pts_2[:, 0]), np.mean(pts_2[:, 1]), np.mean(pts_2[:, 2])])

    # Center both clouds on their centroids before estimating rotation
    pts_1_c = pts_1 - centroid_1
    pts_2_c = pts_2 - centroid_2

    # Cross-covariance of the two centered clouds
    W = (1.0/numPts)*np.dot(pts_2_c.T, pts_1_c)

    # NOTE: np.linalg.svd returns V transposed, so 'V' here is V^T
    U, _, V = np.linalg.svd(W)
    # Reflection guard: forces det(C_21) = +1 (a proper rotation)
    S = np.identity(3)
    S[2, 2] = np.linalg.det(U)*np.linalg.det(V)

    C_21 = U.dot(S).dot(V)
    # Translation that maps frame-1 centroid onto frame-2 centroid
    r_2_12 = centroid_2 - C_21.dot(centroid_1)

    # Each element must be set individually (numba guvectorize writes
    # into the preallocated output array)
    out[0, 0] = C_21[0, 0]
    out[0, 1] = C_21[0, 1]
    out[0, 2] = C_21[0, 2]
    out[0, 3] = r_2_12[0]

    out[1, 0] = C_21[1, 0]
    out[1, 1] = C_21[1, 1]
    out[1, 2] = C_21[1, 2]
    out[1, 3] = r_2_12[1]

    out[2, 0] = C_21[2, 0]
    out[2, 1] = C_21[2, 1]
    out[2, 2] = C_21[2, 2]
    out[2, 3] = r_2_12[2]

    out[3, 0] = 0.
    out[3, 1] = 0.
    out[3, 2] = 0.
    out[3, 3] = 1.
/ z 80 | # x_t = fu * x * one_over_z + cu 81 | # y_t = fv * y * one_over_z + cv 82 | # z_t = fu * b * one_over_z 83 | 84 | # error = (x_t - obs_2[i, 0])*(x_t - obs_2[i, 0]) + ( 85 | # (y_t - obs_2[i, 1])*(y_t - obs_2[i, 1])) + ( 86 | # (z_t - obs_2[i, 2])*(z_t - obs_2[i, 2])) 87 | 88 | # out[i] = error < inlier_thresh[0] 89 | 90 | 91 | class FrameToFrameRANSAC: 92 | def __init__(self, camera): 93 | self.camera = camera 94 | self.ransac_iters = 400 95 | self.ransac_thresh = 5 # (1**2 + 1**2 + 1**2) 96 | self.num_min_set_pts = 3 97 | 98 | def set_obs(self, obs_1, obs_2): 99 | self.obs_1 = np.atleast_2d(obs_1) 100 | self.obs_2 = np.atleast_2d(obs_2) 101 | 102 | self.pts_1 = self.camera.triangulate(self.obs_1) 103 | self.pts_2 = self.camera.triangulate(self.obs_2) 104 | 105 | self.num_pts = self.pts_1.shape[0] 106 | 107 | def perform_ransac(self): 108 | """Main RANSAC Routine""" 109 | max_inliers = 0 110 | 111 | # Select random ids for minimal sets 112 | rand_idx = np.random.randint(self.num_pts, size=( 113 | self.ransac_iters, self.num_min_set_pts)) 114 | pts_1_sample_stacked = self.pts_1[rand_idx] 115 | pts_2_sample_stacked = self.pts_2[rand_idx] 116 | 117 | # Parallel transform computation 118 | # Compute transforms in parallel 119 | # start = time.perf_counter() 120 | T_21_stacked = compute_transform_fast( 121 | pts_1_sample_stacked, pts_2_sample_stacked, SE3_SHAPE) 122 | # end = time.perf_counter() 123 | # print('comp, transform | {}'.format(end - start)) 124 | 125 | # Parallel cost computation 126 | # start = time.perf_counter() 127 | # inlier_masks_stacked = compute_ransac_cost_fast( 128 | # T_21_stacked, self.pts_1, self.obs_2, cam_params, inlier_thresh) 129 | inlier_masks_stacked = self.compute_ransac_cost( 130 | T_21_stacked, self.pts_1, self.obs_2, self.camera, self.ransac_thresh) 131 | 132 | # end = time.perf_counter() 133 | # print('comp, masks | {}'.format(end - start)) 134 | 135 | # start = time.perf_counter() 136 | inlier_nums = 
np.sum(inlier_masks_stacked, axis=1) 137 | most_inliers_idx = np.argmax(inlier_nums) 138 | T_21_best = SE3.from_matrix(T_21_stacked[most_inliers_idx, :, :]) 139 | max_inliers = inlier_nums[most_inliers_idx] 140 | inlier_indices_best = np.where( 141 | inlier_masks_stacked[most_inliers_idx, :])[0] 142 | # end = time.perf_counter() 143 | # print('comp, rest | {}'.format(end - start)) 144 | 145 | if max_inliers < 5: 146 | raise ValueError( 147 | " RANSAC failed to find more than 5 inliers. Try adjusting the thresholds.") 148 | 149 | # print('After {} RANSAC iters, found best transform with {} / {} inliers.'.format( 150 | # self.ransac_iters, max_inliers, self.num_pts)) 151 | 152 | obs_1_inliers = self.obs_1[inlier_indices_best] 153 | obs_2_inliers = self.obs_2[inlier_indices_best] 154 | 155 | return (T_21_best, obs_1_inliers, obs_2_inliers, inlier_indices_best) 156 | 157 | def compute_ransac_cost(self, T_21_stacked, pts_1, obs_2, camera, inlier_thresh): 158 | out = [] 159 | for T_21 in T_21_stacked: 160 | pts_2_pred = T_21[:3, :3].dot(pts_1.T).T + T_21[:3, 3] 161 | obs_2_pred = camera.project(pts_2_pred) 162 | error = ((obs_2_pred - obs_2) ** 2).sum(axis=1) 163 | out.append(np.atleast_2d(error < inlier_thresh)) 164 | 165 | return np.concatenate(out, axis=0) 166 | -------------------------------------------------------------------------------- /pyslam/pipelines/sparse.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import numpy as np 3 | 4 | import cv2 5 | 6 | from liegroups import SE3 7 | import viso2 8 | 9 | from pyslam.problem import Options, Problem 10 | from pyslam.sensors import StereoCamera, RGBDCamera 11 | from pyslam.residuals import ReprojectionMotionOnlyBatchResidual 12 | from pyslam.losses import L2Loss 13 | 14 | from pyslam.pipelines.keyframes import SparseStereoKeyframe, SparseRGBDKeyframe 15 | from pyslam.pipelines.ransac import FrameToFrameRANSAC 16 | 17 | 18 | class SparseVOPipeline: 19 | """Base 
class SparseVOPipeline:
    """Base class for sparse VO pipelines.

    Matches sparse features between incoming frames and the active
    keyframe, estimates the inter-frame motion, and drops a new keyframe
    whenever the motion exceeds the configured translational or
    rotational thresholds.
    """

    def __init__(self, camera, first_pose=None):
        """Set up the pipeline.

        Args:
            camera: stereo or RGB-D camera model.
            first_pose: SE3 pose of the first camera frame. Defaults to
                SE3.identity(), created lazily here to avoid the shared
                mutable-default-argument pitfall.
        """
        if first_pose is None:
            first_pose = SE3.identity()

        self.camera = camera                 # camera model
        self.first_pose = first_pose         # pose of the first camera frame
        self.keyframes = []                  # list of keyframes
        self.T_c_w = [first_pose]            # list of camera poses
        self.motion_options = Options()      # optimizer params for motion estimation

        # Default optimizer parameters for motion estimation
        self.motion_options.allow_nondecreasing_steps = True
        self.motion_options.max_nondecreasing_steps = 5
        self.motion_options.min_cost_decrease = 0.99
        self.motion_options.max_iters = 30
        self.motion_options.num_threads = 1
        self.motion_options.linesearch_max_iters = 0

        # Distance thresholds to drop new keyframes
        self.keyframe_trans_thresh = 3.0  # meters
        self.keyframe_rot_thresh = 0.3    # rad

        self.matcher_params = viso2.Matcher_parameters()   # libviso2 matcher params
        self.matcher = viso2.Matcher(self.matcher_params)  # libviso2 matcher
        self.matcher_mode = 0  # libviso2 matching mode 0=flow 1=stereo 2=quad

        self.ransac = FrameToFrameRANSAC(self.camera)  # RANSAC outlier rejection

        self.reprojection_stiffness = np.diag([1., 1., 1.])  # reprojection stiffness
        self.mode = 'map'  # create new keyframes ('map') or localize ('track')

        self.loss = L2Loss()  # loss function

    def set_mode(self, mode):
        """Set the localization mode to ['map'|'track']."""
        self.mode = mode
        if self.mode == 'track':
            self.active_keyframe_idx = 0
            self.T_c_w = []

    def track(self, trackframe):
        """Track an image.

        Args:
            trackframe : frame to track
        """
        if len(self.keyframes) == 0:
            # First frame, so don't track anything yet
            self.keyframes.append(trackframe)
            self.active_keyframe_idx = 0
            active_keyframe = self.keyframes[0]
        else:
            # Default behaviour for second frame and beyond
            active_keyframe = self.keyframes[self.active_keyframe_idx]

            # Estimate pose change from keyframe to tracking frame
            T_track_ref = self._compute_frame_to_frame_motion(
                active_keyframe, trackframe)
            T_track_ref.normalize()  # Numerical instability problems otherwise
            self.T_c_w.append(T_track_ref.dot(active_keyframe.T_c_w))

            # Threshold the distance from the active keyframe to drop a new one
            se3_vec = SE3.log(T_track_ref)
            trans_dist = np.linalg.norm(se3_vec[0:3])
            rot_dist = np.linalg.norm(se3_vec[3:6])

            if trans_dist > self.keyframe_trans_thresh or \
                    rot_dist > self.keyframe_rot_thresh:
                # BUG FIX: was `self.mode is 'map'` -- identity comparison
                # of str literals is a CPython interning accident; use `==`.
                if self.mode == 'map':
                    trackframe.T_c_w = self.T_c_w[-1]
                    self.keyframes.append(trackframe)

                    print('Dropped new keyframe. '
                          'Trans dist was {:.3f}. Rot dist was {:.3f}.'.format(
                              trans_dist, rot_dist))

                self.active_keyframe_idx += 1
                print('Active keyframe idx: {}'.format(
                    self.active_keyframe_idx))


class SparseStereoPipeline(SparseVOPipeline):
    """Sparse stereo VO pipeline."""

    def __init__(self, camera, first_pose=None):
        super().__init__(camera, first_pose)
        self.matcher_mode = 2  # stereo quad matching
        self.matcher.setIntrinsics(camera.fu, camera.cu, camera.cv, camera.b)

    def track(self, im_left, im_right):
        """Track a new stereo pair."""
        if len(self.keyframes) == 0:
            # First frame, so create first keyframe with given initial pose
            trackframe = SparseStereoKeyframe(im_left, im_right, self.T_c_w[0])
        else:
            # Default behaviour for second frame and beyond
            trackframe = SparseStereoKeyframe(im_left, im_right)

        super().track(trackframe)

    def _compute_frame_to_frame_motion(self, ref_frame, track_frame):
        """Estimate T_track_ref from matched stereo (uvd) features."""
        # Get feature matches
        self.matcher.pushBack(ref_frame.im_left, ref_frame.im_right)
        self.matcher.pushBack(track_frame.im_left, track_frame.im_right)
        self.matcher.matchFeatures(self.matcher_mode)
        matches = self.matcher.getMatches()

        # Stereo observations (u, v, disparity).
        # BUG FIX: these were plain lists, so the boolean-mask indexing
        # below raised TypeError; wrap in np.array as the RGB-D pipeline
        # does. reshape keeps the (0, 3) shape when there are no matches.
        self.obs_0 = np.array(
            [[m.u1p, m.v1p, m.u1p - m.u2p] for m in matches]).reshape(-1, 3)
        self.obs_1 = np.array(
            [[m.u1c, m.v1c, m.u1c - m.u2c] for m in matches]).reshape(-1, 3)

        # Prune all observations with disparity <= 0
        keep_mask = (self.obs_0[:, 2] > 0) & (self.obs_1[:, 2] > 0)
        self.obs_0 = self.obs_0[keep_mask, :]
        self.obs_1 = self.obs_1[keep_mask, :]

        # RANSAC gives an initial guess and an inlier set
        self.ransac.set_obs(self.obs_0, self.obs_1)
        T_1_0_guess, obs_0_inliers, obs_1_inliers, _ = \
            self.ransac.perform_ransac()

        # Refine the pose estimate on the inliers only
        residual = ReprojectionMotionOnlyBatchResidual(
            self.camera, obs_0_inliers, obs_1_inliers,
            self.reprojection_stiffness)

        problem = Problem(self.motion_options)
        problem.add_residual_block(residual, ['T_1_0'], loss=self.loss)

        problem.initialize_params({'T_1_0': T_1_0_guess})
        params = problem.solve()

        return params['T_1_0']


class SparseRGBDPipeline(SparseVOPipeline):
    """Sparse RGBD VO pipeline."""

    def __init__(self, camera, first_pose=None):
        super().__init__(camera, first_pose)
        self.matcher_mode = 0  # mono-to-mono flow matching

    def track(self, image, depth):
        """Track a new RGB-D frame."""
        if len(self.keyframes) == 0:
            # First frame, so create first keyframe with given initial pose
            trackframe = SparseRGBDKeyframe(image, depth, self.T_c_w[0])
        else:
            # Default behaviour for second frame and beyond
            trackframe = SparseRGBDKeyframe(image, depth)

        super().track(trackframe)

    def _compute_frame_to_frame_motion(self, ref_frame, track_frame):
        """Estimate T_track_ref from matched monocular features plus depth."""
        # Get feature matches
        self.matcher.pushBack(ref_frame.image)
        self.matcher.pushBack(track_frame.image)
        self.matcher.matchFeatures(self.matcher_mode)
        matches = self.matcher.getMatches()

        # RGB-D observations (u, v, z); reshape keeps (0, 3) when empty
        self.obs_0 = np.array(
            [[m.u1p, m.v1p, ref_frame.depth[int(m.v1p), int(m.u1p)]]
             for m in matches]).reshape(-1, 3)
        self.obs_1 = np.array(
            [[m.u1c, m.v1c, track_frame.depth[int(m.v1c), int(m.u1c)]]
             for m in matches]).reshape(-1, 3)

        # Prune all observations with depth <= 0
        keep_mask = (self.obs_0[:, 2] > 0) & (self.obs_1[:, 2] > 0)
        self.obs_0 = self.obs_0[keep_mask, :]
        self.obs_1 = self.obs_1[keep_mask, :]

        # RANSAC gives an initial guess and an inlier set
        self.ransac.set_obs(self.obs_0, self.obs_1)
        T_1_0_guess, obs_0_inliers, obs_1_inliers, _ = \
            self.ransac.perform_ransac()

        # Refine the pose estimate on the inliers only
        residual = ReprojectionMotionOnlyBatchResidual(
            self.camera, obs_0_inliers, obs_1_inliers,
            self.reprojection_stiffness)

        problem = Problem(self.motion_options)
        problem.add_residual_block(residual, ['T_1_0'], loss=self.loss)

        problem.initialize_params({'T_1_0': T_1_0_guess})
        params = problem.solve()

        return params['T_1_0']
import copy
import concurrent.futures

import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as splinalg

from pyslam.losses import L2Loss


class Options:
    """Container of tunable optimization options for the solver."""

    def __init__(self):
        # Maximum number of iterations before terminating.
        self.max_iters = 100
        # Minimum update norm before terminating.
        self.min_update_norm = 1e-6
        # Minimum cost value before terminating.
        self.min_cost = 1e-12
        # Minimum cost decrease factor to continue optimization.
        self.min_cost_decrease = 0.9

        # Factor by which the line search step size shrinks each iteration.
        self.linesearch_alpha = 0.8
        # Maximum number of line search steps (0 disables the line search).
        self.linesearch_max_iters = 10
        # Minimum cost decrease factor to continue the line search.
        self.linesearch_min_cost_decrease = 0.9

        # Enable non-decreasing steps to escape local minima.
        self.allow_nondecreasing_steps = False
        # Maximum number of non-decreasing steps before terminating.
        self.max_nondecreasing_steps = 3

        # Number of threads for residual and jacobian evaluation.
        self.num_threads = 1
class Problem:
    """Class for building and solving nonlinear least-squares problems."""

    def __init__(self, options=None):
        """Create an empty problem.

        Args:
            options: Options instance controlling the solver. A fresh
                Options() is created when omitted (avoids the shared
                mutable-default pitfall of `options=Options()`).
        """
        # Optimization options.
        self.options = Options() if options is None else options

        # Dictionary of all parameters with their current values.
        self.param_dict = dict()

        # List of residual blocks.
        self.residual_blocks = []
        # List of parameter keys in param_dict that each block depends on.
        self.block_param_keys = []
        # List of loss functions applied to each block. Default: L2Loss.
        self.block_loss_functions = []

        # List of parameter keys in param_dict to be held constant.
        self.constant_param_keys = []

        # Autogenerated dict of update vector ranges for each parameter.
        self._update_partition_dict = {}
        # Covariance matrix of the final parameter estimates.
        self._covariance_matrix = None
        # History of cost values at each iteration of solve.
        self._cost_history = []

        if self.options.num_threads > 1:
            # Thread pool for parallel residual/jacobian evaluations.
            self._thread_pool = concurrent.futures.ThreadPoolExecutor(
                max_workers=self.options.num_threads)

    def add_residual_block(self, block, param_keys, loss=None):
        """Add a residual (cost) block to the problem.

        Args:
            block: object exposing evaluate(params[, compute_jacobians]).
            param_keys: key or list of keys into param_dict used by block.
            loss: robust loss applied to the block (default: L2Loss()).
        """
        # param_keys must be a list, but don't force the user to create a
        # 1-element list
        if isinstance(param_keys, str):
            param_keys = [param_keys]

        self.residual_blocks.append(block)
        self.block_param_keys.append(param_keys)
        # Instantiate the default lazily instead of using a shared
        # mutable default argument evaluated at class-definition time.
        self.block_loss_functions.append(L2Loss() if loss is None else loss)

    def initialize_params(self, param_dict):
        """Initialize the parameters in the problem."""
        # update does a shallow copy, which is no good for immutable
        # parameters
        self.param_dict.update(copy.deepcopy(param_dict))

    def set_parameters_constant(self, param_keys):
        """Hold a list of parameters constant."""
        # param_keys must be a list, but don't force the user to create a
        # 1-element list
        if isinstance(param_keys, str):
            param_keys = [param_keys]

        for key in param_keys:
            if key not in self.constant_param_keys:
                self.constant_param_keys.append(key)

    def set_parameters_variable(self, param_keys):
        """Allow a list of parameters to vary."""
        # param_keys must be a list, but don't force the user to create a
        # 1-element list
        if isinstance(param_keys, str):
            param_keys = [param_keys]

        for key in param_keys:
            if key in self.constant_param_keys:
                self.constant_param_keys.remove(key)

    def eval_cost(self, param_dict=None):
        """Evaluate the total cost using the given (or current) parameters.

        Raises:
            KeyError: if a residual block references a parameter that has
                not been initialized.
        """
        if param_dict is None:
            param_dict = self.param_dict

        cost = 0.
        for block, keys, loss in zip(self.residual_blocks,
                                     self.block_param_keys,
                                     self.block_loss_functions):
            try:
                params = [param_dict[key] for key in keys]
            except KeyError as e:
                # BUG FIX: previously this only printed and then fell
                # through to use an unbound/stale `params`; fail loudly.
                raise KeyError(
                    "Parameter {} has not been initialized".format(
                        e.args[0])) from e

            residual = block.evaluate(params)
            cost += np.sum(loss.loss(residual))

        return cost

    def solve(self):
        """Solve the problem using Gauss-Newton.

        Returns:
            The parameter dictionary holding the optimized values.
        """
        self._update_partition_dict = self._get_update_partition_dict()

        cost = self.eval_cost()
        dx = np.array([100])  # dummy so the first norm check cannot trigger

        optimization_iters = 0
        nondecreasing_steps_taken = 0
        self._cost_history = [cost]

        done_optimization = False

        while not done_optimization:
            optimization_iters += 1
            prev_cost = cost

            dx, cost = self.solve_one_iter()

            # Update cost history
            self._cost_history.append(cost)

            # Update parameters
            for k, r in self._update_partition_dict.items():
                self._perturb_by_key(k, dx[r])

            # Check if done optimizing (>= so that at most max_iters
            # iterations run; `>` used to allow one extra).
            done_optimization = \
                optimization_iters >= self.options.max_iters or \
                np.linalg.norm(dx) < self.options.min_update_norm or \
                cost < self.options.min_cost

            if self.options.allow_nondecreasing_steps:
                if nondecreasing_steps_taken == 0:
                    # Remember the best parameters seen so far
                    best_params = copy.deepcopy(self.param_dict)

                if cost >= self.options.min_cost_decrease * prev_cost:
                    nondecreasing_steps_taken += 1
                else:
                    nondecreasing_steps_taken = 0

                if nondecreasing_steps_taken \
                        >= self.options.max_nondecreasing_steps:
                    done_optimization = True
                    self.param_dict.update(best_params)
            else:
                done_optimization = done_optimization or \
                    cost >= self.options.min_cost_decrease * prev_cost

        return self.param_dict

    def solve_one_iter(self):
        """Solve one iteration of Gauss-Newton.

        Returns:
            (dx, cost): scaled update vector and its associated cost.
        """
        # precision * dx = information
        precision, information, cost = \
            self._get_precision_information_and_cost()
        dx = splinalg.spsolve(precision, information)

        # Backtracking line search along dx, if enabled
        if self.options.linesearch_max_iters > 0:
            best_step_size, best_cost = self._do_line_search(dx)
        else:
            best_step_size, best_cost = 1., cost

        return best_step_size * dx, best_cost

    def compute_covariance(self):
        """Compute the covariance matrix after solve has terminated."""
        try:
            # Re-evaluate the precision matrix with the final parameters
            precision, _, _ = self._get_precision_information_and_cost()
            self._covariance_matrix = splinalg.inv(
                precision.tocsc()).toarray()
        except Exception as e:
            # Deliberately best-effort: a singular precision matrix should
            # not crash the caller, only leave the covariance unset.
            print('Covariance computation failed!\n{}'.format(e))

    def get_covariance_block(self, param0, param1):
        """Get the covariance block corresponding to two parameters."""
        try:
            p0_range = self._update_partition_dict[param0]
            p1_range = self._update_partition_dict[param1]
            return np.squeeze(self._covariance_matrix[
                p0_range.start:p0_range.stop, p1_range.start:p1_range.stop])
        except KeyError as e:
            print(
                'Cannot compute covariance for constant parameter {}'.format(
                    e.args[0]))

        return None

    def summary(self, format='brief'):
        """Return a summary of the optimization.

        format='brief' : Number of iterations, initial/final cost
        format='full' : Initial/final cost and relative change at each iteration
        """
        if not self._cost_history:
            raise ValueError('solve has not yet been called')

        # BUG FIX: `==` instead of `is` -- identity comparison of str
        # literals relies on CPython interning and is not guaranteed.
        if format == 'brief':
            entry_format_string = 'Iterations: {:3} | Cost: {:12e} --> {:12e}'
            summary = entry_format_string.format(len(self._cost_history),
                                                 self._cost_history[0],
                                                 self._cost_history[-1])

        elif format == 'full':
            header_string = '{:>5s} | {:>12s} --> {:>12s} | {:>10s}\n'.format(
                'Iter', 'Initial cost', 'Final cost', 'Rel change')
            entry_format_string = '{:5} | {:12e} --> {:12e} | {:+10f}\n'
            summary = [header_string, '-' * len(header_string) + '\n']
            for i, ic, fc in zip(range(len(self._cost_history)),
                                 self._cost_history[:-1],
                                 self._cost_history[1:]):
                summary.append(entry_format_string.format(
                    i, ic, fc, (fc - ic) / ic))

            summary = ''.join(summary)
        else:
            raise ValueError(
                'Invalid summary format \'{}\'. '.format(format) +
                'Valid formats are \'brief\' and \'full\'')

        return summary

    def _get_update_partition_dict(self):
        """Partition the full update vector.

        Returns:
            dict mapping each non-constant parameter key to the range of
            indices it occupies in the stacked update vector.
        """
        update_partition_dict = {}
        prev_key = ''
        for key, param in self.param_dict.items():
            if key not in self.constant_param_keys:
                if hasattr(param, 'dof'):
                    # Check if parameter specifies a tangent space
                    dof = param.dof
                elif hasattr(param, '__len__'):
                    # Check if parameter is a vector
                    dof = len(param)
                else:
                    # Must be a scalar
                    dof = 1

                if not update_partition_dict:
                    update_partition_dict.update({key: range(dof)})
                else:
                    update_partition_dict.update({key: range(
                        update_partition_dict[prev_key].stop,
                        update_partition_dict[prev_key].stop + dof)})

                prev_key = key

        return update_partition_dict

    def _get_precision_information_and_cost(self):
        """Build the precision matrix, information vector and total cost.

        The Gauss-Newton step is given by
            (H.T * W * H) dx = -H.T * W * e
        or
            precision * dx = information

        W is subsumed into H and e by the stiffness parameter, so we solve
            (H'.T * H') dx = -H'.T * e'
        where H' = sqrt(W) * H and e' = sqrt(W) * e. This is exactly
        equivalent, but avoids explicitly constructing and multiplying the
        (possibly very large) W matrix.
        """
        HT_blocks = [[None for _ in self.residual_blocks]
                     for _ in self.param_dict]
        e_blocks = [None for _ in self.residual_blocks]
        cost_blocks = [None for _ in self.residual_blocks]

        block_cidx_dict = dict(zip(self.param_dict.keys(),
                                   list(range(len(self.param_dict)))))

        if self.options.num_threads > 1:
            # Evaluate residual and jacobian blocks in parallel
            threads = []
            for block_ridx, (block, keys, loss) in \
                    enumerate(zip(self.residual_blocks,
                                  self.block_param_keys,
                                  self.block_loss_functions)):

                threads.append(self._thread_pool.submit(
                    self._populate_residual_jacobian_and_cost_blocks,
                    HT_blocks, e_blocks, cost_blocks,
                    block_cidx_dict, block_ridx,
                    block, keys, loss))

            concurrent.futures.wait(threads)
        else:
            # Single thread: Call directly instead of submitting a job
            for block_ridx, (block, keys, loss) in \
                    enumerate(zip(self.residual_blocks,
                                  self.block_param_keys,
                                  self.block_loss_functions)):

                self._populate_residual_jacobian_and_cost_blocks(
                    HT_blocks, e_blocks, cost_blocks,
                    block_cidx_dict, block_ridx,
                    block, keys, loss)

        HT = sparse.bmat(HT_blocks, format='csr')
        # np.concatenate replaces the np.matrix-based np.bmat(...).A
        # construction (np.matrix is deprecated); e_blocks entries are
        # 1-D weighted residual arrays.
        e = np.concatenate([np.atleast_1d(e_block) for e_block in e_blocks])

        precision = HT.dot(HT.T)
        information = -HT.dot(e)
        cost = np.sum(np.array(cost_blocks))

        return precision, information, cost

    def _populate_residual_jacobian_and_cost_blocks(self,
                                                    HT_blocks, e_blocks,
                                                    cost_blocks,
                                                    block_cidx_dict,
                                                    block_ridx,
                                                    block, keys, loss):
        """Evaluate one residual block and scatter its weighted jacobian,
        residual and cost into the shared output structures."""
        params = [self.param_dict[key] for key in keys]
        compute_jacobians = [False if key in self.constant_param_keys
                             else True for key in keys]

        # Drop the residual if all the parameters used to compute it are
        # being held constant
        if any(compute_jacobians):
            residual, jacobians = block.evaluate(params, compute_jacobians)

            # Weight for iteratively reweighted least squares
            sqrt_loss_weight = np.sqrt(loss.weight(residual))

            for key, jac in zip(keys, jacobians):
                if jac is not None:
                    # transposes needed for proper broadcasting
                    HT_blocks[block_cidx_dict[key]][block_ridx] = \
                        sparse.csr_matrix(sqrt_loss_weight.T * jac.T)

            cost_blocks[block_ridx] = np.sum(loss.loss(residual))
            e_blocks[block_ridx] = sqrt_loss_weight * residual

    def _do_line_search(self, dx):
        """Backtracking line search along the Gauss-Newton direction dx.

        Returns:
            (best_step_size, best_cost) found along the direction.
        """
        step_size = 1
        best_step_size = step_size
        best_cost = np.inf

        iters = 0
        done_linesearch = False
        while not done_linesearch:
            iters += 1
            test_params = copy.deepcopy(self.param_dict)

            # BUG FIX: perturb by the CANDIDATE step (step_size), not
            # best_step_size -- the old code re-evaluated the same point
            # every iteration and accepted untested step sizes.
            for k, r in self._update_partition_dict.items():
                self._perturb_by_key(k, step_size * dx[r], test_params)

            test_cost = self.eval_cost(test_params)

            if iters < self.options.linesearch_max_iters and \
                    test_cost < \
                    self.options.linesearch_min_cost_decrease * best_cost:
                best_cost = test_cost
                best_step_size = step_size
            else:
                if test_cost < best_cost:
                    best_cost = test_cost
                    best_step_size = step_size

                done_linesearch = True

            step_size = self.options.linesearch_alpha * step_size

        return best_step_size, best_cost

    def _perturb_by_key(self, key, dx, param_dict=None):
        """Update a single parameter with its slice of the update vector.

        Uses the parameter's own perturb() (tangent-space update) when it
        exists, falling back to plain vector addition.
        """
        if param_dict is None:
            param_dict = self.param_dict

        try:
            param_dict[key].perturb(dx)
        except AttributeError:
            # Default vector space behaviour
            param_dict[key] += dx
-------------------------------------------------------------------------------- /pyslam/residuals/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = [] 2 | 3 | import pkgutil 4 | import inspect 5 | 6 | for loader, name, is_pkg in pkgutil.walk_packages(__path__): 7 | module = loader.find_module(name).load_module(name) 8 | 9 | for name, value in inspect.getmembers(module): 10 | if name.startswith('__'): 11 | continue 12 | 13 | globals()[name] = value 14 | __all__.append(name) 15 | -------------------------------------------------------------------------------- /pyslam/residuals/photometric_residual.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.interpolate 3 | import time 4 | 5 | from liegroups import SE3 6 | from pyslam.utils import stackmul, bilinear_interpolate 7 | 8 | from numba import guvectorize, float32, float64 9 | 10 | 11 | SE3_ODOT_SHAPE = np.empty(6) 12 | 13 | 14 | @guvectorize([(float32[:], float32[:], float32[:, :]), 15 | (float64[:], float64[:], float64[:, :])], 16 | '(n),(m)->(n,m)', nopython=True, cache=True, target='parallel') 17 | def fast_se3_odot(vec, junk, out): 18 | out[0, 0] = 1. 19 | out[0, 1] = 0. 20 | out[0, 2] = 0. 21 | out[0, 3] = 0. 22 | out[0, 4] = vec[2] 23 | out[0, 5] = -vec[1] 24 | out[1, 0] = 0. 25 | out[1, 1] = 1. 26 | out[1, 2] = 0. 27 | out[1, 3] = -vec[2] 28 | out[1, 4] = 0. 29 | out[1, 5] = vec[0] 30 | out[2, 0] = 0. 31 | out[2, 1] = 0. 32 | out[2, 2] = 1. 33 | out[2, 3] = vec[1] 34 | out[2, 4] = -vec[0] 35 | out[2, 5] = 0. 36 | 37 | 38 | class PhotometricResidualSE3: 39 | """Full SE3 photometric residual for greyscale images. 
    Uses the pre-computed reference image jacobian as an approximation to the
    tracking image jacobian under the assumption that the camera motion is small."""

    def __init__(self, camera, im_ref, depth_ref, im_track, im_jac,
                 intensity_stiffness, depth_stiffness, min_grad=0.):
        """Cache everything that depends only on the reference frame.

        Depth image and stiffness parameter can also be disparity.

        Args:
            camera: camera model providing u_grid/v_grid,
                is_valid_measurement, triangulate and project.
            im_ref: greyscale reference image.
            depth_ref: per-pixel depth (or disparity) of the reference image.
            im_track: greyscale tracking image.
            im_jac: (du, dv) image gradient pair of the reference image.
            intensity_stiffness: inverse std. dev. of image intensity.
            depth_stiffness: inverse std. dev. of depth (or disparity).
            min_grad: minimum gradient norm; weaker pixels are discarded.
        """
        self.camera = camera
        self.im_ref = im_ref.ravel()

        # Per-pixel (u, v, depth) observations stacked as an Nx3 array.
        self.uvd_ref = np.vstack([self.camera.u_grid.ravel(),
                                  self.camera.v_grid.ravel(),
                                  depth_ref.ravel()]).T
        # Nx2 image gradient (du, dv) per pixel.
        self.im_jac = np.vstack([im_jac[0].ravel(),
                                 im_jac[1].ravel()]).T

        self.im_track = im_track

        self.intensity_stiffness = intensity_stiffness
        self.depth_stiffness = depth_stiffness
        # Covariances are the inverse squared stiffnesses.
        self.intensity_covar = intensity_stiffness ** -2
        self.depth_covar = depth_stiffness ** -2

        self.min_grad = min_grad

        # Filter out invalid pixels (NaN or negative depth)
        valid_pixels = self.camera.is_valid_measurement(self.uvd_ref)
        self.uvd_ref = self.uvd_ref.compress(valid_pixels, axis=0)
        self.im_ref = self.im_ref.compress(valid_pixels)
        self.im_jac = self.im_jac.compress(valid_pixels, axis=0)

        # Filter out pixels with weak gradients
        grad_ref = np.linalg.norm(self.im_jac, axis=1)
        strong_pixels = grad_ref >= self.min_grad
        self.uvd_ref = self.uvd_ref.compress(strong_pixels, axis=0)
        self.im_ref = self.im_ref.compress(strong_pixels)
        self.im_jac = self.im_jac.compress(strong_pixels, axis=0)

        # Precompute triangulated 3D points
        self.pt_ref, self.triang_jac = self.camera.triangulate(
            self.uvd_ref, compute_jacobians=True)

    def evaluate(self, params, compute_jacobians=None):
        """Evaluate the photometric residual and (optionally) its jacobians.

        Args:
            params: [T_track_ref] (SE3) or [C_track_ref, r_track_ref]
                (SO3, translation) parameterization of the pose.
            compute_jacobians: optional list of bools, one per parameter.

        Returns:
            residual, or (residual, jacobians) when jacobians are requested.
        """
        if len(params) == 1:
            T_track_ref = params[0]
        elif len(params) == 2:
            T_track_ref = SE3(params[0], params[1])
        else:
            raise ValueError(
                'In PhotometricResidual.evaluate() params must have length 1 or 2')

        # Reproject reference image pixels into tracking image to predict the
        # reference image based on the tracking image
        pt_track = T_track_ref.dot(self.pt_ref)
        uvd_track, project_jac = self.camera.project(
            pt_track, compute_jacobians=True)

        # Filter out bad measurements
        # where returns a tuple
        valid_pixels = self.camera.is_valid_measurement(uvd_track)

        # The residual is the intensity difference between the estimated
        # reference image pixels and the true reference image pixels
        # This is actually faster than filtering the intermediate results!
        im_ref_est = bilinear_interpolate(self.im_track,
                                          uvd_track[:, 0],
                                          uvd_track[:, 1])
        residual = (im_ref_est - self.im_ref).compress(valid_pixels)

        # We need the jacobian of the residual w.r.t. the depth
        # to compute a reasonable stiffness paramater
        # This is actually faster than filtering the intermediate results!
        im_proj_jac = stackmul(self.im_jac[:, np.newaxis, :],
                               project_jac[:, 0:2, :])  # Nx1x3
        temp = stackmul(im_proj_jac, T_track_ref.rot.as_matrix())  # Nx1x3
        im_depth_jac = np.squeeze(
            stackmul(temp, self.triang_jac[:, :, 2:3])).compress(
                valid_pixels, axis=0)  # Nx1x1

        # Compute the overall stiffness
        # \sigma^2 = \sigma^2_I + J_d \sigma^2_d J_d^T
        stiffness = 1. / np.sqrt(self.intensity_covar +
                                 self.depth_covar * im_depth_jac**2)
        # stiffness = self.intensity_stiffness
        residual = stiffness * residual

        # DEBUG: Rebuild residual and depth images
        # self._rebuild_images(residual, im_ref_est, self.im_ref, valid_pixels)
        # import ipdb
        # ipdb.set_trace()

        # Jacobian time!
        if compute_jacobians:
            if any(compute_jacobians):
                # This is actually faster than filtering the intermediate
                # results!
                odot_pt_track = fast_se3_odot(pt_track, SE3_ODOT_SHAPE)
                jac = np.squeeze(
                    stackmul(im_proj_jac, odot_pt_track)).compress(
                        valid_pixels, axis=0)
                jac = (stiffness * jac.T).T

            if len(params) == 1:
                # SE3 case
                jacobians = [None]

                if compute_jacobians[0]:
                    jacobians[0] = jac

            elif len(params) == 2:
                # (SO3, t) case
                jacobians = [None, None]

                if compute_jacobians[0]:
                    # Rotation part
                    jacobians[0] = jac[:, 3:6]
                if compute_jacobians[1]:
                    # Translation part
                    jacobians[1] = jac[:, 0:3]

            return residual, jacobians

        return residual

    def _rebuild_images(self, residual, im_ref_est, im_ref_true, valid_pixels):
        """Debug function to rebuild the filtered
        residual and depth images as a sanity check"""
        uvd_ref = self.uvd_ref[valid_pixels]
        imshape = (self.camera.h, self.camera.w)

        # Scatter the per-pixel values back into full-size images at their
        # integer (u, v) reference coordinates; untouched pixels stay NaN.
        self.actual_reference_image = np.full(imshape, np.nan)
        self.actual_reference_image[
            uvd_ref.astype(int)[:, 1],
            uvd_ref.astype(int)[:, 0]] = im_ref_true[valid_pixels]

        self.estimated_reference_image = np.full(imshape, np.nan)
        self.estimated_reference_image[
            uvd_ref.astype(int)[:, 1],
            uvd_ref.astype(int)[:, 0]] = im_ref_est[valid_pixels]

        self.residual_image = np.full(imshape, np.nan)
        self.residual_image[
            uvd_ref.astype(int)[:, 1],
            uvd_ref.astype(int)[:, 0]] = residual

        self.depth_image = np.full(imshape, np.nan)
        self.depth_image[
            uvd_ref.astype(int)[:, 1],
            uvd_ref.astype(int)[:, 0]] = uvd_ref[:, 2]
import numpy as np


class PoseResidual:
    """Unary pose residual given absolute pose measurement in SE2/SE3."""

    def __init__(self, T_obs, stiffness):
        self.T_obs = T_obs
        self.stiffness = stiffness
        self.obstype = type(T_obs)

    def evaluate(self, params, compute_jacobians=None):
        """Compute the weighted pose error and, optionally, its jacobian."""
        T_est = params[0]

        # The error lives in the tangent space of the measurement's group.
        pose_error = self.obstype.log(T_est.dot(self.T_obs.inv()))
        residual = np.dot(self.stiffness, pose_error)

        if not compute_jacobians:
            return residual

        jacobians = [None] * len(params)
        if compute_jacobians[0]:
            # d log(T_est * T_obs^-1) / d T_est ~ identity for small errors.
            jacobians[0] = np.dot(self.stiffness,
                                  np.identity(self.obstype.dof))

        return residual, jacobians


class PoseToPoseOrientationResidual:
    """Binary pose-to-pose residual given relative rotation measurement in SO3."""

    def __init__(self, C_2_1_obs, stiffness):
        self.C_2_1_obs = C_2_1_obs
        self.stiffness = stiffness
        self.obstype = type(C_2_1_obs)

    def evaluate(self, params, compute_jacobians=None):
        """Compute the weighted rotation error and optional jacobians."""
        T_1_0_est, T_2_0_est = params

        # TODO: Make this more general for SE(2) poses
        T_2_1_est = T_2_0_est.dot(T_1_0_est.inv())

        # Selector matrices picking out the rotational block of each pose.
        P2 = np.zeros((3, 6))
        P2[:, 3:] = np.eye(3)
        P1 = np.zeros((3, 6))
        P1[:, 3:] = T_2_1_est.rot.as_matrix()

        rot_error = self.obstype.log(
            T_2_1_est.rot.dot(self.C_2_1_obs.inv()))
        residual = np.dot(self.stiffness, rot_error)

        if not compute_jacobians:
            return residual

        jacobians = [None] * len(params)
        if compute_jacobians[0]:
            jacobians[0] = np.dot(self.stiffness, -P1)
        if compute_jacobians[1]:
            jacobians[1] = np.dot(self.stiffness, P2)

        return residual, jacobians
/pyslam/residuals/pose_to_pose_residual.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class PoseToPoseResidual: 5 | """Binary pose-to-pose residual given relative pose mesurement in SE2/SE3.""" 6 | 7 | def __init__(self, T_2_1_obs, stiffness): 8 | self.T_2_1_obs = T_2_1_obs 9 | self.stiffness = stiffness 10 | self.obstype = type(T_2_1_obs) 11 | 12 | def evaluate(self, params, compute_jacobians=None): 13 | T_1_0_est = params[0] 14 | T_2_0_est = params[1] 15 | 16 | residual = np.dot(self.stiffness, 17 | self.obstype.log( 18 | T_2_0_est.dot(T_1_0_est.inv().dot(self.T_2_1_obs.inv())))) 19 | 20 | if compute_jacobians: 21 | jacobians = [None for _ in enumerate(params)] 22 | 23 | if compute_jacobians[0]: 24 | jacobians[0] = np.dot(self.stiffness, -T_2_0_est.dot(T_1_0_est.inv()).adjoint()) 25 | 26 | if compute_jacobians[1]: 27 | jacobians[1] = np.dot( 28 | self.stiffness, np.identity(self.obstype.dof)) 29 | 30 | return residual, jacobians 31 | 32 | return residual 33 | -------------------------------------------------------------------------------- /pyslam/residuals/quadratic_residual.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class QuadraticResidual: 5 | """A simple quadratic residual for fitting a parabola to data.""" 6 | 7 | def __init__(self, x, y, stiffness): 8 | self.x = np.array([x]) 9 | self.y = np.array([y]) 10 | self.stiffness = np.array([stiffness]) 11 | 12 | def evaluate(self, params, compute_jacobians=None): 13 | residual = self.stiffness * (params[0] * self.x * self.x 14 | + params[1] * self.x 15 | + params[2] 16 | - self.y) 17 | 18 | if compute_jacobians: 19 | jacobians = [None for _ in enumerate(params)] 20 | 21 | if compute_jacobians[0]: 22 | jacobians[0] = self.stiffness * self.x * self.x 23 | 24 | if compute_jacobians[1]: 25 | jacobians[1] = self.stiffness * self.x 26 | 27 | if compute_jacobians[2]: 28 | 
# Dummy length-6 array: its size tells guvectorize the output column count.
# The values are never read.
SE3_ODOT_SHAPE = np.empty(6)


@guvectorize([(float64[:], float64[:], float64[:, :])],
             '(n),(m)->(n,m)', nopython=True, cache=True, target='parallel')
def fast_se3_odot(vec, junk, out):
    """Vectorized SE3 odot operator for a batch of 3D points.

    Writes the 3x6 matrix [ I_3 | -vec^ ] (identity beside the negated
    skew-symmetric matrix of vec) into `out`. `junk` only fixes the output
    shape (6 columns) and is never read.
    """
    out[0, 0] = 1.
    out[0, 1] = 0.
    out[0, 2] = 0.
    out[0, 3] = 0.
    out[0, 4] = vec[2]
    out[0, 5] = -vec[1]
    out[1, 0] = 0.
    out[1, 1] = 1.
    out[1, 2] = 0.
    out[1, 3] = -vec[2]
    out[1, 4] = 0.
    out[1, 5] = vec[0]
    out[2, 0] = 0.
    out[2, 1] = 0.
    out[2, 2] = 1.
    out[2, 3] = vec[1]
    out[2, 4] = -vec[0]
    out[2, 5] = 0.
class ReprojectionMotionOnlyBatchResidual:
    """Frame to frame reprojection error with batch jacobians (for multiple reprojections)."""

    def __init__(self, camera, obs_1, obs_2, stiffness):
        # camera: sensor model providing triangulate() and project()
        # obs_1, obs_2: (N,3) observations in frames 1 and 2
        # stiffness: (3,3) inverse square-root covariance shared by all points
        self.camera = camera
        self.obs_1 = obs_1
        self.obs_2 = obs_2
        self.stiffness = stiffness

        # Landmarks are triangulated once from the frame-1 observations and
        # held fixed; only the relative pose is optimized.
        self.pts_1 = self.camera.triangulate(self.obs_1)
        self.num_pts = self.pts_1.shape[0]

    def evaluate(self, params, compute_jacobians=None):
        """Evaluate the stacked (3*N,) weighted reprojection residual.

        Args:
            params: [T_2_1] -- estimated transform taking frame 1 to frame 2.
            compute_jacobians: optional list of booleans, one per parameter.

        Returns:
            residual, or (residual, jacobians) with a (3*N, 6) Jacobian.
        """
        T_2_1 = params[0]
        pts_2 = T_2_1.dot(self.pts_1)

        if compute_jacobians:
            jacobians = [None for _ in enumerate(params)]

            obs_2_pred, cam_proj_jac = self.camera.project(
                pts_2, compute_jacobians=True)

            # Weight each per-point residual, then flatten to (3*N,).
            residual = self.stiffness.dot((obs_2_pred - self.obs_2).T).T
            residual = residual.reshape(3*self.num_pts)

            if compute_jacobians[0]:
                # Chain rule per point: d(proj)/d(pt) * d(pt)/d(xi).
                odot_pts_2 = fast_se3_odot(pts_2, SE3_ODOT_SHAPE)
                jac = stackmul(cam_proj_jac, odot_pts_2)

                stiffness = np.broadcast_to(self.stiffness,
                                            (self.num_pts, 3, 3))
                jac = stackmul(stiffness, jac)

                # Reshape back into a (3*N, 6) Jacobian
                jacobians[0] = np.reshape(jac, [3*self.num_pts, 6])

            return residual, jacobians

        # Multiply (3,3) by (3,N), and then reshape to get a (3*N,) array
        obs_2_pred = self.camera.project(pts_2)
        residual = self.stiffness.dot((obs_2_pred - self.obs_2).T).T
        residual = residual.reshape(3*self.num_pts)

        return residual
class RGBDCamera:
    """Pinhole RGB-D camera model.

    Projects 3D points in the sensor frame to (u, v, z) measurements
    (pixel coordinates plus raw depth) and triangulates them back.
    """

    def __init__(self, cu, cv, fu, fv, w, h):
        # Principal point (cu, cv), focal lengths (fu, fv), image size (w, h).
        self.cu = float(cu)
        self.cv = float(cv)
        self.fu = float(fu)
        self.fv = float(fv)
        self.w = int(w)
        self.h = int(h)

    def clone(self):
        """Return a new RGBDCamera with identical parameters."""
        return self.__class__(self.cu, self.cv,
                              self.fu, self.fv,
                              self.w, self.h)

    def compute_pixel_grid(self):
        """Precompute float-valued (u, v) pixel coordinate grids of shape (h, w)."""
        self.u_grid, self.v_grid = np.meshgrid(range(0, self.w),
                                               range(0, self.h), indexing='xy')
        self.u_grid = self.u_grid.astype(float)
        self.v_grid = self.v_grid.astype(float)

    def is_valid_measurement(self, uvz):
        """Check if one or more uvz measurements is valid.
        Returns indices of valid measurements.
        """
        uvz = np.atleast_2d(uvz)

        if not uvz.shape[1] == 3:
            raise ValueError("uvz must have shape (3,) or (N,3)")

        return _rgbd_validate(uvz, [self.w, self.h])

    def project(self, pt_c, compute_jacobians=None):
        """Project 3D point(s) in the sensor frame into (u,v,d) coordinates."""
        # Convert to 2D array if it's just a single point.
        # We'll remove any singleton dimensions at the end.
        pt_c = np.atleast_2d(pt_c)

        if not pt_c.shape[1] == 3:
            raise ValueError("pt_c must have shape (3,) or (N,3)")

        # Now do the actual math
        params = self.cu, self.cv, self.fu, self.fv
        uvz = _rgbd_project(pt_c, params)

        if compute_jacobians:
            jacobians = _rgbd_project_jacobian(pt_c, params)

            return np.squeeze(uvz), np.squeeze(jacobians)

        return np.squeeze(uvz)

    def triangulate(self, uvz, compute_jacobians=None):
        """Triangulate 3D point(s) in the sensor frame from (u,v,d)."""
        # Convert to 2D array if it's just a single point
        # We'll remove any singleton dimensions at the end.
        uvz = np.atleast_2d(uvz)

        if not uvz.shape[1] == 3:
            raise ValueError("uvz must have shape (3,) or (N,3)")

        # Now do the actual math
        params = self.cu, self.cv, self.fu, self.fv
        pt_c = _rgbd_triangulate(uvz, params)

        if compute_jacobians:
            jacobians = _rgbd_triangulate_jacobian(uvz, params)

            return np.squeeze(pt_c), np.squeeze(jacobians)

        return np.squeeze(pt_c)

    def __repr__(self):
        # BUGFIX: the format string previously contained a leftover
        # " b: {:f}" field copied from StereoCamera (8 fields, 7 arguments),
        # so calling repr() raised IndexError. RGB-D cameras have no baseline.
        return ("{}:\n cu: {:f}\n cv: {:f}\n fu: {:f}\n fv: {:f}\n"
                + " w: {:d}\n h: {:d}\n").format(self.__class__.__name__,
                                                 self.cu, self.cv,
                                                 self.fu, self.fv,
                                                 self.w, self.h)
@guvectorize([(float32[:], float32[:], float32[:, :]),
              (float64[:], float64[:], float64[:, :])],
             '(n),(m)->(n,n)', nopython=True, cache=True, target=NUMBA_COMPILATION_TARGET)
def _rgbd_project_jacobian(pt_c, params, out):
    """3x3 Jacobian of the RGB-D (u,v,z) projection w.r.t. the point pt_c.

    params is (cu, cv, fu, fv); out[i, j] = d(uvz[i]) / d(pt_c[j]).
    """
    cu, cv, fu, fv = params

    one_over_z = 1. / pt_c[2]
    one_over_z2 = one_over_z * one_over_z

    # d(u) / d(pt_c)
    out[0, 0] = fu * one_over_z
    out[0, 1] = 0.
    out[0, 2] = -fu * pt_c[0] * one_over_z2

    # d(v) / d(pt_c)
    out[1, 0] = 0.
    out[1, 1] = fv * one_over_z
    out[1, 2] = -fv * pt_c[1] * one_over_z2

    # d(d) / d(pt_c): depth passes straight through
    out[2, 0] = 0.
    out[2, 1] = 0.
    out[2, 2] = 1.
class StereoCamera:
    """Pinhole stereo camera model with the origin in left camera."""

    # Attribute names holding float-valued intrinsics, in constructor order.
    _FLOAT_PARAMS = ('cu', 'cv', 'fu', 'fv', 'b')

    def __init__(self, cu, cv, fu, fv, b, w, h):
        # Principal point (cu, cv), focal lengths (fu, fv), baseline b,
        # and integer image dimensions (w, h).
        for name, value in zip(self._FLOAT_PARAMS, (cu, cv, fu, fv, b)):
            setattr(self, name, float(value))
        self.w = int(w)
        self.h = int(h)

    def clone(self):
        """Return a new StereoCamera with identical parameters."""
        return self.__class__(self.cu, self.cv,
                              self.fu, self.fv, self.b,
                              self.w, self.h)

    def compute_pixel_grid(self):
        """Precompute float-valued (u, v) pixel coordinate grids of shape (h, w)."""
        us = np.arange(self.w, dtype=float)
        vs = np.arange(self.h, dtype=float)
        self.u_grid, self.v_grid = np.meshgrid(us, vs, indexing='xy')

    def is_valid_measurement(self, uvd):
        """Check if one or more uvd measurements is valid.
        Returns indices of valid measurements.
        """
        uvd = np.atleast_2d(uvd)
        if uvd.shape[1] != 3:
            raise ValueError("uvd must have shape (3,) or (N,3)")
        return _stereo_validate(uvd, [self.w, self.h])

    def project(self, pt_c, compute_jacobians=None):
        """Project 3D point(s) in the sensor frame into (u,v,d) coordinates."""
        # Promote a single point to a (1,3) array; singleton dimensions are
        # squeezed away again before returning.
        pt_c = np.atleast_2d(pt_c)
        if pt_c.shape[1] != 3:
            raise ValueError("pt_c must have shape (3,) or (N,3)")

        intrinsics = self.cu, self.cv, self.fu, self.fv, self.b
        uvd = np.squeeze(_stereo_project(pt_c, intrinsics))

        if not compute_jacobians:
            return uvd
        return uvd, np.squeeze(_stereo_project_jacobian(pt_c, intrinsics))

    def triangulate(self, uvd, compute_jacobians=None):
        """Triangulate 3D point(s) in the sensor frame from (u,v,d)."""
        # Promote a single measurement to a (1,3) array; singleton dimensions
        # are squeezed away again before returning.
        uvd = np.atleast_2d(uvd)
        if uvd.shape[1] != 3:
            raise ValueError("uvd must have shape (3,) or (N,3)")

        intrinsics = self.cu, self.cv, self.fu, self.fv, self.b
        pt_c = np.squeeze(_stereo_triangulate(uvd, intrinsics))

        if not compute_jacobians:
            return pt_c
        return pt_c, np.squeeze(_stereo_triangulate_jacobian(uvd, intrinsics))

    def __repr__(self):
        return (f"{self.__class__.__name__}:\n cu: {self.cu:f}\n cv: {self.cv:f}\n"
                f" fu: {self.fu:f}\n fv: {self.fv:f}\n b: {self.b:f}\n"
                f" w: {self.w:d}\n h: {self.h:d}\n")
@guvectorize([(float32[:], float32[:], float32[:]),
              (float64[:], float64[:], float64[:])],
             '(n),(m)->(n)', nopython=True, cache=True, target=NUMBA_COMPILATION_TARGET)
def _stereo_project(pt_c, params, out):
    """Project one 3D point (x, y, z) to a stereo (u, v, d) measurement.

    params is (cu, cv, fu, fv, b); disparity d = fu * b / z.
    """
    cu, cv, fu, fv, b = params

    one_over_z = 1. / pt_c[2]
    out[0] = fu * pt_c[0] * one_over_z + cu
    out[1] = fv * pt_c[1] * one_over_z + cv
    out[2] = fu * b * one_over_z
@guvectorize([(float32[:], float32[:], float32[:]),
              (float64[:], float64[:], float64[:])],
             '(n),(m)->(n)', nopython=True, cache=True, target=NUMBA_COMPILATION_TARGET)
def _stereo_triangulate(uvd, params, out):
    """Triangulate one stereo (u, v, d) measurement back to a 3D point.

    params is (cu, cv, fu, fv, b). Inverse of _stereo_project:
    z = fu * b / d, with the fu/fv ratio correcting the v axis.
    """
    cu, cv, fu, fv, b = params

    b_over_d = b / uvd[2]
    fu_over_fv = fu / fv

    out[0] = (uvd[0] - cu) * b_over_d
    out[1] = (uvd[1] - cv) * b_over_d * fu_over_fv
    out[2] = fu * b_over_d
@guvectorize([(float32[:, :], float32[:], float32[:], float32[:]),
              (float64[:, :], float64[:], float64[:], float64[:])],
             '(n,m),(),()->()', nopython=True, cache=True, target=NUMBA_COMPILATION_TARGET)
def _bilinear_interpolate(im, x, y, out):
    """Perform bilinear interpolation on a 2D array at (x, y)."""
    # NB: The concrete signature does not allow for scalar values, even though
    # the layout may mention them. x, y, and out are declared as float64[:]
    # instead of float64. This is why they must be dereferenced by fetching
    # x[0], y[0], and out[0].
    # BUGFIX: these previously used index 1 (x[1], y[1], out[1]), reading and
    # writing past the end of the 1-element scalar views.
    x = x[0]
    y = y[0]

    x_min = 0
    y_min = 0
    x_max = im.shape[1] - 1
    y_max = im.shape[0] - 1

    # BUGFIX: np.int was removed in NumPy 1.24; the builtin int truncates
    # identically here.
    x0 = int(x)
    x1 = x0 + 1

    y0 = int(y)
    y1 = y0 + 1

    # Compute weights per bilinear scheme
    # Important: this needs to happen before any coordinate clipping!
    wa = (x1 - x) * (y1 - y)
    wb = (x1 - x) * (y - y0)
    wc = (x - x0) * (y1 - y)
    wd = (x - x0) * (y - y0)

    # Clip to image boundaries and sample
    # NB: at the boundaries this is equivalent to duplicating the first/last row and column
    x0 = x_min if x0 < x_min else x0
    x0 = x_max if x0 > x_max else x0

    x1 = x_min if x1 < x_min else x1
    x1 = x_max if x1 > x_max else x1

    y0 = y_min if y0 < y_min else y0
    y0 = y_max if y0 > y_max else y0

    y1 = y_min if y1 < y_min else y1
    y1 = y_max if y1 > y_max else y1

    Ia = im[y0, x0]
    Ib = im[y1, x0]
    Ic = im[y0, x1]
    Id = im[y1, x1]

    # Interpolated result
    out[0] = wa * Ia + wb * Ib + wc * Ic + wd * Id
from pyslam.metrics import TrajectoryMetrics

import matplotlib
# Select the non-interactive Agg backend *before* pyplot is imported so the
# switch reliably takes effect. Removes the XWindows backend (useful for
# producing plots via tmux without -X).
# BUGFIX: the `warn` keyword of matplotlib.use() was deprecated in
# matplotlib 3.1 and removed in 3.3; passing it raises TypeError.
matplotlib.use('Agg')

from matplotlib import rc
import matplotlib.pyplot as plt
import numpy as np


plt.rc('text', usetex=True)
plt.rc('font', family='serif')
    def plot_topdown(self, which_plane='xz', segment_range=None, outfile=None, **kwargs):
        """ Plot a top-down view of the trajectory.

        Args:
            which_plane : which plane to plot ['xy' | 'xz' | 'yz']
            segment_range : which segment of the trajectory to plot
            outfile : full path and filename where the plot should be saved
            **kwargs : additional keyword arguments passed to plt.subplots()

        Returns:
            (fig, ax) of the created plot.
        """
        # Map the requested plane onto translation-vector component indices.
        if which_plane == 'xy':
            x_dim, y_dim = 0, 1
        elif which_plane == 'xz':
            x_dim, y_dim = 0, 2
        elif which_plane == 'yz':
            x_dim, y_dim = 1, 2
        else:
            raise ValueError(
                "which_plane must be ['xy' | 'xz' | 'yz']. Got {}".format(which_plane))

        # Grab plot parameters, pass the rest to subplots
        plot_params = self._parse_kwargs(kwargs)

        # Use a sane default figsize if the user doesn't specify one
        figsize = kwargs.get('figsize', (4, 3))
        kwargs.update({'figsize': figsize})

        fig, ax = plt.subplots(**kwargs)

        if plot_params['use_endpoint_markers']:
            endpoint_markers_iter = iter(self.endpoint_markers_list)

        # Ground truth is shared by all runs in tm_dict, so plot it only once.
        plotted_gt = False

        for p_i, (label, tm) in enumerate(self.tm_dict.items()):
            pos_gt = np.array([T.trans for T in tm.Twv_gt])
            pos_est = np.array([T.trans for T in tm.Twv_est])

            if segment_range is not None:
                pos_gt = pos_gt[segment_range, :]
                pos_est = pos_est[segment_range, :]

            if not plotted_gt:
                if plot_params['use_endpoint_markers']:
                    # markevery puts a single marker on the trajectory endpoint.
                    ax.plot(pos_gt[:, x_dim], pos_gt[:, y_dim], '-k',
                            linewidth=plot_params['gt_linewidth'],
                            marker=next(endpoint_markers_iter), markevery=[pos_gt.shape[0] - 1],
                            label='Ground Truth')
                else:
                    ax.plot(pos_gt[:, x_dim], pos_gt[:, y_dim], '-k',
                            linewidth=plot_params['gt_linewidth'], label='Ground Truth')

                plotted_gt = True

            if plot_params['use_endpoint_markers']:
                ax.plot(pos_est[:, x_dim], pos_est[:, y_dim],
                        linewidth=plot_params['est_linewidth'], color=plot_params['line_colours'][p_i],
                        marker=next(endpoint_markers_iter), markevery=[pos_est.shape[0] - 1],
                        label=label)
            else:
                ax.plot(pos_est[:, x_dim], pos_est[:, y_dim], color=plot_params['line_colours'][p_i],
                        linewidth=plot_params['est_linewidth'], label=label)

        ax.axis('equal')
        # ax.minorticks_on()
        ax.grid(which='both', linestyle=':',
                linewidth=plot_params['grid_linewidth'])
        ax.set_title('Trajectory', fontsize=plot_params['fontsize'])
        ax.set_xlabel('Easting (m)', fontsize=plot_params['fontsize'])
        ax.set_ylabel('Northing (m)',
                      fontsize=plot_params['fontsize'])
        ax.legend(fontsize=plot_params['legend_fontsize'])

        if outfile is not None:
            print('Saving to {}'.format(outfile))
            fig.savefig(outfile, bbox_inches='tight', transparent=True)

        return fig, ax
(\%)', 164 | fontsize=plot_params['fontsize']) 165 | 166 | ax[1].minorticks_on() 167 | ax[1].grid(which='both', linestyle=':', 168 | linewidth=plot_params['grid_linewidth']) 169 | ax[1].set_title('Rotational error', 170 | fontsize=plot_params['fontsize']) 171 | ax[1].set_xlabel('Segment length (m)', 172 | fontsize=plot_params['fontsize']) 173 | ax[1].set_ylabel('Avg. err. (deg/m)', 174 | fontsize=plot_params['fontsize']) 175 | 176 | fig.legend(handles, legend, loc='lower center', ncol=len(self.tm_dict), 177 | fontsize=plot_params['legend_fontsize']) 178 | fig.subplots_adjust(wspace=0.3, bottom=0.35) 179 | 180 | if outfile is not None: 181 | print('Saving to {}'.format(outfile)) 182 | fig.savefig(outfile, bbox_inches='tight', transparent=True) 183 | 184 | return fig, ax 185 | 186 | def _trans_rot_err_subplot(self, err_type, segment_range, outfile, **kwargs): 187 | """ Convenience function to plot translational and rotational errors in subplots. 188 | 189 | Args: 190 | err_type : 'rmse', or 'crmse' 191 | segment_range : which segment of the trajectory to plot 192 | outfile : full path and filename where the plot should be saved 193 | **kwargs : additional keyword arguments passed to plt.subplots() 194 | """ 195 | # Grab plot parameters, pass the rest to subplots 196 | plot_params = self._parse_kwargs(kwargs) 197 | 198 | # Use a sane default figsize if the user doesn't specify one 199 | figsize = kwargs.get('figsize', (8, 3)) 200 | kwargs.update({'figsize': figsize}) 201 | fig, ax = plt.subplots(1, 2, **kwargs) 202 | handles = [] 203 | legend = [] 204 | 205 | for p_i, (label, tm) in enumerate(self.tm_dict.items()): 206 | if segment_range is None: 207 | this_segment_range = range(len(tm.Twv_gt)) 208 | 209 | if err_type == 'norm': 210 | trans_err, rot_err = tm.error_norms(this_segment_range) 211 | err_name = 'Err. Norm.' 212 | elif err_type == 'cum': 213 | trans_err, rot_err = tm.cum_err(this_segment_range) 214 | err_name = 'Cumulative Err. Norm.' 
215 | else: 216 | raise ValueError( 217 | 'Got invalid err_type \'{}\''.format(err_type)) 218 | 219 | h = ax[0].plot(trans_err, '-', 220 | color=plot_params['line_colours'][p_i], label=label) 221 | ax[1].plot(rot_err * 180. / np.pi, '-', 222 | color=plot_params['line_colours'][p_i], label=label) 223 | 224 | handles.append(h[0]) 225 | legend.append(label) 226 | 227 | ax[0].minorticks_on() 228 | ax[0].grid(which='both', linestyle=':', 229 | linewidth=plot_params['grid_linewidth']) 230 | # ax[0].set_xlim((segment_range.start, segment_range.stop - 1)) 231 | ax[0].set_title('Translational {}'.format(err_name), 232 | fontsize=plot_params['fontsize']) 233 | ax[0].set_xlabel(plot_params['err_xlabel'], 234 | fontsize=plot_params['fontsize']) 235 | ax[0].set_ylabel('{} (m)'.format(err_name), 236 | fontsize=plot_params['fontsize']) 237 | 238 | ax[1].minorticks_on() 239 | ax[1].grid(which='both', linestyle=':', 240 | linewidth=plot_params['grid_linewidth']) 241 | # ax[1].set_xlim((segment_range.start, segment_range.stop - 1)) 242 | ax[1].set_title('Rotational {}'.format(err_name), 243 | fontsize=plot_params['fontsize']) 244 | ax[1].set_xlabel(plot_params['err_xlabel'], 245 | fontsize=plot_params['fontsize']) 246 | ax[1].set_ylabel('{} (deg)'.format(err_name), 247 | fontsize=plot_params['fontsize']) 248 | 249 | fig.legend(handles, legend, loc='lower center', ncol=len(self.tm_dict), 250 | fontsize=plot_params['legend_fontsize']) 251 | fig.subplots_adjust(wspace=0.3, bottom=0.35) 252 | 253 | if outfile is not None: 254 | print('Saving to {}'.format(outfile)) 255 | fig.savefig(outfile, bbox_inches='tight', transparent=True) 256 | 257 | return fig, ax 258 | 259 | def plot_pose_errors(self, segment_range=None, outfile=None, **kwargs): 260 | """ Plot translational and rotational errors over the trajectory. 
    def plot_cum_norm_err(self, segment_range=None, outfile=None, **kwargs):
        """ Plot cumulative translational and rotational error norms over the trajectory.

        Args:
            segment_range : which segment of the trajectory to plot
            outfile : full path and filename where the plot should be saved
            **kwargs : additional keyword arguments passed to plt.subplots()

        Returns:
            (fig, ax) of the created plot.
        """
        return self._trans_rot_err_subplot('cum', segment_range, outfile, **kwargs)
class TestQuadraticResidual:
    """Tests for the parabola-fitting QuadraticResidual."""

    @pytest.fixture
    def residual(self):
        # Single data point (x=2, y=3) with unit stiffness.
        from pyslam.residuals import QuadraticResidual
        return QuadraticResidual(2., 3., 1.)

    def test_evaluate(self, residual):
        # y = x^2 - 2x + 3 passes through (2, 3); the second parabola does not.
        params_good = [1., -2., 3.]
        params_bad = [0., 3., 1.]
        assert residual.evaluate(params_good) == 0.
        assert residual.evaluate(params_bad) != 0.

    def test_jacobians(self, residual):
        # Jacobians w.r.t. (a, b, c) are (x^2, x, 1), evaluated at x=2.
        params = [1., -2., 3.]
        expected_jac = [4., 2., 1.]
        _, jac1 = residual.evaluate(
            params, compute_jacobians=[True, True, True])
        _, jac2 = residual.evaluate(params, compute_jacobians=[
            False, False, False])
        assert len(jac1) == len(jac2) == 3
        assert np.allclose(jac1, expected_jac)
        assert not any(jac2)
np.allclose(se3_residual.evaluate([T_same]), np.zeros(6)) 61 | assert not np.allclose(se3_residual.evaluate([T_diff]), np.zeros(6)) 62 | 63 | def test_jacobians_se3(self, se3_residual): 64 | T_test = SE3.exp([7, 8, 9, 10, 11, 12]) 65 | _, jacobians = se3_residual.evaluate( 66 | [T_test], compute_jacobians=[True]) 67 | assert len(jacobians) == 1 and jacobians[0].shape == (6, 6) 68 | 69 | 70 | class TestPoseToPoseResidual: 71 | 72 | @pytest.fixture 73 | def se2_residual(self): 74 | from pyslam.residuals import PoseToPoseResidual 75 | return PoseToPoseResidual(SE2.identity(), np.eye(3)) 76 | 77 | def test_evaluate_se2(self, se2_residual): 78 | T1 = SE2.exp([1, 2, 3]) 79 | T2 = SE2.exp([4, 5, 6]) 80 | assert np.allclose(se2_residual.evaluate([T1, T1]), np.zeros(3)) 81 | assert not np.allclose(se2_residual.evaluate([T1, T2]), np.zeros(3)) 82 | 83 | def test_jacobians_se2(self, se2_residual): 84 | T1 = SE2.exp([1, 2, 3]) 85 | T2 = SE2.exp([4, 5, 6]) 86 | _, jac1 = se2_residual.evaluate( 87 | [T1, T2], compute_jacobians=[True, True]) 88 | _, jac2 = se2_residual.evaluate( 89 | [T1, T2], compute_jacobians=[True, False]) 90 | _, jac3 = se2_residual.evaluate( 91 | [T1, T2], compute_jacobians=[False, True]) 92 | assert len(jac1) == len(jac2) == len(jac3) == 2 93 | assert jac1[0].shape == (3, 3) and jac1[1].shape == (3, 3) 94 | assert jac2[0].shape == (3, 3) and jac2[1] is None 95 | assert jac3[0] is None and jac3[1].shape == (3, 3) 96 | 97 | @pytest.fixture 98 | def se3_residual(self): 99 | from pyslam.residuals import PoseToPoseResidual 100 | return PoseToPoseResidual(SE3.identity(), np.eye(6)) 101 | 102 | def test_evaluate_se3(self, se3_residual): 103 | T1 = SE3.exp([1, 2, 3, 4, 5, 6]) 104 | T2 = SE3.exp([7, 8, 9, 10, 11, 12]) 105 | assert np.allclose(se3_residual.evaluate([T1, T1]), np.zeros(6)) 106 | assert not np.allclose(se3_residual.evaluate([T1, T2]), np.zeros(6)) 107 | 108 | def test_jacobians_se3(self, se3_residual): 109 | T1 = SE3.exp([1, 2, 3, 4, 5, 6]) 110 | T2 = 
SE3.exp([7, 8, 9, 10, 11, 12]) 111 | _, jac1 = se3_residual.evaluate( 112 | [T1, T2], compute_jacobians=[True, True]) 113 | _, jac2 = se3_residual.evaluate( 114 | [T1, T2], compute_jacobians=[True, False]) 115 | _, jac3 = se3_residual.evaluate( 116 | [T1, T2], compute_jacobians=[False, True]) 117 | assert len(jac1) == len(jac2) == len(jac3) == 2 118 | assert jac1[0].shape == (6, 6) and jac1[1].shape == (6, 6) 119 | assert jac2[0].shape == (6, 6) and jac2[1] is None 120 | assert jac3[0] is None and jac3[1].shape == (6, 6) 121 | 122 | 123 | class TestReprojectionResidual: 124 | 125 | @pytest.fixture 126 | def stereo_residual(self): 127 | from pyslam.sensors import StereoCamera 128 | from pyslam.residuals import ReprojectionResidual 129 | cu = 100. 130 | cv = 100. 131 | fu = 200. 132 | fv = 200. 133 | b = 1. 134 | w = 200 135 | h = 200 136 | return ReprojectionResidual(StereoCamera(cu, cv, fu, fv, b, w, h), 137 | [40, 60, 10], np.eye(3)) 138 | 139 | def test_evaluate_stereo(self, stereo_residual): 140 | T_cam_w = SE3.exp([1, 2, 3, 4, 5, 6]) 141 | pt_good_w = T_cam_w.inv().dot( 142 | stereo_residual.camera.triangulate(stereo_residual.obs)) 143 | pt_bad_w = pt_good_w + [1, 1, 1] 144 | assert np.allclose(stereo_residual.evaluate( 145 | [T_cam_w, pt_good_w]), np.zeros(3)) 146 | assert not np.allclose(stereo_residual.evaluate( 147 | [T_cam_w, pt_bad_w]), np.zeros(3)) 148 | 149 | def test_jacobians_stereo(self, stereo_residual): 150 | T_cam_w = SE3.exp([1, 2, 3, 4, 5, 6]) 151 | pt_w = T_cam_w.inv().dot( 152 | stereo_residual.camera.triangulate(stereo_residual.obs)) 153 | _, jac1 = stereo_residual.evaluate( 154 | [T_cam_w, pt_w], compute_jacobians=[True, True]) 155 | _, jac2 = stereo_residual.evaluate( 156 | [T_cam_w, pt_w], compute_jacobians=[True, False]) 157 | _, jac3 = stereo_residual.evaluate( 158 | [T_cam_w, pt_w], compute_jacobians=[False, True]) 159 | assert len(jac1) == len(jac2) == len(jac3) == 2 160 | assert jac1[0].shape == (3, 6) and jac1[1].shape == (3, 3) 161 
| assert jac2[0].shape == (3, 6) and jac2[1] is None 162 | assert jac3[0] is None and jac3[1].shape == (3, 3) 163 | -------------------------------------------------------------------------------- /tests/test_problem.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from pyslam.problem import Options, Problem 5 | from pyslam.utils import invsqrt 6 | 7 | np.random.seed(42) 8 | 9 | 10 | class TestBasic: 11 | 12 | def test_residual_blocks(self): 13 | from pyslam.residuals import QuadraticResidual 14 | problem = Problem() 15 | param_keys = ['a', 'b', 'c'] 16 | problem.add_residual_block(QuadraticResidual(2., 4., 1.), 17 | param_keys) 18 | assert param_keys == problem.block_param_keys[0] 19 | 20 | def test_param_dict(self): 21 | problem = Problem() 22 | params = {'a': 1, 'b': 2, 'c': 3} 23 | problem.initialize_params(params) 24 | assert( 25 | problem.param_dict == params 26 | ) 27 | extra_param = {'d': 4} 28 | params.update(extra_param) 29 | problem.initialize_params(extra_param) 30 | assert problem.param_dict == params 31 | 32 | def test_constant_params(self): 33 | problem = Problem() 34 | problem.set_parameters_constant('a') 35 | assert problem.constant_param_keys == ['a'] 36 | problem.set_parameters_constant(['a', 'b_param']) 37 | assert problem.constant_param_keys == ['a', 'b_param'] 38 | problem.set_parameters_variable('a') 39 | assert problem.constant_param_keys == ['b_param'] 40 | problem.set_parameters_variable('c') 41 | assert problem.constant_param_keys == ['b_param'] 42 | problem.set_parameters_variable(['a', 'b_param', 'c']) 43 | assert problem.constant_param_keys == [] 44 | 45 | def test_eval_cost(self): 46 | from pyslam.residuals import QuadraticResidual 47 | problem = Problem() 48 | good_params = {'a': 1., 'b': 2., 'c': 1.} 49 | bad_params = {'a': 1., 'b': 0., 'c': 0.} 50 | residual1 = QuadraticResidual(1., 4., 0.5) 51 | residual2 = QuadraticResidual(0., 1., 2.) 
52 | problem.add_residual_block(residual1, ['a', 'b', 'c']) 53 | problem.add_residual_block(residual2, ['a', 'b', 'c']) 54 | problem.initialize_params(good_params) 55 | assert problem.eval_cost() == 0. 56 | assert problem.eval_cost(bad_params) == 0.5 * ((0.5 * 0.5 * 3. * 3.) 57 | + (2. * 2. * 1. * 1.)) 58 | 59 | def test_fit_quadratic(self): 60 | from pyslam.residuals import QuadraticResidual 61 | 62 | params_true = {'a': 1., 'b': -2., 'c': 3.} 63 | params_init = {'a': -20., 'b': 10., 'c': -30.} 64 | 65 | x_data = np.linspace(-5, 5, 10) 66 | y_data = params_true['a'] * x_data * x_data \ 67 | + params_true['b'] * x_data + params_true['c'] 68 | 69 | problem = Problem() 70 | problem.options.num_threads = 1 71 | for x, y in zip(x_data, y_data): 72 | problem.add_residual_block(QuadraticResidual( 73 | x, y, 1.), ['a', 'b', 'c']) 74 | 75 | problem.initialize_params(params_init) 76 | params_final = problem.solve() 77 | 78 | for key in params_true.keys(): 79 | assert(np.allclose(params_final[key], params_true[key])) 80 | 81 | 82 | class TestPoseGraphRelax: 83 | 84 | @pytest.fixture 85 | def poses_true(self): 86 | from liegroups import SE2, SO2 87 | T_k_w = { 88 | 'T_0_w': SE2.identity(), 89 | 'T_1_w': SE2(SO2.identity(), -np.array([0.5, 0])), 90 | 'T_2_w': SE2(SO2.identity(), -np.array([1, 0])), 91 | 'T_3_w': SE2(SO2.from_angle(np.pi / 2), 92 | -(SO2.from_angle(np.pi / 2).dot(np.array([1, 0.5])))), 93 | 'T_4_w': SE2(SO2.from_angle(np.pi), - 94 | (SO2.from_angle(np.pi).dot(np.array([0.5, 0.5])))), 95 | 'T_5_w': SE2(SO2.from_angle(-np.pi / 2), 96 | -(SO2.from_angle(-np.pi / 2).dot(np.array([0.5, 0])))) 97 | } 98 | return T_k_w 99 | 100 | @pytest.fixture 101 | def poses_init(self, poses_true): 102 | from liegroups import SE2 103 | offset1 = SE2.exp([-0.1, 0.1, -0.1]) 104 | offset2 = SE2.exp([0.1, -0.1, 0.1]) 105 | T_k_w = { 106 | 'T_0_w': poses_true['T_0_w'], 107 | 'T_1_w': offset1.dot(poses_true['T_1_w']), 108 | 'T_2_w': offset2.dot(poses_true['T_2_w']), 109 | 'T_3_w': 
offset1.dot(poses_true['T_3_w']), 110 | 'T_4_w': offset2.dot(poses_true['T_4_w']), 111 | 'T_5_w': offset1.dot(poses_true['T_5_w']), 112 | } 113 | return T_k_w 114 | 115 | @pytest.fixture 116 | def odometry(self, poses_true): 117 | T_obs = { 118 | 'T_0_w': poses_true['T_0_w'], 119 | 'T_1_0': poses_true['T_1_w'].dot(poses_true['T_0_w'].inv()), 120 | 'T_2_1': poses_true['T_2_w'].dot(poses_true['T_1_w'].inv()), 121 | 'T_3_2': poses_true['T_3_w'].dot(poses_true['T_2_w'].inv()), 122 | 'T_4_3': poses_true['T_4_w'].dot(poses_true['T_3_w'].inv()), 123 | 'T_5_4': poses_true['T_5_w'].dot(poses_true['T_4_w'].inv()), 124 | 'T_5_1': poses_true['T_5_w'].dot(poses_true['T_1_w'].inv()) 125 | } 126 | return T_obs 127 | 128 | @pytest.fixture 129 | def residuals(self, odometry): 130 | from pyslam.residuals import PoseResidual, PoseToPoseResidual 131 | prior_stiffness = invsqrt(1e-12 * np.identity(3)) 132 | odom_stiffness = invsqrt(1e-3 * np.identity(3)) 133 | loop_stiffness = invsqrt(1. * np.identity(3)) 134 | return [ 135 | PoseResidual(odometry['T_0_w'], prior_stiffness), 136 | PoseToPoseResidual(odometry['T_1_0'], odom_stiffness), 137 | PoseToPoseResidual(odometry['T_2_1'], odom_stiffness), 138 | PoseToPoseResidual(odometry['T_3_2'], odom_stiffness), 139 | PoseToPoseResidual(odometry['T_4_3'], odom_stiffness), 140 | PoseToPoseResidual(odometry['T_5_4'], odom_stiffness), 141 | PoseToPoseResidual(odometry['T_5_1'], loop_stiffness) 142 | ] 143 | 144 | @pytest.fixture 145 | def residual_params(self): 146 | return [ 147 | ['T_0_w'], 148 | ['T_0_w', 'T_1_w'], 149 | ['T_1_w', 'T_2_w'], 150 | ['T_2_w', 'T_3_w'], 151 | ['T_3_w', 'T_4_w'], 152 | ['T_4_w', 'T_5_w'], 153 | ['T_1_w', 'T_5_w'] 154 | ] 155 | 156 | @pytest.fixture 157 | def options(self): 158 | options = Options() 159 | options.allow_nondecreasing_steps = True 160 | options.max_nondecreasing_steps = 3 161 | return options 162 | 163 | def test_first_pose_constant(self, options, residuals, residual_params, 164 | poses_init, 
poses_true): 165 | from liegroups import SE2 166 | problem = Problem(options) 167 | for i in range(1, 6): 168 | problem.add_residual_block(residuals[i], residual_params[i]) 169 | problem.set_parameters_constant('T_0_w') 170 | problem.initialize_params(poses_init) 171 | poses_final = problem.solve() 172 | for key in poses_true.keys(): 173 | assert np.linalg.norm( 174 | SE2.log(poses_final[key].inv().dot(poses_true[key]))) < 1e-4 175 | 176 | def test_first_pose_prior(self, options, residuals, residual_params, 177 | poses_init, poses_true): 178 | from liegroups import SE2 179 | problem = Problem(options) 180 | for i in range(0, 6): 181 | problem.add_residual_block(residuals[i], residual_params[i]) 182 | problem.initialize_params(poses_init) 183 | poses_final = problem.solve() 184 | for key in poses_true.keys(): 185 | assert np.linalg.norm( 186 | SE2.log(poses_final[key].inv().dot(poses_true[key]))) < 1e-4 187 | 188 | def test_loop_closure(self, options, residuals, residual_params, 189 | poses_init, poses_true): 190 | from liegroups import SE2 191 | problem = Problem(options) 192 | for i in range(0, 7): 193 | problem.add_residual_block(residuals[i], residual_params[i]) 194 | problem.initialize_params(poses_init) 195 | poses_final = problem.solve() 196 | for key in poses_true.keys(): 197 | assert np.linalg.norm( 198 | SE2.log(poses_final[key].inv().dot(poses_true[key]))) < 1e-4 199 | 200 | 201 | class TestBundleAdjust: 202 | 203 | @pytest.fixture 204 | def options(self): 205 | options = Options() 206 | options.allow_nondecreasing_steps = True 207 | options.max_nondecreasing_steps = 3 208 | return options 209 | 210 | @pytest.fixture 211 | def camera(self): 212 | from pyslam.sensors import StereoCamera 213 | return StereoCamera(640, 480, 1000, 1000, 0.25, 1280, 960) 214 | 215 | @pytest.fixture 216 | def points(self): 217 | pts_w_true = [ 218 | np.array([0., -1., 10.]), 219 | np.array([1., 1., 5.]), 220 | np.array([-1., 1., 15.]) 221 | ] 222 | return pts_w_true 223 | 224 | 
@pytest.fixture 225 | def poses(self): 226 | from liegroups import SE3 227 | T_cam_w_true = [ 228 | SE3.identity(), 229 | SE3.exp(0.1 * np.ones(6)), 230 | SE3.exp(0.2 * np.ones(6)), 231 | SE3.exp(0.3 * np.ones(6)) 232 | ] 233 | return T_cam_w_true 234 | 235 | @pytest.fixture 236 | def observations(self, camera, points, poses): 237 | return [[camera.project(T.dot(p)) for p in points] for T in poses] 238 | 239 | def test_bundle_adjust(self, options, camera, points, poses, observations): 240 | from liegroups import SE3 241 | from pyslam.residuals import ReprojectionResidual 242 | 243 | problem = Problem(options) 244 | 245 | obs_var = [1, 1, 2] # [u,v,d] 246 | obs_stiffness = invsqrt(np.diagflat(obs_var)) 247 | 248 | for i, this_pose_obs in enumerate(observations): 249 | for j, o in enumerate(this_pose_obs): 250 | residual = ReprojectionResidual(camera, o, obs_stiffness) 251 | problem.add_residual_block( 252 | residual, ['T_cam{}_w'.format(i), 'pt{}_w'.format(j)]) 253 | 254 | params_true = {} 255 | params_init = {} 256 | 257 | for i, pt in enumerate(points): 258 | pid = 'pt{}_w'.format(i) 259 | params_true.update({pid: pt}) 260 | params_init.update({pid: camera.triangulate( 261 | observations[0][i] + 10. 
* np.random.rand(3))}) 262 | 263 | for i, pose in enumerate(poses): 264 | pid = 'T_cam{}_w'.format(i) 265 | params_true.update({pid: pose}) 266 | params_init.update({pid: SE3.identity()}) 267 | 268 | problem.initialize_params(params_init) 269 | problem.set_parameters_constant('T_cam0_w') 270 | 271 | params_final = problem.solve() 272 | 273 | for key in params_true.keys(): 274 | p_est = params_final[key] 275 | p_true = params_true[key] 276 | 277 | if isinstance(p_est, SE3): 278 | err = SE3.log(p_est.inv().dot(p_true)) 279 | else: 280 | err = p_est - p_true 281 | 282 | assert np.linalg.norm(err) < 1e-4 283 | 284 | 285 | class TestCovariance: 286 | 287 | @pytest.fixture 288 | def options(self): 289 | options = Options() 290 | options.allow_nondecreasing_steps = True 291 | options.max_nondecreasing_steps = 3 292 | return options 293 | 294 | def test_se3(self, options): 295 | from liegroups import SE3 296 | from pyslam.residuals import PoseResidual, PoseToPoseResidual 297 | 298 | problem = Problem(options) 299 | 300 | odom = SE3.exp(0.1 * np.ones(6)) 301 | 302 | odom_stiffness = invsqrt(1e-3 * np.eye(SE3.dof)) 303 | T0_stiffness = invsqrt(1e-6 * np.eye(SE3.dof)) 304 | 305 | odom_covar = np.linalg.inv(np.dot(odom_stiffness, odom_stiffness)) 306 | T0_covar = np.linalg.inv(np.dot(T0_stiffness, T0_stiffness)) 307 | 308 | residual0 = PoseResidual(SE3.identity(), T0_stiffness) 309 | residual1 = PoseToPoseResidual(odom, odom_stiffness) 310 | 311 | params_init = {'T0': SE3.identity(), 'T1': SE3.identity()} 312 | 313 | problem.add_residual_block(residual0, 'T0') 314 | problem.add_residual_block(residual1, ['T0', 'T1']) 315 | problem.initialize_params(params_init) 316 | problem.solve() 317 | problem.compute_covariance() 318 | estimated_covar = problem.get_covariance_block('T1', 'T1') 319 | expected_covar = odom_covar + odom.adjoint().dot(T0_covar.dot(odom.adjoint().T)) 320 | 321 | assert np.allclose(estimated_covar, expected_covar) 322 | 
-------------------------------------------------------------------------------- /tests/test_sensors.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | 4 | 5 | def get_pinhole_intrinsics(): 6 | cu = 150. 7 | cv = 100. 8 | fu = 250. 9 | fv = 200. 10 | w = 300 11 | h = 200 12 | return cu, cv, fu, fv, w, h 13 | 14 | 15 | class TestStereoCamera: 16 | 17 | @pytest.fixture 18 | def sensor(self): 19 | from pyslam.sensors import StereoCamera 20 | cu, cv, fu, fv, w, h = get_pinhole_intrinsics() 21 | b = 1. 22 | 23 | return StereoCamera(cu, cv, fu, fv, b, w, h) 24 | 25 | def test_is_valid_measurement(self, sensor): 26 | test_uvd = np.array([[110., 120., 10.], 27 | [-10., 100., 10.], 28 | [0., -10., 10.], 29 | [0., 0., -5.]]) 30 | test_expected = [True, False, False, False] 31 | for uvd, expected in zip(test_uvd, test_expected): 32 | assert sensor.is_valid_measurement(uvd) == expected 33 | 34 | assert np.array_equal( 35 | sensor.is_valid_measurement(test_uvd), test_expected) 36 | 37 | def test_project_triangulate(self, sensor): 38 | test_xyz = [1., 2., 10.] 39 | test_uvd = [110., 120., 10.] 40 | assert np.allclose(sensor.triangulate( 41 | sensor.project(test_xyz)), test_xyz) 42 | assert np.allclose(sensor.project( 43 | sensor.triangulate(test_uvd)), test_uvd) 44 | 45 | def test_project_jacobian(self, sensor): 46 | test_xyz = [1., 2., 10.] 47 | expected_jacobian = np.array([[25., 0., -2.5], 48 | [0., 20., -4.], 49 | [0., 0., -2.5]]) 50 | uvd, jacobian = sensor.project(test_xyz, compute_jacobians=True) 51 | assert np.allclose(jacobian, expected_jacobian) 52 | 53 | def test_triangulate_jacobian(self, sensor): 54 | test_uvd = [110., 120., 10.] 
55 | expected_jacobian = np.array([[0.1, 0., 0.4], 56 | [0., 0.125, -0.25], 57 | [0., 0., -2.5]]) 58 | xyz, jacobian = sensor.triangulate(test_uvd, compute_jacobians=True) 59 | assert np.allclose(jacobian, expected_jacobian) 60 | 61 | def test_project_vectorized(self, sensor): 62 | test_xyz1 = [1., 2., 10.] 63 | test_xyz2 = [2., 1., -20.] 64 | test_xyz12 = np.array([test_xyz1, test_xyz2]) # 2x3 65 | uvd, jacobians = sensor.project(test_xyz12, True) 66 | assert uvd.shape == (2, 3) 67 | assert jacobians.shape == (2, 3, 3) 68 | 69 | def test_triangulate_vectorized(self, sensor): 70 | test_uvd1 = [110., 120., 10.] 71 | test_uvd2 = [500., 600., 1.] 72 | test_uvd12 = np.array([test_uvd1, test_uvd2]) # 2x3 73 | xyz, jacobians = sensor.triangulate(test_uvd12, True) 74 | assert xyz.shape == (2, 3) 75 | assert jacobians.shape == (2, 3, 3) 76 | 77 | 78 | class TestRGBDCamera: 79 | 80 | @pytest.fixture 81 | def sensor(self): 82 | from pyslam.sensors import RGBDCamera 83 | cu, cv, fu, fv, w, h = get_pinhole_intrinsics() 84 | return RGBDCamera(cu, cv, fu, fv, w, h) 85 | 86 | def test_is_valid_measurement(self, sensor): 87 | test_uvz = np.array([[110., 120., 10.], 88 | [-10., 100., 10.], 89 | [0., -10., 10.], 90 | [0., 0., -5.]]) 91 | test_expected = [True, False, False, False] 92 | for uvz, expected in zip(test_uvz, test_expected): 93 | assert sensor.is_valid_measurement(uvz) == expected 94 | 95 | assert np.array_equal( 96 | sensor.is_valid_measurement(test_uvz), test_expected) 97 | 98 | def test_project_triangulate(self, sensor): 99 | test_xyz = [1., 2., 10.] 100 | test_uvz = [110., 120., 10.] 101 | assert np.allclose(sensor.triangulate( 102 | sensor.project(test_xyz)), test_xyz) 103 | assert np.allclose(sensor.project( 104 | sensor.triangulate(test_uvz)), test_uvz) 105 | 106 | def test_project_jacobian(self, sensor): 107 | test_xyz = [1., 2., 10.] 
108 | expected_jacobian = np.array([[25., 0., -2.5], 109 | [0., 20., -4.], 110 | [0., 0., 1.]]) 111 | uvz, jacobian = sensor.project(test_xyz, compute_jacobians=True) 112 | assert np.allclose(jacobian, expected_jacobian) 113 | 114 | def test_triangulate_jacobian(self, sensor): 115 | test_uvz = [110., 120., 10.] 116 | expected_jacobian = np.array([[0.04, 0., -0.16], 117 | [0., 0.05, 0.1], 118 | [0., 0., 1.]]) 119 | xyz, jacobian = sensor.triangulate(test_uvz, compute_jacobians=True) 120 | assert np.allclose(jacobian, expected_jacobian) 121 | 122 | def test_project_vectorized(self, sensor): 123 | test_xyz1 = [1., 2., 10.] 124 | test_xyz2 = [2., 1., -20.] 125 | test_xyz12 = np.array([test_xyz1, test_xyz2]) # 2x3 126 | uvz, jacobians = sensor.project(test_xyz12, True) 127 | assert uvz.shape == (2, 3) 128 | assert jacobians.shape == (2, 3, 3) 129 | 130 | def test_triangulate_vectorized(self, sensor): 131 | test_uvz1 = [110., 120., 10.] 132 | test_uvz2 = [500., 600., 1.] 133 | test_uvz12 = np.array([test_uvz1, test_uvz2]) # 2x3 134 | xyz, jacobians = sensor.triangulate(test_uvz12, True) 135 | assert xyz.shape == (2, 3) 136 | assert jacobians.shape == (2, 3, 3) 137 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import pyslam.utils 4 | 5 | 6 | def test_invsqrt(): 7 | sca = 4 8 | assert pyslam.utils.invsqrt(sca) == 1. 
/ np.sqrt(sca) 9 | mat = np.array([[7, 2, 1], 10 | [0, 3, -1], 11 | [-3, 4, -2]]) 12 | invsqrt_mat = pyslam.utils.invsqrt(mat) 13 | assert np.allclose(np.linalg.inv(np.dot(invsqrt_mat, invsqrt_mat)), mat) 14 | 15 | 16 | def test_bilinear_interpolate(): 17 | im1 = np.eye(2) 18 | im2 = np.ones((2, 2)) 19 | im3 = np.dstack((np.eye(2), np.ones((2, 2)), np.zeros((2, 2)))) 20 | 21 | x = [0.5, 1] 22 | y = [0.5, 0] 23 | 24 | interp1 = pyslam.utils.bilinear_interpolate(im1, x, y) 25 | interp2 = pyslam.utils.bilinear_interpolate(im2, x, y) 26 | interp3 = pyslam.utils.bilinear_interpolate(im3, x, y) 27 | 28 | assert np.allclose(interp1, np.array([0.5, 0.])) 29 | assert np.allclose(interp2, np.array([1., 1.])) 30 | assert np.allclose(interp3[0, :], np.array([0.5, 1., 0.])) 31 | assert np.allclose(interp3[1, :], np.array([0., 1., 0.])) 32 | --------------------------------------------------------------------------------