├── .gitignore
├── 0.Dataset0
│   ├── Barcodes.dat
│   ├── Groundtruth.dat
│   ├── Landmark_Groundtruth.dat
│   ├── Measurement.dat
│   └── Odometry.dat
├── 0.Dataset1
│   ├── Barcodes.dat
│   ├── Groundtruth.dat
│   ├── Landmark_Groundtruth.dat
│   ├── Measurement.dat
│   └── Odometry.dat
├── 1.Localization
│   ├── EKF_Localization_known_correspondences.py
│   └── PF_Localization_known_correspondences.py
├── 2.EKF_SLAM
│   ├── EKF_SLAM_known_correspondences.py
│   └── EKF_SLAM_unknown_correspondences.py
├── 3.Graph_SLAM
│   └── Graph_SLAM_known_correspondences.py
├── 4.Fast_SLAM_1
│   ├── lib
│   │   ├── __init__.py
│   │   ├── measurement.py
│   │   ├── motion.py
│   │   └── particle.py
│   ├── src
│   │   ├── Fast_SLAM_1_known_correspondences.py
│   │   ├── Fast_SLAM_1_unknown_correspondences.py
│   │   └── __init__.py
│   ├── test_Fast_SLAM_1_known_correspondences.py
│   └── test_Fast_SLAM_1_unknown_correspondences.py
├── 5.Fast_SLAM_2
│   ├── lib
│   │   ├── __init__.py
│   │   ├── measurement.py
│   │   ├── motion.py
│   │   └── particle.py
│   ├── src
│   │   ├── Fast_SLAM_2_unknown_correspondences.py
│   │   └── __init__.py
│   └── test_Fast_SLAM_2_unknown_correspondences.py
├── README.md
└── doc
    ├── EKF_Localization_known_correspondences.png
    ├── EKF_SLAM_known_correspondences.gif
    ├── EKF_SLAM_unknown_correspondences.gif
    ├── Fast_SLAM_1_known_correspondences.gif
    ├── Fast_SLAM_1_unknown_correspondences.gif
    ├── Fast_SLAM_2_unknown_correspondences.gif
    ├── Graph_SLAM_known_correspondences.png
    ├── Odometry.png
    └── PF_Localization_known_correspondences.gif

/.gitignore:
--------------------------------------------------------------------------------
1 | # Editors
2 | .vscode/
3 | .idea/
4 | 
5 | # Vagrant
6 | .vagrant/
7 | 
8 | # Mac/OSX
9 | .DS_Store
10 | 
11 | # Windows
12 | Thumbs.db
13 | 
14 | # Source for the following rules: https://raw.githubusercontent.com/github/gitignore/master/Python.gitignore
15 | # Byte-compiled / optimized / DLL files
16 | __pycache__/
17 | *.py[cod]
18 | *$py.class
19 | 
20 | # C extensions
21 | *.so
22 | 
23 | # Distribution / packaging
24 | .Python
25 | build/
26 | develop-eggs/
27 | dist/
28 | downloads/
29 | eggs/
30 | .eggs/
31 | lib/
32 | lib64/
33 | parts/
34 | sdist/
35 | var/
36 | wheels/
37 | *.egg-info/
38 | .installed.cfg
39 | *.egg
40 | MANIFEST
41 | 
42 | # PyInstaller
43 | # Usually these files are written by a python script from a template
44 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
45 | *.manifest
46 | *.spec
47 | 
48 | # Installer logs
49 | pip-log.txt
50 | pip-delete-this-directory.txt
51 | 
52 | # Unit test / coverage reports
53 | htmlcov/
54 | .tox/
55 | .nox/
56 | .coverage
57 | .coverage.*
58 | .cache
59 | nosetests.xml
60 | coverage.xml
61 | *.cover
62 | .hypothesis/
63 | .pytest_cache/
64 | 
65 | # Translations
66 | *.mo
67 | *.pot
68 | 
69 | # Django stuff:
70 | *.log
71 | local_settings.py
72 | db.sqlite3
73 | 
74 | # Flask stuff:
75 | instance/
76 | .webassets-cache
77 | 
78 | # Scrapy stuff:
79 | .scrapy
80 | 
81 | # Sphinx documentation
82 | docs/_build/
83 | 
84 | # PyBuilder
85 | target/
86 | 
87 | # Jupyter Notebook
88 | .ipynb_checkpoints
89 | 
90 | # IPython
91 | profile_default/
92 | ipython_config.py
93 | 
94 | # pyenv
95 | .python-version
96 | 
97 | # celery beat schedule file
98 | celerybeat-schedule
99 | 
100 | # SageMath parsed files
101 | *.sage.py
102 | 
103 | # Environments
104 | .env
105 | .venv
106 | env/
107 | venv/
108 | ENV/
109 | env.bak/
110 | venv.bak/
111 | 
112 | # Spyder project settings
113 | .spyderproject
114 | .spyproject
115 | 
116 | # Rope project settings
117 | .ropeproject
118 | 
119 | # mkdocs documentation
120 | /site
121 | 
122 | # mypy
123 | .mypy_cache/
124 | .dmypy.json
125 | dmypy.json
126 | 
--------------------------------------------------------------------------------
/0.Dataset0/Barcodes.dat:
--------------------------------------------------------------------------------
1 | # UTIAS Multi-Robot Cooperative Localization and Mapping Dataset
2 | # produced by Keith Leung (keith.leung@robotics.utias.utoronto.ca) 2009
3 | # Barcode Data Format:
4 | # Subject #    Barcode #
5 | 1 5
6 | 2 14
7 | 3 41
8 | 4 32
9 | 5 23
10 | 6 45
11 | 7 90
12 | 8 72
13 | 9 9
14 | 10 63
15 | 11 18
16 | 12 81
17 | 13 27
18 | 14 61
19 | 15 7
20 | 16 16
21 | 17 36
22 | 18 54
23 | 19 25
24 | 20 70
25 | 
--------------------------------------------------------------------------------
/0.Dataset0/Landmark_Groundtruth.dat:
--------------------------------------------------------------------------------
1 | # UTIAS Multi-Robot Cooperative Localization and Mapping Dataset
2 | # produced by Keith Leung (keith.leung@robotics.utias.utoronto.ca) 2009
3 | # Landmark Groundtruth Data Format:
4 | # Subject #    x [m]    y [m]    x std-dev [m]    y std-dev [m]
5 | 6 0.48704624 -4.95127346 0.00003020 0.00017939
6 | 7 3.12907696 -5.55811630 0.00003312 0.00015935
7 | 8 2.65345619 -3.75123336 0.00004179 0.00023282
8 | 9 0.97259759 -3.19151915 0.00007947 0.00022909
9 | 10 0.84527678 -1.61673856 0.00006391 0.00026239
10 | 11 3.72073503 -1.98025384 0.00013685 0.00027930
11 | 12 1.91856554 -0.82058089 0.00018287 0.00020971
12 | 13 0.91765949 0.59631939 0.00006368 0.00033948
13 | 14 2.76327261 0.24394752 0.00009005 0.00017049
14 | 15 4.67239250 0.84409390 0.00014602 0.00027795
15 | 16 0.95289638 2.70933340 0.00033774 0.00016960
16 | 17 3.26730988 2.52731607 0.00107665 0.00298502
17 | 18 0.88917640 4.40906195 0.00005517 0.00010895
18 | 19 2.39221778 3.80018838 0.00025545 0.00256092
19 | 20 4.13634588 3.60883503 0.00007982 0.00050874
20 | 
--------------------------------------------------------------------------------
/0.Dataset1/Barcodes.dat:
--------------------------------------------------------------------------------
1 | # UTIAS Multi-Robot Cooperative Localization and Mapping Dataset
2 | # produced by Keith Leung (keith.leung@robotics.utias.utoronto.ca) 2009
3 | # Barcode Data Format:
4 | # Subject #    Barcode #
5 | 1 5
6 | 2 14
7 | 3 41
8 | 4 32
9 | 5 23
10 | 6 63
11 | 7 25
12 | 8 45
13 | 9 16
14 | 10 61
15 | 11 36
16 | 12 18
17 | 13 9
18 | 14 72
19 | 15 70
20 | 16 81
21 | 17 54
22 | 18 27
23 | 19 7
24 | 20 90
25 | 
--------------------------------------------------------------------------------
/0.Dataset1/Landmark_Groundtruth.dat:
--------------------------------------------------------------------------------
1 | # UTIAS Multi-Robot Cooperative Localization and Mapping Dataset
2 | # produced by Keith Leung (keith.leung@robotics.utias.utoronto.ca) 2009
3 | # Landmark Groundtruth Data Format:
4 | # Subject #    x [m]    y [m]    x std-dev [m]    y std-dev [m]
5 | 6 1.88032539 -5.57229508 0.00001974 0.00004067
6 | 7 1.77648406 -2.44386354 0.00002415 0.00003114
7 | 8 4.42330143 -4.98170313 0.00010428 0.00010507
8 | 9 -0.68768043 -5.11014717 0.00004077 0.00008785
9 | 10 -0.85117881 -2.49223307 0.00005569 0.00004923
10 | 11 4.42094946 -2.37103644 0.00006128 0.00009175
11 | 12 4.34924478 0.25444762 0.00007713 0.00012118
12 | 13 3.07964257 0.24942861 0.00003449 0.00005609
13 | 14 0.46702834 0.18511889 0.00003942 0.00002907
14 | 15 -1.00015496 0.17453779 0.00006536 0.00005926
15 | 16 0.99953879 2.72607308 0.00003285 0.00002186
16 | 17 -1.04151642 2.80020985 0.00012014 0.00005809
17 | 18 0.34561556 5.02433367 0.00004452 0.00004957
18 | 19 2.96594198 5.09583446 0.00006062 0.00008501
19 | 20 4.30562926 2.86663299 0.00003748 0.00004206
20 | 
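Every script below parses these whitespace-delimited tables with np.loadtxt and derives the same barcode-to-landmark lookup (table rows 0-4 are the five robots; rows 5 onward are landmark subjects 6 - 20). A standalone sketch of that mapping, not part of the repository (the paths assume it is run from the repository root):

    import numpy as np

    barcodes = np.loadtxt("0.Dataset1/Barcodes.dat")               # [Subject#, Barcode#]
    landmarks = np.loadtxt("0.Dataset1/Landmark_Groundtruth.dat")  # [Subject#, x, y, x std-dev, y std-dev]
    # Row i of the barcode table (i >= 5) corresponds to landmark row i - 5
    barcode_to_xy = {int(barcodes[i][1]): landmarks[i - 5][1:3] for i in range(5, len(barcodes))}
    print(barcode_to_xy[63])  # ground-truth location of landmark subject 6 in Dataset1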
--------------------------------------------------------------------------------
/1.Localization/EKF_Localization_known_correspondences.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | '''
4 | Implementation of EKF Localization with known correspondences.
5 | See Probabilistic Robotics:
6 | 1. Page 204, Table 7.2 for full algorithm.
7 | 
8 | Author: Chenge Yang
9 | Email: chengeyang2019@u.northwestern.edu
10 | '''
11 | 
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 | 
15 | 
16 | class ExtendedKalmanFilter():
17 |     def __init__(self, dataset, end_frame, R, Q):
18 |         self.load_data(dataset, end_frame)
19 |         self.initialization(R, Q)
20 |         for data in self.data:
21 |             if (data[1] == -1):
22 |                 self.motion_update(data)
23 |             else:
24 |                 self.measurement_update(data)
25 |         self.plot_data()
26 | 
27 |     def load_data(self, dataset, end_frame):
28 |         # Loading dataset
29 |         # Barcodes: [Subject#, Barcode#]
30 |         self.barcodes_data = np.loadtxt(dataset + "/Barcodes.dat")
31 |         # Ground truth: [Time[s], x[m], y[m], orientation[rad]]
32 |         self.groundtruth_data = np.loadtxt(dataset + "/Groundtruth.dat")
33 |         # Landmark ground truth: [Subject#, x[m], y[m]]
34 |         self.landmark_groundtruth_data = np.loadtxt(dataset + "/Landmark_Groundtruth.dat")
35 |         # Measurement: [Time[s], Subject#, range[m], bearing[rad]]
36 |         self.measurement_data = np.loadtxt(dataset + "/Measurement.dat")
37 |         # Odometry: [Time[s], Subject#, forward_v[m/s], angular_v[rad/s]]
38 |         self.odometry_data = np.loadtxt(dataset + "/Odometry.dat")
39 | 
40 |         # Collect all input data and sort by timestamp
41 |         # Add subject "odom" = -1 for odometry data
42 |         odom_data = np.insert(self.odometry_data, 1, -1, axis = 1)
43 |         self.data = np.concatenate((odom_data, self.measurement_data), axis = 0)
44 |         self.data = self.data[np.argsort(self.data[:, 0])]
45 | 
46 |         # Remove all data before the first timestamp of groundtruth
47 |         # Use first groundtruth data as the initial location of the robot
48 |         for i in range(len(self.data)):
49 |             if (self.data[i][0] > self.groundtruth_data[0][0]):
50 |                 break
51 |         self.data = self.data[i:]
52 | 
53 |         # Remove all data after the specified number of frames
54 |         self.data = self.data[:end_frame]
55 |         cut_timestamp = self.data[end_frame - 1][0]
56 |         # Remove all groundtruth after the corresponding timestamp
57 |         for i in range(len(self.groundtruth_data)):
58 |             if (self.groundtruth_data[i][0] >= cut_timestamp):
59 |                 break
60 |         self.groundtruth_data = self.groundtruth_data[:i]
61 | 
62 |         # Combine barcode Subject# with landmark Subject# to create lookup-table
63 |         # [x[m], y[m], x std-dev[m], y std-dev[m]]
64 |         self.landmark_locations = {}
65 |         for i in range(5, len(self.barcodes_data), 1):
66 |             self.landmark_locations[self.barcodes_data[i][1]] = self.landmark_groundtruth_data[i - 5][1:]
67 | 
68 |         # Lookup table to map barcode Subject# to landmark Subject#
69 |         # Barcode 6 is the first landmark (1 - 15 for 6 - 20)
70 |         self.landmark_indexes = {}
71 |         for i in range(5, len(self.barcodes_data), 1):
72 |             self.landmark_indexes[self.barcodes_data[i][1]] = i - 4
73 | 
74 |     def initialization(self, R, Q):
75 |         # Initial state
76 |         self.states = np.array([self.groundtruth_data[0]])
77 |         self.last_timestamp = self.states[-1][0]
78 |         # Choose a very small initial state covariance because the first ground truth point is used as the initial location
79 |         self.sigma = np.diagflat([1e-10, 1e-10, 1e-10])
80 |         # States with measurement update
81 |         self.states_measurement = []
82 | 
83 |         # State covariance matrix
84 |         self.R = R
85 |         # Measurement covariance matrix
86 |         self.Q = Q
87 | 
88 |     def motion_update(self, control):
89 |         # ------------------ Step 1: Mean update ---------------------#
90 |         # State: [x, y, θ]
91 |         # Control: [v, w]
92 |         # State-transition function (simplified):
93 |         # [x_t, y_t, θ_t] = g(u_t, x_t-1)
94 |         #   x_t = x_t-1 + v * cosθ_t-1 * delta_t
95 |         #   y_t = y_t-1 + v * sinθ_t-1 * delta_t
96 |         #   θ_t = θ_t-1 + w * delta_t
97 |         # Skip motion update if two odometry data are too close
98 |         delta_t = control[0] - self.last_timestamp
99 |         if (delta_t < 0.001):
100 |             return
101 |         # Compute updated [x, y, theta]
102 |         x_t = self.states[-1][1] + control[2] * np.cos(self.states[-1][3]) * delta_t
103 |         y_t = self.states[-1][2] + control[2] * np.sin(self.states[-1][3]) * delta_t
104 |         theta_t = self.states[-1][3] + control[3] * delta_t
105 |         # Limit θ within [-pi, pi]
106 |         if (theta_t > np.pi):
107 |             theta_t -= 2 * np.pi
108 |         elif (theta_t < -np.pi):
109 |             theta_t += 2 * np.pi
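        # (Illustrative aside, not part of the original script: the two-branch
        # wrap above assumes θ moves by less than 2π per step; a branch-free
        # equivalent that maps any angle into [-pi, pi] is
        #     theta_t = np.arctan2(np.sin(theta_t), np.cos(theta_t))
        # )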
110 |         self.last_timestamp = control[0]
111 |         self.states = np.append(self.states, np.array([[control[0], x_t, y_t, theta_t]]), axis = 0)
112 | 
113 |         # ------ Step 2: Linearize state-transition by Jacobian ------#
114 |         # Jacobian: G = d g(u_t, x_t-1) / d x_t-1
115 |         #        1  0  -v * delta_t * sinθ_t-1
116 |         #  G  =  0  1   v * delta_t * cosθ_t-1
117 |         #        0  0             1
118 |         G_1 = np.array([1, 0, - control[2] * delta_t * np.sin(self.states[-2][3])])
119 |         G_2 = np.array([0, 1, control[2] * delta_t * np.cos(self.states[-2][3])])
120 |         G_3 = np.array([0, 0, 1])
121 |         self.G = np.array([G_1, G_2, G_3])
122 | 
123 |         # ---------------- Step 3: Covariance update ------------------#
124 |         self.sigma = self.G.dot(self.sigma).dot(self.G.T) + self.R
125 | 
126 |     def measurement_update(self, measurement):
127 |         # Continue if landmark is not found in self.landmark_locations
128 |         if not measurement[1] in self.landmark_locations:
129 |             return
130 | 
131 |         # ---------------- Step 1: Measurement update -----------------#
132 |         # range = sqrt((x_l - x_t)^2 + (y_l - y_t)^2)
133 |         # bearing = atan2(y_l - y_t, x_l - x_t) - θ_t
134 |         x_l = self.landmark_locations[measurement[1]][0]
135 |         y_l = self.landmark_locations[measurement[1]][1]
136 |         x_t = self.states[-1][1]
137 |         y_t = self.states[-1][2]
138 |         theta_t = self.states[-1][3]
139 |         q = (x_l - x_t) * (x_l - x_t) + (y_l - y_t) * (y_l - y_t)
140 |         range_expected = np.sqrt(q)
141 |         bearing_expected = np.arctan2(y_l - y_t, x_l - x_t) - theta_t
142 | 
143 |         # -------- Step 2: Linearize Measurement by Jacobian ----------#
144 |         # Jacobian: H = d h(x_t) / d x_t
145 |         #       -(x_l - x_t) / sqrt(q)   -(y_l - y_t) / sqrt(q)    0
146 |         # H =    (y_l - y_t) / q         -(x_l - x_t) / q         -1
147 |         #                 0                        0               0
148 |         # q = (x_l - x_t)^2 + (y_l - y_t)^2
149 |         H_1 = np.array([-(x_l - x_t) / np.sqrt(q), -(y_l - y_t) / np.sqrt(q), 0])
150 |         H_2 = np.array([(y_l - y_t) / q, -(x_l - x_t) / q, -1])
151 |         H_3 = np.array([0, 0, 0])
152 |         self.H = np.array([H_1, H_2, H_3])
153 | 
154 |         # ---------------- Step 3: Kalman gain update -----------------#
155 |         S_t = self.H.dot(self.sigma).dot(self.H.T) + self.Q
156 |         self.K = self.sigma.dot(self.H.T).dot(np.linalg.inv(S_t))
157 | 
158 |         # ------------------- Step 4: mean update ---------------------#
159 |         difference = np.array([measurement[2] - range_expected, measurement[3] - bearing_expected, 0])
160 |         innovation = self.K.dot(difference)
161 |         self.last_timestamp = measurement[0]
162 |         self.states = np.append(self.states, np.array([[self.last_timestamp, x_t + innovation[0], y_t + innovation[1], theta_t + innovation[2]]]), axis=0)
163 |         self.states_measurement.append([x_t + innovation[0], y_t + innovation[1]])
164 | 
165 |         # ---------------- Step 5: covariance update ------------------#
166 |         self.sigma = (np.identity(3) - self.K.dot(self.H)).dot(self.sigma)
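        # (Illustrative aside, not part of the original script: the update
        # above is the standard form sigma = (I - K H) sigma. The Joseph form,
        #     sigma = (I - K H) sigma (I - K H).T + K Q K.T
        # gives the same result in exact arithmetic but preserves symmetry and
        # positive semi-definiteness better under floating-point rounding.)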
167 | 
168 |     def plot_data(self):
169 |         # Ground truth data
170 |         plt.plot(self.groundtruth_data[:, 1], self.groundtruth_data[:, 2], 'b', label="Robot State Ground truth")
171 | 
172 |         # States
173 |         plt.plot(self.states[:, 1], self.states[:, 2], 'r', label="Robot State Estimate")
174 | 
175 |         # Start and end points
176 |         plt.plot(self.groundtruth_data[0, 1], self.groundtruth_data[0, 2], 'go', label="Start point")
177 |         plt.plot(self.groundtruth_data[-1, 1], self.groundtruth_data[-1, 2], 'yo', label="End point")
178 | 
179 |         # Measurement update locations
180 |         if (len(self.states_measurement) > 0):
181 |             self.states_measurement = np.array(self.states_measurement)
182 |             plt.scatter(self.states_measurement[:, 0], self.states_measurement[:, 1], s=10, c='k', alpha=0.5, label="Measurement updates")
183 | 
184 |         # Landmark ground truth locations and indexes
185 |         landmark_xs = []
186 |         landmark_ys = []
187 |         for location in self.landmark_locations:
188 |             landmark_xs.append(self.landmark_locations[location][0])
189 |             landmark_ys.append(self.landmark_locations[location][1])
190 |             index = self.landmark_indexes[location] + 5
191 |             plt.text(landmark_xs[-1], landmark_ys[-1], str(index), alpha=0.5, fontsize=10)
192 |         plt.scatter(landmark_xs, landmark_ys, s=200, c='k', alpha=0.2, marker='*', label='Landmark Locations')
193 | 
194 |         # plt.title("Localization with only odometry data")
195 |         plt.title("EKF Localization with Known Correspondences")
196 |         plt.legend()
197 |         plt.show()
198 | 
199 | 
200 | if __name__ == "__main__":
201 |     # # Dataset 0
202 |     # dataset = "../0.Dataset0"
203 |     # end_frame = 10000
204 |     # # State covariance matrix
205 |     # R = np.diagflat(np.array([1.0, 1.0, 1.0])) ** 2
206 |     # # Measurement covariance matrix
207 |     # Q = np.diagflat(np.array([350, 350, 1e16])) ** 2
208 | 
209 |     # Dataset 1
210 |     dataset = "../0.Dataset1"
211 |     end_frame = 3200
212 |     # State covariance matrix
213 |     R = np.diagflat(np.array([1.0, 1.0, 10.0])) ** 2
214 |     # Measurement covariance matrix
215 |     Q = np.diagflat(np.array([30, 30, 1e16])) ** 2
216 |     #
217 |     ekf = ExtendedKalmanFilter(dataset, end_frame, R, Q)
218 | 
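The measurement Jacobian H in measurement_update above is easy to get wrong by a sign; a quick standalone check, not part of the repository, is to compare the analytic 2x3 block (the third row is all zeros) against central finite differences of the range-bearing model:

    import numpy as np

    def h(state, landmark):
        # Range-bearing measurement model, as in the script above
        x, y, theta = state
        dx, dy = landmark[0] - x, landmark[1] - y
        return np.array([np.hypot(dx, dy), np.arctan2(dy, dx) - theta])

    def H_analytic(state, landmark):
        x, y, theta = state
        dx, dy = landmark[0] - x, landmark[1] - y
        q = dx ** 2 + dy ** 2
        return np.array([[-dx / np.sqrt(q), -dy / np.sqrt(q), 0],
                         [dy / q, -dx / q, -1]])

    state = np.array([1.0, 2.0, 0.3])   # arbitrary test pose
    landmark = np.array([4.0, -1.0])    # arbitrary test landmark
    eps = 1e-6
    H_num = np.zeros((2, 3))
    for j in range(3):
        d = np.zeros(3)
        d[j] = eps
        H_num[:, j] = (h(state + d, landmark) - h(state - d, landmark)) / (2 * eps)
    print(np.allclose(H_num, H_analytic(state, landmark), atol=1e-5))  # True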
--------------------------------------------------------------------------------
/1.Localization/PF_Localization_known_correspondences.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | '''
4 | Implementation of Particle Filter Localization with known correspondences.
5 | See Probabilistic Robotics:
6 | 1. Page 252, Table 8.2 for main algorithm.
7 | 2. Page 124, Table 5.3 for motion model.
8 | 3. Page 179, Table 6.4 for measurement model.
9 | 
10 | Author: Chenge Yang
11 | Email: chengeyang2019@u.northwestern.edu
12 | '''
13 | 
14 | import numpy as np
15 | import matplotlib.pyplot as plt
16 | from scipy import stats
17 | 
18 | 
19 | class ParticleFilter():
20 |     def __init__(self, dataset, end_frame, num_particles, motion_noise, measurement_noise):
21 |         self.load_data(dataset, end_frame)
22 |         self.initialization(num_particles, motion_noise, measurement_noise)
23 |         for data in self.data:
24 |             if (data[1] == -1):
25 |                 self.motion_update(data)
26 |             else:
27 |                 self.measurement_update(data)
28 |                 self.importance_sampling()
29 |                 self.state_update()
30 |             # Plot every n frames
31 |             if (len(self.states) > 800 and len(self.states) % 15 == 0):
32 |                 self.plot_data()
33 | 
34 |     def load_data(self, dataset, end_frame):
35 |         # Loading dataset
36 |         # Barcodes: [Subject#, Barcode#]
37 |         self.barcodes_data = np.loadtxt(dataset + "/Barcodes.dat")
38 |         # Ground truth: [Time[s], x[m], y[m], orientation[rad]]
39 |         self.groundtruth_data = np.loadtxt(dataset + "/Groundtruth.dat")
40 |         # Landmark ground truth: [Subject#, x[m], y[m]]
41 |         self.landmark_groundtruth_data = np.loadtxt(dataset + "/Landmark_Groundtruth.dat")
42 |         # Measurement: [Time[s], Subject#, range[m], bearing[rad]]
43 |         self.measurement_data = np.loadtxt(dataset + "/Measurement.dat")
44 |         # Odometry: [Time[s], Subject#, forward_v[m/s], angular_v[rad/s]]
45 |         self.odometry_data = np.loadtxt(dataset + "/Odometry.dat")
46 | 
47 |         # Collect all input data and sort by timestamp
48 |         # Add subject "odom" = -1 for odometry data
49 |         odom_data = np.insert(self.odometry_data, 1, -1, axis=1)
50 |         self.data = np.concatenate((odom_data, self.measurement_data), axis = 0)
51 |         self.data = self.data[np.argsort(self.data[:, 0])]
52 | 
53 |         # Remove all data before the first timestamp of groundtruth
54 |         # Use first groundtruth data as the initial location of the robot
55 |         for i in range(len(self.data)):
56 |             if (self.data[i][0] > self.groundtruth_data[0][0]):
57 |                 break
58 |         self.data = self.data[i:]
59 | 
60 |         # Remove all data after the specified number of frames
61 |         self.data = self.data[:end_frame]
62 |         cut_timestamp = self.data[end_frame - 1][0]
63 |         # Remove all groundtruth after the corresponding timestamp
64 |         for i in range(len(self.groundtruth_data)):
65 |             if (self.groundtruth_data[i][0] >= cut_timestamp):
66 |                 break
67 |         self.groundtruth_data = self.groundtruth_data[:i]
68 | 
69 |         # Combine barcode Subject# with landmark Subject# to create lookup-table
70 |         # [x[m], y[m], x std-dev[m], y std-dev[m]]
71 |         self.landmark_locations = {}
72 |         for i in range(5, len(self.barcodes_data), 1):
73 |             self.landmark_locations[self.barcodes_data[i][1]] = self.landmark_groundtruth_data[i - 5][1:]
74 | 
75 |         # Lookup table to map barcode Subject# to landmark Subject#
76 |         # Barcode 6 is the first landmark (1 - 15 for 6 - 20)
77 |         self.landmark_indexes = {}
78 |         for i in range(5, len(self.barcodes_data), 1):
79 |             self.landmark_indexes[self.barcodes_data[i][1]] = i - 4
80 | 
81 |     def initialization(self, num_particles, motion_noise, measurement_noise):
82 |         # Initial state: use first ground truth data
83 |         self.states = np.array([self.groundtruth_data[0]])
84 |         self.last_timestamp = self.states[-1][0]
85 | 
86 |         # Noise covariance
87 |         self.motion_noise = motion_noise
88 |         self.measurement_noise = measurement_noise
89 | 
90 |         # Initial particles: spread around the initial state mean with normally distributed noise
91 |         # num_particles of [x, y, theta]
92 |         self.particles = np.zeros((num_particles, 3))
93 |         self.particles[:, 0] = np.random.normal(self.states[-1][1], self.motion_noise[0], num_particles)
94 |         self.particles[:, 1] = np.random.normal(self.states[-1][2], self.motion_noise[1], num_particles)
95 |         self.particles[:, 2] = np.random.normal(self.states[-1][3], self.motion_noise[2], num_particles)
96 | 
97 |         # Initial weights: set with uniform weights for each particle
98 |         self.weights = np.full(num_particles, 1.0 / num_particles)
99 | 
100 |     def motion_update(self, control):
101 |         # Motion Model (simplified):
102 |         # State: [x, y, θ]
103 |         # Control: [v, w]
104 |         # [x_t, y_t, θ_t] = g(u_t, x_t-1)
105 |         #   x_t = x_t-1 + v * cosθ_t-1 * delta_t
106 |         #   y_t = y_t-1 + v * sinθ_t-1 * delta_t
107 |         #   θ_t = θ_t-1 + w * delta_t
108 | 
109 |         # Skip motion update if two odometry data are too close
110 |         delta_t = control[0] - self.last_timestamp
111 |         if (delta_t < 0.1):
112 |             return
113 | 
114 |         for particle in self.particles:
115 |             # Apply noise to control input
116 |             v = np.random.normal(control[2], self.motion_noise[3], 1)
117 |             w = np.random.normal(control[3], self.motion_noise[4], 1)
118 | 
119 |             # Compute updated [x, y, theta]
120 |             particle[0] += v * np.cos(particle[2]) * delta_t
121 |             particle[1] += v * np.sin(particle[2]) * delta_t
122 |             particle[2] += w * delta_t
123 | 
124 |             # Limit θ within [-pi, pi]
125 |             if (particle[2] > np.pi):
126 |                 particle[2] -= 2 * np.pi
127 |             elif (particle[2] < -np.pi):
128 |                 particle[2] += 2 * np.pi
129 | 
130 |         # Update timestamp
131 |         self.last_timestamp = control[0]
132 | 
133 |     def measurement_update(self, measurement):
134 |         # Measurement Model:
135 |         # range = sqrt((x_l - x_t)^2 + (y_l - y_t)^2)
136 |         # bearing = atan2(y_l - y_t, x_l - x_t) - θ_t
137 | 
138 |         # Continue if landmark is not found in self.landmark_locations
139 |         if not measurement[1] in self.landmark_locations:
140 |             return
141 | 
142 |         # Importance factor: update weights for each particle (Table 6.4)
143 |         x_l = self.landmark_locations[measurement[1]][0]
144 |         y_l = self.landmark_locations[measurement[1]][1]
145 |         for i in range(len(self.particles)):
146 |             # Compute expected range and bearing given current pose
147 |             x_t = self.particles[i][0]
148 |             y_t = self.particles[i][1]
149 |             theta_t = self.particles[i][2]
150 |             q = (x_l - x_t) * (x_l - x_t) + (y_l - y_t) * (y_l - y_t)
151 |             range_expected = np.sqrt(q)
152 |             bearing_expected = np.arctan2(y_l - y_t, x_l - x_t) - theta_t
153 | 
154 |             # Compute the probability of range and bearing differences in normal distribution with mean = 0 and sigma = measurement noise
155 |             range_error = measurement[2] - range_expected
156 |             bearing_error = measurement[3] - bearing_expected
157 |             prob_range = stats.norm(0, self.measurement_noise[0]).pdf(range_error)
158 |             prob_bearing = stats.norm(0, self.measurement_noise[1]).pdf(bearing_error)
159 | 
160 |             # Update weights
161 |             self.weights[i] = prob_range * prob_bearing
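            # (Illustrative aside, not part of the original script: building a
            # frozen stats.norm(...) object on every loop iteration is slow for
            # large particle sets. An equivalent vectorized update would compute
            # range_errors and bearing_errors for all particles at once and use
            #     stats.norm.pdf(range_errors, 0, self.measurement_noise[0])
            #     * stats.norm.pdf(bearing_errors, 0, self.measurement_noise[1])
            # )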
162 | 
163 |         # Normalization
164 |         # Avoid all-zero weights
165 |         if (np.sum(self.weights) == 0):
166 |             self.weights = np.ones_like(self.weights)
167 |         self.weights /= np.sum(self.weights)
168 | 
169 |         # Update timestamp
170 |         self.last_timestamp = measurement[0]
171 | 
172 |     def importance_sampling(self):
173 |         # Resample according to importance weights
174 |         new_indexes = np.random.choice(len(self.particles), len(self.particles), replace = True, p = self.weights)
175 | 
176 |         # Update new particles
177 |         self.particles = self.particles[new_indexes]
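    # (Illustrative aside, not part of the original script: resampling after
    # every measurement can deplete particle diversity. A common variant keeps
    # the weights between updates and only resamples when the effective sample
    # size, ess = 1.0 / np.sum(self.weights ** 2), drops below roughly half of
    # num_particles.)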
178 | 
179 |     def state_update(self):
180 |         # Update robot pose by the mean of all particles
181 |         state = np.mean(self.particles, axis = 0)
182 |         self.states = np.append(self.states, np.array([[self.last_timestamp, state[0], state[1], state[2]]]), axis = 0)
183 | 
184 |     def plot_data(self):
185 |         # Clear all
186 |         plt.cla()
187 | 
188 |         # Ground truth data
189 |         plt.plot(self.groundtruth_data[:, 1], self.groundtruth_data[:, 2], 'b', label="Robot State Ground truth")
190 | 
191 |         # States
192 |         plt.plot(self.states[:, 1], self.states[:, 2], 'r', label="Robot State Estimate")
193 | 
194 |         # Start and end points
195 |         plt.plot(self.groundtruth_data[0, 1], self.groundtruth_data[0, 2], 'go', label="Start point")
196 |         plt.plot(self.groundtruth_data[-1, 1], self.groundtruth_data[-1, 2], 'yo', label="End point")
197 | 
198 |         # Particles
199 |         plt.scatter(self.particles[:, 0], self.particles[:, 1], s=5, c='k', alpha=0.5, label="Particles")
200 | 
201 |         # Landmark ground truth locations and indexes
202 |         landmark_xs = []
203 |         landmark_ys = []
204 |         for location in self.landmark_locations:
205 |             landmark_xs.append(self.landmark_locations[location][0])
206 |             landmark_ys.append(self.landmark_locations[location][1])
207 |             index = self.landmark_indexes[location] + 5
208 |             plt.text(landmark_xs[-1], landmark_ys[-1], str(index), alpha=0.5, fontsize=10)
209 |         plt.scatter(landmark_xs, landmark_ys, s=200, c='k', alpha=0.2, marker='*', label='Landmark Locations')
210 | 
211 |         plt.title("Particle Filter Localization with Known Correspondences")
212 |         plt.legend()
213 | 
214 |         # Dataset 0
215 |         # plt.xlim((0.3, 3.7))
216 |         # plt.ylim((-0.6, 2.7))
217 |         # plt.pause(1e-16)
218 | 
219 |         # Dataset 1
220 |         plt.xlim((-1.5, 5.0))
221 |         plt.ylim((-6.5, 6.0))
222 |         plt.pause(1e-16)
223 | 
224 | 
225 | if __name__ == "__main__":
226 |     # # Dataset 0
227 |     # dataset = "../0.Dataset0"
228 |     # end_frame = 10000
229 |     # # Number of particles
230 |     # num_particles = 50
231 |     # # Motion model noise (in meters / rad)
232 |     # # [noise_x, noise_y, noise_theta, noise_v, noise_w]
233 |     # motion_noise = np.array([0.2, 0.2, 0.2, 0.1, 0.1])
234 |     # # Measurement model noise (in meters / rad)
235 |     # # [noise_range, noise_bearing]
236 |     # measurement_noise = np.array([0.1, 0.2])
237 | 
238 |     # Dataset 1
239 |     dataset = "../0.Dataset1"
240 |     end_frame = 3200
241 |     # Number of particles
242 |     num_particles = 50
243 |     # Motion model noise (in meters / rad)
244 |     # [noise_x, noise_y, noise_theta, noise_v, noise_w]
245 |     motion_noise = np.array([0.1, 0.1, 0.1, 0.2, 0.2])
246 |     # Measurement model noise (in meters / rad)
247 |     # [noise_range, noise_bearing]
248 |     measurement_noise = np.array([0.1, 0.1])
249 | 
250 |     pf = ParticleFilter(dataset, end_frame, num_particles, motion_noise, measurement_noise)
251 |     plt.show()
252 | 
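importance_sampling above resamples multinomially with np.random.choice. Probabilistic Robotics (Table 4.4) favors the low-variance (systematic) sampler, which sweeps the cumulative weights once and keeps more diversity when weights are nearly equal. A standalone sketch, not part of the repository (the function name and demo values are illustrative):

    import numpy as np

    def low_variance_resample(particles, weights):
        # One random offset, then M equally spaced probes through the CDF:
        # a particle with weight w is kept floor(w*M) or ceil(w*M) times.
        M = len(particles)
        probes = np.random.uniform(0.0, 1.0 / M) + np.arange(M) / M
        indexes = np.searchsorted(np.cumsum(weights), probes)
        return particles[indexes]

    particles = np.random.randn(50, 3)  # 50 particles of [x, y, theta]
    weights = np.full(50, 1.0 / 50)     # uniform importance weights
    print(low_variance_resample(particles, weights).shape)  # (50, 3)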
--------------------------------------------------------------------------------
/2.EKF_SLAM/EKF_SLAM_known_correspondences.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | '''
4 | Implementation of EKF SLAM with known correspondences.
5 | See Probabilistic Robotics:
6 | 1. Page 314, Table 10.1 for full algorithm.
7 | 
8 | Author: Chenge Yang
9 | Email: chengeyang2019@u.northwestern.edu
10 | '''
11 | 
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 | 
15 | 
16 | class ExtendedKalmanFilterSLAM():
17 |     def __init__(self, dataset, start_frame, end_frame, R, Q):
18 |         self.load_data(dataset, start_frame, end_frame)
19 |         self.initialization(R, Q)
20 |         for data in self.data:
21 |             if (data[1] == -1):
22 |                 self.motion_update(data)
23 |             else:
24 |                 self.measurement_update(data)
25 |             # Plot every n frames
26 |             if (len(self.states) > (800 - start_frame) and len(self.states) % 10 == 0):
27 |                 self.plot_data()
28 |         # self.plot_data()
29 | 
30 |     def load_data(self, dataset, start_frame, end_frame):
31 |         # Loading dataset
32 |         # Barcodes: [Subject#, Barcode#]
33 |         self.barcodes_data = np.loadtxt(dataset + "/Barcodes.dat")
34 |         # Ground truth: [Time[s], x[m], y[m], orientation[rad]]
35 |         self.groundtruth_data = np.loadtxt(dataset + "/Groundtruth.dat")
36 |         # Landmark ground truth: [Subject#, x[m], y[m]]
37 |         self.landmark_groundtruth_data = np.loadtxt(dataset + "/Landmark_Groundtruth.dat")
38 |         # Measurement: [Time[s], Subject#, range[m], bearing[rad]]
39 |         self.measurement_data = np.loadtxt(dataset + "/Measurement.dat")
40 |         # Odometry: [Time[s], Subject#, forward_v[m/s], angular_v[rad/s]]
41 |         self.odometry_data = np.loadtxt(dataset + "/Odometry.dat")
42 | 
43 |         # Collect all input data and sort by timestamp
44 |         # Add subject "odom" = -1 for odometry data
45 |         odom_data = np.insert(self.odometry_data, 1, -1, axis=1)
46 |         self.data = np.concatenate((odom_data, self.measurement_data), axis=0)
47 |         self.data = self.data[np.argsort(self.data[:, 0])]
48 | 
49 |         # Select data according to start_frame and end_frame
50 |         # First frame must be control input
51 |         while self.data[start_frame][1] != -1:
52 |             start_frame += 1
53 |         # Remove all data before start_frame and after the end_timestamp
54 |         self.data = self.data[start_frame:end_frame]
55 |         start_timestamp = self.data[0][0]
56 |         end_timestamp = self.data[-1][0]
57 |         # Remove all groundtruth outside the range
58 |         for i in range(len(self.groundtruth_data)):
59 |             if (self.groundtruth_data[i][0] >= end_timestamp):
60 |                 break
61 |         self.groundtruth_data = self.groundtruth_data[:i]
62 |         for i in range(len(self.groundtruth_data)):
63 |             if (self.groundtruth_data[i][0] >= start_timestamp):
64 |                 break
65 |         self.groundtruth_data = self.groundtruth_data[i:]
66 | 
67 |         # Combine barcode Subject# with landmark Subject#
68 |         # Lookup table to map barcode Subject# to landmark coordinates
69 |         # [x[m], y[m], x std-dev[m], y std-dev[m]]
70 |         # Ground truth data is not used in EKF SLAM
71 |         self.landmark_locations = {}
72 |         for i in range(5, len(self.barcodes_data), 1):
73 |             self.landmark_locations[self.barcodes_data[i][1]] = self.landmark_groundtruth_data[i - 5][1:]
74 | 
75 |         # Lookup table to map barcode Subject# to landmark Subject#
76 |         # Barcode 6 is the first landmark (1 - 15 for 6 - 20)
77 |         self.landmark_indexes = {}
78 |         for i in range(5, len(self.barcodes_data), 1):
79 |             self.landmark_indexes[self.barcodes_data[i][1]] = i - 4
80 | 
81 |         # Table to record if each landmark has been seen or not
82 |         # Element [0] is not used. [1] - [15] represent landmarks 6 - 20
83 |         self.landmark_observed = np.full(len(self.landmark_indexes) + 1, False)
84 | 
85 |     def initialization(self, R, Q):
86 |         # Initial state: 3 for robot, 2 for each landmark
87 |         self.states = np.zeros((1, 3 + 2 * len(self.landmark_indexes)))
88 |         self.states[0][:3] = self.groundtruth_data[0][1:]
89 |         self.last_timestamp = self.groundtruth_data[0][0]
90 | 
91 |         # EKF state covariance: (3 + 2n) x (3 + 2n)
92 |         # For robot states, use first ground truth data as initial value
93 |         #   - small values for top-left 3 x 3 matrix
94 |         # For landmark states, we have no information at the beginning
95 |         #   - large values for rest of variances (diagonal) data
96 |         #   - small values for all covariances (off-diagonal) data
97 |         self.sigma = 1e-6 * np.full((3 + 2 * len(self.landmark_indexes), 3 + 2 * len(self.landmark_indexes)), 1)
98 |         for i in range(3, 3 + 2 * len(self.landmark_indexes)):
99 |             self.sigma[i][i] = 1e6
100 | 
101 |         # State covariance matrix
102 |         self.R = R
103 |         # Measurement covariance matrix
104 |         self.Q = Q
105 | 
106 |     def motion_update(self, control):
107 |         # ------------------ Step 1: Mean update ---------------------#
108 |         # State: [x, y, θ, x_l1, y_l1, ......, x_ln, y_ln]
109 |         # Control: [v, w]
110 |         # Only robot state is updated during each motion update step!
111 |         # [x_t, y_t, θ_t] = g(u_t, x_t-1)
112 |         #   x_t = x_t-1 + v * cosθ_t-1 * delta_t
113 |         #   y_t = y_t-1 + v * sinθ_t-1 * delta_t
114 |         #   θ_t = θ_t-1 + w * delta_t
115 |         # Skip motion update if two odometry data are too close
116 |         delta_t = control[0] - self.last_timestamp
117 |         if (delta_t < 0.001):
118 |             return
119 |         # Compute updated [x, y, theta]
120 |         x_t = self.states[-1][0] + control[2] * np.cos(self.states[-1][2]) * delta_t
121 |         y_t = self.states[-1][1] + control[2] * np.sin(self.states[-1][2]) * delta_t
122 |         theta_t = self.states[-1][2] + control[3] * delta_t
123 |         # Limit θ within [-pi, pi]
124 |         if (theta_t > np.pi):
125 |             theta_t -= 2 * np.pi
126 |         elif (theta_t < -np.pi):
127 |             theta_t += 2 * np.pi
128 |         self.last_timestamp = control[0]
129 |         # Append new state
130 |         new_state = np.copy(self.states[-1])
131 |         new_state[0] = x_t
132 |         new_state[1] = y_t
133 |         new_state[2] = theta_t
134 |         self.states = np.append(self.states, np.array([new_state]), axis=0)
135 | 
136 |         # ------ Step 2: Linearize state-transition by Jacobian ------#
137 |         # Jacobian of motion: G = d g(u_t, x_t-1) / d x_t-1
138 |         #        1  0  -v * delta_t * sinθ_t-1
139 |         #  G  =  0  1   v * delta_t * cosθ_t-1         0
140 |         #        0  0             1
141 |         #
142 |         #                  0                      I(2n x 2n)
143 |         self.G = np.identity(3 + 2 * len(self.landmark_indexes))
144 |         self.G[0][2] = - control[2] * delta_t * np.sin(self.states[-2][2])
145 |         self.G[1][2] = control[2] * delta_t * np.cos(self.states[-2][2])
146 | 
147 |         # ---------------- Step 3: Covariance update ------------------#
148 |         # sigma = G x sigma x G.T + Fx.T x R x Fx
149 |         self.sigma = self.G.dot(self.sigma).dot(self.G.T)
150 |         self.sigma[0][0] += self.R[0][0]
151 |         self.sigma[1][1] += self.R[1][1]
152 |         self.sigma[2][2] += self.R[2][2]
153 | 
154 |     def measurement_update(self, measurement):
155 |         # Continue if landmark is not found in self.landmark_indexes
156 |         if not measurement[1] in self.landmark_indexes:
157 |             return
158 | 
159 |         # Get current robot state, measurement and landmark index
160 |         x_t = self.states[-1][0]
161 |         y_t = self.states[-1][1]
162 |         theta_t = self.states[-1][2]
163 |         range_t = measurement[2]
164 |         bearing_t = measurement[3]
165 |         landmark_idx = self.landmark_indexes[measurement[1]]
166 | 
167 |         # If this landmark has never been seen before: initialize landmark location in the state vector as the observed one
168 |         #   x_l = x_t + range_t * cos(bearing_t + theta_t)
169 |         #   y_l = y_t + range_t * sin(bearing_t + theta_t)
170 |         if not self.landmark_observed[landmark_idx]:
171 |             x_l = x_t + range_t * np.cos(bearing_t + theta_t)
172 |             y_l = y_t + range_t * np.sin(bearing_t + theta_t)
173 |             self.states[-1][2 * landmark_idx + 1] = x_l
174 |             self.states[-1][2 * landmark_idx + 2] = y_l
175 |             self.landmark_observed[landmark_idx] = True
176 |         # Else use current value in state vector
177 |         else:
178 |             x_l = self.states[-1][2 * landmark_idx + 1]
179 |             y_l = self.states[-1][2 * landmark_idx + 2]
180 | 
181 |         # ---------------- Step 1: Measurement update -----------------#
182 |         # range = sqrt((x_l - x_t)^2 + (y_l - y_t)^2)
183 |         # bearing = atan2(y_l - y_t, x_l - x_t) - θ_t
184 |         delta_x = x_l - x_t
185 |         delta_y = y_l - y_t
186 |         q = delta_x ** 2 + delta_y ** 2
187 |         range_expected = np.sqrt(q)
188 |         bearing_expected = np.arctan2(delta_y, delta_x) - theta_t
189 | 
190 |         # ------ Step 2: Linearize Measurement Model by Jacobian ------#
191 |         # Landmark state becomes a variable in measurement model
192 |         # Jacobian: H = d h(x_t, x_l) / d (x_t, x_l)
193 |         #         1 0 0 0 ...... 0 0 0 0 ...... 0
194 |         #         0 1 0 0 ...... 0 0 0 0 ...... 0
195 |         # F_x =   0 0 1 0 ...... 0 0 0 0 ...... 0
196 |         #         0 0 0 0 ...... 0 1 0 0 ...... 0
197 |         #         0 0 0 0 ...... 0 0 1 0 ...... 0
198 |         #                            (2*landmark_idx - 2)
199 |         #           -delta_x/√q  -delta_y/√q   0   delta_x/√q  delta_y/√q
200 |         # H_low =    delta_y/q   -delta_x/q   -1   -delta_y/q   delta_x/q
201 |         #                0            0        0        0           0
202 |         # H = H_low x F_x
203 |         F_x = np.zeros((5, 3 + 2 * len(self.landmark_indexes)))
204 |         F_x[0][0] = 1.0
205 |         F_x[1][1] = 1.0
206 |         F_x[2][2] = 1.0
207 |         F_x[3][2 * landmark_idx + 1] = 1.0
208 |         F_x[4][2 * landmark_idx + 2] = 1.0
209 |         H_1 = np.array([-delta_x/np.sqrt(q), -delta_y/np.sqrt(q), 0, delta_x/np.sqrt(q), delta_y/np.sqrt(q)])
210 |         H_2 = np.array([delta_y/q, -delta_x/q, -1, -delta_y/q, delta_x/q])
211 |         H_3 = np.array([0, 0, 0, 0, 0])
212 |         self.H = np.array([H_1, H_2, H_3]).dot(F_x)
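        # (Illustrative aside, not part of the original script: the third row
        # of H and of the residual below is all zeros, and Q[2][2] is huge
        # (1e32 after squaring), so the dummy third measurement dimension
        # carries no information; it only keeps every matrix 3 x N.)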
213 | 
214 |         # ---------------- Step 3: Kalman gain update -----------------#
215 |         S_t = self.H.dot(self.sigma).dot(self.H.T) + self.Q
216 |         self.K = self.sigma.dot(self.H.T).dot(np.linalg.inv(S_t))
217 | 
218 |         # ------------------- Step 4: mean update ---------------------#
219 |         difference = np.array([range_t - range_expected, bearing_t - bearing_expected, 0])
220 |         innovation = self.K.dot(difference)
221 |         new_state = self.states[-1] + innovation
222 |         self.states = np.append(self.states, np.array([new_state]), axis=0)
223 |         self.last_timestamp = measurement[0]
224 | 
225 |         # ---------------- Step 5: covariance update ------------------#
226 |         self.sigma = (np.identity(3 + 2 * len(self.landmark_indexes)) - self.K.dot(self.H)).dot(self.sigma)
227 | 
228 |     def plot_data(self):
229 |         # Clear all
230 |         plt.cla()
231 | 
232 |         # Ground truth data
233 |         plt.plot(self.groundtruth_data[:, 1], self.groundtruth_data[:, 2], 'b', label="Robot State Ground truth")
234 | 
235 |         # States
236 |         plt.plot(self.states[:, 0], self.states[:, 1], 'r', label="Robot State Estimate")
237 | 
238 |         # Start and end points
239 |         plt.plot(self.groundtruth_data[0, 1], self.groundtruth_data[0, 2], 'g8', markersize=12, label="Start point")
240 |         plt.plot(self.groundtruth_data[-1, 1], self.groundtruth_data[-1, 2], 'y8', markersize=12, label="End point")
241 | 
242 |         # Landmark ground truth locations and indexes
243 |         landmark_xs = []
244 |         landmark_ys = []
245 |         for location in self.landmark_locations:
246 |             landmark_xs.append(self.landmark_locations[location][0])
247 |             landmark_ys.append(self.landmark_locations[location][1])
248 |             index = self.landmark_indexes[location] + 5
249 |             plt.text(landmark_xs[-1], landmark_ys[-1], str(index), alpha=0.5, fontsize=10)
250 |         plt.scatter(landmark_xs, landmark_ys, s=200, c='k', alpha=0.2, marker='*', label='Landmark Ground Truth')
251 | 
252 |         # Landmark estimated locations
253 |         estimate_xs = []
254 |         estimate_ys = []
255 |         for i in range(1, len(self.landmark_indexes) + 1):
256 |             if self.landmark_observed[i]:
257 |                 estimate_xs.append(self.states[-1][2 * i + 1])
258 |                 estimate_ys.append(self.states[-1][2 * i + 2])
259 |                 plt.text(estimate_xs[-1], estimate_ys[-1], str(i+5), fontsize=10)
260 |         plt.scatter(estimate_xs, estimate_ys, s=50, c='k', marker='.', label='Landmark Estimate')
261 | 
262 |         plt.title('EKF SLAM with known correspondences')
263 |         plt.legend()
264 |         plt.xlim((-2.0, 5.5))
265 |         plt.ylim((-7.0, 7.0))
266 |         plt.pause(1e-16)
267 | 
268 | 
269 | if __name__ == "__main__":
270 |     # Dataset 1
271 |     dataset = "../0.Dataset1"
272 |     start_frame = 800
273 |     end_frame = 3200
274 |     # State covariance matrix
275 |     R = np.diagflat(np.array([5.0, 5.0, 100.0])) ** 2
276 |     # Measurement covariance matrix
277 |     Q = np.diagflat(np.array([110.0, 110.0, 1e16])) ** 2
278 | 
279 |     ekf_slam = ExtendedKalmanFilterSLAM(dataset, start_frame, end_frame, R, Q)
280 |     plt.show()
281 | 
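Both EKF SLAM variants lift the 3x5 low-dimensional Jacobian H_low into the full (3 + 2n)-column state through the projection F_x. A standalone sketch of that mechanism, not part of the repository (n, j, and the random H_low stand-in are illustrative):

    import numpy as np

    n = 15                                   # number of landmarks, as in the dataset
    j = 4                                    # hypothetical landmark index (1-based, as in the scripts)
    F_x = np.zeros((5, 3 + 2 * n))
    F_x[0][0] = F_x[1][1] = F_x[2][2] = 1.0  # selects the robot block [x, y, theta]
    F_x[3][2 * j + 1] = 1.0                  # selects landmark j's x
    F_x[4][2 * j + 2] = 1.0                  # selects landmark j's y
    H_low = np.random.randn(3, 5)            # stand-in for the 3x5 Jacobian
    H = H_low.dot(F_x)                       # 3 x (3 + 2n); zero outside the two blocks
    print(H.shape, np.count_nonzero(H))      # (3, 33) 15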
--------------------------------------------------------------------------------
/2.EKF_SLAM/EKF_SLAM_unknown_correspondences.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | '''
4 | Implementation of EKF SLAM with unknown correspondences.
5 | See Probabilistic Robotics:
6 | 1. Page 321, Table 10.2 for full algorithm.
7 | 
8 | Author: Chenge Yang
9 | Email: chengeyang2019@u.northwestern.edu
10 | '''
11 | 
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 | 
15 | 
16 | class ExtendedKalmanFilterSLAM():
17 |     def __init__(self, dataset, start_frame, end_frame, R, Q):
18 |         self.load_data(dataset, start_frame, end_frame)
19 |         self.initialization(R, Q)
20 |         for data in self.data:
21 |             if (data[1] == -1):
22 |                 self.motion_update(data)
23 |             else:
24 |                 self.data_association(data)
25 |                 self.measurement_update(data)
26 |             # Plot every n frames
27 |             if (len(self.states) > (800 - start_frame) and len(self.states) % 30 == 0):
28 |                 self.plot_data()
29 |         # self.plot_data()
30 | 
31 |     def load_data(self, dataset, start_frame, end_frame):
32 |         # Loading dataset
33 |         # Barcodes: [Subject#, Barcode#]
34 |         self.barcodes_data = np.loadtxt(dataset + "/Barcodes.dat")
35 |         # Ground truth: [Time[s], x[m], y[m], orientation[rad]]
36 |         self.groundtruth_data = np.loadtxt(dataset + "/Groundtruth.dat")
37 |         # Landmark ground truth: [Subject#, x[m], y[m]]
38 |         self.landmark_groundtruth_data = np.loadtxt(dataset + "/Landmark_Groundtruth.dat")
39 |         # Measurement: [Time[s], Subject#, range[m], bearing[rad]]
40 |         self.measurement_data = np.loadtxt(dataset + "/Measurement.dat")
41 |         # Odometry: [Time[s], Subject#, forward_v[m/s], angular_v[rad/s]]
42 |         self.odometry_data = np.loadtxt(dataset + "/Odometry.dat")
43 | 
44 |         # Collect all input data and sort by timestamp
45 |         # Add subject "odom" = -1 for odometry data
46 |         odom_data = np.insert(self.odometry_data, 1, -1, axis=1)
47 |         self.data = np.concatenate((odom_data, self.measurement_data), axis=0)
48 |         self.data = self.data[np.argsort(self.data[:, 0])]
49 | 
50 |         # Select data according to start_frame and end_frame
51 |         # First frame must be control input
52 |         while self.data[start_frame][1] != -1:
53 |             start_frame += 1
54 |         # Remove all data before start_frame and after the end_timestamp
55 |         self.data = self.data[start_frame:end_frame]
56 |         start_timestamp = self.data[0][0]
57 |         end_timestamp = self.data[-1][0]
58 |         # Remove all groundtruth outside the range
59 |         for i in range(len(self.groundtruth_data)):
60 |             if (self.groundtruth_data[i][0] >= end_timestamp):
61 |                 break
62 |         self.groundtruth_data = self.groundtruth_data[:i]
63 |         for i in range(len(self.groundtruth_data)):
64 |             if (self.groundtruth_data[i][0] >= start_timestamp):
65 |                 break
66 |         self.groundtruth_data = self.groundtruth_data[i:]
67 | 
68 |         # Combine barcode Subject# with landmark Subject#
69 |         # Lookup table to map barcode Subject# to landmark coordinates
70 |         # [x[m], y[m], x std-dev[m], y std-dev[m]]
71 |         # Ground truth data is not used in EKF SLAM
72 |         self.landmark_locations = {}
73 |         for i in range(5, len(self.barcodes_data), 1):
74 |             self.landmark_locations[self.barcodes_data[i][1]] = self.landmark_groundtruth_data[i - 5][1:]
75 | 
76 |         # Lookup table to map barcode Subject# to landmark Subject#
77 |         # Barcode 6 is the first landmark (1 - 15 for 6 - 20)
78 |         # Landmark association is not used for unknown correspondences
79 |         self.landmark_indexes = {}
80 |         for i in range(5, len(self.barcodes_data), 1):
81 |             self.landmark_indexes[self.barcodes_data[i][1]] = i - 4
82 | 
83 |         # Table to record if each landmark has been seen or not
84 |         # Element [0] is not used. [1] - [15] represent landmarks 6 - 20
85 |         self.landmark_observed = np.full(len(self.landmark_indexes) + 1, False)
86 | 
87 |     def initialization(self, R, Q):
88 |         # Initial state: 3 for robot, 2 for each landmark
89 |         # To simplify, use fixed number of landmarks for states and covariances
90 |         self.states = np.zeros((1, 3 + 2 * len(self.landmark_indexes)))
91 |         self.states[0][:3] = self.groundtruth_data[0][1:]
92 |         self.last_timestamp = self.groundtruth_data[0][0]
93 | 
94 |         # EKF state covariance: (3 + 2n) x (3 + 2n)
95 |         # For robot states, use first ground truth data as initial value
96 |         #   - small values for top-left 3 x 3 matrix
97 |         # For landmark states, we have no information at the beginning
98 |         #   - large values for rest of variances (diagonal) data
99 |         #   - small values for all covariances (off-diagonal) data
100 |         self.sigma = 1e-6 * np.full((3 + 2 * len(self.landmark_indexes), 3 + 2 * len(self.landmark_indexes)), 1)
101 |         for i in range(3, 3 + 2 * len(self.landmark_indexes)):
102 |             self.sigma[i][i] = 1e6
103 | 
104 |         # State covariance matrix
105 |         self.R = R
106 |         # Measurement covariance matrix
107 |         self.Q = Q
108 | 
109 |     def motion_update(self, control):
110 |         # ------------------ Step 1: Mean update ---------------------#
111 |         # State: [x, y, θ, x_l1, y_l1, ......, x_ln, y_ln]
112 |         # Control: [v, w]
113 |         # Only robot state is updated during each motion update step!
114 |         # [x_t, y_t, θ_t] = g(u_t, x_t-1)
115 |         #   x_t = x_t-1 + v * cosθ_t-1 * delta_t
116 |         #   y_t = y_t-1 + v * sinθ_t-1 * delta_t
117 |         #   θ_t = θ_t-1 + w * delta_t
118 |         # Skip motion update if two odometry data are too close
119 |         delta_t = control[0] - self.last_timestamp
120 |         if (delta_t < 0.001):
121 |             return
122 |         # Compute updated [x, y, theta]
123 |         x_t = self.states[-1][0] + control[2] * np.cos(self.states[-1][2]) * delta_t
124 |         y_t = self.states[-1][1] + control[2] * np.sin(self.states[-1][2]) * delta_t
125 |         theta_t = self.states[-1][2] + control[3] * delta_t
126 |         # Limit θ within [-pi, pi]
127 |         if (theta_t > np.pi):
128 |             theta_t -= 2 * np.pi
129 |         elif (theta_t < -np.pi):
130 |             theta_t += 2 * np.pi
131 |         self.last_timestamp = control[0]
132 |         # Append new state
133 |         new_state = np.copy(self.states[-1])
134 |         new_state[0] = x_t
135 |         new_state[1] = y_t
136 |         new_state[2] = theta_t
137 |         self.states = np.append(self.states, np.array([new_state]), axis=0)
138 | 
139 |         # ------ Step 2: Linearize state-transition by Jacobian ------#
140 |         # Jacobian of motion: G = d g(u_t, x_t-1) / d x_t-1
141 |         #        1  0  -v * delta_t * sinθ_t-1
142 |         #  G  =  0  1   v * delta_t * cosθ_t-1         0
143 |         #        0  0             1
144 |         #
145 |         #                  0                      I(2n x 2n)
146 |         self.G = np.identity(3 + 2 * len(self.landmark_indexes))
147 |         self.G[0][2] = - control[2] * delta_t * np.sin(self.states[-2][2])
148 |         self.G[1][2] = control[2] * delta_t * np.cos(self.states[-2][2])
149 | 
150 |         # ---------------- Step 3: Covariance update ------------------#
151 |         # sigma = G x sigma x G.T + Fx.T x R x Fx
152 |         self.sigma = self.G.dot(self.sigma).dot(self.G.T)
153 |         self.sigma[0][0] += self.R[0][0]
154 |         self.sigma[1][1] += self.R[1][1]
155 |         self.sigma[2][2] += self.R[2][2]
156 | 
157 |     def data_association(self, measurement):
158 |         # Return if this measurement is not from a landmark (other robots)
159 |         if not measurement[1] in self.landmark_indexes:
160 |             return
161 | 
162 |         # Get current robot state, measurement
163 |         x_t = self.states[-1][0]
164 |         y_t = self.states[-1][1]
165 |         theta_t = self.states[-1][2]
166 |         range_t = measurement[2]
167 |         bearing_t = measurement[3]
168 | 
169 |         # The expected landmark's location based on current robot state and measurement
170 |         #   x_l = x_t + range_t * cos(bearing_t + theta_t)
171 |         #   y_l = y_t + range_t * sin(bearing_t + theta_t)
172 |         landmark_x_expected = x_t + range_t * np.cos(bearing_t + theta_t)
173 |         landmark_y_expected = y_t + range_t * np.sin(bearing_t + theta_t)
174 | 
175 |         # If the current landmark has not been seen, initialize its location as the expected one
176 |         landmark_idx = self.landmark_indexes[measurement[1]]
177 |         self.landmark_idx = landmark_idx
178 |         if not self.landmark_observed[landmark_idx]:
179 |             self.landmark_observed[landmark_idx] = True
180 |             self.states[-1][2 * landmark_idx + 1] = landmark_x_expected
181 |             self.states[-1][2 * landmark_idx + 2] = landmark_y_expected
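        # (Illustrative aside, not part of the original script: the true
        # subject number is still consulted here to decide when to spawn a
        # landmark; only the association used by the update is chosen by the
        # Mahalanobis search below.)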
182 | 
183 |         # Calculate the likelihood for each existing landmark
184 |         min_distance = 1e16
185 |         for i in range(1, len(self.landmark_indexes) + 1):
186 |             # Continue if this landmark has not been observed
187 |             if not self.landmark_observed[i]:
188 |                 continue
189 | 
190 |             # Get current landmark estimate
191 |             x_l = self.states[-1][2 * i + 1]
192 |             y_l = self.states[-1][2 * i + 2]
193 | 
194 |             # Calculate expected range and bearing measurement
195 |             # range = sqrt((x_l - x_t)^2 + (y_l - y_t)^2)
196 |             # bearing = atan2(y_l - y_t, x_l - x_t) - θ_t
197 |             delta_x = x_l - x_t
198 |             delta_y = y_l - y_t
199 |             q = delta_x ** 2 + delta_y ** 2
200 |             range_expected = np.sqrt(q)
201 |             bearing_expected = np.arctan2(delta_y, delta_x) - theta_t
202 | 
203 |             # Compute Jacobian H of Measurement Model
204 |             # Landmark state becomes a variable in measurement model
205 |             # Jacobian: H = d h(x_t, x_l) / d (x_t, x_l)
206 |             #         1 0 0 0 ...... 0 0 0 0 ...... 0
207 |             #         0 1 0 0 ...... 0 0 0 0 ...... 0
208 |             # F_x =   0 0 1 0 ...... 0 0 0 0 ...... 0
209 |             #         0 0 0 0 ...... 0 1 0 0 ...... 0
210 |             #         0 0 0 0 ...... 0 0 1 0 ...... 0
211 |             #                            (2*landmark_idx - 2)
212 |             #           -delta_x/√q  -delta_y/√q   0   delta_x/√q  delta_y/√q
213 |             # H_low =    delta_y/q   -delta_x/q   -1   -delta_y/q   delta_x/q
214 |             #                0            0        0        0           0
215 |             # H = H_low x F_x
216 |             F_x = np.zeros((5, 3 + 2 * len(self.landmark_indexes)))
217 |             F_x[0][0] = 1.0
218 |             F_x[1][1] = 1.0
219 |             F_x[2][2] = 1.0
220 |             F_x[3][2 * i + 1] = 1.0
221 |             F_x[4][2 * i + 2] = 1.0
222 |             H_1 = np.array([-delta_x/np.sqrt(q), -delta_y/np.sqrt(q), 0, delta_x/np.sqrt(q), delta_y/np.sqrt(q)])
223 |             H_2 = np.array([delta_y/q, -delta_x/q, -1, -delta_y/q, delta_x/q])
224 |             H_3 = np.array([0, 0, 0, 0, 0])
225 |             H = np.array([H_1, H_2, H_3]).dot(F_x)
226 | 
227 |             # Compute Mahalanobis distance
228 |             Psi = H.dot(self.sigma).dot(H.T) + self.Q
229 |             difference = np.array([range_t - range_expected, bearing_t - bearing_expected, 0])
230 |             Pi = difference.T.dot(np.linalg.inv(Psi)).dot(difference)
231 | 
232 |             # Get landmark information with least distance
233 |             if Pi < min_distance:
234 |                 min_distance = Pi
235 |                 # Values for measurement update
236 |                 self.H = H
237 |                 self.Psi = Psi
238 |                 self.difference = difference
239 |                 # Values for plotting data association
240 |                 self.landmark_expected = np.array([landmark_x_expected, landmark_y_expected])
241 |                 self.landmark_current = np.array([x_l, y_l])
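        # (Illustrative aside, not part of the original script: a landmark
        # seen for the first time was just initialized at exactly the expected
        # location, so its residual and Mahalanobis distance are ~0 and it
        # always wins this minimum search.)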
242 | 
243 |     def measurement_update(self, measurement):
244 |         # Return if this measurement is not from a landmark (other robots)
245 |         if not measurement[1] in self.landmark_indexes:
246 |             return
247 | 
248 |         # Update mean
249 |         self.K = self.sigma.dot(self.H.T).dot(np.linalg.inv(self.Psi))
250 |         innovation = self.K.dot(self.difference)
251 |         new_state = self.states[-1] + innovation
252 |         self.states = np.append(self.states, np.array([new_state]), axis=0)
253 |         self.last_timestamp = measurement[0]
254 | 
255 |         # Update covariance
256 |         self.sigma = (np.identity(3 + 2 * len(self.landmark_indexes)) - self.K.dot(self.H)).dot(self.sigma)
257 | 
258 |     def plot_data(self):
259 |         # Clear all
260 |         plt.cla()
261 | 
262 |         # Ground truth data
263 |         plt.plot(self.groundtruth_data[:, 1], self.groundtruth_data[:, 2], 'b', label='Robot State Ground truth')
264 | 
265 |         # States
266 |         plt.plot(self.states[:, 0], self.states[:, 1], 'r', label='Robot State Estimate')
267 | 
268 |         # Start and end points
269 |         plt.plot(self.groundtruth_data[0, 1], self.groundtruth_data[0, 2], 'g8', markersize=12, label='Start point')
270 |         plt.plot(self.groundtruth_data[-1, 1], self.groundtruth_data[-1, 2], 'y8', markersize=12, label='End point')
271 | 
272 |         # Landmark ground truth locations and indexes
273 |         landmark_xs = []
274 |         landmark_ys = []
275 |         for location in self.landmark_locations:
276 |             landmark_xs.append(self.landmark_locations[location][0])
277 |             landmark_ys.append(self.landmark_locations[location][1])
278 |             index = self.landmark_indexes[location] + 5
279 |             plt.text(landmark_xs[-1], landmark_ys[-1], str(index), alpha=0.5, fontsize=10)
280 |         plt.scatter(landmark_xs, landmark_ys, s=200, c='k', alpha=0.2, marker='*', label='Landmark Ground Truth')
281 | 
282 |         # Landmark estimated locations
283 |         estimate_xs = []
284 |         estimate_ys = []
285 |         for i in range(1, len(self.landmark_indexes) + 1):
286 |             if self.landmark_observed[i]:
287 |                 estimate_xs.append(self.states[-1][2 * i + 1])
288 |                 estimate_ys.append(self.states[-1][2 * i + 2])
289 |                 plt.text(estimate_xs[-1], estimate_ys[-1], str(i+5), fontsize=10)
290 |         plt.scatter(estimate_xs, estimate_ys, s=50, c='k', marker='.', label='Landmark Estimate')
291 | 
292 |         # Line pointing from current observed landmark to associated landmark
293 |         xs = [self.landmark_expected[0], self.landmark_current[0]]
294 |         ys = [self.landmark_expected[1], self.landmark_current[1]]
295 |         plt.plot(xs, ys, color='c', label='Data Association')
296 |         plt.text(self.landmark_expected[0], self.landmark_expected[1], str(self.landmark_idx+5))
297 | 
298 |         # Expected location of current observed landmark
299 |         plt.scatter(self.landmark_expected[0], self.landmark_expected[1], s=100, c='r', alpha=0.5, marker='P', label='Current Observed Landmark')
300 | 
301 |         plt.title('EKF SLAM with Unknown Correspondences')
302 |         plt.legend(loc='upper right')
303 |         plt.xlim((-2.0, 6.5))
304 |         plt.ylim((-7.0, 7.0))
305 |         plt.pause(0.5)
306 | 
307 | 
308 | if __name__ == "__main__":
309 |     # Dataset 1
310 |     dataset = "../0.Dataset1"
311 |     start_frame = 800
312 |     end_frame = 3200
313 |     # State covariance matrix
314 |     R = np.diagflat(np.array([120.0, 120.0, 100.0])) ** 2
315 |     # Measurement covariance matrix
316 |     Q = np.diagflat(np.array([1000.0, 1000.0, 1e16])) ** 2
317 | 
318 |     ekf_slam = ExtendedKalmanFilterSLAM(dataset, start_frame, end_frame, R, Q)
319 |     plt.show()
320 | 
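data_association above always accepts the minimum-distance landmark; a common refinement is to gate that minimum with a chi-square test and treat rejected measurements as new landmarks or outliers. A standalone sketch, not part of the repository (the gate function and its alpha threshold are illustrative assumptions):

    from scipy import stats

    def mahalanobis_gate(pi_min, dof=2, alpha=0.95):
        # Accept the association only if the squared Mahalanobis distance
        # lies inside the chi-square confidence region for `dof` dimensions.
        return pi_min < stats.chi2.ppf(alpha, dof)

    print(mahalanobis_gate(3.0))   # True: 3.0 < chi2.ppf(0.95, 2) ~= 5.99
    print(mahalanobis_gate(12.0))  # False: likely a new landmark or an outlier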
--------------------------------------------------------------------------------
/3.Graph_SLAM/Graph_SLAM_known_correspondences.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | '''
4 | Implementation of Graph SLAM with known correspondences.
5 | See Probabilistic Robotics:
6 | 1. Page 350, Table 11.1 - 11.5 for full algorithm.
7 | 
8 | Author: Chenge Yang
9 | Email: chengeyang2019@u.northwestern.edu
10 | '''
11 | 
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 | 
15 | 
16 | class GraphSLAM():
17 |     def __init__(self, dataset, start_frame, end_frame, N_iterations, R, Q):
18 |         self.load_data(dataset, start_frame, end_frame)
19 |         self.initialization(R, Q)
20 |         self.plot_data()
21 |         for i in range(N_iterations):
22 |             self.linearize()
23 |             self.reduce()
24 |             self.solve()
25 |             self.plot_data()
26 | 
27 |     def load_data(self, dataset, start_frame, end_frame):
28 |         # Loading dataset
29 |         # Barcodes: [Subject#, Barcode#]
30 |         self.barcodes_data = np.loadtxt(dataset + "/Barcodes.dat")
31 |         # Ground truth: [Time[s], x[m], y[m], orientation[rad]]
32 |         self.groundtruth_data = np.loadtxt(dataset + "/Groundtruth.dat")
33 |         # Landmark ground truth: [Subject#, x[m], y[m]]
34 |         self.landmark_groundtruth_data = np.loadtxt(dataset + "/Landmark_Groundtruth.dat")
35 |         # Measurement: [Time[s], Subject#, range[m], bearing[rad]]
36 |         self.measurement_data = np.loadtxt(dataset + "/Measurement.dat")
37 |         # Odometry: [Time[s], Subject#, forward_v[m/s], angular_v[rad/s]]
38 |         self.odometry_data = np.loadtxt(dataset + "/Odometry.dat")
39 | 
40 |         # Collect all input data and sort by timestamp
41 |         # Add subject "odom" = -1 for odometry data
42 |         odom_data = np.insert(self.odometry_data, 1, -1, axis=1)
43 |         self.data = np.concatenate((odom_data, self.measurement_data), axis=0)
44 |         self.data = self.data[np.argsort(self.data[:, 0])]
45 | 
46 |         # Remove all data before the first timestamp of groundtruth
47 |         # Use first groundtruth data as the initial location of the robot
48 |         for i in range(len(self.data)):
49 |             if (self.data[i][0] > self.groundtruth_data[0][0]):
50 |                 break
51 |         self.data = self.data[i:]
52 | 
53 |         # Select data according to start_frame and end_frame
54 |         # First frame must be control input
55 |         while self.data[start_frame][1] != -1:
56 |             start_frame += 1
57 |         # Remove all data before start_frame and after the end_timestamp
58 |         self.data = self.data[start_frame:end_frame]
59 |         start_timestamp = self.data[0][0]
60 |         end_timestamp = self.data[-1][0]
61 |         # Remove all groundtruth outside the range
62 |         for i in range(len(self.groundtruth_data)):
63 |             if (self.groundtruth_data[i][0] >= end_timestamp):
64 |                 break
65 |         self.groundtruth_data = self.groundtruth_data[:i]
66 |         for i in range(len(self.groundtruth_data)):
67 |             if (self.groundtruth_data[i][0] >= start_timestamp):
68 |                 break
69 |         self.groundtruth_data = self.groundtruth_data[i:]
70 | 
71 |         # Combine barcode Subject# with landmark Subject#
72 |         # Lookup table to map barcode Subject# to landmark coordinates
73 |         # [x[m], y[m], x std-dev[m], y std-dev[m]]
74 |         # Ground truth data is not used in SLAM
75 |         self.landmark_locations = {}
76 |         for i in range(5, len(self.barcodes_data), 1):
77 |             self.landmark_locations[self.barcodes_data[i][1]] = self.landmark_groundtruth_data[i - 5][1:]
78 | 
79 |         # Lookup table to map barcode Subject# to landmark Subject#
80 |         # Barcode 6 is the first landmark (1 - 15 for 6 - 20)
81 |         self.landmark_indexes = {}
82 |         for i in range(5, len(self.barcodes_data), 1):
83 |             self.landmark_indexes[self.barcodes_data[i][1]] = i - 4
84 | 
85 |         # Table to record if each landmark has been seen or not
86 |         # Element [0] is not used. [1] - [15] represent landmarks 6 - 20
87 |         self.landmark_observed = np.full(len(self.landmark_indexes) + 1, False)
88 | 
89 |     def initialization(self, R, Q):
90 |         # Initial robot state: 3 for robot [x, y, theta]
91 |         # First state is obtained from ground truth
92 |         self.states = np.array([self.groundtruth_data[0][1:]])
93 |         self.last_timestamp = self.data[0][0]
94 | 
95 |         # Initial landmark state: 2 for each landmark [x, y]
96 |         self.landmark_states = np.zeros(2 * len(self.landmark_indexes))
97 | 
98 |         # State covariance matrix
99 |         self.R = R
100 |         self.R_inverse = np.linalg.inv(self.R)
101 |         # Measurement covariance matrix
102 |         self.Q = Q
103 |         self.Q_inverse = np.linalg.inv(self.Q)
104 | 
105 |         # Compute initial state estimates based on motion model only
106 |         for data in self.data:
107 |             if (data[1] == -1):
108 |                 next_state = np.copy(self.states[-1])
109 |                 next_state = self.motion_update(self.states[-1], data)
110 |                 self.states = np.append(self.states, np.array([next_state]), axis=0)
111 |             self.last_timestamp = data[0]
112 | 
113 |     def linearize(self):
114 |         # Incorporate control and measurement constraints into information matrix & vector
115 | 
116 |         # Initialize Information Matrix and Vector
117 |         n_state = len(self.states)
118 |         n_landmark = len(self.landmark_indexes)
119 |         self.omega = np.zeros((3 * n_state + 2 * n_landmark, 3 * n_state + 2 * n_landmark))
120 |         self.xi = np.zeros(3 * n_state + 2 * n_landmark)
121 | 
122 |         # Initialize first state X_0 by current estimate data
123 |         self.omega[0:3, 0:3] = 1.0 * np.identity(3)
124 |         self.xi[0:3] = self.states[0]
125 | 
126 |         # Initialize matrix to record at which state the landmark is observed
127 |         self.observe = np.full((n_state, n_landmark), False)
128 | 
129 |         # Indicators
130 |         self.state_idx = 0
131 |         self.last_timestamp = self.data[0][0]
132 | 
133 |         # Loop for all control and measurement data
134 |         for data in self.data:
135 |             # Get current state
136 |             state = self.states[self.state_idx]
137 |             # Incorporate control constraints
138 |             if (data[1] == -1):
139 |                 next_state = self.motion_update(state, data)
140 |                 self.motion_linearize(state, next_state, data)
141 |                 self.state_idx += 1
142 |             # Incorporate measurement constraints
143 |             else:
144 |                 self.measurement_update_linearize(state, data)
145 |             # Update indicators
146 |             self.last_timestamp = data[0]
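        # (Illustrative aside, not part of the original script: the identity
        # block written into omega[0:3, 0:3] above anchors the first pose X_0.
        # Without some anchor the information matrix would be singular, since
        # relative motion and measurement constraints alone leave the global
        # frame unobservable.)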
constraints 143 | else: 144 | self.measurement_update_linearize(state, data) 145 | # Update indicators 146 | self.last_timestamp = data[0] 147 | 148 | def motion_update(self, current_state, control): 149 | # Input: current robot state X_t-1: [x_t-1, y_t-1, theta_t-1] 150 | # control U_t: [timestamp, -1, v_t, w_t] 151 | # Output: next robot state X_t: [x_t, y_t, theta_t] 152 | 153 | # Motion model: 154 | # State: [x, y, θ, x_l1, y_l1, ......, x_ln, y_ln] 155 | # Control: [v, w] 156 | # [x_t, y_t, θ_t] = g(u_t, x_t-1) 157 | # x_t = x_t-1 + v * cosθ_t-1 * delta_t 158 | # y_t = y_t-1 + v * sinθ_t-1 * delta_t 159 | # θ_t = θ_t-1 + w * delta_t 160 | delta_t = control[0] - self.last_timestamp 161 | x_t = current_state[0] + control[2] * np.cos(current_state[2]) * delta_t 162 | y_t = current_state[1] + control[2] * np.sin(current_state[2]) * delta_t 163 | theta_t = current_state[2] + control[3] * delta_t 164 | 165 | # Limit θ within [-pi, pi] 166 | if (theta_t > np.pi): 167 | theta_t -= 2 * np.pi 168 | elif (theta_t < -np.pi): 169 | theta_t += 2 * np.pi 170 | 171 | # Return next state 172 | return np.array([x_t, y_t, theta_t]) 173 | 174 | def motion_linearize(self, current_state, next_state, control): 175 | # Input: current robot state X_t-1: [x_t-1, y_t-1, theta_t-1] 176 | # next robot state X_t: [x_t, y_t, theta_t] 177 | # control U_t: [timestamp, -1, v_t, w_t] 178 | # Output: None (Update Information matrix and vector) 179 | 180 | # Jacobian of motion: G = d g(u_t, x_t-1) / d x_t-1 181 | # 1 0 -v * delta_t * sinθ_t-1 182 | # G = 0 1 v * delta_t * cosθ_t-1 183 | # 0 0 1 184 | delta_t = control[0] - self.last_timestamp 185 | G = np.identity(3) 186 | G[0][2] = - control[2] * delta_t * np.sin(current_state[2]) 187 | G[1][2] = control[2] * delta_t * np.cos(current_state[2]) 188 | 189 | # Construct extended G_extend = [-G I] 190 | G_extend = np.zeros((3, 6)) 191 | G_extend[:, 0:3] = -G 192 | G_extend[:, 3:6] = np.identity(3) 193 | 194 | # Get matrix indexes for current and next states 195 | cur_idx = 3 * self.state_idx 196 | nxt_idx = 3 * (self.state_idx + 1) 197 | 198 | # Update Information matrix at X_t-1 and X_t 199 | update = G_extend.T.dot(self.R_inverse).dot(G_extend) 200 | self.omega[cur_idx:cur_idx+3, cur_idx:cur_idx+3] += update[0:3, 0:3] 201 | self.omega[cur_idx:cur_idx+3, nxt_idx:nxt_idx+3] += update[0:3, 3:6] 202 | self.omega[nxt_idx:nxt_idx+3, cur_idx:cur_idx+3] += update[3:6, 0:3] 203 | self.omega[nxt_idx:nxt_idx+3, nxt_idx:nxt_idx+3] += update[3:6, 3:6] 204 | 205 | # Update Information vector at X_t-1 and X_t 206 | update = G_extend.T.dot(self.R_inverse).dot((next_state - G.dot(current_state))) 207 | self.xi[cur_idx:cur_idx+3] += update[0:3] 208 | self.xi[nxt_idx:nxt_idx+3] += update[3:6] 209 | 210 | def measurement_update_linearize(self, current_state, measurement): 211 | # Input: current robot state X_t: [x_t, y_t, theta_t] 212 | # measurement Z_t: [timestamp, #landmark, range, bearing] 213 | # Output: None (Update Information matrix and vector) 214 | 215 | # Skip if landmark is not found in self.landmark_indexes 216 | if not measurement[1] in self.landmark_indexes: 217 | return 218 | 219 | # Get current robot state 220 | x_t = current_state[0] 221 | y_t = current_state[1] 222 | theta_t = current_state[2] 223 | 224 | # Get current measurement and landmark index 225 | range_t = measurement[2] 226 | bearing_t = measurement[3] 227 | landmark_idx = self.landmark_indexes[measurement[1]] 228 | 229 | # If this landmark has never been seen before: initialize landmark location in the state 
vector as the observed one 230 | # x_l = x_t + range_t * cos(bearing_t + theta_t) 231 | # y_l = y_t + range_t * sin(bearing_t + theta_t) 232 | if not self.landmark_observed[landmark_idx]: 233 | self.landmark_states[2 * landmark_idx - 2] = x_t + range_t * np.cos(bearing_t + theta_t) 234 | self.landmark_states[2 * landmark_idx - 1] = y_t + range_t * np.sin(bearing_t + theta_t) 235 | self.landmark_observed[landmark_idx] = True 236 | 237 | # Get current estimate of the landmark 238 | x_l = self.landmark_states[2 * landmark_idx - 2] 239 | y_l = self.landmark_states[2 * landmark_idx - 1] 240 | 241 | # Expected measurement given current robot and landmark states 242 | # Measurement model: 243 | # range = sqrt((x_l - x_t)^2 + (y_l - y_t)^2) 244 | # bearing = atan2(y_l - y_t, x_l - x_t) - θ_t 245 | delta_x = x_l - x_t 246 | delta_y = y_l - y_t 247 | q = delta_x ** 2 + delta_y ** 2 248 | range_expected = np.sqrt(q) 249 | bearing_expected = np.arctan2(delta_y, delta_x) - theta_t 250 | 251 | # Jacobian of measurement: H = d h(x_t, x_l) / d (x_t, x_l) 252 | # -delta_x/√q -delta_y/√q 0 delta_x/√q delta_y/√q 253 | # H = delta_y/q -delta_x/q -1 -delta_y/q delta_x/q 254 | # 0 0 0 0 0 255 | H_1 = np.array([-delta_x/np.sqrt(q), -delta_y/np.sqrt(q), 0, delta_x/np.sqrt(q), delta_y/np.sqrt(q)]) 256 | H_2 = np.array([delta_y/q, -delta_x/q, -1, -delta_y/q, delta_x/q]) 257 | H_3 = np.array([0, 0, 0, 0, 0]) 258 | H = np.array([H_1, H_2, H_3]) 259 | 260 | # Get matrix indexes for current state and landmark 261 | s_idx = 3 * self.state_idx 262 | l_idx = 3 * len(self.states) + 2 * (landmark_idx - 1) 263 | 264 | # Update Information matrix at X_t and m_j 265 | update = H.T.dot(self.Q_inverse).dot(H) 266 | self.omega[s_idx:s_idx+3, s_idx:s_idx+3] += update[0:3, 0:3] 267 | self.omega[s_idx:s_idx+3, l_idx:l_idx+2] += update[0:3, 3:5] 268 | self.omega[l_idx:l_idx+2, s_idx:s_idx+3] += update[3:5, 0:3] 269 | self.omega[l_idx:l_idx+2, l_idx:l_idx+2] += update[3:5, 3:5] 270 | 271 | # Update Information vector at X_t and m_j 272 | difference = np.array([[range_t - range_expected], [bearing_t - bearing_expected], [0]]) 273 | state_vector = np.array([[x_t], [y_t], [theta_t], [x_l], [y_l]]) 274 | update = H.T.dot(self.Q_inverse).dot(difference + H.dot(state_vector)) 275 | self.xi[s_idx:s_idx+3] += update.T[0, 0:3] 276 | self.xi[l_idx:l_idx+2] += update.T[0, 3:5] 277 | 278 | # Update observation matrix 279 | self.observe[self.state_idx, landmark_idx - 1] = True 280 | 281 | def reduce(self): 282 | # For the current dataset, there are only 15 landmarks 283 | # We don't need to reduce landmark dimensions 284 | return 285 | 286 | def solve(self): 287 | # Add infinity to Information matrix where landmark is not observed 288 | for landmark in range(len(self.landmark_indexes)): 289 | if self.landmark_observed[landmark + 1]: 290 | continue 291 | 292 | l_idx = 3 * len(self.states) + 2 * landmark 293 | self.omega[l_idx:l_idx+2, l_idx:l_idx+2] = 1e16 * np.identity(2) 294 | 295 | # Solve for new states and covariance matrix 296 | # Without reducing the landmark dimensions, the landmark locations can be solved directly from the Information matrix and vector 297 | sigma = np.linalg.inv(self.omega) 298 | new_states = sigma.dot(self.xi) 299 | 300 | # Extract landmark locations from state vector 301 | self.landmark_states = new_states[3*len(self.states):] 302 | 303 | # Get updated state vector 304 | new_states = new_states[0:3*len(self.states)] 305 | self.states = np.resize(new_states, (int(len(new_states)/3), 3)) 306 | 307 | def 
plot_data(self): 308 | # New figure 309 | plt.figure() 310 | 311 | # Ground truth data 312 | plt.plot(self.groundtruth_data[:, 1], self.groundtruth_data[:, 2], 'b', label="Robot State Ground truth") 313 | 314 | # States 315 | plt.plot(self.states[:, 0], self.states[:, 1], 'r', label="Robot State Estimate") 316 | 317 | # Start and end points 318 | plt.plot(self.groundtruth_data[0, 1], self.groundtruth_data[0, 2], 'g8', markersize=12, label="Start point") 319 | plt.plot(self.groundtruth_data[-1, 1], self.groundtruth_data[-1, 2], 'y8', markersize=12, label="End point") 320 | 321 | # Landmark ground truth locations and indexes 322 | landmark_xs = [] 323 | landmark_ys = [] 324 | for location in self.landmark_locations: 325 | landmark_xs.append(self.landmark_locations[location][0]) 326 | landmark_ys.append(self.landmark_locations[location][1]) 327 | index = self.landmark_indexes[location] + 5 328 | plt.text(landmark_xs[-1], landmark_ys[-1], str(index), alpha=0.5, fontsize=10) 329 | plt.scatter(landmark_xs, landmark_ys, s=200, c='k', alpha=0.2, marker='*', label='Landmark Ground Truth') 330 | 331 | # Landmark estimated locations 332 | estimate_xs = [] 333 | estimate_ys = [] 334 | for i in range(1, len(self.landmark_indexes) + 1): 335 | if self.landmark_observed[i]: 336 | estimate_xs.append(self.landmark_states[2 * i - 2]) 337 | estimate_ys.append(self.landmark_states[2 * i - 1]) 338 | plt.text(estimate_xs[-1], estimate_ys[-1], str(i+5), fontsize=10) 339 | plt.scatter(estimate_xs, estimate_ys, s=50, c='k', marker='.', label='Landmark Estimate') 340 | 341 | plt.title('Graph SLAM with known correspondences') 342 | plt.legend() 343 | plt.xlim((-2.0, 5.5)) 344 | plt.ylim((-7.0, 7.0)) 345 | 346 | 347 | if __name__ == "__main__": 348 | # Dataset 1 349 | dataset = "../0.Dataset1" 350 | start_frame = 800 351 | end_frame = 2000 352 | N_iterations = 1 353 | # State covariance matrix 354 | R = np.diagflat(np.array([5, 5, 20])) ** 2 355 | # Measurement covariance matrix 356 | Q = np.diagflat(np.array([100.0, 100.0, 1e16])) ** 2 357 | 358 | graph_slam = GraphSLAM(dataset, start_frame, end_frame, N_iterations, R, Q) 359 | plt.show() 360 | -------------------------------------------------------------------------------- /4.Fast_SLAM_1/lib/__init__.py: -------------------------------------------------------------------------------- 1 | from .particle import Particle 2 | from .motion import MotionModel 3 | from .measurement import MeasurementModel 4 | -------------------------------------------------------------------------------- /4.Fast_SLAM_1/lib/measurement.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | ''' 4 | Measurement model for 2D Range Sensor ([range, bearing]). 5 | 6 | Author: Chenge Yang 7 | Email: chengeyang2019@u.northwestern.edu 8 | ''' 9 | 10 | import numpy as np 11 | 12 | 13 | class MeasurementModel(): 14 | def __init__(self, Q): 15 | ''' 16 | Input: 17 | Q: Measurement covariance matrix. 18 | Dimension: [2, 2]. 19 | ''' 20 | self.Q = Q 21 | 22 | def compute_expected_measurement(self, particle, landmark_idx): 23 | ''' 24 | Compute the expected range and bearing given current robot state and 25 | landmark state. 26 | 27 | Measurement model: (expected measurement) 28 | range = sqrt((x_l - x_t)^2 + (y_l - y_t)^2) 29 | bearing = atan2((y_l - y_t) / (x_l - x_t)) - θ_t 30 | 31 | Input: 32 | particle: Particle() object to be updated. 33 | landmark_idx: the index of the landmark (0 ~ 15). 
34 | Output: 35 | range, bearing: the expected measurement. 36 | ''' 37 | delta_x = particle.lm_mean[landmark_idx, 0] - particle.x 38 | delta_y = particle.lm_mean[landmark_idx, 1] - particle.y 39 | q = delta_x ** 2 + delta_y ** 2 40 | 41 | range = np.sqrt(q) 42 | bearing = np.arctan2(delta_y, delta_x) - particle.theta 43 | 44 | return range, bearing 45 | 46 | def compute_expected_landmark_state(self, particle, measurement): 47 | ''' 48 | Compute the expected landmark location [x, y] given current robot state 49 | and measurement data. 50 | 51 | Expected landmark state: inverse of the measurement model. 52 | x_l = x_t + range_t * cos(bearing_t + theta_t) 53 | y_l = y_t + range_t * sin(bearing_t + theta_t) 54 | 55 | Input: 56 | particle: Particle() object to be updated. 57 | measurement: measurement data Z_t. 58 | [timestamp, #landmark, range, bearing] 59 | Output: 60 | x, y: expected landmark state [x, y] 61 | ''' 62 | x = particle.x + measurement[2] *\ 63 | np.cos(measurement[3] + particle.theta) 64 | y = particle.y + measurement[2] *\ 65 | np.sin(measurement[3] + particle.theta) 66 | 67 | return x, y 68 | 69 | def compute_landmark_jacobian(self, particle, landmark_idx): 70 | ''' 71 | Computing the Jacobian wrt landmark state X_l. 72 | 73 | Jacobian of measurement: only take derivatives of landmark X_l. 74 | H = d h(x_t, x_l) / d (x_l) 75 | H_m = delta_x/√q delta_y/√q 76 | -delta_y/q delta_x/q 77 | 78 | Input: 79 | particle: Particle() object to be updated. 80 | landmark_idx: the index of the landmark (0 ~ 15). 81 | Output: 82 | H_m: Jacobian h'(X_t, X_l) 83 | Dimension: [2, 2] 84 | ''' 85 | delta_x = particle.lm_mean[landmark_idx, 0] - particle.x 86 | delta_y = particle.lm_mean[landmark_idx, 1] - particle.y 87 | q = delta_x ** 2 + delta_y ** 2 88 | 89 | H_1 = np.array([delta_x/np.sqrt(q), delta_y/np.sqrt(q)]) 90 | H_2 = np.array([-delta_y/q, delta_x/q]) 91 | H_m = np.array([H_1, H_2]) 92 | 93 | return H_m 94 | 95 | def initialize_landmark(self, particle, measurement, landmark_idx, weight): 96 | ''' 97 | Initialize landmark mean and covariance for one landmark of a given 98 | particle. 99 | This landmark is the first time to be observed. 100 | Based on EKF method. 101 | 102 | Input: 103 | particle: Particle() object to be updated. 104 | measurement: measurement data Z_t. 105 | [timestamp, #landmark, range, bearing] 106 | landmark_idx: the index of the landmark (0 ~ 15). 107 | weight: the default importance factor for a new feature. 108 | Output: 109 | None. 110 | ''' 111 | # Update landmark mean by inverse measurement model 112 | particle.lm_mean[landmark_idx] =\ 113 | self.compute_expected_landmark_state(particle, measurement) 114 | 115 | # Get Jacobian wrt landmark state 116 | H_m = self.compute_landmark_jacobian(particle, landmark_idx) 117 | 118 | # Update landmark covariance 119 | H_inverse = np.linalg.inv(H_m) 120 | particle.lm_cov[landmark_idx] = H_inverse.dot(self.Q).dot(H_inverse.T) 121 | 122 | # Mark landmark as observed 123 | particle.lm_ob[landmark_idx] = True 124 | 125 | # Assign default importance weight 126 | particle.weight = weight 127 | 128 | # Update timestamp 129 | particle.timestamp = measurement[0] 130 | 131 | def landmark_update(self, particle, measurement, landmark_idx): 132 | ''' 133 | Implementation for Fast SLAM 1.0. 134 | Update landmark mean and covariance for one landmarks of a given 135 | particle. 136 | This landmark has to be observed before. 137 | Based on EKF method. 138 | 139 | Input: 140 | particle: Particle() object to be updated. 
141 | measurement: measurement data Z_t. 142 | [timestamp, #landmark, range, bearing] 143 | landmark_idx: the index of the landmark (0 ~ 15). 144 | Output: 145 | None. 146 | ''' 147 | # Compute expected measurement 148 | range_expected, bearing_expected =\ 149 | self.compute_expected_measurement(particle, landmark_idx) 150 | 151 | # Get Jacobian wrt landmark state 152 | H_m = self.compute_landmark_jacobian(particle, landmark_idx) 153 | 154 | # Compute Kalman gain 155 | Q = H_m.dot(particle.lm_cov[landmark_idx]).dot(H_m.T) + self.Q 156 | K = particle.lm_cov[landmark_idx].dot(H_m.T).dot(np.linalg.inv(Q)) 157 | 158 | # Update mean 159 | difference = np.array([[measurement[2] - range_expected], 160 | [measurement[3] - bearing_expected]]) 161 | innovation = K.dot(difference) 162 | particle.lm_mean[landmark_idx] += innovation.T[0] 163 | 164 | # Update covariance 165 | particle.lm_cov[landmark_idx] =\ 166 | (np.identity(2) - K.dot(H_m)).dot(particle.lm_cov[landmark_idx]) 167 | 168 | # Importance factor 169 | particle.weight = np.linalg.det(2 * np.pi * Q) ** (-0.5) *\ 170 | np.exp(-0.5 * difference.T.dot(np.linalg.inv(Q)). 171 | dot(difference))[0, 0] 172 | 173 | # Update timestamp 174 | particle.timestamp = measurement[0] 175 | 176 | def compute_correspondence(self, particle, measurement, landmark_idx): 177 | ''' 178 | Implementation for Fast SLAM 1.0. 179 | Compute the likelihood of correspondence between a measurement and 180 | a given landmark. 181 | This process is the same as updating a landmark mean with the EKF method. 182 | 183 | Input: 184 | particle: Particle() object to be updated. 185 | measurement: measurement data Z_t. 186 | [timestamp, #landmark, range, bearing] 187 | landmark_idx: the index of the landmark (0 ~ 15). 188 | Output: 189 | likelihood: likelihood of correspondence 190 | ''' 191 | # Compute expected measurement 192 | range_expected, bearing_expected =\ 193 | self.compute_expected_measurement(particle, landmark_idx) 194 | 195 | # Get Jacobian wrt landmark state 196 | H_m = self.compute_landmark_jacobian(particle, landmark_idx) 197 | 198 | Q = H_m.dot(particle.lm_cov[landmark_idx]).dot(H_m.T) + self.Q 199 | difference = np.array([[measurement[2] - range_expected], 200 | [measurement[3] - bearing_expected]]) 201 | 202 | # likelihood of correspondence 203 | likelihood = np.linalg.det(2 * np.pi * Q) ** (-0.5) *\ 204 | np.exp(-0.5 * difference.T.dot(np.linalg.inv(Q)). 205 | dot(difference))[0, 0] 206 | 207 | return likelihood 208 | 209 | 210 | if __name__ == '__main__': 211 | pass 212 | -------------------------------------------------------------------------------- /4.Fast_SLAM_1/lib/motion.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | ''' 4 | Velocity motion model for 2D differential drive robot: 5 | Robot state: [x, y, θ] 6 | Control: [v, w]. 7 | 8 | Author: Chenge Yang 9 | Email: chengeyang2019@u.northwestern.edu 10 | ''' 11 | 12 | import numpy as np 13 | 14 | 15 | class MotionModel(): 16 | def __init__(self, motion_noise): 17 | ''' 18 | Input: 19 | motion_noise: [noise_x, noise_y, noise_theta, noise_v, noise_w] 20 | (in meters / rad). 21 | ''' 22 | self.motion_noise = motion_noise 23 | 24 | def initialize_particle(self, particle): 25 | ''' 26 | Add motion noise to the robot state in the given particle object. 27 | 28 | Input: 29 | particle: Particle() object which has been initialized by first 30 | ground truth data. 31 | Output: 32 | None. 
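        Illustrative usage (the noise values here are hypothetical, not the
        ones used in the test scripts):

            motion_model = MotionModel(np.array([0.1, 0.1, 0.05, 0.1, 0.15]))
            particle = Particle()
            particle.x, particle.y, particle.theta = 1.0, 2.0, 0.5
            motion_model.initialize_particle(particle)
            # particle.x is now a draw from N(1.0, 0.1 ** 2), and likewise
            # y and theta, which spreads the initial particle cloud.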
33 | ''' 34 | # Apply Gaussian noise to the robot state 35 | particle.x = np.random.normal(particle.x, self.motion_noise[0]) 36 | particle.y = np.random.normal(particle.y, self.motion_noise[1]) 37 | particle.theta = np.random.normal(particle.theta, self.motion_noise[2]) 38 | 39 | def motion_update(self, particle, control): 40 | ''' 41 | Conduct motion update for a given particle from current state X_t-1 and 42 | control U_t. 43 | 44 | Motion Model (simplified): 45 | State: [x, y, θ] 46 | Control: [v, w] 47 | [x_t, y_t, θ_t] = g(u_t, x_t-1) 48 | x_t = x_t-1 + v * cosθ_t-1 * delta_t 49 | y_t = y_t-1 + v * sinθ_t-1 * delta_t 50 | θ_t = θ_t-1 + w * delta_t 51 | 52 | Input: 53 | particle: Particle() object to be updated. 54 | control: control input U_t. 55 | [timestamp, -1, v_t, w_t] 56 | Output: 57 | None. 58 | ''' 59 | delta_t = control[0] - particle.timestamp 60 | 61 | # Compute updated [timestamp, x, y, theta] 62 | particle.timestamp = control[0] 63 | particle.x += control[2] * np.cos(particle.theta) * delta_t 64 | particle.y += control[2] * np.sin(particle.theta) * delta_t 65 | particle.theta += control[3] * delta_t 66 | 67 | # Limit θ within [-pi, pi] 68 | if (particle.theta > np.pi): 69 | particle.theta -= 2 * np.pi 70 | elif (particle.theta < -np.pi): 71 | particle.theta += 2 * np.pi 72 | 73 | def sample_motion_model(self, particle, control): 74 | ''' 75 | Implementation for Fast SLAM 1.0. 76 | Sample next state X_t from current state X_t-1 and control U_t with 77 | added motion noise. 78 | 79 | Input: 80 | particle: Particle() object to be updated. 81 | control: control input U_t. 82 | [timestamp, -1, v_t, w_t] 83 | Output: 84 | None. 85 | ''' 86 | # Apply Gaussian noise to control input 87 | v = np.random.normal(control[2], self.motion_noise[3]) 88 | w = np.random.normal(control[3], self.motion_noise[4]) 89 | control_noisy = np.array([control[0], control[1], v, w]) 90 | 91 | # Motion update 92 | self.motion_update(particle, control_noisy) 93 | 94 | 95 | if __name__ == '__main__': 96 | pass 97 | -------------------------------------------------------------------------------- /4.Fast_SLAM_1/lib/particle.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | ''' 4 | Definition for a single particle object. 5 | 6 | Author: Chenge Yang 7 | Email: chengeyang2019@u.northwestern.edu 8 | ''' 9 | 10 | import numpy as np 11 | 12 | 13 | class Particle(): 14 | def __init__(self): 15 | # Robot state: [timestamp, x, y, θ] 16 | self.timestamp = 0.0 17 | self.x = 0.0 18 | self.y = 0.0 19 | self.theta = 0.0 20 | 21 | # Weight 22 | self.weight = 1.0 23 | 24 | # Landmark state: [x, y] 25 | # All landmarks' Mean and Covariance 26 | self.lm_mean = np.zeros((1, 2)) 27 | self.lm_cov = np.zeros((1, 2, 2)) 28 | self.lm_ob = np.full(1, False) 29 | 30 | def initialization(self, init_state, N_particles, N_landmarks): 31 | ''' 32 | Input: 33 | init_state: initial state (from first ground truth data) 34 | [timestamp_0, x_0, y_0, θ_0] 35 | N_particles: number of particles in Particle Filter. 36 | N_landmarks: number of landmarks each particle tracks. 37 | Output: 38 | None. 
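        A minimal usage sketch (the initial state values are hypothetical):

            particle = Particle()
            particle.initialization(np.array([0.0, 1.9, -5.0, 0.0]),
                                    N_particles=100, N_landmarks=15)
            # particle.weight == 1.0 / 100
            # particle.lm_mean.shape == (15, 2), all zeros until observed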
39 | ''' 40 | # Robot state: [timestamp, x, y, θ] 41 | # Initialized to init_state 42 | self.timestamp = init_state[0] 43 | self.x = init_state[1] 44 | self.y = init_state[2] 45 | self.theta = init_state[3] 46 | 47 | # Weight 48 | # Initialized to be equally distributed among all particles 49 | self.weight = 1.0 / N_particles 50 | 51 | # Landmark state: [x, y] 52 | # All landmarks' Mean and Covariance 53 | # Initialized as zeros 54 | self.lm_mean = np.zeros((N_landmarks, 2)) 55 | self.lm_cov = np.zeros((N_landmarks, 2, 2)) 56 | 57 | # Table to record if each landmark has been seen or not 58 | # Index [0] - [14] represent landmark# 6 - 20 59 | self.lm_ob = np.full(N_landmarks, False) 60 | 61 | 62 | if __name__ == '__main__': 63 | pass 64 | -------------------------------------------------------------------------------- /4.Fast_SLAM_1/src/Fast_SLAM_1_known_correspondences.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | ''' 4 | Implementation of Fast SLAM 1.0 with known correspondences. 5 | 6 | See Probabilistic Robotics: 7 | 1. Page 450, Table 13.1 for full algorithm. 8 | 9 | Author: Chenge Yang 10 | Email: chengeyang2019@u.northwestern.edu 11 | ''' 12 | 13 | import numpy as np 14 | import matplotlib.pyplot as plt 15 | import copy 16 | 17 | from lib.particle import Particle 18 | 19 | 20 | class FastSLAM1(): 21 | def __init__(self, motion_model, measurement_model): 22 | ''' 23 | Input: 24 | motion_model: MotionModel() object 25 | measurement_model: MeasurementModel() object 26 | ''' 27 | self.motion_model = motion_model 28 | self.measurement_model = measurement_model 29 | 30 | def load_data(self, dataset, start_frame, end_frame): 31 | ''' 32 | Load data from UTIAS Multi-Robot Cooperative Localization and Mapping 33 | Dataset. 34 | 35 | Input: 36 | dataset: directory of the dataset. 37 | start_frame: the index of first frame to run the SLAM algorithm on. 38 | end_frame: the index of last frame to run the SLAM algorithm on. 39 | Output: 40 | None. 
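        After loading, self.data holds odometry and measurement rows merged
        and sorted by timestamp, with odometry rows tagged by subject -1.
        A sketch of how the stream is consumed (this mirrors the test
        scripts below):

            for data in fast_slam.data:
                if data[1] == -1:    # [time, -1, v, w]: control input
                    fast_slam.robot_update(data)
                else:                # [time, barcode, range, bearing]
                    fast_slam.landmark_update(data)
                    fast_slam.state_update()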
41 | ''' 42 | # Loading dataset 43 | # Barcodes: [Subject#, Barcode#] 44 | self.barcodes_data = np.loadtxt(dataset + "/Barcodes.dat") 45 | # Ground truth: [Time[s], x[m], y[m], orientation[rad]] 46 | self.groundtruth_data = np.loadtxt(dataset + "/Groundtruth.dat") 47 | # Landmark ground truth: [Subject#, x[m], y[m]] 48 | self.landmark_groundtruth_data =\ 49 | np.loadtxt(dataset + "/Landmark_Groundtruth.dat") 50 | # Measurement: [Time[s], Subject#, range[m], bearing[rad]] 51 | self.measurement_data = np.loadtxt(dataset + "/Measurement.dat") 52 | # Odometry: [Time[s], Subject#, forward_V[m/s], angular _v[rad/s]] 53 | self.odometry_data = np.loadtxt(dataset + "/Odometry.dat") 54 | 55 | # Collect all input data and sort by timestamp 56 | # Add subject "odom" = -1 for odometry data 57 | odom_data = np.insert(self.odometry_data, 1, -1, axis=1) 58 | self.data = np.concatenate((odom_data, self.measurement_data), axis=0) 59 | self.data = self.data[np.argsort(self.data[:, 0])] 60 | 61 | # Remove all data before the fisrt timestamp of groundtruth 62 | # Use first groundtruth data as the initial location of the robot 63 | for i in range(len(self.data)): 64 | if (self.data[i][0] > self.groundtruth_data[0][0]): 65 | break 66 | self.data = self.data[i:] 67 | 68 | # Select data according to start_frame and end_frame 69 | # Fisrt frame must be control input 70 | while self.data[start_frame][1] != -1: 71 | start_frame += 1 72 | # Remove all data before start_frame and after the end_timestamp 73 | self.data = self.data[start_frame:end_frame] 74 | start_timestamp = self.data[0][0] 75 | end_timestamp = self.data[-1][0] 76 | # Remove all groundtruth outside the range 77 | for i in range(len(self.groundtruth_data)): 78 | if (self.groundtruth_data[i][0] >= end_timestamp): 79 | break 80 | self.groundtruth_data = self.groundtruth_data[:i] 81 | for i in range(len(self.groundtruth_data)): 82 | if (self.groundtruth_data[i][0] >= start_timestamp): 83 | break 84 | self.groundtruth_data = self.groundtruth_data[i:] 85 | 86 | # Combine barcode Subject# with landmark Subject# 87 | # Lookup table to map barcode Subjec# to landmark coordinates 88 | # [x[m], y[m], x std-dev[m], y std-dev[m]] 89 | # Ground truth data is not used in SLAM 90 | self.landmark_locations = {} 91 | for i in range(5, len(self.barcodes_data), 1): 92 | self.landmark_locations[self.barcodes_data[i][1]] =\ 93 | self.landmark_groundtruth_data[i - 5][1:] 94 | 95 | # Lookup table to map barcode Subjec# to landmark Subject# 96 | # Barcode 6 is the first landmark (0 - 14 for 6 - 20) 97 | self.landmark_indexes = {} 98 | for i in range(5, len(self.barcodes_data), 1): 99 | self.landmark_indexes[self.barcodes_data[i][1]] = i - 5 100 | 101 | def initialization(self, N_particles): 102 | ''' 103 | Initialize robots state, landmark state and all particles. 104 | 105 | Input: 106 | N_particles: number of particles this SLAM algorithms tracks. 107 | Output: 108 | None. 
109 | ''' 110 | # Number of particles and landmarks 111 | self.N_particles = N_particles 112 | self.N_landmarks = len(self.landmark_indexes) 113 | 114 | # Robot states: [timestamp, x, y, theta] 115 | # First state is obtained from ground truth 116 | self.states = np.array([self.groundtruth_data[0]]) 117 | 118 | # Landmark states: [x, y] 119 | self.landmark_states = np.zeros((self.N_landmarks, 2)) 120 | 121 | # Table to record if each landmark has been seen or not 122 | # [0] - [14] represent for landmark# 6 - 20 123 | self.landmark_observed = np.full(self.N_landmarks, False) 124 | 125 | # Initial particles 126 | self.particles = [] 127 | for i in range(N_particles): 128 | particle = Particle() 129 | particle.initialization(self.states[0], self.N_particles, 130 | self.N_landmarks) 131 | self.motion_model.initialize_particle(particle) 132 | self.particles.append(particle) 133 | 134 | def robot_update(self, control): 135 | ''' 136 | Update robot pose through sampling motion model for all particles. 137 | 138 | Input: 139 | control: control input U_t. 140 | [timestamp, -1, v_t, w_t] 141 | Output: 142 | None. 143 | ''' 144 | for particle in self.particles: 145 | self.motion_model.sample_motion_model(particle, control) 146 | 147 | def landmark_update(self, measurement): 148 | ''' 149 | Update landmark mean and covariance for all landmarks of all particles. 150 | Based on EKF method. 151 | 152 | Input: 153 | measurement: measurement data Z_t. 154 | [timestamp, #landmark, range, bearing] 155 | Output: 156 | None. 157 | ''' 158 | # Return if the measured object is not a landmark (another robot) 159 | if not measurement[1] in self.landmark_indexes: 160 | return 161 | 162 | for particle in self.particles: 163 | # Get landmark index 164 | landmark_idx = self.landmark_indexes[measurement[1]] 165 | 166 | # Initialize landmark by measurement if it is newly observed 167 | if not particle.lm_ob[landmark_idx]: 168 | self.measurement_model.\ 169 | initialize_landmark(particle, measurement, 170 | landmark_idx, 1.0/self.N_landmarks) 171 | 172 | # Update landmark by EKF if it has been observed before 173 | else: 174 | self.measurement_model.\ 175 | landmark_update(particle, measurement, landmark_idx) 176 | 177 | # Normalize all weights 178 | self.weights_normalization() 179 | 180 | # Resample all particles according to the weights 181 | self.importance_sampling() 182 | 183 | def weights_normalization(self): 184 | ''' 185 | Normalize weight in all particles so that the sum = 1. 186 | 187 | Input: 188 | None. 189 | Output: 190 | None. 191 | ''' 192 | # Compute sum of the weights 193 | sum = 0.0 194 | for particle in self.particles: 195 | sum += particle.weight 196 | 197 | # If sum is too small, equally assign weights to all particles 198 | if sum < 1e-10: 199 | for particle in self.particles: 200 | particle.weight = 1.0 / self.N_particles 201 | return 202 | 203 | for particle in self.particles: 204 | particle.weight /= sum 205 | 206 | def importance_sampling(self): 207 | ''' 208 | Resample all particles through the importance factors. 209 | 210 | Input: 211 | None. 212 | Output: 213 | None. 
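        The resampling below draws indexes i.i.d. with np.random.choice. A
        lower-variance alternative often used in particle filters is
        systematic (low-variance) resampling; a sketch, not part of this
        implementation:

            def systematic_resample(weights):
                N = len(weights)
                positions = (np.arange(N) + np.random.uniform()) / N
                return np.searchsorted(np.cumsum(weights), positions)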
214 | ''' 215 | # Construct weights vector 216 | weights = [] 217 | for particle in self.particles: 218 | weights.append(particle.weight) 219 | 220 | # Resample all particles according to importance weights 221 | new_indexes =\ 222 | np.random.choice(len(self.particles), len(self.particles), 223 | replace=True, p=weights) 224 | 225 | # Update new particles 226 | new_particles = [] 227 | for index in new_indexes: 228 | new_particles.append(copy.deepcopy(self.particles[index])) 229 | self.particles = new_particles 230 | 231 | def state_update(self): 232 | ''' 233 | Update the robot and landmark states by taking average among all 234 | particles. 235 | 236 | Input: 237 | None. 238 | Output: 239 | None. 240 | ''' 241 | # Robot state 242 | timestamp = self.particles[0].timestamp 243 | x = 0.0 244 | y = 0.0 245 | theta = 0.0 246 | 247 | for particle in self.particles: 248 | x += particle.x 249 | y += particle.y 250 | theta += particle.theta 251 | 252 | x /= self.N_particles 253 | y /= self.N_particles 254 | theta /= self.N_particles 255 | 256 | self.states = np.append(self.states, 257 | np.array([[timestamp, x, y, theta]]), axis=0) 258 | 259 | # Landmark state 260 | landmark_states = np.zeros((self.N_landmarks, 2)) 261 | count = np.zeros(self.N_landmarks) 262 | self.landmark_observed = np.full(self.N_landmarks, False) 263 | 264 | for particle in self.particles: 265 | for landmark_idx in range(self.N_landmarks): 266 | if particle.lm_ob[landmark_idx]: 267 | landmark_states[landmark_idx] +=\ 268 | particle.lm_mean[landmark_idx] 269 | count[landmark_idx] += 1 270 | self.landmark_observed[landmark_idx] = True 271 | 272 | for landmark_idx in range(self.N_landmarks): 273 | if self.landmark_observed[landmark_idx]: 274 | landmark_states[landmark_idx] /= count[landmark_idx] 275 | 276 | self.landmark_states = landmark_states 277 | 278 | def plot_data(self): 279 | ''' 280 | Plot all data through matplotlib. 281 | Conduct animation as the algorithm runs. 282 | 283 | Input: 284 | None. 285 | Output: 286 | None. 
287 | ''' 288 | # Clear all 289 | plt.cla() 290 | 291 | # Ground truth data 292 | plt.plot(self.groundtruth_data[:, 1], self.groundtruth_data[:, 2], 293 | 'b', label="Robot State Ground truth") 294 | 295 | # States 296 | plt.plot(self.states[:, 1], self.states[:, 2], 297 | 'r', label="Robot State Estimate") 298 | 299 | # Start and end points 300 | plt.plot(self.groundtruth_data[0, 1], self.groundtruth_data[0, 2], 301 | 'g8', markersize=12, label="Start point") 302 | plt.plot(self.groundtruth_data[-1, 1], self.groundtruth_data[-1, 2], 303 | 'y8', markersize=12, label="End point") 304 | 305 | # Particles 306 | particle_xs = [] 307 | particle_ys = [] 308 | for particle in self.particles: 309 | particle_xs.append(particle.x) 310 | particle_ys.append(particle.y) 311 | plt.scatter(particle_xs, particle_ys, 312 | s=5, c='k', alpha=0.5, label="Particles") 313 | 314 | # Landmark ground truth locations and indexes 315 | landmark_xs = [] 316 | landmark_ys = [] 317 | for location in self.landmark_locations: 318 | landmark_xs.append(self.landmark_locations[location][0]) 319 | landmark_ys.append(self.landmark_locations[location][1]) 320 | index = self.landmark_indexes[location] + 6 321 | plt.text(landmark_xs[-1], landmark_ys[-1], str(index), 322 | alpha=0.5, fontsize=10) 323 | plt.scatter(landmark_xs, landmark_ys, s=200, c='k', alpha=0.2, 324 | marker='*', label='Landmark Ground Truth') 325 | 326 | # Landmark estimated locations 327 | estimate_xs = [] 328 | estimate_ys = [] 329 | for i in range(self.N_landmarks): 330 | if self.landmark_observed[i]: 331 | estimate_xs.append(self.landmark_states[i, 0]) 332 | estimate_ys.append(self.landmark_states[i, 1]) 333 | plt.text(estimate_xs[-1], estimate_ys[-1], 334 | str(i+6), fontsize=10) 335 | plt.scatter(estimate_xs, estimate_ys, 336 | s=50, c='k', marker='P', label='Landmark Estimate') 337 | 338 | plt.title('Fast SLAM 1.0 with known correspondences') 339 | plt.legend() 340 | plt.xlim((-2.0, 5.5)) 341 | plt.ylim((-7.0, 7.0)) 342 | plt.pause(1e-16) 343 | 344 | 345 | if __name__ == "__main__": 346 | pass 347 | -------------------------------------------------------------------------------- /4.Fast_SLAM_1/src/Fast_SLAM_1_unknown_correspondences.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | ''' 4 | Implementation of Fast SLAM 1.0 with unknown correspondences. 5 | 6 | The difference from known correspondence lies in the data_association part. 7 | 8 | See Probabilistic Robotics: 9 | 1. Page 461, Table 13.2 for full algorithm. 10 | 11 | Author: Chenge Yang 12 | Email: chengeyang2019@u.northwestern.edu 13 | ''' 14 | 15 | import numpy as np 16 | import matplotlib.pyplot as plt 17 | import copy 18 | 19 | from lib.particle import Particle 20 | 21 | 22 | class FastSLAM1(): 23 | def __init__(self, motion_model, measurement_model): 24 | ''' 25 | Input: 26 | motion_model: ModelModel() object 27 | measurement_model: MeasurementModel() object 28 | ''' 29 | self.motion_model = motion_model 30 | self.measurement_model = measurement_model 31 | 32 | def load_data(self, dataset, start_frame, end_frame): 33 | ''' 34 | Load data from UTIAS Multi-Robot Cooperative Localization and Mapping 35 | Dataset. 36 | 37 | Input: 38 | dataset: directory of the dataset. 39 | start_frame: the index of first frame to run the SLAM algorithm on. 40 | end_frame: the index of last frame to run the SLAM algorithm on. 41 | Otput: 42 | None. 
43 | ''' 44 | # Loading dataset 45 | # Barcodes: [Subject#, Barcode#] 46 | self.barcodes_data = np.loadtxt(dataset + "/Barcodes.dat") 47 | # Ground truth: [Time[s], x[m], y[m], orientation[rad]] 48 | self.groundtruth_data = np.loadtxt(dataset + "/Groundtruth.dat") 49 | # Landmark ground truth: [Subject#, x[m], y[m]] 50 | self.landmark_groundtruth_data =\ 51 | np.loadtxt(dataset + "/Landmark_Groundtruth.dat") 52 | # Measurement: [Time[s], Subject#, range[m], bearing[rad]] 53 | self.measurement_data = np.loadtxt(dataset + "/Measurement.dat") 54 | # Odometry: [Time[s], Subject#, forward_V[m/s], angular _v[rad/s]] 55 | self.odometry_data = np.loadtxt(dataset + "/Odometry.dat") 56 | 57 | # Collect all input data and sort by timestamp 58 | # Add subject "odom" = -1 for odometry data 59 | odom_data = np.insert(self.odometry_data, 1, -1, axis=1) 60 | self.data = np.concatenate((odom_data, self.measurement_data), axis=0) 61 | self.data = self.data[np.argsort(self.data[:, 0])] 62 | 63 | # Remove all data before the fisrt timestamp of groundtruth 64 | # Use first groundtruth data as the initial location of the robot 65 | for i in range(len(self.data)): 66 | if (self.data[i][0] > self.groundtruth_data[0][0]): 67 | break 68 | self.data = self.data[i:] 69 | 70 | # Select data according to start_frame and end_frame 71 | # Fisrt frame must be control input 72 | while self.data[start_frame][1] != -1: 73 | start_frame += 1 74 | # Remove all data before start_frame and after the end_timestamp 75 | self.data = self.data[start_frame:end_frame] 76 | start_timestamp = self.data[0][0] 77 | end_timestamp = self.data[-1][0] 78 | # Remove all groundtruth outside the range 79 | for i in range(len(self.groundtruth_data)): 80 | if (self.groundtruth_data[i][0] >= end_timestamp): 81 | break 82 | self.groundtruth_data = self.groundtruth_data[:i] 83 | for i in range(len(self.groundtruth_data)): 84 | if (self.groundtruth_data[i][0] >= start_timestamp): 85 | break 86 | self.groundtruth_data = self.groundtruth_data[i:] 87 | 88 | # Combine barcode Subject# with landmark Subject# 89 | # Lookup table to map barcode Subjec# to landmark coordinates 90 | # [x[m], y[m], x std-dev[m], y std-dev[m]] 91 | # Ground truth data is not used in SLAM 92 | self.landmark_locations = {} 93 | for i in range(5, len(self.barcodes_data), 1): 94 | self.landmark_locations[self.barcodes_data[i][1]] =\ 95 | self.landmark_groundtruth_data[i - 5][1:] 96 | 97 | # Lookup table to map barcode Subjec# to landmark Subject# 98 | # Barcode 6 is the first landmark (0 - 14 for 6 - 20) 99 | self.landmark_indexes = {} 100 | for i in range(5, len(self.barcodes_data), 1): 101 | self.landmark_indexes[self.barcodes_data[i][1]] = i - 5 102 | 103 | def initialization(self, N_particles): 104 | ''' 105 | Initialize robots state, landmark state and all particles. 106 | 107 | Input: 108 | N_particles: number of particles this SLAM algorithms tracks. 109 | Output: 110 | None. 
111 | ''' 112 | # Number of particles and landmarks 113 | self.N_particles = N_particles 114 | self.N_landmarks = len(self.landmark_indexes) 115 | 116 | # Robot states: [timestamp, x, y, theta] 117 | # First state is obtained from ground truth 118 | self.states = np.array([self.groundtruth_data[0]]) 119 | 120 | # Landmark states: [x, y] 121 | self.landmark_states = np.zeros((self.N_landmarks, 2)) 122 | 123 | # Table to record if each landmark has been seen or not 124 | # [0] - [14] represent for landmark# 6 - 20 125 | self.landmark_observed = np.full(self.N_landmarks, False) 126 | 127 | # Initial particles 128 | self.particles = [] 129 | for i in range(N_particles): 130 | particle = Particle() 131 | particle.initialization(self.states[0], self.N_particles, 132 | self.N_landmarks) 133 | self.motion_model.initialize_particle(particle) 134 | self.particles.append(particle) 135 | 136 | def robot_update(self, control): 137 | ''' 138 | Update robot pose through sampling motion model for all particles. 139 | 140 | Input: 141 | control: control input U_t. 142 | [timestamp, -1, v_t, w_t] 143 | Output: 144 | None. 145 | ''' 146 | for particle in self.particles: 147 | self.motion_model.sample_motion_model(particle, control) 148 | 149 | def landmark_update(self, measurement): 150 | ''' 151 | Update landmark mean and covariance for all landmarks of all particles. 152 | Based on EKF method. 153 | 154 | Input: 155 | measurement: measurement data Z_t. 156 | [timestamp, #landmark, range, bearing] 157 | Output: 158 | None. 159 | ''' 160 | # Return if the measured object is not a landmark (another robot) 161 | if not measurement[1] in self.landmark_indexes: 162 | return 163 | 164 | for particle in self.particles: 165 | # Get landmark index 166 | landmark_idx = self.data_association(particle, measurement) 167 | 168 | # Initialize landmark by measurement if it is newly observed 169 | if not particle.lm_ob[landmark_idx]: 170 | self.measurement_model.\ 171 | initialize_landmark(particle, measurement, 172 | landmark_idx, 1.0/self.N_landmarks) 173 | 174 | # Update landmark by EKF if it has been observed before 175 | else: 176 | self.measurement_model.\ 177 | landmark_update(particle, measurement, landmark_idx) 178 | 179 | # Normalize all weights 180 | self.weights_normalization() 181 | 182 | # Resample all particles according to the weights 183 | self.importance_sampling() 184 | 185 | def data_association(self, particle, measurement): 186 | ''' 187 | For a particle, compute likelihood of correspondence for all observed 188 | landmarks. 189 | Choose landmark according to ML (Maximum Likelihood). 190 | 191 | Input: 192 | particle: single particle to be updated. 193 | measurement: measurement data Z_t. 194 | [timestamp, #landmark, range, bearing] 195 | Output: 196 | landmark_idx: index of the associated landmark. 
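        The likelihood used for the ML decision is the Gaussian innovation
        density computed in compute_correspondence:

            w_j = |2 * pi * Q_j| ** (-1/2) *
                  exp(-1/2 * (z - z_hat_j).T * Q_j^(-1) * (z - z_hat_j))

        with Q_j = H_m * Sigma_j * H_m.T + Q_t; the observed landmark with
        the largest w_j is returned.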
197 | ''' 198 | # Return if the measured object is not a landmark (another robot) 199 | if not measurement[1] in self.landmark_indexes: 200 | return 201 | 202 | # Trick!!!!!: if the current landmark has not been seen, return its 203 | # corresponding index, and then initilize it at the measured location 204 | if not particle.lm_ob[self.landmark_indexes[measurement[1]]]: 205 | return self.landmark_indexes[measurement[1]] 206 | 207 | # Compute likelihood of correspondence for all observed landmarks 208 | max_likelihood = 0.0 209 | max_idx = 0 210 | for landmark_idx in range(self.N_landmarks): 211 | if particle.lm_ob[landmark_idx]: 212 | likelihood = self.measurement_model.\ 213 | compute_correspondence(particle, measurement, landmark_idx) 214 | if likelihood > max_likelihood: 215 | max_likelihood = likelihood 216 | max_idx = landmark_idx 217 | 218 | return max_idx 219 | 220 | def weights_normalization(self): 221 | ''' 222 | Normalize weight in all particles so that the sum = 1. 223 | 224 | Input: 225 | None. 226 | Output: 227 | None. 228 | ''' 229 | # Compute sum of the weights 230 | sum = 0.0 231 | for particle in self.particles: 232 | sum += particle.weight 233 | 234 | # If sum is too small, equally assign weights to all particles 235 | if sum < 1e-10: 236 | for particle in self.particles: 237 | particle.weight = 1.0 / self.N_particles 238 | return 239 | 240 | for particle in self.particles: 241 | particle.weight /= sum 242 | 243 | def importance_sampling(self): 244 | ''' 245 | Resample all particles through the importance factors. 246 | 247 | Input: 248 | None. 249 | Output: 250 | None. 251 | ''' 252 | # Construct weights vector 253 | weights = [] 254 | for particle in self.particles: 255 | weights.append(particle.weight) 256 | 257 | # Resample all particles according to importance weights 258 | new_indexes =\ 259 | np.random.choice(len(self.particles), len(self.particles), 260 | replace=True, p=weights) 261 | 262 | # Update new particles 263 | new_particles = [] 264 | for index in new_indexes: 265 | new_particles.append(copy.deepcopy(self.particles[index])) 266 | self.particles = new_particles 267 | 268 | def state_update(self): 269 | ''' 270 | Update the robot and landmark states by taking average among all 271 | particles. 272 | 273 | Input: 274 | None. 275 | Output: 276 | None. 277 | ''' 278 | # Robot state 279 | timestamp = self.particles[0].timestamp 280 | x = 0.0 281 | y = 0.0 282 | theta = 0.0 283 | 284 | for particle in self.particles: 285 | x += particle.x 286 | y += particle.y 287 | theta += particle.theta 288 | 289 | x /= self.N_particles 290 | y /= self.N_particles 291 | theta /= self.N_particles 292 | 293 | self.states = np.append(self.states, 294 | np.array([[timestamp, x, y, theta]]), axis=0) 295 | 296 | # Landmark state 297 | landmark_states = np.zeros((self.N_landmarks, 2)) 298 | count = np.zeros(self.N_landmarks) 299 | self.landmark_observed = np.full(self.N_landmarks, False) 300 | 301 | for particle in self.particles: 302 | for landmark_idx in range(self.N_landmarks): 303 | if particle.lm_ob[landmark_idx]: 304 | landmark_states[landmark_idx] +=\ 305 | particle.lm_mean[landmark_idx] 306 | count[landmark_idx] += 1 307 | self.landmark_observed[landmark_idx] = True 308 | 309 | for landmark_idx in range(self.N_landmarks): 310 | if self.landmark_observed[landmark_idx]: 311 | landmark_states[landmark_idx] /= count[landmark_idx] 312 | 313 | self.landmark_states = landmark_states 314 | 315 | def plot_data(self): 316 | ''' 317 | Plot all data through matplotlib. 
318 | Conduct animation as the algorithm runs. 319 | 320 | Input: 321 | None. 322 | Output: 323 | None. 324 | ''' 325 | # Clear all 326 | plt.cla() 327 | 328 | # Ground truth data 329 | plt.plot(self.groundtruth_data[:, 1], self.groundtruth_data[:, 2], 330 | 'b', label="Robot State Ground truth") 331 | 332 | # States 333 | plt.plot(self.states[:, 1], self.states[:, 2], 334 | 'r', label="Robot State Estimate") 335 | 336 | # Start and end points 337 | plt.plot(self.groundtruth_data[0, 1], self.groundtruth_data[0, 2], 338 | 'g8', markersize=12, label="Start point") 339 | plt.plot(self.groundtruth_data[-1, 1], self.groundtruth_data[-1, 2], 340 | 'y8', markersize=12, label="End point") 341 | 342 | # Particles 343 | particle_xs = [] 344 | particle_ys = [] 345 | for particle in self.particles: 346 | particle_xs.append(particle.x) 347 | particle_ys.append(particle.y) 348 | plt.scatter(particle_xs, particle_ys, 349 | s=5, c='k', alpha=0.5, label="Particles") 350 | 351 | # Landmark ground truth locations and indexes 352 | landmark_xs = [] 353 | landmark_ys = [] 354 | for location in self.landmark_locations: 355 | landmark_xs.append(self.landmark_locations[location][0]) 356 | landmark_ys.append(self.landmark_locations[location][1]) 357 | index = self.landmark_indexes[location] + 6 358 | plt.text(landmark_xs[-1], landmark_ys[-1], str(index), 359 | alpha=0.5, fontsize=10) 360 | plt.scatter(landmark_xs, landmark_ys, s=200, c='k', alpha=0.2, 361 | marker='*', label='Landmark Ground Truth') 362 | 363 | # Landmark estimated locations 364 | estimate_xs = [] 365 | estimate_ys = [] 366 | for i in range(self.N_landmarks): 367 | if self.landmark_observed[i]: 368 | estimate_xs.append(self.landmark_states[i, 0]) 369 | estimate_ys.append(self.landmark_states[i, 1]) 370 | plt.text(estimate_xs[-1], estimate_ys[-1], 371 | str(i+6), fontsize=10) 372 | plt.scatter(estimate_xs, estimate_ys, 373 | s=50, c='k', marker='P', label='Landmark Estimate') 374 | 375 | plt.title('Fast SLAM 1.0 with unknown correspondences') 376 | plt.legend() 377 | plt.xlim((-2.0, 5.5)) 378 | plt.ylim((-7.0, 7.0)) 379 | plt.pause(1e-16) 380 | 381 | 382 | if __name__ == "__main__": 383 | pass 384 | -------------------------------------------------------------------------------- /4.Fast_SLAM_1/src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChengeYang/Probabilistic-Robotics-Algorithms/b597045a9dd86bbb93799607691d7cc3f4e7d8b8/4.Fast_SLAM_1/src/__init__.py -------------------------------------------------------------------------------- /4.Fast_SLAM_1/test_Fast_SLAM_1_known_correspondences.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | ''' 4 | Run Fast SLAM 1.0 on the UTIAS Multi-Robot Cooperative Localization and Mapping 5 | Dataset. 
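Since the script imports lib and src as local packages, it is presumably
meant to be run from inside the 4.Fast_SLAM_1 directory, e.g.
python3 test_Fast_SLAM_1_known_correspondences.py.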
6 | 7 | Author: Chenge Yang 8 | Email: chengeyang2019@u.northwestern.edu 9 | ''' 10 | 11 | import numpy as np 12 | import matplotlib.pyplot as plt 13 | 14 | from lib import MotionModel 15 | from lib import MeasurementModel 16 | from src.Fast_SLAM_1_known_correspondences import FastSLAM1 17 | 18 | 19 | if __name__ == '__main__': 20 | # Dataset info 21 | dataset = "../0.Dataset1" 22 | start_frame = 800 23 | end_frame = 3200 24 | 25 | # Initialize Motion Model object 26 | # Motion noise (in meters / rad) 27 | # [noise_x, noise_y, noise_theta, noise_v, noise_w] 28 | # Fisrt three are used for initializing particles 29 | # Last two are used for motion update 30 | motion_noise = np.array([0.0, 0.0, 0.0, 0.1, 0.15]) 31 | motion_model = MotionModel(motion_noise) 32 | 33 | # Initialize Measurement Model object 34 | # Measurement covariance matrix 35 | Q = np.diagflat(np.array([0.05, 0.02])) ** 2 36 | measurement_model = MeasurementModel(Q) 37 | 38 | # Initialize SLAM algorithm 39 | # Number of particles 40 | N_particles = 200 41 | fast_slam = FastSLAM1(motion_model, measurement_model) 42 | fast_slam.load_data(dataset, start_frame, end_frame) 43 | fast_slam.initialization(N_particles) 44 | 45 | # Run full Fast SLAM 1.0 algorithm 46 | for data in fast_slam.data: 47 | if (data[1] == -1): 48 | fast_slam.robot_update(data) 49 | else: 50 | fast_slam.landmark_update(data) 51 | fast_slam.state_update() 52 | # Plot every n frames 53 | if (len(fast_slam.states) % 20 == 0): 54 | fast_slam.plot_data() 55 | plt.show() 56 | -------------------------------------------------------------------------------- /4.Fast_SLAM_1/test_Fast_SLAM_1_unknown_correspondences.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | ''' 4 | Run Fast SLAM 1.0 on the UTIAS Multi-Robot Cooperative Localization and Mapping 5 | Dataset. 
6 | 7 | Author: Chenge Yang 8 | Email: chengeyang2019@u.northwestern.edu 9 | ''' 10 | 11 | import numpy as np 12 | import matplotlib.pyplot as plt 13 | 14 | from lib import MotionModel 15 | from lib import MeasurementModel 16 | from src.Fast_SLAM_1_unknown_correspondences import FastSLAM1 17 | 18 | 19 | if __name__ == '__main__': 20 | # Dataset info 21 | dataset = "../0.Dataset1" 22 | start_frame = 800 23 | end_frame = 3200 24 | 25 | # Initialize Motion Model object 26 | # Motion noise (in meters / rad) 27 | # [noise_x, noise_y, noise_theta, noise_v, noise_w] 28 | # Fisrt three are used for initializing particles 29 | # Last two are used for motion update 30 | motion_noise = np.array([0.0, 0.0, 0.0, 0.1, 0.15]) 31 | motion_model = MotionModel(motion_noise) 32 | 33 | # Initialize Measurement Model object 34 | # Measurement covariance matrix 35 | Q = np.diagflat(np.array([0.05, 0.02])) ** 2 36 | measurement_model = MeasurementModel(Q) 37 | 38 | # Initialize SLAM algorithm 39 | # Number of particles 40 | N_particles = 100 41 | fast_slam = FastSLAM1(motion_model, measurement_model) 42 | fast_slam.load_data(dataset, start_frame, end_frame) 43 | fast_slam.initialization(N_particles) 44 | 45 | # Run full Fast SLAM 1.0 algorithm 46 | for data in fast_slam.data: 47 | if (data[1] == -1): 48 | fast_slam.robot_update(data) 49 | else: 50 | fast_slam.landmark_update(data) 51 | fast_slam.state_update() 52 | # Plot every n frames 53 | if (len(fast_slam.states) % 20 == 0): 54 | fast_slam.plot_data() 55 | plt.show() 56 | -------------------------------------------------------------------------------- /5.Fast_SLAM_2/lib/__init__.py: -------------------------------------------------------------------------------- 1 | from .particle import Particle 2 | from .motion import MotionModel 3 | from .measurement import MeasurementModel 4 | -------------------------------------------------------------------------------- /5.Fast_SLAM_2/lib/measurement.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | ''' 4 | Measurement model for 2D Range Sensor ([range, bearing]). 5 | 6 | Author: Chenge Yang 7 | Email: chengeyang2019@u.northwestern.edu 8 | ''' 9 | 10 | import numpy as np 11 | import copy 12 | 13 | 14 | class MeasurementModel(): 15 | def __init__(self, R, Q): 16 | ''' 17 | Input: 18 | R: Measurement covariance matrix. 19 | Dimension: [3, 3]. 20 | Q: Measurement covariance matrix. 21 | Dimension: [2, 2]. 22 | ''' 23 | self.R = R 24 | self.R_inverse = np.linalg.inv(self.R) 25 | self.Q = Q 26 | self.Q_inverse = np.linalg.inv(self.Q) 27 | 28 | def compute_expected_measurement(self, particle, landmark_idx): 29 | ''' 30 | Compute the expected range and bearing given current robot state and 31 | landmark state. 32 | 33 | Measurement model: (expected measurement) 34 | range = sqrt((x_l - x_t)^2 + (y_l - y_t)^2) 35 | bearing = atan2((y_l - y_t) / (x_l - x_t)) - θ_t 36 | 37 | Input: 38 | particle: Particle() object to be updated. 39 | landmark_idx: the index of the landmark (0 ~ 15). 40 | Output: 41 | range, bearing: the expected measurement. 
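        Worked example (illustrative numbers): a particle at x = 0, y = 0,
        theta = 0 with landmark mean (1, 1) gives q = 2, so range = sqrt(2)
        (about 1.414) and bearing = atan2(1, 1) - 0 = pi / 4.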
42 | ''' 43 | delta_x = particle.lm_mean[landmark_idx, 0] - particle.x 44 | delta_y = particle.lm_mean[landmark_idx, 1] - particle.y 45 | q = delta_x ** 2 + delta_y ** 2 46 | 47 | range = np.sqrt(q) 48 | bearing = np.arctan2(delta_y, delta_x) - particle.theta 49 | 50 | return range, bearing 51 | 52 | def compute_expected_landmark_state(self, particle, measurement): 53 | ''' 54 | Compute the expected landmark location [x, y] given current robot state 55 | and measurement data. 56 | 57 | Expected landmark state: inverse of the measurement model. 58 | x_l = x_t + range_t * cos(bearing_t + theta_t) 59 | y_l = y_t + range_t * sin(bearing_t + theta_t) 60 | 61 | Input: 62 | particle: Particle() object to be updated. 63 | measurement: measurement data Z_t. 64 | [timestamp, #landmark, range, bearing] 65 | Output: 66 | x, y: expected landmark state [x, y] 67 | ''' 68 | x = particle.x + measurement[2] *\ 69 | np.cos(measurement[3] + particle.theta) 70 | y = particle.y + measurement[2] *\ 71 | np.sin(measurement[3] + particle.theta) 72 | 73 | return x, y 74 | 75 | def compute_robot_jacobian(self, particle, landmark_idx): 76 | ''' 77 | Computing the Jacobian wrt robot state X_t. 78 | 79 | Jacobian of measurement: only take derivatives of robot X_t. 80 | H = d h(x_t, x_l) / d (x_t) 81 | H_x = -delta_x/√q -delta_y/√q 0 82 | delta_y/q -delta_x/q -1 83 | 84 | Input: 85 | particle: Particle() object to be updated. 86 | landmark_idx: the index of the landmark (0 ~ 15). 87 | Output: 88 | H_x: Jacobian h'(X_t, X_l) 89 | Dimension: [2, 3] 90 | ''' 91 | delta_x = particle.lm_mean[landmark_idx, 0] - particle.x 92 | delta_y = particle.lm_mean[landmark_idx, 1] - particle.y 93 | q = delta_x ** 2 + delta_y ** 2 94 | 95 | H_1 = np.array([-delta_x/np.sqrt(q), -delta_y/np.sqrt(q), 0.0]) 96 | H_2 = np.array([delta_y/q, -delta_x/q, -1.0]) 97 | H_x = np.array([H_1, H_2]) 98 | 99 | return H_x 100 | 101 | def compute_landmark_jacobian(self, particle, landmark_idx): 102 | ''' 103 | Computing the Jacobian wrt landmark state X_l. 104 | 105 | Jacobian of measurement: only take derivatives of landmark X_l. 106 | H = d h(x_t, x_l) / d (x_l) 107 | H_m = delta_x/√q delta_y/√q 108 | -delta_y/q delta_x/q 109 | 110 | Input: 111 | particle: Particle() object to be updated. 112 | landmark_idx: the index of the landmark (0 ~ 15). 113 | Output: 114 | H_m: Jacobian h'(X_t, X_l) 115 | Dimension: [2, 2] 116 | ''' 117 | delta_x = particle.lm_mean[landmark_idx, 0] - particle.x 118 | delta_y = particle.lm_mean[landmark_idx, 1] - particle.y 119 | q = delta_x ** 2 + delta_y ** 2 120 | 121 | H_1 = np.array([delta_x/np.sqrt(q), delta_y/np.sqrt(q)]) 122 | H_2 = np.array([-delta_y/q, delta_x/q]) 123 | H_m = np.array([H_1, H_2]) 124 | 125 | return H_m 126 | 127 | def sample_measurement_model(self, particle, measurement,landmark_idx): 128 | ''' 129 | Implementation for Fast SLAM 2.0. 130 | Sample next state X_t from current state X_t and measurement Z_t with 131 | added measurement covariance. 132 | 133 | Input: 134 | particle: Particle() object to be updated. 135 | measurement: measurement data Z_t. 136 | [timestamp, #landmark, range, bearing] 137 | landmark_idx: the index of the landmark (0 ~ 15). 138 | Output: 139 | pose_sample: [x, t, theta] 140 | Robot pose sampled from the joint posterior 141 | p(X_t | X_1:t-1, U_1:t, Z_1:t, C_1:t). 142 | Q: measurement process covariance. 143 | size: [2, 2]. 
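        In equation form, the proposal distribution computed below is

            Q       = Q_t + H_m * Sigma_m * H_m.T
            Sigma_x = (H_x.T * Q^(-1) * H_x + R^(-1))^(-1)
            mu_x    = Sigma_x * H_x.T * Q^(-1) * (z - z_hat) + x_t

        (cf. the FastSLAM 2.0 derivation in Probabilistic Robotics, Ch. 13);
        the new pose is then sampled from N(mu_x, Sigma_x).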
144 | ''' 145 | # Robot pose prediction 146 | x_t = np.array([[particle.x], [particle.y], [particle.theta]]) 147 | 148 | # Predict measurement 149 | range_expected, bearing_expected =\ 150 | self.compute_expected_measurement(particle, landmark_idx) 151 | 152 | # Get Jacobian wrt robot pose 153 | H_x = self.compute_robot_jacobian(particle, landmark_idx) 154 | 155 | # Get Jacobian wrt landmark state 156 | H_m = self.compute_landmark_jacobian(particle, landmark_idx) 157 | 158 | # Measurement process covariance 159 | Q = self.Q + H_m.dot(particle.lm_cov[landmark_idx]).dot(H_m.T) 160 | Q_inverse = np.linalg.inv(Q) 161 | 162 | # Mean and covariance of proposal distribution 163 | difference = np.array([[measurement[2] - range_expected], 164 | [measurement[3] - bearing_expected]]) 165 | if difference[1] > np.pi: 166 | difference[1] -= 2 * np.pi 167 | elif difference[1] < -np.pi: 168 | difference[1] += 2 * np.pi 169 | cov = np.linalg.inv(H_x.T.dot(Q_inverse).dot(H_x) + self.R_inverse) 170 | mean = cov.dot(H_x.T).dot(Q_inverse).dot(difference) + x_t 171 | 172 | # Sample from the proposal distribution 173 | x = np.random.normal(mean[0], cov[0, 0]) 174 | y = np.random.normal(mean[1], cov[1, 1]) 175 | theta = np.random.normal(mean[2], cov[2, 2]) 176 | if theta > np.pi: 177 | theta -= 2 * np.pi 178 | elif theta < -np.pi: 179 | theta += 2 * np.pi 180 | pose_sample = np.array([x, y, theta]) 181 | 182 | return pose_sample, Q 183 | 184 | def initialize_landmark(self, particle, measurement, landmark_idx, weight): 185 | ''' 186 | Initialize landmark mean and covariance for one landmark of a given 187 | particle. 188 | This landmark is the first time to be observed. 189 | Based on EKF method. 190 | 191 | Input: 192 | particle: Particle() object to be updated. 193 | measurement: measurement data Z_t. 194 | [timestamp, #landmark, range, bearing] 195 | landmark_idx: the index of the landmark (0 ~ 15). 196 | weight: the default importance factor for a new feature. 197 | Output: 198 | None. 199 | ''' 200 | # Update landmark mean by inverse measurement model 201 | particle.lm_mean[landmark_idx] =\ 202 | self.compute_expected_landmark_state(particle, measurement) 203 | 204 | # Get Jacobian wrt landmark state 205 | H_m = self.compute_landmark_jacobian(particle, landmark_idx) 206 | 207 | # Update landmark covariance 208 | H_m_inverse = np.linalg.inv(H_m) 209 | particle.lm_cov[landmark_idx] =\ 210 | H_m_inverse.dot(self.Q).dot(H_m_inverse.T) 211 | 212 | # Mark landmark as observed 213 | particle.lm_ob[landmark_idx] = True 214 | 215 | # Assign default importance weight 216 | particle.weight = weight 217 | 218 | # Update timestamp 219 | particle.timestamp = measurement[0] 220 | 221 | def landmark_update(self, particle, measurement, landmark_idx, pose_sample): 222 | ''' 223 | Implementation for Fast SLAM 2.0. 224 | Update landmark mean and covariance, as well as the particle's weight 225 | for one landmarks of a given particle. 226 | This landmark has to be observed before. 227 | Based on EKF method. 228 | 229 | Input: 230 | particle: Particle() object to be updated. 231 | measurement: measurement data Z_t. 232 | [timestamp, #landmark, range, bearing] 233 | landmark_idx: the index of the landmark (0 ~ 15). 234 | pose_sample: [x, t, theta] 235 | Robot pose sampled from the joint posterior 236 | p(X_t | X_1:t-1, U_1:t, Z_1:t, C_1:t). 237 | Output: 238 | None. 
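        The EKF update performed below, in equation form:

            K       = Sigma_m * H_m.T * Q^(-1)
            mu_m    <- mu_m + K * (z - z_hat)
            Sigma_m <- (I - K * H_m) * Sigma_m

        with importance weight
            w = |2 * pi * L| ** (-1/2) *
                exp(-1/2 * (z - z_hat).T * L^(-1) * (z - z_hat)),
            L = H_x * R * H_x.T + H_m * Sigma_m * H_m.T + Q_t.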
        '''
        # Predict measurement
        range_expected, bearing_expected =\
            self.compute_expected_measurement(particle, landmark_idx)

        # Get Jacobian wrt robot pose
        H_x = self.compute_robot_jacobian(particle, landmark_idx)

        # Get Jacobian wrt landmark state
        H_m = self.compute_landmark_jacobian(particle, landmark_idx)

        # Measurement process covariance
        Q = self.Q + H_m.dot(particle.lm_cov[landmark_idx]).dot(H_m.T)
        Q_inverse = np.linalg.inv(Q)

        # Compute Kalman gain
        difference = np.array([[measurement[2] - range_expected],
                               [measurement[3] - bearing_expected]])
        if difference[1] > np.pi:
            difference[1] -= 2 * np.pi
        elif difference[1] < -np.pi:
            difference[1] += 2 * np.pi
        K = particle.lm_cov[landmark_idx].dot(H_m.T).dot(Q_inverse)

        # Compute importance factor
        L = H_x.dot(self.R).dot(H_x.T) + H_m.dot(particle.lm_cov[landmark_idx])\
            .dot(H_m.T) + self.Q
        particle.weight = np.linalg.det(2 * np.pi * L) ** (-0.5) *\
            np.exp(-0.5 * difference.T.dot(np.linalg.inv(L)).
                   dot(difference))[0, 0]

        # Update landmark mean
        innovation = K.dot(difference)
        particle.lm_mean[landmark_idx] += innovation.T[0]

        # Update landmark covariance
        particle.lm_cov[landmark_idx] =\
            (np.identity(2) - K.dot(H_m)).dot(particle.lm_cov[landmark_idx])

        # Update robot pose
        particle.timestamp = measurement[0]
        particle.x = pose_sample[0]
        particle.y = pose_sample[1]
        particle.theta = pose_sample[2]

    def compute_correspondence(self, particle, measurement, landmark_idx,
                               pose_sample, Q):
        '''
        Implementation for Fast SLAM 2.0.
        Update the robot pose of a particle to a given pose sample.
        Then compute the likelihood of correspondence between the measurement
        and a given landmark.

        Input:
            particle: Particle() object to be updated.
            measurement: measurement data Z_t.
                [timestamp, #landmark, range, bearing]
            landmark_idx: the index of the landmark (0 ~ 14).
            pose_sample: [x, y, theta]
                Robot pose sampled from the joint posterior
                p(X_t | X_1:t-1, U_1:t, Z_1:t, C_1:t).
            Q: measurement process covariance.
                Size: [2, 2].
        Output:
            likelihood: likelihood of correspondence
        '''
        # Compute expected measurement at the sampled pose
        particle_copy = copy.deepcopy(particle)
        particle_copy.x = pose_sample[0]
        particle_copy.y = pose_sample[1]
        particle_copy.theta = pose_sample[2]
        range_expected, bearing_expected =\
            self.compute_expected_measurement(particle_copy, landmark_idx)

        difference = np.array([measurement[2] - range_expected,
                               measurement[3] - bearing_expected])
        if difference[1] > np.pi:
            difference[1] -= 2 * np.pi
        elif difference[1] < -np.pi:
            difference[1] += 2 * np.pi

        # Likelihood of correspondence
        likelihood = np.linalg.det(2 * np.pi * Q) ** (-0.5) *\
            np.exp(-0.5 * difference.T.dot(np.linalg.inv(Q)).
                   dot(difference))
        # Note: difference is 1-D here, so the quadratic form above is already
        # a scalar and needs no [0, 0] indexing (unlike in landmark_update,
        # where the difference is a column vector).

        return likelihood


if __name__ == '__main__':
    pass
--------------------------------------------------------------------------------
/5.Fast_SLAM_2/lib/motion.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Velocity motion model for a 2D differential drive robot:
    Robot state: [x, y, θ]
    Control: [v, w]

Author: Chenge Yang
Email: chengeyang2019@u.northwestern.edu
'''

import numpy as np


class MotionModel():
    def __init__(self, R, motion_noise):
        '''
        Input:
            R: Motion covariance matrix (not the measurement covariance).
                Dimension: [3, 3].
            motion_noise: [noise_x, noise_y, noise_theta, noise_v, noise_w]
                (in meters / rad).
        '''
        self.R = R
        self.R_inverse = np.linalg.inv(self.R)
        self.motion_noise = motion_noise

    def initialize_particle(self, particle):
        '''
        Add motion noise to the robot state in the given particle object.

        Input:
            particle: Particle() object which has been initialized by the
                first ground truth data.
        Output:
            None.
        '''
        # Apply Gaussian noise to the robot state
        particle.x = np.random.normal(particle.x, self.motion_noise[0])
        particle.y = np.random.normal(particle.y, self.motion_noise[1])
        particle.theta = np.random.normal(particle.theta, self.motion_noise[2])

    def motion_update(self, particle, control):
        '''
        Conduct the motion update for a given particle from current state
        X_t-1 and control U_t.

        Motion Model (simplified):
            State: [x, y, θ]
            Control: [v, w]
            [x_t, y_t, θ_t] = g(u_t, x_t-1)
                x_t = x_t-1 + v * cos(θ_t-1) * delta_t
                y_t = y_t-1 + v * sin(θ_t-1) * delta_t
                θ_t = θ_t-1 + w * delta_t

        Input:
            particle: Particle() object to be updated.
            control: control input U_t.
                [timestamp, -1, v_t, w_t]
        Output:
            None.
        '''
        delta_t = control[0] - particle.timestamp

        # Compute updated [timestamp, x, y, theta]
        particle.timestamp = control[0]
        particle.x += control[2] * np.cos(particle.theta) * delta_t
        particle.y += control[2] * np.sin(particle.theta) * delta_t
        particle.theta += control[3] * delta_t

        # Limit θ within [-pi, pi]
        if (particle.theta > np.pi):
            particle.theta -= 2 * np.pi
        elif (particle.theta < -np.pi):
            particle.theta += 2 * np.pi

    def sample_motion_model(self, particle, control):
        '''
        Sample the next state X_t from current state X_t-1 and control U_t
        with added motion noise (same sampling scheme as in Fast SLAM 1.0).

        Input:
            particle: Particle() object to be updated.
            control: control input U_t.
                [timestamp, -1, v_t, w_t]
        Output:
            None.
        '''
        # Apply Gaussian noise to the control input
        v = np.random.normal(control[2], self.motion_noise[3])
        w = np.random.normal(control[3], self.motion_noise[4])
        control_noisy = np.array([control[0], control[1], v, w])

        # Motion update
        self.motion_update(particle, control_noisy)


if __name__ == '__main__':
    pass
--------------------------------------------------------------------------------
/5.Fast_SLAM_2/lib/particle.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Definition for a single particle object.

Author: Chenge Yang
Email: chengeyang2019@u.northwestern.edu
'''

import numpy as np


class Particle():
    def __init__(self):
        # Robot state: [timestamp, x, y, θ]
        self.timestamp = 0.0
        self.x = 0.0
        self.y = 0.0
        self.theta = 0.0

        # Weight
        self.weight = 1.0

        # Landmark state: [x, y]
        # All landmarks' mean and covariance
        self.lm_mean = np.zeros((1, 2))
        self.lm_cov = np.zeros((1, 2, 2))
        self.lm_ob = np.full(1, False)

    def initialization(self, init_state, N_particles, N_landmarks):
        '''
        Input:
            init_state: initial state (from the first ground truth data)
                [timestamp_0, x_0, y_0, θ_0]
            N_particles: number of particles in the Particle Filter.
            N_landmarks: number of landmarks each particle tracks.
        Output:
            None.
        '''
        # Robot state: [timestamp, x, y, θ]
        # Initialized to init_state
        self.timestamp = init_state[0]
        self.x = init_state[1]
        self.y = init_state[2]
        self.theta = init_state[3]

        # Weight
        # Initialized to be equally distributed among all particles
        self.weight = 1.0 / N_particles

        # Landmark state: [x, y]
        # All landmarks' mean and covariance
        # Initialized as zeros
        self.lm_mean = np.zeros((N_landmarks, 2))
        self.lm_cov = np.zeros((N_landmarks, 2, 2))

        # Table to record whether each landmark has been seen
        # Indexes [0] - [14] correspond to landmarks 6 - 20
        self.lm_ob = np.full(N_landmarks, False)


if __name__ == '__main__':
    pass
--------------------------------------------------------------------------------
/5.Fast_SLAM_2/src/Fast_SLAM_2_unknown_correspondences.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Implementation of Fast SLAM 2.0 with unknown correspondences.

In general, Fast SLAM 2.0 is more difficult to implement than Fast SLAM 1.0.
The main difference lies in the following functions:
    1. data_association
    2. landmark_update
    3. sample_measurement_model
The robot pose is sampled from a proposal distribution that incorporates the
effect of the current measurement.

See Probabilistic Robotics, Page 463, Table 13.3 for the full algorithm.
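
The pose sampling at the core of Fast SLAM 2.0 (see sample_measurement_model
in lib/measurement.py): each particle draws its pose from the Gaussian
proposal
    Σ_x = [H_x^T Q^{-1} H_x + R^{-1}]^{-1}
    μ_x = Σ_x H_x^T Q^{-1} (z_t - ẑ_t) + x̂_t
with Q = Q_t + H_m Σ_m H_m^T, instead of sampling from the raw motion model
as in Fast SLAM 1.0.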

Author: Chenge Yang
Email: chengeyang2019@u.northwestern.edu
'''

import numpy as np
import matplotlib.pyplot as plt
import copy

from lib.particle import Particle


class FastSLAM2():
    def __init__(self, motion_model, measurement_model):
        '''
        Input:
            motion_model: MotionModel() object
            measurement_model: MeasurementModel() object
        '''
        self.motion_model = motion_model
        self.measurement_model = measurement_model

    def load_data(self, dataset, start_frame, end_frame):
        '''
        Load data from the UTIAS Multi-Robot Cooperative Localization and
        Mapping Dataset.

        Input:
            dataset: directory of the dataset.
            start_frame: index of the first frame to run the SLAM algorithm on.
            end_frame: index of the last frame to run the SLAM algorithm on.
        Output:
            None.
        '''
        # Loading dataset
        # Barcodes: [Subject#, Barcode#]
        self.barcodes_data = np.loadtxt(dataset + "/Barcodes.dat")
        # Ground truth: [Time[s], x[m], y[m], orientation[rad]]
        self.groundtruth_data = np.loadtxt(dataset + "/Groundtruth.dat")
        # Landmark ground truth: [Subject#, x[m], y[m]]
        self.landmark_groundtruth_data =\
            np.loadtxt(dataset + "/Landmark_Groundtruth.dat")
        # Measurement: [Time[s], Subject#, range[m], bearing[rad]]
        self.measurement_data = np.loadtxt(dataset + "/Measurement.dat")
        # Odometry: [Time[s], Subject#, forward_v[m/s], angular_v[rad/s]]
        self.odometry_data = np.loadtxt(dataset + "/Odometry.dat")

        # Collect all input data and sort by timestamp
        # Add subject "odom" = -1 for odometry data
        odom_data = np.insert(self.odometry_data, 1, -1, axis=1)
        self.data = np.concatenate((odom_data, self.measurement_data), axis=0)
        self.data = self.data[np.argsort(self.data[:, 0])]

        # Remove all data before the first timestamp of ground truth
        # Use the first ground truth data as the initial robot location
        for i in range(len(self.data)):
            if (self.data[i][0] > self.groundtruth_data[0][0]):
                break
        self.data = self.data[i:]

        # Select data according to start_frame and end_frame
        # The first frame must be a control input
        while self.data[start_frame][1] != -1:
            start_frame += 1
        # Remove all data before start_frame and after end_frame
        self.data = self.data[start_frame:end_frame]
        start_timestamp = self.data[0][0]
        end_timestamp = self.data[-1][0]
        # Remove all ground truth data outside this range
        for i in range(len(self.groundtruth_data)):
            if (self.groundtruth_data[i][0] >= end_timestamp):
                break
        self.groundtruth_data = self.groundtruth_data[:i]
        for i in range(len(self.groundtruth_data)):
            if (self.groundtruth_data[i][0] >= start_timestamp):
                break
        self.groundtruth_data = self.groundtruth_data[i:]

        # Combine barcode Subject# with landmark Subject#
        # Lookup table to map barcode Subject# to landmark coordinates
        # [x[m], y[m], x std-dev[m], y std-dev[m]]
        # Ground truth data is not used in SLAM
        self.landmark_locations = {}
        for i in range(5, len(self.barcodes_data), 1):
            self.landmark_locations[self.barcodes_data[i][1]] =\
                self.landmark_groundtruth_data[i - 5][1:]

        # Lookup table to map barcode Subject# to landmark Subject#
        # Subject 6 is the first landmark (indexes 0 - 14 for landmarks 6 - 20)
        self.landmark_indexes = {}
        for i in range(5, len(self.barcodes_data), 1):
            self.landmark_indexes[self.barcodes_data[i][1]] = i - 5

    def initialization(self, N_particles):
        '''
        Initialize the robot state, landmark states, and all particles.

        Input:
            N_particles: number of particles this SLAM algorithm tracks.
        Output:
            None.
        '''
        # Number of particles and landmarks
        self.N_particles = N_particles
        self.N_landmarks = len(self.landmark_indexes)

        # Robot states: [timestamp, x, y, theta]
        # First state is obtained from ground truth
        self.states = np.array([self.groundtruth_data[0]])

        # Landmark states: [x, y]
        self.landmark_states = np.zeros((self.N_landmarks, 2))

        # Table to record whether each landmark has been seen
        # Indexes [0] - [14] correspond to landmarks 6 - 20
        self.landmark_observed = np.full(self.N_landmarks, False)

        # Initial particles
        self.particles = []
        for i in range(N_particles):
            particle = Particle()
            particle.initialization(self.states[0], self.N_particles,
                                    self.N_landmarks)
            self.motion_model.initialize_particle(particle)
            self.particles.append(particle)

    def robot_update(self, control):
        '''
        Update the robot pose through the sampling motion model for all
        particles.

        Input:
            control: control input U_t.
                [timestamp, -1, v_t, w_t]
        Output:
            None.
        '''
        for particle in self.particles:
            self.motion_model.sample_motion_model(particle, control)

    def landmark_update(self, measurement):
        '''
        Update landmark mean and covariance for all landmarks of all
        particles.
        Based on the EKF method.

        Input:
            measurement: measurement data Z_t.
                [timestamp, #landmark, range, bearing]
        Output:
            None.
        '''
        # Return if the measured object is not a landmark (another robot)
        if not measurement[1] in self.landmark_indexes:
            return

        for particle in self.particles:
            # Trick: if the current landmark has not been seen, look up its
            # corresponding index and initialize it at the measured location
            if not particle.lm_ob[self.landmark_indexes[measurement[1]]]:
                landmark_idx = self.landmark_indexes[measurement[1]]
                self.measurement_model.\
                    initialize_landmark(particle, measurement,
                                        landmark_idx, 1.0/self.N_landmarks)

            # Update landmark by EKF if it has been observed before
            else:
                landmark_idx, pose_sample =\
                    self.data_association(particle, measurement)
                self.measurement_model.\
                    landmark_update(particle, measurement, landmark_idx,
                                    pose_sample)

        # Normalize all weights
        self.weights_normalization()

        # Resample all particles according to the weights
        self.importance_sampling()

    def data_association(self, particle, measurement):
        '''
        For a particle, compute the likelihood of correspondence for all
        observed landmarks.
        Choose the landmark according to Maximum Likelihood (ML).

        Input:
            particle: single particle to be updated.
            measurement: measurement data Z_t.
                [timestamp, #landmark, range, bearing]
        Output:
            landmark_idx: index of the associated landmark.
            pose_sample: [x, y, theta]
                Robot pose sampled from the joint posterior
                p(X_t | X_1:t-1, U_1:t, Z_1:t, C_1:t).
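
        For every landmark this particle has already observed, a pose is
        first sampled from the measurement-conditioned proposal, and the
        correspondence likelihood is then evaluated at that sampled pose;
        the landmark with the maximum likelihood is selected.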
        '''
        # Return if the measured object is not a landmark (another robot)
        if not measurement[1] in self.landmark_indexes:
            return

        # Compute likelihood and pose sample for all observed landmarks
        likelihood_array = np.zeros(self.N_landmarks)
        pose_sample_array = np.zeros((self.N_landmarks, 3))

        for landmark_idx in range(self.N_landmarks):
            if not particle.lm_ob[landmark_idx]:
                continue

            pose_sample, Q = self.measurement_model.\
                sample_measurement_model(particle, measurement, landmark_idx)

            likelihood = self.measurement_model.\
                compute_correspondence(particle, measurement, landmark_idx,
                                       pose_sample, Q)

            likelihood_array[landmark_idx] = likelihood
            pose_sample_array[landmark_idx] = pose_sample.T

        # Select the landmark with ML correspondence and its sampled pose
        landmark_idx = np.argmax(likelihood_array)
        pose_sample = pose_sample_array[landmark_idx]

        return landmark_idx, pose_sample

    def weights_normalization(self):
        '''
        Normalize the weights of all particles so that they sum to 1.

        Input:
            None.
        Output:
            None.
        '''
        # Compute the sum of the weights
        # (named weight_sum to avoid shadowing the built-in sum)
        weight_sum = 0.0
        for particle in self.particles:
            weight_sum += particle.weight

        # If the sum is too small, assign equal weights to all particles
        if weight_sum < 1e-10:
            for particle in self.particles:
                particle.weight = 1.0 / self.N_particles
            return

        for particle in self.particles:
            particle.weight /= weight_sum

    def importance_sampling(self):
        '''
        Resample all particles according to the importance factors.

        Input:
            None.
        Output:
            None.
        '''
        # Construct the weights vector
        weights = []
        for particle in self.particles:
            weights.append(particle.weight)

        # Resample all particles according to importance weights
        new_indexes =\
            np.random.choice(len(self.particles), len(self.particles),
                             replace=True, p=weights)

        # Update new particles
        new_particles = []
        for index in new_indexes:
            new_particles.append(copy.deepcopy(self.particles[index]))
        self.particles = new_particles

    def state_update(self):
        '''
        Update the robot and landmark states by averaging over all particles.

        Input:
            None.
        Output:
            None.
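
        The robot state is the mean pose over all particles; each landmark
        state is averaged only over the particles that have observed that
        landmark.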
        '''
        # Robot state
        timestamp = self.particles[0].timestamp
        x = 0.0
        y = 0.0
        theta = 0.0

        for particle in self.particles:
            x += particle.x
            y += particle.y
            theta += particle.theta

        x /= self.N_particles
        y /= self.N_particles
        theta /= self.N_particles

        self.states = np.append(self.states,
                                np.array([[timestamp, x, y, theta]]), axis=0)

        # Landmark states
        landmark_states = np.zeros((self.N_landmarks, 2))
        count = np.zeros(self.N_landmarks)
        self.landmark_observed = np.full(self.N_landmarks, False)

        for particle in self.particles:
            for landmark_idx in range(self.N_landmarks):
                if particle.lm_ob[landmark_idx]:
                    landmark_states[landmark_idx] +=\
                        particle.lm_mean[landmark_idx]
                    count[landmark_idx] += 1
                    self.landmark_observed[landmark_idx] = True

        for landmark_idx in range(self.N_landmarks):
            if self.landmark_observed[landmark_idx]:
                landmark_states[landmark_idx] /= count[landmark_idx]

        self.landmark_states = landmark_states

    def plot_data(self):
        '''
        Plot all data through matplotlib.
        Conduct animation as the algorithm runs.

        Input:
            None.
        Output:
            None.
        '''
        # Clear all
        plt.cla()

        # Ground truth data
        plt.plot(self.groundtruth_data[:, 1], self.groundtruth_data[:, 2],
                 'b', label="Robot State Ground truth")

        # States
        plt.plot(self.states[:, 1], self.states[:, 2],
                 'r', label="Robot State Estimate")

        # Start and end points
        plt.plot(self.groundtruth_data[0, 1], self.groundtruth_data[0, 2],
                 'g8', markersize=12, label="Start point")
        plt.plot(self.groundtruth_data[-1, 1], self.groundtruth_data[-1, 2],
                 'y8', markersize=12, label="End point")

        # Particles
        particle_xs = []
        particle_ys = []
        for particle in self.particles:
            particle_xs.append(particle.x)
            particle_ys.append(particle.y)
        plt.scatter(particle_xs, particle_ys,
                    s=5, c='k', alpha=0.5, label="Particles")

        # Landmark ground truth locations and indexes
        landmark_xs = []
        landmark_ys = []
        for location in self.landmark_locations:
            landmark_xs.append(self.landmark_locations[location][0])
            landmark_ys.append(self.landmark_locations[location][1])
            index = self.landmark_indexes[location] + 6
            plt.text(landmark_xs[-1], landmark_ys[-1], str(index),
                     alpha=0.5, fontsize=10)
        plt.scatter(landmark_xs, landmark_ys, s=200, c='k', alpha=0.2,
                    marker='*', label='Landmark Ground Truth')

        # Landmark estimated locations
        estimate_xs = []
        estimate_ys = []
        for i in range(self.N_landmarks):
            if self.landmark_observed[i]:
                estimate_xs.append(self.landmark_states[i, 0])
                estimate_ys.append(self.landmark_states[i, 1])
                plt.text(estimate_xs[-1], estimate_ys[-1],
                         str(i+6), fontsize=10)
        plt.scatter(estimate_xs, estimate_ys,
                    s=50, c='k', marker='P', label='Landmark Estimate')

        plt.title('Fast SLAM 2.0 with unknown correspondences')
        plt.legend()
        plt.xlim((-2.0, 5.5))
        plt.ylim((-7.0, 7.0))
        plt.pause(1e-16)


if __name__ == "__main__":
    pass
--------------------------------------------------------------------------------
/5.Fast_SLAM_2/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengeYang/Probabilistic-Robotics-Algorithms/b597045a9dd86bbb93799607691d7cc3f4e7d8b8/5.Fast_SLAM_2/src/__init__.py
--------------------------------------------------------------------------------
/5.Fast_SLAM_2/test_Fast_SLAM_2_unknown_correspondences.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Run Fast SLAM 2.0 on the UTIAS Multi-Robot Cooperative Localization and
Mapping Dataset.

Author: Chenge Yang
Email: chengeyang2019@u.northwestern.edu
'''

import numpy as np
import matplotlib.pyplot as plt

from lib import MotionModel
from lib import MeasurementModel
from src.Fast_SLAM_2_unknown_correspondences import FastSLAM2


if __name__ == '__main__':
    # Dataset info
    dataset = "../0.Dataset1"
    start_frame = 400
    end_frame = 3200

    # Motion covariance matrix
    R = np.diagflat(np.array([0.01, 0.01, 0.01])) ** 2
    # Measurement covariance matrix
    # Q = np.diagflat(np.array([0.02, 0.04])) ** 2
    Q = np.diagflat(np.array([0.05, 0.10])) ** 2
    # Motion noise (in meters / rad)
    # [noise_x, noise_y, noise_theta, noise_v, noise_w]
    # First three are used for initializing particles
    # Last two are used for the motion update
    motion_noise = np.array([0.0, 0.0, 0.0, 0.1, 0.15])

    # Initialize Motion Model object
    motion_model = MotionModel(R, motion_noise)

    # Initialize Measurement Model object
    measurement_model = MeasurementModel(R, Q)

    # Initialize SLAM algorithm
    # Number of particles
    N_particles = 50
    fast_slam = FastSLAM2(motion_model, measurement_model)
    fast_slam.load_data(dataset, start_frame, end_frame)
    fast_slam.initialization(N_particles)

    # Run the full Fast SLAM 2.0 algorithm
    for data in fast_slam.data:
        if (data[1] == -1):
            fast_slam.robot_update(data)
        else:
            fast_slam.landmark_update(data)
            fast_slam.state_update()
            # Plot every n frames
            if (len(fast_slam.states) % 30 == 0):
                fast_slam.plot_data()
    # fast_slam.plot_data()
    plt.show()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# SLAM Algorithm Implementation - Probabilistic Robotics
#### Chenge Yang, 2019 Winter, Northwestern University
-----------------------------------------------------------------------------------------
## 1. Introduction
This project contains Python3 implementations and results of a variety of state estimation and SLAM algorithms from Sebastian Thrun's book **Probabilistic Robotics**, using the **UTIAS Multi-Robot Cooperative Localization and Mapping** dataset.

As a beginner in SLAM, I always found it difficult to understand the non-intuitive mathematical equations, and I could barely find straightforward instructions on implementing these algorithms. Therefore, I created this repo to demonstrate the basic concepts behind the book, paired with results running on a simple dataset.

If you are new to the SLAM problem and are reading the book Probabilistic Robotics, this repo will be perfect for you - everything is programmed in Python rather than C++, with abundant inline comments and clear demonstrations of the results.

If you find anything wrong with my implementations, such as a misunderstanding or a code bug, please leave a comment!

#### Table of Contents
- [1. Introduction](#1-Introduction)
- [2. Dataset](#2-Dataset)
- [3. Basic Algorithm](#3-Basic-Algorithm)
- [4. Localization](#4-Localization)
- [4.1. EKF Localization](#41-EKF-Localization)
- [4.2. Particle Filter Localization](#42-Particle-Filter-Localization)
- [5. EKF SLAM](#5-EKF-SLAM)
- [5.1. EKF SLAM with Known Correspondence](#51-EKF-SLAM-with-Known-Correspondence)
- [5.2. EKF SLAM with Unknown Correspondence](#52-EKF-SLAM-with-Unknown-Correspondence)
- [6. Graph SLAM](#6-Graph-SLAM)
- [6.1. Graph SLAM with Known Correspondence](#61-Graph-SLAM-with-Known-Correspondence)
- [7. Fast SLAM 1](#7-Fast-SLAM-1)
- [7.1. Fast SLAM 1 with Known Correspondence](#71-Fast-SLAM-1-with-Known-Correspondence)
- [7.2. Fast SLAM 1 with Unknown Correspondence](#72-Fast-SLAM-1-with-Unknown-Correspondence)
- [8. Fast SLAM 2](#8-Fast-SLAM-2)
- [8.1. Fast SLAM 2 with Unknown Correspondence](#81-Fast-SLAM-2-with-Unknown-Correspondence)
-----------------------------------------------------------------------------------------
## 2. Dataset
**UTIAS Multi-Robot Cooperative Localization and Mapping** is a 2D indoor, feature-based dataset. For details, please refer [here](http://asrl.utias.utoronto.ca/datasets/mrclam/index.html).

This project contains [Dataset0](0.Dataset0/) (MRCLAM_Dataset4, Robot3) and [Dataset1](0.Dataset1/) (MRCLAM_Dataset9, Robot3). All algorithms use Dataset1 to generate the results below.

Each dataset contains five files:
* **Odometry.dat**: Control data (translation and rotation velocity)
* **Measurement.dat**: Measurement data (range and bearing data for visually observed landmarks and other robots)
* **Groundtruth.dat**: Ground truth robot position (measured via Vicon motion capture – use for assessment only)
* **Landmark_Groundtruth.dat**: Ground truth landmark positions (measured via Vicon motion capture)
* **Barcodes.dat**: Associates the barcode IDs with landmark IDs.

The data is processed in the following way:
* Use all Odometry data
* Only use Measurement data for landmarks 6 ~ 20 (subjects 1 ~ 5 are other robots)
* Use Groundtruth data to plot the robot state ground truth
* Use Landmark Groundtruth only for the localization problem
* Associate Landmark Groundtruth with Barcodes to get the landmark index from a measurement
* Combine Odometry data with Measurement data and sort by timestamp to form the input data

-----------------------------------------------------------------------------------------
## 3. Basic Algorithm
All algorithms use the same motion and measurement models:
* Motion model: Motion Model Velocity (simplified from PR P127)
* Measurement model: Feature-Based Measurement Model (PR P178)

The robot trajectory using only Odometry data is shown in the figure below. In general, the odometry data gives a reasonably good initial estimate of the robot pose, but it drifts significantly over time.

[figure: doc/Odometry.png - robot trajectory from raw odometry only]
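
For reference, the simplified velocity motion model is only a few lines of NumPy. This is a minimal sketch mirroring `motion_update()` in the `lib/motion.py` modules; the function and variable names here are illustrative:

```python
import numpy as np

def motion_update(x, y, theta, v, w, dt):
    # Simplified velocity motion model: propagate the pose by one time step
    x += v * np.cos(theta) * dt
    y += v * np.sin(theta) * dt
    theta += w * dt
    # Wrap the heading into [-pi, pi]
    theta = (theta + np.pi) % (2 * np.pi) - np.pi
    return x, y, theta
```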

-----------------------------------------------------------------------------------------
## 4. Localization
Two filter-based localization algorithms are implemented:
* Parametric model: Extended Kalman Filter based localization
* Non-parametric model: Particle Filter based localization

Localization assumes known landmark locations and correspondences. Thus, both algorithms demonstrate good results.

### 4.1. EKF Localization
Probabilistic Robotics Page 204: **Algorithm EKF_Localization_known_correspondences**.

[figure: doc/EKF_Localization_known_correspondences.png - EKF localization result]
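
The structure of one EKF localization step is sketched below. This is a generic predict-correct skeleton following the standard EKF equations, not the repo's exact API; `g`, `G`, `h`, and `H` stand for the motion model, its Jacobian, the measurement model, and its Jacobian:

```python
import numpy as np

def ekf_step(mu, Sigma, u, z, g, G, h, H, R, Q):
    # Predict: push the mean through the motion model, propagate covariance
    mu_bar = g(mu, u)
    G_t = G(mu, u)
    Sigma_bar = G_t @ Sigma @ G_t.T + R
    # Correct: Kalman update against the landmark measurement
    H_t = H(mu_bar)
    S = H_t @ Sigma_bar @ H_t.T + Q           # innovation covariance
    K = Sigma_bar @ H_t.T @ np.linalg.inv(S)  # Kalman gain
    mu_new = mu_bar + K @ (z - h(mu_bar))
    Sigma_new = (np.eye(len(mu)) - K @ H_t) @ Sigma_bar
    return mu_new, Sigma_new
```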

### 4.2. Particle Filter Localization
Probabilistic Robotics Page 252: **Algorithm MCL**.

[figure: doc/PF_Localization_known_correspondences.gif - Particle Filter localization result]
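
One MCL iteration boils down to sample-weight-resample. A minimal sketch, assuming `sample_motion` and `measurement_likelihood` callables (illustrative names, not the repo's API):

```python
import numpy as np

def mcl_step(particles, u, z, sample_motion, measurement_likelihood):
    # 1. Sample: propagate every particle through the noisy motion model
    particles = [sample_motion(p, u) for p in particles]
    # 2. Weight: score each particle by the measurement likelihood
    weights = np.array([measurement_likelihood(z, p) for p in particles])
    weights /= weights.sum()
    # 3. Resample: draw with replacement according to the weights
    indexes = np.random.choice(len(particles), len(particles), p=weights)
    return [particles[i] for i in indexes]
```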

-----------------------------------------------------------------------------------------
## 5. EKF SLAM
EKF SLAM applies the EKF to the SLAM problem, with the following features:
* Feature-based map (landmarks)
* Assumes Gaussian noise for the motion and measurement models
* Maximum Likelihood data association

### 5.1. EKF SLAM with Known Correspondence
Probabilistic Robotics Page 314: **Algorithm EKF_SLAM_known_correspondences**.

The overall process is not very smooth because of inaccurate odometry data and a lack of information in the measurements (each measurement observes only a single landmark). But with known correspondences, the algorithm still converges.

[figure: doc/EKF_SLAM_known_correspondences.gif - EKF SLAM with known correspondences]
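
The key difference from EKF localization is that the landmarks join the state vector, so a single Gaussian jointly tracks the robot pose and the map. A sketch of the state layout (illustrative values; 15 landmarks as in this dataset):

```python
import numpy as np

N_landmarks = 15                   # landmarks 6 ~ 20
dim = 3 + 2 * N_landmarks          # [x, y, theta] + [x_i, y_i] per landmark
mu = np.zeros(dim)                 # joint mean of robot pose and map
Sigma = np.eye(dim) * 1e6          # unseen landmarks start highly uncertain
Sigma[:3, :3] = np.zeros((3, 3))   # initial robot pose is known (ground truth)
```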

### 5.2. EKF SLAM with Unknown Correspondence
Probabilistic Robotics Page 321: **Algorithm EKF_SLAM**.

At the beginning, the algorithm generates an estimate very similar to that of Section 5.1. However, with inaccurate odometry data, data association does not work as expected and produces bad correspondences towards the end, which degrades both the landmark and robot state estimates.

[figure: doc/EKF_SLAM_unknown_correspondences.gif - EKF SLAM with unknown correspondences]
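
The data association step can be summarized as a Mahalanobis-distance test over all known landmarks. A sketch of standard ML association; the gate value and the new-landmark convention are illustrative, not the repo's exact thresholds:

```python
import numpy as np

def ml_associate(innovations, S_list, gate=9.0):
    # innovations: list of (z - z_expected) vectors, one per known landmark
    # S_list: matching innovation covariances
    d2 = [nu.T @ np.linalg.inv(S) @ nu for nu, S in zip(innovations, S_list)]
    j = int(np.argmin(d2))
    # Above the gate, the measurement matches nothing well enough:
    # treat it as a new landmark (signaled by None here)
    return j if d2[j] < gate else None
```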

-----------------------------------------------------------------------------------------
## 6. Graph SLAM
Graph SLAM is the only optimization-based SLAM algorithm implemented in this project. It has the following features:
* Not an online SLAM algorithm - requires the full dataset when running
* Constructs the Information Matrix and Information Vector from control and measurement constraints
* Applies factorization to reduce the dimensions of the Information Matrix and Information Vector
* Solves with the least squares method

### 6.1. Graph SLAM with Known Correspondence
Probabilistic Robotics Page 350: **Algorithm GraphSLAM_known_correspondences**.

Graph SLAM computes the best estimate for the full SLAM problem. Thus, the global estimate will be influenced by local errors. This is demonstrated in the four plots below.

When run over a short time span, Graph SLAM can give a highly accurate estimate. But when run on the full problem, the trajectory drifts significantly, because the odometry and measurement data become less informative towards the end of the period.

[figure: doc/Graph_SLAM_known_correspondences.png - Graph SLAM results on short and full time spans]
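
At its core, once all constraints are folded into the information matrix Ω and information vector ξ, the full posterior mean is the solution of Ω μ = ξ. A toy 1D illustration (two poses, one prior, one odometry constraint; the real implementation builds Ω and ξ from every control and measurement and works in higher dimensions):

```python
import numpy as np

# Prior anchoring x0 at 0 adds 1 to Omega[0, 0].
# Odometry constraint "x1 - x0 = 1" adds [[1, -1], [-1, 1]] to Omega
# and [-1, +1] * 1.0 to xi.
Omega = np.array([[2.0, -1.0],
                  [-1.0,  1.0]])
xi = np.array([-1.0, 1.0])

mu = np.linalg.solve(Omega, xi)   # -> [0., 1.]: x0 = 0, x1 = 1
```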

-----------------------------------------------------------------------------------------
## 7. Fast SLAM 1
Fast SLAM applies a Rao-Blackwellized Particle Filter to the SLAM problem. It has the following features:
* Factorizes the SLAM problem into two sub-problems: localization and mapping
* Uses a Particle Filter to estimate the robot state (localization)
* Uses an EKF to estimate each landmark state (mapping)

A sketch of the per-particle data structure is shown after Section 7.1 below.

### 7.1. Fast SLAM 1 with Known Correspondence
Probabilistic Robotics Page 450: **Algorithm FastSLAM 1.0_known_correspondences**.

[figure: doc/Fast_SLAM_1_known_correspondences.gif - Fast SLAM 1.0 with known correspondences]
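
As mentioned in the feature list above, each particle owns its entire map hypothesis: one small EKF (a 2D mean plus a 2x2 covariance) per landmark, conditioned on that particle's trajectory. A sketch mirroring `lib/particle.py` (the class name here is illustrative):

```python
import numpy as np

class RBParticle():
    def __init__(self, n_landmarks):
        self.pose = np.zeros(3)                      # [x, y, theta]
        self.weight = 1.0                            # importance factor
        self.lm_mean = np.zeros((n_landmarks, 2))    # per-landmark EKF means
        self.lm_cov = np.zeros((n_landmarks, 2, 2))  # per-landmark covariances
        self.lm_ob = np.full(n_landmarks, False)     # seen-before flags
```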

### 7.2. Fast SLAM 1 with Unknown Correspondence
Probabilistic Robotics Page 461: **Algorithm FastSLAM 1.0**.

[figure: doc/Fast_SLAM_1_unknown_correspondences.gif - Fast SLAM 1.0 with unknown correspondences]

-----------------------------------------------------------------------------------------
## 8. Fast SLAM 2
The main difference between Fast SLAM 2.0 and Fast SLAM 1.0 is that, when sampling a new pose, Fast SLAM 2.0 incorporates the measurement information. However, this makes the algorithm considerably more difficult to implement.

### 8.1. Fast SLAM 2 with Unknown Correspondence
Probabilistic Robotics Page 463: **Algorithm FastSLAM 2.0**.

[figure: doc/Fast_SLAM_2_unknown_correspondences.gif - Fast SLAM 2.0 with unknown correspondences]
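
Concretely, the measurement-conditioned proposal each particle samples from looks like this (a sketch mirroring `sample_measurement_model()` in `5.Fast_SLAM_2/lib/measurement.py`; argument names are illustrative):

```python
import numpy as np

def sample_pose_proposal(x_hat, z_diff, H_x, H_m, Sigma_lm, Q_meas, R_inv):
    # Innovation covariance of the landmark's EKF
    Q = Q_meas + H_m @ Sigma_lm @ H_m.T
    Q_inv = np.linalg.inv(Q)
    # Gaussian proposal incorporating the measurement (PR Table 13.3)
    cov = np.linalg.inv(H_x.T @ Q_inv @ H_x + R_inv)
    mean = cov @ H_x.T @ Q_inv @ z_diff + x_hat
    return np.random.multivariate_normal(mean, cov)
```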

-----------------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/doc/EKF_Localization_known_correspondences.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengeYang/Probabilistic-Robotics-Algorithms/b597045a9dd86bbb93799607691d7cc3f4e7d8b8/doc/EKF_Localization_known_correspondences.png
--------------------------------------------------------------------------------
/doc/EKF_SLAM_known_correspondences.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengeYang/Probabilistic-Robotics-Algorithms/b597045a9dd86bbb93799607691d7cc3f4e7d8b8/doc/EKF_SLAM_known_correspondences.gif
--------------------------------------------------------------------------------
/doc/EKF_SLAM_unknown_correspondences.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengeYang/Probabilistic-Robotics-Algorithms/b597045a9dd86bbb93799607691d7cc3f4e7d8b8/doc/EKF_SLAM_unknown_correspondences.gif
--------------------------------------------------------------------------------
/doc/Fast_SLAM_1_known_correspondences.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengeYang/Probabilistic-Robotics-Algorithms/b597045a9dd86bbb93799607691d7cc3f4e7d8b8/doc/Fast_SLAM_1_known_correspondences.gif
--------------------------------------------------------------------------------
/doc/Fast_SLAM_1_unknown_correspondences.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengeYang/Probabilistic-Robotics-Algorithms/b597045a9dd86bbb93799607691d7cc3f4e7d8b8/doc/Fast_SLAM_1_unknown_correspondences.gif
--------------------------------------------------------------------------------
/doc/Fast_SLAM_2_unknown_correspondences.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengeYang/Probabilistic-Robotics-Algorithms/b597045a9dd86bbb93799607691d7cc3f4e7d8b8/doc/Fast_SLAM_2_unknown_correspondences.gif
--------------------------------------------------------------------------------
/doc/Graph_SLAM_known_correspondences.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengeYang/Probabilistic-Robotics-Algorithms/b597045a9dd86bbb93799607691d7cc3f4e7d8b8/doc/Graph_SLAM_known_correspondences.png
--------------------------------------------------------------------------------
/doc/Odometry.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengeYang/Probabilistic-Robotics-Algorithms/b597045a9dd86bbb93799607691d7cc3f4e7d8b8/doc/Odometry.png
--------------------------------------------------------------------------------
/doc/PF_Localization_known_correspondences.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengeYang/Probabilistic-Robotics-Algorithms/b597045a9dd86bbb93799607691d7cc3f4e7d8b8/doc/PF_Localization_known_correspondences.gif
--------------------------------------------------------------------------------