├── 1_EKF_SLAM ├── data │ ├── sensor_data.dat │ └── world.dat ├── octave │ ├── .prediction_step.m.swp │ ├── correction_step.m │ ├── correction_step.m~ │ ├── ekf_slam.m │ ├── ekf_slam.m~ │ ├── octave-workspace │ ├── prediction_step.m │ ├── prediction_step.m~ │ └── tools │ │ ├── chi2invtable.m │ │ ├── drawellipse.m │ │ ├── drawprobellipse.m │ │ ├── drawrobot.m │ │ ├── normalize_all_bearings.m │ │ ├── normalize_angle.m │ │ ├── plot_state.m │ │ ├── plot_state.m~ │ │ ├── read_data.m │ │ └── read_world.m └── plots │ └── ekf_slam.mp4 ├── 2_Unscented_Transform ├── octave │ ├── compute_sigma_points.m │ ├── compute_sigma_points.m~ │ ├── main.m │ ├── main.m~ │ ├── octave-workspace │ ├── recover_gaussian.m │ ├── recover_gaussian.m~ │ ├── tools │ │ ├── chi2invtable.m │ │ ├── drawellipse.m │ │ └── drawprobellipse.m │ ├── transform.m │ └── transform.m~ └── plots │ └── unscented.png ├── 3_UKF_SLAM ├── data │ ├── sensor_data.dat │ └── world.dat ├── octave │ ├── correction_step.m │ ├── correction_step.m~ │ ├── correction_step_bkup.m │ ├── correction_step_bkup2.m │ ├── prediction_step.m │ ├── prediction_step.m~ │ ├── prediction_step_222.m │ ├── prediction_step_bkup.m │ ├── tools │ │ ├── add_landmark_to_map.m │ │ ├── chi2invtable.m │ │ ├── compute_sigma_points.m │ │ ├── drawellipse.m │ │ ├── drawprobellipse.m │ │ ├── drawrobot.m │ │ ├── normalize_angle.m │ │ ├── plot_state.m │ │ ├── plot_state.m~ │ │ ├── read_data.m │ │ └── read_world.m │ ├── ukf_slam.m │ └── ukf_slam.m~ └── plots │ └── ukf_slam.mp4 ├── 4_Gridmapping ├── data │ ├── csail.log │ └── laser ├── gridmap_0.1.png ├── gridmap_0.5.png ├── octave │ ├── gridmap.m │ ├── gridmap.m~ │ ├── inv_sensor_model.m │ ├── inv_sensor_model.m~ │ ├── log_odds_to_prob.m │ ├── log_odds_to_prob.m~ │ ├── octave-workspace │ ├── prob_to_log_odds.m │ ├── prob_to_log_odds.m~ │ ├── tools │ │ ├── bresenham.m │ │ ├── bresenham.m~ │ │ ├── bresenham2.m │ │ ├── bresenham2.m~ │ │ ├── plot_map.m │ │ ├── plot_map.m~ │ │ ├── read_robotlaser.m │ │ ├── robotlaser_as_cartesian.m │ │ ├── robotlaser_as_cartesian.m~ │ │ ├── t2v.m │ │ └── v2t.m │ ├── world_to_map_coordinates.m │ └── world_to_map_coordinates.m~ ├── plot_0.1_resolution │ └── gridmap_res_0_1.mp4 └── plot_0.5_resolution │ └── gridmap_res_0_5.mp4 ├── 5_Particle_Filters ├── data │ └── odometry.dat ├── octave │ ├── motion.m │ ├── prediction_step.m │ ├── prediction_step.m~ │ ├── resample.m │ ├── resample.m~ │ ├── resampling.m │ └── tools │ │ ├── .svn │ │ ├── all-wcprops │ │ ├── entries │ │ ├── prop-base │ │ │ ├── plot_state.m.svn-base │ │ │ └── read_data.m.svn-base │ │ └── text-base │ │ │ ├── normalize_angle.m.svn-base │ │ │ ├── plot_state.m.svn-base │ │ │ └── read_data.m.svn-base │ │ ├── normalize_angle.m │ │ ├── plot_state.m │ │ ├── plot_state.m~ │ │ └── read_data.m └── plots │ └── pf_motion.mp4 ├── 6_FastSLAM ├── data │ ├── sensor_data.dat │ └── world.dat ├── octave │ ├── correction_step.m │ ├── correction_step.m~ │ ├── fastslam.m │ ├── fastslam.m~ │ └── tools │ │ ├── chi2invtable.m │ │ ├── drawellipse.m │ │ ├── drawprobellipse.m │ │ ├── drawrobot.m │ │ ├── measurement_model.m │ │ ├── normalize_angle.m │ │ ├── plot_state.m │ │ ├── plot_state.m~ │ │ ├── prediction_step.m │ │ ├── read_data.m │ │ ├── read_world.m │ │ └── resample.m └── plots │ └── fastslam.mp4 ├── 7_Odom_Calib_LeastSquares ├── data │ ├── odom_motions │ └── scanmatched_motions ├── octave │ ├── LSCalibrateOdometry.m │ ├── LSCalibrateOdometry.m~ │ ├── apply_odometry_correction.m │ ├── apply_odometry_correction.m~ │ ├── compute_trajectory.m │ ├── compute_trajectory.m~ │ 
├── ls_calibrate_odometry.m │ ├── ls_calibrate_odometry.m~ │ ├── octave-workspace │ └── tools │ │ ├── normalize_angle.m │ │ ├── t2v.m │ │ └── v2t.m └── plots │ └── odometry-calibration.png ├── 8_GraphSLAM ├── data │ ├── dlr.dat │ ├── intel.dat │ ├── simulation-pose-landmark.dat │ └── simulation-pose-pose.dat ├── octave │ ├── compute_global_error.m │ ├── compute_global_error.m~ │ ├── linearize_and_solve.m │ ├── linearize_and_solve.m~ │ ├── linearize_pose_landmark_constraint.m │ ├── linearize_pose_landmark_constraint.m~ │ ├── linearize_pose_pose_constraint.m │ ├── linearize_pose_pose_constraint.m~ │ ├── lsSLAM.m │ ├── lsSLAM.m~ │ ├── ls_SLAM.m~ │ ├── octave-workspace │ ├── test_jacobian_pose_landmark.m │ ├── test_jacobian_pose_pose.m │ └── tools │ │ ├── build_structure.m │ │ ├── get_block_for_id.m │ │ ├── get_poses_landmarks.m │ │ ├── invt.m │ │ ├── nnz_of_graph.m │ │ ├── normalize_angle.m │ │ ├── plot_graph.m │ │ ├── plot_graph.m~ │ │ ├── read_graph.m │ │ ├── t2v.m │ │ └── v2t.m └── plots │ ├── dlr │ ├── lsslam_000.png │ ├── lsslam_001.png │ ├── lsslam_002.png │ ├── lsslam_003.png │ ├── lsslam_004.png │ ├── lsslam_005.png │ ├── lsslam_006.png │ ├── lsslam_007.png │ ├── lsslam_008.png │ ├── lsslam_009.png │ └── lsslam_010.png │ ├── intel │ ├── lsslam_000.png │ ├── lsslam_001.png │ ├── lsslam_002.png │ ├── lsslam_003.png │ ├── lsslam_004.png │ └── lsslam_005.png │ ├── sim_pose_landmark │ ├── lsslam_000.png │ ├── lsslam_001.png │ ├── lsslam_002.png │ ├── lsslam_003.png │ └── lsslam_004.png │ └── sim_pose_pose │ ├── lsslam_000.png │ ├── lsslam_001.png │ ├── lsslam_002.png │ ├── lsslam_003.png │ ├── lsslam_004.png │ ├── lsslam_005.png │ ├── lsslam_006.png │ ├── lsslam_007.png │ ├── lsslam_008.png │ └── lsslam_009.png └── README.md /1_EKF_SLAM/data/world.dat: -------------------------------------------------------------------------------- 1 | 1 2 1 2 | 2 0 4 3 | 3 2 7 4 | 4 9 2 5 | 5 10 5 6 | 6 9 8 7 | 7 5 5 8 | 8 5 3 9 | 9 5 9 10 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/.prediction_step.m.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/1_EKF_SLAM/octave/.prediction_step.m.swp -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/correction_step.m: -------------------------------------------------------------------------------- 1 | function [mu, sigma, observedLandmarks] = correction_step(mu, sigma, z, observedLandmarks) 2 | % Updates the belief, i. e., mu and sigma after observing landmarks, according to the sensor model 3 | % The employed sensor model measures the range and bearing of a landmark 4 | % mu: 2N+3 x 1 vector representing the state mean. 5 | % The first 3 components of mu correspond to the current estimate of the robot pose [x; y; theta] 6 | % The current pose estimate of the landmark with id = j is: [mu(2*j+2); mu(2*j+3)] 7 | % sigma: 2N+3 x 2N+3 is the covariance matrix 8 | % z: struct array containing the landmark observations. 9 | % Each observation z(i) has an id z(i).id, a range z(i).range, and a bearing z(i).bearing 10 | % The vector observedLandmarks indicates which landmarks have been observed 11 | % at some point by the robot. 12 | % observedLandmarks(j) is false if the landmark with id = j has never been observed before. 
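% (Reference sketch of the measurement model implemented in the loop below; "delta" and
% "q" mirror the variables del and q used in the code.)
% For a single landmark j, with delta = [mu(2*j+2); mu(2*j+3)] - mu(1:2) and q = delta'*delta,
% the range-bearing measurement function is
%   h(mu) = [ sqrt(q) ; atan2(delta_y, delta_x) - mu(3) ]
% and its Jacobian with respect to the low-dimensional state [x; y; theta; m_jx; m_jy]
% (lifted to full state size via the Fx matrix) is
%   Hi = 1/q * [ -sqrt(q)*delta_x   -sqrt(q)*delta_y    0   sqrt(q)*delta_x   sqrt(q)*delta_y ;
%                 delta_y            -delta_x          -q  -delta_y           delta_x         ]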
13 | 14 | % Number of measurements in this time step 15 | m = size(z, 2); 16 | twoNpl3 = size(mu,1); 17 | % Z: vectorized form of all measurements made in this time step: [range_1; bearing_1; range_2; bearing_2; ...; range_m; bearing_m] 18 | % ExpectedZ: vectorized form of all expected measurements in the same form. 19 | % They are initialized here and should be filled out in the for loop below 20 | Z = zeros(m*2, 1); 21 | expectedZ = zeros(m*2, 1); 22 | 23 | % Iterate over the measurements and compute the H matrix 24 | % (stacked Jacobian blocks of the measurement function) 25 | % H will be 2m x 2N+3 26 | H = []; 27 | 28 | for i = 1:m 29 | % Get the id of the landmark corresponding to the i-th observation 30 | landmarkId = z(i).id; 31 | % If the landmark is obeserved for the first time: 32 | if(observedLandmarks(landmarkId)==false) 33 | % TODO: Initialize its pose in mu based on the measurement and the current robot pose: 34 | mu(2*landmarkId+2:2*landmarkId+3,1) = mu(1:2,1) + [z(i).range*cos(z(i).bearing + mu(3,1)) ; z(i).range*sin(z(i).bearing+mu(3,1))]; 35 | % Indicate in the observedLandmarks vector that this landmark has been observed 36 | observedLandmarks(landmarkId) = true; 37 | endif 38 | 39 | % TODO: Add the landmark measurement to the Z vector 40 | Z(2*i-1:2*i,1) = [z(i).range;z(i).bearing] 41 | % TODO: Use the current estimate of the landmark pose 42 | % to compute the corresponding expected measurement in expectedZ: 43 | del = mu(2*landmarkId+2:2*landmarkId+3,1) - mu(1:2,1); 44 | q = del'*del; 45 | expectedZ(2*i-1:2*i,1) = [sqrt(q);normalize_angle(atan2(del(2,1),del(1,1)) - mu(3,1))] 46 | % TODO: Compute the Jacobian Hi of the measurement function h for this observation 47 | Fx = zeros(5,twoNpl3); 48 | Fx(1:3,1:3) = eye(3); 49 | Fx(4,2*landmarkId+2) = 1; 50 | Fx(5,2*landmarkId+3) = 1; 51 | Hi = 1/q*[-sqrt(q)*del(1,1) -sqrt(q)*del(2,1) 0 sqrt(q)*del(1,1) sqrt(q)*del(2,1); del(2,1) -del(1,1) -q -del(2,1) del(1,1)]; 52 | Hi = Hi*Fx; 53 | % Augment H with the new Hi 54 | H = [H;Hi]; 55 | size(H) 56 | endfor 57 | 58 | % TODO: Construct the sensor noise matrix Q 59 | Q = 0.01*eye(2*m); 60 | % TODO: Compute the Kalman gain 61 | m 62 | K = sigma*H'/(H*sigma*H' + Q); 63 | % TODO: Compute the difference between the expected and recorded measurements. 64 | % Remember to normalize the bearings after subtracting! 65 | % (hint: use the normalize_all_bearings function available in tools) 66 | delZ = normalize_all_bearings(Z - expectedZ); 67 | % TODO: Finish the correction step by computing the new mu and sigma. 68 | % Normalize theta in the robot pose. 69 | mu = mu + K*delZ; 70 | sigma = (eye(twoNpl3) - K*H)*sigma; 71 | end 72 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/correction_step.m~: -------------------------------------------------------------------------------- 1 | function [mu, sigma, observedLandmarks] = correction_step(mu, sigma, z, observedLandmarks) 2 | % Updates the belief, i. e., mu and sigma after observing landmarks, according to the sensor model 3 | % The employed sensor model measures the range and bearing of a landmark 4 | % mu: 2N+3 x 1 vector representing the state mean. 5 | % The first 3 components of mu correspond to the current estimate of the robot pose [x; y; theta] 6 | % The current pose estimate of the landmark with id = j is: [mu(2*j+2); mu(2*j+3)] 7 | % sigma: 2N+3 x 2N+3 is the covariance matrix 8 | % z: struct array containing the landmark observations. 
9 | % Each observation z(i) has an id z(i).id, a range z(i).range, and a bearing z(i).bearing 10 | % The vector observedLandmarks indicates which landmarks have been observed 11 | % at some point by the robot. 12 | % observedLandmarks(j) is false if the landmark with id = j has never been observed before. 13 | 14 | % Number of measurements in this time step 15 | m = size(z, 2); 16 | twoNpl3 = size(mu,1); 17 | % Z: vectorized form of all measurements made in this time step: [range_1; bearing_1; range_2; bearing_2; ...; range_m; bearing_m] 18 | % ExpectedZ: vectorized form of all expected measurements in the same form. 19 | % They are initialized here and should be filled out in the for loop below 20 | Z = zeros(m*2, 1); 21 | expectedZ = zeros(m*2, 1); 22 | 23 | % Iterate over the measurements and compute the H matrix 24 | % (stacked Jacobian blocks of the measurement function) 25 | % H will be 2m x 2N+3 26 | H = []; 27 | 28 | for i = 1:m 29 | % Get the id of the landmark corresponding to the i-th observation 30 | landmarkId = z(i).id; 31 | % If the landmark is obeserved for the first time: 32 | if(observedLandmarks(landmarkId)==false) 33 | % TODO: Initialize its pose in mu based on the measurement and the current robot pose: 34 | mu(2*landmarkId+2:2*landmarkId+3,1) = mu(1:2,1) + [z(i).range*cos(z(i).bearing + mu(3,1)) ; z(i).range*sin(z(i).bearing+mu(3,1))]; 35 | % Indicate in the observedLandmarks vector that this landmark has been observed 36 | observedLandmarks(landmarkId) = true; 37 | endif 38 | 39 | % TODO: Add the landmark measurement to the Z vector 40 | Z(2*i-1:2*i,1) = [z(i).range;z(i).bearing] 41 | % TODO: Use the current estimate of the landmark pose 42 | % to compute the corresponding expected measurement in expectedZ: 43 | del = mu(2*landmarkId+2:2*landmarkId+3,1) - mu(1:2,1); 44 | q = del'*del; 45 | expectedZ(2*i-1:2*i,1) = [sqrt(q);normalize_angle(atan2(del(2,1),del(1,1)) - mu(3,1))] 46 | % TODO: Compute the Jacobian Hi of the measurement function h for this observation 47 | Fx = zeros(5,twoNpl3); 48 | Fx(1:3,1:3) = eye(3); 49 | Fx(4,2*landmarkId+2) = 1; 50 | Fx(4,2*landmarkId+3) = 1; 51 | Hi = 1/q*[-sqrt(q)*del(1,1) -sqrt(q)*del(2,1) 0 sqrt(q)*del(1,1) sqrt(q)*del(2,1); del(2,1) -del(1,1) -q -del(2,1) del(1,1)]; 52 | Hi = Hi*Fx; 53 | % Augment H with the new Hi 54 | H = [H;Hi]; 55 | size(H) 56 | endfor 57 | 58 | % TODO: Construct the sensor noise matrix Q 59 | Q = 0.01*eye(2*m); 60 | % TODO: Compute the Kalman gain 61 | m 62 | K = sigma*H'/(H*sigma*H' + Q); 63 | % TODO: Compute the difference between the expected and recorded measurements. 64 | % Remember to normalize the bearings after subtracting! 65 | % (hint: use the normalize_all_bearings function available in tools) 66 | delZ = normalize_all_bearings(Z - expectedZ); 67 | % TODO: Finish the correction step by computing the new mu and sigma. 68 | % Normalize theta in the robot pose. 69 | mu = mu + K*delZ; 70 | sigma = (eye(twoNpl3) - K*H)*sigma; 71 | end 72 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/ekf_slam.m: -------------------------------------------------------------------------------- 1 | % This is the main extended Kalman filter SLAM loop. This script calls all the required 2 | % functions in the correct order. 3 | % 4 | % You can disable the plotting or change the number of steps the filter 5 | % runs for to ease the debugging. You should however not change the order 6 | % or calls of any of the other lines, as it might break the framework. 
7 | % 8 | % If you are unsure about the input and return values of functions you 9 | % should read their documentation which tells you the expected dimensions. 10 | 11 | % Turn off pagination: 12 | more off; 13 | 14 | % clear all variables and close all windows 15 | clear all; 16 | close all; 17 | 18 | % Make tools available 19 | addpath('tools'); 20 | 21 | % Read world data, i.e. landmarks. The true landmark positions are not given to the robot 22 | landmarks = read_world('../data/world.dat'); 23 | % load landmarks; 24 | % Read sensor readings, i.e. odometry and range-bearing sensor 25 | data = read_data('../data/sensor_data.dat'); 26 | %load data; 27 | 28 | INF = 1000; 29 | % Get the number of landmarks in the map 30 | N = size(landmarks,2); 31 | 32 | % observedLandmarks is a vector that keeps track of which landmarks have been observed so far. 33 | % observedLandmarks(i) will be true if the landmark with id = i has been observed at some point by the robot 34 | observedLandmarks = repmat(false,1,N); 35 | 36 | % Initialize belief: 37 | % mu: 2N+3x1 vector representing the mean of the normal distribution 38 | % The first 3 components of mu correspond to the pose of the robot, 39 | % and the landmark poses (xi, yi) are stacked in ascending id order. 40 | % sigma: (2N+3)x(2N+3) covariance matrix of the normal distribution 41 | mu = repmat([0.0], (2*N+3), 1); 42 | robSigma = zeros(3); 43 | robMapSigma = zeros(3,2*N); 44 | mapSigma = INF*eye(2*N); 45 | sigma = [[robSigma robMapSigma];[robMapSigma' mapSigma]]; 46 | 47 | % toogle the visualization type 48 | %showGui = true; % show a window while the algorithm runs 49 | showGui = false; % plot to files instead 50 | 51 | % Perform filter update for each odometry-observation pair read from the 52 | % data file. 53 | for t = 1:size(data.timestep, 2) 54 | %for t = 1:80 55 | 56 | % Perform the prediction step of the EKF 57 | [mu, sigma] = prediction_step(mu, sigma, data.timestep(t).odometry); 58 | 59 | % Perform the correction step of the EKF 60 | [mu, sigma, observedLandmarks] = correction_step(mu, sigma, data.timestep(t).sensor, observedLandmarks); 61 | 62 | %Generate visualization plots of the current state of the filter 63 | plot_state(mu, sigma, landmarks, t, observedLandmarks, data.timestep(t).sensor, showGui); 64 | disp("Current state vector:") 65 | disp("mu = "), disp(mu) 66 | endfor 67 | 68 | disp("Final system covariance matrix:"), disp(sigma) 69 | % Display the final state estimate 70 | disp("Final robot pose:") 71 | disp("mu_robot = "), disp(mu(1:3)), disp("sigma_robot = "), disp(sigma(1:3,1:3)) 72 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/ekf_slam.m~: -------------------------------------------------------------------------------- 1 | % This is the main extended Kalman filter SLAM loop. This script calls all the required 2 | % functions in the correct order. 3 | % 4 | % You can disable the plotting or change the number of steps the filter 5 | % runs for to ease the debugging. You should however not change the order 6 | % or calls of any of the other lines, as it might break the framework. 7 | % 8 | % If you are unsure about the input and return values of functions you 9 | % should read their documentation which tells you the expected dimensions. 10 | 11 | % Turn off pagination: 12 | more off; 13 | 14 | % clear all variables and close all windows 15 | clear all; 16 | close all; 17 | 18 | % Make tools available 19 | addpath('tools'); 20 | 21 | % Read world data, i.e. landmarks. 
The true landmark positions are not given to the robot 22 | landmarks = read_world('../data/world.dat'); 23 | % load landmarks; 24 | % Read sensor readings, i.e. odometry and range-bearing sensor 25 | data = read_data('../data/sensor_data.dat'); 26 | %load data; 27 | 28 | INF = 1000; 29 | % Get the number of landmarks in the map 30 | N = size(landmarks,2); 31 | 32 | % observedLandmarks is a vector that keeps track of which landmarks have been observed so far. 33 | % observedLandmarks(i) will be true if the landmark with id = i has been observed at some point by the robot 34 | observedLandmarks = repmat(false,1,N); 35 | 36 | % Initialize belief: 37 | % mu: 2N+3x1 vector representing the mean of the normal distribution 38 | % The first 3 components of mu correspond to the pose of the robot, 39 | % and the landmark poses (xi, yi) are stacked in ascending id order. 40 | % sigma: (2N+3)x(2N+3) covariance matrix of the normal distribution 41 | mu = repmat([0.0], (2*N+3), 1); 42 | robSigma = zeros(3); 43 | robMapSigma = zeros(3,2*N); 44 | mapSigma = INF*eye(2*N); 45 | sigma = [[robSigma robMapSigma];[robMapSigma' mapSigma]]; 46 | 47 | % toogle the visualization type 48 | showGui = true; % show a window while the algorithm runs 49 | %showGui = false; % plot to files instead 50 | 51 | % Perform filter update for each odometry-observation pair read from the 52 | % data file. 53 | for t = 1:size(data.timestep, 2) 54 | %for t = 1:80 55 | 56 | % Perform the prediction step of the EKF 57 | [mu, sigma] = prediction_step(mu, sigma, data.timestep(t).odometry); 58 | 59 | % Perform the correction step of the EKF 60 | [mu, sigma, observedLandmarks] = correction_step(mu, sigma, data.timestep(t).sensor, observedLandmarks); 61 | 62 | %Generate visualization plots of the current state of the filter 63 | plot_state(mu, sigma, landmarks, t, observedLandmarks, data.timestep(t).sensor, showGui); 64 | disp("Current state vector:") 65 | disp("mu = "), disp(mu) 66 | endfor 67 | 68 | disp("Final system covariance matrix:"), disp(sigma) 69 | % Display the final state estimate 70 | disp("Final robot pose:") 71 | disp("mu_robot = "), disp(mu(1:3)), disp("sigma_robot = "), disp(sigma(1:3,1:3)) 72 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/octave-workspace: -------------------------------------------------------------------------------- 1 | Octave-1-L -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/prediction_step.m: -------------------------------------------------------------------------------- 1 | function [mu, sigma] = prediction_step(mu, sigma, u) 2 | % Updates the belief concerning the robot pose according to the motion model, 3 | % mu: 2N+3 x 1 vector representing the state mean 4 | % sigma: 2N+3 x 2N+3 covariance matrix 5 | % u: odometry reading (r1, t, r2) 6 | % Use u.r1, u.t, and u.r2 to access the rotation and translation values 7 | 8 | % TODO: Compute the new mu based on the noise-free (odometry-based) motion model 9 | % Remember to normalize theta after the update (hint: use the function normalize_angle available in tools) 10 | twoNpl3 = size(mu,1); 11 | th_old = mu(3,1); 12 | Fx = [eye(3) zeros(3,twoNpl3-3)]; 13 | xtmp = [u.t*cos(th_old + u.r1); u.t*sin(th_old + u.r1); normalize_angle(u.r1 + u.r2)]; 14 | mu = mu + Fx'*xtmp; 15 | mu(3) = normalize_angle(mu(3)); 16 | 17 | % TODO: Compute the 3x3 Jacobian Gx of the motion model 18 | Gtx = [zeros(3,2) [-u.t*sin(th_old + u.r1); u.t*cos(th_old + u.r1); 0]]; 
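% (Illustrative note: the full 3x3 Jacobian of the odometry motion model with respect
% to the robot pose [x; y; theta] is
%   Gx = [ 1  0  -u.t*sin(th_old + u.r1) ;
%          0  1   u.t*cos(th_old + u.r1) ;
%          0  0   1                      ]
% Gtx above stores only its deviation from the identity, Gx - eye(3), so that the
% full-state Jacobian can be written compactly as Gt = eye(2N+3) + Fx'*Gtx*Fx below.)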
19 | 20 | % TODO: Construct the full Jacobian G 21 | Gt = eye(twoNpl3) + Fx'*Gtx*Fx; 22 | 23 | % Motion noise 24 | motionNoise = 0.1; 25 | R3 = [motionNoise, 0, 0; 26 | 0, motionNoise, 0; 27 | 0, 0, motionNoise/10]; 28 | R = zeros(size(sigma,1)); 29 | R(1:3,1:3) = R3; 30 | 31 | % TODO: Compute the predicted sigma after incorporating the motion 32 | sigma = Gt*sigma*Gt' + R; 33 | 34 | end 35 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/prediction_step.m~: -------------------------------------------------------------------------------- 1 | function [mu, sigma] = prediction_step(mu, sigma, u) 2 | % Updates the belief concerning the robot pose according to the motion model, 3 | % mu: 2N+3 x 1 vector representing the state mean 4 | % sigma: 2N+3 x 2N+3 covariance matrix 5 | % u: odometry reading (r1, t, r2) 6 | % Use u.r1, u.t, and u.r2 to access the rotation and translation values 7 | 8 | % TODO: Compute the new mu based on the noise-free (odometry-based) motion model 9 | % Remember to normalize theta after the update (hint: use the function normalize_angle available in tools) 10 | twoNpl3 = size(mu,1); 11 | th_old = mu(3,1); 12 | Fx = [eye(3) zeros(3,twoNpl3-3)]; 13 | xtmp = [u.t*cos(th_old + u.r1); u.t*sin(th_old + u.r1); normalize_angle(u.r1 + u.r2)]; 14 | mu = mu + Fx'*xtmp; 15 | mu(3) = normalize_angle(mu(3)); 16 | 17 | % TODO: Compute the 3x3 Jacobian Gx of the motion model 18 | Gtx = [zeros(3,2) [-u.t*sin(th_old + u.r1); u.t*cos(th_old + u.r1); 0]]; 19 | 20 | % TODO: Construct the full Jacobian G 21 | Gt = eye(twoNpl3) + Fx'*Gtx*Fx; 22 | 23 | % Motion noise 24 | motionNoise = 0.1; 25 | R3 = [motionNoise, 0, 0; 26 | 0, motionNoise, 0; 27 | 0, 0, motionNoise/10]; 28 | R = zeros(size(sigma,1)); 29 | R(1:3,1:3) = R3; 30 | 31 | % TODO: Compute the predicted sigma after incorporating the motion 32 | sigma = Gt*sigma*Gt' + R; 33 | 34 | end 35 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/tools/drawellipse.m: -------------------------------------------------------------------------------- 1 | %DRAWELLIPSE Draw ellipse. 2 | % DRAWELLIPSE(X,A,B,COLOR) draws an ellipse at X = [x y theta] 3 | % with half axes A and B. Theta is the inclination angle of A, 4 | % regardless if A is smaller or greater than B. COLOR is a 5 | % [r g b]-vector or a color string such as 'r' or 'g'. 6 | % 7 | % H = DRAWELLIPSE(...) returns the graphic handle H. 8 | % 9 | % See also DRAWPROBELLIPSE. 10 | 11 | % v.1.0-v.1.1, Aug.97-Jan.03, Kai Arras, ASL-EPFL 12 | % v.1.2, 03.12.03, Kai Arras, CAS-KTH: (x,a,b) interface 13 | 14 | 15 | function h = drawellipse(x,a,b,color); 16 | 17 | % Constants 18 | NPOINTS = 100; % point density or resolution 19 | 20 | % Compose point vector 21 | ivec = 0:2*pi/NPOINTS:2*pi; % index vector 22 | p(1,:) = a*cos(ivec); % 2 x n matrix which 23 | p(2,:) = b*sin(ivec); % hold ellipse points 24 | 25 | % Translate and rotate 26 | xo = x(1); yo = x(2); angle = x(3); 27 | R = [cos(angle) -sin(angle); sin(angle) cos(angle)]; 28 | T = [xo; yo]*ones(1,length(ivec)); 29 | p = R*p + T; 30 | 31 | % Plot 32 | h = plot(p(1,:),p(2,:),'Color',color, 'linewidth', 2); 33 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/tools/drawprobellipse.m: -------------------------------------------------------------------------------- 1 | %DRAWPROBELLIPSE Draw elliptic probability region of a Gaussian in 2D. 
2 | % DRAWPROBELLIPSE(X,C,ALPHA,COLOR) draws the elliptic iso-probabi- 3 | % lity contour of a Gaussian distributed bivariate random vector X 4 | % at the significance level ALPHA. The ellipse is centered at X = 5 | % [x; y] where C is the associated 2x2 covariance matrix. COLOR is 6 | % a [r g b]-vector or a color string such as 'r' or 'g'. 7 | % 8 | % X and C can also be of size 3x1 and 3x3 respectively. 9 | % 10 | % For proper scaling, the function CHI2INVTABLE is employed to 11 | % avoid the use of CHI2INV from the Matlab statistics toolbox. 12 | % 13 | % In case of a negative definite matrix C, the ellipse collapses 14 | % to a line which is drawn instead. 15 | % 16 | % H = DRAWPROBELLIPSE(...) returns the graphic handle H. 17 | % 18 | % See also DRAWELLIPSE, CHI2INVTABLE, CHI2INV. 19 | 20 | % v.1.0-v.1.3, 97-Jan.03, Kai Arras, ASL-EPFL 21 | % v.1.4, 03.12.03, Kai Arras, CAS-KTH: toolbox version 22 | 23 | 24 | function h = drawprobellipse(x,C,alpha,color); 25 | 26 | % Calculate unscaled half axes 27 | sxx = C(1,1); syy = C(2,2); sxy = C(1,2); 28 | a = sqrt(0.5*(sxx+syy+sqrt((sxx-syy)^2+4*sxy^2))); % always greater 29 | b = sqrt(0.5*(sxx+syy-sqrt((sxx-syy)^2+4*sxy^2))); % always smaller 30 | 31 | % Remove imaginary parts in case of neg. definite C 32 | if ~isreal(a), a = real(a); end; 33 | if ~isreal(b), b = real(b); end; 34 | 35 | % Scaling in order to reflect specified probability 36 | a = a*sqrt(chi2invtable(alpha,2)); 37 | b = b*sqrt(chi2invtable(alpha,2)); 38 | 39 | % Look where the greater half axis belongs to 40 | if sxx < syy, swap = a; a = b; b = swap; end; 41 | 42 | % Calculate inclination (numerically stable) 43 | if sxx ~= syy, 44 | angle = 0.5*atan(2*sxy/(sxx-syy)); 45 | elseif sxy == 0, 46 | angle = 0; % angle doesn't matter 47 | elseif sxy > 0, 48 | angle = pi/4; 49 | elseif sxy < 0, 50 | angle = -pi/4; 51 | end; 52 | x(3) = angle; 53 | 54 | % Draw ellipse 55 | h = drawellipse(x,a,b,color); -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/tools/normalize_all_bearings.m: -------------------------------------------------------------------------------- 1 | function [zNorm] = normalize_all_bearings(z) 2 | % Go over the observations vector and normalize the bearings 3 | % The expected format of z is [range; bearing; range; bearing; ...] 4 | 5 | for(i=2:2:length(z)) 6 | z(i) = normalize_angle(z(i)); 7 | endfor 8 | zNorm = z; 9 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/tools/normalize_angle.m: -------------------------------------------------------------------------------- 1 | function [phiNorm] = normalize_angle(phi) 2 | %Normalize phi to be between -pi and pi 3 | 4 | while(phi>pi) 5 | phi = phi - 2*pi; 6 | endwhile 7 | 8 | while(phi<-pi) 9 | phi = phi + 2*pi; 10 | endwhile 11 | phiNorm = phi; 12 | 13 | end 14 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/tools/plot_state.m: -------------------------------------------------------------------------------- 1 | function plot_state(mu, sigma, landmarks, timestep, observedLandmarks, z, window) 2 | % Visualizes the state of the EKF SLAM algorithm. 
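% (Side note on the uncertainty ellipses drawn below: they are produced by
% drawprobellipse with alpha = 0.6, i.e. they enclose roughly 60% of the probability
% mass. drawprobellipse takes the half axes from the eigenvalues of the 2x2 covariance
% block, a^2, b^2 = 0.5*( sxx + syy +/- sqrt((sxx - syy)^2 + 4*sxy^2) ), and scales
% them by sqrt(chi2invtable(alpha, 2)).)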
3 | % 4 | % The resulting plot displays the following information: 5 | % - map ground truth (black +'s) 6 | % - current robot pose estimate (red) 7 | % - current landmark pose estimates (blue) 8 | % - visualization of the observations made at this time step (line between robot and landmark) 9 | 10 | clf; 11 | hold on 12 | grid("on") 13 | L = struct2cell(landmarks); 14 | drawprobellipse(mu(1:3), sigma(1:3,1:3), 0.6, 'r'); 15 | plot(cell2mat(L(2,:)), cell2mat(L(3,:)), 'k+', 'markersize', 10, 'linewidth', 5); 16 | for(i=1:length(observedLandmarks)) 17 | if(observedLandmarks(i)) 18 | plot(mu(2*i+ 2),mu(2*i+ 3), 'bo', 'markersize', 10, 'linewidth', 5) 19 | drawprobellipse(mu(2*i+ 2:2*i+ 3), sigma(2*i+ 2:2*i+ 3,2*i+ 2:2*i+ 3), 0.6, 'b'); 20 | endif 21 | endfor 22 | 23 | for(i=1:size(z,2)) 24 | mX = mu(2*z(i).id+2); 25 | mY = mu(2*z(i).id+3); 26 | line([mu(1), mX],[mu(2), mY], 'color', 'k', 'linewidth', 1); 27 | endfor 28 | 29 | drawrobot(mu(1:3), 'r', 3, 0.3, 0.3); 30 | xlim([-2, 12]) 31 | ylim([-2, 12]) 32 | hold off; 33 | 34 | if window 35 | figure(1, "visible", "on"); 36 | drawnow; 37 | pause(0.1); 38 | else 39 | % figure(1, "visible", "off"); 40 | filename = sprintf('../plots/ekf_%03d.png', timestep); 41 | print(filename, '-dpng'); 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/tools/plot_state.m~: -------------------------------------------------------------------------------- 1 | function plot_state(mu, sigma, landmarks, timestep, observedLandmarks, z, window) 2 | % Visualizes the state of the EKF SLAM algorithm. 3 | % 4 | % The resulting plot displays the following information: 5 | % - map ground truth (black +'s) 6 | % - current robot pose estimate (red) 7 | % - current landmark pose estimates (blue) 8 | % - visualization of the observations made at this time step (line between robot and landmark) 9 | 10 | clf; 11 | hold on 12 | grid("on") 13 | L = struct2cell(landmarks); 14 | drawprobellipse(mu(1:3), sigma(1:3,1:3), 0.6, 'r'); 15 | plot(cell2mat(L(2,:)), cell2mat(L(3,:)), 'k+', 'markersize', 10, 'linewidth', 5); 16 | for(i=1:length(observedLandmarks)) 17 | if(observedLandmarks(i)) 18 | plot(mu(2*i+ 2),mu(2*i+ 3), 'bo', 'markersize', 10, 'linewidth', 5) 19 | drawprobellipse(mu(2*i+ 2:2*i+ 3), sigma(2*i+ 2:2*i+ 3,2*i+ 2:2*i+ 3), 0.6, 'b'); 20 | endif 21 | endfor 22 | 23 | for(i=1:size(z,2)) 24 | mX = mu(2*z(i).id+2); 25 | mY = mu(2*z(i).id+3); 26 | line([mu(1), mX],[mu(2), mY], 'color', 'k', 'linewidth', 1); 27 | endfor 28 | 29 | drawrobot(mu(1:3), 'r', 3, 0.3, 0.3); 30 | xlim([-2, 12]) 31 | ylim([-2, 12]) 32 | hold off; 33 | 34 | if window 35 | figure(1, "visible", "on"); 36 | drawnow; 37 | pause(0.1); 38 | else 39 | % figure(1, "visible", "off"); 40 | filename = sprintf('../plots/ekf_%03d.png', timestep); 41 | print(filename, '-dpng'); 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/tools/read_data.m: -------------------------------------------------------------------------------- 1 | function data = read_data(filename) 2 | % Reads the odometry and sensor readings from a file. 3 | % 4 | % filename: path to the file to parse 5 | % data: structure containing the parsed information 6 | % 7 | % The data is returned in a structure where the u_t and z_t are stored 8 | % within a single entry. A z_t can contain observations of multiple 9 | % landmarks. 
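% The parser below expects whitespace-separated lines of the form (the numbers here
% are made up, purely for illustration):
%   ODOMETRY 0.1 0.5 0.05      -> r1 t r2
%   SENSOR 1 1.8 0.3           -> id range bearing
%   SENSOR 2 4.2 -1.1
% Each ODOMETRY line starts a new timestep; the SENSOR lines that follow it are
% grouped into data.timestep(i).sensor.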
10 | % 11 | % Usage: 12 | % - access the readings for timestep i: 13 | % data.timestep(i) 14 | % this returns a structure containing the odometry reading and all 15 | % landmark obsevations, which can be accessed as follows 16 | % - odometry reading at timestep i: 17 | % data.timestep(i).odometry 18 | % - senor reading at timestep i: 19 | % data.timestep(i).sensor 20 | % 21 | % Odometry readings have the following fields: 22 | % - r1 : rotation 1 23 | % - t : translation 24 | % - r2 : rotation 2 25 | % which correspond to the identically labeled variables in the motion 26 | % mode. 27 | % 28 | % Sensor readings can again be indexed and each of the entris has the 29 | % following fields: 30 | % - id : id of the observed landmark 31 | % - range : measured range to the landmark 32 | % - bearing : measured angle to the landmark (you can ignore this) 33 | % 34 | % Examples: 35 | % - Translational component of the odometry reading at timestep 10 36 | % data.timestep(10).odometry.t 37 | % - Measured range to the second landmark observed at timestep 4 38 | % data.timestep(4).sensor(2).range 39 | input = fopen(filename); 40 | 41 | data = struct; 42 | data.timestep.sensor = struct; 43 | first = 1; 44 | 45 | odom = struct; 46 | sensor = struct; 47 | 48 | while(!feof(input)) 49 | line = fgetl(input); 50 | arr = strsplit(line, ' '); 51 | type = deblank(arr{1}); 52 | 53 | if(strcmp(type, 'ODOMETRY') == 1) 54 | if(first == 0) 55 | data.timestep(end+1).odometry = odom; 56 | data.timestep(end).sensor = sensor(2:end); 57 | odom = struct; 58 | sensor = struct; 59 | endif 60 | first = 0; 61 | odom.r1 = str2double(arr{2}); 62 | odom.t = str2double(arr{3}); 63 | odom.r2 = str2double(arr{4}); 64 | elseif(strcmp(type, 'SENSOR') == 1) 65 | reading = struct; 66 | reading.id = str2double(arr{2}); 67 | reading.range = str2double(arr{3}); 68 | reading.bearing = str2double(arr{4}); 69 | sensor(end+1) = reading; 70 | endif 71 | endwhile 72 | 73 | data.timestep = data.timestep(2:end); 74 | 75 | fclose(input); 76 | end 77 | -------------------------------------------------------------------------------- /1_EKF_SLAM/octave/tools/read_world.m: -------------------------------------------------------------------------------- 1 | function landmarks = read_world(filename) 2 | % Reads the world definition and returns a structure of landmarks. 
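% The file is parsed line by line; each line holds three whitespace-separated values
%   id x y
% e.g. the first line of data/world.dat, "1 2 1", becomes the landmark with id 1 at (2, 1).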
3 | % 4 | % filename: path of the file to load 5 | % landmarks: structure containing the parsed information 6 | % 7 | % Each landmark contains the following information: 8 | % - id : id of the landmark 9 | % - x : x-coordinate 10 | % - y : y-coordinate 11 | % 12 | % Examples: 13 | % - Obtain x-coordinate of the 5-th landmark 14 | % landmarks(5).x 15 | input = fopen(filename); 16 | 17 | landmarks = struct; 18 | 19 | while(!feof(input)) 20 | line = fgetl(input); 21 | data = strsplit(line, ' '); 22 | 23 | landmark = struct( 24 | "id", str2double(data{1}), 25 | "x" , str2double(data{2}), 26 | "y" , str2double(data{3}) 27 | ); 28 | landmarks(end+1) = landmark; 29 | endwhile 30 | 31 | landmarks = landmarks(2:end); 32 | 33 | fclose(input); 34 | end 35 | -------------------------------------------------------------------------------- /1_EKF_SLAM/plots/ekf_slam.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/1_EKF_SLAM/plots/ekf_slam.mp4 -------------------------------------------------------------------------------- /2_Unscented_Transform/octave/compute_sigma_points.m: -------------------------------------------------------------------------------- 1 | function [sigma_points, w_m, w_c] = compute_sigma_points(mu, sigma, lambda, alpha, beta) 2 | % This function samples 2n+1 sigma points from the distribution given by mu and sigma 3 | % according to the unscented transform, where n is the dimensionality of mu. 4 | % Each column of sigma_points should represent one sigma point 5 | % i.e. sigma_points has a dimensionality of nx2n+1. 6 | % The corresponding weights w_m and w_c of the points are computed using lambda, alpha, and beta: 7 | % w_m = [w_m_0, ..., w_m_2n], w_c = [w_c_0, ..., w_c_2n] (i.e. each of size 1x2n+1) 8 | % They are later used to recover the mean and covariance respectively. 9 | 10 | n = length(mu); 11 | sigma_points = zeros(n,2*n+1); 12 | w_m = zeros(1,2*n+1); 13 | w_c = zeros(1,2*n+1); 14 | 15 | % TODO: compute all sigma points 16 | sigma_points(:,1) = mu; 17 | addTerm = sqrtm((n+lambda)*sigma); 18 | for i=1:n 19 | sigma_points(:,i+1) = mu + addTerm(:,i); 20 | endfor 21 | for i=n+1:2*n 22 | sigma_points(:,i+1) = mu - addTerm(:,i-n); 23 | endfor 24 | 25 | % TODO compute weight vectors w_m and w_c 26 | w_m(1,1) = lambda/(n+lambda); 27 | w_c(1,1) = w_m(1) + (1 - alpha*alpha + beta); 28 | w_m(1,2:end) = 1/(2*(n+lambda)); 29 | w_c(1,2:end) = 1/(2*(n+lambda)); 30 | end 31 | -------------------------------------------------------------------------------- /2_Unscented_Transform/octave/compute_sigma_points.m~: -------------------------------------------------------------------------------- 1 | function [sigma_points, w_m, w_c] = compute_sigma_points(mu, sigma, lambda, alpha, beta) 2 | % This function samples 2n+1 sigma points from the distribution given by mu and sigma 3 | % according to the unscented transform, where n is the dimensionality of mu. 4 | % Each column of sigma_points should represent one sigma point 5 | % i.e. sigma_points has a dimensionality of nx2n+1. 6 | % The corresponding weights w_m and w_c of the points are computed using lambda, alpha, and beta: 7 | % w_m = [w_m_0, ..., w_m_2n], w_c = [w_c_0, ..., w_c_2n] (i.e. each of size 1x2n+1) 8 | % They are later used to recover the mean and covariance respectively. 
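% (Summary of the equations implemented in compute_sigma_points.m above.)
% The 2n+1 sigma points and their weights follow the standard unscented transform:
%   X_0 = mu,   X_i = mu + ( sqrtm((n+lambda)*Sigma) )_i,   X_(n+i) = mu - ( sqrtm((n+lambda)*Sigma) )_i,   i = 1..n
%   w_m^0 = lambda/(n+lambda),   w_c^0 = w_m^0 + (1 - alpha^2 + beta),
%   w_m^i = w_c^i = 1/(2*(n+lambda))   for i = 1..2n
% where ( . )_i denotes the i-th column of the matrix square root.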
9 | 10 | n = length(mu); 11 | sigma_points = zeros(n,2*n+1); 12 | w_m = zeros(1,2*n+1); 13 | w_c = zeros(1,2*n+1); 14 | 15 | % TODO: compute all sigma points 16 | sigma_points(:,1) = mu; 17 | addTerm = sqrtm((n+lambda)*sigma); 18 | for i=1:n 19 | sigma_points(:,i+1) = mu + addTerm(:,i); 20 | endfor 21 | for i=n+1:2*n 22 | sigma_points(:,i+1) = mu - addTerm(:,i-n); 23 | endfor 24 | 25 | % TODO compute weight vectors w_m and w_c 26 | w_m(1) = lambda/(n+lambda); 27 | w_c(1) = w_m(1) + (1 - alpha*alpha + beta); 28 | %w_m(2) = 1/(2(n+lambda)); 29 | %w_c(2) = 1/(2(n+lambda)); 30 | end 31 | -------------------------------------------------------------------------------- /2_Unscented_Transform/octave/main.m: -------------------------------------------------------------------------------- 1 | % This is the main script for computing a transformed distribution according 2 | % to the unscented transform. This script calls all the required 3 | % functions in the correct order. 4 | % If you are unsure about the input and return values of functions you 5 | % should read their documentation which tells you the expected dimensions. 6 | 7 | % Turn off pagination and open a new figure for plotting 8 | more off 9 | close all 10 | figure 11 | hold on 12 | grid 13 | 14 | % Make tools available 15 | addpath('tools'); 16 | 17 | % Initial distribution 18 | sigma = 0.1*eye(2); 19 | mu = [1;2]; 20 | n = length(mu); 21 | 22 | % Compute lambda 23 | alpha = 0.9; 24 | beta = 2; 25 | kappa = 1; 26 | lambda = alpha*alpha*(n+kappa)-n; 27 | 28 | % Compute the sigma points corresponding to mu and sigma 29 | [sigma_points, w_m, w_c] = compute_sigma_points(mu, sigma, lambda, alpha, beta); 30 | 31 | % Plot original distribution with sampled sigma points 32 | plot(mu(1),mu(2),'ro','markersize',12, 'linewidth',3) 33 | legend('original distribution') 34 | drawprobellipse(mu, sigma, 0.9, 'r'); 35 | plot(sigma_points(1,:),sigma_points(2,:),'kx','markersize', 10, 'linewidth',3) 36 | 37 | % Transform sigma points 38 | sigma_points_trans = transform(sigma_points); 39 | 40 | % Recover mu and sigma of the transformed distribution 41 | [mu_trans, sigma_trans] = recover_gaussian(sigma_points_trans, w_m, w_c); 42 | 43 | % Plot transformed sigma points with corresponding mu and sigma 44 | plot(mu_trans(1),mu_trans(2),'bo','markersize', 12, 'linewidth',3) 45 | legend('transformed distribution') 46 | drawprobellipse(mu_trans, sigma_trans, 0.9, 'b'); 47 | plot(sigma_points_trans(1,:),sigma_points_trans(2,:),'kx','markersize', 10, 'linewidth',3) 48 | 49 | % Figure axes setup 50 | title('Unscented Transform', 'fontsize', 20) 51 | x_min = min(mu(1),mu_trans(1)); 52 | x_max = max(mu(1),mu_trans(1)); 53 | y_min = min(mu(2),mu_trans(2)); 54 | y_max = max(mu(2),mu_trans(2)); 55 | axis([(x_min-3) (x_max+3) (y_min-3) (y_max+3)], "equal") 56 | 57 | % Print and save plot 58 | print('../plots/unscented.png', '-dpng') 59 | -------------------------------------------------------------------------------- /2_Unscented_Transform/octave/main.m~: -------------------------------------------------------------------------------- 1 | % This is the main script for computing a transformed distribution according 2 | % to the unscented transform. This script calls all the required 3 | % functions in the correct order. 4 | % If you are unsure about the input and return values of functions you 5 | % should read their documentation which tells you the expected dimensions. 
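% (Worked value: with the settings used in this script, n = 2, alpha = 0.9, kappa = 1,
% the scaling parameter evaluates to lambda = alpha^2*(n + kappa) - n = 0.81*3 - 2 = 0.43.)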
6 | 7 | % Turn off pagination and open a new figure for plotting 8 | more off 9 | close all 10 | figure 11 | hold on 12 | grid 13 | 14 | % Make tools available 15 | addpath('tools'); 16 | 17 | % Initial distribution 18 | sigma = 0.1*eye(2); 19 | mu = [1;2]; 20 | n = length(mu); 21 | 22 | % Compute lambda 23 | alpha = 0.9; 24 | beta = 2; 25 | kappa = 1; 26 | lambda = alpha*alpha*(n+kappa)-n; 27 | 28 | % Compute the sigma points corresponding to mu and sigma 29 | [sigma_points, w_m, w_c] = compute_sigma_points(mu, sigma, lambda, alpha, beta); 30 | 31 | % Plot original distribution with sampled sigma points 32 | plot(mu(1),mu(2),'ro','markersize',12, 'linewidth',3) 33 | legend('original distribution') 34 | drawprobellipse(mu, sigma, 0.9, 'r'); 35 | plot(sigma_points(1,:),sigma_points(2,:),'kx','markersize', 10, 'linewidth',3) 36 | 37 | % Transform sigma points 38 | sigma_points_trans = transform(sigma_points); 39 | 40 | % Recover mu and sigma of the transformed distribution 41 | [mu_trans, sigma_trans] = recover_gaussian(sigma_points_trans, w_m, w_m); 42 | 43 | % Plot transformed sigma points with corresponding mu and sigma 44 | plot(mu_trans(1),mu_trans(2),'bo','markersize', 12, 'linewidth',3) 45 | legend('transformed distribution') 46 | drawprobellipse(mu_trans, sigma_trans, 0.9, 'b'); 47 | plot(sigma_points_trans(1,:),sigma_points_trans(2,:),'kx','markersize', 10, 'linewidth',3) 48 | 49 | % Figure axes setup 50 | title('Unscented Transform', 'fontsize', 20) 51 | x_min = min(mu(1),mu_trans(1)); 52 | x_max = max(mu(1),mu_trans(1)); 53 | y_min = min(mu(2),mu_trans(2)); 54 | y_max = max(mu(2),mu_trans(2)); 55 | axis([(x_min-3) (x_max+3) (y_min-3) (y_max+3)], "equal") 56 | 57 | % Print and save plot 58 | print('../plots/unscented.png', '-dpng') 59 | -------------------------------------------------------------------------------- /2_Unscented_Transform/octave/octave-workspace: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/2_Unscented_Transform/octave/octave-workspace -------------------------------------------------------------------------------- /2_Unscented_Transform/octave/recover_gaussian.m: -------------------------------------------------------------------------------- 1 | function [mu, sigma] = recover_gaussian(sigma_points, w_m, w_c) 2 | % This function computes the recovered Gaussian distribution (mu and sigma) 3 | % given the sigma points (size: nx2n+1) and their weights w_m and w_c: 4 | % w_m = [w_m_0, ..., w_m_2n], w_c = [w_c_0, ..., w_c_2n]. 5 | % The weight vectors are each 1x2n+1 in size, 6 | % where n is the dimensionality of the distribution. 
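% The recovered Gaussian is the weighted sample mean and covariance of the transformed
% sigma points:
%   mu    = sum_{i=0..2n} w_m^i * X_i
%   Sigma = sum_{i=0..2n} w_c^i * (X_i - mu) * (X_i - mu)'
% where each outer product uses a single sigma-point column X_i, i.e. sigma_points(:,i).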
7 | 8 | % Try to vectorize your operations as much as possible 9 | 10 | n = size(sigma_points,1); 11 | mu = zeros(n,1); 12 | sigma = zeros(n,n); 13 | 14 | % TODO: compute mu 15 | for i=1:2*n+1 16 | mu = mu + w_m(1,i)*sigma_points(:,i); 17 | endfor 18 | % TODO: compute sigma 19 | for i=1:2*n+1 20 | sigma = sigma + w_c(1,i)*(sigma_points(:,i)-mu)*(sigma_points(:,i)-mu)'; 21 | endfor 22 | end 23 | -------------------------------------------------------------------------------- /2_Unscented_Transform/octave/recover_gaussian.m~: -------------------------------------------------------------------------------- 1 | function [mu, sigma] = recover_gaussian(sigma_points, w_m, w_c) 2 | % This function computes the recovered Gaussian distribution (mu and sigma) 3 | % given the sigma points (size: nx2n+1) and their weights w_m and w_c: 4 | % w_m = [w_m_0, ..., w_m_2n], w_c = [w_c_0, ..., w_c_2n]. 5 | % The weight vectors are each 1x2n+1 in size, 6 | % where n is the dimensionality of the distribution. 7 | 8 | % Try to vectorize your operations as much as possible 9 | 10 | n = size(sigma_points,1) 11 | mu = zeros(n); 12 | sigma = zeros(n,n); 13 | 14 | % TODO: compute mu 15 | for i=1:2*n+1 16 | mu = mu + w_m(1,i)*sigma_points(:,i); 17 | endfor 18 | % TODO: compute sigma 19 | for i=1:2*n+1 20 | sigma = sigma + w_c(1,i)*bsxfun(@minus,sigma_points,mu)*bsxfun(@minus,sigma_points,mu)'; 21 | endfor 22 | end 23 | -------------------------------------------------------------------------------- /2_Unscented_Transform/octave/tools/drawellipse.m: -------------------------------------------------------------------------------- 1 | %DRAWELLIPSE Draw ellipse. 2 | % DRAWELLIPSE(X,A,B,COLOR) draws an ellipse at X = [x y theta] 3 | % with half axes A and B. Theta is the inclination angle of A, 4 | % regardless if A is smaller or greater than B. COLOR is a 5 | % [r g b]-vector or a color string such as 'r' or 'g'. 6 | % 7 | % H = DRAWELLIPSE(...) returns the graphic handle H. 8 | % 9 | % See also DRAWPROBELLIPSE. 10 | 11 | % v.1.0-v.1.1, Aug.97-Jan.03, Kai Arras, ASL-EPFL 12 | % v.1.2, 03.12.03, Kai Arras, CAS-KTH: (x,a,b) interface 13 | 14 | 15 | function h = drawellipse(x,a,b,color); 16 | 17 | % Constants 18 | NPOINTS = 100; % point density or resolution 19 | 20 | % Compose point vector 21 | ivec = 0:2*pi/NPOINTS:2*pi; % index vector 22 | p(1,:) = a*cos(ivec); % 2 x n matrix which 23 | p(2,:) = b*sin(ivec); % hold ellipse points 24 | 25 | % Translate and rotate 26 | xo = x(1); yo = x(2); angle = x(3); 27 | R = [cos(angle) -sin(angle); sin(angle) cos(angle)]; 28 | T = [xo; yo]*ones(1,length(ivec)); 29 | p = R*p + T; 30 | 31 | % Plot 32 | h = plot(p(1,:),p(2,:),'Color',color, 'linewidth', 2); 33 | -------------------------------------------------------------------------------- /2_Unscented_Transform/octave/tools/drawprobellipse.m: -------------------------------------------------------------------------------- 1 | %DRAWPROBELLIPSE Draw elliptic probability region of a Gaussian in 2D. 2 | % DRAWPROBELLIPSE(X,C,ALPHA,COLOR) draws the elliptic iso-probabi- 3 | % lity contour of a Gaussian distributed bivariate random vector X 4 | % at the significance level ALPHA. The ellipse is centered at X = 5 | % [x; y] where C is the associated 2x2 covariance matrix. COLOR is 6 | % a [r g b]-vector or a color string such as 'r' or 'g'. 7 | % 8 | % X and C can also be of size 3x1 and 3x3 respectively.
9 | % 10 | % For proper scaling, the function CHI2INVTABLE is employed to 11 | % avoid the use of CHI2INV from the Matlab statistics toolbox. 12 | % 13 | % In case of a negative definite matrix C, the ellipse collapses 14 | % to a line which is drawn instead. 15 | % 16 | % H = DRAWPROBELLIPSE(...) returns the graphic handle H. 17 | % 18 | % See also DRAWELLIPSE, CHI2INVTABLE, CHI2INV. 19 | 20 | % v.1.0-v.1.3, 97-Jan.03, Kai Arras, ASL-EPFL 21 | % v.1.4, 03.12.03, Kai Arras, CAS-KTH: toolbox version 22 | 23 | 24 | function h = drawprobellipse(x,C,alpha,color); 25 | 26 | % Calculate unscaled half axes 27 | sxx = C(1,1); syy = C(2,2); sxy = C(1,2); 28 | a = sqrt(0.5*(sxx+syy+sqrt((sxx-syy)^2+4*sxy^2))); % always greater 29 | b = sqrt(0.5*(sxx+syy-sqrt((sxx-syy)^2+4*sxy^2))); % always smaller 30 | 31 | % Remove imaginary parts in case of neg. definite C 32 | if ~isreal(a), a = real(a); end; 33 | if ~isreal(b), b = real(b); end; 34 | 35 | % Scaling in order to reflect specified probability 36 | a = a*sqrt(chi2invtable(alpha,2)); 37 | b = b*sqrt(chi2invtable(alpha,2)); 38 | 39 | % Look where the greater half axis belongs to 40 | if sxx < syy, swap = a; a = b; b = swap; end; 41 | 42 | % Calculate inclination (numerically stable) 43 | if sxx ~= syy, 44 | angle = 0.5*atan(2*sxy/(sxx-syy)); 45 | elseif sxy == 0, 46 | angle = 0; % angle doesn't matter 47 | elseif sxy > 0, 48 | angle = pi/4; 49 | elseif sxy < 0, 50 | angle = -pi/4; 51 | end; 52 | x(3) = angle; 53 | 54 | % Draw ellipse 55 | h = drawellipse(x,a,b,color); -------------------------------------------------------------------------------- /2_Unscented_Transform/octave/transform.m: -------------------------------------------------------------------------------- 1 | function [points] = transform(points) 2 | % This function applies a transformation to a set of points. 3 | % Each column in points is one point, i.e. points = [[x1; y1], [x2;y2], ...] 4 | % Select which function you want to use by uncommenting it 5 | % (deleting the corresponding %{...%}) while keeping all other functions commented. 6 | 7 | 8 | %%%%% 9 | % Function 1 (linear) 10 | % Applies a translation to [x; y] 11 | % points(1,:) = points(1,:) + 1; 12 | % points(2,:) = points(2,:) + 2; 13 | % 14 | %%%%% 15 | 16 | %%%%% 17 | %{ 18 | % Function 2 (nonlinear) 19 | % Computes the polar coordinates corresponding to [x; y] 20 | x = points(1,:); 21 | y = points(2,:); 22 | r = sqrt(sum([x.*x; y.*y])); 23 | theta = atan2(y,x); 24 | points = [r;theta]; 25 | %} 26 | %%%%% 27 | 28 | %%%%% 29 | % 30 | % Function 3 (nonlinear) 31 | points(1,:) = points(1,:).*cos(points(1,:)).*sin(points(1,:)); 32 | points(2,:) = points(2,:).*cos(points(2,:)).*sin(points(2,:)); 33 | % 34 | %%%%% 35 | -------------------------------------------------------------------------------- /2_Unscented_Transform/octave/transform.m~: -------------------------------------------------------------------------------- 1 | function [points] = transform(points) 2 | % This function applies a transformation to a set of points. 3 | % Each column in points is one point, i.e. points = [[x1; y1], [x2;y2], ...] 4 | % Select which function you want to use by uncommenting it 5 | % (deleting the corresponding %{...%}) while keeping all other functions commented. 
6 | 7 | 8 | %%%%% 9 | % Function 1 (linear) 10 | % Applies a translation to [x; y] 11 | % points(1,:) = points(1,:) + 1; 12 | % points(2,:) = points(2,:) + 2; 13 | % 14 | %%%%% 15 | 16 | %%%%% 17 | % 18 | % Function 2 (nonlinear) 19 | % Computes the polar coordinates corresponding to [x; y] 20 | x = points(1,:); 21 | y = points(2,:); 22 | r = sqrt(sum([x.*x; y.*y])); 23 | theta = atan2(y,x); 24 | points = [r;theta]; 25 | % 26 | %%%%% 27 | 28 | %%%%% 29 | %{ 30 | % Function 3 (nonlinear) 31 | points(1,:) = points(1,:).*cos(points(1,:)).*sin(points(1,:)); 32 | points(2,:) = points(2,:).*cos(points(2,:)).*sin(points(2,:)); 33 | %} 34 | %%%%% 35 | -------------------------------------------------------------------------------- /2_Unscented_Transform/plots/unscented.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/2_Unscented_Transform/plots/unscented.png -------------------------------------------------------------------------------- /3_UKF_SLAM/data/world.dat: -------------------------------------------------------------------------------- 1 | 1 2 1 2 | 2 0 4 3 | 3 2 7 4 | 4 9 2 5 | 5 10 5 6 | 6 9 8 7 | 7 5 5 8 | 8 5 3 9 | 9 5 9 10 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/correction_step_bkup.m: -------------------------------------------------------------------------------- 1 | function [mu, sigma, map] = correction_step(mu, sigma, z, map); 2 | % Updates the belief, i.e., mu and sigma after observing landmarks, 3 | % and augments the map with newly observed landmarks. 4 | % The employed sensor model measures the range and bearing of a landmark 5 | % mu: state vector containing robot pose and poses of landmarks obeserved so far. 6 | % Current robot pose = mu(1:3) 7 | % Note that the landmark poses in mu are stacked in the order by which they were observed 8 | % sigma: the covariance matrix of the system. 9 | % z: struct array containing the landmark observations. 10 | % Each observation z(i) has an id z(i).id, a range z(i).range, and a bearing z(i).bearing 11 | % The vector 'map' contains the ids of all landmarks observed so far by the robot in the order 12 | % by which they were observed, NOT in ascending id order. 13 | 14 | % For computing sigma 15 | global scale; 16 | 17 | % Number of measurements in this time step 18 | m = size(z, 2); 19 | 20 | % Measurement noise 21 | Q = 0.01*eye(2); 22 | n = length(mu); 23 | zm = zeros(2,1); 24 | S = zeros(2); 25 | xbar = 0; 26 | ybar = 0; 27 | 28 | 29 | for i = 1:m 30 | 31 | 32 | % If the landmark is observed for the first time: 33 | if (isempty(find(map == z(i).id))) 34 | % Add new landmark to the map 35 | [mu, sigma, map] = add_landmark_to_map(mu, sigma, z(i), map, Q); 36 | % The measurement has been incorporated so we quit the correction step 37 | continue; 38 | endif 39 | n=length(mu); 40 | sigma_x_z = zeros(n,2); 41 | % Compute sigma points from the predicted mean and covariance 42 | % This corresponds to line 6 on slide 32 43 | sigma_points = compute_sigma_points(mu, sigma); 44 | % Normalize! 
45 | sigma_points(3,:) = normalize_angle(sigma_points(3,:)); 46 | 47 | % Compute lambda 48 | n = length(mu); 49 | num_sig = size(sigma_points,2); 50 | lambda = scale - n; 51 | 52 | % extract the current location of the landmark for each sigma point 53 | % Use this for computing an expected measurement, i.e., applying the h function 54 | landmarkIndex = find(map==(z(i).id)); 55 | landmarkXs = sigma_points(2*landmarkIndex + 2, :); 56 | landmarkYs = sigma_points(2*landmarkIndex + 3, :); 57 | 58 | % TODO: Compute z_points (2x2n+1), which consists of predicted measurements from all sigma points 59 | % This corresponds to line 7 on slide 32 60 | z_points(:,:) = [landmarkXs;landmarkYs] + repmat([z(i).range*cos(normalize_angle(z(i).bearing + mu(3)));z(i).range*sin(normalize_angle(z(i).bearing + mu(3)))],1,2*n+1); 61 | 62 | % setup the weight vector for mean and covariance 63 | wm = [lambda/scale, repmat(1/(2*scale), 1, 2*n)]; 64 | wc = wm; 65 | 66 | % TODO: Compute zm, line 8 on slide 32 67 | % zm is the recovered expected measurement mean from z_points. 68 | % It will be a 2x1 vector [expected_range; expected_bearing]. 69 | % For computing the expected_bearing compute a weighted average by 70 | % summing the sines/cosines of the angle 71 | for j=1:2*n+1 72 | zm(1,1) = zm(1,1) + wm(j)*z_points(1,j); 73 | xbar = xbar + wm(j)*cos(z_points(2,j)); 74 | ybar = ybar + wm(j)*sin(z_points(2,j)); 75 | endfor 76 | zm(2,1) = atan2(ybar,xbar); 77 | 78 | % TODO: Compute the innovation covariance matrix S (2x2), line 9 on slide 32 79 | % Remember to normalize the bearing after computing the difference 80 | for j=1:2*n+1 81 | S = S + wc(j)*(z_points(:,j)-zm)*(z_points(:,j)-zm)' + Q; 82 | endfor 83 | 84 | % TODO: Compute Sigma_x_z, line 10 on slide 32 85 | % (which is equivalent to sigma times the Jacobian H transposed in EKF). 86 | % sigma_x_z is an nx2 matrix, where n is the current dimensionality of mu 87 | % Remember to normalize the bearing after computing the difference 88 | n = length(mu); 89 | for j=1:2*n+1 90 | sigma_x_z = sigma_x_z + wc(j)*(sigma_points(:,j) - mu)*(z_points(:,j)-zm)'; 91 | endfor 92 | 93 | % TODO: Compute the Kalman gain, line 11 on slide 32 94 | K = sigma_x_z/S; 95 | 96 | % Get the actual measurement as a vector (for computing the difference to the observation) 97 | z_actual = [z(i).range; z(i).bearing]; 98 | 99 | % TODO: Update mu and sigma, line 12 + 13 on slide 32 100 | % normalize the relative bearing 101 | zdiff = z_actual - zm; 102 | zdiff(2) = normalize_angle(zdiff(2)); 103 | mu = mu + K*zdiff; 104 | mu(3) = normalize_angle(mu(3)); 105 | 106 | % TODO: Normalize the robot heading mu(3) 107 | 108 | endfor 109 | 110 | end 111 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/correction_step_bkup2.m: -------------------------------------------------------------------------------- 1 | function [mu, sigma, map] = correction_step(mu, sigma, z, map); 2 | % Updates the belief, i.e., mu and sigma after observing landmarks, 3 | % and augments the map with newly observed landmarks. 4 | % The employed sensor model measures the range and bearing of a landmark 5 | % mu: state vector containing robot pose and poses of landmarks obeserved so far. 6 | % Current robot pose = mu(1:3) 7 | % Note that the landmark poses in mu are stacked in the order by which they were observed 8 | % sigma: the covariance matrix of the system. 9 | % z: struct array containing the landmark observations. 
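% (Summary of the UKF update steps referred to as "slide 32" throughout this file;
% h() maps a sigma point to an expected [range; bearing] measurement.)
%   Z_i   = h(X_i)                                            (line 7)
%   z_hat = sum_i w_m^i * Z_i                                 (line 8)
%   S     = sum_i w_c^i * (Z_i - z_hat)*(Z_i - z_hat)' + Q    (line 9)
%   S_xz  = sum_i w_c^i * (X_i - mu)*(Z_i - z_hat)'           (line 10)
%   K     = S_xz * inv(S)                                     (line 11)
%   mu <- mu + K*(z - z_hat),   Sigma <- Sigma - K*S*K'       (lines 12-13)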
10 | % Each observation z(i) has an id z(i).id, a range z(i).range, and a bearing z(i).bearing 11 | % The vector 'map' contains the ids of all landmarks observed so far by the robot in the order 12 | % by which they were observed, NOT in ascending id order. 13 | 14 | % For computing sigma 15 | global scale; 16 | 17 | % Number of measurements in this time step 18 | m = size(z, 2); 19 | 20 | % Measurement noise 21 | Q = 0.01*eye(2); 22 | n = length(mu); 23 | zm = zeros(2,1); 24 | S = zeros(2); 25 | xbar = 0; 26 | ybar = 0; 27 | 28 | 29 | for i = 1:m 30 | 31 | 32 | % If the landmark is observed for the first time: 33 | if (isempty(find(map == z(i).id))) 34 | % Add new landmark to the map 35 | [mu, sigma, map] = add_landmark_to_map(mu, sigma, z(i), map, Q); 36 | % The measurement has been incorporated so we quit the correction step 37 | continue; 38 | endif 39 | n=length(mu); 40 | sigma_x_z = zeros(n,2); 41 | % Compute sigma points from the predicted mean and covariance 42 | % This corresponds to line 6 on slide 32 43 | sigma_points = compute_sigma_points(mu, sigma) 44 | % Normalize! 45 | sigma_points(3,:) = normalize_angle(sigma_points(3,:)); 46 | 47 | % Compute lambda 48 | n = length(mu); 49 | num_sig = size(sigma_points,2); 50 | lambda = scale - n; 51 | 52 | % extract the current location of the landmark for each sigma point 53 | % Use this for computing an expected measurement, i.e., applying the h function 54 | landmarkIndex = find(map==(z(i).id)); 55 | landmarkXs = sigma_points(2*landmarkIndex + 2, :); 56 | landmarkYs = sigma_points(2*landmarkIndex + 3, :); 57 | 58 | % TODO: Compute z_points (2x2n+1), which consists of predicted measurements from all sigma points 59 | % This corresponds to line 7 on slide 32 60 | z_points(:,:) = [landmarkXs;landmarkYs] + repmat([z(i).range*cos(z(i).bearing + mu(3)); z(i).range*sin(z(i).bearing + mu(3))],1,2*n+1); 61 | 62 | % setup the weight vector for mean and covariance 63 | wm = [lambda/scale, repmat(1/(2*scale), 1, 2*n)]; 64 | wc = wm; 65 | 66 | % TODO: Compute zm, line 8 on slide 32 67 | % zm is the recovered expected measurement mean from z_points. 68 | % It will be a 2x1 vector [expected_range; expected_bearing]. 69 | % For computing the expected_bearing compute a weighted average by 70 | % summing the sines/cosines of the angle 71 | 72 | for j=1:2*n+1 73 | zm(1,1) = zm(1,1) + wm(j)*z_points(1,j); 74 | xbar = xbar + wm(j)*cos(z_points(2,j)); 75 | ybar = ybar + wm(j)*sin(z_points(2,j)); 76 | endfor 77 | zm(2,1) = atan2(ybar,xbar); 78 | 79 | % TODO: Compute the innovation covariance matrix S (2x2), line 9 on slide 32 80 | % Remember to normalize the bearing after computing the difference 81 | for j=1:2*n+1 82 | tmp = z_points(:,j) - zm; 83 | tmp(2) = normalize_angle(tmp(2)); 84 | S = S + wc(j)*tmp*tmp' + Q; 85 | endfor 86 | 87 | % TODO: Compute Sigma_x_z, line 10 on slide 32 88 | % (which is equivalent to sigma times the Jacobian H transposed in EKF). 
89 | % sigma_x_z is an nx2 matrix, where n is the current dimensionality of mu 90 | % Remember to normalize the bearing after computing the difference 91 | n = length(mu); 92 | for j=1:2*n+1 93 | tmp2 = sigma_points(:,j) - mu; 94 | tmp2(3,1) = normalize_angle(tmp2(3,1)); 95 | tmp3 = z_points(:,j)-zm; 96 | tmp3(2) = normalize_angle(tmp3(2)); 97 | sigma_x_z = sigma_x_z + wc(j)*tmp2*tmp3'; 98 | endfor 99 | 100 | % TODO: Compute the Kalman gain, line 11 on slide 32 101 | K = sigma_x_z/S; 102 | 103 | % Get the actual measurement as a vector (for computing the difference to the observation) 104 | z_actual = [z(i).range; z(i).bearing]; 105 | 106 | % TODO: Update mu and sigma, line 12 + 13 on slide 32 107 | % normalize the relative bearing 108 | zdiff = z_actual - zm; 109 | zdiff(2) = normalize_angle(zdiff(2)); 110 | mu = mu + K*zdiff; 111 | mu(3) = normalize_angle(mu(3)); 112 | 113 | sigma = sigma - K*S*K'; 114 | 115 | % TODO: Normalize the robot heading mu(3) 116 | 117 | endfor 118 | 119 | end 120 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/prediction_step.m: -------------------------------------------------------------------------------- 1 | function [mu, sigma, sigma_points] = prediction_step(mu, sigma, u) 2 | % Updates the belief concerning the robot pose according to the motion model. 3 | % mu: state vector containing robot pose and poses of landmarks obeserved so far 4 | % Current robot pose = mu(1:3) 5 | % Note that the landmark poses in mu are stacked in the order by which they were observed 6 | % sigma: the covariance matrix of the system. 7 | % u: odometry reading (r1, t, r2) 8 | % Use u.r1, u.t, and u.r2 to access the rotation and translation values 9 | 10 | % For computing lambda. 11 | global scale; 12 | 13 | % Compute sigma points 14 | sigma_points = compute_sigma_points(mu, sigma); 15 | 16 | % Dimensionality 17 | n = length(mu); 18 | % lambda 19 | lambda = scale - n; 20 | 21 | % TODO: Transform all sigma points according to the odometry command 22 | % Remember to vectorize your operations and normalize angles 23 | % Tip: the function normalize_angle also works on a vector (row) of angles 24 | for i=1:2*n+1 25 | sigma_points(1:3,i) = sigma_points(1:3,i) + [u.t*cos(sigma_points(3,i) + u.r1); u.t*sin(sigma_points(3,i) + u.r1); u.r1 + u.r2]; 26 | sigma_points(3,i) = normalize_angle(sigma_points(3,i)); 27 | endfor 28 | % Computing the weights for recovering the mean 29 | wm = [lambda/scale, repmat(1/(2*scale),1,2*n)]; 30 | wc = wm; 31 | 32 | % -------- My initialization ----------- % 33 | xbar = 0; 34 | ybar = 0; 35 | mu = zeros(n,1); 36 | sigma = zeros(n); 37 | % TODO: recover mu. 38 | % Be careful when computing the robot's orientation (sum up the sines and 39 | % cosines and recover the 'average' angle via atan2) 40 | for i=1:2*n+1 41 | mu = mu + wm(i)*sigma_points(:,i); 42 | xbar = xbar + wm(i)*cos(sigma_points(3,i)); 43 | ybar = ybar + wm(i)*sin(sigma_points(3,i)); 44 | endfor 45 | mu(3) = atan2(ybar,xbar); 46 | mu(3) = normalize_angle(mu(3)); 47 | 48 | % TODO: Recover sigma. 
Again, normalize the angular difference 49 | for i=1:2*n+1 50 | tmp = sigma_points(:,i) - mu; 51 | tmp(3) = normalize_angle(tmp(3)); 52 | sigma = sigma + wc(i)*tmp*tmp'; 53 | endfor 54 | 55 | % Motion noise 56 | motionNoise = 0.1; 57 | R3 = [motionNoise, 0, 0; 58 | 0, motionNoise, 0; 59 | 0, 0, motionNoise/10]; 60 | R = zeros(size(sigma,1)); 61 | R(1:3,1:3) = R3; 62 | 63 | % TODO: Add motion noise to sigma 64 | sigma = sigma + R; 65 | 66 | end 67 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/prediction_step.m~: -------------------------------------------------------------------------------- 1 | function [mu, sigma, sigma_points] = prediction_step(mu, sigma, u) 2 | % Updates the belief concerning the robot pose according to the motion model. 3 | % mu: state vector containing robot pose and poses of landmarks obeserved so far 4 | % Current robot pose = mu(1:3) 5 | % Note that the landmark poses in mu are stacked in the order by which they were observed 6 | % sigma: the covariance matrix of the system. 7 | % u: odometry reading (r1, t, r2) 8 | % Use u.r1, u.t, and u.r2 to access the rotation and translation values 9 | 10 | % For computing lambda. 11 | global scale; 12 | 13 | % Compute sigma points 14 | sigma_points = compute_sigma_points(mu, sigma); 15 | 16 | % Dimensionality 17 | n = length(mu); 18 | % lambda 19 | lambda = scale - n; 20 | 21 | % TODO: Transform all sigma points according to the odometry command 22 | % Remember to vectorize your operations and normalize angles 23 | % Tip: the function normalize_angle also works on a vector (row) of angles 24 | for i=1:2*n+1 25 | sigma_points(1:3,i) = sigma_points(1:3,i) + [u.t*cos(sigma_points(3,i) + u.r1); u.t*sin(sigma_points(3,i) + u.r1); u.r1 + u.r2]; 26 | sigma_points(3,i) = normalize_angle(sigma_points(3,i)); 27 | endfor 28 | % Computing the weights for recovering the mean 29 | wm = [lambda/scale, repmat(1/(2*scale),1,2*n)]; 30 | wc = wm; 31 | 32 | % -------- My initialization ----------- % 33 | xbar = 0; 34 | ybar = 0; 35 | mu = zeros(n,1); 36 | sigma = zeros(n); 37 | % TODO: recover mu. 38 | % Be careful when computing the robot's orientation (sum up the sines and 39 | % cosines and recover the 'average' angle via atan2) 40 | for i=1:2*n+1 41 | mu = mu + wm(i)*sigma_points(:,i); 42 | xbar = xbar + wm(i)*cos(sigma_points(3,i)); 43 | ybar = ybar + wm(i)*sin(sigma_points(3,i)); 44 | endfor 45 | mu(3) = atan2(ybar,xbar); 46 | mu(3) = normalize_angle(mu(3)); 47 | 48 | % TODO: Recover sigma. Again, normalize the angular difference 49 | for i=1:2*n+1 50 | tmp = sigma_points(:,i) - mu; 51 | tmp(3) = normalize_angle(tmp(3)); 52 | sigma = sigma + wc(i)*tmp*tmp'; 53 | endfor 54 | 55 | % Motion noise 56 | motionNoise = 0.1; 57 | R3 = [motionNoise, 0, 0; 58 | 0, motionNoise, 0; 59 | 0, 0, motionNoise/10]; 60 | R = zeros(size(sigma,1)); 61 | R(1:3,1:3) = R3; 62 | 63 | % TODO: Add motion noise to sigma 64 | sigma = sigma + R; 65 | 66 | end 67 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/prediction_step_222.m: -------------------------------------------------------------------------------- 1 | function [mu, sigma, sigma_points] = prediction_step(mu, sigma, u) 2 | % Updates the belief concerning the robot pose according to the motion model. 
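% The odometry motion model applied to each sigma point further below is (sketch, with
% (x, y, theta) the robot part of a sigma point and u = (r1, t, r2) the odometry reading):
%   x'     = x + u.t * cos(theta + u.r1)
%   y'     = y + u.t * sin(theta + u.r1)
%   theta' = normalize_angle(theta + u.r1 + u.r2)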
3 | % mu: state vector containing robot pose and poses of landmarks obeserved so far 4 | % Current robot pose = mu(1:3) 5 | % Note that the landmark poses in mu are stacked in the order by which they were observed 6 | % sigma: the covariance matrix of the system. 7 | % u: odometry reading (r1, t, r2) 8 | % Use u.r1, u.t, and u.r2 to access the rotation and translation values 9 | 10 | % For computing lambda. 11 | global scale; 12 | 13 | % Compute sigma points 14 | sigma_points = compute_sigma_points(mu, sigma); 15 | 16 | % Dimensionality 17 | n = length(mu); 18 | % lambda 19 | lambda = scale - n; 20 | 21 | % TODO: Transform all sigma points according to the odometry command 22 | % Remember to vectorize your operations and normalize angles 23 | % Tip: the function normalize_angle also works on a vector (row) of angles 24 | for i=1:2*n+1 25 | sigma_points(1:3,i) = sigma_points(1:3,i) + [u.t*cos(sigma_points(3,i) + u.r1); u.t*sin(sigma_points(3,i) + u.r1); normalize_angle(u.r1 + u.r2)]; 26 | endfor 27 | % Computing the weights for recovering the mean 28 | wm = [lambda/scale, repmat(1/(2*scale),1,2*n)]; 29 | wc = wm; 30 | xbar = 0; 31 | ybar = 0; 32 | mu(1:2) = zeros(2,1); 33 | sigma(1:3,1:3) = zeros(3); 34 | % TODO: recover mu. 35 | % Be careful when computing the robot's orientation (sum up the sines and 36 | % cosines and recover the 'average' angle via atan2) 37 | for i=1:2*n+1 38 | mu(1:2,1) = mu(1:2,1) + wm(i)*sigma_points(1:2,i); 39 | xbar = xbar + wm(i)*cos(sigma_points(3,i)); 40 | ybar = ybar + wm(i)*sin(sigma_points(3,i)); 41 | endfor 42 | mu(3,1) = atan2(ybar,xbar); 43 | 44 | % TODO: Recover sigma. Again, normalize the angular difference 45 | for i=1:2*n+1 46 | sigma(1:3,1:3) = sigma(1:3,1:3) + wc(i)*(sigma_points(1:3,i) - mu(1:3))*(sigma_points(1:3,i) - mu(1:3))'; 47 | endfor 48 | 49 | % Motion noise 50 | motionNoise = 0.1; 51 | R3 = [motionNoise, 0, 0; 52 | 0, motionNoise, 0; 53 | 0, 0, motionNoise/10]; 54 | R = zeros(size(sigma,1)); 55 | R(1:3,1:3) = R3; 56 | 57 | % TODO: Add motion noise to sigma 58 | sigma = sigma + R; 59 | 60 | end 61 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/prediction_step_bkup.m: -------------------------------------------------------------------------------- 1 | function [mu, sigma, sigma_points] = prediction_step(mu, sigma, u) 2 | % Updates the belief concerning the robot pose according to the motion model. 3 | % mu: state vector containing robot pose and poses of landmarks obeserved so far 4 | % Current robot pose = mu(1:3) 5 | % Note that the landmark poses in mu are stacked in the order by which they were observed 6 | % sigma: the covariance matrix of the system. 7 | % u: odometry reading (r1, t, r2) 8 | % Use u.r1, u.t, and u.r2 to access the rotation and translation values 9 | 10 | % For computing lambda. 
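% Here scale = lambda + n, so lambda = scale - n, and the unscented-transform weights used
% below satisfy (sketch):
%   w0 = lambda/scale;   % weight of the central sigma point
%   wi = 1/(2*scale);    % weight of each of the 2n remaining sigma points
%   w0 + 2*n*wi = 1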
11 | global scale; 12 | 13 | % Compute sigma points 14 | sigma_points = compute_sigma_points(mu, sigma); 15 | 16 | % Dimensionality 17 | n = length(mu); 18 | % lambda 19 | lambda = scale - n; 20 | 21 | % TODO: Transform all sigma points according to the odometry command 22 | % Remember to vectorize your operations and normalize angles 23 | % Tip: the function normalize_angle also works on a vector (row) of angles 24 | for i=1:2*n+1 25 | sigma_points(1:3,i) = sigma_points(1:3,i) + [u.t*cos(normalize_angle(sigma_points(3,i) + u.r1)); u.t*sin(normalize_angle(sigma_points(3,i) + u.r1)); normalize_angle(u.r1 + u.r2)]; 26 | endfor 27 | % Computing the weights for recovering the mean 28 | wm = [lambda/scale, repmat(1/(2*scale),1,2*n)]; 29 | wc = wm; 30 | xbar = 0; 31 | ybar = 0; 32 | mu = zeros(n,1); 33 | sigma = zeros(n); 34 | % TODO: recover mu. 35 | % Be careful when computing the robot's orientation (sum up the sines and 36 | % cosines and recover the 'average' angle via atan2) 37 | for i=1:2*n+1 38 | mu(1:2,1) = mu(1:2,1) + wm(i)*sigma_points(1:2,i); 39 | xbar = xbar + wm(i)*cos(sigma_points(3,i)); 40 | ybar = ybar + wm(i)*sin(sigma_points(3,i)); 41 | endfor 42 | mu(3,1) = atan2(ybar,xbar); 43 | 44 | % TODO: Recover sigma. Again, normalize the angular difference 45 | for i=1:2*n+1 46 | sigma = sigma + wc(i)*(sigma_points(1:end,i) - mu)*(sigma_points(1:end,i) - mu)'; 47 | endfor 48 | 49 | % Motion noise 50 | motionNoise = 0.1; 51 | R3 = [motionNoise, 0, 0; 52 | 0, motionNoise, 0; 53 | 0, 0, motionNoise/10]; 54 | R = zeros(size(sigma,1)); 55 | R(1:3,1:3) = R3; 56 | 57 | % TODO: Add motion noise to sigma 58 | sigma = sigma + R; 59 | 60 | end 61 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/tools/add_landmark_to_map.m: -------------------------------------------------------------------------------- 1 | % Add a landmark to the UKF. 2 | % We have to compute the uncertainty of the landmark given the current state 3 | % (and its uncertainty) of the newly observed landmark. To this end, we also 4 | % employ the unscented transform to propagate Q (sensor noise) through the 5 | % current state 6 | 7 | function [mu, sigma, map] = add_landmark_to_map(mu, sigma, z, map, Q); 8 | 9 | % For computing sigma 10 | global scale; 11 | 12 | landmarkId = z.id; 13 | 14 | %add landmark to the map 15 | map = [map; landmarkId]; 16 | % TODO: Initialize its pose according to the measurement and add it to mu 17 | 18 | % Append the measurement to the state vector 19 | mu = [mu; z.range(); z.bearing()]; 20 | % Initialize its uncertainty and add it to sigma 21 | sigma = blkdiag(sigma, Q); 22 | 23 | % Transform from [range, bearing] to the x/y location of the landmark 24 | % This operation intializes the uncertainty in the position of the landmark 25 | % Sample sigma points 26 | sig_pnts_new = compute_sigma_points(mu, sigma); 27 | % Normalize! 
28 | sig_pnts_new(3,:) = normalize_angle(sig_pnts_new(3,:)); 29 | % Compute the xy location of the new landmark according to each sigma point 30 | newX = sig_pnts_new(1,:) + sig_pnts_new(end-1,:).*cos(sig_pnts_new(3,:) + sig_pnts_new(end,:)); 31 | newY = sig_pnts_new(2,:) + sig_pnts_new(end-1,:).*sin(sig_pnts_new(3,:) + sig_pnts_new(end,:)); 32 | % The last 2 components of the sigma points can now be replaced by the xy pose of the landmark 33 | sig_pnts_new(end-1,:) = newX; 34 | sig_pnts_new(end,:) = newY; 35 | 36 | % Recover mu and sigma 37 | n = length(mu); 38 | lambda = scale - n; 39 | w0 = lambda/scale; 40 | wm = [w0, repmat(1/(2*scale),1,2*n)]; 41 | wc = wm; 42 | % Theta should be recovered by summing up the sines and cosines 43 | cosines = sum(cos(sig_pnts_new(3,:)).*wm); 44 | sines = sum(sin(sig_pnts_new(3,:)).*wm); 45 | % recompute the angle and normalize it 46 | mu_theta = normalize_angle(atan2(sines, cosines)); 47 | mu = sum(sig_pnts_new .* repmat(wm, rows(sig_pnts_new), 1), 2); 48 | mu(3) = mu_theta; 49 | 50 | diff = sig_pnts_new - repmat(mu,1,size(sig_pnts_new,2)); 51 | % Normalize! 52 | diff(3,:) = normalize_angle(diff(3,:)); 53 | sigma = (repmat(wc, rows(diff), 1) .* diff) * diff'; 54 | 55 | end 56 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/tools/compute_sigma_points.m: -------------------------------------------------------------------------------- 1 | function [sigma_points] = compute_sigma_points(mu, sigma) 2 | % Computes the 2n+1 sigma points according to the unscented transform, 3 | % where n is the dimensionality of the mean vector mu. 4 | % The sigma points should form the columns of sigma_points, 5 | % i.e. sigma_points is an nx2n+1 matrix. 6 | 7 | global scale; 8 | 9 | % Compute lambda 10 | n = length(mu); 11 | num_sig = 2*n+1; 12 | lambda = scale - n; 13 | 14 | % Compute sigma points 15 | sigmasqr = sqrtm(sigma); 16 | sigmasqr = sqrt(n+lambda) * sigmasqr; 17 | 18 | mureplicated = repmat(mu, 1, n); 19 | sigma_points = [mu, mureplicated + sigmasqr, mureplicated - sigmasqr]; 20 | 21 | end 22 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/tools/drawellipse.m: -------------------------------------------------------------------------------- 1 | %DRAWELLIPSE Draw ellipse. 2 | % DRAWELLIPSE(X,A,B,COLOR) draws an ellipse at X = [x y theta] 3 | % with half axes A and B. Theta is the inclination angle of A, 4 | % regardless if A is smaller or greater than B. COLOR is a 5 | % [r g b]-vector or a color string such as 'r' or 'g'. 6 | % 7 | % H = DRAWELLIPSE(...) returns the graphic handle H. 8 | % 9 | % See also DRAWPROBELLIPSE. 
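% Example (illustrative values):
%   h = drawellipse([0; 0; pi/4], 2, 1, 'r');
% draws a red ellipse at the origin with half axes 2 and 1, the major axis inclined by 45 degrees.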
10 | 11 | % v.1.0-v.1.1, Aug.97-Jan.03, Kai Arras, ASL-EPFL 12 | % v.1.2, 03.12.03, Kai Arras, CAS-KTH: (x,a,b) interface 13 | 14 | 15 | function h = drawellipse(x,a,b,color); 16 | 17 | % Constants 18 | NPOINTS = 100; % point density or resolution 19 | 20 | % Compose point vector 21 | ivec = 0:2*pi/NPOINTS:2*pi; % index vector 22 | p(1,:) = a*cos(ivec); % 2 x n matrix which 23 | p(2,:) = b*sin(ivec); % hold ellipse points 24 | 25 | % Translate and rotate 26 | xo = x(1); yo = x(2); angle = x(3); 27 | R = [cos(angle) -sin(angle); sin(angle) cos(angle)]; 28 | T = [xo; yo]*ones(1,length(ivec)); 29 | p = R*p + T; 30 | 31 | % Plot 32 | h = plot(p(1,:),p(2,:),'Color',color, 'linewidth', 2); 33 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/tools/drawprobellipse.m: -------------------------------------------------------------------------------- 1 | %DRAWPROBELLIPSE Draw elliptic probability region of a Gaussian in 2D. 2 | % DRAWPROBELLIPSE(X,C,ALPHA,COLOR) draws the elliptic iso-probabi- 3 | % lity contour of a Gaussian distributed bivariate random vector X 4 | % at the significance level ALPHA. The ellipse is centered at X = 5 | % [x; y] where C is the associated 2x2 covariance matrix. COLOR is 6 | % a [r g b]-vector or a color string such as 'r' or 'g'. 7 | % 8 | % X and C can also be of size 3x1 and 3x3 respectively. 9 | % 10 | % For proper scaling, the function CHI2INVTABLE is employed to 11 | % avoid the use of CHI2INV from the Matlab statistics toolbox. 12 | % 13 | % In case of a negative definite matrix C, the ellipse collapses 14 | % to a line which is drawn instead. 15 | % 16 | % H = DRAWPROBELLIPSE(...) returns the graphic handle H. 17 | % 18 | % See also DRAWELLIPSE, CHI2INVTABLE, CHI2INV. 19 | 20 | % v.1.0-v.1.3, 97-Jan.03, Kai Arras, ASL-EPFL 21 | % v.1.4, 03.12.03, Kai Arras, CAS-KTH: toolbox version 22 | 23 | 24 | function h = drawprobellipse(x,C,alpha,color); 25 | 26 | % Calculate unscaled half axes 27 | sxx = C(1,1); syy = C(2,2); sxy = C(1,2); 28 | a = sqrt(0.5*(sxx+syy+sqrt((sxx-syy)^2+4*sxy^2))); % always greater 29 | b = sqrt(0.5*(sxx+syy-sqrt((sxx-syy)^2+4*sxy^2))); % always smaller 30 | 31 | % Remove imaginary parts in case of neg. 
definite C 32 | if ~isreal(a), a = real(a); end; 33 | if ~isreal(b), b = real(b); end; 34 | 35 | % Scaling in order to reflect specified probability 36 | a = a*sqrt(chi2invtable(alpha,2)); 37 | b = b*sqrt(chi2invtable(alpha,2)); 38 | 39 | % Look where the greater half axis belongs to 40 | if sxx < syy, swap = a; a = b; b = swap; end; 41 | 42 | % Calculate inclination (numerically stable) 43 | if sxx ~= syy, 44 | angle = 0.5*atan(2*sxy/(sxx-syy)); 45 | elseif sxy == 0, 46 | angle = 0; % angle doesn't matter 47 | elseif sxy > 0, 48 | angle = pi/4; 49 | elseif sxy < 0, 50 | angle = -pi/4; 51 | end; 52 | x(3) = angle; 53 | 54 | % Draw ellipse 55 | h = drawellipse(x,a,b,color); -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/tools/normalize_angle.m: -------------------------------------------------------------------------------- 1 | function [phi] = normalize_angle(phi) 2 | % Normalize phi to be between -pi and pi 3 | % phi can also be a vector of angles 4 | 5 | phi = mod(phi + pi, 2*pi) - pi; 6 | 7 | end 8 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/tools/plot_state.m: -------------------------------------------------------------------------------- 1 | function plot_state(mu, sigma, landmarks, timestep, map, z, window) 2 | % Visualizes the state of the UKF SLAM algorithm. 3 | % 4 | % The resulting plot displays the following information: 5 | % - map ground truth (black +'s) 6 | % - current robot pose estimate (red) 7 | % - current landmark pose estimates (blue) 8 | % - visualization of the observations made at this time step (line between robot and landmark) 9 | 10 | clf 11 | hold on 12 | grid("on") 13 | L = struct2cell(landmarks); 14 | drawprobellipse(mu(1:3), sigma(1:3,1:3), 0.95, 'r'); 15 | plot(cell2mat(L(2,:)), cell2mat(L(3,:)), 'k+', 'markersize', 10, 'linewidth', 5); 16 | for(i=1:length(map)) 17 | plot(mu(2*i+ 2),mu(2*i+ 3), 'bo', 'markersize', 10, 'linewidth', 5) 18 | drawprobellipse(mu(2*i+ 2:2*i+ 3), sigma(2*i+ 2:2*i+ 3,2*i+ 2:2*i+ 3), 0.95, 'b'); 19 | endfor 20 | 21 | for(i=1:size(z,2)) 22 | loc = find(map==z(i).id); 23 | mX = mu(2*loc+2); 24 | mY = mu(2*loc+3); 25 | line([mu(1), mX],[mu(2), mY], 'color', 'k', 'linewidth', 1); 26 | endfor 27 | 28 | drawrobot(mu(1:3), 'r', 3, 0.3, 0.3); 29 | xlim([-2, 12]) 30 | ylim([-2, 12]) 31 | hold off 32 | 33 | % plot(sig_pnts(1:2,1),sig_pnts(1:2,2),'gx','linewidth',3); 34 | 35 | if window 36 | figure(1, "visible", "on"); 37 | drawnow; 38 | pause(0.1); 39 | else 40 | % figure(1, "visible", "off"); 41 | filename = sprintf('../plots/ukf_%03d.png', timestep); 42 | print(filename, '-dpng'); 43 | end 44 | 45 | end 46 | 47 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/tools/plot_state.m~: -------------------------------------------------------------------------------- 1 | function plot_state(mu, sigma, landmarks, timestep, map, z, window) 2 | % Visualizes the state of the UKF SLAM algorithm. 
3 | % 4 | % The resulting plot displays the following information: 5 | % - map ground truth (black +'s) 6 | % - current robot pose estimate (red) 7 | % - current landmark pose estimates (blue) 8 | % - visualization of the observations made at this time step (line between robot and landmark) 9 | 10 | clf 11 | hold on 12 | grid("on") 13 | L = struct2cell(landmarks); 14 | drawprobellipse(mu(1:3), sigma(1:3,1:3), 0.95, 'r'); 15 | plot(cell2mat(L(2,:)), cell2mat(L(3,:)), 'k+', 'markersize', 10, 'linewidth', 5); 16 | for(i=1:length(map)) 17 | plot(mu(2*i+ 2),mu(2*i+ 3), 'bo', 'markersize', 10, 'linewidth', 5) 18 | drawprobellipse(mu(2*i+ 2:2*i+ 3), sigma(2*i+ 2:2*i+ 3,2*i+ 2:2*i+ 3), 0.95, 'b'); 19 | endfor 20 | 21 | for(i=1:size(z,2)) 22 | loc = find(map==z(i).id); 23 | mX = mu(2*loc+2); 24 | mY = mu(2*loc+3); 25 | line([mu(1), mX],[mu(2), mY], 'color', 'k', 'linewidth', 1); 26 | endfor 27 | 28 | drawrobot(mu(1:3), 'r', 3, 0.3, 0.3); 29 | xlim([-2, 12]) 30 | ylim([-2, 12]) 31 | hold off 32 | 33 | % plot(sig_pnts(1:2,1),sig_pnts(1:2,2),'gx','linewidth',3); 34 | 35 | if window 36 | figure(1, "visible", "on"); 37 | drawnow; 38 | pause(0.1); 39 | else 40 | figure(1, "visible", "off"); 41 | filename = sprintf('../plots/ukf_%03d.png', timestep); 42 | print(filename, '-dpng'); 43 | end 44 | 45 | end 46 | 47 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/tools/read_data.m: -------------------------------------------------------------------------------- 1 | function data = read_data(filename) 2 | % Reads the odometry and sensor readings from a file. 3 | % 4 | % filename: path to the file to parse 5 | % data: structure containing the parsed information 6 | % 7 | % The data is returned in a structure where the u_t and z_t are stored 8 | % within a single entry. A z_t can contain observations of multiple 9 | % landmarks. 10 | % 11 | % Usage: 12 | % - access the readings for timestep i: 13 | % data.timestep(i) 14 | % this returns a structure containing the odometry reading and all 15 | % landmark obsevations, which can be accessed as follows 16 | % - odometry reading at timestep i: 17 | % data.timestep(i).odometry 18 | % - senor reading at timestep i: 19 | % data.timestep(i).sensor 20 | % 21 | % Odometry readings have the following fields: 22 | % - r1 : rotation 1 23 | % - t : translation 24 | % - r2 : rotation 2 25 | % which correspond to the identically labeled variables in the motion 26 | % mode. 
27 | % 28 | % Sensor readings can again be indexed and each of the entris has the 29 | % following fields: 30 | % - id : id of the observed landmark 31 | % - range : measured range to the landmark 32 | % - bearing : measured angle to the landmark (you can ignore this) 33 | % 34 | % Examples: 35 | % - Translational component of the odometry reading at timestep 10 36 | % data.timestep(10).odometry.t 37 | % - Measured range to the second landmark observed at timestep 4 38 | % data.timestep(4).sensor(2).range 39 | input = fopen(filename); 40 | 41 | data = struct; 42 | data.timestep.sensor = struct; 43 | first = 1; 44 | 45 | odom = struct; 46 | sensor = struct; 47 | 48 | while(!feof(input)) 49 | line = fgetl(input); 50 | arr = strsplit(line, ' '); 51 | type = deblank(arr{1}); 52 | 53 | if(strcmp(type, 'ODOMETRY') == 1) 54 | if(first == 0) 55 | data.timestep(end+1).odometry = odom; 56 | data.timestep(end).sensor = sensor(2:end); 57 | odom = struct; 58 | sensor = struct; 59 | endif 60 | first = 0; 61 | odom.r1 = str2double(arr{2}); 62 | odom.t = str2double(arr{3}); 63 | odom.r2 = str2double(arr{4}); 64 | elseif(strcmp(type, 'SENSOR') == 1) 65 | reading = struct; 66 | reading.id = str2double(arr{2}); 67 | reading.range = str2double(arr{3}); 68 | reading.bearing = str2double(arr{4}); 69 | sensor(end+1) = reading; 70 | endif 71 | endwhile 72 | 73 | data.timestep = data.timestep(2:end); 74 | 75 | fclose(input); 76 | end 77 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/tools/read_world.m: -------------------------------------------------------------------------------- 1 | function landmarks = read_world(filename) 2 | % Reads the world definition and returns a structure of landmarks. 3 | % 4 | % filename: path of the file to load 5 | % landmarks: structure containing the parsed information 6 | % 7 | % Each landmark contains the following information: 8 | % - id : id of the landmark 9 | % - x : x-coordinate 10 | % - y : y-coordinate 11 | % 12 | % Examples: 13 | % - Obtain x-coordinate of the 5-th landmark 14 | % landmarks(5).x 15 | input = fopen(filename); 16 | 17 | landmarks = struct; 18 | 19 | while(!feof(input)) 20 | line = fgetl(input); 21 | data = strsplit(line, ' '); 22 | 23 | landmark = struct( 24 | "id", str2double(data{1}), 25 | "x" , str2double(data{2}), 26 | "y" , str2double(data{3}) 27 | ); 28 | landmarks(end+1) = landmark; 29 | endwhile 30 | 31 | landmarks = landmarks(2:end); 32 | 33 | fclose(input); 34 | end 35 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/ukf_slam.m: -------------------------------------------------------------------------------- 1 | % This is the main unscented Kalman filter SLAM loop. This script calls all the required 2 | % functions in the correct order. 3 | % 4 | % You can disable the plotting or change the number of steps the filter 5 | % runs for to ease the debugging. You should however not change the order 6 | % or calls of any of the other lines, as it might break the framework. 7 | % 8 | % If you are unsure about the input and return values of functions you 9 | % should read their documentation which tells you the expected dimensions. 10 | 11 | % Turn off pagination: 12 | close all 13 | clear all 14 | more off; 15 | 16 | format long 17 | 18 | % Make tools available 19 | addpath('tools'); 20 | 21 | % Read world data, i.e. landmarks. 
The true landmark positions are not given to the robot 22 | landmarks = read_world('../data/world.dat'); 23 | %load landmarks 24 | % Read sensor readings, i.e. odometry and range-bearing sensor 25 | data = read_data('../data/sensor_data.dat'); 26 | %load data 27 | % Initialize belief 28 | mu = zeros(3,1); 29 | sigma = 0.001*eye(3); 30 | map = []; 31 | 32 | % For computing lambda 33 | % scale = lambda + dimensionality 34 | global scale; 35 | scale = 3.0; 36 | 37 | % toogle the visualization type 38 | %showGui = true; % show a window while the algorithm runs 39 | showGui = false; % plot to files instead 40 | 41 | % Perform filter update for each odometry-observation pair read from the 42 | % data file. 43 | for t = 1:size(data.timestep, 2) 44 | disp('Time step t ='), disp(t) 45 | 46 | % Perform the prediction step of the UKF 47 | [mu, sigma] = prediction_step(mu, sigma, data.timestep(t).odometry); 48 | 49 | % Perform the correction step of the UKF 50 | [mu, sigma, map] = correction_step(mu, sigma, data.timestep(t).sensor, map); 51 | 52 | %Generate visualization plots of the current state of the filter 53 | plot_state(mu, sigma, landmarks, t, map, data.timestep(t).sensor, showGui); 54 | disp("Current state vector mu ="), disp(mu) 55 | disp("Map contains the following landmarks:"), disp(map) 56 | 57 | endfor 58 | 59 | %disp("Final system covariance matrix:"), disp(sigma) 60 | % Display the final state estimate 61 | disp("Final robot pose:") 62 | disp("mu_robot = "), disp(mu(1:3)), disp("sigma_robot = "), disp(sigma(1:3,1:3)) 63 | -------------------------------------------------------------------------------- /3_UKF_SLAM/octave/ukf_slam.m~: -------------------------------------------------------------------------------- 1 | % This is the main unscented Kalman filter SLAM loop. This script calls all the required 2 | % functions in the correct order. 3 | % 4 | % You can disable the plotting or change the number of steps the filter 5 | % runs for to ease the debugging. You should however not change the order 6 | % or calls of any of the other lines, as it might break the framework. 7 | % 8 | % If you are unsure about the input and return values of functions you 9 | % should read their documentation which tells you the expected dimensions. 10 | 11 | % Turn off pagination: 12 | close all 13 | clear all 14 | more off; 15 | 16 | format long 17 | 18 | % Make tools available 19 | addpath('tools'); 20 | 21 | % Read world data, i.e. landmarks. The true landmark positions are not given to the robot 22 | landmarks = read_world('../data/world.dat'); 23 | %load landmarks 24 | % Read sensor readings, i.e. odometry and range-bearing sensor 25 | data = read_data('../data/sensor_data.dat'); 26 | %load data 27 | % Initialize belief 28 | mu = zeros(3,1); 29 | sigma = 0.001*eye(3); 30 | map = []; 31 | 32 | % For computing lambda 33 | % scale = lambda + dimensionality 34 | global scale; 35 | scale = 3.0; 36 | 37 | % toogle the visualization type 38 | showGui = true; % show a window while the algorithm runs 39 | %showGui = false; % plot to files instead 40 | 41 | % Perform filter update for each odometry-observation pair read from the 42 | % data file. 
43 | for t = 1:size(data.timestep, 2) 44 | disp('Time step t ='), disp(t) 45 | 46 | % Perform the prediction step of the UKF 47 | [mu, sigma] = prediction_step(mu, sigma, data.timestep(t).odometry); 48 | 49 | % Perform the correction step of the UKF 50 | [mu, sigma, map] = correction_step(mu, sigma, data.timestep(t).sensor, map); 51 | 52 | %Generate visualization plots of the current state of the filter 53 | plot_state(mu, sigma, landmarks, t, map, data.timestep(t).sensor, showGui); 54 | disp("Current state vector mu ="), disp(mu) 55 | disp("Map contains the following landmarks:"), disp(map) 56 | 57 | endfor 58 | 59 | %disp("Final system covariance matrix:"), disp(sigma) 60 | % Display the final state estimate 61 | disp("Final robot pose:") 62 | disp("mu_robot = "), disp(mu(1:3)), disp("sigma_robot = "), disp(sigma(1:3,1:3)) 63 | -------------------------------------------------------------------------------- /3_UKF_SLAM/plots/ukf_slam.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/3_UKF_SLAM/plots/ukf_slam.mp4 -------------------------------------------------------------------------------- /4_Gridmapping/gridmap_0.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/4_Gridmapping/gridmap_0.1.png -------------------------------------------------------------------------------- /4_Gridmapping/gridmap_0.5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/4_Gridmapping/gridmap_0.5.png -------------------------------------------------------------------------------- /4_Gridmapping/octave/gridmap.m: -------------------------------------------------------------------------------- 1 | addpath('tools') 2 | more off 3 | close all 4 | clear all 5 | 6 | % Load laser scans and robot poses. 7 | load("../data/laser") 8 | %laser = read_robotlaser('../data/csail.log'); 9 | 10 | % Extract robot poses: Nx3 matrix where each row is in the form: [x y theta] 11 | poses = [laser.pose]; 12 | poses = reshape(poses,3,size(poses,2)/3)'; 13 | 14 | % Initial cell occupancy probability. 15 | prior = 0.50; 16 | % Probabilities related to the laser range finder sensor model. 17 | probOcc = 0.9; 18 | probFree = 0.35; 19 | 20 | % Map grid size in meters. Decrease for better resolution. 21 | gridSize = 0.1; 22 | 23 | % Set up map boundaries and initialize map. 24 | border = 30; 25 | robXMin = min(poses(:,1)); 26 | robXMax = max(poses(:,1)); 27 | robYMin = min(poses(:,2)); 28 | robYMax = max(poses(:,2)); 29 | mapBox = [robXMin-border robXMax+border robYMin-border robYMax+border]; 30 | offsetX = mapBox(1); 31 | offsetY = mapBox(3); 32 | mapSizeMeters = [mapBox(2)-offsetX mapBox(4)-offsetY]; 33 | mapSize = ceil([mapSizeMeters/gridSize]); 34 | 35 | % Used when updating the map. Assumes that prob_to_log_odds.m 36 | % has been implemented correctly. 37 | logOddsPrior = prob_to_log_odds(prior); 38 | 39 | % The occupancy value of each cell in the map is initialized with the prior. 40 | map = logOddsPrior*ones(mapSize); 41 | disp('Map initialized. Map size:'), disp(size(map)) 42 | 43 | % Map offset used when converting from world to map coordinates. 
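% A world point p = [x; y] is typically mapped to grid indices along the lines of
%   p_map = ceil((p - offset) ./ gridSize)
% (a sketch only; the exact rounding convention is whatever world_to_map_coordinates.m implements).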
44 | offset = [offsetX; offsetY]; 45 | 46 | % Main loop for updating map cells. 47 | % You can also take every other point when debugging to speed up the loop (t=1:2:size(poses,1)) 48 | for(t=1:size(poses,1)) 49 | %for(t=1:50) 50 | t 51 | % Robot pose at time t. 52 | robPose = [poses(t,1);poses(t,2);poses(t,3)]; 53 | 54 | % Laser scan made at time t. 55 | sc = laser(1,t); 56 | % Compute the mapUpdate, which contains the log odds values to add to the map. 57 | [mapUpdate, robPoseMapFrame, laserEndPntsMapFrame] = inv_sensor_model(map, sc, robPose, gridSize, offset, probOcc, probFree); 58 | 59 | mapUpdate -= logOddsPrior*ones(size(map)); 60 | % Update the occupancy values of the affected cells. 61 | map += mapUpdate; 62 | 63 | % Plot current map and robot trajectory so far. 64 | plot_map(map, mapBox, robPoseMapFrame, poses, laserEndPntsMapFrame, gridSize, offset, t); 65 | endfor 66 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/gridmap.m~: -------------------------------------------------------------------------------- 1 | addpath('tools') 2 | more off 3 | close all 4 | clear all 5 | 6 | % Load laser scans and robot poses. 7 | load("../data/laser") 8 | %laser = read_robotlaser('../data/csail.log'); 9 | 10 | % Extract robot poses: Nx3 matrix where each row is in the form: [x y theta] 11 | poses = [laser.pose]; 12 | poses = reshape(poses,3,size(poses,2)/3)'; 13 | 14 | % Initial cell occupancy probability. 15 | prior = 0.50; 16 | % Probabilities related to the laser range finder sensor model. 17 | probOcc = 0.9; 18 | probFree = 0.35; 19 | 20 | % Map grid size in meters. Decrease for better resolution. 21 | gridSize = 0.1; 22 | 23 | % Set up map boundaries and initialize map. 24 | border = 30; 25 | robXMin = min(poses(:,1)); 26 | robXMax = max(poses(:,1)); 27 | robYMin = min(poses(:,2)); 28 | robYMax = max(poses(:,2)); 29 | mapBox = [robXMin-border robXMax+border robYMin-border robYMax+border]; 30 | offsetX = mapBox(1); 31 | offsetY = mapBox(3); 32 | mapSizeMeters = [mapBox(2)-offsetX mapBox(4)-offsetY]; 33 | mapSize = ceil([mapSizeMeters/gridSize]); 34 | 35 | % Used when updating the map. Assumes that prob_to_log_odds.m 36 | % has been implemented correctly. 37 | logOddsPrior = prob_to_log_odds(prior); 38 | 39 | % The occupancy value of each cell in the map is initialized with the prior. 40 | map = logOddsPrior*ones(mapSize); 41 | disp('Map initialized. Map size:'), disp(size(map)) 42 | 43 | % Map offset used when converting from world to map coordinates. 44 | offset = [offsetX; offsetY]; 45 | 46 | % Main loop for updating map cells. 47 | % You can also take every other point when debugging to speed up the loop (t=1:2:size(poses,1)) 48 | for(t=1:size(poses,1)) 49 | %for(t=1:50) 50 | t 51 | % Robot pose at time t. 52 | robPose = [poses(t,1);poses(t,2);poses(t,3)]; 53 | 54 | % Laser scan made at time t. 55 | sc = laser(1,t); 56 | % Compute the mapUpdate, which contains the log odds values to add to the map. 57 | [mapUpdate, robPoseMapFrame, laserEndPntsMapFrame] = inv_sensor_model(map, sc, robPose, gridSize, offset, probOcc, probFree); 58 | 59 | mapUpdate -= logOddsPrior*ones(size(map)); 60 | % Update the occupancy values of the affected cells. 61 | map += mapUpdate; 62 | 63 | % Plot current map and robot trajectory so far. 
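% The map update a few lines above follows the standard occupancy grid log-odds filter:
%   l_t(cell) = l_{t-1}(cell) + inv_sensor_model(cell, scan, pose) - l_prior
% with l_prior = prob_to_log_odds(prior); for prior = 0.5 the subtracted term is zero.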
64 | plot_map(map, mapBox, robPoseMapFrame, poses, laserEndPntsMapFrame, gridSize, offset, t); 65 | endfor 66 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/inv_sensor_model.m: -------------------------------------------------------------------------------- 1 | function [mapUpdate, robPoseMapFrame, laserEndPntsMapFrame] = inv_sensor_model(map, scan, robPose, gridSize, offset, probOcc, probFree) 2 | % Compute the log odds values that should be added to the map based on the inverse sensor model 3 | % of a laser range finder. 4 | 5 | % map is the matrix containing the occupancy values (IN LOG ODDS) of each cell in the map. 6 | % scan is a laser scan made at this time step. Contains the range readings of each laser beam. 7 | % robPose is the robot pose in the world coordinates frame. 8 | % gridSize is the size of each grid in meters. 9 | % offset = [offsetX; offsetY] is the offset that needs to be subtracted from a point 10 | % when converting to map coordinates. 11 | % probOcc is the probability that a cell is occupied by an obstacle given that a 12 | % laser beam endpoint hit that cell. 13 | % probFree is the probability that a cell is occupied given that a laser beam passed through it. 14 | 15 | % mapUpdate is a matrix of the same size as map. It has the log odds values that need to be added for the cells 16 | % affected by the current laser scan. All unaffected cells should be zeros. 17 | % robPoseMapFrame is the pose of the robot in the map coordinates frame. 18 | % laserEndPntsMapFrame are map coordinates of the endpoints of each laser beam (also used for visualization purposes). 19 | 20 | % Initialize mapUpdate. 21 | size(map); 22 | mapUpdate = zeros(size(map)); 23 | 24 | % Robot pose as a homogeneous transformation matrix. 25 | robTrans = v2t(robPose); 26 | 27 | % TODO: compute robPoseMapFrame. Use your world_to_map_coordinates implementation. 28 | %%%%%%%%%%%%%%% 29 | %robPose 30 | %%%%%%%%%%%%%%% 31 | robPoseMapFrame(1:2) = world_to_map_coordinates(robPose(1:2),gridSize,offset); 32 | robPoseMapFrame(3) = robPose(3); 33 | %%%%%%%%%%%%%%% 34 | %robPoseMapFrame 35 | %%%%%%%%%%%%%%% 36 | % Compute the Cartesian coordinates of the laser beam endpoints. 37 | % Set the third argument to 'true' to use only half the beams for speeding up the algorithm when debugging. 38 | laserEndPnts = robotlaser_as_cartesian(scan, 30, false); 39 | 40 | % Compute the endpoints of the laser beams in the world coordinates frame. 41 | laserEndPnts = robTrans*laserEndPnts; 42 | 43 | % TODO: compute laserEndPntsMapFrame from laserEndPnts. Use your world_to_map_coordinates implementation. 44 | laserEndPntsMapFrame = world_to_map_coordinates(laserEndPnts(1:2,:),gridSize,offset); 45 | %laserEndPntsMapFrame(3,:) = laserEndPnts(3,:); 46 | %%%%%%%%%%%% 47 | %laserEndPntsMapFrame 48 | %%%%%%%%%%%% 49 | 50 | % freeCells are the map coordinates of the cells through which the laser beams pass. 51 | freeCells = []; 52 | 53 | % Iterate over each laser beam and compute freeCells. 54 | % Use the bresenham method available to you in tools for computing the X and Y 55 | % coordinates of the points that lie on a line. 56 | % Example use for a line between points p1 and p2: 57 | % [X,Y] = bresenham(map,[p1_x, p1_y; p2_x, p2_y]); 58 | % You only need the X and Y outputs of this function. 
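% The loop below uses the simplified helper bresenham2, which takes only the two endpoints,
% e.g. (illustrative coordinates):
%   [X, Y] = bresenham2([2, 3; 10, 7]);
% returns the coordinates of the map cells lying on the segment between cell (2,3) and cell (10,7).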
59 | for sc=1:columns(laserEndPntsMapFrame) 60 | %%% sc 61 | %TODO: compute the XY map coordinates of the free cells along the laser beam ending in laserEndPntsMapFrame(:,sc) 62 | %%%%%%%%%%%%%% 63 | %robPoseMapFrame(1) 64 | %robPoseMapFrame(2) 65 | %%%%%%%%%%%%%% 66 | % [~,~,map,X,Y] = bresenham(map,[robPoseMapFrame(1), robPoseMapFrame(2); laserEndPntsMapFrame(:,sc)'],0); 67 | [X,Y] = bresenham2([robPoseMapFrame(1), robPoseMapFrame(2); laserEndPntsMapFrame(1,sc), laserEndPntsMapFrame(2,sc)]); 68 | %TODO: add them to freeCells 69 | freeCells = [freeCells, [X;Y]]; 70 | % X 71 | % Y 72 | 73 | endfor 74 | 75 | 76 | %TODO: update the log odds values in mapUpdate for each free cell according to probFree. 77 | for i=1:size(freeCells,2) 78 | tmpR = freeCells(1,i); 79 | tmpC = freeCells(2,i); 80 | mapUpdate(tmpR,tmpC) = prob_to_log_odds(probFree); 81 | endfor 82 | 83 | %TODO: update the log odds values in mapUpdate for each laser endpoint according to probOcc. 84 | for i=1:size(laserEndPnts,2) 85 | mapUpdate(laserEndPntsMapFrame(1,i),laserEndPntsMapFrame(2,i)) = prob_to_log_odds(probOcc); 86 | endfor 87 | 88 | end 89 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/inv_sensor_model.m~: -------------------------------------------------------------------------------- 1 | function [mapUpdate, robPoseMapFrame, laserEndPntsMapFrame] = inv_sensor_model(map, scan, robPose, gridSize, offset, probOcc, probFree) 2 | % Compute the log odds values that should be added to the map based on the inverse sensor model 3 | % of a laser range finder. 4 | 5 | % map is the matrix containing the occupancy values (IN LOG ODDS) of each cell in the map. 6 | % scan is a laser scan made at this time step. Contains the range readings of each laser beam. 7 | % robPose is the robot pose in the world coordinates frame. 8 | % gridSize is the size of each grid in meters. 9 | % offset = [offsetX; offsetY] is the offset that needs to be subtracted from a point 10 | % when converting to map coordinates. 11 | % probOcc is the probability that a cell is occupied by an obstacle given that a 12 | % laser beam endpoint hit that cell. 13 | % probFree is the probability that a cell is occupied given that a laser beam passed through it. 14 | 15 | % mapUpdate is a matrix of the same size as map. It has the log odds values that need to be added for the cells 16 | % affected by the current laser scan. All unaffected cells should be zeros. 17 | % robPoseMapFrame is the pose of the robot in the map coordinates frame. 18 | % laserEndPntsMapFrame are map coordinates of the endpoints of each laser beam (also used for visualization purposes). 19 | 20 | % Initialize mapUpdate. 21 | size(map); 22 | mapUpdate = zeros(size(map)); 23 | 24 | % Robot pose as a homogeneous transformation matrix. 25 | robTrans = v2t(robPose); 26 | 27 | % TODO: compute robPoseMapFrame. Use your world_to_map_coordinates implementation. 28 | %%%%%%%%%%%%%%% 29 | %robPose 30 | %%%%%%%%%%%%%%% 31 | robPoseMapFrame(1:2) = world_to_map_coordinates(robPose(1:2),gridSize,offset); 32 | robPoseMapFrame(3) = robPose(3); 33 | %%%%%%%%%%%%%%% 34 | %robPoseMapFrame 35 | %%%%%%%%%%%%%%% 36 | % Compute the Cartesian coordinates of the laser beam endpoints. 37 | % Set the third argument to 'true' to use only half the beams for speeding up the algorithm when debugging. 38 | laserEndPnts = robotlaser_as_cartesian(scan, 30, false); 39 | 40 | % Compute the endpoints of the laser beams in the world coordinates frame. 
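% robTrans = v2t(robPose) is assumed to be the standard homogeneous transform of the pose
% [x; y; theta] (the counterpart of t2v in tools), i.e. roughly
%   [cos(theta) -sin(theta) x;
%    sin(theta)  cos(theta) y;
%    0           0          1]
% so the multiplication on the next line maps homogeneous points into the world frame.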
41 | laserEndPnts = robTrans*laserEndPnts; 42 | 43 | % TODO: compute laserEndPntsMapFrame from laserEndPnts. Use your world_to_map_coordinates implementation. 44 | laserEndPntsMapFrame = world_to_map_coordinates(laserEndPnts(1:2,:),gridSize,offset); 45 | %laserEndPntsMapFrame(3,:) = laserEndPnts(3,:); 46 | %%%%%%%%%%%% 47 | %laserEndPntsMapFrame 48 | %%%%%%%%%%%% 49 | 50 | % freeCells are the map coordinates of the cells through which the laser beams pass. 51 | freeCells = []; 52 | 53 | % Iterate over each laser beam and compute freeCells. 54 | % Use the bresenham method available to you in tools for computing the X and Y 55 | % coordinates of the points that lie on a line. 56 | % Example use for a line between points p1 and p2: 57 | % [X,Y] = bresenham(map,[p1_x, p1_y; p2_x, p2_y]); 58 | % You only need the X and Y outputs of this function. 59 | for sc=1:columns(laserEndPntsMapFrame) 60 | %%% sc 61 | %TODO: compute the XY map coordinates of the free cells along the laser beam ending in laserEndPntsMapFrame(:,sc) 62 | %%%%%%%%%%%%%% 63 | %robPoseMapFrame(1) 64 | %robPoseMapFrame(2) 65 | %%%%%%%%%%%%%% 66 | % [~,~,map,X,Y] = bresenham(map,[robPoseMapFrame(1), robPoseMapFrame(2); laserEndPntsMapFrame(:,sc)'],0); 67 | [X,Y] = bresenham2([robPoseMapFrame(1), robPoseMapFrame(2); laserEndPntsMapFrame(1,sc), laserEndPntsMapFrame(2,sc)]); 68 | %TODO: add them to freeCells 69 | freeCells = [freeCells, [X;Y]]; 70 | % X 71 | % Y 72 | 73 | endfor 74 | 75 | 76 | %TODO: update the log odds values in mapUpdate for each free cell according to probFree. 77 | for i=1:size(freeCells,2) 78 | tmpR = freeCells(1,i); 79 | tmpC = freeCells(2,i); 80 | mapUpdate(tmpR,tmpC) = prob_to_log_odds(probFree); 81 | endfor 82 | 83 | %TODO: update the log odds values in mapUpdate for each laser endpoint according to probOcc. 84 | for i=1:size(laserEndPnts,2) 85 | mapUpdate(laserEndPntsMapFrame(1,i),laserEndPntsMapFrame(2,i)) = prob_to_log_odds(probOcc); 86 | endfor 87 | 88 | end 89 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/log_odds_to_prob.m: -------------------------------------------------------------------------------- 1 | function p = log_odds_to_prob(l) 2 | % Convert log odds l to the corresponding probability values p. 3 | % l could be a scalar or a matrix. 4 | 5 | % TODO: compute p. 6 | p=[]; 7 | for i = 1:size(l,1) 8 | for j = 1:size(l,2) 9 | p(i,j) = 1 - 1/(1 + exp(l(i,j))); 10 | endfor 11 | endfor 12 | 13 | end 14 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/log_odds_to_prob.m~: -------------------------------------------------------------------------------- 1 | function p = log_odds_to_prob(l) 2 | % Convert log odds l to the corresponding probability values p. 3 | % l could be a scalar or a matrix. 4 | 5 | % TODO: compute p. 
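% Equivalent vectorized one-liners for these two conversions, working for scalars and
% matrices alike (sketch):
%   p = 1 ./ (1 + exp(-l));    % log odds    -> probability
%   l = log(p ./ (1 - p));     % probability -> log odds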
6 | p=[]; 7 | for i = 1:size(l,1) 8 | for j = 1:size(l,2) 9 | p(i,j) = 1 - 1/(1 + exp(l(i,j))); 10 | endfor 11 | 12 | end 13 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/octave-workspace: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/4_Gridmapping/octave/octave-workspace -------------------------------------------------------------------------------- /4_Gridmapping/octave/prob_to_log_odds.m: -------------------------------------------------------------------------------- 1 | function l=prob_to_log_odds(p) 2 | % Convert proability values p to the corresponding log odds l. 3 | % p could be a scalar or a matrix. 4 | 5 | % TODO: compute l. 6 | for i=1:size(p,1) 7 | for j=1:size(p,2) 8 | l(i,j) = log(p(i,j)/(1-p(i,j))); 9 | endfor 10 | endfor 11 | end 12 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/prob_to_log_odds.m~: -------------------------------------------------------------------------------- 1 | function l=prob_to_log_odds(p) 2 | % Convert proability values p to the corresponding log odds l. 3 | % p could be a scalar or a matrix. 4 | 5 | % TODO: compute l. 6 | for i=1:size(p,1) 7 | for j=1:size(p,2) 8 | l(i,j) = log(p(i,j)/(1-p(i,j))); 9 | endfor 10 | endfor 11 | end 12 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/tools/bresenham.m: -------------------------------------------------------------------------------- 1 | function [myline,mycoords,outmat,X,Y] = bresenham(mymat,mycoordinates,dispFlag) 2 | 3 | % BRESENHAM: Generate a line profile of a 2d image 4 | % using Bresenham's algorithm 5 | % [myline,mycoords] = bresenham(mymat,mycoordinates,dispFlag) 6 | % 7 | % - For a demo purpose, try >> bresenham(); 8 | % 9 | % - mymat is an input image matrix. 10 | % 11 | % - mycoordinates is coordinate of the form: [x1, y1; x2, y2] 12 | % which can be obtained from ginput function 13 | % 14 | % - dispFlag will show the image with a line if it is 1 15 | % 16 | % - myline is the output line 17 | % 18 | % - mycoords is the same as mycoordinates if provided. 19 | % if not it will be the output from ginput() 20 | % Author: N. 
Chattrapiban 21 | % 22 | % Ref: nprotech: Chackrit Sangkaew; Citec 23 | % Ref: http://en.wikipedia.org/wiki/Bresenham's_line_algorithm 24 | % 25 | % See also: tut_line_algorithm 26 | 27 | if nargin < 1, % for demo purpose 28 | pxl = 20; 29 | mymat = 1:pxl^2; 30 | mymat = reshape(mymat,pxl,pxl); 31 | disp('This is a demo.') 32 | end 33 | 34 | if nargin < 2, % if no coordinate provided 35 | imagesc(mymat); axis image; 36 | disp('Click two points on the image.') 37 | %[mycoordinates(1:2),mycoordinates(3:4)] = ginput(2); 38 | mycoordinates = ginput(2); 39 | end 40 | 41 | if nargin < 3, dispFlag = 1; end 42 | 43 | outmat = mymat; 44 | mycoords = mycoordinates; 45 | 46 | x = round(mycoords(:,1)); 47 | y = round(mycoords(:,2)); 48 | steep = (abs(y(2)-y(1)) > abs(x(2)-x(1))); 49 | 50 | if steep, [x,y] = swap(x,y); end 51 | 52 | if x(1)>x(2), 53 | [x(1),x(2)] = swap(x(1),x(2)); 54 | [y(1),y(2)] = swap(y(1),y(2)); 55 | end 56 | 57 | delx = x(2)-x(1); 58 | dely = abs(y(2)-y(1)); 59 | error = 0; 60 | x_n = x(1); 61 | y_n = y(1); 62 | if y(1) < y(2), ystep = 1; else ystep = -1; end 63 | for n = 1:delx+1 64 | if steep, 65 | myline(n) = mymat(x_n,y_n); 66 | outmat(x_n,y_n) = 0; 67 | X(n) = x_n; 68 | Y(n) = y_n; 69 | else 70 | myline(n) = mymat(y_n,x_n); 71 | outmat(y_n,x_n) = 0; 72 | X(n) = y_n; 73 | Y(n) = x_n; 74 | end 75 | x_n = x_n + 1; 76 | error = error + dely; 77 | if bitshift(error,1) >= delx, % same as -> if 2*error >= delx, 78 | y_n = y_n + ystep; 79 | error = error - delx; 80 | end 81 | end 82 | % -> a(y,x) 83 | if dispFlag, imagesc(outmat); end 84 | %plot(1:delx,myline) 85 | 86 | function [q,r] = swap(s,t) 87 | % function SWAP 88 | q = t; r = s; 89 | 90 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/tools/bresenham.m~: -------------------------------------------------------------------------------- 1 | function [X,Y] = bresenham(mycoords) 2 | 3 | % BRESENHAM: Generate a line profile of a 2d image 4 | % using Bresenham's algorithm 5 | % [myline,mycoords] = bresenham(mymat,mycoords,dispFlag) 6 | % 7 | % - For a demo purpose, try >> bresenham(); 8 | % 9 | % - mymat is an input image matrix. 10 | % 11 | % - mycoords is coordinate of the form: [x1, y1; x2, y2] 12 | % which can be obtained from ginput function 13 | % 14 | % Author: N. 
Chattrapiban 15 | % 16 | % Ref: nprotech: Chackrit Sangkaew; Citec 17 | % Ref: http://en.wikipedia.org/wiki/Bresenham's_line_algorithm 18 | % 19 | % See also: tut_line_algorithm 20 | 21 | x = round(mycoords(:,1)); 22 | y = round(mycoords(:,2)); 23 | steep = (abs(y(2)-y(1)) > abs(x(2)-x(1))); 24 | 25 | if steep, [x,y] = swap(x,y); end 26 | 27 | if x(1)>x(2), 28 | [x(1),x(2)] = swap(x(1),x(2)); 29 | [y(1),y(2)] = swap(y(1),y(2)); 30 | end 31 | 32 | delx = x(2)-x(1); 33 | dely = abs(y(2)-y(1)); 34 | error = 0; 35 | x_n = x(1); 36 | y_n = y(1); 37 | if y(1) < y(2), ystep = 1; else ystep = -1; end 38 | for n = 1:delx+1 39 | if steep, 40 | X(n) = x_n; 41 | Y(n) = y_n; 42 | else 43 | X(n) = y_n; 44 | Y(n) = x_n; 45 | end 46 | x_n = x_n + 1; 47 | error = error + dely; 48 | if bitshift(error,1) >= delx, % same as -> if 2*error >= delx, 49 | y_n = y_n + ystep; 50 | error = error - delx; 51 | end 52 | end 53 | 54 | temp = X; 55 | X = Y; 56 | Y = temp; 57 | 58 | 59 | function [q,r] = swap(s,t) 60 | % function SWAP 61 | q = t; r = s; 62 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/tools/bresenham2.m: -------------------------------------------------------------------------------- 1 | function [X,Y] = bresenham2(mycoords) 2 | 3 | % BRESENHAM: Generate a line profile of a 2d image 4 | % using Bresenham's algorithm 5 | % [myline,mycoords] = bresenham(mymat,mycoords,dispFlag) 6 | % 7 | % - For a demo purpose, try >> bresenham(); 8 | % 9 | % - mymat is an input image matrix. 10 | % 11 | % - mycoords is coordinate of the form: [x1, y1; x2, y2] 12 | % which can be obtained from ginput function 13 | % 14 | % Author: N. Chattrapiban 15 | % 16 | % Ref: nprotech: Chackrit Sangkaew; Citec 17 | % Ref: http://en.wikipedia.org/wiki/Bresenham's_line_algorithm 18 | % 19 | % See also: tut_line_algorithm 20 | 21 | x = round(mycoords(:,1)); 22 | y = round(mycoords(:,2)); 23 | steep = (abs(y(2)-y(1)) > abs(x(2)-x(1))); 24 | 25 | if steep, [x,y] = swap(x,y); end 26 | 27 | if x(1)>x(2), 28 | [x(1),x(2)] = swap(x(1),x(2)); 29 | [y(1),y(2)] = swap(y(1),y(2)); 30 | end 31 | 32 | delx = x(2)-x(1); 33 | dely = abs(y(2)-y(1)); 34 | error = 0; 35 | x_n = x(1); 36 | y_n = y(1); 37 | if y(1) < y(2), ystep = 1; else ystep = -1; end 38 | for n = 1:delx+1 39 | if steep, 40 | X(n) = x_n; 41 | Y(n) = y_n; 42 | else 43 | X(n) = y_n; 44 | Y(n) = x_n; 45 | end 46 | x_n = x_n + 1; 47 | error = error + dely; 48 | if bitshift(error,1) >= delx, % same as -> if 2*error >= delx, 49 | y_n = y_n + ystep; 50 | error = error - delx; 51 | end 52 | end 53 | 54 | temp = X; 55 | X = Y; 56 | Y = temp; 57 | 58 | 59 | function [q,r] = swap(s,t) 60 | % function SWAP 61 | q = t; r = s; 62 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/tools/bresenham2.m~: -------------------------------------------------------------------------------- 1 | function [X,Y] = bresenham(mycoords) 2 | 3 | % BRESENHAM: Generate a line profile of a 2d image 4 | % using Bresenham's algorithm 5 | % [myline,mycoords] = bresenham(mymat,mycoords,dispFlag) 6 | % 7 | % - For a demo purpose, try >> bresenham(); 8 | % 9 | % - mymat is an input image matrix. 10 | % 11 | % - mycoords is coordinate of the form: [x1, y1; x2, y2] 12 | % which can be obtained from ginput function 13 | % 14 | % Author: N. 
Chattrapiban 15 | % 16 | % Ref: nprotech: Chackrit Sangkaew; Citec 17 | % Ref: http://en.wikipedia.org/wiki/Bresenham's_line_algorithm 18 | % 19 | % See also: tut_line_algorithm 20 | 21 | x = round(mycoords(:,1)); 22 | y = round(mycoords(:,2)); 23 | steep = (abs(y(2)-y(1)) > abs(x(2)-x(1))); 24 | 25 | if steep, [x,y] = swap(x,y); end 26 | 27 | if x(1)>x(2), 28 | [x(1),x(2)] = swap(x(1),x(2)); 29 | [y(1),y(2)] = swap(y(1),y(2)); 30 | end 31 | 32 | delx = x(2)-x(1); 33 | dely = abs(y(2)-y(1)); 34 | error = 0; 35 | x_n = x(1); 36 | y_n = y(1); 37 | if y(1) < y(2), ystep = 1; else ystep = -1; end 38 | for n = 1:delx+1 39 | if steep, 40 | X(n) = x_n; 41 | Y(n) = y_n; 42 | else 43 | X(n) = y_n; 44 | Y(n) = x_n; 45 | end 46 | x_n = x_n + 1; 47 | error = error + dely; 48 | if bitshift(error,1) >= delx, % same as -> if 2*error >= delx, 49 | y_n = y_n + ystep; 50 | error = error - delx; 51 | end 52 | end 53 | 54 | temp = X; 55 | X = Y; 56 | Y = temp; 57 | 58 | 59 | function [q,r] = swap(s,t) 60 | % function SWAP 61 | q = t; r = s; 62 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/tools/plot_map.m: -------------------------------------------------------------------------------- 1 | function plot_map(map, mapBox, robPoseMapFrame, poses, laserEndPntsMapFrame, gridSize, offset, t) 2 | 3 | close all 4 | % figure 5 | graphics_toolkit gnuplot 6 | f = figure( "visible", "off"); 7 | axis(mapBox); 8 | hold on; 9 | map = map'; 10 | imshow(ones(size(map)) - log_odds_to_prob(map)) 11 | s = size(map)(1:2); 12 | set(gcf, "position", [50 50 s*5]) 13 | set(gca, "position", [.05 .05 .9 .9]) 14 | traj = [poses(1:t,1)';poses(1:t,2)']; 15 | traj = world_to_map_coordinates(traj, gridSize, offset); 16 | plot(traj(1,:),traj(2,:),'g') 17 | plot(robPoseMapFrame(1),robPoseMapFrame(2),'bo','markersize',5,'linewidth',4) 18 | plot(laserEndPntsMapFrame(1,:),laserEndPntsMapFrame(2,:),'ro','markersize',2) 19 | filename = sprintf('../plots/gridmap_%03d.png', t); 20 | print(f, filename, '-dpng'); 21 | close all; 22 | 23 | end 24 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/tools/plot_map.m~: -------------------------------------------------------------------------------- 1 | function plot_map(map, mapBox, robPoseMapFrame, poses, laserEndPntsMapFrame, gridSize, offset, t) 2 | 3 | close all 4 | % figure 5 | graphics_toolkit gnuplot 6 | f = figure( "visible", "off"); 7 | axis(mapBox); 8 | hold on; 9 | map = map'; 10 | imshow(ones(size(map)) - log_odds_to_prob(map)) 11 | s = size(map)(1:2); 12 | set(gcf, "position", [50 50 s*5]) 13 | set(gca, "position", [.05 .05 .9 .9]) 14 | traj = [poses(1:t,1)';poses(1:t,2)']; 15 | traj = world_to_map_coordinates(traj, gridSize, offset); 16 | plot(traj(1,:),traj(2,:),'g') 17 | plot(robPoseMapFrame(1),robPoseMapFrame(2),'bo','markersize',5,'linewidth',4) 18 | plot(laserEndPntsMapFrame(1,:),laserEndPntsMapFrame(2,:),'ro','markersize',2) 19 | filename = sprintf('../plots/gridmap_%03d.png', t); 20 | print(f, filename, '-dpng'); 21 | close all; 22 | 23 | end 24 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/tools/read_robotlaser.m: -------------------------------------------------------------------------------- 1 | % read a file containing ROBOTLASER1 in CARMEN logfile format 2 | function laser=read_robotlaser(filename) 3 | 4 | fid = fopen(filename, 'r'); 5 | laser = cell(); 6 | 7 | while true 8 | ln = fgetl(fid); 9 | if (ln == -1) 
10 | break 11 | endif 12 | tokens = strsplit(ln, ' ', true); 13 | 14 | if (strcmp(tokens(1), "ROBOTLASER1") == 0) 15 | continue; 16 | endif 17 | 18 | num_tokens = str2double(tokens); 19 | 20 | currentReading = struct ( 21 | "start_angle", 0, 22 | "angular_resolution", 0, 23 | "maximum_range", 0, 24 | "ranges", [], 25 | "pose", zeros(3,1), 26 | "laser_offset", zeros(3,1), 27 | "timestamp", 0 28 | ); 29 | 30 | tk = 3; 31 | currentReading.start_angle = num_tokens(tk++); 32 | tk++; % skip FOV 33 | currentReading.angular_resolution = num_tokens(tk++); 34 | currentReading.maximum_range = num_tokens(tk++); 35 | tk += 2; % skip accuracy, remission_mode 36 | 37 | num_readings = int32(num_tokens(tk++)); 38 | currentReading.ranges = num_tokens(tk:tk+num_readings-1); 39 | tk += num_readings; 40 | 41 | num_remissions = int32(num_tokens(tk++)); % skip reading the remission values 42 | 43 | tk += num_remissions; 44 | 45 | laser_pose = num_tokens(tk:tk+2); 46 | tk += 3; 47 | currentReading.pose = num_tokens(tk:tk+2); 48 | tk += 3; 49 | 50 | currentReading.laser_offset = t2v(inv(v2t(currentReading.pose)) * v2t(laser_pose)); 51 | 52 | tk += 5; % skip tv, rv, forward, side, turn 53 | currentReading.timestamp = num_tokens(tk++); 54 | 55 | laser{end+1} = currentReading; 56 | 57 | endwhile 58 | 59 | laser = cell2mat(laser); 60 | 61 | end 62 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/tools/robotlaser_as_cartesian.m: -------------------------------------------------------------------------------- 1 | function points = robotlaser_as_cartesian(rl, maxRange = 15, subsample = false) 2 | 3 | numBeams = length(rl.ranges); 4 | maxRange=min(maxRange, rl.maximum_range); 5 | % apply the max range 6 | idx = rl.ranges<maxRange & rl.ranges>0; 7 | 8 | if (subsample) 9 | idx(2:2:end) = 0; 10 | endif 11 | 12 | angles = linspace(rl.start_angle, rl.start_angle + numBeams*rl.angular_resolution, numBeams)(idx); 13 | points = [rl.ranges(idx) .* cos(angles); rl.ranges(idx) .* sin(angles); ones(1, length(angles))]; 14 | transf = v2t(rl.laser_offset); 15 | 16 | % apply the laser offset 17 | points = transf * points; 18 | 19 | end 20 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/tools/robotlaser_as_cartesian.m~: -------------------------------------------------------------------------------- 1 | function points = robotlaser_as_cartesian(rl, maxRange = 15, subsample = false) 2 | 3 | numBeams = length(rl.ranges); 4 | maxRange=min(maxRange, rl.maximum_range); 5 | % apply the max range 6 | idx = rl.ranges<maxRange & rl.ranges>0; 7 | 8 | if (subsample) 9 | idx(2:2:end) = 0; 10 | endif 11 | 12 | angles = linspace(rl.start_angle, rl.start_angle + numBeams*rl.angular_resolution, numBeams)(idx); 13 | points = [rl.ranges(idx) .* cos(angles); rl.ranges(idx) .* sin(angles); ones(1, length(angles))]; 14 | transf = v2t(rl.laser_offset); 15 | 16 | % apply the laser offset 17 | points = transf * points; 18 | 19 | end 20 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/tools/t2v.m: -------------------------------------------------------------------------------- 1 | #computes the pose vector v from a homogeneous transform A 2 | function v=t2v(A) 3 | v(1:2, 1)=A(1:2,3); 4 | v(3,1)=atan2(A(2,1),A(1,1)); 5 | end 6 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/tools/v2t.m: -------------------------------------------------------------------------------- 1 | #computes the
homogeneous transform matrix A of the pose vector v 2 | function A=v2t(v) 3 | c=cos(v(3)); 4 | s=sin(v(3)); 5 | A=[c, -s, v(1); 6 | s, c, v(2); 7 | 0 0 1 ]; 8 | end 9 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/world_to_map_coordinates.m: -------------------------------------------------------------------------------- 1 | function [pntsMap] = world_to_map_coordinates(pntsWorld, gridSize, offset) 2 | % Convert points from the world coordinates frame to the map frame. 3 | % pntsWorld is a matrix of N points with each column representing a point in world coordinates (meters). 4 | % gridSize is the size of each grid in meters. 5 | % offset = [offsetX; offsetY] is the offset that needs to be subtracted from a point 6 | % when converting to map coordinates. 7 | % pntsMap is a 2xN matrix containing the corresponding points in map coordinates. 8 | 9 | % TODO: compute pntsMap 10 | pntsMap = floor((pntsWorld - repmat(offset,1,size(pntsWorld,2)))./gridSize); 11 | 12 | end 13 | -------------------------------------------------------------------------------- /4_Gridmapping/octave/world_to_map_coordinates.m~: -------------------------------------------------------------------------------- 1 | function [pntsMap] = world_to_map_coordinates(pntsWorld, gridSize, offset) 2 | % Convert points from the world coordinates frame to the map frame. 3 | % pntsWorld is a matrix of N points with each column representing a point in world coordinates (meters). 4 | % gridSize is the size of each grid in meters. 5 | % offset = [offsetX; offsetY] is the offset that needs to be subtracted from a point 6 | % when converting to map coordinates. 7 | % pntsMap is a 2xN matrix containing the corresponding points in map coordinates. 8 | 9 | % TODO: compute pntsMap 10 | pntsMap = floor((pntsWorld - repmat(offset,1,size(pntsWorld,2)))./gridSize); 11 | 12 | end 13 | -------------------------------------------------------------------------------- /4_Gridmapping/plot_0.1_resolution/gridmap_res_0_1.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/4_Gridmapping/plot_0.1_resolution/gridmap_res_0_1.mp4 -------------------------------------------------------------------------------- /4_Gridmapping/plot_0.5_resolution/gridmap_res_0_5.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/4_Gridmapping/plot_0.5_resolution/gridmap_res_0_5.mp4 -------------------------------------------------------------------------------- /5_Particle_Filters/octave/motion.m: -------------------------------------------------------------------------------- 1 | % Turn off pagination: 2 | more off; 3 | 4 | clear all; 5 | close all; 6 | 7 | % Make tools available 8 | addpath('tools'); 9 | 10 | % Read sensor readings, i.e. odometry 11 | data = read_data('../data/odometry.dat'); 12 | 13 | noise = [0.005, 0.01, 0.005]'; 14 | 15 | % how many particles 16 | numParticles = 100; 17 | 18 | % initialize the particles array 19 | particles = struct; 20 | for i = 1:numParticles 21 | particles(i).weight = 1. / numParticles; 22 | particles(i).pose = zeros(3,1); 23 | particles(i).history = cell(); 24 | end 25 | 26 | % Perform filter update for each odometry-observation read from the 27 | % data file. 
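% Note on the loop below: each timestep only performs a sampling-based prediction
% followed by a plot; this script demonstrates the motion model alone, without a
% correction or resampling step. The entries of `noise` act as standard deviations
% for the r1, translation, and r2 components of each odometry reading
% (see prediction_step.m).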
28 | for t = 1:size(data.timestep, 2) 29 | %for t = 1:50 30 | printf('timestep = %d\n', t); 31 | 32 | % Perform the prediction step of the particle filter 33 | particles = prediction_step(particles, data.timestep(t).odometry, noise); 34 | 35 | % Generate visualization plots of the current state of the filter 36 | plot_state(particles, t); 37 | end 38 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/prediction_step.m: -------------------------------------------------------------------------------- 1 | function particles = prediction_step(particles, u, noise) 2 | % Updates the particles by drawing from the motion model 3 | % Use u.r1, u.t, and u.r2 to access the rotation and translation values 4 | % which have to be pertubated with Gaussian noise. 5 | % The position of the i-th particle is given by the 3D vector 6 | % particles(i).pose which represents (x, y, theta). 7 | 8 | % noise parameters 9 | % Assume Gaussian noise in each of the three parameters of the motion model. 10 | % These three parameters may be used as standard deviations for sampling. 11 | r1Noise = noise(1); 12 | transNoise = noise(2); 13 | r2Noise = noise(3); 14 | 15 | numParticles = length(particles); 16 | 17 | for i = 1:numParticles 18 | 19 | % append the old position to the history of the particle 20 | particles(i).history{end+1} = particles(i).pose; 21 | 22 | % TODO: sample a new pose for the particle 23 | mu = particles(i).pose + [u.t*cos(u.r1+particles(i).pose(3)); u.t*sin(u.r1+particles(i).pose(3)); normalize_angle(u.r1+u.r2)]; 24 | mu(3) = normalize_angle(mu(3)); 25 | particles(i).pose(1) = normrnd(mu(1),r1Noise+transNoise+r2Noise); 26 | particles(i).pose(2) = normrnd(mu(2),r1Noise+transNoise+r2Noise); 27 | particles(i).pose(3) = normrnd(mu(3),r1Noise+r2Noise); 28 | 29 | end 30 | 31 | end 32 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/prediction_step.m~: -------------------------------------------------------------------------------- 1 | function particles = prediction_step(particles, u, noise) 2 | % Updates the particles by drawing from the motion model 3 | % Use u.r1, u.t, and u.r2 to access the rotation and translation values 4 | % which have to be pertubated with Gaussian noise. 5 | % The position of the i-th particle is given by the 3D vector 6 | % particles(i).pose which represents (x, y, theta). 7 | 8 | % noise parameters 9 | % Assume Gaussian noise in each of the three parameters of the motion model. 10 | % These three parameters may be used as standard deviations for sampling. 
11 | r1Noise = noise(1); 12 | transNoise = noise(2); 13 | r2Noise = noise(3); 14 | 15 | numParticles = length(particles); 16 | 17 | for i = 1:numParticles 18 | 19 | % append the old position to the history of the particle 20 | particles(i).history{end+1} = particles(i).pose; 21 | 22 | % TODO: sample a new pose for the particle 23 | mu = particles(i).pose + [u.t*cos(u.r1+particles(i).pose(3)); u.t*sin(u.r1+particles(i).pose(3)); normalie_angle(u.r1+u.r2)]; 24 | mu(3) = normalize_angle(mu(3)); 25 | particles(i).pose(1) = normrnd(mu(1),r1Noise+transNoise+r2Noise); 26 | particles(i).pose(2) = normrnd(mu(2),r1Noise+transNoise+r2Noise); 27 | particles(i).pose(3) = normrnd(mu(3),r1Noise+r2Noise); 28 | 29 | end 30 | 31 | end 32 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/resample.m: -------------------------------------------------------------------------------- 1 | % resample the set of particles. 2 | % A particle has a probability proportional to its weight to get 3 | % selected. A good option for such a resampling method is the so-called low 4 | % variance sampling, Probabilistic Robotics pg. 109 5 | function newParticles = resample(particles) 6 | 7 | numParticles = length(particles); 8 | 9 | w = [particles.weight]; 10 | 11 | % normalize the weight 12 | w = w / sum(w); 13 | 14 | % consider number of effective particles, to decide whether to resample or not 15 | useNeff = false; 16 | %useNeff = true; 17 | if useNeff 18 | neff = 1. / sum(w.^2); 19 | neff 20 | if neff > 0.5*numParticles 21 | newParticles = particles; 22 | for i = 1:numParticles 23 | newParticles(i).weight = w(i); 24 | end 25 | return; 26 | end 27 | end 28 | 29 | newParticles = struct; 30 | 31 | % TODO: implement the low variance re-sampling 32 | M = numParticles; 33 | r = 1/M * rand(); 34 | c = w(1); 35 | i = 1; 36 | for m = 1:M 37 | U = r + (m-1)/M; 38 | while U>c 39 | i = i + 1; 40 | c = c + w(i); 41 | endwhile 42 | newParticles(m) = particles(i); 43 | endfor 44 | % the cumulative sum 45 | 46 | % initialize the step and the current position on the roulette wheel 47 | 48 | % walk along the wheel to select the particles 49 | %for i = 1:numParticles 50 | 51 | %end 52 | 53 | end 54 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/resample.m~: -------------------------------------------------------------------------------- 1 | % resample the set of particles. 2 | % A particle has a probability proportional to its weight to get 3 | % selected. A good option for such a resampling method is the so-called low 4 | % variance sampling, Probabilistic Robotics pg. 109 5 | function newParticles = resample(particles) 6 | 7 | numParticles = length(particles); 8 | 9 | w = [particles.weight]; 10 | 11 | % normalize the weight 12 | w = w / sum(w); 13 | 14 | % consider number of effective particles, to decide whether to resample or not 15 | %useNeff = false; 16 | useNeff = true; 17 | if useNeff 18 | neff = 1. 
/ sum(w.^2); 19 | neff 20 | if neff > 0.5*numParticles 21 | newParticles = particles; 22 | for i = 1:numParticles 23 | newParticles(i).weight = w(i); 24 | end 25 | return; 26 | end 27 | end 28 | 29 | newParticles = struct; 30 | 31 | % TODO: implement the low variance re-sampling 32 | M = numParticles; 33 | r = 1/M * rand(); 34 | c = w(1); 35 | i = 1; 36 | for m = 1:M 37 | U = r + (m-1)/M; 38 | while U>c 39 | i = i + 1; 40 | c = c + w(i); 41 | endwhile 42 | newParticles(m) = particles(i); 43 | endfor 44 | % the cumulative sum 45 | 46 | % initialize the step and the current position on the roulette wheel 47 | 48 | % walk along the wheel to select the particles 49 | %for i = 1:numParticles 50 | 51 | %end 52 | 53 | end 54 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/resampling.m: -------------------------------------------------------------------------------- 1 | % Turn off pagination: 2 | more off; 3 | 4 | clear all; 5 | close all; 6 | 7 | % how many particles 8 | numParticles = 1000; 9 | 10 | % initialize the particles array 11 | particles = struct; 12 | for i = 1:numParticles 13 | particles(i).weight = 1. / numParticles; 14 | particles(i).pose = normrnd([0 0]', [1 2]'); 15 | particles(i).history = cell(); 16 | end 17 | 18 | 19 | % re-weight the particles according to their distance to [0 0] 20 | sigma = diag([0.2 0.2]); 21 | for i = 1:numParticles 22 | particles(i).weight = exp(-1/2 * particles(i).pose' * inv(sigma) * particles(i).pose); 23 | end 24 | 25 | resampledParticles = resample(particles); 26 | 27 | % plot the particles before (red) and after resampling (blue) 28 | bpos = [particles.pose]; 29 | apos = [resampledParticles.pose]; 30 | plot(bpos(1,:), bpos(2,:), 'r+', 'markersize', 5, apos(1,:), apos(2,:), 'b*', 'markersize', 5); 31 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/tools/.svn/all-wcprops: -------------------------------------------------------------------------------- 1 | K 25 2 | svn:wc:ra_dav:version-url 3 | V 106 4 | /svn/lecturematerial-robotmapping/!svn/ver/140/exercises-ws12/sheet5_pf/pf_framework_solution/octave/tools 5 | END 6 | normalize_angle.m 7 | K 25 8 | svn:wc:ra_dav:version-url 9 | V 124 10 | /svn/lecturematerial-robotmapping/!svn/ver/140/exercises-ws12/sheet5_pf/pf_framework_solution/octave/tools/normalize_angle.m 11 | END 12 | plot_state.m 13 | K 25 14 | svn:wc:ra_dav:version-url 15 | V 119 16 | /svn/lecturematerial-robotmapping/!svn/ver/140/exercises-ws12/sheet5_pf/pf_framework_solution/octave/tools/plot_state.m 17 | END 18 | read_data.m 19 | K 25 20 | svn:wc:ra_dav:version-url 21 | V 118 22 | /svn/lecturematerial-robotmapping/!svn/ver/140/exercises-ws12/sheet5_pf/pf_framework_solution/octave/tools/read_data.m 23 | END 24 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/tools/.svn/entries: -------------------------------------------------------------------------------- 1 | 10 2 | 3 | dir 4 | 170 5 | https://aissvn.informatik.uni-freiburg.de/svn/lecturematerial-robotmapping/exercises-ws12/sheet5_pf/pf_framework_solution/octave/tools 6 | https://aissvn.informatik.uni-freiburg.de/svn/lecturematerial-robotmapping 7 | 8 | 9 | 10 | 2012-11-28T13:46:12.149350Z 11 | 66 12 | kuemmerl 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | a4761de2-5f49-44d7-a034-7133aa1e2b13 28 | 29 | read_data.m 30 | file 31 | 32 | 33 | 34 | 35 | 2013-11-01T11:00:22.000000Z 
36 | 4f7c92f0b69de7427fe9495c5202ee53 37 | 2012-11-22T11:05:27.811862Z 38 | 60 39 | kuemmerl 40 | has-props 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 2491 62 | 63 | normalize_angle.m 64 | file 65 | 66 | 67 | 68 | 69 | 2013-11-01T11:00:22.000000Z 70 | 1cdc82b03afc90f81db2b9baeb02bfc9 71 | 2012-11-22T11:05:27.811862Z 72 | 60 73 | kuemmerl 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 189 96 | 97 | plot_state.m 98 | file 99 | 100 | 101 | 102 | 103 | 2013-11-01T11:00:22.000000Z 104 | f5248e85d7632f058c927f0e6ca8731e 105 | 2012-11-28T13:46:12.149350Z 106 | 66 107 | kuemmerl 108 | has-props 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 618 130 | 131 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/tools/.svn/prop-base/plot_state.m.svn-base: -------------------------------------------------------------------------------- 1 | K 14 2 | svn:executable 3 | V 1 4 | * 5 | END 6 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/tools/.svn/prop-base/read_data.m.svn-base: -------------------------------------------------------------------------------- 1 | K 14 2 | svn:executable 3 | V 1 4 | * 5 | END 6 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/tools/.svn/text-base/normalize_angle.m.svn-base: -------------------------------------------------------------------------------- 1 | function [phiNorm] = normalize_angle(phi) 2 | %Normalize phi to be between -pi and pi 3 | 4 | while(phi>pi) 5 | phi = phi - 2*pi; 6 | endwhile 7 | 8 | while(phi<-pi) 9 | phi = phi + 2*pi; 10 | endwhile 11 | phiNorm = phi; 12 | 13 | end 14 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/tools/.svn/text-base/plot_state.m.svn-base: -------------------------------------------------------------------------------- 1 | function plot_state(particles, timestep) 2 | % Visualizes the state of the particles 3 | 4 | clf; 5 | hold on 6 | grid("on") 7 | 8 | % Plot the particles 9 | ppos = [particles.pose]; 10 | plot(ppos(1,:), ppos(2,:), 'g.', 'markersize', 10, 'linewidth', 3.5); 11 | 12 | xlim([-2, 12]) 13 | ylim([-2, 12]) 14 | hold off 15 | 16 | % dump to a file or show the window 17 | %window = true; 18 | window = false; 19 | if window 20 | figure(1, "visible", "on"); 21 | drawnow; 22 | pause(0.5); 23 | else 24 | figure(1, "visible", "off"); 25 | filename = sprintf('../plots/pf_%03d.png', timestep); 26 | print(filename, '-dpng'); 27 | end 28 | 29 | end 30 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/tools/.svn/text-base/read_data.m.svn-base: -------------------------------------------------------------------------------- 1 | function data = read_data(filename) 2 | % Reads the odometry and sensor readings from a file. 3 | % 4 | % filename: path to the file to parse 5 | % data: structure containing the parsed information 6 | % 7 | % The data is returned in a structure where the u_t and z_t are stored 8 | % within a single entry. A z_t can contain observations of multiple 9 | % landmarks. 
10 | % 11 | % Usage: 12 | % - access the readings for timestep i: 13 | % data.timestep(i) 14 | % this returns a structure containing the odometry reading and all 15 | % landmark obsevations, which can be accessed as follows 16 | % - odometry reading at timestep i: 17 | % data.timestep(i).odometry 18 | % - senor reading at timestep i: 19 | % data.timestep(i).sensor 20 | % 21 | % Odometry readings have the following fields: 22 | % - r1 : rotation 1 23 | % - t : translation 24 | % - r2 : rotation 2 25 | % which correspond to the identically labeled variables in the motion 26 | % mode. 27 | % 28 | % Sensor readings can again be indexed and each of the entris has the 29 | % following fields: 30 | % - id : id of the observed landmark 31 | % - range : measured range to the landmark 32 | % - bearing : measured angle to the landmark (you can ignore this) 33 | % 34 | % Examples: 35 | % - Translational component of the odometry reading at timestep 10 36 | % data.timestep(10).odometry.t 37 | % - Measured range to the second landmark observed at timestep 4 38 | % data.timestep(4).sensor(2).range 39 | input = fopen(filename); 40 | 41 | data = struct; 42 | data.timestep.sensor = struct; 43 | first = 1; 44 | 45 | odom = struct; 46 | sensor = struct; 47 | 48 | while(!feof(input)) 49 | line = fgetl(input); 50 | arr = strsplit(line, ' '); 51 | type = deblank(arr{1}); 52 | 53 | if(strcmp(type, 'ODOMETRY') == 1) 54 | if(first == 0) 55 | data.timestep(end+1).odometry = odom; 56 | data.timestep(end).sensor = sensor(2:end); 57 | odom = struct; 58 | sensor = struct; 59 | endif 60 | first = 0; 61 | odom.r1 = str2double(arr{2}); 62 | odom.t = str2double(arr{3}); 63 | odom.r2 = str2double(arr{4}); 64 | elseif(strcmp(type, 'SENSOR') == 1) 65 | reading = struct; 66 | reading.id = str2double(arr{2}); 67 | reading.range = str2double(arr{3}); 68 | reading.bearing = str2double(arr{4}); 69 | sensor(end+1) = reading; 70 | endif 71 | endwhile 72 | 73 | data.timestep = data.timestep(2:end); 74 | 75 | fclose(input); 76 | end 77 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/tools/normalize_angle.m: -------------------------------------------------------------------------------- 1 | function [phiNorm] = normalize_angle(phi) 2 | %Normalize phi to be between -pi and pi 3 | 4 | while(phi>pi) 5 | phi = phi - 2*pi; 6 | endwhile 7 | 8 | while(phi<-pi) 9 | phi = phi + 2*pi; 10 | endwhile 11 | phiNorm = phi; 12 | 13 | end 14 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/tools/plot_state.m: -------------------------------------------------------------------------------- 1 | function plot_state(particles, timestep) 2 | % Visualizes the state of the particles 3 | 4 | clf; 5 | graphics_toolkit gnuplot 6 | hold on 7 | grid("on") 8 | 9 | 10 | % Plot the particles 11 | ppos = [particles.pose]; 12 | plot(ppos(1,:), ppos(2,:), 'g.', 'markersize', 10, 'linewidth', 3.5); 13 | 14 | xlim([-2, 12]) 15 | ylim([-2, 12]) 16 | hold off 17 | 18 | % dump to a file or show the window 19 | %window = true; 20 | window = false; 21 | if window 22 | figure(1, "visible", "on"); 23 | drawnow; 24 | filename = sprintf('../plots/pf_%03d.png', timestep); 25 | print(filename, '-dpng'); 26 | pause(0.5); 27 | else 28 | figure(1, "visible", "off"); 29 | filename = sprintf('../plots/pf_%03d.png', timestep); 30 | print(filename, '-dpng'); 31 | end 32 | 33 | end 34 | -------------------------------------------------------------------------------- 
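For reference, normalize_angle above wraps an arbitrary angle into [-pi, pi] by repeatedly adding or subtracting 2*pi. A quick, illustrative check at the Octave prompt (values rounded to four decimals):

normalize_angle(3*pi/2)    % ans = -1.5708  (i.e. -pi/2)
normalize_angle(-4*pi/3)   % ans =  2.0944  (i.e.  2*pi/3)
normalize_angle(0.5)       % ans =  0.5000  (already in range)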
/5_Particle_Filters/octave/tools/plot_state.m~: -------------------------------------------------------------------------------- 1 | function plot_state(particles, timestep) 2 | % Visualizes the state of the particles 3 | 4 | clf; 5 | graphics_toolkit gnuplot 6 | hold on 7 | grid("on") 8 | 9 | 10 | % Plot the particles 11 | ppos = [particles.pose]; 12 | plot(ppos(1,:), ppos(2,:), 'g.', 'markersize', 10, 'linewidth', 3.5); 13 | 14 | xlim([-2, 12]) 15 | ylim([-2, 12]) 16 | hold off 17 | 18 | % dump to a file or show the window 19 | %window = true; 20 | window = false; 21 | if window 22 | figure(1, "visible", "on"); 23 | drawnow; 24 | filename = sprintf('../plots/pf_%03d.png', timestep); 25 | print(filename, '-dpng'); 26 | pause(0.5); 27 | else 28 | figure(1, "visible", "off"); 29 | filename = sprintf('../plots/pf_%03d.png', timestep); 30 | print(filename, '-dpng'); 31 | end 32 | 33 | end 34 | -------------------------------------------------------------------------------- /5_Particle_Filters/octave/tools/read_data.m: -------------------------------------------------------------------------------- 1 | function data = read_data(filename) 2 | % Reads the odometry and sensor readings from a file. 3 | % 4 | % filename: path to the file to parse 5 | % data: structure containing the parsed information 6 | % 7 | % The data is returned in a structure where the u_t and z_t are stored 8 | % within a single entry. A z_t can contain observations of multiple 9 | % landmarks. 10 | % 11 | % Usage: 12 | % - access the readings for timestep i: 13 | % data.timestep(i) 14 | % this returns a structure containing the odometry reading and all 15 | % landmark obsevations, which can be accessed as follows 16 | % - odometry reading at timestep i: 17 | % data.timestep(i).odometry 18 | % - senor reading at timestep i: 19 | % data.timestep(i).sensor 20 | % 21 | % Odometry readings have the following fields: 22 | % - r1 : rotation 1 23 | % - t : translation 24 | % - r2 : rotation 2 25 | % which correspond to the identically labeled variables in the motion 26 | % mode. 
27 | % 28 | % Sensor readings can again be indexed and each of the entris has the 29 | % following fields: 30 | % - id : id of the observed landmark 31 | % - range : measured range to the landmark 32 | % - bearing : measured angle to the landmark (you can ignore this) 33 | % 34 | % Examples: 35 | % - Translational component of the odometry reading at timestep 10 36 | % data.timestep(10).odometry.t 37 | % - Measured range to the second landmark observed at timestep 4 38 | % data.timestep(4).sensor(2).range 39 | input = fopen(filename); 40 | 41 | data = struct; 42 | data.timestep.sensor = struct; 43 | first = 1; 44 | 45 | odom = struct; 46 | sensor = struct; 47 | 48 | while(!feof(input)) 49 | line = fgetl(input); 50 | arr = strsplit(line, ' '); 51 | type = deblank(arr{1}); 52 | 53 | if(strcmp(type, 'ODOMETRY') == 1) 54 | if(first == 0) 55 | data.timestep(end+1).odometry = odom; 56 | data.timestep(end).sensor = sensor(2:end); 57 | odom = struct; 58 | sensor = struct; 59 | endif 60 | first = 0; 61 | odom.r1 = str2double(arr{2}); 62 | odom.t = str2double(arr{3}); 63 | odom.r2 = str2double(arr{4}); 64 | elseif(strcmp(type, 'SENSOR') == 1) 65 | reading = struct; 66 | reading.id = str2double(arr{2}); 67 | reading.range = str2double(arr{3}); 68 | reading.bearing = str2double(arr{4}); 69 | sensor(end+1) = reading; 70 | endif 71 | endwhile 72 | 73 | data.timestep = data.timestep(2:end); 74 | 75 | fclose(input); 76 | end 77 | -------------------------------------------------------------------------------- /5_Particle_Filters/plots/pf_motion.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/5_Particle_Filters/plots/pf_motion.mp4 -------------------------------------------------------------------------------- /6_FastSLAM/data/world.dat: -------------------------------------------------------------------------------- 1 | 1 2 1 2 | 2 0 4 3 | 3 2 7 4 | 4 9 2 5 | 5 10 5 6 | 6 9 8 7 | 7 5 5 8 | 8 5 3 9 | 9 5 9 10 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/correction_step.m: -------------------------------------------------------------------------------- 1 | function particles = correction_step(particles, z) 2 | 3 | % Weight the particles according to the current map of the particle 4 | % and the landmark observations z. 5 | % z: struct array containing the landmark observations. 6 | % Each observation z(j) has an id z(j).id, a range z(j).range, and a bearing z(j).bearing 7 | % The vector observedLandmarks indicates which landmarks have been observed 8 | % at some point by the robot. 
9 | 10 | % Number of particles 11 | numParticles = length(particles); 12 | 13 | % Number of measurements in this time step 14 | m = size(z, 2); 15 | 16 | % TODO: Construct the sensor noise matrix Q_t (2 x 2) 17 | Q_t = 0.01*eye(2); 18 | % process each particle 19 | for i = 1:numParticles 20 | robot = particles(i).pose; 21 | % process each measurement 22 | for j = 1:m 23 | % Get the id of the landmark corresponding to the j-th observation 24 | % particles(i).landmarks(l) is the EKF for this landmark 25 | l = z(j).id; 26 | 27 | % The (2x2) EKF of the landmark is given by 28 | % its mean particles(i).landmarks(l).mu 29 | % and by its covariance particles(i).landmarks(l).sigma 30 | 31 | % If the landmark is observed for the first time: 32 | if (particles(i).landmarks(l).observed == false) 33 | 34 | % TODO: Initialize its position based on the measurement and the current robot pose: 35 | 36 | particles(i).landmarks(l).mu = [robot(1)+z(j).range*cos(z(j).bearing + robot(3)); robot(2) + z(j).range*sin(z(j).bearing + robot(3))]; 37 | 38 | % get the Jacobian with respect to the landmark position 39 | [h, H] = measurement_model(particles(i), z(j)); 40 | 41 | % TODO: initialize the EKF for this landmark 42 | 43 | particles(i).landmarks(l).sigma = H\Q_t*inv(H)'; 44 | 45 | % Indicate that this landmark has been observed 46 | particles(i).landmarks(l).observed = true; 47 | 48 | else 49 | 50 | % get the expected measurement 51 | [expectedZ, H] = measurement_model(particles(i), z(j)); 52 | 53 | % TODO: compute the measurement covariance 54 | 55 | Q = H*particles(i).landmarks(l).sigma*H' + Q_t; 56 | 57 | % TODO: calculate the Kalman gain 58 | 59 | K = particles(i).landmarks(l).sigma*H'/Q; 60 | 61 | % TODO: compute the error between the z and expectedZ (remember to normalize the angle) 62 | 63 | z_diff = [z(j).range;z(j).bearing] - expectedZ; 64 | z_diff(2) = normalize_angle(z_diff(2)); 65 | 66 | % TODO: update the mean and covariance of the EKF for this landmark 67 | 68 | particles(i).landmarks(l).mu = particles(i).landmarks(l).mu + K*z_diff; 69 | particles(i).landmarks(l).sigma = (eye(2) - K*H)*particles(i).landmarks(l).sigma; 70 | 71 | % TODO: compute the likelihood of this observation, multiply with the former weight 72 | % to account for observing several features in one time step 73 | particles(i).weight = particles(i).weight*1/sqrt(det(2*pi*Q))*exp(-1/2*z_diff'/Q*z_diff); 74 | end 75 | 76 | end % measurement loop 77 | end % particle loop 78 | 79 | end 80 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/correction_step.m~: -------------------------------------------------------------------------------- 1 | function particles = correction_step(particles, z) 2 | 3 | % Weight the particles according to the current map of the particle 4 | % and the landmark observations z. 5 | % z: struct array containing the landmark observations. 6 | % Each observation z(j) has an id z(j).id, a range z(j).range, and a bearing z(j).bearing 7 | % The vector observedLandmarks indicates which landmarks have been observed 8 | % at some point by the robot. 
9 | 10 | % Number of particles 11 | numParticles = length(particles); 12 | 13 | % Number of measurements in this time step 14 | m = size(z, 2); 15 | 16 | % TODO: Construct the sensor noise matrix Q_t (2 x 2) 17 | Q_t = 0.01*eye(2); 18 | % process each particle 19 | for i = 1:numParticles 20 | robot = particles(i).pose; 21 | % process each measurement 22 | for j = 1:m 23 | % Get the id of the landmark corresponding to the j-th observation 24 | % particles(i).landmarks(l) is the EKF for this landmark 25 | l = z(j).id; 26 | 27 | % The (2x2) EKF of the landmark is given by 28 | % its mean particles(i).landmarks(l).mu 29 | % and by its covariance particles(i).landmarks(l).sigma 30 | 31 | % If the landmark is observed for the first time: 32 | if (particles(i).landmarks(l).observed == false) 33 | 34 | % TODO: Initialize its position based on the measurement and the current robot pose: 35 | 36 | particles(i).landmarks(l).mu = [robot(1)+z(j).range*cos(z(j).bearing + robot(3)); robot(2) + z(j).range*sin(z(j).bearing + robot(3))]; 37 | 38 | % get the Jacobian with respect to the landmark position 39 | [h, H] = measurement_model(particles(i), z(j)); 40 | 41 | % TODO: initialize the EKF for this landmark 42 | 43 | particles(i).landmarks(l).sigma = H\Q_t*inv(H)'; 44 | 45 | % Indicate that this landmark has been observed 46 | particles(i).landmarks(l).observed = true; 47 | 48 | else 49 | 50 | % get the expected measurement 51 | [expectedZ, H] = measurement_model(particles(i), z(j)); 52 | 53 | % TODO: compute the measurement covariance 54 | 55 | Q = H*particles(i).landmarks(l).sigma*H' + Q_t; 56 | 57 | % TODO: calculate the Kalman gain 58 | 59 | K = particles(i).landmarks(l).sigma*H'/Q; 60 | 61 | % TODO: compute the error between the z and expectedZ (remember to normalize the angle) 62 | 63 | z_diff = [z(j).range;z(j).bearing] - expectedZ; 64 | z_diff(2) = normalize_angle(z_diff(2)); 65 | 66 | % TODO: update the mean and covariance of the EKF for this landmark 67 | 68 | particles(i).landmarks(l).mu = particles(i).landmarks(l).mu + K*z_diff; 69 | particles(i).landmarks(l).sigma = (eye(2) - K*H)*particles(i).landmarks(l).sigma; 70 | 71 | % TODO: compute the likelihood of this observation, multiply with the former weight 72 | % to account for observing several features in one time step 73 | particles(i).weight = 1/sqrt(det(2*pi*Q))*exp(-1/2*z_diff'/Q*z_diff); 74 | end 75 | 76 | end % measurement loop 77 | end % particle loop 78 | 79 | end 80 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/fastslam.m: -------------------------------------------------------------------------------- 1 | % This is the main FastSLAM loop. This script calls all the required 2 | % functions in the correct order. 3 | % 4 | % You can disable the plotting or change the number of steps the filter 5 | % runs for to ease the debugging. You should however not change the order 6 | % or calls of any of the other lines, as it might break the framework. 7 | % 8 | % If you are unsure about the input and return values of functions you 9 | % should read their documentation which tells you the expected dimensions. 10 | 11 | % Turn off pagination: 12 | more off; 13 | 14 | clear all; 15 | close all; 16 | 17 | % Make tools available 18 | addpath('tools'); 19 | 20 | % Read world data, i.e. landmarks. The true landmark positions are not given to the robot 21 | landmarks = read_world('../data/world.dat'); 22 | % Read sensor readings, i.e. 
odometry and range-bearing sensor 23 | data = read_data('../data/sensor_data.dat'); 24 | 25 | % Get the number of landmarks in the map 26 | N = size(landmarks,2); 27 | 28 | noise = [0.005, 0.01, 0.005]'; 29 | 30 | % how many particles 31 | numParticles = 100; 32 | 33 | % initialize the particles array 34 | particles = struct; 35 | for i = 1:numParticles 36 | particles(i).weight = 1. / numParticles; 37 | particles(i).pose = zeros(3,1); 38 | particles(i).history = cell(); 39 | for l = 1:N % initialize the landmarks aka the map 40 | particles(i).landmarks(l).observed = false; 41 | particles(i).landmarks(l).mu = zeros(2,1); % 2D position of the landmark 42 | particles(i).landmarks(l).sigma = zeros(2,2); % covariance of the landmark 43 | end 44 | end 45 | 46 | % toogle the visualization type 47 | %showGui = true; % show a window while the algorithm runs 48 | showGui = false; % plot to files instead 49 | 50 | % Perform filter update for each odometry-observation pair read from the 51 | % data file. 52 | for t = 1:size(data.timestep, 2) 53 | %for t = 1:50 54 | printf('timestep = %d\n', t); 55 | 56 | % Perform the prediction step of the particle filter 57 | particles = prediction_step(particles, data.timestep(t).odometry, noise); 58 | 59 | % Perform the correction step of the particle filter 60 | particles = correction_step(particles, data.timestep(t).sensor); 61 | 62 | % Generate visualization plots of the current state of the filter 63 | plot_state(particles, landmarks, t, data.timestep(t).sensor, showGui); 64 | 65 | % Resample the particle set 66 | particles = resample(particles); 67 | end 68 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/fastslam.m~: -------------------------------------------------------------------------------- 1 | % This is the main FastSLAM loop. This script calls all the required 2 | % functions in the correct order. 3 | % 4 | % You can disable the plotting or change the number of steps the filter 5 | % runs for to ease the debugging. You should however not change the order 6 | % or calls of any of the other lines, as it might break the framework. 7 | % 8 | % If you are unsure about the input and return values of functions you 9 | % should read their documentation which tells you the expected dimensions. 10 | 11 | % Turn off pagination: 12 | more off; 13 | 14 | clear all; 15 | close all; 16 | 17 | % Make tools available 18 | addpath('tools'); 19 | 20 | % Read world data, i.e. landmarks. The true landmark positions are not given to the robot 21 | landmarks = read_world('../data/world.dat'); 22 | % Read sensor readings, i.e. odometry and range-bearing sensor 23 | data = read_data('../data/sensor_data.dat'); 24 | 25 | % Get the number of landmarks in the map 26 | N = size(landmarks,2); 27 | 28 | noise = [0.005, 0.01, 0.005]'; 29 | 30 | % how many particles 31 | numParticles = 100; 32 | 33 | % initialize the particles array 34 | particles = struct; 35 | for i = 1:numParticles 36 | particles(i).weight = 1. 
/ numParticles; 37 | particles(i).pose = zeros(3,1); 38 | particles(i).history = cell(); 39 | for l = 1:N % initialize the landmarks aka the map 40 | particles(i).landmarks(l).observed = false; 41 | particles(i).landmarks(l).mu = zeros(2,1); % 2D position of the landmark 42 | particles(i).landmarks(l).sigma = zeros(2,2); % covariance of the landmark 43 | end 44 | end 45 | 46 | % toogle the visualization type 47 | showGui = true; % show a window while the algorithm runs 48 | %showGui = false; % plot to files instead 49 | 50 | % Perform filter update for each odometry-observation pair read from the 51 | % data file. 52 | for t = 1:size(data.timestep, 2) 53 | %for t = 1:50 54 | printf('timestep = %d\n', t); 55 | 56 | % Perform the prediction step of the particle filter 57 | particles = prediction_step(particles, data.timestep(t).odometry, noise); 58 | 59 | % Perform the correction step of the particle filter 60 | particles = correction_step(particles, data.timestep(t).sensor); 61 | 62 | % Generate visualization plots of the current state of the filter 63 | plot_state(particles, landmarks, t, data.timestep(t).sensor, showGui); 64 | 65 | % Resample the particle set 66 | particles = resample(particles); 67 | end 68 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/tools/drawellipse.m: -------------------------------------------------------------------------------- 1 | %DRAWELLIPSE Draw ellipse. 2 | % DRAWELLIPSE(X,A,B,COLOR) draws an ellipse at X = [x y theta] 3 | % with half axes A and B. Theta is the inclination angle of A, 4 | % regardless if A is smaller or greater than B. COLOR is a 5 | % [r g b]-vector or a color string such as 'r' or 'g'. 6 | % 7 | % H = DRAWELLIPSE(...) returns the graphic handle H. 8 | % 9 | % See also DRAWPROBELLIPSE. 10 | 11 | % v.1.0-v.1.1, Aug.97-Jan.03, Kai Arras, ASL-EPFL 12 | % v.1.2, 03.12.03, Kai Arras, CAS-KTH: (x,a,b) interface 13 | 14 | 15 | function h = drawellipse(x,a,b,color); 16 | 17 | % Constants 18 | NPOINTS = 100; % point density or resolution 19 | 20 | % Compose point vector 21 | ivec = 0:2*pi/NPOINTS:2*pi; % index vector 22 | p(1,:) = a*cos(ivec); % 2 x n matrix which 23 | p(2,:) = b*sin(ivec); % hold ellipse points 24 | 25 | % Translate and rotate 26 | xo = x(1); yo = x(2); angle = x(3); 27 | R = [cos(angle) -sin(angle); sin(angle) cos(angle)]; 28 | T = [xo; yo]*ones(1,length(ivec)); 29 | p = R*p + T; 30 | 31 | % Plot 32 | h = plot(p(1,:),p(2,:),'Color',color, 'linewidth', 2); 33 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/tools/drawprobellipse.m: -------------------------------------------------------------------------------- 1 | %DRAWPROBELLIPSE Draw elliptic probability region of a Gaussian in 2D. 2 | % DRAWPROBELLIPSE(X,C,ALPHA,COLOR) draws the elliptic iso-probabi- 3 | % lity contour of a Gaussian distributed bivariate random vector X 4 | % at the significance level ALPHA. The ellipse is centered at X = 5 | % [x; y] where C is the associated 2x2 covariance matrix. COLOR is 6 | % a [r g b]-vector or a color string such as 'r' or 'g'. 7 | % 8 | % X and C can also be of size 3x1 and 3x3 respectively. 9 | % 10 | % For proper scaling, the function CHI2INVTABLE is employed to 11 | % avoid the use of CHI2INV from the Matlab statistics toolbox. 12 | % 13 | % In case of a negative definite matrix C, the ellipse collapses 14 | % to a line which is drawn instead. 15 | % 16 | % H = DRAWPROBELLIPSE(...) returns the graphic handle H. 
17 | % 18 | % See also DRAWELLIPSE, CHI2INVTABLE, CHI2INV. 19 | 20 | % v.1.0-v.1.3, 97-Jan.03, Kai Arras, ASL-EPFL 21 | % v.1.4, 03.12.03, Kai Arras, CAS-KTH: toolbox version 22 | 23 | 24 | function h = drawprobellipse(x,C,alpha,color); 25 | 26 | % Calculate unscaled half axes 27 | sxx = C(1,1); syy = C(2,2); sxy = C(1,2); 28 | a = sqrt(0.5*(sxx+syy+sqrt((sxx-syy)^2+4*sxy^2))); % always greater 29 | b = sqrt(0.5*(sxx+syy-sqrt((sxx-syy)^2+4*sxy^2))); % always smaller 30 | 31 | % Remove imaginary parts in case of neg. definite C 32 | if ~isreal(a), a = real(a); end; 33 | if ~isreal(b), b = real(b); end; 34 | 35 | % Scaling in order to reflect specified probability 36 | a = a*sqrt(chi2invtable(alpha,2)); 37 | b = b*sqrt(chi2invtable(alpha,2)); 38 | 39 | % Look where the greater half axis belongs to 40 | if sxx < syy, swap = a; a = b; b = swap; end; 41 | 42 | % Calculate inclination (numerically stable) 43 | if sxx ~= syy, 44 | angle = 0.5*atan(2*sxy/(sxx-syy)); 45 | elseif sxy == 0, 46 | angle = 0; % angle doesn't matter 47 | elseif sxy > 0, 48 | angle = pi/4; 49 | elseif sxy < 0, 50 | angle = -pi/4; 51 | end; 52 | x(3) = angle; 53 | 54 | % Draw ellipse 55 | h = drawellipse(x,a,b,color); -------------------------------------------------------------------------------- /6_FastSLAM/octave/tools/measurement_model.m: -------------------------------------------------------------------------------- 1 | % compute the expected measurement for a landmark 2 | % and the Jacobian with respect to the landmark 3 | function [h, H] = measurement_model(particle, z) 4 | 5 | % extract the id of the landmark 6 | landmarkId = z.id; 7 | % two 2D vector for the position (x,y) of the observed landmark 8 | landmarkPos = particle.landmarks(landmarkId).mu; 9 | 10 | % TODO: use the current state of the particle to predict the measurment 11 | landmarkX = landmarkPos(1); 12 | landmarkY = landmarkPos(2); 13 | expectedRange = sqrt((landmarkX - particle.pose(1))^2 + (landmarkY - particle.pose(2))^2); 14 | expectedBearing = normalize_angle(atan2(landmarkY-particle.pose(2), landmarkX-particle.pose(1)) - particle.pose(3)); 15 | h = [expectedRange; expectedBearing]; 16 | 17 | % TODO: Compute the Jacobian H of the measurement function h wrt the landmark location 18 | H = zeros(2,2); 19 | H(1,1) = (landmarkX - particle.pose(1))/expectedRange; 20 | H(1,2) = (landmarkY - particle.pose(2))/expectedRange; 21 | H(2,1) = (particle.pose(2) - landmarkY)/(expectedRange^2); 22 | H(2,2) = (landmarkX - particle.pose(1))/(expectedRange^2); 23 | 24 | end 25 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/tools/normalize_angle.m: -------------------------------------------------------------------------------- 1 | function [phiNorm] = normalize_angle(phi) 2 | %Normalize phi to be between -pi and pi 3 | 4 | while(phi>pi) 5 | phi = phi - 2*pi; 6 | endwhile 7 | 8 | while(phi<-pi) 9 | phi = phi + 2*pi; 10 | endwhile 11 | phiNorm = phi; 12 | 13 | end 14 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/tools/plot_state.m: -------------------------------------------------------------------------------- 1 | function plot_state(particles, landmarks, timestep, z, window) 2 | % Visualizes the state of the FastSLAM algorithm. 
3 | % 4 | % The resulting plot displays the following information: 5 | % - map ground truth (black +'s) 6 | % - currently best particle (red) 7 | % - particle set in green 8 | % - current landmark pose estimates (blue) 9 | % - visualization of the observations made at this time step (line between robot and landmark) 10 | 11 | clf; 12 | hold on 13 | grid("on") 14 | graphics_toolkit gnuplot 15 | L = struct2cell(landmarks); 16 | plot(cell2mat(L(2,:)), cell2mat(L(3,:)), 'k+', 'markersize', 10, 'linewidth', 5); 17 | 18 | % Plot the particles 19 | ppos = [particles.pose]; 20 | plot(ppos(1,:), ppos(2,:), 'g.'); 21 | 22 | % determine the currently best particle 23 | [bestWeight, bestParticleIdx] = max([particles.weight]); 24 | 25 | % draw the landmark locations along with the ellipsoids 26 | for(i=1:length(particles(bestParticleIdx).landmarks)) 27 | if(particles(bestParticleIdx).landmarks(i).observed) 28 | l = particles(bestParticleIdx).landmarks(i).mu; 29 | plot(l(1), l(2), 'bo', 'markersize', 3); 30 | drawprobellipse(l, particles(bestParticleIdx).landmarks(i).sigma, 0.95, 'b'); 31 | end 32 | end 33 | 34 | % draw the observations 35 | for(i=1:size(z,2)) 36 | l = particles(bestParticleIdx).landmarks(z(i).id).mu; 37 | line([particles(bestParticleIdx).pose(1), l(1)],[particles(bestParticleIdx).pose(2), l(2)], 'color', 'k', 'linewidth', 1); 38 | end 39 | 40 | % draw the trajectory as estimated by the currently best particle 41 | trajectory = cell2mat(particles(bestParticleIdx).history); 42 | line(trajectory(1,:), trajectory(2, :), 'color', 'r', 'linewidth', 3); 43 | 44 | drawrobot(particles(bestParticleIdx).pose, 'r', 3, 0.3, 0.3); 45 | xlim([-2, 12]) 46 | ylim([-2, 12]) 47 | 48 | hold off 49 | 50 | % dump to a file or show the window 51 | if window 52 | figure(1, "visible", "on"); 53 | drawnow; 54 | pause(0.1); 55 | else 56 | figure(1, "visible", "off"); 57 | filename = sprintf('../plots/fastslam_%03d.png', timestep); 58 | print(filename, '-dpng'); 59 | end 60 | 61 | end 62 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/tools/plot_state.m~: -------------------------------------------------------------------------------- 1 | function plot_state(particles, landmarks, timestep, z, window) 2 | % Visualizes the state of the FastSLAM algorithm. 
3 | % 4 | % The resulting plot displays the following information: 5 | % - map ground truth (black +'s) 6 | % - currently best particle (red) 7 | % - particle set in green 8 | % - current landmark pose estimates (blue) 9 | % - visualization of the observations made at this time step (line between robot and landmark) 10 | 11 | clf; 12 | hold on 13 | grid("on") 14 | 15 | L = struct2cell(landmarks); 16 | plot(cell2mat(L(2,:)), cell2mat(L(3,:)), 'k+', 'markersize', 10, 'linewidth', 5); 17 | 18 | % Plot the particles 19 | ppos = [particles.pose]; 20 | plot(ppos(1,:), ppos(2,:), 'g.'); 21 | 22 | % determine the currently best particle 23 | [bestWeight, bestParticleIdx] = max([particles.weight]); 24 | 25 | % draw the landmark locations along with the ellipsoids 26 | for(i=1:length(particles(bestParticleIdx).landmarks)) 27 | if(particles(bestParticleIdx).landmarks(i).observed) 28 | l = particles(bestParticleIdx).landmarks(i).mu; 29 | plot(l(1), l(2), 'bo', 'markersize', 3); 30 | drawprobellipse(l, particles(bestParticleIdx).landmarks(i).sigma, 0.95, 'b'); 31 | end 32 | end 33 | 34 | % draw the observations 35 | for(i=1:size(z,2)) 36 | l = particles(bestParticleIdx).landmarks(z(i).id).mu; 37 | line([particles(bestParticleIdx).pose(1), l(1)],[particles(bestParticleIdx).pose(2), l(2)], 'color', 'k', 'linewidth', 1); 38 | end 39 | 40 | % draw the trajectory as estimated by the currently best particle 41 | trajectory = cell2mat(particles(bestParticleIdx).history); 42 | line(trajectory(1,:), trajectory(2, :), 'color', 'r', 'linewidth', 3); 43 | 44 | drawrobot(particles(bestParticleIdx).pose, 'r', 3, 0.3, 0.3); 45 | xlim([-2, 12]) 46 | ylim([-2, 12]) 47 | 48 | hold off 49 | 50 | % dump to a file or show the window 51 | if window 52 | figure(1, "visible", "on"); 53 | drawnow; 54 | pause(0.1); 55 | else 56 | figure(1, "visible", "off"); 57 | filename = sprintf('../plots/fastslam_%03d.png', timestep); 58 | print(filename, '-dpng'); 59 | end 60 | 61 | end 62 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/tools/prediction_step.m: -------------------------------------------------------------------------------- 1 | function particles = prediction_step(particles, u, noise) 2 | % Updates the particles by drawing from the motion model 3 | % Use u.r1, u.t, and u.r2 to access the rotation and translation values 4 | % which have to be pertubated with Gaussian noise. 5 | % The position of the i-th particle is given by the 3D vector 6 | % particles(i).pose which represents (x, y, theta). 7 | 8 | % noise parameters 9 | % Assume Gaussian noise in each of the three parameters of the motion model. 10 | % These three parameters may be used as standard deviations for sampling. 
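% In outline, the sampling below perturbs each odometry component with Gaussian
% noise and then composes the motion:
%   r1' ~ N(u.r1, r1Noise),  t' ~ N(u.t, transNoise),  r2' ~ N(u.r2, r2Noise)
%   x     <- x + t' * cos(theta + r1')
%   y     <- y + t' * sin(theta + r1')
%   theta <- normalize_angle(theta + r1' + r2')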
11 | r1Noise = noise(1); 12 | transNoise = noise(2); 13 | r2Noise = noise(3); 14 | 15 | numParticles = length(particles); 16 | 17 | for i = 1:numParticles 18 | 19 | % append the old position to the history of the particle 20 | particles(i).history{end+1} = particles(i).pose; 21 | 22 | % sample a new pose for the particle 23 | r1 = normrnd(u.r1, r1Noise); 24 | r2 = normrnd(u.r2, r2Noise); 25 | trans = normrnd(u.t, transNoise); 26 | particles(i).pose(1) = particles(i).pose(1) + trans*cos(particles(i).pose(3) + r1); 27 | particles(i).pose(2) = particles(i).pose(2) + trans*sin(particles(i).pose(3) + r1); 28 | particles(i).pose(3) = normalize_angle(particles(i).pose(3) + r1 + r2); 29 | end 30 | 31 | end 32 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/tools/read_data.m: -------------------------------------------------------------------------------- 1 | function data = read_data(filename) 2 | % Reads the odometry and sensor readings from a file. 3 | % 4 | % filename: path to the file to parse 5 | % data: structure containing the parsed information 6 | % 7 | % The data is returned in a structure where the u_t and z_t are stored 8 | % within a single entry. A z_t can contain observations of multiple 9 | % landmarks. 10 | % 11 | % Usage: 12 | % - access the readings for timestep i: 13 | % data.timestep(i) 14 | % this returns a structure containing the odometry reading and all 15 | % landmark obsevations, which can be accessed as follows 16 | % - odometry reading at timestep i: 17 | % data.timestep(i).odometry 18 | % - senor reading at timestep i: 19 | % data.timestep(i).sensor 20 | % 21 | % Odometry readings have the following fields: 22 | % - r1 : rotation 1 23 | % - t : translation 24 | % - r2 : rotation 2 25 | % which correspond to the identically labeled variables in the motion 26 | % mode. 
27 | % 28 | % Sensor readings can again be indexed and each of the entris has the 29 | % following fields: 30 | % - id : id of the observed landmark 31 | % - range : measured range to the landmark 32 | % - bearing : measured angle to the landmark (you can ignore this) 33 | % 34 | % Examples: 35 | % - Translational component of the odometry reading at timestep 10 36 | % data.timestep(10).odometry.t 37 | % - Measured range to the second landmark observed at timestep 4 38 | % data.timestep(4).sensor(2).range 39 | input = fopen(filename); 40 | 41 | data = struct; 42 | data.timestep.sensor = struct; 43 | first = 1; 44 | 45 | odom = struct; 46 | sensor = struct; 47 | 48 | while(!feof(input)) 49 | line = fgetl(input); 50 | arr = strsplit(line, ' '); 51 | type = deblank(arr{1}); 52 | 53 | if(strcmp(type, 'ODOMETRY') == 1) 54 | if(first == 0) 55 | data.timestep(end+1).odometry = odom; 56 | data.timestep(end).sensor = sensor(2:end); 57 | odom = struct; 58 | sensor = struct; 59 | endif 60 | first = 0; 61 | odom.r1 = str2double(arr{2}); 62 | odom.t = str2double(arr{3}); 63 | odom.r2 = str2double(arr{4}); 64 | elseif(strcmp(type, 'SENSOR') == 1) 65 | reading = struct; 66 | reading.id = str2double(arr{2}); 67 | reading.range = str2double(arr{3}); 68 | reading.bearing = str2double(arr{4}); 69 | sensor(end+1) = reading; 70 | endif 71 | endwhile 72 | 73 | data.timestep = data.timestep(2:end); 74 | 75 | fclose(input); 76 | end 77 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/tools/read_world.m: -------------------------------------------------------------------------------- 1 | function landmarks = read_world(filename) 2 | % Reads the world definition and returns a structure of landmarks. 3 | % 4 | % filename: path of the file to load 5 | % landmarks: structure containing the parsed information 6 | % 7 | % Each landmark contains the following information: 8 | % - id : id of the landmark 9 | % - x : x-coordinate 10 | % - y : y-coordinate 11 | % 12 | % Examples: 13 | % - Obtain x-coordinate of the 5-th landmark 14 | % landmarks(5).x 15 | input = fopen(filename); 16 | 17 | landmarks = struct; 18 | 19 | while(!feof(input)) 20 | line = fgetl(input); 21 | data = strsplit(line, ' '); 22 | 23 | landmark = struct( 24 | "id", str2double(data{1}), 25 | "x" , str2double(data{2}), 26 | "y" , str2double(data{3}) 27 | ); 28 | landmarks(end+1) = landmark; 29 | endwhile 30 | 31 | landmarks = landmarks(2:end); 32 | 33 | fclose(input); 34 | end 35 | -------------------------------------------------------------------------------- /6_FastSLAM/octave/tools/resample.m: -------------------------------------------------------------------------------- 1 | % resample the set of particles. 2 | % A particle has a probability proportional to its weight to get 3 | % selected. A good option for such a resampling method is the so-called low 4 | % variance sampling, Probabilistic Robotics pg. 109 5 | function newParticles = resample(particles) 6 | 7 | numParticles = length(particles); 8 | 9 | w = [particles.weight]; 10 | 11 | % normalize the weight 12 | w = w / sum(w); 13 | 14 | % consider number of effective particles, to decide whether to resample or not 15 | useNeff = false; 16 | %useNeff = true; 17 | if useNeff 18 | neff = 1. 
/ sum(w.^2); 19 | neff 20 | if neff > 0.5*numParticles 21 | newParticles = particles; 22 | for i = 1:numParticles 23 | newParticles(i).weight = w(i); 24 | end 25 | return; 26 | end 27 | end 28 | 29 | newParticles = struct; 30 | 31 | % TODO: implement the low variance re-sampling 32 | 33 | % the cummulative sum 34 | cs = cumsum(w); 35 | weightSum = cs(length(cs)); 36 | 37 | % initialize the step and the current position on the roulette wheel 38 | step = weightSum / numParticles; 39 | position = unifrnd(0, weightSum); 40 | idx = 1; 41 | 42 | % walk along the wheel to select the particles 43 | for i = 1:numParticles 44 | position += step; 45 | if (position > weightSum) 46 | position -= weightSum; 47 | idx = 1; 48 | end 49 | while (position > cs(idx)) 50 | idx++; 51 | end 52 | newParticles(i) = particles(idx); 53 | newParticles(i).weight = 1/numParticles; 54 | end 55 | 56 | end 57 | -------------------------------------------------------------------------------- /6_FastSLAM/plots/fastslam.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/6_FastSLAM/plots/fastslam.mp4 -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/LSCalibrateOdometry.m: -------------------------------------------------------------------------------- 1 | more off; 2 | clear all; 3 | close all; 4 | 5 | % add tools directory 6 | addpath('tools') 7 | 8 | % load the odometry measurements 9 | load ../data/odom_motions 10 | 11 | % the motions as they are estimated by scan-matching 12 | load ../data/scanmatched_motions 13 | 14 | % create our measurements vector z 15 | z = [scanmatched_motions odom_motions]; 16 | 17 | % perform the calibration 18 | X = ls_calibrate_odometry(z); 19 | disp('calibration result'); disp(X); 20 | 21 | % apply the estimated calibration parameters 22 | calibrated_motions = apply_odometry_correction(X, odom_motions); 23 | 24 | % compute the current odometry trajectory, the scanmatch result, and the calibrated odom 25 | odom_trajectory = compute_trajectory(odom_motions); 26 | scanmatch_trajectory = compute_trajectory(scanmatched_motions); 27 | calibrated_trajectory = compute_trajectory(calibrated_motions); 28 | 29 | % plot the trajectories 30 | plot( 31 | odom_trajectory(:,1), odom_trajectory(:,2), ";Uncalibrated Odometry;", 32 | scanmatch_trajectory(:,1), scanmatch_trajectory(:,2), ";Scan-Matching;", 33 | calibrated_trajectory(:,1), calibrated_trajectory(:,2), ";Calibrated Odometry;"); 34 | print -dpng "../plots/odometry-calibration.png" 35 | -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/LSCalibrateOdometry.m~: -------------------------------------------------------------------------------- 1 | more off; 2 | clear all; 3 | close all; 4 | 5 | % add tools directory 6 | addpath('tools') 7 | 8 | % load the odometry measurements 9 | load ../data/odom_motions 10 | 11 | % the motions as they are estimated by scan-matching 12 | load ../data/scanmatched_motions 13 | 14 | % create our measurements vector z 15 | z = [scanmatched_motions odom_motions]; 16 | 17 | % perform the calibration 18 | X = ls_calibrate_odometry(z); 19 | disp('calibration result'); disp(X); 20 | 21 | % apply the estimated calibration parameters 22 | calibrated_motions = apply_odometry_correction(X, odom_motions); 23 | 24 | % compute the current odometry trajectory, the 
scanmatch result, and the calibrated odom 25 | odom_trajectory = compute_trajectory(odom_motions); 26 | scanmatch_trajectory = compute_trajectory(scanmatched_motions); 27 | calibrated_trajectory = compute_trajectory(calibrated_motions); 28 | 29 | % plot the trajectories 30 | plot( 31 | odom_trajectory(:,1), odom_trajectory(:,3), ";Uncalibrated Odometry;", 32 | scanmatch_trajectory(:,1), scanmatch_trajectory(:,3), ";Scan-Matching;", 33 | calibrated_trajectory(:,1), calibrated_trajectory(:,3), ";Calibrated Odometry;"); 34 | print -dpng "../plots/odometry-calibration.png" 35 | -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/apply_odometry_correction.m: -------------------------------------------------------------------------------- 1 | % computes a calibrated vector of odometry measurements 2 | % by applying the bias term to each line of the measurements 3 | % X: 3x3 matrix obtained by the calibration process 4 | % U: Nx3 matrix containing the odometry measurements 5 | % C: Nx3 matrix containing the corrected odometry measurements 6 | 7 | function C = apply_odometry_correction(X, U) 8 | % TODO: compute the calibrated motion vector, try to vectorize 9 | C = (X*U')'; 10 | end 11 | -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/apply_odometry_correction.m~: -------------------------------------------------------------------------------- 1 | % computes a calibrated vector of odometry measurements 2 | % by applying the bias term to each line of the measurements 3 | % X: 3x3 matrix obtained by the calibration process 4 | % U: Nx3 matrix containing the odometry measurements 5 | % C: Nx3 matrix containing the corrected odometry measurements 6 | 7 | function C = apply_odometry_correction(X, U) 8 | % TODO: compute the calibrated motion vector, try to vectorize 9 | 10 | end 11 | -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/compute_trajectory.m: -------------------------------------------------------------------------------- 1 | % computes the trajectory of the robot by chaining up 2 | % the incremental movements of the odometry vector 3 | % U: a Nx3 matrix, each row contains the odoemtry ux, uy utheta 4 | % T: a (N+1)x3 matrix, each row contains the robot position (starting from 0,0,0) 5 | function T = compute_trajectory(U) 6 | % initialize the trajectory matrix 7 | T = zeros(size(U,1)+1, 3); 8 | % store the first pose in the result 9 | T(1, :) = zeros(1,3); 10 | % the current pose in the chain 11 | currentPose = v2t(T(1, :)); 12 | 13 | % TODO: compute the result of chaining up the odometry deltas 14 | % Note that U(i) results in T(i+1). 
15 | % T(i+1) can be computed by calling t2v(currentPose) 16 | % after computing the current pose of the robot 17 | for i = 1:size(U,1) 18 | currentPose = currentPose*v2t(U(i,:)); 19 | T(i+1,:) = t2v(currentPose); 20 | endfor 21 | end 22 | -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/compute_trajectory.m~: -------------------------------------------------------------------------------- 1 | % computes the trajectory of the robot by chaining up 2 | % the incremental movements of the odometry vector 3 | % U: a Nx3 matrix, each row contains the odoemtry ux, uy utheta 4 | % T: a (N+1)x3 matrix, each row contains the robot position (starting from 0,0,0) 5 | function T = compute_trajectory(U) 6 | % initialize the trajectory matrix 7 | T = zeros(size(U,1)+1, 3); 8 | % store the first pose in the result 9 | T(1, :) = zeros(1,3); 10 | % the current pose in the chain 11 | currentPose = v2t(T(1, :)); 12 | 13 | % TODO: compute the result of chaining up the odometry deltas 14 | % Note that U(i) results in T(i+1). 15 | % T(i+1) can be computed by calling t2v(currentPose) 16 | % after computing the current pose of the robot 17 | for i = 1:size(U,1) 18 | currentPose = currentPose*v2t(U(i-1,:)); 19 | T(i+1,:) = t2v(currentPose); 20 | endfor 21 | end 22 | -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/ls_calibrate_odometry.m: -------------------------------------------------------------------------------- 1 | % this function solves the odometry calibration problem 2 | % given a measurement matrix Z. 3 | % We assume that the information matrix is the identity 4 | % for each of the measurements 5 | % Every row of the matrix contains 6 | % z_i = [u'x, u'y, u'theta, ux, uy, ytheta] 7 | % Z: The measurement matrix 8 | % X: the calibration matrix 9 | % returns the correction matrix X 10 | function X = ls_calibrate_odometry(Z) 11 | % initial solution (the identity transformation) 12 | X = eye(3); 13 | 14 | % TODO: initialize H and b of the linear system 15 | H = zeros(9); 16 | b = zeros(9,1); 17 | omega = eye(3); 18 | 19 | % TODO: loop through the measurements and update H and b 20 | % You may call the functions error_function and jacobian, see below 21 | % We assume that the information matrix is the identity. 
22 | for i=1:size(Z,1) 23 | H = H + jacobian(i,Z)'*omega*jacobian(i,Z); 24 | b = b + jacobian(i,Z)'*omega'*error_function(i,X,Z); 25 | endfor 26 | % TODO: solve and update the solution 27 | delX = -H\b; 28 | X = X + [delX(1:3)';delX(4:6)';delX(7:9)']; 29 | end 30 | 31 | % this function computes the error of the i^th measurement in Z 32 | % given the calibration parameters 33 | % i: the number of the measurement 34 | % X: the actual calibration parameters 35 | % Z: the measurement matrix, each row contains first the scan-match result 36 | % and then the motion reported by odometry 37 | % e: the error of the ith measurement 38 | function e = error_function(i, X, Z) 39 | % TODO compute the error of each measurement 40 | e = Z(i,1:3)' - X*Z(i,4:6)'; 41 | end 42 | 43 | % derivative of the error function for the ith measurement in Z 44 | % i: the measurement number 45 | % Z: the measurement matrix 46 | % J: the jacobian of the ith measurement 47 | function J = jacobian(i, Z) 48 | % TODO compute the Jacobian 49 | J = -[Z(i,4) Z(i,5) Z(i,6) zeros(1,6); zeros(1,3) Z(i,4) Z(i,5) Z(i,6) zeros(1,3); zeros(1,6) Z(i,4) Z(i,5) Z(i,6)]; 50 | end 51 | -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/ls_calibrate_odometry.m~: -------------------------------------------------------------------------------- 1 | % this function solves the odometry calibration problem 2 | % given a measurement matrix Z. 3 | % We assume that the information matrix is the identity 4 | % for each of the measurements 5 | % Every row of the matrix contains 6 | % z_i = [u'x, u'y, u'theta, ux, uy, ytheta] 7 | % Z: The measurement matrix 8 | % X: the calibration matrix 9 | % returns the correction matrix X 10 | function X = ls_calibrate_odometry(Z) 11 | % initial solution (the identity transformation) 12 | X = eye(3); 13 | 14 | % TODO: initialize H and b of the linear system 15 | H = zeros(9); 16 | b = zeros(9,1); 17 | omega = eye(3); 18 | 19 | % TODO: loop through the measurements and update H and b 20 | % You may call the functions error_function and jacobian, see below 21 | % We assume that the information matrix is the identity. 
22 | for i=1:size(Z,1) 23 | H = H + jacobian(i,Z)'*omega*jacobian(i,Z); 24 | b = b + jacobian(i,Z)'*omega'*error_function(i,X,Z); 25 | endfor 26 | % TODO: solve and update the solution 27 | delX = -H\b; 28 | X = X + [delX(1:3)';delX(4:6)';delX(7:9)']; 29 | end 30 | 31 | % this function computes the error of the i^th measurement in Z 32 | % given the calibration parameters 33 | % i: the number of the measurement 34 | % X: the actual calibration parameters 35 | % Z: the measurement matrix, each row contains first the scan-match result 36 | % and then the motion reported by odometry 37 | % e: the error of the ith measurement 38 | function e = error_function(i, X, Z) 39 | % TODO compute the error of each measurement 40 | e = Z(i,1:3)' - X*Z(i,4:6)'; 41 | end 42 | 43 | % derivative of the error function for the ith measurement in Z 44 | % i: the measurement number 45 | % Z: the measurement matrix 46 | % J: the jacobian of the ith measurement 47 | function J = jacobian(i, Z) 48 | % TODO compute the Jacobian 49 | J = -[Z(i,4) Z(i,5) Z(i,6) zeros(1,6); zeros(1,3) Z(i,4) Z(i,5) Z(i,6) zeros(1,3); zeros(1,6) Z(i,4) Z(i,5) Z(i,6)]; 50 | end 51 | -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/octave-workspace: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/7_Odom_Calib_LeastSquares/octave/octave-workspace -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/tools/normalize_angle.m: -------------------------------------------------------------------------------- 1 | function [phi] = normalize_angle(phi) 2 | % Normalize phi to be between -pi and pi 3 | % phi can also be a vector of angles 4 | 5 | phi = mod(phi + pi, 2*pi) - pi; 6 | 7 | end 8 | -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/tools/t2v.m: -------------------------------------------------------------------------------- 1 | #computes the pose vector v from an homogeneous transform A 2 | function v=t2v(A) 3 | v = [A(1:2,3); atan2(A(2,1),A(1,1))]; 4 | end 5 | -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/octave/tools/v2t.m: -------------------------------------------------------------------------------- 1 | #computes the homogeneous transform matrix A of the pose vector v 2 | function A=v2t(v) 3 | c=cos(v(3)); 4 | s=sin(v(3)); 5 | A=[c, -s, v(1); 6 | s, c, v(2); 7 | 0 0 1 ]; 8 | end 9 | -------------------------------------------------------------------------------- /7_Odom_Calib_LeastSquares/plots/odometry-calibration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/7_Odom_Calib_LeastSquares/plots/odometry-calibration.png -------------------------------------------------------------------------------- /8_GraphSLAM/data/dlr.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/data/dlr.dat -------------------------------------------------------------------------------- /8_GraphSLAM/data/intel.dat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/data/intel.dat -------------------------------------------------------------------------------- /8_GraphSLAM/data/simulation-pose-landmark.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/data/simulation-pose-landmark.dat -------------------------------------------------------------------------------- /8_GraphSLAM/data/simulation-pose-pose.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/data/simulation-pose-pose.dat -------------------------------------------------------------------------------- /8_GraphSLAM/octave/compute_global_error.m: -------------------------------------------------------------------------------- 1 | % Computes the total error of the graph 2 | function Fx = compute_global_error(g) 3 | 4 | Fx = 0; 5 | 6 | % Loop over all edges 7 | for eid = 1:length(g.edges) 8 | edge = g.edges(eid); 9 | 10 | % pose-pose constraint 11 | if (strcmp(edge.type, 'P') != 0) 12 | 13 | x1 = v2t(g.x(edge.fromIdx:edge.fromIdx+2)); % the first robot pose 14 | x2 = v2t(g.x(edge.toIdx:edge.toIdx+2)); % the second robot pose 15 | 16 | %TODO compute the error of the constraint and add it to Fx. 17 | % Use edge.measurement and edge.information to access the 18 | % measurement and the information matrix respectively. 19 | % del = [x2(1)-x1(1); x2(2)-x1(2)]; 20 | % q = del'*del; 21 | % zCap = [sqrt(q);normalize_angle(atan2(del(2),del(1))-x1(3))]; 22 | % err_tmp = edge.measurement - zCap; 23 | err_tmp = t2v(v2t(edge.measurement)\(x1\x2)); 24 | ei = err_tmp'*edge.information*err_tmp; 25 | Fx = Fx + ei; 26 | 27 | % pose-landmark constraint 28 | elseif (strcmp(edge.type, 'L') != 0) 29 | x = g.x(edge.fromIdx:edge.fromIdx+2); % the robot pose 30 | l = g.x(edge.toIdx:edge.toIdx+1); % the landmark 31 | 32 | %TODO compute the error of the constraint and add it to Fx. 33 | % Use edge.measurement and edge.information to access the 34 | % measurement and the information matrix respectively. 
35 | % del = [l(1)-x(1);l(2)-x(2)]; 36 | % q = del'*del; 37 | % zCap = [sqrt(q);normalize_angle(atan2(del(2),del(1))-x(3))]; 38 | % zCap = [l(1)-x(1);l(2)-x(2)]; 39 | % zCap 40 | % edge.measurement 41 | Z = v2t([edge.measurement;0]); 42 | X = v2t(x); 43 | L = v2t([l;0]); 44 | err_tmp = t2v(Z\(X\L)); 45 | err_tmp = err_tmp(1:2); 46 | % err_tmp = edge.measurement - zCap; 47 | ei = err_tmp'*edge.information*err_tmp; 48 | Fx = Fx + ei; 49 | 50 | end 51 | 52 | end 53 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/compute_global_error.m~: -------------------------------------------------------------------------------- 1 | % Computes the total error of the graph 2 | function Fx = compute_global_error(g) 3 | 4 | Fx = 0; 5 | 6 | % Loop over all edges 7 | for eid = 1:length(g.edges) 8 | edge = g.edges(eid); 9 | 10 | % pose-pose constraint 11 | if (strcmp(edge.type, 'P') != 0) 12 | 13 | x1 = v2t(g.x(edge.fromIdx:edge.fromIdx+2)); % the first robot pose 14 | x2 = v2t(g.x(edge.toIdx:edge.toIdx+2)); % the second robot pose 15 | 16 | %TODO compute the error of the constraint and add it to Fx. 17 | % Use edge.measurement and edge.information to access the 18 | % measurement and the information matrix respectively. 19 | % del = [x2(1)-x1(1); x2(2)-x1(2)]; 20 | % q = del'*del; 21 | % zCap = [sqrt(q);normalize_angle(atan2(del(2),del(1))-x1(3))]; 22 | % err_tmp = edge.measurement - zCap; 23 | err_tmp = t2v(v2t(edge.measurement)\(x1\x2)); 24 | ei = err_tmp'*edge.information*err_tmp; 25 | Fx = Fx + ei; 26 | 27 | % pose-landmark constraint 28 | elseif (strcmp(edge.type, 'L') != 0) 29 | x = g.x(edge.fromIdx:edge.fromIdx+2); % the robot pose 30 | l = g.x(edge.toIdx:edge.toIdx+1); % the landmark 31 | 32 | %TODO compute the error of the constraint and add it to Fx. 33 | % Use edge.measurement and edge.information to access the 34 | % measurement and the information matrix respectively. 35 | % del = [l(1)-x(1);l(2)-x(2)]; 36 | % q = del'*del; 37 | % zCap = [sqrt(q);normalize_angle(atan2(del(2),del(1))-x(3))]; 38 | % zCap = [l(1)-x(1);l(2)-x(2)]; 39 | % zCap 40 | % edge.measurement 41 | Z = v2t([edge.measurement;1]); 42 | X = v2t(x); 43 | L = v2t([l;1]); 44 | err_tmp = t2v(Z\(X\L)); 45 | err_tmp = err_tmp(1:2); 46 | % err_tmp = edge.measurement - zCap; 47 | ei = err_tmp'*edge.information*err_tmp; 48 | Fx = Fx + ei; 49 | 50 | end 51 | 52 | end 53 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/linearize_and_solve.m: -------------------------------------------------------------------------------- 1 | % performs one iteration of the Gauss-Newton algorithm 2 | % each constraint is linearized and added to the Hessian 3 | 4 | function dx = linearize_and_solve(g) 5 | 6 | % number of non-zero elements in graph 7 | nnz = nnz_of_graph(g); 8 | 9 | % allocate the sparse H and the vector b 10 | H = spalloc(length(g.x), length(g.x), nnz); 11 | b = zeros(length(g.x), 1); 12 | 13 | needToAddPrior = true; 14 | 15 | % compute the addend term to H and b for each of our constraints 16 | disp('linearize and build system'); 17 | for eid = 1:length(g.edges) 18 | edge = g.edges(eid); 19 | 20 | % pose-pose constraint 21 | if (strcmp(edge.type, 'P') != 0) 22 | % edge.fromIdx and edge.toIdx describe the location of 23 | % the first element of the pose in the state vector 24 | % You should use also this index when updating the elements 25 | % of the H matrix and the vector b. 
26 | % edge.measurement is the measurement 27 | % edge.information is the information matrix 28 | x1 = g.x(edge.fromIdx:edge.fromIdx+2); % the first robot pose 29 | x2 = g.x(edge.toIdx:edge.toIdx+2); % the second robot pose 30 | 31 | % Computing the error and the Jacobians 32 | % e the error vector 33 | % A Jacobian wrt x1 34 | % B Jacobian wrt x2 35 | [e, A, B] = linearize_pose_pose_constraint(x1, x2, edge.measurement); 36 | 37 | 38 | % TODO: compute and add the term to H and b 39 | i = edge.fromIdx:edge.fromIdx+2; 40 | j = edge.toIdx:edge.toIdx+2; 41 | omega = edge.information; 42 | b(i) = b(i) + (e'*omega*A)'; 43 | b(j) = b(j) + (e'*omega*B)'; 44 | H(i,i) = H(i,i) + A'*omega*A; 45 | H(i,j) = H(i,j) + A'*omega*B; 46 | H(j,i) = H(j,i) + B'*omega*A; 47 | H(j,j) = H(j,j) + B'*omega*B; 48 | 49 | if (needToAddPrior) 50 | % TODO: add the prior for one pose of this edge 51 | % This fixes one node to remain at its current location 52 | % prior = zeros(size(H)); 53 | % prior(1,1) = 1; 54 | H(1,1) = H(1,1) + 1; 55 | H(2,2) = H(2,2) + 1; 56 | H(3,3) = H(3,3) + 1; 57 | needToAddPrior = false; 58 | end 59 | 60 | % pose-landmark constraint 61 | elseif (strcmp(edge.type, 'L') != 0) 62 | % edge.fromIdx and edge.toIdx describe the location of 63 | % the first element of the pose and the landmark in the state vector 64 | % You should use also this index when updating the elements 65 | % of the H matrix and the vector b. 66 | % edge.measurement is the measurement 67 | % edge.information is the information matrix 68 | x1 = g.x(edge.fromIdx:edge.fromIdx+2); % the robot pose 69 | x2 = g.x(edge.toIdx:edge.toIdx+1); % the landmark 70 | 71 | % Computing the error and the Jacobians 72 | % e the error vector 73 | % A Jacobian wrt x1 74 | % B Jacobian wrt x2 75 | [e, A, B] = linearize_pose_landmark_constraint(x1, x2, edge.measurement); 76 | 77 | 78 | % TODO: compute and add the term to H and b 79 | i = edge.fromIdx:edge.fromIdx+2; 80 | j = edge.toIdx:edge.toIdx+1; 81 | omega = edge.information; 82 | b(i) = b(i) + (e'*omega*A)'; 83 | b(j) = b(j) + (e'*omega*B)'; 84 | H(i,i) = H(i,i) + A'*omega*A; 85 | H(i,j) = H(i,j) + A'*omega*B; 86 | H(j,i) = H(j,i) + B'*omega*A; 87 | H(j,j) = H(j,j) + B'*omega*B; 88 | 89 | end 90 | 91 | end 92 | 93 | disp('solving system'); 94 | 95 | % TODO: solve the linear system, whereas the solution should be stored in dx 96 | % Remember to use the backslash operator instead of inverting H 97 | dx = -H\b; 98 | 99 | end 100 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/linearize_and_solve.m~: -------------------------------------------------------------------------------- 1 | % performs one iteration of the Gauss-Newton algorithm 2 | % each constraint is linearized and added to the Hessian 3 | 4 | function dx = linearize_and_solve(g) 5 | 6 | % number of non-zero elements in graph 7 | nnz = nnz_of_graph(g); 8 | 9 | % allocate the sparse H and the vector b 10 | H = spalloc(length(g.x), length(g.x), nnz); 11 | b = zeros(length(g.x), 1); 12 | 13 | needToAddPrior = true; 14 | 15 | % compute the addend term to H and b for each of our constraints 16 | disp('linearize and build system'); 17 | for eid = 1:length(g.edges) 18 | edge = g.edges(eid); 19 | 20 | % pose-pose constraint 21 | if (strcmp(edge.type, 'P') != 0) 22 | % edge.fromIdx and edge.toIdx describe the location of 23 | % the first element of the pose in the state vector 24 | % You should use also this index when updating the elements 25 | % of the H matrix and the vector b. 
26 | % edge.measurement is the measurement 27 | % edge.information is the information matrix 28 | x1 = g.x(edge.fromIdx:edge.fromIdx+2); % the first robot pose 29 | x2 = g.x(edge.toIdx:edge.toIdx+2); % the second robot pose 30 | 31 | % Computing the error and the Jacobians 32 | % e the error vector 33 | % A Jacobian wrt x1 34 | % B Jacobian wrt x2 35 | [e, A, B] = linearize_pose_pose_constraint(x1, x2, edge.measurement); 36 | 37 | 38 | % TODO: compute and add the term to H and b 39 | i = edge.fromIdx:edge.fromIdx+2; 40 | j = edge.toIdx:edge.toIdx+2; 41 | omega = edge.information; 42 | b(i) = b(i) + (e'*omega*A)'; 43 | b(j) = b(j) + (e'*omega*B)'; 44 | H(i,i) = H(i,i) + A'*omega*A; 45 | H(i,j) = H(i,j) + A'*omega*B; 46 | H(j,i) = H(j,i) + B'*omega*A; 47 | H(j,j) = H(j,j) + B'*omega*B; 48 | 49 | if (needToAddPrior) 50 | % TODO: add the prior for one pose of this edge 51 | % This fixes one node to remain at its current location 52 | % prior = zeros(size(H)); 53 | % prior(1,1) = 1; 54 | H(1,1) = H(1,1) + 1; 55 | H(2,2) = H(2,2) + 1; 56 | H(3,3) = H(3,3) + 1; 57 | needToAddPrior = false; 58 | end 59 | 60 | % pose-landmark constraint 61 | elseif (strcmp(edge.type, 'L') != 0) 62 | % edge.fromIdx and edge.toIdx describe the location of 63 | % the first element of the pose and the landmark in the state vector 64 | % You should use also this index when updating the elements 65 | % of the H matrix and the vector b. 66 | % edge.measurement is the measurement 67 | % edge.information is the information matrix 68 | x1 = g.x(edge.fromIdx:edge.fromIdx+2); % the robot pose 69 | x2 = g.x(edge.toIdx:edge.toIdx+1); % the landmark 70 | 71 | % Computing the error and the Jacobians 72 | % e the error vector 73 | % A Jacobian wrt x1 74 | % B Jacobian wrt x2 75 | [e, A, B] = linearize_pose_landmark_constraint(x1, x2, edge.measurement); 76 | 77 | 78 | % TODO: compute and add the term to H and b 79 | i = edge.fromIdx:edge.fromIdx+2; 80 | j = edge.toIdx:edge.toIdx+1; 81 | omega = edge.information; 82 | b(i) = b(i) + (e'*omega*A)'; 83 | b(j) = b(j) + (e'*omega*B)'; 84 | H(i,i) = H(i,i) + A'*omega*A; 85 | H(i,j) = H(i,j) + A'*omega*B; 86 | H(j,i) = H(j,i) + B'*omega*A; 87 | H(j,j) = H(j,j) + B'*omega*B; 88 | 89 | end 90 | if (needToAddPrior) 91 | % TODO: add the prior for one pose of this edge 92 | % This fixes one node to remain at its current location 93 | % prior = zeros(size(H)); 94 | % prior(1,1) = 1; 95 | H(1,1) = H(1,1) + 1; 96 | H(2,2) = H(2,2) + 1; 97 | H(3,3) = H(3,3) + 1; 98 | needToAddPrior = false; 99 | end 100 | end 101 | 102 | disp('solving system'); 103 | 104 | % TODO: solve the linear system, whereas the solution should be stored in dx 105 | % Remember to use the backslash operator instead of inverting H 106 | dx = -H\b; 107 | 108 | end 109 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/linearize_pose_landmark_constraint.m: -------------------------------------------------------------------------------- 1 | % Compute the error of a pose-landmark constraint 2 | % x 3x1 vector (x,y,theta) of the robot pose 3 | % l 2x1 vector (x,y) of the landmark 4 | % z 2x1 vector (x,y) of the measurement, the position of the landmark in 5 | % the coordinate frame of the robot given by the vector x 6 | % 7 | % Output 8 | % e 2x1 error of the constraint 9 | % A 2x3 Jacobian wrt x 10 | % B 2x2 Jacobian wrt l 11 | function [e, A, B] = linearize_pose_landmark_constraint(x, l, z) 12 | 13 | % TODO compute the error and the Jacobians of the error 14 | X = v2t(x); 15 | 16 | 
Ri = X(1:2,1:2); 17 | 18 | e = Ri'*(l-x(1:2))-z; 19 | 20 | thi = atan2(Ri(2,1),Ri(1,1)); 21 | 22 | xl = l(1); yl = l(2); 23 | xi = x(1); yi = x(2); 24 | 25 | A = [-cos(thi) -sin(thi) -sin(thi)*(xl-xi) + cos(thi)*(yl-yi); sin(thi) -cos(thi) -cos(thi)*(xl-xi)-sin(thi)*(yl-yi)]; 26 | B = [cos(thi) sin(thi); -sin(thi) cos(thi)]; 27 | end; 28 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/linearize_pose_landmark_constraint.m~: -------------------------------------------------------------------------------- 1 | % Compute the error of a pose-landmark constraint 2 | % x 3x1 vector (x,y,theta) of the robot pose 3 | % l 2x1 vector (x,y) of the landmark 4 | % z 2x1 vector (x,y) of the measurement, the position of the landmark in 5 | % the coordinate frame of the robot given by the vector x 6 | % 7 | % Output 8 | % e 2x1 error of the constraint 9 | % A 2x3 Jacobian wrt x 10 | % B 2x2 Jacobian wrt l 11 | function [e, A, B] = linearize_pose_landmark_constraint(x, l, z) 12 | 13 | % TODO compute the error and the Jacobians of the error 14 | X = v2t(x); 15 | 16 | Ri = X(1:2,1:2) 17 | 18 | e = Ri'*(l-x(1:2))-z; 19 | 20 | thi = atan2(Ri(2,1),Ri(1,1)); 21 | 22 | xl = l(1); yl = l(2); 23 | xi = x(1); yi = x(2); 24 | 25 | A = [-cos(thi) -sin(thi) -sin(thi)*(xl-xi) + cos(thi)*(yl-yi); sin(thi) -cos(thi) -cos(thi)*(xl-xi)-sin(thi)*(yl-yi)]; 26 | B = [cos(thi) sin(thi); -sin(thi) cos(thi)]; 27 | end; 28 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/linearize_pose_pose_constraint.m: -------------------------------------------------------------------------------- 1 | % Compute the error of a pose-pose constraint 2 | % x1 3x1 vector (x,y,theta) of the first robot pose 3 | % x2 3x1 vector (x,y,theta) of the second robot pose 4 | % z 3x1 vector (x,y,theta) of the measurement 5 | % 6 | % You may use the functions v2t() and t2v() to compute 7 | % a Homogeneous matrix out of a (x, y, theta) vector 8 | % for computing the error. 
9 | % 10 | % Output 11 | % e 3x1 error of the constraint 12 | % A 3x3 Jacobian wrt x1 13 | % B 3x3 Jacobian wrt x2 14 | function [e, A, B] = linearize_pose_pose_constraint(x1, x2, z) 15 | 16 | % TODO compute the error and the Jacobians of the error 17 | X1 = v2t(x1); 18 | X2 = v2t(x2); 19 | Z = v2t(z); 20 | e = t2v(Z\(X1\X2)); 21 | 22 | Rij = Z(1:3,1:3); 23 | Ri = X1(1:3,1:3); 24 | 25 | thi = atan2(Ri(2,1),Ri(1,1)); 26 | thij = atan2(Rij(2,1),Rij(1,1)); 27 | 28 | xi = x1(1); 29 | yi = x1(2); 30 | xj = x2(1); 31 | yj = x2(2); 32 | 33 | % A = [-Rij'*Ri' 0 0; 0 -Rij'*Ri' 0; 0 0 -1]; 34 | % B = [Rij'*Ri' 0 0; 0 Rij'*Ri' 0; 0 0 1]; 35 | 36 | A = [-cos(thi)*cos(thij)+sin(thi)*sin(thij) -sin(thi)*cos(thij)-cos(thi)*sin(thij) 0; cos(thi)*sin(thij)+sin(thi)*cos(thij) sin(thi)*sin(thij)-cos(thi)*cos(thij) 0; 0 0 -1]; 37 | 38 | A(1:2,3) = [cos(thij)*(-sin(thi)*(xj-xi)+cos(thi)*(yj-yi))+sin(thij)*(-cos(thi)*(xj-xi)-sin(thi)*(yj-yi)); -sin(thij)*(-sin(thi)*(xj-xi)+cos(thi)*(yj-yi))+cos(thij)*(-cos(thi)*(xj-xi)-sin(thi)*(yj-yi))]; 39 | 40 | B = [cos(thi)*cos(thij)-sin(thi)*sin(thij) sin(thi)*cos(thij)+cos(thi)*sin(thij) 0; -cos(thi)*sin(thij)-sin(thi)*cos(thij) -sin(thi)*sin(thij)+cos(thi)*cos(thij) 0; 0 0 1]; 41 | 42 | end; 43 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/linearize_pose_pose_constraint.m~: -------------------------------------------------------------------------------- 1 | % Compute the error of a pose-pose constraint 2 | % x1 3x1 vector (x,y,theta) of the first robot pose 3 | % x2 3x1 vector (x,y,theta) of the second robot pose 4 | % z 3x1 vector (x,y,theta) of the measurement 5 | % 6 | % You may use the functions v2t() and t2v() to compute 7 | % a Homogeneous matrix out of a (x, y, theta) vector 8 | % for computing the error. 
9 | % 10 | % Output 11 | % e 3x1 error of the constraint 12 | % A 3x3 Jacobian wrt x1 13 | % B 3x3 Jacobian wrt x2 14 | function [e, A, B] = linearize_pose_pose_constraint(x1, x2, z) 15 | 16 | % TODO compute the error and the Jacobians of the error 17 | X1 = v2t(x1); 18 | X2 = v2t(x2); 19 | Z = v2t(z); 20 | e = t2v(inv(Z)*(inv(X1)*X2)); 21 | 22 | Rij = Z(1:3,1:3); 23 | Ri = X1(1:3,1:3); 24 | 25 | thi = atan2(Ri(2,1),Ri(1,1)); 26 | thij = atan2(Rij(2,1),Rij(1,1)); 27 | 28 | xi = x1(1); 29 | yi = x1(2); 30 | xj = x2(1); 31 | yj = x2(2); 32 | 33 | % A = [-Rij'*Ri' 0 0; 0 -Rij'*Ri' 0; 0 0 -1]; 34 | % B = [Rij'*Ri' 0 0; 0 Rij'*Ri' 0; 0 0 1]; 35 | 36 | A = [-cos(thi)*cos(thij)+sin(thi)*sin(thij) -sin(thi)*cos(thij)-cos(thi)*sin(thij) 0; cos(thi)*sin(thij)+sin(thi)*cos(thij) sin(thi)*sin(thij)-cos(thi)*cos(thij) 0; 0 0 -1]; 37 | 38 | A(1:2,3) = [cos(thij)*(-sin(thi)*(xj-xi)+cos(thi)*(yj-yi))+sin(thij)*(-cos(thi)*(xj-xi)-sin(thi)*(yj-yi)); -sin(thij)*(-sin(thi)*(xj-xi)+cos(thi)*(yj-yi))+cos(thij)*(-cos(thi)*(xj-xi)-sin(thi)*(yj-yi))]; 39 | 40 | B = [cos(thi)*cos(thij)-sin(thi)*sin(thij) sin(thi)*cos(thij)+cos(thi)*sin(thij) 0; -cos(thi)*sin(thij)-sin(thi)*cos(thij) -sin(thi)*sin(thij)+cos(thi)*cos(thij) 0; 0 0 1]; 41 | 42 | end; 43 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/lsSLAM.m: -------------------------------------------------------------------------------- 1 | more off; 2 | clear all; 3 | close all; 4 | 5 | addpath('tools'); 6 | 7 | % load the graph into the variable g 8 | % only leave one line uncommented 9 | 10 | % simulation datasets 11 | %load ../data/simulation-pose-pose.dat 12 | %load ../data/simulation-pose-landmark.dat 13 | 14 | % real-world datasets 15 | %load ../data/intel.dat 16 | load ../data/dlr.dat 17 | 18 | % plot the initial state of the graph 19 | plot_graph(g, 0); 20 | 21 | printf('Initial error %f\n', compute_global_error(g)); 22 | 23 | % the number of iterations 24 | numIterations = 100; 25 | 26 | % maximum allowed dx 27 | EPSILON = 10^-4; 28 | 29 | % Error 30 | err = 0; 31 | 32 | % carry out the iterations 33 | for i = 1:numIterations 34 | printf('Performing iteration %d\n', i); 35 | 36 | dx = linearize_and_solve(g); 37 | 38 | % TODO: apply the solution to the state vector g.x 39 | g.x = g.x + dx; 40 | 41 | % plot the current state of the graph 42 | plot_graph(g, i); 43 | 44 | err = compute_global_error(g); 45 | 46 | % Print current error 47 | printf('Current error %f\n', err); 48 | 49 | % TODO: implement termination criterion as suggested on the sheet 50 | if(max(abs(dx)) < EPSILON) 51 | break; 52 | endif 53 | end 54 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/test_jacobian_pose_landmark.m: -------------------------------------------------------------------------------- 17 | if (norm(e - e_true) > epsilon) 18 | disp('Your error function seems to return a wrong value'); 19 | disp('Result of your function'); disp(e); 20 | disp('True value'); disp(e_true); 21 | else 22 | disp('The computation of the error vector appears to be correct'); 23 | end 24 | 25 | % compute it numerically 26 | delta = 1e-6; 27 | scalar = 1 / (2*delta); 28 | 29 | % test for x1 30 | ANumeric = zeros(2,3); 31 | for d = 1:3 32 | curX = x1; 33 | curX(d) += delta; 34 | err = linearize_pose_landmark_constraint(curX, x2, z); 35 | curX = x1; 36 | curX(d) -= delta; 37 | err -= linearize_pose_landmark_constraint(curX, x2, z); 38 | 39 | ANumeric(:, d) = scalar * err; 40 | end 41 | 42 | diff = ANumeric - A; 43 | if max(max(abs(diff))) > epsilon 44 | disp('Error in the Jacobian for x1'); 45 | disp('Your analytic Jacobian'); disp(A); 46 | disp('Numerically computed Jacobian'); disp(ANumeric); 47 | disp('Difference'); disp(diff); 48 | else 49 | disp('Jacobian for x1 appears to be
correct'); 50 | end 51 | 52 | 53 | % test for x2 54 | BNumeric = zeros(2,2); 55 | for d = 1:2 56 | curX = x2; 57 | curX(d) += delta; 58 | err = linearize_pose_landmark_constraint(x1, curX, z); 59 | curX = x2; 60 | curX(d) -= delta; 61 | err -= linearize_pose_landmark_constraint(x1, curX, z); 62 | 63 | BNumeric(:, d) = scalar * err; 64 | end 65 | 66 | diff = BNumeric - B; 67 | if max(max(abs(diff))) > epsilon 68 | disp('Error in the Jacobian for x2'); 69 | disp('Your analytic Jacobian'); disp(B); 70 | disp('Numerically computed Jacobian'); disp(BNumeric); 71 | disp('Difference'); disp(diff); 72 | else 73 | disp('Jacobian for x2 appears to be correct'); 74 | end 75 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/test_jacobian_pose_pose.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | close all; 3 | more off; 4 | addpath('tools'); 5 | 6 | epsilon = 1e-5; 7 | 8 | x1 = [1.1 0.9 1]'; 9 | x2 = [2.2 1.85 1.2]'; 10 | z = [0.9 1.1 1.05]'; 11 | 12 | % get the analytic Jacobian 13 | [e, A, B] = linearize_pose_pose_constraint(x1, x2, z); 14 | 15 | % check the error vector 16 | e_true = [-1.06617 -1.18076 -0.85000]'; 17 | if (norm(e - e_true) > epsilon) 18 | disp('Your error function seems to return a wrong value'); 19 | disp('Result of your function'); disp(e); 20 | disp('True value'); disp(e_true); 21 | else 22 | disp('The computation of the error vector appears to be correct'); 23 | end 24 | 25 | % compute it numerically 26 | delta = 1e-6; 27 | scalar = 1 / (2*delta); 28 | 29 | % test for x1 30 | ANumeric = zeros(3,3); 31 | for d = 1:3 32 | curX = x1; 33 | curX(d) += delta; 34 | err = linearize_pose_pose_constraint(curX, x2, z); 35 | curX = x1; 36 | curX(d) -= delta; 37 | err -= linearize_pose_pose_constraint(curX, x2, z); 38 | 39 | ANumeric(:, d) = scalar * err; 40 | end 41 | 42 | diff = ANumeric - A; 43 | if max(max(abs(diff))) > epsilon 44 | disp('Error in the Jacobian for x1'); 45 | disp('Your analytic Jacobian'); disp(A); 46 | disp('Numerically computed Jacobian'); disp(ANumeric); 47 | disp('Difference'); disp(diff); 48 | else 49 | disp('Jacobian for x1 appears to be correct'); 50 | end 51 | 52 | 53 | % test for x2 54 | BNumeric = zeros(3,3); 55 | for d = 1:3 56 | curX = x2; 57 | curX(d) += delta; 58 | err = linearize_pose_pose_constraint(x1, curX, z); 59 | curX = x2; 60 | curX(d) -= delta; 61 | err -= linearize_pose_pose_constraint(x1, curX, z); 62 | 63 | BNumeric(:, d) = scalar * err; 64 | end 65 | 66 | diff = BNumeric - B; 67 | if max(max(abs(diff))) > epsilon 68 | disp('Error in the Jacobian for x2'); 69 | disp('Your analytic Jacobian'); disp(B); 70 | disp('Numerically computed Jacobian'); disp(BNumeric); 71 | disp('Difference'); disp(diff); 72 | else 73 | disp('Jacobian for x2 appears to be correct'); 74 | end 75 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/tools/build_structure.m: -------------------------------------------------------------------------------- 1 | % calculates the non-zero pattern of the Hessian matrix of a given graph 2 | 3 | function idx = build_structure(g) 4 | 5 | idx = []; 6 | 7 | % elements along the diagonal 8 | for [value, key] = g.idLookup 9 | dim = value.dimension; 10 | offset = value.offset; 11 | [r,c] = meshgrid(offset+1 : offset+dim, offset+1 : offset+dim); 12 | idx = [idx; [vec(r) vec(c)]]; 13 | end 14 | 15 | % off-diagonal elements 16 | for eid = 1:length(g.edges) 17 | edge = g.edges(eid); 18 | 
if (strcmp(edge.type, 'P') != 0) 19 | [r,c] = meshgrid(edge.fromIdx:edge.fromIdx+2, edge.toIdx:edge.toIdx+2); 20 | idx = [idx; [vec(r) vec(c); vec(c) vec(r)]]; 21 | elseif (strcmp(edge.type, 'L') != 0) 22 | [r,c] = meshgrid(edge.fromIdx:edge.fromIdx+2, edge.toIdx:edge.toIdx+1); 23 | idx = [idx; [vec(r) vec(c); vec(c) vec(r)]]; 24 | end 25 | end 26 | 27 | %idx = sort(idx); 28 | 29 | end 30 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/tools/get_block_for_id.m: -------------------------------------------------------------------------------- 1 | % returns the block of the state vector which corresponds to the given id 2 | function block = get_block_for_id(g, id) 3 | 4 | blockInfo = getfield(g.idLookup, num2str(id)); 5 | block = g.x(1+blockInfo.offset : blockInfo.offset + blockInfo.dimension); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/tools/get_poses_landmarks.m: -------------------------------------------------------------------------------- 1 | % extract the offset of the poses and the landmarks 2 | 3 | function [poses, landmarks] = get_poses_landmarks(g) 4 | 5 | poses = []; 6 | landmarks = []; 7 | 8 | for [value, key] = g.idLookup 9 | dim = value.dimension; 10 | offset = value.offset; 11 | if (dim == 3) 12 | poses = [poses; offset]; 13 | elseif (dim == 2) 14 | landmarks = [landmarks; offset]; 15 | end 16 | end 17 | 18 | end 19 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/tools/invt.m: -------------------------------------------------------------------------------- 1 | % inverts a homogenous transform 2 | function A = invt(m) 3 | A = [m(1:2, 1:2)' [0 0]'; [0 0 1]]; 4 | A(1:2, 3) = -A(1:2, 1:2) * m(1:2, 3); 5 | end 6 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/tools/nnz_of_graph.m: -------------------------------------------------------------------------------- 1 | % calculates the number of non-zeros of a graph 2 | % Actually, it is an upper bound, as duplicate edges might be counted several times 3 | 4 | function nnz = nnz_of_graph(g) 5 | 6 | nnz = 0; 7 | 8 | % elements along the diagonal 9 | for [value, key] = g.idLookup 10 | nnz += value.dimension^2; 11 | end 12 | 13 | % off-diagonal elements 14 | for eid = 1:length(g.edges) 15 | edge = g.edges(eid); 16 | if (strcmp(edge.type, 'P') != 0) 17 | nnz += 2 * 9; 18 | elseif (strcmp(edge.type, 'L') != 0) 19 | nnz += 2 * 6; 20 | end 21 | end 22 | 23 | end 24 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/tools/normalize_angle.m: -------------------------------------------------------------------------------- 1 | function [phi] = normalize_angle(phi) 2 | % Normalize phi to be between -pi and pi 3 | % phi can also be a vector of angles 4 | 5 | phi = mod(phi + pi, 2*pi) - pi; 6 | 7 | end 8 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/tools/plot_graph.m: -------------------------------------------------------------------------------- 1 | % plot a 2D SLAM graph 2 | function plot_graph(g, iteration = -1) 3 | 4 | clf; 5 | graphics_toolkit gnuplot 6 | hold on; 7 | 8 | 9 | [p, l] = get_poses_landmarks(g); 10 | 11 | if (length(l) > 0) 12 | landmarkIdxX = l+1; 13 | landmarkIdxY = l+2; 14 | plot(g.x(landmarkIdxX), g.x(landmarkIdxY), '.or', 'markersize', 4); 15 | end 16 | 17 | if (length(p) > 0) 18 | 
pIdxX = p+1; 19 | pIdxY = p+2; 20 | plot(g.x(pIdxX), g.x(pIdxY), '.xb', 'markersize', 4); 21 | end 22 | 23 | % draw line segments??? 24 | if 0 25 | poseEdgesP1 = []; 26 | poseEdgesP2 = []; 27 | landmarkEdgesP1 = []; 28 | landmarkEdgesP2 = []; 29 | for eid = 1:length(g.edges) 30 | edge = g.edges(eid); 31 | if (strcmp(edge.type, 'P') != 0) 32 | poseEdgesP1 = [poseEdgesP1, g.x(edge.fromIdx:edge.fromIdx+1)]; 33 | poseEdgesP2 = [poseEdgesP2, g.x(edge.toIdx:edge.toIdx+1)]; 34 | elseif (strcmp(edge.type, 'L') != 0) 35 | landmarkEdgesP1 = [landmarkEdgesP1, g.x(edge.fromIdx:edge.fromIdx+1)]; 36 | landmarkEdgesP2 = [landmarkEdgesP2, g.x(edge.toIdx:edge.toIdx+1)]; 37 | end 38 | end 39 | 40 | linespointx = [poseEdgesP1(1,:); poseEdgesP2(1,:)]; 41 | linespointy = [poseEdgesP1(2,:); poseEdgesP2(2,:)]; 42 | 43 | plot(linespointx, linespointy, "r"); 44 | end 45 | 46 | %plot(poseEdgesP1(1,:), poseEdgesP1(2,:), "r"); 47 | 48 | %if (columns(poseEdgesP1) > 0) 49 | %end 50 | %if (columns(landmarkEdges) > 0) 51 | %end 52 | 53 | hold off; 54 | 55 | figure(1, "visible", "off"); 56 | drawnow; 57 | %pause(0.1); 58 | if (iteration >= 0) 59 | filename = sprintf('../plots/lsslam_%03d.png', iteration); 60 | print(filename, '-dpng'); 61 | end 62 | 63 | 64 | end 65 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/tools/plot_graph.m~: -------------------------------------------------------------------------------- 1 | % plot a 2D SLAM graph 2 | function plot_graph(g, iteration = -1) 3 | 4 | clf; 5 | hold on; 6 | graphics_toolkit gnuplot 7 | 8 | [p, l] = get_poses_landmarks(g); 9 | 10 | if (length(l) > 0) 11 | landmarkIdxX = l+1; 12 | landmarkIdxY = l+2; 13 | plot(g.x(landmarkIdxX), g.x(landmarkIdxY), '.or', 'markersize', 4); 14 | end 15 | 16 | if (length(p) > 0) 17 | pIdxX = p+1; 18 | pIdxY = p+2; 19 | plot(g.x(pIdxX), g.x(pIdxY), '.xb', 'markersize', 4); 20 | end 21 | 22 | % draw line segments??? 
23 | if 0 24 | poseEdgesP1 = []; 25 | poseEdgesP2 = []; 26 | landmarkEdgesP1 = []; 27 | landmarkEdgesP2 = []; 28 | for eid = 1:length(g.edges) 29 | edge = g.edges(eid); 30 | if (strcmp(edge.type, 'P') != 0) 31 | poseEdgesP1 = [poseEdgesP1, g.x(edge.fromIdx:edge.fromIdx+1)]; 32 | poseEdgesP2 = [poseEdgesP2, g.x(edge.toIdx:edge.toIdx+1)]; 33 | elseif (strcmp(edge.type, 'L') != 0) 34 | landmarkEdgesP1 = [landmarkEdgesP1, g.x(edge.fromIdx:edge.fromIdx+1)]; 35 | landmarkEdgesP2 = [landmarkEdgesP2, g.x(edge.toIdx:edge.toIdx+1)]; 36 | end 37 | end 38 | 39 | linespointx = [poseEdgesP1(1,:); poseEdgesP2(1,:)]; 40 | linespointy = [poseEdgesP1(2,:); poseEdgesP2(2,:)]; 41 | 42 | plot(linespointx, linespointy, "r"); 43 | end 44 | 45 | %plot(poseEdgesP1(1,:), poseEdgesP1(2,:), "r"); 46 | 47 | %if (columns(poseEdgesP1) > 0) 48 | %end 49 | %if (columns(landmarkEdges) > 0) 50 | %end 51 | 52 | hold off; 53 | 54 | figure(1, "visible", "off"); 55 | drawnow; 56 | %pause(0.1); 57 | if (iteration >= 0) 58 | filename = sprintf('../plots/lsslam_%03d.png', iteration); 59 | print(filename, '-dpng'); 60 | end 61 | 62 | 63 | end 64 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/tools/read_graph.m: -------------------------------------------------------------------------------- 1 | % read a g2o data file describing a 2D SLAM instance 2 | function graph = read_graph(filename) 3 | 4 | fid = fopen(filename, 'r'); 5 | 6 | graph = struct ( 7 | 'x', [], 8 | 'edges', [], 9 | 'idLookup', struct 10 | ); 11 | 12 | disp('Parsing File'); 13 | while true 14 | ln = fgetl(fid); 15 | if (ln == -1) 16 | break; 17 | end 18 | tokens = strsplit(ln, ' ', true); 19 | double_tokens = str2double(tokens); 20 | 21 | tk = 2; 22 | if (strcmp(tokens(1), 'VERTEX_SE2') != 0) 23 | id = int32(double_tokens(tk++)); 24 | values = double_tokens(tk:tk+2)'; tk += 3; 25 | graph.idLookup = setfield(graph.idLookup, num2str(id), struct('offset', length(graph.x), 'dimension', length(values))); 26 | graph.x = [graph.x; values]; 27 | elseif (strcmp(tokens(1), 'VERTEX_XY') != 0) 28 | id = int32(double_tokens(tk++)); 29 | values = double_tokens(tk:tk+1)'; tk += 2; 30 | graph.idLookup = setfield(graph.idLookup, num2str(id), struct('offset', length(graph.x), 'dimension', length(values))); 31 | graph.x = [graph.x; values]; 32 | elseif (strcmp(tokens(1), 'EDGE_SE2') != 0) 33 | fromId = int32(double_tokens(tk++)); 34 | toId = int32(double_tokens(tk++)); 35 | measurement = double_tokens(tk:tk+2)'; tk += 3; 36 | uppertri = double_tokens(tk:tk+5)'; tk += 6; 37 | information = [uppertri(1), uppertri(2), uppertri(3); 38 | uppertri(2), uppertri(4), uppertri(5); 39 | uppertri(3), uppertri(5), uppertri(6)]; 40 | graph.edges = [graph.edges; struct( 41 | 'type', 'P', 42 | 'from', fromId, 43 | 'to', toId, 44 | 'measurement', measurement, 45 | 'information', information)]; 46 | elseif (strcmp(tokens(1), 'EDGE_SE2_XY') != 0) 47 | fromId = int32(double_tokens(tk++)); 48 | toId = int32(double_tokens(tk++)); 49 | measurement = double_tokens(tk:tk+1)'; tk += 2; 50 | uppertri = double_tokens(tk:tk+2)'; tk += 3; 51 | information = [uppertri(1), uppertri(2); uppertri(2), uppertri(3)]; 52 | graph.edges = [graph.edges; struct( 53 | 'type', 'L', 54 | 'from', fromId, 55 | 'to', toId, 56 | 'measurement', measurement, 57 | 'information', information)]; 58 | end 59 | 60 | end 61 | 62 | % setup the index into the state vector 63 | disp('Preparing helper structs'); 64 | for eid = 1:length(graph.edges) 65 | graph.edges(eid).fromIdx = 
getfield(graph.idLookup, num2str(graph.edges(eid).from)).offset + 1; 66 | graph.edges(eid).toIdx = getfield(graph.idLookup, num2str(graph.edges(eid).to)).offset + 1; 67 | end 68 | 69 | end 70 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/tools/t2v.m: -------------------------------------------------------------------------------- 1 | % computes the pose vector v from a homogeneous transform A 2 | function v=t2v(A) 3 | v = [A(1:2,3); atan2(A(2,1),A(1,1))]; 4 | end 5 | -------------------------------------------------------------------------------- /8_GraphSLAM/octave/tools/v2t.m: -------------------------------------------------------------------------------- 1 | % computes the homogeneous transform matrix A of the pose vector v 2 | function A=v2t(v) 3 | c=cos(v(3)); 4 | s=sin(v(3)); 5 | A=[c, -s, v(1); 6 | s, c, v(2); 7 | 0 0 1 ]; 8 | end 9 | -------------------------------------------------------------------------------- /8_GraphSLAM/plots/dlr/lsslam_000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/dlr/lsslam_000.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/dlr/lsslam_001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/dlr/lsslam_001.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/dlr/lsslam_002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/dlr/lsslam_002.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/dlr/lsslam_003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/dlr/lsslam_003.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/dlr/lsslam_004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/dlr/lsslam_004.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/dlr/lsslam_005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/dlr/lsslam_005.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/dlr/lsslam_006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/dlr/lsslam_006.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/dlr/lsslam_007.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/dlr/lsslam_007.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/dlr/lsslam_008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/dlr/lsslam_008.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/dlr/lsslam_009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/dlr/lsslam_009.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/dlr/lsslam_010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/dlr/lsslam_010.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/intel/lsslam_000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/intel/lsslam_000.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/intel/lsslam_001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/intel/lsslam_001.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/intel/lsslam_002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/intel/lsslam_002.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/intel/lsslam_003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/intel/lsslam_003.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/intel/lsslam_004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/intel/lsslam_004.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/intel/lsslam_005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/intel/lsslam_005.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_landmark/lsslam_000.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_landmark/lsslam_000.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_landmark/lsslam_001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_landmark/lsslam_001.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_landmark/lsslam_002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_landmark/lsslam_002.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_landmark/lsslam_003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_landmark/lsslam_003.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_landmark/lsslam_004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_landmark/lsslam_004.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_pose/lsslam_000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_pose/lsslam_000.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_pose/lsslam_001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_pose/lsslam_001.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_pose/lsslam_002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_pose/lsslam_002.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_pose/lsslam_003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_pose/lsslam_003.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_pose/lsslam_004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_pose/lsslam_004.png 
-------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_pose/lsslam_005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_pose/lsslam_005.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_pose/lsslam_006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_pose/lsslam_006.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_pose/lsslam_007.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_pose/lsslam_007.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_pose/lsslam_008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_pose/lsslam_008.png -------------------------------------------------------------------------------- /8_GraphSLAM/plots/sim_pose_pose/lsslam_009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kiran-mohan/SLAM-Algorithms-Octave/e0254ad38cfca2170b2af68c96c183df77c76252/8_GraphSLAM/plots/sim_pose_pose/lsslam_009.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SLAM-Algorithms-Octave 2 | Solutions to the assignments of the Robot Mapping course (WS 2013/14) by Dr. Cyrill Stachniss at the University of Freiburg 3 | Framework credits: Dr. Cyrill Stachniss 4 | -------------------------------------- 5 | 6 | This repository contains solutions to the assignments posted on the course website: http://ais.informatik.uni-freiburg.de/teaching/ws13/mapping/index_en.php 7 | 8 | ## What is SLAM? 9 | Simultaneous Localization And Mapping (SLAM) is a fundamental problem for any autonomously navigating system. Such a system - typically a robot - must build a model of the environment around it (mapping) and, at the same time, answer the question "Where am I in my map?" (localization). SLAM has therefore become an integral part of robotics. 10 | 11 | ## Theory 12 | The mathematics at the heart of SLAM algorithms includes probability theory together with Bayesian and Markov methods. Depending on the specifics of the SLAM algorithm, knowledge of several other techniques may also be required - for example, GraphSLAM (or least-squares SLAM) requires knowledge of graph theory. 13 | 14 | ## About the course 15 | Robot Mapping by Dr. Cyrill Stachniss introduces several SLAM algorithms in sufficient detail to facilitate their implementation. 16 | The primary textbook for the course is "Probabilistic Robotics" by Thrun et al. 17 | 18 | ## Software Used 19 | Octave on Ubuntu was used to implement the SLAM algorithms. The code should also be compatible with MATLAB; however, this has not been tested.
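As a quick-start sketch (assuming Octave is installed and the session is started from the repository root), each exercise is run from its `octave` sub-folder; for example, the GraphSLAM exercise:

```octave
% minimal example, assuming an Octave session started in the repository root
cd 8_GraphSLAM/octave
lsSLAM   % runs the Gauss-Newton iterations on the dataset selected inside lsSLAM.m;
         % per-iteration plots are written to ../plots/lsslam_XXX.png
```

The other exercises follow the same pattern, with their main script (e.g. `LSCalibrateOdometry.m` for the odometry calibration) sitting next to a `tools` directory that the script adds to the path.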
20 | 21 | ## Results 22 | The video / plot results are in the 'plots' folder inside each algorithm's folder. 23 | 24 | ## References 25 | [1] Dr. Cyrill Stachniss, Robot Mapping course, University of Freiburg. https://www.youtube.com/watch?v=3Yl2aq28LFQ&index=15&list=PLgnQpQtFTOGQrZ4O5QzbIHgl3b1JHimN_ 26 | 27 | [2] Thrun et al., Probabilistic Robotics. 28 | --------------------------------------------------------------------------------