├── data
│   ├── __init__.py
│   ├── pt1_data.pkl
│   ├── pt3_data.pkl
│   ├── utils.py
│   └── data.py
├── images
│   ├── diagram.png
│   ├── error-plots.png
│   └── estimated-trajectory.gif
├── output
│   ├── pt1_submission.txt
│   ├── pt2_submission.txt
│   └── pt3_submission.txt
├── .gitignore
├── rotations.py
├── README.md
└── es_ekf.py
/data/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/data/pt1_data.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jasleon/Vehicle-State-Estimation/HEAD/data/pt1_data.pkl
--------------------------------------------------------------------------------
/data/pt3_data.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jasleon/Vehicle-State-Estimation/HEAD/data/pt3_data.pkl
--------------------------------------------------------------------------------
/images/diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jasleon/Vehicle-State-Estimation/HEAD/images/diagram.png
--------------------------------------------------------------------------------
/images/error-plots.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jasleon/Vehicle-State-Estimation/HEAD/images/error-plots.png
--------------------------------------------------------------------------------
/images/estimated-trajectory.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jasleon/Vehicle-State-Estimation/HEAD/images/estimated-trajectory.gif
--------------------------------------------------------------------------------
/output/pt1_submission.txt:
--------------------------------------------------------------------------------
1 | 118.515 174.702 0.072 137.020 154.306 -0.080 141.691 126.373 0.100 146.913 96.627 0.056 166.757 82.440 -0.027
--------------------------------------------------------------------------------
/output/pt2_submission.txt:
--------------------------------------------------------------------------------
1 | 119.286 173.996 -0.031 137.559 153.844 -0.040 142.216 125.987 0.152 147.193 95.982 0.089 167.110 81.751 0.003
--------------------------------------------------------------------------------
/output/pt3_submission.txt:
--------------------------------------------------------------------------------
1 | 4.680 148.585 0.042 32.475 188.917 -0.026 83.076 198.146 0.590 130.192 165.270 -0.108 144.005 111.448 -0.041
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | # Visual Studio Code project settings
132 | .vscode
--------------------------------------------------------------------------------
/data/utils.py:
--------------------------------------------------------------------------------
1 | # Utility classes and functions for Coursera SDC Course 2.
2 | #
3 | # Authors: Trevor Ablett and Jonathan Kelly
4 | # University of Toronto Institute for Aerospace Studies
5 |
6 | import numpy as np
7 | from numpy import sin, cos, arctan2, sqrt
8 |
9 | class StampedData():
10 | def __init__(self):
11 | self.data = []
12 | self.t = []
13 |
14 | def convert_lists_to_numpy(self):
15 | self.data = np.array(self.data)
16 | self.t = np.array(self.t)
17 |
18 | def to_rot(r):
19 | Rx = np.mat([[ 1, 0, 0],
20 | [ 0, cos(r[0]), -sin(r[0]) ],
21 | [ 0, sin(r[0]), cos(r[0]) ]])
22 |
23 | Ry = np.mat([[ cos(r[1]), 0, sin(r[1]) ],
24 | [ 0, 1, 0 ],
25 | [-sin(r[1]), 0, cos(r[1]) ]])
26 |
27 | Rz = np.mat([[ cos(r[2]), -sin(r[2]), 0 ],
28 | [ sin(r[2]), cos(r[2]), 0 ],
29 | [ 0, 0, 1 ]])
30 |
31 | return Rz*Ry*Rx
32 |
33 | def to_mat(p, r):
34 |     "Given a position [m] and orientation as RPY [rad], create a homogeneous transformation matrix."
35 | R = to_rot(r)
36 | return np.mat(np.r_[np.c_[R, p], [np.array([0, 0, 0, 1])]])
37 |
38 | def from_mat(T):
39 |     "Get position [m] and orientation as RPY [rad] from a homogeneous transformation matrix."
40 | p = [T[0,3], T[1,3], T[2,3]]
41 | r = [arctan2(T[2,1],T[2,2]) , arctan2(-T[2,0],sqrt(T[2,1] ** 2 + T[2,2] ** 2)), arctan2(T[1,0],T[0,0]) ]
42 | return p, r
43 |
44 | def transform_data_right(p, r, T_frame):
45 | "Transform data to a different frame."
46 | p_new = [0]*len(p)
47 | r_new = [0]*len(p)
48 |
49 | for i in (range(len(p))):
50 | T_i = to_mat(p[i, :], r[i, :])
51 | T_new = T_i.dot(T_frame)
52 | p_new[i], r_new[i] = from_mat(T_new)
53 |
54 | return np.array(p_new), np.array(r_new)
55 |
56 | def transform_data_left(p, r, T_frame):
57 | "Transform data to different frame."
58 | p_new = [0]*len(p)
59 | r_new = [0]*len(p)
60 |
61 | for i in (range(len(p))):
62 | T_i = to_mat(p[i, :], r[i, :])
63 | T_new = T_frame.dot(T_i)
64 | p_new[i], r_new[i] = from_mat(T_new)
65 |
66 | return np.array(p_new), np.array(r_new)
67 |
68 | def to_own_frame(r, x):
69 | x_new = np.zeros(x.shape)
70 |
71 | for i in (range(len(x))):
72 | x_new[i] = x[i].dot(to_rot(r[i]))
73 |
74 | return x_new
75 |
76 | def to_angular_rates(r, r_dot):
77 | """
78 | Compute the inverse of the Euler kinematical matrix for the given roll,
79 | pitch, and yaw angles - the kinematical matrix is used to compute the
80 | rate of change of the Euler angles as a function of the angular velocity.
81 | """
82 | # Still need to differentiate the Euler angles first wrt time.
83 | cr = np.cos(r[0])
84 | sr = np.sin(r[0])
85 | sp = np.sin(r[1])
86 | cp = np.cos(r[1])
87 |
88 | # Generate inverse of kinematical matrix.
89 | # M = np.array([[1, tp*sr, tp*cr], [0, cr, -sr], [0, sr/cp, cr/cp]])
90 | G = np.array([[1, 0, -sp], [0, cr, sr*cp], [0, -sr, cr*cp]])
91 |
92 | return G @ r_dot
93 |
94 | def integ(x, t):
95 |     out = np.zeros((len(x) + 1, x.shape[1]))  # out[0] is the initial value; assumes len(t) == len(x) + 1
96 |     for i in range(len(x)):
97 |         dt = t[i + 1] - t[i]
98 |         out[i + 1, :] = out[i, :] + x[i, :]*dt
99 |
100 | return out
101 |
102 | def diff(x, t):
103 | out = [None] * (len(x) - 1)
104 | for i in (range(len(out))):
105 | # Forward finite difference scheme.
106 | dt = t[i + 1] - t[i]
107 | dx = x[i + 1, :] - x[i, :]
108 | out[i] = dx / dt
109 |
110 | return out
--------------------------------------------------------------------------------
/data/data.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import data.utils as u
3 |
4 |
5 | class Data():
6 | """
7 | Storage class specific to ground truth data generated by CARLA simulator.
8 | """
9 |
10 | def __init__(self, t=np.array([None]), p=np.array([None]), r=np.array([None]), v=np.array([None]),
11 | w=np.array([None]), a=np.array([None]), alpha=np.array([None]), do_diff = False):
12 | """
13 | :param t: Timestamps [s]
14 | :param p: Position [m]
15 | :param r: Orientation [rad]
16 | :param v: Velocity [m/s]
17 | :param w: Ang. Velocity [rad/s]
18 | :param a: Acceleration [m/s^2]
19 | :param alpha: Angular Acceleration [rad/s^2]
20 |         :param do_diff: Flag - generate velocities and accelerations by differentiating
21 | """
22 | self.do_diff = do_diff
23 | self._p_init = p
24 | self._r_init = r
25 | self._v_init = v
26 | self._w_init = w
27 | self._a_init = a
28 | self._alpha_init = alpha
29 |
30 | self._t = t
31 | self._p = p
32 | self._r = r
33 | self._v = v
34 | self._w = w
35 | self._a = a
36 | self._alpha = alpha
37 |
38 | def reset(self):
39 | """
40 | Resets all data back to initial (ground truth) positions and orientations.
41 | """
42 | self._p = self._p_init
43 | self._r = self._r_init
44 | self._v = self._v_init
45 | self._w = self._w_init
46 | self._a = self._a_init
47 | self._alpha = self._alpha_init
48 |
49 | @property
50 | def p(self):
51 | if self._p.any():
52 | return self._p
53 | raise ValueError('No position data available.')
54 |
55 | @p.setter
56 | def p(self, value):
57 | self._p = value
58 |
59 | @property
60 | def r(self):
61 | if self._r.any():
62 | return self._r
63 | raise ValueError('No orientation data available.')
64 |
65 | @r.setter
66 | def r(self, value):
67 | # Active transform convention - roll applied first, then
68 | # pitch and then yaw.
69 | self._r = value
70 |
71 | @property
72 | def v(self):
73 | if self._v.any():
74 | return self._v
75 | elif self.do_diff:
76 | self._v = np.array(u.diff(self.p, self._t))
77 | return self._v
78 | raise ValueError('No velocity data available')
79 |
80 | @v.setter
81 | def v(self, value):
82 | self._v = value
83 |
84 | @property
85 | def a(self):
86 | if self._a.any():
87 | return self._a
88 | elif self.do_diff:
89 | self._a = np.array(u.diff(self.v, self._t))
90 | return self._a
91 | raise ValueError('No acceleration data available')
92 |
93 | @a.setter
94 | def a(self, value):
95 | self._a = value
96 |
97 | @property
98 | def w(self):
99 | if self._w.any():
100 | return self._w
101 | elif self.do_diff:
102 | # First determine \dot{Theta} - the rates of change of the
103 | # Euler angles...
104 | self._w = np.array(u.diff(self.r, self._t))
105 |
106 | # Now convert to angular velocities *in the vehicle (IMU) frame*.
107 | for i in (range(len(self._w))):
108 | # Referencing _r ... must be set before a call to w(self).
109 | self._w[i, :] = u.to_angular_rates(self.r[i, :], self._w[i, :])
110 | return self._w
111 | raise ValueError('No angular velocity data available')
112 |
113 | @w.setter
114 | def w(self, value):
115 | self._w = value
116 |
117 | @property
118 | def alpha(self):
119 | if self._alpha.any():
120 | return self._alpha
121 | elif self.do_diff:
122 | self._alpha = np.array(u.diff(self.w, self._t))
123 | return self._alpha
124 | raise ValueError('No angular acceleration data available')
125 |
126 | @alpha.setter
127 | def alpha(self, value):
128 | self._alpha = value
129 |
130 | def transform(self, T = np.array([[1, 0, 0, 0],[0, 1, 0, 0],[0, 0, 1, 0],[0, 0, 0, 1]]), side = "right"):
131 | if side == "right":
132 | p, r = u.transform_data_right(self.p, self.r, T)
133 | else:
134 | p, r = u.transform_data_left(self.p, self.r, T)
135 |
136 | return Data(self._t, p, r, do_diff = True)
137 |
138 | def slice(self, s = 0, e = 0):
139 |         """Slice all data from s to e."""
140 | self.p = self.p[s:e]
141 | self.r = self.r[s:e]
142 | self.alpha = self.alpha[s:e]
143 | self.v = self.v[s:e]
144 | self.w = self.w[s:e]
145 | self.a = self.a[s:e]
--------------------------------------------------------------------------------
/rotations.py:
--------------------------------------------------------------------------------
1 | # Utility file with functions for handling rotations.
2 | #
3 | # Authors: Trevor Ablett and Jonathan Kelly
4 | # University of Toronto Institute for Aerospace Studies
5 | import numpy as np
6 |
7 | def angle_normalize(a):
8 | """Normalize angles to lie in range -pi < a[i] <= pi."""
9 | a = np.remainder(a, 2*np.pi)
10 | a[a <= -np.pi] += 2*np.pi
11 | a[a > np.pi] -= 2*np.pi
12 | return a
13 |
14 | def skew_symmetric(v):
15 | """Skew symmetric form of a 3x1 vector."""
16 | return np.array(
17 | [[0, -v[2], v[1]],
18 | [v[2], 0, -v[0]],
19 | [-v[1], v[0], 0]], dtype=np.float64)
20 |
21 | def rpy_jacobian_axis_angle(a):
22 | """Jacobian of RPY Euler angles with respect to axis-angle vector."""
23 | if not (type(a) == np.ndarray and len(a) == 3):
24 | raise ValueError("'a' must be a np.ndarray with length 3.")
25 | # From three-parameter representation, compute u and theta.
26 | na = np.sqrt(a @ a)
27 | na3 = na**3
28 | t = np.sqrt(a @ a)
29 | u = a/t
30 |
31 | # First-order approximation of Jacobian wrt u, t.
32 | Jr = np.array([[t/(t**2*u[0]**2 + 1), 0, 0, u[0]/(t**2*u[0]**2 + 1)],
33 | [0, t/np.sqrt(1 - t**2*u[1]**2), 0, u[1]/np.sqrt(1 - t**2*u[1]**2)],
34 | [0, 0, t/(t**2*u[2]**2 + 1), u[2]/(t**2*u[2]**2 + 1)]])
35 |
36 | # Jacobian of u, t wrt a.
37 | Ja = np.array([[(a[1]**2 + a[2]**2)/na3, -(a[0]*a[1])/na3, -(a[0]*a[2])/na3],
38 | [ -(a[0]*a[1])/na3, (a[0]**2 + a[2]**2)/na3, -(a[1]*a[2])/na3],
39 | [ -(a[0]*a[2])/na3, -(a[1]*a[2])/na3, (a[0]**2 + a[1]**2)/na3],
40 | [ a[0]/na, a[1]/na, a[2]/na]])
41 |
42 | return Jr @ Ja
43 |
44 | class Quaternion():
45 | def __init__(self, w=1., x=0., y=0., z=0., axis_angle=None, euler=None):
46 | """
47 |         Allow initialization with explicit quaternion wxyz, axis-angle, or Euler XYZ (RPY) angles.
48 |
49 | :param w: w (real) of quaternion.
50 | :param x: x (i) of quaternion.
51 | :param y: y (j) of quaternion.
52 | :param z: z (k) of quaternion.
53 | :param axis_angle: Set of three values from axis-angle representation, as list or [3,] or [3,1] np.ndarray.
54 | See C2M5L2 for details.
55 | :param euler: Set of three XYZ Euler angles.
56 | """
57 | if axis_angle is None and euler is None:
58 | self.w = w
59 | self.x = x
60 | self.y = y
61 | self.z = z
62 | elif euler is not None and axis_angle is not None:
63 | raise AttributeError("Only one of axis_angle or euler can be specified.")
64 | elif axis_angle is not None:
65 | if not (type(axis_angle) == list or type(axis_angle) == np.ndarray) or len(axis_angle) != 3:
66 | raise ValueError("axis_angle must be list or np.ndarray with length 3.")
67 | axis_angle = np.array(axis_angle)
68 | norm = np.linalg.norm(axis_angle)
69 | self.w = np.cos(norm / 2)
70 | if norm < 1e-50: # to avoid instabilities and nans
71 | self.x = 0
72 | self.y = 0
73 | self.z = 0
74 | else:
75 | imag = axis_angle / norm * np.sin(norm / 2)
76 | self.x = imag[0].item()
77 | self.y = imag[1].item()
78 | self.z = imag[2].item()
79 | else:
80 | roll = euler[0]
81 | pitch = euler[1]
82 | yaw = euler[2]
83 |
84 | cy = np.cos(yaw * 0.5)
85 | sy = np.sin(yaw * 0.5)
86 | cr = np.cos(roll * 0.5)
87 | sr = np.sin(roll * 0.5)
88 | cp = np.cos(pitch * 0.5)
89 | sp = np.sin(pitch * 0.5)
90 |
91 | # Fixed frame
92 | self.w = cr * cp * cy + sr * sp * sy
93 | self.x = sr * cp * cy - cr * sp * sy
94 | self.y = cr * sp * cy + sr * cp * sy
95 | self.z = cr * cp * sy - sr * sp * cy
96 |
97 | # Rotating frame
98 | # self.w = cr * cp * cy - sr * sp * sy
99 | # self.x = cr * sp * sy + sr * cp * cy
100 | # self.y = cr * sp * cy - sr * cp * sy
101 | # self.z = cr * cp * sy + sr * sp * cy
102 |
103 | def __repr__(self):
104 | return "Quaternion (wxyz): [%2.5f, %2.5f, %2.5f, %2.5f]" % (self.w, self.x, self.y, self.z)
105 |
106 | def to_axis_angle(self):
107 | t = 2*np.arccos(self.w)
108 | return np.array(t*np.array([self.x, self.y, self.z])/np.sin(t/2))
109 |
110 | def to_mat(self):
111 | v = np.array([self.x, self.y, self.z]).reshape(3,1)
112 | return (self.w ** 2 - np.dot(v.T, v)) * np.eye(3) + \
113 | 2 * np.dot(v, v.T) + 2 * self.w * skew_symmetric(v)
114 |
115 | def to_euler(self):
116 | """Return as xyz (roll pitch yaw) Euler angles."""
117 | roll = np.arctan2(2 * (self.w * self.x + self.y * self.z), 1 - 2 * (self.x**2 + self.y**2))
118 | pitch = np.arcsin(2 * (self.w * self.y - self.z * self.x))
119 | yaw = np.arctan2(2 * (self.w * self.z + self.x * self.y), 1 - 2 * (self.y**2 + self.z**2))
120 | return np.array([roll, pitch, yaw])
121 |
122 | def to_numpy(self):
123 | """Return numpy wxyz representation."""
124 | return np.array([self.w, self.x, self.y, self.z])
125 |
126 | def normalize(self):
127 | """Return a (unit) normalized version of this quaternion."""
128 | norm = np.linalg.norm([self.w, self.x, self.y, self.z])
129 | return Quaternion(self.w / norm, self.x / norm, self.y / norm, self.z / norm)
130 |
131 | def quat_mult_right(self, q, out='np'):
132 | """
133 | Quaternion multiplication operation - in this case, perform multiplication
134 | on the right, that is, q*self.
135 |
136 | :param q: Either a Quaternion or 4x1 ndarray.
137 | :param out: Output type, either np or Quaternion.
138 | :return: Returns quaternion of desired type.
139 | """
140 | v = np.array([self.x, self.y, self.z]).reshape(3, 1)
141 | sum_term = np.zeros([4,4])
142 | sum_term[0,1:] = -v[:,0]
143 | sum_term[1:, 0] = v[:,0]
144 | sum_term[1:, 1:] = -skew_symmetric(v)
145 | sigma = self.w * np.eye(4) + sum_term
146 |
147 | if type(q).__name__ == "Quaternion":
148 | quat_np = np.dot(sigma, q.to_numpy())
149 | else:
150 | quat_np = np.dot(sigma, q)
151 |
152 | if out == 'np':
153 | return quat_np
154 | elif out == 'Quaternion':
155 | quat_obj = Quaternion(quat_np[0], quat_np[1], quat_np[2], quat_np[3])
156 | return quat_obj
157 |
158 | def quat_mult_left(self, q, out='np'):
159 | """
160 | Quaternion multiplication operation - in this case, perform multiplication
161 | on the left, that is, self*q.
162 |
163 | :param q: Either a Quaternion or 4x1 ndarray.
164 | :param out: Output type, either np or Quaternion.
165 | :return: Returns quaternion of desired type.
166 | """
167 | v = np.array([self.x, self.y, self.z]).reshape(3, 1)
168 | sum_term = np.zeros([4,4])
169 | sum_term[0,1:] = -v[:,0]
170 | sum_term[1:, 0] = v[:,0]
171 | sum_term[1:, 1:] = skew_symmetric(v)
172 | sigma = self.w * np.eye(4) + sum_term
173 |
174 | if type(q).__name__ == "Quaternion":
175 | quat_np = np.dot(sigma, q.to_numpy())
176 | else:
177 | quat_np = np.dot(sigma, q)
178 |
179 | if out == 'np':
180 | return quat_np
181 | elif out == 'Quaternion':
182 | quat_obj = Quaternion(quat_np[0], quat_np[1], quat_np[2], quat_np[3])
183 | return quat_obj
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Vehicle State Estimation on a Roadway
2 | This project implements the Error-State **Extended Kalman Filter** (ES-EKF) to localize a vehicle using data from the [CARLA](https://carla.org/) simulator. The following diagram shows a graphical representation of the system.
3 |
4 | ![ES-EKF system diagram](images/diagram.png)
5 |
6 | This project is the final programming assignment of the [State Estimation and Localization for Self-Driving Cars](https://www.coursera.org/learn/state-estimation-localization-self-driving-cars?) course from [Coursera](https://www.coursera.org/). The starter code is provided by the [University of Toronto](https://www.utoronto.ca/).
7 |
8 | The **Kalman Filter** algorithm updates a state estimate through two stages:
9 |
10 | 1. *prediction* using the motion model
11 | 2. *correction* using the measurement model
12 |
13 | ## Table of Contents
14 | - [1. Preliminaries](#1-preliminaries)
15 | - [1.1. Vehicle State Initialization](#11-vehicle-state-initialization)
16 | - [2. Prediction](#2-prediction)
17 | - [2.1. Motion Model](#21-motion-model)
18 | - [2.2. Predicted State](#22-predicted-state)
19 | - [2.3. Error State Linearization](#23-error-state-linearization)
20 | - [2.4. Propagate Uncertainty](#24-propagate-uncertainty)
21 | - [3. Correction](#3-correction)
22 | - [3.1. Measurement Availability](#31-measurement-availability)
23 | - [3.2. Measurement Model](#32-measurement-model)
24 | - [3.3. Measurement Update](#33-measurement-update)
25 | - [4. Vehicle Trajectory](#4-vehicle-trajectory)
26 | - [4.1. Ground Truth and Estimate](#41-ground-truth-and-estimate)
27 | - [4.2. Estimation Error and Uncertainty Bounds](#42-estimation-error-and-uncertainty-bounds)
28 |
29 | ## 1. Preliminaries
30 |
31 | ### 1.1. Vehicle State Initialization
32 |
33 | The **vehicle state** at each time step consists of position, velocity, and orientation (parameterized by a unit quaternion); the inputs to the **motion model** are the IMU specific force and angular rate measurements.
34 |
35 | | Description | Equation |
36 | | ---------------- | -------- |
37 | | Vehicle State | $`\boldsymbol{x}_k=[\boldsymbol{p}_k, \boldsymbol{v}_k, \boldsymbol{q}_k]^{T} \in R^{10}`$ |
38 | | IMU Measurements | $`\boldsymbol{u}_k=[\boldsymbol{f}_k, \boldsymbol{\omega}_k]^{T} \in R^6`$ |
39 |
40 | This section of code initializes the variables for the ES-EKF solver.
41 |
42 | ```python
43 | p_est = np.zeros([imu_f.data.shape[0], 3]) # position estimates
44 | v_est = np.zeros([imu_f.data.shape[0], 3]) # velocity estimates
45 | q_est = np.zeros([imu_f.data.shape[0], 4]) # orientation estimates as quaternions
46 | p_cov = np.zeros([imu_f.data.shape[0], 9, 9]) # covariance matrices at each timestep
47 |
48 | # Set initial values.
49 | p_est[0] = gt.p[0]
50 | v_est[0] = gt.v[0]
51 | q_est[0] = Quaternion(euler=gt.r[0]).to_numpy()
52 | p_cov[0] = np.zeros(9) # covariance of estimate
53 | gnss_i = 0
54 | lidar_i = 0
55 | ```
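
The lists of GNSS and LIDAR timestamps used later to check for measurement availability are also created at this point in `es_ekf.py`:

```python
gnss_t = list(gnss.t)
lidar_t = list(lidar.t)
```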
56 |
57 | ## 2. Prediction
58 |
59 | The main filter loop operates by first **predicting** the next state (vehicle pose and velocity). The prediction integrates the high-rate IMU measurements through a nonlinear motion model.
60 |
61 | ### 2.1. Motion Model
62 |
63 | The motion model of the vehicle is given by the following set of equations.
64 |
65 | | Description | Equation |
66 | | ----------- | ------------------------------------------------------------ |
67 | | Position | $`\boldsymbol{p}_k = \boldsymbol{p}_{k-1} + {\Delta}t\boldsymbol{v}_{k-1} + \frac{{\Delta}t^2}{2}(\boldsymbol{C}_{ns}\boldsymbol{f}_{k-1} + \boldsymbol{g})`$ |
68 | | Velocity | $`\boldsymbol{v}_{k} = \boldsymbol{v}_{k-1} + {\Delta}t(\boldsymbol{C}_{ns}\boldsymbol{f}_{k-1} + \boldsymbol{g})`$ |
69 | | Orientation | $`\boldsymbol{q}_{k}=\boldsymbol{q}_{k-1}\otimes\boldsymbol{q}(\boldsymbol{\omega}_{k-1}{\Delta}t)=\boldsymbol{\Omega}(\boldsymbol{q}(\boldsymbol{\omega}_{k-1}{\Delta}t))\boldsymbol{q}_{k-1}`$ |
70 |
71 | ### 2.2. Predicted State
72 |
73 | The **predicted** vehicle state is therefore given by the following equations.
74 |
75 | | Description | Equation | Variable |
76 | | ----------------------- | ------------------------------------------------------------ | --------- |
77 | | *Predicted* State | $`\boldsymbol{\check{x}}_{k}=[\boldsymbol{\check{p}}_{k}, \boldsymbol{\check{v}}_{k}, \boldsymbol{\check{q}}_{k}]^T`$ | `x_check` |
78 | | *Predicted* Position | $`\boldsymbol{\check{p}}_k = \boldsymbol{p}_{k-1} + {\Delta}t\boldsymbol{v}_{k-1} + \frac{{\Delta}t^2}{2}(\boldsymbol{C}_{ns}\boldsymbol{f}_{k-1} + \boldsymbol{g})`$ | `p_check` |
79 | | *Predicted* Velocity | $`\boldsymbol{\check{v}}_{k} = \boldsymbol{v}_{k-1} + {\Delta}t(\boldsymbol{C}_{ns}\boldsymbol{f}_{k-1} + \boldsymbol{g})`$ | `v_check` |
80 | | *Predicted* Orientation | $`\boldsymbol{\check{q}}_{k}=\boldsymbol{q}_{k-1}\otimes\boldsymbol{q}(\boldsymbol{\omega}_{k-1}{\Delta}t)`$ | `q_check` |
81 |
82 | This section of code iterates through the IMU inputs and updates the state.
83 |
84 | ```python
85 | for k in range(1, imu_f.data.shape[0]): # start at 1 b/c we have initial prediction from gt
86 | delta_t = imu_f.t[k] - imu_f.t[k - 1]
87 |
88 | # 1. Update state with IMU inputs
89 | q_prev = Quaternion(*q_est[k - 1, :]) # previous orientation as a quaternion object
90 | q_curr = Quaternion(axis_angle=(imu_w.data[k - 1]*delta_t)) # current IMU orientation
91 | c_ns = q_prev.to_mat() # previous orientation as a matrix
92 | f_ns = (c_ns @ imu_f.data[k - 1]) + g # calculate sum of forces
93 | p_check = p_est[k - 1, :] + delta_t*v_est[k - 1, :] + 0.5*(delta_t**2)*f_ns
94 | v_check = v_est[k - 1, :] + delta_t*f_ns
95 | q_check = q_prev.quat_mult_left(q_curr)
96 | ```
97 |
98 | ### 2.3. Error State Linearization
99 |
100 | In the previous section, we propagated the nominal state (`x_check`) forward in time. However, this prediction ignores noise and perturbations. The ES-EKF algorithm captures these deviations in the error state vector.
101 |
102 | The next step is to consider the linearized error dynamics of the system.
103 |
104 | | Description | Equation |
105 | | ----------------- | ------------------------------------------------------------ |
106 | | Error State | $`\delta\boldsymbol{x}_{k}=[\delta\boldsymbol{p}_{k}, \delta\boldsymbol{v}_{k}, \delta\boldsymbol{\phi}_{k}]^T \in R^9`$ |
107 | | Error Dynamics | $`\delta\boldsymbol{x}_{k}=\boldsymbol{F}_{k-1}\delta\boldsymbol{x}_{k-1}+\boldsymbol{L}_{k-1}\boldsymbol{n}_{k-1}`$ |
108 | | Process (IMU) Noise | $`\boldsymbol{n}_{k}\sim N(\boldsymbol{0}, \boldsymbol{Q}_{k})`$ |
109 |
110 | The Jacobians and the noise covariance matrix are defined as follows.
111 |
112 | | Description | Equation | Variable |
113 | | --------------------------- | ------------------------------------------------------------ | -------- |
114 | | Motion Model Jacobian | $`\boldsymbol{F}_{k-1}=\begin{bmatrix}\boldsymbol{I}&\boldsymbol{I}\cdot\Delta t&0\\0&\boldsymbol{I}&-[\boldsymbol{C}_{ns}\boldsymbol{f}_{k-1}]_{\times}\Delta t\\0&0&\boldsymbol{I}\end{bmatrix}`$ | `f_jac` |
115 | | Motion Model Noise Jacobian | $`\boldsymbol{L}_{k-1}=\begin{bmatrix}0&0\\\boldsymbol{I}&0\\0&\boldsymbol{I}\end{bmatrix}`$ | `l_jac` |
116 | | IMU Noise Covariance | $`\boldsymbol{Q}_{k}=\Delta t^2\begin{bmatrix}\boldsymbol{I}\cdot\sigma_{acc}^2&0\\0&\boldsymbol{I}\cdot\sigma_{gyro}^2\end{bmatrix}`$ | `q_cov` |
117 |
118 | where ***I*** is the 3 by 3 identity matrix.
119 |
120 | This section of code calculates the motion model Jacobian.
121 |
122 | ```python
123 | # 1.1 Linearize the motion model and compute Jacobians
124 | f_jac = np.eye(9) # motion model jacobian with respect to last state
125 | f_jac[0:3, 3:6] = np.eye(3)*delta_t
126 | f_jac[3:6, 6:9] = -skew_symmetric(c_ns @ imu_f.data[k - 1])*delta_t
127 | ```
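
The motion model noise Jacobian `l_jac` does not depend on the state, so it is built once before the main loop (this is the constant defined near the top of `es_ekf.py`):

```python
l_jac = np.zeros([9, 6])
l_jac[3:, :] = np.eye(6)  # motion model noise jacobian
```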
128 |
129 | ### 2.4. Propagate Uncertainty
130 |
131 | In the previous section, we computed the Jacobian matrices of the motion model. We will use these matrices to propagate the state uncertainty forward in time.
132 |
133 | The uncertainty in the state is captured by the state covariance (uncertainty) matrix. The uncertainty grows over time until a measurement arrives.
134 |
135 | | Description | Equation | Variable |
136 | | ---------------------------- | ------------------------------------------------------------ | ------------- |
137 | | *Predicted* State Covariance | $`\boldsymbol{\check{P}}_{k}=\boldsymbol{F}_{k-1}\boldsymbol{P}_{k-1}\boldsymbol{F}_{k-1}^{T}+\boldsymbol{L}_{k-1}\boldsymbol{Q}_{k-1}\boldsymbol{L}_{k-1}^{T}`$ | `p_cov_check` |
138 |
139 | This section of code calculates the state uncertainty.
140 |
141 | ```python
142 | # 2. Propagate uncertainty
143 | q_cov = np.zeros((6, 6)) # IMU noise covariance
144 | q_cov[0:3, 0:3] = delta_t**2 * np.eye(3)*var_imu_f
145 | q_cov[3:6, 3:6] = delta_t**2 * np.eye(3)*var_imu_w
146 | p_cov_check = f_jac @ p_cov[k - 1, :, :] @ f_jac.T + l_jac @ q_cov @ l_jac.T
147 | ```
148 |
149 | ## 3. Correction
150 |
151 | ### 3.1. Measurement Availability
152 |
153 | The IMU data arrives at a faster rate than either GNSS or LIDAR sensor measurements.
154 |
155 | The algorithm checks the measurement availability and calls a function to correct our prediction.
156 |
157 | ```python
158 | # 3. Check availability of GNSS and LIDAR measurements
159 | if imu_f.t[k] in gnss_t:
160 | gnss_i = gnss_t.index(imu_f.t[k])
161 | p_check, v_check, q_check, p_cov_check = \
162 | measurement_update(var_gnss, p_cov_check, gnss.data[gnss_i], p_check, v_check, q_check)
163 |
164 | if imu_f.t[k] in lidar_t:
165 | lidar_i = lidar_t.index(imu_f.t[k])
166 | p_check, v_check, q_check, p_cov_check = \
167 | measurement_update(var_lidar, p_cov_check, lidar.data[lidar_i], p_check, v_check, q_check)
168 | ```
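
Note that `list.index` performs a linear search at every IMU step. An optional refinement (not part of the starter code) is to precompute timestamp-to-index dictionaries once before the loop; the names `gnss_idx` and `lidar_idx` below are hypothetical:

```python
# Hypothetical speed-up (not in the starter code): build lookup tables once, before the loop.
gnss_idx = {t: i for i, t in enumerate(gnss.t)}
lidar_idx = {t: i for i, t in enumerate(lidar.t)}
# Inside the loop, `imu_f.t[k] in gnss_idx` and `gnss_idx[imu_f.t[k]]` then replace the
# membership test on the list and the linear `index` search.
```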
169 |
170 | ### 3.2. Measurement Model
171 |
172 | The measurement model is the same for both sensors; however, each sensor has its own noise covariance.
173 |
174 | | Description | Equation |
175 | | ----------------------- | ------------------------------------------------------------ |
176 | | Measurement Model | $`\begin{split} \boldsymbol{y}_{k} &= \boldsymbol{h}(\boldsymbol{x}_{k})+\boldsymbol{v}_{k} \\ & =\boldsymbol{H}_{k}\boldsymbol{x}_{k}+\boldsymbol{v}_{k} \\ & = \boldsymbol{p}_{k}+\boldsymbol{v}_{k} \end{split}`$ |
177 | | GNSS Measurement Noise | $`\boldsymbol{v}_{k} \sim N(0, \boldsymbol{R}_{GNSS})`$ |
178 | | LIDAR Measurement Noise | $`\boldsymbol{v}_{k} \sim N(0, \boldsymbol{R}_{LIDAR})`$ |
179 |
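Because both sensors directly measure position, the measurement Jacobian simply selects the position block of the error state. In `es_ekf.py` it is defined once as a constant, together with the per-sensor variances that form `r_cov`:

```python
h_jac = np.zeros([3, 9])
h_jac[:, :3] = np.eye(3)  # measurement model jacobian

var_gnss = 0.10   # GNSS position variance
var_lidar = 2.00  # LIDAR position variance
```
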
180 | ### 3.3. Measurement Update
181 |
182 | Measurements are processed sequentially by the EKF as they arrive; in our case, both the GNSS receiver and the LIDAR provide position updates.
183 |
184 | | Description | Equation | Variable |
185 | | -------------------------- | ------------------------------------------------------------ | -------- |
186 | | Measurement Model Jacobian | $`\boldsymbol{H}_{k}=\begin{bmatrix}\boldsymbol{I}&0&0\\\end{bmatrix}`$ | `h_jac` |
187 | | Sensor Noise Covariance | $`\boldsymbol{R}=\boldsymbol{I}\cdot\sigma_{sensor}^2`$ | `r_cov` |
188 | | Kalman Gain | $`\boldsymbol{K}_{k}=\boldsymbol{\check{P}}_{k}\boldsymbol{H}_{k}^{T}(\boldsymbol{H}_{k}\boldsymbol{\check{P}}_{k}\boldsymbol{H}_{k}^{T}+\boldsymbol{R})^{-1}`$ | `k_gain` |
189 |
190 | This section of code defines the measurement update function.
191 |
192 | ```python
193 | def measurement_update(sensor_var, p_cov_check, y_k, p_check, v_check, q_check):
194 | # 3.1 Compute Kalman Gain
195 | r_cov = np.eye(3)*sensor_var
196 | k_gain = p_cov_check @ h_jac.T @ np.linalg.inv((h_jac @ p_cov_check @ h_jac.T) + r_cov)
197 | ```
198 |
199 | We use the Kalman gain to compute the error state from the innovation, that is, the difference between our predicted vehicle position, `p_check`, and the measured position, `y_k`.
200 |
201 | | Description | Equation | Variable |
202 | | ----------- | ------------------------------------------------------------ | ------------- |
203 | | Error State | $`\delta\boldsymbol{x}_{k}=\boldsymbol{K}_{k}(\boldsymbol{y}_{k}-\boldsymbol{\check{p}}_{k})`$ | `error_state` |
204 |
205 | The error state is then used to update the nominal state vector. We also calculate the corrected nominal state covariance matrix, `p_cov_hat`.
206 |
207 | | Description | Equation | Variable |
208 | | ---------------------------- | ------------------------------------------------------------ | ----------- |
209 | | *Corrected* Position | $`\hat{\boldsymbol{p}}_{k}=\check{\boldsymbol{p}}_{k}+\delta\boldsymbol{p}_{k}`$ | `p_hat` |
210 | | *Corrected* Velocity | $`\hat{\boldsymbol{v}}_{k}=\check{\boldsymbol{v}}_{k}+\delta\boldsymbol{v}_{k}`$ | `v_hat` |
211 | | *Corrected* Orientation | $`\hat{\boldsymbol{q}}_{k}=\boldsymbol{q}(\delta\boldsymbol{\phi}_{k})\otimes\check{\boldsymbol{q}}_{k}`$ | `q_hat` |
212 | | *Corrected* State Covariance | $`\hat{\boldsymbol{P}}_{k}=(\boldsymbol{I}-\boldsymbol{K}_{k}\boldsymbol{H}_{k})\check{\boldsymbol{P}}_{k}`$ | `p_cov_hat` |
213 |
214 | Finally, the function returns the corrected state and state covariance.
215 |
216 | ```python
217 | # 3.2 Compute error state
218 | error_state = k_gain @ (y_k - p_check)
219 |
220 | # 3.3 Correct predicted state
221 | p_hat = p_check + error_state[0:3]
222 | v_hat = v_check + error_state[3:6]
223 | q_hat = Quaternion(axis_angle=error_state[6:9]).quat_mult_left(Quaternion(*q_check))
224 |
225 | # 3.4 Compute corrected covariance
226 | p_cov_hat = (np.eye(9) - k_gain @ h_jac) @ p_cov_check
227 |
228 | return p_hat, v_hat, q_hat, p_cov_hat
229 | ```
230 |
231 | ## 4. Vehicle Trajectory
232 |
233 | We evaluate the filter's performance by comparing the estimated vehicle trajectory with the ground truth. Running the following command in a terminal executes the filter and generates the visualizations.
234 |
235 | ```bash
236 | python es_ekf.py
237 | ```
238 |
239 | ### 4.1. Ground Truth and Estimate
240 |
241 | This figure shows a comparison between the trajectory estimate and the ground truth.
242 |
243 | ![Ground truth and estimated trajectory](images/estimated-trajectory.gif)
244 |
245 | ### 4.2. Estimation Error and Uncertainty Bounds
246 |
247 | These plots show the estimation error for each of the six degrees of freedom, together with the ±3σ uncertainty bounds derived from the state covariance.
248 |
249 | ![Error plots](images/error-plots.png)
--------------------------------------------------------------------------------
/es_ekf.py:
--------------------------------------------------------------------------------
1 | # Starter code for the Coursera SDC Course 2 final project.
2 | #
3 | # Authors: Trevor Ablett and Jonathan Kelly
4 | # University of Toronto Institute for Aerospace Studies
5 | import pickle
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 | from mpl_toolkits.mplot3d import Axes3D
9 | from rotations import angle_normalize, rpy_jacobian_axis_angle, skew_symmetric, Quaternion
10 |
11 | #### 1. Data ###################################################################################
12 |
13 | ################################################################################################
14 | # This is where you will load the data from the pickle files. For parts 1 and 2, you will use
15 | # pt1_data.pkl. For Part 3, you will use pt3_data.pkl.
16 | ################################################################################################
17 | with open('data/pt1_data.pkl', 'rb') as file:
18 | data = pickle.load(file)
19 |
20 | ################################################################################################
21 | # Each element of the data dictionary is stored as an item that we will load into
22 | # local variables, described by the following:
23 | #   gt: Data object containing ground truth, with the following fields:
24 | # a: Acceleration of the vehicle, in the inertial frame
25 | # v: Velocity of the vehicle, in the inertial frame
26 | # p: Position of the vehicle, in the inertial frame
27 | # alpha: Rotational acceleration of the vehicle, in the inertial frame
28 | # w: Rotational velocity of the vehicle, in the inertial frame
29 | # r: Rotational position of the vehicle, in Euler (XYZ) angles in the inertial frame
30 | #     _t: Timestamps [s].
31 | # imu_f: StampedData object with the imu specific force data (given in vehicle frame).
32 | # data: The actual data
33 | #     t: Timestamps [s].
34 | # imu_w: StampedData object with the imu rotational velocity (given in the vehicle frame).
35 | # data: The actual data
36 | #     t: Timestamps [s].
37 | # gnss: StampedData object with the GNSS data.
38 | # data: The actual data
39 | #     t: Timestamps [s].
40 | # lidar: StampedData object with the LIDAR data (positions only).
41 | # data: The actual data
42 | #     t: Timestamps [s].
43 | ################################################################################################
44 | gt = data['gt']
45 | imu_f = data['imu_f']
46 | imu_w = data['imu_w']
47 | gnss = data['gnss']
48 | lidar = data['lidar']
49 |
50 | ################################################################################################
51 | # Let's plot the ground truth trajectory to see what it looks like. When you're testing your
52 | # code later, feel free to comment this out.
53 | ################################################################################################
54 | # gt_fig = plt.figure()
55 | # ax = gt_fig.add_subplot(111, projection='3d')
56 | # ax.plot(gt.p[:,0], gt.p[:,1], gt.p[:,2])
57 | # ax.set_xlabel('x [m]')
58 | # ax.set_ylabel('y [m]')
59 | # ax.set_zlabel('z [m]')
60 | # ax.set_title('Ground Truth trajectory')
61 | # ax.set_zlim(-1, 5)
62 | # plt.show()
63 |
64 | ################################################################################################
65 | # Remember that our LIDAR data is actually just a set of positions estimated from a separate
66 | # scan-matching system, so we can insert it into our solver as another position measurement,
67 | # just as we do for GNSS. However, the LIDAR frame is not the same as the frame shared by the
68 | # IMU and the GNSS. To remedy this, we transform the LIDAR data to the IMU frame using our
69 | # known extrinsic calibration rotation matrix C_li and translation vector t_i_li.
70 | #
71 | # THIS IS THE CODE YOU WILL MODIFY FOR PART 2 OF THE ASSIGNMENT.
72 | ################################################################################################
73 | # Correct calibration rotation matrix, corresponding to Euler RPY angles (0.05, 0.05, 0.1).
74 | C_li = np.array([
75 | [ 0.99376, -0.09722, 0.05466],
76 | [ 0.09971, 0.99401, -0.04475],
77 | [-0.04998, 0.04992, 0.9975 ]
78 | ])
79 |
80 | # Incorrect calibration rotation matrix, corresponding to Euler RPY angles (0.05, 0.05, 0.05).
81 | # C_li = np.array([
82 | # [ 0.9975 , -0.04742, 0.05235],
83 | # [ 0.04992, 0.99763, -0.04742],
84 | # [-0.04998, 0.04992, 0.9975 ]
85 | # ])
86 |
87 | t_i_li = np.array([0.5, 0.1, 0.5])
88 |
89 | # Transform from the LIDAR frame to the vehicle (IMU) frame.
90 | lidar.data = (C_li @ lidar.data.T).T + t_i_li
91 |
92 | #### 2. Constants ##############################################################################
93 |
94 | ################################################################################################
95 | # Now that our data is set up, we can start getting things ready for our solver. One of the
96 | # most important aspects of a filter is setting the estimated sensor variances correctly.
97 | # We set the values here.
98 | ################################################################################################
99 | var_imu_f = 0.10
100 | var_imu_w = 0.10
101 | var_gnss = 0.10
102 | var_lidar = 2.00
103 |
104 | ################################################################################################
105 | # We can also set up some constants that won't change for any iteration of our solver.
106 | ################################################################################################
107 | g = np.array([0, 0, -9.81]) # gravity
108 | l_jac = np.zeros([9, 6])
109 | l_jac[3:, :] = np.eye(6) # motion model noise jacobian
110 | h_jac = np.zeros([3, 9])
111 | h_jac[:, :3] = np.eye(3) # measurement model jacobian
112 |
113 | #### 3. Initial Values #########################################################################
114 |
115 | ################################################################################################
116 | # Let's set up some initial values for our ES-EKF solver.
117 | ################################################################################################
118 | p_est = np.zeros([imu_f.data.shape[0], 3]) # position estimates
119 | v_est = np.zeros([imu_f.data.shape[0], 3]) # velocity estimates
120 | q_est = np.zeros([imu_f.data.shape[0], 4]) # orientation estimates as quaternions
121 | p_cov = np.zeros([imu_f.data.shape[0], 9, 9]) # covariance matrices at each timestep
122 |
123 | # Set initial values.
124 | p_est[0] = gt.p[0]
125 | v_est[0] = gt.v[0]
126 | q_est[0] = Quaternion(euler=gt.r[0]).to_numpy()
127 | p_cov[0] = np.zeros(9) # covariance of estimate
128 | gnss_i = 0
129 | lidar_i = 0
130 | gnss_t = list(gnss.t)
131 | lidar_t = list(lidar.t)
132 |
133 | #### 4. Measurement Update #####################################################################
134 |
135 | ################################################################################################
136 | # Since we'll need a measurement update for both the GNSS and the LIDAR data, let's make
137 | # a function for it.
138 | ################################################################################################
139 | def measurement_update(sensor_var, p_cov_check, y_k, p_check, v_check, q_check):
140 | # 3.1 Compute Kalman Gain
141 | r_cov = np.eye(3)*sensor_var
142 | k_gain = p_cov_check @ h_jac.T @ np.linalg.inv((h_jac @ p_cov_check @ h_jac.T) + r_cov)
143 |
144 | # 3.2 Compute error state
145 | error_state = k_gain @ (y_k - p_check)
146 |
147 | # 3.3 Correct predicted state
148 | p_hat = p_check + error_state[0:3]
149 | v_hat = v_check + error_state[3:6]
150 | q_hat = Quaternion(axis_angle=error_state[6:9]).quat_mult_left(Quaternion(*q_check))
151 |
152 | # 3.4 Compute corrected covariance
153 | p_cov_hat = (np.eye(9) - k_gain @ h_jac) @ p_cov_check
154 |
155 | return p_hat, v_hat, q_hat, p_cov_hat
156 |
157 | #### 5. Main Filter Loop #######################################################################
158 |
159 | ################################################################################################
160 | # Now that everything is set up, we can start taking in the sensor data and creating estimates
161 | # for our state in a loop.
162 | ################################################################################################
163 | for k in range(1, imu_f.data.shape[0]): # start at 1 b/c we have initial prediction from gt
164 | delta_t = imu_f.t[k] - imu_f.t[k - 1]
165 |
166 | # 1. Update state with IMU inputs
167 | q_prev = Quaternion(*q_est[k - 1, :]) # previous orientation as a quaternion object
168 | q_curr = Quaternion(axis_angle=(imu_w.data[k - 1]*delta_t)) # current IMU orientation
169 | c_ns = q_prev.to_mat() # previous orientation as a matrix
170 | f_ns = (c_ns @ imu_f.data[k - 1]) + g # calculate sum of forces
171 | p_check = p_est[k - 1, :] + delta_t*v_est[k - 1, :] + 0.5*(delta_t**2)*f_ns
172 | v_check = v_est[k - 1, :] + delta_t*f_ns
173 | q_check = q_prev.quat_mult_left(q_curr)
174 |
175 | # 1.1 Linearize the motion model and compute Jacobians
176 | f_jac = np.eye(9) # motion model jacobian with respect to last state
177 | f_jac[0:3, 3:6] = np.eye(3)*delta_t
178 | f_jac[3:6, 6:9] = -skew_symmetric(c_ns @ imu_f.data[k - 1])*delta_t
179 |
180 | # 2. Propagate uncertainty
181 | q_cov = np.zeros((6, 6)) # IMU noise covariance
182 | q_cov[0:3, 0:3] = delta_t**2 * np.eye(3)*var_imu_f
183 | q_cov[3:6, 3:6] = delta_t**2 * np.eye(3)*var_imu_w
184 | p_cov_check = f_jac @ p_cov[k - 1, :, :] @ f_jac.T + l_jac @ q_cov @ l_jac.T
185 |
186 | # 3. Check availability of GNSS and LIDAR measurements
187 | if imu_f.t[k] in gnss_t:
188 | gnss_i = gnss_t.index(imu_f.t[k])
189 | p_check, v_check, q_check, p_cov_check = \
190 | measurement_update(var_gnss, p_cov_check, gnss.data[gnss_i], p_check, v_check, q_check)
191 |
192 | if imu_f.t[k] in lidar_t:
193 | lidar_i = lidar_t.index(imu_f.t[k])
194 | p_check, v_check, q_check, p_cov_check = \
195 | measurement_update(var_lidar, p_cov_check, lidar.data[lidar_i], p_check, v_check, q_check)
196 |
197 | # Update states (save)
198 | p_est[k, :] = p_check
199 | v_est[k, :] = v_check
200 | q_est[k, :] = q_check
201 | p_cov[k, :, :] = p_cov_check
202 |
203 | #### 6. Results and Analysis ###################################################################
204 |
205 | ################################################################################################
206 | # Now that we have state estimates for all of our sensor data, let's plot the results. This plot
207 | # will show the ground truth and the estimated trajectories on the same plot. Notice that the
208 | # estimated trajectory continues past the ground truth. This is because we will be evaluating
209 | # your estimated poses from the part of the trajectory where you don't have ground truth!
210 | ################################################################################################
211 | est_traj_fig = plt.figure()
212 | ax = est_traj_fig.add_subplot(111, projection='3d')
213 | ax.plot(p_est[:,0], p_est[:,1], p_est[:,2], label='Estimated')
214 | ax.plot(gt.p[:,0], gt.p[:,1], gt.p[:,2], label='Ground Truth')
215 | ax.set_xlabel('Easting [m]')
216 | ax.set_ylabel('Northing [m]')
217 | ax.set_zlabel('Up [m]')
218 | ax.set_title('Ground Truth and Estimated Trajectory')
219 | ax.set_xlim(0, 200)
220 | ax.set_ylim(0, 200)
221 | ax.set_zlim(-2, 2)
222 | ax.set_xticks([0, 50, 100, 150, 200])
223 | ax.set_yticks([0, 50, 100, 150, 200])
224 | ax.set_zticks([-2, -1, 0, 1, 2])
225 | ax.legend(loc=(0.62,0.77))
226 | ax.view_init(elev=45, azim=-50)
227 | plt.show()
228 |
229 | ################################################################################################
230 | # We can also plot the error for each of the 6 DOF, with estimates for our uncertainty
231 | # included. The error estimates are in blue, and the uncertainty bounds are red and dashed.
232 | # The uncertainty bounds are +/- 3 standard deviations based on our uncertainty (covariance).
233 | ################################################################################################
234 | error_fig, ax = plt.subplots(2, 3)
235 | error_fig.suptitle('Error Plots')
236 | num_gt = gt.p.shape[0]
237 | p_est_euler = []
238 | p_cov_euler_std = []
239 |
240 | # Convert estimated quaternions to euler angles
241 | for i in range(len(q_est)):
242 | qc = Quaternion(*q_est[i, :])
243 | p_est_euler.append(qc.to_euler())
244 |
245 | # First-order approximation of RPY covariance
246 | J = rpy_jacobian_axis_angle(qc.to_axis_angle())
247 | p_cov_euler_std.append(np.sqrt(np.diagonal(J @ p_cov[i, 6:, 6:] @ J.T)))
248 |
249 | p_est_euler = np.array(p_est_euler)
250 | p_cov_euler_std = np.array(p_cov_euler_std)
251 |
252 | # Get uncertainty estimates from P matrix
253 | p_cov_std = np.sqrt(np.diagonal(p_cov[:, :6, :6], axis1=1, axis2=2))
254 |
255 | titles = ['Easting', 'Northing', 'Up', 'Roll', 'Pitch', 'Yaw']
256 | for i in range(3):
257 | ax[0, i].plot(range(num_gt), gt.p[:, i] - p_est[:num_gt, i])
258 | ax[0, i].plot(range(num_gt), 3 * p_cov_std[:num_gt, i], 'r--')
259 | ax[0, i].plot(range(num_gt), -3 * p_cov_std[:num_gt, i], 'r--')
260 | ax[0, i].set_title(titles[i])
261 | ax[0,0].set_ylabel('Meters')
262 |
263 | for i in range(3):
264 | ax[1, i].plot(range(num_gt), \
265 | angle_normalize(gt.r[:, i] - p_est_euler[:num_gt, i]))
266 | ax[1, i].plot(range(num_gt), 3 * p_cov_euler_std[:num_gt, i], 'r--')
267 | ax[1, i].plot(range(num_gt), -3 * p_cov_euler_std[:num_gt, i], 'r--')
268 | ax[1, i].set_title(titles[i+3])
269 | ax[1,0].set_ylabel('Radians')
270 | plt.show()
271 |
272 | #### 7. Submission #############################################################################
273 |
274 | ################################################################################################
275 | # Now we can prepare your results for submission to the Coursera platform. Uncomment the
276 | # corresponding lines to prepare a file that will save your position estimates in a format
277 | # that corresponds to what we're expecting on Coursera.
278 | ################################################################################################
279 |
280 | # Pt. 1 submission
281 | p1_indices = [9000, 9400, 9800, 10200, 10600]
282 | p1_str = ''
283 | for val in p1_indices:
284 | for i in range(3):
285 | p1_str += '%.3f ' % (p_est[val, i])
286 | with open('output/pt1_submission.txt', 'w') as file:
287 | file.write(p1_str)
288 |
289 | # Pt. 2 submission
290 | # p2_indices = [9000, 9400, 9800, 10200, 10600]
291 | # p2_str = ''
292 | # for val in p2_indices:
293 | # for i in range(3):
294 | # p2_str += '%.3f ' % (p_est[val, i])
295 | # with open('output/pt2_submission.txt', 'w') as file:
296 | # file.write(p2_str)
297 |
298 | # Pt. 3 submission
299 | # p3_indices = [6800, 7600, 8400, 9200, 10000]
300 | # p3_str = ''
301 | # for val in p3_indices:
302 | # for i in range(3):
303 | # p3_str += '%.3f ' % (p_est[val, i])
304 | # with open('output/pt3_submission.txt', 'w') as file:
305 | # file.write(p3_str)
--------------------------------------------------------------------------------