├── .coverage ├── .gitignore ├── .idea └── pyMHT.iml ├── .travis.yml ├── LICENSE ├── Makefile ├── README.md ├── preRequirements.txt ├── pymht ├── __init__.py ├── initiators │ ├── __init__.py │ └── m_of_n.py ├── models │ ├── __init__.py │ ├── ais.py │ ├── constants.py │ ├── polar.py │ └── pv.py ├── pyTarget.py ├── tracker.py └── utils │ ├── __init__.py │ ├── cFunctions.pyx │ ├── ckalman.pyx │ ├── classDefinitions.py │ ├── cudaTest.py │ ├── helpFunctions.py │ ├── kalman.py │ ├── simulator.py │ └── xmlDefinitions.py ├── requirements.txt ├── setup.py ├── tests ├── test_classDefinitions.py ├── test_initiator.py ├── test_kalman.py ├── test_models.py ├── test_pyTarget.py ├── test_simulator.py ├── test_tracker.py └── text_Position.py └── tox.ini /.coverage: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/erikliland/pyMHT/58b2cd13a3fb38563a302c0f8380de3a5b0fccb1/.coverage -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | #Solvers 7 | solvers/ 8 | 9 | #Folders 10 | pulp/ 11 | 12 | #Local test file 13 | test.py 14 | 15 | #Data log files 16 | .xml 17 | *.xml 18 | .mps 19 | *.mps 20 | .sol 21 | *.sol 22 | .iml 23 | *.iml 24 | 25 | 26 | # C extensions 27 | *.so 28 | 29 | # Distribution / packaging 30 | .Python 31 | env/ 32 | build/ 33 | develop-eggs/ 34 | dist/ 35 | downloads/ 36 | eggs/ 37 | 38 | 39 | #Sublime files 40 | .sublime-project 41 | .sublime-workspace 42 | *.sublime-project 43 | *.sublime-workspace 44 | .eggs/ 45 | lib/ 46 | lib64/ 47 | parts/ 48 | sdist/ 49 | var/ 50 | *.egg-info/ 51 | .installed.cfg 52 | *.egg 53 | 54 | # PyInstaller 55 | # Usually these files are written by a python script from a template 56 | # before PyInstaller builds the exe, so as to inject date/other infos 
into it. 57 | *.manifest 58 | *.spec 59 | 60 | # Installer logs 61 | pip-log.txt 62 | pip-delete-this-directory.txt 63 | 64 | # Unit test / coverage reports 65 | htmlcov/ 66 | .tox/ 67 | .coverage 68 | .coverage.* 69 | .cache 70 | nosetests.xml 71 | coverage.xml 72 | *,cover 73 | .hypothesis/ 74 | 75 | # Translations 76 | *.mo 77 | *.pot 78 | 79 | # Django stuff: 80 | *.log 81 | local_settings.py 82 | 83 | # Flask stuff: 84 | instance/ 85 | .webassets-cache 86 | 87 | # Scrapy stuff: 88 | .scrapy 89 | 90 | # Sphinx documentation 91 | docs/_build/ 92 | 93 | # PyBuilder 94 | target/ 95 | 96 | # IPython Notebook 97 | .ipynb_checkpoints 98 | 99 | # pyenv 100 | .python-version 101 | 102 | # celery beat schedule file 103 | celerybeat-schedule 104 | 105 | # dotenv 106 | .env 107 | 108 | # virtualenv 109 | venv/ 110 | ENV/ 111 | 112 | # Spyder project settings 113 | .spyderproject 114 | 115 | # Rope project settings 116 | .ropeproject 117 | .idea/dictionaries/ 118 | .idea/inspectionProfiles/ 119 | mpTest.py 120 | -------------------------------------------------------------------------------- /.idea/pyMHT.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 12 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - '3.5' 4 | install: 5 | - pip install . 6 | - pip install -r preRequirements.txt 7 | - pip install -r requirements.txt 8 | script: pytest tests 9 | notifications: 10 | email: false 11 | after_success: 12 | - coveralls -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2017, Erik Liland 4 | All rights reserved. 
5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | #OS := $(shell uname) 2 | 3 | ifeq ($(shell uname),Darwin) 4 | #Run macOS commands 5 | init: 6 | echo "macOS" 7 | sudo -H pip3 install --upgrade pip 8 | sudo -H pip3 install -r requirements.txt 9 | sudo python3 setup.py install 10 | endif 11 | 12 | ifeq ($(shell uname),Linux) 13 | #Run Linux commands 14 | init: 15 | echo "Linux" 16 | sudo apt-get update 17 | sudo apt-get upgrade 18 | sudo apt-get install python3-dev 19 | sudo apt-get install python3-setuptools 20 | sudo easy_install3 pip 21 | sudo apt-get install python3-tk 22 | sudo apt-get install python-glpk 23 | sudo apt-get install glpk-utils 24 | sudo apt-get install python-numpy 25 | sudo -H pip install -r requirements.txt 26 | sudo python3 setup.py install 27 | endif -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pyMHT 2 | 3 | ## Track oriented, multi target, multi hypothesis tracker 4 | Multi frame multi target tracking module with 2/2&m/n initialization algorithm and an AIS aided track oriented multi hypothesis tracking algorithm. 
5 | 6 | 7 | ## Installation 8 | 9 | You can get the latest and greatest from 10 | [github](https://github.com/erikliland/pymht): 11 | 12 | $ git clone git@github.com:erikliland/pymht.git pymht 13 | $ cd pymht 14 | $ sudo python setup.py install 15 | 16 | 17 | `pyMHT` depends on the following modules, 18 | 19 | * `Cython` (for compiling Munkres algorithm) 20 | * `numpy` (for core functionality) 21 | * `scipy` (for core functionality) 22 | * `matplotlib` (for ploting) 23 | * `pytest` (for testing) 24 | * `matplotlib` (for ploting) 25 | * `Munkres` [[Github](https://github.com/jfrelinger/cython-munkres-wrapper)] 26 | * `OR-TOOLS` (for solving ILP´s) [[Github](https://github.com/google/or-tools)] 27 | 28 | All modules except `OR-TOOLS` can be installed via pip: 29 | 30 | $ pip install -r preRequirements.txt 31 | $ pip install -r requirements.txt 32 | 33 | `OR-TOOLS` must be installed manually. 34 | 35 | ## Test instalation 36 | To test the instalation run in the pyMHT directory: 37 | 38 | $ pytest 39 | This module does not contain any scenarios or examples. This is placed in another repository [pyMHT-simulator](https://github.com/erikliland/pyMHT-simulator). 
40 | 41 | ## Background 42 | This Python module is the result of a project assignment and a Master´s thesis 43 | 44 | [Project report](https://mfr.osf.io/render?url=https://osf.io/2eeqd/?action=download%26mode=render) 45 | 46 | [Thesis]() 47 | 48 | ## Build status 49 | Master [![Build Status](https://travis-ci.org/erikliland/pyMHT.svg?branch=master)](https://travis-ci.org/erikliland/pyMHT) 50 | 51 | Development [![Build Status](https://travis-ci.org/erikliland/pyMHT.svg?branch=development)](https://travis-ci.org/erikliland/pyMHT) 52 | 53 | Master [![Coverage Status](https://coveralls.io/repos/github/erikliland/pyMHT/badge.svg?branch=master)](https://coveralls.io/github/erikliland/pyMHT?branch=master) -------------------------------------------------------------------------------- /preRequirements.txt: -------------------------------------------------------------------------------- 1 | Cython -------------------------------------------------------------------------------- /pymht/__init__.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | backend = matplotlib.get_backend() 3 | if backend not in ['Agg', 'WebAgg', 'MacOSX']: 4 | matplotlib.use('Agg') 5 | -------------------------------------------------------------------------------- /pymht/initiators/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/erikliland/pyMHT/58b2cd13a3fb38563a302c0f8380de3a5b0fccb1/pymht/initiators/__init__.py -------------------------------------------------------------------------------- /pymht/initiators/m_of_n.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | import numpy as np 4 | from scipy.stats import chi2 5 | from ..models import pv, ais 6 | from ..pyTarget import Target 7 | from munkres import munkres # https://github.com/jfrelinger/cython-munkres-wrapper 8 | # import 
pymunkres # https://github.com/erikliland/munkres 9 | # import scipy.optimize.linear_sum_assignment 10 | 11 | # np.set_printoptions(precision=1, suppress=True, linewidth=120) 12 | 13 | tracking_parameters = { 14 | 'gate_probability': 0.99, 15 | } 16 | tracking_parameters['gamma'] = chi2(df=2).ppf(tracking_parameters['gate_probability']) 17 | 18 | CONFIRMED = 1 19 | PRELIMINARY = 0 20 | DEAD = -1 21 | 22 | log = logging.getLogger(__name__) 23 | 24 | def _solve_global_nearest_neighbour(delta_matrix, gate_distance=np.Inf, **kwargs): 25 | try: 26 | tic = time.time() 27 | DEBUG = kwargs.get('debug', False) 28 | # Copy and gating 29 | if DEBUG: print("delta matrix\n", delta_matrix) 30 | cost_matrix = np.copy(delta_matrix) 31 | cost_matrix[cost_matrix > gate_distance] = np.Inf 32 | if DEBUG: print("cost_matrix\n", cost_matrix) 33 | 34 | # Pre-processing 35 | valid_matrix = cost_matrix < np.Inf 36 | if np.all(valid_matrix == False): 37 | return [] 38 | if DEBUG: print("Valid matrix\n", valid_matrix.astype(int)) 39 | 40 | bigM = np.power(10., 1.0 + np.ceil(np.log10(1. + np.sum(cost_matrix[valid_matrix])))) 41 | cost_matrix[np.logical_not(valid_matrix)] = bigM 42 | if DEBUG: print("Modified cost matrix\n", cost_matrix) 43 | 44 | validCol = np.any(valid_matrix, axis=0) 45 | validRow = np.any(valid_matrix, axis=1) 46 | if DEBUG: print("validCol", validCol) 47 | if DEBUG: print("validRow", validRow) 48 | nRows = int(np.sum(validRow)) 49 | nCols = int(np.sum(validCol)) 50 | n = max(nRows, nCols) 51 | if DEBUG: print("nRows, nCols, n", nRows, nCols, n) 52 | 53 | maxv = 10. 
* np.max(cost_matrix[valid_matrix]) 54 | if DEBUG: print("maxv", maxv) 55 | 56 | rows = np.arange(nRows) 57 | cols = np.arange(nCols) 58 | dMat = np.zeros((n, n)) + maxv 59 | dMat[np.ix_(rows, cols)] = cost_matrix[np.ix_(validRow, validCol)] 60 | if DEBUG: print("dMat\n", dMat) 61 | 62 | # Assignment 63 | preliminary_assignment_matrix = munkres(dMat.astype(np.double)) 64 | if DEBUG: print("preliminary preliminary_assignment_matrix\n", 65 | np.asarray(preliminary_assignment_matrix,dtype=np.int)) 66 | preliminary_assignments = [(rowI, np.where(row)[0][0]) for rowI, row in 67 | enumerate(preliminary_assignment_matrix)] 68 | if DEBUG: 69 | print("preliminary assignments ", preliminary_assignments) 70 | 71 | # Post-processing 72 | rowIdx = np.where(validRow)[0] 73 | colIdx = np.where(validCol)[0] 74 | assignments = [] 75 | for preliminary_assignment in preliminary_assignments: 76 | row = preliminary_assignment[0] 77 | col = preliminary_assignment[1] 78 | if (row >= nRows) or (col >= nCols): 79 | continue 80 | rowI = rowIdx[row] 81 | colI = colIdx[col] 82 | if valid_matrix[rowI, colI]: 83 | assignments.append((rowI, colI)) 84 | assert all([delta_matrix[a[0], a[1]] <= gate_distance for a in assignments]) 85 | if DEBUG: 86 | print("final assignments", assignments) 87 | toc = time.time() - tic 88 | log.debug("_solve_global_nearest_neighbour runtime: {:.1f}ms".format(toc * 1000)) 89 | return assignments 90 | except Exception as e: 91 | print("#" * 20, "CRASH DEBUG INFO", "#" * 20) 92 | print("deltaMatrix", delta_matrix.shape, "\n", delta_matrix) 93 | print("gateDistance", gate_distance) 94 | print("Valid matrix", valid_matrix.shape, "\n", valid_matrix.astype(int)) 95 | print("validCol", validCol.astype(int)) 96 | print("validRow", validRow.astype(int)) 97 | print("dMat", dMat.shape, "\n", dMat) 98 | print("preliminary assignments", preliminary_assignments) 99 | print("rowIdx", rowIdx) 100 | print("colIdx", colIdx) 101 | print("assignments", assignments) 102 | print("#" * 20, 
"CRASH DEBUG INFO", "#" * 20) 103 | time.sleep(0.1) 104 | raise e 105 | 106 | def _initiator_distance(delta_vector, dt, v_max, R): 107 | movement_scalar = dt * v_max 108 | d_plus = np.maximum(delta_vector - movement_scalar, np.zeros(2)) 109 | d_minus = np.maximum(-delta_vector - movement_scalar, np.zeros(2)) 110 | d = d_plus + d_minus 111 | D = np.dot(d.T, np.dot(np.linalg.inv(R + R), d)) 112 | return D 113 | 114 | def _merge_targets(targets): 115 | if len(targets) == 1: return targets[0] 116 | 117 | time = targets[0].time 118 | scanNumber = None 119 | x_0 = np.mean(np.array([t.x_0 for t in targets]), axis=0) 120 | assert x_0.shape == targets[0].x_0.shape 121 | P_0 = np.mean(np.array([t.P_0 for t in targets]), axis=0) 122 | assert P_0.shape == targets[0].P_0.shape 123 | return Target(time, scanNumber, x_0, P_0, 124 | measurement=targets[0].measurement, # TODO: Make a less crude solution 125 | # measurementNumber=targets[0].measurementNumber 126 | ) 127 | 128 | def _merge_similar_targets(initial_targets, threshold): 129 | tic = time.time() 130 | if not initial_targets: return initial_targets 131 | targets = [] 132 | used_targets = set() 133 | for target_index, target in enumerate(initial_targets): 134 | if target_index not in used_targets: 135 | distance_to_targets = np.array([np.linalg.norm(target.x_0[0:2] - t.x_0[0:2]) for t in initial_targets]) 136 | close_targets = distance_to_targets < threshold 137 | close_targets_indices = np.where(close_targets)[0] 138 | log.debug("Merging " + str(len(close_targets)) + " initial targets to 1") 139 | selected_targets = [initial_targets[i] for i in close_targets_indices if i not in used_targets] 140 | merged_target = _merge_targets(selected_targets) 141 | for i in close_targets_indices: 142 | used_targets.add(i) 143 | assert type(merged_target) == type(target) 144 | targets.append(merged_target) 145 | toc = time.time() - tic 146 | log.debug("_merge_similar_targets runtime: {:.1f}ms".format(toc * 1000)) 147 | return targets 148 
| 149 | class PreliminaryTrack(): 150 | def __init__(self, state, covariance, mmsi = None): 151 | self.state = state 152 | self.covariance = covariance 153 | self.n = 0 154 | self.m = 0 155 | self.predicted_state = None 156 | self.measurement_index = None 157 | self.mmsi = mmsi 158 | 159 | def __str__(self): 160 | formatter = {'float_kind': lambda x: "{: 7.1f}".format(x) } 161 | mmsiStr = " MMSI {:} ".format(self.mmsi) if self.mmsi is not None else "" 162 | predStateStr = ("Pred state:" + np.array2string(self.predicted_state, 163 | precision=1, 164 | suppress_small=True, 165 | formatter=formatter) 166 | if self.predicted_state is not None else "") 167 | return ("State: " + np.array2string(self.state, precision=1, suppress_small=True,formatter=formatter) + 168 | " ({0:}|{1:}) ".format(self.m, self.n) + 169 | predStateStr + 170 | mmsiStr) 171 | 172 | __repr__ = __str__ 173 | 174 | def get_speed(self): 175 | return np.linalg.norm(self.state[2:4]) 176 | 177 | def predict(self, F, Q): 178 | self.predicted_state = F.dot(self.state) 179 | self.covariance = F.dot(self.covariance).dot(F.T) + Q 180 | 181 | def mn_analysis(self, M, N): 182 | m = self.m 183 | n = self.n 184 | if m >= M: # n >= N and m >= M: 185 | return CONFIRMED 186 | elif n >= N and m < M: 187 | return DEAD 188 | else: 189 | return PRELIMINARY 190 | 191 | def get_predicted_state_and_clear(self): 192 | return_value = np.copy(self.predicted_state) 193 | self.predicted_state = None 194 | return return_value 195 | 196 | def compareSimilarity(self, other): 197 | deltaState = self.state - other.state 198 | S = self.covariance + ais.R(False) 199 | S_inv = np.linalg.inv(S) 200 | NIS = deltaState.T.dot(S_inv).dot(deltaState) 201 | return NIS 202 | 203 | class Measurement(): 204 | def __init__(self, value, timestamp): 205 | self.value = value 206 | self.timestamp = timestamp 207 | # self.covariance = pv.R_RADAR() 208 | 209 | def __repr__(self): 210 | from time import strftime, gmtime 211 | meas_str = "Measurement: 
(%.2f, %.2f)" % (self.value[0], self.value[1]) 212 | time_str = "Time: " + strftime("%H:%M:%S", gmtime(self.timestamp)) 213 | return "{" + meas_str + ", " + time_str + "}" 214 | 215 | class Initiator(): 216 | def __init__(self, M, N, v_max, C, R, mergeThreshold=5, **kwargs): 217 | self.N = N 218 | self.M = M 219 | self.C = C 220 | self.R = R 221 | self.initiators = [] 222 | self.preliminary_tracks = [] 223 | self.v_max = v_max # m/s 224 | self.gamma = tracking_parameters['gamma'] 225 | self.last_timestamp = None 226 | self.merge_threshold = mergeThreshold # meter 227 | log.info("Initiator ready ({0:}/{1:})".format(self.M, self.N)) 228 | log.debug("Initiator gamma: " + str(self.gamma)) 229 | 230 | def getPreliminaryTracksString(self): 231 | return " ".join([str(e) for e in self.preliminary_tracks]) 232 | 233 | def processMeasurements(self, radar_measurement_list, ais_measurement_list=list()): 234 | tic = time.time() 235 | log.info("processMeasurements " + str(radar_measurement_list.measurements.shape[0])) 236 | unused_indices, initial_targets = self._processPreliminaryTracks(radar_measurement_list, ais_measurement_list) 237 | unused_indices = self._processInitiators(unused_indices, radar_measurement_list) 238 | self._spawnInitiators(unused_indices, radar_measurement_list) 239 | self.last_timestamp = radar_measurement_list.time 240 | initial_targets = _merge_similar_targets(initial_targets, self.merge_threshold) 241 | log.info("new initial targets " + str(len(initial_targets))) 242 | toc = time.time() - tic 243 | log.debug("processMeasurements runtime: {:.1f}ms".format(toc * 1000)) 244 | return initial_targets 245 | 246 | def _processPreliminaryTracks(self, measurement_list, ais_measurement_list): 247 | tic = time.time() 248 | newInitialTargets = [] 249 | radarMeasTime = measurement_list.time 250 | measurement_array = np.array(measurement_list.measurements, dtype=np.float32) 251 | 252 | # Predict position 253 | if self.last_timestamp is not None: 254 | dt = 
radarMeasTime - self.last_timestamp 255 | F = pv.Phi(dt) 256 | Q = pv.Q(dt) 257 | for track in self.preliminary_tracks: 258 | track.predict(F, Q) 259 | else: 260 | assert len(self.preliminary_tracks) == 0, "Undefined situation" 261 | 262 | existingMmsiList = [t.mmsi for t in self.preliminary_tracks if t.mmsi is not None] 263 | existingMmsiSet = set(existingMmsiList) 264 | assert len(existingMmsiList) == len(existingMmsiSet), "Duplicate MMSI in preliminaryTracks" 265 | for measurement in ais_measurement_list: 266 | if measurement.mmsi in existingMmsiSet: 267 | continue 268 | dT = radarMeasTime - measurement.time 269 | state, covariance = measurement.predict(dT) 270 | tempTrack = PreliminaryTrack(state, covariance, measurement.mmsi) 271 | tempTrack.predicted_state = state 272 | nisList = [p.compareSimilarity(tempTrack) for p in self.preliminary_tracks] 273 | threshold = 1.0 274 | if not any([s <= threshold for s in nisList]): 275 | self.preliminary_tracks.append(tempTrack) 276 | else: 277 | log.debug("Discarded new AIS preliminaryTrack because it was to similar" + 278 | str([e for e in nisList if e <= threshold]) + str(tempTrack)) 279 | 280 | log.info("_processPreliminaryTracks " + str(len(self.preliminary_tracks))) 281 | 282 | predicted_states = np.array([track.get_predicted_state_and_clear() 283 | for track in self.preliminary_tracks], 284 | ndmin=2, dtype=np.float32) 285 | # Check for something to work on 286 | n1 = len(self.preliminary_tracks) 287 | n2 = measurement_array.shape[0] 288 | n3 = measurement_array.size 289 | if n1 == 0: 290 | return np.arange(n2).tolist(), newInitialTargets 291 | if len(ais_measurement_list) == 0 and (n2 == 0 or n3 == 0): 292 | return np.arange(n2).tolist(), newInitialTargets 293 | 294 | 295 | # Calculate delta matrix 296 | delta_matrix = np.ones((n1, n2), dtype=np.float32) * np.Inf 297 | for i, predicted_state in enumerate(predicted_states): 298 | predicted_measurement = self.C.dot(predicted_state) 299 | delta_vector = 
measurement_array - predicted_measurement 300 | distance_vector = np.linalg.norm(delta_vector, axis=1) 301 | P_bar = self.preliminary_tracks[i].covariance 302 | S = self.C.dot(P_bar).dot(self.C.T) + self.R 303 | S_inv = np.linalg.inv(S) 304 | K = P_bar.dot(self.C.T).dot(S_inv) 305 | self.preliminary_tracks[i].K = K 306 | nis_vector = np.sum(np.matmul(delta_vector, S_inv) * delta_vector, axis=1) 307 | inside_gate_vector = nis_vector <= self.gamma 308 | delta_matrix[i,inside_gate_vector] = distance_vector[inside_gate_vector] 309 | 310 | # Assign measurements 311 | log.debug("\n"+np.array_str(delta_matrix, max_line_width=120)) 312 | assignments = _solve_global_nearest_neighbour(delta_matrix) 313 | 314 | # Update tracks 315 | for track_index, meas_index in assignments: 316 | P_bar = self.preliminary_tracks[track_index].covariance 317 | K = self.preliminary_tracks[track_index].K 318 | delta_vector = measurement_array[meas_index] - self.C.dot(predicted_states[track_index]) 319 | filtered_state = predicted_states[track_index] + K.dot(delta_vector) 320 | P_hat = P_bar - K.dot(self.C).dot(P_bar) 321 | self.preliminary_tracks[track_index].state = filtered_state 322 | self.preliminary_tracks[track_index].covariance = P_hat 323 | self.preliminary_tracks[track_index].m += 1 324 | self.preliminary_tracks[track_index].measurement_index = meas_index 325 | 326 | # Add dummy measurement to un-assigned tracks, and increase covariance 327 | assigned_track_indices = [assignment[0] for assignment in assignments] 328 | unassigned_track_indices = [track_index 329 | for track_index in range(len(self.preliminary_tracks)) 330 | if track_index not in assigned_track_indices] 331 | for track_index in unassigned_track_indices: 332 | self.preliminary_tracks[track_index].state = predicted_states[track_index] 333 | 334 | # Increase all N 335 | for track in self.preliminary_tracks: 336 | track.n += 1 337 | 338 | log.debug("Preliminary tracks "+self.getPreliminaryTracksString()) 339 | 340 | #Evaluate 
destiny 341 | removeIndices = [] 342 | for track_index, track in enumerate(self.preliminary_tracks): 343 | track_status = track.mn_analysis(self.M, self.N) 344 | track_speed = track.get_speed() 345 | if track_speed > self.v_max*1.5: 346 | log.warning("Removing TOO FAST track ({0:6.1f} m/s) i={1:}".format(track_speed, track_index) +"\n"+ repr(track)) 347 | removeIndices.append(track_index) 348 | elif track_status == DEAD: 349 | # log.debug("Removing DEAD track " + str(track_index)) 350 | removeIndices.append(track_index) 351 | elif track_status == CONFIRMED: 352 | log.debug("Removing CONFIRMED track " + str(track_index)) 353 | new_target = Target(radarMeasTime, 354 | None, 355 | np.array(track.state), 356 | track.covariance, 357 | measurementNumber=track.measurement_index + 1, 358 | measurement=measurement_array[track.measurement_index]) 359 | log.debug("Spawning new (initial) Target: " + str(new_target) 360 | + " Covariance:\n" + np.array_str(track.covariance)) 361 | newInitialTargets.append(new_target) 362 | removeIndices.append(track_index) 363 | 364 | #Remove dead preliminaryTracks 365 | for i in reversed(removeIndices): 366 | self.preliminary_tracks.pop(i) 367 | if removeIndices: 368 | log.debug(self.getPreliminaryTracksString()) 369 | 370 | #Return unused radar measurement indices 371 | used_radar_indices = [assignment[1] for assignment in assignments] 372 | unused_radar_indices = [index 373 | for index in np.arange(n2) 374 | if index not in used_radar_indices] 375 | 376 | toc = time.time() - tic 377 | log.debug("_processPreliminaryTracks runtime: {:.1f}ms".format(toc * 1000)) 378 | return unused_radar_indices, newInitialTargets 379 | 380 | def _processInitiators(self, unused_indices, measurement_list): 381 | tic = time.time() 382 | log.debug("_processInitiators " + str(len(self.initiators))) 383 | measTime = measurement_list.time 384 | measurementArray = np.array(measurement_list.measurements, ndmin=2, dtype=np.float32) 385 | n1 = len(self.initiators) 386 | 
n2 = len(unused_indices) 387 | if n1 == 0 or n2 == 0: 388 | return unused_indices 389 | 390 | #TODO: Improve runtime of this section. It takes about 97% of m/n runtime 391 | unusedMeasurementArray = measurementArray[unused_indices] 392 | initiatorArray = np.array([i.value for i in self.initiators], ndmin=2, dtype=np.float32) 393 | deltaTensor = np.empty((n1, n2, 2)) 394 | for i in range(n1): 395 | deltaTensor[i] = unusedMeasurementArray - initiatorArray[i] 396 | distance_matrix = np.linalg.norm(deltaTensor, axis=2) 397 | 398 | dt = measTime - self.initiators[0].timestamp 399 | gate_distance = (self.v_max * dt) 400 | log.debug("Gate distance {0:.1f}".format(gate_distance)) 401 | 402 | assignments = _solve_global_nearest_neighbour(distance_matrix, gate_distance) 403 | assigned_local_indices = [assignment[1] for assignment in assignments] 404 | used_indices = [unused_indices[j] for j in assigned_local_indices] 405 | unused_indices = [i for i in unused_indices if i not in used_indices] 406 | unused_indices.sort() 407 | assert len(unused_indices) == len(set(unused_indices)) 408 | toc = time.time() - tic 409 | log.debug("_processInitiators runtime: {:.1f}ms".format(toc * 1000)) 410 | # tic1 = time.time() 411 | self.__spawn_preliminary_tracks(unusedMeasurementArray, assignments, measTime) 412 | # log.debug("Test section runtime: {:.1f}ms".format((time.time() - tic1) * 1000)) 413 | return unused_indices 414 | 415 | def _spawnInitiators(self, unused_indices, measurement_list): 416 | tic = time.time() 417 | log.info("_spawnInitiators " + str(len(unused_indices))) 418 | measurementTime = measurement_list.time 419 | measurement_array = measurement_list.measurements 420 | self.initiators = [Measurement(measurement_array[index], measurementTime) 421 | for index in unused_indices] 422 | toc = time.time() - tic 423 | log.debug("_spawnInitiators runtime: {:.1f}ms".format(toc * 1000)) 424 | 425 | def __spawn_preliminary_tracks(self, unusedMeasurementArray, assignments, measTime): 
426 | tic = time.time() 427 | log.info("__spawn_preliminary_tracks " + str(len(assignments))) 428 | # initiator_index_vector = np.array([a[0] for a in assignments]) 429 | # assert initiator_index_vector.ndim == 1 430 | # measurement_index_vector = np.array([a[1] for a in assignments]) 431 | # initiator_matrix = np.array([self.initiators[i].value for i in initiator_index_vector], ndmin=2) 432 | # assert initiator_matrix.ndim == 2 433 | # position_matrix = np.array(unusedMeasurementArray[measurement_index_vector], ndmin=2) 434 | # delta_matrix = position_matrix - initiator_matrix 435 | # assert delta_matrix.ndim == 2 436 | # dt_vector = np.array([measTime - self.initiators[i].timestamp for i in initiator_index_vector]) 437 | # assert dt_vector.ndim == 1 438 | # velocity_matrix = delta_matrix / dt_vector[:,None] 439 | # assert velocity_matrix.shape == delta_matrix.shape 440 | # speed_vector = np.linalg.norm(velocity_matrix, axis=1) 441 | # assert speed_vector.ndim == 1 442 | # assert speed_vector.size == velocity_matrix.shape[0] 443 | # # too_fast_vector = speed_vector > self.v_max * 1.5 444 | # # assert too_fast_vector.shape == speed_vector.shape 445 | # x0_matrix = np.hstack((position_matrix, velocity_matrix)) 446 | # assert x0_matrix.ndim == 2 447 | # assert x0_matrix.shape == (position_matrix.shape[0], position_matrix.shape[1]+velocity_matrix.shape[1]) 448 | # if self.preliminary_tracks: 449 | # preliminary_tracks_state_matrix = np.array([t.state for t in self.preliminary_tracks], ndmin=2) 450 | # assert preliminary_tracks_state_matrix.ndim == 2 451 | # assert preliminary_tracks_state_matrix.shape[1] == x0_matrix.shape[1] 452 | # x0_tensor = np.concatenate(x0_matrix) 453 | # delta_tensor = x0_matrix - preliminary_tracks_state_matrix 454 | 455 | for initiator_index, measurement_index in assignments: 456 | delta_vector = unusedMeasurementArray[measurement_index] - self.initiators[initiator_index].value 457 | dt = measTime - self.initiators[initiator_index].timestamp 
458 | velocity_vector = delta_vector / dt 459 | speed = np.linalg.norm(velocity_vector) 460 | if speed > self.v_max*1.5: 461 | log.warning("Initiator speed to high {0:6.1f} m/s".format(speed) + 462 | "\n" + str(delta_vector)) 463 | x0 = np.hstack((unusedMeasurementArray[measurement_index], velocity_vector)) 464 | track = PreliminaryTrack(x0, pv.P0) 465 | 466 | # --- TODO: THIS SECTION MUST BE SPEEDED UP--- 467 | nisList = [p.compareSimilarity(track) for p in self.preliminary_tracks] 468 | # ---------------------------------------------- 469 | threshold = 1.0 470 | if not any([s <= threshold for s in nisList]): 471 | self.preliminary_tracks.append(track) 472 | else: 473 | log.debug("Discarded new preliminaryTrack because it was to similar ") 474 | # str([e for e in nisList if e <= threshold]) + str(track)) 475 | # i = nisList.index(min(nisList)) 476 | # log.debug(str(self.preliminary_tracks[i])) 477 | toc = time.time() - tic 478 | log.debug("__spawn_preliminary_tracks runtime: {:.1f}ms".format(toc * 1000)) 479 | 480 | if __name__ == "__main__": 481 | import pymht.utils.simulator as sim 482 | import pymht.models.pv as model 483 | 484 | np.set_printoptions(precision=1, suppress=True) 485 | 486 | seed = 1254 487 | nTargets = 2 488 | p0 = np.array([0., 0.]) 489 | radarRange = 5500 # meters 490 | meanSpeed = 10 # gausian distribution 491 | P_d = 1.0 492 | sigma_Q = pv.sigmaQ_true 493 | 494 | sim.seed_simulator(seed) 495 | 496 | initialTargets = sim.generateInitialTargets(nTargets, p0, radarRange, P_d, sigma_Q) 497 | 498 | nScans = 4 499 | timeStep = 0.7 500 | simTime = nScans * timeStep 501 | simList = sim.simulateTargets(initialTargets, simTime, timeStep, model) 502 | 503 | lambda_phi = 4e-6 504 | scanList = sim.simulateScans(simList, timeStep, model.C_RADAR, model.R_RADAR(0), 505 | lambda_phi, radarRange, p0) 506 | 507 | N_checks = 4 508 | M_required = 2 509 | 510 | v_max = 17 511 | initiator = Initiator(M_required, N_checks, v_max, pv.C_RADAR, pv.R_RADAR(), 
debug=False) 512 | 513 | for scanIndex, measurementList in enumerate(scanList): 514 | print("Scan index", scanIndex) 515 | # print(measurementList) 516 | initialTargets = initiator.processMeasurements(measurementList) 517 | if initialTargets: 518 | # print(scanIndex, end="\t") 519 | print(*initialTargets, sep="\n", end="\n\n") 520 | # print(*initialTargets, se) 521 | # else: 522 | # print(scanIndex, [], sep="\t") 523 | 524 | print("-" * 50) 525 | -------------------------------------------------------------------------------- /pymht/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/erikliland/pyMHT/58b2cd13a3fb38563a302c0f8380de3a5b0fccb1/pymht/models/__init__.py -------------------------------------------------------------------------------- /pymht/models/ais.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from .constants import * 3 | 4 | C = np.eye(N=nObsDim_AIS, M=nDimState) 5 | 6 | sigmaR_AIS_true_highAccuracy = 1.0 7 | sigmaR_AIS_true_lowAccuracy = 3.0 8 | 9 | def R(highAccuracy): 10 | if highAccuracy: 11 | return np.array(np.eye(nObsDim_AIS) * np.power(sigmaR_AIS_true_highAccuracy, 2), dtype=defaultType) 12 | else: 13 | return np.array(np.eye(nObsDim_AIS) * np.power(sigmaR_AIS_true_lowAccuracy, 2), dtype=defaultType) 14 | 15 | def Phi(T): 16 | return np.array([[1.0, 0, T, 0], 17 | [0, 1.0, 0, T], 18 | [0, 0, 1.0, 0], 19 | [0, 0, 0, 1.0]], 20 | dtype=defaultType) -------------------------------------------------------------------------------- /pymht/models/constants.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | defaultType = np.float32 3 | 4 | nDimState = 4 5 | nObsDim_AIS = 4 6 | 7 | sigmaR_RADAR_tracker = 2.5 # Measurement standard deviation used in kalman filter 8 | sigmaR_RADAR_true = 2.5 9 | sigmaQ_tracker = 1.0 # Target standard deviation used 
# Polar model: radar observes position only; PV state with CV dynamics.

# Radar observation matrix (position components of the state).
C_RADAR = np.array([[1.0, 0, 0, 0],
                    [0, 1.0, 0, 0]], dtype=defaultType)
H_radar = C_RADAR  # conventional alias for the observation matrix

p = 2.5**2  # Initial system state variance
# Initial state covariance; velocity variance scaled down relative to position.
P0 = np.diag([p, p, 0.3 * p, 0.3 * p]).astype(defaultType)

sigma_hdg = 3.0
sigma_speed = 0.8


def R_RADAR(sigmaR=sigmaR_RADAR_tracker):
    """Radar measurement covariance (2x2, isotropic with std sigmaR)."""
    return np.array(np.eye(2) * np.power(sigmaR, 2), dtype=defaultType)


def Phi(T):
    """Constant-velocity state transition matrix for a time step of T seconds."""
    A = np.eye(4, dtype=defaultType)
    A[0, 2] = T
    A[1, 3] = T
    return A
def R_RADAR(sigmaR=sigmaR_RADAR_tracker):
    """Radar measurement covariance (2x2, isotropic with std sigmaR)."""
    return np.array(np.eye(2) * np.power(sigmaR, 2), dtype=defaultType)


def Phi(T):
    """Constant-velocity state transition matrix for a time step of T seconds."""
    return np.array([[1.0, 0, T, 0],
                     [0, 1.0, 0, T],
                     [0, 0, 1.0, 0],
                     [0, 0, 0, 1.0]],
                    dtype=defaultType)


if __name__ == '__main__':
    # Quick visual sanity check of the model matrices.
    print("A(1)\n", Phi(1))
    print("C_Radar\n", C_RADAR)
    print("P0\n", P0)
    print("Q(2)\n", Q(2))
    print("R_Radar\n", R_RADAR())
    # Bug fix: the label advertises "+ Q(1)" but the expression previously
    # computed only A P0 A^T; Q(1) is now actually added.
    print("A P0 A^T + Q(1)\n", Phi(1).dot(P0).dot(Phi(1).T) + Q(1))
def __repr__(self):
    # Build a one-line, tab-separated human-readable summary of this node.
    # Optional sections (predicted state, ID, gate size, measurement, MMSI)
    # are emitted only when the corresponding data is present on the node.
    if hasattr(self, 'kalmanFilter'):
        np.set_printoptions(precision=4, suppress=True)
        predStateStr = " \tPredState: " + str(self.kalmanFilter.x_bar)
    else:
        predStateStr = ""

    if self.ID is not None:
        idStr = " \tID: {:2}".format(self.ID)
    else:
        idStr = ""

    # Measurement reference formatted as Measurement(scan:index)[:value].
    if (self.measurementNumber is not None) and (self.scanNumber is not None):
        measStr = (" \tMeasurement(" +
                   str(self.scanNumber) +
                   ":" +
                   str(self.measurementNumber) +
                   ")")
        if self.measurement is not None:
            measStr += ":" + str(self.measurement)
    else:
        measStr = ""

    if hasattr(self, 'kalmanFilter'):
        # Gate extent: 2-sigma along each principal axis of the innovation
        # covariance S (eigenvalues give the axis variances).
        lambda_, _ = np.linalg.eig(self.kalmanFilter.S)
        gateStr = (" \tGate size: (" +
                   '{:5.2f}'.format(np.sqrt(lambda_[0]) * 2) +
                   "," +
                   '{:5.2f}'.format(np.sqrt(lambda_[1]) * 2) +
                   ")")
    else:
        gateStr = ""

    nllrStr = " \tcNLLR:" + '{: 06.4f}'.format(self.cumulativeNLLR)

    # Score output is disabled; condition kept for reference.
    if False:  # self.trackHypotheses is None and self.rootHeight()>0:
        scoreStr = " \tScore:" + '{: 06.4f}'.format(self.getScore())
    else:
        scoreStr = ""

    if self.mmsi is not None:
        mmsiString = " \tMMSI: " + str(self.mmsi)
    else:
        mmsiString = ""

    timeString = datetime.datetime.fromtimestamp(self.time).strftime("%H:%M:%S.%f")

    return ("Time: " + timeString +
            "\t" + str(self.getPosition()) +
            " \t" + str(self.getVelocity()) +
            idStr +
            nllrStr +
            scoreStr +
            measStr +
            predStateStr +
            gateStr +
            mmsiString
            )
def __sub__(self, other):
    """Difference between the state estimates of two nodes."""
    return self.x_0 - other.x_0


def getScore(self):
    """Cumulative NLLR of this node relative to the root of its track tree."""
    return self.cumulativeNLLR - self.getRoot().cumulativeNLLR


def getXmlStateStrings(self, precision=2):
    """State vector components as rounded strings for XML export."""
    return tuple(str(round(component, precision)) for component in self.x_0[:4])


def getPosition(self):
    """Position part of the state estimate."""
    return Position(self.x_0[0:2])


def getVelocity(self):
    """Velocity part of the state estimate."""
    return Velocity(self.x_0[2:4])


def stepBack(self, stepsBack=1):
    """Walk up to the stepsBack-th ancestor; stops early at the root."""
    node = self
    while stepsBack and node.parent is not None:
        node = node.parent
        stepsBack -= 1
    return node


def getInitial(self):
    """Root-most ancestor of this node."""
    return self.stepBack(float('inf'))


def getNumOfNodes(self):
    """Total number of nodes in the subtree rooted at this node."""
    if self.trackHypotheses is None:
        return 1
    return 1 + sum(child.getNumOfNodes() for child in self.trackHypotheses)


def depth(self, count=0):
    """Depth of the subtree, following first hypotheses only."""
    node = self
    while node.trackHypotheses is not None:
        node = node.trackHypotheses[0]
        count += 1
    return count


def height(self, count=1):
    """Number of nodes on the path from this node up to the root."""
    node = self
    while node.parent is not None:
        node = node.parent
        count += 1
    return count
def getRoot(self):
    """Nearest ancestor flagged isRoot, or None if the chain has no root."""
    node = self
    while not node.isRoot:
        if node.parent is None:
            return None
        node = node.parent
    return node


def predictMeasurement(self, **kwargs):
    """Advance the Kalman filter one step and cache measurement-update terms."""
    self.kalmanFilter.predict()
    self.kalmanFilter._precalculateMeasurementUpdate()


def isOutsideRange(self, position, range):
    """True if this node's position lies farther than `range` from `position`."""
    deviation = model.C_RADAR.dot(self.x_0) - position
    return np.linalg.norm(deviation) > range


def haveNoNeightbours(self, targetList, thresholdDistance):
    """True if no leaf node of any target in targetList lies within thresholdDistance."""
    for candidate in targetList:
        for node in candidate.getLeafNodes():
            separation = np.linalg.norm(node.x_0[0:2] - self.x_0[0:2])
            if separation < thresholdDistance:
                return False
    return True
scanNumber=scanNumber, 213 | x_0=x_hat, 214 | P_0=P_hat[0], 215 | ID=self.ID, 216 | measurementNumber=measurementIndex + 1, 217 | measurement=z_list[measurementIndex], 218 | cumulativeNLLR=self.cumulativeNLLR + nllr, 219 | P_d=self.P_d, 220 | parent=self 221 | ) 222 | ) 223 | usedMeasurementIndices.add(measurementIndex) 224 | self.trackHypotheses.extend(newNodes) 225 | return usedMeasurementIndices 226 | 227 | def spawnNewNodes(self, associatedMeasurements, scanTime, scanNumber, x_bar, P_bar, measurementsIndices, 228 | measurements, states, covariance, nllrList, fusedAisData=None): 229 | assert scanTime > self.time 230 | assert self.scanNumber == scanNumber - 1, str(self.scanNumber) + "->" + str(scanNumber) 231 | assert x_bar.shape == (4,) 232 | assert P_bar.shape == (4, 4) 233 | assert all([state.shape == (4,) for state in states]) 234 | assert covariance.shape == (4, 4) 235 | nNewRadarMeasurementsIndices = len(measurementsIndices) 236 | nNewStates = len(states) 237 | nNewScores = len(nllrList) 238 | assert nNewRadarMeasurementsIndices == nNewStates == nNewScores 239 | self.trackHypotheses = [self.createZeroHypothesis( 240 | scanTime, scanNumber, x_bar, P_bar)] 241 | 242 | self.trackHypotheses.extend( 243 | [Target(time=scanTime, 244 | scanNumber=scanNumber, 245 | x_0=states[i], 246 | P_0=covariance, 247 | ID=self.ID, 248 | measurementNumber=measurementsIndices[i] + 1, 249 | measurement=measurements[measurementsIndices[i]], 250 | cumulativeNLLR=self.cumulativeNLLR + nllrList[i], 251 | P_d=self.P_d, 252 | parent=self 253 | ) for i in range(nNewStates)] 254 | ) 255 | for measurementIndex in measurementsIndices: 256 | associatedMeasurements.update( 257 | {(scanNumber, measurementIndex + 1)} 258 | ) 259 | 260 | if fusedAisData is None: 261 | return 262 | (fusedStates, 263 | fusedCovariance, 264 | fusedMeasurementIndices, 265 | fusedNllr, 266 | fusedMMSI) = fusedAisData 267 | if any([e is None for e in fusedAisData]): 268 | return 269 | historicalMmsi = 
self._getHistoricalMmsi() 270 | acceptedMMSI = [] 271 | for i in range(len(fusedMeasurementIndices)): 272 | if (historicalMmsi is None) or (fusedMMSI[i] == historicalMmsi): 273 | measurementNumber = fusedMeasurementIndices[i] + 1 if fusedMeasurementIndices[i] is not None else None 274 | measurement = measurements[fusedMeasurementIndices[i]] if fusedMeasurementIndices[i] is not None else None 275 | assert np.isfinite(self.cumulativeNLLR) 276 | assert np.isfinite(fusedNllr[i]) 277 | self.trackHypotheses.append( 278 | Target(scanTime, 279 | scanNumber, 280 | fusedStates[i], 281 | fusedCovariance[i], 282 | self.ID, 283 | measurementNumber=measurementNumber, 284 | measurement=measurement, 285 | cumulativeNLLR=self.cumulativeNLLR + fusedNllr[i], 286 | mmsi=fusedMMSI[i], 287 | P_d=self.P_d, 288 | parent=self) 289 | ) 290 | acceptedMMSI.append(fusedMMSI[i]) 291 | 292 | for mmsi in acceptedMMSI: 293 | associatedMeasurements.update( 294 | {(scanNumber, mmsi)} 295 | ) 296 | 297 | def _getHistoricalMmsi(self): 298 | if self.mmsi is not None: 299 | return self.mmsi 300 | if self.parent is not None: 301 | return self.parent._getHistoricalMmsi() 302 | return None 303 | 304 | def _normalizedInnovationSquared(self, measurementsResidual, S_inv): 305 | return np.sum(measurementsResidual.dot(S_inv) * 306 | measurementsResidual, axis=1) 307 | 308 | def calculateCNLLR(self, lambda_ex, measurementResidual, S, S_inv): 309 | P_d = self.P_d 310 | nis = measurementResidual.T.dot(S_inv).dot(measurementResidual) 311 | nllr = (0.5 * nis + 312 | np.log((lambda_ex * np.sqrt(np.linalg.det(2. 
* np.pi * S))) / P_d)) 313 | return self.cumulativeNLLR + nllr 314 | 315 | def measurementIsInsideErrorEllipse(self, measurement, eta2): 316 | measRes = measurement.position - self.predictedMeasurement 317 | return measRes.T.dot(self.invResidualCovariance).dot(measRes) <= eta2 318 | 319 | def createZeroHypothesis(self, time, scanNumber, x_0, P_0): 320 | return Target(time, 321 | scanNumber, 322 | x_0, 323 | P_0, 324 | self.ID, 325 | measurementNumber=0, 326 | cumulativeNLLR=self.cumulativeNLLR - np.log(1 - self.P_d), 327 | P_d=self.P_d, 328 | parent=self) 329 | 330 | def _pruneAllHypothesisExceptThis(self, keep, backtrack=False): 331 | keepIndex = self.trackHypotheses.index(keep) 332 | indices = np.delete(np.arange(len(self.trackHypotheses)), [keepIndex]) 333 | self.trackHypotheses = np.delete(self.trackHypotheses, indices).tolist() 334 | assert len(self.trackHypotheses) == 1, "It should have been one node left." 335 | 336 | if backtrack and self.parent is not None: 337 | self.parent._pruneAllHypothesisExceptThis(self, backtrack=backtrack) 338 | 339 | def _pruneEverythingExceptHistory(self): 340 | if self.parent is not None: 341 | self.parent._pruneAllHypothesisExceptThis(self, backtrack=True) 342 | 343 | def pruneDepth(self, stepsLeft): 344 | if stepsLeft <= 0: 345 | if self.parent is not None: 346 | self.parent._pruneAllHypothesisExceptThis(self, backtrack=True) 347 | # self.recursiveSubtractScore(self.cumulativeNLLR) 348 | assert self.parent.scanNumber == self.scanNumber - 1, \ 349 | "nScanPruning2: from scanNumber" + str(self.parent.scanNumber) + "->" + str(self.scanNumber) 350 | return self 351 | else: 352 | return self 353 | elif self.parent is not None: 354 | return self.parent.pruneDepth(stepsLeft - 1) 355 | else: 356 | return self 357 | 358 | def pruneSimilarState(self, threshold): 359 | if len(self.trackHypotheses) == 1: 360 | return 361 | p0 = np.array(self.trackHypotheses[0].x_0[0:2], dtype=np.float32) 362 | hypPos = np.array([n.x_0[0:2] for n in 
self.trackHypotheses[1:]], ndmin=2, dtype=np.float32) 363 | deltaPos = hypPos - p0 364 | distArray = np.linalg.norm(deltaPos, axis=1) 365 | # print("distArray",distArray) 366 | gatedDistArray = distArray < threshold 367 | # print("gatedDistArray",gatedDistArray) 368 | tempFuseIndices = np.where(gatedDistArray)[0] + 1 369 | if tempFuseIndices.size == 0: 370 | return 371 | fuseIndices = [] 372 | for i in tempFuseIndices: 373 | if self.trackHypotheses[i].mmsi is not None: 374 | continue 375 | fuseIndices.append(i) 376 | if len(fuseIndices) == 0: 377 | return 378 | # print("fuseIndices",fuseIndices) 379 | 380 | # Create merged state 381 | fuseStates = np.array([self.trackHypotheses[i].x_0 for i in fuseIndices]) 382 | # print("fuseStates",fuseStates) 383 | meanState = np.mean(fuseStates, axis=0) 384 | assert meanState.shape == self.trackHypotheses[0].x_0.shape 385 | 386 | fuseCovariances = np.array([self.trackHypotheses[i].P_0 for i in fuseIndices]) 387 | meanCovariance = np.mean(fuseCovariances, axis=0) 388 | assert meanCovariance.shape == self.trackHypotheses[0].P_0.shape 389 | 390 | cnllrList = np.array([self.trackHypotheses[i].cumulativeNLLR for i in fuseIndices]) 391 | meanCNLLR = np.mean(cnllrList) 392 | 393 | newNode = Target(self.trackHypotheses[0].time, 394 | self.trackHypotheses[0].scanNumber, 395 | meanState, 396 | meanCovariance, 397 | self.trackHypotheses[0].ID, 398 | P_d=self.trackHypotheses[0].P_d, 399 | parent=self, 400 | cumulativeNLLR=meanCNLLR) 401 | 402 | # Remove "old" nodes 403 | preLength = len(self.trackHypotheses) 404 | for i in sorted(fuseIndices, reverse=True): 405 | # print("i", i) 406 | del self.trackHypotheses[i] 407 | postLength = len(self.trackHypotheses) 408 | assert postLength < preLength 409 | 410 | # Add new node 411 | # print("Replacing 0-node") 412 | self.trackHypotheses[0] = newNode 413 | 414 | def getMeasurementSet(self, root=True): 415 | subSet = set() 416 | if self.trackHypotheses is not None: 417 | for hyp in 
self.trackHypotheses: 418 | subSet |= hyp.getMeasurementSet(False) 419 | if (self.measurementNumber == 0) or (root): 420 | return subSet 421 | else: 422 | tempSet = set() 423 | if self.measurementNumber is not None: 424 | radarMeasurement = (self.scanNumber, self.measurementNumber) 425 | tempSet.add(radarMeasurement) 426 | if self.mmsi is not None: 427 | aisMeasurement = (self.scanNumber, self.mmsi) 428 | tempSet.add(aisMeasurement) 429 | 430 | return tempSet | subSet 431 | 432 | def processNewMeasurementRec(self, measurementList, usedMeasurementSet, 433 | scanNumber, lambda_ex, eta2, kfVars): 434 | if self.trackHypotheses is None: 435 | usedMeasurementIndices = self.gateAndCreateNewHypotheses(measurementList, 436 | scanNumber, 437 | lambda_ex, 438 | eta2, 439 | kfVars) 440 | usedMeasurementSet.update(usedMeasurementIndices) 441 | else: 442 | for hyp in self.trackHypotheses: 443 | hyp.processNewMeasurementRec( 444 | measurementList, usedMeasurementSet, scanNumber, lambda_ex, eta2, kfVars) 445 | 446 | def _selectBestHypothesis(self): 447 | def recSearchBestHypothesis(target, bestScore, bestHypothesis): 448 | if target.trackHypotheses is None: 449 | if target.cumulativeNLLR <= bestScore[0]: 450 | bestScore[0] = target.cumulativeNLLR 451 | bestHypothesis[0] = target 452 | else: 453 | for hyp in target.trackHypotheses: 454 | recSearchBestHypothesis(hyp, bestScore, bestHypothesis) 455 | 456 | bestScore = [float('Inf')] 457 | bestHypothesis = np.empty(1, dtype=np.dtype(object)) 458 | recSearchBestHypothesis(self, bestScore, bestHypothesis) 459 | return bestHypothesis 460 | 461 | def getLeafNodes(self): 462 | def recGetLeafNode(node, nodes): 463 | if node.trackHypotheses is None: 464 | nodes.append(node) 465 | else: 466 | for hyp in node.trackHypotheses: 467 | recGetLeafNode(hyp, nodes) 468 | 469 | nodes = [] 470 | recGetLeafNode(self, nodes) 471 | return nodes 472 | 473 | def getLeafParents(self): 474 | leafNodes = self.getLeafNodes() 475 | parents = set() 476 | for node 
in leafNodes: 477 | parents.add(node.parent) 478 | return parents 479 | 480 | def recursiveSubtractScore(self, score): 481 | if score == 0: 482 | return 483 | self.cumulativeNLLR -= score 484 | 485 | if self.trackHypotheses is not None: 486 | for hyp in self.trackHypotheses: 487 | hyp.recursiveSubtractScore(score) 488 | 489 | def _checkScanNumberIntegrity(self): 490 | assert type(self.scanNumber) is int, \ 491 | "self.scanNumber is not an integer %r" % self.scanNumber 492 | 493 | if self.parent is not None: 494 | assert type(self.parent.scanNumber) is int, \ 495 | "self.parent.scanNumber is not an integer %r" % self.parent.scanNumber 496 | assert self.parent.scanNumber == self.scanNumber - 1, \ 497 | "self.parent.scanNumber(%r) == self.scanNumber-1(%r)" % ( 498 | self.parent.scanNumber, self.scanNumber) 499 | if self.trackHypotheses is not None: 500 | for hyp in self.trackHypotheses: 501 | hyp._checkScanNumberIntegrity() 502 | 503 | def _checkReferenceIntegrity(self): 504 | def recCheckReferenceIntegrety(target): 505 | if target.trackHypotheses is not None: 506 | for hyp in target.trackHypotheses: 507 | assert hyp.parent == target, \ 508 | ("Inconsistent parent <-> child reference: Measurement(" + 509 | str(target.scanNumber) + ":" + str(target.measurementNumber) + 510 | ") <-> " + "Measurement(" + str(hyp.scanNumber) + ":" + 511 | str(hyp.measurementNumber) + ")") 512 | recCheckReferenceIntegrety(hyp) 513 | 514 | recCheckReferenceIntegrety(self.getInitial()) 515 | 516 | def _checkMmsiIntegrity(self, activeMMSI=None): 517 | if self.mmsi is not None: 518 | if activeMMSI is None: 519 | if self.parent is not None: 520 | self.parent._checkMmsiIntegrity(self.mmsi) 521 | else: 522 | assert self.mmsi == activeMMSI, "A track is associated with multiple MMSI's" 523 | if self.parent is not None: 524 | self.parent._checkMmsiIntegrity(self.mmsi) 525 | else: 526 | if self.parent is not None: 527 | self.parent._checkMmsiIntegrity(activeMMSI) 528 | 529 | def 
_estimateRadarPeriod(self): 530 | if self.parent is not None: 531 | return self.time - self.parent.time 532 | 533 | def plotValidationRegion(self, eta2, stepsBack=0): 534 | if not hasattr(self, 'kalmanFilter'): 535 | raise NotImplementedError("plotValidationRegion is not functional in this version") 536 | if self.kalmanFilter.S is not None: 537 | self._plotCovarianceEllipse(eta2) 538 | if (self.parent is not None) and (stepsBack > 0): 539 | self.parent.plotValidationRegion(eta2, stepsBack - 1) 540 | 541 | def _plotCovarianceEllipse(self, eta2): 542 | from matplotlib.patches import Ellipse 543 | lambda_, _ = np.linalg.eig(self.kalmanFilter.S) 544 | ell = Ellipse(xy=(self.kalmanFilter.x_bar[0], self.kalmanFilter.x_bar[1]), 545 | width=np.sqrt(lambda_[0]) * np.sqrt(eta2) * 2, 546 | height=np.sqrt(lambda_[1]) * np.sqrt(eta2) * 2, 547 | angle=np.rad2deg(np.arctan2(lambda_[1], lambda_[0])), 548 | linewidth=2, 549 | ) 550 | ell.set_facecolor('none') 551 | ell.set_linestyle("dotted") 552 | ell.set_alpha(0.5) 553 | ax = plt.subplot(111) 554 | ax.add_artist(ell) 555 | 556 | def backtrackPosition(self, stepsBack=float('inf')): 557 | if self.parent is None: 558 | return [self.x_0[0:2]] 559 | else: 560 | return self.parent.backtrackPosition(stepsBack) + [self.x_0[0:2]] 561 | 562 | def backtrackState(self, stepsBack=float('inf')): 563 | if self.parent is None: 564 | return [self.x_0] 565 | else: 566 | return self.parent.backtrackPosition(stepsBack) + [self.x_0] 567 | 568 | def backtrackMeasurement(self, stepsBack=float('inf')): 569 | if self.parent is None: 570 | return [self.measurement] 571 | else: 572 | return self.parent.backtrackMeasurement(stepsBack) + [self.measurement] 573 | 574 | def backtrackNodes(self, stepsBack=float('inf')): 575 | if self.parent is None: 576 | return [self] 577 | else: 578 | return self.parent.backtrackNodes(stepsBack) + [self] 579 | 580 | def getSmoothTrack(self, radarPeriod): 581 | from pykalman import KalmanFilter 582 | roughTrackArray = 
self.backtrackMeasurement() 583 | initialNode = self.getInitial() 584 | depth = initialNode.depth() 585 | initialState = initialNode.x_0 586 | for i, m in enumerate(roughTrackArray): 587 | if m is None: 588 | roughTrackArray[i] = [np.NaN, np.NaN] 589 | measurements = np.ma.asarray(roughTrackArray) 590 | for i, m in enumerate(measurements): 591 | if np.isnan(np.sum(m)): 592 | measurements[i] = np.ma.masked 593 | assert measurements.shape[1] == 2, str(measurements.shape) 594 | if depth < 2: 595 | pos = measurements.filled(np.nan) 596 | vel = np.empty_like(pos) * np.nan 597 | return pos, vel, False 598 | kf = KalmanFilter(transition_matrices=model.Phi(radarPeriod), 599 | observation_matrices=model.C_RADAR, 600 | initial_state_mean=initialState) 601 | kf = kf.em(measurements, n_iter=5) 602 | (smoothed_state_means, _) = kf.smooth(measurements) 603 | smoothedPositions = smoothed_state_means[:, 0:2] 604 | smoothedVelocities = smoothed_state_means[:, 2:4] 605 | assert smoothedPositions.shape == measurements.shape, \ 606 | str(smoothedPositions.shape) + str(measurements.shape) 607 | assert smoothedVelocities.shape == measurements.shape, \ 608 | str(smoothedVelocities.shape) + str(measurements.shape) 609 | return smoothedPositions, smoothedVelocities, True 610 | 611 | def plotTrack(self, ax=plt.gca(), root=None, stepsBack=float('inf'), **kwargs): 612 | if kwargs.get('markInitial', False) and stepsBack == float('inf'): 613 | self.getInitial().markInitial(ax, **kwargs) 614 | if kwargs.get('markID', True): 615 | self.getInitial().markID(ax, offset=20, **kwargs) 616 | if kwargs.get('markRoot', False) and root is not None: 617 | root.markRoot(ax) 618 | if kwargs.get('markEnd', True): 619 | self.markEnd(ax, **kwargs) 620 | if kwargs.get('smooth', False) and self.getInitial().depth() > 1: 621 | radarPeriod = kwargs.get('radarPeriod', self._estimateRadarPeriod()) 622 | track, _, smoothingGood = self.getSmoothTrack(radarPeriod) 623 | linestyle = 'dashed' 624 | if not smoothingGood: 
def plotMeasurement(self, stepsBack=0, **kwargs):
    """Plot this node's measurement (and optionally those of ancestors).

    kwargs 'real' (default True) controls plotting of real measurements;
    'dummy' (default False) additionally plots the estimated position.
    """
    if (self.measurement is not None) and kwargs.get('real', True):
        Position(self.measurement).plot(
            self.measurementNumber, self.scanNumber, **kwargs)
    if kwargs.get("dummy", False):
        self.getPosition().plot(self.measurementNumber, self.scanNumber, **kwargs)

    if (self.parent is not None) and (stepsBack > 0):
        self.parent.plotMeasurement(stepsBack - 1, **kwargs)


def plotStates(self, ax=None, stepsBack=0, **kwargs):
    """Plot this node's state estimate, dispatching on node kind (AIS /
    dummy / real measurement).

    Bug fix: the axes default is now resolved at call time. The previous
    `ax=plt.gca()` default was evaluated once at import and pinned every
    later call to whatever figure happened to be current then.
    """
    if ax is None:
        ax = plt.gca()
    if (self.mmsi is not None) and kwargs.get('ais', True):
        Position(self.x_0).plot(ax,
                                self.measurementNumber,
                                self.scanNumber,
                                self.mmsi,
                                **kwargs)
    elif (self.measurementNumber is not None) and (self.measurementNumber == 0) and kwargs.get("dummy", True):
        Position(self.x_0).plot(ax,
                                self.measurementNumber,
                                self.scanNumber,
                                **kwargs)
    elif (self.measurementNumber is not None) and (self.measurementNumber > 0) and kwargs.get('real', True):
        Position(self.x_0).plot(ax,
                                self.measurementNumber,
                                self.scanNumber,
                                **kwargs)
    if (self.parent is not None) and (stepsBack > 0):
        self.parent.plotStates(ax, stepsBack - 1, **kwargs)
def markInitial(self, ax=None, **kwargs):
    """Mark the track's initial position with a black star.

    Bug fix (here and below): axes defaults are resolved at call time;
    `ax=plt.gca()` defaults were previously evaluated once at import.
    """
    if ax is None:
        ax = plt.gca()
    ax.plot(self.x_0[0],
            self.x_0[1],
            "*",
            markerfacecolor='black',
            markeredgecolor='black')


def markID(self, ax=None, **kwargs):
    """Label the track with "T<ID>", offset opposite the velocity direction."""
    if ax is None:
        ax = plt.gca()
    index = self.ID
    if (index is not None):
        normVelocity = (self.x_0[2:4] /
                        np.linalg.norm(self.x_0[2:4]))
        offsetScale = kwargs.get('offset', 0.0)
        offset = offsetScale * np.array(normVelocity)
        position = self.x_0[0:2] - offset
        (horizontalalignment,
         verticalalignment) = hpf._getBestTextPosition(normVelocity)
        ax.text(position[0],
                position[1],
                "T" + str(index),
                fontsize=10,
                horizontalalignment=horizontalalignment,
                verticalalignment=verticalalignment)


def markRoot(self, ax=None):
    """Mark the track's root node with an open square."""
    if ax is None:
        ax = plt.gca()
    ax.plot(self.x_0[0],
            self.x_0[1],
            's',
            markerfacecolor='None',
            markeredgecolor='black')


def markEnd(self, ax=None, **kwargs):
    """Mark the track's last node; add a red star when terminated."""
    if ax is None:
        ax = plt.gca()
    ax.plot(self.x_0[0],
            self.x_0[1],
            "H",
            markerfacecolor='None',
            markeredgecolor='black')
    if kwargs.get('terminated', False):
        ax.plot(self.x_0[0],
                self.x_0[1],
                "*",
                markeredgecolor='red')


def recDownPlotMeasurements(self, plottedMeasurements, ax=None, **kwargs):
    """Recursively plot every measurement in the subtree exactly once.

    Bug fix: real measurements were plotted via plotMeasurement(ax, ...),
    which passed the Axes object into plotMeasurement's stepsBack
    parameter. plotMeasurement takes no axes argument, so ax is no longer
    forwarded to it (it is still threaded through the recursion for
    interface compatibility).
    """
    if self.parent is not None:
        if self.measurementNumber == 0:
            self.plotMeasurement(**kwargs)
        else:
            if kwargs.get('real', True):
                measurementID = (self.scanNumber, self.measurementNumber)
                if measurementID not in plottedMeasurements:
                    self.plotMeasurement(**kwargs)
                    plottedMeasurements.add(measurementID)
    if self.trackHypotheses is not None:
        for hyp in self.trackHypotheses:
            hyp.recDownPlotMeasurements(plottedMeasurements, ax, **kwargs)
self.plotStates(ax, **kwargs) 741 | if self.trackHypotheses is not None: 742 | for hyp in self.trackHypotheses: 743 | hyp.recDownPlotStates(ax, **kwargs) 744 | 745 | def _storeNode(self, simulationElement, radarPeriod, **kwargs): 746 | trackElement = ET.SubElement(simulationElement, 747 | trackTag) 748 | unSmoothedStates = ET.SubElement(trackElement, 749 | statesTag) 750 | 751 | mmsi = self._getHistoricalMmsi() 752 | if mmsi is not None: 753 | trackElement.attrib[mmsiTag] = str(mmsi) 754 | trackElement.attrib[idTag] = str(self.ID) 755 | for k, v in kwargs.items(): 756 | trackElement.attrib[str(k)] = str(v) 757 | 758 | unSmoothedNodes = self.backtrackNodes() 759 | smoothedPositions, smoothedVelocities, smoothingGood = self.getSmoothTrack(radarPeriod) 760 | 761 | trackElement.attrib[lengthTag] = str(len(unSmoothedNodes)) 762 | 763 | assert len(unSmoothedNodes) == len(smoothedPositions) 764 | 765 | smoothedStateElement = ET.SubElement(trackElement, 766 | smoothedstatesTag) 767 | 768 | for node, sPos, sVel in zip(unSmoothedNodes, smoothedPositions, smoothedVelocities): 769 | stateElement = ET.SubElement(unSmoothedStates, 770 | stateTag, 771 | attrib={timeTag: str(node.time)}) 772 | positionElement = ET.SubElement(stateElement, positionTag) 773 | eastPos, northPos, eastVel, northVel = node.getXmlStateStrings() 774 | ET.SubElement(positionElement, northTag).text = northPos 775 | ET.SubElement(positionElement, eastTag).text = eastPos 776 | velocityElement = ET.SubElement(stateElement, velocityTag) 777 | ET.SubElement(velocityElement, northTag).text = northVel 778 | ET.SubElement(velocityElement, eastTag).text = eastVel 779 | if node.status != activeTag: 780 | stateElement.attrib[stateTag] = node.status 781 | if node.S_inv is not None: 782 | ET.SubElement(stateElement, 783 | inverseResidualCovarianceTag).text = np.array_str(node.S_inv, 784 | max_line_width=9999) 785 | 786 | if smoothingGood: 787 | sStateElement = ET.SubElement(smoothedStateElement, 788 | stateTag, 789 | 
def _storeNodeSparse(self, simulationElement, **kwargs):
    """Serialize only the endpoints of this track to XML.

    Sparse variant of _storeNode: appends a <track> element under
    simulationElement containing just the first and last node of the
    backtracked path (or a single node for one-node tracks). Any extra
    kwargs become string attributes on the track element. No smoothed
    states are written in this variant.
    """
    trackElement = ET.SubElement(simulationElement, trackTag)
    unSmoothedStates = ET.SubElement(trackElement, statesTag)
    # Attach the track's MMSI (first one found in its history), if any.
    mmsi = self._getHistoricalMmsi()
    if mmsi is not None:
        trackElement.attrib[mmsiTag] = str(mmsi)
    trackElement.attrib[idTag] = str(self.ID)
    for k, v in kwargs.items():
        trackElement.attrib[str(k)] = str(v)

    unSmoothedNodes = self.backtrackNodes()

    # Only the endpoints are stored; a single-node track stores itself once.
    storeIndices = (0, -1) if len(unSmoothedNodes) > 1 else (0,)
    for node in [unSmoothedNodes[i] for i in storeIndices]:
        stateElement = ET.SubElement(unSmoothedStates,
                                     stateTag,
                                     attrib={timeTag: str(node.time)})
        positionElement = ET.SubElement(stateElement, positionTag)
        eastPos, northPos, eastVel, northVel = node.getXmlStateStrings()
        ET.SubElement(positionElement, northTag).text = northPos
        ET.SubElement(positionElement, eastTag).text = eastPos
        velocityElement = ET.SubElement(stateElement, velocityTag)
        ET.SubElement(velocityElement, northTag).text = northVel
        ET.SubElement(velocityElement, eastTag).text = eastVel
        # Flag non-active (e.g. terminated) nodes on the state element.
        if node.status != activeTag:
            stateElement.attrib[stateTag] = node.status
# ---------------------------------------------------------------------------- /pymht/tracker.py
"""
========================================================================================
TRACK-ORIENTED-(MULTI-TARGET)-MULTI-HYPOTHESIS-TRACKER (with Kalman Filter and PV-model)
by Erik Liland, Norwegian University of Science and Technology
Trondheim, Norway
Spring 2017
========================================================================================
"""
import matplotlib
from pymht.utils.xmlDefinitions import *
from pymht.pyTarget import Target
import pymht.utils.kalman as kalman
import pymht.initiators.m_of_n as m_of_n
import pymht.models.pv as pv
import pymht.models.ais as ais_model
import time
import copy
import logging
import datetime
import itertools
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse.csgraph import connected_components
from ortools.linear_solver import pywraplp
from termcolor import cprint
import xml.etree.ElementTree as ET
from .utils.classDefinitions import AisMessageList
import os

# BUGFIX: the old check "major >= 1 and minor >= 12" rejected perfectly valid
# versions such as 2.0 (minor 0 < 12). Compare (major, minor) tuples instead.
npVersionTuple = tuple(int(v) for v in np.__version__.split('.')[:2])
assert npVersionTuple >= (1, 12), str(np.__version__)

# ----------------------------------------------------------------------------
# Instantiate logging object
# ----------------------------------------------------------------------------

log = logging.getLogger(__name__)


class Tracker():
    """Track-oriented multi-hypothesis tracker fusing radar scans and AIS messages."""

    def __init__(self, model, radarPeriod, lambda_phi, lambda_nu, **kwargs):
        """Configure the tracker.

        Args:
            model: state-space model module (must provide Phi, C_RADAR, P0,
                R_RADAR, Q and sigmaR_RADAR_tracker).
            radarPeriod: nominal time between radar scans [s].
            lambda_phi: clutter (false alarm) density.
            lambda_nu: new-target density.
            **kwargs: optional tuning parameters (P_d, N, eta2, ...).
        """
        # Radar parameters
        self.position = kwargs.get('position', np.array([0., 0.]))
        self.radarRange = kwargs.get('radarRange', float('inf'))
        self.radarPeriod = radarPeriod
        self.totalGrowTimeLimit = self.radarPeriod * 0.5
        self.nodeGrowTimeLimit = 200e-3
        self.fixedPeriod = True
        self.default_P_d = kwargs.get('P_d', 0.8)
        # Detection probability must be a proper probability (exclusive bounds)
        assert 0 < self.default_P_d < 1, "Invalid P_d"

        # State space pv
        self.A = model.Phi(radarPeriod)
        self.C = model.C_RADAR
        self.P_0 = model.P0
        self.R_RADAR = model.R_RADAR()
        # self.R_AIS = model.R_AIS()
        self.Q = model.Q(radarPeriod)

        # Target initiator (M-of-N logic)
        self.maxSpeedMS = kwargs.get('maxSpeedMS', 20)
        self.M_required = kwargs.get('M_required', 2)
        self.N_checks = kwargs.get('N_checks', 3)
        self.mergeThreshold = 4 * (model.sigmaR_RADAR_tracker ** 2)
        self.initiator = m_of_n.Initiator(self.M_required,
                                          self.N_checks,
                                          self.maxSpeedMS,
                                          self.C,
                                          self.R_RADAR,
                                          self.mergeThreshold,
                                          logLevel='DEBUG')

        # Tracker storage
        self.__targetList__ = []
        self.__targetWindowSize__ = []
        self.__scanHistory__ = []
        self.__associatedMeasurements__ = []
        self.__targetProcessList__ = []
        self.__trackNodes__ = np.empty(0, dtype=np.dtype(object))
        self.__terminatedTargets__ = []
        self.__clusterList__ = []
        self.__aisHistory__ = []
        self.trackIdCounter = 0

        # Timing and logging
        self.runtimeLog = {'Total': [],
                           'Process': [],
                           'Cluster': [],
                           'Optim': [],
                           'ILP-Prune': [],
                           'DynN': [],
                           'N-Prune': [],
                           'Terminate': [],
                           'Init': [],
                           }
        self.tic = {}
        self.toc = {}
        self.nOptimSolved = 0
        self.leafNodeTimeList = []
        self.createComputationTime = None

        # Tracker parameters
        self.pruneSimilar = kwargs.get('pruneSimilar', False)
        self.lambda_phi = lambda_phi
        self.lambda_nu = lambda_nu
        self.lambda_ex = lambda_phi + lambda_nu
        self.P_r = 0.95
        self.P_ais = 0.5
        self.eta2 = kwargs.get('eta2', 5.99)
        self.eta2_ais = kwargs.get('eta2_ais', 9.45)
        N = kwargs.get('N', 5)
        self.N_max = copy.copy(N)
        self.N = copy.copy(N)
        self.scoreUpperLimit = -np.log(1 - self.default_P_d) * 0.8
        self.clnnrUpperLimit = 3.0
        self.pruneThreshold = kwargs.get("pruneThreshold", 4)
        self.targetSizeLimit = 3000

        if kwargs.get("realTime") is True:
            self.setHighPriority()

        # Misc
        self.colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']

        log.info("Initiation done\n" + "#" * 100)

    def setHighPriority(self):
        """Adjust the OS scheduling priority of the current process.

        NOTE(review): on Darwin/Linux this sets niceness +5, which actually
        LOWERS priority — kept as-is to preserve behavior, but the intent
        looks inverted; confirm against the original author's intent.
        """
        import psutil
        import platform
        p = psutil.Process(os.getpid())
        OS = platform.system()
        if (OS == "Darwin") or (OS == "Linux"):
            p.nice(5)
        elif OS == "Windows":
            p.nice(psutil.HIGH_PRIORITY_CLASS)

    def preInitialize(self, simList):
        """Seed the tracker with ground-truth targets from a simulation list."""
        for initialTarget in simList[0]:
            self.initiateTarget(Target(initialTarget.time,
                                       None,
                                       initialTarget.cartesianState(),
                                       pv.P0,
                                       status=preinitializedTag))

    def initiateTarget(self, newTarget):
        """Register a new target unless it is too close to an existing one."""
        if newTarget.haveNoNeightbours(self.__targetList__, self.mergeThreshold):
            target = copy.copy(newTarget)
            target.scanNumber = len(self.__scanHistory__)
            target.P_d = self.default_P_d
            target.ID = copy.copy(self.trackIdCounter)
            target.isRoot = True
            self.trackIdCounter += 1
            self.__targetList__.append(target)
            self.__associatedMeasurements__.append(set())
            self.__trackNodes__ = np.append(self.__trackNodes__, target)
            self.__targetWindowSize__.append(self.N)
        else:
            log.debug("Discarded an initial target: " + str(newTarget))

    def addMeasurementList(self, scanList, aisList=None, **kwargs):
        """Run one full tracker iteration on a new radar scan (plus AIS data).

        Args:
            scanList: radar measurement list for the new scan.
            aisList: AIS messages received since the previous scan, or None.
            **kwargs: debug/feature switches (checkIntegrity, printTime, ...).
        """
        # BUGFIX: the default used to be "aisList=AisMessageList()" — a mutable
        # default argument created once at function definition and shared
        # between all calls. Coalesce None to a fresh empty list instead.
        if aisList is None:
            aisList = AisMessageList()
        if kwargs.get("checkIntegrity", False):
            self._checkTrackerIntegrity()
        self.tic.clear()
        self.toc.clear()

        log.info("addMeasurementList starting " + str(len(self.__scanHistory__) + 1))

        # Adding new data to history
history 171 | self.__scanHistory__.append(scanList) 172 | self.__aisHistory__.append(aisList) 173 | 174 | # Verifying time stamps 175 | scanTime = scanList.time 176 | log.debug('Radar time \t' + 177 | datetime.datetime.fromtimestamp(scanTime).strftime("%H:%M:%S.%f")) 178 | 179 | if aisList is not None: 180 | assert all([float(aisMessage.time) < scanTime for aisMessage in aisList]) 181 | assert all([float(aisMessage.time) > scanTime - self.radarPeriod for aisMessage in aisList]),\ 182 | str(scanTime) + str([m.time for m in aisList]) 183 | mmsiList = [m.mmsi for m in aisList] 184 | mmsiSet = set(mmsiList) 185 | assert len(mmsiList) == len(mmsiSet), "Duplicate MMSI in aisList" 186 | log.debug( 187 | 'AIS times \t' + ','.join([aisMeasurement.getTimeString() for aisMeasurement in aisList])) 188 | log.debug("AIS list:\n" + '\n'.join([str(m) for m in aisList])) 189 | nAisMeasurements = len(aisList) 190 | 191 | # 0 --Iterative procedure for tracking -- 192 | self.tic['Total'] = time.time() 193 | 194 | # 1 --Grow each track tree-- 195 | self.tic['Process'] = time.time() 196 | nRadarMeas = len(scanList.measurements) 197 | radarMeasDim = self.C.shape[0] 198 | scanNumber = len(self.__scanHistory__) 199 | nTargets = len(self.__targetList__) 200 | timeSinceLastScan = scanTime - self.__scanHistory__[-1].time 201 | if not self.fixedPeriod: 202 | self.radarPeriod = timeSinceLastScan 203 | unusedRadarMeasurementIndices = np.ones(nRadarMeas, dtype=np.bool) 204 | self.leafNodeTimeList = [] 205 | targetProcessTimes = np.zeros(nTargets) 206 | nTargetNodes = np.zeros(nTargets) 207 | for targetIndex, _ in enumerate(self.__targetList__): 208 | self._growTarget(targetIndex, nTargetNodes, scanList, aisList, radarMeasDim, 209 | unusedRadarMeasurementIndices, scanTime, scanNumber, targetProcessTimes) 210 | self.toc['Process'] = time.time() - self.tic['Process'] 211 | 212 | if kwargs.get("printAssociation", False): 213 | print(*self.__associatedMeasurements__, sep="\n", end="\n\n") 214 | 215 | 
if kwargs.get("checkIntegrity", False): 216 | self._checkTrackerIntegrity() 217 | 218 | # 2 --Cluster targets -- 219 | self.tic['Cluster'] = time.time() 220 | self.__clusterList__ = self._findClustersFromSets() 221 | self.toc['Cluster'] = time.time() - self.tic['Cluster'] 222 | if kwargs.get("printCluster", False): 223 | self.printClusterList(self.__clusterList__) 224 | 225 | # 3 --Maximize global (cluster vise) likelihood-- 226 | self.tic['Optim'] = time.time() 227 | self.nOptimSolved = 0 228 | for cluster in self.__clusterList__: 229 | if len(cluster) == 1: 230 | if kwargs.get('pruneSimilar', False): 231 | self._pruneSimilarState(cluster, self.pruneThreshold) 232 | self.__trackNodes__[cluster] = self.__targetList__[ 233 | cluster[0]]._selectBestHypothesis() 234 | else: 235 | self.__trackNodes__[cluster] = self._solveOptimumAssociation(cluster) 236 | self.nOptimSolved += 1 237 | self.toc['Optim'] = time.time() - self.tic['Optim'] 238 | 239 | # 4 -- ILP Pruning 240 | self.tic['ILP-Prune'] = time.time() 241 | # Not implemented 242 | self.toc['ILP-Prune'] = time.time() - self.tic['ILP-Prune'] 243 | 244 | # 5 -- Dynamic window size 245 | self.tic['DynN'] = time.time() 246 | if kwargs.get('dynamicWindow', False): 247 | self.__dynamicWindow(targetProcessTimes) 248 | self.toc['DynN'] = time.time() - self.tic['DynN'] 249 | 250 | # 6 -- Pick out dead tracks (terminate) 251 | self.tic['Terminate'] = time.time() 252 | deadTracks = self.__analyzeTrackTermination() 253 | self._terminateTracks(deadTracks) 254 | self.toc['Terminate'] = time.time() - self.tic['Terminate'] 255 | 256 | # 5 --Prune sliding window -- 257 | self.tic['N-Prune'] = time.time() 258 | self._nScanPruning() 259 | self.toc['N-Prune'] = time.time() - self.tic['N-Prune'] 260 | 261 | if kwargs.get("checkIntegrity", False): 262 | self._checkTrackerIntegrity() 263 | 264 | # 7 -- Initiate new tracks 265 | self.tic['Init'] = time.time() 266 | unusedRadarMeasurements = 
scanList.filterUnused(unusedRadarMeasurementIndices) 267 | usedAisMmsi = [[a[1] for a in targetAssociations if a[0] == scanNumber and a[1] >= 1e8] 268 | for targetAssociations in self.__associatedMeasurements__] 269 | usedAisMmsi = [item for sublist in usedAisMmsi for item in sublist] 270 | unusedAisMeasurements = aisList.filterUnused(set(usedAisMmsi)) 271 | if not kwargs.get('aisInitialization', True): 272 | unusedAisMeasurements = [] 273 | new_initial_targets = self.initiator.processMeasurements(unusedRadarMeasurements, unusedAisMeasurements) 274 | for initial_target in new_initial_targets: 275 | log.info("\tNew target({}): ".format( 276 | len(self.__targetList__) + 1) + str(initial_target)) 277 | self.initiateTarget(initial_target) 278 | self.toc['Init'] = time.time() - self.tic['Init'] 279 | 280 | # Logging critical time constraints 281 | self.toc['Total'] = time.time() - self.tic['Total'] 282 | if self.toc['Total'] > self.radarPeriod: 283 | log.critical("Did not pass real time demand! Used {0:.0f}ms of {1:.0f}ms".format( 284 | self.toc['Total'] * 1000, self.radarPeriod * 1000)) 285 | elif self.toc['Total'] > self.radarPeriod * 0.6: 286 | log.warning("Did almost not pass real time demand! 
Used {0:.0f}ms of {1:.0f}ms".format( 287 | self.toc['Total'] * 1000, self.radarPeriod * 1000)) 288 | 289 | if kwargs.get("checkIntegrity", False): 290 | self._checkTrackerIntegrity() 291 | 292 | for k, v in self.runtimeLog.items(): 293 | if k in self.toc: 294 | v.append(self.toc[k]) 295 | 296 | if kwargs.get("printInfo", False): 297 | print("Added scan number:", len(self.__scanHistory__), 298 | " \tnRadarMeas ", nRadarMeas, sep="") 299 | 300 | if kwargs.get("printTime", False): 301 | self.printTimeLog(**kwargs) 302 | 303 | if nTargetNodes.size > 0: 304 | avgTimePerNode = self.toc['Process'] * 1e6 / np.sum(nTargetNodes) 305 | log.debug( 306 | "Process time per (old) leaf node = {:.0f}us".format(avgTimePerNode)) 307 | log.info("addMeasurement completed \n" + self.getTimeLogString() + "\n") 308 | 309 | def _growTarget(self, targetIndex, nTargetNodes, scanList, aisList, measDim, unused_measurement_indices, 310 | scanTime, scanNumber, targetProcessTimes): 311 | tic = time.time() 312 | target = self.__targetList__[targetIndex] 313 | targetNodes = target.getLeafNodes() 314 | nNodes = len(targetNodes) 315 | nTargetNodes[targetIndex] = nNodes 316 | dummyNodesData, radarNodesData, fusedNodesData = self._processLeafNodes(targetNodes, 317 | scanList, 318 | aisList) 319 | x_bar_list, P_bar_list = dummyNodesData 320 | gated_x_hat_list, P_hat_list, gatedIndicesList, nllrList = radarNodesData 321 | (fused_x_hat_list, 322 | fused_P_hat_list, 323 | fused_radar_indices_list, 324 | fused_nllr_list, 325 | fused_mmsi_list) = fusedNodesData 326 | gatedMeasurementsList = [np.array(scanList.measurements[gatedIndices]) 327 | for gatedIndices in gatedIndicesList] 328 | assert len(gatedMeasurementsList) == nNodes 329 | assert all([m.shape[1] == measDim for m in gatedMeasurementsList]) 330 | 331 | for gated_index in gatedIndicesList: 332 | unused_measurement_indices[gated_index] = False 333 | 334 | for i, node in enumerate(targetNodes): 335 | 
node.spawnNewNodes(self.__associatedMeasurements__[targetIndex], 336 | scanTime, 337 | scanNumber, 338 | x_bar_list[i], 339 | P_bar_list[i], 340 | gatedIndicesList[i], 341 | scanList.measurements, 342 | gated_x_hat_list[i], 343 | P_hat_list[i], 344 | nllrList[i], 345 | (fused_x_hat_list[i], 346 | fused_P_hat_list[i], 347 | fused_radar_indices_list[i], 348 | fused_nllr_list[i], 349 | fused_mmsi_list[i])) 350 | 351 | targetProcessTimes[targetIndex] = time.time() - tic 352 | 353 | def _terminateTracks(self, deadTracks): 354 | deadTracks.sort(reverse=True) 355 | for trackIndex in deadTracks: 356 | nTargetPre = len(self.__targetList__) 357 | nTracksPre = self.__trackNodes__.shape[0] 358 | nAssociationsPre = len(self.__associatedMeasurements__) 359 | targetListTypePre = type(self.__targetList__) 360 | trackListTypePre = type(self.__trackNodes__) 361 | associationTypePre = type(self.__associatedMeasurements__) 362 | self.__terminatedTargets__.append( 363 | copy.deepcopy(self.__trackNodes__[trackIndex])) 364 | del self.__targetList__[trackIndex] 365 | del self.__targetWindowSize__[trackIndex] 366 | self.__trackNodes__ = np.delete(self.__trackNodes__, trackIndex) 367 | del self.__associatedMeasurements__[trackIndex] 368 | self.__terminatedTargets__[-1]._pruneEverythingExceptHistory() 369 | nTargetsPost = len(self.__targetList__) 370 | nTracksPost = self.__trackNodes__.shape[0] 371 | nAssociationsPost = len(self.__associatedMeasurements__) 372 | targetListTypePost = type(self.__targetList__) 373 | trackListTypePost = type(self.__trackNodes__) 374 | associationTypePost = type(self.__associatedMeasurements__) 375 | assert nTargetsPost == nTargetPre - 1 376 | assert nTracksPost == nTracksPre - \ 377 | 1, str(nTracksPre) + '=>' + str(nTracksPost) 378 | assert nAssociationsPost == nAssociationsPre - 1 379 | assert targetListTypePost == targetListTypePre 380 | assert trackListTypePost == trackListTypePre 381 | assert associationTypePost == associationTypePre 382 | 383 | def 
_processLeafNodes(self, targetNodes, scanList, aisList): 384 | dummyNodesData = self.__predictDummyMeasurements(targetNodes) 385 | 386 | gatedRadarData = self.__processMeasurements(targetNodes, 387 | scanList, 388 | dummyNodesData, 389 | pv.C_RADAR, 390 | self.R_RADAR) 391 | 392 | radarNodesData = self.__createPureRadarNodes(gatedRadarData) 393 | 394 | fusedNodesData = self.__fuseRadarAndAis(targetNodes, 395 | aisList, 396 | scanList) 397 | 398 | return dummyNodesData, radarNodesData, fusedNodesData 399 | 400 | @staticmethod 401 | def __createPureRadarNodes(gatedRadarData): 402 | (gated_radar_indices_list, 403 | _, 404 | gated_x_radar_hat_list, 405 | P_radar_hat_list, 406 | _, 407 | _, 408 | radar_nllr_list) = gatedRadarData 409 | 410 | newNodesData = (gated_x_radar_hat_list, 411 | P_radar_hat_list, 412 | gated_radar_indices_list, 413 | radar_nllr_list) 414 | assert all(d is not None for d in newNodesData) 415 | return newNodesData 416 | 417 | def __fuseRadarAndAis(self, targetNodes, aisList, scanList): 418 | nNodes = len(targetNodes) 419 | 420 | if aisList is None: 421 | return ([np.array([]) for _ in range(nNodes)], 422 | [np.array([]) for _ in range(nNodes)], 423 | [np.array([]) for _ in range(nNodes)], 424 | [np.array([]) for _ in range(nNodes)], 425 | [np.array([]) for _ in range(nNodes)]) 426 | 427 | aisMeasurements = aisList 428 | radarMeasurements = scanList.measurements 429 | aisTimeSet = {m.time for m in aisMeasurements} 430 | scanTime = scanList.time 431 | 432 | fused_x_hat_list = [] 433 | fused_P_hat_list = [] 434 | fused_radar_indices_list = [] 435 | fused_nllr_list = [] 436 | fused_mmsi_list = [] 437 | 438 | lambda_ais = (len(self.__targetList__) * self.P_ais) / (np.pi * self.radarRange ** 2) 439 | # print("lambda_ais {:.2e}".format(lambda_ais)) 440 | 441 | for i, node in enumerate(targetNodes): 442 | # print("Node",i, "Target ID", node.ID) 443 | x_hat_list = [] 444 | P_hat_list = [] 445 | radar_indices_list = [] 446 | nllr_list = [] 447 | mmsi_list = 
[] 448 | for aisTime in aisTimeSet: 449 | # print("aisTime", aisTime) 450 | dT1 = float(aisTime) - node.time 451 | x_bar1, P_bar1 = kalman.predict_single(pv.Phi(dT1), pv.Q(dT1), node.x_0, node.P_0) 452 | # print("x_bar_1", x_bar1) 453 | # print("P_bar1\n", P_bar1) 454 | for highAccuracy in [True, False]: 455 | # print("highAccuracy",highAccuracy) 456 | z_hat_list1, S_list1, S_inv_list1, K_list1, P_hat_list1 = kalman.precalc( 457 | ais_model.C, 458 | ais_model.R(highAccuracy), 459 | np.array(x_bar1, ndmin=2), 460 | np.array(P_bar1, ndmin=3)) 461 | activeAisMeasurements = [m for m in aisMeasurements if 462 | m.time == aisTime and m.highAccuracy == highAccuracy] 463 | if len(activeAisMeasurements) == 0: 464 | continue 465 | # print("activeAisMeasurements",activeAisMeasurements) 466 | # activeAisMeasurementsIndices = np.array([aisMeasurements.index(m) for m in activeAisMeasurements]) 467 | # print("activeAisMeasurementsIndices",activeAisMeasurementsIndices) 468 | z_array1 = np.array([m.state for m in activeAisMeasurements], ndmin=2) 469 | z_tilde_array1 = z_array1 - z_hat_list1[0] 470 | nis_array1 = (kalman.normalizedInnovationSquared(z_tilde_array1, S_inv_list1))[0] 471 | # print("nis_array1",nis_array1) 472 | gated_nis_array1 = nis_array1 <= self.eta2_ais 473 | # print("gated nis array1", nis_array1[gated_nis_array1]) 474 | gated_ais_indices = np.flatnonzero(gated_nis_array1) 475 | if len(gated_ais_indices) == 0: 476 | continue 477 | # print("gated S array1\n", S_list1) 478 | # print("gated_ais_indices",gated_ais_indices) 479 | nllr1_list = kalman.nllr(lambda_ais, 1.0, S_list1, nis_array1[gated_ais_indices]) 480 | # print("nllr1", np.array_str(nllr1_list, precision=2)) 481 | for i, ais_index in enumerate(gated_ais_indices): 482 | # print("ais_index",ais_index) 483 | ais_measurement = activeAisMeasurements[ais_index] 484 | x_hat1 = x_bar1 + K_list1[0].dot(ais_measurement.state - z_hat_list1[0]) 485 | P_hat1 = P_hat_list1[0] 486 | dT2 = scanTime - 
float(ais_measurement.time) 487 | x_bar2, P_bar2 = kalman.predict_single(pv.Phi(dT2), pv.Q(dT2), x_hat1, P_hat1) 488 | z_hat_list2, S_list2, S_inv_list2, K_list2, P_hat_list2 = kalman.precalc( 489 | pv.C_RADAR, 490 | pv.R_RADAR(), 491 | np.array(x_bar2, ndmin=2), 492 | np.array(P_bar2, ndmin=3)) 493 | z_tilde_array2 = radarMeasurements - z_hat_list2[0] 494 | nis_array2 = (kalman.normalizedInnovationSquared(z_tilde_array2, S_inv_list2))[0] 495 | gated_nis_array2 = nis_array2 <= self.eta2 496 | gated_radar_indices = np.flatnonzero(gated_nis_array2) 497 | nllr2_list = kalman.nllr(self.lambda_ex, node.P_d, S_list2, nis_array2[gated_radar_indices]) 498 | # print("nllr2_list",np.array_str(nllr2_list, precision=2)) 499 | for j, radar_index in enumerate(gated_radar_indices): 500 | x_hat2 = x_bar2 + K_list2[0].dot(radarMeasurements[radar_index] - z_hat_list2[0]) 501 | P_hat2 = P_hat_list2[0] 502 | nllr12 = 0.5 * nllr1_list[i] + 0.5 * nllr2_list[j] 503 | log.debug("Fused node " + 504 | np.array_str(x_hat2, precision=1) + " " + 505 | '{: .2f} '.format(nllr12) + 506 | str(ais_measurement.mmsi)) 507 | 508 | x_hat_list.append(x_hat2) 509 | P_hat_list.append(P_hat2) 510 | radar_indices_list.append(radar_index) 511 | nllr_list.append(nllr12) 512 | mmsi_list.append(ais_measurement.mmsi) 513 | if len(gated_radar_indices) == 0: 514 | x_hat2 = x_bar2 515 | P_hat2 = P_hat_list2[0] 516 | nllr12 = nllr1_list[i] 517 | log.debug("Pure AIS node " + 518 | np.array_str(x_hat2, precision=1) + " " + 519 | '{: .2f} '.format(nllr12) + 520 | 'MMSI: {:}'.format(ais_measurement.mmsi)) 521 | x_hat_list.append(x_hat2) 522 | P_hat_list.append(P_hat2) 523 | radar_indices_list.append(None) 524 | nllr_list.append(nllr12) 525 | mmsi_list.append(ais_measurement.mmsi) 526 | 527 | fused_x_hat_list.append(np.array(x_hat_list, ndmin=2)) 528 | fused_P_hat_list.append(np.array(P_hat_list, ndmin=3)) 529 | fused_radar_indices_list.append(np.array(radar_indices_list)) 530 | 
fused_nllr_list.append(np.array(nllr_list)) 531 | fused_mmsi_list.append(np.array(mmsi_list)) 532 | 533 | assert len(fused_x_hat_list) == nNodes 534 | assert len(fused_P_hat_list) == nNodes 535 | assert len(fused_radar_indices_list) == nNodes 536 | assert len(fused_nllr_list) == nNodes 537 | assert len(fused_mmsi_list) == nNodes 538 | for i in range(nNodes): 539 | assert fused_x_hat_list[i].ndim == 2, str(fused_x_hat_list[i].ndim) 540 | assert fused_P_hat_list[i].ndim == 3, str(fused_P_hat_list[i].ndim) 541 | nFusedNodes, nStates = fused_x_hat_list[i].shape 542 | if nStates == 0: 543 | continue 544 | assert fused_P_hat_list[i].shape == (nFusedNodes, nStates, nStates), str(fused_P_hat_list[i].shape) 545 | 546 | fusedNodesData = (fused_x_hat_list, 547 | fused_P_hat_list, 548 | fused_radar_indices_list, 549 | fused_nllr_list, 550 | fused_mmsi_list) 551 | 552 | return fusedNodesData 553 | 554 | def __fuseRadarAndAisBulk(self, targetNodes, aisList, scanList): 555 | # TODO: Remove when done debugging 556 | np.set_printoptions(precision=1, suppress=True) 557 | 558 | nNodes = len(targetNodes) 559 | 560 | if aisList is None: 561 | return ([None] * nNodes, 562 | [None] * nNodes, 563 | [None] * nNodes, 564 | [None] * nNodes, 565 | [None] * nNodes) 566 | 567 | nNodes = len(targetNodes) 568 | aisMeasurements = aisList 569 | nAisMeasurements = len(aisMeasurements) 570 | radarMeasurements = scanList.measurements 571 | nRadarMeasurements = len(radarMeasurements) 572 | aisTimeSet = {m.time for m in aisMeasurements} 573 | scanTime = scanList.time 574 | nodesTime = scanTime - self.radarPeriod 575 | ais_meas_dim = ais_model.C.shape[0] 576 | radar_meas_dim = pv.C_RADAR.shape[0] 577 | state_dim = ais_model.C.shape[1] 578 | 579 | print("\nnNodes", nNodes) 580 | print("nAisMeasurements", nAisMeasurements) 581 | 582 | fused_x_hat_list = [None] * nNodes 583 | fused_P_hat_list = [None] * nNodes 584 | fused_radar_indices_list = [None] * nNodes 585 | fused_nllr_list = [None] * nNodes 586 | 
fused_mmsi_list = [None] * nNodes 587 | 588 | print("fused_x_hat_list", fused_x_hat_list) 589 | 590 | lambda_ais = (len(self.__targetList__) * self.P_ais) / (np.pi * self.radarRange**2) 591 | # print("lambda_ais {:.2e}".format(lambda_ais)) 592 | 593 | for aisTime in aisTimeSet: 594 | print("aisTime", aisTime) 595 | dT1 = float(aisTime) - nodesTime 596 | assert dT1 >= 0, str(dT1) 597 | 598 | x_0_list = np.array([node.x_0 for node in targetNodes], ndmin=2, dtype=np.float32) 599 | assert len(x_0_list) == nNodes, str(len(x_0_list)) 600 | P_0_list = np.array([node.P_0 for node in targetNodes], ndmin=3, dtype=np.float32) 601 | assert len(P_0_list) == nNodes, str(len(P_0_list)) 602 | x_bar1, P_bar1 = kalman.predict(pv.Phi(dT1), pv.Q(dT1), x_0_list, P_0_list) 603 | print("x_bar1", np.array_str(x_bar1, precision=1, suppress_small=True)) 604 | assert len(x_bar1) == nNodes, str(len(x_bar1)) 605 | assert len(P_bar1) == nNodes, str(len(P_bar1)) 606 | 607 | for highAccuracy in [True, False]: 608 | print("highAccuracy", highAccuracy) 609 | activeAisMeasurements = [m for m in aisMeasurements 610 | if m.time == aisTime and 611 | m.highAccuracy == highAccuracy] 612 | nActiveAisMeasurements = len(activeAisMeasurements) 613 | print("nActiveAisMeasurements", nActiveAisMeasurements) 614 | if nActiveAisMeasurements == 0: 615 | print("No active measurements. 
Skipping") 616 | continue 617 | 618 | z_hat_list1, S_list1, S_inv_list1, K_list1, P_hat_list1 = kalman.precalc( 619 | ais_model.C, 620 | ais_model.R(highAccuracy), 621 | np.array(x_bar1, ndmin=2), 622 | np.array(P_bar1, ndmin=3)) 623 | assert z_hat_list1.shape == (nNodes, ais_meas_dim) 624 | assert S_list1.shape == (nNodes, ais_meas_dim, ais_meas_dim) 625 | assert S_inv_list1.shape == (nNodes, ais_meas_dim, ais_meas_dim) 626 | assert K_list1.shape == (nNodes, state_dim, ais_meas_dim) 627 | assert P_hat_list1.shape == (nNodes, state_dim, state_dim) 628 | 629 | mmsi_array = np.array([m.mmsi for m in activeAisMeasurements], dtype=np.int) 630 | print("K_list1\n", K_list1) 631 | 632 | z_array1 = np.array([m.state for m in activeAisMeasurements], ndmin=2) 633 | assert len(z_array1) == nActiveAisMeasurements 634 | 635 | z_tilde_array1 = kalman.z_tilde(z_array1, z_hat_list1, nNodes, ais_meas_dim) 636 | assert z_tilde_array1.shape == (nNodes, nActiveAisMeasurements, ais_meas_dim) 637 | print("z_tilde_array1", z_tilde_array1.shape, "\n", z_tilde_array1) 638 | 639 | nis_array1 = np.array((kalman.normalizedInnovationSquared(z_tilde_array1, S_inv_list1)), ndmin=2) 640 | assert nis_array1.shape == (nNodes, nActiveAisMeasurements) 641 | print("nis_array1\n", np.array_str(nis_array1, precision=1)) 642 | 643 | gate_nis_array1 = np.array(nis_array1 <= self.eta2_ais, dtype=np.bool, ndmin=2) 644 | assert gate_nis_array1.shape == (nNodes, nActiveAisMeasurements) 645 | print("gate_nis_array1\n", gate_nis_array1) 646 | 647 | gated_ais_indices = np.nonzero(gate_nis_array1) 648 | assert len(gated_ais_indices) == 2 649 | assert len(gated_ais_indices[0]) == len(gated_ais_indices[1]) 650 | 651 | nUniqueGatedAisMeasurements = len(set(gated_ais_indices[1])) 652 | nGatedAisMeasurements = np.count_nonzero(gate_nis_array1, axis=1) 653 | if nUniqueGatedAisMeasurements == 0: 654 | print("No AIS measurements inside the gate. 
Skipping.") 655 | continue 656 | print("nGatedAisMeasurements", nGatedAisMeasurements) 657 | print("nUniqueGatedAisMeasurements", nUniqueGatedAisMeasurements) 658 | print("Node indices", gated_ais_indices[0], 659 | "are connected with AIS indices", gated_ais_indices[1], "respectively") 660 | 661 | gated_nis1 = [np.array([nis_array1[i1][i2]], ndmin=1) 662 | for i1, i2 in zip(gated_ais_indices[0], gated_ais_indices[1])] 663 | # gated_nis1 = np.array(nis_array1[gated_ais_indices], ndmin=2) 664 | print("gated_nis1\n", gated_nis1) 665 | assert len(gated_nis1) == len(gated_ais_indices[0]) 666 | # assert all([gated_nis1[i].shape == (nGatedAisMeasurements[i],1) for i,e in enumerate(nGatedAisMeasurements) ]) 667 | 668 | nllr1_list = [kalman.nllr(lambda_ais, 1.0, S_list1[i], gated_nis1[i]) for i in range(len(gated_nis1))] 669 | print("nllr1_list", nllr1_list) 670 | # assert nllr1_list.shape == (nNodes, nGatedAisMeasurements), str(nllr1_list.shape) 671 | 672 | print("x_bar1[gated_ais_indices[0]]", x_bar1[gated_ais_indices[0]]) 673 | print("K_list1[gated_ais_indices[0]]\n", K_list1[gated_ais_indices[0]]) 674 | print("z_tilde_array1[gated_ais_indices]\n", z_tilde_array1[gated_ais_indices]) 675 | 676 | x_hat1 = (x_bar1[gated_ais_indices[0]] + np.matmul(K_list1[gated_ais_indices[0]], 677 | z_tilde_array1[gated_ais_indices].T)[0].T) 678 | print("x_hat1", x_hat1.shape, "\n", x_hat1) 679 | assert x_hat1.shape == (np.sum(nGatedAisMeasurements), state_dim), str(x_hat1.shape) 680 | 681 | P_hat1 = P_hat_list1[gated_ais_indices[0]] 682 | print("P_hat1", P_hat1.shape, "\n", P_hat1) 683 | assert P_hat1.shape == (x_hat1.shape[0], state_dim, state_dim) 684 | 685 | dT2 = scanTime - float(aisTime) 686 | assert dT2 >= 0, str(dT2) 687 | 688 | x_bar2, P_bar2 = kalman.predict(pv.Phi(dT2), pv.Q(dT2), x_hat1, P_hat1) 689 | assert x_bar2.shape == x_hat1.shape 690 | assert P_bar2.shape == P_hat1.shape 691 | 692 | z_hat_list2, S_list2, S_inv_list2, K_list2, P_hat_list2 = kalman.precalc( 693 | 
pv.C_RADAR, 694 | pv.R_RADAR(), 695 | np.array(x_bar2, ndmin=2), 696 | np.array(P_bar2, ndmin=3)) 697 | print("z_hat_list2\n", z_hat_list2) 698 | # print("K_list2\n", K_list2) 699 | assert z_hat_list2.shape == (np.sum(nGatedAisMeasurements), radar_meas_dim) 700 | assert S_list2.shape == (np.sum(nGatedAisMeasurements), radar_meas_dim, radar_meas_dim) 701 | assert S_inv_list2.shape == (np.sum(nGatedAisMeasurements), radar_meas_dim, radar_meas_dim) 702 | assert K_list2.shape == (np.sum(nGatedAisMeasurements), state_dim, radar_meas_dim) 703 | assert P_hat_list2.shape == (np.sum(nGatedAisMeasurements), state_dim, state_dim) 704 | 705 | print("nRadarMeasurements", nRadarMeasurements) 706 | 707 | z_tilde_array2 = kalman.z_tilde(radarMeasurements, z_hat_list2, np.sum(nGatedAisMeasurements), radar_meas_dim) 708 | print("z_tilde_array2", z_tilde_array2.shape, "\n", z_tilde_array2) 709 | assert z_tilde_array2.shape == (np.sum(nGatedAisMeasurements), nRadarMeasurements, radar_meas_dim) 710 | 711 | nis_array2 = (kalman.normalizedInnovationSquared(z_tilde_array2, S_inv_list2)) 712 | # print("nis_array2", nis_array2.shape, "\n", nis_array2) 713 | assert nis_array2.shape == (np.sum(nGatedAisMeasurements), nRadarMeasurements) 714 | 715 | gated_nis_array2 = nis_array2 <= self.eta2 716 | # print("gated_nis_array2", gated_nis_array2.shape, "\n", np.asarray(gated_nis_array2, dtype=np.int)) 717 | assert gated_nis_array2.shape == (np.sum(nGatedAisMeasurements), nRadarMeasurements) 718 | 719 | gated_radar_indices = np.nonzero(gated_nis_array2) 720 | assert len(gated_radar_indices[0]) == len(gated_radar_indices[1]) 721 | print("gated_radar_indices", gated_radar_indices) 722 | nGatedRadarMeasurements = len(gated_radar_indices[0]) 723 | print("nGatedRadarMeasurements", nGatedRadarMeasurements) 724 | print("AIS indices", gated_radar_indices[0], 725 | "(", gated_ais_indices[1][gated_radar_indices[0]], ")", 726 | "[", gated_ais_indices[0][gated_radar_indices[0]], "]", 727 | "are connected with 
radar indices", gated_radar_indices[1], "respectively") 728 | nllr2_list = kalman.nllr(self.lambda_ex, self.default_P_d, S_list2, nis_array2[gated_radar_indices]) 729 | print("nllr2_list", nllr2_list) 730 | assert nllr2_list.ndim == 1 731 | assert nllr2_list.size == nGatedRadarMeasurements 732 | 733 | if nGatedRadarMeasurements > 0: 734 | x_hat2 = (x_bar2[gated_radar_indices[0]] + 735 | np.matmul(K_list2[gated_radar_indices[0]], 736 | z_tilde_array2[gated_radar_indices].T)[0].T) 737 | print("x_hat2", x_hat2.shape, "\n", x_hat2) 738 | assert x_hat2.shape == (nGatedRadarMeasurements, state_dim) 739 | P_hat2 = P_hat_list2[gated_radar_indices[0]] 740 | assert P_hat2.shape == (x_hat2.shape[0], state_dim, state_dim) 741 | # print("P_hat2", P_hat2.shape, "\n", P_hat2) 742 | 743 | fused_node_indices = [np.where(gated_ais_indices[0] == i for i in gated_radar_indices[0])] 744 | print("fused_node_indices", fused_node_indices) 745 | 746 | continue 747 | for nodeIndex in range(nNodes): 748 | # nodeAisIndices = np.where(gated_ais_indices[0]==nodeIndex)[0] 749 | # if len(nodeAisIndices) == 0: 750 | # continue 751 | # print("nodeAisIndices",nodeAisIndices) 752 | # nodeGatedAisIndices = gated_ais_indices[1][nodeAisIndices] 753 | # print("nodeGatedAisIndices",nodeGatedAisIndices) 754 | if nGatedRadarMeasurements > 0: 755 | for nodeIndex in gated_ais_indices[0]: 756 | fused_x_hat_list[nodeIndex] = x_hat2 757 | fused_P_hat_list[nodeIndex] = P_hat2 758 | fused_radar_indices_list[nodeIndex] = gated_radar_indices[1] 759 | fused_nllr_list[nodeIndex] = nllr2_list 760 | fused_mmsi_list[nodeIndex] = mmsi_array[gated_radar_indices[0]] 761 | else: 762 | print("No radar measurement inside AIS gates. 
    def __processMeasurements(self, targetNodes, measurementList, dummyNodesData, C, R):
        """Gate a measurement list against every leaf node in bulk.

        For each node, a bulk Kalman precalculation yields the predicted
        measurement z_hat and innovation covariance S. Measurements whose
        normalized innovation squared (NIS) lies inside the eta2 gate are
        kept, filtered state estimates are computed for them, and an NLLR
        score is attached per gated measurement.

        Returns None when measurementList is None, otherwise the tuple
        (gated_indices_list, gated_z_tilde_list, gated_x_hat_list,
        P_hat_list, S_list, gated_nis_array, nllr_list), each indexed
        per node.
        """
        if measurementList is None:
            return None
        nNodes = len(targetNodes)
        nMeas = len(measurementList.measurements)
        meas_dim = C.shape[0]  # rows of the measurement matrix = measurement dimension
        x_bar_list, P_bar_list = dummyNodesData

        # Bulk precalculation: (z_hat, S, S^-1, K, P_hat) for all nodes at once.
        nodesPredictionData = self.__predictPrecalcBulk(targetNodes, C, R, dummyNodesData)

        (z_hat_list,
         S_list,
         S_inv_list,
         K_list,
         P_hat_list) = nodesPredictionData

        # Measurement array; assumed shape (nMeas, meas_dim) -- the assert
        # below only pins the second axis.
        z_list = measurementList.getMeasurements()
        assert z_list.shape[1] == meas_dim

        # Innovations z - z_hat for every (node, measurement) pair.
        z_tilde_list = kalman.z_tilde(z_list, z_hat_list, nNodes, meas_dim)
        assert z_tilde_list.shape == (nNodes, nMeas, meas_dim)

        # NIS = z_tilde^T S^-1 z_tilde, per (node, measurement).
        nis = kalman.normalizedInnovationSquared(z_tilde_list, S_inv_list)
        assert nis.shape == (nNodes, nMeas,)

        # Ellipsoidal gate: keep measurements with NIS inside eta2.
        gated_filter = nis <= self.eta2
        assert gated_filter.shape == (nNodes, nMeas)

        # Per node: indices of the measurements that passed the gate.
        gated_indices_list = [np.nonzero(gated_filter[row])[0]
                              for row in range(nNodes)]
        assert len(gated_indices_list) == nNodes

        # Per node: the corresponding innovations only.
        gated_z_tilde_list = [z_tilde_list[i, gated_indices_list[i]]
                              for i in range(nNodes)]
        assert len(gated_z_tilde_list) == nNodes
        assert all([z_tilde.shape[1] == meas_dim for z_tilde in gated_z_tilde_list])

        # Filtered state estimates x_hat = x_bar + K z_tilde for gated measurements.
        gated_x_hat_list = [kalman.numpyFilter(
            x_bar_list[i], K_list[i], gated_z_tilde_list[i])
            for i in range(nNodes)]
        assert len(gated_x_hat_list) == nNodes

        # Negative log-likelihood ratio score per gated measurement,
        # using each node's own detection probability P_d.
        nllr_list = [kalman.nllr(self.lambda_ex,
                                 targetNodes[i].P_d,
                                 S_list[i],
                                 nis[i, gated_filter[i]])
                     for i in range(nNodes)]
        assert len(nllr_list) == nNodes

        return (gated_indices_list,
                gated_z_tilde_list,
                gated_x_hat_list,
                P_hat_list,
                S_list,
                np.array(nis[gated_filter], ndmin=2),
                nllr_list)
P_bar_list = kalman.predict( 872 | self.A, self.Q, x_0_list, P_0_list) 873 | return x_bar_list, P_bar_list 874 | 875 | def __predictPrecalcBulk(self, targetNodes, C, R, dummyNodesData): 876 | nNodes = len(targetNodes) 877 | measDim, nStates = C.shape 878 | x_bar_list, P_bar_list = dummyNodesData 879 | 880 | z_hat_list, S_list, S_inv_list, K_list, P_hat_list = kalman.precalc( 881 | C, R, x_bar_list, P_bar_list) 882 | 883 | assert S_list.shape == (nNodes, measDim, measDim) 884 | assert S_inv_list.shape == (nNodes, measDim, measDim) 885 | assert K_list.shape == (nNodes, nStates, measDim) 886 | assert P_hat_list.shape == P_bar_list.shape 887 | assert z_hat_list.shape == (nNodes, measDim) 888 | 889 | return z_hat_list, S_list, S_inv_list, K_list, P_hat_list 890 | 891 | def __analyzeTrackTermination(self): 892 | deadTracks = [] 893 | for trackIndex, trackNode in enumerate(self.__trackNodes__): 894 | # Check outside radarRange 895 | if trackNode.isOutsideRange(self.position, self.radarRange): 896 | trackNode.status = outofrangeTag 897 | deadTracks.append(trackIndex) 898 | log.info("Terminating track {0:} at {1:} since it is out of radarRange".format( 899 | trackIndex, np.array_str(self.__trackNodes__[trackIndex].x_0[0:2]))) 900 | 901 | # Check if track is to insecure 902 | elif trackNode.getScore() / (self.N + 1) > self.scoreUpperLimit: 903 | trackNode.status = toolowscoreTag 904 | deadTracks.append(trackIndex) 905 | log.info("Terminating track {0:} at {1:} since its score is above the threshold ({2:.1f}>{3:.1f})".format( 906 | trackIndex, np.array_str(self.__trackNodes__[trackIndex].x_0[0:2]), 907 | trackNode.getScore() / (self.N + 1), self.scoreUpperLimit)) 908 | elif trackNode.cumulativeNLLR > self.clnnrUpperLimit: 909 | trackNode.status = toolowscoreTag 910 | deadTracks.append(trackIndex) 911 | log.info( 912 | "Terminating track {0:} at {1:} since its CNNLR is above the threshold ({2:.1f}>{3:.1f})".format( 913 | trackIndex, np.array_str( 914 | 
self.__trackNodes__[trackIndex].x_0[0:2]), 915 | trackNode.cumulativeNLLR, self.clnnrUpperLimit)) 916 | return deadTracks 917 | 918 | def __dynamicWindow(self, targetProcessTimes): 919 | totalGrowTime = sum(targetProcessTimes) 920 | tooSlowTotal = totalGrowTime > self.totalGrowTimeLimit 921 | targetProcessTimeLimit = (self.totalGrowTimeLimit / len(self.__targetList__) 922 | if tooSlowTotal else self.nodeGrowTimeLimit) 923 | for targetIndex, target in enumerate(self.__targetList__): 924 | targetProcessTime = targetProcessTimes[targetIndex] 925 | targetSize = target.getNumOfNodes() 926 | tooSlow = targetProcessTime > targetProcessTimeLimit 927 | tooLarge = targetSize > self.targetSizeLimit 928 | if tooSlow or tooLarge: 929 | target = self.__targetList__[targetIndex] 930 | targetDepth = target.depth() 931 | assert targetDepth <= self.__targetWindowSize__[targetIndex] + 1 932 | infoString = "\tTarget {:2} ".format(targetIndex + 1) 933 | if tooSlow: 934 | infoString += "Too slow {:.1f}ms. ".format(targetProcessTime * 1000) 935 | if tooLarge: 936 | infoString += "To large {:}. 
".format(targetSize) 937 | oldN = self.__targetWindowSize__[targetIndex] 938 | self.__targetWindowSize__[targetIndex] -= 1 939 | newN = self.__targetWindowSize__[targetIndex] 940 | infoString += "Reducing window from {0:} to {1:}".format(oldN, newN) 941 | log.debug(infoString) 942 | 943 | tempTotalTime = time.time() - self.tic['Total'] 944 | if tempTotalTime > (self.radarPeriod * 0.8): 945 | self.N = max(1, self.N - 1) 946 | log.warning( 947 | 'Iteration took to long time ({0:.1f}ms), reducing window size roof from {1:} to {2:}'.format( 948 | tempTotalTime * 1000, self.N + 1, self.N)) 949 | self.__targetWindowSize__ = [min(e, self.N) 950 | for e in self.__targetWindowSize__] 951 | 952 | def _compareTracksWithTruth(self, xTrue): 953 | return [(target.filteredStateMean - xTrue[targetIndex].state).T.dot( 954 | np.linalg.inv(target.filteredStateCovariance)).dot( 955 | (target.filteredStateMean - xTrue[targetIndex].state)) 956 | for targetIndex, target in enumerate(self.__trackNodes__)] 957 | 958 | def getRuntimeAverage(self): 959 | return {k: np.mean(np.array(v)) for k, v in self.runtimeLog.items()} 960 | 961 | def _findClustersFromSets(self): 962 | self.superSet = set() 963 | for targetSet in self.__associatedMeasurements__: 964 | self.superSet |= targetSet 965 | nTargets = len(self.__associatedMeasurements__) 966 | nNodes = nTargets + len(self.superSet) 967 | adjacencyMatrix = np.zeros((nNodes, nNodes), dtype=bool) 968 | for targetIndex, targetSet in enumerate(self.__associatedMeasurements__): 969 | for measurementIndex, measurement in enumerate(self.superSet): 970 | adjacencyMatrix[targetIndex, measurementIndex + 971 | nTargets] = (measurement in targetSet) 972 | (nClusters, labels) = connected_components(adjacencyMatrix) 973 | return [np.where(labels[:nTargets] == clusterIndex)[0] 974 | for clusterIndex in range(nClusters)] 975 | 976 | def getTrackNodes(self): 977 | return self.__trackNodes__ 978 | 979 | def _solveOptimumAssociation(self, cluster): 980 | 
log.debug("Cluster {0:} Sum = {1:}".format(cluster, len(cluster))) 981 | nHypInClusterArray = self._getHypInCluster(cluster) 982 | log.debug("nHypInClusterArray {0:} => Sum = {1:}".format( 983 | nHypInClusterArray, sum(nHypInClusterArray))) 984 | 985 | for i in cluster: 986 | log.debug("AssociatedMeasurements[{0:}] {1:}".format( 987 | i, self.__associatedMeasurements__[i])) 988 | uniqueMeasurementSet = set.union( 989 | *[self.__associatedMeasurements__[i] for i in cluster]) 990 | nRealMeasurementsInCluster = len(uniqueMeasurementSet) 991 | log.debug("Cluster Measurement set: {0:} Sum={1:}".format( 992 | uniqueMeasurementSet, nRealMeasurementsInCluster)) 993 | 994 | (A1, measurementList) = self._createA1( 995 | nRealMeasurementsInCluster, sum(nHypInClusterArray), cluster) 996 | log.debug("Difference: {:}".format( 997 | uniqueMeasurementSet.symmetric_difference(set(measurementList)))) 998 | 999 | assert len(measurementList) == A1.shape[0], str( 1000 | len(measurementList)) + " vs " + str(A1.shape[0]) 1001 | 1002 | assert len(measurementList) == nRealMeasurementsInCluster 1003 | A2 = self._createA2(len(cluster), nHypInClusterArray) 1004 | log.debug("A2 \n" + np.array_str(A2.astype(np.int), max_line_width=200)) 1005 | C = self._createC(cluster) 1006 | log.debug("C =" + np.array_str(np.array(C), precision=1)) 1007 | 1008 | log.debug("Solving optimal association in cluster with targets" + 1009 | str(cluster) + ", \t" + 1010 | str(sum(nHypInClusterArray)) + " hypotheses and " + 1011 | str(nRealMeasurementsInCluster) + " real measurements.") 1012 | selectedHypotheses = self._solveBLP_OR_TOOLS(A1, A2, C) 1013 | log.debug("selectedHypotheses" + str(selectedHypotheses)) 1014 | selectedNodes = self._hypotheses2Nodes(selectedHypotheses, cluster) 1015 | selectedNodesArray = np.array(selectedNodes) 1016 | 1017 | assert len(selectedHypotheses) == len(cluster), \ 1018 | "__solveOptimumAssociation did not find the correct number of hypotheses" 1019 | assert len(selectedNodes) == 
len(cluster), \ 1020 | "did not find the correct number of nodes" 1021 | assert len(selectedHypotheses) == len(set(selectedHypotheses)), \ 1022 | "selected two or more equal hypotheses" 1023 | assert len(selectedNodes) == len(set(selectedNodes)), \ 1024 | "found same node in more than one track in selectedNodes" 1025 | assert len(selectedNodesArray) == len(set(selectedNodesArray)), \ 1026 | "found same node in more than one track in selectedNodesArray" 1027 | return selectedNodesArray 1028 | 1029 | def _getHypInCluster(self, cluster): 1030 | def nLeafNodes(target): 1031 | if target.trackHypotheses is None: 1032 | return 1 1033 | else: 1034 | return sum(nLeafNodes(hyp) for hyp in target.trackHypotheses) 1035 | 1036 | nHypInClusterArray = np.zeros(len(cluster), dtype=int) 1037 | for i, targetIndex in enumerate(cluster): 1038 | nHypInTarget = nLeafNodes(self.__targetList__[targetIndex]) 1039 | nHypInClusterArray[i] = nHypInTarget 1040 | return nHypInClusterArray 1041 | 1042 | def _createA1(self, nRow, nCol, cluster): 1043 | def recActiveMeasurement(target, A1, measurementList, 1044 | activeMeasurements, hypothesisIndex): 1045 | if target.trackHypotheses is None: # leaf node 1046 | 1047 | if ((target.measurementNumber is not None) and 1048 | (target.measurementNumber != 0)): # we are at a real measurement 1049 | radarMeasurement = (target.scanNumber, target.measurementNumber) 1050 | try: 1051 | radarMeasurementIndex = measurementList.index(radarMeasurement) 1052 | except ValueError: 1053 | measurementList.append(radarMeasurement) 1054 | radarMeasurementIndex = len(measurementList) - 1 1055 | activeMeasurements[radarMeasurementIndex] = True 1056 | 1057 | if target.mmsi is not None: 1058 | aisMeasurement = (target.scanNumber, target.mmsi) 1059 | try: 1060 | aisMeasurementIndex = measurementList.index(aisMeasurement) 1061 | except ValueError: 1062 | measurementList.append(aisMeasurement) 1063 | aisMeasurementIndex = len(measurementList) - 1 1064 | 
activeMeasurements[aisMeasurementIndex] = True 1065 | 1066 | A1[activeMeasurements, hypothesisIndex[0]] = True 1067 | hypothesisIndex[0] += 1 1068 | 1069 | else: 1070 | for hyp in target.trackHypotheses: 1071 | activeMeasurementsCpy = activeMeasurements.copy() 1072 | if ((hyp.measurementNumber is not None) and 1073 | (hyp.measurementNumber != 0)): 1074 | radarMeasurement = (hyp.scanNumber, hyp.measurementNumber) 1075 | try: 1076 | radarMeasurementIndex = measurementList.index( 1077 | radarMeasurement) 1078 | except ValueError: 1079 | measurementList.append(radarMeasurement) 1080 | radarMeasurementIndex = len(measurementList) - 1 1081 | activeMeasurementsCpy[radarMeasurementIndex] = True 1082 | 1083 | if hyp.mmsi is not None: 1084 | aisMeasurement = (hyp.scanNumber, hyp.mmsi) 1085 | try: 1086 | aisMeasurementIndex = measurementList.index(aisMeasurement) 1087 | except ValueError: 1088 | measurementList.append(aisMeasurement) 1089 | aisMeasurementIndex = len(measurementList) - 1 1090 | activeMeasurementsCpy[aisMeasurementIndex] = True 1091 | 1092 | recActiveMeasurement(hyp, A1, measurementList, 1093 | activeMeasurementsCpy, hypothesisIndex) 1094 | 1095 | A1 = np.zeros((nRow, nCol), dtype=bool) 1096 | activeMeasurements = np.zeros(nRow, dtype=bool) 1097 | measurementList = [] 1098 | hypothesisIndex = [0] 1099 | # TODO: 1100 | # http://stackoverflow.com/questions/15148496/python-passing-an-integer-by-reference 1101 | for targetIndex in cluster: 1102 | recActiveMeasurement(self.__targetList__[targetIndex], 1103 | A1, 1104 | measurementList, 1105 | activeMeasurements, 1106 | hypothesisIndex) 1107 | log.debug("measurementList" + str(measurementList) + 1108 | "Sum=" + str(len(measurementList))) 1109 | log.debug("size(A1) " + str(A1.shape)) 1110 | log.debug("A1 \n" + 'V: Measurements, LeafNodes ---->\n' + 1111 | np.array_str(A1.astype(np.int), max_line_width=200)) 1112 | # assert len(measurementList) == A1.shape[0], str(len(measurementList)) + " vs " + str(A1.shape[0]) 1113 
| return A1, measurementList 1114 | 1115 | def _createA2(self, nTargetsInCluster, nHypInClusterArray): 1116 | A2 = np.zeros((nTargetsInCluster, sum(nHypInClusterArray)), dtype=bool) 1117 | colOffset = 0 1118 | for rowIndex, nHyp in enumerate(nHypInClusterArray): 1119 | for colIndex in range(colOffset, colOffset + nHyp): 1120 | A2[rowIndex, colIndex] = True 1121 | colOffset += nHyp 1122 | return A2 1123 | 1124 | def _createC(self, cluster): 1125 | def getTargetScore(target, scoreArray): 1126 | if target.trackHypotheses is None: 1127 | scoreArray.append(target.getScore() / self.N) 1128 | else: 1129 | for hyp in target.trackHypotheses: 1130 | getTargetScore(hyp, scoreArray) 1131 | 1132 | scoreArray = [] 1133 | for targetIndex in cluster: 1134 | getTargetScore(self.__targetList__[targetIndex], scoreArray) 1135 | assert all(np.isfinite(scoreArray)), str(scoreArray) 1136 | return scoreArray 1137 | 1138 | def _hypotheses2Nodes(self, selectedHypotheses, cluster): 1139 | def recDFS(target, selectedHypothesis, nodeList, counter): 1140 | if target.trackHypotheses is None: 1141 | if counter[0] in selectedHypotheses: 1142 | nodeList.append(target) 1143 | counter[0] += 1 1144 | else: 1145 | for hyp in target.trackHypotheses: 1146 | recDFS(hyp, selectedHypotheses, nodeList, counter) 1147 | 1148 | nodeList = [] 1149 | counter = [0] 1150 | for targetIndex in cluster: 1151 | recDFS(self.__targetList__[targetIndex], 1152 | selectedHypotheses, nodeList, counter) 1153 | return nodeList 1154 | 1155 | def _solveBLP_OR_TOOLS(self, A1, A2, f): 1156 | 1157 | tic0 = time.time() 1158 | nScores = len(f) 1159 | (nMeas, nHyp) = A1.shape 1160 | (nTargets, _) = A2.shape 1161 | 1162 | # Check matrix and vector dimension 1163 | assert nScores == nHyp 1164 | assert A1.shape[1] == A2.shape[1] 1165 | 1166 | # Initiate solver 1167 | solver = pywraplp.Solver( 1168 | 'MHT-solver', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING) 1169 | 1170 | # Declare optimization variables 1171 | tau = {i: 
solver.BoolVar("tau" + str(i)) for i in range(nHyp)} 1172 | # tau = [solver.BoolVar("tau" + str(i)) for i in range(nHyp)] 1173 | 1174 | # Set objective 1175 | solver.Minimize(solver.Sum([f[i] * tau[i] for i in range(nHyp)])) 1176 | 1177 | # <<< Problem child >>> 1178 | tempMatrix = [[A1[row, col] * tau[col] for col in range(nHyp) if A1[row, col]] 1179 | for row in range(nMeas)] 1180 | # <<< Problem child >>> 1181 | toc0 = time.time() - tic0 1182 | 1183 | def setConstaints(solver, nMeas, nTargets, nHyp, tempMatrix, A2): 1184 | for row in range(nMeas): 1185 | constraint = (solver.Sum(tempMatrix[row]) <= 1) 1186 | solver.Add(constraint) 1187 | 1188 | for row in range(nTargets): 1189 | solver.Add(solver.Sum([A2[row, col] * tau[col] 1190 | for col in range(nHyp) if A2[row, col]]) == 1) 1191 | 1192 | tic1 = time.time() 1193 | setConstaints(solver, nMeas, nTargets, nHyp, tempMatrix, A2) 1194 | toc1 = time.time() - tic1 1195 | 1196 | tic2 = time.time() 1197 | # Solving optimization problem 1198 | result_status = solver.Solve() 1199 | log.debug("Optim Time = " + str(solver.WallTime()) + " milliseconds") 1200 | 1201 | if result_status == pywraplp.Solver.OPTIMAL: 1202 | log.debug("Optim result optimal") 1203 | else: 1204 | log.warning("Optim result NOT optimal") 1205 | 1206 | toc2 = time.time() - tic2 1207 | 1208 | tic3 = time.time() 1209 | selectedHypotheses = [i for i in range(nHyp) 1210 | if tau[i].solution_value() > 0.] 
    def _checkTrackerIntegrity(self):
        """Assert the structural invariants of the tracker state.

        Verifies track/target list consistency, per-target scan-number and
        parent-reference integrity, leaf-node scan numbers and scores, and
        that no MMSI is assigned to more than one active track. Raises
        AssertionError on the first violated invariant.
        """
        log.debug("Checking tracker integrity")
        # Track nodes and targets must correspond one-to-one.
        assert len(self.__trackNodes__) == len(self.__targetList__), \
            "There are not the same number trackNodes as targets"
        assert len(self.__targetList__) == len(set(self.__targetList__)), \
            "There are copies of targets in the target list"
        assert len(self.__trackNodes__) == len(set(self.__trackNodes__)), \
            "There are copies of track nodes in __trackNodes__"
        for target in self.__targetList__:
            target._checkScanNumberIntegrity()
            target._checkReferenceIntegrity()
        # All current track nodes must belong to the same scan.
        if len(self.__trackNodes__) > 0:
            assert len({node.scanNumber for node in self.__trackNodes__}) == 1, \
                "there are inconsistency in trackNodes scanNumber"
        scanNumber = len(self.__scanHistory__)
        # Every leaf hypothesis must sit at the latest scan and carry finite scores.
        for targetIndex, target in enumerate(self.__targetList__):
            leafNodes = target.getLeafNodes()
            for leafNode in leafNodes:
                assert leafNode.scanNumber == scanNumber, \
                    "{0:} != {1:} @ TargetNumber {2:}".format(leafNode.scanNumber,
                                                              scanNumber,
                                                              targetIndex + 1)
                leafNode._checkMmsiIntegrity()
                assert np.isfinite(leafNode.getScore())
                assert np.isfinite(leafNode.cumulativeNLLR)
        # An MMSI may be claimed by at most one active track.
        activeMmsiList = [target.mmsi
                          for target in self.__trackNodes__
                          if target.mmsi is not None]
        activeMmsiSet = set(activeMmsiList)
        assert len(activeMmsiList) == len(
            activeMmsiSet), "One or more MMSI is used multiple times"
**kwargs) 1299 | else: 1300 | for hyp in target.trackHypotheses: 1301 | recPlotHypothesesTrack(ax, hyp, newTrack, **kwargs) 1302 | 1303 | colors = kwargs.get("colors", self._getColorCycle()) 1304 | for target in self.__targetList__: 1305 | recPlotHypothesesTrack(ax, target, c=next(colors)) 1306 | if kwargs.get('markStates', False): 1307 | defaults = {'dummy': True, 'real': True, 'ais': True, 1308 | 'includeHistory': False, 'color': 'red'} 1309 | self.plotStatesFromRoot(ax, **{**defaults, **kwargs}) 1310 | 1311 | def plotActiveTracks(self, ax=plt.gca(), **kwargs): 1312 | colors = kwargs.get("colors", self._getColorCycle()) 1313 | for i, track in enumerate(self.__trackNodes__): 1314 | track.plotTrack(ax, root=self.__targetList__[i], c=next( 1315 | colors), period=self.radarPeriod, **kwargs) 1316 | if kwargs.get('markStates', True): 1317 | defaults = {'labels': False, 'dummy': True, 1318 | 'real': True, 'ais': True, 'color': 'red'} 1319 | self.plotStatesFromTracks(ax, **{**defaults, **kwargs}) 1320 | 1321 | def plotTerminatedTracks(self, ax=plt.gca(), **kwargs): 1322 | colors = kwargs.get("colors", self._getColorCycle()) 1323 | for track in self.__terminatedTargets__: 1324 | defaults = {'c': next(colors), 'markInitial': True, 1325 | 'markEnd': True, 'terminated': True} 1326 | track.plotTrack(ax, **{**defaults, **kwargs}) 1327 | if kwargs.get('markStates', False): 1328 | defaults = {'labels': False, 'dummy': True, 'real': True, 'ais': True} 1329 | track.plotStates(ax, float('inf'), **{**defaults, **kwargs}) 1330 | 1331 | def plotMeasurementsFromTracks(self, ax=plt.gca(), stepsBack=float('inf'), **kwargs): 1332 | for node in self.__trackNodes__: 1333 | node.plotMeasurement(ax, stepsBack, **kwargs) 1334 | 1335 | def plotStatesFromTracks(self, ax=plt.gca(), stepsBack=float('inf'), **kwargs): 1336 | for node in self.__trackNodes__: 1337 | node.plotStates(ax, stepsBack, **kwargs) 1338 | 1339 | def plotMeasurementsFromRoot(self, ax=plt.gca(), **kwargs): 1340 | if not 
(("real" in kwargs) or ("dummy" in kwargs) or ("ais" in kwargs)): 1341 | return 1342 | plottedMeasurements = set() 1343 | for target in self.__targetList__: 1344 | if kwargs.get("includeHistory", False): 1345 | target.getInitial().recDownPlotMeasurements(plottedMeasurements, ax, **kwargs) 1346 | else: 1347 | for hyp in target.trackHypotheses: 1348 | hyp.recDownPlotMeasurements(plottedMeasurements, ax, **kwargs) 1349 | 1350 | def plotStatesFromRoot(self, ax=plt.gca(), **kwargs): 1351 | if not (("real" in kwargs) or ("dummy" in kwargs) or ("ais" in kwargs)): 1352 | return 1353 | for target in self.__targetList__: 1354 | if kwargs.get("includeHistory", False): 1355 | target.getInitial().recDownPlotStates(ax, **kwargs) 1356 | elif target.trackHypotheses is not None: 1357 | for hyp in target.trackHypotheses: 1358 | hyp.recDownPlotStates(ax, **kwargs) 1359 | 1360 | def plotScanIndex(self, index, ax=plt.gca(), **kwargs): 1361 | self.__scanHistory__[index].plot(ax, **kwargs) 1362 | 1363 | def plotLastScan(self, ax=plt.gca(), **kwargs): 1364 | self.__scanHistory__[-1].plot(ax, **kwargs) 1365 | 1366 | def plotLastAisUpdate(self, ax=plt.gca(), **kwargs): 1367 | if self.__aisHistory__[-1] is not None: 1368 | self.__aisHistory__[-1].plot(ax, **kwargs) 1369 | 1370 | def plotAllScans(self, ax=plt.gca(), stepsBack=None, **kwargs): 1371 | if stepsBack is not None: 1372 | stepsBack = -(stepsBack + 1) 1373 | for scan in self.__scanHistory__[:stepsBack:-1]: 1374 | scan.plot(ax, **kwargs) 1375 | 1376 | def plotAllAisUpdates(self, ax=plt.gca(), stepsBack=None, **kwargs): 1377 | if stepsBack is not None: 1378 | stepsBack = -(stepsBack + 1) 1379 | for update in self.__aisHistory__[:stepsBack:-1]: 1380 | if update is not None: 1381 | update.plot(ax, markeredgewidth=2, **kwargs) 1382 | 1383 | def plotVelocityArrowForTrack(self, stepsBack=1): 1384 | for track in self.__trackNodes__: 1385 | track.plotVelocityArrow(stepsBack) 1386 | 1387 | def plotInitialTargets(self, **kwargs): 1388 | 
initialTargets = [target.getInitial() for target in self.__targetList__] 1389 | fig = plt.gcf() 1390 | size = fig.get_size_inches() * fig.dpi 1391 | for i, initialTarget in enumerate(initialTargets): 1392 | index = kwargs.get("index", list(range(len(initialTargets)))) 1393 | offset = 0.05 * size 1394 | if len(index) != len(initialTargets): 1395 | raise ValueError( 1396 | "plotInitialTargets: Need equal number of targets and indices") 1397 | initialTarget.markInitial(index=index[i], offset=offset) 1398 | 1399 | def _getColorCycle(self): 1400 | return itertools.cycle(self.colors) 1401 | 1402 | def printTargetList(self, **kwargs): 1403 | np.set_printoptions(precision=2, suppress=True) 1404 | print("TargetList:") 1405 | for targetIndex, target in enumerate(self.__targetList__): 1406 | if kwargs.get("backtrack", False): 1407 | print(target.stepBack().__str__(targetIndex=targetIndex)) 1408 | else: 1409 | print(target.__str__(targetIndex=targetIndex)) 1410 | print() 1411 | 1412 | def getTimeLogHeader(self): 1413 | return ('{:3} '.format("Nr") + 1414 | '{:9} '.format("Num Targets") + 1415 | '{:12} '.format("Iteration (ms)") + 1416 | '({0:23} '.format("(nMeasurements + nAisUpdates / nNodes) Process time ms") + 1417 | '({0:2}) {1:5}'.format("nClusters", 'Cluster') + 1418 | '({0:3}) {1:6}'.format("nOptimSolved", 'Optim') + 1419 | '{:4}'.format('DynN') + 1420 | '{:5}'.format('N-Prune') + 1421 | '{:3}'.format('Terminate') + 1422 | '{:5}'.format('Init') 1423 | ) 1424 | 1425 | def getTimeLogString(self): 1426 | tocMS = {k: v * 1000 for k, v in self.toc.items()} 1427 | totalTime = tocMS['Total'] 1428 | nNodes = sum([target.getNumOfNodes() for target in self.__targetList__]) 1429 | nMeasurements = len(self.__scanHistory__[-1].measurements) 1430 | nAisUpdates = len( 1431 | self.__aisHistory__[-1]) if self.__aisHistory__[-1] is not None else 0 1432 | scanNumber = len(self.__scanHistory__) 1433 | nTargets = len(self.__targetList__) 1434 | nClusters = len(self.__clusterList__) 1435 | 
timeLogString = ('{:<3.0f} '.format(scanNumber) + 1436 | 'nTrack {:2.0f} '.format(nTargets) + 1437 | 'Total {0:6.0f} '.format(totalTime) + 1438 | 'Process({0:4.0f}+{1:<3.0f}/{2:6.0f}) {3:6.1f} '.format( 1439 | nMeasurements, nAisUpdates, nNodes, tocMS['Process']) + 1440 | 'Cluster({0:2.0f}) {1:5.1f} '.format(nClusters, tocMS['Cluster']) + 1441 | 'Optim({0:g}) {1:6.1f} '.format(self.nOptimSolved, tocMS['Optim']) + 1442 | # 'ILP-Prune {:5.0f}'.format(self.toc['ILP-Prune']) + " " + 1443 | 'DynN {:4.1f} '.format(tocMS['DynN']) + 1444 | 'N-Prune {:5.1f} '.format(tocMS['N-Prune']) + 1445 | 'Kill {:3.1f} '.format(tocMS['Terminate']) + 1446 | 'Init {:5.1f}'.format(tocMS['Init'])) 1447 | return timeLogString 1448 | 1449 | def printTimeLog(self, **kwargs): 1450 | tooLongWarning = self.toc['Total'] > self.radarPeriod * 0.6 1451 | tooLongCritical = self.toc['Total'] > self.radarPeriod 1452 | on_color = 'on_green' 1453 | on_color = 'on_yellow' if tooLongWarning else on_color 1454 | on_color = 'on_red' if tooLongCritical else on_color 1455 | on_color = kwargs.get('on_color', on_color) 1456 | attrs = ['dark'] 1457 | attrs = attrs.append('bold') if tooLongWarning else attrs 1458 | cprint(self.getTimeLogString(), 1459 | on_color=on_color, 1460 | attrs=attrs 1461 | ) 1462 | 1463 | def printTimeLogHeader(self): 1464 | print(self.getTimeLogHeader()) 1465 | 1466 | def printClusterList(clusterList): 1467 | print("Clusters:") 1468 | for clusterIndex, cluster in enumerate(clusterList): 1469 | print("Cluster ", clusterIndex, " contains target(s):\t", cluster, 1470 | sep="", end="\n") 1471 | 1472 | def getScenarioElement(self, **kwargs): 1473 | return ET.Element(scenarioTag) 1474 | 1475 | def _storeTrackerArgs(self, scenarioElement, **kwargs): 1476 | for k, v in kwargs.items(): 1477 | scenarioElement.attrib[str(k)] = str(v) 1478 | 1479 | trackerSettingElement = ET.SubElement(scenarioElement, trackerSettingsTag) 1480 | ET.SubElement(trackerSettingElement, 'M_required').text = 
str(self.M_required) 1481 | ET.SubElement(trackerSettingElement, 'N_checks').text = str(self.N_checks) 1482 | ET.SubElement(trackerSettingElement, 'mergeThreshold').text = str( 1483 | self.mergeThreshold) 1484 | ET.SubElement(trackerSettingElement, 'ownPosition').text = str(self.position) 1485 | ET.SubElement(trackerSettingElement, 'radarRange').text = str(self.radarRange) 1486 | ET.SubElement(trackerSettingElement, 'radarPeriod').text = str(self.radarPeriod) 1487 | ET.SubElement(trackerSettingElement, 'lambdaPhi').text = str(self.lambda_phi) 1488 | ET.SubElement(trackerSettingElement, 'lambdaNu').text = str(self.lambda_nu) 1489 | ET.SubElement(trackerSettingElement, 'lambdaEx').text = str(self.lambda_ex) 1490 | ET.SubElement(trackerSettingElement, 'eta2').text = str(self.eta2) 1491 | ET.SubElement(trackerSettingElement, 'N_max').text = str(self.N_max) 1492 | ET.SubElement(trackerSettingElement, 'NLLR_upperLimit').text = str( 1493 | self.scoreUpperLimit) 1494 | ET.SubElement(trackerSettingElement, 'pruneThreshold').text = str( 1495 | self.pruneThreshold) 1496 | ET.SubElement(trackerSettingElement, 'targetSizeLimit').text = str( 1497 | self.targetSizeLimit) 1498 | ET.SubElement(trackerSettingElement, 'maxSpeedMS').text = str(self.maxSpeedMS) 1499 | 1500 | def _storeRun(self, scenarioElement, preInitialized=True, **kwargs): 1501 | runElement = ET.SubElement(scenarioElement, runTag) 1502 | 1503 | if iterationTag in kwargs: 1504 | iteration = kwargs.get(iterationTag) 1505 | else: 1506 | iteration = len(scenarioElement.findall(runTag)) 1507 | runElement.attrib[iterationTag] = str(iteration) 1508 | 1509 | if seedTag in kwargs: 1510 | runElement.attrib[seedTag] = str(kwargs.get(seedTag)) 1511 | 1512 | runtimeElement = ET.SubElement(runElement, 1513 | runtimeTag, 1514 | attrib={descriptionTag: "Per iteration", 1515 | precisionTag: str(timeLogPrecision)}) 1516 | for k, v in self.runtimeLog.items(): 1517 | if not v: 1518 | continue 1519 | array = np.array(v) 1520 | mean = 
np.mean(array) 1521 | min = np.min(array) 1522 | max = np.max(array) 1523 | meanString = str(round(mean, timeLogPrecision)) 1524 | minString = str(round(min, timeLogPrecision)) 1525 | maxString = str(round(max, timeLogPrecision)) 1526 | ET.SubElement(runtimeElement, 1527 | str(k), 1528 | attrib={meanTag: meanString, 1529 | minTag: minString, 1530 | maxTag: maxString} 1531 | ).text = np.array_str(array, 1532 | precision=timeLogPrecision, 1533 | max_line_width=999999) 1534 | 1535 | for target in self.__trackNodes__: 1536 | if preInitialized: 1537 | target._storeNode(runElement, self.radarPeriod) 1538 | else: 1539 | target._storeNodeSparse(runElement) 1540 | 1541 | for target in self.__terminatedTargets__: 1542 | if preInitialized: 1543 | target._storeNode(runElement, self.radarPeriod, terminated=True) 1544 | else: 1545 | target._storeNodeSparse(runElement, terminated=True) 1546 | 1547 | if __name__ == '__main__': 1548 | pass 1549 | -------------------------------------------------------------------------------- /pymht/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/erikliland/pyMHT/58b2cd13a3fb38563a302c0f8380de3a5b0fccb1/pymht/utils/__init__.py -------------------------------------------------------------------------------- /pymht/utils/cFunctions.pyx: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | cimport numpy as np 3 | from libcpp cimport bool 4 | 5 | cpdef int binomial(int n, int k): 6 | return 1 if k == 0 else (0 if n == 0 else binomial(n - 1, k) + binomial(n - 1, k - 1)) 7 | 8 | cpdef double nllrNoMeasurement(double P_d): 9 | if P_d == 1: 10 | return -np.log(1e-6) 11 | return -np.log(1 - P_d) 12 | 13 | cpdef double nllr(double P_d, 14 | np.ndarray[np.double_t, ndim=1] measurementResidual, 15 | double lambda_ex, 16 | np.ndarray[np.double_t, ndim=2] covariance, 17 | np.ndarray[np.double_t, ndim=2] invCovariance): 18 | 
cpdef np.ndarray[np.double_t,ndim=2] cNewMeasurement(np.ndarray[np.double_t,ndim=2] measurements,
                                                     np.ndarray[np.double_t,ndim=1] z_hat,
                                                     np.ndarray[np.double_t,ndim=2] S,
                                                     np.ndarray[np.double_t,ndim=2] S_inv,
                                                     double eta2,
                                                     double P_d,
                                                     double lambda_ex):
    # Gate the measurements against the predicted measurement z_hat and build
    # the node matrix [index, residual(2), measurement(2), NLLR] for each
    # measurement whose normalized innovation squared (NIS) lies inside eta2.
    # Returns an empty 2D array when nothing falls inside the gate.
    cdef np.ndarray[np.double_t,ndim=2] measurementsResidual = measurements - z_hat
    # NIS per measurement: r^T * S_inv * r, computed row-wise without a loop
    cdef np.ndarray[np.double_t,ndim=1] nis = np.sum(measurementsResidual.dot(S_inv) * measurementsResidual, axis=1)
    cdef np.ndarray[np.int64_t,ndim=1, cast = True] gatedFilter = np.less_equal(nis,np.ones(measurements.shape[0])*eta2)
    cdef np.ndarray[np.int_t,ndim=1] gatedIndecies = np.where(gatedFilter)[0]
    cdef int nMeasurementInsideGate = gatedIndecies.shape[0]
    cdef np.ndarray[np.double_t,ndim=2] gatedMeasurements = measurements[gatedIndecies]
    cdef np.ndarray[np.double_t,ndim=2] gatedMeasurementResidual = np.array(measurementsResidual[gatedIndecies, :], ndmin=2)
    cdef np.ndarray cNLLR = np.zeros(nMeasurementInsideGate,dtype = np.double)
    cdef int i = 0
    for i in range(nMeasurementInsideGate):
        cNLLR[i] = nllr(P_d, gatedMeasurementResidual[i,:], lambda_ex, S, S_inv)
    if nMeasurementInsideGate > 0:
        # BUG FIX: removed leftover debug statement
        #     print(type(gatedIndecies))
        # which printed to stdout on every scan with gated measurements.
        return stackNodeMatrix(gatedIndecies,gatedMeasurementResidual, gatedMeasurements, cNLLR)
    else:
        return np.array([],ndmin=2)
class KalmanFilter():
    """
    A Kalman filter for linear systems of the form:

        x_{k+1} = A*x_k + Gamma*v_k
        y_k     = C*x_k + e_k
        v_k ~ N(0, Q)
        e_k ~ N(0, R)

    x_0 / P_0 are the initial state and state covariance.
    A and Q may be plain matrices or callables of the sampling period T
    (used when the period varies between scans).
    """

    def __init__(self, A, C, **kwargs):
        # Model matrices arrive as keyword arguments; Gamma defaults to the
        # identity, sized from Q (calling Q(1) when Q is a callable).
        Q = kwargs.get('Q')
        R = kwargs.get('R')
        x_0 = kwargs.get('x_0')
        P_0 = kwargs.get('P_0')
        T = kwargs.get('T')
        Gamma = kwargs.get(
            'Gamma',
            np.eye(Q(1).shape[0]) if callable(Q) else np.eye(Q.shape[0]))

        self.A = A  # Transition matrix (or callable of T)
        self.C = C  # Observation matrix
        self.Q = Q  # Process noise covariance (or callable of T)
        self.R = R  # Measurement noise covariance
        self.Gamma = Gamma  # Process noise "observability" (noise gain)
        self.x_hat = x_0  # Filtered state
        self.P_hat = P_0  # Filtered state covariance
        self.x_bar = None  # Predicted state
        self.P_bar = None  # Predicted state covariance
        self.T = T  # Sampling period (dynamic if None)
        self.z_hat = None  # Predicted measurement
        self.S = None  # Residual covariance
        self.S_inv = None  # Inverse residual covariance
        self.K = None  # Kalman gain
        self.precalculated = False  # True after _precalculateMeasurementUpdate

    def predict(self, **kwargs):
        """
        Compute the one-step prediction (x_bar, P_bar).

        kwargs:
            T     -- sampling period override (defaults to self.T)
            local -- if True, return the prediction WITHOUT committing it to
                     self.x_bar / self.P_bar; by default it is committed.
        """
        T = kwargs.get('T', self.T)
        A = self.A(T) if callable(self.A) else self.A
        Q = self.Q(T) if callable(self.Q) else self.Q
        x_bar = A.dot(self.x_hat)
        P_bar = A.dot(self.P_hat).dot(A.T) + self.Gamma.dot(Q.dot(self.Gamma.T))
        if not kwargs.get('local', False):
            self.x_bar = x_bar
            self.P_bar = P_bar
        return x_bar, P_bar

    def _precalculateMeasurementUpdate(self, T):
        # Precompute the measurement-update quantities (z_hat, S, S_inv, K)
        # from the current prediction, and mark them as valid.
        # NOTE(review): the T argument is unused here; presumably kept for
        # signature symmetry with predict() -- confirm before removing.
        self.z_hat = self.C.dot(self.x_bar)
        self.S = self.C.dot(self.P_bar).dot(self.C.T) + self.R
        self.S_inv = np.linalg.inv(self.S)
        self.K = self.P_bar.dot(self.C.T).dot(self.S_inv)
        self.precalculated = True

    def filter(self, **kwargs):
        """
        Run the measurement update and return (x_hat, P_hat).

        kwargs:
            y_tilde -- measurement residual (innovation), used directly
            y       -- raw measurement; the residual is formed as y - z_hat
            T       -- sampling period for the precalculation step
            local   -- if True, do NOT commit the result to
                       self.x_hat / self.P_hat; by default the update IS
                       committed to the internal state.

        If neither y_tilde nor y is given, the prediction itself becomes the
        filtered estimate (missed-detection update).
        The precalculated flag is consumed here: it is reset so the next call
        recomputes z_hat/S/S_inv/K from a fresh prediction.
        """
        if not self.precalculated:
            self._precalculateMeasurementUpdate(kwargs.get('T', self.T))
        self.precalculated = False

        if 'y_tilde' in kwargs:
            y_tilde = kwargs.get('y_tilde')
        elif 'y' in kwargs:
            z = kwargs.get('y')
            y_tilde = z - self.z_hat
        else:
            # No measurement available: fall back to the prediction.
            x_hat = self.x_bar
            P_hat = self.P_bar
            if not kwargs.get('local', False):
                self.x_hat = x_hat
                self.P_hat = P_hat
            return x_hat, P_hat

        x_hat = self.x_bar + self.K.dot(y_tilde)
        P_hat = self.P_bar - self.K.dot(self.C).dot(self.P_bar)
        if not kwargs.get('local', False):
            self.x_hat = x_hat
            self.P_hat = P_hat
        return x_hat, P_hat
import helpFunctions as hpf 11 | from .xmlDefinitions import * 12 | from ..models import pv, polar, ais 13 | log = logging.getLogger(__name__) 14 | 15 | 16 | class SimTarget: 17 | 18 | def __init__(self, state, time, P_d, sigma_Q, **kwargs): 19 | self.state = np.array(state, dtype=np.double) 20 | assert self.state.ndim == 1 21 | self.time = time 22 | self.P_d = P_d 23 | self.sigma_Q = sigma_Q 24 | self.mmsi = kwargs.get('mmsi') 25 | self.aisClass = kwargs.get('aisClass', 'B') 26 | self.timeOfLastAisMessage = kwargs.get('timeOfLastAisMessage', -float('inf')) 27 | self.P_r = kwargs.get('P_r', 1.) 28 | self.model = None 29 | 30 | def __eq__(self, other): 31 | if not np.array_equal(self.state, other.state): 32 | return False 33 | if self.time != other.time: 34 | return False 35 | if self.P_d != other.P_d: 36 | return False 37 | if self.mmsi != other.mmsi: 38 | return False 39 | if self.sigma_Q != other.sigma_Q: 40 | return False 41 | if self.P_r != other.P_r: 42 | return False 43 | return True 44 | 45 | def inRange(self, p0, rRange): 46 | distance = np.linalg.norm(self.state[0:2] - p0) 47 | return distance <= rRange 48 | 49 | def storeString(self): 50 | return ',{0:.2f},{1:.2f}'.format(*self.state[0:2]) 51 | 52 | def getXmlStateStringsCartesian(self, precision=2): 53 | raise NotImplementedError 54 | 55 | def getXmlStateStringsPolar(self, precision=2): 56 | raise NotImplementedError 57 | 58 | def position(self): 59 | return Position(self.state[0], self.state[1]) 60 | 61 | def velocity(self): 62 | raise NotImplementedError 63 | 64 | def speedMS(self): 65 | raise NotImplementedError 66 | 67 | def speedKnots(self): 68 | return self.speedMS() * 1.94384449 69 | 70 | def cartesianState(self): 71 | raise NotImplementedError 72 | 73 | def cartesianVelocity(self): 74 | raise NotImplementedError 75 | 76 | def polarState(self): 77 | raise NotImplementedError 78 | 79 | def calculateNextState(self, timeStep): 80 | raise NotImplementedError 81 | 82 | def positionWithNoise(self): 83 | 
raise NotImplementedError 84 | 85 | 86 | class SimTargetCartesian(SimTarget): 87 | 88 | def __init__(self, state, time, P_d, sigma_Q, **kwargs): 89 | SimTarget.__init__(self, state, time, P_d, sigma_Q, **kwargs) 90 | self.model = pv 91 | 92 | def __str__(self): 93 | timeString = datetime.datetime.fromtimestamp(self.time).strftime("%H:%M:%S.%f") 94 | mmsiString = 'MMSI: ' + str(self.mmsi) if self.mmsi is not None else "" 95 | return ('Time: ' + timeString + " " + 96 | 'Pos: ({0: 7.1f},{1: 7.1f})'.format(self.state[0], self.state[1]) + " " + 97 | 'Vel: ({0: 5.1f},{1: 5.1f})'.format(self.state[2], self.state[3]) + " " + 98 | 'Speed: {0:4.1f}m/s ({1:4.1f}knt)'.format(self.speedMS(), self.speedKnots()) + " " + 99 | 'Pd: {:3.0f}%'.format(self.P_d * 100.) + " " + 100 | mmsiString) 101 | 102 | def __copy__(self): 103 | return SimTargetCartesian(self.state, self.time, self.P_d, self.sigma_Q, 104 | mmsi=self.mmsi, aisClass=self.aisClass, timeOfLastAisMessage=self.timeOfLastAisMessage, 105 | P_r=self.P_r) 106 | 107 | __repr__ = __str__ 108 | 109 | def getXmlStateStringsCartesian(self, precision=2): 110 | return (str(round(self.state[0], precision)), 111 | str(round(self.state[1], precision)), 112 | str(round(self.state[2], precision)), 113 | str(round(self.state[3], precision))) 114 | 115 | def position(self): 116 | return Position(self.state[0], self.state[1]) 117 | 118 | def velocity(self): 119 | return Velocity(self.state[2], self.state[3]) 120 | 121 | def speedMS(self): 122 | speed_ms = np.linalg.norm(self.state[2:4]) 123 | return speed_ms 124 | 125 | def cartesianState(self): 126 | return self.state 127 | 128 | def cartesianVelocity(self): 129 | return self.state[2:4] 130 | 131 | def calculateNextState(self, timeStep): 132 | Phi = self.model.Phi(timeStep) 133 | Q = self.model.Q(timeStep, self.sigma_Q) 134 | w = np.random.multivariate_normal(np.zeros(4), Q) 135 | nextState = Phi.dot(self.state) + w.T 136 | newVar = {'state': nextState, 'time': self.time + timeStep} 137 | 
return SimTargetCartesian(**{**self.__dict__, **newVar}) 138 | 139 | def positionWithNoise(self, **kwargs): 140 | # def positionWithNoise(state, H, R): 141 | sigma_R_scale = kwargs.get('sigma_R_scale', 1) 142 | R = self.model.R_RADAR(self.model.sigmaR_RADAR_true * sigma_R_scale) 143 | H = self.model.C_RADAR 144 | assert R.ndim == 2 145 | assert R.shape[0] == R.shape[1] 146 | assert H.shape[1] == self.state.shape[0] 147 | v = np.random.multivariate_normal(np.zeros(R.shape[0]), R) 148 | assert H.shape[0] == v.shape[0], str(self.state.shape) + str(v.shape) 149 | assert v.ndim == 1 150 | return H.dot(self.state) + v 151 | 152 | 153 | class SimTargetPolar(SimTarget): 154 | 155 | def __init__(self, state, time, P_d, sigma_Q, **kwargs): 156 | SimTarget.__init__(self, state, time, P_d, sigma_Q, **kwargs) 157 | self.model = polar 158 | self.headingChangeMean = kwargs.get('headingChangeMean') 159 | #state = east, north, heading (deg), speed 160 | 161 | def __str__(self): 162 | timeString = datetime.datetime.fromtimestamp(self.time).strftime("%H:%M:%S.%f") 163 | mmsiString = 'MMSI: ' + str(self.mmsi) if self.mmsi is not None else "" 164 | return ('Time: ' + timeString + " " + 165 | 'Pos: ({0: 7.1f},{1: 7.1f})'.format(self.state[0], self.state[1]) + " " + 166 | 'Hdg: {0:5.1f}{1:+3.1f} deg'.format(self.state[2], self.headingChangeMean) + " " + 167 | 'Speed: {0:4.1f}m/s ({1:4.1f}knt)'.format(self.speedMS(), self.speedKnots()) + " " + 168 | 'Pd: {:3.0f}%'.format(self.P_d * 100.) 
+ " " + 169 | mmsiString) 170 | 171 | def __copy__(self): 172 | return SimTargetPolar(self.state, self.time, self.P_d, self.sigma_Q, 173 | mmsi=self.mmsi, aisClass=self.aisClass, timeOfLastAisMessage=self.timeOfLastAisMessage, 174 | P_r=self.P_r, headingChangeMean=self.headingChangeMean) 175 | 176 | __repr__ = __str__ 177 | 178 | def getXmlStateStringsCartesian(self, precision=2): 179 | cartesianState = self.cartesianState() 180 | return (str(round(cartesianState[0], precision)), 181 | str(round(cartesianState[1], precision)), 182 | str(round(cartesianState[2], precision)), 183 | str(round(cartesianState[3], precision))) 184 | 185 | def getXmlStateStringsPolar(self, precision=2): 186 | return (str(round(self.state[0], precision)), 187 | str(round(self.state[1], precision)), 188 | str(round(self.state[2], precision)), 189 | str(round(self.state[3], precision))) 190 | 191 | @staticmethod 192 | def normalizeHeadingDeg(heading): 193 | return (heading + 360.) % 360. 194 | 195 | def cartesianState(self): 196 | pos = self.state[0:2] 197 | vel = self.cartesianVelocity() 198 | return np.hstack((pos, vel)) 199 | 200 | def cartesianVelocity(self): 201 | compassHeadingDeg = self.state[2] 202 | theta = math.radians(self.normalizeHeadingDeg(90. 
class Position:
    """A 2D position wrapping a numpy array [x, y].

    Construction forms: Position(x=..., y=...), Position(array_like) or
    Position(x, y). Arithmetic operators combine positions element-wise and
    return a new Position.
    """

    def __init__(self, *args, **kwargs):
        x = kwargs.get('x')
        y = kwargs.get('y')
        if (x is not None) and (y is not None):
            self.array = np.array([x, y])
        elif len(args) == 1:
            self.array = np.array(args[0])
        elif len(args) == 2:
            self.array = np.array([args[0], args[1]])
        else:
            raise ValueError("Invalid arguments to Position")

    def __str__(self):
        return 'Pos: ({0: 8.2f},{1: 8.2f})'.format(self.array[0], self.array[1])

    def __repr__(self):
        return '({0:.3e},{1:.3e})'.format(self.array[0], self.array[1])

    # BUG FIX: the arithmetic operators referenced the non-existent attribute
    # `other.position`; the stored attribute is `array`, so every operator
    # raised AttributeError.
    def __add__(self, other):
        return Position(self.array + other.array)

    def __sub__(self, other):
        return Position(self.array - other.array)

    def __mul__(self, other):
        return Position(self.array * other.array)

    def __div__(self, other):
        return Position(self.array / other.array)

    # BUG FIX: Python 3 maps the / operator to __truediv__; __div__ alone is
    # only honored by Python 2, so division silently failed under Python 3.
    __truediv__ = __div__

    def x(self):
        return self.array[0]

    def y(self):
        return self.array[1]

    def plot(self, ax=None, measurementNumber=-1, scanNumber=None, mmsi=None, **kwargs):
        # BUG FIX: `ax=plt.gca()` as a default argument is evaluated once at
        # import time, pinning all default calls to whatever axes (and figure)
        # existed then. Resolve the current axes at call time instead.
        if ax is None:
            ax = plt.gca()
        if mmsi is not None:
            marker = 'h' if kwargs.get('original', False) else 'D'
            ax.plot(self.array[0], self.array[1],
                    marker=marker, markerfacecolor='None',
                    markeredgewidth=kwargs.get('markeredgewidth', 1),
                    markeredgecolor=kwargs.get('color', 'black'))
        elif measurementNumber > 0:
            ax.plot(self.array[0], self.array[1], 'kx',
                    markeredgecolor=kwargs.get('color', 'black'))
        elif measurementNumber == 0:
            ax.plot(self.array[0], self.array[1], fillstyle="none", marker="o",
                    markeredgecolor=kwargs.get('color', 'black'))
        else:
            raise ValueError("Not a valid measurement number")

        if ((scanNumber is not None) and
                (measurementNumber is not None) and
                kwargs.get("labels", False)):
            ax.text(self.array[0], self.array[1], str(
                scanNumber) + ":" + str(measurementNumber), size=7, ha="left", va="top")
class Velocity:
    """A 2D velocity wrapping a numpy array [vx, vy].

    Construction forms: Velocity(x=..., y=...), Velocity(array_like) or
    Velocity(vx, vy). Arithmetic operators combine velocities element-wise
    and return a new Velocity.
    """

    def __init__(self, *args, **kwargs):
        x = kwargs.get('x')
        y = kwargs.get('y')
        if (x is not None) and (y is not None):
            # BUG FIX: was `self.velocity[0] = np.array([x, y])`, which indexed
            # an attribute that does not exist yet (AttributeError).
            self.velocity = np.array([x, y])
        elif len(args) == 1:
            self.velocity = np.array(args[0])
        elif len(args) == 2:
            # BUG FIX: was `np.array(args[0], args[1])`, which passes the
            # second component as numpy's dtype argument instead of a value.
            self.velocity = np.array([args[0], args[1]])
        else:
            raise ValueError("Invalid arguments to Velocity")

    def __str__(self):
        return 'Vel: ({: 6.2f},{: 6.2f})'.format(self.velocity[0], self.velocity[1])

    def __repr__(self):
        return '({:.3e},{:.3e})'.format(self.velocity[0], self.velocity[1])

    def __add__(self, other):
        return Velocity(self.velocity + other.velocity)

    def __sub__(self, other):
        return Velocity(self.velocity - other.velocity)

    def __mul__(self, other):
        return Velocity(self.velocity * other.velocity)

    def __div__(self, other):
        return Velocity(self.velocity / other.velocity)

    # BUG FIX: Python 3 maps the / operator to __truediv__, not __div__.
    __truediv__ = __div__

    def x(self):
        return self.velocity[0]

    def y(self):
        return self.velocity[1]
367 | if (not inRange) or (not radarTime): 368 | continue 369 | sampleCounter += 1 370 | stateElement = ET.SubElement(statesElement, 371 | stateTag, 372 | attrib={timeTag: str(simTarget.time), 373 | pdTag: str(simTarget.P_d)}) 374 | eastPos, northPos, eastVel, northVel = simTarget.getXmlStateStringsCartesian() 375 | positionElement = ET.SubElement(stateElement, positionTag) 376 | ET.SubElement(positionElement, northTag).text = northPos 377 | ET.SubElement(positionElement, eastTag).text = eastPos 378 | velocityElement = ET.SubElement(stateElement, velocityTag) 379 | ET.SubElement(velocityElement, northTag).text = northVel 380 | ET.SubElement(velocityElement, eastTag).text = eastVel 381 | if simTarget.mmsi is not None: 382 | trackElement.attrib[mmsiTag] = str(simTarget.mmsi) 383 | trackElement.attrib[aisclassTag] = str(simTarget.aisClass) 384 | trackElement.attrib[prTag] = str(simTarget.P_r) 385 | statesElement.attrib[sigmaqTag] = str(simTarget.sigma_Q) 386 | trackElement.attrib[lengthTag] = str(sampleCounter) 387 | 388 | def plot(self, ax=plt.gca(), **kwargs): 389 | colors = kwargs.get("colors") 390 | newArgs = copy.copy(kwargs) 391 | if "colors" in newArgs: 392 | del newArgs["colors"] 393 | 394 | nScan = len(self) 395 | nTargets = len(self[0]) 396 | stateArray = np.zeros((nScan, nTargets, 4)) 397 | for row, targetList in enumerate(self): 398 | stateArray[row, :, :] = np.array([target.cartesianState() for target in targetList]) 399 | for col in range(nTargets): 400 | ax.plot(stateArray[:, col, 0], 401 | stateArray[:, col, 1], 402 | '.', 403 | alpha=kwargs.get('alpha', 0.7), 404 | markeredgewidth=kwargs.get('markeredgewidth', 0.5), 405 | color=next(colors) if colors is not None else None, 406 | markevery=kwargs.get('markevery', 1)) 407 | 408 | for col, target in enumerate(self[0]): 409 | if kwargs.get('markStart', True): 410 | ax.plot(stateArray[0, col, 0], stateArray[0, col, 1], '.', color='black') 411 | if kwargs.get('label', False): 412 | velocity = 
class AIS_message:
    """A single received AIS message: timestamp, state vector, MMSI and an
    accuracy flag."""

    def __init__(self, time, state, mmsi, highAccuracy=False):
        self.time = time
        self.state = state
        self.mmsi = mmsi
        self.highAccuracy = highAccuracy

    def __str__(self):
        timeString = self.getTimeString()
        mmsiString = 'MMSI: ' + str(self.mmsi) if self.mmsi is not None else ""
        return ('Time: ' + timeString + " " +
                'State:' + np.array2string(self.state, formatter={'float_kind': lambda x: '{: 7.1f}'.format(x)}) + " " +
                'High accuracy: {:1} '.format(self.highAccuracy) +
                mmsiString)

    def __eq__(self, other):
        # BUG FIX: the original also compared `self.covariance`, an attribute
        # AIS_message never defines, so comparing two messages with equal time
        # and state raised AttributeError instead of returning True/False.
        if self.time != other.time:
            return False
        if not np.array_equal(self.state, other.state):
            return False
        if self.mmsi != other.mmsi:
            return False
        return True

    __repr__ = __str__

    def getTimeString(self):
        # Sub-second timestamps keep their fractional part in the format;
        # small values (< 1e6, i.e. clearly not epoch seconds) are printed raw.
        if self.time == int(self.time):
            timeFormat = "%H:%M:%S"
        else:
            timeFormat = "%H:%M:%S.%f"
        timeString = datetime.datetime.fromtimestamp(self.time).strftime(timeFormat)
        if self.time < 1e6:
            timeString = str(self.time)
        return timeString

    def plot(self, ax=None, **kwargs):
        # BUG FIX: `ax=plt.gca()` default was evaluated once at import time;
        # resolve the current axes at call time instead.
        if ax is None:
            ax = plt.gca()
        Position(self.state[0:2]).plot(ax, mmsi=self.mmsi, original=True, **kwargs)

    def predict(self, dT):
        """Predict the message state dT seconds ahead with the AIS motion
        model; returns (state, covariance).

        NOTE(review): the covariance prior is pv.P0 (not a stored message
        covariance) -- confirm this is intentional for all callers.
        """
        Phi = ais.Phi(dT)
        Q = pv.Q(dT)
        state = Phi.dot(self.state)
        covariance = Phi.dot(pv.P0).dot(Phi.T) + Q
        return state, covariance
class MeasurementList:
    """All radar measurements received at a single scan time."""

    def __init__(self, time, measurements=None):
        self.time = time
        # Stored as given; filterUnused assumes a numpy array (index-mask
        # selection) -- TODO confirm all callers pass arrays, while plot only
        # needs an iterable of 2D points.
        self.measurements = measurements if measurements is not None else []

    def __str__(self):
        # NOTE(review): this mutates numpy's *global* print options as a side
        # effect of formatting; consider the scoped np.printoptions(...)
        # context manager (numpy >= 1.15) instead.
        np.set_printoptions(precision=1, suppress=True)
        timeString = datetime.datetime.fromtimestamp(self.time).strftime("%H:%M:%S.%f")
        return ("Time: " + timeString +
                "\tMeasurements:\t" + ", ".join(
                    [str(measurement) for measurement in self.measurements]))

    def __eq__(self, other):
        if self.time != other.time:
            return False
        if not np.array_equal(self.measurements, other.measurements):
            return False
        return True

    __repr__ = __str__

    def plot(self, ax=None, **kwargs):
        # BUG FIX: `ax=plt.gca()` as a default argument is evaluated once at
        # import time, pinning every default call to whatever axes existed
        # then; resolve the current axes at call time instead.
        if ax is None:
            ax = plt.gca()
        for measurementIndex, measurement in enumerate(self.measurements):
            Position(measurement).plot(ax, measurementIndex + 1, **kwargs)

    def filterUnused(self, unused_measurement_indices):
        """Return a new MeasurementList containing only the measurements
        selected by the boolean mask unused_measurement_indices."""
        measurements = self.measurements[np.where(unused_measurement_indices)]
        return MeasurementList(self.time, measurements)

    def getTimeString(self, timeFormat="%H:%M:%S"):
        return datetime.datetime.fromtimestamp(self.time).strftime(timeFormat)

    def getMeasurements(self):
        return self.measurements
def plot(self, ax=plt.gca(), **kwargs): 641 | for m in self: 642 | m.plot(ax, **kwargs) 643 | 644 | def plotFast(self, ax=plt.gca(), **kwargs): 645 | for measurementList in self: 646 | measurementArray = np.array(measurementList.measurements, ndmin=2) 647 | assert measurementArray.ndim == 2 648 | assert measurementArray.shape[1] == 2 649 | ax.plot(measurementArray[:, 0], measurementArray[:, 1], '.', color='black', **kwargs) 650 | -------------------------------------------------------------------------------- /pymht/utils/cudaTest.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from timeit import default_timer as timer 3 | from numba import vectorize 4 | 5 | def pyVectorAdd(a,b,c): 6 | for i in range(a.size): 7 | c[i] = a[i] + b[i] 8 | 9 | def numpyVectorAdd(a,b): 10 | return a+b 11 | 12 | @vectorize(["float32(float32, float32)"], target='cpu') 13 | def cpuVectorAdd(a,b): 14 | return a + b 15 | 16 | 17 | @vectorize(["float32(float32, float32)"], target='parallel') 18 | def parallelVectorAdd(a,b): 19 | return a + b 20 | 21 | 22 | @vectorize(["float32(float32, float32)"], target='cuda') 23 | def cudaVectorAdd(a,b): 24 | return a + b 25 | 26 | 27 | def main(): 28 | N = 3200 29 | 30 | A = np.ones(N,dtype=np.float32) 31 | B = np.zeros(N, dtype=np.float32) 32 | C1 = np.zeros(N,dtype=np.float32) 33 | 34 | start0 = timer() 35 | C0 = numpyVectorAdd(A, B) 36 | time0 = timer() - start0 37 | 38 | start1 = timer() 39 | pyVectorAdd(A,B, C1) 40 | time1 = timer()-start1 41 | 42 | start2 = timer() 43 | C2 = cpuVectorAdd(A,B) 44 | time2 = timer()-start2 45 | 46 | start3 = timer() 47 | C3 = parallelVectorAdd(A,B) 48 | time3 = timer()-start3 49 | 50 | start4 = timer() 51 | C4 = cudaVectorAdd(A,B) 52 | time4 = timer()-start4 53 | 54 | # print("C_RADAR[:5] = " + str(C_RADAR[:5])) 55 | # print("C_RADAR[-5:] = " + str(C_RADAR[-5:])) 56 | 57 | print("VectorAdd took", time0*1000, time1*1000, time2*1000,time3*1000, time4*1000, 
"ms", sep = "\n") 58 | 59 | 60 | def main2(): 61 | from mpl_toolkits.basemap import Basemap 62 | import numpy as np 63 | import matplotlib.pyplot as plt 64 | # llcrnrlat,llcrnrlon,urcrnrlat,urcrnrlon 65 | # are the lat/lon values of the lower left and upper right corners 66 | # of the map. 67 | # resolution = 'i' means use intermediate resolution coastlines. 68 | # lon_0, lat_0 are the central longitude and latitude of the projection. 69 | m = Basemap(llcrnrlon=9.5, llcrnrlat=63.2, 70 | urcrnrlon=10.9, urcrnrlat=64., 71 | resolution='i', projection='tmerc', 72 | lon_0=10.7, lat_0=63.4) 73 | # can get the identical map this way (by specifying width and 74 | # height instead of lat/lon corners) 75 | # m = Basemap(width=894887,height=1116766,\ 76 | # resolution='i',projection='tmerc',lon_0=-4.36,lat_0=54.7) 77 | m.drawcoastlines() 78 | m.fillcontinents(color='coral', lake_color='aqua') 79 | # m.drawparallels(np.arange(-40, 61., 2.)) 80 | # m.drawmeridians(np.arange(-20., 21., 2.)) 81 | m.drawmapboundary(fill_color='aqua') 82 | plt.title("Transverse Mercator Projection") 83 | plt.show() 84 | 85 | if __name__ == "__main__": 86 | main2() -------------------------------------------------------------------------------- /pymht/utils/helpFunctions.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import numpy as np 3 | import logging 4 | 5 | log = logging.getLogger(__name__) 6 | 7 | def _getBestTextPosition(normVelocity, **kwargs): 8 | DEBUG = kwargs.get('debug', False) 9 | compassHeading = np.arctan2(normVelocity[0], normVelocity[1]) * 180. / np.pi 10 | compassHeading = (compassHeading + 360.) % 360. 
11 | assert compassHeading >= 0, str(compassHeading) 12 | assert compassHeading <= 360, str(compassHeading) 13 | quadrant = int(2 + (compassHeading - 90) // 90) 14 | assert quadrant >= 1, str(quadrant) 15 | assert quadrant <= 4, str(quadrant) 16 | assert type(quadrant) is int 17 | if DEBUG: print("Vector {0:} Heading {1:5.1f} Quadrant {2:}".format(normVelocity, compassHeading, quadrant)) 18 | # return horizontal_alignment, vertical_alignment 19 | if quadrant == 1: 20 | return 'right', 'top' 21 | elif quadrant == 2: 22 | return 'right', 'bottom' 23 | elif quadrant == 3: 24 | return 'left', 'bottom' 25 | elif quadrant == 4: 26 | return 'left', 'top' 27 | else: 28 | print('_getBestTextPosition failed. Returning default') 29 | return 'center', 'center' 30 | 31 | 32 | def binomial(n, k): 33 | return 1 if k == 0 else (0 if n == 0 else binomial(n - 1, k) + binomial(n - 1, k - 1)) 34 | 35 | 36 | def plotVelocityArrowFromNode(nodes, **kwargs): 37 | def recPlotVelocityArrowFromNode(node, stepsLeft): 38 | if node.predictedStateMean is not None: 39 | plotVelocityArrow(node) 40 | if stepsLeft > 0 and (node.parent is not None): 41 | recPlotVelocityArrowFromNode(node.parent, stepsLeft - 1) 42 | 43 | for node in nodes: 44 | recPlotVelocityArrowFromNode(node, kwargs.get("stepsBack", 1)) 45 | 46 | 47 | def printScanList(scanList): 48 | for index, measurement in enumerate(scanList): 49 | print("\tMeasurement ", index, ":\t", end='', sep='') 50 | measurement.print() 51 | 52 | 53 | def printHypothesesScore(targetList): 54 | def recPrint(target, targetIndex): 55 | if target.trackHypotheses is not None: 56 | for hyp in target.trackHypotheses: 57 | recPrint(hyp, targetIndex) 58 | 59 | for targetIndex, target in enumerate(targetList): 60 | print("\tTarget: ", targetIndex, 61 | "\tInit", target.initial.position, 62 | "\tPred", target.predictedPosition(), 63 | "\tMeas", target.measurement, sep="") 64 | 65 | 66 | def backtrackMeasurementNumbers(selectedNodes, steps=None): 67 | def 
recBacktrackNodeMeasurements(node, measurementBacktrack, stepsLeft=None): 68 | if node.parent is not None: 69 | if stepsLeft is None: 70 | measurementBacktrack.append(node.measurementNumber) 71 | recBacktrackNodeMeasurements(node.parent, measurementBacktrack) 72 | elif stepsLeft > 0: 73 | measurementBacktrack.append(node.measurementNumber) 74 | recBacktrackNodeMeasurements( 75 | node.parent, measurementBacktrack, stepsLeft - 1) 76 | 77 | measurementsBacktracks = [] 78 | for node in selectedNodes: 79 | measurementNumberBacktrack = [] 80 | recBacktrackNodeMeasurements(node, measurementNumberBacktrack, steps) 81 | measurementNumberBacktrack.reverse() 82 | measurementsBacktracks.append(measurementNumberBacktrack) 83 | return measurementsBacktracks 84 | 85 | 86 | def writeElementToFile(path, element): 87 | import xml.etree.ElementTree as ET 88 | import os 89 | (head, tail) = os.path.split(path) 90 | if not os.path.isdir(head): 91 | os.makedirs(head) 92 | tree = ET.ElementTree(element) 93 | tree.write(path) -------------------------------------------------------------------------------- /pymht/utils/kalman.py: -------------------------------------------------------------------------------- 1 | """ 2 | A module with operations useful for Kalman filtering. 
3 | """ 4 | import numpy as np 5 | 6 | 7 | def nllr_ais(S_list, nis): 8 | result = (0.5 * nis + np.log(np.sqrt(np.linalg.det(2 * np.pi * S_list)))) 9 | assert result.size == nis.size 10 | assert all(np.isfinite(result)), str(result) 11 | return result 12 | 13 | 14 | def nllr(lambda_ex, P_d, S_list, nis): 15 | # assert S_list.shape[0] == nis.size, str(S_list.shape) + str(nis.size) + str(nis.shape) 16 | if lambda_ex == 0: 17 | log.warning("'lambda_ex' can not be zero.") 18 | lambda_ex += 1e-20 19 | result = (0.5 * nis + np.log((lambda_ex * np.sqrt(np.linalg.det(2 * np.pi * S_list))) / P_d)) 20 | assert result.size == nis.size, str(result.size)+'/'+str(nis.size) 21 | assert all(np.isfinite(result)), str(result) 22 | return result 23 | 24 | 25 | def normalizedInnovationSquared(z_tilde_list, S_inv_list): 26 | return np.sum(np.matmul(z_tilde_list, S_inv_list) * 27 | z_tilde_list, 28 | axis=2) 29 | 30 | 31 | def nis_single(z_tilde, S): 32 | nis = z_tilde.dot(np.linalg.inv(S).dot(z_tilde.T)) 33 | return nis 34 | 35 | 36 | def z_tilde(z_list, z_hat_list, nNodes=1, measDim=2): 37 | z_tensor = np.array([z_list, ] * nNodes) 38 | z_hat_tensor = z_hat_list.reshape(nNodes, 1, measDim) 39 | z_tilde_list = z_tensor - z_hat_tensor 40 | return z_tilde_list 41 | 42 | 43 | def numpyFilter(x_bar, K, z_tilde): 44 | x_bar = x_bar.reshape(1, x_bar.shape[0]) 45 | assert z_tilde.ndim == 2 46 | assert z_tilde.shape[1] == K.shape[1], str(z_tilde.shape) + str(x_bar.shape) 47 | assert z_tilde.ndim == 2 48 | assert K.shape[0] == x_bar.shape[1] 49 | # assert x_bar.shape == (1, 4), str(x_bar.shape) 50 | x_hat = x_bar + np.matmul(K, z_tilde.T).T 51 | assert x_hat.shape[1] == x_bar.shape[1], str(x_hat.shape) + str(x_bar.shape) 52 | return x_hat 53 | 54 | 55 | def predict(A, Q, x_0_list, P_0_list): 56 | assert A.ndim == 2 57 | assert Q.ndim == 2 58 | assert x_0_list.ndim == 2 59 | assert P_0_list.ndim == 3 60 | x_bar_list = A.dot(x_0_list.T).T 61 | P_bar_list = (np.matmul(np.matmul(A, P_0_list), A.T) 
+ Q) 62 | assert x_bar_list.shape == x_0_list.shape, "x_bar ERROR" 63 | assert P_bar_list.shape == P_0_list.shape, "P_bar ERROR" 64 | return x_bar_list, P_bar_list 65 | 66 | 67 | def predict_single(A, Q, x_hat, P_hat): 68 | x_bar = A.dot(x_hat) 69 | P_bar = A.dot(P_hat).dot(A.T) + Q 70 | return x_bar, P_bar 71 | 72 | 73 | def filter_single(z, x_bar, P_bar, H, R): 74 | y_tilde = z - H.dot(x_bar) 75 | S = H.dot(P_bar).dot(H.T) + R 76 | K = P_bar.dot(H.T).dot(np.linalg.inv(S)) 77 | x_hat = x_bar + K.dot(y_tilde) 78 | P_hat = P_bar - K.dot(H).dot(P_bar) 79 | return x_hat, P_hat, S, y_tilde 80 | 81 | 82 | def precalc(C, R, x_bar_list, P_bar_list): 83 | assert C.ndim == 2 84 | assert R.ndim == 2 85 | 86 | nMeasurement, nStates = x_bar_list.shape 87 | nObservableState = C.shape[0] 88 | 89 | z_hat_list = C.dot(x_bar_list.T).T 90 | S_list = np.matmul(np.matmul(C, P_bar_list), C.T) + R 91 | S_inv_list = np.linalg.inv(S_list) 92 | K_list = np.matmul(np.matmul(P_bar_list, C.T), S_inv_list) 93 | P_hat_list = P_bar_list - np.matmul(K_list.dot(C), P_bar_list) 94 | 95 | assert z_hat_list.shape == (nMeasurement, nObservableState), "z_hat ERROR" 96 | assert S_list.shape == (nMeasurement, nObservableState, nObservableState), "S ERROR" 97 | assert S_inv_list.shape == S_list.shape, "S_inv ERROR" 98 | assert K_list.shape == (nMeasurement, nStates, nObservableState) 99 | assert P_hat_list.shape == P_bar_list.shape, "P_hat ERROR" 100 | 101 | return z_hat_list, S_list, S_inv_list, K_list, P_hat_list 102 | 103 | 104 | class KalmanFilter(): 105 | """ 106 | A Kalman filterUnused class, does filtering for systems of the type: 107 | x_{k+1} = A*x_k + v_k 108 | y_k = C_RADAR*z_k + e_k 109 | v_k ~ N(0,Q) 110 | e_k ~ N(0,R_RADAR) 111 | x_0 - Initial state 112 | P_0 - Initial state covariance 113 | """ 114 | 115 | def __init__(self, x_0, P_0, A, C, Q, R, **kwargs): 116 | # Q = kwargs.get('Q') 117 | # R_RADAR = kwargs.get('R_RADAR') 118 | # x_0 = kwargs.get('x_0') 119 | # P_0 = kwargs.get('P_0') 120 
| # dT = kwargs.get('T') 121 | # Gamma = kwargs.get( 122 | # 'Gamma', 123 | # np.eye(Q(1).shape[0]) if callable(Q) else np.eye(Q.shape[0])) 124 | 125 | self.A = A # Transition matrix 126 | self.C = C # Observation matrix 127 | self.Q = Q # Process noise covariance 128 | self.R = R # Measurement noise covariance 129 | self.x_hat = np.copy(x_0) # Filtered state 130 | self.P_hat = np.copy(P_0) # Filtered state covariance 131 | self.x_bar = None # Prediced state 132 | self.P_bar = None # Predictes state covariance 133 | # self.dT = np.copy(dT) # Sampling period (dynamic if None) 134 | self.z_hat = None # Predicted measurement 135 | self.S = None # Residual covariance 136 | self.S_inv = None # Inverse residual covariance 137 | self.K = None # Kalman gain 138 | self.predicted = False 139 | self.precalculated = False 140 | 141 | def predict(self, **kwargs): 142 | """ 143 | Calculate next state estimate without actually updating 144 | the internal variables 145 | """ 146 | # dT = kwargs.get('T', self.dT) 147 | A = self.A # self.A(dT) if callable(self.A) else self.A 148 | Q = self.Q # self.Q(dT) if callable(self.Q) else self.Q 149 | x_bar = A.dot(self.x_hat) 150 | P_bar = A.dot(self.P_hat).dot(A.T) + Q 151 | if not kwargs.get('local', False): 152 | self.x_bar = x_bar 153 | self.P_bar = P_bar 154 | self.predicted = True 155 | return x_bar, P_bar 156 | 157 | def _precalculateMeasurementUpdate(self): 158 | if not self.predicted: 159 | self.predict() 160 | self.z_hat = self.C.dot(self.x_bar) 161 | self.S = self.C.dot(self.P_bar).dot(self.C.T) + self.R 162 | self.S_inv = np.linalg.inv(self.S) 163 | self.K = self.P_bar.dot(self.C.T).dot(self.S_inv) 164 | self.precalculated = True 165 | 166 | def filter(self, **kwargs): 167 | """ 168 | Filter state with measurement without updating the internal variables 169 | """ 170 | if not self.precalculated: 171 | self._precalculateMeasurementUpdate() 172 | self.precalculated = False 173 | 174 | if 'y_tilde' in kwargs: 175 | y_tilde = 
kwargs.get('y_tilde') 176 | elif 'y' in kwargs: 177 | z = kwargs.get('y') 178 | y_tilde = z - self.z_hat 179 | else: 180 | x_hat = self.x_bar 181 | P_hat = self.P_bar 182 | if not kwargs.get('local', False): 183 | self.x_hat = x_hat 184 | self.P_hat = P_hat 185 | return x_hat, P_hat 186 | 187 | x_hat = self.x_bar + self.K.dot(y_tilde) 188 | P_hat = self.P_bar - self.K.dot(self.C).dot(self.P_bar) 189 | if not kwargs.get('local', False): 190 | self.x_hat = x_hat 191 | self.P_hat = P_hat 192 | return x_hat, P_hat 193 | 194 | def filterAndCopy(self, *args): 195 | if len(args) == 0: 196 | x_hat = self.x_bar 197 | P_hat = self.P_bar 198 | elif len(args) == 1: 199 | y_tilde = args[0] 200 | x_hat = self.x_bar + self.K.dot(y_tilde) 201 | P_hat = self.P_bar - self.K.dot(self.C).dot(self.P_bar) 202 | else: 203 | raise ValueError("Invalid number of arguments") 204 | return KalmanFilter(x_hat, P_hat, self.A, self.C, self.Q, self.R) 205 | -------------------------------------------------------------------------------- /pymht/utils/simulator.py: -------------------------------------------------------------------------------- 1 | import time 2 | import copy 3 | import math 4 | import logging 5 | import numpy as np 6 | from .classDefinitions import ScanList 7 | from .classDefinitions import SimTarget, SimTargetCartesian 8 | from .classDefinitions import MeasurementList, AIS_message, Position, AisMessagesList, SimList 9 | 10 | log = logging.getLogger(__name__) 11 | 12 | def checkEqualIvo(lst): 13 | return not lst or lst.count(lst[0]) == len(lst) 14 | 15 | def seed_simulator(seed): 16 | np.random.seed(seed) 17 | 18 | def generateInitialTargets(numOfTargets, centerPosition, 19 | radarRange, P_d, sigma_Q, **kwargs): 20 | usedMMSI = [] 21 | initialTime = time.time() 22 | initialList = [] 23 | speeds = np.array([1, 10, 12, 15, 28, 35], dtype=np.float32) * 0.5 # ~knots to m/s 24 | for targetIndex in range(numOfTargets): 25 | heading = np.random.uniform(0, 360) 26 | distance = 
np.random.uniform(0, radarRange * 0.8) 27 | px, py = _pol2cart(heading, distance) 28 | px += centerPosition[0] 29 | py += centerPosition[1] 30 | heading = np.random.uniform(0, 360) 31 | speed = np.random.choice(speeds) 32 | vx, vy = _pol2cart(heading, speed) 33 | if kwargs.get('assignMMSI',False): 34 | while True: 35 | mmsi = np.random.randint(100000000,999999999) 36 | if mmsi not in usedMMSI: 37 | usedMMSI.append(mmsi) 38 | break 39 | else: 40 | mmsi = None 41 | target = SimTargetCartesian(np.array([px, py, vx, vy], dtype=np.float32), initialTime, P_d, sigma_Q, mmsi = mmsi) 42 | initialList.append(target) 43 | return initialList 44 | 45 | def simulateTargets(initialTargets, simTime, timeStep, model, **kwargs): 46 | simList = SimList() 47 | assert all([isinstance(initialTarget,SimTarget) for initialTarget in initialTargets]) 48 | simList.append(initialTargets) 49 | nTimeSteps = int(np.ceil(simTime / timeStep)) 50 | 51 | for i in range(nTimeSteps): 52 | targetList = [target.calculateNextState(timeStep) 53 | for target in simList[-1]] 54 | simList.append(targetList) 55 | 56 | return simList 57 | 58 | def simulateScans(simList, radarPeriod, H, R, lambda_phi=0, 59 | rRange=None, p0=None, **kwargs): 60 | includeInitialTime = not kwargs.get('preInitialized', False) 61 | area = np.pi * np.power(rRange, 2) 62 | gClutter = lambda_phi * area 63 | lClutter = kwargs.get('lambda_local', 1) 64 | scanList = ScanList() 65 | lastScan = None 66 | skippedFirst = False 67 | for targetList in simList: 68 | simTime = targetList[0].time 69 | if lastScan is None: 70 | if not includeInitialTime and not skippedFirst: 71 | skippedFirst = True 72 | lastScan = simTime 73 | continue 74 | lastScan = simTime 75 | else: 76 | timeSinceLastScan = simTime - lastScan 77 | if timeSinceLastScan >= radarPeriod: 78 | lastScan = simTime 79 | else: 80 | continue 81 | 82 | measurementList = MeasurementList(simTime) 83 | for target in targetList: 84 | visible = np.random.uniform() <= 
kwargs.get('P_d',target.P_d) 85 | if (rRange is not None) and (p0 is not None): 86 | inRange = target.inRange(p0, rRange) 87 | else: 88 | inRange = True 89 | 90 | if visible and inRange: 91 | measurementList.measurements.append(target.positionWithNoise()) 92 | if kwargs.get('localClutter', True): 93 | nClutter = np.random.poisson(lClutter) 94 | # log.debug("nLocalClutter {:}".format(nClutter)) 95 | measurementList.measurements.extend([target.positionWithNoise(sigma_R_scale = 3) 96 | for _ in range(nClutter)]) 97 | if all(e is not None for e in [rRange, p0]) and kwargs.get('globalClutter', True): 98 | nClutter = np.random.poisson(gClutter) 99 | # log.debug("nGlobalClutter {:}".format(nClutter)) 100 | for i in range(nClutter): 101 | clutter = _generateCartesianClutter(p0, rRange) 102 | measurementList.measurements.append(clutter) 103 | if kwargs.get("shuffle", True): 104 | np.random.shuffle(measurementList.measurements) 105 | nMeas = len(measurementList.measurements) 106 | measurementList.measurements = np.array( 107 | measurementList.measurements, ndmin=2, dtype=np.float32) 108 | measurementList.measurements = measurementList.measurements.reshape((nMeas, 2)) 109 | scanList.append(copy.deepcopy(measurementList)) 110 | return scanList 111 | 112 | def simulateAIS(sim_list, ais_model, radarPeriod, initTime, **kwargs): 113 | ais_measurements = AisMessagesList() 114 | integerTime = kwargs.get('integerTime', True) 115 | tempList = [] 116 | for i, sim in enumerate(sim_list[1:]): 117 | assert checkEqualIvo([t.time for t in sim]) 118 | 119 | for j, target in ((j,target) for j, target in enumerate(sim) if target.mmsi is not None): 120 | if integerTime: 121 | messageTime = math.floor(target.time) 122 | dT = messageTime - target.time 123 | state = target.model.A_AIS(dT).dot(target.cartesianState()) 124 | else: 125 | messageTime = target.time 126 | state = target.cartesianState() 127 | timeSinceLastAisMessage = messageTime - target.timeOfLastAisMessage 128 | speedMS = 
target.speedMS() 129 | reportingInterval = _aisReportInterval(speedMS, target.aisClass) 130 | shouldSendAisMessage = ((timeSinceLastAisMessage >= reportingInterval) and 131 | ((messageTime-initTime) % radarPeriod != 0)) 132 | # log.debug("MMSI " + str(target.mmsi) + " \t" + 133 | # "Target time " + str(target.time) + " \t" + 134 | # "Message time " + str(messageTime) + " \t" + 135 | # "Time of last AIS message " + str(target.timeOfLastAisMessage) + " \t" + 136 | # "Reporting Interval " + str(reportingInterval) + " \t" + 137 | # "Should send AIS message " + str(shouldSendAisMessage)) 138 | if not shouldSendAisMessage: 139 | try: 140 | sim_list[i + 2][j].timeOfLastAisMessage = target.timeOfLastAisMessage 141 | except IndexError: 142 | pass 143 | continue 144 | try: 145 | sim_list[i+2][j].timeOfLastAisMessage = float(messageTime) 146 | except IndexError: 147 | pass 148 | highAccuracy = True 149 | if kwargs.get('noise', True): 150 | highAccuracy = np.random.uniform() > 0.5 151 | R = target.model.R_AIS(highAccuracy) 152 | v = np.random.multivariate_normal(np.zeros(R.shape[0]), R) 153 | state = target.model.C_AIS.dot(state) + v 154 | assert state.ndim == 1 155 | assert state.size == target.model.nObsDim_AIS, str(state.size) 156 | if kwargs.get('idScrambling',False) and np.random.uniform() > 0.5: 157 | mmsi = target.mmsi + 10 158 | # log.info("Scrambling MMSI {0:} to {1:} at {2:}".format(target.mmsi,mmsi, messageTime)) 159 | else: 160 | mmsi = target.mmsi 161 | 162 | prediction = AIS_message(time=messageTime, 163 | state=state, 164 | mmsi=mmsi, 165 | highAccuracy=highAccuracy) 166 | if np.random.uniform() <= target.P_r: 167 | tempList.append(prediction) 168 | simTime = sim[0].time 169 | if (simTime - initTime) % radarPeriod == 0: 170 | if tempList: 171 | ais_measurements.append(tempList[:]) 172 | tempList = [] 173 | return ais_measurements 174 | 175 | def _aisReportInterval(speedMS, aisClass): 176 | from scipy.constants import knot 177 | speedKnot = speedMS * knot 178 | 
if aisClass.upper() == 'A': 179 | if speedKnot > 23: 180 | return 2 181 | if speedKnot > 14: 182 | return 4 #Should be 2 or 6, but are missing acceleration data 183 | if speedKnot > 0: 184 | return 6 #Should be 3.3 or 10, but are missing acceleration data 185 | if speedKnot == 0: 186 | return 60 #Should be 10s og 3min, but are missing mored status 187 | raise ValueError("Speed must be positive") 188 | elif aisClass.upper() == 'B': 189 | if speedKnot > 23: 190 | return 10 191 | if speedKnot > 14: 192 | return 5 193 | if speedKnot > 2: 194 | return 30 195 | if speedKnot >= 0: 196 | return 60*3 197 | raise ValueError("Speed must be positive") 198 | else: 199 | raise ValueError("aisClass must be 'A' og 'B'") 200 | 201 | def findCenterPositionAndRange(simList): 202 | xMin = float('Inf') 203 | yMin = float('Inf') 204 | xMax = -float('Inf') 205 | yMax = -float('Inf') 206 | for sim in simList: 207 | for simTarget in sim: 208 | state = simTarget.cartesianState() 209 | xMin = state[0] if state[0] < xMin else xMin 210 | yMin = state[1] if state[1] < yMin else yMin 211 | xMax = state[0] if state[0] > xMax else xMax 212 | yMax = state[1] if state[1] > yMax else yMax 213 | p0 = Position(xMin + (xMax - xMin) / 2, yMin + (yMax - yMin) / 2) 214 | R = np.sqrt(np.power(max(abs(xMax - p0.x), abs(xMin - p0.x)), 2) + 215 | np.power(max(abs(yMax - p0.y), abs(yMin - p0.y)), 2)) 216 | return p0, R 217 | 218 | def _generateRadialClutter(centerPosition, radarRange): 219 | heading = np.random.uniform(0, 360) 220 | distance = np.random.uniform(0, radarRange) 221 | px, py = _pol2cart(heading, distance) 222 | return centerPosition + np.array([px, py]) 223 | 224 | def _generateCartesianClutter(centerPosition, radarRange): 225 | while True: 226 | x = np.random.uniform(-radarRange, radarRange) 227 | y = np.random.uniform(-radarRange, radarRange) 228 | pos = np.array([x, y], dtype=np.float32) 229 | if np.linalg.norm(pos) <= radarRange: 230 | return centerPosition + pos 231 | 232 | def 
_pol2cart(bearingDEG, distance): 233 | import math 234 | angleDEG = (90 - bearingDEG + 360)%360 235 | angleRAD = np.deg2rad(angleDEG) 236 | x = distance * math.cos(angleRAD) 237 | y = distance * math.sin(angleRAD) 238 | return [x, y] 239 | -------------------------------------------------------------------------------- /pymht/utils/xmlDefinitions.py: -------------------------------------------------------------------------------- 1 | scenarioTag = "Scenario" 2 | groundtruthTag = "groundtruth" 3 | simulationTag = "Simulation" 4 | variationsTag = "Variations" 5 | variationTag = "Variation" 6 | scenariosettingsTag = "Scenario-settings" 7 | trackerSettingsTag = "Tracker-settings" 8 | runTag = "Run" 9 | runtimeTag = "Runtime" 10 | trackTag = "Track" 11 | statesTag = "States" 12 | smoothedstatesTag = "SmoothedStates" 13 | stateTag = "S" 14 | positionTag = "P" 15 | velocityTag = "V" 16 | northTag = "N" 17 | eastTag = "E" 18 | mmsiTag = "mmsi" 19 | timeTag = "t" 20 | meanTag = "mean" 21 | minTag = "min" 22 | maxTag = "max" 23 | precisionTag = "precision" 24 | descriptionTag = "Description" 25 | smoothedTag = "smoothed" 26 | idTag = "id" 27 | iterationTag = "i" 28 | typeTag = "type" 29 | estimateTag = "estimate" 30 | pdTag = "Pd" 31 | sigmaqTag = "sigmaQ" 32 | seedTag = "seed" 33 | lengthTag = "length" 34 | aisclassTag = "aisClass" 35 | prTag = "Pr" 36 | nameTag = "name" 37 | preinitializedTag = "preinitialized" 38 | activeTag = "Active" 39 | outofrangeTag = "OutOfRange" 40 | statusTag = "status" 41 | toolowscoreTag = "TooLowScore" 42 | trueTag = str(True) 43 | falseTag = str(False) 44 | matchidTag = "mathID" 45 | rmserrorTag = "rms" 46 | timematchTag = "timeMatch" 47 | goodtimematchTag = "goodtimeMatch" 48 | nTag = "N" 49 | mInitTag = "M_init" 50 | nInitTag = "N_init" 51 | lambdaphiTag = "lambda_phi" 52 | tracklossTag = "trackloss" 53 | losttrackTag = "lostTrack" 54 | trackpercentTag = "trackPercent" 55 | timematchlengthTag = "timeMatchLength" 56 | goodtimematchlengthTag = 
"goodTimeMatchLength" 57 | initializationLogTag = "initializationLog" 58 | correctInitialTargetsTag = "correctTargets" 59 | falseTargetsTag = "falseTargets" 60 | inverseResidualCovarianceTag = "S_inv" 61 | terminatedTag = "terminated" 62 | nScansTag = "nScans" 63 | radarPeriodTag = "radarPeriod" 64 | ssErrorTag = "ssError" 65 | 66 | totalTimeTag = "Total" 67 | initTimeTag = "Init" 68 | clusterTimeTag = "Cluster" 69 | dynamicWindowTimeTag = "DynN" 70 | optimizationTimeTag = "Optim" 71 | npruneTimeTag = "N-prune" 72 | growingTimeTag = "Process" 73 | ilpPruneTimeTag = "ILP-Prune" 74 | terminateTimeTag = "Terminate" 75 | 76 | timeLogPrecision = 6 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | scipy >= 0.14 2 | numpy >= 1.12 3 | matplotlib 4 | psutil 5 | pytest 6 | coveralls 7 | coverage 8 | pykalman 9 | git+git://github.com/jfrelinger/cython-munkres-wrapper@master -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | Setup script for pyMHT by Erik Liland 2017 3 | """ 4 | from setuptools import find_packages 5 | import os 6 | import sys 7 | from setuptools import setup 8 | 9 | if sys.version_info.major < 3: 10 | sys.exit('Sorry, Python 2 is not supported') 11 | 12 | name = "pyMHT" 13 | version = "2.0" 14 | author = "Erik Liland" 15 | author_email = "erik.liland@gmail.com" 16 | description = "A track oriented multi hypothesis tracker with integer linear programming" 17 | license = "BSD" 18 | keywords = 'mht tomht radar tracking track-split track split multi target multitarget' 19 | url = 'http://autosea.github.io/sf/2016/04/15/radar_ais/' 20 | install_requires = ['matplotlib', 'numpy', 'scipy', 'psutil', 'termcolor', 'Cython'] 21 | packages = find_packages(exclude=['examples', 'docs']) 22 | 23 | setup( 24 | 
name=name, 25 | version=version, 26 | author=author, 27 | author_email=author_email, 28 | description=description, 29 | license=license, 30 | keywords=keywords, 31 | packages=packages, 32 | install_requires=install_requires 33 | ) 34 | -------------------------------------------------------------------------------- /tests/test_classDefinitions.py: -------------------------------------------------------------------------------- 1 | # content of test_sample.py 2 | 3 | 4 | def func(x): 5 | return x + 2 6 | 7 | 8 | def test_answer(): 9 | assert func(3) == 5 10 | -------------------------------------------------------------------------------- /tests/test_initiator.py: -------------------------------------------------------------------------------- 1 | # content of test_sample.py 2 | def func(x): 3 | return x + 2 4 | 5 | 6 | def test_answer(): 7 | assert func(3) == 5 8 | -------------------------------------------------------------------------------- /tests/test_kalman.py: -------------------------------------------------------------------------------- 1 | from pymht.utils import kalman 2 | import numpy as np 3 | from pymht.models import pv 4 | from pymht.models import polar 5 | from pymht.models import ais 6 | 7 | dT = 1.0 8 | x_0 = np.zeros(4) 9 | p = np.power(1.0, 2) 10 | P_0 = np.diag([p, p, p, p]) 11 | A = np.array([[1.0, 0., dT, 0.], 12 | [0., 1.0, 0., dT], 13 | [0., 0., 1.0, 0.], 14 | [0., 0., 0., 1.0]]) 15 | C = np.array([[1.0, 0., 0., 0.], 16 | [0., 1.0, 0., 0.]]) 17 | sigmaQ = 1.0 18 | Q = pv.Q(dT) 19 | sigmaR = 1.0 20 | R = np.eye(2) * np.power(sigmaR, 2) 21 | n = 10 22 | x_0_list = np.array([x_0, ] * n) 23 | P_0_list = np.array([P_0, ] * n) 24 | x_bar_list = np.copy(x_0_list) 25 | P_bar_list = np.copy(P_0_list) 26 | 27 | def test_KalmanFilter_class(): 28 | kf = kalman.KalmanFilter(x_0, P_0, A, C, Q, R) 29 | y = np.ones(2) 30 | kf.filter(y=y) 31 | kf2 = kf.filterAndCopy() 32 | y_tilde = y 33 | kf3 = kf.filterAndCopy(y_tilde) 34 | 35 | 36 | def test_predict(): 
37 | x_bar_list, P_bar_list = kalman.predict(A, Q, x_0_list, P_0_list) 38 | 39 | def test_numpyPredict(): 40 | z_hat_list, S_list, S_inv_list, K_list, P_hat_list = kalman.precalc( 41 | C, R, x_bar_list, P_bar_list) 42 | 43 | gated_z_tilde_list = np.random.random((n, 5, 2)) 44 | gated_x_hat_list = [kalman.numpyFilter(x_bar_list[i], 45 | K_list[i], 46 | gated_z_tilde_list[i]) 47 | for i in range(n)] 48 | -------------------------------------------------------------------------------- /tests/test_models.py: -------------------------------------------------------------------------------- 1 | from pymht.models import pv 2 | 3 | 4 | def test_Q(): 5 | Q_1 = pv.Q(1) 6 | Q_2 = pv.Q(1, 2) 7 | assert Q_1.shape == Q_2.shape 8 | 9 | 10 | def test_R(): 11 | R_1 = pv.R_RADAR() 12 | R_2 = pv.R_RADAR(2) 13 | assert R_1.shape == R_2.shape 14 | 15 | 16 | def test_Phi(): 17 | Phi_1 = pv.Phi(1) 18 | Phi_2 = pv.Phi(2.0) 19 | assert Phi_1.shape == Phi_2.shape 20 | -------------------------------------------------------------------------------- /tests/test_pyTarget.py: -------------------------------------------------------------------------------- 1 | # content of test_sample.py 2 | def func(x): 3 | return x + 2 4 | 5 | 6 | def test_answer(): 7 | assert func(3) == 5 8 | -------------------------------------------------------------------------------- /tests/test_simulator.py: -------------------------------------------------------------------------------- 1 | # content of test_sample.py 2 | import pymht.utils.simulator as sim 3 | import numpy as np 4 | from pymht.models import pv 5 | from pymht.models import ais 6 | from pymht.models import polar 7 | 8 | from pymht.utils.classDefinitions import AIS_message 9 | 10 | seed = 172362 11 | nTargets = 10 12 | radarPeriod = 60./24. 
13 | initTime = 0 14 | simTime = 10 15 | simulationTimeStep = radarPeriod/2 16 | p0 = np.array([0, 0]) 17 | radarRange = 1000 18 | P_d = 0.8 19 | lambda_phi = 1e-6 20 | aisPeriod = radarPeriod 21 | nTests = 10 22 | initialTargets = None 23 | simLists = None 24 | aisMeasurements = None 25 | scanList = None 26 | 27 | sim.seed_simulator(seed) 28 | 29 | 30 | def test_initial_target_generation(): 31 | global initialTargets 32 | initialTargets = sim.generateInitialTargets(nTargets, 33 | p0, 34 | radarRange, 35 | P_d, 36 | pv.sigmaQ_true, 37 | assignMMSI = True) 38 | 39 | def test_simulation_seed_consistency(): 40 | global simLists 41 | simLists = [] 42 | for i in range(nTests): 43 | sim.seed_simulator(seed) 44 | simLists.append(sim.simulateTargets(initialTargets, 45 | simTime, 46 | simulationTimeStep, 47 | pv)) 48 | 49 | for i in range(nTests-1): 50 | for simListA, simListB in zip(simLists[i],simLists[i+1]): 51 | for targetA, targetB in zip(simListA, simListB): 52 | assert targetA == targetB 53 | 54 | def test_scan_simulation_consistency(): 55 | global scanList 56 | scanLists = [] 57 | for _ in range(nTests): 58 | sim.seed_simulator(seed) 59 | scanLists.append(sim.simulateScans(simLists[0], 60 | radarPeriod, 61 | pv.C_RADAR, 62 | pv.R_RADAR(pv.sigmaR_RADAR_true), 63 | lambda_phi, 64 | radarRange, 65 | p0, 66 | shuffle=True, 67 | localClutter=True, 68 | globalClutter=True) 69 | ) 70 | for i in range(nTests-1): 71 | scanListA = scanLists[i] 72 | scanListB = scanLists[i+1] 73 | for measurementListA, measurementListB in zip(scanListA, scanListB): 74 | assert measurementListA == measurementListB 75 | 76 | def test_ais_simulation_consistency(): 77 | """ 78 | Known issue: when using integer time, different initial (target) time, 79 | will give different AIS results depending on the decimal time at init. 
80 | :return: 81 | """ 82 | global aisMeasurements 83 | aisMeasurementsList = [] 84 | for i in range(nTests): 85 | sim.seed_simulator(seed) 86 | aisMeasurementsList.append( 87 | sim.simulateAIS(simLists[i], 88 | ais, 89 | radarPeriod, 90 | initTime)) 91 | 92 | 93 | for i in range(nTests-1): 94 | simA = aisMeasurementsList[i] 95 | simB = aisMeasurementsList[i+1] 96 | for listA, listB in zip(simA,simB): 97 | for messageA, messageB in zip(listA, listB): 98 | assert type(messageA) == AIS_message 99 | assert messageA == messageB 100 | 101 | 102 | if __name__ == '__main__': 103 | test_initial_target_generation() 104 | test_simulation_seed_consistency() 105 | test_scan_simulation_consistency() 106 | test_ais_simulation_consistency() -------------------------------------------------------------------------------- /tests/test_tracker.py: -------------------------------------------------------------------------------- 1 | # content of test_sample.py 2 | def func(x): 3 | return x + 2 4 | 5 | 6 | def test_answer(): 7 | assert func(3) == 5 8 | -------------------------------------------------------------------------------- /tests/text_Position.py: -------------------------------------------------------------------------------- 1 | # content of test_sample.py 2 | def func(x): 3 | return x + 2 4 | 5 | 6 | def test_answer(): 7 | assert func(3) == 5 8 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # content of: tox.ini , put in same dir as setup.py 2 | [tox] 3 | envlist = py35 4 | [testenv] 5 | deps = -rrequirements.txt 6 | commands=pytest --------------------------------------------------------------------------------