├── .gitignore
├── LICENSE
├── README.md
└── src
    ├── kalman.py
    ├── multiple_object_tracker.py
    ├── tracker_base.py
    ├── tracking_constants.py
    └── util.py

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# dotenv
.env

# virtualenv
.venv
venv/
ENV/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2017 Kyle Hounslow

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# multi_tracking
Multiple object tracking using Kalman filters and the Munkres (Hungarian) assignment algorithm.
Detections are matched to existing tracks with SciPy's `linear_sum_assignment`, and each track's bounding box is smoothed by an OpenCV Kalman filter (see `src/`).
--------------------------------------------------------------------------------
/src/kalman.py:
--------------------------------------------------------------------------------
"""
Kalman filter helps smooth noisy detections
"""
import cv2
import numpy as np

__author__ = "Kyle Hounslow"


# Kalman filter with 6 state variables and 4 measurement params
# mostly taken from http://www.robot-home.it/blog/en/software/ball-tracker-con-filtro-di-kalman/
class KalmanFilter(object):
    def __init__(self):
        self.first_run = True
        self.dynamParams = 6
        self.measureParams = 4
        # state is [x1, y1, vx, vy, x2, y2]; measurement is [x1, y1, x2, y2]
        self.kalman = cv2.KalmanFilter(dynamParams=self.dynamParams,
                                       measureParams=self.measureParams)
        self.kalman.measurementMatrix = np.array([[1, 0, 0, 0, 0, 0],
                                                  [0, 1, 0, 0, 0, 0],
                                                  [0, 0, 0, 0, 1, 0],
                                                  [0, 0, 0, 0, 0, 1]], np.float32)
        # Transition matrix (e.g. p(k) = p(k-1) + v(k-1)*dT)
        dT = 0.05  # assume ~20fps
        self.kalman.transitionMatrix = np.array([[1, 0, dT, 0, 0, 0],
                                                 [0, 1, 0, dT, 0, 0],
                                                 [0, 0, 1, 0, 0, 0],
                                                 [0, 0, 0, 1, 0, 0],
                                                 [0, 0, 0, 0, 1, 0],
                                                 [0, 0, 0, 0, 0, 1]], np.float32)
        # process noise covariance matrix (rough values)
        self.kalman.processNoiseCov = np.array([[0.01, 0, 0, 0, 0, 0],
                                                [0, 0.01, 0, 0, 0, 0],
                                                [0, 0, 2.0, 0, 0, 0],
                                                [0, 0, 0, 1.0, 1.0, 0],
                                                [0, 0, 0, 0, 1.0, 0.01],
                                                [0, 0, 0, 0, 0, 0.01]], np.float32)
        # measurement noise covariance matrix (rough values)
        self.kalman.measurementNoiseCov = np.eye(4, dtype=np.float32) * 0.1
        # velocity estimates are populated on the first correct() call
        self.velocity = np.zeros(2, dtype=np.float32)
        self.velocity_unit_vec = np.zeros(2, dtype=np.float32)

    def reset(self):
        # rebuild the whole filter, including the custom matrices set up above
        self.__init__()

    def get_predicted_bb(self):
        pred = self.kalman.predict().T[0]
        pred_bb = np.array([pred[0], pred[1], pred[4], pred[5]])

        return pred_bb

    def get_current_velocity(self):
        return self.velocity.copy()

    def get_current_unit_velocity(self):
        return self.velocity_unit_vec.copy()

    def correct(self, bb):
        # measurement is numpy array [[x1,y1,x2,y2]]
        measurement = np.array([bb], dtype=np.float32).T
        if self.first_run is True:
            self.kalman.statePre = np.array([measurement[0], measurement[1], [0], [0], measurement[2], measurement[3]],
                                            dtype=np.float32)
            self.first_run = False
        corr_bb = self.kalman.correct(measurement).T[0]
        self.velocity = np.array([corr_bb[2], corr_bb[3]])
        # small epsilon on the norm guards against division by zero for a stationary target
        self.velocity_unit_vec = self.velocity / (np.linalg.norm(self.velocity) + 1e-6)
        return np.array([corr_bb[0], corr_bb[1], corr_bb[4], corr_bb[5]])
--------------------------------------------------------------------------------
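A minimal usage sketch for the `KalmanFilter` wrapper above (my illustration, not part of the repo): it feeds a few made-up noisy `[x1, y1, x2, y2]` boxes through `correct()` and reads back the smoothed box, the one-step prediction, and the velocity estimate.

```python
import numpy as np
from kalman import KalmanFilter

kf = KalmanFilter()
# made-up noisy detections of a box drifting to the right
detections = [np.array([10, 10, 50, 50], dtype=np.float32),
              np.array([12, 11, 52, 51], dtype=np.float32),
              np.array([15, 10, 55, 50], dtype=np.float32)]
for det in detections:
    smoothed = kf.correct(det)         # filtered [x1, y1, x2, y2]
    predicted = kf.get_predicted_bb()  # where the filter expects the box next frame
    print(smoothed, predicted, kf.get_current_velocity())
```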
/src/multiple_object_tracker.py:
--------------------------------------------------------------------------------
import numpy as np
import util
from track import Track  # NOTE: track.py is not included in this repository snapshot
from tracker_base import TrackerBase
import tracking_constants as const
from scipy.optimize import linear_sum_assignment


class MultipleObjectTracker(TrackerBase):
    def __init__(self):
        TrackerBase.__init__(self)
        self.tracks = []

    def __save_tracks_to_json(self):
        # note: np.save writes .npy files rather than JSON
        for track in self.tracks:
            if track.is_dead():
                np.save('tracks/' + str(track.uid), track.history)

    def __delete_duplicate_tracks(self):

        # check if tracks' heads are identical; compare each pair once
        for i in range(len(self.tracks)):
            track1 = self.tracks[i]
            for j in range(i + 1, len(self.tracks)):
                track2 = self.tracks[j]
                if util.check_tracks_equal(track1, track2):
                    # print('duplicate found!')
                    # if so, delete the shorter track
                    if track1.get_length() > track2.get_length():
                        track2.delete_me = True
                    else:
                        track1.delete_me = True

        self.tracks = [t for t in self.tracks if t.delete_me is False]

    def __assign_detections_to_tracks_munkres(self, detections, frame_id, save=False):

        # if there are no tracks yet, all detections are new tracks
        if len(self.tracks) == 0:
            for det in detections:
                t = Track()
                t.add_to_track(det)
                self.tracks.append(t)
            return True
        # find distance from all tracks to all detections and formulate dists matrix
        dists = np.zeros(shape=(len(self.tracks), len(detections)))
        for i, track in enumerate(self.tracks):
            predicted_next_bb = track.get_predicted_next_bb()
            for j, det in enumerate(detections):
                dist = util.dist_btwn_bb_centroids(predicted_next_bb, det.bbox)
                if track.is_singular():
                    max_dist = const.MAX_PIXELS_DIST_TRACK_START
                else:
                    max_dist = const.MAX_PIXELS_DIST_TRACK
                if dist > max_dist:
                    dist = 1e6  # set to arbitrarily high number to gate this pairing out
                dists[i, j] = dist
        # set all tracks as unassigned
        for t in self.tracks:
            t.has_match = False
        # assign detections to tracks with the Munkres (Hungarian) algorithm
        assigned_rows, assigned_cols = linear_sum_assignment(dists)
        for idx, row in enumerate(assigned_rows):
            col = assigned_cols[idx]
            # if a track is assigned a detection with dist=1e6, discard that assignment
            if dists[row, col] != 1e6:
                self.tracks[row].has_match = True
                detections[col].has_match = True
                self.tracks[row].add_to_track(detections[col])
                self.tracks[row].num_misses = 0

        # create new tracks from unassigned detections
        for det in detections:
            if det.has_match is False:
                t = Track()
                t.add_to_track(det)
                self.tracks.append(t)

        # keep track of how many times a track has gone unassigned
        for t in self.tracks:
            if t.has_match is False:
                t.num_misses += 1
                # t.propagate_track(frame_id=frame_id)

        # cleanup any duplicate tracks that have formed (TODO: how do they form?)
        self.__delete_duplicate_tracks()
        # save dead tracks before deletion
        if save:
            self.__save_tracks_to_json()
        # remove dead tracks
        self.tracks = [t for t in self.tracks if (t.is_dead() is False and t.delete_me is False)]
        return True

    def __assign_detections_to_tracks(self, detections, frame_id, save=False):
        # if there are no tracks yet, all detections are new tracks
        if len(self.tracks) == 0:
            for det in detections:
                t = Track()
                t.add_to_track(det)
                self.tracks.append(t)
            return True
        # assign detections to existing tracks
        for track in self.tracks:
            track.has_match = False
            predicted_next_bb = track.get_predicted_next_bb()
            for det in detections:
                # singular tracks search radially
                if track.is_singular():
                    iou = util.bb_intersection_over_union(predicted_next_bb, det.bbox)
                    dist = util.dist_btwn_bb_centroids(predicted_next_bb, det.bbox)
                    if dist < const.MAX_PIXELS_DIST_TRACK_START and iou > const.MIN_IOU_TRACK_START:
                        track.add_to_track(det)
                        track.has_match = True
                        track.num_misses = 0
                        break
                # established tracks search in predicted location
                elif track.is_established():
                    iou = util.bb_intersection_over_union(predicted_next_bb, det.bbox)
                    dist = util.dist_btwn_bb_centroids(predicted_next_bb, det.bbox)
                    if dist < const.MAX_PIXELS_DIST_TRACK and iou > const.MIN_IOU_TRACK:
                        track.add_to_track(det)
                        track.has_match = True
                        track.num_misses = 0
                        # TODO: handle case where decision is tough (2 detections very close)
                        break
            # if no detection was assigned, give a penalty to the track
            if not track.has_match:
                # delete singular tracks that didn't get assigned (probably a false detection)
                if track.num_misses > 0:
                    if track.is_singular():
                        track.delete_me = True
                    else:
                        # continue track using predicted state
                        track.propagate_track(frame_id=frame_id)
                track.num_misses += 1
            else:
                # reset match flag for the next frame
                track.has_match = False

        for i, det in enumerate(detections):
            # if det hasn't been assigned yet, create a new track
            if det.num_matches == 0:
                t = Track()
                t.add_to_track(det)
                self.tracks.append(t)
            elif det.num_matches > 1:
                # TODO: resolve detections with multiple matches
                # print('multiple assignment!! (num_matches({})={})'.format(i, det.num_matches))
                pass

        # cleanup any duplicate tracks that have formed (TODO: how do they form?)
        self.__delete_duplicate_tracks()
        # save dead tracks before deletion
        if save:
            self.__save_tracks_to_json()
        # remove dead tracks
        self.tracks = [t for t in self.tracks if (t.is_dead() is False and t.delete_me is False)]
        return True

    def update_tracks(self, detections, frame_id, save=False):
        self.__assign_detections_to_tracks_munkres(detections, frame_id, save=save)
        # self.__assign_detections_to_tracks(detections, frame_id, save=save)

    def draw_tracks(self, img):
        for track in self.tracks:
            track.draw_history(img)
--------------------------------------------------------------------------------
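A hedged end-to-end sketch of how `MultipleObjectTracker` might be driven (my illustration, not repo code). The snapshot does not include `track.py` or a detector, so the `Detection` class and `detect_objects()` below are hypothetical stand-ins that only show the attributes the tracker reads (`bbox`, `has_match`, `num_matches`).

```python
import cv2
from multiple_object_tracker import MultipleObjectTracker


class Detection(object):
    """Hypothetical detection container; the real one lives outside this snapshot."""
    def __init__(self, bbox):
        self.bbox = bbox          # [x1, y1, x2, y2]
        self.has_match = False    # set by the tracker during assignment
        self.num_matches = 0      # used by the greedy assignment path


def detect_objects(frame):
    # placeholder for any detector that returns [x1, y1, x2, y2] boxes
    return []


tracker = MultipleObjectTracker()
cap = cv2.VideoCapture(0)
frame_id = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    detections = [Detection(bb) for bb in detect_objects(frame)]
    tracker.update_tracks(detections, frame_id=frame_id)
    tracker.draw_tracks(frame)
    cv2.imshow('tracks', frame)
    frame_id += 1
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
```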
/src/tracker_base.py:
--------------------------------------------------------------------------------
"""
Base class for different flavours of Tracking algorithms
"""
from abc import abstractmethod


class TrackerBase(object):
    def __init__(self):
        pass

    @abstractmethod
    def update_tracks(self, detections, frame_id, save=False):
        """
        :param detections: list of detection objects, each carrying a bounding box in [x1,y1,x2,y2] format
        :param frame_id: current frame id from camera capture
        :param save: whether to save history of tracks to file
        :return:
        """
        pass

    @abstractmethod
    def draw_tracks(self, img):
        """
        :param img: draw all current tracks to this img
        :return:
        """
        pass
--------------------------------------------------------------------------------
/src/tracking_constants.py:
--------------------------------------------------------------------------------
# TODO: add to cfg
MAX_NUM_MISSES_TRACK = 3
MAX_PIXELS_DIST_TRACK = 30  # TODO: use a ratio of bbox sizes instead (a smaller bbox means the object is further away, so allow a shorter dist)
MAX_PIXELS_DIST_TRACK_START = int(MAX_PIXELS_DIST_TRACK / 1.5)  # same TODO as above
MIN_IOU_TRACK = 0.5
MIN_IOU_TRACK_START = 0.5
--------------------------------------------------------------------------------
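A small standalone sketch (my addition, with made-up numbers) of the gating idea used in `__assign_detections_to_tracks_munkres`: centroid distances above the `MAX_PIXELS_DIST_*` threshold are pushed to a large sentinel cost, so that after SciPy's `linear_sum_assignment` those pairings can be recognised and discarded.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

GATE = 1e6      # sentinel cost for "too far to be the same object"
MAX_DIST = 30   # mirrors MAX_PIXELS_DIST_TRACK

# made-up centroid distances: 2 tracks (rows) x 3 detections (cols)
dists = np.array([[ 4.0, 80.0, 55.0],
                  [70.0,  9.0, 95.0]])
dists = np.where(dists > MAX_DIST, GATE, dists)

rows, cols = linear_sum_assignment(dists)
for r, c in zip(rows, cols):
    if dists[r, c] != GATE:
        print('track {} <- detection {} (dist={})'.format(r, c, dists[r, c]))
    else:
        print('track {} left unmatched'.format(r))
# expected: track 0 <- detection 0, track 1 <- detection 1; detection 2 would start a new track
```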
/src/util.py:
--------------------------------------------------------------------------------
"""
Some computer vision utility functions
"""
import cv2, os, glob
import numpy as np
import math


def resize_pad_image(img, new_dims, pad_output=True):
    old_height, old_width, ch = img.shape
    old_ar = float(old_width) / float(old_height)
    new_ar = float(new_dims[0]) / float(new_dims[1])
    undistorted_scale_factor = [1.0, 1.0]  # if you want to resize bounding boxes on a padded img you'll need this
    if pad_output is True:
        if new_ar > old_ar:
            new_width = old_height * new_ar
            padding = abs(new_width - old_width)
            img = cv2.copyMakeBorder(img, 0, 0, 0, int(padding), cv2.BORDER_CONSTANT, None, [0, 0, 0])
            undistorted_scale_factor = [float(old_width) / (float(new_dims[1]) * old_ar),
                                        float(old_height) / float(new_dims[1])]
        elif new_ar < old_ar:
            new_height = old_width / new_ar
            padding = abs(new_height - old_height)
            img = cv2.copyMakeBorder(img, 0, int(padding), 0, 0, cv2.BORDER_CONSTANT, None, [0, 0, 0])
            undistorted_scale_factor = [float(old_width) / float(new_dims[0]),
                                        float(old_height) / (float(new_dims[0]) / old_ar)]
        elif new_ar == old_ar:
            scale_factor = float(old_width) / new_dims[0]
            undistorted_scale_factor = [scale_factor, scale_factor]
    outimg = cv2.resize(img, (new_dims[0], new_dims[1]))
    return outimg, undistorted_scale_factor


def crop_img(bbox, im):
    x1 = int(bbox[0])
    y1 = int(bbox[1])
    x2 = int(bbox[2])
    y2 = int(bbox[3])
    cropped_img = im[y1:y2, x1:x2]
    return cropped_img


def compute_dist(vec1, vec2, mode='cosine'):
    """
    compute the distance between two given vectors.
    :param vec1: np.array vector
    :param vec2: np.array vector
    :param mode: 'cosine' for cosine distance; 'l2' for l2 norm distance
    :return: distance for the requested mode, or None if the mode is unknown
    """
    if mode == 'cosine':
        dist = 1 - np.dot(vec1, vec2) / np.linalg.norm(vec1) / np.linalg.norm(vec2)
    elif mode == 'l2':
        dist = np.linalg.norm(vec1 - vec2)
    else:
        dist = None
    return dist


def make_grids_of_images_from_folder(images_path, image_shape, grid_shape):
    """
    makes grids of images in numpy array format from an image folder.

    :param images_path: string, path to images folder
    :param image_shape: tuple, size each image will be resized to for display (width, height)
    :param grid_shape: tuple, shape of image grid as (cols, rows)
    :return: list of grid images in numpy array format

    example usage: grids = make_grids_of_images_from_folder('/Pictures', (64, 64), (5, 5))
    """
    # get all images from folder
    img_path_glob = glob.iglob(os.path.join(images_path, '*'))
    img_path_list = []
    for ip in img_path_glob:
        if ip.endswith('.jpg') or ip.endswith('.jpeg') or ip.endswith('.png'):
            img_path_list.append(ip)
    if len(img_path_list) < 1:
        print('No images found at {}'.format(images_path))
        return None
    image_grids = []
    # start with black canvas to draw images to
    grid_image = np.zeros(shape=(image_shape[1] * grid_shape[1], image_shape[0] * grid_shape[0], 3),
                          dtype=np.uint8)
    cursor_pos = [0, 0]
    for img_path in img_path_list:
        img = cv2.imread(img_path)
        if img is None:
            print('ERROR: reading {}. skipping.'.format(img_path))
            continue
        img = cv2.resize(img, image_shape)
        # draw image to black canvas
        grid_image[cursor_pos[1]:cursor_pos[1] + image_shape[1], cursor_pos[0]:cursor_pos[0] + image_shape[0]] = img
        cursor_pos[0] += image_shape[0]  # increment cursor x position
        if cursor_pos[0] >= grid_shape[0] * image_shape[0]:
            cursor_pos[1] += image_shape[1]  # increment cursor y position
            cursor_pos[0] = 0
            if cursor_pos[1] >= grid_shape[1] * image_shape[1]:
                # grid is full: store it and start a new black canvas
                image_grids.append(grid_image)
                grid_image = np.zeros(shape=(image_shape[1] * grid_shape[1], image_shape[0] * grid_shape[0], 3),
                                      dtype=np.uint8)
                cursor_pos = [0, 0]
    # keep the last, partially filled grid as well
    if cursor_pos != [0, 0]:
        image_grids.append(grid_image)

    return image_grids


def make_grids_of_images_from_list(image_list, image_shape, grid_shape):
    """
    makes grids of images in numpy array format from a list of images.

    :param image_list: list of images in numpy array format
    :param image_shape: tuple, size each image will be resized to for display (width, height)
    :param grid_shape: tuple, shape of image grid as (cols, rows)
    :return: list of grid images in numpy array format

    example usage: grids = make_grids_of_images_from_list(images, (64, 64), (5, 5))
    """
    image_grids = []
    # start with black canvas to draw images to
    grid_image = np.zeros(shape=(image_shape[1] * grid_shape[1], image_shape[0] * grid_shape[0], 3),
                          dtype=np.uint8)
    cursor_pos = [0, 0]
    for img in image_list:
        img = cv2.resize(img, image_shape)
        # draw image to black canvas
        grid_image[cursor_pos[1]:cursor_pos[1] + image_shape[1], cursor_pos[0]:cursor_pos[0] + image_shape[0]] = img
        cursor_pos[0] += image_shape[0]  # increment cursor x position
        if cursor_pos[0] >= grid_shape[0] * image_shape[0]:
            cursor_pos[1] += image_shape[1]  # increment cursor y position
            cursor_pos[0] = 0
            if cursor_pos[1] >= grid_shape[1] * image_shape[1]:
                # grid is full: store it and start a new black canvas
                image_grids.append(grid_image)
                grid_image = np.zeros(shape=(image_shape[1] * grid_shape[1], image_shape[0] * grid_shape[0], 3),
                                      dtype=np.uint8)
                cursor_pos = [0, 0]
    # keep the last, partially filled grid as well
    if cursor_pos != [0, 0]:
        image_grids.append(grid_image)

    return image_grids


def bb_intersection_over_union(boxA, boxB):
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])

    # compute the area of the intersection rectangle (zero when the boxes do not overlap)
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)

    # compute the area of both the prediction and ground-truth
    # rectangles
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)

    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
    iou = interArea / float(boxAArea + boxBArea - interArea)

    # return the intersection over union value
    return iou


def centroid_from_bb(bb):
    x1, y1, x2, y2 = bb
    w = abs(x2 - x1)
    h = abs(y2 - y1)
    c_x = x1 + w / 2
    c_y = y1 + h / 2

    return np.array([c_x, c_y])


def dist_btwn_bb_centroids(bb1, bb2):
    dx, dy = centroid_from_bb(bb1) - centroid_from_bb(bb2)
    dist = math.sqrt(dx * dx + dy * dy)
    return dist


def wid_ht_from_bb(bb):
    wid = int(abs(bb[2] - bb[0]))
    ht = int(abs(bb[3] - bb[1]))
    return wid, ht


def check_tracks_equal(track1, track2):
    t1_bb = track1.get_latest_bb()
    t2_bb = track2.get_latest_bb()
    dist = np.linalg.norm(np.asarray(t2_bb) - np.asarray(t1_bb))
    return dist < 50


def clamp_negative_nums(bb):
    # clamp any negative coordinates to zero
    return [max(0, pnt) for pnt in bb]


def bb_has_width_height(bb):
    w = int(bb[2] - bb[0])
    h = int(bb[3] - bb[1])
    return w > 1 and h > 1


def bb_as_ints(bb):
    return [int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3])]
--------------------------------------------------------------------------------
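To close, a quick numeric sanity check (my addition, values made up) for the bounding-box helpers in util.py: two heavily overlapping boxes should give a high IoU and a small centroid distance, which is exactly what the association gates in tracking_constants.py test against.

```python
import util

box_a = [10, 10, 50, 50]
box_b = [12, 12, 52, 52]

print(util.bb_intersection_over_union(box_a, box_b))  # ~0.83, well above MIN_IOU_TRACK = 0.5
print(util.dist_btwn_bb_centroids(box_a, box_b))      # ~2.8 px, well below MAX_PIXELS_DIST_TRACK = 30
print(util.centroid_from_bb(box_a))                   # [30 30]
```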