├── .gitignore ├── README.md ├── main.py └── mht.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | .vscode/ 3 | __pycache__/ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Multiple Hypothesis Tracking 2 | 3 | This is an implementation of the Multiple Hypothesis Tracking (MHT) algorithm [1, 2, 3]. 4 | See `main.py` for a demonstration of MHT on synthetic (fake) detections. 5 | 6 | ***If you have any questions about this source code, please feel free to contact me.*** 7 | 8 | ## Dependencies 9 | 10 | - opencv, numpy 11 | - [anytree](https://anytree.readthedocs.io/en/latest/) 12 | - [gurobi](https://www.gurobi.com/) or [cvxpy](https://www.cvxpy.org/) 13 | 14 | ## References 15 | 16 | - [1] Blackman, Samuel, and Robert Popoli. "Design and Analysis of Modern Tracking Systems." Norwood, MA: Artech House, 1999. 17 | - [2] Yoon, Kwangjin, Young-min Song, and Moongu Jeon. "Multiple hypothesis tracking algorithm for multi-target multi-camera tracking with disjoint views." IET Image Processing 12.7 (2018): 1175-1184. 18 | - [3] Kim, Chanho, et al. "Multiple hypothesis tracking revisited." Proceedings of the IEEE International Conference on Computer Vision. 2015. -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import io 2 | import sys 3 | import cv2 4 | import copy 5 | import numpy as np 6 | import time 7 | from mht import MHTTracker 8 | 9 | DEBUGGING = True 10 | 11 | def fakeDetections(imwd, imht, time, obstacle, p_D=0.92, m_FA=3): 12 | obj_wd = 35.0 13 | obj_ht = 35.0 14 | size_std = 10.0 15 | m_FA = m_FA 16 | stride = 15 17 | p_FA = m_FA*stride*stride/(imwd*imht) # number of FAs per area and per scan 18 | p_D = p_D 19 | precision_D = 2.0 20 | margin = 3 21 | 22 | N_objects = 4 23 | obj_m = {1:((-margin, imht*0.5),(imwd+margin, imht*0.5)), 2:((-margin, imht*0.2),(imwd+margin, imht*0.8)), 24 | 3:((-margin, imht*0.8),(imwd+margin, imht*0.2))} 25 | fp_patch = [] 26 | fa_set = [] 27 | tp_patch = [] 28 | tp_set = [] 29 | for h in range(0, imht, stride): 30 | for w in range(0, imwd, stride): 31 | if np.random.random_sample() < p_FA: 32 | cx, cy = w, h 33 | sz = np.fabs(np.random.normal([obj_wd, obj_ht], size_std)) + 2 34 | sz = np.around(sz) 35 | x1 = round(cx-sz[0]/2) 36 | y1 = round(cy-sz[1]/2) 37 | x1 = 0 if x1 < 0 else x1 38 | y1 = 0 if y1 < 0 else y1 39 | sz[0] = imwd-x1-1 if x1+sz[0] >= imwd else sz[0] 40 | sz[1] = imht-y1-1 if y1+sz[1] >= imht else sz[1] 41 | conf = np.random.random_sample() 42 | fa_set.append([x1, y1, sz[0], sz[1], conf, -1]) 43 | fa_im = np.random.random_sample((int(sz[1]),int(sz[0])))*255 44 | fa_im = fa_im.astype(np.uint8) 45 | fp_patch.append(fa_im) 46 | 47 | for o in obj_m: 48 | s = obj_m[o][0] 49 | f = obj_m[o][1] 50 | cx = (f[0]-s[0]) * time + s[0] 51 | cy = (f[1]-s[1]) * time + s[1] 52 | cx = cx + np.random.normal(0, precision_D) 53 | cy = cy + np.random.normal(0, precision_D) 54 | sz = np.fabs(np.random.normal([obj_wd, obj_ht], size_std/2)) + 2 55 | sz = np.around(sz) 56 | if cx < 0 or cx >= imwd or cy < 0 or cy >= imht: 57 | continue 58 | if cx >= obstacle[0] and cx <= obstacle[0]+obstacle[2] and cy >= obstacle[1] and cy <= obstacle[1]+obstacle[3]: 59 | continue 60 | x1 = round( cx - sz[0]/2 ) 61 | y1 = round( cy - sz[1]/2 ) 62
| x1 = 0 if x1 < 0 else x1 63 | y1 = 0 if y1 < 0 else y1 64 | sz[0] = imwd-x1-1 if x1+sz[0] >= imwd else sz[0] 65 | sz[1] = imht-y1-1 if y1+sz[1] >= imht else sz[1] 66 | if np.random.random_sample() < p_D: 67 | conf = np.random.normal(0.8, 0.2) 68 | if conf > 0.99: conf = 0.995 69 | tp_set.append([x1, y1, sz[0], sz[1], conf, o]) 70 | else: 71 | tp_set.append([x1, y1, sz[0], sz[1], -1, o]) 72 | mu = o/N_objects 73 | std = 1/(N_objects*3) 74 | tp_im = np.random.normal(mu, std, (int(sz[1]), int(sz[0]))) 75 | tp_im = np.clip(tp_im, a_min=0, a_max=1)*255 76 | tp_im = tp_im.astype(np.uint8) 77 | tp_patch.append(tp_im) 78 | 79 | if True: 80 | laps = 3 81 | myid = len(obj_m)+1 82 | radius = min([imwd,imht])/2.5 83 | t = time*np.pi*2*laps 84 | cx = radius*np.cos(-t**1.0) + imwd/2 85 | cy = radius*np.sin(-t**1.0) + imht/2 86 | sz = np.fabs(np.random.normal([obj_wd, obj_ht], size_std/2)) + 2 87 | sz = np.around(sz) 88 | if not (cx >= obstacle[0] and cx <= obstacle[0]+obstacle[2] and cy >= obstacle[1] and cy <= obstacle[1]+obstacle[3]): 89 | x1 = round( cx - sz[0]/2 ) 90 | y1 = round( cy - sz[1]/2 ) 91 | x1 = 0 if x1 < 0 else x1 92 | y1 = 0 if y1 < 0 else y1 93 | sz[0] = imwd-x1-1 if x1+sz[0] >= imwd else sz[0] 94 | sz[1] = imht-y1-1 if y1+sz[1] >= imht else sz[1] 95 | if np.random.random_sample() < p_D : 96 | tp_set.append([x1, y1, sz[0], sz[1], np.random.normal(0.8, 0.2), myid]) 97 | else: 98 | tp_set.append([x1, y1, sz[0], sz[1], -1, myid]) 99 | mu = myid/N_objects 100 | std = 1/(N_objects*3) 101 | tp_im = np.random.normal(mu, std, (int(sz[1]), int(sz[0]))) 102 | tp_im = np.clip(tp_im, a_min=0, a_max=1)*255 103 | tp_im = tp_im.astype(np.uint8) 104 | tp_patch.append(tp_im) 105 | return tp_set, fa_set, tp_patch, fp_patch 106 | 107 | def compute_similarity(detections, tracks, width, height): 108 | wd = width 109 | ht = height 110 | sim = {} 111 | for f1 in detections: 112 | im1 = detections[f1]['app'] 113 | im1 = cv2.resize(im1, (wd, ht)) 114 | for f2 in tracks: 115 | im2 = tracks[f2]['app'] 116 | im2 = cv2.resize(im2, (wd, ht)) 117 | sim[(f1,f2)] = np.exp(-((im1/255.0-im2/255.0)**2).sum()/(2*ht)) 118 | return sim 119 | 120 | if __name__ == '__main__': 121 | im_width = 800 122 | im_height = 600 123 | patch_wd = 50 124 | patch_ht = 50 125 | end_of_the_world = 800 126 | obstacle = (600, 275, 70, 50) 127 | obstacle_color = (200, 200, 200) 128 | np.random.seed(20190523) 129 | 130 | cv2.namedWindow('canvas') 131 | key = 0 132 | nframe = 0 133 | 134 | params = {'K':10, 'init_score':1.2, 'kalman_constant_noise':False, 'kalman_Q_xy':0.15, 'kalman_Q_vel':0.035, 'kalman_R':0.15, 135 | 'P_D':0.9, 'P_FA':0.001, 'kin_null':0.2, 'distance_threshold':6, 'canonical_kin_prob':False, 'max_scale_change':2, 136 | 'appearance_weight':0.7, 'app_null':0.2, 'max_missing':100, 'min_track_quality':0.5, 'min_track_length':20, 'max_num_leaves':8, 137 | 'use_gurobi':True, 'min_det_conf':-1.0} 138 | mht = MHTTracker(params) 139 | while key != 27 and nframe <= end_of_the_world: 140 | print('\nframe: {}'.format(nframe)) 141 | canvas = np.zeros((im_height, im_width,3), np.uint8) 142 | cv2.rectangle(canvas, (obstacle[0],obstacle[1]), (obstacle[0]+obstacle[2],obstacle[1]+obstacle[3]), obstacle_color, -1) 143 | 144 | tp_set, fp_set, tp_patch, fp_patch = fakeDetections(im_width, im_height, nframe/end_of_the_world, obstacle, p_D=0.8, m_FA=3) 145 | detections = {} 146 | 147 | # draw detection results 148 | counter = 1 149 | for i, fp in enumerate( fp_set ): 150 | l, t = int(round(fp[0])), int(round(fp[1])) 151 | r, b = 
int(round(fp[0]+fp[2])), int(round(fp[1]+fp[3])) 152 | for c in range(3): 153 | canvas[t:b,l:r,c] = fp_patch[i] 154 | cv2.rectangle(canvas, (l, t), (r, b), (0,0,255), 2) 155 | detections[counter] = {'det':fp[:-1]+[nframe, counter, 0], 'app':fp_patch[i]} 156 | counter+=1 157 | for i, tp in enumerate(tp_set): 158 | l, t = int(round(tp[0])), int(round(tp[1])) 159 | r, b = int(round(tp[0]+tp[2])), int(round(tp[1]+tp[3])) 160 | if tp[4] != -1: 161 | for c in range(3): 162 | canvas[t:b,l:r,c] = tp_patch[i] 163 | cv2.rectangle(canvas, (l, t), (r, b), (255,0,0), 2) 164 | detections[counter] = {'det':tp[:-1]+[nframe, counter, 0], 'app':tp_patch[i]} 165 | counter+=1 166 | else: 167 | pass 168 | #cv2.rectangle(canvas, (l, t), (r, b), (0,255,255), 2) 169 | features, _ = mht.getTrackPatches() 170 | sim = compute_similarity(detections, features, patch_wd, patch_ht) 171 | tracks = mht.doTracking(nframe, detections, sim, canvas) 172 | for t in tracks: 173 | item = tracks[t] 174 | anode = mht.hypothesis_set[item[0]].nodes[item[1]] 175 | adet = anode.detection 176 | xy1 = anode.kalman_state.copy() 177 | #cv2.putText(canvas, str(t), (int(round(xy1[0,0])), int(round(xy1[1,0]))), cv2.FONT_HERSHEY_COMPLEX, 0.8, (255,0,255), 2) 178 | #assert adet[5] == nframe, 'Check' 179 | assert anode.is_leaf, 'Check 2' 180 | route = anode.path 181 | #fileout.write('{}, {}, {}, {}\n'.format(nframe, anode.track_id, xy1[0,0], xy1[1,0])) 182 | det_id = anode.det_index[-1] 183 | assert len(anode.det_index) == anode.status[1] 184 | if not anode.is_dummy and DEBUGGING: 185 | assert np.allclose(mht.dets_set[det_id[0]][det_id[1]]['app'], detections[det_id[1]]['app'] ) 186 | assert mht.dets_set[det_id[0]][det_id[1]]['det'] == detections[det_id[1]]['det'] 187 | for p in reversed(route): 188 | if p == anode: 189 | continue 190 | xy2 = p.kalman_state.copy() 191 | if p.status[4] == 0: 192 | cv2.line(canvas, (int(round(xy1[0,0])), int(round(xy1[1,0])) ), 193 | (int(round(xy2[0,0])), int(round(xy2[1,0])) ),(255,0,255), 2) 194 | assert p.detection[7] == 0, 'Check 3' 195 | else: 196 | cv2.line(canvas, (int(round(xy1[0,0])), int(round(xy1[1,0])) ), 197 | (int(round(xy2[0,0])), int(round(xy2[1,0])) ), (255,255,0), 2) 198 | assert p.detection[7] == 1, 'Check 3' 199 | xy1 = xy2 200 | 201 | cv2.imshow('canvas', canvas) 202 | key = cv2.waitKey(1) 203 | nframe+=1 204 | mht.concludeTracks() 205 | 206 | print('\nResults:') 207 | for t in mht.confirmed_tracks: 208 | canvas2 = np.zeros((im_height, im_width,3), np.uint8) 209 | cv2.rectangle(canvas2, (obstacle[0],obstacle[1]), (obstacle[0]+obstacle[2],obstacle[1]+obstacle[3]), obstacle_color, -1) 210 | track = mht.confirmed_tracks[t] 211 | colorid = cv2.applyColorMap( np.array([(t * 32) % 256], dtype=np.uint8 ), cv2.COLORMAP_HSV ) 212 | colors = (int(colorid[0,0,0]), int(colorid[0,0,1]), int(colorid[0,0,2])) 213 | for l in range(1,len(track)): 214 | t1 = track[l-1] 215 | t2 = track[l] 216 | x1, y1 = int(round(t1[1]+t1[3]/2)), int(round(t1[2]+t1[4]/2)) 217 | x2, y2 = int(round(t2[1]+t2[3]/2)), int(round(t2[2]+t2[4]/2)) 218 | 219 | dummy = t2[5] 220 | if dummy == 0: 221 | cv2.line(canvas2, (x1, y1), (x2, y2), colors, 2) 222 | else: 223 | cv2.line(canvas2, (x1, y1), (x2, y2), (int(colors[0]/2),int(colors[1]/2),int(colors[2]/2)), 1) 224 | x1, y1 = int(round(track[-1][1] + track[-1][3]/2)), int(round(track[-1][2] + track[-1][4]/2)) 225 | time_end = int(round(track[-1][0])) 226 | cv2.putText(canvas2, str(t), (x1, y1-25), cv2.FONT_HERSHEY_COMPLEX, .8, colors, 2) 227 | cv2.putText(canvas2, 
'end:'+str(time_end), (x1, y1), cv2.FONT_HERSHEY_COMPLEX, .5, colors, 2) 228 | x1, y1 = int(round(track[0][1] + track[0][3]/2)), int(round(track[0][2] + track[0][4]/2)) 229 | time_start = int(round(track[0][0])) 230 | cv2.putText(canvas2, 'start:'+str(time_start), (x1, y1), cv2.FONT_HERSHEY_COMPLEX, .5, colors, 2) 231 | cv2.imshow('ObjectID_{}'.format(t), canvas2) 232 | print('\tObjectID:{}, start:{}, end:{}, length:{}'.format(t, time_start, time_end, len(track))) 233 | cv2.waitKey() 234 | print('finished.') 235 | -------------------------------------------------------------------------------- /mht.py: -------------------------------------------------------------------------------- 1 | import io 2 | import sys 3 | import cv2 4 | import copy 5 | import numpy as np 6 | from anytree import Node, RenderTree, render 7 | from anytree.search import findall 8 | import gurobipy as grb 9 | import cvxpy, cvxopt 10 | import time 11 | 12 | DEBUGGING = True 13 | 14 | class TrackTree(): 15 | INIT_NODE = 1 16 | STATUS = {'tracking':0, 'end':1, 'purge':2} 17 | def __init__(self, treeNum, detection, init_score, track_id, P_init): 18 | self.nodes = dict() 19 | self.history = {'dets':[], 'estimates':[]} 20 | self.v_num = self.INIT_NODE # Tree is initialized with INIT_NODE 21 | self.root_num = self.INIT_NODE # save current root node number 22 | self.treeNum = treeNum 23 | self.valid_track = [-1, -1, -1, -1] # indicator, node num, track id, score 24 | self.nodes[self.v_num] = Node('{}({})'.format(self.v_num, treeNum), parent=None) 25 | self.nodes[self.v_num].detection = detection # (x, y, w, h, b, t, i, dummy) , b=confidence, t=frame, i=i-th detection at the frame, dummy=dummy indicator 26 | self.nodes[self.v_num].is_dummy = False 27 | self.nodes[self.v_num].det_index = [(detection[5], detection[6])] 28 | self.nodes[self.v_num].scores = [init_score, 0, 0, 0] # score, app_score, st_score, detection confidence 29 | self.nodes[self.v_num].status = [1, 1, 0, 0, 0, self.STATUS['tracking']] # [ total_length, num_obs, num_total_missing, num_conseq_missing, dummy_node_indicator, status ] 30 | self.nodes[self.v_num].kalman_state = np.array([ [detection[0]+detection[2]/2], [detection[1]+detection[3]/2], [0], [0]]) # cx, cy, vx, vy 31 | self.nodes[self.v_num].kalman_cov = P_init 32 | self.nodes[self.v_num].track_id = track_id 33 | self.nodes[self.v_num].v_num = self.v_num 34 | self.incrementVertexNum() 35 | 36 | def addNode(self, node_info, parent_node): 37 | if DEBUGGING: assert self.v_num not in self.nodes, 'fatal error: node num' 38 | self.nodes[self.v_num] = Node('{}({})'.format(self.v_num, self.treeNum), parent=parent_node) 39 | self.nodes[self.v_num].detection = node_info['detection'] 40 | self.nodes[self.v_num].is_dummy = node_info['is_dummy'] 41 | self.nodes[self.v_num].det_index = node_info['det_index'] 42 | self.nodes[self.v_num].scores = node_info['scores'] 43 | self.nodes[self.v_num].status = node_info['status'] 44 | self.nodes[self.v_num].kalman_state = node_info['kalman_state'] 45 | self.nodes[self.v_num].kalman_cov = node_info['kalman_cov'] 46 | self.nodes[self.v_num].track_id = node_info['track_id'] 47 | self.nodes[self.v_num].v_num = self.v_num 48 | return self.getVertexNum_and_Increment() 49 | 50 | def getParent(self, node_idx): 51 | p = self.nodes[node_idx].parent 52 | if p is None: 53 | return None 54 | else: 55 | return p.v_num 56 | 57 | def getChildren(self, node_idx): 58 | children = [c.v_num for c in self.nodes[node_idx].children] 59 | return children 60 | 61 | def getNode(self, node_idx): 62 | 
return self.nodes[node_idx] 63 | 64 | def findLeaves(self): 65 | leaves = [l.v_num for l in self.nodes[self.root_num].leaves] 66 | return leaves 67 | 68 | def getRoot(self): 69 | return self.root_num 70 | 71 | def removeBranch(self, node_idx): 72 | assert self.nodes[node_idx].is_leaf 73 | root = self.nodes[node_idx].root 74 | path = self.nodes[node_idx].path 75 | for p in reversed(path): 76 | if not self.nodes[p.v_num].is_leaf: 77 | break 78 | self.nodes[p.v_num].parent = None 79 | del self.nodes[p.v_num] 80 | 81 | def detachSubTree(self, new_root): 82 | if self.nodes[new_root].is_root: 83 | return 84 | 85 | path = self.nodes[new_root].path 86 | for p in path: 87 | if p.v_num == new_root: 88 | continue 89 | self.history['dets'].append(tuple(p.detection)) 90 | kstate = (p.kalman_state[0,0], p.kalman_state[1,0]) 91 | self.history['estimates'].append(kstate) 92 | 93 | prunedLeaves = [] 94 | allNodesIdx = list(self.nodes.keys()) 95 | descendIdx = { d.v_num for d in self.nodes[new_root].descendants } 96 | for n in allNodesIdx: 97 | if not(n in descendIdx) and n != new_root: 98 | if self.nodes[n].is_leaf: 99 | prunedLeaves.append(n) 100 | self.nodes[n].parent = None 101 | del self.nodes[n] 102 | self.nodes[new_root].parent = None 103 | self.root_num = new_root 104 | return prunedLeaves 105 | 106 | def incrementVertexNum(self): 107 | self.v_num += 1 108 | 109 | def getVertexNum_and_Increment(self): 110 | self.v_num += 1 111 | return self.v_num - 1 112 | 113 | class MHTTracker(): 114 | def __init__(self, parameters): 115 | self.id_pool = 1 # give new ID 1) after updated with a det, 2) init new track with a det, 116 | self.tree_num = 1 117 | self.hypothesis_set = dict() 118 | self.confirmed_tracks = dict() 119 | self.conflictList = dict() 120 | self.dets_set = dict() 121 | self.eps = 1e-10 122 | 123 | self.min_det_conf = parameters['min_det_conf'] 124 | self.max_scale_change = parameters['max_scale_change'] 125 | self.use_denom = parameters['canonical_kin_prob'] 126 | self.use_gurobi = parameters['use_gurobi'] 127 | self.max_num_leaves = parameters['max_num_leaves'] 128 | self.min_track_length = parameters['min_track_length'] 129 | self.K = parameters['K'] 130 | self.init_score = parameters['init_score'] 131 | self.P_D = parameters['P_D'] 132 | self.P_FA = parameters['P_FA'] # false alarms per area 133 | self.d_th = parameters['distance_threshold'] 134 | self.kin_null = parameters['kin_null'] 135 | self.max_missing = parameters['max_missing'] 136 | self.w_appearance = parameters['appearance_weight'] 137 | self.app_null = parameters['app_null'] 138 | self.w_motion = 1 - self.w_appearance 139 | self.min_track_quality = parameters['min_track_quality'] 140 | self.kalman_const_noise = parameters['kalman_constant_noise'] 141 | self.kalman_cov_xy = parameters['kalman_Q_xy'] # covariance is a function of width of an object 142 | self.kalman_cov_vel = parameters['kalman_Q_vel'] # covariance is a function of width of an object 143 | self.kalman_R = parameters['kalman_R'] # observation noise 144 | self.kalman_F = np.array([[1,0,1,0],[0,1,0,1],[0,0,1,0],[0,0,0,1]]) 145 | self.kalman_H = np.array([[1,0,0,0],[0,1,0,0]]) 146 | #self.noise_R = np.diag([self.kalman_R**2, self.kalman_R**2]) 147 | if self.kalman_const_noise: 148 | self.noise_Q = np.diag([self.kalman_cov_xy**2, self.kalman_cov_xy**2, 149 | self.kalman_cov_vel**2, self.kalman_cov_vel**2]) 150 | 151 | def getKalmanNoiseR(self, size=None): 152 | if self.kalman_const_noise: 153 | return np.diag([self.kalman_R**2, self.kalman_R**2]) 154 | if size > 0 and 
self.kalman_const_noise==False: 155 | return np.diag([ (self.kalman_R*size)**2, (self.kalman_R*size)**2 ]) 156 | assert False 157 | 158 | def getKalmanNoiseQ(self, size=None, init_P=False): 159 | if init_P and self.kalman_const_noise: 160 | return np.diag([self.kalman_cov_xy**2, self.kalman_cov_xy**2, 161 | self.kalman_cov_xy**2, self.kalman_cov_xy**2]) 162 | if init_P and size > 0 and self.kalman_const_noise==False: 163 | return np.diag([ (self.kalman_cov_xy*size)**2, (self.kalman_cov_xy*size)**2, 164 | (self.kalman_cov_xy*size)**2, (self.kalman_cov_xy*size)**2 ]) 165 | if self.kalman_const_noise and init_P==False: 166 | return self.noise_Q 167 | if size > 0 and self.kalman_const_noise==False and init_P==False: 168 | return np.diag([ (self.kalman_cov_xy*size)**2, (self.kalman_cov_xy*size)**2, 169 | (self.kalman_cov_vel*size)**2, (self.kalman_cov_vel*size)**2 ]) 170 | assert False 171 | 172 | def incrementID(self): 173 | self.id_pool += 1 174 | 175 | def getID_and_Increment(self): 176 | self.id_pool += 1 177 | return self.id_pool - 1 178 | 179 | def incrementTreeNum(self): 180 | self.tree_num += 1 181 | 182 | def getTreeNum_and_Increment(self): 183 | self.tree_num += 1 184 | return self.tree_num - 1 185 | 186 | def multivariateNormalProb(self, x, mean, cov, ln_output=False, inv_cov=None): 187 | d_2 = x.shape[0] * 0.5 188 | if self.use_denom: 189 | denom = np.power(2*np.pi, d_2)*np.sqrt(np.abs(np.linalg.det(cov))) 190 | else: 191 | denom = 1 192 | if inv_cov is None: 193 | exponent = (x-mean).T @ np.linalg.inv(cov) @ (x-mean) 194 | else: 195 | exponent = (x-mean).T @ inv_cov @ (x-mean) 196 | assert exponent >= 0, 'covariance is not PSD 1' 197 | 198 | if ln_output: 199 | prob = -0.5*exponent - np.log(denom) 200 | else: 201 | prob = np.exp(-0.5*exponent) / (denom+self.eps) 202 | return prob.item() 203 | 204 | def copyNodeInfo(self, node): 205 | info = {'kalman_state':node.kalman_state.copy(), 'kalman_cov':node.kalman_cov.copy(), 206 | 'detection':list(node.detection), 'det_index':list(node.det_index), 'is_dummy':node.is_dummy, 207 | 'scores':list(node.scores), 'status':list(node.status), 208 | 'track_id':node.track_id, 'v_num':node.v_num} 209 | return info 210 | 211 | def kalman_predict(self, X, P, size): 212 | Q = self.getKalmanNoiseQ(size=size) 213 | X_pred = self.kalman_F @ X 214 | P_pred = self.kalman_F @ P @ self.kalman_F.T + Q 215 | return X_pred, P_pred 216 | 217 | def addDummyNode(self, current_node, atree, nframe): 218 | info = self.copyNodeInfo(current_node) 219 | info['is_dummy'] = True 220 | if info['status'][3] > self.max_missing: 221 | current_node.status[5] = TrackTree.STATUS['end'] # status 222 | return -1 223 | info['status'][0] += 1 # total length 224 | info['status'][2] += 1 # total_missing 225 | info['status'][3] += 1 # conseq_missing 226 | info['status'][4] = 1 # dummy_indicator 227 | info['detection'][5] = nframe # frame 228 | #info['detection'][6] = -1 # dummy_indicator 229 | info['detection'][7] = 1 # dummy_indicator 230 | 231 | # state, cov, scores 232 | size = info['detection'][2] 233 | info['kalman_state'] = self.kalman_F @ info['kalman_state'] 234 | Q = self.getKalmanNoiseQ(size=size) 235 | R = self.getKalmanNoiseR(size=size) # self.noise_R 236 | P_pred = self.kalman_F @ info['kalman_cov'] @ self.kalman_F.T + Q 237 | IS = np.linalg.inv(R + self.kalman_H @ P_pred @ self.kalman_H.T) 238 | K = P_pred @ self.kalman_H.T @ IS 239 | IKH = np.eye(K.shape[0]) - K @ self.kalman_H 240 | info['kalman_cov'] = IKH @ P_pred @ IKH.T + K @ R @ K.T 241 | 242 | # info['kalman_cov'] = 
Q*2.0 # P_pred.copy() 243 | # info['kalman_state'], info['kalman_cov'] = self.kalman_predict(info['kalman_state'], info['kalman_cov'], info['detection'][2]) 244 | 245 | info['scores'] = [current_node.scores[0] + np.log(1-self.P_D), 0, np.log(1-self.P_D), current_node.scores[3]] 246 | # if info['scores'][0] < 0: 247 | # info['scores'][0] = 0 248 | v_num = atree.addNode(info, current_node) 249 | return v_num 250 | 251 | def updateNodeWithDetection(self, adet, current_node, atree, 252 | X_predict, P_predict, Kalman_innovation, Kalman_gain, Kalman_S, Kalman_IS, Noise_R, app_score=None): 253 | new_info = self.copyNodeInfo(current_node) 254 | 255 | new_info['status'][0] += 1 # total length 256 | new_info['status'][1] += 1 # num dets 257 | new_info['status'][3] = 0 # reset conseq missings 258 | new_info['status'][4] = 0 # indicator dummy 259 | det_bbox = list(adet) 260 | 261 | if max([ det_bbox[3] / current_node.detection[3], 262 | current_node.detection[3] / det_bbox[3] ]) > self.max_scale_change: # scale gating 263 | del new_info 264 | return -1, -1 265 | if app_score is not None and app_score <= 0: 266 | del new_info 267 | return -1, -1 268 | #det_bbox[2] = current_node.detection[2] * 0.5 + det_bbox[2] * 0.5 269 | #det_bbox[3] = current_node.detection[3] * 0.5 + det_bbox[3] * 0.5 270 | new_info['detection'] = det_bbox 271 | new_info['det_index'].append((adet[5], adet[6])) # detection index 272 | new_info['is_dummy'] = False 273 | 274 | X_new = X_predict + Kalman_gain @ Kalman_innovation 275 | IKH = np.eye(Kalman_gain.shape[0]) - Kalman_gain @ self.kalman_H 276 | P_new = IKH @ P_predict @ IKH.T + Kalman_gain @ Noise_R @ Kalman_gain.T 277 | # P_new = (np.eye(P_predict.shape[0]) - Kalman_gain @ self.kalman_H) @ P_predict 278 | new_info['kalman_state'] = X_new 279 | new_info['kalman_cov'] = P_new 280 | #lnlh_kinematic = self.multivariateNormalProb(Kalman_innovation, np.zeros(Kalman_innovation.shape), Kalman_S, True, Kalman_IS) - np.log(self.P_FA) 281 | motion_term = self.multivariateNormalProb(Kalman_innovation, np.zeros(Kalman_innovation.shape), cov=Kalman_S, 282 | ln_output=True, inv_cov=Kalman_IS) - np.log(self.kin_null) 283 | #print(self.multivariateNormalProb(np.zeros((2,1)), np.zeros(Kalman_innovation.shape), Kalman_S, True, Kalman_IS)) 284 | if app_score is None: 285 | appearance_term = np.log(self.P_D/self.P_FA) 286 | else: 287 | appearance_term = np.log(app_score) - np.log(self.app_null) # np.log(self.P_D/self.P_FA) # 288 | llh = self.w_appearance*appearance_term + self.w_motion*motion_term 289 | 290 | if llh <= 0: # no update 291 | del new_info 292 | return -1, -1 293 | sc = current_node.scores[0] + llh 294 | det_score = current_node.scores[3] + adet[4] 295 | new_info['scores'] = [sc, self.w_appearance*appearance_term, self.w_motion*motion_term, det_score] 296 | 297 | new_id = self.getID_and_Increment() # return new id 298 | new_info['track_id'] = new_id 299 | v_num = atree.addNode(new_info, current_node) 300 | return v_num, new_id 301 | 302 | def compDistAll(self, x, y, x0, y0, ivcov): 303 | a, b, c, d = ivcov[0,0], ivcov[0,1], ivcov[1,0], ivcov[1,1] 304 | xx = x-x0 305 | yy = y-y0 306 | return a*((xx)**2) + d*((yy)**2) + (b+c)*(xx*yy) 307 | 308 | def updateTrackTrees(self, nframe, dets, app_scores=None, canvas=None): 309 | det_usage = { d:[] for d in dets } # usage list of each detection 310 | 311 | # loop over track trees 312 | for ti in self.hypothesis_set: 313 | atree = self.hypothesis_set[ti] 314 | leaves = atree.findLeaves() 315 | 316 | for l in leaves: 317 | leaf = atree.nodes[l] 318 | 319 | if
leaf.status[5] == TrackTree.STATUS['purge'] or leaf.status[5] == TrackTree.STATUS['end']: 320 | continue 321 | 322 | # add a dummy node 323 | if self.addDummyNode(leaf, atree, nframe) == -1: 324 | continue 325 | 326 | # compute the prediction step of kalman filter 327 | size = leaf.detection[2] # width 328 | X_prior = leaf.kalman_state 329 | P_prior = leaf.kalman_cov 330 | X_predict, P_predict = self.kalman_predict(X_prior, P_prior, size) 331 | 332 | # kalman correction 333 | R = self.getKalmanNoiseR(size=size) # self.noise_R 334 | S = (self.kalman_H @ P_predict @ self.kalman_H.T) + R 335 | IS = np.linalg.inv(S) 336 | K = P_predict @ self.kalman_H.T @ IS 337 | 338 | # vis 339 | if canvas is not None: # nframe > 0: 340 | t_xy = X_predict[:2] # X_prior[:2] 341 | im_wd = canvas.shape[1] 342 | im_ht = canvas.shape[0] 343 | strides = 17 344 | for yy in range(0,im_ht,strides): 345 | for xx in range(0,im_wd,strides): 346 | xyxy = np.array([[xx], [yy]]) 347 | dt = (xyxy-t_xy).T @ IS @ (xyxy-t_xy) 348 | dt = dt.item() 349 | if dt < self.d_th: 350 | cv2.circle(canvas, (xx, yy), 2, (0,0,255), -1) 351 | cv2.circle(canvas, ( int(np.round(t_xy[0,0])), int(np.round(t_xy[1,0])) ), 2, (0,255,0), -2) 352 | #cv2.imshow('cvs', canvas) 353 | 354 | for d in dets: 355 | det_xy = np.array([[dets[d]['det'][0]+dets[d]['det'][2]/2], [dets[d]['det'][1]+dets[d]['det'][3]/2]]) 356 | Y = det_xy - self.kalman_H @ X_predict 357 | distance = Y.T @ IS @ Y 358 | distance = distance.item() 359 | if DEBUGGING: assert distance >= 0, 'Fatal error: covariance is not PSD 2' 360 | 361 | if distance < self.d_th: # gating 362 | adet = dets[d]['det'] # (x, y, w, h, b, t, i, dummy) , b=confidence, t=frame, i=i-th detection at the frame 363 | if app_scores != None: 364 | v_num, new_id = self.updateNodeWithDetection(adet, leaf, atree, X_predict, P_predict, Y, K, S, IS, R, app_scores[(adet[6], leaf.det_index[-1])]) 365 | else: 366 | v_num, new_id = self.updateNodeWithDetection(adet, leaf, atree, X_predict, P_predict, Y, K, S, IS, R) 367 | if v_num != -1 and new_id != -1: 368 | det_usage[d].append((ti, v_num, new_id)) 369 | 370 | # init new tracks 371 | for d in dets: 372 | adet = list(dets[d]['det']) # (x, y, w, h, b, t, i, dummy) , b=confidence, t=frame, i=i-th detection at the frame 373 | P_init = self.getKalmanNoiseQ(size=adet[2], init_P=False) # np.zeros((4,4)) 374 | atree = TrackTree(self.tree_num, adet, self.init_score, self.id_pool, P_init) 375 | det_usage[d].append((self.tree_num, TrackTree.INIT_NODE, self.id_pool)) # tree_num, node_num, id 376 | if DEBUGGING: assert self.tree_num not in self.hypothesis_set, 'Fatal error: tree' 377 | self.hypothesis_set[self.tree_num] = atree 378 | self.incrementID() 379 | self.incrementTreeNum() 380 | 381 | if DEBUGGING: 382 | valid_tracks = 0 383 | totLeaves = 0 # check code 384 | max_leaves = [-1,-1] 385 | for ti in self.hypothesis_set: 386 | if self.hypothesis_set[ti].valid_track[0] == 1: 387 | valid_tracks += 1 388 | leaves = self.hypothesis_set[ti].findLeaves() 389 | totLeaves += len(leaves) 390 | if max_leaves[0] < len(leaves): 391 | max_leaves[0] = len(leaves) 392 | max_leaves[1] = ti 393 | #if max_leaves[0] != -1 and max_leaves[1] != -1: 394 | # print(RenderTree(self.hypothesis_set[max_leaves[1]].nodes[self.hypothesis_set[max_leaves[1]].findRoot().v_num]).by_attr()) 395 | print('\nNUM ({}): {}/{}/{}'.format(valid_tracks,len(self.hypothesis_set), max_leaves[0], totLeaves)) 396 | return det_usage 397 | 398 | def makeConflictList(self, det_usage): 399 | n_tree = len(self.hypothesis_set) 400 
| if n_tree == 0: 401 | self.conflictList.clear() 402 | return 403 | 404 | conflictPrev = copy.deepcopy(self.conflictList) 405 | self.conflictList.clear() 406 | self.conflictList = {t:None for t in self.hypothesis_set} 407 | 408 | for t in self.hypothesis_set: 409 | atree = self.hypothesis_set[t] 410 | leaves = atree.findLeaves() 411 | self.conflictList[t] = dict() 412 | for l in leaves: 413 | leaf = atree.getNode(l) 414 | cflcts = [] 415 | 416 | # conflicts from the parent node 417 | # if leaf.status[5] != TrackTree.STATUS['end']: 418 | # parent = atree.getParent(l) 419 | # else: 420 | # parent = l 421 | if leaf.status[5] == TrackTree.STATUS['tracking']: 422 | parent = atree.getParent(l) 423 | else: 424 | parent = l 425 | if DEBUGGING: assert atree.nodes[l].is_leaf 426 | 427 | if DEBUGGING: assert leaf.status[5] != TrackTree.STATUS['purge'] 428 | if parent is None: # root node 429 | cflcts.append((t, l, leaf.track_id)) # tree_num, node_num, track_id 430 | else: 431 | cflctParent = conflictPrev[t][parent] 432 | for acflct in cflctParent: 433 | if DEBUGGING: assert self.hypothesis_set[acflct[0]].nodes[acflct[1]].track_id == acflct[2], 'Fatal error: ID does not match' 434 | 435 | # if self.hypothesis_set[acflct[0]].nodes[acflct[1]].status[5] == TrackTree.STATUS['end']: 436 | # cflcts.append((acflct[0], acflct[1], self.hypothesis_set[acflct[0]].nodes[acflct[1]].track_id)) 437 | # assert self.hypothesis_set[acflct[0]].nodes[acflct[1]].is_leaf, 'Fatal error: not a leaf' 438 | # continue 439 | 440 | cflct_ch = self.hypothesis_set[acflct[0]].getChildren(acflct[1]) 441 | if len(cflct_ch) > 0: 442 | for c in cflct_ch: 443 | if DEBUGGING: assert self.hypothesis_set[acflct[0]].nodes[c].is_leaf, 'Fatal error: not a leaf' 444 | if DEBUGGING: assert self.hypothesis_set[acflct[0]].nodes[c].status[5] != TrackTree.STATUS['purge'] 445 | cflcts.append((acflct[0], c, self.hypothesis_set[acflct[0]].nodes[c].track_id)) 446 | else: 447 | cflcts.append((acflct[0], acflct[1], self.hypothesis_set[acflct[0]].nodes[acflct[1]].track_id)) 448 | 449 | # check conflicts with the current detections 450 | if leaf.status[4] == 0: # not a dummy node 451 | det_i = leaf.detection[6] 452 | cflcts = cflcts + det_usage[det_i] 453 | if DEBUGGING: assert leaf.detection[7] == 0 454 | self.conflictList[t][l] = set(cflcts) 455 | 456 | def clustering(self): 457 | if len(self.hypothesis_set) == 0: 458 | return dict() 459 | 460 | conflictTreeList = {t:None for t in self.hypothesis_set} 461 | for t in self.hypothesis_set: 462 | leaves = self.hypothesis_set[t].findLeaves() 463 | cflTrees = set() 464 | for l in leaves: 465 | for k in self.conflictList[t][l]: 466 | cflTrees.add(k[0]) 467 | conflictTreeList[t] = list(cflTrees) 468 | 469 | conflictTrees = {t:[] for t in self.hypothesis_set} 470 | it = [k for k in sorted(self.hypothesis_set)] 471 | for i in range(len(it)): 472 | t = it[i] 473 | conflictTrees[t].append(t) 474 | conflicts_1 = conflictTreeList[t] 475 | for j in range(i+1, len(it)): 476 | t2 = it[j] 477 | conflicts_2 = conflictTreeList[t2] 478 | for f in conflicts_2: 479 | if f in conflicts_1: 480 | conflictTrees[t].append(t2) 481 | break 482 | assert len(conflictTrees[t]) == len(set(conflictTrees[t])), 'error check' 483 | 484 | category = 0 485 | tree_category = {t:0 for t in self.hypothesis_set} 486 | while len(it) > 0: 487 | category += 1 488 | i = it[0] 489 | cflTrees = conflictTrees[i] 490 | for j in cflTrees: 491 | if i == j: continue 492 | conflictTrees[i] = conflictTrees[i] + conflictTrees[j] 493 | conflictTrees[j] = [] 494
| 495 | conflictTrees[i] = list(set(conflictTrees[i])) 496 | it = sorted(list(set(it) - set(cflTrees))) 497 | 498 | cat_cfl = [] 499 | for k in conflictTrees[i]: 500 | if tree_category[k] != 0: 501 | cat_cfl.append(tree_category[k]) 502 | 503 | if len(cat_cfl) == 0: 504 | for k in conflictTrees[i]: 505 | tree_category[k] = category 506 | else: 507 | for k in tree_category: 508 | if tree_category[k] in cat_cfl: 509 | tree_category[k] = category 510 | for k in conflictTrees[i]: 511 | tree_category[k] = category 512 | 513 | clusters = {k:[] for k in set(tree_category.values())} 514 | for c in clusters: 515 | trees = [k for k in tree_category if tree_category[k]==c] 516 | for t in trees: 517 | leaves = self.hypothesis_set[t].findLeaves() 518 | for l in leaves: 519 | clusters[c].append((t, l, self.hypothesis_set[t].nodes[l].track_id)) # tree_no, vertex_no, track_id 520 | 521 | if DEBUGGING: # sanity check 522 | cl = list(clusters.keys()) 523 | for i in range(len(cl)): 524 | ci = cl[i] 525 | list_ci = clusters[ci] 526 | for j in range(i+1, len(cl)): 527 | cj = cl[j] 528 | if ci == cj: continue 529 | list_cj = clusters[cj] 530 | intersect = set(list_ci) & set(list_cj) 531 | assert len(intersect) == 0, 'error chk 2' 532 | 533 | return clusters 534 | 535 | def compBestHypoSet(self, clusters): 536 | best_set = {k:None for k in clusters} 537 | 538 | for c in clusters: 539 | tracks = clusters[c] 540 | n_tracks = len(tracks) 541 | 542 | edges = np.zeros((n_tracks, n_tracks)) 543 | weights = np.zeros(n_tracks) 544 | min_score = 1e12 545 | for l in range(n_tracks): 546 | atrack = tracks[l] 547 | score = list(self.hypothesis_set[atrack[0]].nodes[atrack[1]].scores) 548 | status = list(self.hypothesis_set[atrack[0]].nodes[atrack[1]].status) 549 | cfls = self.conflictList[atrack[0]][atrack[1]] 550 | # cnt = 0 551 | for l2 in range(l+1, n_tracks): 552 | if tracks[l2] in cfls: 553 | edges[l, l2] = 1 554 | # cnt += 1 555 | # assert cnt == len(cfls), 'Fatal error: check clusters' 556 | 557 | if score[0] < min_score: 558 | min_score = score[0] 559 | 560 | track_len = status[0] - status[3] 561 | if status[1]/track_len > self.min_track_quality and track_len > self.min_track_length: 562 | if self.hypothesis_set[atrack[0]].valid_track[0] == 1 and status[5] == TrackTree.STATUS['end']: 563 | for _ in range(status[3]): # compensate for tails 564 | score[0] -= np.log(1-self.P_D) 565 | # if score[0] <= 0: 566 | # score[0] = self.init_score 567 | # if status[1]/track_len > self.min_track_quality and track_len > self.min_track_length: 568 | # score[0] += 0.1 569 | weights[l] = score[0] 570 | 571 | #weights = weights - (min_score - 0.1) 572 | best_scores = [] 573 | if self.use_gurobi: 574 | gb = grb.Model('bestset') 575 | x = {} 576 | for l in range(n_tracks): 577 | x[l] = gb.addVar(obj=weights[l], vtype=grb.GRB.BINARY) 578 | for l in range(n_tracks): 579 | for j in range(l+1, n_tracks): 580 | if edges[l,j] == 1: 581 | gb.addConstr(x[l]+x[j], '<=', 1) 582 | gb.ModelSense = grb.GRB.MAXIMIZE 583 | gb.Params.OutputFlag = 0 584 | gb.update() 585 | gb.optimize() 586 | if gb.status == grb.GRB.OPTIMAL: 587 | best_set[c] = [tracks[l] for l in x if x[l].x > 0.5] 588 | best_scores = [weights[l] for l in x if x[l].x > 0.5] 589 | else: 590 | assert False, 'Fatal error: check gurobi solutions' 591 | else: 592 | xx = cvxpy.Variable(shape=n_tracks, boolean=True) 593 | maximize = weights @ xx 594 | constraints = [0<=xx, xx<=1] # a redundant constraint 595 | for l in range(n_tracks): 596 | for j in range(l+1, n_tracks): 597 | if edges[l,j] == 
1: 598 | constraints.append(xx[l]+xx[j]<=1) 599 | problem = cvxpy.Problem(cvxpy.Maximize(maximize), constraints) 600 | problem.solve(solver=cvxpy.ECOS_BB, verbose=False) 601 | best_set[c] = [tracks[l] for l in range(n_tracks) if xx[l].value >= 0.98] 602 | best_scores = [weights[l] for l in range(n_tracks) if xx[l].value >= 0.98] 603 | # result = [tracks[l] for l in range(n_tracks) if xx[l].value >= 0.98] 604 | # assert len(result) == len(best_set[c]) 605 | # for r in result: 606 | # assert r in best_set[c] 607 | for bi, b in enumerate(best_set[c]): 608 | status = self.hypothesis_set[b[0]].nodes[b[1]].status 609 | track_len = status[0]-status[3] 610 | if status[1]/track_len > self.min_track_quality and track_len > self.min_track_length: 611 | self.hypothesis_set[b[0]].valid_track[0] = 1 612 | self.hypothesis_set[b[0]].valid_track[1] = b[1] # node num 613 | self.hypothesis_set[b[0]].valid_track[2] = b[2] # track id 614 | self.hypothesis_set[b[0]].valid_track[3] = best_scores[bi] # score 615 | 616 | if DEBUGGING: # sanity check 617 | for c in best_set: 618 | bests = best_set[c] 619 | for b in bests: 620 | cfls = self.conflictList[b[0]][b[1]] 621 | 622 | for c2 in best_set: 623 | bests2 = best_set[c2] 624 | for b2 in bests2: 625 | if c==c2 and b == b2: continue 626 | assert not(b2 in cfls), 'Fatal error: check' 627 | return best_set 628 | 629 | def treePruning(self, clusters, best_set): 630 | # K-Depth pruning 631 | # delete tracks: too many false positives, 632 | # terminate tracks: track termination (conseq missing) 633 | bestTracks = dict() 634 | surviveTracks = set() 635 | currentTracks = dict() 636 | for c in clusters: 637 | for b in best_set[c]: 638 | bestTracks[b[0]] = (b[1], b[2]) 639 | 640 | deathNote = [] 641 | confirmed = [] 642 | treeNums = sorted(self.hypothesis_set.keys()) 643 | for i in range(len(treeNums)): 644 | t = treeNums[i] 645 | 646 | if t in bestTracks: 647 | best_node = bestTracks[t] 648 | # depth pruning 649 | new_root = best_node[0] 650 | sel_node = best_node[0] 651 | sel_id = best_node[1] 652 | for _ in range(self.K): 653 | new_root = self.hypothesis_set[t].getParent(new_root) 654 | if new_root is None: 655 | break 656 | if new_root is None or self.hypothesis_set[t].nodes[new_root].is_root: 657 | new_root = self.hypothesis_set[t].getRoot() 658 | else: 659 | prunedLeaves = self.hypothesis_set[t].detachSubTree(new_root) 660 | for p in prunedLeaves: 661 | del self.conflictList[t][p] 662 | if self.hypothesis_set[t].valid_track[0] == 1: 663 | self.hypothesis_set[t].valid_track = [1, sel_node, sel_id, self.hypothesis_set[t].nodes[sel_node].scores[0]] 664 | elif self.hypothesis_set[t].valid_track[0] == 1: 665 | leaves = self.hypothesis_set[t].findLeaves() 666 | scores = [self.hypothesis_set[t].nodes[l].scores[0] for l in leaves] 667 | pairs = zip(leaves, scores) 668 | sortleaf = sorted(pairs, key=lambda x:x[1], reverse=True) 669 | sel_node = -1 670 | find_node = self.hypothesis_set[t].valid_track[1] 671 | for si in range(len(sortleaf)): 672 | route = self.hypothesis_set[t].nodes[sortleaf[si][0]].path 673 | for r in reversed(route): 674 | if r.v_num == find_node: 675 | sel_node = sortleaf[si][0] 676 | break 677 | if DEBUGGING: assert sel_node != -1 678 | sel_id = self.hypothesis_set[t].nodes[sel_node].track_id 679 | for l in leaves: 680 | if l == sel_node: 681 | continue 682 | self.hypothesis_set[t].removeBranch(l) 683 | del self.conflictList[t][l] 684 | status = self.hypothesis_set[t].nodes[sel_node].status 685 | track_len = status[0]-status[3] 686 | if status[1]/track_len 
> self.min_track_quality and track_len > self.min_track_length: 687 | self.hypothesis_set[t].valid_track = [1, sel_node, sel_id, self.hypothesis_set[t].nodes[sel_node].scores[0]] 688 | else: 689 | del self.hypothesis_set[t] 690 | del self.conflictList[t] 691 | continue 692 | 693 | # record survived track 694 | leaves = self.hypothesis_set[t].findLeaves() 695 | tempset = [] # a note for survived leaves 696 | # n_bad = 0 # bad tracks among finished tracks 697 | # n_good = 0 # good tracks among finished tracks 698 | # n_tracking = 0 # under tracking 699 | for l in leaves: 700 | anode = self.hypothesis_set[t].getNode(l) 701 | if anode.status[5] != TrackTree.STATUS['purge']: 702 | tempset.append((t, l, anode.track_id)) 703 | 704 | b_status = self.hypothesis_set[t].nodes[sel_node].status 705 | if b_status[5] == TrackTree.STATUS['end']: 706 | track_len = b_status[0]-b_status[3] 707 | quality = b_status[1] / track_len 708 | if quality > self.min_track_quality and track_len > self.min_track_length: 709 | confirmed.append((t, sel_node, sel_id)) 710 | else: 711 | deathNote.append(t) 712 | else: 713 | surviveTracks = surviveTracks.union(tempset) 714 | currentTracks[sel_id] = (t, sel_node) 715 | if DEBUGGING: assert b_status != TrackTree.STATUS['purge'] 716 | 717 | for d in deathNote: 718 | del self.hypothesis_set[d] 719 | del self.conflictList[d] 720 | 721 | for c in confirmed: # confirming 722 | self.saveConfirmedTrack(c[0], c[1], c[2]) 723 | del self.hypothesis_set[c[0]] 724 | del self.conflictList[c[0]] 725 | 726 | # update conflict list 727 | for t in self.hypothesis_set: 728 | leaves = self.hypothesis_set[t].findLeaves() 729 | for l in leaves: 730 | newcfls = set() 731 | for c in self.conflictList[t][l]: 732 | if c in surviveTracks: 733 | newcfls.add(c) 734 | self.conflictList[t][l] = newcfls 735 | 736 | return currentTracks 737 | 738 | def branchMerging(self, currentTracks): 739 | 740 | if DEBUGGING: 741 | curr_trees = { currentTracks[t][0] for t in currentTracks } 742 | assert set(self.hypothesis_set.keys()) == curr_trees 743 | 744 | survived = [] 745 | for t in currentTracks: 746 | best = currentTracks[t] 747 | if DEBUGGING: assert self.hypothesis_set[best[0]].nodes[best[1]].track_id == t 748 | bestscore = self.hypothesis_set[best[0]].nodes[best[1]].scores[0] 749 | depth = self.hypothesis_set[best[0]].nodes[best[1]].depth 750 | leaves = self.hypothesis_set[best[0]].findLeaves() 751 | scores = [self.hypothesis_set[best[0]].nodes[l].scores[0] for l in leaves] 752 | pairs = zip(leaves, scores) 753 | sortleaf = sorted(pairs, key=lambda x:x[1], reverse=True) 754 | idx = sortleaf.index((best[1],bestscore)) 755 | sortleaf[0], sortleaf[idx] = sortleaf[idx], sortleaf[0] 756 | 757 | if depth < self.K and False: 758 | for i, l in enumerate(sortleaf): 759 | if i < self.max_num_leaves: 760 | survived.append((best[0], l[0], self.hypothesis_set[best[0]].nodes[l[0]].track_id)) 761 | else: 762 | self.hypothesis_set[best[0]].removeBranch(l[0]) 763 | del self.conflictList[best[0]][l[0]] 764 | continue 765 | 766 | #survived.append((best[0], best[1], t)) # best track 767 | #best_dets = set(self.hypothesis_set[best[0]].nodes[best[1]].det_index) 768 | while len(sortleaf) > 0: 769 | item = sortleaf.pop(0) 770 | dets = set(self.hypothesis_set[best[0]].nodes[item[0]].det_index) 771 | loop = list(sortleaf) 772 | for i, l in enumerate(loop): 773 | det_i = set(self.hypothesis_set[best[0]].nodes[l[0]].det_index) 774 | if det_i.issubset(dets): 775 | L_D = self.hypothesis_set[best[0]].nodes[l[0]].scores[0] 776 | L_S = 
self.hypothesis_set[best[0]].nodes[item[0]].scores[0] 777 | self.hypothesis_set[best[0]].nodes[item[0]].scores[0] = L_S + np.log(1+np.exp(-(L_S-L_D))) 778 | self.hypothesis_set[best[0]].removeBranch(l[0]) 779 | del self.conflictList[best[0]][l[0]] 780 | sortleaf.remove((l[0], l[1])) 781 | 782 | bestscore = self.hypothesis_set[best[0]].nodes[best[1]].scores[0] 783 | leaves = self.hypothesis_set[best[0]].findLeaves() 784 | scores = [self.hypothesis_set[best[0]].nodes[l].scores[0] for l in leaves] 785 | pairs = zip(leaves, scores) 786 | sortleaf = sorted(pairs, key=lambda x:x[1], reverse=True) 787 | idx = sortleaf.index((best[1],bestscore)) 788 | sortleaf[0], sortleaf[idx] = sortleaf[idx], sortleaf[0] 789 | for i, l in enumerate(sortleaf): 790 | if i < self.max_num_leaves: 791 | survived.append((best[0], l[0], self.hypothesis_set[best[0]].nodes[l[0]].track_id)) 792 | else: 793 | self.hypothesis_set[best[0]].removeBranch(l[0]) 794 | del self.conflictList[best[0]][l[0]] 795 | 796 | # for i, l in enumerate(sortleaf): 797 | # if l[0] == best[1]: 798 | # continue 799 | # if i < self.max_num_leaves: 800 | # dets = set(self.hypothesis_set[best[0]].nodes[l[0]].det_index) 801 | # if dets.issubset(best_dets): 802 | # self.hypothesis_set[best[0]].removeBranch(l[0]) 803 | # del self.conflictList[best[0]][l[0]] 804 | # else: 805 | # survived.append((best[0], l[0], self.hypothesis_set[best[0]].nodes[l[0]].track_id)) 806 | # else: 807 | # self.hypothesis_set[best[0]].removeBranch(l[0]) 808 | # del self.conflictList[best[0]][l[0]] 809 | 810 | for t in self.hypothesis_set: 811 | leaves = self.hypothesis_set[t].findLeaves() 812 | if DEBUGGING: assert len(leaves) <= self.max_num_leaves+1 813 | for l in leaves: 814 | newcfls = set() 815 | for c in self.conflictList[t][l]: 816 | if c in survived: 817 | newcfls.add(c) 818 | self.conflictList[t][l] = newcfls 819 | 820 | def saveConfirmedTrack(self, treeNo, v_num, trackID): 821 | status = self.hypothesis_set[treeNo].nodes[v_num].status 822 | route = self.hypothesis_set[treeNo].nodes[v_num].path 823 | score = self.hypothesis_set[treeNo].nodes[v_num].scores 824 | det_score = score[3] 825 | trajectory = [] 826 | n_dummies = 0 827 | if DEBUGGING: assert len(self.hypothesis_set[treeNo].history['dets']) == len(self.hypothesis_set[treeNo].history['estimates']) 828 | history_len = len(self.hypothesis_set[treeNo].history['dets']) 829 | for h in range(history_len): 830 | adet = self.hypothesis_set[treeNo].history['dets'][h] 831 | estimate = self.hypothesis_set[treeNo].history['estimates'][h] 832 | wd, ht, frame, d_i, dummy = adet[2], adet[3], adet[5], adet[6], adet[7] 833 | cx, cy = estimate[0], estimate[1] 834 | track = [frame, cx-wd/2, cy-ht/2, wd, ht, dummy, 0] 835 | trajectory.append(track) 836 | if dummy == 1:n_dummies += 1 837 | for r in route: 838 | adet = r.detection 839 | wd, ht, frame, d_i, dummy = adet[2], adet[3], adet[5], adet[6], adet[7] 840 | cx, cy = r.kalman_state[0].item(), r.kalman_state[1].item() 841 | track = [frame, cx-wd/2, cy-ht/2, wd, ht, dummy, 0] 842 | trajectory.append(track) 843 | if DEBUGGING: assert dummy == int(r.is_dummy) 844 | if dummy == 1:n_dummies += 1 845 | tails = 0 846 | for t in reversed(trajectory): 847 | if t[5] == 0: break # dummy indicator: not a dummy 848 | tails += 1 849 | #trajectory = trajectory[:(len(trajectory)-tails)] 850 | track_len = len(trajectory)-tails 851 | for i in range(track_len, track_len+tails): 852 | trajectory[i][6] = 1 # marking tails 853 | if DEBUGGING: 854 | assert tails == status[3] 855 | assert track_len == 
(status[0]-status[3]) 856 | assert n_dummies == status[2] 857 | assert not(trackID in self.confirmed_tracks), 'Fatal error: track ID conflict' 858 | assert (n_dummies-tails+status[1]) == track_len 859 | for i in range(1, track_len): 860 | assert trajectory[i][0] - trajectory[i-1][0] == 1 861 | if status[1]/track_len > self.min_track_quality and track_len > self.min_track_length and det_score/status[1] > self.min_det_conf: 862 | self.confirmed_tracks[trackID] = trajectory 863 | 864 | def rand_string(self, param, que): 865 | leaves = self.hypothesis_set[param].findLeaves() 866 | que.put({param:leaves}) 867 | 868 | def doTracking(self, nframe, detections, app_scores=None, canvas=None): # update MHT 869 | 870 | # detections: (x, y, w, h, b, t, i, dummy), b=confidence, t=frame, i=i-th detection at the frame 871 | if DEBUGGING:start = time.time() 872 | det_usage = self.updateTrackTrees(nframe, detections, app_scores=app_scores, canvas=canvas) 873 | if DEBUGGING:print('\nupdateTree: %f' % ((time.time()-start)*1000)) 874 | 875 | if DEBUGGING:start = time.time() 876 | self.makeConflictList(det_usage) 877 | if DEBUGGING:print('conflict: %f' % ((time.time()-start)*1000)) 878 | 879 | if DEBUGGING:start = time.time() 880 | clusters = self.clustering() 881 | if DEBUGGING:print('clustering: %f' % ((time.time()-start)*1000)) 882 | 883 | if DEBUGGING:start = time.time() 884 | best_set = self.compBestHypoSet(clusters) 885 | if DEBUGGING:print('bestset: %f' % ((time.time()-start)*1000)) 886 | 887 | if DEBUGGING:start = time.time() 888 | currentTracks = self.treePruning(clusters, best_set) 889 | if DEBUGGING:print('pruning: %f' % ((time.time()-start)*1000)) 890 | 891 | if DEBUGGING:start = time.time() 892 | self.branchMerging(currentTracks) 893 | if DEBUGGING:print('merging: %f' % ((time.time()-start)*1000)) 894 | 895 | # save detections 896 | if DEBUGGING: assert nframe not in self.dets_set 897 | self.dets_set[nframe] = detections 898 | return currentTracks 899 | 900 | def getTrackPatches(self): 901 | features = {} 902 | feat_list = [] 903 | counter = 1 904 | for t in self.hypothesis_set: 905 | leaves = self.hypothesis_set[t].findLeaves() 906 | for l in leaves: 907 | det_i = self.hypothesis_set[t].nodes[l].det_index[-1] 908 | if det_i not in features: 909 | features[det_i] = {'app':self.dets_set[det_i[0]][det_i[1]]['app'], 'used':[(t,l)],'det':self.dets_set[det_i[0]][det_i[1]]['det']} 910 | feat_list.append(det_i) 911 | else: 912 | features[det_i]['used'].append((t,l)) 913 | return features, feat_list 914 | 915 | def concludeTracks(self): # conclude MHT 916 | if DEBUGGING: 917 | allcfls = set() 918 | assert len(self.conflictList) == len(self.hypothesis_set) 919 | for t in self.conflictList: 920 | assert t in self.hypothesis_set 921 | leaves = self.hypothesis_set[t].findLeaves() 922 | assert len(self.conflictList[t]) == len(leaves) 923 | for l in self.conflictList[t]: 924 | assert l in self.hypothesis_set[t].nodes 925 | allcfls = allcfls.union(self.conflictList[t][l]) 926 | for t in self.hypothesis_set: 927 | leaves = self.hypothesis_set[t].findLeaves() 928 | for l in leaves: 929 | track_id = self.hypothesis_set[t].nodes[l].track_id 930 | item = {(t, l, track_id)} 931 | assert len(allcfls.intersection(item)) == 1 932 | allcfls.difference_update(item) 933 | assert len(allcfls) == 0 934 | clusters = self.clustering() 935 | best_set = self.compBestHypoSet(clusters) 936 | for c in best_set: 937 | for b in best_set[c]: 938 | self.saveConfirmedTrack(b[0], b[1], b[2]) 939 | 940 | new_id = 0 941 | results = [] 942 | 
for t in self.confirmed_tracks: 943 | new_id += 1 944 | tracks = self.confirmed_tracks[t] 945 | for a in tracks: 946 | if a[6] == 1: # tails 947 | continue 948 | dummy = 1 if a[5] == 1 else -1 # 1 == dummy bbox, -1 == normal bbox 949 | trj = (a[0], new_id, round(a[1]), round(a[2]), round(a[3]), round(a[4]), 1, -1, -1, dummy) 950 | results.append(trj) 951 | results.sort(key=lambda x:(x[0], x[1])) 952 | return results 953 | --------------------------------------------------------------------------------
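
Usage sketch (distilled from `main.py`): construct an `MHTTracker` from the parameter dictionary, then per frame gather detections, score their appearance against the patches of live track leaves, and call `doTracking`; `concludeTracks` finalizes and returns the confirmed trajectories. In the sketch below, `load_detections` is a hypothetical stand-in for a real detector; everything else uses the entry points defined in this repository.

```python
# Minimal tracking-loop sketch distilled from main.py; load_detections is a
# hypothetical detector stand-in. Each detection follows the layout documented
# in mht.py: (x, y, w, h, b, t, i, dummy), b=confidence, t=frame index,
# i=per-frame detection id, dummy=0 for real detections.
from mht import MHTTracker
from main import compute_similarity

params = {'K': 10, 'init_score': 1.2, 'kalman_constant_noise': False, 'kalman_Q_xy': 0.15,
          'kalman_Q_vel': 0.035, 'kalman_R': 0.15, 'P_D': 0.9, 'P_FA': 0.001,
          'kin_null': 0.2, 'distance_threshold': 6, 'canonical_kin_prob': False,
          'max_scale_change': 2, 'appearance_weight': 0.7, 'app_null': 0.2,
          'max_missing': 100, 'min_track_quality': 0.5, 'min_track_length': 20,
          'max_num_leaves': 8, 'use_gurobi': True, 'min_det_conf': -1.0}
mht = MHTTracker(params)

for nframe in range(100):
    # {i: {'det': [x, y, w, h, conf, nframe, i, 0], 'app': HxW uint8 patch}}
    detections = load_detections(nframe)                    # hypothetical
    features, _ = mht.getTrackPatches()                     # appearance patches of live track leaves
    sim = compute_similarity(detections, features, 50, 50)  # score per (detection, track-leaf) pair
    mht.doTracking(nframe, detections, sim)                 # update trees, solve MWIS, prune, merge

# rows of (frame, track_id, x, y, w, h, 1, -1, -1, dummy); dummy is -1 for
# real boxes and 1 for interpolated (dummy) ones
results = mht.concludeTracks()
```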