├── GetGTPlots.m ├── GetGTPlots1.m ├── Handoff.m ├── NoDetect_Track.m ├── NoDetect_Track_A.m ├── README.md ├── SetParam.m ├── SetParamNDTA.m ├── Single_Track.m ├── Single_Track_Original.m ├── Track.m ├── camshift.m ├── camshift2.m └── tlupdate.m

/GetGTPlots.m:
--------------------------------------------------------------------------------
%GetGTPlots:
%Extracts EXTENDED ground truth data ('groundTruth_Enhanced.txt') for the
%OTCBVS benchmark, Dataset 1.  Plain ground truth (see GetGTPlots1) is
%sufficient for Single_Track, but NoDetect_Track needs the extended data,
%in which every detection was manually tagged as new ('N') or old.
%Note: credit for the basis of the extraction code goes to the OTCBVS group
%(cited in the accompanying paper); that snippet was modified and expanded
%here, and rebuilt to accommodate the extended ground-truth file format.
%
%Input:  (ignored) - callers pass the sequence folder, but the file is
%                    read from the current directory (callers cd first).
%Returns:
%  GTPlot - numImages x maxBoxes cell array.  GTPlot{i,j} has fields:
%             .c = [ulX ulY width height]          (bounding box)
%             .x, .y = closed corner polygon of the box (for plotting)

function [GTPlot] = GetGTPlots(~)
GTPlot = [];

%Grab the ground-truth file for the current sequence folder
fid = fopen('groundTruth_Enhanced.txt');
if fid == -1
    error('GetGTPlots:fileNotFound', ...
        'Could not open groundTruth_Enhanced.txt in %s', pwd);
end

%Skip the '%' comment header at the top of the file
line = fgets(fid);
while line(1) == '%'
    line = fgets(fid);
end

%First non-comment line holds the number of images in the sequence
numImages = sscanf(line, '%d', 1);

for i = 1:numImages

    %Each record: fixed-width (13 char) image name, then the box count.
    %(The original also imread() each frame into an unused variable; that
    %dead I/O has been removed.)
    imageName = fscanf(fid, '%c', 13); %#ok<NASGU> name not needed here
    numBoxes  = fscanf(fid, '%d', 1);

    %Each box: " S (ulX ulY lrX lrY)" where S is the N/O status tag
    for j = 1:numBoxes
        fscanf(fid, '%c', 1);                  %% [space]
        Object_Status = fscanf(fid, '%c', 1);  %#ok<NASGU> tag (see Handoff)
        fscanf(fid, '%c', 1);                  %% (
        coords = fscanf(fid, '%d %d %d %d');
        fscanf(fid, '%c', 1);                  %% )
        ulX = coords(1); ulY = coords(2);
        lrX = coords(3); lrY = coords(4);
        GTPlot{i,j}.c = [ulX ulY abs(ulX-lrX) abs(ulY-lrY)];
        GTPlot{i,j}.x = [ulX lrX lrX ulX ulX];
        GTPlot{i,j}.y = [ulY ulY lrY lrY ulY];
    end

    fgetl(fid);  %% consume the rest of the line

end

fclose(fid);

end

--------------------------------------------------------------------------------

/GetGTPlots1.m:
--------------------------------------------------------------------------------
%GetGTPlots1:
%Extracts PLAIN ground truth data ('groundTruth.txt') for the OTCBVS
%benchmark, Dataset 1 — sufficient for Single_Track.  NoDetect_Track needs
%the extended data extracted by GetGTPlots instead.
%Note: credit for the basis of the extraction code goes to the OTCBVS group
%(cited in the accompanying paper); that snippet was modified and expanded
%to create this function.
%
%Input:
%  c_folder - sequence sub-folder (e.g. '00001') to cd into
%Returns: same GTPlot cell-array layout as GetGTPlots.

%FIX: the function was declared as GetGTPlots, mismatching its filename
%(GetGTPlots1.m); the declared name now matches the file.
function [GTPlot] = GetGTPlots1(c_folder)
GTPlot = [];

%NOTE(review): machine-specific absolute path retained from the original;
%adjust for your installation.
cd('C:\Users\Pushpak\Documents\MATLAB\OTCBVS\Dataset 1');
%Access the Dataset 1/5 directory listing for the selected folder:
cd(c_folder);

%Grab the ground-truth file for this sequence
fid = fopen('groundTruth.txt');
if fid == -1
    error('GetGTPlots1:fileNotFound', ...
        'Could not open groundTruth.txt in %s', pwd);
end

%Skip the '%' comment header at the top of the file
line = fgets(fid);
while line(1) == '%'
    line = fgets(fid);
end

%First non-comment line holds the number of images in the sequence
numImages = sscanf(line, '%d', 1);

for i = 1:numImages

    %Fixed-width (13 char) image name, then the number of boxes
    imageName = fscanf(fid, '%c', 13); %#ok<NASGU> name not needed here
    numBoxes  = fscanf(fid, '%d', 1);

    %Each box: " (ulX ulY lrX lrY)"
    for j = 1:numBoxes
        fscanf(fid, '%c', 2);              %% [space](
        coords = fscanf(fid, '%d %d %d %d');
        fscanf(fid, '%c', 1);              %% )
        ulX = coords(1); ulY = coords(2);
        lrX = coords(3); lrY = coords(4);
        GTPlot{i,j}.c = [ulX ulY abs(ulX-lrX) abs(ulY-lrY)];
        GTPlot{i,j}.x = [ulX lrX lrX ulX ulX];
        GTPlot{i,j}.y = [ulY ulY lrY lrY ulY];
    end

    fgetl(fid);  %% consume the rest of the line

end

fclose(fid);

end

--------------------------------------------------------------------------------
/Handoff.m:
--------------------------------------------------------------------------------
%Handoff:
%Extracts the manually entered new/old labels from the EXTENDED ground
%truth data ('groundTruth_Enhanced.txt').  NoDetect_Track uses these
%labels as its only detection input.  The mask layout corresponds to the
%ground-truth cell arrays produced by GetGTPlots/GetGTPlots1.
%
%Input:  (ignored) - callers pass the sequence folder, but the file is
%                    read from the current directory (callers cd first).
%Returns:
%  handoffs - numImages x maxBoxes matrix; handoffs(i,j) == 1 means box j
%             of frame i was tagged 'N' (a new object handed to the
%             tracker), 0 otherwise.

function [handoffs] = Handoff(~)

handoffs = [];

%Grab the ground-truth file for the current sequence folder
fid = fopen('groundTruth_Enhanced.txt');
if fid == -1
    error('Handoff:fileNotFound', ...
        'Could not open groundTruth_Enhanced.txt in %s', pwd);
end

%Skip the '%' comment header at the top of the file
line = fgets(fid);
while line(1) == '%'
    line = fgets(fid);
end

%First non-comment line holds the number of images in the sequence
numImages = sscanf(line, '%d', 1);

for i = 1:numImages

    %Fixed-width (13 char) image name, then the number of boxes
    imageName = fscanf(fid, '%c', 13); %#ok<NASGU> name not needed here
    numBoxes  = fscanf(fid, '%d', 1);

    for j = 1:numBoxes
        fscanf(fid, '%c', 1);                 %% [space]
        Object_Status = fscanf(fid, '%c', 1); %% 'N' = new object

        if Object_Status == 'N'
            handoffs(i,j) = 1;
        else
            handoffs(i,j) = 0;
        end

        fscanf(fid, '%c', 1);       %% (
        fscanf(fid, '%d %d %d %d'); %% coordinates (unused here)
        fscanf(fid, '%c', 1);       %% )
    end

    fgetl(fid);  %% consume the rest of the line

end

fclose(fid);

end

--------------------------------------------------------------------------------

/NoDetect_Track.m:
--------------------------------------------------------------------------------
%-------------------------------------------------------------------------%
%NoDetect_Track: Identifies object positions initially using groundtruth
%data. After this, tracking approaches are employed. This method is in
%contrast to Single_Track, which uses image preprocessing to identify
%targets.
%
%Methodology: This method provides a clean evaluation of tracker
%performance, fully divorced from the ability to identify targets of
%interest. Each new target is 'handed off' to the algorithm in its first
%detection frame. From that point on it is the job of the algorithm to
%track it and to recognize when the object has left the frame. It receives
%no follow-up information from the 'handoff' function other than new-object
%declarations. Tracking is performed using a boosting framework: a variety
%of object properties are combined linearly to form a stronger object
%association score. Details follow in the code below.
%NOTE: The outer for-loops allow more than one tracking algorithm to run in
%the same framework, giving flexibility to incorporate (and combine) new
%tracking approaches in the future.
%-------------------------------------------------------------------------%

%Bookkeeping:

%Add the working folder to the path --> change to your installation
%addpath('C:\Users\Pushpak\Documents\MATLAB\OTCBVS\Dataset 1');
%cd('C:\Users\Pushpak\Documents\MATLAB\OTCBVS\Dataset 1');

%Select the desired sequence folder
c_folder = '00001';

%Directory listing of all frames in the selected folder
cd(c_folder);
d_list = dir('*.bmp');
close all;
clearvars -except c_folder d_list;

%Background estimate: assume the first frame is background and fold in the
%darker-only difference to the second frame (positive differences are
%clamped away so bright foreground objects do not enter the background).
I_p = double(imread(d_list(2).name)) - double(imread(d_list(1).name));
I_p(I_p > 0) = 0;   %vectorized clamp (was an element-wise double loop)
I_bg = double(imread(d_list(1).name)) + I_p;

%Ground-truth boxes and new-object handoff mask
GTPlots  = GetGTPlots(c_folder);
Handoffs = Handoff(c_folder);

%Tracking lists: one slice of t_list per tracking method. Row layout:
%  1:2 = upper-left x,y   3:4 = width,height   5:6 = previous x,y
%  7:8 = predicted x,y for the next frame   (a row of -1's = dropped track)
num_tmethods = 1;                %number of tracking methods
t_list = zeros(0,0,num_tmethods);
background_status = 1;           %background status (0 for bg, 1 for fg)
bgf_count = 1;                   %number of frames folded into I_bg
templates{1,1,3} = zeros(1);     %image templates used for correlation
stats{1,1} = zeros(1);           %region properties per tracked object

disp('Program Start...');

for i = 1:numel(d_list)

    I_curr = double(imread(d_list(i).name));
    %---------------------------------------------------------------------%
    %Preprocessing: background subtraction; later stages binarize with an
    %empirical threshold (45) and reject small noise regions.

    %Default assumption: no objects detected (background frame).
    %NOTE(review): nothing below ever clears this flag, so the
    %non-background update branch at the bottom is currently dead code.
    background_status = 1;

    %Foreground = |frame - background|
    I_diff = abs(I_curr - I_bg);
    %---------------------------------------------------------------------%

    %Step 1: accept any new handoffs for this frame. After handoff it is
    %the algorithm's job to maintain the track.
    for n = 1:size(Handoffs,2)
        if Handoffs(i,n) == 1
            n_size = size(t_list,1) + 1;
            for x = 1:size(t_list,3)
                t_list(n_size,1,x) = GTPlots{i,n}.x(1);
                t_list(n_size,2,x) = GTPlots{i,n}.y(1);
                t_list(n_size,3,x) = GTPlots{i,n}.c(3);
                t_list(n_size,4,x) = GTPlots{i,n}.c(4);
                t_list(n_size,5,x) = GTPlots{i,n}.x(1);
                t_list(n_size,6,x) = GTPlots{i,n}.y(1);
                t_list(n_size,7,x) = GTPlots{i,n}.x(1);
                t_list(n_size,8,x) = GTPlots{i,n}.y(1);

                %All peripheral statistics are determined for any new
                %detection at this point.

                %First, the correlation templates:
                ulX = t_list(n_size,1,x);
                lrX = t_list(n_size,1,x) + t_list(n_size,3,x);
                ulY = t_list(n_size,2,x);
                lrY = t_list(n_size,2,x) + t_list(n_size,4,x);
                templates{n_size,x,1} = I_diff(ulY:lrY,ulX:lrX);
                templates{n_size,x,2} = I_curr(ulY:lrY,ulX:lrX);
                templates{n_size,x,3} = templates{n_size,x,1};

                %Region statistics identifying this object
                S = regionprops((templates{n_size,x,1} > 45),'ConvexArea','Extent',...
                    'MajorAxisLength','MinorAxisLength','Orientation','Perimeter');
                if ~isempty(S)
                    %Keep the region with the largest convex area.
                    %FIX: the original scan never updated its running
                    %maximum, so it did not reliably pick the largest.
                    [~, ind] = max([S.ConvexArea]);
                    stats{n_size,x} = S(ind);
                end
            end
        end
    end
    figure(1), imshow(I_curr,[0 255]);
    hold on;
    figure(2), imshow(I_diff > 45,[0 1]);
    hold on;

    %---------------------------------------------------------------------%
    %Perform tracking: update every track list for each tracking method

    thresh_l = [];
    min_l = zeros(size(t_list,1),1);

    %Identify candidate objects of interest in this frame
    c_t = I_diff > 45;
    c_c = regionprops(c_t,'Centroid','ConvexArea','Extent',...
        'MajorAxisLength','MinorAxisLength','Orientation','Perimeter');
    val_thresh = .7;

    %Reject regions that are too small to be targets (iterate backwards so
    %deletion does not disturb the remaining indices)
    for d = length(c_c):-1:1
        if c_c(d).ConvexArea < 10
            c_c(d) = [];
        end
    end

    for l = 1:size(t_list,3)
        for n = 1:size(t_list,1)
            if t_list(n,1,l) ~= -1
                %Correlation surfaces against both templates
                c   = normxcorr2(templates{n,l,1}, I_diff);
                c_i = normxcorr2(templates{n,l,2}, I_curr);

                %Crop the (padded) correlation maps back to image size
                ys = round((-size(I_diff,1)+size(c,1))/2);
                xs = round((-size(I_diff,2)+size(c,2))/2);
                c = c(ys:ys+size(I_diff,1)-1, xs:xs+size(I_diff,2)-1);

                ys = round((-size(I_curr,1)+size(c_i,1))/2);
                xs = round((-size(I_curr,2)+size(c_i,2))/2);
                c_i = c_i(ys:ys+size(I_curr,1)-1, xs:xs+size(I_curr,2)-1);

                %Global correlation peak, stored as [x y].
                %FIX: the original stored [row col] = [y x] but compared
                %peakc(1) against Centroid(1) (an x coordinate) below.
                [m0, ind1] = max(c);
                [~, ind2] = max(m0);
                peakc = [ind2 ind1(ind2)];

                %Determine which object (if any) in this frame is the best
                %match for the track in question. c_c is the candidate
                %list; thresh(j) is candidate j's score (smaller = better).
                thresh = [];
                x_predicted = t_list(n,7,l);
                y_predicted = t_list(n,8,l);
                aspect_predicted = stats{n,l}.MajorAxisLength/stats{n,l}.MinorAxisLength;

                %Boosting weights: set by guesses initially, later tuned
                %(via SetParamNDTA) to minimize total error. Alter to
                %desired values.
                a1 = .1;  %x-prediction distance      (was also tried: .05)
                a2 = .1;  %y-prediction distance      (.05)
                a3 = 0;   %convex-area difference     (.2)
                a4 = 0;   %extent difference
                a5 = 0.1; %aspect-ratio difference
                a6 = 0;   %orientation difference
                a7 = 0;   %perimeter difference

                %In the first four frames the background-subtracted data is
                %assumed inaccurate, so the image-correlation term carries
                %the weight; afterwards the weights shift. This discrete
                %switch could be made continuous to weight data by its
                %certainty over time.
                if i < 5
                    a8  = 0;
                    a11 = .9;
                    a9  = 0;
                    a10 = 0;
                else
                    a8  = .45;  %(.1)
                    a11 = .45;  %(.55)
                    a9  = .1;   %(.025)
                    a10 = .1;   %(.025)
                end

                for j = 1:length(c_c)
                    aspect_ratio = c_c(j).MajorAxisLength/c_c(j).MinorAxisLength;

                    %Best correlation response in a small window around the
                    %candidate centroid
                    window = 4;
                    x_min = max(round(c_c(j).Centroid(1))-window,1);
                    x_max = min(round(c_c(j).Centroid(1))+window,size(c,2));
                    y_min = max(round(c_c(j).Centroid(2))-window,1);
                    y_max = min(round(c_c(j).Centroid(2))+window,size(c,1));
                    corr   = max(max(c(y_min:y_max,x_min:x_max)));
                    corr_i = max(max(c_i(y_min:y_max,x_min:x_max)));

                    %Linear combination of the 11 normalized features
                    thresh(j) = a1 * abs(c_c(j).Centroid(1) - x_predicted)/size(I_diff,2) +...
                        a2 * abs(c_c(j).Centroid(2) - y_predicted)/size(I_diff,1) +...
                        a3 * abs(c_c(j).ConvexArea - stats{n,l}.ConvexArea)/max(.1,stats{n,l}.ConvexArea)+...
                        a4 * abs(c_c(j).Extent - stats{n,l}.Extent)/max(.1,stats{n,l}.Extent)+...
                        a5 * abs(aspect_ratio - aspect_predicted)/max(.1,aspect_predicted)+...
                        a6 * abs(c_c(j).Orientation - stats{n,l}.Orientation)/90+...
                        a7 * abs(c_c(j).Perimeter - stats{n,l}.Perimeter)/max(.1,stats{n,l}.Perimeter)+...
                        a8 * abs(1 - corr)/1+...
                        a9 * abs(c_c(j).Centroid(1) - peakc(1))/size(I_diff,2)+...
                        a10 * abs(c_c(j).Centroid(2) - peakc(2))/size(I_diff,1)+...
                        a11 * abs(1 - corr_i)/1;
                end
                if ~isempty(thresh)
                    %Take the best-scoring candidate
                    [val, t_ind] = min(thresh);
                    thresh_l{n} = thresh;
                    min_l(n) = val;

                    t_list(n,1,l) = min(size(I_diff,2),max(1,round(c_c(t_ind).Centroid(1)-t_list(n,3,l)/2)));
                    t_list(n,2,l) = min(size(I_diff,1),max(1,round(c_c(t_ind).Centroid(2)-t_list(n,4,l)/2)));
                    if val > val_thresh
                        %No viable association found: assume the object has
                        %left the screen and drop the track (a row of -1's
                        %tells the rest of the program to ignore it)
                        t_list(n,:,l) = -1 * ones(1,8);
                        min_l(n) = 10;
                    end
                else
                    %No candidates at all: drop the track as above
                    t_list(n,:,l) = -1 * ones(1,8);
                    min_l(n) = 10;  %arbitrary large placeholder
                end

            end
        end

        %Conflict resolution: conflict = 0 means no repeat associations;
        %conflict = 1 forces another reconciliation pass.
        conflict = 1;
        while conflict == 1
            conflict = 0;
            for n = 1:length(thresh_l)
                if ~isempty(thresh_l{n}) && min_l(n) < 10
                    for k = 1:length(thresh_l)
                        %FIX: the original tested isempty(thresh_l(k)) — a
                        %1x1 cell, never empty — instead of the contents
                        if ~isempty(thresh_l{k}) && min_l(k) < 10
                            if n ~= k
                                %The accumulated stats make it possible to
                                %pick the most likely associated object.
                                %Centroids within 10 px of each other are
                                %an assumed conflict.
                                t_dis = 10;
                                if abs(t_list(n,1,l)-t_list(k,1,l)) <= t_dis && ...
                                        abs(t_list(n,2,l)-t_list(k,2,l)) <= t_dis

                                    %Give the association to the track with
                                    %the lower score; the loser takes its
                                    %next-best candidate
                                    if min_l(n) > min_l(k)
                                        [val, t_ind] = min(thresh_l{n});
                                        thresh_l{n}(t_ind) = 10;
                                        [val, t_ind] = min(thresh_l{n});
                                        t_list(n,1,l) = min(size(I_diff,2),...
                                            max(1,round(c_c(t_ind).Centroid(1)-t_list(n,3,l)/2)));
                                        t_list(n,2,l) = min(size(I_diff,1),...
                                            max(1,round(c_c(t_ind).Centroid(2)-t_list(n,4,l)/2)));
                                        %Make sure the fallback association
                                        %is still viable
                                        if val > val_thresh
                                            t_list(n,:,l) = -1 * ones(1,8);
                                            min_l(n) = 10;
                                        else
                                            min_l(n) = val;
                                        end
                                        conflict = 1;
                                    end
                                end
                            end
                        end
                    end
                else
                    min_l(n) = 10;
                end
            end
        end
    end
    disp('NEXT');
    %---------------------------------------------------------------------%
    %Display all tracks: ground truth and every method's t_list are drawn
    %together on the current frame.
    p = 1;
    while p <= size(GTPlots,2)
        if ~isempty(GTPlots{i,p})
            figure(1), plot(GTPlots{i,p}.x,GTPlots{i,p}.y,'r');
            hold on;
        end
        p = p + 1;
    end

    for x = 1:size(t_list,3)
        %Plot every live track of this method (skip empty rows and rows
        %marked deleted with -1's)
        p = 1;
        while p <= size(t_list,1)
            if sum(t_list(p,:,x)) ~= 0 && t_list(p,1,x) ~= -1
                ulX = t_list(p,1,x);
                lrX = t_list(p,1,x) + t_list(p,3,x);
                ulY = t_list(p,2,x);
                lrY = t_list(p,2,x) + t_list(p,4,x);
                figure(1), plot([ulX lrX lrX ulX ulX],[ulY ulY lrY lrY ulY],'Color',[.5,.1,1/x]);
                hold on;
            end
            p = p + 1;
        end
        %Predict each track's position in the next frame
        t_list(:,:,x) = tlupdate(t_list(:,:,x),I_diff);
    end

    %---------------------------------------------------------------------%
    %Update all track statistics from this frame, starting with the
    %correlation templates

    for l = 1:size(t_list,3)
        for n = 1:size(t_list,1)
            if t_list(n,1,l) ~= -1
                %Image templates for each object, clamped to the frame
                ulX = max(1,t_list(n,1,l));
                lrX = min(size(I_diff,2),t_list(n,1,l) + t_list(n,3,l));
                ulY = max(t_list(n,2,l),1);
                lrY = min(size(I_diff,1),t_list(n,2,l) + t_list(n,4,l));
                templates{n,l,1} = I_diff(ulY:lrY,ulX:lrX);
                templates{n,l,2} = I_curr(ulY:lrY,ulX:lrX);

                if ~isempty(templates{n,l,3})
                    templates{n,l,3} = (templates{n,l,1} + templates{n,l,2})/2;
                else
                    templates{n,l,3} = templates{n,l,1};
                end

                %Refresh the region statistics identifying this object
                S = regionprops((templates{n,l,1} > 45),'ConvexArea','Extent',...
                    'MajorAxisLength','MinorAxisLength','Orientation','Perimeter');

                if ~isempty(S)
                    %Keep the largest-convex-area region (FIX: original
                    %scan never updated its running maximum)
                    [~, ind] = max([S.ConvexArea]);
                    stats{n,l} = S(ind);
                end
            end
        end
    end
    %Every tracked object now carries 6 region properties, an image
    %template, and a predicted next-frame position — all used for tracking.

    %---------------------------------------------------------------------%
    %Background updating: improve the background estimate over time by
    %averaging in new background estimates
    if background_status == 1
        bgf_count = bgf_count + 1;
        I_bg = bgupdate(I_bg,I_curr,bgf_count);
    else
        %Not a background frame: fold in our best guess at the background.
        %NOTE(review): unreachable while background_status is never cleared
        %above; bgupdate is also not in the repo listing — confirm it exists.
        if i >= 2
            I_p = double(imread(d_list(i).name)) - double(imread(d_list(i-1).name));
            I_p(I_p > 0) = 0;   %keep darker-only differences (vectorized)
            I_nbg = double(imread(d_list(i-1).name)) + I_p;
            bgf_count = bgf_count + 1;
            I_bg = bgupdate(I_bg,I_nbg,bgf_count);
        end
    end
    %---------------------------------------------------------------------%

    %Pause to allow the user to view the frame.
Proceed on a button press: 491 | k = waitforbuttonpress; 492 | 493 | end 494 | 495 | 496 | 497 | -------------------------------------------------------------------------------- /NoDetect_Track_A.m: -------------------------------------------------------------------------------- 1 | %-------------------------------------------------------------------------% 2 | %NOTE: This function is an automated version of NoDetect_Track. It does not 3 | %pause and wait for user input. It also does not show output graphs. It 4 | %only generates counts of associations in each frame for later error 5 | %checking. It also takes input parameters from SetParmNDTA.m to determine 6 | %performance of NoDetect_Track over ranges of inputs. This in conjunction 7 | %with SetParmNDTA was used to get more optimized input parameters. The 8 | %original NoDetect_Track algorihtm is more heavily documented than this 9 | %automated version. 10 | % 11 | %NoDetect_Track_A: Identifies object positions initally using groundtruth 12 | %data. After this tracking approaches are employed. This method is in 13 | %contrast to Single_Track which uses image preprocessing toidentify targets 14 | %Methodology: This method provides a clean evaluation technique of tracker 15 | %performance fully divorced from an ability to identify targets of 16 | %interest. Each new target is 'handed off' to the algorithm in its first 17 | %detection frame. From that point on, it is the job of the algorithm to 18 | %track it and to recognize when the object has left the frame. It receives 19 | %no follow up information from the 'handoff' function other than new object 20 | %declarations. Tracking is performed using a boosting framework. A variety 21 | %of object properties are identified as linearly combined to form a 22 | %stronger object association tool. Details follow in the code below. 23 | %NOTE: Outer for loops in the program allow for implementation of more than 24 | %one track algorithm in the same framework. 
A goal of this program was to 25 | %have a high level of flexibility to incorporate new tracking approaches in 26 | %the future. It will also allow for experimentation with combining tracking 27 | %approaches. This version is automated and does not take user input or 28 | %provide graph output. 29 | %-------------------------------------------------------------------------% 30 | function[t_count] = NoDetect_Track_A(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,val_thresh,GTPlots, Handoffs,tdiff) 31 | %Bookkeeping: 32 | 33 | %Add the working folder to the path --> Change to appropriate 34 | addpath('C:\Users\Pushpak\Documents\MATLAB\OTCBVS\Dataset 1'); 35 | cd('C:\Users\Pushpak\Documents\MATLAB\OTCBVS\Dataset 1'); 36 | 37 | %Select the desired folder 38 | c_folder = '00001'; 39 | 40 | %First access the Dataset 1/5 Directory list for the selected folder: 41 | cd(c_folder); 42 | d_list = dir('*.bmp'); %Directory listing of all frames 43 | close all; 44 | clearvars -except c_folder d_list a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 val_thresh GTPlots Handoffs tdiff; 45 | 46 | %Background value: Updates with all non-object frames. Assumes first frame 47 | %is background 48 | I_p = double(imread(d_list(2).name)) - double(imread(d_list(1).name)); 49 | 50 | for n1 = 1:size(I_p,1) 51 | for n2 = 1:size(I_p,2) 52 | if I_p(n1,n2) > 0 53 | I_p(n1,n2) = 0; 54 | end 55 | end 56 | end 57 | I_bg = double(imread(d_list(1).name)) + I_p; 58 | 59 | %Create handoffs and ground truth matrices 60 | %GTPlots = GetGTPlots(c_folder); 61 | %Handoffs = Handoff(c_folder); 62 | 63 | %Create the tracking lists: Multiple lists can be maintained. 
Each list 64 | %would correspond to a specific tracking method 65 | num_tmethods = 1; %Define the number of tracking methods 66 | t_list = zeros(0,0,num_tmethods); 67 | background_status = 1; %Sets background status (0 for bg, 1 for fg) 68 | bgf_count = 1; %Background frame count 69 | templates{1,1,3} = zeros(1); %Stores image templates to extract properties 70 | stats{1,1} = zeros(1); 71 | 72 | %%disp('Program Start...'); 73 | %tic 74 | for i=1:numel(d_list) 75 | 76 | 77 | I_curr = double(imread(d_list(i).name)); 78 | %---------------------------------------------------------------------% 79 | %Preprocessing: 80 | %Performs background subtraction, binarization off of an empirically 81 | %defined threshold, erosion to remove small noise objects, and finally 82 | %%displays the processed image as well as the original image in seperate 83 | %figures 84 | 85 | %As a default, assume objects are not detected (background frame) 86 | background_status = 1; 87 | 88 | %Take the difference between frame and bg to find foreground 89 | I_diff = abs(I_curr-I_bg); 90 | %---------------------------------------------------------------------% 91 | 92 | 93 | %Step 1: For each new image, first determine if there are any new 94 | %handoffs. 
After handoff its the algorithms job to maintain track 95 | 96 | for n = 1:size(Handoffs,2) 97 | if Handoffs(i,n) == 1 98 | n_size = size(t_list,1)+1; 99 | for x = 1:size(t_list,3) 100 | t_list(n_size,1,x) = GTPlots{i,n}.x(1); 101 | t_list(n_size,2,x) = GTPlots{i,n}.y(1); 102 | t_list(n_size,3,x) = GTPlots{i,n}.c(3); 103 | t_list(n_size,4,x) = GTPlots{i,n}.c(4); 104 | t_list(n_size,5,x) = GTPlots{i,n}.x(1); 105 | t_list(n_size,6,x) = GTPlots{i,n}.y(1); 106 | t_list(n_size,7,x) = GTPlots{i,n}.x(1); 107 | t_list(n_size,8,x) = GTPlots{i,n}.y(1); 108 | 109 | %All peripheral statistics must be determined for any new 110 | %detections at this point 111 | 112 | %First, create correlation templates: 113 | ulX = t_list(n_size,1,x); 114 | lrX = t_list(n_size,1,x) + t_list(n_size,3,x); 115 | ulY = t_list(n_size,2,x); 116 | lrY = t_list(n_size,2,x) + t_list(n_size,4,x); 117 | templates{n_size,x,1} = I_diff(ulY:lrY,ulX:lrX); 118 | templates{n_size,x,2} = I_curr(ulY:lrY,ulX:lrX); 119 | templates{n_size,x,3} = templates{n_size,x,1}; 120 | 121 | %Accumulate stats identifying this object 122 | S = regionprops((templates{n_size,x,1}>tdiff),'ConvexArea','Extent',... 
123 | 'MajorAxisLength','MinorAxisLength','Orientation','Perimeter'); 124 | if isempty(S) == 0 125 | if length(S) == 1 126 | stats{n_size,x} = S; 127 | else 128 | %If there is more than one object found, select the one 129 | %with the largest convex area and use its properties 130 | maxi = S(1).ConvexArea; 131 | ind = 1; 132 | for v = 2:length(S) 133 | if S(v).ConvexArea>maxi 134 | ind = v; 135 | end 136 | end 137 | stats{n_size,x} = S(ind); 138 | end 139 | end 140 | end 141 | end 142 | end 143 | %figure(1),imshow(I_curr,[0 255]); 144 | %hold on; 145 | %figure(2),imshow(I_diff,[-255 255]); 146 | %figure(2),imshow(I_diff>tdiff,[0 1]); 147 | %hold on; 148 | 149 | %---------------------------------------------------------------------% 150 | %Perform Tracking: 151 | %Track list updates for each tracking technique at this point 152 | 153 | thresh_l = []; 154 | min_l = zeros(size(t_list,1),1); 155 | c_t = I_diff>tdiff; 156 | c_c = regionprops(c_t, 'Centroid','ConvexArea','Extent',... 157 | 'MajorAxisLength','MinorAxisLength','Orientation','Perimeter'); 158 | 159 | for d = length(c_c):-1:1 160 | if isempty(c_c(d)) == 0 161 | if c_c(d).ConvexArea < 10 162 | c_c(d) = []; 163 | end 164 | end 165 | end 166 | 167 | % val_thresh = .75; 168 | 169 | for l = 1:size(t_list,3) 170 | for n=1:size(t_list,1) 171 | if t_list(n,1,l) ~= -1 172 | %Find correlation peaks 173 | c = normxcorr2(templates{n,l,1}, I_diff); 174 | c_i = normxcorr2(templates{n,l,2}, I_curr); 175 | 176 | %Make the images of equal size: 177 | ys = round((-size(I_diff,1)+size(c,1))/2); 178 | xs = round((-size(I_diff,2)+size(c,2))/2); 179 | c = c(ys:ys+size(I_diff,1)-1,xs:xs+size(I_diff,2)-1); 180 | 181 | ys = round((-size(I_curr,1)+size(c_i,1))/2); 182 | xs = round((-size(I_curr,2)+size(c_i,2))/2); 183 | c_i = c_i(ys:ys+size(I_curr,1)-1,xs:xs+size(I_curr,2)-1); 184 | 185 | %For all detecctions peaks, also get other stats. 
Then use 186 | %the full range of stats to assign track correspondences 187 | [m0 ind1] = max(c); 188 | [m1 ind2] = max(m0); 189 | peakc = [ind1(ind2) ind2]; 190 | 191 | [m2 ind3] = max(max(c_i)); 192 | 193 | 194 | %Now determine for the object in question what if any 195 | %object in this frame is the best match: 196 | 197 | %C_C is the moderate correlation list. It is a list of starting 198 | %guesses 199 | thresh = []; 200 | x_predicted = t_list(n,7,l); 201 | y_predicted = t_list(n,8,l); 202 | aspect_predicted = stats{n,l}.MajorAxisLength/stats{n,l}.MinorAxisLength; 203 | 204 | %Boosting parameters: Set by guesses initially, and later 205 | %determined to minimize total error: 206 | %SET BY INPUT NOW! 207 | 208 | for j = 1:length(c_c) 209 | if isempty(c_c(j)) == 0 210 | aspect_ratio = c_c(j).MajorAxisLength/c_c(j).MinorAxisLength; 211 | 212 | window = 7; 213 | x_min = max(round((c_c(j).Centroid(1)))-window,1); 214 | x_max = min(round((c_c(j).Centroid(1)))+window,size(c,2)); 215 | y_min = max(round(c_c(j).Centroid(2))-window,1); 216 | y_max = min(round(c_c(j).Centroid(2))+window,size(c,1)); 217 | corr = max(max(c(y_min:y_max,x_min:x_max))); 218 | corr_i = max(max(c_i(y_min:y_max,x_min:x_max))); 219 | 220 | %Initially started with 11 boosting parameters: 221 | thresh(j) = a1* abs(c_c(j).Centroid(1) - x_predicted)/size(I_diff,2) +... 222 | a2 * abs(c_c(j).Centroid(2) - y_predicted)/size(I_diff,1) +... 223 | a3 * abs(c_c(j).ConvexArea - stats{n,l}.ConvexArea)/max(.1,stats{n,l}.ConvexArea)+... 224 | a4 * abs(c_c(j).Extent - stats{n,l}.Extent)/max(.1,stats{n,l}.Extent)+... 225 | a5 * abs(aspect_ratio - aspect_predicted)/max(.1,aspect_predicted)+... 226 | a6 * abs(c_c(j).Orientation - stats{n,l}.Orientation)/max(.1,stats{n,l}.Orientation)+... 227 | a7 * abs(c_c(j).Perimeter - stats{n,l}.Perimeter)/max(.1,stats{n,l}.Perimeter)+... 228 | a8 * abs(1 - corr)/1+... 229 | a9 * abs(c_c(j).Centroid(1) - peakc(1))/size(I_diff,2)+... 
230 | a10 * abs(c_c(j).Centroid(2) - peakc(2))/size(I_diff,1)+... 231 | a11 * abs(1 - corr_i)/1; 232 | else 233 | %This is now unselectable if we didn't see viable 234 | %objects in the window: 235 | thresh(j) = 10; 236 | end 237 | end 238 | if isempty(thresh) == 0 239 | [val t_ind] = min(thresh); 240 | thresh_l{n} = thresh; 241 | min_l(n) = val; 242 | 243 | t_list(n,1,l) = min(size(I_diff,2),max(1,round(c_c(t_ind).Centroid(1)-t_list(n,3,l)/2))); 244 | t_list(n,2,l) = min(size(I_diff,1),max(1,round(c_c(t_ind).Centroid(2)-t_list(n,4,l)/2))); 245 | %val 246 | if val > val_thresh 247 | %If this threshold is reached, we could not find an 248 | %associate track point. It is then assumed that this 249 | %point has now left the screen. If this is the case, we 250 | %delete it by making the row full of -1's. This 251 | %indicates to the program to ignore it in the future: 252 | t_list(n,:,l) = -1 * ones(1,8); 253 | min_l(n) = 10; 254 | end 255 | else 256 | %If this threshold is reached, we could not find an 257 | %associate track point. It is then assumed that this 258 | %point has now left the screen. If this is the case, we 259 | %delete it by making the row full of -1's. This 260 | %indicates to the program to ignore it in the 261 | %future: 262 | t_list(n,:,l) = -1 * ones(1,8); 263 | %Fill min list with an arbitrary large number 264 | min_l(n) = 10; 265 | end 266 | 267 | end 268 | end 269 | 270 | conflict = 1; 271 | while conflict == 1 272 | conflict = 0; 273 | for n = 1:length(thresh_l) 274 | if isempty(thresh_l{n}) == 0 && min_l(n) <10 275 | for k=1:length(thresh_l) 276 | if isempty(thresh_l(k)) == 0 && min_l(k) <10 277 | if n ~= k 278 | %Stat compilation now makes it possible to evaluate which 279 | %is the most likely associated object 280 | 281 | %Centroids must not be within 10 pixels or we have 282 | %an assumed conflict 283 | t_dis = 10; 284 | if abs(t_list(n,1,l)-t_list(k,1,l))<=t_dis && ... 
285 | abs(t_list(n,2,l)-t_list(k,2,l))<=t_dis 286 | %Then there is a conflict! 287 | if min_l(n) > min_l(k) 288 | [val t_ind] = min(thresh_l{n}); 289 | thresh_l{n}(t_ind) = 10; 290 | [val t_ind] = min(thresh_l{n}); 291 | t_list(n,1,l) = min(size(I_diff,2),... 292 | max(1,round(c_c(t_ind).Centroid(1)-t_list(n,3,l)/2))); 293 | t_list(n,2,l) = min(size(I_diff,1),... 294 | max(1,round(c_c(t_ind).Centroid(2)-t_list(n,4,l)/2))); 295 | %Update the new min 296 | %val 297 | if val > val_thresh 298 | t_list(n,:,l) = -1 * ones(1,8); 299 | min_l(n) = 10; 300 | else 301 | min_l(n) = val; 302 | end 303 | conflict = 1; 304 | end 305 | end 306 | end 307 | end 308 | end 309 | else 310 | min_l(n) = 10; 311 | end 312 | end 313 | end 314 | end 315 | %%disp('NEXT'); 316 | %---------------------------------------------------------------------% 317 | %Display all tracks: At this point tracks have been maintained 318 | %seperately in the t_list matrix. Also ground truths have been 319 | %maintained. This section %displays all these tracks simultaneously on 320 | %the frame of interest. 
321 | p = 1; 322 | while p <= size(GTPlots,2) 323 | if isempty(GTPlots{i,p}) == 0 324 | %figure(1), plot(GTPlots{i,p}.x,GTPlots{i,p}.y, 'r'); 325 | %hold on; 326 | end 327 | p = p+1; 328 | end 329 | 330 | for x = 1:size(t_list,3) 331 | %Plot the elements of each t_list here 332 | p = 1; 333 | while p <= size(t_list,1) 334 | %Do NOT plot if the row is empty or indicated as a deleted 335 | %track 336 | if sum(t_list(p,:,x)) ~= 0 && t_list(p,1,x) ~= -1 337 | ulX = t_list(p,1,x); 338 | lrX = t_list(p,1,x) + t_list(p,3,x); 339 | ulY = t_list(p,2,x); 340 | lrY = t_list(p,2,x) + t_list(p,4,x); 341 | %figure(1), plot([ulX lrX lrX ulX ulX],[ulY ulY lrY lrY ulY],'Color', [.5,.1,1/x]); 342 | %hold on; 343 | end 344 | p = p+1; 345 | end 346 | %At this point update position prediction for the next frame 347 | t_list(:,:,x) = tlupdate(t_list(:,:,x),I_diff); 348 | end 349 | 350 | 351 | %---------------------------------------------------------------------% 352 | %Update All Track Statistics based on this frame: 353 | %First update the correlation templates 354 | 355 | for l = 1:size(t_list,3) 356 | for n = 1:size(t_list,1) 357 | if t_list(n,1,l) ~= -1 358 | %First create image templates for each object 359 | ulX = max(1,t_list(n,1,l)); 360 | lrX = min(size(I_diff,2),t_list(n,1,l) + t_list(n,3,l)); 361 | ulY = max(t_list(n,2,l),1); 362 | lrY = min(size(I_diff,1),t_list(n,2,l) + t_list(n,4,l)); 363 | templates{n,l,1} = I_diff(ulY:lrY,ulX:lrX); 364 | templates{n,l,2} = I_curr(ulY:lrY,ulX:lrX); 365 | 366 | if isempty(templates{n,l,3}) == 0 367 | templates{n,l,3} = (templates{n,l,1} + templates{n,l,2})/2; 368 | else 369 | templates{n,l,3} = templates{n,l,1}; 370 | end 371 | 372 | %Accumulate stats identifying this object 373 | S = regionprops((templates{n,l,1}>tdiff),'ConvexArea','Extent',... 
374 | 'MajorAxisLength','MinorAxisLength','Orientation','Perimeter'); 375 | 376 | if isempty(S) == 0 377 | if length(S) == 1 378 | stats{n,l} = S; 379 | else 380 | %If there is more than one object found, select the one 381 | %with the largest convex area and use its properties 382 | maxi = S(1).ConvexArea; 383 | ind = 1; 384 | for v = 2:length(S) 385 | if S(v).ConvexArea>maxi 386 | ind = v; 387 | end 388 | end 389 | stats{n,l} = S(ind); 390 | end 391 | end 392 | end 393 | end 394 | end 395 | %Now all objects in the track list have 6 identified properties, as 396 | %well as an image template, an expected position in the next frame. 397 | %These characteristics will be used for tracking. 398 | 399 | %---------------------------------------------------------------------% 400 | %Background Updating: 401 | %Improve background estimate over time by averaging in new background 402 | %estimates 403 | %Update the background with curr_frame if a background frame 404 | if background_status == 1 405 | bgf_count = bgf_count + 1; 406 | I_bg = bgupdate(I_bg,I_curr,bgf_count); 407 | else 408 | %If not a background frame, update with our best guess at bg 409 | if i >=2 410 | I_p = double(imread(d_list(i).name)) - double(imread(d_list(i-1).name)); 411 | 412 | for n1 = 1:size(I_p,1) 413 | for n2 = 1:size(I_p,2) 414 | if I_p(n1,n2) > 0 415 | I_p(n1,n2) = 0; 416 | end 417 | end 418 | end 419 | I_nbg = double(imread(d_list(i-1).name)) + I_p; 420 | bgf_count = bgf_count + 1; 421 | I_bg = bgupdate(I_bg,I_nbg,bgf_count); 422 | end 423 | end 424 | %---------------------------------------------------------------------% 425 | 426 | %Pause to allow the user to view the frame: 427 | %k = waitforbuttonpress; 428 | %pause(.1); 429 | count = 0; 430 | for h = 1:size(t_list,1) 431 | if t_list(h,1) ~= -1 432 | count = count + 1; 433 | end 434 | end 435 | t_count(i) = count; 436 | 437 | 438 | end 439 | %toc 440 | 441 | 442 | 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Object-Tracking-In-Infrared 2 | This project explores object tracking of human and vehicle targets in the infrared using Matlab. Using the OTCBVS Benchmark Dataset Collection, tracking of pedestrians are performed in a variety of infrared videos. Tracking is performed in cases with single pedestrians in the scene and also in cases with multiple pedestrians. Tracking approaches is centered on techniques most suited to the infrared. The project will not focus on identification of targets but instead on maintenance of accurate track following the identification. Performance of the developed tracking techniques are analyzed using the ground truth data from the databases. Metrics, such as those from the IEEE workshop on Performance Evaluation of Tracking and Surveillance is used for the analysis. All coding is done in MATLAB. 3 | -------------------------------------------------------------------------------- /SetParam.m: -------------------------------------------------------------------------------- 1 | %SetParm: Optimization algorithm for Single_Track 2 | %Methodology: 3 | %SetParm iterates rapidly through a wide range of input parameters (p1 - 4 | %p6) which relate to sizes of morphological processing windows and other 5 | %similar parameters in Single_Track. The Single_Track called is an 6 | %automated version. It is run on all 10 image sequences, and the frame 7 | %error and distance error are calculated for each scenario (and parameter 8 | %input combination). Ground truth data is then extracted and compared to 9 | %the calculated data. The minimum error input parameter combinations are 10 | %estimated and shown below. This function serves as a simplified brute 11 | %force parameter optimization approach. 
12 | 13 | %RUN SINGLE TRACK OVER A WIDE PARAMETER RANGE 14 | count = 1; 15 | ALL = []; 16 | p = []; 17 | 18 | for p1 = 85:-10:35 19 | for p2 = 1:1:3 20 | for p3 = 1:1:3 21 | for p4 = 2:2:14 22 | for p5 = 2:2:8 23 | for p6 = 0:1 24 | 25 | 26 | [ALL{count,1} p{count}] = Single_Track(p1,p2,p3,p4,p5,p6,'00001'); 27 | [ALL{count,2} p{count}] = Single_Track(p1,p2,p3,p4,p5,p6,'00002'); 28 | [ALL{count,3} p{count}] = Single_Track(p1,p2,p3,p4,p5,p6,'00003'); 29 | [ALL{count,4} p{count}] = Single_Track(p1,p2,p3,p4,p5,p6,'00004'); 30 | [ALL{count,5} p{count}] = Single_Track(p1,p2,p3,p4,p5,p6,'00005'); 31 | [ALL{count,6} p{count}] = Single_Track(p1,p2,p3,p4,p5,p6,'00006'); 32 | [ALL{count,7} p{count}] = Single_Track(p1,p2,p3,p4,p5,p6,'00007'); 33 | [ALL{count,8} p{count}] = Single_Track(p1,p2,p3,p4,p5,p6,'00008'); 34 | [ALL{count,9} p{count}] = Single_Track(p1,p2,p3,p4,p5,p6,'00009'); 35 | [ALL{count,10} p{count}] = Single_Track(p1,p2,p3,p4,p5,p6,'00010'); 36 | count = count + 1; 37 | 38 | end 39 | end 40 | end 41 | end 42 | end 43 | end 44 | 45 | %EXTRACT REQUIRED GROUND TRUTH DATA: 46 | GTPlots = []; 47 | 48 | GTPlots{1,1} = GetGTPlots1('00001'); 49 | GTPlots{1,1}(1,:) = []; 50 | GTPlots{1,2} = GetGTPlots1('00002'); 51 | GTPlots{1,2}(1,:) = []; 52 | GTPlots{1,3} = GetGTPlots1('00003'); 53 | GTPlots{1,3}(1,:) = []; 54 | GTPlots{1,4} = GetGTPlots1('00004'); 55 | GTPlots{1,4}(1,:) = []; 56 | GTPlots{1,5} = GetGTPlots1('00005'); 57 | GTPlots{1,5}(1,:) = []; 58 | GTPlots{1,6} = GetGTPlots1('00006'); 59 | GTPlots{1,6}(1,:) = []; 60 | GTPlots{1,7} = GetGTPlots1('00007'); 61 | GTPlots{1,7}(1,:) = []; 62 | GTPlots{1,8} = GetGTPlots1('00008'); 63 | GTPlots{1,8}(1,:) = []; 64 | GTPlots{1,9} = GetGTPlots1('00009'); 65 | GTPlots{1,9}(1,:) = []; 66 | GTPlots{1,10} = GetGTPlots1('00010'); 67 | GTPlots{1,10}(1,:) = []; 68 | 69 | %COMPARE GROUND TRUTH DATA TO CALCULATED DATA: 70 | TE = zeros(size(ALL,1),10); 71 | M_Num = zeros(size(ALL,1),10); 72 | 73 | for z = 1:size(ALL,1) 74 | for n = 
1:10 75 | for x = 1:size(GTPlots{1,n}, 1) 76 | for y = 1:size(GTPlots{1,n}, 2) 77 | if isempty(GTPlots{1,n}{x,y}) == 0 78 | if isempty(ALL{z,n}{1,x}) == 0 79 | Err = abs(GTPlots{1,n}{x,y}.c(1) - ALL{z,n}{1,x}(:,1))+ ... 80 | abs(GTPlots{1,n}{x,y}.c(2) - ALL{z,n}{1,x}(:,1)); 81 | TE(z,n) = min(Err) + TE(z,n); 82 | m = length(ALL{z,n}{1,x}(:,1)); 83 | else 84 | m = 0; 85 | end 86 | else 87 | break; 88 | end 89 | end 90 | M_Num(z,n) = M_Num(z,n)+abs(y-m); 91 | end 92 | end 93 | end 94 | 95 | for g = 1:size(TE,1) 96 | rows(g) = sum(TE(g,:)); 97 | end 98 | 99 | [min1 idx] = min(rows); 100 | disp(p{idx}); 101 | 102 | 103 | for g = 1:size(M_Num,1) 104 | rows(g) = sum(M_Num(g,:)); 105 | end 106 | 107 | [min2 idx] = min(rows); 108 | disp(p{idx}); 109 | 110 | 111 | % The parameter output was: [85 3 3 2 2 1] 112 | % [65 1 1 12 8 0] 113 | 114 | -------------------------------------------------------------------------------- /SetParamNDTA.m: -------------------------------------------------------------------------------- 1 | %SetParmNDTA: Optimization algorithm for NoDetect_Track 2 | %Methodology: 3 | %SetParmNDTA iterates rapidly through a wide range of input parameters 4 | %(a1-a11) which represent weights of boosting parameters, val_thresh, and 5 | %similar parameters in NoDetect_Track. The automated version of 6 | %NoDetect_Track is called for performance estimation. It is run on only 7 | %image sequence 1 due to the already extensive computation time required. 8 | %Error in number of objects detected in each frame is calculated for 9 | %each scenario (and parameter input combination). Ground truth data is then 10 | %extracted and compared to the calculated data. The minimum error input 11 | %parameter combinations are estimated and shown below. This function serves 12 | %as a simplified brute force parameter optimization approach. 13 | 14 | %NOTE: The full nested loop algorithm below was not run due to time 15 | %constraints. 
Instead a wide variety of combinations were run and the 16 | %resulting data were analyzed to get a sense of the optimal parameters. In 17 | %general 4 or 5 loops would be run, and optimal parameters for those cases 18 | %would be estiamted. Then I would iterate back and run differnt loop 19 | %combinations. The resulting estimated optimization is a first pass at 20 | %finding optimal weighting values. A more efficient optimization 21 | %methodology will be implemented in the future. Commented in and out loops 22 | %are left as they were on the last optimization run I peformed. In general 23 | %for each useful optimization run, I would store the datapoints (generally 24 | %approximately 300 - 500) in a matrix V_NUMBER. Then in later effective 25 | %runs I would either write over unneeded data or increment to V2 and V3 and 26 | %so on... 27 | 28 | %Add the working folder to the path --> Change to appropriate 29 | addpath('C:\Users\Pushpak\Documents\MATLAB\OTCBVS\Dataset 1'); 30 | 31 | V8 = []; 32 | %Extract ground truth data for image sequence 1: %26 could be 3 or 4 33 | N_T = [5 5 1 1 2 2 2 2 1 1 1 1 1 2 2 2 2 2 2 3 3 3 3 3 4 3 4 7 7 7 7]; 34 | c_folder = '00001'; 35 | count = 1; 36 | 37 | %for a1 = 0:.1:.1 38 | a1 = .1; 39 | a2 = a1; 40 | for a3 = 0:.1:.3 41 | %a3 = 0; 42 | %for a4 = 0:.1:.1 43 | a4 = 0; 44 | %for a5 = 0:.1:.1 45 | a5 = .05; 46 | %for a6 = 0:.05:.1 47 | a6 = 0; 48 | %for a7 = 0:.0:.1 49 | a7 = 0; 50 | for a8 = 0.4:.05:.5 51 | a8 = .45; 52 | for a9 = 0:.1:.1 53 | %a9 = .1; 54 | a10 = a9; 55 | for a11 = .4:.05:.55 56 | %a11 = .45; 57 | for val_thresh = .5:.1:.9 58 | tdiff = 45; 59 | 60 | GTPlots = GetGTPlots(c_folder); 61 | Handoffs = Handoff(c_folder); 62 | O = NoDetect_Track_A(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,val_thresh,GTPlots,Handoffs, tdiff); 63 | %4 or 3 is acceptable for Frame 64 | %26 of the video: 65 | if O(26) == 4 66 | O(26) = 3; 67 | end 68 | V8(count,:) = [sum(abs(N_T - O)) a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 val_thresh tdiff]; 69 | 
disp(count) 70 | count = count + 1; 71 | 72 | end 73 | end 74 | end 75 | end 76 | %end 77 | %end 78 | %end 79 | %end 80 | end 81 | %end 82 | 83 | %Optimally Determined Values: 84 | %[0.1,0.1,0,0,0.05,0,0,0.45,0.1,0.1,0.45,0.7,45;] 85 | 86 | -------------------------------------------------------------------------------- /Single_Track.m: -------------------------------------------------------------------------------- 1 | %NOTE: This is an automated version of Single_Track_Original, allowing for 2 | %parameter input. All user interaction is also disable. This automated 3 | %version is used in the optimization algorithm SetParm. 4 | %-------------------------------------------------------------------------% 5 | %Single_Track - Generalized Matlab Script capable of implementing 6 | %single color (in this case IR) tracking techniques. Currently 7 | %contains image processing to remove noise and to emphasize detected 8 | %objects. This includes background averaging, and subsequent subtraction 9 | %from the current frame to detect foreground elements. 10 | % 11 | %Technique 1: Foreground objects are maintained in a tracking list. Object 12 | %uncertainty starts as moderate. If the object is not identified in 13 | %subsequent frames, the uncertainty increases. If the object is identified 14 | %the uncertainty decreases. Objects that are stably tracked over time 15 | %therefore have lower uncertainty and objects that are not will have 16 | %uncertainty. If uncertainty passes a certain threshold the object is 17 | %removed from the track list. If an object on the track list cannot be 18 | %associated but has low uncertainty, it is maintained and the track box is 19 | %placed at the predicted object location. 20 | % 21 | %Technique 2: Implements a Meanshift estimation of distribution centers 22 | %based on the same preprocessed images as analyzed in Technique 1. 23 | %This can be turned on or off and is used primarily as a demonstration 24 | %tool. 
The meanshift tracking is done in the seperate Matlab function 25 | %camshift.m or camshift2.m. They were originally developed to implement 26 | %meanshift tracking but were found to be no more effective than alternative 27 | %techniques. 28 | %-------------------------------------------------------------------------% 29 | 30 | function [biglist parameters] = Single_Track(p1,p2,p3,p4,p5,p6,folder) 31 | biglistcount = 1; 32 | biglist = []; 33 | 34 | %Bookkeeping: 35 | 36 | %Add the working folder to the path --> Change to appropriate 37 | %addpath('C:\Users\Pushpak\Documents\MATLAB\OTCBVS\Dataset 1'); 38 | %cd('C:\Users\Pushpak\Documents\MATLAB\OTCBVS\Dataset 1'); 39 | %c_folder = folder; 40 | 41 | %First access the Dataset 1/5 Directory list for the selected folder: 42 | %cd(c_folder); 43 | %clear all; 44 | close all; 45 | %disp('Program Start...'); 46 | 47 | %Variables: 48 | d_list = dir('*.bmp'); %Directory listing of all frames 49 | background_status = 1; %Sets background status (0 for bg, 1 for fg) 50 | bgf_count = 1; %Background frame count 51 | ms_win = zeros(1,4); %Meanshift search window - Starts empty 52 | ms_x = 0; %Meanshift x plot location 53 | ms_y = 0; %Meanshift y plot location 54 | t_list = []; %Track list keeps record of each track, as well as 55 | %the previous tracked location and a rough 56 | %prediction of where it will be with the following 57 | %formatting: 58 | %[C_x C_y BB_x BB_y Prev_x Prev_y Pre_x Pre_y] 59 | b_pix = 15; %Measure of acceptable closeness to the boundary 60 | uncertainty = 33; %Measure of uncertainty of a track. Increases if 61 | %is not detected in frames and decreases to 0 if 62 | %the object is redetected 63 | o_size = 18; %Minimum detectable object size 64 | cs_list = []; %CAM-Shift Tracking List 65 | 66 | %Background value: Updates with all non-object frames. 
Assumes first frame 67 | %is background 68 | I_p = double(imread(d_list(2).name)) - double(imread(d_list(1).name)); 69 | 70 | for n1 = 1:size(I_p,1) 71 | for n2 = 1:size(I_p,2) 72 | if I_p(n1,n2) > 0 73 | I_p(n1,n2) = 0; 74 | end 75 | end 76 | end 77 | I_bg = double(imread(d_list(1).name)) + I_p; 78 | 79 | %disp(['Expected Playback Time: ', num2str(numel(d_list)*.4/60),' mins']); 80 | 81 | 82 | %Main Body: Plays videos and performs tracking using various methods 83 | 84 | tic 85 | for i = 2: numel(d_list) 86 | 87 | I_curr = double(imread(d_list(i).name)); 88 | %Previous can be set as the previous frame for motion templating I_prev 89 | %= imread(d_list(i).name); 90 | 91 | %-------------------------------------------------------------------------% 92 | %Preprocessing: 93 | %Performs background subtraction, binarization off of an empirically 94 | %defined threshold, erosion to remove small noise objects, and finally 95 | %displays the processed image as well as the original image in seperate 96 | %figures 97 | 98 | %As a default, assume objects are not detected (background frame) 99 | background_status = 1; 100 | 101 | %Take the difference between frame and bg to find foreground 102 | I_diff = abs(I_curr-I_bg); 103 | %Binarize with an experimentally determined high threshold 104 | 105 | I_bdiff = I_diff > p1; 106 | %I_bdiff = I_diff>50; 107 | 108 | %Erode to remove noise 109 | I_db = imerode(I_bdiff,ones(p2,p3)); 110 | 111 | %Display the image frame and processing frame 112 | %figure(1),imshow(d_list(i).name); 113 | %%%%hold on; 114 | 115 | %-------------------------------------------------------------------------% 116 | %Creating the track list: 117 | %If an object is detected then we dilate the remaining objects to increase 118 | %the likelihood of internally connecting discrete objects. Image labeling 119 | %is used to identify all seperate objects, and bounding boxes are 120 | %determined for each object. 
If a list already exists, clear the previous 121 | %location elements and replace with a '-1' marker. 122 | 123 | %If we have any objects: 124 | if max(max(I_db))>0 125 | 126 | %If objects, then this is not background 127 | background_status = 0; 128 | %Dilate to keep binarized image as internally connected 129 | 130 | 131 | I_db = imdilate(I_db,ones(p4,p5)); 132 | 133 | if p6 134 | I_db = bwmorph(I_db, 'majority'); 135 | end 136 | 137 | %figure(2),imshow(I_db,[0 1]) 138 | %%%%hold on; 139 | 140 | %Label the image regions 141 | [I_l num_l] = bwlabel(I_db,8); 142 | L_stats = regionprops(I_l,'BoundingBox'); 143 | 144 | %Uses Region Labeling Approach, creating a bounding box around 145 | %sections. Plots the bounding box around each region 146 | 147 | diffs = []; %Assigns best guess for detections 148 | 149 | if isempty(t_list)==0 150 | 151 | %First, remove stored 'previous points' in the t_list 152 | for n = 1:size(t_list,1) 153 | t_list(n,5) = -1; 154 | t_list(n,6) = -1; 155 | end 156 | 157 | %Then, create an ordered difference list between current points 158 | %and previously stored track points 159 | for n = 1:num_l 160 | if (L_stats(n).BoundingBox(3)+L_stats(n).BoundingBox(4)) > o_size 161 | 162 | ix = L_stats(n).BoundingBox(1); 163 | iy = L_stats(n).BoundingBox(2); 164 | 165 | %Check if this object is on the track list by creating a 166 | %difference list between current locations and all 167 | %predicted locations in the track list: 168 | 169 | diff_list = abs(ix - t_list(:,7)) + abs(iy - t_list(:,8)); 170 | 171 | 172 | else 173 | diff_list = ones(size(t_list,1),1); 174 | end 175 | 176 | diffs = [diffs diff_list]; 177 | end 178 | 179 | %The number of unassociated track points. Initally the length 180 | %of the list 181 | num_uatp = size(t_list,1); 182 | used_list = zeros(size(diffs,2),1); 183 | 184 | %Next, associate detected points with old track points. If not 185 | %associated points are found, then add new track points. 
186 | 187 | for n = 1:size(diffs,2) 188 | %Find the closest associated predicted track to a new 189 | %detection 190 | %Reset idx1: 191 | idx1 = []; 192 | 193 | if (size(diffs,1) > 1 && size(diffs,2) > 1) 194 | [m1 idx1] = min(diffs); 195 | [m2 idx2] = min(m1); 196 | else 197 | if size(diffs,1) == 1 198 | [m1 idx2] = min(diffs); 199 | idx1(idx2) = 1; 200 | end 201 | if size(diffs,2) == 1 202 | idx2 = 1; 203 | [m1 idx1(idx2)] = min(diffs); 204 | end 205 | end 206 | 207 | y1 = idx1(idx2); 208 | x1 = idx2; 209 | 210 | %If the points still are fairly well associated and also 211 | %there are track points left to associate. If a point has 212 | %been 'missing' decrease the threshold of detection for 213 | %surrounding points by increasing the uncertainty marker 214 | if num_uatp > 0 && diffs(y1,x1) < ... 215 | (120 + t_list(y1,9)) 216 | %Update the current track point and the previous point 217 | t_list(y1,5) = t_list(y1,1); 218 | t_list(y1,6) = t_list(y1,2); 219 | t_list(y1,1) = L_stats(x1).BoundingBox(1); 220 | t_list(y1,2) = L_stats(x1).BoundingBox(2); 221 | t_list(y1,3) = L_stats(x1).BoundingBox(3); 222 | t_list(y1,4) = L_stats(x1).BoundingBox(4); 223 | %Decrease the uncertainty once a detection: 224 | t_list(y1,9) = max([(t_list(y1,9) - uncertainty) 0]); 225 | 226 | if isempty(diffs) == 0 227 | %Fill the used row 228 | diffs(y1,:) = 1000 * ones(1,size(diffs,2)); 229 | %Fill the used column 230 | diffs(:,x1) = 1000 * ones(size(diffs,1),1); 231 | end 232 | 233 | %An old track point has now been associated: 234 | num_uatp = num_uatp - 1; 235 | used_list(x1) = 1; 236 | end 237 | end 238 | 239 | %If we are out of track points to associate or no close 240 | %track points are available, then we create a new track 241 | %point based on the used_list 242 | for n = 1:size(diffs,2) 243 | if used_list(n) == 0 244 | ix = L_stats(n).BoundingBox(1); 245 | iy = L_stats(n).BoundingBox(2); 246 | sx = L_stats(n).BoundingBox(3); 247 | sy = L_stats(n).BoundingBox(4); 248 | 249 | mb = 
[abs(ix - 1) abs(iy - sy - 1)... 250 | abs(ix + sx -size(I_db,2)) abs(iy - size(I_db,1))]; 251 | [tm pos] = min(mb); 252 | 253 | s_tlist = size(t_list,1); 254 | %Based on which side wall is closest, fill the t_list 255 | %These points have moderate uncertainty (50) 256 | if pos == 1 257 | t_list(s_tlist+1,:) = [ix iy sx sy 1 iy 0 0 50]; 258 | end 259 | if pos == 2 260 | t_list(s_tlist+1,:) = [ix iy sx sy ix 1 0 0 50]; 261 | end 262 | if pos == 3 263 | t_list(s_tlist+1,:) = [ix iy sx sy ... 264 | size(I_db,2) iy 0 0 50]; 265 | end 266 | if pos == 4 267 | t_list(s_tlist+1,:) = [ix iy sx sy ix ... 268 | size(I_db,1) 0 0 50]; 269 | end 270 | end 271 | end 272 | 273 | %Next, we delete all track points that are no longer detected 274 | %and are expected to be gone 275 | %Uses b_pix and uncertainty from the Variable List 276 | 277 | for n = size(t_list,1):-1:1 278 | if t_list(n,5) == -1 && t_list(n,6) == -1 279 | 280 | %If close to the boundary and missing, delete: 281 | %Alternatively, if the track has not been found in too 282 | %long, then the uncertainty is too high and it will be 283 | %deleted: 284 | if t_list(n,7) < b_pix ||... 285 | t_list(n,8) - t_list(n,4) < b_pix ||... 286 | t_list(n,7) + t_list(n,3) > (size(I_db,2)-b_pix) ||... 287 | t_list(n,8) > (size(I_db,1)-b_pix) ||... 288 | t_list(n,9) > 50 289 | 290 | t_list(n,:) = []; 291 | else 292 | %Else guess its position based on a simple prediction 293 | %and hope it comes back. Increase the uncertainty, 294 | %assume the bounding box stays the same, make the 295 | %current location the last predicted location, and the 296 | %previous location the current location 297 | t_list(n,5) = t_list(n,1); 298 | t_list(n,6) = t_list(n,2); 299 | t_list(n,1) = t_list(n,7); 300 | t_list(n,2) = t_list(n,8); 301 | t_list(n,9) = t_list(n,9) + uncertainty; 302 | end 303 | end 304 | end 305 | 306 | %If an object has become too small... 
then delete it 307 | for n = size(t_list,1):-1:1 308 | if (t_list(n,3) + t_list(n,4)) < (o_size+2) 309 | t_list(n,:) = []; 310 | end 311 | end 312 | else 313 | %If the Track list is empty. Then add all tracks from scratch! 314 | for n = 1:num_l 315 | if (L_stats(n).BoundingBox(3)+L_stats(n).BoundingBox(4)) > o_size 316 | ix = L_stats(n).BoundingBox(1); 317 | iy = L_stats(n).BoundingBox(2); 318 | sx = L_stats(n).BoundingBox(3); 319 | sy = L_stats(n).BoundingBox(4); 320 | 321 | mb = [abs(ix - 1) abs(iy - sy - 1)... 322 | abs(ix + sx -size(I_db,2)) abs(iy - size(I_db,1))]; 323 | [tm pos] = min(mb); 324 | 325 | %Based on which side wall is closest, fill the t_list 326 | %These points are high uncertainty (100) - Newly 327 | %created points become more certain as they have been 328 | %visible for a longer time. When a list of all new 329 | %track points is created at once, it is particularly 330 | %uncertain 331 | if pos == 1 332 | t_list(n,:) = [ix iy sx sy 1 iy 0 0 100]; 333 | end 334 | if pos == 2 335 | t_list(n,:) = [ix iy sx sy ix 1 0 0 100]; 336 | end 337 | if pos == 3 338 | t_list(n,:) = [ix iy sx sy size(I_db,2) iy 0 0 100]; 339 | end 340 | if pos == 4 341 | t_list(n,:) = [ix iy sx sy ix size(I_db,1) 0 0 100]; 342 | end 343 | end 344 | end 345 | cs_list = t_list; 346 | end 347 | %-------------------------------------------------------------------------% 348 | %Tracklist Updating: 349 | %Now that the tracklist has been populated, update it! 350 | if isempty(t_list) == 0 351 | t_list = tlupdate(t_list,I_diff); 352 | end 353 | %-------------------------------------------------------------------------% 354 | %Display Tracks Section: 355 | % for n = 1:num_l 356 | %disp(['SIZE: ', num2str(size(t_list,1))]); 357 | for n=1:size(t_list,1) 358 | 359 | ix = t_list(n,1); 360 | iy = t_list(n,2); 361 | sx = t_list(n,3); 362 | sy = t_list(n,4); 363 | 364 | %Corresponds to Technique 1: 365 | if t_list(n,9) < uncertainty 366 | % figure(1), plot([ix ix ix+sx ix+sx ix],... 
367 | % [iy iy+sy iy+sy iy iy], 'r'); 368 | %%%%hold on; 369 | else 370 | % figure(1), plot([ix ix ix+sx ix+sx ix],... 371 | % [iy iy+sy iy+sy iy iy], 'y'); 372 | %%%%%hold on; 373 | end 374 | end 375 | 376 | ms_list = t_list; 377 | 378 | for n = 1:size(t_list,1) 379 | %Corresponds to Technique 2: 380 | 381 | ix = t_list(n,1); 382 | iy = t_list(n,2); 383 | sx = t_list(n,3); 384 | sy = t_list(n,4); 385 | 386 | %initialize a start window: 387 | ms_win(n,1) = round(ix); 388 | ms_win(n,2) = round(iy); 389 | ms_win(n,3) = round(sx); 390 | ms_win(n,4) = round(sy); 391 | 392 | %Apply camshift tracking - gets centroids and new 393 | %window for next iteration 394 | [ms_win(n,:) ms_x ms_y] = camshift(I_db,ms_win(n,:)); 395 | 396 | ix = ms_win(n,1); 397 | iy = ms_win(n,2); 398 | sx = ms_win(n,3); 399 | sy = ms_win(n,4); 400 | 401 | % figure(1), plot([ix ix ix+sx ix+sx ix],... 402 | % [iy iy+sy iy+sy iy iy], 'b'); 403 | %else 404 | %Apply camshift tracking - gets centroids and new 405 | %window for next iteration 406 | % [ms_win(n,:) ms_x ms_y] = camshift(I_db,ms_win(n,:)); 407 | %end 408 | %figure(1), plot(ms_x,ms_y, 'bx','MarkerSize',20); 409 | %%%%%hold on; 410 | end 411 | end 412 | 413 | %-------------------------------------------------------------------------% 414 | %Background Updating: 415 | %Update the background if a background frame 416 | if background_status == 1 417 | bgf_count = bgf_count + 1; 418 | I_bg = bgupdate(I_bg,I_curr,bgf_count); 419 | else 420 | if i >=2 421 | I_p = double(imread(d_list(i).name)) - double(imread(d_list(i-1).name)); 422 | 423 | for n1 = 1:size(I_p,1) 424 | for n2 = 1:size(I_p,2) 425 | if I_p(n1,n2) > 0 426 | I_p(n1,n2) = 0; 427 | end 428 | end 429 | end 430 | I_nbg = double(imread(d_list(i-1).name)) + I_p; 431 | bgf_count = bgf_count + 1; 432 | I_bg = bgupdate(I_bg,I_nbg,bgf_count); 433 | end 434 | end 435 | 436 | biglist{biglistcount} = t_list; 437 | biglistcount = biglistcount + 1; 438 | end 439 | toc 440 | %Return to the Dataset 1/5 
Directory 441 | parameters = [p1 p2 p3 p4 p5 p6]; 442 | cd ..; 443 | -------------------------------------------------------------------------------- /Single_Track_Original.m: -------------------------------------------------------------------------------- 1 | %-------------------------------------------------------------------------% 2 | %Single_Track - Generalized Matlab Script capable of implementing 3 | %single color (in this case IR) tracking techniques. Currently 4 | %contains image processing to remove noise and to emphasize detected 5 | %objects. This includes background averaging, and subsequent subtraction 6 | %from the current frame to detect foreground elements. 7 | % 8 | %Technique 1: Foreground objects are maintained in a tracking list. Object 9 | %uncertainty starts as moderate. If the object is not identified in 10 | %subsequent frames, the uncertainty increases. If the object is identified 11 | %the uncertainty decreases. Objects that are stably tracked over time 12 | %therefore have lower uncertainty and objects that are not will have 13 | %uncertainty. If uncertainty passes a certain threshold the object is 14 | %removed from the track list. If an object on the track list cannot be 15 | %associated but has low uncertainty, it is maintained and the track box is 16 | %placed at the predicted object location. 17 | % 18 | %Technique 2: Implements a MEANSHIFT technique to find distribution centers 19 | %identified using technique 1. This can be turned on or off. The 20 | %MEANSHIFT identification is done in the seperate Matlab function camshift.m. 21 | %Camshift1 or 2 can be used to track the distribution on the original image, 22 | %or more processed images at any stage of the alogirthm. This provided 23 | %antoher 'lever' to pull to affect tracking performance. It would be 24 | %possible to use the MEANSHIFT based algorithms to track the objects on its 25 | %own but this was less effective than technique 1. 
These identified points 26 | %are included as a demonstration of distributional centers identified using 27 | %technique 1. 28 | %-------------------------------------------------------------------------% 29 | 30 | %addpath('C:\Users\aa\Downloads\APOGEE\Pushpak-Object Tracking\Pushpak-Object tracking-code\Attachments_Complete'); 31 | %cd('C:\Users\Pushpak\Documents\MATLAB\OTCBVS\Dataset 1'); 32 | 33 | %Select the desired folder 34 | c_folder = '00001'; 35 | 36 | %First access the Dataset 1/5 Directory list for the selected folder: 37 | cd(c_folder); 38 | %clear all; 39 | close all; 40 | disp('Program Start...'); 41 | 42 | %Variables: 43 | d_list = dir('*.bmp'); %Directory listing of all frames 44 | background_status = 1; %Sets background status (0 for bg, 1 for fg) 45 | bgf_count = 1; %Background frame count 46 | ms_win = zeros(1,4); %Meanshift search window - Starts empty 47 | ms_x = 0; %Meanshift x plot location 48 | ms_y = 0; %Meanshift y plot location 49 | t_list = []; %Track list keeps record of each track, as well as 50 | %the previous tracked location and a rough 51 | %prediction of where it will be with the following 52 | %formatting: 53 | %[C_x C_y BB_x BB_y Prev_x Prev_y Pre_x Pre_y] 54 | b_pix = 15; %Measure of acceptable closeness to the boundary 55 | uncertainty = 33; %Measure of uncertainty of a track. Increases if 56 | %is not detected in frames and decreases to 0 if 57 | %the object is redetected 58 | o_size = 25; %Minimum detectable object size 59 | cs_list = []; %CAM-Shift Tracking List 60 | 61 | %Background value: Updates with all non-object frames. 
Assumes first frame 62 | %is background 63 | I_p = double(imread(d_list(2).name)) - double(imread(d_list(1).name)); 64 | 65 | for n1 = 1:size(I_p,1) 66 | for n2 = 1:size(I_p,2) 67 | if I_p(n1,n2) > 0 68 | I_p(n1,n2) = 0; 69 | end 70 | end 71 | end 72 | I_bg = double(imread(d_list(1).name)) + I_p; 73 | 74 | disp(['Expected Playback Time: ', num2str(numel(d_list)*.4/60),' mins']); 75 | 76 | 77 | %Main Body: Plays videos and performs tracking using various methods 78 | 79 | tic 80 | for i = 2: numel(d_list) 81 | 82 | I_curr = double(imread(d_list(i).name)); 83 | %Previous can be set as the previous frame for motion templating I_prev 84 | %= imread(d_list(i).name); 85 | 86 | %-------------------------------------------------------------------------% 87 | %Preprocessing: 88 | %Performs background subtraction, binarization off of an empirically 89 | %defined threshold, erosion to remove small noise objects, and finally 90 | %displays the processed image as well as the original image in seperate 91 | %figures 92 | 93 | %As a default, assume objects are not detected (background frame) 94 | background_status = 1; 95 | 96 | %Take the difference between frame and bg to find foreground 97 | I_diff = abs(I_curr-I_bg); 98 | %Binarize with an experimentally determined high threshold 99 | 100 | I_bdiff = I_diff > 65; 101 | %I_bdiff = I_diff > 30; 102 | %I_bdiff = I_diff>50; 103 | 104 | %Erode to remove noise 105 | erodesize = 1; 106 | I_db = imerode(I_bdiff,ones(erodesize,erodesize)); 107 | 108 | %Display the image frame and processing frame 109 | figure(1),imshow(d_list(i).name); 110 | hold on; 111 | 112 | %-------------------------------------------------------------------------% 113 | %Creating the track list: 114 | %If an object is detected then we dilate the remaining objects to increase 115 | %the likelihood of internally connecting discrete objects. Image labeling 116 | %is used to identify all seperate objects, and bounding boxes are 117 | %determined for each object. 
If a list already exists, clear the previous 118 | %location elements and replace with a '-1' marker. 119 | 120 | %If we have any objects: 121 | if max(max(I_db))>0 122 | 123 | %If objects, then this is not background 124 | background_status = 0; 125 | %Dilate to keep binarized image as internally connected 126 | 127 | dy = 12; 128 | dx = 8; 129 | I_db = imdilate(I_db,ones(dy,dx)); 130 | %I_db = bwmorph(I_db, 'majority'); 131 | 132 | figure(2),imshow(I_db,[0 1]) 133 | hold on; 134 | 135 | %Label the image regions 136 | [I_l num_l] = bwlabel(I_db,8); 137 | L_stats = regionprops(I_l,'BoundingBox'); 138 | 139 | %Uses Region Labeling Approach, creating a bounding box around 140 | %sections. Plots the bounding box around each region 141 | 142 | diffs = []; %Assigns best guess for detections 143 | 144 | if isempty(t_list)==0 145 | 146 | %First, remove stored 'previous points' in the t_list 147 | for n = 1:size(t_list,1) 148 | t_list(n,5) = -1; 149 | t_list(n,6) = -1; 150 | end 151 | 152 | %Then, create an ordered difference list between current points 153 | %and previously stored track points 154 | for n = 1:num_l 155 | if (L_stats(n).BoundingBox(3)+L_stats(n).BoundingBox(4)) > o_size 156 | 157 | ix = L_stats(n).BoundingBox(1); 158 | iy = L_stats(n).BoundingBox(2); 159 | 160 | %Check if this object is on the track list by creating a 161 | %difference list between current locations and all 162 | %predicted locations in the track list: 163 | 164 | diff_list = abs(ix - t_list(:,7)) + abs(iy - t_list(:,8)); 165 | 166 | 167 | else 168 | diff_list = ones(size(t_list,1),1); 169 | end 170 | 171 | diffs = [diffs diff_list]; 172 | end 173 | 174 | %The number of unassociated track points. Initally the length 175 | %of the list 176 | num_uatp = size(t_list,1); 177 | used_list = zeros(size(diffs,2),1); 178 | 179 | %Next, associate detected points with old track points. If not 180 | %associated points are found, then add new track points. 
181 | 182 | for n = 1:size(diffs,2) 183 | %Find the closest associated predicted track to a new 184 | %detection 185 | %Reset idx1: 186 | idx1 = []; 187 | 188 | if (size(diffs,1) > 1 && size(diffs,2) > 1) 189 | [m1 idx1] = min(diffs); 190 | [m2 idx2] = min(m1); 191 | else 192 | if size(diffs,1) == 1 193 | [m1 idx2] = min(diffs); 194 | idx1(idx2) = 1; 195 | end 196 | if size(diffs,2) == 1 197 | idx2 = 1; 198 | [m1 idx1(idx2)] = min(diffs); 199 | end 200 | end 201 | 202 | y1 = idx1(idx2); 203 | x1 = idx2; 204 | 205 | %If the points still are fairly well associated and also 206 | %there are track points left to associate. If a point has 207 | %been 'missing' decrease the threshold of detection for 208 | %surrounding points by increasing the uncertainty marker 209 | if num_uatp > 0 && diffs(y1,x1) < ... 210 | (120 + t_list(y1,9)) 211 | %Update the current track point and the previous point 212 | t_list(y1,5) = t_list(y1,1); 213 | t_list(y1,6) = t_list(y1,2); 214 | t_list(y1,1) = L_stats(x1).BoundingBox(1); 215 | t_list(y1,2) = L_stats(x1).BoundingBox(2); 216 | t_list(y1,3) = L_stats(x1).BoundingBox(3); 217 | t_list(y1,4) = L_stats(x1).BoundingBox(4); 218 | %Decrease the uncertainty once a detection: 219 | t_list(y1,9) = max([(t_list(y1,9) - uncertainty) 0]); 220 | 221 | if isempty(diffs) == 0 222 | %Fill the used row 223 | diffs(y1,:) = 1000 * ones(1,size(diffs,2)); 224 | %Fill the used column 225 | diffs(:,x1) = 1000 * ones(size(diffs,1),1); 226 | end 227 | 228 | %An old track point has now been associated: 229 | num_uatp = num_uatp - 1; 230 | used_list(x1) = 1; 231 | end 232 | end 233 | 234 | %If we are out of track points to associate or no close 235 | %track points are available, then we create a new track 236 | %point based on the used_list 237 | for n = 1:size(diffs,2) 238 | if used_list(n) == 0 239 | ix = L_stats(n).BoundingBox(1); 240 | iy = L_stats(n).BoundingBox(2); 241 | sx = L_stats(n).BoundingBox(3); 242 | sy = L_stats(n).BoundingBox(4); 243 | 244 | mb = 
[abs(ix - 1) abs(iy - sy - 1)... 245 | abs(ix + sx -size(I_db,2)) abs(iy - size(I_db,1))]; 246 | [tm pos] = min(mb); 247 | 248 | s_tlist = size(t_list,1); 249 | %Based on which side wall is closest, fill the t_list 250 | %These points have moderate uncertainty (50) 251 | if pos == 1 252 | t_list(s_tlist+1,:) = [ix iy sx sy 1 iy 0 0 50]; 253 | end 254 | if pos == 2 255 | t_list(s_tlist+1,:) = [ix iy sx sy ix 1 0 0 50]; 256 | end 257 | if pos == 3 258 | t_list(s_tlist+1,:) = [ix iy sx sy ... 259 | size(I_db,2) iy 0 0 50]; 260 | end 261 | if pos == 4 262 | t_list(s_tlist+1,:) = [ix iy sx sy ix ... 263 | size(I_db,1) 0 0 50]; 264 | end 265 | end 266 | end 267 | 268 | %Next, we delete all track points that are no longer detected 269 | %and are expected to be gone 270 | %Uses b_pix and uncertainty from the Variable List 271 | 272 | for n = size(t_list,1):-1:1 273 | if t_list(n,5) == -1 && t_list(n,6) == -1 274 | 275 | %If close to the boundary and missing, delete: 276 | %Alternatively, if the track has not been found in too 277 | %long, then the uncertainty is too high and it will be 278 | %deleted: 279 | if t_list(n,7) < b_pix ||... 280 | t_list(n,8) - t_list(n,4) < b_pix ||... 281 | t_list(n,7) + t_list(n,3) > (size(I_db,2)-b_pix) ||... 282 | t_list(n,8) > (size(I_db,1)-b_pix) ||... 283 | t_list(n,9) > 50 284 | 285 | t_list(n,:) = []; 286 | else 287 | %Else guess its position based on a simple prediction 288 | %and hope it comes back. Increase the uncertainty, 289 | %assume the bounding box stays the same, make the 290 | %current location the last predicted location, and the 291 | %previous location the current location 292 | t_list(n,5) = t_list(n,1); 293 | t_list(n,6) = t_list(n,2); 294 | t_list(n,1) = t_list(n,7); 295 | t_list(n,2) = t_list(n,8); 296 | t_list(n,9) = t_list(n,9) + uncertainty; 297 | end 298 | end 299 | end 300 | 301 | %If an object has become too small... 
then delete it 302 | for n = size(t_list,1):-1:1 303 | if (t_list(n,3) + t_list(n,4)) < (o_size+2) 304 | t_list(n,:) = []; 305 | end 306 | end 307 | else 308 | %If the Track list is empty. Then add all tracks from scratch! 309 | for n = 1:num_l 310 | if (L_stats(n).BoundingBox(3)+L_stats(n).BoundingBox(4)) > o_size 311 | ix = L_stats(n).BoundingBox(1); 312 | iy = L_stats(n).BoundingBox(2); 313 | sx = L_stats(n).BoundingBox(3); 314 | sy = L_stats(n).BoundingBox(4); 315 | 316 | mb = [abs(ix - 1) abs(iy - sy - 1)... 317 | abs(ix + sx -size(I_db,2)) abs(iy - size(I_db,1))]; 318 | [tm pos] = min(mb); 319 | 320 | %Based on which side wall is closest, fill the t_list 321 | %These points are high uncertainty (100) - Newly 322 | %created points become more certain as they have been 323 | %visible for a longer time. When a list of all new 324 | %track points is created at once, it is particularly 325 | %uncertain 326 | if pos == 1 327 | t_list(n,:) = [ix iy sx sy 1 iy 0 0 100]; 328 | end 329 | if pos == 2 330 | t_list(n,:) = [ix iy sx sy ix 1 0 0 100]; 331 | end 332 | if pos == 3 333 | t_list(n,:) = [ix iy sx sy size(I_db,2) iy 0 0 100]; 334 | end 335 | if pos == 4 336 | t_list(n,:) = [ix iy sx sy ix size(I_db,1) 0 0 100]; 337 | end 338 | end 339 | end 340 | cs_list = t_list; 341 | end 342 | %-------------------------------------------------------------------------% 343 | %Tracklist Updating: 344 | %Now that the tracklist has been populated, update it! 345 | if isempty(t_list) == 0 346 | t_list = tlupdate(t_list,I_diff); 347 | end 348 | %-------------------------------------------------------------------------% 349 | %Display Tracks Section: 350 | % for n = 1:num_l 351 | disp(['SIZE: ', num2str(size(t_list,1))]); 352 | for n=1:size(t_list,1) 353 | 354 | ix = t_list(n,1); 355 | iy = t_list(n,2); 356 | sx = t_list(n,3); 357 | sy = t_list(n,4); 358 | 359 | %Corresponds to Technique 1: 360 | if t_list(n,9) < uncertainty 361 | figure(1), plot([ix ix ix+sx ix+sx ix],... 
362 | [iy iy+sy iy+sy iy iy], 'r'); 363 | hold on; 364 | else 365 | figure(1), plot([ix ix ix+sx ix+sx ix],... 366 | [iy iy+sy iy+sy iy iy], 'y'); 367 | hold on; 368 | end 369 | end 370 | 371 | ms_list = t_list; 372 | 373 | for n = 1:size(t_list,1) 374 | %Corresponds to Technique 2: 375 | 376 | ix = t_list(n,1); 377 | iy = t_list(n,2); 378 | sx = t_list(n,3); 379 | sy = t_list(n,4); 380 | 381 | %initialize a start window: 382 | ms_win(n,1) = round(ix); 383 | ms_win(n,2) = round(iy); 384 | ms_win(n,3) = round(sx); 385 | ms_win(n,4) = round(sy); 386 | 387 | %Apply camshift tracking - gets centroids and new 388 | %window for next iteration 389 | [ms_win(n,:) ms_x ms_y] = camshift(I_db,ms_win(n,:)); 390 | 391 | ix = ms_win(n,1); 392 | iy = ms_win(n,2); 393 | sx = ms_win(n,3); 394 | sy = ms_win(n,4); 395 | 396 | figure(1), plot([ix ix ix+sx ix+sx ix],... 397 | [iy iy+sy iy+sy iy iy], 'b'); 398 | %else 399 | %Apply camshift tracking - gets centroids and new 400 | %window for next iteration 401 | % [ms_win(n,:) ms_x ms_y] = camshift(I_db,ms_win(n,:)); 402 | %end 403 | %figure(1), plot(ms_x,ms_y, 'bx','MarkerSize',20); 404 | hold on; 405 | end 406 | end 407 | 408 | %-------------------------------------------------------------------------% 409 | %Background Updating: 410 | %Improve background estimate over time by averaging in new background 411 | %estimates 412 | %Update the background with curr_frame if a background frame 413 | if background_status == 1 414 | bgf_count = bgf_count + 1; 415 | I_bg = bgupdate(I_bg,I_curr,bgf_count); 416 | else 417 | %If not a background frame, update with our best guess at bg 418 | if i >=2 419 | I_p = double(imread(d_list(i).name)) - double(imread(d_list(i-1).name)); 420 | 421 | for n1 = 1:size(I_p,1) 422 | for n2 = 1:size(I_p,2) 423 | if I_p(n1,n2) > 0 424 | I_p(n1,n2) = 0; 425 | end 426 | end 427 | end 428 | I_nbg = double(imread(d_list(i-1).name)) + I_p; 429 | bgf_count = bgf_count + 1; 430 | I_bg = bgupdate(I_bg,I_nbg,bgf_count); 431 
| end 432 | end 433 | 434 | %Pause to allow the user to view the frame: 435 | pause(1); 436 | 437 | end 438 | toc 439 | 440 | %Return to the Dataset 1/5 Directory 441 | cd ..; 442 | -------------------------------------------------------------------------------- /Track.m: -------------------------------------------------------------------------------- 1 | %Track.m: 2 | %This was a first pass at meanshift tracking and also correlation tracking. 3 | %However although the correlation (shown, meanshift was subsequently 4 | %removed and called in single_track for better demonstration) was able to 5 | %identify points at least somewhat, there was no logical maintenance of a 6 | %track list included. This development work spurred the creation of single 7 | %track to include uncertainty and a methodology for adding, removing, and 8 | %association objects on the track list. 9 | 10 | SEQ_DIR = '00001'; 11 | 12 | clear T; 13 | clear S; 14 | 15 | %Grab the Groundtruth file with associated sequence 16 | filename = fullfile(SEQ_DIR, 'groundTruth.txt'); 17 | fid = fopen(filename); 18 | 19 | %Cycle through comments in the file: 20 | line = fgets(fid); 21 | while line(1) == '%' 22 | line = fgets(fid); 23 | end 24 | 25 | %Read the number of images in the video 26 | numImages = sscanf(line, '%d', 1); 27 | start = 5; 28 | 29 | for i =start:10; 30 | %for i=1:numImages 31 | 32 | %Load the image name and number of boxes 33 | imageName = fscanf(fid, '%c',13); 34 | numBoxes = fscanf(fid, '%d', 1); 35 | 36 | %Display the image 37 | fname = fullfile(SEQ_DIR, imageName); 38 | Im = imread(fname); 39 | imagesc(Im); 40 | colormap('gray'); 41 | axis('off'); 42 | title(sprintf('Image %d', i)); 43 | hold on; 44 | 45 | %Load the ground truth boxes 46 | for j=1:numBoxes 47 | tmp = fscanf(fid, '%c',2); %% [space]( 48 | coords = fscanf(fid, '%d %d %d %d'); 49 | tmp = fscanf(fid, '%c',1); %% ) 50 | ulX=coords(1); ulY=coords(2); 51 | lrX=coords(3); lrY=coords(4); 52 | boxes{j}.X = [ulX lrX lrX ulX ulX]'; 
53 | boxes{j}.Y = [ulY ulY lrY lrY ulY]'; 54 | end 55 | 56 | tmp = fgetl(fid); %% get until end of line 57 | 58 | %Display the ground truth boxes (Colored RED) 59 | for j=1:numBoxes 60 | plot(boxes{j}.X, boxes{j}.Y, 'r'); 61 | 62 | %Create Tracks - for multiple object tracking 63 | 64 | %If there are no tracks to begin with, then all detected 65 | %objects should be added as tracks 66 | %if(size(T,3)>0) 67 | if(i == start) 68 | %Extract template based on groud truth data 69 | Tem = Im(boxes{j}.Y(1):boxes{j}.Y(3),... 70 | boxes{j}.X(1):boxes{j}.X(3)); 71 | T{j} = Tem; 72 | 73 | S(j,2) = boxes{j}.X(3) - boxes{j}.X(1); 74 | S(j,1) = boxes{j}.Y(3) - boxes{j}.Y(1); 75 | end 76 | end 77 | 78 | %Apply correlation tracking - disregard tracks with low correlation 79 | %--> The track object has probably left the screen 80 | t_s = 0; 81 | 82 | if(i>start && isempty(T)== 0) 83 | for n=1:size(T,2) 84 | g = normxcorr2(T{1,n - t_s}, double(Im)); 85 | 86 | if (max(max(g))>.70) 87 | gT = g == max(g(:)); 88 | [iy ix] = find(gT == 1); 89 | numel(ix); 90 | 91 | sx = S(n-t_s,2); 92 | sy = S(n-t_s,1); 93 | 94 | plot([ix ix ix-sx ix-sx ix],... 95 | [iy iy-sy iy-sy iy iy], 'y'); 96 | %Update the template based on current results 97 | T{1,n-t_s} = (T{1,n-t_s} + Im(iy:iy+sy,ix:ix+sx))/2; 98 | else 99 | T(:,n - t_s) = []; 100 | S(n - t_s,:) = []; 101 | t_s = t_s + 1; 102 | end 103 | end 104 | end 105 | 106 | %Pause to set frame rate 107 | pause(.5); 108 | hold off; 109 | clear Im; 110 | 111 | end 112 | 113 | fclose(fid); 114 | %end 115 | -------------------------------------------------------------------------------- /camshift.m: -------------------------------------------------------------------------------- 1 | %Meanshift Function: 2 | %Created a simple convergence algorithm which looks in windows of varying 3 | %sizes. This function is called by wrapper programs such as single 4 | %track.This algorithm identifies the first window that is stable. 
function [s_win x_c y_c] = camshift(I_curr, s_win)
%camshift: simple mean-shift style centroid search over an intensity image.
%Starting from the search window s_win = [x y width height], the window is
%repeatedly re-centred on the centroid of pixel intensity until the centre
%moves by less than half a pixel. A range of window scale factors
%(0.5:0.1:1.5 independently in x and y) is tried in order, and the FIRST
%scaling that converges wins (see camshift2 for the most-stable variant).
%
%Inputs:  I_curr - image, indexed as I_curr(row, col)
%         s_win  - [x y sx sy] initial search window
%Outputs: s_win  - [x y sx sy] updated window, clamped to the image bounds
%         x_c    - converged centroid column
%         y_c    - converged centroid row

thresh = .5;            %convergence threshold on centre movement (pixels)
del = thresh;           %centre movement measured on the last iteration
max_iterations = 200;   %hard cap on the inner convergence loop

%Unpack the initial search window (location and size)
ix = s_win(1);
iy = s_win(2);
sx = s_win(3);
sy = s_win(4);

nCols = size(I_curr,2);
nRows = size(I_curr,1);

for x_f = .5:.1:1.5
    for y_f = .5:.1:1.5

        %Seed the centre at the middle of the input window
        x_c = round(ix + sx/2);
        y_c = round(iy + sy/2);

        iterations = 0;

        %Iterate centre -> intensity centroid of the scaled window until
        %it settles. NOTE: del deliberately carries over between scale
        %factors; once any scaling has converged the loops break out
        %below, matching the first-stable-window behaviour.
        while iterations < max_iterations && del >= thresh

            %Remember the previous centre
            x_p = x_c;
            y_p = y_c;

            %Window extent for this scaling, clipped to the interior of
            %the image (columns 2..nCols-1, rows 2..nRows-1)
            c_lo = max(round(x_c - x_f * sx), 2);
            c_hi = min(round(x_c + x_f * sx), nCols - 1);
            r_lo = max(round(y_c - y_f * sy), 2);
            r_hi = min(round(y_c + y_f * sy), nRows - 1);

            %Accumulate the total mass and both first moments in a
            %single pass over the window
            TS  = double(0);
            I_x = double(0);
            I_y = double(0);
            for c = c_lo:c_hi
                for r = r_lo:r_hi
                    v   = double(I_curr(r,c));
                    TS  = TS + v;
                    I_x = I_x + c * v;
                    I_y = I_y + r * v;
                end
            end

            %Re-centre on the intensity centroid
            x_c = round(I_x/TS);
            y_c = round(I_y/TS);

            %Movement of the centre this iteration (L1 distance)
            del = abs(x_p - x_c) + abs(y_p - y_c);
            iterations = iterations + 1;
        end

        if del <= thresh
            break;
        end
    end
    if del <= thresh
        break;
    end
end

%Scale the returned window by the chosen factors and clamp inside the image
sx = min(max(round(x_f * sx),1),nCols);
sy = min(max(round(y_f * sy),1),nRows);
%NOTE(review): ix/iy re-apply x_f/y_f to the ALREADY scaled sx/sy, so the
%window is only exactly centred on (x_c,y_c) when the factor is 1 --
%kept as-is to preserve the original behaviour; confirm intent.
ix = min(max(round(x_c - x_f * sx/2),1),nCols);
iy = min(max(round(y_c - y_f * sy/2),1),nRows);

%Pack the return window (new location and size)
s_win(1) = ix;
s_win(2) = iy;
s_win(3) = sx;
s_win(4) = sy;
end

--------------------------------------------------------------------------------
/camshift2.m:
--------------------------------------------------------------------------------
%Meanshift Function 2:
%Created a simple convergence algorithm which looks in windows of varying
%sizes. This function is called by wrapper programs such as single track.
%This algorithm identifies the window size that is most stable. It is
%computationally more intensive than Meanshift Function 1.

function [s_win x_c y_c] = camshift2(I_curr, s_win)
%camshift2: mean-shift style centroid search, "most stable window" variant.
%Unlike camshift.m (which accepts the FIRST window scaling that converges),
%this version runs the centroid iteration for EVERY scale-factor pair in
%0.5:0.1:1.5 x 0.5:0.1:1.5, records how far the centre moved on the final
%iteration of each, and keeps the scaling whose final movement was the
%smallest, i.e. the most stable window. Computationally heavier than
%camshift.m.
%
%Inputs:  I_curr - image, indexed as I_curr(row, col)
%         s_win  - [x y sx sy] initial search window
%Outputs: s_win  - [x y sx sy] updated window, clamped to the image bounds
%         x_c    - centroid column of the most stable window
%         y_c    - centroid row of the most stable window

%Constant Parameters:
T = .5;                  % Threshold of convergence (in pixels)
del = T;                 % Pixel delta, defaults at T
max_iterations = 50;

%Input the initial search window (location and size)
ix = s_win(1);           %Initial x location
iy = s_win(2);           %Initial y location
sx = s_win(3);           %Search window size in x
sy = s_win(4);           %Search window size in y
mins = zeros(121,5);     %one row per scale pair: [del x_c y_c x_f y_f]
count = 1;

for x_f = .5:.1:1.5
    for y_f = .5:.1:1.5

        %Seed the centre at the middle of the input window
        x_c = round(ix + sx/2);
        y_c = round(iy + sy/2);

        iterations = 0;

        %Run the centroid iteration a fixed number of times for this
        %scale pair; del after the loop measures its final stability
        while (iterations < max_iterations)

            %Save the previous centre:
            x_p = x_c;
            y_p = y_c;

            % Compute bounds of the search window
            x_min = round(x_c - x_f * sx);
            x_max = round(x_c + x_f * sx);
            y_min = round(y_c - y_f * sy);
            y_max = round(y_c + y_f * sy);

            %Accumulate the total intensity and the x/y first moments
            %over the in-bounds part of the window (columns 2..end-1,
            %rows 2..end-1, as in the original three separate loops)
            TS = double(0);
            I_x = double(0);
            I_y = double(0);
            for i = x_min:x_max
                for j = y_min:y_max
                    if i < size(I_curr,2)&& j < size(I_curr,1)&& i > 1&& j > 1
                        TS = TS + double(I_curr(j,i));
                        I_x = I_x + i * double(I_curr(j,i));
                        I_y = I_y + j * double(I_curr(j,i));
                    end
                end
            end

            %Find the centroid:
            x_c = round(I_x/TS);
            y_c = round(I_y/TS);

            %Calculate Current Error
            del = abs(x_p-x_c) + abs(y_p-y_c);
            iterations = iterations + 1;
        end

        mins(count,:) = [del x_c y_c x_f y_f];
        count = count + 1;

    end
end

%Pick the scale pair whose final centre movement was the smallest.
%BUGFIX: the original did [mins idx] = min(mins(:,1)); which overwrote the
%mins matrix with the scalar minimum, so the subsequent mins(idx,...)
%reads indexed into a scalar and errored (or returned garbage). Keep the
%matrix intact and take only the minimum's index.
[min_del idx] = min(mins(:,1));
x_c = mins(idx,2);
y_c = mins(idx,3);
x_f = mins(idx,4);
y_f = mins(idx,5);

%Scale the window by the winning factors and clamp inside the image.
%NOTE(review): as in camshift.m, ix/iy re-apply x_f/y_f to the already
%scaled sx/sy -- preserved as-is to match camshift.m; confirm intent.
sx = min(max(round(x_f * sx),1),size(I_curr,2));
sy = min(max(round(y_f * sy),1),size(I_curr,1));
ix = min(max(round(x_c - x_f * sx/2),1),size(I_curr,2));
iy = min(max(round(y_c - y_f * sy/2),1),size(I_curr,1));

%Set return variables for new window location
s_win(1) = ix;           %Initial x location
s_win(2) = iy;           %Initial y location
s_win(3) = sx;           %Search window size in x
s_win(4) = sy;           %Search window size in y
end

--------------------------------------------------------------------------------
/tlupdate.m:
--------------------------------------------------------------------------------
%tlupdate.m
%Methodology: Written as a general track list linear motion predictor
%function. This function is called by both NoDetect_Track and
%Single_Track to perform estimation of object locations in subsequent
%frames. If necessary it could be expanded in the future to do non-linear
%motion prediction based on an established motion profile.

function [t_list] = tlupdate(t_list,I_diff)
%Updates the tracklist using linear prediction:
%Finds the delta between the previous frames, and adds this to the previous
%frame value in each dimension to predict its next location. Columns used:
%1,2 = current x,y; 5,6 = previous x,y; 7,8 = predicted x,y. Predictions
%are clamped to the image extent given by I_diff.

for i = 1:size(t_list,1)
    t_list(i,7) = max(1,min(size(I_diff,2),(t_list(i,1) - t_list(i,5))/1 + t_list(i,1)));
    t_list(i,8) = max(1,min(size(I_diff,1),(t_list(i,2) - t_list(i,6))/1 + t_list(i,2)));
end

end

--------------------------------------------------------------------------------