├── 768x576.avi
├── Blob.cpp
├── Blob.h
├── main.cpp
├── presentation
│   ├── Blob Matching1.png
│   ├── Blob Matching2.png
│   └── Blob Matching3.png
└── readme.txt

/768x576.avi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MicrocontrollersAndMore/OpenCV_3_Multiple_Object_Tracking_by_Image_Subtraction_Cpp/b5e3a0bfc0f64d5caacee4baebf0b662fa8021a6/768x576.avi
--------------------------------------------------------------------------------
/Blob.cpp:
--------------------------------------------------------------------------------
// Blob.cpp

#include "Blob.h"

///////////////////////////////////////////////////////////////////////////////////////////////////
Blob::Blob(std::vector<cv::Point> _contour) {

    currentContour = _contour;

    currentBoundingRect = cv::boundingRect(currentContour);

    cv::Point currentCenter;

    currentCenter.x = (currentBoundingRect.x + currentBoundingRect.x + currentBoundingRect.width) / 2;
    currentCenter.y = (currentBoundingRect.y + currentBoundingRect.y + currentBoundingRect.height) / 2;

    centerPositions.push_back(currentCenter);

    dblCurrentDiagonalSize = sqrt(pow(currentBoundingRect.width, 2) + pow(currentBoundingRect.height, 2));

    dblCurrentAspectRatio = (float)currentBoundingRect.width / (float)currentBoundingRect.height;

    blnStillBeingTracked = true;
    blnCurrentMatchFoundOrNewBlob = true;

    intNumOfConsecutiveFramesWithoutAMatch = 0;
}

///////////////////////////////////////////////////////////////////////////////////////////////////
// predict the next center position as the last known position plus a weighted average of the
// most recent position changes, where more recent changes are weighted more heavily
void Blob::predictNextPosition(void) {

    int numPositions = (int)centerPositions.size();

    if (numPositions == 1) {

        predictedNextPosition.x = centerPositions.back().x;
        predictedNextPosition.y = centerPositions.back().y;

    }
    else if (numPositions == 2) {

        int deltaX = centerPositions[1].x - centerPositions[0].x;
        int deltaY = centerPositions[1].y - centerPositions[0].y;

        predictedNextPosition.x = centerPositions.back().x + deltaX;
        predictedNextPosition.y = centerPositions.back().y + deltaY;

    }
    else if (numPositions == 3) {

        int sumOfXChanges = ((centerPositions[2].x - centerPositions[1].x) * 2) +
                            ((centerPositions[1].x - centerPositions[0].x) * 1);

        int deltaX = (int)std::round((float)sumOfXChanges / 3.0);

        int sumOfYChanges = ((centerPositions[2].y - centerPositions[1].y) * 2) +
                            ((centerPositions[1].y - centerPositions[0].y) * 1);

        int deltaY = (int)std::round((float)sumOfYChanges / 3.0);

        predictedNextPosition.x = centerPositions.back().x + deltaX;
        predictedNextPosition.y = centerPositions.back().y + deltaY;

    }
    else if (numPositions == 4) {

        int sumOfXChanges = ((centerPositions[3].x - centerPositions[2].x) * 3) +
                            ((centerPositions[2].x - centerPositions[1].x) * 2) +
                            ((centerPositions[1].x - centerPositions[0].x) * 1);

        int deltaX = (int)std::round((float)sumOfXChanges / 6.0);

        int sumOfYChanges = ((centerPositions[3].y - centerPositions[2].y) * 3) +
                            ((centerPositions[2].y - centerPositions[1].y) * 2) +
                            ((centerPositions[1].y - centerPositions[0].y) * 1);

        int deltaY = (int)std::round((float)sumOfYChanges / 6.0);

        predictedNextPosition.x = centerPositions.back().x + deltaX;
        predictedNextPosition.y = centerPositions.back().y + deltaY;
    }
    else if (numPositions >= 5) {

        int sumOfXChanges = ((centerPositions[numPositions - 1].x - centerPositions[numPositions - 2].x) * 4) +
                            ((centerPositions[numPositions - 2].x - centerPositions[numPositions - 3].x) * 3) +
                            ((centerPositions[numPositions - 3].x - centerPositions[numPositions - 4].x) * 2) +
                            ((centerPositions[numPositions - 4].x - centerPositions[numPositions - 5].x) * 1);

        int deltaX = (int)std::round((float)sumOfXChanges / 10.0);

        int sumOfYChanges = ((centerPositions[numPositions - 1].y - centerPositions[numPositions - 2].y) * 4) +
                            ((centerPositions[numPositions - 2].y - centerPositions[numPositions - 3].y) * 3) +
                            ((centerPositions[numPositions - 3].y - centerPositions[numPositions - 4].y) * 2) +
                            ((centerPositions[numPositions - 4].y - centerPositions[numPositions - 5].y) * 1);

        int deltaY = (int)std::round((float)sumOfYChanges / 10.0);

        predictedNextPosition.x = centerPositions.back().x + deltaX;
        predictedNextPosition.y = centerPositions.back().y + deltaY;

    }
    else {
        // should never get here
    }

}

--------------------------------------------------------------------------------
/Blob.h:
--------------------------------------------------------------------------------
// Blob.h

#ifndef MY_BLOB
#define MY_BLOB

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

///////////////////////////////////////////////////////////////////////////////////////////////////
class Blob {
public:
    // member variables ///////////////////////////////////////////////////////////////////////////
    std::vector<cv::Point> currentContour;

    cv::Rect currentBoundingRect;

    std::vector<cv::Point> centerPositions;

    double dblCurrentDiagonalSize;
    double dblCurrentAspectRatio;

    bool blnCurrentMatchFoundOrNewBlob;

    bool blnStillBeingTracked;

    int intNumOfConsecutiveFramesWithoutAMatch;

    cv::Point predictedNextPosition;

    // function prototypes ////////////////////////////////////////////////////////////////////////
    Blob(std::vector<cv::Point> _contour);
    void predictNextPosition(void);

};

#endif    // MY_BLOB

--------------------------------------------------------------------------------
/main.cpp:
--------------------------------------------------------------------------------
// ObjectTrackingCPP.cpp

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include <iostream>
#include <conio.h>           // it may be necessary to change or remove this line if not using Windows

#include "Blob.h"

#define SHOW_STEPS            // un-comment or comment this line to show steps or not

// global variables ///////////////////////////////////////////////////////////////////////////////
const cv::Scalar SCALAR_BLACK = cv::Scalar(0.0, 0.0, 0.0);
const cv::Scalar SCALAR_WHITE = cv::Scalar(255.0, 255.0, 255.0);
const cv::Scalar SCALAR_YELLOW = cv::Scalar(0.0, 255.0, 255.0);
const cv::Scalar SCALAR_GREEN = cv::Scalar(0.0, 200.0, 0.0);
const cv::Scalar SCALAR_RED = cv::Scalar(0.0, 0.0, 255.0);

// function prototypes ////////////////////////////////////////////////////////////////////////////
void matchCurrentFrameBlobsToExistingBlobs(std::vector<Blob> &existingBlobs, std::vector<Blob> &currentFrameBlobs);
void addBlobToExistingBlobs(Blob &currentFrameBlob, std::vector<Blob> &existingBlobs, int &intIndex);
void addNewBlob(Blob &currentFrameBlob, std::vector<Blob> &existingBlobs);
double distanceBetweenPoints(cv::Point point1, cv::Point point2);
void drawAndShowContours(cv::Size imageSize, std::vector<std::vector<cv::Point> > contours, std::string strImageName);
void drawAndShowContours(cv::Size imageSize, std::vector<Blob> blobs, std::string strImageName);
void drawBlobInfoOnImage(std::vector<Blob> &blobs, cv::Mat &imgFrame2Copy);

///////////////////////////////////////////////////////////////////////////////////////////////////
int main(void) {

    cv::VideoCapture capVideo;

    cv::Mat imgFrame1;
    cv::Mat imgFrame2;

    std::vector<Blob> blobs;

    capVideo.open("768x576.avi");

    if (!capVideo.isOpened()) {                                                 // if unable to open video file
        std::cout << "error reading video file" << std::endl << std::endl;      // show error message
        _getch();                    // it may be necessary to change or remove this line if not using Windows
        return(0);                                                              // and exit program
    }

    if (capVideo.get(CV_CAP_PROP_FRAME_COUNT) < 2) {
        std::cout << "error: video file must have at least two frames";
        _getch();
        return(0);
    }

    capVideo.read(imgFrame1);
    capVideo.read(imgFrame2);

    char chCheckForEscKey = 0;

    bool blnFirstFrame = true;

    int frameCount = 2;

    while (capVideo.isOpened() && chCheckForEscKey != 27) {

        std::vector<Blob> currentFrameBlobs;

        cv::Mat imgFrame1Copy = imgFrame1.clone();
        cv::Mat imgFrame2Copy = imgFrame2.clone();

        cv::Mat imgDifference;
        cv::Mat imgThresh;

        cv::cvtColor(imgFrame1Copy, imgFrame1Copy, CV_BGR2GRAY);
        cv::cvtColor(imgFrame2Copy, imgFrame2Copy, CV_BGR2GRAY);

        cv::GaussianBlur(imgFrame1Copy, imgFrame1Copy, cv::Size(5, 5), 0);
        cv::GaussianBlur(imgFrame2Copy, imgFrame2Copy, cv::Size(5, 5), 0);

        cv::absdiff(imgFrame1Copy, imgFrame2Copy, imgDifference);

        cv::threshold(imgDifference, imgThresh, 30, 255.0, CV_THRESH_BINARY);

        cv::imshow("imgThresh", imgThresh);

        cv::Mat structuringElement3x3 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
        cv::Mat structuringElement5x5 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
        cv::Mat structuringElement7x7 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(7, 7));
        cv::Mat structuringElement9x9 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(9, 9));

        /*
        cv::dilate(imgThresh, imgThresh, structuringElement7x7);
        cv::erode(imgThresh, imgThresh, structuringElement3x3);
        */

        cv::dilate(imgThresh, imgThresh, structuringElement5x5);
        cv::dilate(imgThresh, imgThresh, structuringElement5x5);
        cv::erode(imgThresh, imgThresh, structuringElement5x5);

        cv::Mat imgThreshCopy = imgThresh.clone();

        std::vector<std::vector<cv::Point> > contours;

        cv::findContours(imgThreshCopy, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

        drawAndShowContours(imgThresh.size(), contours, "imgContours");

        std::vector<std::vector<cv::Point> > convexHulls(contours.size());

        for (unsigned int i = 0; i < contours.size(); i++) {
            cv::convexHull(contours[i], convexHulls[i]);
        }

        drawAndShowContours(imgThresh.size(), convexHulls, "imgConvexHulls");

        // keep only the convex hulls that look like plausible moving objects
        // (reasonable area, aspect ratio, size, and solidity)
        for (auto &convexHull : convexHulls) {
            Blob possibleBlob(convexHull);

            if (possibleBlob.currentBoundingRect.area() > 100 &&
                possibleBlob.dblCurrentAspectRatio >= 0.2 &&
                possibleBlob.dblCurrentAspectRatio <= 1.25 &&
                possibleBlob.currentBoundingRect.width > 20 &&
                possibleBlob.currentBoundingRect.height > 20 &&
                possibleBlob.dblCurrentDiagonalSize > 30.0 &&
                (cv::contourArea(possibleBlob.currentContour) / (double)possibleBlob.currentBoundingRect.area()) > 0.40) {
                currentFrameBlobs.push_back(possibleBlob);
            }
        }

        drawAndShowContours(imgThresh.size(), currentFrameBlobs, "imgCurrentFrameBlobs");

        if (blnFirstFrame == true) {
            for (auto &currentFrameBlob : currentFrameBlobs) {
                blobs.push_back(currentFrameBlob);
            }
        }
        else {
            matchCurrentFrameBlobsToExistingBlobs(blobs, currentFrameBlobs);
        }

        drawAndShowContours(imgThresh.size(), blobs, "imgBlobs");

        imgFrame2Copy = imgFrame2.clone();          // get another copy of frame 2 since we changed the previous frame 2 copy in the processing above

        drawBlobInfoOnImage(blobs, imgFrame2Copy);

        cv::imshow("imgFrame2Copy", imgFrame2Copy);

        //cv::waitKey(0);                           // uncomment this line to go frame by frame for debugging

        // now we prepare for the next iteration

        currentFrameBlobs.clear();

        imgFrame1 = imgFrame2.clone();              // move frame 1 up to where frame 2 is

        if ((capVideo.get(CV_CAP_PROP_POS_FRAMES) + 1) < capVideo.get(CV_CAP_PROP_FRAME_COUNT)) {
            capVideo.read(imgFrame2);
        }
        else {
            std::cout << "end of video\n";
            break;
        }

        blnFirstFrame = false;
        frameCount++;
        chCheckForEscKey = cv::waitKey(1);
    }

    if (chCheckForEscKey != 27) {               // if the user did not press esc (i.e. we reached the end of the video)
        cv::waitKey(0);                         // hold the windows open to allow the "end of video" message to show
    }
    // note that if the user did press esc, we don't need to hold the windows open, we can simply let the program end which will close the windows

    return(0);
}

///////////////////////////////////////////////////////////////////////////////////////////////////
// for each blob found in the current frame, find the tracked blob whose predicted next position is
// closest; if that distance is less than 1.15 times the current blob's diagonal, treat it as a
// match, otherwise add the current blob as a new tracked blob; tracked blobs that go unmatched for
// 5 consecutive frames are no longer considered to be tracked
void matchCurrentFrameBlobsToExistingBlobs(std::vector<Blob> &existingBlobs, std::vector<Blob> &currentFrameBlobs) {

    for (auto &existingBlob : existingBlobs) {

        existingBlob.blnCurrentMatchFoundOrNewBlob = false;

        existingBlob.predictNextPosition();
    }

    for (auto &currentFrameBlob : currentFrameBlobs) {

        int intIndexOfLeastDistance = 0;
        double dblLeastDistance = 100000.0;

        for (unsigned int i = 0; i < existingBlobs.size(); i++) {
            if (existingBlobs[i].blnStillBeingTracked == true) {
                double dblDistance = distanceBetweenPoints(currentFrameBlob.centerPositions.back(), existingBlobs[i].predictedNextPosition);

                if (dblDistance < dblLeastDistance) {
                    dblLeastDistance = dblDistance;
                    intIndexOfLeastDistance = i;
                }
            }
        }

        if (dblLeastDistance < currentFrameBlob.dblCurrentDiagonalSize * 1.15) {
            addBlobToExistingBlobs(currentFrameBlob, existingBlobs, intIndexOfLeastDistance);
        }
        else {
            addNewBlob(currentFrameBlob, existingBlobs);
        }

    }

    for (auto &existingBlob : existingBlobs) {

        if (existingBlob.blnCurrentMatchFoundOrNewBlob == false) {
            existingBlob.intNumOfConsecutiveFramesWithoutAMatch++;
        }

        if (existingBlob.intNumOfConsecutiveFramesWithoutAMatch >= 5) {
            existingBlob.blnStillBeingTracked = false;
        }

    }

}

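// the two helpers below maintain the list of tracked blobs: addBlobToExistingBlobs copies the
// matched current-frame blob's contour, bounding rect, size, aspect ratio, and newest center
// position into the existing blob, while addNewBlob appends an unmatched current-frame blob to
// the list as a new tracked blob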
///////////////////////////////////////////////////////////////////////////////////////////////////
void addBlobToExistingBlobs(Blob &currentFrameBlob, std::vector<Blob> &existingBlobs, int &intIndex) {

    existingBlobs[intIndex].currentContour = currentFrameBlob.currentContour;
    existingBlobs[intIndex].currentBoundingRect = currentFrameBlob.currentBoundingRect;

    existingBlobs[intIndex].centerPositions.push_back(currentFrameBlob.centerPositions.back());

    existingBlobs[intIndex].dblCurrentDiagonalSize = currentFrameBlob.dblCurrentDiagonalSize;
    existingBlobs[intIndex].dblCurrentAspectRatio = currentFrameBlob.dblCurrentAspectRatio;

    existingBlobs[intIndex].blnStillBeingTracked = true;
    existingBlobs[intIndex].blnCurrentMatchFoundOrNewBlob = true;
}

///////////////////////////////////////////////////////////////////////////////////////////////////
void addNewBlob(Blob &currentFrameBlob, std::vector<Blob> &existingBlobs) {

    currentFrameBlob.blnCurrentMatchFoundOrNewBlob = true;

    existingBlobs.push_back(currentFrameBlob);
}

///////////////////////////////////////////////////////////////////////////////////////////////////
double distanceBetweenPoints(cv::Point point1, cv::Point point2) {

    int intX = abs(point1.x - point2.x);
    int intY = abs(point1.y - point2.y);

    return(sqrt(pow(intX, 2) + pow(intY, 2)));
}

///////////////////////////////////////////////////////////////////////////////////////////////////
void drawAndShowContours(cv::Size imageSize, std::vector<std::vector<cv::Point> > contours, std::string strImageName) {
    cv::Mat image(imageSize, CV_8UC3, SCALAR_BLACK);

    cv::drawContours(image, contours, -1, SCALAR_WHITE, -1);

    cv::imshow(strImageName, image);
}

///////////////////////////////////////////////////////////////////////////////////////////////////
void drawAndShowContours(cv::Size imageSize, std::vector<Blob> blobs, std::string strImageName) {

    cv::Mat image(imageSize, CV_8UC3, SCALAR_BLACK);

    std::vector<std::vector<cv::Point> > contours;

    for (auto &blob : blobs) {
        if (blob.blnStillBeingTracked == true) {
            contours.push_back(blob.currentContour);
        }
    }

    cv::drawContours(image, contours, -1, SCALAR_WHITE, -1);

    cv::imshow(strImageName, image);
}

///////////////////////////////////////////////////////////////////////////////////////////////////
void drawBlobInfoOnImage(std::vector<Blob> &blobs, cv::Mat &imgFrame2Copy) {

    for (unsigned int i = 0; i < blobs.size(); i++) {

        if (blobs[i].blnStillBeingTracked == true) {
            cv::rectangle(imgFrame2Copy, blobs[i].currentBoundingRect, SCALAR_RED, 2);

            int intFontFace = CV_FONT_HERSHEY_SIMPLEX;
            double dblFontScale = blobs[i].dblCurrentDiagonalSize / 60.0;
            int intFontThickness = (int)std::round(dblFontScale * 1.0);

            cv::putText(imgFrame2Copy, std::to_string(i), blobs[i].centerPositions.back(), intFontFace, dblFontScale, SCALAR_GREEN, intFontThickness);
        }
    }
}

--------------------------------------------------------------------------------
/presentation/Blob Matching1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MicrocontrollersAndMore/OpenCV_3_Multiple_Object_Tracking_by_Image_Subtraction_Cpp/b5e3a0bfc0f64d5caacee4baebf0b662fa8021a6/presentation/Blob Matching1.png
--------------------------------------------------------------------------------
/presentation/Blob Matching2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MicrocontrollersAndMore/OpenCV_3_Multiple_Object_Tracking_by_Image_Subtraction_Cpp/b5e3a0bfc0f64d5caacee4baebf0b662fa8021a6/presentation/Blob Matching2.png
--------------------------------------------------------------------------------
/presentation/Blob Matching3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MicrocontrollersAndMore/OpenCV_3_Multiple_Object_Tracking_by_Image_Subtraction_Cpp/b5e3a0bfc0f64d5caacee4baebf0b662fa8021a6/presentation/Blob Matching3.png
--------------------------------------------------------------------------------
/readme.txt:
--------------------------------------------------------------------------------
The video pretty much explains it all:
https://www.youtube.com/watch?v=A4UDOAOTRdw
--------------------------------------------------------------------------------
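
Build note: main.cpp opens "768x576.avi" from the working directory, so run the program from the
folder that contains the video. The code as written targets Windows (conio.h / _getch()); on other
platforms those lines may need to be changed or removed, as the comments in main.cpp say. On a
Linux system with an OpenCV 3 development install that provides pkg-config metadata, a command
along these lines should work (the pkg-config package name may differ on your system):

    g++ -std=c++11 main.cpp Blob.cpp -o ObjectTracking $(pkg-config --cflags --libs opencv)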