├── .gitattributes ├── .gitignore ├── README.md └── projectcV ├── FaceRec.h ├── VideoCap.cpp ├── VideoCap.h ├── imgProc.h ├── main.cpp ├── projectcV.sln ├── projectcV.vcxproj └── projectcV.vcxproj.filters /.gitattributes: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # Set default behavior to automatically normalize line endings. 3 | ############################################################################### 4 | * text=auto 5 | 6 | ############################################################################### 7 | # Set default behavior for command prompt diff. 8 | # 9 | # This is need for earlier builds of msysgit that does not have it on by 10 | # default for csharp files. 11 | # Note: This is only used by command line 12 | ############################################################################### 13 | #*.cs diff=csharp 14 | 15 | ############################################################################### 16 | # Set the merge driver for project and solution files 17 | # 18 | # Merging from the command prompt will add diff markers to the files if there 19 | # are conflicts (Merging from VS is not affected by the settings below, in VS 20 | # the diff markers are never inserted). Diff markers may cause the following 21 | # file extensions to fail to load in VS. An alternative would be to treat 22 | # these files as binary and thus will always conflict and require user 23 | # intervention with every merge. To do so, just uncomment the entries below 24 | ############################################################################### 25 | #*.sln merge=binary 26 | #*.csproj merge=binary 27 | #*.vbproj merge=binary 28 | #*.vcxproj merge=binary 29 | #*.vcproj merge=binary 30 | #*.dbproj merge=binary 31 | #*.fsproj merge=binary 32 | #*.lsproj merge=binary 33 | #*.wixproj merge=binary 34 | #*.modelproj merge=binary 35 | #*.sqlproj merge=binary 36 | #*.wwaproj merge=binary 37 | 38 | ############################################################################### 39 | # behavior for image files 40 | # 41 | # image files are treated as binary by default. 42 | ############################################################################### 43 | #*.jpg binary 44 | #*.png binary 45 | #*.gif binary 46 | 47 | ############################################################################### 48 | # diff behavior for common document formats 49 | # 50 | # Convert binary document formats to text before diffing them. This feature 51 | # is only available from the command line. Turn it on by uncommenting the 52 | # entries below. 53 | ############################################################################### 54 | #*.doc diff=astextplain 55 | #*.DOC diff=astextplain 56 | #*.docx diff=astextplain 57 | #*.DOCX diff=astextplain 58 | #*.dot diff=astextplain 59 | #*.DOT diff=astextplain 60 | #*.pdf diff=astextplain 61 | #*.PDF diff=astextplain 62 | #*.rtf diff=astextplain 63 | #*.RTF diff=astextplain 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Ignore Visual Studio temporary files, build results, and 2 | ## files generated by popular Visual Studio add-ons. 
3 | 4 | # User-specific files 5 | *.suo 6 | *.user 7 | *.sln.docstates 8 | 9 | # Build results 10 | 11 | [Dd]ebug/ 12 | [Rr]elease/ 13 | x64/ 14 | build/ 15 | [Bb]in/ 16 | [Oo]bj/ 17 | 18 | # Enable "build/" folder in the NuGet Packages folder since NuGet packages use it for MSBuild targets 19 | !packages/*/build/ 20 | 21 | # MSTest test Results 22 | [Tt]est[Rr]esult*/ 23 | [Bb]uild[Ll]og.* 24 | 25 | *_i.c 26 | *_p.c 27 | *.ilk 28 | *.meta 29 | *.obj 30 | *.pch 31 | *.pdb 32 | *.pgc 33 | *.pgd 34 | *.rsp 35 | *.sbr 36 | *.tlb 37 | *.tli 38 | *.tlh 39 | *.tmp 40 | *.tmp_proj 41 | *.log 42 | *.vspscc 43 | *.vssscc 44 | .builds 45 | *.pidb 46 | *.log 47 | *.scc 48 | 49 | # Visual C++ cache files 50 | ipch/ 51 | *.aps 52 | *.ncb 53 | *.opensdf 54 | *.sdf 55 | *.cachefile 56 | 57 | # Visual Studio profiler 58 | *.psess 59 | *.vsp 60 | *.vspx 61 | 62 | # Guidance Automation Toolkit 63 | *.gpState 64 | 65 | # ReSharper is a .NET coding add-in 66 | _ReSharper*/ 67 | *.[Rr]e[Ss]harper 68 | 69 | # TeamCity is a build add-in 70 | _TeamCity* 71 | 72 | # DotCover is a Code Coverage Tool 73 | *.dotCover 74 | 75 | # NCrunch 76 | *.ncrunch* 77 | .*crunch*.local.xml 78 | 79 | # Installshield output folder 80 | [Ee]xpress/ 81 | 82 | # DocProject is a documentation generator add-in 83 | DocProject/buildhelp/ 84 | DocProject/Help/*.HxT 85 | DocProject/Help/*.HxC 86 | DocProject/Help/*.hhc 87 | DocProject/Help/*.hhk 88 | DocProject/Help/*.hhp 89 | DocProject/Help/Html2 90 | DocProject/Help/html 91 | 92 | # Click-Once directory 93 | publish/ 94 | 95 | # Publish Web Output 96 | *.Publish.xml 97 | 98 | # NuGet Packages Directory 99 | ## TODO: If you have NuGet Package Restore enabled, uncomment the next line 100 | #packages/ 101 | 102 | # Windows Azure Build Output 103 | csx 104 | *.build.csdef 105 | 106 | # Windows Store app package directory 107 | AppPackages/ 108 | 109 | # Others 110 | sql/ 111 | *.Cache 112 | ClientBin/ 113 | [Ss]tyle[Cc]op.* 114 | ~$* 115 | *~ 116 | *.dbmdl 117 | *.[Pp]ublish.xml 118 | *.pfx 119 | *.publishsettings 120 | 121 | # RIA/Silverlight projects 122 | Generated_Code/ 123 | 124 | # Backup & report files from converting an old project file to a newer 125 | # Visual Studio version. 
Backup files are not needed, because we have git ;-)
126 | _UpgradeReport_Files/
127 | Backup*/
128 | UpgradeLog*.XML
129 | UpgradeLog*.htm
130 |
131 | # SQL Server files
132 | App_Data/*.mdf
133 | App_Data/*.ldf
134 |
135 |
136 | #LightSwitch generated files
137 | GeneratedArtifacts/
138 | _Pvt_Extensions/
139 | ModelManifest.xml
140 |
141 | # =========================
142 | # Windows detritus
143 | # =========================
144 |
145 | # Windows image file caches
146 | Thumbs.db
147 | ehthumbs.db
148 |
149 | # Folder config file
150 | Desktop.ini
151 |
152 | # Recycle Bin used on file shares
153 | $RECYCLE.BIN/
154 |
155 | # Mac desktop service store files
156 | .DS_Store
157 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ComputerVision
2 | ==============
3 |
4 | Computer Vision programs using OpenCV
5 |
--------------------------------------------------------------------------------
/projectcV/FaceRec.h:
--------------------------------------------------------------------------------
1 | /*@author gihan tharanga*/
2 |
3 | #include <iostream>
4 | #include <string>
5 |
6 | //include opencv core
7 | #include "opencv2\core\core.hpp"
8 | #include "opencv2\contrib\contrib.hpp"
9 | #include "opencv2\highgui\highgui.hpp"
10 | #include "opencv2\objdetect\objdetect.hpp"
11 | #include "opencv2\opencv.hpp"
12 |
13 | //file handling
14 | #include <fstream>
15 | #include <sstream>
16 |
17 | using namespace std;
18 | using namespace cv;
19 |
20 | static Mat MatNorm(InputArray _src) {
21 | Mat src = _src.getMat();
22 | // Create and return normalized image:
23 | Mat dst;
24 | switch (src.channels()) {
25 | case 1:
26 | cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
27 | break;
28 | case 3:
29 | cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
30 | break;
31 | default:
32 | src.copyTo(dst);
33 | break;
34 | }
35 | return dst;
36 | }
37 |
38 | static void dbread(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';'){
39 | std::ifstream file(filename.c_str(), ifstream::in);
40 |
41 | if (!file){
42 | string error = "no valid input file";
43 | CV_Error(CV_StsBadArg, error);
44 | }
45 |
46 | string line, path, label;
47 | while (getline(file, line))
48 | {
49 | stringstream liness(line);
50 | getline(liness, path, separator);
51 | getline(liness, label);
52 | if (!path.empty() && !label.empty()){
53 | images.push_back(imread(path, 0));
54 | labels.push_back(atoi(label.c_str()));
55 | }
56 | }
57 | }
58 |
59 | void eigenFaceTrainer(){
60 | vector<Mat> images;
61 | vector<int> labels;
62 |
63 | try{
64 | string filename = "E:/at.txt";
65 | dbread(filename, images, labels);
66 |
67 | cout << "size of the images is " << images.size() << endl;
68 | cout << "size of the labels is " << labels.size() << endl;
69 | cout << "Training begins...." << endl;
70 | }
71 | catch (cv::Exception& e){
72 | cerr << " Error opening the file " << e.msg << endl;
73 | exit(1);
74 | }
75 |
76 | //create algorithm eigenface recognizer
77 | Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
78 | //train data
79 | model->train(images, labels);
80 |
81 | model->save("E:/FDB/yaml/eigenface.yml");
82 |
83 | cout << "Training finished...." << endl;
84 | ////get eigenvalue of eigenface model
85 | //Mat eigenValue = model->getMat("eigenvalues");
86 |
87 | //////get eigenvectors display(eigenface)
88 | //Mat w = model->getMat("eigenvectors");
89 |
90 | //////get the sample mean from the training data
91 | //Mat mean = model->getMat("mean");
92 |
93 | //////save or display
94 | //imshow("mean", MatNorm(mean.reshape(1,images[0].rows)));
95 | ////imwrite(format("%s/mean.png", output_folder.c_str()), MatNorm(mean.reshape(1, images[0].rows)));
96 |
97 | ////display or save eigenfaces
98 | //for (int i = 0; i < min(10, w.cols); i++)
99 | //{
100 | // string msg = format("Eigenvalue #%d = %.5f", i, eigenValue.at<double>(i));
101 | // cout << msg << endl;
102 |
103 | // //get the eigenvector #i
104 | // Mat ev = w.col(i).clone();
105 |
106 | // // Reshape to original size & normalize to [0...255] for imshow.
107 | // Mat grayscale = MatNorm(ev.reshape(1, height));
108 | // // Show the image & apply a Jet colormap for better sensing.
109 | // Mat cgrayscale;
110 | // applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
111 | // //display or save
112 | // imshow(format("eigenface_%d", i), cgrayscale);
113 | // //imwrite(format("%s/eigenface_%d.png", output_folder.c_str(), i), MatNorm(cgrayscale));
114 | //}
115 |
116 | ////display or save image reconstruction
117 | //for (int num_components = min(w.cols, 10); num_components < min(w.cols, 300); num_components += 15)
118 | //{
119 | // // slice the eigenvectors from the model
120 | // Mat evs = Mat(w, Range::all(), Range(0, num_components));
121 | // Mat projection = subspaceProject(evs, mean, images[0].reshape(1, 1));
122 | // Mat reconstruction = subspaceReconstruct(evs, mean, projection);
123 | // // Normalize the result:
124 | // reconstruction = MatNorm(reconstruction.reshape(1, images[0].rows));
125 | // // Display or save:
126 | // imshow(format("eigenface_reconstruction_%d", num_components), reconstruction);
127 | // //imwrite(format("%s/eigenface_reconstruction_%d.png", output_folder.c_str(), num_components), reconstruction);
128 | //
129 | //}
130 | waitKey(10000);
131 | }
132 |
133 | void fisherFaceTrainer(){
134 | /*these two vectors hold the images and labels for training*/
135 | vector<Mat> images;
136 | vector<int> labels;
137 |
138 | try{
139 | string filename = "E:/at.txt";
140 | dbread(filename, images, labels);
141 |
142 | cout << "size of the images is " << images.size() << endl;
143 | cout << "size of the labels is " << labels.size() << endl;
144 | cout << "Training begins...." << endl;
145 | }
146 | catch (cv::Exception& e){
147 | cerr << " Error opening the file " << e.msg << endl;
148 | exit(1);
149 | }
150 |
151 |
152 | Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
153 |
154 | model->train(images, labels);
155 |
156 | int height = images[0].rows;
157 |
158 | model->save("E:/FDB/yaml/fisherface.yml");
159 |
160 | cout << "Training finished...." << endl;
161 |
162 | Mat eigenvalues = model->getMat("eigenvalues");
163 | // And we can do the same to display the Eigenvectors (read Eigenfaces):
164 | Mat W = model->getMat("eigenvectors");
165 | // Get the sample mean from the training data
166 | Mat mean = model->getMat("mean");
167 | //imshow("mean", MatNorm(mean.reshape(1, images[0].rows)));
168 | //imwrite(format("%s/mean.png", output_folder.c_str()), MatNorm(mean.reshape(1, images[0].rows)));
169 |
170 | // Display or save the first, at most 16 Fisherfaces:
171 | /*for (int i = 0; i < min(16, W.cols); i++) {
172 | string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
173 | cout << msg << endl;
174 | // get eigenvector #i
175 | Mat ev = W.col(i).clone();
176 | // Reshape to original size & normalize to [0...255] for imshow.
177 | Mat grayscale = MatNorm(ev.reshape(1, height));
178 | // Show the image & apply a Bone colormap for better sensing.
179 | Mat cgrayscale;
180 | applyColorMap(grayscale, cgrayscale, COLORMAP_BONE);
181 | // Display or save:
182 | //imshow(format("fisherface_%d", i), cgrayscale);
183 | //imwrite(format("%s/fisherface_%d.png", output_folder.c_str(), i), MatNorm(cgrayscale));
184 | }
185 |
186 | // Display or save the image reconstruction at some predefined steps:
187 | for (int num_component = 0; num_component < min(16, W.cols); num_component++) {
188 | // Slice the Fisherface from the model:
189 | Mat ev = W.col(num_component);
190 | Mat projection = subspaceProject(ev, mean, images[0].reshape(1, 1));
191 | Mat reconstruction = subspaceReconstruct(ev, mean, projection);
192 | // Normalize the result:
193 | reconstruction = MatNorm(reconstruction.reshape(1, images[0].rows));
194 | // Display or save:
195 | imshow(format("fisherface_reconstruction_%d", num_component), reconstruction);
196 | //imwrite(format("%s/fisherface_reconstruction_%d.png", output_folder.c_str(), num_component), reconstruction);
197 | }*/
198 |
199 | waitKey(10000);
200 | }
201 |
202 | void LBPHFaceTrainer(){
203 |
204 | vector<Mat> images;
205 | vector<int> labels;
206 |
207 | try{
208 | string filename = "E:/at.txt";
209 | dbread(filename, images, labels);
210 |
211 | cout << "size of the images is " << images.size() << endl;
212 | cout << "size of the labels is " << labels.size() << endl;
213 | cout << "Training begins...." << endl;
214 | }
215 | catch (cv::Exception& e){
216 | cerr << " Error opening the file " << e.msg << endl;
217 | exit(1);
218 | }
219 |
220 | //lbph face recognizer model
221 | Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();
222 |
223 | //training images with relevant labels
224 | model->train(images, labels);
225 |
226 | //save the data in yaml file
227 | model->save("E:/FDB/yaml/LBPHface.yml");
228 |
229 | cout << "training finished...." << endl;
230 |
231 | waitKey(10000);
232 | }
233 |
234 | //lbpcascades work as fast as haarcascades with the LBPH recognizer
235 | int FaceRecognition(){
236 |
237 | cout << "start recognizing..." << endl;
238 |
239 | //load pre-trained data sets
240 | Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
241 | model->load("E:/FDB/yaml/fisherface.yml");
242 |
243 | Mat testSample = imread("E:/db/s41/5.pgm", 0);
244 |
245 | int img_width = testSample.cols;
246 | int img_height = testSample.rows;
247 |
248 |
249 | //lbpcascades/lbpcascade_frontalface.xml
250 | string classifier = "C:/opencv/sources/data/haarcascades/haarcascade_frontalface_default.xml";
251 |
252 | CascadeClassifier face_cascade;
253 | string window = "Capture - face detection";
254 |
255 | if (!face_cascade.load(classifier)){
256 | cout << " Error loading file" << endl;
257 | return -1;
258 | }
259 |
260 | VideoCapture cap(0);
261 | //VideoCapture cap("C:/Users/lsf-admin/Pictures/Camera Roll/video000.mp4");
262 |
263 | if (!cap.isOpened())
264 | {
265 | cout << "exit" << endl;
266 | return -1;
267 | }
268 |
269 | //double fps = cap.get(CV_CAP_PROP_FPS);
270 | //cout << " Frames per seconds " << fps << endl;
271 | namedWindow(window, 1);
272 | long count = 0;
273 |
274 | while (true)
275 | {
276 | vector<Rect> faces;
277 | Mat frame;
278 | Mat graySacleFrame;
279 | Mat original;
280 |
281 | cap >> frame;
282 | //cap.read(frame);
283 | count = count + 1;//count frames;
284 |
285 | if (!frame.empty()){
286 |
287 | //clone from original frame
288 | original = frame.clone();
289 |
290 | //convert image to gray scale and equalize
291 | cvtColor(original, graySacleFrame, CV_BGR2GRAY);
292 | //equalizeHist(graySacleFrame, graySacleFrame);
293 |
294 | //detect face in gray image
295 | face_cascade.detectMultiScale(graySacleFrame, faces, 1.1, 3, 0, cv::Size(90, 90));
296 |
297 | //number of faces detected
298 | cout << faces.size() << " faces detected" << endl;
299 | std::string frameset = std::to_string(count);
300 | std::string faceset = std::to_string(faces.size());
301 |
302 | int width = 0, height = 0;
303 |
304 | //region of interest
305 | //cv::Rect roi;
306 |
307 | //person name
308 | string Pname = "";
309 |
310 | for (int i = 0; i < faces.size(); i++)
311 | {
312 | //region of interest
313 | Rect face_i = faces[i];
314 |
315 | //crop the roi from the gray image
316 | Mat face = graySacleFrame(face_i);
317 |
318 | //resizing the cropped image to suit to database image sizes
319 | Mat face_resized;
320 | cv::resize(face, face_resized, Size(img_width, img_height), 1.0, 1.0, INTER_CUBIC);
321 |
322 | //recognizing what faces detected
323 | int label = -1; double confidence = 0;
324 | model->predict(face_resized, label, confidence);
325 |
326 | cout << " confidence " << confidence << endl;
327 |
328 | //drawing a green rectangle around the recognized face
329 | rectangle(original, face_i, CV_RGB(0, 255, 0), 1);
330 | string text = "Detected";
331 | if (label == 40){
332 | //string text = format("Person is = %d", label);
333 | Pname = "gihan";
334 | }
335 | else{
336 | Pname = "unknown";
337 | }
338 |
339 |
340 | int pos_x = std::max(face_i.tl().x - 10, 0);
341 | int pos_y = std::max(face_i.tl().y - 10, 0);
342 |
343 | //name the person who is in the image
344 | putText(original, text, Point(pos_x, pos_y), FONT_HERSHEY_COMPLEX_SMALL, 1.0, CV_RGB(0, 255, 0), 1.0);
345 | //cv::imwrite("E:/FDB/"+frameset+".jpg", cropImg);
346 |
347 | }
348 |
349 |
350 | putText(original, "Frames: " + frameset, Point(30, 60), CV_FONT_HERSHEY_COMPLEX_SMALL, 1.0, CV_RGB(0, 255, 0), 1.0);
351 | putText(original, "Person: " + Pname, Point(30, 90), CV_FONT_HERSHEY_COMPLEX_SMALL, 1.0, CV_RGB(0, 255, 0), 1.0);
352 | //display to the window
353 | cv::imshow(window, original);
354 |
355 | //cout << "model info " << model->getDouble("threshold") << endl;
356 |
357 | }
358 | if (waitKey(30) >= 0) break;
359 | }
360 | }
361 |
--------------------------------------------------------------------------------
/projectcV/VideoCap.cpp:
--------------------------------------------------------------------------------
1 | /*@author gihan tharanga*/
2 |
3 | #include "VideoCap.h"
4 |
5 | #include <iostream>
6 | #include <string>
7 | #include "opencv2\objdetect\objdetect.hpp"
8 | #include <opencv2\core\core.hpp>
9 | #include <opencv2\highgui\highgui.hpp>
10 | #include <opencv2\imgproc\imgproc.hpp>
11 | #include <opencv2\opencv.hpp>
12 |
13 | using namespace std;
14 | using namespace cv;
15 |
16 |
17 | int FaceDetector(string &classifier){
18 |
19 | //haarcascade_frontalface_alt2
20 | //string face = "C:/opencv/sources/data/lbpcascades/lbpcascade_frontalface.xml";
21 |
22 | CascadeClassifier face_cascade;
23 | string window = "Capture - face detection";
24 |
25 | if (!face_cascade.load(classifier)){
26 | cout << " Error loading file" << endl;
27 | return -1;
28 | }
29 |
30 | VideoCapture cap(0);
31 | //VideoCapture cap("C:/Users/lsf-admin/Pictures/Camera Roll/video000.mp4");
32 |
33 | if (!cap.isOpened())
34 | {
35 | cout << "exit" << endl;
36 | return -1;
37 | }
38 |
39 | //double fps = cap.get(CV_CAP_PROP_FPS);
40 | //cout << " Frames per seconds " << fps << endl;
41 | namedWindow(window, 1);
42 | long count = 0;
43 |
44 | string name = "gihan";
45 | while (true)
46 | {
47 | vector<Rect> faces;
48 | Mat frame;
49 | Mat graySacleFrame;
50 | Mat cropImg;
51 |
52 | cap >> frame;
53 | //cap.read(frame);
54 | count = count + 1;//count frames;
55 |
56 | if (!frame.empty()){
57 |
58 | //convert image to gray scale and equalize
59 | //cvtColor(frame, graySacleFrame, CV_BGR2GRAY);
60 | //equalizeHist(graySacleFrame, graySacleFrame);
61 |
62 | face_cascade.detectMultiScale(frame, faces, 1.1, 3, 0, cv::Size(190, 190), cv::Size(200, 200));
63 |
64 | cout << faces.size() << " faces detected" << endl;
65 | std::string frameset = std::to_string(count);
66 | std::string faceset = std::to_string(faces.size());
67 |
68 | int width = 0, height = 0;
69 |
70 | //region of interest
71 | cv::Rect roi;
72 |
73 | for (int i = 0; i < faces.size(); i++)
74 | {
75 | rectangle(frame, Point(faces[i].x, faces[i].y), Point(faces[i].x + faces[i].width, faces[i].y + faces[i].height), Scalar(255, 0, 255), 1, 8, 0);
76 | cout << faces[i].width << faces[i].height << endl;
77 | width = faces[i].width; height = faces[i].height;
78 |
79 | //select the roi
80 | roi.x = faces[i].x; roi.width = faces[i].width;
81 | roi.y = faces[i].y; roi.height = faces[i].height;
82 |
83 | //get the roi from the original frame
84 | cropImg = frame(roi);
85 | //cv::imshow("ROI", cropImg);
86 |
87 | //cv::imwrite("E:/FDB/"+frameset+".jpg", cropImg);
88 | }
89 |
90 | std::string wi = std::to_string(width);
91 | std::string he = std::to_string(height);
92 |
93 | cv::putText(frame, "Frames: " + frameset, cvPoint(30, 30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 255, 0), 1, CV_AA);
94 | cv::putText(frame, "Faces Detected: " + faceset, cvPoint(30, 60), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 255, 0), 1, CV_AA);
95 | cv::putText(frame, "Resolution " + wi + " x " + he, cvPoint(30, 90), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 255, 0), 1, CV_AA);
96 | //cv::putText(frame, "size : " +)
97 |
98 | cv::imshow(window, frame);
99 | }
100 | if (waitKey(30) >= 0) break;
101 | }
102 | }
103 |
104 | int videoCapturing()
105 | {
106 | VideoCapture cap(0);
107 |
108 | if (!cap.isOpened())
109 | {
110 | cout << "camera not opened" << endl;
111 | return -1;
112 | }
113 |
114 | Mat edges;
115 | namedWindow("edges", 1);
116 | for (;;)
117 | {
118 | Mat frame;
119 | cap >> frame; // get a new frame from camera
120 | if (!frame.empty()) {
121 | cvtColor(frame, edges, CV_HLS2BGR);
122 | GaussianBlur(edges, edges, Size(7, 7), 1.5, 1.5);
123 | Canny(edges, edges, 0, 30, 3);
124 | imshow("edges", edges);
125 | }
126 | if (waitKey(30) == 10) break;
127 | }
128 |
129 | return 0;
130 | }
131 |
132 | int videoCapOriginal()
133 | {
134 | /*camera*/
135 | VideoCapture cap(0);
136 |
137 | /*initialize*/
138 | if (!cap.isOpened())
139 | {
140 | cout << "exit" << endl;
141 | return -1;
142 | }
143 |
144 | /*create window for display video*/
145 | namedWindow("window", 1);
146 |
147 | while (true)
148 | {
149 | /*reads each frame and assigns it to a Mat*/
150 | Mat frame;
151 | cap.read(frame);
152 |
153 | if (!frame.empty()){
154 | /*add to window*/
155 | imshow("window", frame);
156 | }
157 | if (waitKey(30) >= 0) break;
158 | }
159 | return 0;
160 | }
--------------------------------------------------------------------------------
/projectcV/VideoCap.h:
--------------------------------------------------------------------------------
1 | /*@author gihan tharanga*/
2 |
3 | #include <iostream>
4 | #include <string>
5 |
6 | //video capturing methods
7 |
8 | int videoCapturing();
9 | int videoCapOriginal();
10 |
11 | /*detect faces and display the frames and the number of faces*/
12 | int FaceDetector(std::string&);
13 |
14 |
--------------------------------------------------------------------------------
/projectcV/imgProc.h:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <string>
3 |
4 | #include "opencv2\core\core.hpp"
5 | #include "opencv2\contrib\contrib.hpp"
6 | #include "opencv2\highgui\highgui.hpp"
7 | #include "opencv2\objdetect\objdetect.hpp"
8 | #include "opencv2\opencv.hpp"
9 |
10 | using namespace std;
11 | using namespace cv;
12 |
13 | int smoothingImage(){
14 |
15 | Mat image, dst;
16 | image = imread("E://Lenna.png",1);
17 |
18 | if (image.empty()){
19 | cout << "image is not loaded...." << endl;
20 | return -1;
21 | }
22 |
23 | cout << "Resolution is : "<< image.cols << " x " << image.rows << endl;
24 | namedWindow("src", 1);
25 | imshow("src", image);
26 |
27 | for (int i = 1; i < 5; i++)
28 | {
29 | string value = std::to_string(i);
30 | string name = value + " x " + value;
31 | blur(image, dst, Size(i, i), Point(-1, -1), 4);
32 | namedWindow(name,1);
33 | imshow(name, dst);
34 | }
35 |
36 | waitKey(0);
37 | destroyAllWindows();
38 | return 0;
39 | }
40 |
41 | int edgeDetectionsCanny(){
42 | VideoCapture capture;
43 | Mat original, gray, edge, detectEdges;
44 | vector<vector<Point> > contours;
45 | vector<Vec4i> hierarchy;
46 | RNG rng(12345);
47 |
48 | capture.open(0);
49 |
50 | if (!capture.isOpened()){
51 | cout << " camera open error" << endl;
52 | return -1;
53 | }
54 |
55 | namedWindow("edge detection", 1);
56 |
57 | while (true)
58 | {
59 | capture >> original;
60 |
61 | if (!original.empty())
62 | {
63 | /*convert image to gray scale*/
64 | cvtColor(original, gray, CV_BGR2GRAY);
65 | /*blur image */
66 | blur(original, edge, Size(3, 3));
67 | /*detecting edges using canny edge detection*/
68 | Canny(edge, detectEdges, 10, 100, 3, true);
69 |
70 | findContours(detectEdges, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
71 |
72 | Mat drawing = Mat::zeros(detectEdges.size(), CV_8UC3);
73 |
74 | for (int i = 0; i< contours.size(); i++)
75 | {
76 | drawContours(original, contours, i, Scalar(0, 0, 255), 1, 8, hierarchy, 0, Point());
77 | }
78 | /*showing frames in window*/
79 | imshow("edge detection", original);
80 | }
81 | if (waitKey(30) >= 0) break;
82 | }
83 | }
--------------------------------------------------------------------------------
/projectcV/main.cpp:
--------------------------------------------------------------------------------
1 | /*@author gihan tharanga*/
2 |
3 | #include <iostream>
4 | #include <string>
5 |
6 | #include "opencv2\core\core.hpp"
7 | #include "opencv2\contrib\contrib.hpp"
8 | #include "opencv2\highgui\highgui.hpp"
9 | #include "opencv2\objdetect\objdetect.hpp"
10 | #include "opencv2\opencv.hpp"
11 |
12 | /*header files*/
13 | //#include "FaceRec.h"
14 | //#include "VideoCap.h"
15 | #include "imgProc.h"
16 |
17 | using namespace std;
18 | using namespace cv;
19 |
20 | int main()
21 | {
22 |
23 | int x = smoothingImage();
24 |
25 |
26 | system("pause");
27 | return 0;
28 | }
--------------------------------------------------------------------------------
/projectcV/projectcV.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio 2013
4 | VisualStudioVersion = 12.0.30501.0
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "projectcV", "projectcV.vcxproj", "{E9FCB101-A885-49CE-BD89-4958D84B1D3D}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|Win32 = Debug|Win32
11 | Release|Win32 = Release|Win32
12 | EndGlobalSection
13 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
14 | {E9FCB101-A885-49CE-BD89-4958D84B1D3D}.Debug|Win32.ActiveCfg = Debug|Win32
15 | {E9FCB101-A885-49CE-BD89-4958D84B1D3D}.Debug|Win32.Build.0 = Debug|Win32
16 | {E9FCB101-A885-49CE-BD89-4958D84B1D3D}.Release|Win32.ActiveCfg = Release|Win32
17 | {E9FCB101-A885-49CE-BD89-4958D84B1D3D}.Release|Win32.Build.0 = Release|Win32
18 | EndGlobalSection
19 | GlobalSection(SolutionProperties) = preSolution
20 | HideSolutionNode = FALSE
21 | EndGlobalSection
22
| EndGlobal 23 | -------------------------------------------------------------------------------- /projectcV/projectcV.vcxproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | Debug 6 | Win32 7 | 8 | 9 | Release 10 | Win32 11 | 12 | 13 | 14 | {E9FCB101-A885-49CE-BD89-4958D84B1D3D} 15 | Win32Proj 16 | projectcV 17 | 18 | 19 | 20 | Application 21 | true 22 | v120 23 | Unicode 24 | 25 | 26 | Application 27 | false 28 | v120 29 | true 30 | Unicode 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | true 44 | 45 | 46 | false 47 | 48 | 49 | 50 | 51 | 52 | Level3 53 | Disabled 54 | WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) 55 | $(OPENCV_DIR)\include;%(AdditionalIncludeDirectories) 56 | 57 | 58 | Console 59 | true 60 | $(OPENCV_DIR)\x86\vc12\lib;%(AdditionalLibraryDirectories) 61 | opencv_calib3d249d.lib;opencv_contrib249d.lib;opencv_core249d.lib;opencv_features2d249d.lib;opencv_flann249d.lib;opencv_gpu249d.lib;opencv_highgui249d.lib;opencv_imgproc249d.lib;opencv_legacy249d.lib;opencv_ml249d.lib;opencv_nonfree249d.lib;opencv_objdetect249d.lib;opencv_ocl249d.lib;opencv_photo249d.lib;opencv_stitching249d.lib;opencv_superres249d.lib;opencv_ts249d.lib;opencv_video249d.lib;opencv_videostab249d.lib;%(AdditionalDependencies) 62 | 63 | 64 | 65 | 66 | Level3 67 | 68 | 69 | MaxSpeed 70 | true 71 | true 72 | WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) 73 | 74 | 75 | Console 76 | true 77 | true 78 | true 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | -------------------------------------------------------------------------------- /projectcV/projectcV.vcxproj.filters: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF} 6 | cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx 7 | 8 | 9 | {93995380-89BD-4b04-88EB-625FBE52EBFB} 10 | h;hh;hpp;hxx;hm;inl;inc;xsd 11 | 12 | 13 | {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} 14 | rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms 15 | 16 | 17 | 18 | 19 | Source Files 20 | 21 | 22 | Source Files 23 | 24 | 25 | 26 | 27 | Header Files 28 | 29 | 30 | Header Files 31 | 32 | 33 | Header Files 34 | 35 | 36 | --------------------------------------------------------------------------------
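
Note: main.cpp above only calls smoothingImage() from imgProc.h, with FaceRec.h and VideoCap.h commented out. The following is a rough, hypothetical sketch (not a file in the repository) of how the training and recognition functions defined in FaceRec.h could be wired together, assuming the same OpenCV 2.4.x contrib setup and the hard-coded E:/ paths used in that header:

/* hypothetical driver, not part of the repository:
   trains a Fisherface model, then runs the webcam recognition loop from FaceRec.h */
#include <cstdlib>
#include "FaceRec.h"

int main()
{
    // reads "E:/at.txt" (one "imagepath;label" entry per line, as parsed by dbread)
    // and writes the trained model to E:/FDB/yaml/fisherface.yml
    fisherFaceTrainer();

    // loads the saved YAML model and the haarcascade, then detects and labels
    // faces from camera 0 until a key is pressed
    FaceRecognition();

    system("pause");
    return 0;
}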