├── .gitignore ├── displayimage.cpp ├── saveimage.cpp ├── liveimage.cpp ├── smoothimage.cpp ├── livevideo.cpp ├── writevideo.cpp ├── README.md ├── polygons.cpp ├── colourquery.cpp ├── generic_interface.cpp ├── meanshift_segmentation.cpp ├── bg_fg_mog.cpp ├── CMakeLists.txt ├── mean_filter.cpp ├── optical_flow_fback.cpp ├── bilateral_filter.cpp ├── harris.cpp ├── generic_selection_interface.cpp ├── fourier.cpp ├── opencv_c_from_cpp.cpp ├── LICENSE ├── generic_recognition_interface.cpp ├── histogram_based_recognition_colour.cpp ├── histogram_based_recognition.cpp ├── butterworth_lowpass.cpp ├── nlm.cpp ├── nlm2.cpp └── feature_point_matching.cpp /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files 2 | *.slo 3 | *.lo 4 | *.o 5 | *.obj 6 | 7 | # Precompiled Headers 8 | *.gch 9 | *.pch 10 | 11 | # Compiled Dynamic libraries 12 | *.so 13 | *.dylib 14 | *.dll 15 | 16 | # Fortran module files 17 | *.mod 18 | *.smod 19 | 20 | # Compiled Static libraries 21 | *.lai 22 | *.la 23 | *.a 24 | *.lib 25 | 26 | # Executables 27 | *.exe 28 | *.out 29 | *.app 30 | -------------------------------------------------------------------------------- /displayimage.cpp: -------------------------------------------------------------------------------- 1 | // Example : displaying an image 2 | // usage: prog 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | int main( int argc, 
char** argv ) 22 | { 23 | 24 | Mat img; // image object 25 | 26 | const string windowName = "OPENCV: basic image display"; // window name 27 | 28 | // check that command line arguments are provided and image reads in OK 29 | 30 | if((argc == 2) && !(img = imread( argv[1], IMREAD_COLOR)).empty()) 31 | { 32 | // create window object 33 | 34 | namedWindow(windowName, 1 ); 35 | 36 | // display image in window 37 | 38 | imshow(windowName, img ); 39 | 40 | // start event processing loop (very important,in fact essential for GUI) 41 | 42 | waitKey(0); 43 | 44 | // all OK : main returns 0 45 | 46 | return 0; 47 | } 48 | 49 | // not OK : main returns -1 50 | 51 | return -1; 52 | } 53 | -------------------------------------------------------------------------------- /saveimage.cpp: -------------------------------------------------------------------------------- 1 | // Example : saving an image 2 | // usage: prog 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | int main( int argc, char** argv ) 22 | { 23 | 24 | Mat inputImg; // input image object 25 | Mat outputImg; // output image object 26 | std::vector params; // file saving compression parameters 27 | 28 | // check that command line arguments are provided and image reads in OK 29 | 30 | if ((argc == 3) && !(inputImg = imread( argv[1], IMREAD_COLOR)).empty()) 31 | { 32 | 33 | // invert image_name 34 | 35 | bitwise_not(inputImg, outputImg); 36 | 37 | // write out image to 
file 38 | 39 | params.push_back(IMWRITE_JPEG_QUALITY); 40 | params.push_back(95); 41 | 42 | imwrite(argv[2], outputImg, params); 43 | 44 | // all OK : main returns 0 45 | 46 | return 0; 47 | } 48 | 49 | // not OK : main returns -1 50 | 51 | return -1; 52 | } 53 | -------------------------------------------------------------------------------- /liveimage.cpp: -------------------------------------------------------------------------------- 1 | // Example : grab and display a single live image 2 | // usage: prog 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | int main( int argc, char** argv ) 22 | { 23 | 24 | Mat img; // image object 25 | 26 | const string windowName = "OPENCV: live image display"; // window name 27 | 28 | // create window object 29 | 30 | namedWindow(windowName, 1); 31 | 32 | // grab an image from camera (here assume only 1 camera, device 0) 33 | 34 | VideoCapture cap(0); // video capture object 35 | 36 | if(!cap.isOpened()){ 37 | std::cout << "error: could not grab a frame" << std::endl; 38 | exit(0); 39 | } 40 | 41 | cap >> img; // retrieve the captured frame as an image 42 | 43 | // display image in window 44 | 45 | imshow(windowName, img); 46 | 47 | // start event processing loop (very important,in fact essential for GUI) 48 | 49 | waitKey(0); 50 | 51 | // the camera will be deinitialized automatically in VideoCapture destructor 52 | 53 | // all OK : main returns 0 54 | 55 | return 0; 56 | 57 | 
} 58 | -------------------------------------------------------------------------------- /smoothimage.cpp: -------------------------------------------------------------------------------- 1 | // Example : smooth an image 2 | // usage: prog 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | 22 | int main( int argc, char** argv ) 23 | { 24 | 25 | Mat inputImg; // input image object 26 | Mat outputImg; // output image object 27 | 28 | const string windowName = "OPENCV: blurred image"; // window name 29 | 30 | // check that command line arguments are provided and image reads in OK 31 | 32 | if ((argc == 2) && !(inputImg = imread( argv[1], IMREAD_COLOR)).empty()) 33 | { 34 | 35 | // blur the input image using a 5 x 5 mask and store in output image 36 | // (The output image will be created automatically) 37 | 38 | GaussianBlur(inputImg, outputImg, Size(5, 5), 0, 0, BORDER_DEFAULT); 39 | 40 | // create window object 41 | 42 | namedWindow(windowName, 1 ); 43 | 44 | // display image in window 45 | 46 | imshow(windowName, outputImg ); 47 | 48 | // start event processing loop (very important,in fact essential for GUI) 49 | 50 | waitKey(0); 51 | 52 | // all OK : main returns 0 53 | 54 | return 0; 55 | } 56 | 57 | // not OK : main returns -1 58 | 59 | return -1; 60 | } 61 | -------------------------------------------------------------------------------- /livevideo.cpp: 
-------------------------------------------------------------------------------- 1 | // Example : grab and display live video 2 | // usage: prog 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | int main( int argc, char** argv ) 22 | { 23 | 24 | Mat img; // image object 25 | 26 | const string windowName = "OPENCV: live video display"; // window name 27 | 28 | // create window object 29 | 30 | namedWindow(windowName, 1 ); 31 | 32 | // grab an image from camera (here assume only 1 camera, device 0) 33 | 34 | VideoCapture cap(0); // video capture object 35 | 36 | if(!cap.isOpened()){ 37 | std::cout << "error: could not grab a frame" << std::endl; 38 | exit(0); 39 | } 40 | 41 | // loop and display up to N frames 42 | 43 | int nFrames = 50; 44 | 45 | for (int i=0;i> img; // retrieve the captured frame as an image 48 | 49 | // display image in window 50 | 51 | imshow(windowName, img); 52 | 53 | // start event processing loop (very important,in fact essential for GUI) 54 | // Note that without the 40[msec] delay the captured sequence 55 | // is not displayed properly. 
56 | 57 | waitKey(40); 58 | 59 | } 60 | 61 | // the camera will be deinitialized automatically in VideoCapture destructor 62 | 63 | // all OK : main returns 0 64 | 65 | return 0; 66 | 67 | } 68 | -------------------------------------------------------------------------------- /writevideo.cpp: -------------------------------------------------------------------------------- 1 | // Example : grab and write a video file 2 | // usage: prog 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | int main( int argc, char** argv ) 22 | { 23 | 24 | Mat img; // image object 25 | 26 | // check that command line arguments are provided 27 | 28 | if( argc == 2 ) 29 | { 30 | 31 | // here we will use a connected camera as the 32 | // originating source for our video file 33 | 34 | VideoCapture cap(0); // video capture object 35 | 36 | if(!cap.isOpened()){ 37 | std::cout << "error: could not grab a frame" << std::endl; 38 | exit(0); 39 | } 40 | cap >> img; // retrieve the captured frame as an image 41 | 42 | // set up video writer object (using properties of camera capture source) 43 | // N.B. 
we can use "CV_FOURCC('D','I','V','X')" specify an MPEG-4 encoded video 44 | // just -1 to call up a dialogue box (under MS Windows) 45 | 46 | VideoWriter videoOutput(argv[1], /* CV_FOURCC('D','I','V','X')*/ -1, 25, img.size(), true); 47 | if(!videoOutput.isOpened()){ 48 | std::cout << "error: could not open video file" << std::endl; 49 | exit(0); 50 | } 51 | 52 | // loop and store up to N frames 53 | 54 | int nFrames = 500; // 500 frames @ 25 fps = 20 seconds of video 55 | 56 | std::cout << "\nStarting video capture ........" << std::flush; // signal start to user 57 | 58 | for (int i=0;i> img; // retrieve the captured frame as an image 63 | videoOutput << img; // send to video writer object 64 | } 65 | 66 | std::cout << " finshed" << std::endl; // signal end to user 67 | 68 | // the camera will be deinitialized automatically in VideoCapture destructor 69 | // the video file will be deinitialized automatically in VideoWriter destructor 70 | 71 | // all OK : main returns 0 72 | 73 | return 0; 74 | } 75 | 76 | // not OK : main returns -1 77 | 78 | return -1; 79 | } 80 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # C++ Image Processing and Computer Vision OpenCV Teaching Examples 2 | 3 | OpenCV C++ Image Processing and Computer Vision examples used for teaching over the years (2010-2013+). 4 | 5 | All tested with [OpenCV](http://www.opencv.org) 4.0 (this branch) and GCC (Linux). 6 | 7 | --- 8 | 9 | ### Background: 10 | 11 | If I taught you between 2010 and 2013 at [Cranfield University](http://www.cranfield.ac.uk) or [ESTIA](http://www.estia.fr) - these are the C++ examples from class. 12 | 13 | Additionally used to generate the video examples within the ebook version of: 14 | 15 | [Dictionary of Computer Vision and Image Processing](http://dx.doi.org/10.1002/9781119286462) (R.B. Fisher, T.P. Breckon, K. Dawson-Howe, A. Fitzgibbon, C. 
Robertson, E. Trucco, C.K.I. Williams), Wiley, 2014. 16 | [[Google Books](http://books.google.co.uk/books?id=TaEQAgAAQBAJ&lpg=PP1&dq=isbn%3A1118706811&pg=PP1v=onepage&q&f=false)] [[doi](http://dx.doi.org/10.1002/9781119286462)] 17 | 18 | --- 19 | 20 | ### How to Build and run: 21 | 22 | ``` 23 | git clone https://github.com/tobybreckon/cpp-examples-ipcv.git 24 | cd cpp-examples-ipcv 25 | cmake . 26 | make 27 | ./ 28 | ``` 29 | 30 | Demo source code is provided "as is" to aid your learning and understanding of topics on the course. 31 | 32 | Most run with a webcam connected or from a command line supplied video file of a format OpenCV supports on your system (otherwise edit the script to provide your own image source). 33 | 34 | N.B. you may need to change the line near the top that specifies the camera device to use on some examples below - change "0" if you have one webcam, I have it set to "1" to skip my built-in laptop webcam and use the connected USB camera. 35 | 36 | --- 37 | 38 | ### Reference: 39 | 40 | Many of these techniques are fully explained in corresponding section of: 41 | 42 | _Fundamentals of Digital Image Processing: A Practical Approach with Examples in Matlab_, 43 | Chris J. Solomon and Toby P. Breckon, Wiley-Blackwell, 2010 44 | ISBN: 0470844736, DOI:10.1002/9780470689776, http://www.fundipbook.com 45 | 46 | ``` 47 | bibtex: 48 | 49 | @Book{solomonbreckon10fundamentals, 50 | author = {Solomon, C.J. and Breckon, T.P.}, 51 | title = {Fundamentals of Digital Image Processing: 52 | A Practical Approach with Examples in Matlab}, 53 | publisher = {Wiley-Blackwell}, 54 | year = {2010}, 55 | isbn = {0470844736}, 56 | doi = {10.1002/9780470689776}, 57 | url = {http://www.fundipbook.com} 58 | } 59 | ``` 60 | 61 | --- 62 | 63 | If you find any bugs report them to me (or better still submit a pull request) - toby.breckon@durham.ac.uk 64 | 65 | _"may the source be with you"_ - anon. 
66 | -------------------------------------------------------------------------------- /polygons.cpp: -------------------------------------------------------------------------------- 1 | // Example : using openCV polygon functions in C++ 2 | 3 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 4 | 5 | // Copyright (c) 2011 School of Engineering, Cranfield University 6 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 7 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 8 | 9 | #include "opencv2/videoio.hpp" 10 | #include "opencv2/highgui.hpp" 11 | #include "opencv2/imgproc.hpp" 12 | 13 | #include // standard C++ I/O 14 | #include // standard C++ I/O 15 | #include // includes max() 16 | 17 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 18 | using namespace std; 19 | 20 | /******************************************************************************/ 21 | 22 | int main( int argc, char** argv ) 23 | { 24 | // create a RGB colour image (set it to a black background) 25 | 26 | Mat img = Mat::zeros(400, 400, CV_8UC3); 27 | 28 | // define a polygon (as a vector of points) 29 | 30 | vector contour; 31 | contour.push_back(Point(50,50)); 32 | contour.push_back(Point(300,50)); 33 | contour.push_back(Point(350,200)); 34 | contour.push_back(Point(300,150)); 35 | contour.push_back(Point(150,350)); 36 | contour.push_back(Point(100,100)); 37 | 38 | // create a pointer to the data as an array of points (via a conversion to 39 | // a Mat() object) 40 | 41 | const cv::Point *pts = (const cv::Point*) Mat(contour).data; 42 | int npts = Mat(contour).rows; 43 | 44 | std::cout << "Number of polygon vertices: " << npts << std::endl; 45 | 46 | // draw the polygon 47 | 48 | polylines(img, &pts,&npts, 1, 49 | true, // draw closed contour (i.e. 
joint end to start) 50 | Scalar(0,255,0),// colour RGB ordering (here = green) 51 | 3, // line thickness 52 | LINE_AA, 0); 53 | 54 | 55 | // do point in polygon test (by conversion/cast to a Mat() object) 56 | // define and test point one (draw it in red) 57 | 58 | Point2f test_pt; 59 | test_pt.x = 150; 60 | test_pt.y = 75; 61 | 62 | rectangle(img, test_pt, test_pt, Scalar(0, 0, 255), 3, 8, 0); // RED point 63 | 64 | if (pointPolygonTest(Mat(contour), test_pt, true) > 0){ 65 | std::cout << "RED {" << test_pt.x << "," << test_pt.y 66 | << "} is in the polygon (dist. " 67 | << pointPolygonTest(Mat(contour), test_pt, 1) << ")" 68 | << std::endl; 69 | } 70 | 71 | // define and test point two (draw it in blue) 72 | 73 | test_pt.x = 50; 74 | test_pt.y = 350; 75 | 76 | rectangle(img, test_pt, test_pt, Scalar(255, 0, 0), 3, 8, 0); // BLUE point 77 | 78 | if (pointPolygonTest(Mat(contour), test_pt, true) < 0){ 79 | std::cout << "BLUE {" << test_pt.x << "," << test_pt.y 80 | << "} is NOT in the polygon (dist. " 81 | << pointPolygonTest(Mat(contour), test_pt, 1) << ")" 82 | << std::endl; 83 | } 84 | 85 | // pointPolygonTest :- 86 | // "The function determines whether the point is inside a contour, outside, 87 | // or lies on an edge (or coincides with a vertex). It returns positive 88 | // (inside), negative (outside) or zero (on an edge) value, correspondingly. 89 | // When measureDist=false , the return value is +1, -1 and 0, respectively. 90 | // Otherwise, the return value it is a signed distance between the point 91 | // and the nearest contour edge." 
- OpenCV Manual version 2.1 92 | 93 | // create an image and display the image 94 | 95 | namedWindow("Polygon Test", 0); 96 | imshow( "Polygon Test", img ); 97 | waitKey(0); 98 | 99 | return 0; 100 | } 101 | 102 | /******************************************************************************/ 103 | -------------------------------------------------------------------------------- /colourquery.cpp: -------------------------------------------------------------------------------- 1 | // Example : query colour elements in an image 2 | // usage: prog 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | /******************************************************************************/ 22 | 23 | void colourQueryMouseCallBack(int event, int x, int y, int flags, void* img) 24 | { 25 | 26 | int row = y; // y-axis is image rows (down the side) 27 | int col = x; // x-axis is image columns (along the top) 28 | 29 | switch (event) 30 | { 31 | case EVENT_LBUTTONDOWN : 32 | 33 | // left button prints colour information at click location to stdout 34 | 35 | std::cout << "Colour information at image location (" << x << "," 36 | << y << "): "; 37 | 38 | for(int channel = 0; channel < 3; channel++){ // three channels (B,G,R) 39 | 40 | // note that variable img is now a pointer to the image object 41 | // in this case (as it was passed by reference) 42 | 43 | // IN GENERAL: pixel access is img.at(row,col)[channel] for 44 | // a 3 channel 
image (3 bytes per pixel) and img.at(row,col) for 45 | // a single channel image (1 byte per pixel) 46 | 47 | std::cout << 48 | (unsigned int) ((Mat*) img)->at(row,col)[channel] << " "; 49 | } 50 | 51 | std::cout << std::endl; 52 | 53 | ; 54 | break; 55 | case EVENT_RBUTTONDOWN: 56 | 57 | // right button sets colour information at click location to white 58 | 59 | std::cout << "Colour information at image location (" << x << "," << y 60 | << ") set to white."; 61 | 62 | for(int channel = 0; channel < 3; channel++){ // three channels (B,G,R) 63 | ((Mat*) img)->at(row,col)[channel] = 255; 64 | } 65 | std::cout << std::endl; 66 | 67 | ; 68 | break; 69 | } 70 | } 71 | 72 | /******************************************************************************/ 73 | 74 | int main( int argc, char** argv ) 75 | { 76 | 77 | Mat img; // image object 78 | unsigned char key; 79 | bool keepProcessing = true; 80 | 81 | const string windowName = "OPENCV: colour query"; // window name 82 | 83 | // check that command line arguments are provided and image reads in OK 84 | 85 | if ((argc == 2) && !(img = imread( argv[1], IMREAD_COLOR)).empty()) 86 | { 87 | // create window object 88 | 89 | namedWindow(windowName, 0 ); 90 | 91 | // set function to be executed everytime the mouse is clicked/moved 92 | // (note: this uses the older cvXXX function naming style from the 93 | // OpenCV C interface) 94 | 95 | setMouseCallback("OPENCV: colour query", colourQueryMouseCallBack, &img); 96 | 97 | 98 | // print out some helpful information about the image 99 | 100 | std::cout << "Image : (width x height) = (" << img.cols << " x " 101 | << img.rows << ")" << std::endl; 102 | std::cout << " Colour channels = " << img.channels() << std::endl; 103 | 104 | // loop so that events are processed and the image constantly redisplayed 105 | 106 | while (keepProcessing){ 107 | 108 | // display image in window 109 | 110 | imshow(windowName, img ); 111 | 112 | // start event processing loop (very important,in fact 
essential for GUI) 113 | 114 | key=waitKey(20); 115 | 116 | // get any keyboard input given by the user and process it 117 | 118 | if (key == 'x'){ 119 | 120 | // if user presses "x" then exit 121 | 122 | std::cout << "Keyboard exit requested : exiting now - bye!" << std::endl; 123 | keepProcessing = false; 124 | } 125 | 126 | } 127 | 128 | // all OK : main returns 0 129 | 130 | return 0; 131 | } 132 | 133 | // not OK : main returns -1 134 | 135 | return -1; 136 | } 137 | 138 | /******************************************************************************/ 139 | -------------------------------------------------------------------------------- /generic_interface.cpp: -------------------------------------------------------------------------------- 1 | // Example : generic interface to image / video / camera 2 | // usage: prog { | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | /******************************************************************************/ 22 | // setup the cameras properly based on OS platform 23 | 24 | // 0 in linux gives first camera for v4l 25 | //-1 in windows gives first device or user dialog selection 26 | 27 | #ifdef linux 28 | #define CAMERA_INDEX 1 29 | #else 30 | #define CAMERA_INDEX -1 31 | #endif 32 | 33 | /******************************************************************************/ 34 | 35 | int main( int argc, char** argv ) 36 | { 37 | 38 | Mat img; // image object 39 | 
VideoCapture cap; // capture object 40 | 41 | const string windowName = "Cranfield University: "; // window name 42 | 43 | bool keepProcessing = true; // loop control flag 44 | unsigned char key; // user input 45 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 46 | // 40 ms equates to 1000ms/25fps = 40ms per frame 47 | 48 | // if command line arguments are provided try to read image/video_name 49 | // otherwise default to capture from attached H/W camera 50 | 51 | if( 52 | ( argc == 2 && (!(img = imread( argv[1], IMREAD_COLOR)).empty()))|| 53 | ( argc == 2 && (cap.open(argv[1]) == true )) || 54 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 55 | ) 56 | { 57 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 58 | 59 | namedWindow(windowName, 0); 60 | 61 | // start main loop 62 | 63 | while (keepProcessing) { 64 | 65 | int64 timeStart = getTickCount(); // get time at start of loop 66 | 67 | // if capture object in use (i.e. video/camera) 68 | // get image from capture object 69 | 70 | if (cap.isOpened()) { 71 | 72 | cap >> img; 73 | if(img.empty()){ 74 | if (argc == 2){ 75 | std::cerr << "End of video file reached" << std::endl; 76 | } else { 77 | std::cerr << "ERROR: cannot get next fram from camera" 78 | << std::endl; 79 | } 80 | exit(0); 81 | } 82 | 83 | } else { 84 | 85 | // if not a capture object set event delay to zero so it waits 86 | // indefinitely (as single image file, no need to loop) 87 | 88 | EVENT_LOOP_DELAY = 0; 89 | } 90 | 91 | // *** 92 | 93 | // *** DO ANY PROCESSING PRIOR TO DISPLAY HERE *** 94 | 95 | // *** 96 | 97 | // display image in window 98 | 99 | imshow(windowName, img); 100 | 101 | // start event processing loop (very important,in fact essential for GUI) 102 | // 40 ms roughly equates to 1000ms/25fps = 40ms per frame 103 | 104 | // here we take account of processing time for the loop by subtracting the time 105 | // taken in ms. 
from this (1000ms/25fps = 40ms per frame) value whilst ensuring 106 | // we get a +ve wait time 107 | 108 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 109 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 110 | 111 | if (key == 'x'){ 112 | 113 | // if user presses "x" then exit 114 | 115 | std::cout << "Keyboard exit requested : exiting now - bye!" 116 | << std::endl; 117 | keepProcessing = false; 118 | } 119 | } 120 | 121 | // the camera will be deinitialized automatically in VideoCapture destructor 122 | 123 | // all OK : main returns 0 124 | 125 | return 0; 126 | } 127 | 128 | // not OK : main returns -1 129 | 130 | return -1; 131 | } 132 | /******************************************************************************/ 133 | -------------------------------------------------------------------------------- /meanshift_segmentation.cpp: -------------------------------------------------------------------------------- 1 | // Example : Mean Shift Segmentation of image / video / camera 2 | // usage: prog { | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | /******************************************************************************/ 22 | // setup the cameras properly based on OS platform 23 | 24 | // 0 in linux gives first camera for v4l 25 | //-1 in windows gives first device or user dialog selection 26 | 27 | #ifdef linux 28 | #define CAMERA_INDEX 0 29 | #else 30 | #define CAMERA_INDEX 
-1 31 | #endif 32 | 33 | /******************************************************************************/ 34 | 35 | int main( int argc, char** argv ) 36 | { 37 | Mat img, res; // image objects 38 | VideoCapture cap; // capture object 39 | 40 | const string windowName = "Mean Shift Segmentation"; // window name 41 | 42 | bool keepProcessing = true; // loop control flag 43 | unsigned char key; // user input 44 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 45 | // 40 ms equates to 1000ms/25fps = 40ms per frame 46 | 47 | int spatialRad = 10; // mean shift parameters 48 | int colorRad = 10; 49 | int maxPyrLevel = 2; 50 | 51 | // if command line arguments are provided try to read image/video_name 52 | // otherwise default to capture from attached H/W camera 53 | 54 | if( 55 | ( argc == 2 && (!(img = imread( argv[1], IMREAD_COLOR)).empty()))|| 56 | ( argc == 2 && (cap.open(argv[1]) == true )) || 57 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 58 | ) 59 | { 60 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 61 | 62 | namedWindow(windowName, 0); 63 | 64 | createTrackbar( "spatialRad", windowName, &spatialRad, 80); 65 | createTrackbar( "colorRad", windowName, &colorRad, 60); 66 | createTrackbar( "maxPyrLevel", windowName, &maxPyrLevel, 5); 67 | 68 | 69 | // start main loop 70 | 71 | while (keepProcessing) { 72 | 73 | int64 timeStart = getTickCount(); // get time at start of loop 74 | 75 | // if capture object in use (i.e. 
video/camera) 76 | // get image from capture object 77 | 78 | if (cap.isOpened()) { 79 | 80 | cap >> img; 81 | if(img.empty()){ 82 | if (argc == 2){ 83 | std::cerr << "End of video file reached" << std::endl; 84 | } else { 85 | std::cerr << "ERROR: cannot get next fram from camera" 86 | << std::endl; 87 | } 88 | exit(0); 89 | } 90 | 91 | } else { 92 | 93 | // if not a capture object set event delay to zero so it waits 94 | // indefinitely (as single image file, no need to loop) 95 | 96 | EVENT_LOOP_DELAY = 0; 97 | } 98 | 99 | // *** 100 | 101 | pyrMeanShiftFiltering( img, res, spatialRad, colorRad, maxPyrLevel ); 102 | 103 | // *** 104 | 105 | // display image in window 106 | 107 | imshow(windowName, res); 108 | 109 | // start event processing loop (very important,in fact essential for GUI) 110 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 111 | 112 | // here we take account of processing time for the loop by subtracting the time 113 | // taken in ms. from this (1000ms/25fps = 40ms per frame) value whilst ensuring 114 | // we get a +ve wait time 115 | 116 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 117 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 118 | 119 | if (key == 'x'){ 120 | 121 | // if user presses "x" then exit 122 | 123 | std::cout << "Keyboard exit requested : exiting now - bye!" 
124 | << std::endl; 125 | keepProcessing = false; 126 | } 127 | } 128 | 129 | // the camera will be deinitialized automatically in VideoCapture destructor 130 | 131 | // all OK : main returns 0 132 | 133 | return 0; 134 | } 135 | 136 | // not OK : main returns -1 137 | 138 | return -1; 139 | } 140 | /******************************************************************************/ 141 | -------------------------------------------------------------------------------- /bg_fg_mog.cpp: -------------------------------------------------------------------------------- 1 | // Example : background / foreground separation of video / camera 2 | // usage: prog {} 3 | 4 | // Author : Toby Breckon, toby.breckon@cranfield.ac.uk 5 | 6 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 7 | 8 | // Copyright (c) 2012 School of Engineering, Cranfield University 9 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 10 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 11 | 12 | // #include // open cv general include file 13 | // #include // open cv GUI include file 14 | 15 | // #include // OpenCV BG/FG specific header 16 | 17 | // #include // standard C++ I/O 18 | // #include // includes max() 19 | 20 | // using namespace cv; // OpenCV API is in the C++ "cv" namespace 21 | 22 | #include "opencv2/core.hpp" 23 | #include "opencv2/imgcodecs.hpp" 24 | #include "opencv2/highgui.hpp" 25 | #include "opencv2/imgproc.hpp" 26 | #include "opencv2/video/background_segm.hpp" 27 | #include 28 | 29 | 30 | using namespace cv; 31 | using namespace std; 32 | 33 | /******************************************************************************/ 34 | 35 | int main( int argc, char** argv ) 36 | { 37 | 38 | Mat img, fg, fg_msk, bg; // image objects 39 | VideoCapture cap; // capture object 40 | 41 | const string windowName = "Live Image"; // window name 42 | const string windowNameF = "Foreground"; // window name 43 | const string windowNameB = "Background"; // 
window name 44 | 45 | bool keepProcessing = true; // loop control flag 46 | unsigned char key; // user input 47 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 48 | // 40 ms equates to 1000ms/25fps = 40ms per frame 49 | 50 | // if command line arguments are provided try to read image/video_name 51 | // otherwise default to capture from attached H/W camera 52 | 53 | if(( argc == 2 && (cap.open(argv[1]) == true )) || 54 | ( argc != 2 && (cap.open(0) == true))) 55 | { 56 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 57 | 58 | namedWindow(windowName, 0); 59 | namedWindow(windowNameF, 0); 60 | namedWindow(windowNameB, 0); 61 | 62 | // create background / foreground Mixture of Gaussian (MoG) model 63 | 64 | Ptr MoG = createBackgroundSubtractorMOG2(); 65 | 66 | // start main loop 67 | 68 | while (keepProcessing) { 69 | 70 | int64 timeStart = getTickCount(); // get time at start of loop 71 | 72 | // if capture object in use (i.e. video/camera) 73 | // get image from capture object 74 | 75 | if (cap.isOpened()) { 76 | 77 | cap >> img; 78 | if(img.empty()){ 79 | if (argc == 2){ 80 | std::cerr << "End of video file reached" << std::endl; 81 | } else { 82 | std::cerr << "ERROR: cannot get next fram from camera" 83 | << std::endl; 84 | } 85 | exit(0); 86 | } 87 | 88 | } else { 89 | 90 | // if not a capture object set event delay to zero so it waits 91 | // indefinitely (as single image file, no need to loop) 92 | 93 | EVENT_LOOP_DELAY = 0; 94 | } 95 | 96 | // update background model and get background/foreground 97 | 98 | MoG->apply(img, fg_msk, 0.001); 99 | MoG->getBackgroundImage(bg); 100 | 101 | fg = Scalar::all(0); 102 | img.copyTo(fg, fg_msk); 103 | 104 | // display image in window 105 | 106 | imshow(windowName, img); 107 | imshow(windowNameF, fg); 108 | if (!bg.empty()) 109 | { 110 | imshow(windowNameB, bg); 111 | } 112 | 113 | // start event processing loop (very important,in fact essential for GUI) 114 | // 40 ms roughly equates to 
1000ms/25fps = 4ms per frame 115 | 116 | // here we take account of processing time for the loop by subtracting the time 117 | // taken in ms. from this (1000ms/25fps = 40ms per frame) value whilst ensuring 118 | // we get a +ve wait time 119 | 120 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 121 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 122 | 123 | if (key == 'x'){ 124 | 125 | // if user presses "x" then exit 126 | 127 | std::cout << "Keyboard exit requested : exiting now - bye!" 128 | << std::endl; 129 | keepProcessing = false; 130 | } 131 | } 132 | 133 | // the camera will be deinitialized automatically in VideoCapture destructor 134 | 135 | // all OK : main returns 0 136 | 137 | return 0; 138 | } 139 | 140 | // not OK : main returns -1 141 | 142 | return -1; 143 | } 144 | /******************************************************************************/ 145 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required (VERSION 2.6) 2 | 3 | # linux specific stuff 4 | 5 | IF ( UNIX ) 6 | set( CMAKE_CXX_FLAGS "-O3 -Wall -march=native ${CMAKE_CXX_FLAGS}" ) 7 | set( CMAKE_PREFIX_PATH "/opt/opencv/lib64/cmake/opencv4/" ) 8 | set( OPENMP_LINKER_FLAGS "-lgomp") 9 | set_property(GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS TRUE) 10 | MESSAGE( "LINUX CONFIG" ) 11 | ENDIF ( UNIX ) 12 | 13 | # windows (inc. 
# windows (inc. 64-bit) specific stuff: point CMake at a default OpenCV build tree

IF ( WIN32 )
set( CMAKE_PREFIX_PATH "C:/OpenCV/build" )
set( OpenCV_DIR "C:/OpenCV/build" )
MESSAGE( "WINDOWS CONFIG" )
ENDIF ( WIN32 )

# locate OpenCV once for the whole example set and report what was found

project(cpp-examples-ipcv)
find_package( OpenCV REQUIRED )
MESSAGE( "${OpenCV_INCLUDE_DIRS}" )
MESSAGE( "${OpenCV_LIBS}" )

# NOTE(review): each example below repeats project() before its target; CMake
# normally expects a single project() per directory — presumably intentional
# for this teaching repo so each example can be referenced by name; verify.

# one stand-alone executable per example source file, each linked against OpenCV

project(colourquery)
add_executable(colourquery colourquery.cpp)
target_link_libraries( colourquery ${OpenCV_LIBS} )

project(displayimage)
add_executable(displayimage displayimage.cpp)
target_link_libraries( displayimage ${OpenCV_LIBS} )

project(liveimage)
add_executable(liveimage liveimage.cpp)
target_link_libraries( liveimage ${OpenCV_LIBS} )

project(livevideo)
add_executable(livevideo livevideo.cpp)
target_link_libraries( livevideo ${OpenCV_LIBS} )

project(saveimage)
add_executable(saveimage saveimage.cpp)
target_link_libraries( saveimage ${OpenCV_LIBS} )

project(smoothimage)
add_executable(smoothimage smoothimage.cpp)
target_link_libraries( smoothimage ${OpenCV_LIBS} )

project(writevideo)
add_executable(writevideo writevideo.cpp)
target_link_libraries( writevideo ${OpenCV_LIBS} )

# NOTE(review): duplicate find_package() — OpenCV was already located above;
# harmless (result is cached) but redundant

find_package( OpenCV REQUIRED )

project(bg_fg_mog)
add_executable(bg_fg_mog bg_fg_mog.cpp)
target_link_libraries( bg_fg_mog ${OpenCV_LIBS} )

project(butterworth_lowpass)
add_executable(butterworth_lowpass butterworth_lowpass.cpp)
target_link_libraries( butterworth_lowpass ${OpenCV_LIBS} )

project(fourier)
add_executable(fourier fourier.cpp)
target_link_libraries( fourier ${OpenCV_LIBS} )

project(generic_interface)
add_executable(generic_interface generic_interface.cpp)
target_link_libraries( generic_interface ${OpenCV_LIBS} )

project(generic_recognition_interface)
add_executable(generic_recognition_interface generic_recognition_interface.cpp)
target_link_libraries( generic_recognition_interface ${OpenCV_LIBS} )

project(generic_selection_interface)
add_executable(generic_selection_interface generic_selection_interface.cpp)
target_link_libraries( generic_selection_interface ${OpenCV_LIBS} )

project(harris)
add_executable(harris harris.cpp)
target_link_libraries( harris ${OpenCV_LIBS} )

project(histogram_based_recognition_colour)
add_executable(histogram_based_recognition_colour histogram_based_recognition_colour.cpp)
target_link_libraries( histogram_based_recognition_colour ${OpenCV_LIBS} )

project(histogram_based_recognition)
add_executable(histogram_based_recognition histogram_based_recognition.cpp)
target_link_libraries( histogram_based_recognition ${OpenCV_LIBS} )

project(meanshift_segmentation)
add_executable(meanshift_segmentation meanshift_segmentation.cpp)
target_link_libraries( meanshift_segmentation ${OpenCV_LIBS} )

project(polygons)
add_executable(polygons polygons.cpp)
target_link_libraries( polygons ${OpenCV_LIBS} )

# the non-local-means examples use OpenMP: compile with -fopenmp and link the
# GNU OpenMP runtime set up in OPENMP_LINKER_FLAGS (UNIX branch above)

project(nlm)
add_executable(nlm nlm.cpp)
set_target_properties(nlm PROPERTIES COMPILE_FLAGS "-fopenmp")
target_link_libraries( nlm ${OpenCV_LIBS} ${OPENMP_LINKER_FLAGS})

project(nlm2)
add_executable(nlm2 nlm2.cpp)
set_target_properties(nlm2 PROPERTIES COMPILE_FLAGS "-fopenmp")
target_link_libraries( nlm2 ${OpenCV_LIBS} ${OPENMP_LINKER_FLAGS})

project(mean_filter)
add_executable(mean_filter mean_filter.cpp)
target_link_libraries( mean_filter ${OpenCV_LIBS} )

project(bilateral_filter)
add_executable(bilateral_filter bilateral_filter.cpp)
target_link_libraries( bilateral_filter ${OpenCV_LIBS} )

project(optical_flow_fback)
add_executable(optical_flow_fback optical_flow_fback.cpp)
target_link_libraries( optical_flow_fback ${OpenCV_LIBS} ) 122 | 123 | project(feature_point_matching) 124 | add_executable(feature_point_matching feature_point_matching.cpp) 125 | target_link_libraries( feature_point_matching ${OpenCV_LIBS} ) 126 | 127 | # project(opencv_c_from_cpp) 128 | # add_executable(opencv_c_from_cpp opencv_c_from_cpp.cpp) 129 | # set_target_properties(opencv_c_from_cpp PROPERTIES COMPILE_FLAGS "-fpermissive") 130 | # target_link_libraries( opencv_c_from_cpp ${OpenCV_LIBS} ) 131 | -------------------------------------------------------------------------------- /mean_filter.cpp: -------------------------------------------------------------------------------- 1 | // Example : Mean Filtering of image / video / camera 2 | // usage: prog { | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | /******************************************************************************/ 22 | // setup the cameras properly based on OS platform 23 | 24 | // 0 in linux gives first camera for v4l 25 | //-1 in windows gives first device or user dialog selection 26 | 27 | #ifdef linux 28 | #define CAMERA_INDEX 0 29 | #else 30 | #define CAMERA_INDEX -1 31 | #endif 32 | 33 | /******************************************************************************/ 34 | 35 | int main( int argc, char** argv ) 36 | { 37 | Mat img, res; // image objects 38 | VideoCapture cap; // capture object 39 | 40 | const string windowName = 
"Input"; // window name 41 | const string windowName2 = "Mean Filtered Output"; // window name 42 | 43 | bool keepProcessing = true; // loop control flag 44 | unsigned char key; // user input 45 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 46 | // 40 ms equates to 1000ms/25fps = 40ms per frame 47 | 48 | int width = 3; // mean filtering parameters 49 | int height = 3; 50 | 51 | // if command line arguments are provided try to read image/video_name 52 | // otherwise default to capture from attached H/W camera 53 | 54 | if( 55 | ( argc == 2 && (!(img = imread( argv[1], IMREAD_COLOR)).empty()))|| 56 | ( argc == 2 && (cap.open(argv[1]) == true )) || 57 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 58 | ) 59 | { 60 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 61 | 62 | namedWindow(windowName, 0); 63 | namedWindow(windowName2, 0); 64 | 65 | createTrackbar( "N - width", windowName2, &width, 25); 66 | createTrackbar( "M - heght", windowName2, &height, 25); 67 | 68 | // start main loop 69 | 70 | while (keepProcessing) { 71 | 72 | int64 timeStart = getTickCount(); // get time at start of loop 73 | 74 | // if capture object in use (i.e. 
video/camera) 75 | // get image from capture object 76 | 77 | if (cap.isOpened()) { 78 | 79 | cap >> img; 80 | if(img.empty()){ 81 | if (argc == 2){ 82 | std::cerr << "End of video file reached" << std::endl; 83 | } else { 84 | std::cerr << "ERROR: cannot get next fram from camera" 85 | << std::endl; 86 | } 87 | exit(0); 88 | } 89 | 90 | } else { 91 | 92 | // if not a capture object set event delay to zero so it waits 93 | // indefinitely (as single image file, no need to loop) 94 | 95 | EVENT_LOOP_DELAY = 0; 96 | } 97 | 98 | // *** 99 | 100 | // by default the blur() operator in OpenCV (2.4 onwards) is a Mean 101 | // blurring operator 102 | 103 | blur(img, res, Size(width,height), Point(-1,-1), BORDER_DEFAULT ); 104 | 105 | // *** 106 | 107 | // display image in window 108 | 109 | imshow(windowName, img); 110 | imshow(windowName2, res); 111 | 112 | // start event processing loop (very important,in fact essential for GUI) 113 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 114 | 115 | // here we take account of processing time for the loop by subtracting the time 116 | // taken in ms. from this (1000ms/25fps = 40ms per frame) value whilst ensuring 117 | // we get a +ve wait time 118 | 119 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 120 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 121 | 122 | if (key == 'x'){ 123 | 124 | // if user presses "x" then exit 125 | 126 | std::cout << "Keyboard exit requested : exiting now - bye!" 
127 | << std::endl; 128 | keepProcessing = false; 129 | } 130 | } 131 | 132 | // the camera will be deinitialized automatically in VideoCapture destructor 133 | 134 | // all OK : main returns 0 135 | 136 | return 0; 137 | } 138 | 139 | // not OK : main returns -1 140 | 141 | return -1; 142 | } 143 | /******************************************************************************/ 144 | -------------------------------------------------------------------------------- /optical_flow_fback.cpp: -------------------------------------------------------------------------------- 1 | // Example : optical flow demo (Farnback) 2 | // usage: prog { | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2012 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | #include "opencv2/optflow.hpp" 14 | 15 | #include // standard C++ I/O 16 | #include // standard C++ I/O 17 | #include // includes max() 18 | 19 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 20 | using namespace std; 21 | 22 | /******************************************************************************/ 23 | // setup the cameras properly based on OS platform 24 | 25 | // 0 in linux gives first camera for v4l 26 | //-1 in windows gives first device or user dialog selection 27 | 28 | #ifdef linux 29 | #define CAMERA_INDEX 1 30 | #else 31 | #define CAMERA_INDEX -1 32 | #endif 33 | 34 | /******************************************************************************/ 35 | static void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step, 36 | double, const Scalar& color) 37 | { 38 | for(int y = 0; y < cflowmap.rows; y += step) 39 | for(int x = 0; x < cflowmap.cols; x += step) 40 | { 41 | const Point2f& fxy = flow.at(y, 
x); 42 | line(cflowmap, Point(x,y), Point(cvRound(x+fxy.x), cvRound(y+fxy.y)), 43 | color); 44 | circle(cflowmap, Point(x,y), 2, color, -1); 45 | } 46 | } 47 | 48 | /******************************************************************************/ 49 | 50 | int main( int argc, char** argv ) 51 | { 52 | 53 | Mat img, gray, prevgray, flow, cflow; // image objects 54 | VideoCapture cap; // capture object 55 | 56 | const string windowName = "Optical Flow"; // window name 57 | 58 | bool keepProcessing = true; // loop control flag 59 | unsigned char key; // user input 60 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 61 | // 40 ms equates to 1000ms/25fps = 40ms per frame 62 | 63 | // if command line arguments are provided try to read image/video_name 64 | // otherwise default to capture from attached H/W camera 65 | 66 | if( 67 | ( argc == 2 && (!(img = imread( argv[1], IMREAD_COLOR)).empty()))|| 68 | ( argc == 2 && (cap.open(argv[1]) == true )) || 69 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 70 | ) 71 | { 72 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 73 | 74 | namedWindow(windowName, 0); 75 | 76 | // start main loop 77 | 78 | while (keepProcessing) { 79 | 80 | int64 timeStart = getTickCount(); // get time at start of loop 81 | 82 | // if capture object in use (i.e. 
video/camera) 83 | // get image from capture object 84 | 85 | if (cap.isOpened()) { 86 | 87 | cap >> img; 88 | if(img.empty()){ 89 | if (argc == 2){ 90 | std::cerr << "End of video file reached" << std::endl; 91 | } else { 92 | std::cerr << "ERROR: cannot get next fram from camera" 93 | << std::endl; 94 | } 95 | exit(0); 96 | } 97 | 98 | } else { 99 | 100 | // if not a capture object set event delay to zero so it waits 101 | // indefinitely (as single image file, no need to loop) 102 | 103 | EVENT_LOOP_DELAY = 0; 104 | } 105 | 106 | // convert to grayscale 107 | 108 | cvtColor(img, gray, COLOR_BGR2GRAY); 109 | 110 | // if we have a previous image 111 | 112 | if(!prevgray.empty()) 113 | { 114 | calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0); 115 | cvtColor(prevgray, cflow, COLOR_GRAY2BGR); 116 | drawOptFlowMap(flow, cflow, 16, 1.5, CV_RGB(0, 255, 0)); 117 | 118 | // display image in window 119 | 120 | imshow(windowName, cflow); 121 | } 122 | 123 | // start event processing loop (very important,in fact essential for GUI) 124 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 125 | 126 | // here we take account of processing time for the loop by subtracting the time 127 | // taken in ms. from this (1000ms/25fps = 40ms per frame) value whilst ensuring 128 | // we get a +ve wait time 129 | 130 | 131 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 132 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 133 | 134 | 135 | if (key == 'x'){ 136 | 137 | // if user presses "x" then exit 138 | 139 | std::cout << "Keyboard exit requested : exiting now - bye!" 
140 | << std::endl; 141 | keepProcessing = false; 142 | } 143 | 144 | // copy img to next image 145 | 146 | std::swap(prevgray, gray); 147 | } 148 | 149 | // the camera will be deinitialized automatically in VideoCapture destructor 150 | 151 | // all OK : main returns 0 152 | 153 | return 0; 154 | } 155 | 156 | // not OK : main returns -1 157 | 158 | return -1; 159 | } 160 | /******************************************************************************/ 161 | -------------------------------------------------------------------------------- /bilateral_filter.cpp: -------------------------------------------------------------------------------- 1 | // Example : Bilateral Filtering of image / video / camera 2 | // usage: prog { | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2012 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | /******************************************************************************/ 22 | // setup the cameras properly based on OS platform 23 | 24 | // 0 in linux gives first camera for v4l 25 | //-1 in windows gives first device or user dialog selection 26 | 27 | #ifdef linux 28 | #define CAMERA_INDEX 0 29 | #else 30 | #define CAMERA_INDEX -1 31 | #endif 32 | 33 | /******************************************************************************/ 34 | 35 | int main( int argc, char** argv ) 36 | { 37 | Mat img, res; // image objects 38 | VideoCapture cap; // capture object 39 | 40 | const string windowName = "Input"; // window name 41 | const 
string windowName2 = "Bilateral Filtered Output"; // window name 42 | 43 | bool keepProcessing = true; // loop control flag 44 | char key; // user input 45 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 46 | // 40 ms equates to 1000ms/25fps = 40ms per frame 47 | 48 | int d = 5; // Bilateral filtering parameters 49 | int sigmaS = 50; 50 | int sigmaR = 50; 51 | 52 | // if command line arguments are provided try to read image/video_name 53 | // otherwise default to capture from attached H/W camera 54 | 55 | if( 56 | ( argc == 2 && (!(img = imread( argv[1], IMREAD_COLOR)).empty()))|| 57 | ( argc == 2 && (cap.open(argv[1]) == true )) || 58 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 59 | ) 60 | { 61 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 62 | 63 | namedWindow(windowName, 0); 64 | namedWindow(windowName2, 0); 65 | 66 | createTrackbar( "d - pixel neighbourhood", windowName2, &d, 25); 67 | createTrackbar( "sigma S", windowName2, &sigmaS, 250); 68 | createTrackbar( "sigma R", windowName2, &sigmaR, 250); 69 | 70 | // start main loop 71 | 72 | while (keepProcessing) { 73 | 74 | int64 timeStart = getTickCount(); // get time at start of loop 75 | 76 | // if capture object in use (i.e. video/camera) 77 | // get image from capture object 78 | 79 | if (cap.isOpened()) { 80 | 81 | cap >> img; 82 | if(img.empty()){ 83 | if (argc == 2){ 84 | std::cerr << "End of video file reached" << std::endl; 85 | } else { 86 | std::cerr << "ERROR: cannot get next fram from camera" 87 | << std::endl; 88 | } 89 | exit(0); 90 | } 91 | 92 | } else { 93 | 94 | // if not a capture object set event delay to zero so it waits 95 | // indefinitely (as single image file, no need to loop) 96 | 97 | EVENT_LOOP_DELAY = 0; 98 | } 99 | 100 | // *** 101 | 102 | // d – Diameter of each pixel neighborhood that is used during filtering. 103 | // If it is non-positive, it is computed from sigmaSpace . 104 | 105 | // sigmaR – Filter sigma in the color space. 
A larger value of the parameter means 106 | // that farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed 107 | // together, resulting in larger areas of semi-equal color. 108 | 109 | // sigmaS – Filter sigma in the coordinate space. A larger value of the parameter 110 | // means that farther pixels will influence each other as long as their colors are 111 | // close enough (see sigmaColor ). When d>0 , it specifies the neighborhood 112 | // size regardless of sigmaSpace . Otherwise, d is proportional to sigmaSpace . 113 | 114 | bilateralFilter(img, res, d, (double) sigmaR, (double) sigmaS, BORDER_DEFAULT ); 115 | 116 | // *** 117 | 118 | // display image in window 119 | 120 | imshow(windowName, img); 121 | imshow(windowName2, res); 122 | 123 | // start event processing loop (very important,in fact essential for GUI) 124 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 125 | 126 | // here we take account of processing time for the loop by subtracting the time 127 | // taken in ms. from this (1000ms/25fps = 40ms per frame) value whilst ensuring 128 | // we get a +ve wait time 129 | 130 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 131 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 132 | 133 | if (key == 'x'){ 134 | 135 | // if user presses "x" then exit 136 | 137 | std::cout << "Keyboard exit requested : exiting now - bye!" 
138 | << std::endl; 139 | keepProcessing = false; 140 | } 141 | } 142 | 143 | // the camera will be deinitialized automatically in VideoCapture destructor 144 | 145 | // all OK : main returns 0 146 | 147 | return 0; 148 | } 149 | 150 | // not OK : main returns -1 151 | 152 | return -1; 153 | } 154 | /******************************************************************************/ 155 | -------------------------------------------------------------------------------- /harris.cpp: -------------------------------------------------------------------------------- 1 | // Example : harris feature point detection 2 | // usage: prog { | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | /******************************************************************************/ 22 | // setup the cameras properly based on OS platform 23 | 24 | // 0 in linux gives first camera for v4l 25 | //-1 in windows gives first device or user dialog selection 26 | 27 | #ifdef linux 28 | #define CAMERA_INDEX 1 29 | #else 30 | #define CAMERA_INDEX -1 31 | #endif 32 | 33 | /******************************************************************************/ 34 | 35 | int main( int argc, char** argv ) 36 | { 37 | 38 | Mat img, gray, harris; // image object(s) 39 | 40 | VideoCapture cap; // capture object 41 | 42 | const string windowName = "Input Image"; // window name 43 | const string windowName2 = "Harris Feature Points"; // window name 44 | 45 | bool 
keepProcessing = true; // loop control flag 46 | int key; // user input 47 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 48 | // 40 ms equates to 1000ms/25fps = 40ms per frame 49 | 50 | vector corners; // harris corners 51 | int N = 3, k = 1; // Harris parameters 52 | 53 | // if command line arguments are provided try to read image/video_name 54 | // otherwise default to capture from attached H/W camera 55 | 56 | if( 57 | ( argc == 2 && (!(img = imread( argv[1], IMREAD_COLOR)).empty()))|| 58 | ( argc == 2 && (cap.open(argv[1]) == true )) || 59 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 60 | ) 61 | { 62 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 63 | 64 | namedWindow(windowName, 0); 65 | namedWindow(windowName2, 0); 66 | createTrackbar("N", windowName2, &N, 25); 67 | createTrackbar("k (* 0.01)", windowName2, &k, 100); 68 | 69 | // start main loop 70 | 71 | while (keepProcessing) 72 | { 73 | int64 timeStart = getTickCount(); // get time at start of loop 74 | 75 | // if capture object in use (i.e. 
video/camera) 76 | // get image from capture object 77 | 78 | if (cap.isOpened()) 79 | { 80 | 81 | cap >> img; 82 | if(img.empty()) 83 | { 84 | if (argc == 2) 85 | { 86 | std::cerr << "End of video file reached" << std::endl; 87 | } 88 | else 89 | { 90 | std::cerr << "ERROR: cannot get next fram from camera" 91 | << std::endl; 92 | } 93 | exit(0); 94 | } 95 | 96 | } 97 | else 98 | { 99 | 100 | // if not a capture object set event delay to zero so it waits 101 | // indefinitely (as single image file, no need to loop) 102 | 103 | EVENT_LOOP_DELAY = 0; 104 | } 105 | 106 | // *** 107 | 108 | // convert input to grayscale 109 | 110 | cvtColor(img, gray, COLOR_BGR2GRAY); 111 | 112 | // do Harris feature point detection (setting = true in goodFeaturesToTrack()) 113 | // (returning up to 200 corners or feature points with a minimum pixel distance of 5 apart 114 | 115 | corners.clear(); 116 | goodFeaturesToTrack(gray, corners, 2000, 0.01, 2, Mat(), N, true, (k * 0.01)); 117 | 118 | // display points 119 | 120 | harris = img.clone(); 121 | 122 | for (unsigned int i=0; i | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | /******************************************************************************/ 22 | // setup the cameras properly based on OS platform 23 | 24 | // 0 in linux gives first camera for v4l 25 | //-1 in windows gives first device or user dialog selection 26 | 27 | #ifdef linux 28 | #define CAMERA_INDEX 1 29 | 
#else
#define CAMERA_INDEX -1
#endif

/******************************************************************************/

// callback function for the mouse: lets the user drag out a rectangular
// region of the image; the in-progress / final selection is stored in the
// file-scope globals below (acknowledgement: opencv camsiftdemo.cpp)

static bool selectObject = false;        // true while the left button is held down
static Point origin;                     // corner where the drag started
static Rect selection;                   // current (or final) selected rectangle
static bool selectionComplete = false;   // true once a non-empty drag has finished

// event  - OpenCV mouse event code (EVENT_LBUTTONDOWN / EVENT_LBUTTONUP / move)
// x, y   - current mouse position in image co-ordinates
// image  - user data: pointer to the cv::Mat being displayed (used only for
//          clipping the selection to the image bounds)

void onMouseSelect( int event, int x, int y, int, void* image)
{
if( selectObject )
{
// while dragging: rebuild the rectangle from the drag origin and the
// current position (handles dragging in any direction) ...

selection.x = MIN(x, origin.x);
selection.y = MIN(y, origin.y);
selection.width = std::abs(x - origin.x);
selection.height = std::abs(y - origin.y);

// ... then clip it to the image bounds (Rect intersection)

selection &= Rect(0, 0, ((Mat *) image)->cols, ((Mat *) image)->rows);
}

switch( event )
{
case EVENT_LBUTTONDOWN:

// start a new drag: remember the origin, reset the selection

origin = Point(x,y);
selection = Rect(x,y,0,0);
selectObject = true;
break;
case EVENT_LBUTTONUP:

// end the drag: only flag completion for a non-degenerate rectangle

selectObject = false;
if( selection.width > 0 && selection.height > 0 )
selectionComplete = true;
break;
}
}

/******************************************************************************/

int main( int argc, char** argv )
{

Mat img, roi, selected; // image object
VideoCapture cap; // capture object

const string windowName = "Live Video Input"; // window name
const string windowName2 = "Selected Region / Object"; // window name

bool keepProcessing = true; // loop control flag
unsigned char key; // user input
int EVENT_LOOP_DELAY = 40; // delay for GUI window
// 40 ms equates to 1000ms/25fps = 40ms per frame

// if command line arguments are provided try to read image/video_name
// otherwise default to capture from attached H/W camera

if(
( argc == 2 && (cap.open(argv[1]) == true )) ||
( argc != 2 &&
(cap.open(CAMERA_INDEX) == true)) 92 | ) 93 | { 94 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 95 | 96 | namedWindow(windowName, 0); 97 | namedWindow(windowName2, 0); 98 | setMouseCallback( windowName, onMouseSelect, &img); 99 | 100 | // start main loop 101 | 102 | while (keepProcessing) { 103 | 104 | int64 timeStart = getTickCount(); // get time at start of loop 105 | 106 | // if capture object in use (i.e. video/camera) 107 | // get image from capture object 108 | 109 | if (cap.isOpened()) { 110 | 111 | cap >> img; 112 | if(img.empty()){ 113 | if (argc == 2){ 114 | std::cerr << "End of video file reached" << std::endl; 115 | } else { 116 | std::cerr << "ERROR: cannot get next frame from camera" 117 | << std::endl; 118 | } 119 | exit(0); 120 | } 121 | 122 | } 123 | 124 | // *** 125 | 126 | // *** DO ANY PROCESSING PRIOR TO DISPLAY HERE *** 127 | 128 | // *** 129 | 130 | 131 | if( selectObject && selection.width > 0 && selection.height > 0 ) 132 | { 133 | roi = img(selection); 134 | bitwise_not(roi, roi); 135 | } else if ( selectionComplete && selection.width > 0 && selection.height > 0 ){ 136 | 137 | selected = roi.clone(); 138 | selectionComplete = false; 139 | 140 | } 141 | 142 | // display image in window 143 | 144 | imshow(windowName, img); 145 | if (!(selected.empty())) 146 | { 147 | imshow(windowName2, selected); 148 | } 149 | 150 | // start event processing loop (very important,in fact essential for GUI) 151 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 152 | 153 | // here we take account of processing time for the loop by subtracting the time 154 | // taken in ms. 
from this (1000ms/25fps = 40ms per frame) value whilst ensuring 155 | // we get a +ve wait time 156 | 157 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 158 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 159 | 160 | if (key == 'x'){ 161 | 162 | // if user presses "x" then exit 163 | 164 | std::cout << "Keyboard exit requested : exiting now - bye!" 165 | << std::endl; 166 | keepProcessing = false; 167 | } 168 | } 169 | 170 | // the camera will be deinitialized automatically in VideoCapture destructor 171 | 172 | // all OK : main returns 0 173 | 174 | return 0; 175 | } 176 | 177 | // not OK : main returns -1 178 | 179 | return -1; 180 | } 181 | /******************************************************************************/ 182 | -------------------------------------------------------------------------------- /fourier.cpp: -------------------------------------------------------------------------------- 1 | // Example: display magnitude image of DFT of input image 2 | // usage: prog { | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2011 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | /******************************************************************************/ 21 | // setup the cameras properly based on OS platform 22 | 23 | // 0 in linux gives first camera for v4l 24 | //-1 in windows gives first device or user dialog selection 25 | 26 | #ifdef linux 27 | #define CAMERA_INDEX 0 28 | #else 29 | #define CAMERA_INDEX -1 30 | #endif 31 | 
/******************************************************************************/ 32 | // Rearrange the quadrants of a Fourier image so that the origin is at 33 | // the image center 34 | 35 | void shiftDFT(Mat& fImage ) 36 | { 37 | Mat tmp, q0, q1, q2, q3; 38 | 39 | // first crop the image, if it has an odd number of rows or columns 40 | 41 | fImage = fImage(Rect(0, 0, fImage.cols & -2, fImage.rows & -2)); 42 | 43 | int cx = fImage.cols/2; 44 | int cy = fImage.rows/2; 45 | 46 | // rearrange the quadrants of Fourier image 47 | // so that the origin is at the image center 48 | 49 | q0 = fImage(Rect(0, 0, cx, cy)); 50 | q1 = fImage(Rect(cx, 0, cx, cy)); 51 | q2 = fImage(Rect(0, cy, cx, cy)); 52 | q3 = fImage(Rect(cx, cy, cx, cy)); 53 | 54 | q0.copyTo(tmp); 55 | q3.copyTo(q0); 56 | tmp.copyTo(q3); 57 | 58 | q1.copyTo(tmp); 59 | q2.copyTo(q1); 60 | tmp.copyTo(q2); 61 | } 62 | 63 | /******************************************************************************/ 64 | // return a floating point spectrum magnitude image scaled for user viewing 65 | // complexImg- input dft (2 channel floating point, Real + Imaginary fourier image) 66 | // rearrange - perform rearrangement of DFT quadrants if true 67 | 68 | // return value - pointer to output spectrum magnitude image scaled for user viewing 69 | 70 | Mat create_spectrum_magnitude_display(Mat& complexImg, bool rearrange) 71 | { 72 | Mat planes[2]; 73 | 74 | // compute magnitude spectrum (N.B. 
for display) 75 | // compute log(1 + sqrt(Re(DFT(img))**2 + Im(DFT(img))**2)) 76 | 77 | split(complexImg, planes); 78 | magnitude(planes[0], planes[1], planes[0]); 79 | 80 | Mat mag = (planes[0]).clone(); 81 | mag += Scalar::all(1); 82 | log(mag, mag); 83 | 84 | if (rearrange) 85 | { 86 | // re-arrange the quaderants 87 | shiftDFT(mag); 88 | } 89 | 90 | normalize(mag, mag, 0, 1, NORM_MINMAX); 91 | 92 | return mag; 93 | 94 | } 95 | 96 | /******************************************************************************/ 97 | 98 | int main( int argc, char** argv ) 99 | { 100 | 101 | Mat img, imgGray; // image object 102 | VideoCapture cap; // capture object 103 | 104 | Mat padded; // fourier image objects and arrays 105 | Mat complexImg; 106 | Mat planes[2], mag; 107 | 108 | int N, M; // fourier image sizes 109 | 110 | const string originalName = "Input Image (grayscale)"; // window name 111 | const string spectrumMagName = "Magnitude Image (log transformed)"; // window name 112 | 113 | bool keepProcessing = true; // loop control flag 114 | unsigned char key; // user input 115 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 116 | // 40 ms equates to 1000ms/25fps = 40ms per frame 117 | 118 | // if command line arguments are provided try to read image/video_name 119 | // otherwise default to capture from attached H/W camera 120 | 121 | if( 122 | ( argc == 2 && (!(img = imread( argv[1], IMREAD_COLOR)).empty()))|| 123 | ( argc == 2 && (cap.open(argv[1]) == true )) || 124 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 125 | ) 126 | { 127 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 128 | 129 | namedWindow(originalName, 0); 130 | namedWindow(spectrumMagName, 0); 131 | 132 | // start main loop 133 | 134 | while (keepProcessing) { 135 | 136 | int64 timeStart = getTickCount(); // get time at start of loop 137 | 138 | // if capture object in use (i.e. 
video/camera) 139 | // get image from capture object 140 | 141 | if (cap.isOpened()) { 142 | 143 | cap >> img; 144 | if(img.empty()){ 145 | if (argc == 2){ 146 | std::cerr << "End of video file reached" << std::endl; 147 | } else { 148 | std::cerr << "ERROR: cannot get next fram from camera" 149 | << std::endl; 150 | } 151 | exit(0); 152 | } 153 | 154 | } else { 155 | 156 | // if not a capture object set event delay to zero so it waits 157 | // indefinitely (as single image file, no need to loop) 158 | 159 | EVENT_LOOP_DELAY = 0; 160 | } 161 | 162 | // *** 163 | 164 | // convert input to grayscale 165 | 166 | cvtColor(img, imgGray, COLOR_BGR2GRAY); 167 | 168 | // setup the DFT images 169 | 170 | M = getOptimalDFTSize( imgGray.rows ); 171 | N = getOptimalDFTSize( imgGray.cols ); 172 | 173 | copyMakeBorder(imgGray, padded, 0, M - imgGray.rows, 0, 174 | N - imgGray.cols, BORDER_CONSTANT, Scalar::all(0)); 175 | planes[0] = Mat_(padded); 176 | planes[1] = Mat::zeros(padded.size(), CV_32F); 177 | 178 | merge(planes, 2, complexImg); 179 | 180 | // do the DFT 181 | 182 | dft(complexImg, complexImg); 183 | 184 | // create magnitude for output 185 | 186 | mag = create_spectrum_magnitude_display(complexImg, true); 187 | 188 | // *** 189 | 190 | // display image in window 191 | 192 | imshow(originalName, imgGray); 193 | imshow(spectrumMagName, mag); 194 | 195 | // start event processing loop (very important,in fact essential for GUI) 196 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 197 | 198 | // here we take account of processing time for the loop by subtracting the time 199 | // taken in ms. from this (1000ms/25fps = 40ms per frame) value whilst ensuring 200 | // we get a +ve wait time 201 | 202 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 203 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 204 | 205 | if (key == 'x'){ 206 | 207 | // if user presses "x" then exit 208 | 209 | std::cout << "Keyboard exit requested : exiting now - bye!" 
210 | << std::endl; 211 | keepProcessing = false; 212 | } 213 | } 214 | 215 | // the camera will be deinitialized automatically in VideoCapture destructor 216 | 217 | // all OK : main returns 0 218 | 219 | return 0; 220 | } 221 | 222 | // not OK : main returns -1 223 | 224 | return -1; 225 | } 226 | /******************************************************************************/ 227 | -------------------------------------------------------------------------------- /opencv_c_from_cpp.cpp: -------------------------------------------------------------------------------- 1 | // Example : example of using OpenCV C code from OpenCV C++ code 2 | // usage: prog { | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | /******************************************************************************/ 22 | // setup the cameras properly based on OS platform 23 | 24 | // 0 in linux gives first camera for v4l 25 | //-1 in windows gives first device or user dialog selection 26 | 27 | #ifdef linux 28 | #define CAMERA_INDEX 0 29 | #else 30 | #define CAMERA_INDEX -1 31 | #endif 32 | 33 | /******************************************************************************/ 34 | 35 | // !!!!!!! EXAMPLE OpenCV C function from c/contraststretch.cc example !!!!!!!!! 
36 | 37 | // function that takes a gray scale image and draws a histogram 38 | // image for it in a pre-allocated image 39 | 40 | void create_histogram_image(IplImage* grayImg, IplImage* histogramImage){ 41 | 42 | CvHistogram *hist = NULL; // pointer to histogram object 43 | float max_value = 0; // max value in histogram 44 | int hist_size = 256; // size of histogram (number of bins) 45 | int bin_w = 0; // initial width to draw bars 46 | float range_0[]={0,256}; 47 | float* ranges[] = { range_0 }; 48 | 49 | hist = cvCreateHist(1, &hist_size, CV_HIST_ARRAY, ranges, 1); 50 | 51 | cvCalcHist( &grayImg, hist, 0, NULL ); 52 | cvGetMinMaxHistValue( hist, 0, &max_value, 0, 0 ); 53 | cvScale( hist->bins, hist->bins, ((double)histogramImage->height)/max_value, 0 ); 54 | cvSet( histogramImage, cvScalarAll(255), 0 ); 55 | bin_w = cvRound((double)histogramImage->width/hist_size); 56 | 57 | for(int i = 0; i < hist_size; i++ ) 58 | { 59 | cvRectangle( histogramImage, cvPoint(i*bin_w, histogramImage->height), 60 | cvPoint((i+1)*bin_w, histogramImage->height 61 | - cvRound(cvGetReal1D(hist->bins,i))), 62 | cvScalarAll(0), -1, 8, 0 ); 63 | } 64 | 65 | cvReleaseHist (&hist); 66 | } 67 | 68 | /******************************************************************************/ 69 | 70 | int main( int argc, char** argv ) 71 | { 72 | 73 | Mat img; // image object 74 | Mat gray; // gray image object 75 | Mat histogram; // histogram image object 76 | VideoCapture cap; // capture object 77 | 78 | const string windowName = "Input Image (OpenCV C++ code)"; // window name 79 | const string windowName2 = "Histogram (OpenCV C code)"; // window name 80 | const string windowName3 = "Canny (OpenCV C/C++ objects)"; // window name 81 | 82 | bool keepProcessing = true; // loop control flag 83 | unsigned char key; // user input 84 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 85 | // 40 ms equates to 1000ms/25fps = 40ms per frame 86 | 87 | // if command line arguments are provided try to read 
image/video_name 88 | // otherwise default to capture from attached H/W camera 89 | 90 | if( 91 | ( argc == 2 && (!(img = imread( argv[1], IMREAD_COLOR)).empty()))|| 92 | ( argc == 2 && (cap.open(argv[1]) == true )) || 93 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 94 | ) 95 | { 96 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 97 | 98 | namedWindow(windowName, 0); 99 | namedWindow(windowName2,0); 100 | namedWindow(windowName3,0); 101 | 102 | // create trackbars for the canny stuff 103 | 104 | int upper = 200; 105 | int lower = 80; 106 | createTrackbar((string) "upper", windowName3, &upper, 255, 0, 0); 107 | createTrackbar((string) "lower", windowName3, &lower, 255, 0, 0); 108 | 109 | // start main loop 110 | 111 | while (keepProcessing) { 112 | 113 | // if capture object in use (i.e. video/camera) 114 | // get image from capture object 115 | 116 | if (cap.isOpened()) { 117 | 118 | cap >> img; 119 | if(img.empty()){ 120 | if (argc == 2){ 121 | std::cerr << "End of video file reached" << std::endl; 122 | } else { 123 | std::cerr << "ERROR: cannot get next fram from camera" 124 | << std::endl; 125 | } 126 | exit(0); 127 | } 128 | 129 | } else { 130 | 131 | // if not a capture object set event delay to zero so it waits 132 | // indefinitely (as single image file, no need to loop) 133 | 134 | EVENT_LOOP_DELAY = 0; 135 | } 136 | 137 | // convert input image to grayscale 138 | 139 | cvtColor(img, gray, COLOR_BGR2GRAY); 140 | 141 | // here we must explicitly specify the size of the histogram image 142 | // as it is expected to be pre-allocated by our OpenCV C function 143 | // (255 = columns for our histogram) 144 | 145 | histogram = Mat(200, 256, CV_8UC1); 146 | 147 | // to use OpenCV C code with C++ OpenCV image objects we can use the 148 | // Mat::IplImage() operator to return an (old-style) IplImage header 149 | // for the Mat object/image data (N.B. this is a pointer to the 150 | // original Mat object and no data is copied). 
Where an IplImage* 151 | // (pointer) is required we use &() to return this temporary pointer 152 | 153 | // here we do this for both input and outputs 154 | 155 | IplImage *gray_ipl_p = &(IplImage(gray)); 156 | 157 | create_histogram_image(gray_ipl_p, &(IplImage(histogram))); 158 | 159 | // to use (old-style) C OpenCV image objects with C++ OpenCV methods 160 | // we use the following inverse operator 161 | 162 | IplImage *test = gray_ipl_p; // create an old style C IplImage object 163 | IplImage *canny = // and also an output IplImage image 164 | cvCreateImage(cvSize(test->width,test->height), test->depth, 1); 165 | 166 | // use both of these OpenCV C images with an OpenCV C++ function 167 | 168 | Mat tmp_test = Mat(cv::cvarrToMat(test)); 169 | Mat tmp_canny = Mat(cv::cvarrToMat(canny)); 170 | Canny(tmp_test, tmp_canny, lower, upper, 3); 171 | 172 | // display all images in window 173 | 174 | imshow(windowName, gray); 175 | imshow(windowName2, histogram); 176 | imshow(windowName3, Mat(cv::cvarrToMat(canny))); 177 | 178 | // start event processing loop (very important,in fact essential for GUI) 179 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 180 | 181 | key = waitKey(EVENT_LOOP_DELAY); 182 | 183 | if (key == 'x'){ 184 | 185 | // if user presses "x" then exit 186 | 187 | std::cout << "Keyboard exit requested : exiting now - bye!" 
188 | << std::endl; 189 | keepProcessing = false; 190 | } 191 | 192 | // explicitly created OpenCV C images/matrices must be deallocated 193 | 194 | cvReleaseImage(&canny); 195 | 196 | } 197 | 198 | // the camera will be deinitialized automatically in VideoCapture destructor 199 | 200 | // all OK : main returns 0 201 | 202 | return 0; 203 | } 204 | 205 | // not OK : main returns -1 206 | 207 | return -1; 208 | } 209 | /******************************************************************************/ 210 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. 
The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. 
You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 
108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 
145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 
166 | -------------------------------------------------------------------------------- /generic_recognition_interface.cpp: -------------------------------------------------------------------------------- 1 | // Example : basic interface to example based recognition from video / camera 2 | // usage: prog {} 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | /******************************************************************************/ 21 | 22 | void printhelp() 23 | { 24 | std::cout << std::endl << "Controls:" << std::endl; 25 | std::cout << "\tspace = capture a sample image" << std::endl; 26 | std::cout << "\treturn = move to recognition mode (or m)" << std::endl; 27 | std::cout << "\tr = recognise current image" << std::endl; 28 | std::cout << "\tany key = clear recognition result" << std::endl; 29 | std::cout << "\tx = exit" << std::endl; 30 | } 31 | 32 | #define MAX_NUMBER_OF_SAMPLE_IMAGES 25 33 | 34 | /******************************************************************************/ 35 | 36 | int main( int argc, char** argv ) 37 | { 38 | 39 | Mat img; // image objects 40 | VideoCapture cap; // capture object 41 | 42 | const string windowName = ".... 
Recognition"; // window name 43 | 44 | bool keepProcessing = true; // loop control flag 45 | int key; // user input 46 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 47 | // 40 ms equates to 1000ms/25fps = 40ms per frame 48 | 49 | // data structures and matrices for image based recognition 50 | 51 | Mat input[MAX_NUMBER_OF_SAMPLE_IMAGES]; 52 | 53 | int imagesCollected = 0; // number of sample images collected 54 | 55 | bool recognitionStage = false; // flag to determine when have started 56 | // recognition 57 | int closestImage = 0; // index of best match 58 | 59 | // if command line arguments are provided try to read video_name 60 | // otherwise default to capture from attached H/W camera 61 | 62 | if(( argc == 2 && (cap.open(argv[1]) == true )) || ( argc != 2 && (cap.open(0) == true))) 63 | { 64 | // print user controls 65 | 66 | printhelp(); 67 | 68 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 69 | 70 | namedWindow(windowName, 0); 71 | 72 | // start main loop 73 | 74 | while (keepProcessing) 75 | { 76 | int64 timeStart = getTickCount(); // get time at start of loop 77 | 78 | // if capture object in use (i.e. 
video/camera) 79 | // get image from capture object 80 | 81 | if (cap.isOpened()) 82 | { 83 | 84 | cap >> img; 85 | if(img.empty()) 86 | { 87 | if (argc == 2) 88 | { 89 | std::cerr << "End of video file reached" << std::endl; 90 | } 91 | else 92 | { 93 | std::cerr << "ERROR: cannot get next fram from camera" 94 | << std::endl; 95 | } 96 | exit(0); 97 | } 98 | 99 | } 100 | 101 | // ***** DO ANY PRE-DISPLAY PROCESSING HERE **** 102 | 103 | // display image in window (with text) 104 | 105 | if (!recognitionStage) 106 | { 107 | putText(img, "SAMPLE COLLECTION", Point(10,img.rows - 10), 108 | FONT_HERSHEY_PLAIN, 2.0, CV_RGB(0, 255,0), 3, 8, false); 109 | } 110 | else 111 | { 112 | putText(img, "RECOGNITION", Point(10,img.rows - 10), 113 | FONT_HERSHEY_PLAIN, 2.0, CV_RGB(255, 0,0), 3, 8, false); 114 | } 115 | imshow( windowName, img ); 116 | 117 | // start event processing loop (very important,in fact essential for GUI) 118 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 119 | 120 | // here we take account of processing time for the loop by subtracting the time 121 | // taken in ms. from this (1000ms/25fps = 40ms per frame) value whilst ensuring 122 | // we get a +ve wait time 123 | 124 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 125 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 126 | 127 | if (key == 'x') 128 | { 129 | 130 | // if user presses "x" then exit 131 | 132 | std::cout << "Keyboard exit requested : exiting now - bye!" 
133 | << std::endl; 134 | keepProcessing = false; 135 | 136 | } 137 | else if (key == ' ') 138 | { 139 | 140 | // if user presses " " then capture a sample image 141 | 142 | if (!recognitionStage) 143 | { 144 | if (imagesCollected < MAX_NUMBER_OF_SAMPLE_IMAGES) 145 | { 146 | 147 | // copy image and store it 148 | 149 | input[imagesCollected] = img.clone(); 150 | imagesCollected++; 151 | 152 | std::cout << "Sample image collected - " << 153 | imagesCollected << std::endl; 154 | 155 | 156 | // ***** DO ANY OTHER PREPROCESSING / INFO. EXTRACTION HERE ***** 157 | 158 | } 159 | else 160 | { 161 | std::cout << "ERROR: Maximum sample images (" << 162 | imagesCollected << ") collected" << std::endl; 163 | } 164 | } 165 | 166 | } 167 | else if ((key == '\n') || (key == 'm')) // use "m" in windows 168 | { 169 | 170 | // if user presses return then move into recognition mode 171 | 172 | std::cout << "Entering recognition mode - ..." << std::endl << std::endl; 173 | 174 | recognitionStage = true; 175 | if (!(imagesCollected > 0)) 176 | { 177 | std::cerr << "ERROR: not enough samples images caputured" << 178 | std::endl; 179 | } 180 | } 181 | else if (key == 'r') 182 | { 183 | 184 | // if user presses "r" then do recognition 185 | 186 | // *********** EXTRACT INFORMATION FROM THE CURRENT INPUT IMAGE HERE 187 | 188 | if (recognitionStage) 189 | { 190 | 191 | 192 | for (int i = 0; i < imagesCollected; i++) 193 | { 194 | 195 | // *********** MATCH INFORMATION TO STORED IMAGES HERE 196 | 197 | // set closestImage as the index of the best matching 198 | // image in the input array of images 199 | 200 | closestImage = 0; 201 | 202 | } 203 | 204 | // output the result in a window 205 | 206 | std::cout << "Recognition - closest matching image = " << 207 | closestImage << std::endl; 208 | std::cout << "Press any key to clear." 
<< std::endl << std::endl; 209 | 210 | namedWindow("Recognition Result", 1 ); 211 | imshow("Recognition Result", input[closestImage]); 212 | waitKey(0); 213 | destroyWindow("Recognition Result"); // close window 214 | 215 | } 216 | else 217 | { 218 | std::cout << "ERROR - need to enter recognition stage first." 219 | << std::endl; 220 | } 221 | } 222 | } 223 | 224 | 225 | // all images should be killed off by their respective destructors 226 | 227 | // the camera will be deinitialized automatically in VideoCapture destructor 228 | 229 | // all OK : main returns 0 230 | 231 | return 0; 232 | } 233 | 234 | // not OK : main returns -1 235 | 236 | return -1; 237 | } 238 | /******************************************************************************/ 239 | -------------------------------------------------------------------------------- /histogram_based_recognition_colour.cpp: -------------------------------------------------------------------------------- 1 | // Example : RGB colour histogram based recognition from video / camera 2 | // usage: prog { | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | /******************************************************************************/ 22 | // setup the cameras properly based on OS platform 23 | 24 | // 0 in linux gives first camera for v4l 25 | //-1 in windows gives first device or user dialog selection 26 | 27 | #ifdef linux 28 | #define CAMERA_INDEX 0 29 | #else 30 | 
#define CAMERA_INDEX -1 31 | #endif 32 | 33 | /******************************************************************************/ 34 | 35 | void printhelp(){ 36 | std::cout << std::endl << "Controls:" << std::endl; 37 | std::cout << "\tspace = capture a sample image" << std::endl; 38 | std::cout << "\treturn = move to recognition mode (or m)" << std::endl; 39 | std::cout << "\tr = recognise current image" << std::endl; 40 | std::cout << "\tany key = clear recognition result" << std::endl; 41 | std::cout << "\tx = exit" << std::endl; 42 | } 43 | 44 | /******************************************************************************/ 45 | 46 | int main( int argc, char** argv ) 47 | { 48 | 49 | Mat img; // image objects 50 | VideoCapture cap; // capture object 51 | 52 | const string windowName = "Colour Histogram Based Recognition"; // window name 53 | 54 | bool keepProcessing = true; // loop control flag 55 | int key; // user input 56 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 57 | // 40 ms equates to 1000ms/25fps = 40ms per frame 58 | 59 | // histogram specific stuff - create a 3D histogram for RGB image 60 | 61 | #define MAX_NUMBER_OF_SAMPLE_IMAGES 255 62 | int hist_size[] = {256, 256, 256}; // size of histogram (number of bins) 63 | float range_0[]={0, 255}; 64 | const float* ranges[] = { range_0, range_0, range_0 }; 65 | int channels[] = {0, 1, 2}; // we compute the histogram from all 3 channels 66 | 67 | // create the histogram explicitlty, specifying 32-bit float (CV_32F) data 68 | // storage 69 | 70 | MatND currentHistogram; 71 | currentHistogram.create(3, hist_size, CV_32F); 72 | 73 | // data structures and matrices for histogram based recognition 74 | 75 | Mat input[MAX_NUMBER_OF_SAMPLE_IMAGES]; 76 | MatND histogram[MAX_NUMBER_OF_SAMPLE_IMAGES]; 77 | 78 | int imagesCollected = 0; // number of sample images collected 79 | 80 | bool recognitionStage = false; // flag to determine when have started 81 | // recognition 82 | 83 | // if command line arguments 
are provided try to read image/video_name 84 | // otherwise default to capture from attached H/W camera 85 | 86 | if( 87 | ( argc == 2 && (cap.open(argv[1]) == true )) || 88 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 89 | ) 90 | { 91 | // print user controls 92 | 93 | printhelp(); 94 | 95 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 96 | 97 | namedWindow(windowName, 0); 98 | 99 | // start main loop 100 | 101 | while (keepProcessing) { 102 | 103 | int64 timeStart = getTickCount(); // get time at start of loop 104 | 105 | // if capture object in use (i.e. video/camera) 106 | // get image from capture object 107 | 108 | if (cap.isOpened()) { 109 | 110 | cap >> img; 111 | if(img.empty()){ 112 | if (argc == 2){ 113 | std::cerr << "End of video file reached" << std::endl; 114 | } else { 115 | std::cerr << "ERROR: cannot get next fram from camera" 116 | << std::endl; 117 | } 118 | exit(0); 119 | } 120 | 121 | } 122 | 123 | // flip the image (so movement on screen matches movement in window) 124 | 125 | flip(img, img, 1); 126 | 127 | // display image in window (with text) 128 | 129 | if (!recognitionStage){ 130 | putText(img, "SAMPLE COLLECTION", Point(10,img.rows - 10), 131 | FONT_HERSHEY_PLAIN, 2.0, CV_RGB(0, 255,0), 3, 8, false); 132 | } else { 133 | putText(img, "RECOGNITION", Point(10,img.rows - 10), 134 | FONT_HERSHEY_PLAIN, 2.0, CV_RGB(255, 0,0), 3, 8, false); 135 | } 136 | imshow( windowName, img ); 137 | 138 | // start event processing loop (very important,in fact essential for GUI) 139 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 140 | 141 | // here we take account of processing time for the loop by subtracting the time 142 | // taken in ms. 
from this (1000ms/25fps = 40ms per frame) value whilst ensuring 143 | // we get a +ve wait time 144 | 145 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 146 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 147 | 148 | if (key == 'x'){ 149 | 150 | // if user presses "x" then exit 151 | 152 | std::cout << "Keyboard exit requested : exiting now - bye!" 153 | << std::endl; 154 | keepProcessing = false; 155 | 156 | } else if (key == ' '){ 157 | 158 | // if user presses " " then capture a sample image 159 | 160 | if (!recognitionStage) { 161 | if (imagesCollected < MAX_NUMBER_OF_SAMPLE_IMAGES) 162 | { 163 | 164 | // copy image + build/store image histogram 165 | 166 | input[imagesCollected] = img.clone(); 167 | 168 | calcHist(&img, 1, channels, Mat(), currentHistogram, 3, 169 | hist_size, ranges, true, false); 170 | normalize(currentHistogram, currentHistogram, 1, 0, NORM_L1); 171 | histogram[imagesCollected] = currentHistogram.clone(); 172 | 173 | imagesCollected++; 174 | 175 | std::cout << "Sample image collected - " << 176 | imagesCollected << std::endl; 177 | 178 | } else { 179 | std::cout << "ERROR: Maximum sample images (" << 180 | imagesCollected << ") collected" << std::endl; 181 | } 182 | } 183 | 184 | } else if ((key == '\n') || (key == 'm')) { // use "m" in windows 185 | 186 | // if user presses return then move into recognition mode 187 | 188 | std::cout << "Entering recognition mode - histogram models" << 189 | " stored" << std::endl << std::endl; 190 | 191 | recognitionStage = true; 192 | if (!(imagesCollected > 0)) { 193 | std::cerr << "ERROR: not enough samples images caputured" << 194 | std::endl; 195 | } 196 | } else if (key == 'r'){ 197 | 198 | // if user presses "r" then do recognition 199 | 200 | // calc current image histogram 201 | 202 | calcHist(&img, 1, channels, Mat(), currentHistogram, 3, 203 | hist_size, ranges, true, false); 204 | normalize(currentHistogram, currentHistogram, 1, 0, NORM_L1); 205 | 206 | if 
(recognitionStage) { 207 | 208 | // for each histogram (do comparison) 209 | 210 | double closestDistance = __DBL_MAX__; 211 | int closestImage = 0; 212 | 213 | for (int i = 0; i < imagesCollected; i++) 214 | { 215 | // do histogram comparision here 216 | 217 | double correlation = compareHist(currentHistogram, 218 | histogram[i],HISTCMP_CORREL); 219 | double chisquared = compareHist(currentHistogram, 220 | histogram[i],HISTCMP_CHISQR); 221 | double intersect = compareHist(currentHistogram, 222 | histogram[i],HISTCMP_INTERSECT); 223 | double bhattacharyya = compareHist(currentHistogram, 224 | histogram[i],HISTCMP_BHATTACHARYYA); 225 | 226 | // here we just sum the differences of the measures 227 | // (which as the histograms are all normalised are all 228 | // measures in the range -1->0->1). This *is not* the 229 | // best way to do this - beware. 230 | 231 | // N.B. For the OpenCV implementation: 232 | // low correlation = large difference (so we invert it) 233 | // low intersection = large difference (so we invert it) 234 | // high chisquared = large differences 235 | // high bhatt. = large difference 236 | // - and vice versa 237 | 238 | double diff = (1 - correlation) + chisquared 239 | + (1 - intersect) + bhattacharyya; 240 | 241 | std::cout << "Comparison image " << i << " Corr: " << 242 | correlation << " ChiSq: " << chisquared << 243 | " Intersect: " << intersect << " Bhatt: " << bhattacharyya 244 | << " Total Distance = " << diff << std::endl; 245 | 246 | if (diff < closestDistance){ 247 | closestDistance = diff; 248 | closestImage = i; 249 | 250 | } 251 | } 252 | 253 | std::cout << std::endl; 254 | 255 | // output the result in a window 256 | 257 | std::cout << "Recognition - closest matching image = " << 258 | closestImage << std::endl; 259 | std::cout << "Press any key to clear." 
<< std::endl << std::endl; 260 | 261 | namedWindow("Recognition Result", 1 ); 262 | imshow("Recognition Result", input[closestImage]); 263 | waitKey(0); 264 | destroyWindow("Recognition Result"); // close window 265 | 266 | } else { 267 | std::cout << "ERROR - need to enter recognition stage first." 268 | << std::endl; 269 | } 270 | } 271 | } 272 | 273 | 274 | // all image and histogram objects should be killed off by their respective 275 | // destructors 276 | 277 | // the camera will be deinitialized automatically in VideoCapture destructor 278 | 279 | // all OK : main returns 0 280 | 281 | return 0; 282 | } 283 | 284 | // not OK : main returns -1 285 | 286 | return -1; 287 | } 288 | /******************************************************************************/ 289 | -------------------------------------------------------------------------------- /histogram_based_recognition.cpp: -------------------------------------------------------------------------------- 1 | // Example : basic histogram based recognition from video / camera 2 | // usage: prog { | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | 14 | #include // standard C++ I/O 15 | #include // standard C++ I/O 16 | #include // includes max() 17 | 18 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 19 | using namespace std; 20 | 21 | /******************************************************************************/ 22 | // setup the cameras properly based on OS platform 23 | 24 | // 0 in linux gives first camera for v4l 25 | //-1 in windows gives first device or user dialog selection 26 | 27 | #ifdef linux 28 | #define CAMERA_INDEX 0 29 | #else 
30 | #define CAMERA_INDEX -1 31 | #endif 32 | 33 | /******************************************************************************/ 34 | 35 | void printhelp(){ 36 | std::cout << std::endl << "Controls:" << std::endl; 37 | std::cout << "\tspace = capture a sample image" << std::endl; 38 | std::cout << "\treturn = move to recognition mode (or m)" << std::endl; 39 | std::cout << "\tr = recognise current image" << std::endl; 40 | std::cout << "\tany key = clear recognition result" << std::endl; 41 | std::cout << "\tx = exit" << std::endl; 42 | } 43 | 44 | /******************************************************************************/ 45 | 46 | int main( int argc, char** argv ) 47 | { 48 | 49 | Mat img, grayImg; // image objects 50 | VideoCapture cap; // capture object 51 | 52 | const string windowName = "Histogram Based Recognition"; // window name 53 | 54 | bool keepProcessing = true; // loop control flag 55 | int key; // user input 56 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 57 | // 40 ms equates to 1000ms/25fps = 40ms per frame 58 | 59 | // histogram specific stuff 60 | 61 | #define MAX_NUMBER_OF_SAMPLE_IMAGES 255 62 | int hist_size[] = {256}; // size of histogram (number of bins) 63 | float range_0[]={0, 255}; 64 | const float* ranges[] = { range_0 }; 65 | int channels[] = {0}; // we compute the histogram from the 0-th channel only 66 | 67 | // create the histogram explicitlty, specifying 32-bit float (CV_32F) data 68 | // storage 69 | 70 | MatND currentHistogram; 71 | currentHistogram.create(1, hist_size, CV_32F); 72 | 73 | // data structures and matrices for histogram based recognition 74 | 75 | Mat input[MAX_NUMBER_OF_SAMPLE_IMAGES]; 76 | MatND histogram[MAX_NUMBER_OF_SAMPLE_IMAGES]; 77 | 78 | int imagesCollected = 0; // number of sample images collected 79 | 80 | bool recognitionStage = false; // flag to determine when have started 81 | // recognition 82 | 83 | // if command line arguments are provided try to read image/video_name 84 | // otherwise 
default to capture from attached H/W camera 85 | 86 | if( 87 | ( argc == 2 && (cap.open(argv[1]) == true )) || 88 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 89 | ) 90 | { 91 | // print user controls 92 | 93 | printhelp(); 94 | 95 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 96 | 97 | namedWindow(windowName, 0); 98 | 99 | // start main loop 100 | 101 | while (keepProcessing) { 102 | 103 | int64 timeStart = getTickCount(); // get time at start of loop 104 | 105 | // if capture object in use (i.e. video/camera) 106 | // get image from capture object 107 | 108 | if (cap.isOpened()) { 109 | 110 | cap >> img; 111 | if(img.empty()){ 112 | if (argc == 2){ 113 | std::cerr << "End of video file reached" << std::endl; 114 | } else { 115 | std::cerr << "ERROR: cannot get next fram from camera" 116 | << std::endl; 117 | } 118 | exit(0); 119 | } 120 | 121 | } 122 | 123 | // flip the image (so movement on screen matches movement in window) 124 | 125 | flip(img, img, 1); 126 | 127 | // if input is not already grayscale, convert to grayscale 128 | 129 | cvtColor(img, grayImg, COLOR_BGR2GRAY); 130 | 131 | // display image in window (with text) 132 | 133 | if (!recognitionStage){ 134 | putText(img, "SAMPLE COLLECTION", Point(10,img.rows - 10), 135 | FONT_HERSHEY_PLAIN, 2.0, CV_RGB(0, 255,0), 3, 8, false); 136 | } else { 137 | putText(img, "RECOGNITION", Point(10,img.rows - 10), 138 | FONT_HERSHEY_PLAIN, 2.0, CV_RGB(255, 0,0), 3, 8, false); 139 | } 140 | imshow( windowName, img ); 141 | 142 | // start event processing loop (very important,in fact essential for GUI) 143 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 144 | 145 | // here we take account of processing time for the loop by subtracting the time 146 | // taken in ms. 
from this (1000ms/25fps = 40ms per frame) value whilst ensuring 147 | // we get a +ve wait time 148 | 149 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 150 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 151 | 152 | if (key == 'x'){ 153 | 154 | // if user presses "x" then exit 155 | 156 | std::cout << "Keyboard exit requested : exiting now - bye!" 157 | << std::endl; 158 | keepProcessing = false; 159 | 160 | } else if (key == ' '){ 161 | 162 | // if user presses " " then capture a sample image 163 | 164 | if (!recognitionStage) { 165 | if (imagesCollected < MAX_NUMBER_OF_SAMPLE_IMAGES) 166 | { 167 | 168 | // copy image + build/store image histogram 169 | 170 | input[imagesCollected] = grayImg.clone(); 171 | 172 | calcHist(&grayImg, 1, channels, Mat(), currentHistogram, 1, 173 | hist_size, ranges, true, false); 174 | normalize(currentHistogram, currentHistogram, 1, 0, NORM_L1); 175 | histogram[imagesCollected] = currentHistogram.clone(); 176 | 177 | imagesCollected++; 178 | 179 | std::cout << "Sample image collected - " << 180 | imagesCollected << std::endl; 181 | 182 | } else { 183 | std::cout << "ERROR: Maximum sample images (" << 184 | imagesCollected << ") collected" << std::endl; 185 | } 186 | } 187 | 188 | } else if ((key == '\n') || (key == 'm')) { // use "m" in windows 189 | 190 | // if user presses return then move into recognition mode 191 | 192 | std::cout << "Entering recognition mode - histogram models" << 193 | " stored" << std::endl << std::endl; 194 | 195 | recognitionStage = true; 196 | if (!(imagesCollected > 0)) { 197 | std::cerr << "ERROR: not enough samples images caputured" << 198 | std::endl; 199 | } 200 | } else if (key == 'r'){ 201 | 202 | // if user presses "r" then do recognition 203 | 204 | // calc current image histogram 205 | 206 | calcHist(&grayImg, 1, channels, Mat(), currentHistogram, 1, 207 | hist_size, ranges, true, false); 208 | normalize(currentHistogram, currentHistogram, 1, 0, NORM_L1); 209 | 210 | if 
(recognitionStage) { 211 | 212 | // for each histogram (do comparison) 213 | 214 | double closestDistance = __DBL_MAX__; 215 | int closestImage = 0; 216 | 217 | for (int i = 0; i < imagesCollected; i++) 218 | { 219 | // do histogram comparision here 220 | 221 | double correlation = compareHist(currentHistogram, 222 | histogram[i],HISTCMP_CORREL); 223 | double chisquared = compareHist(currentHistogram, 224 | histogram[i],HISTCMP_CHISQR); 225 | double intersect = compareHist(currentHistogram, 226 | histogram[i],HISTCMP_INTERSECT); 227 | double bhattacharyya = compareHist(currentHistogram, 228 | histogram[i],HISTCMP_BHATTACHARYYA); 229 | 230 | // here we just sum the differences of the measures 231 | // (which as the histograms are all normalised are all 232 | // measures in the range -1->0->1). This *is not* the 233 | // best way to do this - beware. 234 | 235 | // N.B. For the OpenCV implementation: 236 | // low correlation = large difference (so we invert it) 237 | // low intersection = large difference (so we invert it) 238 | // high chisquared = large differences 239 | // high bhatt. = large difference 240 | // - and vice versa 241 | 242 | double diff = (1 - correlation) + chisquared 243 | + (1 - intersect) + bhattacharyya; 244 | 245 | std::cout << "Comparison image " << i << " Corr: " << 246 | correlation << " ChiSq: " << chisquared << 247 | " Intersect: " << intersect << " Bhatt: " << bhattacharyya 248 | << " Total Distance = " << diff << std::endl; 249 | 250 | if (diff < closestDistance){ 251 | closestDistance = diff; 252 | closestImage = i; 253 | 254 | } 255 | } 256 | 257 | std::cout << std::endl; 258 | 259 | // output the result in a window 260 | 261 | std::cout << "Recognition - closest matching image = " << 262 | closestImage << std::endl; 263 | std::cout << "Press any key to clear." 
<< std::endl << std::endl; 264 | 265 | namedWindow("Recognition Result", 1 ); 266 | imshow("Recognition Result", input[closestImage]); 267 | waitKey(0); 268 | destroyWindow("Recognition Result"); // close window 269 | 270 | } else { 271 | std::cout << "ERROR - need to enter recognition stage first." 272 | << std::endl; 273 | } 274 | } 275 | } 276 | 277 | 278 | // all image and histogram objects should be killed off by their respective 279 | // destructors 280 | 281 | // the camera will be deinitialized automatically in VideoCapture destructor 282 | 283 | // all OK : main returns 0 284 | 285 | return 0; 286 | } 287 | 288 | // not OK : main returns -1 289 | 290 | return -1; 291 | } 292 | /******************************************************************************/ 293 | -------------------------------------------------------------------------------- /butterworth_lowpass.cpp: -------------------------------------------------------------------------------- 1 | // Example : apply butterworth low pass filtering to input image/video 2 | // usage: prog { | } 3 | 4 | // Author : Toby Breckon, toby.breckon@cranfield.ac.uk 5 | 6 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 7 | 8 | // Copyright (c) 2011 School of Engineering, Cranfield University 9 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 10 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 11 | 12 | #include "opencv2/videoio.hpp" 13 | #include "opencv2/highgui.hpp" 14 | #include "opencv2/imgproc.hpp" 15 | 16 | #include // standard C++ I/O 17 | #include // standard C++ I/O 18 | #include // includes max() 19 | 20 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 21 | using namespace std; 22 | 23 | /******************************************************************************/ 24 | // setup the cameras properly based on OS platform 25 | 26 | // 0 in linux gives first camera for v4l 27 | //-1 in windows gives first device or user dialog selection 
28 | 29 | #ifdef linux 30 | #define CAMERA_INDEX 0 31 | #else 32 | #define CAMERA_INDEX -1 33 | #endif 34 | /******************************************************************************/ 35 | // Rearrange the quadrants of a Fourier image so that the origin is at 36 | // the image center 37 | 38 | void shiftDFT(Mat& fImage ) 39 | { 40 | Mat tmp, q0, q1, q2, q3; 41 | 42 | // first crop the image, if it has an odd number of rows or columns 43 | 44 | fImage = fImage(Rect(0, 0, fImage.cols & -2, fImage.rows & -2)); 45 | 46 | int cx = fImage.cols/2; 47 | int cy = fImage.rows/2; 48 | 49 | // rearrange the quadrants of Fourier image 50 | // so that the origin is at the image center 51 | 52 | q0 = fImage(Rect(0, 0, cx, cy)); 53 | q1 = fImage(Rect(cx, 0, cx, cy)); 54 | q2 = fImage(Rect(0, cy, cx, cy)); 55 | q3 = fImage(Rect(cx, cy, cx, cy)); 56 | 57 | q0.copyTo(tmp); 58 | q3.copyTo(q0); 59 | tmp.copyTo(q3); 60 | 61 | q1.copyTo(tmp); 62 | q2.copyTo(q1); 63 | tmp.copyTo(q2); 64 | } 65 | 66 | /******************************************************************************/ 67 | // return a floating point spectrum magnitude image scaled for user viewing 68 | // complexImg- input dft (2 channel floating point, Real + Imaginary fourier image) 69 | // rearrange - perform rearrangement of DFT quadrants if true 70 | 71 | // return value - pointer to output spectrum magnitude image scaled for user viewing 72 | 73 | Mat create_spectrum_magnitude_display(Mat& complexImg, bool rearrange) 74 | { 75 | Mat planes[2]; 76 | 77 | // compute magnitude spectrum (N.B. 
for display) 78 | // compute log(1 + sqrt(Re(DFT(img))**2 + Im(DFT(img))**2)) 79 | 80 | split(complexImg, planes); 81 | magnitude(planes[0], planes[1], planes[0]); 82 | 83 | Mat mag = (planes[0]).clone(); 84 | mag += Scalar::all(1); 85 | log(mag, mag); 86 | 87 | if (rearrange) 88 | { 89 | // re-arrange the quaderants 90 | shiftDFT(mag); 91 | } 92 | 93 | normalize(mag, mag, 0, 1, NORM_MINMAX); 94 | 95 | return mag; 96 | 97 | } 98 | /******************************************************************************/ 99 | 100 | // create a 2-channel butterworth low-pass filter with radius D, order n 101 | // (assumes pre-aollocated size of dft_Filter specifies dimensions) 102 | 103 | // void create_butterworth_lowpass_filter(Mat &dft_Filter, int D, int n) 104 | // { 105 | // Mat tmp = Mat(dft_Filter.rows, dft_Filter.cols, CV_32F); 106 | // 107 | // Point centre = Point(dft_Filter.rows / 2, dft_Filter.cols / 2); 108 | // double radius; 109 | // 110 | // // based on the forumla in the IP notes (p. 
130 of 2009/10 version) 111 | // // see also HIPR2 on-line 112 | // 113 | // for(int i = 0; i < dft_Filter.rows; i++) 114 | // { 115 | // for(int j = 0; j < dft_Filter.cols; j++) 116 | // { 117 | // radius = (double) sqrt(pow((i - centre.x), 2.0) + pow((double) (j - centre.y), 2.0)); 118 | // tmp.at(i,j) = (float) 119 | // ( 1 / (1 + pow((double) (radius / D), (double) (2 * n)))); 120 | // } 121 | // } 122 | // 123 | // Mat toMerge[] = {tmp, tmp}; 124 | // merge(toMerge, 2, dft_Filter); 125 | // } 126 | 127 | // improved version thanks to: James Freeman, GP2U 128 | 129 | // fix 1: rows (y) and the cols (x) transposed which then leads on to confusing 130 | // comparison of i (y axis) to centre.x and j (x axis) to centre.y 131 | // fix 2: doesn't work if dftFilter is even size (in above version) 132 | // fix 3: Creating one quadrant correctly and then flipping it into the other 133 | // 3 quadrants also saves 75% of the pow/sqrt calls and speeds it up by ~70% 134 | 135 | void create_butterworth_lowpass_filter(Mat& dftFilter, int radius, int order) 136 | { 137 | Mat tmp = Mat(dftFilter.rows, dftFilter.cols, CV_32F); 138 | 139 | int cy = dftFilter.rows / 2; 140 | int cx = dftFilter.cols / 2; 141 | Mat q0 = tmp(Rect(0, 0, cx, cy)); 142 | Mat q1 = tmp(Rect(cx, 0, cx, cy)); 143 | Mat q2 = tmp(Rect(0, cy, cx, cy)); 144 | Mat q3 = tmp(Rect(cx, cy, cx, cy)); 145 | 146 | // _create one quadrant... 
147 | for (int yi = 0; yi < cy; yi++) 148 | for (int xi = 0; xi < cx; xi++) 149 | q3.at(yi, xi) = (1.0 / (1 + pow((cv::sqrt(yi * yi + xi * xi) / radius), order))); 150 | 151 | // now flip into place to _create the rest of filter 152 | 153 | flip(q3, q1, 0); 154 | flip(q3, q2, 1); 155 | flip(q3, q0, -1); 156 | 157 | // to multiply a DFT image by a filter, this filter 158 | // should be real only, otherwise the multiplication 159 | // changes the phase along with the magnitude of each 160 | // pixel in the DFT - bug fix, 01/2023 - https://github.com/epitalon 161 | 162 | Mat toMerge[] = { tmp, Mat::zeros(tmp.size(), CV_32F) }; 163 | merge(toMerge, 2, dftFilter); 164 | } 165 | 166 | /******************************************************************************/ 167 | 168 | int main( int argc, char** argv ) 169 | { 170 | 171 | Mat img, imgGray, imgOutput; // image object(s) 172 | VideoCapture cap; // capture object 173 | 174 | Mat padded; // fourier image objects and arrays 175 | Mat complexImg, filter, filterOutput; 176 | Mat planes[2], mag; 177 | 178 | int N, M; // fourier image sizes 179 | 180 | int radius = 30; // low pass filter parameter 181 | int order = 2; // low pass filter parameter 182 | 183 | const string originalName = "Input Image (grayscale)"; // window name 184 | const string spectrumMagName = "Magnitude Image (log transformed)"; // window name 185 | const string lowPassName = "Butterworth Low Pass Filtered (grayscale)"; // window name 186 | const string filterName = "Filter Image"; // window nam 187 | 188 | bool keepProcessing = true; // loop control flag 189 | unsigned char key; // user input 190 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 191 | // 40 ms equates to 1000ms/25fps = 40ms per frame 192 | 193 | // if command line arguments are provided try to read image/video_name 194 | // otherwise default to capture from attached H/W camera 195 | 196 | if( 197 | ( argc == 2 && (!(img = imread( argv[1], IMREAD_COLOR)).empty()))|| 198 | ( argc == 
2 && (cap.open(argv[1]) == true )) || 199 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 200 | ) 201 | { 202 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 203 | 204 | namedWindow(originalName, 0); 205 | namedWindow(spectrumMagName, 0); 206 | namedWindow(lowPassName, 0); 207 | namedWindow(filterName, 0); 208 | 209 | // if capture object in use (i.e. video/camera) 210 | // get image from capture object 211 | 212 | if (cap.isOpened()) { 213 | 214 | cap >> img; 215 | if(img.empty()){ 216 | if (argc == 2){ 217 | std::cerr << "End of video file reached" << std::endl; 218 | } else { 219 | std::cerr << "ERROR: cannot get next fram from camera" 220 | << std::endl; 221 | } 222 | exit(0); 223 | } 224 | 225 | } 226 | 227 | // setup the DFT image sizes 228 | 229 | M = getOptimalDFTSize( img.rows ); 230 | N = getOptimalDFTSize( img.cols ); 231 | 232 | // add adjustable trackbar for low pass filter threshold parameter 233 | 234 | createTrackbar("Radius", lowPassName, &radius, (min(M, N) / 2)); 235 | createTrackbar("Order", lowPassName, &order, 10); 236 | 237 | // start main loop 238 | 239 | while (keepProcessing) { 240 | 241 | int64 timeStart = getTickCount(); // get time at start of loop 242 | 243 | // if capture object in use (i.e. 
video/camera) 244 | // get image from capture object 245 | 246 | if (cap.isOpened()) { 247 | 248 | cap >> img; 249 | if(img.empty()){ 250 | if (argc == 2){ 251 | std::cerr << "End of video file reached" << std::endl; 252 | } else { 253 | std::cerr << "ERROR: cannot get next fram from camera" 254 | << std::endl; 255 | } 256 | exit(0); 257 | } 258 | 259 | } 260 | 261 | // *** 262 | 263 | // convert input to grayscale 264 | 265 | cvtColor(img, imgGray, COLOR_BGR2GRAY); 266 | 267 | // setup the DFT images 268 | 269 | copyMakeBorder(imgGray, padded, 0, M - imgGray.rows, 0, 270 | N - imgGray.cols, BORDER_CONSTANT, Scalar::all(0)); 271 | planes[0] = Mat_(padded); 272 | planes[1] = Mat::zeros(padded.size(), CV_32F); 273 | 274 | merge(planes, 2, complexImg); 275 | 276 | // do the DFT 277 | 278 | dft(complexImg, complexImg); 279 | 280 | // construct the filter (same size as complex image) 281 | 282 | filter = complexImg.clone(); 283 | create_butterworth_lowpass_filter(filter, radius, order); 284 | 285 | // apply filter 286 | shiftDFT(complexImg); 287 | mulSpectrums(complexImg, filter, complexImg, 0); 288 | shiftDFT(complexImg); 289 | 290 | // create magnitude spectrum for display 291 | 292 | mag = create_spectrum_magnitude_display(complexImg, true); 293 | 294 | // do inverse DFT on filtered image 295 | 296 | idft(complexImg, complexImg); 297 | 298 | // split into planes and extract plane 0 as output image 299 | 300 | split(complexImg, planes); 301 | normalize(planes[0], imgOutput, 0, 1, NORM_MINMAX); 302 | 303 | // do the same with the filter image 304 | 305 | split(filter, planes); 306 | normalize(planes[0], filterOutput, 0, 1, NORM_MINMAX); 307 | 308 | // *** 309 | 310 | // display image in window 311 | 312 | imshow(originalName, imgGray); 313 | imshow(spectrumMagName, mag); 314 | imshow(lowPassName, imgOutput); 315 | imshow(filterName, filterOutput); 316 | 317 | // start event processing loop (very important,in fact essential for GUI) 318 | // 40 ms roughly equates to 
1000ms/25fps = 4ms per frame 319 | 320 | // here we take account of processing time for the loop by subtracting the time 321 | // taken in ms. from this (1000ms/25fps = 40ms per frame) value whilst ensuring 322 | // we get a +ve wait time 323 | 324 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 325 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 326 | 327 | if (key == 'x'){ 328 | 329 | // if user presses "x" then exit 330 | 331 | std::cout << "Keyboard exit requested : exiting now - bye!" 332 | << std::endl; 333 | keepProcessing = false; 334 | } 335 | } 336 | 337 | // the camera will be deinitialized automatically in VideoCapture destructor 338 | 339 | // all OK : main returns 0 340 | 341 | return 0; 342 | } 343 | 344 | // not OK : main returns -1 345 | 346 | return -1; 347 | } 348 | /******************************************************************************/ 349 | -------------------------------------------------------------------------------- /nlm.cpp: -------------------------------------------------------------------------------- 1 | // Non Local Mean - lifted directly from: http://opencv.jp/opencv2-x-samples/non-local-means-filter 2 | 3 | // usage: nlm 4 | 5 | // Code Credit: @fukushima1981(Twitter) 6 | 7 | // Code provided "as is" from original source 8 | 9 | // Reference: 10 | // A. Buades, B. Coll, J.M. Morel “A non local algorithm for image denoising” 11 | // IEEE Computer Vision and Pattern Recognition 2005, Vol 2, pp: 60-65, 2005. 
12 | 13 | // This version (minor fixes): 14 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 15 | 16 | // Copyright (c) 2010 School of Engineering, Cranfield University 17 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 18 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 19 | 20 | // ***************************************************************************************** 21 | 22 | #include "opencv2/videoio.hpp" 23 | #include "opencv2/highgui.hpp" 24 | #include "opencv2/imgproc.hpp" 25 | 26 | #include // standard C++ I/O 27 | #include // standard C++ I/O 28 | #include // includes max() 29 | 30 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 31 | using namespace std; 32 | 33 | // ***************************************************************************************** 34 | 35 | // additional functions///////////////////////////////////// 36 | 37 | void addNoiseSoltPepperMono(Mat& src, Mat& dest,double per) 38 | { 39 | cv::RNG rng; 40 | #pragma omp parallel for 41 | for(int j=0;jper) 50 | d[i]=s[i]; 51 | else 52 | { 53 | double a2 = rng.uniform((double)0, (double)1); 54 | if(a2>0.5)d[i]=0; 55 | else d[i]=255; 56 | } 57 | } 58 | } 59 | } 60 | void addNoiseMono(Mat& src, Mat& dest,double sigma) 61 | { 62 | Mat s; 63 | src.convertTo(s,CV_16S); 64 | Mat n(s.size(),CV_16S); 65 | randn(n,0,sigma); 66 | Mat temp = s+n; 67 | temp.convertTo(dest,CV_8U); 68 | } 69 | void addNoise(Mat&src, Mat& dest, double sigma,double sprate=0.0) 70 | { 71 | if(src.channels()==1) 72 | { 73 | addNoiseMono(src,dest,sigma); 74 | if(sprate!=0)addNoiseSoltPepperMono(dest,dest,sprate); 75 | return; 76 | } 77 | else 78 | { 79 | vector s; 80 | vector d(src.channels()); 81 | split(src,s); 82 | for(int i=0;isearchWindowSize) 144 | { 145 | cout<<"searchWindowSize should be larger than templeteWindowSize"<>1; 151 | const int sr = searchWindowSize>>1; 152 | const int bb = sr+tr; 153 | const int D = searchWindowSize*searchWindowSize; 
154 | const int H=D/2+1; 155 | //const double div = 1.0/(double)D;//search area div 156 | const int tD = templeteWindowSize*templeteWindowSize; 157 | const double tdiv = 1.0/(double)(tD);//templete square div 158 | 159 | //create large size image for bounding box; 160 | Mat im; 161 | copyMakeBorder(src,im,bb,bb,bb,bb,cv::BORDER_DEFAULT); 162 | 163 | //weight computation; 164 | vector weight(256*256*src.channels()); 165 | double* w = &weight[0]; 166 | const double gauss_sd = (sigma == 0.0) ? h :sigma; 167 | double gauss_color_coeff = -(1.0/(double)(src.channels()))*(1.0/(h*h)); 168 | int emax = INT_MAX; 169 | for(int i = 0; i < 256*256*src.channels(); i++ ) 170 | { 171 | double v = std::exp( max(i-2.0*gauss_sd*gauss_sd,0.0)*gauss_color_coeff); 172 | w[i] = v; 173 | if(v<0.001) 174 | { 175 | emax=i; 176 | break; 177 | } 178 | } 179 | for(int i = emax; i < 256*256*src.channels(); i++ )w[i] = 0.0; 180 | 181 | if(src.channels()==3) 182 | { 183 | const int cstep = im.step-templeteWindowSize*3; 184 | const int csstep = im.step-searchWindowSize*3; 185 | #pragma omp parallel for 186 | for(int j=0;j(r); 249 | d[1] = saturate_cast(g); 250 | d[2] = saturate_cast(b); 251 | d+=3; 252 | }//i 253 | delete[] ww; 254 | delete[] nw; 255 | }//j 256 | } 257 | else if(src.channels()==1) 258 | { 259 | const int cstep = im.step-templeteWindowSize; 260 | const int csstep = im.step-searchWindowSize; 261 | #pragma omp parallel for 262 | for(int j=0;j(v); 322 | }//i 323 | delete[] ww; 324 | delete[] nw; 325 | }//j 326 | } 327 | } 328 | 329 | // ***************************************************************************************** 330 | 331 | int main(int argc, char** argv) 332 | { 333 | //(1) Reading image and add noise(standart deviation = 15) 334 | const double noise_sigma = 15.0; 335 | Mat src = imread(argv[1],1); 336 | Mat snoise; 337 | Mat dest; 338 | addNoise(src,snoise,noise_sigma); 339 | 340 | imshow("Original", src); 341 | 342 | //(2) preview conventional method with PSNR 343 | 
//(2-1) RAW 344 | cout<<"RAW PSNR: "< | } 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2010 School of Engineering, Cranfield University 7 | // Copyright (c) 2016 School of Engineering & Computing Sciences, Durham University 8 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 9 | 10 | #include "opencv2/videoio.hpp" 11 | #include "opencv2/highgui.hpp" 12 | #include "opencv2/imgproc.hpp" 13 | #include "opencv2/photo.hpp" 14 | 15 | #include // standard C++ I/O 16 | #include // standard C++ I/O 17 | #include // includes max() 18 | 19 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 20 | using namespace std; 21 | 22 | /******************************************************************************/ 23 | // setup the cameras properly based on OS platform 24 | 25 | // 0 in linux gives first camera for v4l 26 | //-1 in windows gives first device or user dialog selection 27 | 28 | #ifdef linux 29 | #define CAMERA_INDEX 1 30 | #else 31 | #define CAMERA_INDEX -1 32 | #endif 33 | 34 | /******************************************************************************/ 35 | 36 | // Non Local Mean - lifted directly from: http://opencv.jp/opencv2-x-samples/non-local-means-filter 37 | // Code Credit: @fukushima1981(Twitter) 38 | // Code provided "as is" from original source 39 | 40 | // Reference: 41 | // A. Buades, B. Coll, J.M. Morel “A non local algorithm for image denoising” 42 | // IEEE Computer Vision and Pattern Recognition 2005, Vol 2, pp: 60-65, 2005. 
43 | 44 | static void nonlocalMeansFilter(Mat& src, Mat& dest, int templeteWindowSize, 45 | int searchWindowSize, double h, double sigma=0.0) 46 | { 47 | if(templeteWindowSize>searchWindowSize) 48 | { 49 | std::cout<<"searchWindowSize should be larger than templeteWindowSize"<>1; 55 | const int sr = searchWindowSize>>1; 56 | const int bb = sr+tr; 57 | const int D = searchWindowSize*searchWindowSize; 58 | const int H=D/2+1; 59 | // const double div = 1.0/(double)D;//search area div 60 | const int tD = templeteWindowSize*templeteWindowSize; 61 | const double tdiv = 1.0/(double)(tD);//templete square div 62 | 63 | //create large size image for bounding box; 64 | Mat im; 65 | copyMakeBorder(src,im,bb,bb,bb,bb,cv::BORDER_DEFAULT); 66 | 67 | //weight computation; 68 | vector weight(256*256*src.channels()); 69 | double* w = &weight[0]; 70 | const double gauss_sd = (sigma == 0.0) ? h :sigma; 71 | double gauss_color_coeff = -(1.0/(double)(src.channels()))*(1.0/(h*h)); 72 | int emax = INT_MAX; 73 | for(int i = 0; i < 256*256*src.channels(); i++ ) 74 | { 75 | double v = std::exp( max(i-2.0*gauss_sd*gauss_sd,0.0)*gauss_color_coeff); 76 | w[i] = v; 77 | if(v<0.001) 78 | { 79 | emax=i; 80 | break; 81 | } 82 | } 83 | for(int i = emax; i < 256*256*src.channels(); i++ )w[i] = 0.0; 84 | 85 | if(src.channels()==3) 86 | { 87 | const int cstep = im.step-templeteWindowSize*3; 88 | const int csstep = im.step-searchWindowSize*3; 89 | #pragma omp parallel for 90 | for(int j=0;j(r); 153 | d[1] = saturate_cast(g); 154 | d[2] = saturate_cast(b); 155 | d+=3; 156 | }//i 157 | delete[] ww; 158 | delete[] nw; 159 | }//j 160 | } 161 | else if(src.channels()==1) 162 | { 163 | const int cstep = im.step-templeteWindowSize; 164 | const int csstep = im.step-searchWindowSize; 165 | #pragma omp parallel for 166 | for(int j=0;j(v); 226 | }//i 227 | delete[] ww; 228 | delete[] nw; 229 | }//j 230 | } 231 | } 232 | 233 | /******************************************************************************/ 234 | 
235 | int main( int argc, char** argv ) 236 | { 237 | 238 | Mat img, output; // image objects 239 | VideoCapture cap; // capture object 240 | 241 | const string windowName = "Original"; // window name 242 | const string windowName2 = "Non Local Means Filter"; // window name 243 | 244 | bool keepProcessing = true; // loop control flag 245 | unsigned char key; // user input 246 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 247 | // 40 ms equates to 1000ms/25fps = 40ms per frame 248 | 249 | int64 pre = 0; // timing variable 250 | 251 | // NLM parameters 252 | 253 | int templateWindowSize = 3; 254 | int searchWindowSize = 7; 255 | int h = 3; 256 | int hc = 10; 257 | 258 | // check which version of OpenCV we are using 259 | 260 | std::cout << "OpenCV version" << CV_VERSION 261 | << " (" << CV_MAJOR_VERSION << "." << CV_MINOR_VERSION << "." << CV_SUBMINOR_VERSION << ")" << std::endl; 262 | 263 | 264 | // if command line arguments are provided try to read image/video_name 265 | // otherwise default to capture from attached H/W camera 266 | 267 | if( 268 | ( argc == 2 && (!(img = imread( argv[1], IMREAD_COLOR)).empty()))|| 269 | ( argc == 2 && (cap.open(argv[1]) == true )) || 270 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 271 | ) 272 | { 273 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 274 | 275 | namedWindow(windowName, 1); 276 | namedWindow(windowName2, 1); 277 | 278 | // add trackbars 279 | 280 | createTrackbar("template W", windowName2, &templateWindowSize, 25); 281 | createTrackbar("search W", windowName2, &searchWindowSize, 50); 282 | createTrackbar("h", windowName2, &h, 25); 283 | createTrackbar("hc", windowName2, &hc, 25); 284 | 285 | // start main loop 286 | 287 | while (keepProcessing) { 288 | 289 | int64 timeStart = getTickCount(); // get time at start of loop 290 | 291 | // if capture object in use (i.e. 
video/camera) 292 | // get image from capture object 293 | 294 | if (cap.isOpened()) { 295 | 296 | cap >> img; 297 | if(img.empty()){ 298 | if (argc == 2){ 299 | std::cerr << "End of video file reached" << std::endl; 300 | } else { 301 | std::cerr << "ERROR: cannot get next fram from camera" 302 | << std::endl; 303 | } 304 | exit(0); 305 | } 306 | 307 | } else { 308 | 309 | // if not a capture object set event delay to zero so it waits 310 | // indefinitely (as single image file, no need to loop) 311 | 312 | EVENT_LOOP_DELAY = 0; 313 | } 314 | 315 | if (searchWindowSize <= templateWindowSize) 316 | { 317 | std::cout << "ERROR: search W must be > template W (setting search W = (template W) + 1)" << std::endl; 318 | searchWindowSize = templateWindowSize + 1; 319 | } 320 | 321 | pre = getTickCount(); 322 | 323 | #if ((CV_MAJOR_VERSION >= 2) && (CV_MINOR_VERSION >= 4) && (CV_SUBMINOR_VERSION <= 2)) 324 | 325 | // in OpenCV version 2.4.2 and earlier we use this version 326 | 327 | nonlocalMeansFilter(img,output, templateWindowSize, searchWindowSize, (double) h, (double) h); 328 | 329 | #else 330 | 331 | // use version built-in to later versions of OpenCV 332 | 333 | if (img.channels() == 3) // if RGB then use colour function on L*a*b colour space (see manual) 334 | { 335 | fastNlMeansDenoisingColored(img, output, h, hc, templateWindowSize, searchWindowSize); 336 | } else { 337 | fastNlMeansDenoising(img, output, h, templateWindowSize, searchWindowSize); 338 | } 339 | // Reference: 340 | // A. Buades, B. Coll, J.M. Morel “A non local algorithm for image denoising” 341 | // IEEE Computer Vision and Pattern Recognition 2005, Vol 2, pp: 60-65, 2005. 
342 | 343 | #endif 344 | 345 | std::cout << "time: " << 1000.0*(getTickCount()-pre)/(getTickFrequency()) << " ms" << std::endl; 346 | 347 | // display image in window 348 | 349 | imshow(windowName, img); 350 | imshow(windowName2, output); 351 | 352 | // start event processing loop (very important,in fact essential for GUI) 353 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 354 | 355 | // here we take account of processing time for the loop by subtracting the time 356 | // taken in ms. from this (1000ms/25fps = 40ms per frame) value whilst ensuring 357 | // we get a +ve wait time 358 | 359 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 360 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 361 | 362 | if (key == 'x'){ 363 | 364 | // if user presses "x" then exit 365 | 366 | std::cout << "Keyboard exit requested : exiting now - bye!" 367 | << std::endl; 368 | keepProcessing = false; 369 | } 370 | } 371 | 372 | // the camera will be deinitialized automatically in VideoCapture destructor 373 | 374 | // all OK : main returns 0 375 | 376 | return 0; 377 | } 378 | 379 | // not OK : main returns -1 380 | 381 | return -1; 382 | } 383 | /******************************************************************************/ 384 | -------------------------------------------------------------------------------- /feature_point_matching.cpp: -------------------------------------------------------------------------------- 1 | // Example : feature point matching and homography calculation from camera or video 2 | // usage: prog {} 3 | 4 | // Author : Toby Breckon, toby.breckon@durham.ac.uk 5 | 6 | // Copyright (c) 2011 School of Engineering, Cranfield University 7 | // Copyright (c) 2017 Dept. 
of Computer Science, Durham University 8 | 9 | // requires OpenCV 3.x (or greater) + ** built with extra contrib modules ** 10 | 11 | // License : LGPL - http://www.gnu.org/licenses/lgpl.html 12 | 13 | #include "opencv2/videoio.hpp" 14 | #include "opencv2/highgui.hpp" 15 | #include "opencv2/imgproc.hpp" 16 | #include "opencv2/calib3d.hpp" 17 | #include 18 | #include 19 | 20 | #include // standard C++ I/O 21 | #include // standard C++ I/O 22 | #include // includes max() 23 | 24 | using namespace cv; // OpenCV API is in the C++ "cv" namespace 25 | using namespace cv::xfeatures2d; 26 | using namespace std; 27 | 28 | /******************************************************************************/ 29 | // setup the cameras properly based on OS platform 30 | 31 | // 0 in linux gives first camera for v4l 32 | //-1 in windows gives first device or user dialog selection 33 | 34 | #ifdef linux 35 | #define CAMERA_INDEX 0 36 | #else 37 | #define CAMERA_INDEX -1 38 | #endif 39 | 40 | /******************************************************************************/ 41 | 42 | // callback funtion for mouse to select a region of the image and store that selection 43 | // in global variables origin and selection (acknowledgement: opencv camsiftdemo.cpp) 44 | 45 | static bool selectObject = false; 46 | static Point origin; 47 | static Rect selection; 48 | static bool selectionComplete = false; 49 | 50 | void onMouseSelect( int event, int x, int y, int, void* image) 51 | { 52 | if( selectObject ) 53 | { 54 | selection.x = MIN(x, origin.x); 55 | selection.y = MIN(y, origin.y); 56 | selection.width = std::abs(x - origin.x); 57 | selection.height = std::abs(y - origin.y); 58 | 59 | selection &= Rect(0, 0, ((Mat *) image)->cols, ((Mat *) image)->rows); 60 | } 61 | 62 | switch( event ) 63 | { 64 | case EVENT_LBUTTONDOWN: 65 | origin = Point(x,y); 66 | selection = Rect(x,y,0,0); 67 | selectObject = true; 68 | break; 69 | case EVENT_LBUTTONUP: 70 | selectObject = false; 71 | if( 
selection.width > 0 && selection.height > 0 ) 72 | selectionComplete = true; 73 | break; 74 | } 75 | } 76 | 77 | /******************************************************************************/ 78 | 79 | // Copy (x,y) location of descriptor matches found from KeyPoint data structures into Point2f vectors 80 | 81 | // source: https://github.com/kipr/opencv/blob/master/samples/cpp/brief_match_test.cpp 82 | 83 | static void matches2points(const vector& matches, const vector& kpts_train, 84 | const vector& kpts_query, vector& pts_train, vector& pts_query) 85 | { 86 | pts_train.clear(); 87 | pts_query.clear(); 88 | pts_train.reserve(matches.size()); 89 | pts_query.reserve(matches.size()); 90 | for (size_t i = 0; i < matches.size(); i++) 91 | { 92 | const DMatch& match = matches[i]; 93 | pts_query.push_back(kpts_query[match.queryIdx].pt); 94 | pts_train.push_back(kpts_train[match.trainIdx].pt); 95 | } 96 | } 97 | 98 | /******************************************************************************/ 99 | 100 | int main( int argc, char** argv ) 101 | { 102 | 103 | Mat img, roi, selected, gray, graySelected, output, selectedCopy, transformOverlay; // image objects 104 | VideoCapture cap; // capture object 105 | 106 | const string windowName = "Live Video Input"; // window name 107 | const string windowName2 = "Selected Region / Object"; // window name 108 | const string windowName3 = "Matches"; // window name 109 | 110 | 111 | vector keypointsVideo; // keypoints and descriptors 112 | vector keypointsSelection; 113 | Mat descSelection, descVideo; 114 | // vector matches_internal; 115 | vector > matches; 116 | vector training; 117 | vector detectedPointsVideo; 118 | vector detectedPointsSelection; 119 | 120 | bool keepProcessing = true; // loop control flag 121 | int key; // user input 122 | int EVENT_LOOP_DELAY = 40; // delay for GUI window 123 | // 40 ms equates to 1000ms/25fps = 40ms per frame 124 | 125 | // SURF feature detector with Hessian threshold: 400 using 4 octaves 
over 3 layers 126 | // (the default parameters - see manual for details) 127 | 128 | Ptr detector = SURF::create(); 129 | 130 | // descriptor matcher (k-NN based) - (FLANN = Fast Library for Approximate Nearest Neighbors) 131 | 132 | Ptr matcher = new FlannBasedMatcher(); 133 | 134 | 135 | int threshold = 10; // matching threshold 136 | 137 | Mat H, H_prev; // image to image homography (transformation) 138 | 139 | bool showEllipse = false; // stuff for drawing the ellipse 140 | RotatedRect ellipseFit; 141 | bool drawLivePoints = false; 142 | bool newFeatureType = false; 143 | bool computeHomography = false; 144 | 145 | // set up the features - here using only SURF, SIFT and KAZE and ORB for now 146 | 147 | // TODO - add FREAK, BRISK, ... AKAZE ... etc. 148 | 149 | enum feature_types { SURF, SIFT, KAZE, ORB }; 150 | int feature_types_max = 4; 151 | int currentFeatureType = SURF; 152 | int match_ratio = 7; 153 | 154 | // matches.push_back(matches_internal); 155 | 156 | // if command line arguments are provided try to read image/video_name 157 | // otherwise default to capture from attached H/W camera 158 | 159 | if( 160 | ( argc == 2 && (cap.open(argv[1]) == true )) || 161 | ( argc != 2 && (cap.open(CAMERA_INDEX) == true)) 162 | ) 163 | { 164 | // create window object (use flag=0 to allow resize, 1 to auto fix size) 165 | 166 | namedWindow(windowName, 0); 167 | namedWindow(windowName2, 0); 168 | namedWindow(windowName3, 0); 169 | setMouseCallback( windowName, onMouseSelect, &img); 170 | createTrackbar("ratio (* 0.1)", windowName3, &match_ratio, 10, NULL); 171 | 172 | std::cout << "'e' - toggle ellipse fit for detected points (default: off)" << std::endl; 173 | std::cout << "'p' - toggle drawing for live feature points (default: off)" << std::endl; 174 | std::cout << "'t' - toggle use of varying points & descriptors (default: SURF)" << std::endl; 175 | std::cout << "'h' - compute image to image homography (default: off)" << std::endl; 176 | 177 | // start main loop 
178 | 179 | while (keepProcessing) { 180 | 181 | int64 timeStart = getTickCount(); // get time at start of loop 182 | 183 | // if capture object in use (i.e. video/camera) 184 | // get image from capture object 185 | 186 | if (cap.isOpened()) { 187 | 188 | cap >> img; 189 | if(img.empty()) { 190 | if (argc == 2) { 191 | std::cerr << "End of video file reached" << std::endl; 192 | } else { 193 | std::cerr << "ERROR: cannot get next frame from camera" 194 | << std::endl; 195 | } 196 | exit(0); 197 | } 198 | } 199 | 200 | // convert incoming image to grayscale 201 | 202 | cvtColor(img, gray, COLOR_BGR2GRAY); 203 | 204 | // detect the feature points from the current incoming frame and extract 205 | // corresponding descriptors 206 | 207 | keypointsVideo.clear(); 208 | detector->detect(gray, keypointsVideo); 209 | detector->compute(gray, keypointsVideo, descVideo); 210 | 211 | // match descriptors to selection (if we have a selected object) 212 | 213 | if (!(descSelection.empty())) 214 | { 215 | if (threshold == 0) {threshold = 1; } 216 | matches.clear(); 217 | 218 | // get first and second nearest matches 219 | 220 | matcher->knnMatch(descVideo, matches, 2); 221 | 222 | // filter matches based on match ratio quality 223 | 224 | vector good_matches; 225 | 226 | for (unsigned int i = 0; i < matches.size(); ++i) 227 | { 228 | // match ratio of 1st to 2nd best match 229 | if (matches[i][0].distance < (match_ratio * 0.1) * matches[i][1].distance) 230 | { 231 | good_matches.push_back(matches[i][0]); 232 | } 233 | } 234 | 235 | // draw results on image 236 | 237 | output = Mat::zeros(img.rows, img.cols + selected.cols, img.type()); 238 | drawMatches(gray, keypointsVideo, graySelected, keypointsSelection, good_matches, output, 239 | Scalar(0,255,0), Scalar(-1,-1,-1)); 240 | 241 | // get the matches as points in both images 242 | 243 | matches2points(good_matches, keypointsSelection, keypointsVideo, detectedPointsSelection, detectedPointsVideo); 244 | 245 | // fit ellipse to 
object location 246 | 247 | if (showEllipse) 248 | { 249 | if (detectedPointsSelection.size() > 6) 250 | { 251 | ellipseFit = fitEllipse(Mat(detectedPointsVideo)); 252 | ellipse(output, ellipseFit, Scalar(0, 0, 255), 2, 8); 253 | } 254 | } 255 | 256 | // compute and display homography (mapping on image to another) if selected 257 | 258 | if (computeHomography && (detectedPointsSelection.size() > 5)) 259 | { 260 | // need at least 5 matched pairs of points (more are better) 261 | 262 | Mat H = findHomography(Mat(detectedPointsSelection), Mat(detectedPointsVideo), RANSAC, 2); 263 | transformOverlay = Mat::zeros(output.rows, output.cols, output.type()); 264 | warpPerspective(selectedCopy, transformOverlay, H, transformOverlay.size(), INTER_LINEAR, BORDER_CONSTANT, 0); 265 | addWeighted(output, 0.5, transformOverlay, 0.5, 0, output); 266 | } 267 | 268 | // display result 269 | 270 | imshow(windowName3, output); 271 | 272 | } 273 | 274 | // whist we are selecting or have selected an object 275 | 276 | if( selectObject && selection.width > 0 && selection.height > 0 ) 277 | { 278 | 279 | // invert selection in image whilst selection is taking place 280 | 281 | roi = img(selection); 282 | bitwise_not(roi, roi); 283 | 284 | } else if ((selectionComplete || newFeatureType) && selection.width > 0 && selection.height > 0 ) { 285 | 286 | // once it is complete then make a copy of the selection and extract descriptors 287 | // from detected points 288 | 289 | if (!newFeatureType) { 290 | selected = roi.clone(); 291 | selectedCopy = roi.clone(); 292 | cvtColor(selected, graySelected, COLOR_BGR2GRAY); 293 | } else { 294 | selected = selectedCopy.clone(); 295 | } 296 | 297 | newFeatureType = false; 298 | selectionComplete = false; 299 | 300 | keypointsSelection.clear(); 301 | detector->detect(graySelected, keypointsSelection); 302 | detector->compute(graySelected, keypointsSelection, descSelection); 303 | 304 | // draw the features 305 | 306 | drawKeypoints(graySelected, 
keypointsSelection, selected, 307 | Scalar(255, 0, 0), DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::DRAW_RICH_KEYPOINTS); 308 | 309 | // train the matcher on this example 310 | 311 | training.clear(); 312 | training.push_back(descSelection); 313 | matcher->clear(); 314 | matcher->add(training); 315 | matcher->train(); 316 | 317 | } 318 | 319 | // display image in window (with keypoints if activated) 320 | 321 | if (drawLivePoints) 322 | { 323 | drawKeypoints(gray, keypointsVideo, img, 324 | Scalar(255, 0, 0), DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::DRAW_RICH_KEYPOINTS); 325 | } 326 | 327 | imshow(windowName, img); 328 | if (!(selected.empty())) 329 | { 330 | imshow(windowName2, selected); 331 | } 332 | 333 | // start event processing loop (very important,in fact essential for GUI) 334 | // 40 ms roughly equates to 1000ms/25fps = 4ms per frame 335 | 336 | // here we take account of processing time for the loop by subtracting the time 337 | // taken in ms. from this (1000ms/25fps = 40ms per frame) value whilst ensuring 338 | // we get a +ve wait time 339 | 340 | key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - 341 | (((getTickCount() - timeStart) / getTickFrequency()) * 1000))); 342 | 343 | switch (key) 344 | { 345 | case 'x': 346 | // if user presses "x" then exit 347 | 348 | std::cout << "Keyboard exit requested : exiting now - bye!" 
<< std::endl; 349 | keepProcessing = false; 350 | break; 351 | case 'p': 352 | // if user presses "p" live feature point drawing 353 | 354 | drawLivePoints = (!drawLivePoints); 355 | std::cout << "feature point drawing (live) = " << drawLivePoints << std::endl; 356 | break; 357 | case 'e': 358 | // if user presses "e" toggle ellipse drawing 359 | 360 | showEllipse = (!showEllipse); 361 | std::cout << "Ellipse drawing = " << showEllipse << std::endl; 362 | break; 363 | case 't': 364 | // toggle feature point type 365 | 366 | currentFeatureType++; 367 | currentFeatureType = currentFeatureType % feature_types_max; 368 | 369 | switch (currentFeatureType) 370 | { 371 | case SURF: 372 | detector = SURF::create(); 373 | std::cout << "now using SURF" << std::endl; 374 | matcher = new FlannBasedMatcher(); // reset to Euclid. Kd-tree 375 | break; 376 | case SIFT: 377 | detector = SIFT::create(); 378 | std::cout << "now using SIFT" << std::endl; 379 | break; 380 | case KAZE: 381 | detector = KAZE::create(); 382 | std::cout << "now using KAZE" << std::endl; 383 | break; 384 | case ORB: 385 | detector = ORB::create(); 386 | std::cout << "now using ORB" << std::endl; 387 | 388 | // set matcher to non Euclid. 
hashing approach 389 | 390 | matcher = new FlannBasedMatcher(new flann::LshIndexParams(20, 10, 2)); 391 | break; 392 | 393 | } 394 | 395 | // reset selection and set a new type flag 396 | 397 | newFeatureType = true; 398 | descSelection = Mat(); 399 | 400 | break; 401 | case 'h': 402 | 403 | // compute and display homography transformation from one image to the other 404 | 405 | computeHomography = (!computeHomography); 406 | std::cout << "Compute homography = " << computeHomography << std::endl; 407 | 408 | break; 409 | default: 410 | break; 411 | } 412 | 413 | } 414 | 415 | // pointer objects auto deleted as smart pointers Ptr<> 416 | 417 | // the camera will be deinitialized automatically in VideoCapture destructor 418 | 419 | // all OK : main returns 0 420 | 421 | return 0; 422 | } 423 | 424 | // not OK : main returns -1 425 | 426 | return -1; 427 | } 428 | 429 | /******************************************************************************/ 430 | --------------------------------------------------------------------------------