├── BlueCup.jpg ├── CMakeLists.txt ├── Dockerfile ├── Exercises_11-1-2-5-6-7.cpp ├── Exercises_13_1-2-11.cpp ├── Exercises_13_9.cpp ├── Exercises_5.cpp ├── Exercises_7.cpp ├── Exercises_8_1.cpp ├── Exercises_9_1-2-10-11-12-15-16.cpp ├── Exercises_9_4.cpp ├── Exercises_9_5.cpp ├── HandIndoorColor.jpg ├── HandOutdoorColor.jpg ├── HandOutdoorSunColor.jpg ├── README.md ├── adrian.jpg ├── birdseye ├── .DS_Store ├── IMG_0214.jpg ├── IMG_0214L.jpg ├── IMG_0215.jpg ├── IMG_0215L.jpg ├── IMG_0217.jpg ├── IMG_0217L.jpg ├── IMG_0218.jpg ├── IMG_0218L.jpg ├── IMG_0219.jpg ├── IMG_0219L.jpg ├── IMG_0220.jpg ├── IMG_0220L.jpg └── intrinsics.xml ├── box.png ├── box_in_scene.png ├── calibration ├── IMG_0191.jpg ├── IMG_0192.jpg ├── IMG_0193.jpg ├── IMG_0194.jpg ├── IMG_0195.jpg ├── IMG_0196.jpg ├── IMG_0197.jpg ├── IMG_0198.jpg ├── IMG_0199.jpg ├── IMG_0200.jpg ├── IMG_0201.jpg ├── IMG_0202.jpg ├── IMG_0203.jpg ├── IMG_0204.jpg ├── IMG_0205.jpg ├── IMG_0206.jpg ├── IMG_0207.jpg ├── IMG_0208.jpg ├── IMG_0209.jpg ├── IMG_0210.jpg ├── IMG_0211.jpg ├── IMG_0212.jpg └── IMG_0213.jpg ├── checkerboard9x6.png ├── example_02-01.cpp ├── example_02-02.cpp ├── example_02-03.cpp ├── example_02-04.cpp ├── example_02-05.cpp ├── example_02-06.cpp ├── example_02-07.cpp ├── example_02-08.cpp ├── example_02-09.cpp ├── example_02-10.cpp ├── example_02-11.cpp ├── example_04-01.cpp ├── example_04-02.cpp ├── example_04-03.cpp ├── example_04-04.cpp ├── example_05-01.cpp ├── example_07-01.cpp ├── example_08-01.cpp ├── example_08-02.cpp ├── example_08-03.cpp ├── example_09-01.cpp ├── example_09-02.cpp ├── example_09-03.cpp ├── example_09-04.cpp ├── example_09-05.cpp ├── example_09-06.cpp ├── example_09-07.cpp ├── example_09-08.cpp ├── example_09-09.cpp ├── example_09-10.cpp ├── example_09-11.cpp ├── example_10-01.cpp ├── example_10-02.cpp ├── example_10-03.cpp ├── example_11-01.cpp ├── example_11-02.cpp ├── example_11-03.cpp ├── example_12-01.cpp ├── example_12-02.cpp ├── example_12-03.cpp ├── 
example_12-04.cpp ├── example_13-01.cpp ├── example_13-02.cpp ├── example_13-03.cpp ├── example_14-01.cpp ├── example_14-02.cpp ├── example_14-03.cpp ├── example_14-04.cpp ├── example_15-01.cpp ├── example_15-02.cpp ├── example_15-03.cpp ├── example_15-04.cpp ├── example_15-05.cpp ├── example_15-BackgroundSubtractor.cpp ├── example_16-01-imgA.png ├── example_16-01-imgB.png ├── example_16-01.cpp ├── example_16-02.cpp ├── example_17-01.cpp ├── example_17-02.cpp ├── example_18-01.cpp ├── example_18-01_from_disk.cpp ├── example_19-01.cpp ├── example_19-02.cpp ├── example_19-03.cpp ├── example_19-04.cpp ├── example_20-01.cpp ├── example_20-02.cpp ├── example_21-01.cpp ├── example_22-01.cpp ├── faceScene.jpg ├── faceTemplate.jpg ├── faces.png ├── fruits.jpg ├── haarcascade_frontalcatface.xml ├── haarcascade_frontalcatface_extended.xml ├── haarcascade_frontalface_alt.xml ├── mushroom ├── Index ├── agaricus-lepiota.data ├── agaricus-lepiota.names ├── citation └── expanded.Z ├── shape_sample ├── 1.png ├── 10.png ├── 11.png ├── 12.png ├── 13.png ├── 14.png ├── 15.png ├── 16.png ├── 17.png ├── 18.png ├── 19.png ├── 2.png ├── 20.png ├── 3.png ├── 4.png ├── 5.png ├── 6.png ├── 7.png ├── 8.png └── 9.png ├── stereoData ├── .DS_Store ├── example_19-03_list.txt ├── left01.jpg ├── left02.jpg ├── left03.jpg ├── left04.jpg ├── left05.jpg ├── left06.jpg ├── left07.jpg ├── left08.jpg ├── left09.jpg ├── left10.jpg ├── left11.jpg ├── left12.jpg ├── left13.jpg ├── left14.jpg ├── right01.jpg ├── right02.jpg ├── right03.jpg ├── right04.jpg ├── right05.jpg ├── right06.jpg ├── right07.jpg ├── right08.jpg ├── right09.jpg ├── right10.jpg ├── right11.jpg ├── right12.jpg ├── right13.jpg └── right14.jpg ├── stuff.jpg ├── test.avi └── tree.avi /BlueCup.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/BlueCup.jpg 
-------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | FROM ubuntu:16.04 3 | 4 | #MAINTAINER Gary Bradski 5 | 6 | ######################################################################## 7 | # Running this docker to set up a shared directory and display with the host: 8 | # 9 | # To newly create and build this docker image: 10 | # ============================================ 11 | # 12 | # Create a directory : 13 | # $ mkdir 14 | # Copy this Dockerfile into that directory: 15 | # cp Dockerfile /. 16 | # Move to that directory: 17 | # $ cd 18 | # To build the docker file (might have to run with sudo 19 | # $ sudo docker build -t . 20 | # 21 | # To run the image, or run it again retaining its state 22 | # ===================================================== 23 | # but also exporting display from the container and 24 | # sharing a directory between host and container: 25 | # 26 | # Allow other processes to share the display: 27 | # $ xhost + #Allows or other processes to capture (show) the display 28 | # Now run the docker (Usually $DISPLAY is :0) and allow use of the camera -- you may need sudo privalage 29 | # $ sudo docker run -it -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix \ 30 | # --device /dev/video0 \ 31 | # -v ///:// 32 | # 33 | # ======================================================= 34 | # Handy docker commands: 35 | # List all the docker images 36 | # $ sudo docker ps -a 37 | # If the docker image is stopped (otherwise can skip the first command below if not stopped) 38 | # $ sudo docker start 39 | # $ sudo docker attach 40 | ######################################################################## 41 | # This is a docker file which will, from scratch: 42 | # 43 | # * pull in all the dependencies needed for OpenCV 3.2 including python 2 dependencies 44 | # * pull in OpenCV 3.2 and opencv_contrib and build them 45 | # + 
executable files end up in opencv-3.2.0/build/bin 46 | # * pull in the Learning OpenCV 3 example code and build it 47 | # + executable files end up in Learning_OpenCV-3_examples/build 48 | # * To get to the top level directory, just type: cd 49 | # 50 | # If you just want to do this "by hand" in your home, replace the "RUN"s below with "sudo" 51 | # 52 | # This Docker uses the ubuntu 16.04 version of ffmpeg, which is older than the ones in my other dockerfiles. 53 | # this shouldn't cause you any problems but definitely *DO NOT* use this for generating audiofiles / movies for redistribution. 54 | # 55 | # But it is somewhat less capable than the ones in the ffmpeg containers. 56 | ######################################################################## 57 | 58 | 59 | # First: get all the dependencies: 60 | # 61 | RUN apt-get update 62 | RUN apt-get install -y cmake git libgtk2.0-dev pkg-config libavcodec-dev \ 63 | libavformat-dev libswscale-dev python-dev python-numpy libtbb2 libtbb-dev \ 64 | libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev unzip 65 | 66 | RUN apt-get install -y wget 67 | 68 | # Just get a simple editor for convienience (you could just cancel this line) 69 | RUN apt-get install -y vim 70 | 71 | 72 | # Second: get and build OpenCV 3.2 73 | # 74 | RUN cd \ 75 | && wget https://github.com/opencv/opencv/archive/3.2.0.zip \ 76 | && unzip 3.2.0.zip \ 77 | && cd opencv-3.2.0 \ 78 | && mkdir build \ 79 | && cd build \ 80 | && cmake .. \ 81 | && make -j8 \ 82 | && make install \ 83 | && cd \ 84 | && rm 3.2.0.zip 85 | 86 | 87 | # Third: install and build opencv_contrib 88 | # 89 | RUN cd \ 90 | && wget https://github.com/opencv/opencv_contrib/archive/3.2.0.zip \ 91 | && unzip 3.2.0.zip \ 92 | && cd opencv-3.2.0/build \ 93 | && cmake -DOPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-3.2.0/modules/ .. \ 94 | && make -j8 \ 95 | && make install \ 96 | && cd ../.. 
\ 97 | && rm 3.2.0.zip 98 | 99 | 100 | # Forth: get and build the Learning OpenCV 3 examples: 101 | # I copy the needed data to where the executables will be: opencv-3.2.0/build/bin 102 | # 103 | RUN cd \ 104 | && git clone https://github.com/oreillymedia/Learning-OpenCV-3_examples.git \ 105 | && cd Learning-OpenCV-3_examples \ 106 | && mkdir build \ 107 | && cd build \ 108 | && cmake .. \ 109 | && make -j8 110 | 111 | #You could then run python and do your stuff... 112 | #CMD ["python"] 113 | -------------------------------------------------------------------------------- /Exercises_11-1-2-5-6-7.cpp: -------------------------------------------------------------------------------- 1 | //Exercises at end of Chapter 11 2 | // 1、2、5、6 and 7 3 | #include 4 | #include 5 | 6 | using namespace cv; 7 | using namespace std; 8 | 9 | void help(const char **argv) { 10 | cout << "\n\n" 11 | << "This program solves the Exercises at the end of Chapter 11(without 3、4 and 8)\n" 12 | << "Call:\n" 13 | << argv[0] << " \n\n" 14 | << "For example: " << argv[0] << " /AverageMaleFace.jpg\n" 15 | << endl; 16 | } 17 | 18 | // Get the coordinates of the points after the rotation 19 | Point2f getPointAffinedPos(const Point2f src, const Point2f center, double angle) 20 | { 21 | Point dst; 22 | int x = src.x - center.x; 23 | int y = src.y - center.y; 24 | 25 | dst.x = x * cos(angle) + y * sin(angle) + center.x; 26 | dst.y = -x * sin(angle) + y * cos(angle) + center.y; 27 | return dst; 28 | } 29 | 30 | int main( int argc, const char** argv ) 31 | { 32 | help(argv); 33 | if(argc < 2) { 34 | cout << "\nERROR: You had too few parameters.\n" << endl; 35 | return -1; 36 | } 37 | /************************************************************************/ 38 | /* 1. Find and load a picture of a face where the face is frontal, has eyes open, and 39 | takes up most or all of the image area. Write code to find the pupils of the eyes. 
*/ 40 | /************************************************************************/ 41 | Mat matFrontFace = imread(argv[1]); 42 | Mat gray;Mat temp; 43 | double minPixelValue, maxPixelValue; 44 | Point minPixelPoint,maxPixelPoint; 45 | cvtColor(matFrontFace,gray,COLOR_BGR2GRAY); 46 | // Laplacian pyramid 47 | pyrDown(gray,temp); 48 | pyrUp(temp,temp); 49 | temp = gray - temp; 50 | // find and circle the result 51 | minMaxLoc(temp,&minPixelValue,&maxPixelValue,&minPixelPoint,&maxPixelPoint); 52 | circle(matFrontFace,maxPixelPoint,10,Scalar(255,255,255),2); 53 | /************************************************************************/ 54 | /* 2. Look at the diagrams of how the log-polar function transforms a square into a 55 | wavy line. 56 | a. Draw the log-polar results if the log-polar center point were sitting on one of 57 | the corners of the square. 58 | b. What would a circle look like in a log-polar transform if the center point were 59 | inside the circle and close to the edge? 60 | c. Draw what the transform would look like if the center point were sitting just 61 | outside of the circle. */ 62 | /************************************************************************/ 63 | // draw a circle,white on black. 64 | Mat matLogPolar = Mat(512,512,CV_8U,Scalar(0)); 65 | circle(matLogPolar,Point(256,256),100,Scalar(255),3); 66 | //a 67 | logPolar(matLogPolar,temp,Point(0,0),40,INTER_CUBIC); 68 | //b 69 | logPolar(matLogPolar,temp,Point(256-101,256),40,INTER_CUBIC); 70 | //c the result like two rings 71 | logPolar(matLogPolar,temp,Point(256-101-3,256),40,INTER_CUBIC); 72 | /************************************************************************/ 73 | /* 5. Load an image, take a perspective transform, and then rotate it. Can this trans‐ 74 | form be done in one step? 
*/ 75 | /************************************************************************/ 76 | Mat matE5_1 = imread(argv[1]); 77 | Mat matE5_2 = imread(argv[1]); 78 | double angle = 45; 79 | //perspective matrix 80 | Point2f src_vertices[4]; 81 | src_vertices[0] = Point(0, 0); 82 | src_vertices[1] = Point(matE5_1.cols, 0); 83 | src_vertices[2] = Point(matE5_1.cols, matE5_1.rows); 84 | src_vertices[3] = Point(0, matE5_1.rows); 85 | Point2f dst_vertices[4]; 86 | dst_vertices[0] = Point(100, 0); 87 | dst_vertices[1] = Point(matE5_1.cols - 100, 0); 88 | dst_vertices[2] = Point(matE5_1.cols, matE5_1.rows); 89 | dst_vertices[3] = Point(0, matE5_1.rows); 90 | Mat perspectiveMatrix = getPerspectiveTransform(src_vertices, dst_vertices); 91 | //roate matrix 92 | Mat affineMatrix = getRotationMatrix2D(Point(matE5_1.cols/2,matE5_1.rows/2), angle, 1.0 ); 93 | // perspective -> rotate 94 | warpPerspective( 95 | matE5_1, 96 | matE5_1, 97 | perspectiveMatrix, 98 | matE5_1.size(), 99 | INTER_LINEAR, 100 | BORDER_CONSTANT, 101 | Scalar()); 102 | warpAffine( 103 | matE5_1, 104 | matE5_1, 105 | affineMatrix, 106 | matE5_1.size(), 107 | INTER_LINEAR, 108 | BORDER_CONSTANT, 109 | Scalar() 110 | ); 111 | // do it in one step 112 | Point center = Point(matE5_2.cols/2,matE5_2.rows/2); 113 | dst_vertices[0] = getPointAffinedPos(dst_vertices[0],center,angle); 114 | dst_vertices[1] = getPointAffinedPos(dst_vertices[1],center,angle); 115 | dst_vertices[2] = getPointAffinedPos(dst_vertices[2],center,angle); 116 | dst_vertices[3] = getPointAffinedPos(dst_vertices[3],center,angle); 117 | perspectiveMatrix = getPerspectiveTransform(src_vertices, dst_vertices); 118 | warpPerspective( 119 | matE5_2, 120 | matE5_2, 121 | perspectiveMatrix, 122 | matE5_2.size(), 123 | INTER_LINEAR, 124 | BORDER_CONSTANT, 125 | Scalar()); 126 | // the result is almost the same 127 | /************************************************************************/ 128 | /* 6. 
Inpainting works pretty well for the repair of writing over textured regions. What 129 | would happen if the writing obscured a real object edge in a picture? Try it. */ 130 | /************************************************************************/ 131 | Mat matInpaint = imread(argv[1]); 132 | Mat matInpaintMask = Mat(matInpaint.size(),CV_8UC1,Scalar(0));//the same size,all in black(oh yeah!) 133 | //draw the same circle on the matInpaint and matInpaintMask 134 | circle(matInpaint,Point(255,255),100,Scalar(255),10); 135 | circle(matInpaintMask,Point(255,255),100,Scalar(255),10); 136 | imshow("befor inpaint",matInpaint); 137 | inpaint(matInpaint,matInpaintMask,matInpaint,10,CV_INPAINT_TELEA); 138 | imshow("after inpaint",matInpaint); 139 | //the result is :if "the writing obscured a real object edge in a picture",the result is bad 140 | /************************************************************************/ 141 | /* 7. Practice histogram equalization on images that you load in, and report the 142 | results. 
*/ 143 | /************************************************************************/ 144 | Mat matSrc = imread(argv[1]); 145 | vector planes; 146 | split(matSrc,planes); 147 | imshow("b",planes[0]); 148 | imshow("g",planes[1]); 149 | imshow("r",planes[2]); 150 | cv::equalizeHist(planes[0],planes[0]); 151 | cv::equalizeHist(planes[1],planes[1]); 152 | cv::equalizeHist(planes[2],planes[2]); 153 | imshow("bh",planes[0]); 154 | imshow("gh",planes[1]); 155 | imshow("rh",planes[2]); 156 | waitKey(); 157 | return 0; 158 | 159 | } 160 | -------------------------------------------------------------------------------- /Exercises_13_1-2-11.cpp: -------------------------------------------------------------------------------- 1 | //Exercises_9_4.cpp Exercises at end of Chapter 9 2 | 3 | #include 4 | #include 5 | 6 | using namespace cv; 7 | using namespace std; 8 | 9 | void help(const char **argv) { 10 | cout << "\n\n" 11 | << "This program solves the Exercise 4、5 at the end of Chapter 9 \n" 12 | << "Call:\n" 13 | << argv[0] << " " << " \n\n" 14 | << "For example: ./" << argv[0] << " ../left.jpg "<< " ../left.jpg\n" 15 | << endl; 16 | } 17 | 18 | int main( int argc, const char** argv ) 19 | { 20 | help(argv); 21 | if(argc < 3) { 22 | cout << "\nERROR: You had too few parameters.\n" << endl; 23 | return -1; 24 | } 25 | 26 | /************************************************************************/ 27 | /* 1.In this exercise, we learn to experiment with parameters by setting good low 28 | Thresh and highThresh values in cv::Canny(). Load an image with suitably 29 | interesting line structures. We’ll use three different high:low threshold settings of 30 | 1.5:1, 2.75:1, and 4:1. 31 | a. Report what you see with a high setting of less than 50. 32 | b. Report what you see with high settings between 50 and 100. 33 | c. Report what you see with high settings between 100 and 150. 34 | d. Report what you see with high settings between 150 and 200. 35 | e. 
Report what you see with high settings between 200 and 250. 36 | f. Summarize your results and explain what happens as best you can. */ 37 | /************************************************************************/ 38 | Mat src = imread(argv[1],IMREAD_GRAYSCALE); 39 | Mat dst; 40 | 41 | int iHighThresh = 50; 42 | Canny(src,dst,static_cast(iHighThresh/1.5),iHighThresh); 43 | imshow("iHighThresh is 50 and high:low is 1.5:1",dst); 44 | Canny(src,dst,static_cast(iHighThresh/2.75),iHighThresh); 45 | imshow("iHighThresh is 50 and high:low is 2.75:1",dst); 46 | Canny(src,dst,static_cast(iHighThresh/4),iHighThresh); 47 | imshow("iHighThresh is 50 and high:low is 4:1",dst); 48 | 49 | iHighThresh = (50+100)/2; 50 | Canny(src,dst,static_cast(iHighThresh/1.5),iHighThresh); 51 | imshow("iHighThresh is (50+100)/2 and high:low is 1.5:1",dst); 52 | Canny(src,dst,static_cast(iHighThresh/2.75),iHighThresh); 53 | imshow("iHighThresh is (50+100)/2 and high:low is 2.75:1",dst); 54 | Canny(src,dst,static_cast(iHighThresh/4),iHighThresh); 55 | imshow("iHighThresh is (50+100)/2 and high:low is 4:1",dst); 56 | 57 | iHighThresh = (100+150)/2; 58 | Canny(src,dst,static_cast(iHighThresh/1.5),iHighThresh); 59 | imshow("iHighThresh is (100+150)/2 and high:low is 1.5:1",dst); 60 | Canny(src,dst,static_cast(iHighThresh/2.75),iHighThresh); 61 | imshow("iHighThresh is (100+150)/2 and high:low is 2.75:1",dst); 62 | Canny(src,dst,static_cast(iHighThresh/4),iHighThresh); 63 | imshow("iHighThresh is (100+150)/2 and high:low is 4:1",dst); 64 | 65 | iHighThresh = (150+200)/2; 66 | Canny(src,dst,static_cast(iHighThresh/1.5),iHighThresh); 67 | imshow("iHighThresh is (150+200)/2 and high:low is 1.5:1",dst); 68 | Canny(src,dst,static_cast(iHighThresh/2.75),iHighThresh); 69 | imshow("iHighThresh is (150+200)/2 and high:low is 2.75:1",dst); 70 | Canny(src,dst,static_cast(iHighThresh/4),iHighThresh); 71 | imshow("iHighThresh is (150+200)/2 and high:low is 4:1",dst); 72 | 73 | iHighThresh = (200+250)/2; 74 | 
Canny(src,dst,static_cast(iHighThresh/1.5),iHighThresh); 75 | imshow("iHighThresh is (200+250)/2 and high:low is 1.5:1",dst); 76 | Canny(src,dst,static_cast(iHighThresh/2.75),iHighThresh); 77 | imshow("iHighThresh is (200+250)/2 and high:low is 2.75:1",dst); 78 | Canny(src,dst,static_cast(iHighThresh/4),iHighThresh); 79 | imshow("iHighThresh is (200+250)/2 and high:low is 4:1",dst); 80 | 81 | /************************************************************************/ 82 | /* 2. Load an image containing clear lines and circles such as a side view of a bicycle. 83 | Use the Hough line and Hough circle calls and see how they respond to your 84 | image. */ 85 | /************************************************************************/ 86 | Mat src = imread(argv[1],IMREAD_GRAYSCALE);//a bike in gray 87 | GaussianBlur( src, src, Size(9, 9), 2, 2 ); 88 | Mat temp; 89 | vector linesP; 90 | vector circles; 91 | //first find the canny edge 92 | Canny(src,temp,50,200); 93 | //find lines 94 | HoughLinesP(temp,linesP,1,CV_PI/180,80,50,10); 95 | //find circles 96 | HoughCircles( src, circles, CV_HOUGH_GRADIENT, 1, src.rows/8, 200, 100, 0, 0 ); 97 | //draw lines and circles in the source image 98 | for (int i = 0;i0); 136 | //draw the result 137 | circle(matBoard,maxLoc,1,Scalar(0),1); 138 | circle(matBoard,minLoc,2,Scalar(255),2); 139 | rectangle(matBoard,Rect(maxLoc.x - 10,maxLoc.y - 10,20,20),Scalar(0,0,255),-1); 140 | return 0; 141 | } 142 | 143 | -------------------------------------------------------------------------------- /Exercises_5.cpp: -------------------------------------------------------------------------------- 1 | //Exercises at end of Chapter 5 2 | // 1-6 3 | #include 4 | #include 5 | 6 | using namespace cv; 7 | using namespace std; 8 | 9 | void help(const char **argv) { 10 | cout << "\n\n" 11 | << "This program solves the Exercises at the end of Chapter 5\n" 12 | << "Call:\n" 13 | << argv[0] << " \n\n" 14 | << "For example: ./" << argv[0] << " ../faces.png\n" 
15 | << endl; 16 | } 17 | 18 | 19 | 20 | int main( int argc, const char** argv ) 21 | { 22 | help(argv); 23 | if(argc < 2) { 24 | cout << "\nERROR: You had too few parameters.\n" << endl; 25 | return -1; 26 | } 27 | /************************************************************************/ 28 | /* 1. This exercise will accustom you to the idea of many functions taking matrix 29 | types. Create a two-dimensional matrix with three channels of type byte with 30 | data size 100 × 100. Set all the values to 0. 31 | a. Draw a circle in the matrix using void cv::circle(InputOutputArray img, 32 | cv::point center, intradius, cv::Scalar color, int thickness=1, 33 | int line_type=8, int shift=0). 34 | b. Display this image using methods described in Chapter 2. */ 35 | /************************************************************************/ 36 | Mat m1 = Mat(100,100,CV_8U,Scalar(0)); 37 | // a 38 | cv::circle(m1,Point(m1.cols/2,m1.rows/2),40,Scalar(255)); 39 | // b 40 | cv::imshow("execrise 1",m1); 41 | 42 | /************************************************************************/ 43 | /* 2. Create a two-dimensional matrix with three channels of type byte with data size 44 | 100 × 100, and set all the values to 0. Use the cv::Mat element access functions to 45 | modify the pixels. Draw a green rectangle between (20, 5) and (40, 20). */ 46 | /************************************************************************/ 47 | Mat m2 = Mat(100,100,CV_8UC3,Scalar(0)); 48 | for (int i=0;i=20&&j<=40&&i>=5&&i<=20) 53 | { 54 | m2.at(i,j)[0]=0; //b 55 | m2.at(i,j)[1]=255; //g 56 | m2.at(i,j)[2]=0; //r 57 | } 58 | } 59 | } 60 | cv::imshow("execrise 2",m2); 61 | 62 | /************************************************************************/ 63 | /* 3. Create a three-channel RGB image of size 100 × 100. Clear it. Use pointer arith‐ 64 | metic to draw a green square between (20, 5) and (40, 20). 
*/ 65 | /************************************************************************/ 66 | Mat m3 = Mat(100,100,CV_8UC3,Scalar(0)); 67 | for(int i=0;i(i); 70 | for(int j=0;j=20&&j<=40&&i>=5&&i<=20) 73 | { 74 | outData[j*3+1] = 255; 75 | } 76 | } 77 | } 78 | cv::imshow("execrise 3",m3); 79 | 80 | /************************************************************************/ 81 | /* 4. Practice using region of interest (ROI). Create a 210 × 210 single-channel byte 82 | image and zero it. Within the image, build a pyramid of increasing values using 83 | ROI and cv::Mat::setTo(). That is: the outer border should be 0, the next inner 84 | border should be 20, the next inner border should be 40, and so on until the final 85 | innermost square is set to value 200; all borders should be 10 pixels wide. Display 86 | the image. 87 | /************************************************************************/ 88 | Mat m4 = Mat(210,210,CV_8U,Scalar(0)); 89 | for (int i=0;i<210/2;i=i+10) 90 | { 91 | Mat roi = m4(cv::Rect(i,i,210-i*2,210-i*2)); 92 | roi.setTo(i*2);// roi = i*2; 93 | 94 | } 95 | cv::imshow("execrise 4",m4); 96 | 97 | /************************************************************************/ 98 | /* 5. Use multiple headers for one image. Load an image that is at least 100 × 100. 99 | Create two additional headers that are ROIs where width = 20 and the height = 100 | 30. Their origins should be at (5, 10) and (50, 60), respectively. Pass these new 101 | image subheaders to cv::bitwise_not(). Display the loaded image, which 102 | should have two inverted rectangles within the larger image. 
*/ 103 | /************************************************************************/ 104 | Mat m5 = Mat(100,100,CV_8U,Scalar(0)); 105 | Mat roi1 = m5(Rect(5,10,20,30)); 106 | Mat roi2 = m5(Rect(50,60,20,30)); 107 | bitwise_not(roi1,roi1); 108 | bitwise_not(roi2,roi2); 109 | cv::imshow("execrise 5",m5); 110 | 111 | /************************************************************************/ 112 | /* 6. Create a mask using cv::compare(). Load a real image. Use cv::split() to split 113 | the image into red, green, and blue images. 114 | a. Find and display the green image. 115 | b. Clone this green plane image twice (call these clone1 and clone2). 116 | c. Find the green plane’s minimum and maximum value. 117 | d. Set clone1’s values to thresh = (unsigned char)((maximum - minimum)/ 118 | 2.0). 119 | e. Set clone2 to 0 and use cv::compare (green_image, clone1, clone2, 120 | cv::CMP_GE). Now clone2 will have a mask of where the value exceeds 121 | thresh in the green image. 122 | f. Finally, use cv::subtract (green_image,thresh/2, green_image, 123 | clone2) and display the results. 
*/ 124 | /************************************************************************/ 125 | Mat clone1,clone2; 126 | vector bgr_planes; 127 | Mat src = cv::imread(argv[1],1); 128 | split(src, bgr_planes ); 129 | // a 130 | Mat green = bgr_planes[1]; 131 | imshow("green",green); 132 | // b 133 | clone1 = green.clone(); 134 | clone2 = green.clone(); 135 | // c 136 | double minPixelValue, maxPixelValue; 137 | int minPixelID,maxPixelID; 138 | cv::minMaxIdx(green, &minPixelValue, &maxPixelValue,&minPixelID,&maxPixelID); 139 | // d 140 | double thresh= (unsigned char)((maxPixelValue - minPixelValue)/2.0); 141 | int ithresh = (int)thresh; 142 | clone1 = Mat(clone1.size(),clone1.type(),Scalar(ithresh)); 143 | // e 144 | clone2 = Mat(clone2.size(),clone2.type(),Scalar(0)); 145 | compare(green,clone1,clone2,cv::CMP_GE); 146 | // f 147 | cv::subtract(green,thresh/2,green,clone2); 148 | imshow("execrise 6",clone2); 149 | cout << "6" << endl; 150 | waitKey(-1); //Wait here until any key pressed 151 | return 0; 152 | } 153 | -------------------------------------------------------------------------------- /Exercises_7.cpp: -------------------------------------------------------------------------------- 1 | //Exercises at end of Chapter 7 2 | // 1-4 3 | #include 4 | #include 5 | 6 | using namespace cv; 7 | using namespace std; 8 | 9 | 10 | void help(const char **argv) { 11 | cout << "\n\n" 12 | << "This program solves the Exercises at the end of Chapter 7\n" 13 | << endl; 14 | } 15 | 16 | 17 | int main( int argc, const char** argv ) 18 | { 19 | help(argv); 20 | 21 | /************************************************************************/ 22 | /* 1. Using the cv::RNG random number generator: 23 | a. Generate and print three floating-point numbers, each drawn from a uniform 24 | distribution from 0.0 to 1.0. 25 | b. Generate and print three double-precision numbers, each drawn from a 26 | Gaussian distribution centered at 0.0 and with a standard deviation of 1.0. 27 | c. 
Generate and print three unsigned bytes, each drawn from a uniform distri‐ 28 | bution from 0 to 255. 29 | /************************************************************************/ 30 | RNG rng = theRNG(); 31 | // a 32 | float f1 = rng.uniform(0.f,1.f); 33 | float f2 = rng.uniform(0.f,1.f); 34 | float f3 = rng.uniform(0.f,1.f); 35 | cout<<" f1 " << f1 <<" f2 "< planes; 80 | split(matInt100,planes); 81 | rng.fill(planes[0],RNG::NORMAL,64,10); 82 | rng.fill(planes[1],RNG::NORMAL,192,10); 83 | // b 84 | rng.fill(planes[2],RNG::NORMAL,128,2); 85 | // c 86 | PCA pca(planes[0],Mat(),CV_PCA_DATA_AS_ROW,2); 87 | planes[0] = pca.project(planes[0]); 88 | pca(planes[1],Mat(),CV_PCA_DATA_AS_ROW,2); 89 | planes[1] = pca.project(planes[1]); 90 | pca(planes[2],Mat(),CV_PCA_DATA_AS_ROW,2); 91 | planes[2] = pca.project(planes[2]); 92 | //d 93 | f1 = 0; 94 | f2 = 0; 95 | f3 = 0; 96 | for (int i = 0;i<100;i++) 97 | { 98 | f1 += planes[0].at(i,0); 99 | f2 += planes[1].at(i,0); 100 | f3 += planes[2].at(i,0); 101 | } 102 | f1 = f1/100; 103 | f2 = f2/100; 104 | f3 = f3/100; 105 | /************************************************************************/ 106 | /* 4. 
page 206 at 《leanring Opencv 3.0》 107 | /************************************************************************/ 108 | Matx32d AX(1, 1, 109 | 0, 1, 110 | -1 ,1); 111 | Mat A = static_cast(AX); 112 | Mat U, W, V; 113 | SVD::compute(A, W, U, V); 114 | 115 | waitKey(); 116 | getchar(); 117 | return 0; 118 | 119 | } 120 | -------------------------------------------------------------------------------- /Exercises_8_1.cpp: -------------------------------------------------------------------------------- 1 | //Exercises_8_1.cpp Exercises at end of Chapter 8 2 | //1 3 | #include 4 | #include 5 | 6 | using namespace cv; 7 | using namespace std; 8 | 9 | 10 | void help(const char **argv) { 11 | cout << "\n\n" 12 | << "This program solves the Exercise 1 at the end of Chapter 8 \n" 13 | << "Call:\n" 14 | << argv[0] << " \n\n" 15 | << "For example: ./" << argv[0] << " ../tree.avi\n" 16 | << endl; 17 | } 18 | 19 | int main( int argc, const char** argv ) 20 | { 21 | help(argv); 22 | if(argc < 2) { 23 | cout << "\nERROR: You had too few parameters.\n" << endl; 24 | return -1; 25 | } 26 | /************************************************************************/ 27 | /* 1. Create a program that 28 | (1) reads frames from a video, 29 | (2) turns the result to gray‐scale, and 30 | (3) performs Canny edge detection on the image. 31 | Display all three stages of processing in three different windows, with each window appropriately 32 | named for its function. 33 | a. Display all three stages of processing in one image. (Hint: create another 34 | image of the same height but three times the width as the video frame. Copy 35 | the images into this, either by using pointers or (more cleverly) by creating 36 | three new image headers that point to the beginning of and to one-third and 37 | two-thirds of the way into the imageData. Then use Mat::copyTo().) 38 | b. 
Write appropriate text labels describing the processing in each of the three 39 | slots.*/ 40 | /************************************************************************/ 41 | 42 | VideoCapture capture; 43 | if(!capture.open(argv[1])){ 44 | cout << "Could not open " << argv[1] << endl; 45 | return 1; 46 | } 47 | double rate=capture.get(CV_CAP_PROP_FPS); 48 | Mat MatFrame; 49 | Mat MatGray; 50 | Mat MatCanny; 51 | int delay=1000/rate; 52 | cout << "rate = " << rate << ", delay = " << delay << endl; 53 | cout << "\nEsq to exit, or let it run out, then any key to release capture and exit.\n" << endl; 54 | int frame_count = 0; 55 | while(1) 56 | { 57 | capture >> MatFrame; 58 | if( !MatFrame.data ) { 59 | cout << "Done with capture" << endl; 60 | break; 61 | } 62 | 63 | //(1) 64 | imshow("Raw Video",MatFrame); 65 | //(2) 66 | cvtColor(MatFrame,MatGray,COLOR_BGR2GRAY); 67 | imshow("Gray Video",MatGray); 68 | //(3) 69 | Canny(MatGray,MatCanny,100,255); 70 | imshow("Canny Video",MatCanny); 71 | //question a 72 | Mat MatAll(MatFrame.rows,3* MatFrame.cols ,CV_8UC3,Scalar::all(0)); 73 | cvtColor(MatGray,MatGray,COLOR_GRAY2BGR); 74 | cvtColor(MatCanny,MatCanny,COLOR_GRAY2BGR); 75 | Mat MatSub = MatAll.colRange(0, MatFrame.cols); 76 | MatFrame.copyTo(MatSub); 77 | MatSub = MatAll.colRange( MatFrame.cols,2*MatFrame.cols); 78 | MatGray.copyTo(MatSub); 79 | MatSub = MatAll.colRange(2*MatFrame.cols,3*MatFrame.cols); 80 | MatCanny.copyTo(MatSub); 81 | //question b 82 | Scalar color = CV_RGB(255,0,0); 83 | putText(MatAll,"raw video",Point(50,30),CV_FONT_HERSHEY_DUPLEX,1.0f,color); 84 | putText(MatAll,"gray video",Point(50+MatFrame.cols,30),CV_FONT_HERSHEY_DUPLEX,1.0f,color); 85 | putText(MatAll,"canny video",Point(50+2*MatFrame.cols,30),CV_FONT_HERSHEY_DUPLEX,1.0f,color); 86 | imshow("all Video",MatAll); 87 | 88 | if ((cv::waitKey(delay) & 255) == 27) 89 | break; 90 | } 91 | waitKey(); 92 | capture.release(); 93 | return 0; 94 | 95 | } 96 | 
-------------------------------------------------------------------------------- /Exercises_9_4.cpp: -------------------------------------------------------------------------------- 1 | //Exercises_9_4.cpp Exercises at end of Chapter 9 2 | #include 3 | #include 4 | 5 | using namespace cv; 6 | using namespace std; 7 | 8 | 9 | void help(const char **argv) { 10 | cout << "\n\n" 11 | << "This program solves the Exercise 4、5 at the end of Chapter 9 \n" 12 | << "Call:\n" 13 | << argv[0] << " " << " \n\n" 14 | << "For example: ./" << argv[0] << " ../left.jpg "<< " ../left.jpg\n" 15 | << endl; 16 | } 17 | 18 | int main( int argc, const char** argv ) 19 | { 20 | help(argv); 21 | if(argc < 3) { 22 | cout << "\nERROR: You had too few parameters.\n" << endl; 23 | return -1; 24 | } 25 | Mat temp; 26 | 27 | /************************************************************************/ 28 | /* 4. Use a camera to take two pictures of the same scene while moving the camera as 29 | little as possible. Load these images into the computer as src1 and src1. 30 | a. Take the absolute value of src1 minus src1 (subtract the images); call it 31 | diff12 and display. If this were done perfectly, diff12 would be black. Why 32 | isn’t it? 33 | b. Create cleandiff by using cv::erode() and then cv::dilate() on diff12. 34 | Display the results. 35 | c. Create dirtydiff by using cv::dilate() and then cv::erode() on diff12 36 | and then display. 37 | d. Explain the difference between cleandiff and dirtydiff. 
*/ 38 | /************************************************************************/ 39 | Mat src1 = imread(argv[1],IMREAD_GRAYSCALE); 40 | Mat src2 = imread(argv[2],IMREAD_GRAYSCALE); 41 | if (src1.empty() || src2.empty()) 42 | { 43 | cout << "\nERROR: parameters is not a image name.\n" << endl; 44 | return -1; 45 | } 46 | //a 47 | Mat diff12 = src1 - src2; 48 | imshow("diff12",diff12); 49 | //b 50 | Mat cleandiff; 51 | erode(diff12,cleandiff,Mat()); 52 | dilate(cleandiff,cleandiff,Mat()); 53 | imshow("cleandiff",cleandiff); 54 | //c 55 | Mat dirtydiff; 56 | dilate(diff12,dirtydiff,Mat()); 57 | erode(dirtydiff,dirtydiff,Mat()); 58 | imshow("dirtydiff",dirtydiff); 59 | //d 60 | absdiff(cleandiff,dirtydiff,temp) ; 61 | imshow("absdiff",temp); 62 | //the difference between cleandiff and dirtydiff is the difference between of "open" and "close" 63 | waitKey(); 64 | return 0; 65 | 66 | } 67 | -------------------------------------------------------------------------------- /Exercises_9_5.cpp: -------------------------------------------------------------------------------- 1 | //Exercises_9_5.cpp Exercises at end of Chapter 9 2 | #include 3 | #include 4 | 5 | using namespace cv; 6 | using namespace std; 7 | 8 | 9 | void help(const char **argv) { 10 | cout << "\n\n" 11 | << "This program solves the Exercise 4、5 at the end of Chapter 9 \n" 12 | << "Call:\n" 13 | << argv[0] << " " << " \n\n" 14 | << "For example: ./" << argv[0] << " ../left.jpg "<< " ../left.jpg\n" 15 | << endl; 16 | } 17 | 18 | int main( int argc, const char** argv ) 19 | { 20 | help(argv); 21 | if(argc < 3) { 22 | cout << "\nERROR: You had too few parameters.\n" << endl; 23 | return -1; 24 | } 25 | Mat temp; 26 | Mat temp2; 27 | /************************************************************************/ 28 | /* 5. Create an outline of an object. Take a picture of a scene. Then, without moving 29 | the camera, put a coffee cup in the scene and take a second picture. 
Load these 30 | images and convert both to 8-bit grayscale images. 31 | a. Take the absolute value of their difference. Display the result, which should 32 | look like a noisy mask of a coffee mug. 33 | b. Do a binary threshold of the resulting image using a level that preserves most 34 | of the coffee mug but removes some of the noise. Display the result. The “on” 35 | values should be set to 255. 36 | c. Do a cv::MOP_OPEN on the image to further clean up noise. 37 | d. Using the erosion operator and logical XOR function, turn the mask of the 38 | coffee cup image into an outline of the coffee cup (only the edge pixels 39 | remaining). */ 40 | /************************************************************************/ 41 | Mat matMug = imread(argv[1],IMREAD_GRAYSCALE); 42 | Mat matNoMug = imread(argv[2],IMREAD_GRAYSCALE); 43 | if (matMug.empty() || matNoMug.empty()) 44 | { 45 | cout << "\nERROR: parameters is not a image name.\n" << endl; 46 | return -1; 47 | } 48 | //a 49 | absdiff(matMug,matNoMug,temp); 50 | imshow("absolute value of mug ",temp); 51 | //b 52 | threshold(temp,temp,100,255,THRESH_OTSU); 53 | imshow("a binary threshold",temp); 54 | //c 55 | morphologyEx(temp,temp,cv::MORPH_OPEN,Mat()); 56 | imshow("MORPH_OPEN",temp); 57 | //d 58 | erode(temp,temp,Mat()); 59 | matMug.copyTo(temp2,temp); 60 | imshow("an outline of the coffee cup",temp2); 61 | waitKey(); 62 | return 0; 63 | 64 | } 65 | -------------------------------------------------------------------------------- /HandIndoorColor.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/HandIndoorColor.jpg -------------------------------------------------------------------------------- /HandOutdoorColor.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/HandOutdoorColor.jpg -------------------------------------------------------------------------------- /HandOutdoorSunColor.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/HandOutdoorSunColor.jpg -------------------------------------------------------------------------------- /adrian.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/adrian.jpg -------------------------------------------------------------------------------- /birdseye/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/.DS_Store -------------------------------------------------------------------------------- /birdseye/IMG_0214.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0214.jpg -------------------------------------------------------------------------------- /birdseye/IMG_0214L.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0214L.jpg -------------------------------------------------------------------------------- /birdseye/IMG_0215.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0215.jpg -------------------------------------------------------------------------------- /birdseye/IMG_0215L.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0215L.jpg -------------------------------------------------------------------------------- /birdseye/IMG_0217.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0217.jpg -------------------------------------------------------------------------------- /birdseye/IMG_0217L.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0217L.jpg -------------------------------------------------------------------------------- /birdseye/IMG_0218.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0218.jpg -------------------------------------------------------------------------------- /birdseye/IMG_0218L.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0218L.jpg -------------------------------------------------------------------------------- /birdseye/IMG_0219.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0219.jpg -------------------------------------------------------------------------------- /birdseye/IMG_0219L.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0219L.jpg -------------------------------------------------------------------------------- /birdseye/IMG_0220.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0220.jpg -------------------------------------------------------------------------------- /birdseye/IMG_0220L.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/birdseye/IMG_0220L.jpg -------------------------------------------------------------------------------- /birdseye/intrinsics.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 1600 4 | 1200 5 | 6 | 3 7 | 3 8 |
d
9 | 10 | 1.7473845059199218e+03 0. 800. 0. 1.7523330232672765e+03 600. 0. 0. 11 | 1.
12 | 13 | 1 14 | 5 15 |
d
16 | 17 | 1.0558825619798969e-01 -1.2250501555283355e+00 0. 0. 18 | 4.2302514361517840e+00
19 |
20 | -------------------------------------------------------------------------------- /box.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/box.png -------------------------------------------------------------------------------- /box_in_scene.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/box_in_scene.png -------------------------------------------------------------------------------- /calibration/IMG_0191.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0191.jpg -------------------------------------------------------------------------------- /calibration/IMG_0192.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0192.jpg -------------------------------------------------------------------------------- /calibration/IMG_0193.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0193.jpg -------------------------------------------------------------------------------- /calibration/IMG_0194.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0194.jpg 
-------------------------------------------------------------------------------- /calibration/IMG_0195.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0195.jpg -------------------------------------------------------------------------------- /calibration/IMG_0196.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0196.jpg -------------------------------------------------------------------------------- /calibration/IMG_0197.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0197.jpg -------------------------------------------------------------------------------- /calibration/IMG_0198.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0198.jpg -------------------------------------------------------------------------------- /calibration/IMG_0199.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0199.jpg -------------------------------------------------------------------------------- /calibration/IMG_0200.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0200.jpg 
-------------------------------------------------------------------------------- /calibration/IMG_0201.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0201.jpg -------------------------------------------------------------------------------- /calibration/IMG_0202.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0202.jpg -------------------------------------------------------------------------------- /calibration/IMG_0203.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0203.jpg -------------------------------------------------------------------------------- /calibration/IMG_0204.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0204.jpg -------------------------------------------------------------------------------- /calibration/IMG_0205.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0205.jpg -------------------------------------------------------------------------------- /calibration/IMG_0206.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0206.jpg 
-------------------------------------------------------------------------------- /calibration/IMG_0207.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0207.jpg -------------------------------------------------------------------------------- /calibration/IMG_0208.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0208.jpg -------------------------------------------------------------------------------- /calibration/IMG_0209.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0209.jpg -------------------------------------------------------------------------------- /calibration/IMG_0210.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0210.jpg -------------------------------------------------------------------------------- /calibration/IMG_0211.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0211.jpg -------------------------------------------------------------------------------- /calibration/IMG_0212.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0212.jpg 
-------------------------------------------------------------------------------- /calibration/IMG_0213.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/calibration/IMG_0213.jpg -------------------------------------------------------------------------------- /checkerboard9x6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/checkerboard9x6.png -------------------------------------------------------------------------------- /example_02-01.cpp: -------------------------------------------------------------------------------- 1 | //Example 2-1. A simple OpenCV program that loads an image from disk and displays it 2 | //on the screen 3 | #include 4 | 5 | void help(char** argv ) { 6 | std::cout << "\n" 7 | << "A simple OpenCV program that loads and displays an image from disk\n" 8 | << argv[0] <<" \n" 9 | << "For example:\n" 10 | << argv[0] << " ../fruits.jpg\n" 11 | << std::endl; 12 | } 13 | 14 | 15 | int main( int argc, char** argv ) { 16 | 17 | if (argc != 2) { 18 | help(argv); 19 | return 0; 20 | } 21 | 22 | 23 | cv::Mat img = cv::imread( argv[1], -1 ); 24 | 25 | if( img.empty() ) return -1; 26 | 27 | cv::namedWindow( "Example 2-1", cv::WINDOW_AUTOSIZE ); 28 | cv::imshow( "Example 2-1", img ); 29 | cv::waitKey( 0 ); 30 | cv::destroyWindow( "Example 2-1" ); 31 | 32 | return 0; 33 | } 34 | -------------------------------------------------------------------------------- /example_02-02.cpp: -------------------------------------------------------------------------------- 1 | // Example 2-2. 
Same as Example 2-1 but employing the “using namespace” directive 2 | 3 | #include "opencv2/highgui/highgui.hpp" 4 | #include 5 | 6 | using namespace cv; 7 | 8 | void help(char** argv ) { 9 | std::cout << "\n" 10 | << "2.2: Like 2.1, but 'using namespace cv: \n" 11 | << argv[0] <<" \n" 12 | << "For example:\n" 13 | << argv[0] << " ../fruits.jpg\n" 14 | << std::endl; 15 | } 16 | 17 | 18 | int main( int argc, char** argv ) { 19 | 20 | if (argc != 2) { 21 | help(argv); 22 | return 0; 23 | } 24 | 25 | Mat img = imread( argv[1], -1 ); 26 | 27 | if( img.empty() ) return -1; 28 | 29 | namedWindow( "Example 2-2", cv::WINDOW_AUTOSIZE ); 30 | 31 | imshow( "Example 2-2", img ); 32 | 33 | waitKey( 0 ); 34 | 35 | destroyWindow( "Example 2-2" ); 36 | } 37 | -------------------------------------------------------------------------------- /example_02-03.cpp: -------------------------------------------------------------------------------- 1 | // Example 2-3. A simple OpenCV program for playing a video file from disk 2 | 3 | #include "opencv2/highgui/highgui.hpp" 4 | #include "opencv2/imgproc/imgproc.hpp" 5 | #include 6 | 7 | using namespace std; 8 | void help(char** argv ) { 9 | std::cout << "\n" 10 | << "2-03: play video from disk \n" 11 | << argv[0] <<" \n" 12 | << "For example:\n" 13 | << argv[0] << " ../tree.avi\n" 14 | << std::endl; 15 | } 16 | 17 | 18 | int main( int argc, char** argv ) { 19 | 20 | if (argc != 2) { 21 | help(argv); 22 | return 0; 23 | } 24 | 25 | cv::namedWindow( "Example 2-3", cv::WINDOW_AUTOSIZE ); 26 | 27 | cv::VideoCapture cap; 28 | 29 | cap.open( string(argv[1]) ); 30 | cout <<"Opened file: " <> frame; 37 | 38 | if( frame.empty() ) break; // Ran out of film 39 | 40 | cv::imshow( "Example 2-3", frame ); 41 | 42 | if( (char)cv::waitKey(33) >= 0 ) break; 43 | 44 | // int c = cv::waitKey(33); 45 | // for(int i=0;i<32;i++) { 46 | // cout <<((c&(0x1<<(31-i)))?1:0); 47 | // } 48 | // cout <= 0 ) { 51 | // break; 52 | // } 53 | 54 | } 55 | 56 | return 0; 57 | 58 
| } 59 | -------------------------------------------------------------------------------- /example_02-04.cpp: -------------------------------------------------------------------------------- 1 | //Example 2-4. Adding a trackbar slider to the basic viewer window for moving around 2 | //within the video file 3 | 4 | #include "opencv2/highgui/highgui.hpp" 5 | #include "opencv2/imgproc/imgproc.hpp" 6 | #include 7 | #include 8 | 9 | using namespace std; 10 | 11 | int g_slider_position = 0; 12 | int g_run = 1, g_dontset = 0; //start out in single step mode 13 | cv::VideoCapture g_cap; 14 | 15 | void onTrackbarSlide( int pos, void *) { 16 | 17 | g_cap.set( CV_CAP_PROP_POS_FRAMES, pos ); 18 | 19 | if( !g_dontset ) g_run = 1; 20 | 21 | g_dontset = 0; 22 | 23 | } 24 | 25 | 26 | void help(char** argv ) { 27 | std::cout << "\n" 28 | << "2-04: Addeing a trackbar to a basic viewer for moving w/in the video file \n" 29 | << argv[0] <<" \n" 30 | << "For example:\n" 31 | << argv[0] << " ../tree.avi\n" 32 | << std::endl; 33 | } 34 | 35 | 36 | int main( int argc, char** argv ) { 37 | 38 | if (argc != 2) { 39 | help(argv); 40 | return 0; 41 | } 42 | 43 | cv::namedWindow( "Example 2-4", cv::WINDOW_AUTOSIZE ); 44 | 45 | g_cap.open( string(argv[1]) ); 46 | 47 | int frames = (int) g_cap.get( CV_CAP_PROP_FRAME_COUNT ); 48 | int tmpw = (int) g_cap.get( CV_CAP_PROP_FRAME_WIDTH ); 49 | int tmph = (int) g_cap.get( CV_CAP_PROP_FRAME_HEIGHT ); 50 | 51 | cout << "Video has " << frames << " frames of dimensions(" 52 | << tmpw << ", " << tmph << ")." 
<< endl; 53 | 54 | cv::createTrackbar( 55 | "Position", 56 | "Example 2-4", 57 | &g_slider_position, 58 | frames, 59 | onTrackbarSlide 60 | ); 61 | cv::Mat frame; 62 | 63 | for(;;) { 64 | 65 | if( g_run != 0 ) { 66 | g_cap >> frame; 67 | if(frame.empty()) break; 68 | int current_pos = (int)g_cap.get( CV_CAP_PROP_POS_FRAMES ); 69 | g_dontset = 1; 70 | 71 | cv::setTrackbarPos("Position", "Example 2-4", current_pos); 72 | cv::imshow( "Example 2-4", frame ); 73 | g_run-=1; 74 | } 75 | 76 | char c = (char) cv::waitKey(10); 77 | 78 | if( c == 's' ) { // single step 79 | g_run = 1; 80 | cout << "Single step, run = " << g_run << endl; 81 | } 82 | 83 | if( c == 'r' ) { // run mode 84 | g_run = -1; 85 | cout << "Run mode, run = " << g_run < 4 | 5 | 6 | 7 | void help(char** argv ) { 8 | std::cout << "\n" 9 | << "2-05: load and smooth an image before displaying \n" 10 | << argv[0] <<" \n" 11 | << "For example:\n" 12 | << argv[0] << " ../tree.avi\n" 13 | << std::endl; 14 | } 15 | 16 | 17 | int main( int argc, char** argv ) { 18 | 19 | if (argc != 2) { 20 | help(argv); 21 | return 0; 22 | } 23 | 24 | // Load an image specified on the command line. 25 | // 26 | cv::Mat image = cv::imread(argv[1],-1); 27 | 28 | // Create some windows to show the input 29 | // and output images in. 30 | // 31 | cv::namedWindow( "Example 2-5-in", cv::WINDOW_AUTOSIZE ); 32 | cv::namedWindow( "Example 2-5-out", cv::WINDOW_AUTOSIZE ); 33 | 34 | // Create a window to show our input image 35 | // 36 | cv::imshow( "Example 2-5-in", image ); 37 | 38 | // Create an image to hold the smoothed output 39 | // 40 | cv::Mat out; 41 | 42 | // Do the smoothing 43 | // ( Note: Could use GaussianBlur(), blur(), medianBlur() or 44 | // bilateralFilter(). 
) 45 | // 46 | cv::GaussianBlur( image, out, cv::Size(5,5), 3, 3); 47 | cv::GaussianBlur( out, out, cv::Size(5,5), 3, 3); 48 | 49 | // Show the smoothed image in the output window 50 | // 51 | cv::imshow( "Example 2-5-out", out ); 52 | 53 | // Wait for the user to hit a key, windows will self destruct 54 | // 55 | cv::waitKey( 0 ); 56 | 57 | } 58 | -------------------------------------------------------------------------------- /example_02-06.cpp: -------------------------------------------------------------------------------- 1 | // Example 2-6. Using cv::pyrDown() to create a new image that is half the width and 2 | // height of the input image 3 | #include 4 | 5 | void help(char** argv ) { 6 | std::cout << "\n" 7 | << "2-06: AUsing cv::pyrDown() to create a new image that is half the width and" 8 | << " height of the input image\n" 9 | << argv[0] <<" \n" 10 | << "For example:\n" 11 | << argv[0] << " ../faces.png\n" 12 | << std::endl; 13 | } 14 | 15 | 16 | int main( int argc, char** argv ) { 17 | 18 | if (argc != 2) { 19 | help(argv); 20 | return 0; 21 | } 22 | 23 | cv::Mat img1,img2; 24 | 25 | cv::namedWindow( "Example 2-6-in", cv::WINDOW_AUTOSIZE ); 26 | cv::namedWindow( "Example 2-6-out", cv::WINDOW_AUTOSIZE ); 27 | 28 | img1 = cv::imread( argv[1] ); 29 | 30 | cv::imshow( "Example 2-6-in", img1 ); 31 | cv::pyrDown( img1, img2); 32 | 33 | cv::imshow( "Example 2-6-out", img2 ); 34 | cv::waitKey(0); 35 | 36 | return 0; 37 | 38 | }; 39 | -------------------------------------------------------------------------------- /example_02-07.cpp: -------------------------------------------------------------------------------- 1 | // Example 2-7. The Canny edge detector writes its output to a single-channel (grayscale) image 2 | // 2 3 | #include 4 | 5 | void help(char** argv ) { 6 | std::cout << "\n" 7 | << "\nExample 2-7. 
The Canny edge detector writes its output to a single-channel (grayscale) image" 8 | << "\nCall:\n" 9 | << argv[0] <<" \n" 10 | << "For example:\n" 11 | << argv[0] << " ../fruits.jpg\n" 12 | << std::endl; 13 | } 14 | 15 | 16 | int main( int argc, char** argv ) { 17 | 18 | if (argc != 2) { 19 | help(argv); 20 | return 0; 21 | } 22 | 23 | cv::Mat img_rgb, img_gry, img_cny; 24 | 25 | cv::namedWindow( "Example Gray", cv::WINDOW_AUTOSIZE ); 26 | cv::namedWindow( "Example Canny", cv::WINDOW_AUTOSIZE ); 27 | 28 | img_rgb = cv::imread( argv[1] ); 29 | 30 | cv::cvtColor( img_rgb, img_gry, cv::COLOR_BGR2GRAY); 31 | cv::imshow( "Example Gray", img_gry ); 32 | 33 | cv::Canny( img_gry, img_cny, 10, 100, 3, true ); 34 | cv::imshow( "Example Canny", img_cny ); 35 | 36 | cv::waitKey(0); 37 | 38 | } 39 | -------------------------------------------------------------------------------- /example_02-08.cpp: -------------------------------------------------------------------------------- 1 | // Example 2-8. Combining the pyramid down operator (twice) and the Canny 2 | // subroutine in a simple image pipeline 3 | //2 4 | #include 5 | 6 | void help(char** argv ) { 7 | std::cout << "\n" 8 | << "\nExample 2-8. 
Combining the pyramid down operator (twice) and the Canny" 9 | << "\n subroutine in a simple image pipeline" 10 | << "\nCall:\n" 11 | << argv[0] <<" \n" 12 | << "For example:\n" 13 | << argv[0] << " ../fruits.jpg\n" 14 | << std::endl; 15 | } 16 | 17 | 18 | int main( int argc, char** argv ) { 19 | 20 | if (argc != 2) { 21 | help(argv); 22 | return 0; 23 | } 24 | 25 | cv::Mat img_rgb, img_gry, img_cny, img_pyr, img_pyr2; 26 | 27 | cv::namedWindow( "Example Gray", cv::WINDOW_AUTOSIZE ); 28 | cv::namedWindow( "Example Canny", cv::WINDOW_AUTOSIZE ); 29 | 30 | img_rgb = cv::imread( argv[1] ); 31 | 32 | cv::cvtColor( img_rgb, img_gry, cv::COLOR_BGR2GRAY); 33 | 34 | cv::pyrDown( img_gry, img_pyr ); 35 | cv::pyrDown( img_pyr, img_pyr2 ); 36 | 37 | cv::Canny( img_pyr2, img_cny, 10, 100, 3, true ); 38 | 39 | cv::imshow( "Example Gray", img_gry ); 40 | 41 | cv::imshow( "Example Canny", img_cny ); 42 | 43 | cv::waitKey(0); 44 | 45 | } 46 | -------------------------------------------------------------------------------- /example_02-09.cpp: -------------------------------------------------------------------------------- 1 | // Example 2-9. Getting and setting pixels in Example 2-8 2 | 3 | #include 4 | 5 | void help(char** argv ) { 6 | std::cout << "\n" 7 | << "\nExample 2-9. 
Getting and setting pixels in Example 2-8" 8 | << "\nCall:\n" 9 | << argv[0] <<" \n" 10 | << "For example:\n" 11 | << argv[0] << " ../fruits.jpg\n" 12 | << std::endl; 13 | } 14 | 15 | 16 | int main( int argc, char** argv ) { 17 | 18 | if (argc != 2) { 19 | help(argv); 20 | return 0; 21 | } 22 | 23 | 24 | cv::Mat img_rgb, img_gry, img_cny, img_pyr, img_pyr2; 25 | 26 | cv::namedWindow( "Example Gray", cv::WINDOW_AUTOSIZE ); 27 | cv::namedWindow( "Example Canny", cv::WINDOW_AUTOSIZE ); 28 | 29 | img_rgb = cv::imread( argv[1] ); 30 | 31 | cv::cvtColor( img_rgb, img_gry, cv::COLOR_BGR2GRAY); 32 | 33 | cv::pyrDown( img_gry, img_pyr ); 34 | cv::pyrDown( img_pyr, img_pyr2 ); 35 | 36 | cv::Canny( img_pyr2, img_cny, 10, 100, 3, true ); 37 | 38 | // ---------------------------------------------------- 39 | // Start new code for example 2-9 40 | // 41 | 42 | int x = 16, y = 32; 43 | cv::Vec3b intensity = img_rgb.at< cv::Vec3b >(y, x); 44 | 45 | // ( Note: We could write img_rgb.at< cv::Vec3b >(x,y)[0] ) 46 | // 47 | uchar blue = intensity[0]; 48 | uchar green = intensity[1]; 49 | uchar red = intensity[2]; 50 | 51 | std::cout << "At (x,y) = (" << x << ", " << y << 52 | "): (blue, green, red) = (" << 53 | (unsigned int) blue << 54 | ", " << (unsigned int)green << ", " << 55 | (unsigned int) red << ")" << std::endl; 56 | 57 | std::cout << "Gray pixel there is: " << 58 | (unsigned int) img_gry.at(y, x) << std::endl; 59 | 60 | x /= 4; y /= 4; 61 | 62 | std::cout << "Pyramid2 pixel there is: " << 63 | (unsigned int)img_pyr2.at(y, x) << std::endl; 64 | 65 | img_cny.at(x, y) = 128; // Set the Canny pixel there to 128 66 | 67 | // 68 | // End new code for example 2-9 69 | // ---------------------------------------------------- 70 | 71 | cv::imshow( "Example Gray", img_gry ); 72 | cv::imshow( "Example Canny", img_cny ); 73 | 74 | cv::waitKey(0); 75 | 76 | } 77 | -------------------------------------------------------------------------------- /example_02-10.cpp: 
-------------------------------------------------------------------------------- 1 | // Example 2-10. The same object can load videos from a camera or a file 2 | // 3 | #include 4 | #include 5 | 6 | void help(char** argv ) { 7 | std::cout << "\n" 8 | << "\nxample 2-10. The same object can load videos from a camera or a file" 9 | << "\nCall:\n" 10 | << argv[0] <<" [path/image]\n" 11 | << "\nor, read from camera:\n" 12 | << argv[0] 13 | << "\nFor example:\n" 14 | << argv[0] << " ../tree.avi\n" 15 | << std::endl; 16 | } 17 | 18 | 19 | int main( int argc, char** argv ) { 20 | 21 | help(argv); 22 | 23 | 24 | 25 | cv::namedWindow( "Example 2-10", cv::WINDOW_AUTOSIZE ); 26 | cv::VideoCapture cap; 27 | 28 | if (argc==1) { 29 | cap.open(0); // open the first camera 30 | } else { 31 | cap.open(argv[1]); 32 | } 33 | 34 | if( !cap.isOpened() ) { // check if we succeeded 35 | std::cerr << "Couldn't open capture." << std::endl; 36 | return -1; 37 | } 38 | 39 | cv::Mat frame; 40 | 41 | for(;;) { 42 | 43 | cap >> frame; 44 | 45 | if( frame.empty() ) break; // Ran out of film 46 | 47 | cv::imshow( "Example 2-10", frame ); 48 | 49 | if( (char) cv::waitKey(33) >= 0 ) break; 50 | 51 | } 52 | 53 | return 0; 54 | 55 | } 56 | -------------------------------------------------------------------------------- /example_02-11.cpp: -------------------------------------------------------------------------------- 1 | // Example 2-11. 
A complete program to read in a color video and write out the log-polar- 2 | // transformed video 3 | 4 | #include 5 | #include 6 | 7 | 8 | void help(char** argv ) { 9 | std::cout << "\n" 10 | << "Read in a video, write out a log polar of it\n" 11 | << argv[0] <<" \n" 12 | << "For example:\n" 13 | << argv[0] << " ../tree.avi ../vout.avi\n" 14 | << "\nThen read it with:\n ./example_02-10 ../vout.avi\n" 15 | << std::endl; 16 | } 17 | 18 | 19 | int main( int argc, char** argv ) { 20 | 21 | if (argc != 3) { 22 | help(argv); 23 | return 0; 24 | } 25 | 26 | 27 | cv::namedWindow( "Example 2-11", cv::WINDOW_AUTOSIZE ); 28 | cv::namedWindow( "Log_Polar", cv::WINDOW_AUTOSIZE ); 29 | 30 | // ( Note: could capture from a camera by giving a camera id as an int.) 31 | // 32 | 33 | cv::VideoCapture capture( argv[1] ); 34 | double fps = capture.get( CV_CAP_PROP_FPS ); 35 | cv::Size size( 36 | (int)capture.get( CV_CAP_PROP_FRAME_WIDTH ), 37 | (int)capture.get( CV_CAP_PROP_FRAME_HEIGHT ) 38 | ); 39 | 40 | cv::VideoWriter writer; 41 | writer.open( argv[2], CV_FOURCC('M','J','P','G'), fps, size ); 42 | 43 | cv::Mat logpolar_frame, bgr_frame; 44 | 45 | for(;;) { 46 | 47 | capture >> bgr_frame; 48 | if( bgr_frame.empty() ) break; // end if done 49 | 50 | cv::imshow( "Example 2-11", bgr_frame ); 51 | 52 | cv::logPolar( 53 | bgr_frame, // Input color frame 54 | logpolar_frame, // Output log-polar frame 55 | cv::Point2f( // Centerpoint for log-polar transformation 56 | bgr_frame.cols/2, // x 57 | bgr_frame.rows/2 // y 58 | ), 59 | 40, // Magnitude (scale parameter) 60 | CV_WARP_FILL_OUTLIERS // Fill outliers with 'zero' 61 | ); 62 | 63 | cv::imshow( "Log_Polar", logpolar_frame ); 64 | 65 | writer << logpolar_frame; 66 | 67 | char c = cv::waitKey(10); 68 | 69 | if( c == 27 ) break; // allow the user to break out 70 | 71 | } 72 | 73 | writer.release(); 74 | capture.release(); 75 | 76 | } 77 | -------------------------------------------------------------------------------- /example_04-01.cpp: 
-------------------------------------------------------------------------------- 1 | // Example 4-1. Summation of a multidimensional array, done plane by plane 2 | #include 3 | #include 4 | 5 | using namespace std; 6 | 7 | // Summation of a multidimensional array, done plane by plane 8 | // 9 | int main( int argc, char** argv ) { 10 | 11 | cout << "\nExample 4-1. Summation of a multidimensional array, done plane by plane" 12 | << "\nCall:\n" 13 | << argv[0] << endl; 14 | 15 | const int n_mat_size = 5; 16 | const int n_mat_sz[] = { n_mat_size, n_mat_size, n_mat_size }; 17 | cv::Mat n_mat( 3, n_mat_sz, CV_32FC1 ); 18 | 19 | cv::RNG rng; 20 | rng.fill( n_mat, cv::RNG::UNIFORM, 0.f, 1.f ); 21 | 22 | const cv::Mat* arrays[] = { &n_mat, 0 }; 23 | cv::Mat my_planes[1]; 24 | cv::NAryMatIterator it( arrays, my_planes ); 25 | 26 | // On each iteration, it.planes[i] will be the current plane of the 27 | // i-th array from 'arrays'. 28 | // 29 | float s = 0.f; // Total sum over all planes 30 | int n = 0; // Total number of planes 31 | for (int p = 0; p < it.nplanes; p++, ++it) { 32 | s += cv::sum(it.planes[0])[0]; 33 | n++; 34 | } 35 | 36 | cout <<"Total across entire volume: " < 3 | #include 4 | 5 | using namespace std; 6 | 7 | // Summation of a multidimensional array, done plane by plane 8 | // 9 | int main( int argc, char** argv ) { 10 | 11 | cout << "\nExample 4-2. 
Summation of two arrays using the N-ary operator" 12 | << "\nCall:\n" 13 | << argv[0] << endl; 14 | 15 | const int n_mat_size = 5; 16 | const int n_mat_sz[] = { n_mat_size, n_mat_size, n_mat_size }; 17 | 18 | cv::Mat n_mat0( 3, n_mat_sz, CV_32FC1 ); 19 | cv::Mat n_mat1( 3, n_mat_sz, CV_32FC1 ); 20 | 21 | cv::RNG rng; 22 | rng.fill( n_mat0, cv::RNG::UNIFORM, 0.f, 1.f ); 23 | rng.fill( n_mat1, cv::RNG::UNIFORM, 0.f, 1.f ); 24 | 25 | const cv::Mat* arrays[] = { &n_mat0, &n_mat1, 0 }; 26 | 27 | cv::Mat my_planes[2]; 28 | cv::NAryMatIterator it( arrays, my_planes ); 29 | 30 | float s = 0.f; // Total sum over all planes in both arrays 31 | int n = 0; // Total number of planes 32 | for( int p = 0; p < it.nplanes; p++, ++it ) { 33 | s += cv::sum(it.planes[0])[0]; 34 | s += cv::sum(it.planes[1])[0]; 35 | n++; 36 | } 37 | 38 | cout <<"Total across both volumes: " < 4 | #include 5 | 6 | // Printing all of the nonzero elements of a sparse array 7 | // 8 | using namespace std; 9 | 10 | // Summation of a multidimensional array, done plane by plane 11 | // 12 | int main( int argc, char** argv ) { 13 | 14 | cout << "\nExample 4-3. 
Printing all of the nonzero elements of a sparse array" 15 | << "\nCall:\n" << argv[0] << endl; 16 | 17 | // Create a 10x10 sparse matrix with a few nonzero elements 18 | // 19 | int size[] = {10,10}; 20 | cv::SparseMat sm( 2, size, CV_32F ); 21 | for( int i=0; i<10; i++ ) { // Fill the array 22 | int idx[2]; 23 | idx[0] = size[0] * rand(); 24 | idx[1] = size[1] * rand(); 25 | sm.ref( idx ) += 1.0f; 26 | } 27 | 28 | // Print out the nonzero elements 29 | // 30 | cv::SparseMatConstIterator_ it = sm.begin(); 31 | cv::SparseMatConstIterator_ it_end = sm.end(); 32 | 33 | for(; it != it_end; ++it) { 34 | const cv::SparseMat::Node* node = it.node(); 35 | printf(" (%3d,%3d) %f\n", node->idx[0], node->idx[1], *it ); 36 | } 37 | 38 | } 39 | -------------------------------------------------------------------------------- /example_04-04.cpp: -------------------------------------------------------------------------------- 1 | //Example 4-4. A better way to print a matrix 2 | #include 3 | #include 4 | 5 | using namespace std; 6 | 7 | // A better way to print a sparse matrix 8 | // 9 | template void print_matrix( const cv::SparseMat_* sm ) { 10 | cv::SparseMatConstIterator_ it = sm->begin(); 11 | cv::SparseMatConstIterator_ it_end = sm->end(); 12 | 13 | for(; it != it_end; ++it) { 14 | const typename cv::SparseMat_::Node* node = it.node(); 15 | cout <<"( " <idx[0] <<", " <idx[1] 16 | <<" ) = " <<*it < sm( ndim, size ); 26 | 27 | // Create a sparse matrix with a few nonzero elements 28 | // 29 | for( int i=0; i<4; i++ ) { // Fill the array 30 | int idx[2]; 31 | idx[0] = size[0] * rand(); 32 | idx[1] = size[1] * rand(); 33 | sm.ref( idx ) += 1.0f; 34 | } 35 | 36 | print_matrix( &sm ); 37 | } 38 | 39 | void calling_function2( void ) { 40 | 41 | int ndim = 2; 42 | int size[] = {4,4}; 43 | 44 | cv::SparseMat sm( ndim, size, CV_32F ); 45 | 46 | // Create a sparse matrix with a few nonzero elements 47 | // 48 | for( int i=0; i<4; i++ ) { // Fill the array 49 | int idx[2]; 50 | idx[0] = 
size[0] * rand(); 51 | idx[1] = size[1] * rand(); 52 | sm.ref( idx ) += 1.0f; 53 | } 54 | 55 | print_matrix( (cv::SparseMat_*) &sm ); 56 | 57 | } 58 | 59 | void help(char** argv) { 60 | cout << "\nExample 4-4, a better way to print out a sparse matrix" 61 | << "\n Demonstrates printing of two different sparse matrices" 62 | << "\nCall:" 63 | << argv[0] 64 | << endl; 65 | } 66 | 67 | int main( int argc, char** argv ) { 68 | help(argv); 69 | cout <<"Case 1:" < < 3 | // 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | using namespace cv; 10 | using namespace std; 11 | 12 | void help(const char **argv) { 13 | cout << "\n\n" 14 | << "This program alpha blends the first image onto the other \n" 15 | << "alpha is the blending of the first image and beta onto the second\n" 16 | << "Call:\n" 17 | << argv[0] << " \n\n" 18 | << "Example:\n" 19 | << " ./example_05-01 ../faceTemplate.jpg ../faces.png 230 155 0.8 0.2\n" 20 | << endl; 21 | } 22 | 23 | 24 | 25 | int main( int argc, const char** argv ) 26 | { 27 | help(argv); 28 | if(argc != 7) { 29 | cout << "ERROR: Wrong # of parameters (7), you had " << argc << "\n" << endl; 30 | return -1; 31 | } 32 | 33 | 34 | // Using the first two arguments, open up the image to be copied onto 35 | // (src1), and the image that will be copied from (src2). 36 | // 37 | cv::Mat src1 = cv::imread(argv[1],1); 38 | cv::Mat src2 = cv::imread(argv[2],1); 39 | 40 | int from_w = src1.size().width; 41 | int from_h = src1.size().height; 42 | int to_w = src2.size().width; 43 | int to_h = src2.size().height; 44 | 45 | 46 | if( argc==7 && !src1.empty() && !src2.empty() ) { 47 | 48 | // Four more arguments tell where in src1 to paste the chunk taken from 49 | // src2. Note that the width and height also specify what portion of 50 | // src2 to actually use. 
51 | // 52 | int x = atoi(argv[3]); 53 | int y = atoi(argv[4]); 54 | 55 | // Make sure we don't exceed bounds: 56 | if((x < 0) || (y < 0) || (x > to_w - 1) || (y > to_h - 1) || (x+from_w > to_w - 1) || (y+from_h > to_h)) { 57 | cout << "\nError, at (x,y) (" << x << ", " << y <<"), your input image [w,h] [" << from_w << ", " 58 | << from_h << "] doesn't fit within the blend to image [w,h] [" << to_w << ", " << to_h <<"]\n" << endl; 59 | return -1; 60 | } 61 | 62 | // Two more arguments set the blending coefficients. 63 | // 64 | double alpha = (double)atof(argv[5]); 65 | double beta = (double)atof(argv[6]); 66 | 67 | cv::Mat roi1( src1, cv::Rect(0,0,from_w - 1,from_h - 1) ); //Just take the whole thing 68 | cv::Mat roi2( src2, cv::Rect(x,y,from_w - 1, from_h - 1) ); 69 | 70 | // Blend together the image src2 onto the image src1 71 | // at the specified location. 72 | // 73 | cv::addWeighted( roi1, alpha, roi2, beta, 0.0, roi2 ); 74 | 75 | // Create a window to shoow the result and show it. 76 | // 77 | cv::namedWindow( "Alpha Blend", 1 ); 78 | cv::imshow( "Alpha Blend", src2 ); 79 | 80 | // Leave the window up and runnnig until the user hits a key 81 | // 82 | cv::waitKey( 0 ); 83 | 84 | } 85 | 86 | return 0; 87 | 88 | } 89 | -------------------------------------------------------------------------------- /example_07-01.cpp: -------------------------------------------------------------------------------- 1 | // Example 7-1. Using the default random number generator to generate a pair of integers 2 | // and a pair of floating-point numbers 3 | 4 | //#include 5 | #include 6 | #include 7 | 8 | using namespace std; 9 | 10 | int main(int argc, char** argv) { 11 | cout << "\nExample 7-1. 
Using the default random number generator" 12 | << "\nto generate a pair of integers and a pair of" 13 | << "\n floating-point numbers" 14 | << "\n\nCall:\n" << argv[0] << "\n" << endl; 15 | 16 | cv::RNG rng = cv::theRNG(); 17 | cout << "An integer: " << (int)rng << endl; 18 | cout << "Another integer: " << int(rng) << endl; 19 | cout << "A float: " << (float)rng << endl; 20 | cout << "Another float: " << float(rng) << endl; 21 | 22 | return 0; 23 | 24 | } 25 | -------------------------------------------------------------------------------- /example_08-01.cpp: -------------------------------------------------------------------------------- 1 | // Example 8-1. Unpacking a four-character code to identify a video codec 2 | // 3 | //#include 4 | #include 5 | #include 6 | #include 7 | 8 | using namespace std; 9 | void help(char** argv ) { 10 | cout << "\n" 11 | << "Example 8-1. Unpacking a four-character code to identify a video codec" 12 | << "\nCall:\n" 13 | << argv[0] <<" \n" 14 | << "For example:\n" 15 | << argv[0] << " ../tree.avi\n" 16 | << endl; 17 | } 18 | 19 | 20 | int main( int argc, char** argv ) { 21 | 22 | if (argc != 2) { 23 | help(argv); 24 | return 0; 25 | } 26 | 27 | 28 | cv::VideoCapture cap( argv[1] ); 29 | 30 | unsigned f = (unsigned)cap.get( cv::CAP_PROP_FOURCC ); 31 | 32 | char fourcc[] = { 33 | (char) f, // First character is lowest bits 34 | (char)(f >> 8), // Next character is bits 8-15 35 | (char)(f >> 16), // Next character is bits 16-23 36 | (char)(f >> 24), // Last character is bits 24-31 37 | '\0' // and don't forget to terminate 38 | }; 39 | 40 | cout <<"FourCC for this video was: " < 4 | #include 5 | #include 6 | 7 | using namespace std; 8 | 9 | int main(int argc, char** argv) { 10 | 11 | cout << "\nExample 8-2. 
Using cv::FileStorage to create a .yml data file\n" 12 | << argv[0] 13 | << "\n\n output: test.yml\n\n" << endl; 14 | 15 | cv::FileStorage fs("test.yml", cv::FileStorage::WRITE); 16 | 17 | fs << "frameCount" << 5; 18 | 19 | time_t rawtime; time(&rawtime); 20 | 21 | fs << "calibrationDate" << asctime(localtime(&rawtime)); 22 | 23 | cv::Mat cameraMatrix = ( 24 | cv::Mat_(3,3) << 1000, 0, 320, 0, 1000, 240, 0, 0, 1 25 | ); 26 | cv::Mat distCoeffs = ( 27 | cv::Mat_(5,1) << 0.1, 0.01, -0.001, 0, 0 28 | ); 29 | 30 | fs << "cameraMatrix" << cameraMatrix << "distCoeffs" << distCoeffs; 31 | 32 | fs << "features" << "["; 33 | for( int i = 0; i < 3; i++ ) { 34 | int x = rand() % 640; 35 | int y = rand() % 480; 36 | uchar lbp = rand() % 256; 37 | fs << "{:" << "x" << x << "y" << y << "lbp" << "[:"; 38 | for( int j = 0; j < 8; j++ ) 39 | fs << ((lbp >> j) & 1); 40 | fs << "]" << "}"; 41 | } 42 | fs << "]"; 43 | 44 | fs.release(); 45 | 46 | return 0; 47 | 48 | } 49 | -------------------------------------------------------------------------------- /example_08-03.cpp: -------------------------------------------------------------------------------- 1 | // Example 8-3. Using cv::FileStorage to read a .yml file 2 | // 3 | #include 4 | 5 | using namespace std; 6 | 7 | int main(int argc, char** argv) { 8 | cout << "\nExample 8-3. Using cv::FileStorage to read a .yml file" 9 | << "\nCall:\n" 10 | << argv[0] << endl; 11 | 12 | cv::FileStorage fs2("test.yml", cv::FileStorage::READ); 13 | 14 | // first method: use (type) operator on FileNode. 
15 | // 16 | int frameCount = (int)fs2["frameCount"]; 17 | 18 | // second method: use cv::FileNode::operator >> 19 | // 20 | std::string date; 21 | fs2["calibrationDate"] >> date; 22 | 23 | cv::Mat cameraMatrix2, distCoeffs2; 24 | fs2["cameraMatrix"] >> cameraMatrix2; 25 | fs2["distCoeffs"] >> distCoeffs2; 26 | 27 | cout << "frameCount: " << frameCount << endl 28 | << "calibration date: " << date << endl 29 | << "camera matrix: " << cameraMatrix2 << endl 30 | << "distortion coeffs: " << distCoeffs2 << endl; 31 | 32 | cv::FileNode features = fs2["features"]; 33 | cv::FileNodeIterator it = features.begin(), it_end = features.end(); 34 | int idx = 0; 35 | std::vector lbpval; 36 | 37 | // iterate through a sequence using FileNodeIterator 38 | for( ; it != it_end; ++it, idx++ ) { 39 | 40 | cout << "feature #" << idx << ": "; 41 | cout << "x=" << (int)(*it)["x"] 42 | << ", y=" << (int)(*it)["y"] 43 | << ", lbp: ("; 44 | 45 | // ( Note: easily read numerical arrays using FileNode >> std::vector. ) 46 | // 47 | (*it)["lbp"] >> lbpval; 48 | for( int i = 0; i < (int)lbpval.size(); i++ ) 49 | cout << " " << (int)lbpval[i]; 50 | cout << ")" << endl; 51 | 52 | } 53 | 54 | fs2.release(); 55 | 56 | } 57 | -------------------------------------------------------------------------------- /example_09-01.cpp: -------------------------------------------------------------------------------- 1 | // Example 9-1. 
Creating a window and displaying an image in that window 2 | // 3 | #include 4 | #include 5 | 6 | using namespace std; 7 | 8 | 9 | void help(char** argv ) { 10 | cout << "\n" 11 | << "Create a window and display an image\nCall:\n" 12 | << argv[0] <<" image\n" 13 | << endl; 14 | } 15 | 16 | int main(int argc, char** argv) { 17 | // Document the interface 18 | help(argv); 19 | if(argc != 2) { cout << "You need to supply an image path/name" << endl;; exit(0);} 20 | 21 | // Create a named window with the name of the file 22 | // 23 | cv::namedWindow( argv[1], 1 ); 24 | 25 | // Load the image from the given filename 26 | // 27 | cv::Mat img = cv::imread( argv[1] ); 28 | 29 | // Show the image in the named window 30 | // 31 | cv::imshow( argv[1], img ); 32 | 33 | // Idle until the user hits the Esc key 34 | // 35 | cv::waitKey(); //Any key will end the program 36 | 37 | // Clean up and don't be piggies 38 | // 39 | cv::destroyWindow( argv[1] ); 40 | exit(0); 41 | } 42 | -------------------------------------------------------------------------------- /example_09-02.cpp: -------------------------------------------------------------------------------- 1 | // Example 9-2. Toy program for using a mouse to draw boxes on the screen 2 | #include 3 | 4 | // Define our callback which we will install for 5 | // mouse events 6 | // 7 | void my_mouse_callback( 8 | 9 | int event, int x, int y, int flags, void* param 10 | ); 11 | cv::Rect box; 12 | bool drawing_box = false; 13 | 14 | 15 | // A little subroutine to draw a box onto an image 16 | // 17 | void draw_box( cv::Mat& img, cv::Rect box ) { 18 | cv::rectangle( 19 | img, 20 | box.tl(), 21 | box.br(), 22 | 23 | cv::Scalar(0x00,0x00,0xff) /* red */ 24 | ); 25 | } 26 | void help(char** argv) { 27 | std::cout << "Example 9-2. Toy program for using a mouse to draw boxes on the screen" 28 | << "\nCall:\n" << argv[0] << 29 | "\n\nshows how to use a mouse to draw regions in an image. 
Esc to quit\n" << std::endl; 30 | } 31 | int main( int argc, char** argv ) { 32 | help(argv); 33 | box = cv::Rect(-1,-1,0,0); 34 | cv::Mat image(200, 200, CV_8UC3), temp; 35 | image.copyTo(temp); 36 | box = cv::Rect(-1,-1,0,0); 37 | image = cv::Scalar::all(0); 38 | cv::namedWindow( "Box Example" ); 39 | // Here is the crucial moment where we actually install 40 | // the callback. Note that we set the value of 'params' to 41 | // be the image we are working with so that the callback 42 | // will have the image to edit. 43 | // 44 | cv::setMouseCallback( 45 | "Box Example", 46 | my_mouse_callback, 47 | (void*)&image 48 | ); 49 | // The main program loop. Here we copy the working image 50 | // to the temp image, and if the user is drawing, then 51 | // put the currently contemplated box onto that temp image. 52 | // Display the temp image, and wait 15ms for a keystroke, 53 | // then repeat. 54 | // 55 | for(;;) { 56 | image.copyTo(temp); 57 | if( drawing_box ) draw_box( temp, box ); 58 | cv::imshow( "Box Example", temp ); 59 | if( cv::waitKey( 15 ) == 27 ) break; 60 | } 61 | return 0; 62 | } 63 | 64 | // This is our mouse callback. If the user 65 | // presses the left button, we start a box. 66 | // When the user releases that button, then we 67 | // add the box to the current image. When the 68 | // mouse is dragged (with the button down) we 69 | // resize the box. 
70 | // 71 | void my_mouse_callback( 72 | int event, int x, int y, int flags, void* param) 73 | { 74 | cv::Mat& image = *(cv::Mat*) param; 75 | switch( event ) { 76 | case cv::EVENT_MOUSEMOVE: { 77 | if( drawing_box ) { 78 | box.width = x-box.x; 79 | box.height = y-box.y; 80 | } 81 | } 82 | break; 83 | case cv::EVENT_LBUTTONDOWN: { 84 | drawing_box = true; 85 | box = cv::Rect( x, y, 0, 0 ); 86 | } 87 | break; 88 | case cv::EVENT_LBUTTONUP: { 89 | drawing_box = false; 90 | if( box.width < 0 ) { 91 | 92 | box.x += box.width; 93 | box.width *= -1; 94 | } 95 | if( box.height < 0 ) { 96 | box.y += box.height; 97 | box.height *= -1; 98 | } 99 | draw_box( image, box ); 100 | } 101 | break; 102 | } 103 | 104 | } 105 | -------------------------------------------------------------------------------- /example_09-03.cpp: -------------------------------------------------------------------------------- 1 | //Example 9-3. Using a trackbar to create a “switch” that the user can turn on and off; 2 | //this program plays a video and uses the switch to create a pause functionality 3 | // 4 | // An example program in which the user can draw boxes on the screen. 5 | // 6 | #include 7 | #include 8 | using namespace std; 9 | // 10 | // Using a trackbar to create a "switch" that the user can turn on and off. 11 | // We make this value global so everyone can see it. 12 | // 13 | int g_switch_value = 1; 14 | void switch_off_function() { cout << "Pause\n"; }; //YOU COULD DO MORE 15 | void switch_on_function() { cout << "Run\n"; }; 16 | 17 | // This will be the callback that we give to the trackbar. 18 | // 19 | void switch_callback( int position, void* ) { 20 | if( position == 0 ) { 21 | switch_off_function(); 22 | } else { 23 | switch_on_function(); 24 | } 25 | } 26 | 27 | void help(char ** argv) { 28 | cout << "Example 9-3. Using a trackbar to create a “switch” that the user can turn on and off" 29 | << "\n this program plays a video and uses the switch to create a pause functionality." 
30 | << "\n\nCall:\n" << argv[0] << " " 31 | << "\n\nShows putting a pause button in a video; Esc to quit\n" << endl; 32 | } 33 | 34 | int main( int argc, char** argv ) { 35 | cv::Mat frame; // To hold movie images 36 | cv::VideoCapture g_capture; 37 | help(argv); 38 | if( argc < 2 || !g_capture.open( argv[1] ) ){ 39 | cout << "Failed to open " << argv[1] << " video file\n" << endl; 40 | return -1; 41 | } 42 | 43 | // Name the main window 44 | // 45 | cv::namedWindow( "Example", 1 ); 46 | 47 | // Create the trackbar. We give it a name, 48 | // and tell it the name of the parent window. 49 | // 50 | cv::createTrackbar( 51 | "Switch", 52 | "Example", 53 | &g_switch_value, 54 | 1, 55 | switch_callback 56 | ); 57 | 58 | // This will cause OpenCV to idle until 59 | // someone hits the Esc key. 60 | // 61 | for(;;) { 62 | if( g_switch_value ) { 63 | g_capture >> frame; 64 | if( frame.empty() ) break; 65 | cv::imshow( "Example", frame); 66 | } 67 | if( cv::waitKey(10)==27 ) break; //esc 68 | } 69 | return 0; 70 | 71 | } 72 | -------------------------------------------------------------------------------- /example_09-04.cpp: -------------------------------------------------------------------------------- 1 | //Example 9-4. Slightly modified code from the OpenCV documentation that draws a 2 | //cube every frame; this modified version uses the global variables rotx and roty that are 3 | //connected to the sliders in Figure 9-6 4 | // Note: This example needs OpenGL installed on your system. It doesn't build if 5 | // the OpenGL libraries cannot be found. 
6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | using namespace std; 14 | 15 | int rotx = 55, roty = 40; 16 | 17 | void on_opengl(void* param) { 18 | cv::ogl::Texture2D* backgroundTex = (cv::ogl::Texture2D*)param; 19 | glEnable( GL_TEXTURE_2D ); 20 | backgroundTex->bind(); 21 | cv::ogl::render(*backgroundTex); 22 | glDisable( GL_TEXTURE_2D ); 23 | 24 | glMatrixMode( GL_PROJECTION ); 25 | glLoadIdentity(); 26 | glMatrixMode( GL_MODELVIEW ); 27 | glLoadIdentity(); 28 | glTranslatef(0, 0, -0.5); 29 | glRotatef( rotx, 1, 0, 0 ); 30 | glRotatef( roty, 0, 1, 0 ); 31 | glRotatef( 0, 0, 0, 1 ); 32 | glEnable( GL_DEPTH_TEST ); 33 | glDepthFunc( GL_LESS ); 34 | static const int coords[6][4][3] = { 35 | { { +1, -1, -1 }, { -1, -1, -1 }, { -1, +1, -1 }, { +1, +1, -1 } }, 36 | { { +1, +1, -1 }, { -1, +1, -1 }, { -1, +1, +1 }, { +1, +1, +1 } }, 37 | { { +1, -1, +1 }, { +1, -1, -1 }, { +1, +1, -1 }, { +1, +1, +1 } }, 38 | { { -1, -1, -1 }, { -1, -1, +1 }, { -1, +1, +1 }, { -1, +1, -1 } }, 39 | { { +1, -1, +1 }, { -1, -1, +1 }, { -1, -1, -1 }, { +1, -1, -1 } }, 40 | { { -1, -1, +1 }, { +1, -1, +1 }, { +1, +1, +1 }, { -1, +1, +1 } } 41 | }; 42 | for (int i = 0; i < 6; ++i) { 43 | glColor3ub( i*20, 100+i*10, i*42 ); 44 | glBegin( GL_QUADS ); 45 | for (int j = 0; j < 4; ++j) { 46 | glVertex3d( 47 | 0.2 * coords[i][j][0], 48 | 0.2 * coords[i][j][1], 49 | 0.2 * coords[i][j][2] 50 | ); 51 | } 52 | glEnd(); 53 | } 54 | } 55 | 56 | void on_trackbar( int, void* ) { 57 | cv::updateWindow( "Example 9-4" ); 58 | } 59 | 60 | void help(char ** argv) { 61 | 62 | cout << "\n//Example 9-4. Slightly modified code from the OpenCV documentation that draws a" 63 | << "\n//cube every frame; this modified version uses the global variables rotx and roty that are" 64 | << "\n//connected to the sliders in Figure 9-6" 65 | << "\n// Note: This example needs OpenGL installed on your system. 
It doesn't build if" 66 | << "\n// the OpenGL libraries cannot be found.\n\/" 67 | << "\nCall: " << argv[0] << " \n\n" 68 | << "\nHere OpenGL is used to render a cube on top of an image.\n" 69 | << "\nUser can rotate the cube with the sliders\n" < 6 | #include 7 | #include 8 | #include 9 | 10 | using namespace std; 11 | 12 | int main( int argc, char* argv[] ) { 13 | cout << "\n\nExample 9-5. An example program ch4_qt.cpp, which takes a single argument" 14 | << "\nindicating a video file; that video file will be replayed inside of a Qt object" 15 | << "\nthat we will define, called QMoviePlayer" 16 | << "\nCall:\n" << argv[0] << " " 17 | << "\nExample:\n" << argv[0] << " ../tree.avi\n" << endl; 18 | if(argc != 2) 19 | return -1; 20 | 21 | QApplication app( argc, argv ); 22 | QMoviePlayer mp; 23 | mp.open( argv[1] ); 24 | mp.show(); 25 | return app.exec(); 26 | } 27 | -------------------------------------------------------------------------------- /example_09-06.cpp: -------------------------------------------------------------------------------- 1 | //Example 9-6. The QMoviePlayer object header file QMoviePlayer.hpp 2 | // 3 | #include "ui_QMoviePlayer.h" 4 | #include 5 | #include 6 | using namespace std; 7 | class QMoviePlayer : public QWidget { 8 | Q_OBJECT; 9 | public: 10 | QMoviePlayer( QWidget *parent = NULL ); 11 | virtual ~QMoviePlayer() {;} 12 | bool open( string file ); 13 | private: 14 | Ui::QMoviePlayer ui; 15 | cv::VideoCapture m_cap; 16 | QImage m_qt_img; 17 | cv::Mat m_cv_img; 18 | QTimer* m_timer; 19 | void paintEvent( QPaintEvent* q ); 20 | void _copyImage( void ); 21 | public slots: 22 | void nextFrame(); 23 | }; 24 | -------------------------------------------------------------------------------- /example_09-07.cpp: -------------------------------------------------------------------------------- 1 | //Example 9-7. 
The QMoviePlayer object source file: QMoviePlayer.cpp 2 | // 3 | #include "QMoviePlayer.hpp" 4 | #include 5 | #include 6 | QMoviePlayer::QMoviePlayer( QWidget *parent ) 7 | : QWidget( parent ) 8 | { 9 | ui.setupUi( this ); 10 | } 11 | -------------------------------------------------------------------------------- /example_09-08.cpp: -------------------------------------------------------------------------------- 1 | //Example 9-8. An example program which takes a single argument 2 | //indicating a video file; that video file will be replayed inside of a wxWidgets object that 3 | //we will define, called WxMoviePlayer 4 | // 5 | #include "wx/wx.h" 6 | #include "WxMoviePlayer.hpp" 7 | // Application class, the top level object in wxWidgets 8 | // 9 | class MyApp : public wxApp { 10 | public: 11 | virtual bool OnInit(); 12 | }; 13 | // Behind the scenes stuff to create a main() function and attach MyApp 14 | // 15 | DECLARE_APP( MyApp ); 16 | IMPLEMENT_APP( MyApp ); 17 | // When MyApp is initialized, do these things. 18 | // 19 | bool MyApp::OnInit() { 20 | wxFrame* frame = new wxFrame( NULL, wxID_ANY, wxT("ch4_wx") ); 21 | frame->Show( true ); 22 | WxMoviePlayer* mp = new WxMoviePlayer( 23 | frame, 24 | wxPoint( -1, -1 ), 25 | wxSize( 640, 480 ) 26 | ); 27 | mp->open( wxString(argv[1]) ); 28 | mp->Show( true ); 29 | return true; 30 | } 31 | -------------------------------------------------------------------------------- /example_09-09.cpp: -------------------------------------------------------------------------------- 1 | // Example 9-9. 
The WxMoviePlayer object header file WxMoviePlayer.hpp 2 | // 3 | #include "opencv2/opencv.hpp" 4 | #include "wx/wx.h" 5 | #include 6 | #define TIMER_ID 0 7 | using namespace std; 8 | class WxMoviePlayer : public wxWindow { 9 | public: 10 | WxMoviePlayer( 11 | wxWindow* parent, 12 | const wxPoint& pos, 13 | const wxSize& size 14 | ); 15 | virtual ~WxMoviePlayer() {}; 16 | bool open( wxString file ); 17 | private: 18 | cv::VideoCapture m_cap; 19 | cv::Mat m_cv_img; 20 | wxImage m_wx_img; 21 | wxBitmap m_wx_bmp; 22 | wxTimer* m_timer; 23 | wxWindow* m_parent; 24 | void _copyImage( void ); 25 | void OnPaint( wxPaintEvent& e ); 26 | void OnTimer( wxTimerEvent& e ); 27 | void OnKey( wxKeyEvent& e ); 28 | protected: 29 | DECLARE_EVENT_TABLE(); 30 | }; 31 | -------------------------------------------------------------------------------- /example_09-10.cpp: -------------------------------------------------------------------------------- 1 | //Example 9-10. The WxMoviePlayer object source file WxMoviePlayer.cpp 2 | // 3 | #include "WxMoviePlayer.hpp" 4 | BEGIN_EVENT_TABLE( WxMoviePlayer, wxWindow ) 5 | EVT_PAINT( WxMoviePlayer::OnPaint ) 6 | EVT_TIMER( TIMER_ID, WxMoviePlayer::OnTimer ) 7 | EVT_CHAR( WxMoviePlayer::OnKey ) 8 | END_EVENT_TABLE() 9 | 10 | // The first thing we do is to set up the callbacks that will be associated with individual 11 | // events. We do this through macros provided by the wxWidgets framework. 12 | WxMoviePlayer::WxMoviePlayer( 13 | wxWindow* parent, 14 | const wxPoint& pos, 15 | const wxSize& size 16 | ) : wxWindow( parent, -1, pos, size, wxSIMPLE_BORDER ) { 17 | m_timer = NULL; 18 | m_parent = parent; 19 | } 20 | 21 | //We will need to 22 | //know which frame is the parent when it comes time to close the application in 23 | //response to the Esc key. 
24 | void WxMoviePlayer::OnPaint( wxPaintEvent& event ) { 25 | wxPaintDC dc( this ); 26 | if( !dc.Ok() ) return; 27 | int x,y,w,h; 28 | dc.BeginDrawing(); 29 | dc.GetClippingBox( &x, &y, &w, &h ); 30 | dc.DrawBitmap( m_wx_bmp, x, y ); 31 | dc.EndDrawing(); 32 | return; 33 | } 34 | 35 | //The WxMoviePlayer::_copyImage() method will get called whenever a new image is 36 | //read from the cv::VideoCapture object. 37 | void WxMoviePlayer::_copyImage( void ) { 38 | m_wx_bmp = wxBitmap( m_wx_img ); 39 | Refresh( FALSE ); // indicate that the object is dirty 40 | Update(); 41 | } 42 | 43 | //The WxMoviePlayer::open() method also does several important things. The first is 44 | //to actually open the cv::VideoCapture object, but there is a lot more to be done. 45 | //Next, an image is read off of the player and is used to create a wxImage object that 46 | //“points at” the OpenCV cv::Mat image. This is the opposite philosophy to the one 47 | //we used in the Qt example: in this case, it turns out to be a little more convenient to 48 | //create the cv::Mat first and have it own the data, and then to create the GUI toolkit’s 49 | //image object 50 | 51 | bool WxMoviePlayer::open( wxString file ) { 52 | 53 | if( !m_cap.open( std::string( file.mb_str() ) )) { 54 | return false; 55 | } 56 | 57 | // If we opened the file, set up everything now: 58 | // 59 | m_cap.read( m_cv_img ); 60 | m_wx_img = wxImage( 61 | m_cv_img.cols, 62 | m_cv_img.rows, 63 | m_cv_img.data, 64 | TRUE // static data, do not free on delete() 65 | ); 66 | _copyImage(); 67 | m_timer = new wxTimer( this, TIMER_ID ); 68 | m_timer->Start( 1000. / m_cap.get( cv::CAP_PROP_FPS ) ); 69 | return true; 70 | } 71 | 72 | //The following handler doesn’t do too much; primarily it just reads a new frame from the video, 73 | //converts that frame from BGR to RGB for display, and then calls our WxMovie 74 | //Player::_copyImage() , which makes the next bitmap for us. 
75 | void WxMoviePlayer::OnTimer( wxTimerEvent& event ) { 76 | if( !m_cap.isOpened() ) return; 77 | m_cap.read( m_cv_img ); 78 | cv::cvtColor( m_cv_img, m_cv_img, cv::COLOR_BGR2RGB ); 79 | _copyImage(); 80 | } 81 | 82 | //Handler for keypresses 83 | void WxMoviePlayer::OnKey( wxKeyEvent& e ) { 84 | if( e.GetKeyCode() == WXK_ESCAPE ) m_parent->Close(); 85 | } 86 | -------------------------------------------------------------------------------- /example_09-11.cpp: -------------------------------------------------------------------------------- 1 | // Example 9-11. An example header file for our custom View class 2 | // 3 | class COpenCVTestView : public CWindowImpl<COpenCVTestView> { 4 | public: 5 | DECLARE_WND_CLASS(NULL) 6 | bool OpenFile(std::string file); 7 | void _copyImage(); 8 | BOOL PreTranslateMessage(MSG* pMsg); 9 | BEGIN_MSG_MAP(COpenCVTestView) 10 | MESSAGE_HANDLER(WM_ERASEBKGND, OnEraseBkgnd) 11 | MESSAGE_HANDLER(WM_PAINT, OnPaint) 12 | MESSAGE_HANDLER(WM_TIMER, OnTimer) 13 | END_MSG_MAP() 14 | // Handler prototypes (uncomment arguments if needed): 15 | // LRESULT MessageHandler( 16 | // UINT /*uMsg*/, 17 | // WPARAM /*wParam*/, 18 | // LPARAM /*lParam*/, 19 | // BOOL& /*bHandled*/ 20 | // ); 21 | // LRESULT CommandHandler( 22 | // WORD /*wNotifyCode*/, 23 | // WORD /*wID*/, 24 | // HWND /*hWndCtl*/, 25 | // BOOL& /*bHandled*/ 26 | // ); 27 | // LRESULT NotifyHandler( 28 | // int /*idCtrl*/, 29 | // LPNMHDR /*pnmh*/, 30 | // BOOL& /*bHandled*/ 31 | // ); 32 | LRESULT OnPaint( 33 | UINT /*uMsg*/, 34 | WPARAM /*wParam*/, 35 | LPARAM /*lParam*/, 36 | BOOL& /*bHandled*/ 37 | ); 38 | LRESULT OnTimer( 39 | UINT /*uMsg*/, 40 | WPARAM /*wParam*/, 41 | LPARAM /*lParam*/, 42 | BOOL& /*bHandled*/ 43 | ); 44 | LRESULT OnEraseBkgnd( 45 | UINT /*uMsg*/, 46 | WPARAM /*wParam*/, 47 | LPARAM /*lParam*/, 48 | BOOL& /*bHandled*/ 49 | ); 50 | private: 51 | cv::VideoCapture m_cap; 52 | cv::Mat m_cv_img; 53 | RGBTRIPLE* m_bitmapBits; 54 | }; 55 | 56 | LRESULT CMainFrame::OnFileOpen( 57 | WORD 
/*wNotifyCode*/, 58 | WORD /*wID*/, 59 | HWND /*hWndCtl*/, 60 | BOOL& /*bHandled*/ 61 | ) { 62 | WTL::CFileDialog dlg(TRUE); 63 | if (IDOK == dlg.DoModal(m_hWnd)) { 64 | m_view.OpenFile(dlg.m_szFileName); 65 | } 66 | return 0; 67 | } 68 | 69 | bool COpenCVTestView::OpenFile(std::string file) { 70 | if( !m_cap.open( file ) ) return false; 71 | // If we opened the file, set up everything now: 72 | // 73 | m_cap.read( m_cv_img ); 74 | // could create a DIBSection here, but let's just allocate memory for raw bits 75 | // 76 | m_bitmapBits = new RGBTRIPLE[m_cv_img.cols * m_cv_img.rows]; 77 | _copyImage(); 78 | SetTimer(0, 1000.0f / m_cap.get( cv::CAP_PROP_FPS ) ); 79 | return true; 80 | } 81 | 82 | void COpenCVTestView::_copyImage() { 83 | // Copy the image data into the bitmap 84 | // 85 | cv::Mat cv_header_to_qt_image( 86 | cv::Size( 87 | m_cv_img.cols, 88 | m_cv_img.rows 89 | ), 90 | CV_8UC3, 91 | m_bitmapBits 92 | ); 93 | cv::cvtColor( m_cv_img, cv_header_to_qt_image, cv::COLOR_BGR2RGB ); 94 | } 95 | 96 | LRESULT COpenCVTestView::OnPaint( 97 | UINT 98 | /* uMsg 99 | */, 100 | WPARAM /* wParam 101 | */, 102 | LPARAM /* lParam 103 | */, 104 | BOOL& /* bHandled */ 105 | ) { 106 | CPaintDC dc(m_hWnd); 107 | WTL::CRect rect; 108 | GetClientRect(&rect); 109 | if( m_cap.isOpened() ) { 110 | BITMAPINFO bmi = {0}; 111 | bmi.bmiHeader.biSize = sizeof(bmi.bmiHeader); 112 | bmi.bmiHeader.biCompression = BI_RGB; 113 | bmi.bmiHeader.biWidth 114 | = m_cv_img.cols; 115 | // note that bitmaps default to bottom-up, use negative height to 116 | // represent top-down 117 | // 118 | bmi.bmiHeader.biHeight = m_cv_img.rows * -1; 119 | bmi.bmiHeader.biPlanes = 1; 120 | bmi.bmiHeader.biBitCount = 24; // 32 if you use RGBQUADs for the bits 121 | dc.StretchDIBits( 122 | 0, 0, // destination upper-left corner 123 | rect.Width(), rect.Height(), // destination size 124 | 0, 0, // source upper-left corner 125 | bmi.bmiHeader.biWidth, // source width 126 | abs(bmi.bmiHeader.biHeight), // source height 127 | m_bitmapBits, 128 | &bmi, 129 | DIB_RGB_COLORS, 130 | SRCCOPY
137 | ); 138 | } else { 139 | dc.FillRect(rect, COLOR_WINDOW); 140 | } 141 | return 0; 142 | } 143 | 144 | LRESULT COpenCVTestView::OnTimer( 145 | UINT 146 | /* uMsg 147 | */, 148 | WPARAM /* wParam 149 | */, 150 | LPARAM /* lParam 151 | */, 152 | BOOL& /* bHandled */ 153 | ) { 154 | // Nothing to do if capture object is not open 155 | // 156 | if( !m_cap.isOpened() ) return 0; 157 | m_cap.read( m_cv_img ); 158 | _copyImage(); 159 | Invalidate(); 160 | return 0; 161 | } 162 | 163 | LRESULT COpenCVTestView::OnEraseBkgnd( 164 | UINT 165 | /* uMsg 166 | */, 167 | WPARAM /* wParam 168 | */, 169 | LPARAM /* lParam 170 | */, 171 | BOOL& /* bHandled */ 172 | ) { 173 | // since we completely paint our window in the OnPaint handler, use 174 | // an empty background handler 175 | return 0; 176 | } 177 | -------------------------------------------------------------------------------- /example_10-01.cpp: -------------------------------------------------------------------------------- 1 | //Example 10-1. Using cv::threshold() to sum three channels of an image 2 | 3 | #include 4 | #include 5 | 6 | using namespace std; 7 | 8 | void sum_rgb( const cv::Mat& src, cv::Mat& dst ) { 9 | 10 | // Split image onto the color planes. 11 | // 12 | vector< cv::Mat> planes; 13 | cv::split(src, planes); 14 | cv::Mat b = planes[0], g = planes[1], r = planes[2], s; 15 | 16 | // Add equally weighted rgb values. 17 | // 18 | cv::addWeighted( r, 1./3., g, 1./3., 0.0, s ); 19 | cv::addWeighted( s, 1., b, 1./3., 0.0, s ); 20 | 21 | // Truncate values above 100. 22 | // 23 | cv::threshold( s, dst, 100, 100, cv::THRESH_TRUNC ); 24 | } 25 | 26 | void help(char ** argv) { 27 | cout << "\nExample 10-1. 
Using cv::threshold() to sum three channels of an image\n" << endl; 28 | cout << "Call:\n" << argv[0] << " ../faces.jpg" << endl; 29 | } 30 | 31 | int main(int argc, char** argv) { 32 | help(argv); 33 | if(argc < 2) { cout << "\nSpecify input image" << endl; return -1; } 34 | 35 | // Load the image from the given file name. 36 | // 37 | cv::Mat src = cv::imread( argv[1] ), dst; 38 | if( src.empty() ) { cout << "can not load " << argv[1] << endl; return -1; } 39 | sum_rgb( src, dst); 40 | 41 | // Create a named window with the name of the file and 42 | // show the image in the window 43 | // 44 | cv::imshow( argv[1], dst ); 45 | 46 | // Idle until the user hits any key. 47 | // 48 | cv::waitKey(0); 49 | return 0; 50 | } 51 | -------------------------------------------------------------------------------- /example_10-02.cpp: -------------------------------------------------------------------------------- 1 | // Example 10-2. Alternative method to combine and threshold image planes 2 | 3 | #include 4 | #include 5 | 6 | using namespace std; 7 | 8 | void sum_rgb( const cv::Mat& src, cv::Mat& dst ) { 9 | 10 | // Split image onto the color planes. 11 | // 12 | vector planes; 13 | cv::split(src, planes); 14 | cv::Mat b = planes[0], g = planes[1], r = planes[2]; 15 | 16 | // Accumulate separate planes, combine and threshold. 17 | // 18 | cv::Mat s = cv::Mat::zeros(b.size(), CV_32F); 19 | cv::accumulate(b, s); 20 | cv::accumulate(g, s); 21 | cv::accumulate(r, s); 22 | 23 | // Truncate values above 100 and rescale into dst. 24 | // 25 | cv::threshold( s, s, 100, 100, cv::THRESH_TRUNC ); 26 | s.convertTo(dst, b.type()); 27 | } 28 | 29 | void help(char **argv) { 30 | cout << "\nExample 10-2. 
Alternative method to combine and threshold image planes\n" << endl; 31 | cout << "\nCall:\n" << argv[0] << " ../faces.jpg\n" << endl; 32 | } 33 | 34 | int main(int argc, char** argv) { 35 | help(argv); 36 | if(argc < 2) { cout << "specify input image" << endl; return -1; } 37 | 38 | // Load the image from the given file name. 39 | // 40 | cv::Mat src = cv::imread( argv[1] ), dst; 41 | if( src.empty() ) { cout << "can not load " << argv[1] << endl; return -1; } 42 | sum_rgb( src, dst); 43 | 44 | // Create a named window with the name of the file and 45 | // show the image in the window 46 | // 47 | cv::imshow( argv[1], dst ); 48 | 49 | // Idle until the user hits any key. 50 | // 51 | cv::waitKey(0); 52 | return 0; 53 | } 54 | -------------------------------------------------------------------------------- /example_10-03.cpp: -------------------------------------------------------------------------------- 1 | // Example 10-3. Threshold versus adaptive threshold 2 | #include 3 | #include 4 | #include 5 | 6 | using namespace std; 7 | 8 | int main( int argc, char** argv ) 9 | { 10 | if(argc != 7) { cout << "\nExample 10-3. 
Threshold versus adaptive threshold\n" 11 | "Usage:\n" < 5 | #include 6 | 7 | using namespace std; 8 | 9 | int main(int argc, char** argv) { 10 | 11 | if(argc != 2) { 12 | cout << "Warp affine\nUsage: " <\n" << endl; 13 | return -1; 14 | } 15 | 16 | cv::Mat src = cv::imread(argv[1],1); 17 | if( src.empty() ) { cout << "Can not load " << argv[1] << endl; return -1; } 18 | 19 | cv::Point2f srcTri[] = { 20 | cv::Point2f(0,0), // src Top left 21 | cv::Point2f(src.cols-1, 0), // src Top right 22 | cv::Point2f(0, src.rows-1) // src Bottom left 23 | }; 24 | 25 | cv::Point2f dstTri[] = { 26 | cv::Point2f(src.cols*0.f, src.rows*0.33f), // dst Top left 27 | cv::Point2f(src.cols*0.85f, src.rows*0.25f), // dst Top right 28 | cv::Point2f(src.cols*0.15f, src.rows*0.7f) // dst Bottom left 29 | }; 30 | 31 | // COMPUTE AFFINE MATRIX 32 | // 33 | cv::Mat warp_mat = cv::getAffineTransform(srcTri, dstTri); 34 | cv::Mat dst, dst2; 35 | cv::warpAffine( 36 | src, 37 | dst, 38 | warp_mat, 39 | src.size(), 40 | cv::INTER_LINEAR, 41 | cv::BORDER_CONSTANT, 42 | cv::Scalar() 43 | ); 44 | for( int i = 0; i < 3; ++i ) 45 | cv::circle(dst, dstTri[i], 5, cv::Scalar(255, 0, 255), -1, cv::LINE_AA); 46 | 47 | cv::imshow("Affine Transform Test", dst); 48 | cv::waitKey(); 49 | 50 | for(int frame=0;;++frame) { 51 | 52 | // COMPUTE ROTATION MATRIX 53 | cv::Point2f center(src.cols*0.5f, src.rows*0.5f); 54 | double angle = frame*3 % 360, scale = (cos((angle - 60)* CV_PI/180) + 1.05)*0.8; 55 | 56 | cv::Mat rot_mat = cv::getRotationMatrix2D(center, angle, scale); 57 | 58 | cv::warpAffine( 59 | src, 60 | dst, 61 | rot_mat, 62 | src.size(), 63 | cv::INTER_LINEAR, 64 | cv::BORDER_CONSTANT, 65 | cv::Scalar() 66 | ); 67 | cv::imshow("Rotated Image", dst); 68 | if(cv::waitKey(30) >= 0 ) 69 | break; 70 | 71 | } 72 | 73 | return 0; 74 | } 75 | -------------------------------------------------------------------------------- /example_11-02.cpp: 
-------------------------------------------------------------------------------- 1 | // Example 11-2. Code for perspective transformation 2 | // Compute a perspective transformation between the 4 src control points 3 | // in srcQuad to 4 dst control points in dstQuad and apply it the image. 4 | 5 | #include 6 | #include 7 | 8 | using namespace std; 9 | 10 | int main(int argc, char** argv) { 11 | 12 | if(argc != 2) { 13 | cout << "Perspective Warp\nUsage: " <\n" << endl; 14 | return -1; 15 | } 16 | 17 | cv::Mat src = cv::imread(argv[1],1); 18 | if( src.empty() ) { cout << "Can not load " << argv[1] << endl; return -1; } 19 | 20 | cv::Point2f srcQuad[] = { 21 | cv::Point2f(0, 0), // src Top left 22 | cv::Point2f(src.cols-1, 0), // src Top right 23 | cv::Point2f(src.cols-1, src.rows-1), // src Bottom right 24 | cv::Point2f(0, src.rows-1) // src Bottom left 25 | }; 26 | 27 | cv::Point2f dstQuad[] = { 28 | cv::Point2f(src.cols*0.05f, src.rows*0.33f), 29 | cv::Point2f(src.cols*0.9f, src.rows*0.25f), 30 | cv::Point2f(src.cols*0.8f, src.rows*0.9f), 31 | cv::Point2f(src.cols*0.2f, src.rows*0.7f) 32 | }; 33 | 34 | // COMPUTE PERSPECTIVE MATRIX 35 | // 36 | cv::Mat warp_mat = cv::getPerspectiveTransform(srcQuad, dstQuad); 37 | cv::Mat dst; 38 | cv::warpPerspective(src, dst, warp_mat, src.size(), cv::INTER_LINEAR, 39 | cv::BORDER_CONSTANT, cv::Scalar()); 40 | 41 | for( int i = 0; i < 4; i++ ) 42 | cv::circle(dst, dstQuad[i], 5, cv::Scalar(255, 0, 255), -1, cv::LINE_AA); 43 | 44 | cv::imshow("Perspective Transform Test", dst); 45 | cv::waitKey(); 46 | return 0; 47 | } 48 | -------------------------------------------------------------------------------- /example_11-03.cpp: -------------------------------------------------------------------------------- 1 | // Example 11-3. Log-polar transform example 2 | // Log-polar transform example. 3 | // This demonstrates the forward and backward (inverse) log-polar 4 | // transform. 
5 | 6 | #include 7 | #include 8 | 9 | using namespace std; 10 | 11 | int main(int argc, char** argv) { 12 | if(argc != 3) { 13 | cout << "LogPolar\nUsage: " < \n" 14 | <<"~30 is usually good enough\n"; 15 | return -1; 16 | } 17 | 18 | cv::Mat src = cv::imread(argv[1],1); 19 | 20 | if( src.empty() ) { cout << "Can not load " << argv[1] << endl; return -1; } 21 | 22 | double M = atof(argv[2]); 23 | cv::Mat dst(src.size(), src.type()), src2(src.size(), src.type()); 24 | 25 | cv::logPolar( 26 | src, 27 | dst, 28 | cv::Point2f(src.cols*0.5f, src.rows*0.5f), 29 | M, 30 | cv::INTER_LINEAR | cv::WARP_FILL_OUTLIERS 31 | ); 32 | cv::logPolar( 33 | dst, 34 | src2, 35 | cv::Point2f(src.cols*0.5f, src.rows*0.5f), 36 | M, 37 | cv::INTER_LINEAR | cv::WARP_INVERSE_MAP 38 | ); 39 | cv::imshow( "log-polar", dst ); 40 | cv::imshow( "inverse log-polar", src2 ); 41 | 42 | cv::waitKey(); 43 | 44 | return 0; 45 | } 46 | -------------------------------------------------------------------------------- /example_12-01.cpp: -------------------------------------------------------------------------------- 1 | // Example 12-1. Using cv::dft() and cv::idft() to accelerate 2 | // the computation of convolutions 3 | 4 | #include 5 | #include 6 | 7 | using std::cout; 8 | using std::endl; 9 | 10 | int main(int argc, char** argv) { 11 | if (argc != 2) { 12 | cout << "\nExample 12-1. 
Using cv::dft() and cv::idft() to accelerate the" 13 | << "\n computation of convolutions" 14 | << "\nFourier Transform\nUsage: " 15 | << argv[0] << " \n" << endl; 16 | return -1; 17 | } 18 | 19 | cv::Mat A = cv::imread(argv[1], 0); 20 | 21 | if (A.empty()) { 22 | cout << "Cannot load " << argv[1] << endl; 23 | return -1; 24 | } 25 | 26 | cv::Size patchSize(100, 100); 27 | cv::Point topleft(A.cols / 2, A.rows /2); 28 | cv::Rect roi(topleft.x, topleft.y, patchSize.width, patchSize.height); 29 | cv::Mat B = A(roi); 30 | 31 | int dft_M = cv::getOptimalDFTSize(A.rows + B.rows - 1); 32 | int dft_N = cv::getOptimalDFTSize(A.cols + B.cols - 1); 33 | 34 | cv::Mat dft_A = cv::Mat::zeros(dft_M, dft_N, CV_32F); 35 | cv::Mat dft_B = cv::Mat::zeros(dft_M, dft_N, CV_32F); 36 | 37 | cv::Mat dft_A_part = dft_A(cv::Rect(0, 0, A.cols, A.rows)); 38 | cv::Mat dft_B_part = dft_B(cv::Rect(0, 0, B.cols, B.rows)); 39 | 40 | A.convertTo(dft_A_part, dft_A_part.type(), 1, -mean(A)[0]); 41 | B.convertTo(dft_B_part, dft_B_part.type(), 1, -mean(B)[0]); 42 | 43 | cv::dft(dft_A, dft_A, 0, A.rows); 44 | cv::dft(dft_B, dft_B, 0, B.rows); 45 | 46 | // set the last parameter to false to compute convolution instead of correlation 47 | // 48 | cv::mulSpectrums(dft_A, dft_B, dft_A, 0, true); 49 | cv::idft(dft_A, dft_A, cv::DFT_SCALE, A.rows + B.rows - 1); 50 | 51 | cv::Mat corr = dft_A(cv::Rect(0, 0, A.cols + B.cols - 1, A.rows + B.rows - 1)); 52 | cv::normalize(corr, corr, 0, 1, cv::NORM_MINMAX, corr.type()); 53 | cv::pow(corr, 3.0, corr); 54 | 55 | B ^= cv::Scalar::all(255); 56 | 57 | cv::imshow("Image", A); 58 | cv::imshow("ROI", B); 59 | 60 | cv::imshow("Correlation", corr); 61 | cv::waitKey(); 62 | 63 | return 0; 64 | } 65 | -------------------------------------------------------------------------------- /example_12-02.cpp: -------------------------------------------------------------------------------- 1 | // Example 12-2. 
Using cv::HoughCircles() to return a sequence of circles found in a 2 | // grayscale image 3 | 4 | #include 5 | 6 | #include 7 | #include 8 | 9 | #include 10 | 11 | using std::cout; 12 | using std::endl; 13 | using std::vector; 14 | 15 | void help(char** argv) { 16 | cout << "\nExample 12-1. Using cv::dft() and cv::idft() to accelerate the computation of convolutions" 17 | << "\nHough Circle detect\nUsage: " << argv[0] <<" \n" 18 | << "Example:\n" << argv[0] << " ../stuff.jpg\n" << endl; 19 | } 20 | 21 | int main(int argc, char** argv) { 22 | help(argv); 23 | if (argc != 2) { 24 | return -1; 25 | } 26 | 27 | cv::Mat src, image; 28 | 29 | src = cv::imread(argv[1], 1); 30 | if (src.empty()) { 31 | cout << "Cannot load " << argv[1] << endl; 32 | return -1; 33 | } 34 | 35 | cv::cvtColor(src, image, cv::COLOR_BGR2GRAY); 36 | cv::GaussianBlur(image, image, cv::Size(5, 5), 0, 0); 37 | 38 | vector circles; 39 | cv::HoughCircles(image, circles, cv::HOUGH_GRADIENT, 2, image.cols/4); 40 | 41 | for (size_t i = 0; i < circles.size(); ++i) { 42 | cv::circle(src, 43 | cv::Point(cvRound(circles[i][0]), cvRound(circles[i][1])), 44 | cvRound(circles[i][2]), 45 | cv::Scalar(0, 0, 255), 46 | 2, 47 | cv::LINE_AA); 48 | } 49 | 50 | cv::imshow("Hough Circles", src); 51 | cv::waitKey(0); 52 | 53 | return 0; 54 | } 55 | -------------------------------------------------------------------------------- /example_12-03.cpp: -------------------------------------------------------------------------------- 1 | // Example 12-3. 
Using GrabCut for background removal 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | using std::cout; 8 | using std::cerr; 9 | using std::endl; 10 | 11 | cv::Mat img_preview; 12 | cv::Mat img; 13 | cv::Mat markers; 14 | cv::Mat drawRect; 15 | int x_0 = -1; 16 | int y_0 = -1; 17 | int x_1, y_1; 18 | int drawr = 0; 19 | 20 | bool finished; 21 | 22 | void displayResult() { 23 | int rows = img.rows; 24 | int cols = img.cols; 25 | cv::Vec3b blackClr(0, 0, 0); 26 | for (int i = 0; i < rows; ++i) { 27 | for (int j = 0; j < cols; ++j) { 28 | if (markers.at(i, j) != cv::GC_FGD && markers.at(i, j) != cv::GC_PR_FGD) { 29 | img.at(i, j) = blackClr; 30 | } 31 | } 32 | } 33 | cv::imshow("image", img); 34 | finished = true; 35 | } 36 | 37 | static void onMouseClick(int event, int x, int y, int, void*) { 38 | if (finished) { 39 | return; 40 | } 41 | 42 | if (event == cv::EVENT_LBUTTONDOWN && drawr == 0) { 43 | if(x_0 < 0) { 44 | x_0 = x; 45 | y_0 = y; 46 | cv::ellipse(markers, cv::Point(x, y), cv::Size(1, 1), 47 | 0, 0, 360, cv::GC_FGD, 3); 48 | cv::ellipse(drawRect, cv::Point(x, y), cv::Size(1, 1), 49 | 0, 0, 360, cv::Scalar(0, 0, 255), 3); 50 | drawr = 1; 51 | } 52 | 53 | cv::addWeighted(img,0.7,drawRect,0.3, 0, img_preview); 54 | 55 | cv::imshow("image", img_preview); 56 | return; 57 | } 58 | if( event == cv::EVENT_LBUTTONUP) { 59 | drawr = 2; 60 | } 61 | if(drawr == 1) { //Just moving 62 | drawRect.setTo(0); 63 | cv::rectangle(drawRect, cv::Point(x_0,y_0), cv::Point(x,y), cv::Scalar(0,0,255), -1); 64 | 65 | cv::addWeighted(img,0.7,drawRect,0.3, 0, img_preview); 66 | x_1 = x; y_1 = y; 67 | cv::imshow("image", img_preview); 68 | return; 69 | } 70 | 71 | if (drawr == 2) { 72 | cv::Mat bg; 73 | cv::Mat fg; 74 | cv::rectangle(markers, cv::Point(x_0,y_0), cv::Point(x_1,y_1), cv::GC_PR_FGD, -1); 75 | cv::grabCut(img, markers, cv::Rect(0, 0, img.cols - 1, img.rows - 1), 76 | bg, fg, 5, cv::GC_EVAL); 77 | displayResult(); 78 | return; 79 | } 80 | } 81 | 82 | void help(char** argv) { 
83 | cout << "\nExample 12-3. Using GrabCut for background removal" 84 | << "\n- Use left mouse to drag a rectangle over the object" 85 | << "\n- On release of left mouse button, we will perform GrabCut" 86 | << "\n- Press any key to terminate program" 87 | << "\nUsage: " 88 | << argv[0] << " \n" 89 | << "\nExample:\n" << argv[0] << " ../stuff.jpg\n" << endl; 90 | } 91 | 92 | 93 | int main(int argc, char** argv) { 94 | help(argv); 95 | if (argc != 2) { 96 | return -1; 97 | } 98 | 99 | img = cv::imread(std::string(argv[1]), cv::IMREAD_COLOR); 100 | if (img.channels() != 3) { 101 | cerr << "Input image should have 3 channels" << endl; 102 | exit(1); 103 | } 104 | 105 | markers = cv::Mat(img.size(), CV_8UC1); 106 | markers.setTo(cv::GC_PR_BGD); 107 | 108 | img_preview = img.clone(); 109 | drawRect = img.clone(); 110 | 111 | finished = false; 112 | 113 | cv::namedWindow("image", cv::WINDOW_AUTOSIZE); 114 | cv::setMouseCallback("image", onMouseClick, 0); 115 | 116 | cv::imshow("image", img_preview); 117 | 118 | cv::waitKey(0); 119 | 120 | return 0; 121 | } 122 | -------------------------------------------------------------------------------- /example_12-04.cpp: -------------------------------------------------------------------------------- 1 | // Example 12-4. 
Using Watershed for image segmentation 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | using std::cout; 8 | using std::cerr; 9 | using std::endl; 10 | 11 | cv::Mat img_preview; 12 | cv::Mat img; 13 | cv::Mat markers; 14 | 15 | bool finished; 16 | 17 | void displayResult() { 18 | cv::imshow("image", img); 19 | finished = true; 20 | } 21 | 22 | const int dx[4] = {-1, 1, 0, 0}; 23 | const int dy[4] = {0, 0, -1, 1}; 24 | 25 | void fillMarker(int x, int y, int marker_id) { 26 | if (x < 0 || y < 0 || x >= markers.rows || y >= markers.cols) { 27 | return; 28 | } 29 | 30 | if (markers.at(x, y) != -1) { 31 | return; 32 | } 33 | 34 | markers.at(x, y) = marker_id; 35 | 36 | for (int dir = 0; dir < 4; ++dir) { 37 | int nx = x + dx[dir]; 38 | int ny = y + dy[dir]; 39 | fillMarker(nx, ny, marker_id); 40 | } 41 | } 42 | 43 | static void onMouseClick(int event, int x, int y, int, void*) { 44 | if (finished) { 45 | return; 46 | } 47 | 48 | if (event == cv::EVENT_LBUTTONDOWN) { 49 | cv::ellipse(markers, cv::Point(x, y), cv::Size(1, 1), 50 | 0, 0, 360, -1, 3); 51 | cv::ellipse(img_preview, cv::Point(x, y), cv::Size(1, 1), 52 | 0, 0, 360, cv::Scalar(0, 0, 255), 3); 53 | cv::imshow("image", img_preview); 54 | return; 55 | } 56 | 57 | if (event == cv::EVENT_RBUTTONDOWN) { 58 | int marker_id = 0; 59 | for (int x = 0; x < markers.rows; ++x) { 60 | for (int y = 0; y < markers.cols; ++y) { 61 | if (markers.at(x, y) == -1) { 62 | ++marker_id; 63 | fillMarker(x, y, marker_id); 64 | } 65 | } 66 | } 67 | cv::watershed(img, markers); 68 | cv::Vec3b borderColor(0, 0, 255); 69 | for (int x = 0; x < img.rows; ++x) { 70 | for (int y = 0; y < img.cols; ++y) { 71 | if (markers.at(x, y) == -1) { 72 | img.at(x, y) = borderColor; 73 | continue; 74 | } 75 | for (int dir = 0; dir < 4; ++dir) { 76 | int nx = x + dx[dir]; 77 | int ny = y + dy[dir]; 78 | if (nx < 0 || ny < 0 || nx >= img.rows || ny >= img.cols) { 79 | continue; 80 | } 81 | if (markers.at(x, y) != markers.at(nx, ny)) { 82 | img.at(x, y) = 
borderColor; 83 | } 84 | } 85 | } 86 | } 87 | 88 | displayResult(); 89 | return; 90 | } 91 | } 92 | 93 | void help(char** argv) { 94 | cout << "\nExample 12-4. Using Watershed for image segmentation" 95 | << "\n- Use left click on the image to place marker for the new segment" 96 | << "\n- Use right click on the image to perform Watershed" 97 | << "\n- Press any key to terminate program" 98 | << "\nUsage: " 99 | << argv[0] << " \n" 100 | << "\nExample:\n" << argv[0] << " ../stuff.jpg\n" << endl; 101 | } 102 | 103 | int main(int argc, char** argv) { 104 | help(argv); 105 | if (argc != 2) { 106 | return -1; 107 | } 108 | 109 | img = cv::imread(std::string(argv[1]), cv::IMREAD_COLOR); 110 | if (img.channels() != 3) { 111 | cerr << "Input image should have 3 channels" << endl; 112 | exit(1); 113 | } 114 | 115 | markers = cv::Mat(img.size(), CV_32SC1); 116 | markers.setTo(0); 117 | 118 | img_preview = img.clone(); 119 | 120 | finished = false; 121 | 122 | cv::namedWindow("image", cv::WINDOW_AUTOSIZE); 123 | cv::setMouseCallback("image", onMouseClick, 0); 124 | 125 | cv::imshow("image", img_preview); 126 | 127 | cv::waitKey(0); 128 | 129 | return 0; 130 | } 131 | -------------------------------------------------------------------------------- /example_13-01.cpp: -------------------------------------------------------------------------------- 1 | // Example 13-1. Histogram computation and display 2 | 3 | #include 4 | #include 5 | 6 | using namespace std; 7 | 8 | int main( int argc, char** argv ){ 9 | if(argc != 2) { 10 | cout << "\n// Example 13-1. Histogram computation and display" << endl; 11 | cout << "\nComputer Color Histogram\nUsage: " <\n" << endl; 12 | return -1; 13 | } 14 | 15 | cv::Mat src = cv::imread( argv[1],1 ); 16 | if( src.empty() ) { cout << "Cannot load " << argv[1] << endl; return -1; } 17 | 18 | // Compute the HSV image, and decompose it into separate planes. 
19 | // 20 | cv::Mat hsv; 21 | cv::cvtColor(src, hsv, cv::COLOR_BGR2HSV); 22 | 23 | float h_ranges[] = {0, 180}; // hue is [0, 180] 24 | float s_ranges[] = {0, 256}; 25 | const float* ranges[] = {h_ranges, s_ranges}; 26 | int histSize[] = {30, 32}, ch[] = {0, 1}; 27 | 28 | cv::Mat hist; 29 | 30 | // Compute the histogram 31 | // 32 | cv::calcHist(&hsv, 1, ch, cv::noArray(), hist, 2, histSize, ranges, true); 33 | cv::normalize(hist, hist, 0, 255, cv::NORM_MINMAX); 34 | 35 | int scale = 10; 36 | cv::Mat hist_img(histSize[0]*scale, histSize[1]*scale, CV_8UC3); 37 | 38 | // Draw our histogram. 39 | // 40 | for( int h = 0; h < histSize[0]; h++ ) { 41 | for( int s = 0; s < histSize[1]; s++ ){ 42 | float hval = hist.at(h, s); 43 | cv::rectangle( 44 | hist_img, 45 | cv::Rect(h*scale,s*scale,scale,scale), 46 | cv::Scalar::all(hval), 47 | -1 48 | ); 49 | } 50 | } 51 | 52 | cv::imshow("image", src); 53 | cv::imshow("H-S histogram", hist_img); 54 | cv::waitKey(); 55 | return 0; 56 | } 57 | -------------------------------------------------------------------------------- /example_13-02.cpp: -------------------------------------------------------------------------------- 1 | // Example 13-2. Creating signatures from histograms for EMD; note that this code is the 2 | // source of the data in Table 13-1, in which the hand histogram is compared in different 3 | // lighting conditions 4 | 5 | #include 6 | #include 7 | 8 | using namespace std; 9 | 10 | void help( char** argv ){ 11 | cout << "//\nExample 13-2. 
Creating signatures from histograms for EMD; note that this code is the" 12 | << "\n// source of the data in Table 13-1, in which the hand histogram is compared in different" 13 | << "\n// lighting conditions\n\n" << endl; 14 | cout << "\nCall is:\n" 15 | << argv[0] <<" modelImage0 testImage1 testImage2 badImage3\n\n" 16 | << "for example: " << argv[0] 17 | << " ../HandIndoorColor.jpg ../HandOutdoorColor.jpg " 18 | << "../HandOutdoorSunColor.jpg ../fruits.jpg\n" 19 | << "\n" << endl; 20 | } 21 | 22 | // Compare 3 images' histograms 23 | int main( int argc, char** argv ) { 24 | if( argc != 5 ) { help( argv ); return -1; } 25 | vector src(5); 26 | cv::Mat tmp; 27 | int i; 28 | 29 | tmp = cv::imread( argv[1], 1); 30 | if( tmp.empty() ) { 31 | cerr << "Error on reading image 1," << argv[1] << "\n" << endl; 32 | help( argv ); 33 | return(-1); 34 | } 35 | 36 | // Parse the first image into two image halves divided halfway on y 37 | // 38 | cv::Size size = tmp.size(); 39 | int width = size.width; 40 | int height = size.height; 41 | int halfheight = height >> 1; 42 | 43 | cout <<"Getting size [[" <::iterator tmpit = tmp.begin(); 52 | 53 | // top half 54 | // 55 | cv::Mat_::iterator s0it = src[0].begin(); 56 | for(i = 0; i < width*halfheight; ++i, ++tmpit, ++s0it) *s0it = *tmpit; 57 | 58 | // Bottom half 59 | // 60 | cv::Mat_::iterator s1it = src[1].begin(); 61 | for(i = 0; i < width*halfheight; ++i, ++tmpit, ++s1it) *s1it = *tmpit; 62 | 63 | // Load the other three images 64 | // 65 | for(i = 2; i<5; ++i){ 66 | src[i] = cv::imread(argv[i], 1); 67 | if(src[i].empty()) { 68 | cerr << "Error on reading image " << i << ": " << argv[i] << "\n" << endl; 69 | help( argv ); 70 | return(-1); 71 | } 72 | } 73 | 74 | // Compute the HSV image, and decompose it into separate planes. 
75 | // 76 | vector hsv(5), hist(5), hist_img(5); 77 | int h_bins = 8; 78 | int s_bins = 8; 79 | int hist_size[] = { h_bins, s_bins }, ch[] = {0, 1}; 80 | float h_ranges[] = { 0, 180 }; // hue range is [0,180] 81 | float s_ranges[] = { 0, 255 }; 82 | const float* ranges[] = { h_ranges, s_ranges }; 83 | int scale = 10; 84 | 85 | for(i = 0; i<5; ++i) { 86 | cv::cvtColor( src[i], hsv[i], cv::COLOR_BGR2HSV ); 87 | cv::calcHist( &hsv[i], 1, ch, cv::noArray(), hist[i], 2, hist_size, ranges, true ); 88 | cv::normalize( hist[i], hist[i], 0, 255, cv::NORM_MINMAX ); 89 | hist_img[i] = cv::Mat::zeros( hist_size[0]*scale, hist_size[1]*scale, CV_8UC3 ); 90 | 91 | // Draw our histogram For the 5 images 92 | // 93 | for( int h = 0; h < hist_size[0]; h++ ) 94 | for( int s = 0; s < hist_size[1]; s++ ) { 95 | float hval = hist[i].at(h, s); 96 | cv::rectangle( 97 | hist_img[i], 98 | cv::Rect(h*scale, s*scale, scale, scale), 99 | cv::Scalar::all(hval), 100 | -1 101 | ); 102 | } 103 | } 104 | 105 | // Display 106 | // 107 | cv::namedWindow( "Source0", 1 );cv::imshow( "Source0", src[0] ); 108 | cv::namedWindow( "HS Histogram0", 1 );cv::imshow( "HS Histogram0", hist_img[0] ); 109 | 110 | cv::namedWindow( "Source1", 1 );cv::imshow( "Source1", src[1] ); 111 | cv::namedWindow( "HS Histogram1", 1 ); cv::imshow( "HS Histogram1", hist_img[1] ); 112 | 113 | cv::namedWindow( "Source2", 1 ); cv::imshow( "Source2", src[2] ); 114 | cv::namedWindow( "HS Histogram2", 1 ); cv::imshow( "HS Histogram2", hist_img[2] ); 115 | 116 | cv::namedWindow( "Source3", 1 ); cv::imshow( "Source3", src[3] ); 117 | cv::namedWindow( "HS Histogram3", 1 ); cv::imshow( "HS Histogram3", hist_img[3] ); 118 | 119 | cv::namedWindow( "Source4", 1 ); cv::imshow( "Source4", src[4] ); 120 | cv::namedWindow( "HS Histogram4", 1 ); cv::imshow( "HS Histogram4", hist_img[4] ); 121 | 122 | // Compare the histogram src0 vs 1, vs 2, vs 3, vs 4 123 | cout << "Comparison:\n" 124 | << "Corr Chi Intersect Bhat\n"<< endl; 125 | 126 | for(i=1; 
i<5; ++i) { // For each histogram 127 | cout << "Hist[0] vs Hist[" << i << "]: " << endl;; 128 | for(int j=0; j<4; ++j) { // For each comparison type 129 | cout << "method[" << j << "]: " << cv::compareHist(hist[0],hist[i],j) << " "; 130 | } 131 | cout << endl; 132 | } 133 | 134 | //Do EMD and report 135 | // 136 | vector sig(5); 137 | cout << "\nEMD: " << endl; 138 | 139 | // Oi Vey, parse histograms to earth movers signatures 140 | // 141 | for( i=0; i<5; ++i) { 142 | 143 | vector sigv; 144 | 145 | // (re)normalize histogram to make the bin weights sum to 1. 146 | // 147 | cv::normalize(hist[i], hist[i], 1, 0, cv::NORM_L1); 148 | for( int h = 0; h < h_bins; h++ ) 149 | for( int s = 0; s < s_bins; s++ ) { 150 | float bin_val = hist[i].at(h, s); 151 | if( bin_val != 0 ) 152 | sigv.push_back( cv::Vec3f(bin_val, (float)h, (float)s)); 153 | } 154 | 155 | // make Nx3 32fC1 matrix, where N is the number of nonzero histogram bins 156 | // 157 | sig[i] = cv::Mat(sigv).clone().reshape(1); 158 | if( i > 0 ) 159 | cout << "Hist[0] vs Hist[" << i << "]: " 160 | << EMD(sig[0], sig[i], cv::DIST_L2) << endl; 161 | } 162 | 163 | cv::waitKey(0); 164 | 165 | } 166 | -------------------------------------------------------------------------------- /example_13-03.cpp: -------------------------------------------------------------------------------- 1 | // Example 13-3. Template matching 2 | 3 | #include 4 | #include 5 | 6 | using namespace std; 7 | 8 | void help( char** argv ){ 9 | cout << "\n" 10 | <<"\nExample 13-3: using matchTemplate(). 
The call is:\n" 11 | <<"\n" 12 | < 5 | #include 6 | 7 | using namespace std; 8 | 9 | cv::Mat g_gray, g_binary; 10 | int g_thresh = 100; 11 | 12 | void on_trackbar( int, void* ) { 13 | cv::threshold( g_gray, g_binary, g_thresh, 255, cv::THRESH_BINARY ); 14 | vector< vector< cv::Point> > contours; 15 | cv::findContours( 16 | g_binary, 17 | contours, 18 | cv::noArray(), 19 | cv::RETR_LIST, 20 | cv::CHAIN_APPROX_SIMPLE 21 | ); 22 | g_binary = cv::Scalar::all(0); 23 | 24 | cv::drawContours( g_binary, contours, -1, cv::Scalar::all(255)); 25 | cv::imshow( "Contours", g_binary ); 26 | 27 | } 28 | 29 | int main( int argc, char** argv ) { 30 | if( argc != 2 || ( g_gray = cv::imread(argv[1], 0)).empty() ) { 31 | cout << "\nExample 14-1: Find threshold dependent contours\nUsage:\n" < 4 | #include 5 | #include 6 | 7 | using namespace std; 8 | 9 | struct AreaCmp { 10 | AreaCmp(const vector& _areas) : areas(&_areas) {} 11 | bool operator()(int a, int b) const { return (*areas)[a] > (*areas)[b]; } 12 | const vector* areas; 13 | }; 14 | 15 | int main(int argc, char* argv[]) { 16 | 17 | cv::Mat img, img_edge, img_color; 18 | 19 | // load image or show help if no image was provided 20 | // 21 | if( argc != 2 || (img = cv::imread(argv[1],cv::IMREAD_GRAYSCALE)).empty() ) { 22 | cout << "\nERROR: You need 2 parameters, you had " << argc << "\n" << endl; 23 | cout << "\nExample 14_2: Drawing Contours\nCall is:\n" << argv[0] << " \n\n" 24 | << "Example:\n" << argv[0] << " ../box.png\n" << endl; 25 | return -1; 26 | } 27 | 28 | cv::threshold(img, img_edge, 128, 255, cv::THRESH_BINARY); 29 | cv::imshow("Image after threshold", img_edge); 30 | vector< vector< cv::Point > > contours; 31 | vector< cv::Vec4i > hierarchy; 32 | 33 | cv::findContours( 34 | img_edge, 35 | contours, 36 | hierarchy, 37 | cv::RETR_LIST, 38 | cv::CHAIN_APPROX_SIMPLE 39 | ); 40 | cout << "\n\nHit any key to draw the next contour, ESC to quit\n\n"; 41 | cout << "Total Contours Detected: " << contours.size() << endl; 42 | 
43 | vector sortIdx(contours.size()); 44 | vector areas(contours.size()); 45 | for( int n = 0; n < (int)contours.size(); n++ ) { 46 | sortIdx[n] = n; 47 | areas[n] = contourArea(contours[n], false); 48 | } 49 | 50 | // sort contours so that the largest contours go first 51 | // 52 | std::sort( sortIdx.begin(), sortIdx.end(), AreaCmp(areas )); 53 | 54 | for( int n = 0; n < (int)sortIdx.size(); n++ ) { 55 | int idx = sortIdx[n]; 56 | cv::cvtColor( img, img_color, cv::COLOR_GRAY2BGR ); 57 | cv::drawContours( 58 | img_color, contours, idx, 59 | cv::Scalar(0,0,255), 2, 8, hierarchy, 60 | 0 // Try different values of max_level, and see what happens 61 | ); 62 | cout << "Contour #" << idx << ": area=" << areas[idx] << 63 | ", nvertices=" << contours[idx].size() << endl; 64 | cv::imshow(argv[0], img_color); 65 | int k; 66 | if( (k = cv::waitKey()&255) == 27 ) 67 | break; 68 | } 69 | cout << "Finished all contours\n"; 70 | 71 | return 0; 72 | } 73 | -------------------------------------------------------------------------------- /example_14-03.cpp: -------------------------------------------------------------------------------- 1 | // Example 14-3. 
Drawing labeled connected components 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | using namespace std; 8 | 9 | int main(int argc, char* argv[]) { 10 | 11 | cv::Mat img, img_edge, labels, centroids, img_color, stats; 12 | 13 | // load image or show help if no image was provided 14 | if( (argc != 2) 15 | || (img = cv::imread( argv[1], cv::IMREAD_GRAYSCALE )).empty() 16 | ) { 17 | cout << "\nERROR: You need 2 parameters, you had " << argc << "\n" << endl; 18 | cout << "\nExample 14-3: Drawing labeled connected componnents\n" 19 | << "Call is:\n" <\n" 20 | << "\nExample:\n" << argv[0] << " ../HandIndoorColor.jpg\n" << endl; 21 | return -1; 22 | } 23 | 24 | cv::threshold(img, img_edge, 128, 255, cv::THRESH_BINARY); 25 | cv::imshow("Image after threshold", img_edge); 26 | 27 | int i, nccomps = cv::connectedComponentsWithStats ( 28 | img_edge, 29 | labels, 30 | stats, 31 | centroids 32 | ); 33 | cout << "Total Connected Components Detected: " << nccomps << endl; 34 | 35 | vector colors(nccomps+1); 36 | colors[0] = cv::Vec3b(0,0,0); // background pixels remain black. 37 | for( i = 1; i <= nccomps; i++ ) { 38 | colors[i] = cv::Vec3b(rand()%256, rand()%256, rand()%256); 39 | if( stats.at(i-1, cv::CC_STAT_AREA) < 100 ) 40 | colors[i] = cv::Vec3b(0,0,0); // small regions are painted with black too. 41 | } 42 | img_color = cv::Mat::zeros(img.size(), CV_8UC3); 43 | for( int y = 0; y < img_color.rows; y++ ) 44 | for( int x = 0; x < img_color.cols; x++ ) 45 | { 46 | int label = labels.at(y, x); 47 | CV_Assert(0 <= label && label <= nccomps); 48 | img_color.at(y, x) = colors[label]; 49 | } 50 | 51 | cv::imshow("Labeled map", img_color); 52 | cv::waitKey(); 53 | return 0; 54 | } 55 | 56 | -------------------------------------------------------------------------------- /example_14-04.cpp: -------------------------------------------------------------------------------- 1 | // Example 14-4. 
Using the shape context distance extractor 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #include "opencv2/opencv.hpp" 8 | 9 | using namespace std; 10 | using namespace cv; 11 | 12 | static vector sampleContour( const Mat& image, int n=300 ) { 13 | 14 | vector > _contours; 15 | vector all_points; 16 | findContours(image, _contours, RETR_LIST, CHAIN_APPROX_NONE); 17 | for (size_t i=0; i <_contours.size(); i++) { 18 | for (size_t j=0; j <_contours[i].size(); j++) 19 | all_points.push_back( _contours[i][j] ); 20 | 21 | // If too little points, replicate them 22 | // 23 | int dummy=0; 24 | for (int add=(int)all_points.size(); add sampled; 30 | for (int i=0; i \n" 40 | << "\nMISSMATCH Example:\n" << argv[0] << " ../shape_sample/1.png ../shape_sample/3.png\n" 41 | << "\MATCH Example:\n" << argv[0] << " ../shape_sample/3.png ../shape_sample/4.png\n" 42 | << endl; 43 | } 44 | 45 | 46 | int main(int argc, char** argv) { 47 | help(argv); 48 | if(argc != 3) { 49 | cout << "\nERROR: you need 2 parameters, you had " << argc << " parameters.\n" << endl; 50 | return -1; 51 | } 52 | string path = "../data/shape_sample/"; 53 | int indexQuery = 1; 54 | 55 | Ptr mysc = createShapeContextDistanceExtractor(); 56 | 57 | Size sz2Sh(300,300); 58 | Mat img1=imread(argv[1], IMREAD_GRAYSCALE); 59 | Mat img2=imread(argv[2], IMREAD_GRAYSCALE); 60 | vector c1 = sampleContour(img1); 61 | vector c2 = sampleContour(img2); 62 | float dis = mysc->computeDistance( c1, c2 ); 63 | cout << "shape context distance between " << 64 | argv[1] << " and " << argv[2] << " is: " << dis << endl; 65 | cv::imshow("SHAPE #1", img1); 66 | cv::imshow("SHAPE #2",img2); 67 | cv::waitKey(); 68 | 69 | return 0; 70 | 71 | } 72 | -------------------------------------------------------------------------------- /example_15-01.cpp: -------------------------------------------------------------------------------- 1 | //Example 15-1. 
Reading out the RGB values of all pixels in one row of a video and 2 | // accumulating those values into three separate comma separated files 3 | // 4 | #include 5 | #include 6 | #include 7 | 8 | using namespace std; 9 | 10 | void help(char** argv ) { 11 | cout << "\n" 12 | << "Example: 15-1: Read out RGB pixel values and store them to disk\nCall:\n" 13 | << argv[0] <<" \n" 14 | << "\nExample:\n" << argv[0] << " ../tree.avi" 15 | << "\n This will store to files blines.csv, glines.csv and rlines.csv\n\n" 16 | << endl; 17 | } 18 | 19 | int main( int argc, char** argv) { 20 | // Argument handling 21 | // 22 | if(argc != 2) { help(argv); return -1; } 23 | cv::namedWindow( argv[0], CV_WINDOW_AUTOSIZE ); 24 | cv::VideoCapture cap; 25 | if((argc < 2)|| !cap.open(argv[1])) 26 | { 27 | cerr << "Couldn't open video file" << endl; 28 | help(argv); 29 | cap.open(0); 30 | return -1; 31 | } 32 | 33 | //Prepare Output 34 | // 35 | cv::Point pt1(10,10), pt2(30,30); 36 | int max_buffer; 37 | cv::Mat rawImage; 38 | ofstream b,g,r; 39 | b.open("blines.csv"); 40 | g.open("glines.csv"); 41 | r.open("rlines.csv"); 42 | 43 | // MAIN PROCESSING LOOP: 44 | // 45 | for(;;) { 46 | cap >> rawImage; 47 | if( !rawImage.data ) break; 48 | cv::LineIterator it( rawImage, pt1, pt2, 8); 49 | for( int j=0; j 3 | #include 4 | #include 5 | #include 6 | 7 | using namespace std; 8 | 9 | // Global storage 10 | // 11 | // Float, 3-channel images 12 | // 13 | cv::Mat image; 14 | cv::Mat IavgF, IdiffF, IprevF, IhiF, IlowF; 15 | cv::Mat tmp, tmp2, mask; 16 | 17 | // Float, 1-channel images 18 | // 19 | vector Igray(3); 20 | vector Ilow(3); 21 | vector Ihi(3); 22 | 23 | // Byte, 1-channel image 24 | // 25 | cv::Mat Imaskt; 26 | 27 | // Thresholds 28 | // 29 | float high_thresh = 20.0; //scaling the thesholds in backgroundDiff() 30 | float low_thresh = 28.0; 31 | 32 | // Counts number of images learned for averaging later 33 | // 34 | float Icount; 35 | 36 | // I is just a sample image for allocation purposes 
37 | // (passed in for sizing) 38 | // 39 | void AllocateImages( const cv::Mat& I ) { 40 | cv::Size sz = I.size(); 41 | IavgF = cv::Mat::zeros(sz, CV_32FC3 ); 42 | IdiffF = cv::Mat::zeros(sz, CV_32FC3 ); 43 | IprevF = cv::Mat::zeros(sz, CV_32FC3 ); 44 | IhiF = cv::Mat::zeros(sz, CV_32FC3 ); 45 | IlowF = cv::Mat::zeros(sz, CV_32FC3 ); 46 | Icount = 0.00001; // Protect against divide by zero 47 | tmp = cv::Mat::zeros( sz, CV_32FC3 ); 48 | tmp2 = cv::Mat::zeros( sz, CV_32FC3 ); 49 | Imaskt = cv::Mat( sz, CV_32FC1 ); 50 | } 51 | 52 | // Learn the background statistics for one more frame 53 | // I is a color sample of the background, 3-channel, 8u 54 | // 55 | void accumulateBackground( cv::Mat& I ){ 56 | static int first = 1; // nb. Not thread safe 57 | I.convertTo( tmp, CV_32F ); // convert to float 58 | if( !first ){ 59 | IavgF += tmp; 60 | cv::absdiff( tmp, IprevF, tmp2 ); 61 | IdiffF += tmp2; 62 | Icount += 1.0; 63 | } 64 | first = 0; 65 | IprevF = tmp; 66 | } 67 | 68 | void setHighThreshold( float scale ) { 69 | IhiF = IavgF + (IdiffF * scale); 70 | cv::split( IhiF, Ihi ); 71 | } 72 | 73 | void setLowThreshold( float scale ) { 74 | IlowF = IavgF - (IdiffF * scale); 75 | cv::split( IlowF, Ilow ); 76 | } 77 | 78 | void createModelsfromStats() { 79 | IavgF *= (1.0/Icount); 80 | IdiffF *= (1.0/Icount); 81 | 82 | // Make sure diff is always something 83 | // 84 | IdiffF += cv::Scalar( 1.0, 1.0, 1.0 ); 85 | setHighThreshold( high_thresh); 86 | setLowThreshold( low_thresh); 87 | } 88 | 89 | 90 | // Create a binary: 0,255 mask where 255 (red) means foreground pixel 91 | // I Input image, 3-channel, 8u 92 | // Imask Mask image to be created, 1-channel 8u 93 | // 94 | void backgroundDiff( 95 | cv::Mat& I, 96 | cv::Mat& Imask) { 97 | 98 | I.convertTo( tmp, CV_32F ); // To float 99 | cv::split( tmp, Igray ); 100 | 101 | // Channel 1 102 | // 103 | cv::inRange( Igray[0], Ilow[0], Ihi[0], Imask ); 104 | 105 | // Channel 2 106 | // 107 | cv::inRange( Igray[1], Ilow[1], Ihi[1], 
Imaskt ); 108 | Imask = cv::min( Imask, Imaskt ); 109 | 110 | // Channel 3 111 | // 112 | cv::inRange( Igray[2], Ilow[2], Ihi[2], Imaskt ); 113 | Imask = cv::min( Imask, Imaskt ); 114 | 115 | // Finally, invert the results 116 | // 117 | Imask = 255 - Imask; 118 | } 119 | 120 | /////////////////// 121 | void help(char** argv ) { 122 | cout << "\n" 123 | << "Train a background model on the first <#frames to train on> frames of an incoming video, then run the model\n" 124 | << argv[0] <<" <#frames to train on> \n" 125 | << "For example:\n" 126 | << argv[0] << " 50 ../tree.avi\n" 127 | << "'A' or 'a' to adjust thresholds, esc, 'q' or 'Q' to quit" 128 | << endl; 129 | } 130 | 131 | void showForgroundInRed( char** argv, const cv::Mat &img) { 132 | cv::Mat rawImage; 133 | cv::split( img, Igray ); 134 | Igray[2] = cv::max( mask, Igray[2] ); 135 | cv::merge( Igray, rawImage ); 136 | cv::imshow( argv[0], rawImage ); 137 | cv::imshow("Segmentation", mask); 138 | } 139 | 140 | void adjustThresholds(char** argv, cv::Mat &img) { 141 | int key = 1; 142 | while((key = cv::waitKey()) != 27 && key != 'Q' && key != 'q') // Esc or Q or q to exit 143 | { 144 | if(key == 'L') { low_thresh += 0.2;} 145 | if(key == 'l') { low_thresh -= 0.2;} 146 | if(key == 'H') { high_thresh += 0.2;} 147 | if(key == 'h') { high_thresh -= 0.2;} 148 | cout << "H or h, L or l, esq or q to quit; high_thresh = " << high_thresh << ", " << "low_thresh = " << low_thresh << endl; 149 | setHighThreshold(high_thresh); 150 | setLowThreshold(low_thresh); 151 | backgroundDiff(img, mask); 152 | showForgroundInRed(argv, img); 153 | } 154 | } 155 | 156 | //////////////////////////////////////////////////////////////// 157 | int main( int argc, char** argv) { 158 | cv::namedWindow( argv[0], cv::WINDOW_AUTOSIZE ); 159 | cv::VideoCapture cap; 160 | if((argc < 3)|| !cap.open(argv[2])) { 161 | cerr << "Couldn't run the program" << endl; 162 | help(argv); 163 | cap.open(0); 164 | return -1; 165 | } 166 | int 
number_to_train_on = atoi( argv[1] ); 167 | 168 | // FIRST PROCESSING LOOP (TRAINING): 169 | // 170 | int frame_count = 0; 171 | int key; 172 | bool first_frame = true; 173 | cout << "Total frames to train on = " << number_to_train_on << endl; //db 174 | while(1) { 175 | cout << "frame#: " << frame_count << endl; 176 | cap >> image; 177 | if( !image.data ) exit(1); // Something went wrong, abort 178 | if(frame_count == 0) { AllocateImages(image);} 179 | accumulateBackground( image ); 180 | cv::imshow( argv[0], image ); 181 | frame_count++; 182 | if( (key = cv::waitKey(7)) == 27 || key == 'q' || key == 'Q' || frame_count >= number_to_train_on) break; //Allow early exit on space, esc, q 183 | } 184 | 185 | // We have accumulated our training, now create the models 186 | // 187 | cout << "Creating the background model" << endl; 188 | createModelsfromStats(); 189 | cout << "Done! Hit any key to continue into single step. Hit 'a' or 'A' to adjust thresholds, esq, 'q' or 'Q' to quit\n" << endl; 190 | 191 | // SECOND PROCESSING LOOP (TESTING): 192 | // 193 | cv::namedWindow("Segmentation", cv::WINDOW_AUTOSIZE ); //For the mask image 194 | while((key = cv::waitKey()) != 27 || key == 'q' || key == 'Q' ) { // esc, 'q' or 'Q' to exit 195 | cap >> image; 196 | if( !image.data ) exit(0); 197 | cout << frame_count++ << endl; 198 | backgroundDiff( image, mask ); 199 | cv::imshow("Segmentation", mask); 200 | 201 | // A simple visualization is to write to the red channel 202 | // 203 | showForgroundInRed( argv, image); 204 | if(key == 'a') { 205 | cout << "In adjust thresholds, 'H' or 'h' == high thresh up or down; 'L' or 'l' for low thresh up or down." << endl; 206 | cout << " esq, 'q' or 'Q' to quit " << endl; 207 | adjustThresholds(argv, image); 208 | cout << "Done with adjustThreshold, back to frame stepping, esq, q or Q to quit." 
<< endl; 209 | } 210 | } 211 | exit(0); 212 | } 213 | -------------------------------------------------------------------------------- /example_16-01-imgA.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/example_16-01-imgA.png -------------------------------------------------------------------------------- /example_16-01-imgB.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/example_16-01-imgB.png -------------------------------------------------------------------------------- /example_16-01.cpp: -------------------------------------------------------------------------------- 1 | // Example 16-1. Pyramid L-K optical flow 2 | // 3 | #include 4 | #include 5 | #include 6 | 7 | static const int MAX_CORNERS = 1000; 8 | using std::cout; 9 | using std::endl; 10 | using std::vector; 11 | 12 | 13 | void help( char** argv ) { 14 | cout << "\nExample 16-1: Pyramid L-K optical flow example.\n" << endl; 15 | cout << "Call: " < cornersA, cornersB; 38 | const int MAX_CORNERS = 500; 39 | cv::goodFeaturesToTrack( 40 | imgA, // Image to track 41 | cornersA, // Vector of detected corners (output) 42 | MAX_CORNERS, // Keep up to this many corners 43 | 0.01, // Quality level (percent of maximum) 44 | 5, // Min distance between corners 45 | cv::noArray(), // Mask 46 | 3, // Block size 47 | false, // true: Harris, false: Shi-Tomasi 48 | 0.04 // method specific parameter 49 | ); 50 | 51 | cv::cornerSubPix( 52 | imgA, // Input image 53 | cornersA, // Vector of corners (input and output) 54 | cv::Size(win_size, win_size), // Half side length of search window 55 | cv::Size(-1, -1), // Half side length of dead zone (-1=none) 56 | cv::TermCriteria( 57 | cv::TermCriteria::MAX_ITER | 
cv::TermCriteria::EPS, 58 | 20, // Maximum number of iterations 59 | 0.03 // Minimum change per iteration 60 | ) 61 | ); 62 | 63 | // Call the Lucas Kanade algorithm 64 | // 65 | vector features_found; 66 | cv::calcOpticalFlowPyrLK( 67 | imgA, // Previous image 68 | imgB, // Next image 69 | cornersA, // Previous set of corners (from imgA) 70 | cornersB, // Next set of corners (from imgB) 71 | features_found, // Output vector, each is 1 for tracked 72 | cv::noArray(), // Output vector, lists errors (optional) 73 | cv::Size(win_size * 2 + 1, win_size * 2 + 1), // Search window size 74 | 5, // Maximum pyramid level to construct 75 | cv::TermCriteria( 76 | cv::TermCriteria::MAX_ITER | cv::TermCriteria::EPS, 77 | 20, // Maximum number of iterations 78 | 0.3 // Minimum change per iteration 79 | ) 80 | ); 81 | 82 | // Now make some image of what we are looking at: 83 | // Note that if you want to track cornersB further, i.e. 84 | // pass them as input to the next calcOpticalFlowPyrLK, 85 | // you would need to "compress" the vector, i.e., exclude points for which 86 | // features_found[i] == false. 87 | for (int i = 0; i < static_cast(cornersA.size()); ++i) { 88 | if (!features_found[i]) { 89 | continue; 90 | } 91 | line( 92 | imgC, // Draw onto this image 93 | cornersA[i], // Starting here 94 | cornersB[i], // Ending here 95 | cv::Scalar(0, 255, 0), // This color 96 | 1, // This many pixels wide 97 | cv::LINE_AA // Draw line in this style 98 | ); 99 | } 100 | 101 | cv::imshow("ImageA", imgA); 102 | cv::imshow("ImageB", imgB); 103 | cv::imshow("LK Optical Flow Example", imgC); 104 | cv::waitKey(0); 105 | 106 | return 0; 107 | } 108 | -------------------------------------------------------------------------------- /example_17-01.cpp: -------------------------------------------------------------------------------- 1 | // Example 17-1. 
Kalman filter example code 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | using std::cout; 10 | using std::endl; 11 | 12 | #define phi2xy(mat) \ 13 | cv::Point(cvRound(img.cols / 2 + img.cols / 3 * cos(mat.at(0))), \ 14 | cvRound(img.rows / 2 - img.cols / 3 * sin(mat.at(0)))) 15 | 16 | 17 | void help(char** argv ) { 18 | cout << "\n" 19 | << "Example 17-1: code for using cv::KalmanFilter\n" 20 | << argv[0] << "\n\n" 21 | << "For example:\n" 22 | << argv[0] <<"\n\n" 23 | << "Esc to quit\n" 24 | << endl; 25 | } 26 | 27 | int main(int argc, char** argv) { 28 | help(argv); 29 | 30 | // Initialize, create Kalman filter object, window, random number 31 | // generator etc. 32 | // 33 | cv::Mat img(500, 500, CV_8UC3); 34 | cv::KalmanFilter kalman(2, 1, 0); 35 | 36 | // state is (phi, delta_phi) - angle and angular velocity 37 | // Initialize with random guess. 38 | // 39 | cv::Mat x_k(2, 1, CV_32F); 40 | randn(x_k, 0.0, 0.1); 41 | 42 | // process noise 43 | // 44 | cv::Mat w_k(2, 1, CV_32F); 45 | 46 | // measurements, only one parameter for angle 47 | // 48 | cv::Mat z_k = cv::Mat::zeros(1, 1, CV_32F); 49 | 50 | // Transition matrix 'F' describes relationship between 51 | // model parameters at step k and at step k+1 (this is 52 | // the "dynamics" in our model. 53 | // 54 | float F[] = {1, 1, 0, 1}; 55 | kalman.transitionMatrix = cv::Mat(2, 2, CV_32F, F).clone(); 56 | 57 | // Initialize other Kalman filter parameters. 
58 | // 59 | cv::setIdentity(kalman.measurementMatrix, cv::Scalar(1)); 60 | cv::setIdentity(kalman.processNoiseCov, cv::Scalar(1e-5)); 61 | cv::setIdentity(kalman.measurementNoiseCov, cv::Scalar(1e-1)); 62 | cv::setIdentity(kalman.errorCovPost, cv::Scalar(1)); 63 | 64 | // choose random initial state 65 | // 66 | randn(kalman.statePost, 0.0, 0.1); 67 | 68 | for (;;) { 69 | // predict point position 70 | // 71 | cv::Mat y_k = kalman.predict(); 72 | 73 | // generate measurement (z_k) 74 | // 75 | cv::randn(z_k, 0.0, 76 | sqrt(static_cast(kalman.measurementNoiseCov.at(0, 0)))); 77 | z_k = kalman.measurementMatrix*x_k + z_k; 78 | 79 | // plot points (e.g., convert 80 | // 81 | img = cv::Scalar::all(0); 82 | cv::circle(img, phi2xy(z_k), 4, cv::Scalar(128, 255, 255)); // observed 83 | cv::circle(img, phi2xy(y_k), 4, cv::Scalar(255, 255, 255), 2); // predicted 84 | cv::circle(img, phi2xy(x_k), 4, cv::Scalar(0, 0, 255)); // actual to 85 | // planar co-ordinates and draw 86 | 87 | cv::imshow("Kalman", img); 88 | 89 | // adjust Kalman filter state 90 | // 91 | kalman.correct(z_k); 92 | 93 | // Apply the transition matrix 'F' (e.g., step time forward) 94 | // and also apply the "process" noise w_k 95 | // 96 | cv::randn(w_k, 0.0, sqrt(static_cast(kalman.processNoiseCov.at(0, 0)))); 97 | x_k = kalman.transitionMatrix*x_k + w_k; 98 | 99 | // exit if user hits 'Esc' 100 | if ((cv::waitKey(100) & 255) == 27) { 101 | break; 102 | } 103 | } 104 | 105 | return 0; 106 | } 107 | -------------------------------------------------------------------------------- /example_17-02.cpp: -------------------------------------------------------------------------------- 1 | // Example 17-2. 
Farneback optical flow example code 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | using std::cout; 12 | using std::cerr; 13 | using std::string; 14 | 15 | // Argument values for calcOpticalFlowFarneback 16 | // 17 | const double pyr_scale = 0.85; // Scale between pyramid levels (< 1.0) 18 | const int levels = 7; // Number of pyramid levels 19 | const int winsize = 13; // Size of window for pre-smoothing pass 20 | const int iterations = 10; // Iterations for each pyramid level 21 | const int poly_n = 5; // Area over which polynomial will be fit 22 | const double poly_sigma = 1.1; // Width of fit polygon 23 | 24 | // Function returns cv::Mat object with optical flow visualization 25 | // 26 | cv::Mat get_optflow_image(cv::Mat& optflow, cv::Mat& img) { 27 | cv::Scalar arrow_color(0, 0, 255); 28 | cv::Mat res = img.clone(); 29 | res /= 2; // making image darker 30 | int rows = res.rows; 31 | int cols = res.cols; 32 | const int step = 12; 33 | for (int x = (step >> 1); x < rows; x += step) 34 | for (int y = (step >> 1); y < cols; y += step) { 35 | float vx = optflow.at(x, y)[0]; 36 | float vy = optflow.at(x, y)[1]; 37 | cv::Point pt1(y, x); 38 | cv::Point pt2(y + vx, x + vy); 39 | cv::arrowedLine(res, pt1, pt2, arrow_color, 1); 40 | } 41 | return res; 42 | } 43 | 44 | int main(int argc, char** argv) { 45 | // Program expects at least one argument that is path to video file 46 | // 47 | if (argc < 2) { 48 | cerr << "\nExample 17-2: Farnback optical flow example\n" 49 | << "Use:\n" << argv[0] << " \n" 50 | << "Example:\n" << argv[0] << " ../test.avi\n" 51 | << std::endl; 52 | exit(1); 53 | } 54 | 55 | string file_name = string(argv[1]); 56 | cv::VideoCapture capture(file_name); 57 | 58 | if (!capture.isOpened()) { 59 | cerr << "Cannot open file \"" << file_name << "\"\n"; 60 | exit(-1); 61 | } 62 | 63 | cv::Mat optflow; // optical flow result 64 | cv::Mat optflow_image; // optical flow visualization 65 | cv::Mat 
prev_frame; // previous frame grayscale image 66 | cv::Mat frame; // current frame grayscale image 67 | cv::Mat colored_frame; // current frame RGB-image 68 | 69 | cv::namedWindow("video"); 70 | 71 | // User can terminate program with hitting ESC 72 | // 73 | while ((cv::waitKey(10) & 255) != 27) { 74 | capture >> colored_frame; 75 | if (!colored_frame.rows || !colored_frame.cols) { 76 | break; 77 | } 78 | if (colored_frame.type() == CV_8UC3) { 79 | cvtColor(colored_frame, frame, CV_BGR2GRAY); 80 | } 81 | if (prev_frame.rows) { 82 | calcOpticalFlowFarneback(prev_frame, frame, optflow, pyr_scale, levels, winsize, 83 | iterations, poly_n, poly_sigma, cv::OPTFLOW_FARNEBACK_GAUSSIAN); 84 | optflow_image = get_optflow_image(optflow, colored_frame); 85 | cv::imshow("video", optflow_image); 86 | } 87 | prev_frame = frame.clone(); 88 | } 89 | cv::destroyAllWindows(); 90 | 91 | return 0; 92 | } 93 | -------------------------------------------------------------------------------- /example_18-01.cpp: -------------------------------------------------------------------------------- 1 | // Example 18-1. 
Reading a chessboard’s width and height, reading and collecting 2 | // the requested number of views, and calibrating the camera 3 | #include 4 | #include 5 | 6 | using std::vector; 7 | using std::cout; 8 | using std::cerr; 9 | using std::endl; 10 | 11 | void help(char **argv) { // todo rewrite this 12 | cout << "\n\n" 13 | << "Example 18-1:\nReading a chessboard’s width and height,\n" 14 | << " reading and collecting the requested number of views,\n" 15 | << " and calibrating the camera\n\n" 16 | << "Call:\n" << argv[0] << " \n\n" 17 | << "Example:\n" << argv[0] << " 9 6 15 500 0.5\n" 18 | << "-- to use the checkerboard9x6.png provided\n\n" 19 | << " * First it reads in checker boards and calibrates itself\n" 20 | << " * Then it saves and reloads the calibration matricies\n" 21 | << " * Then it creates an undistortion map and finally\n" 22 | << " * It displays an undistorted image\n" 23 | << endl; 24 | } 25 | 26 | int main(int argc, char *argv[]) { 27 | int n_boards = 0; // will be set by input list 28 | float image_sf = 0.5f; // image scaling factor 29 | float delay = 1.f; 30 | int board_w = 0; 31 | int board_h = 0; 32 | 33 | if (argc < 4 || argc > 6) { 34 | cout << "\nERROR: Wrong number of input parameters\n"; 35 | help(argv); 36 | return -1; 37 | } 38 | 39 | board_w = atoi(argv[1]); 40 | board_h = atoi(argv[2]); 41 | n_boards = atoi(argv[3]); 42 | 43 | if (argc > 4) { 44 | delay = atof(argv[4]); 45 | } 46 | if (argc > 5) { 47 | image_sf = atof(argv[5]); 48 | } 49 | 50 | int board_n = board_w * board_h; 51 | cv::Size board_sz = cv::Size(board_w, board_h); 52 | cv::VideoCapture capture(0); 53 | if (!capture.isOpened()) { 54 | cout << "\nCouldn't open the camera\n"; 55 | help(argv); 56 | return -1; 57 | } 58 | 59 | // ALLOCATE STORAGE 60 | // 61 | vector > image_points; 62 | vector > object_points; 63 | 64 | // Capture corner views: loop until we've got n_boards successful 65 | // captures (all corners on the board are found). 
66 | // 67 | double last_captured_timestamp = 0; 68 | cv::Size image_size; 69 | while (image_points.size() < (size_t)n_boards) { 70 | cv::Mat image0, image; 71 | capture >> image0; 72 | image_size = image0.size(); 73 | cv::resize(image0, image, cv::Size(), image_sf, image_sf, cv::INTER_LINEAR); 74 | 75 | // Find the board 76 | // 77 | vector corners; 78 | bool found = cv::findChessboardCorners(image, board_sz, corners); 79 | 80 | // Draw it 81 | // 82 | drawChessboardCorners(image, board_sz, corners, found); 83 | 84 | // If we got a good board, add it to our data 85 | // 86 | double timestamp = static_cast(clock()) / CLOCKS_PER_SEC; 87 | if (found && timestamp - last_captured_timestamp > 1) { 88 | last_captured_timestamp = timestamp; 89 | image ^= cv::Scalar::all(255); 90 | cv::Mat mcorners(corners); 91 | 92 | // do not copy the data 93 | mcorners *= (1.0 / image_sf); 94 | 95 | // scale the corner coordinates 96 | image_points.push_back(corners); 97 | object_points.push_back(vector()); 98 | vector &opts = object_points.back(); 99 | 100 | opts.resize(board_n); 101 | for (int j = 0; j < board_n; j++) { 102 | opts[j] = cv::Point3f(static_cast(j / board_w), 103 | static_cast(j % board_w), 0.0f); 104 | } 105 | cout << "Collected our " << static_cast(image_points.size()) 106 | << " of " << n_boards << " needed chessboard images\n" << endl; 107 | } 108 | cv::imshow("Calibration", image); 109 | 110 | // show in color if we did collect the image 111 | if ((cv::waitKey(30) & 255) == 27) 112 | return -1; 113 | } 114 | 115 | // END COLLECTION WHILE LOOP. 116 | cv::destroyWindow("Calibration"); 117 | cout << "\n\n*** CALIBRATING THE CAMERA...\n" << endl; 118 | 119 | // CALIBRATE THE CAMERA! 
120 | // 121 | cv::Mat intrinsic_matrix, distortion_coeffs; 122 | double err = cv::calibrateCamera( 123 | object_points, image_points, image_size, intrinsic_matrix, 124 | distortion_coeffs, cv::noArray(), cv::noArray(), 125 | cv::CALIB_ZERO_TANGENT_DIST | cv::CALIB_FIX_PRINCIPAL_POINT); 126 | 127 | // SAVE THE INTRINSICS AND DISTORTIONS 128 | cout << " *** DONE!\n\nReprojection error is " << err 129 | << "\nStoring Intrinsics.xml and Distortions.xml files\n\n"; 130 | cv::FileStorage fs("intrinsics.xml", cv::FileStorage::WRITE); 131 | fs << "image_width" << image_size.width << "image_height" << image_size.height 132 | << "camera_matrix" << intrinsic_matrix << "distortion_coefficients" 133 | << distortion_coeffs; 134 | fs.release(); 135 | 136 | // EXAMPLE OF LOADING THESE MATRICES BACK IN: 137 | fs.open("intrinsics.xml", cv::FileStorage::READ); 138 | cout << "\nimage width: " << static_cast(fs["image_width"]); 139 | cout << "\nimage height: " << static_cast(fs["image_height"]); 140 | cv::Mat intrinsic_matrix_loaded, distortion_coeffs_loaded; 141 | fs["camera_matrix"] >> intrinsic_matrix_loaded; 142 | fs["distortion_coefficients"] >> distortion_coeffs_loaded; 143 | cout << "\nintrinsic matrix:" << intrinsic_matrix_loaded; 144 | cout << "\ndistortion coefficients: " << distortion_coeffs_loaded << endl; 145 | 146 | // Build the undistort map which we will use for all 147 | // subsequent frames. 148 | // 149 | cv::Mat map1, map2; 150 | cv::initUndistortRectifyMap(intrinsic_matrix_loaded, distortion_coeffs_loaded, 151 | cv::Mat(), intrinsic_matrix_loaded, image_size, 152 | CV_16SC2, map1, map2); 153 | 154 | // Just run the camera to the screen, now showing the raw and 155 | // the undistorted image. 
156 | // 157 | for (;;) { 158 | cv::Mat image, image0; 159 | capture >> image0; 160 | 161 | if (image0.empty()) { 162 | break; 163 | } 164 | cv::remap(image0, image, map1, map2, cv::INTER_LINEAR, 165 | cv::BORDER_CONSTANT, cv::Scalar()); 166 | cv::imshow("Undistorted", image); 167 | if ((cv::waitKey(30) & 255) == 27) { 168 | break; 169 | } 170 | } 171 | 172 | return 0; 173 | } 174 | -------------------------------------------------------------------------------- /example_19-01.cpp: -------------------------------------------------------------------------------- 1 | //Example 19-1. Bird’s - eye view 2 | #include 3 | #include 4 | using namespace std; 5 | 6 | void help(char *argv[]) { 7 | cout << "\nExample 19-01, using homography to get a bird's eye view." 8 | << "\nThis file relies on you having created an intrinsic file via example_18-01_from_disk" 9 | << "\n but here, that file is already stored in ../birdseye/intrinsics.xml" 10 | << "\nCall:" 11 | << "\n./example_19-01 " 12 | << "\n\nExample:" 13 | << "\n./example_19-01 12 12 ../birdseye/intrinsics.xml ../birdseye/IMG_0215L.jpg\n" 14 | << "\nPress 'd' for lower birdseye view, and 'u' for higher (it adjusts the apparent 'Z' height), Esc to exit\n" 15 | << endl; 16 | } 17 | 18 | // args: [board_w] [board_h] [intrinsics.xml] [checker_image] 19 | // 20 | int main(int argc, char *argv[]) { 21 | if (argc != 5) { 22 | cout << "\nERROR: too few parameters\n"; 23 | help(argv); 24 | return -1; 25 | } 26 | // Input Parameters: 27 | // 28 | int board_w = atoi(argv[1]); 29 | int board_h = atoi(argv[2]); 30 | int board_n = board_w * board_h; 31 | cv::Size board_sz(board_w, board_h); 32 | cv::FileStorage fs(argv[3], cv::FileStorage::READ); 33 | cv::Mat intrinsic, distortion; 34 | 35 | fs["camera_matrix"] >> intrinsic; 36 | fs["distortion_coefficients"] >> distortion; 37 | 38 | if (!fs.isOpened() || intrinsic.empty() || distortion.empty()) { 39 | cout << "Error: Couldn't load intrinsic parameters from " << argv[3] 40 | << endl; 
41 | return -1; 42 | } 43 | fs.release(); 44 | 45 | cv::Mat gray_image, image, image0 = cv::imread(argv[4], 1); 46 | if (image0.empty()) { 47 | cout << "Error: Couldn't load image " << argv[4] << endl; 48 | return -1; 49 | } 50 | 51 | // UNDISTORT OUR IMAGE 52 | // 53 | cv::undistort(image0, image, intrinsic, distortion, intrinsic); 54 | cv::cvtColor(image, gray_image, cv::COLOR_BGRA2GRAY); 55 | 56 | // GET THE CHECKERBOARD ON THE PLANE 57 | // 58 | vector corners; 59 | bool found = cv::findChessboardCorners( // True if found 60 | image, // Input image 61 | board_sz, // Pattern size 62 | corners, // Results 63 | cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FILTER_QUADS); 64 | if (!found) { 65 | cout << "Couldn't acquire checkerboard on " << argv[4] << ", only found " 66 | << corners.size() << " of " << board_n << " corners\n"; 67 | return -1; 68 | } 69 | 70 | // Get Subpixel accuracy on those corners 71 | // 72 | cv::cornerSubPix( 73 | gray_image, // Input image 74 | corners, // Initial guesses, also output 75 | cv::Size(11, 11), // Search window size 76 | cv::Size(-1, -1), // Zero zone (in this case, don't use) 77 | cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 30, 78 | 0.1)); 79 | 80 | // GET THE IMAGE AND OBJECT POINTS: 81 | // Object points are at (r,c): 82 | // (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1) 83 | // That means corners are at: corners[r*board_w + c] 84 | // 85 | cv::Point2f objPts[4], imgPts[4]; 86 | objPts[0].x = 0; 87 | objPts[0].y = 0; 88 | objPts[1].x = board_w - 1; 89 | objPts[1].y = 0; 90 | objPts[2].x = 0; 91 | objPts[2].y = board_h - 1; 92 | objPts[3].x = board_w - 1; 93 | objPts[3].y = board_h - 1; 94 | imgPts[0] = corners[0]; 95 | imgPts[1] = corners[board_w - 1]; 96 | imgPts[2] = corners[(board_h - 1) * board_w]; 97 | imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1]; 98 | 99 | // DRAW THE POINTS in order: B,G,R,YELLOW 100 | // 101 | cv::circle(image, imgPts[0], 9, cv::Scalar(255, 0, 0), 3); 102 
| cv::circle(image, imgPts[1], 9, cv::Scalar(0, 255, 0), 3); 103 | cv::circle(image, imgPts[2], 9, cv::Scalar(0, 0, 255), 3); 104 | cv::circle(image, imgPts[3], 9, cv::Scalar(0, 255, 255), 3); 105 | 106 | // DRAW THE FOUND CHECKERBOARD 107 | // 108 | cv::drawChessboardCorners(image, board_sz, corners, found); 109 | cv::imshow("Checkers", image); 110 | 111 | // FIND THE HOMOGRAPHY 112 | // 113 | cv::Mat H = cv::getPerspectiveTransform(objPts, imgPts); 114 | 115 | // LET THE USER ADJUST THE Z HEIGHT OF THE VIEW 116 | // 117 | cout << "\nPress 'd' for lower birdseye view, and 'u' for higher (it adjusts the apparent 'Z' height), Esc to exit" << endl; 118 | double Z = 15; 119 | cv::Mat birds_image; 120 | for (;;) { 121 | // escape key stops 122 | H.at(2, 2) = Z; 123 | // USE HOMOGRAPHY TO REMAP THE VIEW 124 | // 125 | cv::warpPerspective(image, // Source image 126 | birds_image, // Output image 127 | H, // Transformation matrix 128 | image.size(), // Size for output image 129 | cv::WARP_INVERSE_MAP | cv::INTER_LINEAR, 130 | cv::BORDER_CONSTANT, cv::Scalar::all(0) // Fill border with black 131 | ); 132 | cv::imshow("Birds_Eye", birds_image); 133 | int key = cv::waitKey() & 255; 134 | if (key == 'u') 135 | Z += 0.5; 136 | if (key == 'd') 137 | Z -= 0.5; 138 | if (key == 27) 139 | break; 140 | } 141 | 142 | // SHOW ROTATION AND TRANSLATION VECTORS 143 | // 144 | vector image_points; 145 | vector object_points; 146 | for (int i = 0; i < 4; ++i) { 147 | image_points.push_back(imgPts[i]); 148 | object_points.push_back(cv::Point3f(objPts[i].x, objPts[i].y, 0)); 149 | } 150 | cv::Mat rvec, tvec, rmat; 151 | cv::solvePnP(object_points, // 3-d points in object coordinate 152 | image_points, // 2-d points in image coordinates 153 | intrinsic, // Our camera matrix 154 | cv::Mat(), // Since we corrected distortion in the 155 | // beginning,now we have zero distortion 156 | // coefficients 157 | rvec, // Output rotation *vector*. 158 | tvec // Output translation vector. 
159 | ); 160 | cv::Rodrigues(rvec, rmat); 161 | 162 | // PRINT AND EXIT 163 | cout << "rotation matrix: " << rmat << endl; 164 | cout << "translation vector: " << tvec << endl; 165 | cout << "homography matrix: " << H << endl; 166 | cout << "inverted homography matrix: " << H.inv() << endl; 167 | 168 | return 1; 169 | } 170 | -------------------------------------------------------------------------------- /example_19-04.cpp: -------------------------------------------------------------------------------- 1 | // Example 19-4. Two-dimensional line fitting 2 | #include 3 | #include 4 | #include 5 | 6 | using namespace std; 7 | 8 | void help(char **argv) { 9 | cout << "\nExample 19-04, two dimensional line fitting" 10 | << "\nCall" 11 | << "\n" << argv[0] << "\n" 12 | << "\n 'q', 'Q' or ESC to quit" 13 | << "\n" << endl; 14 | } 15 | 16 | int main(int argc, char **argv) { 17 | cv::Mat img(500, 500, CV_8UC3); 18 | cv::RNG rng(-1); 19 | help(argv); 20 | for (;;) { 21 | char key; 22 | int i, count = rng.uniform(0, 100) + 3, outliers = count / 5; 23 | float a = (float)rng.uniform(0., 200.); 24 | float b = (float)rng.uniform(0., 40.); 25 | float angle = (float)rng.uniform(0., CV_PI); 26 | float cos_a = cos(angle), sin_a = sin(angle); 27 | cv::Point pt1, pt2; 28 | vector points(count); 29 | cv::Vec4f line; 30 | float d, t; 31 | b = MIN(a * 0.3f, b); 32 | 33 | // generate some points that are close to the line 34 | for (i = 0; i < count - outliers; i++) { 35 | float x = (float)rng.uniform(-1., 1.) * a; 36 | float y = (float)rng.uniform(-1., 1.) 
* b; 37 | points[i].x = cvRound(x * cos_a - y * sin_a + img.cols / 2); 38 | points[i].y = cvRound(x * sin_a + y * cos_a + img.rows / 2); 39 | } 40 | 41 | // generate outlier points 42 | for (; i < count; i++) { 43 | points[i].x = rng.uniform(0, img.cols); 44 | points[i].y = rng.uniform(0, img.rows); 45 | } 46 | 47 | // find the optimal line 48 | cv::fitLine(points, line, cv::DIST_L1, 1, 0.001, 0.001); 49 | 50 | // draw the points 51 | img = cv::Scalar::all(0); 52 | for (i = 0; i < count; i++) 53 | cv::circle(img, points[i], 2, 54 | i < count - outliers ? cv::Scalar(0, 0, 255) 55 | : cv::Scalar(0, 255, 255), 56 | cv::FILLED, CV_AA, 0); 57 | 58 | // ... and the long enough line to cross the whole image 59 | d = sqrt((double)line[0] * line[0] + (double)line[1] * line[1]); 60 | line[0] /= d; 61 | line[1] /= d; 62 | t = (float)(img.cols + img.rows); 63 | pt1.x = cvRound(line[2] - line[0] * t); 64 | pt1.y = cvRound(line[3] - line[1] * t); 65 | pt2.x = cvRound(line[2] + line[0] * t); 66 | pt2.y = cvRound(line[3] + line[1] * t); 67 | cv::line(img, pt1, pt2, cv::Scalar(0, 255, 0), 3, CV_AA, 0); 68 | cv::imshow("Fit Line", img); 69 | key = (char)cv::waitKey(0); 70 | if (key == 27 || key == 'q' || key == 'Q') // 'ESC' 71 | break; 72 | } 73 | return 0; 74 | } 75 | -------------------------------------------------------------------------------- /example_20-01.cpp: -------------------------------------------------------------------------------- 1 | //Example 20-01. 
Using K-means 2 | #include 3 | #include 4 | 5 | using namespace std; 6 | 7 | static void help(char* argv[]) { 8 | cout << "\nThis program demonstrates kmeans clustering.\n" 9 | " It generates an image with random points, then assigns a random number\n" 10 | " of cluster centers and uses kmeans to move those cluster centers to their\n" 11 | " representative location\n" 12 | "Usage:\n" 13 | << argv[0] << "\n\n" 14 | << "ESC or 'q' or 'Q' to quit\n\n"<< endl; 15 | } 16 | 17 | int main(int argc, char** argv) { 18 | help(argv); 19 | const int MAX_CLUSTERS = 5; 20 | cv::Scalar colorTab[] = { 21 | cv::Scalar( 0, 0, 255 ), 22 | cv::Scalar( 0, 255, 0 ), 23 | cv::Scalar( 255, 100, 100 ), 24 | cv::Scalar( 255, 0, 255 ), 25 | cv::Scalar( 0, 255, 255 ) 26 | }; 27 | cv::Mat img(500, 500, CV_8UC3); 28 | cv::RNG rng(12345); 29 | for(;;) { 30 | int clusterCount = rng.uniform(2, MAX_CLUSTERS+1); 31 | int sampleCount = rng.uniform(1, 1001); 32 | cv::Mat points(sampleCount, 1, CV_32FC2), labels; 33 | clusterCount = MIN(clusterCount, sampleCount); 34 | cv::Mat centers(clusterCount, 1, points.type()); 35 | /* generate random sample from multigaussian distribution */ 36 | for(int k = 0; k < clusterCount; k++) { 37 | cv::Point center; 38 | center.x = rng.uniform(0, img.cols); 39 | center.y = rng.uniform(0, img.rows); 40 | cv::Mat pointChunk = points.rowRange( 41 | k*sampleCount/clusterCount, 42 | k == clusterCount - 1 ? 
sampleCount : (k+1)*sampleCount/clusterCount 43 | ); 44 | rng.fill( 45 | pointChunk, 46 | cv::RNG::NORMAL, 47 | cv::Scalar(center.x, center.y), 48 | cv::Scalar(img.cols*0.05, img.rows*0.05) 49 | ); 50 | } 51 | randShuffle(points, 1, &rng); 52 | kmeans( 53 | points, 54 | clusterCount, 55 | labels, 56 | cv::TermCriteria( 57 | cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 58 | 10, 59 | 1.0 60 | ), 61 | 3, 62 | cv::KMEANS_PP_CENTERS, 63 | centers 64 | ); 65 | img = cv::Scalar::all(0); 66 | for(int i = 0; i < sampleCount; i++) { 67 | int clusterIdx = labels.at(i); 68 | cv::Point ipt = points.at(i); 69 | cv::circle(img, ipt, 2, colorTab[clusterIdx], cv::FILLED, cv::LINE_AA); 70 | } 71 | cv::imshow("Example 20-01", img); 72 | char key = (char)cv::waitKey(); 73 | if(key == 27 || key == 'q' || key == 'Q') // 'ESC' 74 | break; 75 | } 76 | return 0; 77 | } 78 | -------------------------------------------------------------------------------- /example_20-02.cpp: -------------------------------------------------------------------------------- 1 | //Example 20-02. 
Using the Mahalanobis distance for classification 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | using namespace std; 8 | 9 | const int CLUSTER_COUNT = 4; 10 | const int SAMPLE_COUNT = 500; 11 | const cv::Scalar colorTab[] = { 12 | cv::Scalar( 0, 0, 255 ), 13 | cv::Scalar( 0, 255, 0 ), 14 | cv::Scalar( 255, 0, 0 ), 15 | cv::Scalar( 255, 0, 255 ), 16 | cv::Scalar( 0, 255, 255 ) 17 | }; 18 | 19 | static void help(char* argv[]) { 20 | cout << "\nThis program demonstrates using the Mahalanobis distance for classification.\n" 21 | " It generates an image with random points, uses kmeans clustering.\n" 22 | " And then uses the Mahalanobis distance for classification of new points (colors) .\n" 23 | "Usage:\n" 24 | << argv[0] << "\n\n" 25 | << "ESC to quit\n\n"<< endl; 26 | } 27 | int main(int argc, char** argv) { 28 | cv::Mat img(500, 500, CV_8UC3, cv::Scalar::all(0)); 29 | cv::Mat points(SAMPLE_COUNT, 1, CV_32FC2); 30 | cv::RNG rng(time(NULL)); 31 | help(argv); 32 | rng.fill(points, cv::RNG::UNIFORM, cv::Scalar(0, 0), cv::Scalar(img.cols, img.rows)); 33 | 34 | cv::Mat labels; 35 | kmeans(points, CLUSTER_COUNT, labels, 36 | cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 37 | 10, 1.0), 38 | 3, 39 | cv::KMEANS_PP_CENTERS 40 | ); 41 | 42 | vector clusters(CLUSTER_COUNT); 43 | 44 | for(int i = 0; i < SAMPLE_COUNT; i++) { 45 | int clusterIdx = labels.at(i); 46 | 47 | cv::Point ipt = points.at(i); 48 | 49 | cv::Mat sample(1, 2, CV_32FC1); 50 | sample.at(0, 0) = ipt.x; 51 | sample.at(0, 1) = ipt.y; 52 | clusters[clusterIdx].push_back(sample); 53 | cv::circle(img, ipt, 2, colorTab[clusterIdx], cv::FILLED, cv::LINE_AA); 54 | } 55 | cv::namedWindow("Example 20-02"); 56 | cv::imshow("Example 20-02", img); 57 | 58 | vector covarMats(CLUSTER_COUNT); 59 | vector means(CLUSTER_COUNT); 60 | for(int i = 0; i < CLUSTER_COUNT; i++) { 61 | cv::calcCovarMatrix(clusters[i], covarMats[i], means[i], 62 | CV_COVAR_NORMAL | CV_COVAR_ROWS, 5); 63 | 64 | } 65 | 66 | 
cout << "Press any button to classify the next point!\n" 67 | << "Press ESC to exit." << endl; 68 | 69 | for(;;) { 70 | char key = (char)cv::waitKey(); 71 | if( key == 27 ) break; 72 | 73 | cv::Mat newPoint(1, 2, CV_32FC1); 74 | newPoint.at(0, 0) = rng.uniform(0, img.cols); 75 | newPoint.at(0, 1) = rng.uniform(0, img.rows); 76 | vector mahalanobisDistance(CLUSTER_COUNT); 77 | 78 | for(int i = 0; i < CLUSTER_COUNT; i++) { 79 | mahalanobisDistance[i] = cv::Mahalanobis(newPoint, means[i], 80 | covarMats[i]); 81 | } 82 | int clusterIdx = std::distance( mahalanobisDistance.begin(), 83 | min_element(mahalanobisDistance.begin(), 84 | mahalanobisDistance.end())); 85 | 86 | cv::circle(img, newPoint.at(0), 5, colorTab[clusterIdx], 87 | cv::FILLED, cv::LINE_AA); 88 | cv::imshow("Example 20-02", img); 89 | } 90 | 91 | cv::destroyAllWindows(); 92 | return 0; 93 | } 94 | -------------------------------------------------------------------------------- /example_21-01.cpp: -------------------------------------------------------------------------------- 1 | //Example 21-1. Creating and training a decision tree 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | using namespace std; 8 | using namespace cv; 9 | 10 | void help(char **argv) { 11 | cout << "\n\n" 12 | << "Using binary decision trees to learn to recognize poisonous\n" 13 | << " from edible mushrooms based on visible attributes.\n" 14 | << " This program demonstrates how to create and a train a \n" 15 | << " decision tree using ml library in OpenCV.\n" 16 | << "Call:\n" << argv[0] << " \n\n" 17 | << "\nIf you don't enter a file, it defaults to ../mushroom/agaricus-lepiota.data\n" 18 | << endl; 19 | } 20 | 21 | int main(int argc, char *argv[]) { 22 | // If the caller gave a filename, great. Otherwise, use a default. 23 | // 24 | const char *csv_file_name = argc >= 2 ? 
argv[1] : "../mushroom/agaricus-lepiota.data"; 25 | cout << "OpenCV Version: " << CV_VERSION << endl; 26 | help(argv); 27 | 28 | // Read in the CSV file that we were given. 29 | // 30 | cv::Ptr data_set = 31 | cv::ml::TrainData::loadFromCSV(csv_file_name, // Input file name 32 | 0, // Header lines (ignore this many) 33 | 0, // Responses are (start) at thie column 34 | 1, // Inputs start at this column 35 | "cat[0-22]" // All 23 columns are categorical 36 | ); 37 | // Use defaults for delimeter (',') and missch ('?') 38 | // Verify that we read in what we think. 39 | // 40 | int n_samples = data_set->getNSamples(); 41 | if (n_samples == 0) { 42 | cerr << "Could not read file: " << csv_file_name << endl; 43 | exit(-1); 44 | } else { 45 | cout << "Read " << n_samples << " samples from " << csv_file_name << endl; 46 | } 47 | 48 | // Split the data, so that 90% is train data 49 | // 50 | data_set->setTrainTestSplitRatio(0.90, false); 51 | int n_train_samples = data_set->getNTrainSamples(); 52 | int n_test_samples = data_set->getNTestSamples(); 53 | cout << "Found " << n_train_samples << " Train Samples, and " 54 | << n_test_samples << " Test Samples" << endl; 55 | 56 | // Create a DTrees classifier. 57 | // 58 | cv::Ptr dtree = cv::ml::RTrees::create(); 59 | // set parameters 60 | // 61 | // These are the parameters from the old mushrooms.cpp code 62 | // Set up priors to penalize "poisonous" 10x as much as "edible" 63 | // 64 | float _priors[] = {1.0, 10.0}; 65 | cv::Mat priors(1, 2, CV_32F, _priors); 66 | dtree->setMaxDepth(8); 67 | dtree->setMinSampleCount(10); 68 | dtree->setRegressionAccuracy(0.01f); 69 | dtree->setUseSurrogates(false /* true */); 70 | dtree->setMaxCategories(15); 71 | dtree->setCVFolds(0 /*10*/); // nonzero causes core dump 72 | dtree->setUse1SERule(true); 73 | dtree->setTruncatePrunedTree(true); 74 | // dtree->setPriors( priors ); 75 | dtree->setPriors(cv::Mat()); // ignore priors for now... 
76 | // Now train the model 77 | // NB: we are only using the "train" part of the data set 78 | // 79 | dtree->train(data_set); 80 | 81 | // Having successfully trained the data, we should be able 82 | // to calculate the error on both the training data, as well 83 | // as the test data that we held out. 84 | // 85 | cv::Mat results; 86 | float train_performance = dtree->calcError(data_set, 87 | false, // use train data 88 | results // cv::noArray() 89 | ); 90 | std::vector names; 91 | data_set->getNames(names); 92 | Mat flags = data_set->getVarSymbolFlags(); 93 | 94 | // Compute some statistics on our own: 95 | // 96 | { 97 | cv::Mat expected_responses = data_set->getResponses(); 98 | int good = 0, bad = 0, total = 0; 99 | for (int i = 0; i < data_set->getNTrainSamples(); ++i) { 100 | float received = results.at(i, 0); 101 | float expected = expected_responses.at(i, 0); 102 | cv::String r_str = names[(int)received]; 103 | cv::String e_str = names[(int)expected]; 104 | cout << "Expected: " << e_str << ", got: " << r_str << endl; 105 | if (received == expected) 106 | good++; 107 | else 108 | bad++; 109 | total++; 110 | } 111 | cout << "Correct answers: " <<(float(good)/total) <<" % " << endl; 112 | cout << "Incorrect answers: " << (float(bad) / total) << "%" 113 | << endl; 114 | } 115 | float test_performance = dtree->calcError(data_set, 116 | true, // use test data 117 | results // cv::noArray() 118 | ); 119 | cout << "Performance on training data: " << train_performance << "%" << endl; 120 | cout << "Performance on test data: " < 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "opencv2/objdetect.hpp" 9 | #include 10 | #include 11 | 12 | using std::cout; 13 | using std::cerr; 14 | using std::vector; 15 | using std::string; 16 | 17 | // Detect and draw detected object boxes on image 18 | // 19 | void detectAndDraw( 20 | cv::Mat& img, // input image 21 | cv::Ptr classifier, // preloaded classifier 22 | double scale = 1.3) { // resize image by ... 
23 | // Just some pretty colors to draw with 24 | // 25 | enum { BLUE, AQUA, CYAN, GREEN }; 26 | static cv::Scalar colors[] = { 27 | cv::Scalar(0, 0, 255), 28 | cv::Scalar(0, 128, 255), 29 | cv::Scalar(0, 255, 255), 30 | cv::Scalar(0, 255, 0) 31 | }; 32 | // Image preparation: 33 | // 34 | cv::Mat gray(img.size(), CV_8UC1); 35 | cv::Mat small_img(cvSize(cvRound(img.cols / scale), 36 | cvRound(img.rows / scale)), CV_8UC1); 37 | cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY); 38 | cv::resize(gray, small_img, small_img.size(), 0.0, 0.0, cv::INTER_LINEAR); 39 | cv::equalizeHist(small_img, small_img); 40 | // Detect objects if any 41 | // 42 | vector objects; 43 | classifier->detectMultiScale( 44 | small_img, // input image 45 | objects, // place for the results 46 | 1.1, // scale factor 47 | 3, // minimum number of neighbors 48 | CV_HAAR_DO_CANNY_PRUNING, // (old format cascades only) 49 | cv::Size(30, 30)); // throw away detections smaller than this 50 | 51 | // Loop through to found objects and draw boxes around them 52 | // 53 | int i = 0; 54 | for (vector::iterator r = objects.begin(); 55 | r != objects.end(); r++, ++i) { 56 | cv::Rect r_ = (*r); 57 | r_.x *= scale; 58 | r_.y *= scale; 59 | r_.width *= scale; 60 | r_.height *= scale; 61 | cv::rectangle(img, r_, colors[i % 4]); 62 | } 63 | } 64 | 65 | int main(int argc, char** argv) { 66 | // Program expects at least two arguments: 67 | // - path to image file 68 | // - path to .xml classifier file 69 | // 70 | if (argc < 3) { 71 | cerr << "\nError: wrong number of arguments.\n"; 72 | cerr << "\nExample 22-1. 
Detecting and drawing faces\n\n" 73 | << "Use:\n" << argv[0] << " \n" 74 | << "to run this demo\n\n" 75 | << "Example:\n" 76 | << argv[0] << " ../faces.png ../haarcascade_frontalface_alt.xml\n" 77 | << std::endl; 78 | exit(1); 79 | } 80 | string image_file_name = string(argv[1]); 81 | cv::Mat img = cv::imread(image_file_name, CV_LOAD_IMAGE_COLOR); 82 | string cascade_file_name = string(argv[2]); 83 | cv::Ptr cascade(new cv::CascadeClassifier(cascade_file_name)); 84 | detectAndDraw(img, cascade); 85 | cv::imshow("Result", img); 86 | cv::waitKey(0); 87 | 88 | return 0; 89 | } 90 | -------------------------------------------------------------------------------- /faceScene.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/faceScene.jpg -------------------------------------------------------------------------------- /faceTemplate.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/faceTemplate.jpg -------------------------------------------------------------------------------- /faces.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/faces.png -------------------------------------------------------------------------------- /fruits.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/fruits.jpg -------------------------------------------------------------------------------- /mushroom/Index: -------------------------------------------------------------------------------- 1 | Index of 
mushroom 2 | 3 | 02 Dec 1996 193 Index 4 | 25 Jun 1990 111577 expanded.Z 5 | 26 Feb 1990 4167 agaricus-lepiota.names 6 | 30 May 1989 853 README 7 | 30 May 1989 373704 agaricus-lepiota.data 8 | -------------------------------------------------------------------------------- /mushroom/agaricus-lepiota.names: -------------------------------------------------------------------------------- 1 | 1. Title: Mushroom Database 2 | 3 | 2. Sources: 4 | (a) Mushroom records drawn from The Audubon Society Field Guide to North 5 | American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred 6 | A. Knopf 7 | (b) Donor: Jeff Schlimmer (Jeffrey.Schlimmer@a.gp.cs.cmu.edu) 8 | (c) Date: 27 April 1987 9 | 10 | 3. Past Usage: 11 | 1. Schlimmer,J.S. (1987). Concept Acquisition Through Representational 12 | Adjustment (Technical Report 87-19). Doctoral disseration, Department 13 | of Information and Computer Science, University of California, Irvine. 14 | --- STAGGER: asymptoted to 95% classification accuracy after reviewing 15 | 1000 instances. 16 | 2. Iba,W., Wogulis,J., & Langley,P. (1988). Trading off Simplicity 17 | and Coverage in Incremental Concept Learning. In Proceedings of 18 | the 5th International Conference on Machine Learning, 73-79. 19 | Ann Arbor, Michigan: Morgan Kaufmann. 20 | -- approximately the same results with their HILLARY algorithm 21 | 3. In the following references a set of rules (given below) were 22 | learned for this data set which may serve as a point of 23 | comparison for other researchers. 24 | 25 | Duch W, Adamczak R, Grabczewski K (1996) Extraction of logical rules 26 | from training data using backpropagation networks, in: Proc. of the 27 | The 1st Online Workshop on Soft Computing, 19-30.Aug.1996, pp. 
25-30, 28 | available on-line at: http://www.bioele.nuee.nagoya-u.ac.jp/wsc1/ 29 | 30 | Duch W, Adamczak R, Grabczewski K, Ishikawa M, Ueda H, Extraction of 31 | crisp logical rules using constrained backpropagation networks - 32 | comparison of two new approaches, in: Proc. of the European Symposium 33 | on Artificial Neural Networks (ESANN'97), Bruge, Belgium 16-18.4.1997, 34 | pp. xx-xx 35 | 36 | Wlodzislaw Duch, Department of Computer Methods, Nicholas Copernicus 37 | University, 87-100 Torun, Grudziadzka 5, Poland 38 | e-mail: duch@phys.uni.torun.pl 39 | WWW http://www.phys.uni.torun.pl/kmk/ 40 | 41 | Date: Mon, 17 Feb 1997 13:47:40 +0100 42 | From: Wlodzislaw Duch 43 | Organization: Dept. of Computer Methods, UMK 44 | 45 | I have attached a file containing logical rules for mushrooms. 46 | It should be helpful for other people since only in the last year I 47 | have seen about 10 papers analyzing this dataset and obtaining quite 48 | complex rules. We will try to contribute other results later. 49 | 50 | With best regards, Wlodek Duch 51 | ________________________________________________________________ 52 | 53 | Logical rules for the mushroom data sets. 54 | 55 | Logical rules given below seem to be the simplest possible for the 56 | mushroom dataset and therefore should be treated as benchmark results. 57 | 58 | Disjunctive rules for poisonous mushrooms, from most general 59 | to most specific: 60 | 61 | P_1) odor=NOT(almond.OR.anise.OR.none) 62 | 120 poisonous cases missed, 98.52% accuracy 63 | 64 | P_2) spore-print-color=green 65 | 48 cases missed, 99.41% accuracy 66 | 67 | P_3) odor=none.AND.stalk-surface-below-ring=scaly.AND. 68 | (stalk-color-above-ring=NOT.brown) 69 | 8 cases missed, 99.90% accuracy 70 | 71 | P_4) habitat=leaves.AND.cap-color=white 72 | 100% accuracy 73 | 74 | Rule P_4) may also be 75 | 76 | P_4') population=clustered.AND.cap_color=white 77 | 78 | These rule involve 6 attributes (out of 22). 
Rules for edible 79 | mushrooms are obtained as negation of the rules given above, for 80 | example the rule: 81 | 82 | odor=(almond.OR.anise.OR.none).AND.spore-print-color=NOT.green 83 | 84 | gives 48 errors, or 99.41% accuracy on the whole dataset. 85 | 86 | Several slightly more complex variations on these rules exist, 87 | involving other attributes, such as gill_size, gill_spacing, 88 | stalk_surface_above_ring, but the rules given above are the simplest 89 | we have found. 90 | 91 | 92 | 4. Relevant Information: 93 | This data set includes descriptions of hypothetical samples 94 | corresponding to 23 species of gilled mushrooms in the Agaricus and 95 | Lepiota Family (pp. 500-525). Each species is identified as 96 | definitely edible, definitely poisonous, or of unknown edibility and 97 | not recommended. This latter class was combined with the poisonous 98 | one. The Guide clearly states that there is no simple rule for 99 | determining the edibility of a mushroom; no rule like ``leaflets 100 | three, let it be'' for Poisonous Oak and Ivy. 101 | 102 | 5. Number of Instances: 8124 103 | 104 | 6. Number of Attributes: 22 (all nominally valued) 105 | 106 | 7. Attribute Information: (classes: edible=e, poisonous=p) 107 | 1. cap-shape: bell=b,conical=c,convex=x,flat=f, 108 | knobbed=k,sunken=s 109 | 2. cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s 110 | 3. cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r, 111 | pink=p,purple=u,red=e,white=w,yellow=y 112 | 4. bruises?: bruises=t,no=f 113 | 5. odor: almond=a,anise=l,creosote=c,fishy=y,foul=f, 114 | musty=m,none=n,pungent=p,spicy=s 115 | 6. gill-attachment: attached=a,descending=d,free=f,notched=n 116 | 7. gill-spacing: close=c,crowded=w,distant=d 117 | 8. gill-size: broad=b,narrow=n 118 | 9. gill-color: black=k,brown=n,buff=b,chocolate=h,gray=g, 119 | green=r,orange=o,pink=p,purple=u,red=e, 120 | white=w,yellow=y 121 | 10. stalk-shape: enlarging=e,tapering=t 122 | 11. 
stalk-root: bulbous=b,club=c,cup=u,equal=e, 123 | rhizomorphs=z,rooted=r,missing=? 124 | 12. stalk-surface-above-ring: fibrous=f,scaly=y,silky=k,smooth=s 125 | 13. stalk-surface-below-ring: fibrous=f,scaly=y,silky=k,smooth=s 126 | 14. stalk-color-above-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, 127 | pink=p,red=e,white=w,yellow=y 128 | 15. stalk-color-below-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, 129 | pink=p,red=e,white=w,yellow=y 130 | 16. veil-type: partial=p,universal=u 131 | 17. veil-color: brown=n,orange=o,white=w,yellow=y 132 | 18. ring-number: none=n,one=o,two=t 133 | 19. ring-type: cobwebby=c,evanescent=e,flaring=f,large=l, 134 | none=n,pendant=p,sheathing=s,zone=z 135 | 20. spore-print-color: black=k,brown=n,buff=b,chocolate=h,green=r, 136 | orange=o,purple=u,white=w,yellow=y 137 | 21. population: abundant=a,clustered=c,numerous=n, 138 | scattered=s,several=v,solitary=y 139 | 22. habitat: grasses=g,leaves=l,meadows=m,paths=p, 140 | urban=u,waste=w,woods=d 141 | 142 | 8. Missing Attribute Values: 2480 of them (denoted by "?"), all for 143 | attribute #11. 144 | 145 | 9. Class Distribution: 146 | -- edible: 4208 (51.8%) 147 | -- poisonous: 3916 (48.2%) 148 | -- total: 8124 instances 149 | -------------------------------------------------------------------------------- /mushroom/citation: -------------------------------------------------------------------------------- 1 | This datasets was obtained from: 2 | 3 | Lichman, M. (2013). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science. 
4 | -------------------------------------------------------------------------------- /mushroom/expanded.Z: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/mushroom/expanded.Z -------------------------------------------------------------------------------- /shape_sample/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/1.png -------------------------------------------------------------------------------- /shape_sample/10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/10.png -------------------------------------------------------------------------------- /shape_sample/11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/11.png -------------------------------------------------------------------------------- /shape_sample/12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/12.png -------------------------------------------------------------------------------- /shape_sample/13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/13.png -------------------------------------------------------------------------------- 
/shape_sample/14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/14.png -------------------------------------------------------------------------------- /shape_sample/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/15.png -------------------------------------------------------------------------------- /shape_sample/16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/16.png -------------------------------------------------------------------------------- /shape_sample/17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/17.png -------------------------------------------------------------------------------- /shape_sample/18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/18.png -------------------------------------------------------------------------------- /shape_sample/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/19.png -------------------------------------------------------------------------------- /shape_sample/2.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/2.png -------------------------------------------------------------------------------- /shape_sample/20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/20.png -------------------------------------------------------------------------------- /shape_sample/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/3.png -------------------------------------------------------------------------------- /shape_sample/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/4.png -------------------------------------------------------------------------------- /shape_sample/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/5.png -------------------------------------------------------------------------------- /shape_sample/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/6.png -------------------------------------------------------------------------------- /shape_sample/7.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/7.png -------------------------------------------------------------------------------- /shape_sample/8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/8.png -------------------------------------------------------------------------------- /shape_sample/9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/shape_sample/9.png -------------------------------------------------------------------------------- /stereoData/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/.DS_Store -------------------------------------------------------------------------------- /stereoData/example_19-03_list.txt: -------------------------------------------------------------------------------- 1 | ../stereoData/left01.jpg 2 | ../stereoData/right01.jpg 3 | ../stereoData/left02.jpg 4 | ../stereoData/right02.jpg 5 | ../stereoData/left03.jpg 6 | ../stereoData/right03.jpg 7 | ../stereoData/left04.jpg 8 | ../stereoData/right04.jpg 9 | ../stereoData/left05.jpg 10 | ../stereoData/right05.jpg 11 | ../stereoData/left06.jpg 12 | ../stereoData/right06.jpg 13 | ../stereoData/left07.jpg 14 | ../stereoData/right07.jpg 15 | ../stereoData/left08.jpg 16 | ../stereoData/right08.jpg 17 | ../stereoData/left09.jpg 18 | ../stereoData/right09.jpg 19 | #../stereoData/left10.jpg 20 | #../stereoData/right10.jpg 21 | ../stereoData/left11.jpg 22 | ../stereoData/right11.jpg 23 | ../stereoData/left12.jpg 24 | 
../stereoData/right12.jpg 25 | ../stereoData/left13.jpg 26 | ../stereoData/right13.jpg 27 | ../stereoData/left14.jpg 28 | ../stereoData/right14.jpg 29 | -------------------------------------------------------------------------------- /stereoData/left01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left01.jpg -------------------------------------------------------------------------------- /stereoData/left02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left02.jpg -------------------------------------------------------------------------------- /stereoData/left03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left03.jpg -------------------------------------------------------------------------------- /stereoData/left04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left04.jpg -------------------------------------------------------------------------------- /stereoData/left05.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left05.jpg -------------------------------------------------------------------------------- /stereoData/left06.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left06.jpg -------------------------------------------------------------------------------- /stereoData/left07.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left07.jpg -------------------------------------------------------------------------------- /stereoData/left08.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left08.jpg -------------------------------------------------------------------------------- /stereoData/left09.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left09.jpg -------------------------------------------------------------------------------- /stereoData/left10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left10.jpg -------------------------------------------------------------------------------- /stereoData/left11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left11.jpg -------------------------------------------------------------------------------- /stereoData/left12.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left12.jpg -------------------------------------------------------------------------------- /stereoData/left13.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left13.jpg -------------------------------------------------------------------------------- /stereoData/left14.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/left14.jpg -------------------------------------------------------------------------------- /stereoData/right01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right01.jpg -------------------------------------------------------------------------------- /stereoData/right02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right02.jpg -------------------------------------------------------------------------------- /stereoData/right03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right03.jpg -------------------------------------------------------------------------------- /stereoData/right04.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right04.jpg -------------------------------------------------------------------------------- /stereoData/right05.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right05.jpg -------------------------------------------------------------------------------- /stereoData/right06.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right06.jpg -------------------------------------------------------------------------------- /stereoData/right07.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right07.jpg -------------------------------------------------------------------------------- /stereoData/right08.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right08.jpg -------------------------------------------------------------------------------- /stereoData/right09.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right09.jpg -------------------------------------------------------------------------------- /stereoData/right10.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right10.jpg -------------------------------------------------------------------------------- /stereoData/right11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right11.jpg -------------------------------------------------------------------------------- /stereoData/right12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right12.jpg -------------------------------------------------------------------------------- /stereoData/right13.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right13.jpg -------------------------------------------------------------------------------- /stereoData/right14.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stereoData/right14.jpg -------------------------------------------------------------------------------- /stuff.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/stuff.jpg -------------------------------------------------------------------------------- /test.avi: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/test.avi -------------------------------------------------------------------------------- /tree.avi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oreillymedia/Learning-OpenCV-3_examples/4fe1f6c8bb477e4393ea3cd94749441d93f9b3dd/tree.avi --------------------------------------------------------------------------------