├── .project
├── .pydevproject
├── OpenCV-tutorials
│   ├── .gitignore
│   ├── cameraCalibration
│   │   ├── cameraCalibration.py
│   │   ├── depthMap.py
│   │   ├── epipolarGeometry.py
│   │   └── poseEstimation.py
│   ├── coreOperations
│   │   ├── borders.py
│   │   ├── imageOperations.py
│   │   └── optimization.py
│   ├── data
│   │   ├── README.md
│   │   ├── aloeL.jpg
│   │   ├── aloeR.jpg
│   │   ├── apple.jpg
│   │   ├── box.png
│   │   ├── box_in_scene.png
│   │   ├── butterfly.jpg
│   │   ├── calib.npz
│   │   ├── chessboard.jpg
│   │   ├── coins.jpg
│   │   ├── digits.png
│   │   ├── drop.avi
│   │   ├── home.jpg
│   │   ├── j.png
│   │   ├── knn_data.npz
│   │   ├── left.jpg
│   │   ├── left01.jpg
│   │   ├── left02.jpg
│   │   ├── left03.jpg
│   │   ├── left04.jpg
│   │   ├── left05.jpg
│   │   ├── left06.jpg
│   │   ├── left07.jpg
│   │   ├── left08.jpg
│   │   ├── left09.jpg
│   │   ├── left11.jpg
│   │   ├── left12.jpg
│   │   ├── left13.jpg
│   │   ├── left14.jpg
│   │   ├── letter-recognition.data
│   │   ├── messi5.jpg
│   │   ├── messi_face.jpg
│   │   ├── opencv_logo.jpg
│   │   ├── orange.jpg
│   │   ├── right.jpg
│   │   ├── rose.png
│   │   ├── simple.jpg
│   │   ├── sudoku-original.jpg
│   │   ├── tsukuba_l.png
│   │   ├── tsukuba_r.png
│   │   ├── vtest.avi
│   │   └── wiki.jpg
│   ├── featureDetection
│   │   ├── bfMatchingWithORB.py
│   │   ├── bfMatchingWithSIFT.py
│   │   ├── brief.py
│   │   ├── fast.py
│   │   ├── flannMatchingWithSIFT.py
│   │   ├── harris.py
│   │   ├── homography.py
│   │   ├── orb.py
│   │   ├── shiTomasi.py
│   │   ├── sift.py
│   │   └── surf.py
│   ├── guiFeatures
│   │   ├── drawingBasics.py
│   │   ├── guiBasics.py
│   │   ├── imageBasics.py
│   │   ├── interactionBasics.py
│   │   └── videoBasics.py
│   ├── imageProcessing
│   │   ├── canny.py
│   │   ├── colorSpaces.py
│   │   ├── contours.py
│   │   ├── fourier.py
│   │   ├── geometricTransformations.py
│   │   ├── grabCut.py
│   │   ├── histogram2D.py
│   │   ├── histogramBackprojection.py
│   │   ├── histogramBackprojection2.py
│   │   ├── histogramEqualization.py
│   │   ├── histogramEqualization2.py
│   │   ├── histograms.py
│   │   ├── houghCircles.py
│   │   ├── houghLines.py
│   │   ├── imageGradients.py
│   │   ├── morphologicalTransformations.py
│   │   ├── pyramids.py
│   │   ├── smooth.py
│   │   ├── templateMatching.py
│   │   ├── threshold.py
│   │   └── watershed.py
│   ├── machineLearning
│   │   ├── colorQuantization.py
│   │   ├── kmeans.py
│   │   ├── knn.py
│   │   ├── knnAlphabet.py
│   │   ├── knnDigits.py
│   │   └── svmDigits.py
│   ├── objectDetection
│   │   └── faceDetection.py
│   ├── python
│   │   └── numpyBasics.py
│   └── videoAnalysis
│       ├── backgroundSubtraction.py
│       ├── camShift.py
│       ├── denseOpticalFlow.py
│       ├── meanShift.py
│       └── opticalFlow.py
└── README.md
/.project:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <projectDescription>
3 | 	<name>OpenCV-python-tests</name>
4 | 	<comment></comment>
5 | 	<projects>
6 | 	</projects>
7 | 	<buildSpec>
8 | 		<buildCommand>
9 | 			<name>org.python.pydev.PyDevBuilder</name>
10 | 			<arguments>
11 | 			</arguments>
12 | 		</buildCommand>
13 | 	</buildSpec>
14 | 	<natures>
15 | 		<nature>org.python.pydev.pythonNature</nature>
16 | 	</natures>
17 | </projectDescription>
18 |
--------------------------------------------------------------------------------
/.pydevproject:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8" standalone="no"?>
2 | <?eclipse-pydev version="1.0"?><pydev_project>
3 | <pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
4 | <pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
5 | </pydev_project>
6 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/.gitignore:
--------------------------------------------------------------------------------
1 | /out/
2 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/cameraCalibration/cameraCalibration.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | import glob
9 |
10 | # Define the chess board rows and columns
11 | rows = 7
12 | cols = 6
13 |
14 | # Set the termination criteria for the corner sub-pixel algorithm
15 | criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 30, 0.001)
16 |
17 | # Prepare the object points: (0,0,0), (1,0,0), (2,0,0), ..., (6,5,0). They are the same for all images
18 | objectPoints = np.zeros((rows * cols, 3), np.float32)
19 | objectPoints[:, :2] = np.mgrid[0:rows, 0:cols].T.reshape(-1, 2)
20 |
21 | # Create the arrays to store the object points and the image points
22 | objectPointsArray = []
23 | imgPointsArray = []
24 |
25 | # Loop over the image files
26 | for path in glob.glob('../data/left[0-1][0-9].jpg'):
27 | # Load the image and convert it to gray scale
28 | img = cv2.imread(path)
29 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
30 |
31 | # Find the chess board corners
32 | ret, corners = cv2.findChessboardCorners(gray, (rows, cols), None)
33 |
34 | # Make sure the chess board pattern was found in the image
35 | if ret:
36 | # Refine the corner position
37 | corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
38 |
39 | # Add the object points and the image points to the arrays
40 | objectPointsArray.append(objectPoints)
41 | imgPointsArray.append(corners)
42 |
43 | # Draw the corners on the image
44 | cv2.drawChessboardCorners(img, (rows, cols), corners, ret)
45 |
46 | # Display the image
47 | cv2.imshow('chess board', img)
48 | cv2.waitKey(500)
49 |
50 | # Calibrate the camera and save the results
51 | ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objectPointsArray, imgPointsArray, gray.shape[::-1], None, None)
52 | np.savez('../data/calib.npz', mtx=mtx, dist=dist, rvecs=rvecs, tvecs=tvecs)
53 |
54 | # Print the camera calibration error
55 | error = 0
56 |
57 | for i in range(len(objectPointsArray)):
58 | imgPoints, _ = cv2.projectPoints(objectPointsArray[i], rvecs[i], tvecs[i], mtx, dist)
59 | error += cv2.norm(imgPointsArray[i], imgPoints, cv2.NORM_L2) / len(imgPoints)
60 |
61 | print("Total error: ", error / len(objectPointsArray))
62 |
63 | # Load one of the test images
64 | img = cv2.imread('../data/left12.jpg')
65 | h, w = img.shape[:2]
66 |
67 | # Obtain the new camera matrix and undistort the image
68 | newCameraMtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
69 | undistortedImg = cv2.undistort(img, mtx, dist, None, newCameraMtx)
70 |
71 | # Crop the undistorted image
72 | # x, y, w, h = roi
73 | # undistortedImg = undistortedImg[y:y + h, x:x + w]
74 |
75 | # Display the final result
76 | cv2.imshow('chess board', np.hstack((img, undistortedImg)))
77 | cv2.waitKey(0)
78 | cv2.destroyAllWindows()
79 |
--------------------------------------------------------------------------------
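Note: the same tutorial also presents remapping as an alternative way to undistort. A minimal sketch of that variant, reusing the mtx, dist, newCameraMtx, w and h values computed in the script above:

    # Build the undistortion maps once, then remap the image
    mapX, mapY = cv2.initUndistortRectifyMap(mtx, dist, None, newCameraMtx, (w, h), cv2.CV_32FC1)
    undistortedImg = cv2.remap(img, mapX, mapY, cv2.INTER_LINEAR)
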
/OpenCV-tutorials/cameraCalibration/depthMap.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the left and right images in gray scale
10 | imgLeft = cv2.imread('../data/tsukuba_l.png', 0)
11 | imgRight = cv2.imread('../data/tsukuba_r.png', 0)
12 |
13 | # Initialize the stereo block matching object
14 | stereo = cv2.StereoBM_create(numDisparities=32, blockSize=13)
15 |
16 | # Compute the disparity image
17 | disparity = stereo.compute(imgLeft, imgRight)
18 |
19 | # Normalize the image for representation
20 | minVal = disparity.min()
21 | maxVal = disparity.max()
22 | disparity = np.uint8(255 * (disparity - minVal) / (maxVal - minVal))
23 |
24 | # Display the result
25 | cv2.imshow('disparity', np.hstack((imgLeft, imgRight, disparity)))
26 | cv2.waitKey(0)
27 | cv2.destroyAllWindows()
28 |
--------------------------------------------------------------------------------
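Note: the manual scaling done at the end of the script above can also be written as a single cv2.normalize call, which maps the disparity to the 0-255 range and converts it to uint8 in one step. A sketch, assuming the raw disparity returned by stereo.compute:

    # Normalize the disparity to 0-255 and convert it to uint8
    disparity = cv2.normalize(disparity, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
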
/OpenCV-tutorials/cameraCalibration/epipolarGeometry.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load the left and right images in gray scale
11 | imgLeft = cv2.imread('../data/tsukuba_l.png', 0)
12 | imgRight = cv2.imread('../data/tsukuba_r.png', 0)
13 |
14 | # Detect the SIFT key points and compute the descriptors for the two images
15 | sift = cv2.xfeatures2d.SIFT_create()
16 | keyPointsLeft, descriptorsLeft = sift.detectAndCompute(imgLeft, None)
17 | keyPointsRight, descriptorsRight = sift.detectAndCompute(imgRight, None)
18 |
19 | # Create FLANN matcher object
20 | FLANN_INDEX_KDTREE = 0
21 | indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
22 | searchParams = dict(checks=50)
23 | flann = cv2.FlannBasedMatcher(indexParams, searchParams)
24 |
25 | # Match the descriptors (this crashes in OpenCV3.1)
26 | # See https://github.com/Itseez/opencv/issues/5667
27 | matches = flann.knnMatch(descriptorsLeft, descriptorsRight, k=2)
28 |
29 | # Apply ratio test
30 | goodMatches = []
31 | ptsLeft = []
32 | ptsRight = []
33 |
34 | for m, n in matches:
35 | if m.distance < 0.8 * n.distance:
36 | goodMatches.append([m])
37 |         ptsLeft.append(keyPointsLeft[m.queryIdx].pt)
38 |         ptsRight.append(keyPointsRight[m.trainIdx].pt)
39 |
40 | ptsLeft = np.int32(ptsLeft)
41 | ptsRight = np.int32(ptsRight)
42 | F, mask = cv2.findFundamentalMat(ptsLeft, ptsRight, cv2.FM_LMEDS)
43 |
44 | # We select only inlier points
45 | ptsLeft = ptsLeft[mask.ravel() == 1]
46 | ptsRight = ptsRight[mask.ravel() == 1]
47 |
48 | def drawlines(img1, img2, lines, pts1, pts2):
49 | ''' img1 - image on which we draw the epilines for the points in img2
50 | lines - corresponding epilines '''
51 | r, c = img1.shape
52 | img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
53 | img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
54 | for r, pt1, pt2 in zip(lines, pts1, pts2):
55 | color = tuple(np.random.randint(0, 255, 3).tolist())
56 | x0, y0 = map(int, [0, -r[2] / r[1] ])
57 | x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1] ])
58 | img1 = cv2.line(img1, (x0, y0), (x1, y1), color, 1)
59 | img1 = cv2.circle(img1, tuple(pt1), 5, color, -1)
60 | img2 = cv2.circle(img2, tuple(pt2), 5, color, -1)
61 | return img1, img2
62 |
63 | # Find epilines corresponding to points in right image (second image) and
64 | # drawing its lines on left image
65 | linesLeft = cv2.computeCorrespondEpilines(ptsRight.reshape(-1, 1, 2), 2, F)
66 | linesLeft = linesLeft.reshape(-1, 3)
67 | img5, img6 = drawlines(imgLeft, imgRight, linesLeft, ptsLeft, ptsRight)
68 |
69 | # Find epilines corresponding to points in left image (first image) and
70 | # drawing its lines on right image
71 | linesRight = cv2.computeCorrespondEpilines(ptsLeft.reshape(-1, 1, 2), 1, F)
72 | linesRight = linesRight.reshape(-1, 3)
73 | img3, img4 = drawlines(imgRight, imgLeft, linesRight, ptsRight, ptsLeft)
74 |
75 | plt.subplot(121), plt.imshow(img5)
76 | plt.subplot(122), plt.imshow(img3)
77 | plt.show()
78 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/cameraCalibration/poseEstimation.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_calib3d/py_pose/py_pose.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | import glob
9 |
10 | # This function draws lines joining the given image points to the first chess board corner
11 | def draw(img, corners, imgPoints):
12 |     corner = tuple(corners[0].ravel().astype(int))
13 |     img = cv2.line(img, corner, tuple(imgPoints[0].ravel().astype(int)), (255, 0, 0), 5)
14 |     img = cv2.line(img, corner, tuple(imgPoints[1].ravel().astype(int)), (0, 255, 0), 5)
15 |     img = cv2.line(img, corner, tuple(imgPoints[2].ravel().astype(int)), (0, 0, 255), 5)
16 | return img
17 |
18 | # Load the camera calibration data
19 | with np.load('../data/calib.npz') as calibData:
20 | mtx, dist, rvecs, tvecs = [calibData[i] for i in ('mtx', 'dist', 'rvecs', 'tvecs')]
21 |
22 | # Define the chess board rows and columns
23 | rows = 7
24 | cols = 6
25 |
26 | # Set the termination criteria for the corner sub-pixel algorithm
27 | criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 30, 0.001)
28 |
29 | # Prepare the object points: (0,0,0), (1,0,0), (2,0,0), ..., (6,5,0). They are the same for all images
30 | objectPoints = np.zeros((rows * cols, 1, 3), np.float32)
31 | objectPoints[:, :, :2] = np.mgrid[0:rows, 0:cols].T.reshape(-1, 1, 2)
32 |
33 | # Create the axis points
34 | axisPoints = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, -3]]).reshape(-1, 3)
35 |
36 | # Loop over the image files
37 | for path in glob.glob('../data/left[0-1][0-9].jpg'):
38 | # Load the image and convert it to gray scale
39 | img = cv2.imread(path)
40 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
41 |
42 | # Find the chess board corners
43 | ret, corners = cv2.findChessboardCorners(gray, (rows, cols), None)
44 |
45 | # Make sure the chess board pattern was found in the image
46 | if ret:
47 | # Refine the corner position
48 | corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
49 |
50 | # Find the rotation and translation vectors
51 | val, rvecs, tvecs, inliers = cv2.solvePnPRansac(objectPoints, corners, mtx, dist)
52 |
53 | # Project the 3D axis points to the image plane
54 | axisImgPoints, jac = cv2.projectPoints(axisPoints, rvecs, tvecs, mtx, dist)
55 |
56 | # Draw the axis lines
57 | img = draw(img, corners, axisImgPoints)
58 |
59 | # Display the image
60 | cv2.imshow('chess board', img)
61 | cv2.waitKey(0)
62 |
63 | cv2.destroyAllWindows()
64 |
--------------------------------------------------------------------------------
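Note: the same tutorial goes on to render a cube instead of the three axis lines. A sketch of that variant, reusing the rvecs, tvecs, mtx and dist values obtained inside the loop above:

    # The eight corners of a cube resting on the chess board plane
    cubePoints = np.float32([[0, 0, 0], [0, 3, 0], [3, 3, 0], [3, 0, 0],
                             [0, 0, -3], [0, 3, -3], [3, 3, -3], [3, 0, -3]])

    def drawCube(img, imgPoints):
        imgPoints = np.int32(imgPoints).reshape(-1, 2)
        # Draw the bottom face in green
        img = cv2.drawContours(img, [imgPoints[:4]], -1, (0, 255, 0), -3)
        # Draw the vertical edges in blue
        for i, j in zip(range(4), range(4, 8)):
            img = cv2.line(img, tuple(imgPoints[i]), tuple(imgPoints[j]), (255, 0, 0), 3)
        # Draw the top face in red
        return cv2.drawContours(img, [imgPoints[4:]], -1, (0, 0, 255), 3)

    cubeImgPoints, _ = cv2.projectPoints(cubePoints, rvecs, tvecs, mtx, dist)
    img = drawCube(img, cubeImgPoints)
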
/OpenCV-tutorials/coreOperations/borders.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Read the image
11 | img = cv2.imread('../data/opencv_logo.jpg')
12 | print(img.shape)
13 |
14 | # Create different copies
15 | replicate = cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_REPLICATE)
16 | reflect = cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_REFLECT)
17 | reflect101 = cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_REFLECT_101)
18 | wrap = cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_WRAP)
19 | constant = cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=[255, 0, 0])
20 |
21 | # Display the images using matplotlib
22 | plt.subplot(231), plt.imshow(img, 'gray'), plt.title('ORIGINAL')
23 | plt.subplot(232), plt.imshow(replicate, 'gray'), plt.title('REPLICATE')
24 | plt.subplot(233), plt.imshow(reflect, 'gray'), plt.title('REFLECT')
25 | plt.subplot(234), plt.imshow(reflect101, 'gray'), plt.title('REFLECT_101')
26 | plt.subplot(235), plt.imshow(wrap, 'gray'), plt.title('WRAP')
27 | plt.subplot(236), plt.imshow(constant, 'gray'), plt.title('CONSTANT')
28 |
29 | plt.show()
30 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/coreOperations/imageOperations.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load two images
10 | img1 = cv2.imread('../data/butterfly.jpg')
11 | img2 = cv2.imread('../data/messi5.jpg')
12 |
13 | # Calculate the minimum number of rows and columns
14 | minRows = min(img1.shape[0], img2.shape[0])
15 | minCols = min(img1.shape[1], img2.shape[1])
16 |
17 | # Slice the images to have the same size
18 | img1 = img1[:minRows, :minCols]
19 | img2 = img2[:minRows, :minCols]
20 |
21 | # Blend the two images
22 | dst = cv2.addWeighted(img1, 0.3, img2, 0.7, 0)
23 |
24 | # Display the result
25 | cv2.imshow('blend', dst)
26 |
27 | # Load two other images
28 | img1 = cv2.imread('../data/messi5.jpg')
29 | img2 = cv2.imread('../data/opencv_logo.jpg')
30 |
31 | # Create the logo background mask
32 | img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
33 | ret, bgMask = cv2.threshold(img2gray, thresh=170, maxval=255, type=cv2.THRESH_BINARY)
34 |
35 | # Create the foreground mask, using the inverse of the background mask
36 | fgMask = cv2.bitwise_not(bgMask)
37 |
38 | # We want to put the logo on the top-left corner, so we create a ROI
39 | rows, cols, channels = img2.shape
40 | roi = img1[0:rows, 0:cols]
41 |
42 | # Black-out the ROI area covered by the logo
43 | img1Bg = cv2.bitwise_and(roi, roi, mask=bgMask)
44 |
45 | # Take only region of logo from logo image.
46 | img2Fg = cv2.bitwise_and(img2, img2, mask=fgMask)
47 |
48 | # Update the main image with the addition of the two
49 | img1[0:rows, 0:cols] = cv2.add(img1Bg, img2Fg)
50 |
51 | # Show the final image in a new window
52 | cv2.imshow('combined', img1)
53 |
54 | # Wait for user interaction
55 | cv2.waitKey(0)
56 | cv2.destroyAllWindows()
57 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/coreOperations/optimization.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_core/py_optimization/py_optimization.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img1 = cv2.imread('../data/messi5.jpg')
11 |
12 | # Get the initial tick count
13 | startCount = cv2.getTickCount()
14 |
15 | # Perform the calculations
16 | for kernelSize in range(5, 49, 2):
17 | img1 = cv2.medianBlur(img1, kernelSize)
18 |
19 | # Calculate the elapsed time
20 | time = (cv2.getTickCount() - startCount) / cv2.getTickFrequency()
21 | print(time)
22 |
--------------------------------------------------------------------------------
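Note: the same tutorial also shows how to check and toggle OpenCV's optimized code paths, which is useful when comparing timings like the one measured above:

    # The optimized routines are enabled by default
    print(cv2.useOptimized())

    # Disable them to time the unoptimized baseline, then re-enable them
    cv2.setUseOptimized(False)
    cv2.setUseOptimized(True)
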
/OpenCV-tutorials/data/README.md:
--------------------------------------------------------------------------------
1 | # Data, figures and videos used in the scripts
2 |
3 | Most of the images and videos come from the OpenCV samples data directory (opencv/samples/data/).
4 |
5 | The rest have been downloaded from the following pages:
6 |
7 | * [images](https://github.com/abidrahmank/OpenCV2-Python-Tutorials)
8 | * [videos](http://www.engr.colostate.edu/me/facil/dynamics/avis.htm)
9 |
10 |
11 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/aloeL.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/aloeL.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/aloeR.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/aloeR.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/apple.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/apple.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/box.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/box.png
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/box_in_scene.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/box_in_scene.png
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/butterfly.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/butterfly.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/calib.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/calib.npz
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/chessboard.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/chessboard.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/coins.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/coins.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/digits.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/digits.png
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/drop.avi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/drop.avi
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/home.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/home.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/j.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/j.png
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/knn_data.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/knn_data.npz
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left01.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left01.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left02.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left02.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left03.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left03.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left04.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left04.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left05.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left05.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left06.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left06.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left07.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left07.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left08.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left08.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left09.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left09.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left11.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left12.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left13.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/left14.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/left14.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/messi5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/messi5.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/messi_face.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/messi_face.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/opencv_logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/opencv_logo.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/orange.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/orange.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/right.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/right.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/rose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/rose.png
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/simple.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/simple.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/sudoku-original.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/sudoku-original.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/tsukuba_l.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/tsukuba_l.png
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/tsukuba_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/tsukuba_r.png
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/vtest.avi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/vtest.avi
--------------------------------------------------------------------------------
/OpenCV-tutorials/data/wiki.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jagracar/OpenCV-python-tests/58399391e658b4e221495cbd2866c64abaa451db/OpenCV-tutorials/data/wiki.jpg
--------------------------------------------------------------------------------
/OpenCV-tutorials/featureDetection/bfMatchingWithORB.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the images in gray scale
10 | img1 = cv2.imread('../data/box.png', 0)
11 | img2 = cv2.imread('../data/box_in_scene.png', 0)
12 |
13 | # Detect the ORB key points and compute the descriptors for the two images
14 | orb = cv2.ORB_create()
15 | keyPoints1, descriptors1 = orb.detectAndCompute(img1, None)
16 | keyPoints2, descriptors2 = orb.detectAndCompute(img2, None)
17 |
18 | # Create brute-force matcher object
19 | bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
20 |
21 | # Match the descriptors
22 | matches = bf.match(descriptors1, descriptors2)
23 |
24 | # Sort them by distance
25 | matches = sorted(matches, key=lambda x:x.distance)
26 |
27 | # Draw the first 10 matches
28 | result = cv2.drawMatches(img1, keyPoints1, img2, keyPoints2, matches[:10], None, flags=2)
29 |
30 | # Display the results
31 | cv2.imshow('BF matches', result)
32 | cv2.waitKey(0)
33 | cv2.destroyAllWindows()
34 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/featureDetection/bfMatchingWithSIFT.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the images in gray scale
10 | img1 = cv2.imread('../data/box.png', 0)
11 | img2 = cv2.imread('../data/box_in_scene.png', 0)
12 |
13 | # Detect the SIFT key points and compute the descriptors for the two images
14 | sift = cv2.xfeatures2d.SIFT_create()
15 | keyPoints1, descriptors1 = sift.detectAndCompute(img1, None)
16 | keyPoints2, descriptors2 = sift.detectAndCompute(img2, None)
17 |
18 | # Create brute-force matcher object
19 | bf = cv2.BFMatcher()
20 |
21 | # Match the descriptors
22 | matches = bf.knnMatch(descriptors1, descriptors2, k=2)
23 |
24 | # Apply ratio test
25 | goodMatches = []
26 |
27 | for m, n in matches:
28 | if m.distance < 0.75 * n.distance:
29 | goodMatches.append([m])
30 |
31 | # Draw all the good matches
32 | result = cv2.drawMatchesKnn(img1, keyPoints1, img2, keyPoints2, goodMatches, None, flags=2)
33 |
34 | # Display the results
35 | cv2.imshow('BF matches', result)
36 | cv2.waitKey(0)
37 | cv2.destroyAllWindows()
38 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/featureDetection/brief.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_brief/py_brief.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/simple.jpg')
11 |
12 | # Convert it to gray scale
13 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
14 |
15 | # Detect the CenSurE key points
16 | star = cv2.xfeatures2d.StarDetector_create()
17 | keyPoints = star.detect(gray, None)
18 |
19 | # Create the BRIEF extractor and compute the descriptors
20 | brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()
21 | keyPoints, descriptors = brief.compute(img, keyPoints)
22 |
23 | # Paint the key points over the original image
24 | result = cv2.drawKeypoints(img, keyPoints, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
25 |
26 | # Display the results
27 | cv2.imshow('Key points', result)
28 | cv2.waitKey(0)
29 | cv2.destroyAllWindows()
30 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/featureDetection/fast.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_fast/py_fast.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/butterfly.jpg')
11 |
12 | # Convert it to gray scale
13 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
14 |
15 | # Detect the FAST key points
16 | fast = cv2.FastFeatureDetector_create(threshold=200)
17 | keyPoints = fast.detect(gray, None)
18 |
19 | # Print some information
20 | print("Threshold: ", fast.getThreshold())
21 | print("nonmaxSuppression: ", fast.getNonmaxSuppression())
22 | print("neighborhood: ", fast.getType())
23 | print("Total Keypoints with nonmaxSuppression: ", len(keyPoints))
24 |
25 | # Paint the key points over the original image
26 | result = cv2.drawKeypoints(img, keyPoints, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
27 |
28 | # Display the results
29 | cv2.imshow('Key points', result)
30 | cv2.waitKey(0)
31 | cv2.destroyAllWindows()
32 |
--------------------------------------------------------------------------------
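Note: the same tutorial repeats the detection with non-maximal suppression disabled, which typically yields many more (clustered) key points:

    # Disable nonmaxSuppression and detect again
    fast.setNonmaxSuppression(0)
    keyPoints = fast.detect(gray, None)
    print("Total Keypoints without nonmaxSuppression: ", len(keyPoints))
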
/OpenCV-tutorials/featureDetection/flannMatchingWithSIFT.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the images in gray scale
10 | img1 = cv2.imread('../data/box.png', 0)
11 | img2 = cv2.imread('../data/box_in_scene.png', 0)
12 |
13 | # Detect the SIFT key points and compute the descriptors for the two images
14 | sift = cv2.xfeatures2d.SIFT_create()
15 | keyPoints1, descriptors1 = sift.detectAndCompute(img1, None)
16 | keyPoints2, descriptors2 = sift.detectAndCompute(img2, None)
17 |
18 | # Create FLANN matcher object
19 | FLANN_INDEX_KDTREE = 0
20 | indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
21 | searchParams = dict(checks=50)
22 | flann = cv2.FlannBasedMatcher(indexParams, searchParams)
23 |
24 | # Match the descriptors (this crashes in OpenCV3.1)
25 | # See https://github.com/Itseez/opencv/issues/5667
26 | matches = flann.knnMatch(descriptors1, descriptors2, k=2)
27 |
28 | # Apply ratio test
29 | goodMatches = []
30 |
31 | for m, n in matches:
32 | if m.distance < 0.75 * n.distance:
33 | goodMatches.append([m])
34 |
35 | # Draw all the good matches
36 | result = cv2.drawMatchesKnn(img1, keyPoints1, img2, keyPoints2, goodMatches, None, flags=2)
37 |
38 | # Display the results
39 | cv2.imshow('FLANN matches', result)
40 | cv2.waitKey(0)
41 | cv2.destroyAllWindows()
42 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/featureDetection/harris.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_features_harris/py_features_harris.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/chessboard.jpg')
11 |
12 | # Convert it to gray scale
13 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
14 |
15 | # Detect the Harris corners
16 | gray = np.float32(gray)
17 | cornersImg = cv2.cornerHarris(gray, blockSize=2, ksize=3, k=0.04)
18 |
19 | # Dilate the image to increase the corners size
20 | cornersImg = cv2.dilate(cornersImg, np.ones((3, 3)))
21 |
22 | # Paint the corners over the original image
23 | img[cornersImg > 0.2 * cornersImg.max()] = [0, 0, 255]
24 |
25 | # Display the results
26 | cv2.imshow('Corners', img)
27 | cv2.waitKey(0)
28 | cv2.destroyAllWindows()
29 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/featureDetection/homography.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the images in gray scale
10 | img1 = cv2.imread('../data/box.png', 0)
11 | img2 = cv2.imread('../data/box_in_scene.png', 0)
12 |
13 | # Detect the SIFT key points and compute the descriptors for the two images
14 | sift = cv2.xfeatures2d.SIFT_create()
15 | keyPoints1, descriptors1 = sift.detectAndCompute(img1, None)
16 | keyPoints2, descriptors2 = sift.detectAndCompute(img2, None)
17 |
18 | # Create brute-force matcher object
19 | bf = cv2.BFMatcher()
20 |
21 | # Match the descriptors
22 | matches = bf.knnMatch(descriptors1, descriptors2, k=2)
23 |
24 | # Select the good matches using the ratio test
25 | goodMatches = []
26 |
27 | for m, n in matches:
28 | if m.distance < 0.7 * n.distance:
29 | goodMatches.append(m)
30 |
31 | # Apply the homography transformation if we have enough good matches
32 | MIN_MATCH_COUNT = 10
33 |
34 | if len(goodMatches) > MIN_MATCH_COUNT:
35 | # Get the good key points positions
36 | sourcePoints = np.float32([ keyPoints1[m.queryIdx].pt for m in goodMatches ]).reshape(-1, 1, 2)
37 | destinationPoints = np.float32([ keyPoints2[m.trainIdx].pt for m in goodMatches ]).reshape(-1, 1, 2)
38 |
39 | # Obtain the homography matrix
40 | M, mask = cv2.findHomography(sourcePoints, destinationPoints, method=cv2.RANSAC, ransacReprojThreshold=5.0)
41 | matchesMask = mask.ravel().tolist()
42 |
43 | # Apply the perspective transformation to the source image corners
44 | h, w = img1.shape
45 | corners = np.float32([ [0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0] ]).reshape(-1, 1, 2)
46 | transformedCorners = cv2.perspectiveTransform(corners, M)
47 |
48 | # Draw a polygon on the second image joining the transformed corners
49 | img2 = cv2.polylines(img2, [np.int32(transformedCorners)], True, (255, 255, 255), 2, cv2.LINE_AA)
50 | else:
51 | print("Not enough matches are found - %d/%d" % (len(goodMatches), MIN_MATCH_COUNT))
52 | matchesMask = None
53 |
54 | # Draw the matches
55 | drawParameters = dict(matchColor=(0, 255, 0), singlePointColor=None, matchesMask=matchesMask, flags=2)
56 | result = cv2.drawMatches(img1, keyPoints1, img2, keyPoints2, goodMatches, None, **drawParameters)
57 |
58 | # Display the results
59 | cv2.imshow('Homography', result)
60 | cv2.waitKey(0)
61 | cv2.destroyAllWindows()
62 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/featureDetection/orb.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_orb/py_orb.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/simple.jpg')
11 |
12 | # Convert it to gray scale
13 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
14 |
15 | # Detect the ORB key points and compute the descriptors
16 | orb = cv2.ORB_create()
17 | keyPoints, descriptors = orb.detectAndCompute(gray, None)
18 |
19 | # Paint the key points over the original image
20 | result = cv2.drawKeypoints(img, keyPoints, None, color=(0, 255, 0), flags=0)
21 |
22 | # Display the results
23 | cv2.imshow('Key points', result)
24 | cv2.waitKey(0)
25 | cv2.destroyAllWindows()
26 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/featureDetection/shiTomasi.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/chessboard.jpg')
11 |
12 | # Convert it to gray scale
13 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
14 |
15 | # Detect the Shi-Tomasi corners
16 | gray = np.float32(gray)
17 | corners = cv2.goodFeaturesToTrack(gray, maxCorners=80, qualityLevel=0.01, minDistance=10)
18 |
19 | # Paint the corners over the original image
20 | for x, y in corners[:, 0]:
21 | cv2.circle(img, (int(x), int(y)), 3, (0, 0, 255), -1)
22 |
23 | # Display the results
24 | cv2.imshow('Corners', img)
25 | cv2.waitKey(0)
26 | cv2.destroyAllWindows()
27 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/featureDetection/sift.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/home.jpg')
11 |
12 | # Convert it to gray scale
13 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
14 |
15 | # Detect the SIFT key points and compute the descriptors
16 | sift = cv2.xfeatures2d.SIFT_create()
17 | keyPoints, descriptors = sift.detectAndCompute(gray, None)
18 |
19 | # Paint the key points over the original image
20 | result = cv2.drawKeypoints(img, keyPoints, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
21 |
22 | # Display the results
23 | cv2.imshow('Key points', result)
24 | cv2.waitKey(0)
25 | cv2.destroyAllWindows()
26 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/featureDetection/surf.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/butterfly.jpg')
11 |
12 | # Convert it to gray scale
13 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
14 |
15 | # Detect the SURF key points
16 | surf = cv2.xfeatures2d.SURF_create(hessianThreshold=50000, upright=True, extended=True)
17 | keyPoints, descriptors = surf.detectAndCompute(gray, None)
18 |
19 | # Paint the key points over the original image
20 | result = cv2.drawKeypoints(img, keyPoints, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
21 |
22 | # Display the results
23 | cv2.imshow('Key points', result)
24 | cv2.waitKey(0)
25 | cv2.destroyAllWindows()
26 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/guiFeatures/drawingBasics.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Create a black image
10 | imgWidth = 500
11 | imgHeight = 300
12 | img = np.zeros((imgHeight, imgWidth, 3), dtype=np.uint8)
13 |
14 | # Draw a diagonal blue line
15 | cv2.line(img, (0, 0), (imgWidth - 1, imgHeight - 1), color=(255, 0, 0), thickness=5)
16 |
17 | # Draw a green rectangle
18 | cv2.rectangle(img, (int(imgWidth / 2), 10), (imgWidth, int(imgHeight / 2)), color=(0, 255, 0), thickness=3)
19 |
20 | # Draw a red circle
21 | cv2.circle(img, (int(imgWidth / 2), int(imgHeight / 2)), 50, color=(0, 0, 255), thickness=-1)
22 |
23 | # Draw an inclined ellipse
24 | cv2.ellipse(img, (100, imgHeight - 100), axes=(100, 50), angle=20, startAngle=0, endAngle=180, color=255, thickness=-1)
25 |
26 | # Draw a closed polygon defined by a set of points
27 | pts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], dtype=np.int32)
28 | pts = pts.reshape((-1, 1, 2))
29 | cv2.polylines(img, [pts], isClosed=True, color=(0, 255, 255))
30 |
31 | # Finally draw some text with anti-aliased font
32 | font = cv2.FONT_HERSHEY_SIMPLEX
33 | cv2.putText(img, 'OpenCV', (10, 250), fontFace=font, fontScale=2, color=(255, 255, 255), thickness=2, lineType=cv2.LINE_AA)
34 |
35 | # Display the final image
36 | cv2.imshow('image', img)
37 |
38 | # Exit when a key is pressed
39 | cv2.waitKey(0)
40 | cv2.destroyAllWindows()
41 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/guiFeatures/guiBasics.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_trackbar/py_trackbar.html
4 | '''
5 |
6 | import cv2
7 | import numpy as np
8 |
9 | # Create a black image
10 | img = np.zeros((300, 512, 3), np.uint8)
11 |
12 | # Define the trackbars call back function
13 | def nothing(x):
14 | pass
15 |
16 | # Create a window and add the trackbars to it
17 | cv2.namedWindow('image')
18 | cv2.createTrackbar('R', 'image', 100, 255, nothing)
19 | cv2.createTrackbar('G', 'image', 0, 255, nothing)
20 | cv2.createTrackbar('B', 'image', 0, 255, nothing)
21 |
22 | # Add also a switch for ON/OFF functionality
23 | switch = '0 : OFF \n1 : ON'
24 | cv2.createTrackbar(switch, 'image', 1, 1, nothing)
25 |
26 | while True:
27 | # Display the image
28 | cv2.imshow('image', img)
29 | k = cv2.waitKey(1) & 0xFF
30 |
31 | # Exit if the user presses a key
32 | if k == 27:
33 | break
34 |
35 | # Update the image depending on the trackbar values
36 | if cv2.getTrackbarPos(switch, 'image'):
37 | r = cv2.getTrackbarPos('R', 'image')
38 | g = cv2.getTrackbarPos('G', 'image')
39 | b = cv2.getTrackbarPos('B', 'image')
40 | img[:] = [b, g, r]
41 | else:
42 | img[:] = 0
43 |
44 | # Destroy all windows
45 | cv2.destroyAllWindows()
46 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/guiFeatures/imageBasics.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_image_display/py_image_display.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load an image
11 | img = cv2.imread('../data/messi5.jpg', 0) # in greyscale format
12 | # img = cv2.imread('../data/messi5.jpg', -1) # in bgr format
13 |
14 | # Display the image using matplotlib or the opencv methods
15 | useMatplotlib = True
16 |
17 | if(useMatplotlib):
18 | plt.imshow(img, cmap='gray', interpolation='bicubic')
19 | plt.xticks([]), plt.yticks([])
20 | plt.show()
21 | else:
22 | # Create an empty window
23 | windowName = 'image'
24 | cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
25 |
26 | # Display the image on the window
27 | cv2.imshow(windowName, img)
28 |
29 | # Wait until a key is pressed
30 | k = cv2.waitKey(0) & 0xFF
31 |
32 | if k == 27:
33 | # Close all windows
34 | cv2.destroyAllWindows()
35 | # cv2.destroyWindow(windowName)
36 | elif k == ord('s'):
37 | # Save the modified image
38 | cv2.imwrite('../out/messigray.png', img)
39 | cv2.destroyAllWindows()
40 |
41 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/guiFeatures/interactionBasics.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Print all the available events
10 | for i in dir(cv2):
11 | if 'EVENT' in i:
12 | print(i)
13 |
14 | # Define the mouse event callback function
15 | xInit, yInit = -1, -1
16 | drawing = False
17 | drawRectangle = True
18 |
19 | def drawCircle(event, x, y, flags, param):
20 | # Global variables that we want to update
21 | global xInit, yInit, drawing
22 |
23 | if event == cv2.EVENT_LBUTTONDOWN:
24 | xInit, yInit = x, y
25 | drawing = True
26 | elif event == cv2.EVENT_MOUSEMOVE:
27 |         if drawing:
28 | drawFigure(x, y)
29 | elif event == cv2.EVENT_LBUTTONUP:
30 | drawFigure(x, y)
31 | drawing = False
32 |
33 | def drawFigure(x, y):
34 |     if drawRectangle:
35 | cv2.rectangle(img, (xInit, yInit), (x, y), (0, 255, 0), -1)
36 | else:
37 | cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
38 |
39 |
40 | # Create a black image
41 | img = np.zeros((512, 512, 3), np.uint8)
42 |
43 | # Create the display window and bind the mouse event function to it
44 | cv2.namedWindow('image')
45 | cv2.setMouseCallback('image', drawCircle)
46 |
47 | # Update the window until the user decides to exit
48 | while True:
49 | cv2.imshow('image', img)
50 | k = cv2.waitKey(20) & 0xFF
51 |
52 | if k == ord('m'):
53 | drawRectangle = not drawRectangle
54 | elif k == 27 or k == ord('q'):
55 | break
56 |
57 | # Destroy all windows before exit
58 | cv2.destroyAllWindows()
59 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/guiFeatures/videoBasics.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_video_display/py_video_display.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Initialize the video capture
10 | useWebcam = True
11 |
12 | if useWebcam:
13 | cap = cv2.VideoCapture(0)
14 | else:
15 | cap = cv2.VideoCapture('../data/drop.avi')
16 |
17 | # Check that the capture is open
18 | if not cap.isOpened():
19 | cap.open()
20 |
21 | # Get some useful information
22 | width = int(cap.get(3))
23 | height = int(cap.get(4))
24 | fps = int(cap.get(5))
25 |
26 | if fps == -1:
27 | fps = 30
28 |
29 | # Define the video codec and create VideoWriter object
30 | fourcc = cv2.VideoWriter_fourcc(*'XVID')
31 | videoOutput = cv2.VideoWriter('../out/output.avi', fourcc, fps, (width, height))
32 |
33 | # Display the capture frames
34 | while True:
35 | # Get the next frame
36 | ret, frame = cap.read()
37 |
38 | if ret:
39 | # Convert the frame to gray scale
40 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
41 |
42 | # Flip the frame
43 | gray = cv2.flip(gray, 1)
44 |
45 | # Convert the frame again to BGR
46 | gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
47 |
48 | # Write the flipped gray frame
49 | videoOutput.write(gray)
50 |
51 | # Display the resulting frame
52 | cv2.imshow('frame', gray)
53 | k = cv2.waitKey(round(1000 / fps)) & 0xFF
54 |
55 | # User interaction
56 | if k == ord('q'):
57 | break
58 | else:
59 | break
60 |
61 | # When everything is done, release the capture and close all windows
62 | cap.release()
63 | videoOutput.release()
64 | cv2.destroyAllWindows()
65 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/canny.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_canny/py_canny.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Define the trackbars call back functions
10 | def threshold1Callback(x):
11 | global threshold1, edges
12 | threshold1 = x
13 | edges = cv2.Canny(img, threshold1, threshold2)
14 | return
15 |
16 | def threshold2Callback(x):
17 | global threshold2, edges
18 | threshold2 = x
19 | edges = cv2.Canny(img, threshold1, threshold2)
20 | return
21 |
22 | # Load the image in gray scale
23 | img = cv2.imread('../data/messi5.jpg', 0)
24 |
25 | # Apply the Canny edge detection algorithm with the initial threshold values
26 | threshold1 = 100
27 | threshold2 = 200
28 | edges = cv2.Canny(img, threshold1, threshold2)
29 |
30 | # Create the display window and add the two trackbars
31 | cv2.namedWindow('canny')
32 | cv2.createTrackbar('threshold1', 'canny', threshold1, 255, threshold1Callback)
33 | cv2.createTrackbar('threshold2', 'canny', threshold2, 255, threshold2Callback)
34 |
35 | # Display the results
36 | while True:
37 | cv2.imshow('canny', edges)
38 | k = cv2.waitKey(1) & 0xFF
39 |
40 | # Exit if the user presses the ESC key
41 | if k == 27:
42 | break
43 |
44 | # Destroy all windows
45 | cv2.destroyAllWindows()
46 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/colorSpaces.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Initialize the webcam
10 | cap = cv2.VideoCapture(0)
11 |
12 | while True:
13 | # Get the next frame
14 | ret, frame = cap.read()
15 |
16 | # Convert from BGR scale to HSV scale
17 | hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
18 |
19 | # Define range of blue color in HSV
20 | lowerBlue = np.array([110, 50, 50])
21 | upperBlue = np.array([130, 255, 255])
22 |
23 | # Threshold the HSV image to get only blue colors
24 | mask = cv2.inRange(hsv, lowerBlue, upperBlue)
25 |
26 | # Remove everything that is not in the mask
27 | res = cv2.bitwise_and(frame, frame, mask=mask)
28 |
29 | # Display the original frame, the mask and the masked result
30 | cv2.imshow('frame', frame)
31 | cv2.imshow('mask', mask)
32 | cv2.imshow('res', res)
33 | k = cv2.waitKey(5) & 0xFF
34 |
35 | # User interaction
36 | if k == 27:
37 | break
38 |
39 | # When everything is done, release the capture and close all windows
40 | cap.release()
41 | cv2.destroyAllWindows()
42 |
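43 | # To track a different color (as the same tutorial suggests), convert a single
44 | # BGR pixel to HSV and build the range around its hue. Example for green:
45 | green = np.uint8([[[0, 255, 0]]])
46 | hsvGreen = cv2.cvtColor(green, cv2.COLOR_BGR2HSV)
47 | print('HSV value of green:', hsvGreen[0, 0])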
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/contours.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.html
4 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.html
5 | '''
6 |
7 | import numpy as np
8 | import cv2
9 |
10 | # Create a black image
11 | imgWidth = 500
12 | imgHeight = 300
13 | img = np.zeros((imgHeight, imgWidth, 3), dtype=np.uint8)
14 |
15 | # Draw a blue rectangle
16 | startPoint = (int(imgWidth / 4), int(imgHeight / 4))
17 | endPoint = (int(3 * imgWidth / 4), int(3 * imgHeight / 4))
18 | cv2.rectangle(img, startPoint, endPoint, color=(255, 0, 0), thickness=-1)
19 |
20 | # Draw a black rectangle inside
21 | startPoint = (int(imgWidth / 3), int(imgHeight / 3))
22 | endPoint = (int(2 * imgWidth / 3), int(2 * imgHeight / 3))
23 | cv2.rectangle(img, startPoint, endPoint, color=(0, 0, 0), thickness=-1)
24 |
25 | # Rotate the image
26 | M = cv2.getRotationMatrix2D((imgWidth / 2, imgHeight / 2), 40, 1)
27 | img = cv2.warpAffine(img, M, (imgWidth, imgHeight))
28 |
29 | # Convert the image to gray scale
30 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
31 |
32 | # Create a binary image
33 | ret, thresh = cv2.threshold(gray, 20, 255, 0)
34 |
35 | # Obtain the image contours
36 | thresh, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
37 |
38 | # Draw the contours
39 | cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
40 |
41 | # Draw the points from the outer contour
42 | cnt = contours[0]
43 | for point in cnt.reshape(-1, 2):
44 | cv2.circle(img, tuple(point), 7, color=(0, 0, 255), thickness=2)
45 |
46 | # Calculate the centroid
47 | M = cv2.moments(cnt)
48 | cx = int(M['m10'] / M['m00'])
49 | cy = int(M['m01'] / M['m00'])
50 | cv2.circle(img, (cx, cy), 7, color=(255, 255, 255), thickness=-1)
51 |
52 | # Print some other information
53 | print('Contour area:', cv2.contourArea(cnt, True))
54 | print('Contour perimeter:', cv2.arcLength(cnt, True))
55 |
56 | # Simplify the contour
57 | epsilon = 0.1 * cv2.arcLength(cnt, True)
58 | cntApprox = cv2.approxPolyDP(cnt, epsilon, True)
59 | for point in cntApprox.reshape(-1, 2):
60 | cv2.circle(img, tuple(point), 7, color=(255, 0, 0), thickness=2)
61 |
62 | # Draw the bounding rectangle
63 | x, y, w, h = cv2.boundingRect(cnt)
64 | cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
65 |
66 | # Draw the minimum area rectangle
67 | rect = cv2.minAreaRect(cnt)
68 | box = cv2.boxPoints(rect)
69 | box = np.int0(box)
70 | cv2.drawContours(img, [box], 0, (0, 255, 255), 2)
71 |
72 | # Draw the minimum enclosing circle
73 | (x, y), radius = cv2.minEnclosingCircle(cnt)
74 | center = (int(x), int(y))
75 | radius = int(radius)
76 | cv2.circle(img, center, radius, (255, 0, 255), 2)
77 |
78 | # Draw an ellipse around it
79 | ellipse = cv2.fitEllipse(cnt)
80 | cv2.ellipse(img, ellipse, (255, 255, 255), 2)
81 | rows, cols = img.shape[:2]
82 |
83 | # Fit a line to the contour and draw it
84 | [vx, vy, x, y] = cv2.fitLine(cnt, cv2.DIST_L2, 0, 0.01, 0.01)
85 | lefty = int((-x * vy / vx) + y)
86 | righty = int(((cols - x) * vy / vx) + y)
87 | cv2.line(img, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)
88 |
89 | # Get the extreme points
90 | leftmost = tuple(cnt[cnt[:, :, 0].argmin()][0])
91 | rightmost = tuple(cnt[cnt[:, :, 0].argmax()][0])
92 | topmost = tuple(cnt[cnt[:, :, 1].argmin()][0])
93 | bottommost = tuple(cnt[cnt[:, :, 1].argmax()][0])
94 | cv2.circle(img, leftmost, 4, color=(255, 0, 0), thickness=-1)
95 | cv2.circle(img, rightmost, 4, color=(255, 0, 0), thickness=-1)
96 | cv2.circle(img, topmost, 4, color=(255, 0, 0), thickness=-1)
97 | cv2.circle(img, bottommost, 4, color=(255, 0, 0), thickness=-1)
98 |
99 | # Display the result
100 | cv2.imshow('image', img)
101 | cv2.waitKey(0)
102 | cv2.destroyAllWindows()
103 |
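104 | # More properties from the contour properties tutorial: aspect ratio, extent
105 | # (contour area over bounding box area) and solidity (contour area over
106 | # convex hull area)
107 | x, y, w, h = cv2.boundingRect(cnt)
108 | aspectRatio = w / h
109 | extent = cv2.contourArea(cnt) / (w * h)
110 | solidity = cv2.contourArea(cnt) / cv2.contourArea(cv2.convexHull(cnt))
111 | print('Aspect ratio:', aspectRatio, 'Extent:', extent, 'Solidity:', solidity)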
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/fourier.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load the image in gray scale
11 | img = cv2.imread('../data/messi5.jpg', 0)
12 | rows, cols = img.shape
13 |
14 | # Transform the image to improve the speed in the fourier transform calculation
15 | optimalRows = cv2.getOptimalDFTSize(rows)
16 | optimalCols = cv2.getOptimalDFTSize(cols)
17 | optimalImg = np.zeros((optimalRows, optimalCols))
18 | optimalImg[:rows, :cols] = img
19 | crow, ccol = int(optimalRows / 2), int(optimalCols / 2)
20 |
21 | # Calculate the discrete Fourier transform
22 | dft = cv2.dft(np.float32(optimalImg), flags=cv2.DFT_COMPLEX_OUTPUT)
23 | dftShift = np.fft.fftshift(dft)
24 |
25 | # Mask everything except the center
26 | mask = np.zeros((optimalRows, optimalCols, 2), np.uint8)
27 | mask[crow - 30:crow + 30, ccol - 30:ccol + 30] = 1
28 | dftShift = dftShift * mask
29 |
30 | # Rescale the values for visualization purposes (the +1 avoids log(0) in the masked regions)
31 | magnitudeSpectrum = 20 * np.log(cv2.magnitude(dftShift[:, :, 0], dftShift[:, :, 1]) + 1)
32 |
33 | # Reconstruct the image using the inverse Fourier transform
34 | newDft = np.fft.ifftshift(dftShift)
35 | result = cv2.idft(newDft)
36 | result = cv2.magnitude(result[:, :, 0], result[:, :, 1])
37 |
38 | # Display the results
39 | images = [optimalImg, magnitudeSpectrum, result]
40 | imageTitles = ['Input image', 'Magnitude Spectrum', 'Result']
41 |
42 | for i in range(len(images)):
43 | plt.subplot(1, 3, i + 1)
44 | plt.imshow(images[i], cmap='gray')
45 | plt.title(imageTitles[i])
46 | plt.xticks([])
47 | plt.yticks([])
48 |
49 | plt.show()
50 |
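51 | # The complementary high pass filter (also shown in the tutorial): zero out
52 | # the low frequencies at the center and keep the rest, which preserves edges
53 | hpfShift = np.fft.fftshift(dft)
54 | hpfShift[crow - 30:crow + 30, ccol - 30:ccol + 30] = 0
55 | hpfResult = cv2.idft(np.fft.ifftshift(hpfShift))
56 | hpfResult = cv2.magnitude(hpfResult[:, :, 0], hpfResult[:, :, 1])
57 | plt.imshow(hpfResult, cmap='gray')
58 | plt.title('High pass filter')
59 | plt.show()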
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/geometricTransformations.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/messi5.jpg')
11 |
12 | # Increase the size by a factor of 2
13 | zoomedImg = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
14 |
15 | # Equivalent way to do it
16 | height, width = img.shape[:2]
17 | zoomedImg = cv2.resize(img, (2 * width, 2 * height), interpolation=cv2.INTER_CUBIC)
18 |
19 | # Create a translation
20 | M = np.float32([[1, 0, 100], [0, 1, 50]])
21 | translatedImg = cv2.warpAffine(img, M, (width, height))
22 |
23 | # Create a rotation and reduce the image size
24 | M = cv2.getRotationMatrix2D((width / 2, height / 2), 45, 0.5)
25 | rotatedImg = cv2.warpAffine(img, M, (width, height))
26 |
27 | # Display all the different images
28 | cv2.imshow('zoomed', zoomedImg)
29 | cv2.imshow('translated', translatedImg)
30 | cv2.imshow('rotated', rotatedImg)
31 | cv2.waitKey(0)
32 | cv2.destroyAllWindows()
33 |
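34 | # An affine transformation can also be built from three point pairs, as the
35 | # same tutorial shows; the point values below are only illustrative
36 | srcPoints = np.float32([[50, 50], [200, 50], [50, 200]])
37 | dstPoints = np.float32([[10, 100], [200, 50], [100, 250]])
38 | M = cv2.getAffineTransform(srcPoints, dstPoints)
39 | affineImg = cv2.warpAffine(img, M, (width, height))
40 | cv2.imshow('affine', affineImg)
41 | cv2.waitKey(0)
42 | cv2.destroyAllWindows()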
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/grabCut.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/messi5.jpg')
11 |
12 | # Run the grab cut algorithm
13 | mask = np.zeros(img.shape[:2], np.uint8)
14 | rect = (50, 50, 450, 290)
15 | bgdModel = np.zeros((1, 65), np.float64)
16 | fgdModel = np.zeros((1, 65), np.float64)
17 | cv2.grabCut(img, mask, rect, bgdModel, fgdModel, iterCount=5, mode=cv2.GC_INIT_WITH_RECT)
18 |
19 | # Combine the secure regions with the probable ones
20 | resultMask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
21 | img = img * resultMask[:, :, np.newaxis]
22 |
23 | # Display the results
24 | cv2.imshow('grabCut result', img)
25 | cv2.waitKey(0)
26 | cv2.destroyAllWindows()
27 |
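28 | # The tutorial also runs a refinement pass: mark extra pixels in the mask as
29 | # sure foreground (1) or sure background (0) and rerun in mask mode. The
30 | # touch-up region below is purely illustrative.
31 | img = cv2.imread('../data/messi5.jpg')
32 | mask[150:200, 100:300] = 1
33 | cv2.grabCut(img, mask, None, bgdModel, fgdModel, iterCount=5, mode=cv2.GC_INIT_WITH_MASK)
34 | resultMask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
35 | cv2.imshow('Refined result', img * resultMask[:, :, np.newaxis])
36 | cv2.waitKey(0)
37 | cv2.destroyAllWindows()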
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/histogram2D.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load the image
11 | img = cv2.imread('../data/home.jpg')
12 |
13 | # Convert the image to HSV
14 | hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
15 |
16 | # Calculate the 2D histogram over the hue and saturation channels
17 | hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
18 |
19 | # Show the images and the histograms
20 | plt.subplot(221)
21 | plt.imshow(img[:, :, ::-1])
22 |
23 | plt.subplot(222)
24 | plt.imshow(hist, interpolation='nearest')
25 |
26 | plt.show()
27 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/histogramBackprojection.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the target image
10 | target = cv2.imread('../data/rose.png')
11 | targetHsv = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
12 |
13 | # Define the model image
14 | model = target[100:170, 300:400]
15 | modelHsv = cv2.cvtColor(model, cv2.COLOR_BGR2HSV)
16 |
17 | # Calculate the model 2D histogram
18 | modelHist = cv2.calcHist([modelHsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
19 | maxHistValue = np.max(modelHist)
20 |
21 | # Create a new backprojected image using the model histogram
22 | h, s, v = cv2.split(targetHsv)
23 | probability = modelHist[h.ravel(), s.ravel()] / maxHistValue
24 | backprojectedImg = np.uint8(255 * probability).reshape(target.shape[:2])
25 |
26 | # Close small holes
27 | disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
28 | backprojectedImg = cv2.morphologyEx(backprojectedImg, op=cv2.MORPH_CLOSE, kernel=disk)
29 |
30 | # Threshold the backprojected image
31 | ret, thresh = cv2.threshold(backprojectedImg, 1, 255, 0)
32 | thresh = cv2.merge((thresh, thresh, thresh))
33 |
34 | # Obtain the final filtered image
35 | result = cv2.bitwise_and(target, thresh)
36 |
37 | # Display the results
38 | combined1 = np.hstack((target, result))
39 | combined2 = np.hstack((cv2.merge((backprojectedImg, backprojectedImg, backprojectedImg)), thresh))
40 | combined = np.vstack((combined1, combined2))
41 | cv2.imshow('result', combined)
42 | cv2.waitKey(0)
43 | cv2.destroyAllWindows()
44 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/histogramBackprojection2.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the target image
10 | target = cv2.imread('../data/rose.png')
11 | targetHsv = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
12 |
13 | # Define the model image
14 | model = target[100:170, 300:400]
15 | modelHsv = cv2.cvtColor(model, cv2.COLOR_BGR2HSV)
16 |
17 | # Calculate the model 2D histogram
18 | modelHist = cv2.calcHist([modelHsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
19 |
20 | # Normalize the histogram and get the backprojected image
21 | cv2.normalize(modelHist, modelHist, 0, 255, cv2.NORM_MINMAX)
22 | backprojectedImg = cv2.calcBackProject([targetHsv], [0, 1], modelHist, [0, 180, 0, 256], 1)
23 |
24 | # Convolve the backprojected image with a circular disc
25 | disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
26 | backprojectedImg = cv2.filter2D(backprojectedImg, -1, disc)
27 |
28 | # Threshold the backprojected image
29 | ret, thresh = cv2.threshold(backprojectedImg, 40, 255, 0)
30 | thresh = cv2.merge((thresh, thresh, thresh))
31 |
32 | # Obtain the final filtered image
33 | result = cv2.bitwise_and(target, thresh)
34 |
35 | # Display the results
36 | combined1 = np.hstack((target, result))
37 | combined2 = np.hstack((cv2.merge((backprojectedImg, backprojectedImg, backprojectedImg)), thresh))
38 | combined = np.vstack((combined1, combined2))
39 | cv2.imshow('result', combined)
40 | cv2.waitKey(0)
41 | cv2.destroyAllWindows()
42 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/histogramEqualization.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load the image in gray scale
11 | img = cv2.imread('../data/wiki.jpg', 0)
12 |
13 | # Calculate the histogram
14 | hist, bins = np.histogram(img.flatten(), 256, [0, 256])
15 |
16 | # Calculate the cumulative distribution function of the histogram
17 | cdf = hist.cumsum()
18 |
19 | # Equalize the image
20 | cdfMasked = np.ma.masked_equal(cdf, 0)
21 | cdfMasked = (cdfMasked - cdfMasked.min()) * 255 / (cdfMasked.max() - cdfMasked.min())
22 | equalizeFunction = np.ma.filled(cdfMasked, 0).astype('uint8')
23 | equalizedImg = equalizeFunction[img]
24 |
25 | # Calculate the histogram and the cdf of the equalized image
26 | equalizedHist, bins = np.histogram(equalizedImg.flatten(), 256, [0, 256])
27 | equalizedCdf = equalizedHist.cumsum()
28 |
29 | # Display the results
30 | plt.subplot(221)
31 | plt.title('original')
32 | plt.imshow(img, 'gray')
33 | plt.xticks([])
34 | plt.yticks([])
35 |
36 | plt.subplot(222)
37 | plt.hist(img.flatten(), 256, [0, 256], color='r')
38 | plt.plot(cdf * hist.max() / cdf.max(), color='b')
39 | plt.xlim([0, 256])
40 | plt.legend(('cdf', 'histogram'), loc='upper left')
41 |
42 | plt.subplot(223)
43 | plt.title('equalized')
44 | plt.imshow(equalizedImg, 'gray')
45 | plt.xticks([])
46 | plt.yticks([])
47 |
48 | plt.subplot(224)
49 | plt.hist(equalizedImg.flatten(), 256, [0, 256], color='r')
50 | plt.plot(equalizedCdf * equalizedHist.max() / equalizedCdf.max(), color='b')
51 | plt.xlim([0, 256])
52 | plt.legend(('cdf', 'histogram'), loc='upper left')
53 |
54 | plt.show()
55 |
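56 | # For reference, OpenCV packs this whole equalization pipeline into a single
57 | # call that produces an equivalent result
58 | equalizedImg2 = cv2.equalizeHist(img)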
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/histogramEqualization2.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load the image in gray scale
11 | img = cv2.imread('../data/tsukuba_l.png', 0)
12 |
13 | # Equalize the image
14 | equalizedImg = cv2.equalizeHist(img)
15 |
16 | # Use instead Contrast Limited Adaptive Histogram Equalization (CLAHE)
17 | clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(8, 8))
18 | claheImg = clahe.apply(img)
19 |
20 | # Display the results
21 | plt.subplot(131)
22 | plt.title('original')
23 | plt.imshow(img, 'gray')
24 | plt.xticks([])
25 | plt.yticks([])
26 |
27 | plt.subplot(132)
28 | plt.title('equalized')
29 | plt.imshow(equalizedImg, 'gray')
30 | plt.xticks([])
31 | plt.yticks([])
32 |
33 | plt.subplot(133)
34 | plt.title('CLAHE')
35 | plt.imshow(claheImg, 'gray')
36 | plt.xticks([])
37 | plt.yticks([])
38 |
39 | plt.show()
40 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/histograms.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load the image in gray scale
11 | img = cv2.imread('../data/home.jpg', 0)
12 |
13 | # Create a mask
14 | mask = np.zeros(img.shape[:2], np.uint8)
15 | mask[100:300, 100:400] = 255
16 | maskedImg = cv2.bitwise_and(img, img, mask=mask)
17 |
18 | # Calculate the histogram with mask and without the mask
19 | hist = cv2.calcHist([img], [0], None, [256], [0, 256])
20 | maskedHist = cv2.calcHist([img], [0], mask, [256], [0, 256])
21 |
22 | # Show the images and the histograms
23 | plt.subplot(221)
24 | plt.imshow(img, 'gray')
25 |
26 | plt.subplot(222)
27 | plt.imshow(mask, 'gray')
28 |
29 | plt.subplot(223)
30 | plt.imshow(maskedImg, 'gray')
31 |
32 | plt.subplot(224)
33 | plt.plot(hist)
34 | plt.plot(maskedHist)
35 | plt.xlim([0, 256])
36 |
37 | plt.show()
38 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/houghCircles.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/opencv_logo.jpg')
11 |
12 | # Convert it to gray scale
13 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
14 |
15 | # Apply a median blur to remove noise
16 | gray = cv2.medianBlur(gray, 3)
17 |
18 | # Detect the circles
19 | circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
20 |
21 | # Draw the circles on the original image
22 | circles = np.uint16(np.around(circles))
23 |
24 | for circle in circles[0, :]:
25 | # Draw the outer circle
26 | cv2.circle(img, (circle[0], circle[1]), circle[2], (0, 255, 0), 2)
27 |
28 | # Draw the circle center
29 | cv2.circle(img, (circle[0], circle[1]), 2, (0, 0, 255), 3)
30 |
31 | # Display the results
32 | cv2.imshow('Detected circles', img)
33 | cv2.waitKey(0)
34 | cv2.destroyAllWindows()
35 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/houghLines.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/sudoku-original.jpg')
11 | rows, cols, channels = img.shape
12 |
13 | # Convert it to gray scale and detect the edges
14 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
15 | edges = cv2.Canny(gray, threshold1=50, threshold2=150, apertureSize=3)
16 |
17 | # Obtain the Hough transform
18 | deltaRho = 1
19 | deltaTheta = np.pi / 180
20 | threshold = 200
21 | lines = cv2.HoughLines(edges, deltaRho, deltaTheta, threshold)
22 |
23 | # Paint the lines on the image
24 | maxLength = np.sqrt(rows ** 2 + cols ** 2)
25 |
26 | for rho, theta in lines[:, 0]:
27 | cos = np.cos(theta)
28 | sin = np.sin(theta)
29 | x0 = rho * cos
30 | y0 = rho * sin
31 | x1 = int(x0 + maxLength * sin)
32 | y1 = int(y0 - maxLength * cos)
33 | x2 = int(x0 - maxLength * sin)
34 | y2 = int(y0 + maxLength * cos)
35 | cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
36 |
37 | # Obtain the probabilistic Hough transform
38 | deltaRho = 1
39 | deltaTheta = np.pi / 180
40 | threshold = 100
41 | minLineLength = 100
42 | maxLineGap = 10
43 | lines = cv2.HoughLinesP(edges, deltaRho, deltaTheta, threshold, minLineLength=minLineLength, maxLineGap=maxLineGap)
44 |
45 | # Paint the lines on the image
46 | for x1, y1, x2, y2 in lines[:, 0]:
47 | cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)
48 |
49 | # Display the results
50 | cv2.imshow('result', img)
51 | cv2.waitKey(0)
52 | cv2.destroyAllWindows()
53 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/imageGradients.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load the image in gray scale
11 | img = cv2.imread('../data/sudoku-original.jpg', 0)
12 |
13 | # Calculate the different filters
14 | laplacian = cv2.Laplacian(img, ddepth=cv2.CV_64F)
15 | sobelx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=5)
16 | sobely = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=5)
17 |
18 | # Remove the negative values taking the absolute
19 | laplacian = np.absolute(laplacian)
20 | sobelx = np.absolute(sobelx)
21 | sobely = np.absolute(sobely)
22 |
23 | # Display the results
24 | titles = ['original', 'Laplacian', 'Sobel x', 'Sobel y']
25 | images = [img, laplacian, sobelx, sobely]
26 |
27 | for i in range(len(titles)):
28 | plt.subplot(2, 2, i + 1)
29 | plt.imshow(images[i], cmap='gray')
30 | plt.title(titles[i])
31 | plt.xticks([])
32 | plt.yticks([])
33 |
34 | plt.show()
35 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/morphologicalTransformations.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load the image in gray scale
11 | img = cv2.imread('../data/j.png', 0)
12 |
13 | # Create the desired kernel
14 | kernelType = 1
15 |
16 | if kernelType == 0:
17 | kernel = np.ones((5, 5), np.uint8)
18 | elif kernelType == 1:
19 | kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
20 | elif kernelType == 2:
21 | kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
22 | elif kernelType == 3:
23 | kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
24 |
25 | # Apply the different transformations
26 | erosion = cv2.erode(img, kernel=kernel, iterations=1)
27 | dilation = cv2.dilate(img, kernel=kernel, iterations=1)
28 | opening = cv2.morphologyEx(img, op=cv2.MORPH_OPEN, kernel=kernel)
29 | closing = cv2.morphologyEx(img, op=cv2.MORPH_CLOSE, kernel=kernel)
30 | gradient = cv2.morphologyEx(img, op=cv2.MORPH_GRADIENT, kernel=kernel)
31 | tophat = cv2.morphologyEx(img, op=cv2.MORPH_TOPHAT, kernel=kernel)
32 | blackhat = cv2.morphologyEx(img, op=cv2.MORPH_BLACKHAT, kernel=kernel)
33 |
34 | # Display the results
35 | titles = ['original', 'erosion', 'dilation', 'opening', 'closing', 'gradient', 'tophat', 'blackhat']
36 | images = [img, erosion, dilation, opening, closing, gradient, tophat, blackhat]
37 |
38 | for i in range(len(titles)):
39 | plt.subplot(3, 3, i + 1)
40 | plt.imshow(images[i], cmap='gray', interpolation='bicubic')
41 | plt.title(titles[i])
42 | plt.xticks([])
43 | plt.yticks([])
44 |
45 | plt.show()
46 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/pyramids.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.html
4 | '''
5 |
6 | import cv2
7 | import numpy as np
8 |
9 | # Load the two images that we want to blend
10 | imgA = cv2.imread('../data/apple.jpg')
11 | imgB = cv2.imread('../data/orange.jpg')
12 |
13 | # Set the total number of pyramid levels
14 | levels = 5
15 |
16 | # Generate Gaussian pyramid for imgA
17 | gaussianPyramidA = [imgA.copy()]
18 | for i in range(1, levels):
19 | gaussianPyramidA.append(cv2.pyrDown(gaussianPyramidA[i - 1]))
20 |
21 | # Generate Gaussian pyramid for imgB
22 | gaussianPyramidB = [imgB.copy()]
23 | for i in range(1, levels):
24 | gaussianPyramidB.append(cv2.pyrDown(gaussianPyramidB[i - 1]))
25 |
26 | # Generate the inverse Laplacian Pyramid for imgA
27 | laplacianPyramidA = [gaussianPyramidA[-1]]
28 | for i in range(levels - 1, 0, -1):
29 | laplacian = cv2.subtract(gaussianPyramidA[i - 1], cv2.pyrUp(gaussianPyramidA[i]))
30 | laplacianPyramidA.append(laplacian)
31 |
32 | # Generate the inverse Laplacian Pyramid for imgB
33 | laplacianPyramidB = [gaussianPyramidB[-1]]
34 | for i in range(levels - 1, 0, -1):
35 | laplacian = cv2.subtract(gaussianPyramidB[i - 1], cv2.pyrUp(gaussianPyramidB[i]))
36 | laplacianPyramidB.append(laplacian)
37 |
38 | # Add the left and right halves of the Laplacian images in each level
39 | laplacianPyramidComb = []
40 | for laplacianA, laplacianB in zip(laplacianPyramidA, laplacianPyramidB):
41 | rows, cols, dpt = laplacianA.shape
42 | laplacianComb = np.hstack((laplacianA[:, 0:cols // 2], laplacianB[:, cols // 2:]))
43 | laplacianPyramidComb.append(laplacianComb)
44 |
45 | # Reconstruct the image from the Laplacian pyramid
46 | imgComb = laplacianPyramidComb[0]
47 | for i in range(1, levels):
48 | imgComb = cv2.add(cv2.pyrUp(imgComb), laplacianPyramidComb[i])
49 |
50 | # Display the result
51 | cv2.imshow('image', imgComb)
52 | cv2.waitKey(0)
53 | cv2.destroyAllWindows()
54 |
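55 | # For comparison, the tutorial also shows the direct blending of the two
56 | # halves without pyramids, which leaves a visible seam in the middle
57 | rows, cols, dpt = imgA.shape
58 | directComb = np.hstack((imgA[:, :cols // 2], imgB[:, cols // 2:]))
59 | cv2.imshow('direct blending', directComb)
60 | cv2.waitKey(0)
61 | cv2.destroyAllWindows()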
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/smooth.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load the image
11 | img = cv2.imread('../data/opencv_logo.jpg')
12 |
13 | # Create the uniform kernel
14 | kernel = np.ones((5, 5), np.float32) / 25
15 |
16 | # Apply the different filters
17 | uniformImg = cv2.filter2D(img, -1, kernel=kernel)
18 | bluredImg = cv2.blur(img, ksize=(5, 5))
19 | gaussianImg = cv2.GaussianBlur(img, ksize=(5, 5), sigmaX=0)
20 | medianImg = cv2.medianBlur(img, ksize=5)
21 | bilateralImg = cv2.bilateralFilter(img, d=9, sigmaColor=75, sigmaSpace=75)
22 |
23 | # Display the results
24 | titles = ['original', 'uniform kernel', 'uniform blur', 'gaussian blur', 'median blur', 'bilateral blur']
25 | images = [img, uniformImg, bluredImg, gaussianImg, medianImg, bilateralImg]
26 |
27 | for i in range(len(titles)):
28 | plt.subplot(3, 2, i + 1)
29 | plt.imshow(images[i][:, :, ::-1])  # Convert from BGR to RGB for matplotlib
30 | plt.title(titles[i])
31 | plt.xticks([])
32 | plt.yticks([])
33 |
34 | plt.show()
35 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/templateMatching.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load the images in grey scale
11 | originalImg = cv2.imread('../data/messi5.jpg', 0)
12 | template = cv2.imread('../data/messi_face.jpg', 0)
13 | w, h = template.shape[::-1]
14 |
15 | # Compare all the methods
16 | methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR', 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
17 |
18 | for method in methods:
19 | # Match the template
20 | methodCode = eval(method)
21 | result = cv2.matchTemplate(originalImg, template, methodCode)
22 | minValue, maxValue, minLoc, maxLoc = cv2.minMaxLoc(result)
23 |
24 | # If the methodCode is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum position
25 | if methodCode in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
26 | topLeftCorner = minLoc
27 | else:
28 | topLeftCorner = maxLoc
29 | bottomRightCorner = (topLeftCorner[0] + w, topLeftCorner[1] + h)
30 |
31 | # Draw the square in a copy of the original image
32 | img = originalImg.copy()
33 | cv2.rectangle(img, topLeftCorner, bottomRightCorner, 255, 2)
34 |
35 | # Display the results
36 | plt.subplot(121)
37 | plt.imshow(result, cmap='gray')
38 | plt.title('Matching Result')
39 | plt.xticks([])
40 | plt.yticks([])
41 |
42 | plt.subplot(122)
43 | plt.imshow(img, cmap='gray')
44 | plt.title('Detected Point')
45 | plt.xticks([])
46 | plt.yticks([])
47 |
48 | plt.suptitle(method)
49 | plt.show()
50 |
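51 | # The tutorial also covers matching multiple objects: instead of keeping only
52 | # the best location, keep every position whose normalized score passes a
53 | # threshold (the 0.8 value below is only illustrative)
54 | result = cv2.matchTemplate(originalImg, template, cv2.TM_CCOEFF_NORMED)
55 | locations = np.where(result >= 0.8)
56 | img = originalImg.copy()
57 | for topLeftCorner in zip(*locations[::-1]):
58 |     cv2.rectangle(img, topLeftCorner, (topLeftCorner[0] + w, topLeftCorner[1] + h), 255, 2)
59 | plt.imshow(img, cmap='gray')
60 | plt.title('Multiple matches')
61 | plt.show()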
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/threshold.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 | from matplotlib import pyplot as plt
9 |
10 | # Load the image in grey scale and smooth it a bit
11 | img = cv2.imread('../data/sudoku-original.jpg', 0)
12 | img = cv2.medianBlur(img, 5)
13 |
14 | # Apply the different thresholds
15 | ret, thresh1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
16 | ret, thresh2 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
17 | ret, thresh3 = cv2.threshold(img, 127, 255, cv2.THRESH_TRUNC)
18 | ret, thresh4 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO)
19 | ret, thresh5 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO_INV)
20 | ret, thresh6 = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
21 | thresh7 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
22 | thresh8 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
23 |
24 | titles = ['Original Image', 'BINARY', 'BINARY_INV',
25 | 'TRUNC', 'TOZERO', 'TOZERO_INV',
26 | "Otsu's Thresholding", 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
27 | images = [img, thresh1, thresh2, thresh3, thresh4, thresh5, thresh6, thresh7, thresh8]
28 |
29 | for i in range(len(titles)):
30 | plt.subplot(3, 3, i + 1)
31 | plt.imshow(images[i], 'gray')
32 | plt.title(titles[i])
33 | plt.xticks([])
34 | plt.yticks([])
35 |
36 | plt.show()
37 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/imageProcessing/watershed.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the image
10 | img = cv2.imread('../data/coins.jpg')
11 |
12 | # Convert it to gray scale
13 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
14 |
15 | # Threshold the image using Otsu’s binarization
16 | ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
17 |
18 | # Remove small regions
19 | kernel = np.ones((3, 3), np.uint8)
20 | thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
21 |
22 | # Obtain the region that extends the foreground
23 | extendedFg = cv2.dilate(thresh, kernel, iterations=2)
24 |
25 | # Obtain the foreground centers
26 | distance = cv2.distanceTransform(thresh, cv2.DIST_L2, 5)
27 | ret, fgCenters = cv2.threshold(distance, 0.7 * distance.max(), 255, 0)
28 |
29 | # Find the unknown regions by subtracting the foreground centers from the extended foreground
30 | fgCenters = np.uint8(fgCenters)
31 | unknown = cv2.subtract(extendedFg, fgCenters)
32 |
33 | # Define the markers
34 | ret, markers = cv2.connectedComponents(fgCenters)
35 |
36 | # Add one to all labels so that sure background is not 0, but 1
37 | markers = markers + 1
38 |
39 | # Mark the unknown region with zero
40 | markers[unknown == 255] = 0
41 |
42 | # Apply the watershed method
43 | markers = cv2.watershed(img, markers)
44 |
45 | # Draw the boundary regions, marked with -1, on the original image
46 | img[markers == -1] = [255, 0, 0]
47 |
48 | # Display the results
49 | cv2.imshow('Separated coins', img)
50 | cv2.waitKey(0)
51 | cv2.destroyAllWindows()
52 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/machineLearning/colorQuantization.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.html
4 | '''
5 |
6 | import cv2
7 | import numpy as np
8 | import matplotlib.pyplot as plt
9 |
10 | # Load the image
11 | img = cv2.imread('../data/home.jpg')
12 |
13 | # Use the colors as the descriptors
14 | x = img.reshape(-1, 3).astype('float32')
15 |
16 | # Apply KMeans
17 | criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 10, 1.0)
18 | flags = cv2.KMEANS_RANDOM_CENTERS
19 | compactness, labels, centers = cv2.kmeans(x, K=6, bestLabels=None, criteria=criteria, attempts=10, flags=flags)
20 |
21 | # Create the new image
22 | centers = np.uint8(centers)
23 | newImage = centers[labels.ravel()]
24 | newImage = newImage.reshape(img.shape)
25 |
26 | # Display the results
27 | cv2.imshow('New image', newImage)
28 | cv2.waitKey(0)
29 | cv2.destroyAllWindows()
30 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/machineLearning/kmeans.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.html
4 | '''
5 |
6 | import cv2
7 | import numpy as np
8 | import matplotlib.pyplot as plt
9 |
10 | # Create two random 2D distributions with different mean values
11 | x1 = np.random.randint(25, 50, (25, 2))
12 | x2 = np.random.randint(60, 85, (25, 2))
13 |
14 | # Combine the two distributions
15 | x = np.vstack((x1, x2)).astype('float32')
16 |
17 | # Apply KMeans
18 | criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 10, 1.0)
19 | flags = cv2.KMEANS_RANDOM_CENTERS
20 | compactness, labels, centers = cv2.kmeans(x, K=2, bestLabels=None, criteria=criteria, attempts=10, flags=flags)
21 |
22 | # Separate the two groups
23 | A = x[labels.ravel() == 0]
24 | B = x[labels.ravel() == 1]
25 |
26 | # Display the results
27 | plt.hist(A[:, 0], bins=20, range=[20, 90], color='r')
28 | plt.hist(B[:, 0], bins=20, range=[20, 90], color='b')
29 | plt.scatter(A[:, 0], A[:, 1], color='r')
30 | plt.scatter(B[:, 0], B[:, 1], color='b')
31 | plt.scatter(centers[:, 0], centers[:, 1], s=80, color='y', marker='s')
32 | plt.xlabel('Height')
33 | plt.ylabel('Weight')
34 | plt.show()
35 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/machineLearning/knn.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_ml/py_knn/py_knn_understanding/py_knn_understanding.html
4 | '''
5 |
6 | import cv2
7 | import numpy as np
8 | import matplotlib.pyplot as plt
9 |
10 | # Set the training data (x, y) coordinates
11 | trainData = np.random.randint(0, 100, (25, 2)).astype(np.float32)
12 |
13 | # Split the training data in two groups
14 | responses = np.random.randint(0, 2, (25, 1))
15 | redGroup = trainData[responses.ravel() == 0]
16 | blueGroup = trainData[responses.ravel() == 1]
17 |
18 | # Train the kNN
19 | knn = cv2.ml.KNearest_create()
20 | knn.train(trainData, cv2.ml.ROW_SAMPLE, responses)
21 |
22 | # Create some new data and classify it
23 | newData = np.random.randint(0, 100, (1, 2)).astype(np.float32)
24 | ret, results, neighbours, dist = knn.findNearest(newData, 3)
25 |
26 | print("result: ", results)
27 | print("neighbours: ", neighbours)
28 | print("distance: ", dist)
29 |
30 | # Display the results
31 | plt.scatter(redGroup[:, 0], redGroup[:, 1], 80, 'r', '^')
32 | plt.scatter(blueGroup[:, 0], blueGroup[:, 1], 80, 'b', 's')
33 | plt.scatter(newData[:, 0], newData[:, 1], 200, 'r' if results[0] == 0 else 'b', 'o')
34 | plt.scatter(newData[:, 0], newData[:, 1], 80, 'g', 'o')
35 | plt.show()
36 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/machineLearning/knnAlphabet.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.html
4 | '''
5 |
6 | import cv2
7 | import numpy as np
8 |
9 | # Load the alphabet data
10 | data = np.loadtxt('../data/letter-recognition.data', dtype='float32', delimiter=',', converters={0: lambda ch: ord(ch) - ord('A')})
11 |
12 | # Split the data in train data and test data
13 | trainData, testData = np.vsplit(data, 2)
14 |
15 | # Split trainData and testData into labels (first column) and features
16 | trainLabels, trainData = np.hsplit(trainData, [1])
17 | testLabels, testData = np.hsplit(testData, [1])
18 |
19 | # Train the kNN
20 | knn = cv2.ml.KNearest_create()
21 | knn.train(trainData, cv2.ml.ROW_SAMPLE, trainLabels)
22 |
23 | # Test the kNN
24 | ret, result, neighbours, dist = knn.findNearest(testData, k=5)
25 |
26 | # Check the classification accuracy
27 | correctMatches = np.count_nonzero(result == testLabels)
28 | print('kNN classification accuracy:', 100.0 * correctMatches / result.size)
29 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/machineLearning/knnDigits.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.html
4 | '''
5 |
6 | import cv2
7 | import numpy as np
8 |
9 | # Load the file containing the digits data: 500 different images for each digit
10 | data = cv2.imread('../data/digits.png', 0)
11 |
12 | # Split the data in 5000 images, each of 20x20 pixels size
13 | digitImages = [np.hsplit(row, 100) for row in np.vsplit(data, 50)]
14 | print('digitImages dimensions:', str(len(digitImages)) + 'x' + str(len(digitImages[0])) + 'x' + str(digitImages[0][0].size))
15 |
16 | # Transform the Python array into a Numpy array
17 | digitImages = np.float32(digitImages)
18 |
19 | # Use the image pixel values as descriptors for the train and test data sets
20 | trainData = digitImages[:, :50].reshape(-1, digitImages[0, 0].size)
21 | testData = digitImages[:, 50:].reshape(-1, digitImages[0, 0].size)
22 |
23 | # Create the labels for the train and test data sets
24 | digits = np.arange(10)
25 | trainLabels = np.repeat(digits, trainData.shape[0] // digits.size)[:, np.newaxis]
26 | testLabels = np.repeat(digits, testData.shape[0] // digits.size)[:, np.newaxis]
27 |
28 | # Save the train data
29 | np.savez('../data/knn_data.npz', trainData=trainData, trainLabels=trainLabels)
30 |
31 | # Load the train data
32 | with np.load('../data/knn_data.npz') as data:
33 | trainData = data['trainData']
34 | trainLabels = data['trainLabels']
35 |
36 | # Train the kNN
37 | knn = cv2.ml.KNearest_create()
38 | knn.train(trainData, cv2.ml.ROW_SAMPLE, trainLabels)
39 |
40 | # Test the kNN
41 | ret, result, neighbours, dist = knn.findNearest(testData, k=5)
42 |
43 | # Check the classification accuracy
44 | correctMatches = np.count_nonzero(result == testLabels)
45 | print('kNN classification accuracy:', 100.0 * correctMatches / result.size)
46 |
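47 | # A minimal usage sketch (an addition): classify a single image by passing
48 | # one 1x400 row vector to the trained model
49 | ret, result, neighbours, dist = knn.findNearest(testData[:1], k=5)
50 | print('Predicted digit:', int(result[0, 0]), '- true digit:', int(testLabels[0, 0]))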
--------------------------------------------------------------------------------
/OpenCV-tutorials/machineLearning/svmDigits.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_ml/py_svm/py_svm_opencv/py_svm_opencv.html
4 | '''
5 |
6 | import cv2
7 | import numpy as np
8 |
9 | def deskew(img):
10 | '''
11 | Corrects the image skew
12 | '''
13 | # Calculate the image moments
14 | moments = cv2.moments(img)
15 |
16 | # Check if it's already fine
17 | if abs(moments['mu02']) < 1e-2:
18 | return img.copy()
19 |
20 | # Calculate the skew
21 | skew = moments['mu11'] / moments['mu02']
22 |
23 | # Correct the skew
24 | cellSize = int(np.sqrt(img.size))
25 | M = np.float32([[1, skew, -0.5 * cellSize * skew], [0, 1, 0]])
26 | flags = cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR
27 | return cv2.warpAffine(img, M, (cellSize, cellSize), flags=flags)
28 |
29 |
30 | def hog(img):
31 | '''
32 | Calculates the image Histograms of Oriented Gradients
33 | '''
34 | # Calculate the gradient images in polar coordinates
35 | gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
36 | gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
37 | magnitude, angle = cv2.cartToPolar(gx, gy)
38 |
39 | # Reduce the gradient angles to a fixed number of values
40 | nBins = 16
41 | angle = np.int32(nBins * angle / (2 * np.pi))
42 |
43 | # Separate the gradient images in 4 cells
44 | angleCells = angle[:10, :10], angle[10:, :10], angle[:10, 10:], angle[10:, 10:]
45 | magnitudeCells = magnitude[:10, :10], magnitude[10:, :10], magnitude[:10, 10:], magnitude[10:, 10:]
46 |
47 | # Calculate the angle histograms for each cell, weighting the angle values with the magnitude values
48 | angleHists = [np.bincount(a.ravel(), m.ravel(), nBins) for a, m in zip(angleCells, magnitudeCells)]
49 |
50 | # Return the stack of the 4 cell histograms
51 | return np.hstack(angleHists)
52 |
53 |
54 | # Load the file containing the digits data: 500 different images for each digit
55 | data = cv2.imread('../data/digits.png', 0)
56 |
57 | # Split the data in 5000 images, each of 20x20 pixels size
58 | digitImages = [np.hsplit(row, 100) for row in np.vsplit(data, 50)]
59 | print('digitImages dimensions:', str(len(digitImages)) + 'x' + str(len(digitImages[0])) + 'x' + str(digitImages[0][0].size))
60 |
61 | # Deskew the digit images
62 | digitImages = [list(map(deskew, row)) for row in digitImages]
63 |
64 | # Calculate the images HOG histograms
65 | hogHistograms = [list(map(hog, row)) for row in digitImages]
66 |
67 | # Transform the Python array into a Numpy array
68 | hogHistograms = np.float32(hogHistograms)
69 | print('HOG histogram dimensions:', hogHistograms.shape[2])
70 |
71 | # Use the hog histograms as descriptors for the train and test data sets
72 | trainData = hogHistograms[:, :50].reshape(-1, hogHistograms.shape[2])
73 | testData = hogHistograms[:, 50:].reshape(-1, hogHistograms.shape[2])
74 |
75 | # Create the labels for the train and test data sets
76 | digits = np.arange(10)
77 | trainLabels = np.repeat(digits, trainData.shape[0] // digits.size)[:, np.newaxis]
78 | testLabels = np.repeat(digits, testData.shape[0] // digits.size)[:, np.newaxis]
79 |
80 | # Train the SVM
81 | svm = cv2.ml.SVM_create()
82 | svm.setKernel(cv2.ml.SVM_LINEAR)
83 | svm.setType(cv2.ml.SVM_C_SVC)
84 | svm.setC(2.67)
85 | svm.setGamma(5.383)
86 | svm.train(trainData, cv2.ml.ROW_SAMPLE, trainLabels)
87 |
88 | # Test the result
89 | ret, result = svm.predict(testData)
90 |
91 | # Check the classification accuracy
92 | correctMatches = np.count_nonzero(result == testLabels)
93 | print('SVM classification accuracy:', 100.0 * correctMatches / result.size)
94 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/objectDetection/faceDetection.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_objdetect/py_face_detection/py_face_detection.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Load the face and eye cascade classifiers
10 | faceCascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
11 | eyeCascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_eye.xml')
12 |
13 | # Initialize the video capture
14 | cap = cv2.VideoCapture(0)
15 |
16 | # Display the capture frames
17 | while True:
18 | # Get the next frame
19 | ret, frame = cap.read()
20 |
21 | if ret:
22 | # Convert the frame to gray scale
23 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
24 |
25 | # Detect faces in the frame
26 | faces = faceCascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
27 |
28 | for x, y, w, h in faces:
29 | # Draw the face rectangle
30 | cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
31 |
32 | # Obtain the region of interest and detect the eyes
33 | roi = gray[y:y + h, x:x + w]
34 | eyes = eyeCascade.detectMultiScale(roi)
35 | eyeCounter = 0
36 |
37 | for ex, ey, ew, eh in eyes:
38 | # Draw only the first two eyes
39 | if eyeCounter > 1:
40 | break
41 |
42 | # Draw the eye rectangle
43 | cv2.rectangle(frame, (ex + x, ey + y), (ex + x + ew, ey + y + eh), (0, 255, 0), 2)
44 | eyeCounter += 1
45 |
46 | # Display the resulting frame
47 | cv2.imshow('frame', frame)
48 | k = cv2.waitKey(20) & 0xFF
49 |
50 | # User interaction
51 | if k == ord('q'):
52 | break
53 | else:
54 | break
55 |
56 | # When everything is done, release the capture and close all windows
57 | cap.release()
58 | cv2.destroyAllWindows()
59 |
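60 | # Note: the cascade paths above depend on the OpenCV install location. Recent
61 | # opencv-python packages expose the bundled files through cv2.data, e.g.:
62 | # faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')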
--------------------------------------------------------------------------------
/OpenCV-tutorials/python/numpyBasics.py:
--------------------------------------------------------------------------------
1 | '''
2 | This script basically follows this tutorial:
3 | http://scipy.github.io/old-wiki/pages/Tentative_NumPy_Tutorial.html
4 |
5 | Other useful links:
6 | http://mathesaurus.sourceforge.net/r-numpy.html
7 | http://scipy.github.io/old-wiki/pages/Numpy_Example_List.html
8 | '''
9 |
10 | import numpy as np
11 |
12 | # Create a range array
13 | a = np.arange(15)
14 |
15 | # Change the array properties (reshape returns a view when possible)
16 | a = a.reshape(3, 5)
17 |
18 | # Print some important information
19 | print(a)
20 | print(a.shape)
21 | print(a.ndim)
22 | print(a.dtype.name)
23 | print(a.itemsize)
24 | print(a.size)
25 | print(type(a))
26 |
27 | # Create another array from a python array
28 | b = np.array([6, 7, 8])
29 | print(b.data[0] == b[0])
30 |
31 | # Specify the type
32 | c = np.array([[1, 2, 3], [3, 4, 5]], dtype='float32')
33 | print(c)
34 | print(c.dtype)
35 |
36 | # Other constructors
37 | print(np.zeros((2, 3)))
38 | print(np.ones((1, 3), dtype='float'))
39 | print(np.empty((2, 4)))
40 | print(np.arange(2, 7, 0.1))
41 | print(np.linspace(1, 2, 9))
42 | print(np.random.random((2, 3)))
43 |
44 | # Basic operations
45 | a = np.array([20, 30, 40, 50])
46 | b = np.arange(4)
47 | print(a + b)
48 | print(a.sum())
49 | print(a ** 2)
50 | print(a < 35)
51 | print(a * b)
52 | print(np.dot(a, b))
53 | print(a.sum())
54 | print(dir(a))
55 |
56 | # Working with axis
57 | a = np.arange(12).reshape(3, 4)
58 | print(a)
59 | print(a.sum(axis=0))
60 | print(a.min(axis=1))
61 | print(a.cumsum(axis=1))
62 |
63 | # Accessing the data
64 | a = np.arange(20)
65 | print(a[1:10:2])
66 | print(a[-1])
67 | print(a[::-1])
68 | print([x ** 2 for x in a if x > 3])
69 |
70 | # Initialize using a function
71 | def f(x, y):
72 | return x + y
73 | a = np.fromfunction(f, (4, 5))
74 | print(a)
75 | print(a[1, 2:4])
76 | print(a[1, ...])
77 |
78 | # Modifying the shape of an array
79 | print(a.ravel())
80 | a.shape = (10, 2)
81 | print(a)
82 | print(a.reshape(5, -1))
83 |
84 | # Slicing returns a view, so changes affect the original array!!
85 | a = np.arange(20).reshape(4, 5)
86 | print(a)
87 | b = a[:, 1:3]
88 | b[:] = 10
89 | print(a)
90 |
91 | # Deep copies
92 | b = a.copy()
93 | b[:] = 20
94 | print(b[0, 0] != a[0, 0])
95 |
96 | # Use of argmin
97 | a = np.array([1, 2, 0, 2, 5, 3]).reshape(2, 3)
98 | print(a[1].argmin())
99 |
100 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/videoAnalysis/backgroundSubtraction.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Initialize the video
10 | cap = cv2.VideoCapture('../data/vtest.avi')
11 |
12 | # Create the background subtraction object
13 | method = 1
14 |
15 | if method == 0:
16 | bgSubtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
17 | elif method == 1:
18 | bgSubtractor = cv2.createBackgroundSubtractorMOG2()
19 | else:
20 | bgSubtractor = cv2.bgsegm.createBackgroundSubtractorGMG()
21 |
22 | # Create the kernel that will be used to remove the noise in the foreground mask
23 | kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
24 |
25 | # Play until the user decides to stop
26 | while True:
27 | # Get the next frame
28 | ret, frame = cap.read()
29 |
30 | if ret:
31 | # Obtain the foreground mask
32 | foregroundMask = bgSubtractor.apply(frame)
33 |
34 | # Remove part of the noise
35 | foregroundMask = cv2.morphologyEx(foregroundMask, cv2.MORPH_OPEN, kernel)
36 |
37 | # Display the mask
38 | cv2.imshow('background subtraction', foregroundMask)
39 | k = cv2.waitKey(30) & 0xff
40 |
41 | # Exit if the user presses ESC
42 | if k == 27:
43 | break
44 | else:
45 | break
46 |
47 | # When everything is done, release the capture and close all windows
48 | cap.release()
49 | cv2.destroyAllWindows()
50 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/videoAnalysis/camShift.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_video/py_meanshift/py_meanshift.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Start the webcam
10 | cap = cv2.VideoCapture(0)
11 |
12 | # Take the first frame
13 | ret, frame = cap.read()
14 | rows, cols = frame.shape[:2]
15 |
16 | # Define the initial window location at the frame center
17 | windowWidth = 150
18 | windowHeight = 200
19 | windowCol = int((cols - windowWidth) / 2)
20 | windowRow = int((rows - windowHeight) / 2)
21 | window = (windowCol, windowRow, windowWidth, windowHeight)
22 |
23 | # Get the ROI and convert it to HSV scale
24 | roi = frame[windowRow:windowRow + windowHeight, windowCol:windowCol + windowWidth]
25 | roiHsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
26 |
27 | # Mask the dark areas
28 | lowLimit = np.array((0., 60., 32.))
29 | highLimit = np.array((180., 255., 255.))
30 | mask = cv2.inRange(roiHsv, lowLimit, highLimit)
31 |
32 | # Calculate the hue histogram of the unmasked region
33 | roiHist = cv2.calcHist([roiHsv], [0], mask, [180], [0, 180])
34 | cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
35 |
36 | # Set the termination criteria: either 10 iterations are completed or the window moved less than one pixel
37 | terminationCriteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 10, 1)
38 |
39 | # Play until the user decides to stop
40 | while True:
41 | # Get the next frame
42 | ret, frame = cap.read()
43 |
44 | if ret:
45 | # Calculate the histogram back projection
46 | frameHsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
47 | backprojectedFrame = cv2.calcBackProject([frameHsv], [0], roiHist, [0, 180], 1)
48 |
49 | # Mask the dark areas to improve the results
50 | mask = cv2.inRange(frameHsv, lowLimit, highLimit)
51 | backprojectedFrame &= mask
52 |
53 | # Apply the camshift method to get the new window location and orientation
54 | ret, window = cv2.CamShift(backprojectedFrame, window, terminationCriteria)
55 |
56 | # Draw the window on the frame
57 | points = cv2.boxPoints(ret)
58 | points = np.int0(points)
59 | frame = cv2.polylines(frame, [points], True, 255, 2)
60 |
61 | # Display the resulting frame
62 | cv2.imshow('camshift', frame)
63 | k = cv2.waitKey(60) & 0xff
64 |
65 | # Exit if the user presses ESC
66 | if k == 27:
67 | break
68 | else:
69 | break
70 |
71 | # When everything is done, release the capture and close all windows
72 | cap.release()
73 | cv2.destroyAllWindows()
74 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/videoAnalysis/denseOpticalFlow.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Start the webcam
10 | cap = cv2.VideoCapture(0)
11 | # cap = cv2.VideoCapture('../data/vtest.avi')
12 |
13 | # Take the first frame and convert it to gray
14 | ret, frame = cap.read()
15 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
16 |
17 | # Create the HSV color image
18 | hsvImg = np.zeros_like(frame)
19 | hsvImg[..., 1] = 255
20 |
21 | # Play until the user decides to stop
22 | while True:
23 | # Save the previous frame data
24 | previousGray = gray
25 |
26 | # Get the next frame
27 | ret, frame = cap.read()
28 |
29 | if ret:
30 | # Convert the frame to gray scale
31 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
32 |
33 | # Calculate the dense optical flow
34 | flow = cv2.calcOpticalFlowFarneback(previousGray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
35 |
36 | # Obtain the flow magnitude and direction angle
37 | mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
38 |
39 | # Update the color image
40 | hsvImg[..., 0] = 0.5 * ang * 180 / np.pi
41 | hsvImg[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
42 | rgbImg = cv2.cvtColor(hsvImg, cv2.COLOR_HSV2BGR)
43 |
44 | # Display the resulting frame
45 | cv2.imshow('dense optical flow', np.hstack((frame, rgbImg)))
46 | k = cv2.waitKey(30) & 0xff
47 |
48 | # Exit if the user presses ESC
49 | if k == 27:
50 | break
51 | else:
52 | break
53 |
54 | # When everything is done, release the capture and close all windows
55 | cap.release()
56 | cv2.destroyAllWindows()
57 |
--------------------------------------------------------------------------------
/OpenCV-tutorials/videoAnalysis/meanShift.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_video/py_meanshift/py_meanshift.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Start the webcam
10 | cap = cv2.VideoCapture(0)
11 |
12 | # Take the first frame
13 | ret, frame = cap.read()
14 | rows, cols = frame.shape[:2]
15 |
16 | # Define the initial window location at the frame center
17 | windowWidth = 150
18 | windowHeight = 200
19 | windowCol = int((cols - windowWidth) / 2)
20 | windowRow = int((rows - windowHeight) / 2)
21 | window = (windowCol, windowRow, windowWidth, windowHeight)
22 |
23 | # Get the ROI and convert it to HSV scale
24 | roi = frame[windowRow:windowRow + windowHeight, windowCol:windowCol + windowWidth]
25 | roiHsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
26 |
27 | # Mask the dark areas
28 | lowLimit = np.array((0., 60., 32.))
29 | highLimit = np.array((180., 255., 255.))
30 | mask = cv2.inRange(roiHsv, lowLimit, highLimit)
31 |
32 | # Calculate the hue histogram of the unmasked region
33 | roiHist = cv2.calcHist([roiHsv], [0], mask, [180], [0, 180])
34 | cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
35 |
36 | # Set the termination criteria: either 10 iterations are completed or the window moved less than one pixel
37 | terminationCriteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 10, 1)
38 |
39 | # Play until the user decides to stop
40 | while True:
41 | # Get the next frame
42 | ret, frame = cap.read()
43 |
44 | if ret:
45 | # Calculate the histogram back projection
46 | frameHsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
47 | backprojectedFrame = cv2.calcBackProject([frameHsv], [0], roiHist, [0, 180], 1)
48 |
49 | # Mask the dark areas to improve the results
50 | mask = cv2.inRange(frameHsv, lowLimit, highLimit)
51 | backprojectedFrame &= mask
52 |
53 | # Apply meanshift method to get the new window location
54 | ret, window = cv2.meanShift(backprojectedFrame, window, terminationCriteria)
55 |
56 | # Draw the window on the frame
57 | windowCol, windowRow = window[:2]
58 | frame = cv2.rectangle(frame, (windowCol, windowRow), (windowCol + windowWidth, windowRow + windowHeight), 255, 2)
59 |
60 | # Display the resulting frame
61 | cv2.imshow('meanshift', frame)
62 | k = cv2.waitKey(60) & 0xff
63 |
64 | # Exit if the user press ESC
65 | if k == 27:
66 | break
67 | else:
68 | break
69 |
70 | # When everything is done, release the capture and close all windows
71 | cap.release()
72 | cv2.destroyAllWindows()
73 |
--------------------------------------------------------------------------------
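One limitation of meanShift is that the search window keeps a fixed size and orientation no matter how the tracked object moves. The same py_meanshift tutorial also covers CamShift, which adapts both; this repository has a separate camShift.py in the same folder. The fragment below is a minimal sketch of the CamShift variant of the tracking step, assuming the `backprojectedFrame`, `window`, and `terminationCriteria` variables already defined in the script above.

```python
# Inside the capture loop, replacing the cv2.meanShift call:
ret, window = cv2.CamShift(backprojectedFrame, window, terminationCriteria)

# CamShift returns a rotated rectangle ((cx, cy), (w, h), angle),
# so draw it as a four-point polygon instead of an axis-aligned box
points = cv2.boxPoints(ret).astype(np.int32)
frame = cv2.polylines(frame, [points], True, 255, 2)
```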
/OpenCV-tutorials/videoAnalysis/opticalFlow.py:
--------------------------------------------------------------------------------
1 | '''
2 | Based on the following tutorial:
3 | http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.html
4 | '''
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | # Start the webcam
10 | cap = cv2.VideoCapture(0)
11 |
12 | # Take the first frame and convert it to gray
13 | ret, frame = cap.read()
14 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
15 |
16 | # Get the Shi Tomasi corners to use them as initial reference points
17 | corners = cv2.goodFeaturesToTrack(gray, mask=None, maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
18 | cornerColors = np.random.randint(0, 255, (corners.shape[0], 3))
19 |
20 | # Create a mask image for drawing purposes
21 | mask = np.zeros_like(frame)
22 |
23 | # Define the parameters for Lucas Kanade optical flow
24 | lkParameters = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 10, 0.03))
25 |
26 | # Play until the user decides to stop
27 | while True:
28 | # Save the previous frame data
29 | previousGray = gray
30 | previousCorners = corners.reshape(-1, 1, 2)
31 |
32 | # Get the next frame
33 |     ret, frame = cap.read()
34 |
35 | if ret:
36 | # Convert the frame to gray scale
37 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
38 |
39 | # Calculate optical flow
40 | corners, st, err = cv2.calcOpticalFlowPyrLK(previousGray, gray, previousCorners, None, **lkParameters)
41 |
42 | # Select only the good corners
43 | corners = corners[st == 1]
44 | previousCorners = previousCorners[st == 1]
45 |         cornerColors = cornerColors[st.flatten() == 1]
46 |
47 | # Check that there are still some corners left
48 | if corners.shape[0] == 0:
49 | print('Stopping. There are no corners left to track')
50 | break
51 |
52 | # Draw the corner tracks
53 | for i in range(corners.shape[0]):
54 | x, y = corners[i]
55 | xPrev, yPrev = previousCorners[i]
56 | color = cornerColors[i].tolist()
57 |             frame = cv2.circle(frame, (int(x), int(y)), 5, color, -1)
58 |             mask = cv2.line(mask, (int(x), int(y)), (int(xPrev), int(yPrev)), color, 2)
59 | frame = cv2.add(frame, mask)
60 |
61 | # Display the resulting frame
62 | cv2.imshow('optical flow', frame)
63 | k = cv2.waitKey(30) & 0xff
64 |
65 | # Exit if the user press ESC
66 | if k == 27:
67 | break
68 | else:
69 | break
70 |
71 | # When everything is done, release the capture and close all windows
72 | cap.release()
73 | cv2.destroyAllWindows()
74 |
--------------------------------------------------------------------------------
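The script above simply stops once too few tracks survive. A common variant re-seeds the tracker instead of breaking out of the loop. Below is a minimal sketch of that branch, assuming the `gray`, `frame`, `corners`, `cornerColors`, and `mask` variables from the script above; the `MIN_CORNERS` threshold is a hypothetical choice, not something from the repository.

```python
# Instead of breaking when tracks die out, re-seed with fresh
# Shi-Tomasi corners and keep tracking
MIN_CORNERS = 10  # hypothetical threshold for re-seeding

if corners.shape[0] < MIN_CORNERS:
    corners = cv2.goodFeaturesToTrack(gray, mask=None, maxCorners=100,
                                      qualityLevel=0.3, minDistance=7,
                                      blockSize=7)
    cornerColors = np.random.randint(0, 255, (corners.shape[0], 3))
    mask = np.zeros_like(frame)  # clear the old tracks
    continue  # skip drawing this frame; tracking resumes on the next one
```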
/README.md:
--------------------------------------------------------------------------------
1 | # OpenCV-python-tests
2 |
3 | Just some scripts that I'm writing while learning OpenCV.
4 |
5 | I'm mostly following the [official OpenCV python tutorials](http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_tutorials.html).
6 |
--------------------------------------------------------------------------------