├── samples
│   └── data
│       ├── blox.jpg
│       ├── home.jpg
│       ├── img0.jpg
│       ├── img1.jpg
│       ├── img2.jpg
│       ├── img3.jpg
│       ├── left.jpg
│       ├── affine.jpg
│       ├── left01.jpg
│       ├── left02.jpg
│       ├── left03.jpg
│       ├── left04.jpg
│       ├── left05.jpg
│       ├── left06.jpg
│       ├── left07.jpg
│       ├── left08.jpg
│       ├── left09.jpg
│       ├── left11.jpg
│       ├── left12.jpg
│       ├── left13.jpg
│       ├── left14.jpg
│       ├── mask2.png
│       ├── messi5.jpg
│       ├── messi_2.png
│       ├── right.jpg
│       ├── sudoku.png
│       ├── vtest.avi
│       ├── butterfly.jpg
│       ├── tsukuba_l.png
│       ├── tsukuba_r.png
│       ├── chessboard.png
│       └── starry_night.jpg
├── 07-computational-photography
│   ├── 02-image-inpainting.py
│   ├── 01-image-denoising.py
│   └── 03-high-dynamic-range-hdr.py
├── environment.yml
├── 01-gui-features-in-opencv
│   ├── 01-getting-started-with-images.py
│   ├── 03-drawing-functions-in-opencv.py
│   ├── 05-trackbar-as-the-color-palette.py
│   ├── 02-getting-started-with-videos.py
│   └── 04-mouse-as-a-paint-brush.py
├── 04-feature-detection-and-description
│   ├── 02-shi-tomasi-corner-detector-and-good-features-to-track.py
│   ├── 03-introduction-to-sift-scale-invariant-feature-transform.py
│   ├── 06-brief-binary-robust-indipendent-elementary-features.py
│   ├── 05-fast-algorithm-form-corner-detection.py
│   ├── 01-harris-corner-detection.py
│   └── 04-introduction-to-surf-speeded-up-robust-features.py
├── 06-camera-calibration-and-3d-reconstruction
│   ├── 04-depth-map-from-stereo-images.py
│   ├── 03-epipolar-geometry.py
│   ├── 02-pose-estimation.py
│   └── 01-camera-calibration.py
├── .gitignore
└── 03-image-processing-in-opencv
    └── 02-geometric-transformations-of-images.py

/samples/data/blox.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/blox.jpg
--------------------------------------------------------------------------------
/samples/data/home.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/home.jpg
--------------------------------------------------------------------------------
/samples/data/img0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/img0.jpg
--------------------------------------------------------------------------------
/samples/data/img1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/img1.jpg
--------------------------------------------------------------------------------
/samples/data/img2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/img2.jpg
--------------------------------------------------------------------------------
/samples/data/img3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/img3.jpg
--------------------------------------------------------------------------------
/samples/data/left.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left.jpg
--------------------------------------------------------------------------------
/samples/data/affine.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/affine.jpg -------------------------------------------------------------------------------- /samples/data/left01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left01.jpg -------------------------------------------------------------------------------- /samples/data/left02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left02.jpg -------------------------------------------------------------------------------- /samples/data/left03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left03.jpg -------------------------------------------------------------------------------- /samples/data/left04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left04.jpg -------------------------------------------------------------------------------- /samples/data/left05.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left05.jpg -------------------------------------------------------------------------------- /samples/data/left06.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left06.jpg -------------------------------------------------------------------------------- /samples/data/left07.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left07.jpg -------------------------------------------------------------------------------- /samples/data/left08.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left08.jpg -------------------------------------------------------------------------------- /samples/data/left09.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left09.jpg -------------------------------------------------------------------------------- /samples/data/left11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left11.jpg -------------------------------------------------------------------------------- /samples/data/left12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left12.jpg -------------------------------------------------------------------------------- /samples/data/left13.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left13.jpg -------------------------------------------------------------------------------- /samples/data/left14.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/left14.jpg -------------------------------------------------------------------------------- /samples/data/mask2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/mask2.png -------------------------------------------------------------------------------- /samples/data/messi5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/messi5.jpg -------------------------------------------------------------------------------- /samples/data/messi_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/messi_2.png -------------------------------------------------------------------------------- /samples/data/right.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/right.jpg -------------------------------------------------------------------------------- /samples/data/sudoku.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/sudoku.png -------------------------------------------------------------------------------- /samples/data/vtest.avi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/vtest.avi -------------------------------------------------------------------------------- /samples/data/butterfly.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/butterfly.jpg -------------------------------------------------------------------------------- /samples/data/tsukuba_l.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/tsukuba_l.png -------------------------------------------------------------------------------- /samples/data/tsukuba_r.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/tsukuba_r.png -------------------------------------------------------------------------------- /samples/data/chessboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/chessboard.png -------------------------------------------------------------------------------- /samples/data/starry_night.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mnslarcher/opencv-python-tutorials/main/samples/data/starry_night.jpg -------------------------------------------------------------------------------- /07-computational-photography/02-image-inpainting.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | 3 | img = cv.imread("../samples/data/messi_2.png") 4 | mask = cv.imread("../samples/data/mask2.png", 0) 5 | 6 | 7 | dst = cv.inpaint(img, mask, 3, cv.INPAINT_TELEA) 8 | 9 | cv.imshow("dst", dst) 10 | cv.waitKey(0) 11 | cv.destroyAllWindows() 12 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | # conda env create --prefix ./env --file environment.yml # create the environment 2 | # conda activate ./env # activate the environment 3 | # conda env update --prefix ./env --file environment.yml --prune # update the environment 4 | # conda deactivate # deactivate the environment 5 | channels: 6 | - conda-forge 7 | dependencies: 8 | - black 9 | - isort 10 | - matplotlib 11 | - notebook 12 | - opencv 13 | -------------------------------------------------------------------------------- /01-gui-features-in-opencv/01-getting-started-with-images.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import cv2 as cv 4 | 5 | img = cv.imread("../samples/data/starry_night.jpg") 6 | 7 | if img is None: 8 | sys.exit("Could not read the image.") 9 | 10 | cv.imshow("Display window", img) 11 | # Delay in milliseconds. 0 is the special value meaning "forever". When the user presses a key, the window is destroyed. 12 | # The return value is the key that was pressed. 
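# Optional, more defensive variant: on some systems waitKey() returns extra state
# bits along with the key code (the mouse tutorial in this repo mentions NumLock),
# so a common pattern is to mask the result down to the low 8 bits:
#
#     k = cv.waitKey(0) & 0xFF
#
# Either form works here; the masked one just makes the ord("s") comparison below
# robust across platforms.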
13 | k = cv.waitKey(0)
14 | if k == ord("s"):
15 |     cv.imwrite("starry_night.png", img)
16 | 
--------------------------------------------------------------------------------
/04-feature-detection-and-description/02-shi-tomasi-corner-detector-and-good-features-to-track.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | 
4 | img = cv.imread("../samples/data/blox.jpg")
5 | gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
6 | 
7 | # maxCorners = 25
8 | # qualityLevel = 0.01
9 | # minDistance = 10
10 | corners = cv.goodFeaturesToTrack(gray, 25, 0.01, 10)
11 | corners = np.intp(  # Integer used for indexing (same as C ssize_t; normally either int32 or int64)
12 |     corners
13 | )
14 | 
15 | for i in corners:
16 |     x, y = i.ravel()  # Flatten
17 |     cv.circle(img, (x, y), 3, (0, 0, 255), -1)
18 | 
19 | cv.imshow("image", img)
20 | cv.waitKey(0)
21 | cv.destroyAllWindows()
22 | 
--------------------------------------------------------------------------------
/04-feature-detection-and-description/03-introduction-to-sift-scale-invariant-feature-transform.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | import cv2 as cv
4 | import numpy as np
5 | 
6 | DATA_DIR = os.path.join(
7 |     os.sep, *os.path.realpath(__file__).split(os.sep)[:-2], "samples", "data"
8 | )
9 | 
10 | filename = os.path.join(DATA_DIR, "home.jpg")
11 | img = cv.imread(filename)
12 | gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
13 | 
14 | sift = cv.SIFT_create()
15 | kp = sift.detect(gray, None)
16 | 
17 | dst = cv.drawKeypoints(gray, kp, img)
18 | 
19 | cv.imshow("sift_keypoints", dst)
20 | cv.waitKey()
21 | cv.destroyAllWindows()
22 | 
23 | dst = cv.drawKeypoints(gray, kp, img, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
24 | cv.imshow("sift_keypoints", dst)
25 | cv.waitKey()
26 | cv.destroyAllWindows()
27 | 
--------------------------------------------------------------------------------
/04-feature-detection-and-description/06-brief-binary-robust-indipendent-elementary-features.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | import cv2 as cv
4 | import numpy as np
5 | from matplotlib import pyplot as plt
6 | 
7 | DATA_DIR = os.path.join(
8 |     os.sep, *os.path.realpath(__file__).split(os.sep)[:-2], "samples", "data"
9 | )
10 | 
11 | filename = os.path.join(DATA_DIR, "blox.jpg")
12 | 
13 | img = cv.imread(filename, 0)
14 | 
15 | # Initiate STAR detector
16 | star = cv.xfeatures2d.StarDetector_create()
17 | 
18 | # Initiate BRIEF extractor
19 | brief = cv.xfeatures2d.BriefDescriptorExtractor_create()
20 | 
21 | # Find the keypoints with STAR
22 | kp = star.detect(img, None)
23 | 
24 | # Compute the descriptors with BRIEF
25 | kp, des = brief.compute(img, kp)
26 | 
27 | print(brief.descriptorSize())
28 | print(des.shape)
29 | 
--------------------------------------------------------------------------------
/01-gui-features-in-opencv/03-drawing-functions-in-opencv.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | 
4 | # Create a black image
5 | img = np.zeros((512, 512, 3), np.uint8)
6 | 
7 | # Draw a diagonal blue line with thickness of 5 px
8 | cv.line(img, (0, 0), (511, 511), (255, 0, 0), 5)
9 | 
10 | cv.rectangle(img, (384, 0), (510, 128), (0, 255, 0), 3)
11 | 
12 | cv.circle(img, (447, 63), 63, (0, 0, 255), -1)
13 | 
14 | # img, center, axes, angle, startAngle, endAngle (180 = only the first half), color, thickness
15 | cv.ellipse(img, (256, 256), (100, 50), 0, 0, 180, 255, -1)
16 | 
17 | pts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)
18 | pts = pts.reshape((-1, 1, 2))
19 | cv.polylines(img, [pts], True, (0, 255, 255))  # True = close the shape
20 | 
21 | font = cv.FONT_HERSHEY_SIMPLEX
22 | # img, text, bottom-left, font type, font scale, color, thickness, line type
23 | cv.putText(img, "OpenCV", (10, 500), font, 4, (255, 255, 255), 2, cv.LINE_AA)
24 | 
25 | cv.imshow("Display window", img)
26 | cv.waitKey(0)
27 | 
--------------------------------------------------------------------------------
/01-gui-features-in-opencv/05-trackbar-as-the-color-palette.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | 
4 | 
5 | def nothing(x):
6 |     pass
7 | 
8 | 
9 | # Create a black image, a window
10 | img = np.zeros((300, 512, 3), np.uint8)
11 | cv.namedWindow("image")
12 | 
13 | # create trackbars for color change
14 | cv.createTrackbar("R", "image", 0, 255, nothing)
15 | cv.createTrackbar("G", "image", 0, 255, nothing)
16 | cv.createTrackbar("B", "image", 0, 255, nothing)
17 | 
18 | # create switch for ON/OFF functionality
19 | switch = "0 : OFF \n1 : ON"
20 | cv.createTrackbar(switch, "image", 0, 1, nothing)
21 | 
22 | while True:
23 |     cv.imshow("image", img)
24 |     k = cv.waitKey(1) & 0xFF
25 |     if k == 27:
26 |         break
27 | 
28 |     # get current positions of four trackbars
29 |     r = cv.getTrackbarPos("R", "image")
30 |     g = cv.getTrackbarPos("G", "image")
31 |     b = cv.getTrackbarPos("B", "image")
32 |     s = cv.getTrackbarPos(switch, "image")
33 | 
34 |     if s == 0:
35 |         img[:] = 0
36 |     else:
37 |         img[:] = [b, g, r]  # imshow expects BGR channel order, so blue goes first
38 | 
39 | cv.destroyAllWindows()
40 | 
--------------------------------------------------------------------------------
/06-camera-calibration-and-3d-reconstruction/04-depth-map-from-stereo-images.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | from matplotlib import pyplot as plt
4 | 
5 | # Depth Map from Stereo Images
6 | 
7 | # disparity = x - x' = B * f / Z
8 | 
9 | imgL = cv.imread("../samples/data/tsukuba_l.png", 0)
10 | imgR = cv.imread("../samples/data/tsukuba_r.png", 0)
11 | 
12 | # numDisparities: the disparity search range. For each pixel, the algorithm will find the best disparity from 0 (default
13 | # minimum disparity) to numDisparities. The search range can then be shifted by changing the minimum disparity.
14 | 
15 | # blockSize: the linear size of the blocks compared by the algorithm. The size should be odd (as the block is centered
16 | # at the current pixel). A larger block size implies a smoother, though less accurate, disparity map. A smaller block
17 | # size gives a more detailed disparity map, but there is a higher chance for the algorithm to find a wrong correspondence.
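# A small aside on reading the output: StereoBM returns disparities as 16-bit
# fixed-point values with 4 fractional bits (i.e. scaled by 16), so a minimal
# sketch like the following converts them to real-valued pixel disparities before
# computing depth:
#
#     disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0
#
# The plain plt.imshow() call below is fine for a quick look either way, since
# matplotlib rescales the displayed range automatically.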
18 | stereo = cv.StereoBM_create(numDisparities=16, blockSize=15) 19 | disparity = stereo.compute(imgL, imgR) 20 | plt.imshow(disparity, "gray") 21 | plt.show() 22 | -------------------------------------------------------------------------------- /04-feature-detection-and-description/05-fast-algorithm-form-corner-detection.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import cv2 as cv 4 | 5 | DATA_DIR = os.path.join( 6 | os.sep, *os.path.realpath(__file__).split(os.sep)[:-2], "samples", "data" 7 | ) 8 | 9 | filename = os.path.join(DATA_DIR, "blox.jpg") 10 | img = cv.imread(filename, 0) 11 | 12 | # Initiate FAST object with default values 13 | fast = cv.FastFeatureDetector_create() 14 | 15 | # find and draw the keypoints 16 | kp = fast.detect(img, None) 17 | img2 = cv.drawKeypoints(img, kp, None, color=(255, 0, 0)) 18 | 19 | # Print all default params 20 | print(f"Threshold: {fast.getThreshold()}") 21 | print(f"Non-max suppression: {fast.getNonmaxSuppression()}") 22 | print(f"Neighborhood: {fast.getType()}") 23 | print(f"Total keypoints with non-max suppression: {len(kp)}") 24 | 25 | cv.imshow("fast_true", img2) 26 | cv.waitKey() 27 | cv.destroyAllWindows() 28 | 29 | # Disable non-max suppression 30 | fast.setNonmaxSuppression(0) 31 | kp = fast.detect(img, None) 32 | 33 | print(f"Total keypoints without non-max suppression: {len(kp)}") 34 | 35 | img3 = cv.drawKeypoints(img, kp, None, color=(255, 0, 0)) 36 | 37 | cv.imshow("fast_False", img3) 38 | cv.waitKey() 39 | cv.destroyAllWindows() 40 | -------------------------------------------------------------------------------- /07-computational-photography/01-image-denoising.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | img = cv.imread("../samples/data/left.jpg") 6 | gaussian_noise = np.random.normal(scale=0.5, size=img.shape).astype(img.dtype) 7 | img = cv.add(img, gaussian_noise) 8 | 9 | dst = cv.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21) 10 | 11 | plt.subplot(121) 12 | plt.imshow(img) 13 | 14 | plt.subplot(122) 15 | plt.imshow(dst) 16 | 17 | plt.show() 18 | 19 | cap = cv.VideoCapture("../samples/data/vtest.avi") 20 | 21 | # Create a list of first 5 frames 22 | img = [cap.read()[1] for i in range(5)] 23 | 24 | # Convert all to grayscale 25 | gray = [cv.cvtColor(i, cv.COLOR_BGR2GRAY) for i in img] 26 | 27 | # Convert all to float64 28 | gray = [np.float64(i) for i in gray] 29 | 30 | # Create a noise of variance 100 31 | noise = np.random.randn(*gray[1].shape) * 10 32 | 33 | # Add this noise to images 34 | noisy = [i + noise for i in gray] 35 | 36 | # Convert back to uint8 37 | noisy = [np.uint8(np.clip(i, 0, 255)) for i in noisy] 38 | 39 | # Denoise 3rd frame considering all the 5 frames 40 | dst = cv.fastNlMeansDenoisingMulti(noisy, 2, 5, None, 4, 7, 35) 41 | 42 | plt.subplot(131) 43 | plt.imshow(gray[2], "gray") 44 | 45 | plt.subplot(132) 46 | plt.imshow(noisy[2], "gray") 47 | 48 | plt.subplot(133) 49 | plt.imshow(dst, "gray") 50 | 51 | plt.show() 52 | -------------------------------------------------------------------------------- /04-feature-detection-and-description/01-harris-corner-detection.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import cv2 as cv 4 | import numpy as np 5 | 6 | DATA_DIR = os.path.join( 7 | os.sep, *os.path.realpath(__file__).split(os.sep)[:-2], "samples", "data" 
8 | )
9 | 
10 | filename = os.path.join(DATA_DIR, "chessboard.png")
11 | print(filename)
12 | img = cv.imread(filename)
13 | gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
14 | 
15 | gray = np.float32(gray)
16 | dst = cv.cornerHarris(gray, 2, 3, 0.04)
17 | 
18 | # Result is dilated for marking the corners, not important
19 | dst = cv.dilate(dst, None)
20 | 
21 | # Threshold for an optimal value, it may vary depending on the image
22 | img[dst > 0.01 * dst.max()] = [0, 0, 255]
23 | 
24 | cv.imshow("dst", img)
25 | if cv.waitKey(0) & 0xFF == 27:
26 |     cv.destroyAllWindows()
27 | 
28 | 
29 | ret, dst = cv.threshold(dst, 0.01 * dst.max(), 255, 0)
30 | dst = np.uint8(dst)
31 | 
32 | # Find centroids
33 | ret, labels, stats, centroids = cv.connectedComponentsWithStats(dst)
34 | 
35 | # Define the criteria to stop and refine the corners
36 | criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 0.001)
37 | corners = cv.cornerSubPix(gray, np.float32(centroids), (5, 5), (-1, -1), criteria)
38 | 
39 | # Now draw them
40 | res = np.hstack((centroids, corners))
41 | res = np.intp(res)  # integer type used for indexing (formerly the np.int0 alias)
42 | img[res[:, 1], res[:, 0]] = [0, 0, 255]
43 | img[res[:, 3], res[:, 2]] = [0, 255, 0]
44 | 
45 | cv.imshow("subpixel5", img)
46 | if cv.waitKey(0) & 0xFF == 27:
47 |     cv.destroyAllWindows()
48 | 
--------------------------------------------------------------------------------
/04-feature-detection-and-description/04-introduction-to-surf-speeded-up-robust-features.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | import cv2 as cv
4 | import matplotlib.pyplot as plt
5 | 
6 | DATA_DIR = os.path.join(
7 |     os.sep, *os.path.realpath(__file__).split(os.sep)[:-2], "samples", "data"
8 | )
9 | 
10 | filename = os.path.join(DATA_DIR, "butterfly.jpg")
11 | 
12 | img = cv.imread(filename, 0)
13 | 
14 | # Create SURF object. You can specify params here or later.
15 | # Here I set Hessian Threshold to 400
16 | surf = cv.xfeatures2d.SURF_create(400)
17 | 
18 | # Find keypoints and descriptors directly
19 | kp, des = surf.detectAndCompute(img, None)
20 | 
21 | print(len(kp))
22 | 
23 | # Check present Hessian threshold
24 | print(surf.getHessianThreshold())
25 | 
26 | # We set it to 50000. Remember, this is just for showing fewer keypoints in the picture.
27 | # In actual cases, it is better to have a value of 300-500
28 | surf.setHessianThreshold(50000)
29 | # Again compute keypoints and check their number
30 | kp, des = surf.detectAndCompute(img, None)
31 | 
32 | print(len(kp))
33 | 
34 | img2 = cv.drawKeypoints(img, kp, None, (255, 0, 0), 4)
35 | 
36 | plt.imshow(img2)
37 | plt.show()
38 | 
39 | # Check upright flag, if it is False, set it to True
40 | print(surf.getUpright())
41 | 
42 | surf.setUpright(True)
43 | 
44 | # Recompute the feature points and draw them
45 | kp = surf.detect(img, None)
46 | img2 = cv.drawKeypoints(img, kp, None, (255, 0, 0), 4)
47 | 
48 | plt.imshow(img2)
49 | plt.show()
50 | 
51 | # Find size of descriptor
52 | print(surf.descriptorSize())
53 | 
54 | # That means the "extended" flag is False.
55 | print(surf.getExtended())
56 | 
57 | # So we set it to True to get 128-dim descriptors.
58 | surf.setExtended(True) 59 | kp, des = surf.detectAndCompute(img, None) 60 | print(surf.descriptorSize()) 61 | 62 | print(des.shape) 63 | -------------------------------------------------------------------------------- /01-gui-features-in-opencv/02-getting-started-with-videos.py: -------------------------------------------------------------------------------- 1 | ############################# 2 | # Capture Video from Camera # 3 | ############################# 4 | 5 | import cv2 as cv 6 | 7 | cap = cv.VideoCapture(0) 8 | if not cap.isOpened(): 9 | print("Cannot open camera") 10 | exit() 11 | 12 | while True: 13 | # Capture frame-by-frame 14 | ret, frame = cap.read() 15 | 16 | # if frame is read correctly ret is True 17 | if not ret: 18 | print("Can't receive frame (stream end?). Exiting ...") 19 | break 20 | # Our operations on the frame come here 21 | gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) 22 | # Display the resulting frame 23 | cv.imshow("frame", gray) 24 | if cv.waitKey(1) == ord("q"): 25 | break 26 | 27 | # When everything done, release the capture 28 | cap.release() 29 | cv.destroyAllWindows() 30 | 31 | 32 | ########################### 33 | # Playing Video from file # 34 | ########################### 35 | 36 | cap = cv.VideoCapture("../samples/data/vtest.avi") 37 | 38 | while cap.isOpened(): 39 | ret, frame = cap.read() 40 | 41 | # if frame is read correctly ret is True 42 | if not ret: 43 | print("Can't receive frame (stream end?). Exiting ...") 44 | break 45 | 46 | gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) 47 | 48 | cv.imshow("frame", gray) 49 | if cv.waitKey(25) == ord("q"): 50 | break 51 | 52 | cap.release() 53 | cv.destroyAllWindows() 54 | 55 | 56 | ################## 57 | # Saving a Video # 58 | ################## 59 | 60 | cap = cv.VideoCapture(0) 61 | 62 | # Define the codec and create VideoWriter object 63 | fourcc = cv.VideoWriter_fourcc(*"XVID") 64 | out = cv.VideoWriter("../outputs/output.avi", fourcc, 20.0, (640, 480)) 65 | 66 | while cap.isOpened(): 67 | ret, frame = cap.read() 68 | if not ret: 69 | print("Can't receive frame (stream end?). Exiting ...") 70 | break 71 | 72 | frame = cv.flip(frame, 0) 73 | # write the flipped frame 74 | out.write(frame) 75 | 76 | cv.imshow("frame", frame) 77 | if cv.waitKey(1) == ord("q"): 78 | break 79 | # Release everything if job is finished 80 | cap.release() 81 | out.release() 82 | cv.destroyAllWindows() 83 | -------------------------------------------------------------------------------- /07-computational-photography/03-high-dynamic-range-hdr.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import cv2 as cv 4 | import matplotlib.pyplot as plt 5 | import numpy as np 6 | 7 | # 1. Loading exposure images into a list 8 | img_fn = ["img0.jpg", "img1.jpg", "img2.jpg", "img3.jpg"] 9 | img_list = [cv.imread(os.path.join("../samples/data", fn)) for fn in img_fn] 10 | exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32) 11 | 12 | # 2. Merge exposures into HDR image 13 | 14 | merge_debevec = cv.createMergeDebevec() 15 | hdr_debevec = merge_debevec.process(img_list, times=exposure_times.copy()) 16 | 17 | merge_robertson = cv.createMergeRobertson() 18 | hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy()) 19 | 20 | # 3. 
Tonemap HDR image
21 | 
22 | # Tonemap HDR image
23 | tonemap1 = cv.createTonemap(gamma=2.2)
24 | res_debevec = tonemap1.process(hdr_debevec.copy())
25 | res_robertson = tonemap1.process(hdr_robertson.copy())
26 | 
27 | # 4. Merge exposures using Mertens fusion
28 | 
29 | # Exposure fusion using Mertens
30 | merge_mertens = cv.createMergeMertens()
31 | res_mertens = merge_mertens.process(img_list)
32 | 
33 | # 5. Convert to 8-bit and save
34 | 
35 | # Convert datatype to 8-bit and save
36 | res_debevec_8bit = np.clip(res_debevec * 255, 0, 255).astype("uint8")
37 | res_robertson_8bit = np.clip(res_robertson * 255, 0, 255).astype("uint8")
38 | res_mertens_8bit = np.clip(res_mertens * 255, 0, 255).astype("uint8")
39 | 
40 | plt.subplot(221)
41 | plt.imshow(cv.cvtColor(res_debevec_8bit, cv.COLOR_BGR2RGB))
42 | 
43 | plt.subplot(222)
44 | plt.imshow(cv.cvtColor(res_robertson_8bit, cv.COLOR_BGR2RGB))
45 | 
46 | plt.subplot(223)
47 | plt.imshow(cv.cvtColor(res_mertens_8bit, cv.COLOR_BGR2RGB))
48 | 
49 | plt.show()
50 | 
51 | # Estimating Camera Response Function
52 | 
53 | cal_debevec = cv.createCalibrateDebevec()
54 | crf_debevec = cal_debevec.process(img_list, times=exposure_times)
55 | hdr_debevec = merge_debevec.process(
56 |     img_list, times=exposure_times.copy(), response=crf_debevec.copy()
57 | )
58 | 
59 | cal_robertson = cv.createCalibrateRobertson()
60 | crf_robertson = cal_robertson.process(img_list, times=exposure_times)
61 | hdr_robertson = merge_robertson.process(
62 |     img_list, times=exposure_times.copy(), response=crf_robertson.copy()
63 | )
64 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 | 
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 | 
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 | 
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 | 
54 | # Translations
55 | *.mo
56 | *.pot
57 | 
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 | 
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 | 
68 | # Scrapy stuff:
69 | .scrapy
70 | 
71 | # Sphinx documentation
72 | docs/_build/
73 | 
74 | # PyBuilder
75 | target/
76 | 
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 | 
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 | 
84 | # pyenv
85 | .python-version
86 | 
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # Output dir 132 | outputs 133 | 134 | # VS Code 135 | .vscode 136 | -------------------------------------------------------------------------------- /01-gui-features-in-opencv/04-mouse-as-a-paint-brush.py: -------------------------------------------------------------------------------- 1 | ############### 2 | # Simple Demo # 3 | ############### 4 | 5 | import cv2 as cv 6 | import numpy as np 7 | 8 | events = [i for i in dir(cv) if i.startswith("EVENT_")] 9 | print(events) 10 | 11 | 12 | def draw_circle(event, x, y, flags, param): 13 | if ( 14 | event == cv.EVENT_LBUTTONDBLCLK 15 | ): # indicates that left mouse button is double clicked 16 | cv.circle(img, (x, y), 100, (255, 0, 0), -1) 17 | 18 | 19 | # Create a black image, a window and bind the function to window 20 | img = np.zeros((512, 512, 3), np.uint8) 21 | cv.namedWindow("image") 22 | cv.setMouseCallback("image", draw_circle) # window name, mouse callback 23 | 24 | while True: 25 | cv.imshow("image", img) 26 | k = cv.waitKey(20) 27 | # 0xFF = 11111111 in binary, with & it cancels everything after the first 8 bits 28 | # 27 = ord(ESC), can be different with NumLock active, &0xFF solve the problem 29 | if k & 0xFF == 27: 30 | break 31 | 32 | cv.destroyAllWindows() 33 | 34 | 35 | ###################### 36 | # More Advanced Demo # 37 | ###################### 38 | 39 | drawing = False # True if mouse is pressed 40 | mode = True # if True draw rectangle. Press 'm' to toggle to curve 41 | ix, iy = -1, -1 42 | 43 | # mouse callback function 44 | def draw_circle(event, x, y, flags, param): 45 | global ix, iy, drawing, mode 46 | 47 | if ( 48 | event 49 | == cv.EVENT_LBUTTONDOWN # indicates that the left mouse button is pressed. 50 | ): 51 | drawing = True 52 | ix, iy = x, y 53 | # EVENT_MOUSEMOVE indicates that the mouse pointer has moved over the window. 54 | # If this part is removed, nothing is drawn until the left mouse button is released. 55 | elif event == cv.EVENT_MOUSEMOVE: 56 | if drawing == True: 57 | if mode == True: 58 | cv.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1) 59 | else: 60 | cv.circle(img, (x, y), 5, (0, 0, 255), -1) 61 | elif event == cv.EVENT_LBUTTONUP: # indicates that left mouse button is released. 
62 |         drawing = False
63 |         if mode == True:
64 |             cv.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
65 |         else:
66 |             cv.circle(img, (x, y), 5, (0, 0, 255), -1)
67 | 
68 | 
69 | img = np.zeros((512, 512, 3), np.uint8)
70 | cv.namedWindow("image")
71 | cv.setMouseCallback("image", draw_circle)
72 | 
73 | while True:
74 |     cv.imshow("image", img)
75 |     k = cv.waitKey(1) & 0xFF
76 |     if k == ord("m"):
77 |         mode = not mode
78 |     elif k == 27:
79 |         break
80 | 
81 | cv.destroyAllWindows()
82 | 
--------------------------------------------------------------------------------
/06-camera-calibration-and-3d-reconstruction/03-epipolar-geometry.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | 
5 | img1 = cv.imread("../samples/data/left.jpg", 0)  # queryimage, left image
6 | img2 = cv.imread("../samples/data/right.jpg", 0)  # trainimage, right image
7 | 
8 | sift = cv.SIFT_create()
9 | 
10 | # Find the keypoints and descriptors with SIFT
11 | kp1, des1 = sift.detectAndCompute(img1, None)
12 | kp2, des2 = sift.detectAndCompute(img2, None)
13 | 
14 | # FLANN parameters
15 | FLANN_INDEX_KDTREE = 1
16 | index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
17 | search_params = dict(checks=50)
18 | 
19 | flann = cv.FlannBasedMatcher(index_params, search_params)
20 | matches = flann.knnMatch(des1, des2, k=2)
21 | 
22 | pts1 = []
23 | pts2 = []
24 | 
25 | # Ratio test as per Lowe's paper
26 | # Note: m, n are the first and second best match
27 | for i, (m, n) in enumerate(matches):
28 |     if m.distance < 0.8 * n.distance:
29 |         pts2.append(kp2[m.trainIdx].pt)
30 |         pts1.append(kp1[m.queryIdx].pt)
31 | 
32 | 
33 | pts1 = np.int32(pts1)
34 | pts2 = np.int32(pts2)
35 | 
36 | # FM_LMEDS: the least-median-of-squares algorithm; it needs N >= 8 point pairs.
37 | F, mask = cv.findFundamentalMat(pts1, pts2, cv.FM_LMEDS)
38 | # mask: optional output mask set by a robust method (RANSAC or LMedS) that indicates inliers.
39 | # Vector of the same length as the number of points.
40 | 
41 | # We select only inlier points
42 | pts1 = pts1[mask.ravel() == 1]
43 | pts2 = pts2[mask.ravel() == 1]
44 | 
45 | 
46 | def drawlines(img1, img2, lines, pts1, pts2):
47 |     """
48 |     Args:
49 |         img1: Image on which we draw the epilines for the points in img2.
50 |         lines: Corresponding epilines.
51 | """ 52 | c = img1.shape[1] 53 | img1 = cv.cvtColor(img1, cv.COLOR_GRAY2BGR) 54 | img2 = cv.cvtColor(img2, cv.COLOR_GRAY2BGR) 55 | for r, pt1, pt2 in zip(lines, pts1, pts2): 56 | color = tuple(np.random.randint(0, 255, 3).tolist()) 57 | x0, y0 = map(int, [0, -r[2] / r[1]]) 58 | x1, y1 = map(int, [c, -((r[2] + r[0] * c) / r[1])]) 59 | img1 = cv.line(img1, (x0, y0), (x1, y1), color, 1) 60 | img2 = cv.circle(img1, tuple(pt1), 5, color, -1) 61 | img2 = cv.circle(img1, tuple(pt2), 5, color, -1) 62 | 63 | return img1, img2 64 | 65 | 66 | # Find epilines corresponding to points in right image (second image) and 67 | # drawing its lines on left image 68 | lines1 = cv.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F) 69 | lines1 = lines1.reshape(-1, 3) 70 | img5, img6 = drawlines(img1, img2, lines1, pts1, pts2) 71 | 72 | # Find epilines corresponding to points in left image (first image) and 73 | # drawing its lines on right image 74 | lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F) 75 | lines2 = lines2.reshape(-1, 3) 76 | img3, img4 = drawlines(img2, img1, lines2, pts2, pts1) 77 | 78 | plt.subplot(121) 79 | plt.imshow(img5) 80 | 81 | plt.subplot(122) 82 | plt.imshow(img3) 83 | 84 | plt.show() 85 | -------------------------------------------------------------------------------- /03-image-processing-in-opencv/02-geometric-transformations-of-images.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | # Scaling 6 | 7 | # Note: use cv.INTER_AREA for shrinking and cv.INTER_CUBIC (slow) or cv.INTER_LINEAR (fast, default) for zooming. 8 | 9 | img = cv.imread("../samples/data/messi5.jpg") 10 | res = cv.resize(img, None, fx=2, fy=2, interpolation=cv.INTER_CUBIC) 11 | 12 | height, width = img.shape[:2] 13 | res = cv.resize(img, (2 * width, 2 * height), interpolation=cv.INTER_CUBIC) 14 | 15 | img_padded = cv.copyMakeBorder(img, 0, height, 0, 0, cv.BORDER_CONSTANT) 16 | 17 | cv.imshow("image", cv.hconcat([img_padded, res])) 18 | cv.waitKey(0) 19 | cv.destroyAllWindows() 20 | 21 | 22 | # Translation 23 | 24 | img = cv.imread("../samples/data/messi5.jpg", 0) # 0 = grayscale 25 | rows, cols = img.shape 26 | 27 | # Translation matrix with tx = 100, ty = 50 28 | M = np.float32([[1, 0, 100], [0, 1, 50]]) 29 | dst = cv.warpAffine(img, M, (cols, rows)) # third arg is width, height 30 | 31 | cv.imshow("image", cv.hconcat([img, dst])) 32 | cv.waitKey(0) 33 | cv.destroyAllWindows() 34 | 35 | 36 | # Rotation 37 | 38 | # Rotation by a theta angle: 39 | # [[cosO, -sinO], 40 | # [sinO, cosO]] 41 | 42 | # Scaled rotation with adjustable center of rotation 43 | # [[a, b, (1 - a) * center_x - b * center_y], 44 | # [-b, a, b * center_x + (1 - a) * center_y]] 45 | 46 | img = cv.imread("../samples/data/messi5.jpg", 0) 47 | rows, cols = img.shape 48 | 49 | # cols - 1 and rows - 1 are the coordinates limits 50 | # center, angle, scale 51 | M = cv.getRotationMatrix2D(((cols - 1) / 2.0, (rows - 1) / 2.0), 90, 1) 52 | dst = cv.warpAffine(img, M, (cols, rows)) 53 | 54 | cv.imshow("image", cv.hconcat([img, dst])) 55 | cv.waitKey(0) 56 | cv.destroyAllWindows() 57 | 58 | 59 | # Affine transformation 60 | 61 | # [[A, t], 62 | # [0^T, 1] 63 | # Invariance: parallelism, volume ratio 64 | 65 | img = cv.imread("../samples/data/sudoku.png") 66 | rows, cols, ch = img.shape 67 | 68 | pts1 = np.float32([[50, 50], [200, 50], [50, 200]]) 69 | pts2 = np.float32([[10, 100], [200, 50], [100, 250]]) 70 | 71 
71 | for pt in pts1:
72 |     img = cv.circle(img, pt.astype(int), 5, color=(0, 255, 0), thickness=-1)
73 | 
74 | M = cv.getAffineTransform(pts1, pts2)
75 | dst = cv.warpAffine(img, M, (cols, rows))
76 | 
77 | plt.figure("Affine transformation")
78 | plt.subplot(121)
79 | plt.imshow(img)
80 | plt.title("Input")
81 | 
82 | plt.subplot(122)
83 | plt.imshow(dst)
84 | plt.title("Output")
85 | 
86 | plt.show()
87 | 
88 | 
89 | # Perspective Transformation
90 | 
91 | # [[A, t],
92 | #  [a^T, v]]
93 | # Invariance: plane intersection and tangency
94 | 
95 | img = cv.imread("../samples/data/sudoku.png")
96 | rows, cols, ch = img.shape
97 | 
98 | pts1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
99 | pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
100 | 
101 | for pt in pts1:
102 |     img = cv.circle(img, pt.astype(int), 5, color=(0, 255, 0), thickness=-1)
103 | 
104 | M = cv.getPerspectiveTransform(pts1, pts2)
105 | dst = cv.warpPerspective(img, M, (cols, rows))
106 | 
107 | plt.figure("Perspective Transformation")
108 | plt.subplot(121)
109 | plt.imshow(img)
110 | plt.title("Input")
111 | 
112 | plt.subplot(122)
113 | plt.imshow(dst)
114 | plt.title("Output")
115 | 
116 | plt.show()
117 | 
--------------------------------------------------------------------------------
/06-camera-calibration-and-3d-reconstruction/02-pose-estimation.py:
--------------------------------------------------------------------------------
1 | import glob
2 | 
3 | import cv2 as cv
4 | import numpy as np
5 | 
6 | # Camera Calibration
7 | 
8 | criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
9 | objp = np.zeros((6 * 7, 3), np.float32)
10 | objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
11 | objpts = []
12 | imgpts = []
13 | images = glob.glob("../samples/data/left*")
14 | 
15 | for fname in images:
16 |     img = cv.imread(fname)
17 |     gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
18 |     ret, corners = cv.findChessboardCorners(gray, (7, 6), None)
19 |     if ret:
20 |         objpts.append(objp)
21 |         corners = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
22 |         imgpts.append(corners)
23 | 
24 | ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(
25 |     objpts, imgpts, gray.shape[::-1], None, None
26 | )
27 | 
28 | 
29 | # Draw
30 | 
31 | 
32 | def draw(img, corners, imgpts):
33 |     corner = tuple(corners[0].ravel())
34 |     img = cv.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)
35 |     img = cv.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)
36 |     img = cv.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)
37 |     return img
38 | 
39 | 
40 | criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
41 | objp = np.zeros((6 * 7, 3), np.float32)
42 | objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
43 | 
44 | axis = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, -3]]).reshape(-1, 3)
45 | 
46 | for fname in images:
47 |     img = cv.imread(fname)
48 |     gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
49 |     ret, corners = cv.findChessboardCorners(gray, (7, 6), None)
50 |     if ret:
51 |         corners = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
52 | 
53 |         # Find the rotation and translation vectors.
54 |         ret, rvecs, tvecs = cv.solvePnP(objp, corners, mtx, dist)
55 | 
56 |         # Project 3D points to image plane
57 |         imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, mtx, dist)
58 | 
59 |         img = draw(img, corners.astype(int), imgpts.astype(int))
60 |         cv.imshow("img", img)
61 |         k = cv.waitKey(0) & 0xFF
62 |         if k == ord("s"):
63 |             cv.imwrite(fname[:-4] + ".png", img)  # fname[:-4] strips ".jpg" so each view saves next to its source
64 | 
65 | cv.destroyAllWindows()
66 | 
67 | 
68 | # Render a Cube
69 | 
70 | 
71 | def draw(img, corners, imgpts):
72 |     imgpts = np.int32(imgpts).reshape(-1, 2)
73 | 
74 |     # Draw ground floor in green
75 |     img = cv.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), -1)
76 | 
77 |     # Draw pillars in blue color
78 |     for i, j in zip(range(4), range(4, 8)):
79 |         img = cv.line(img, tuple(imgpts[i]), tuple(imgpts[j]), (255), 3)
80 | 
81 |     # Draw top layer in red color
82 |     img = cv.drawContours(img, [imgpts[4:]], -1, (0, 0, 255), 3)
83 | 
84 |     return img
85 | 
86 | 
87 | axis = np.float32(
88 |     [
89 |         [0, 0, 0],
90 |         [0, 3, 0],
91 |         [3, 3, 0],
92 |         [3, 0, 0],
93 |         [0, 0, -3],
94 |         [0, 3, -3],
95 |         [3, 3, -3],
96 |         [3, 0, -3],
97 |     ]
98 | ).reshape(-1, 3)
99 | 
100 | for fname in images:
101 |     img = cv.imread(fname)
102 |     gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
103 |     ret, corners = cv.findChessboardCorners(gray, (7, 6), None)
104 |     if ret:
105 |         corners = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
106 | 
107 |         # Find the rotation and translation vectors.
108 |         ret, rvecs, tvecs = cv.solvePnP(objp, corners, mtx, dist)
109 | 
110 |         # Project 3D points to image plane
111 |         imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, mtx, dist)
112 | 
113 |         img = draw(img, corners.astype(int), imgpts.astype(int))
114 |         cv.imshow("img", img)
115 |         k = cv.waitKey(0) & 0xFF
116 |         if k == ord("s"):
117 |             cv.imwrite(fname[:-4] + ".png", img)
118 | 
119 | cv.destroyAllWindows()
120 | 
--------------------------------------------------------------------------------
/06-camera-calibration-and-3d-reconstruction/01-camera-calibration.py:
--------------------------------------------------------------------------------
1 | import glob
2 | 
3 | import cv2 as cv
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | 
7 | # See also: https://learnopencv.com/camera-calibration-using-opencv/
8 | 
9 | # Termination criteria
10 | criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
11 | 
12 | # Prepare object points like (0, 0, 0), (1, 0, 0), ..., (6, 5, 0)
13 | objp = np.zeros((6 * 7, 3), np.float32)
14 | # [[[0, 0, 0, 0, 0, 0],
15 | #   [1, 1, 1, 1, 1, 1],
16 | #   [2, 2, 2, 2, 2, 2],
17 | #   [3, 3, 3, 3, 3, 3],
18 | #   [4, 4, 4, 4, 4, 4],
19 | #   [5, 5, 5, 5, 5, 5],
20 | #   [6, 6, 6, 6, 6, 6]],
21 | 
22 | #  [[0, 1, 2, 3, 4, 5],
23 | #   [0, 1, 2, 3, 4, 5],
24 | #   [0, 1, 2, 3, 4, 5],
25 | #   [0, 1, 2, 3, 4, 5],
26 | #   [0, 1, 2, 3, 4, 5],
27 | #   [0, 1, 2, 3, 4, 5],
28 | #   [0, 1, 2, 3, 4, 5]]]
29 | # After the reshape, each row pairs the element of the first grid with the element of the second grid in the same
30 | # position.
31 | objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
32 | 
33 | # Arrays to store object points and image points from all the images.
34 | objpoints = []  # 3d points in real world space
35 | imgpoints = []  # 2d points in image plane
36 | 
37 | images = glob.glob("../samples/data/left*")
38 | 
39 | for fname in images:
40 |     img = cv.imread(fname)
41 |     gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
42 | 
43 |     # Find the chess board corners
44 |     ret, corners = cv.findChessboardCorners(gray, (7, 6), None)
45 | 
46 |     # If found, add object points, image points (after refining them)
47 |     if ret:
48 |         objpoints.append(objp)
49 |         corners = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
50 |         imgpoints.append(corners)
51 | 
52 |         # Draw and display the corners
53 |         cv.drawChessboardCorners(img, (7, 6), corners, ret)
54 |         cv.imshow("img", img)
55 |         cv.waitKey(500)
56 | 
57 | cv.destroyAllWindows()
58 | 
59 | 
60 | # Calibration
61 | 
62 | # mtx: Input/output 3x3 floating-point camera intrinsic matrix
63 | # dist: Input/output vector of distortion coefficients
64 | # rvecs: Output vector of rotation vectors (Rodrigues) estimated for each pattern view
65 | # tvecs: Output vector of translation vectors estimated for each pattern view
66 | ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(
67 |     objpoints, imgpoints, gray.shape[::-1], None, None
68 | )
69 | 
70 | 
71 | # Undistortion
72 | 
73 | img = cv.imread("../samples/data/left12.jpg")
74 | h, w = img.shape[:2]
75 | # If the scaling parameter alpha=0, it returns the undistorted image with minimum unwanted pixels. So it may even
76 | # remove some pixels at image corners. If alpha=1, all pixels are retained, with some extra black pixels. This
77 | # function also returns an image ROI which can be used to crop the result.
78 | newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
79 | 
80 | # Undistort
81 | dst = cv.undistort(img, mtx, dist, None, newcameramtx)
82 | 
83 | # Crop the image
84 | x, y, w, h = roi
85 | dst = dst[y : y + h, x : x + w]
86 | 
87 | 
88 | # Remapping
89 | 
90 | # Undistort
91 | mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), 5)
92 | dst2 = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)
93 | 
94 | # Crop the image
95 | x, y, w, h = roi
96 | dst2 = dst2[y : y + h, x : x + w]
97 | 
98 | plt.figure("Undistort")
99 | plt.subplot(131)
100 | plt.imshow(img)
101 | plt.title("Original")
102 | 
103 | plt.subplot(132)
104 | plt.imshow(dst)
105 | plt.title("Undistortion")
106 | 
107 | plt.subplot(133)
108 | plt.imshow(dst2)
109 | plt.title("Remapping")
110 | 
111 | plt.show()
112 | 
113 | 
114 | # Re-projection Error
115 | 
116 | mean_error = 0
117 | for i in range(len(objpoints)):
118 |     imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
119 |     error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2) / len(imgpoints2)
120 |     mean_error += error
121 | 
122 | print(f"Total error: {mean_error / len(objpoints)}")
123 | 
--------------------------------------------------------------------------------
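A note on reuse: 02-pose-estimation.py re-runs the whole chessboard calibration before estimating pose. A minimal sketch of the usual shortcut, assuming you are happy to cache the intrinsics with plain NumPy (the file name calibration.npz is arbitrary):

import numpy as np

# In 01-camera-calibration.py, after cv.calibrateCamera():
np.savez("calibration.npz", mtx=mtx, dist=dist)

# In any later script, instead of recalibrating:
data = np.load("calibration.npz")
mtx, dist = data["mtx"], data["dist"]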