├── Chapter09 ├── bin │ ├── python │ ├── python2 │ ├── python2.7 │ ├── pip │ ├── pip2 │ ├── pip2.7 │ ├── flask │ ├── wheel │ ├── easy_install │ ├── easy_install-2.7 │ ├── activate.csh │ ├── activate_this.py │ ├── activate │ ├── activate.fish │ └── python-config ├── pip-selfcheck.json ├── include │ └── python2.7 ├── images │ ├── row 1.png │ ├── CVaaS Page.png │ ├── CVaaS Overview.png │ └── Canny Example.png ├── temp_image.jpg ├── Client │ ├── index.html │ └── index.js ├── Server │ └── app.py ├── corners.py └── app.py ├── Chapter07 ├── codes │ ├── read.py │ ├── write.py │ ├── crop.py │ ├── gray.py │ ├── median_blur.py │ ├── gaussian.py │ ├── canny.py │ ├── resize.py │ ├── erosion.py │ ├── dilation.py │ ├── rotation.py │ ├── filter.py │ ├── thresholding.py │ ├── translation.py │ ├── sobel.py │ ├── contours.py │ └── contour.py └── images │ ├── image.jpg │ ├── filter_kernel.png │ ├── rotation_matrix.png │ ├── combine_canny_image.jpg │ ├── combine_crop_image.jpg │ ├── combine_filter_image.jpg │ ├── combine_gray_images.jpg │ ├── combine_resize_image.jpg │ ├── combine_rotate_image.jpg │ ├── combine_sobelx_image.jpg │ ├── combine_sobely_image.jpg │ ├── translation_matrix.png │ ├── combine_contour_image.jpg │ ├── combine_dilation_image.jpg │ ├── combine_erosion_image.jpg │ ├── combine_median_blur_image.jpg │ ├── combine_translation_image.jpg │ ├── combine_gaussian_blur_image.jpg │ └── combine_thresholding_image.jpg ├── Chapter01 ├── codes │ ├── pillow_read_image.py │ ├── image.png │ ├── new_image.png │ ├── pillow_write_image.py │ ├── skimage_read_image.py │ ├── skimage_write_image.py │ ├── pillow_rotate_image.py │ ├── pillow_resize_image.py │ ├── pillow_crop_image.py │ ├── pillow_image_enhance.py │ ├── pillow_rgb_gray.py │ ├── skimage_data.py │ ├── skimage_rgb_gray.py │ ├── skimage_rgb_hsv.py │ ├── skimage_draw_image.py │ ├── skimage_draw_eclipse_image.py │ ├── pillow_image_contrast.py │ └── skimage_draw_polygon_image.py └── images │ ├── cmyk.jpg │ ├── hsv.png │ ├── 
maths.png │ ├── tumor.png │ ├── camera.png │ ├── circle.png │ ├── ellipse.png │ ├── polygon.png │ ├── chapter1.docx │ ├── lena_blue.png │ ├── lena_green.png │ ├── lena_page1.png │ ├── lena_red.png │ ├── chapter1_up.docx │ ├── lena_bright.png │ ├── lena_cropped.png │ ├── lena_general.png │ ├── lena_contrast.png │ ├── lena_rotated_90.png │ ├── lena_black_white.png │ └── lena_matrix_page1.png ├── Chapter08 ├── gray.png ├── color_track.jpg ├── Screen Shot 2017-06-03 at 11.06.48 PM.png └── codes │ ├── gray.py │ ├── farneback.py │ └── lk_opticalflow.py ├── Chapter03 ├── code │ ├── test.png │ ├── harris.py │ ├── orb.py │ ├── lbp.py │ └── stitch.py └── images │ ├── ORB1.png │ ├── ORB2.png │ ├── ORB3.png │ ├── ORB4.png │ ├── ORB5.png │ ├── ORB6.png │ ├── ORB7.png │ ├── ORB8.png │ ├── output.jpg │ ├── DoG SIFT.png │ ├── ORB Result.png │ ├── output_old.jpg │ ├── LBP Features.png │ ├── goldengate1.png │ ├── goldengate2.png │ ├── Haar Cascades.png │ ├── Harris Corner.png │ ├── harris_output.png │ ├── panorama example.png │ ├── Harris Corner Equation.png │ └── Keypoint Localisation.png ├── Chapter02 ├── codes │ ├── image.jpg │ ├── image.png │ ├── input_save.jpg │ ├── thresh_out.jpg │ ├── thresh_out.png │ ├── adaptive_thresh.jpg │ ├── erosion.py │ ├── gaussain_blur_pillow.py │ ├── gaussian_filter_scikit.py │ ├── Sobel_scikit.py │ ├── canny_edge_scikit.py │ ├── create_filter.py │ ├── gaussian_distribution.py │ ├── harris.py │ ├── hough_lines.py │ └── thresholding.py └── images │ ├── image.jpg │ ├── black_box.png │ ├── gaussian_1.png │ ├── convolution.jpg │ ├── harriswindow.jpg │ ├── sobel_kernel.png │ ├── convolution_ex2.png │ ├── derivative_eq.png │ ├── dilation_image.png │ ├── dilation_matrix.png │ ├── equation_circle.png │ ├── erosion_image.png │ ├── erosion_matrix.png │ ├── line_equation.png │ ├── line_equation1.png │ ├── line_equation2.png │ ├── gaussian-function.jpg │ ├── gradient_formula.gif │ ├── gradient_formula.png │ ├── image_derivative.jpg │ ├── 
non_maximal_supression.png │ ├── combine_images_canny_edge.jpg │ ├── combine_images_sobel_edge.jpg │ ├── derivative_mask_example.png │ ├── combine_images_createfilter.jpg │ ├── combine_images_gaussian_blur_pillow.jpg │ └── combine_images_gaussian_filter_scikit.jpg ├── Chapter05 ├── codes │ ├── digit.png │ ├── mnist_pca.png │ ├── SVM.py │ ├── pca.py │ ├── KMeans.py │ ├── LR.py │ ├── LR_user_input.py │ ├── tsne.py │ └── plot_kmeans_digits.py └── Images │ ├── digit_6.png │ ├── PCA output.png │ ├── baseball.jpg │ ├── TSNE output.png │ ├── mnistdigits.gif │ ├── svm-variations.png │ └── separating-lines-svm.png ├── Chapter06 ├── images │ ├── pug.jpg │ ├── perceptron.png │ ├── NN Flow chart.png │ ├── neural network.png │ └── perceptron formula.png └── codes │ └── mlp.py ├── Chapter10 ├── codes │ ├── image.jpg │ ├── output.jpg │ ├── image_rot.jpg │ ├── combined out.png │ ├── sift_matches.jpg │ ├── gradient_formula.png │ ├── sift_keypoints.jpg │ ├── face_detection.py │ └── sift.py └── images │ ├── DoG.png │ ├── Keypoint.png │ ├── extremum.png │ ├── haar_demo.png │ ├── sift_dog.jpg │ ├── surf_lxy.png │ ├── SIFT Octave.png │ ├── integral sum.png │ ├── sift-octaves.jpg │ ├── Approx formula.png │ ├── Haar Features.png │ ├── efficient sum.png │ ├── SURF Box filters.png │ ├── SURF orientation.png │ ├── Subpixel formula.png │ ├── haar_features_new.jpg │ ├── sift_orieintation.png │ ├── surf_orientation.jpg │ ├── Descriptor histogram.png │ ├── haar_combined_demo.png │ ├── orientation formula.png │ └── sift_local_extrema.jpg ├── Chapter04 ├── images │ ├── contours.png │ ├── original.png │ ├── segmented.png │ ├── watershed.png │ ├── superpixels.png │ ├── contours example.png │ ├── watershed_output.png │ ├── segmentation example.png │ ├── watershed_astronaut.png │ └── graph_cut_illustration.png └── CODES │ ├── superpixels.py │ ├── contours.py │ ├── plot_ncut.py │ └── plot_marked_watershed.py ├── LICENSE └── README.md /Chapter09/bin/python: 
-------------------------------------------------------------------------------- 1 | python2.7 -------------------------------------------------------------------------------- /Chapter09/bin/python2: -------------------------------------------------------------------------------- 1 | python2.7 -------------------------------------------------------------------------------- /Chapter09/pip-selfcheck.json: -------------------------------------------------------------------------------- 1 | {"last_check":"2017-07-22T22:00:20Z","pypi_version":"9.0.1"} -------------------------------------------------------------------------------- /Chapter07/codes/read.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread("image.jpg") 3 | cv2.imshow("image",img) -------------------------------------------------------------------------------- /Chapter07/codes/write.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread("image.jpg") 3 | cv2.imwrite("saved_image.jpg", img) 4 | -------------------------------------------------------------------------------- /Chapter01/codes/pillow_read_image.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | 3 | img = Image.open('image.png') 4 | img.show() 5 | -------------------------------------------------------------------------------- /Chapter08/gray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter08/gray.png -------------------------------------------------------------------------------- /Chapter09/include/python2.7: -------------------------------------------------------------------------------- 1 | /usr/local/Cellar/python/2.7.13/Frameworks/Python.framework/Versions/2.7/include/python2.7 
-------------------------------------------------------------------------------- /Chapter03/code/test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/code/test.png -------------------------------------------------------------------------------- /Chapter09/bin/python2.7: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter09/bin/python2.7 -------------------------------------------------------------------------------- /Chapter01/codes/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/codes/image.png -------------------------------------------------------------------------------- /Chapter01/images/cmyk.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/cmyk.jpg -------------------------------------------------------------------------------- /Chapter01/images/hsv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/hsv.png -------------------------------------------------------------------------------- /Chapter01/images/maths.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/maths.png -------------------------------------------------------------------------------- /Chapter01/images/tumor.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/tumor.png -------------------------------------------------------------------------------- /Chapter02/codes/image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/codes/image.jpg -------------------------------------------------------------------------------- /Chapter02/codes/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/codes/image.png -------------------------------------------------------------------------------- /Chapter02/images/image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/image.jpg -------------------------------------------------------------------------------- /Chapter03/images/ORB1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/ORB1.png -------------------------------------------------------------------------------- /Chapter03/images/ORB2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/ORB2.png -------------------------------------------------------------------------------- /Chapter03/images/ORB3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/ORB3.png 
-------------------------------------------------------------------------------- /Chapter03/images/ORB4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/ORB4.png -------------------------------------------------------------------------------- /Chapter03/images/ORB5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/ORB5.png -------------------------------------------------------------------------------- /Chapter03/images/ORB6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/ORB6.png -------------------------------------------------------------------------------- /Chapter03/images/ORB7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/ORB7.png -------------------------------------------------------------------------------- /Chapter03/images/ORB8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/ORB8.png -------------------------------------------------------------------------------- /Chapter05/codes/digit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter05/codes/digit.png -------------------------------------------------------------------------------- /Chapter06/images/pug.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter06/images/pug.jpg -------------------------------------------------------------------------------- /Chapter07/images/image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/image.jpg -------------------------------------------------------------------------------- /Chapter08/color_track.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter08/color_track.jpg -------------------------------------------------------------------------------- /Chapter09/images/row 1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter09/images/row 1.png -------------------------------------------------------------------------------- /Chapter09/temp_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter09/temp_image.jpg -------------------------------------------------------------------------------- /Chapter10/codes/image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/codes/image.jpg -------------------------------------------------------------------------------- /Chapter10/codes/output.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/codes/output.jpg 
-------------------------------------------------------------------------------- /Chapter10/images/DoG.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/DoG.png -------------------------------------------------------------------------------- /Chapter01/images/camera.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/camera.png -------------------------------------------------------------------------------- /Chapter01/images/circle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/circle.png -------------------------------------------------------------------------------- /Chapter01/images/ellipse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/ellipse.png -------------------------------------------------------------------------------- /Chapter01/images/polygon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/polygon.png -------------------------------------------------------------------------------- /Chapter03/images/output.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/output.jpg -------------------------------------------------------------------------------- /Chapter05/Images/digit_6.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter05/Images/digit_6.png -------------------------------------------------------------------------------- /Chapter01/codes/new_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/codes/new_image.png -------------------------------------------------------------------------------- /Chapter01/images/chapter1.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/chapter1.docx -------------------------------------------------------------------------------- /Chapter01/images/lena_blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/lena_blue.png -------------------------------------------------------------------------------- /Chapter01/images/lena_green.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/lena_green.png -------------------------------------------------------------------------------- /Chapter01/images/lena_page1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/lena_page1.png -------------------------------------------------------------------------------- /Chapter01/images/lena_red.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/lena_red.png -------------------------------------------------------------------------------- /Chapter02/codes/input_save.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/codes/input_save.jpg -------------------------------------------------------------------------------- /Chapter02/codes/thresh_out.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/codes/thresh_out.jpg -------------------------------------------------------------------------------- /Chapter02/codes/thresh_out.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/codes/thresh_out.png -------------------------------------------------------------------------------- /Chapter02/images/black_box.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/black_box.png -------------------------------------------------------------------------------- /Chapter02/images/gaussian_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/gaussian_1.png -------------------------------------------------------------------------------- /Chapter03/images/DoG SIFT.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/DoG SIFT.png 
-------------------------------------------------------------------------------- /Chapter03/images/ORB Result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/ORB Result.png -------------------------------------------------------------------------------- /Chapter03/images/output_old.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/output_old.jpg -------------------------------------------------------------------------------- /Chapter04/images/contours.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter04/images/contours.png -------------------------------------------------------------------------------- /Chapter04/images/original.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter04/images/original.png -------------------------------------------------------------------------------- /Chapter04/images/segmented.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter04/images/segmented.png -------------------------------------------------------------------------------- /Chapter04/images/watershed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter04/images/watershed.png -------------------------------------------------------------------------------- /Chapter05/Images/PCA output.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter05/Images/PCA output.png -------------------------------------------------------------------------------- /Chapter05/Images/baseball.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter05/Images/baseball.jpg -------------------------------------------------------------------------------- /Chapter05/codes/mnist_pca.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter05/codes/mnist_pca.png -------------------------------------------------------------------------------- /Chapter06/images/perceptron.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter06/images/perceptron.png -------------------------------------------------------------------------------- /Chapter09/images/CVaaS Page.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter09/images/CVaaS Page.png -------------------------------------------------------------------------------- /Chapter10/codes/image_rot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/codes/image_rot.jpg -------------------------------------------------------------------------------- /Chapter10/images/Keypoint.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/Keypoint.png -------------------------------------------------------------------------------- /Chapter10/images/extremum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/extremum.png -------------------------------------------------------------------------------- /Chapter10/images/haar_demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/haar_demo.png -------------------------------------------------------------------------------- /Chapter10/images/sift_dog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/sift_dog.jpg -------------------------------------------------------------------------------- /Chapter10/images/surf_lxy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/surf_lxy.png -------------------------------------------------------------------------------- /Chapter01/images/chapter1_up.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/chapter1_up.docx -------------------------------------------------------------------------------- /Chapter01/images/lena_bright.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/lena_bright.png 
-------------------------------------------------------------------------------- /Chapter01/images/lena_cropped.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/lena_cropped.png -------------------------------------------------------------------------------- /Chapter01/images/lena_general.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/lena_general.png -------------------------------------------------------------------------------- /Chapter02/images/convolution.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/convolution.jpg -------------------------------------------------------------------------------- /Chapter02/images/harriswindow.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/harriswindow.jpg -------------------------------------------------------------------------------- /Chapter02/images/sobel_kernel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/sobel_kernel.png -------------------------------------------------------------------------------- /Chapter03/images/LBP Features.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/LBP Features.png -------------------------------------------------------------------------------- 
/Chapter03/images/goldengate1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/goldengate1.png -------------------------------------------------------------------------------- /Chapter03/images/goldengate2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/goldengate2.png -------------------------------------------------------------------------------- /Chapter04/images/superpixels.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter04/images/superpixels.png -------------------------------------------------------------------------------- /Chapter05/Images/TSNE output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter05/Images/TSNE output.png -------------------------------------------------------------------------------- /Chapter05/Images/mnistdigits.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter05/Images/mnistdigits.gif -------------------------------------------------------------------------------- /Chapter10/codes/combined out.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/codes/combined out.png -------------------------------------------------------------------------------- /Chapter10/codes/sift_matches.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/codes/sift_matches.jpg -------------------------------------------------------------------------------- /Chapter10/images/SIFT Octave.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/SIFT Octave.png -------------------------------------------------------------------------------- /Chapter10/images/integral sum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/integral sum.png -------------------------------------------------------------------------------- /Chapter10/images/sift-octaves.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/sift-octaves.jpg -------------------------------------------------------------------------------- /Chapter01/images/lena_contrast.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/lena_contrast.png -------------------------------------------------------------------------------- /Chapter01/images/lena_rotated_90.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/lena_rotated_90.png -------------------------------------------------------------------------------- /Chapter02/codes/adaptive_thresh.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/codes/adaptive_thresh.jpg -------------------------------------------------------------------------------- /Chapter02/images/convolution_ex2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/convolution_ex2.png -------------------------------------------------------------------------------- /Chapter02/images/derivative_eq.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/derivative_eq.png -------------------------------------------------------------------------------- /Chapter02/images/dilation_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/dilation_image.png -------------------------------------------------------------------------------- /Chapter02/images/dilation_matrix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/dilation_matrix.png -------------------------------------------------------------------------------- /Chapter02/images/equation_circle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/equation_circle.png -------------------------------------------------------------------------------- /Chapter02/images/erosion_image.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/erosion_image.png -------------------------------------------------------------------------------- /Chapter02/images/erosion_matrix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/erosion_matrix.png -------------------------------------------------------------------------------- /Chapter02/images/line_equation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/line_equation.png -------------------------------------------------------------------------------- /Chapter02/images/line_equation1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/line_equation1.png -------------------------------------------------------------------------------- /Chapter02/images/line_equation2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/line_equation2.png -------------------------------------------------------------------------------- /Chapter03/images/Haar Cascades.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/Haar Cascades.png -------------------------------------------------------------------------------- /Chapter03/images/Harris Corner.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/Harris Corner.png -------------------------------------------------------------------------------- /Chapter03/images/harris_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/harris_output.png -------------------------------------------------------------------------------- /Chapter05/Images/svm-variations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter05/Images/svm-variations.png -------------------------------------------------------------------------------- /Chapter06/images/NN Flow chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter06/images/NN Flow chart.png -------------------------------------------------------------------------------- /Chapter06/images/neural network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter06/images/neural network.png -------------------------------------------------------------------------------- /Chapter07/images/filter_kernel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/filter_kernel.png -------------------------------------------------------------------------------- /Chapter07/images/rotation_matrix.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/rotation_matrix.png -------------------------------------------------------------------------------- /Chapter09/images/CVaaS Overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter09/images/CVaaS Overview.png -------------------------------------------------------------------------------- /Chapter09/images/Canny Example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter09/images/Canny Example.png -------------------------------------------------------------------------------- /Chapter10/codes/gradient_formula.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/codes/gradient_formula.png -------------------------------------------------------------------------------- /Chapter10/codes/sift_keypoints.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/codes/sift_keypoints.jpg -------------------------------------------------------------------------------- /Chapter10/images/Approx formula.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/Approx formula.png -------------------------------------------------------------------------------- /Chapter10/images/Haar Features.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/Haar Features.png -------------------------------------------------------------------------------- /Chapter10/images/efficient sum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/efficient sum.png -------------------------------------------------------------------------------- /Chapter01/images/lena_black_white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/lena_black_white.png -------------------------------------------------------------------------------- /Chapter01/images/lena_matrix_page1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter01/images/lena_matrix_page1.png -------------------------------------------------------------------------------- /Chapter02/images/gaussian-function.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/gaussian-function.jpg -------------------------------------------------------------------------------- /Chapter02/images/gradient_formula.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/gradient_formula.gif -------------------------------------------------------------------------------- /Chapter02/images/gradient_formula.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/gradient_formula.png -------------------------------------------------------------------------------- /Chapter02/images/image_derivative.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/image_derivative.jpg -------------------------------------------------------------------------------- /Chapter03/images/panorama example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/panorama example.png -------------------------------------------------------------------------------- /Chapter04/images/contours example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter04/images/contours example.png -------------------------------------------------------------------------------- /Chapter04/images/watershed_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter04/images/watershed_output.png -------------------------------------------------------------------------------- /Chapter10/images/SURF Box filters.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/SURF Box filters.png -------------------------------------------------------------------------------- /Chapter10/images/SURF orientation.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/SURF orientation.png -------------------------------------------------------------------------------- /Chapter10/images/Subpixel formula.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/Subpixel formula.png -------------------------------------------------------------------------------- /Chapter10/images/haar_features_new.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/haar_features_new.jpg -------------------------------------------------------------------------------- /Chapter10/images/sift_orieintation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/sift_orieintation.png -------------------------------------------------------------------------------- /Chapter10/images/surf_orientation.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/surf_orientation.jpg -------------------------------------------------------------------------------- /Chapter04/images/segmentation example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter04/images/segmentation example.png -------------------------------------------------------------------------------- /Chapter04/images/watershed_astronaut.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter04/images/watershed_astronaut.png -------------------------------------------------------------------------------- /Chapter05/Images/separating-lines-svm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter05/Images/separating-lines-svm.png -------------------------------------------------------------------------------- /Chapter06/images/perceptron formula.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter06/images/perceptron formula.png -------------------------------------------------------------------------------- /Chapter07/images/combine_canny_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_canny_image.jpg -------------------------------------------------------------------------------- /Chapter07/images/combine_crop_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_crop_image.jpg -------------------------------------------------------------------------------- /Chapter07/images/combine_filter_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_filter_image.jpg -------------------------------------------------------------------------------- /Chapter07/images/combine_gray_images.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_gray_images.jpg -------------------------------------------------------------------------------- /Chapter07/images/combine_resize_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_resize_image.jpg -------------------------------------------------------------------------------- /Chapter07/images/combine_rotate_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_rotate_image.jpg -------------------------------------------------------------------------------- /Chapter07/images/combine_sobelx_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_sobelx_image.jpg -------------------------------------------------------------------------------- /Chapter07/images/combine_sobely_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_sobely_image.jpg -------------------------------------------------------------------------------- /Chapter07/images/translation_matrix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/translation_matrix.png -------------------------------------------------------------------------------- /Chapter10/images/Descriptor histogram.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/Descriptor histogram.png -------------------------------------------------------------------------------- /Chapter10/images/haar_combined_demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/haar_combined_demo.png -------------------------------------------------------------------------------- /Chapter10/images/orientation formula.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/orientation formula.png -------------------------------------------------------------------------------- /Chapter10/images/sift_local_extrema.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter10/images/sift_local_extrema.jpg -------------------------------------------------------------------------------- /Chapter02/images/non_maximal_supression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/non_maximal_supression.png -------------------------------------------------------------------------------- /Chapter03/images/Harris Corner Equation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/Harris Corner Equation.png -------------------------------------------------------------------------------- /Chapter03/images/Keypoint Localisation.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter03/images/Keypoint Localisation.png -------------------------------------------------------------------------------- /Chapter04/images/graph_cut_illustration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter04/images/graph_cut_illustration.png -------------------------------------------------------------------------------- /Chapter07/images/combine_contour_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_contour_image.jpg -------------------------------------------------------------------------------- /Chapter07/images/combine_dilation_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_dilation_image.jpg -------------------------------------------------------------------------------- /Chapter07/images/combine_erosion_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_erosion_image.jpg -------------------------------------------------------------------------------- /Chapter02/images/combine_images_canny_edge.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/combine_images_canny_edge.jpg -------------------------------------------------------------------------------- /Chapter02/images/combine_images_sobel_edge.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/combine_images_sobel_edge.jpg -------------------------------------------------------------------------------- /Chapter02/images/derivative_mask_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/derivative_mask_example.png -------------------------------------------------------------------------------- /Chapter07/images/combine_median_blur_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_median_blur_image.jpg -------------------------------------------------------------------------------- /Chapter07/images/combine_translation_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_translation_image.jpg -------------------------------------------------------------------------------- /Chapter02/images/combine_images_createfilter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/combine_images_createfilter.jpg -------------------------------------------------------------------------------- /Chapter07/images/combine_gaussian_blur_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_gaussian_blur_image.jpg 
-------------------------------------------------------------------------------- /Chapter07/images/combine_thresholding_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter07/images/combine_thresholding_image.jpg -------------------------------------------------------------------------------- /Chapter07/codes/crop.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread("image.jpg") 3 | img_crop = img[0:200, 150:350] 4 | cv2.imwrite("crop_img.jpg", img_crop) 5 | cv2.imshow("crop", img_crop) 6 | -------------------------------------------------------------------------------- /Chapter08/Screen Shot 2017-06-03 at 11.06.48 PM.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter08/Screen Shot 2017-06-03 at 11.06.48 PM.png -------------------------------------------------------------------------------- /Chapter02/images/combine_images_gaussian_blur_pillow.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/combine_images_gaussian_blur_pillow.jpg -------------------------------------------------------------------------------- /Chapter07/codes/gray.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread("image.jpg") 3 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 4 | cv2.imwrite("gray_image.jpg", gray) 5 | cv2.imshow("image",gray) 6 | -------------------------------------------------------------------------------- /Chapter02/images/combine_images_gaussian_filter_scikit.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Computer-Vision-with-Python-3/HEAD/Chapter02/images/combine_images_gaussian_filter_scikit.jpg -------------------------------------------------------------------------------- /Chapter01/codes/pillow_write_image.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | 3 | #Open any image 4 | img = Image.open('image.png') 5 | #Save the image with a different name 6 | img.save('new_image.png') 7 | 8 | -------------------------------------------------------------------------------- /Chapter07/codes/median_blur.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread("image.jpg") 3 | new_img = cv2.medianBlur(img,5) 4 | cv2.imwrite("median_blur.jpg", new_img) 5 | cv2.imshow("median_blur", new_img) 6 | -------------------------------------------------------------------------------- /Chapter02/codes/erosion.py: -------------------------------------------------------------------------------- 1 | from skimage import morphology 2 | from skimage import io 3 | 4 | img = io.imread('image.png') 5 | eroded_img = morphology.erosion(img) 6 | 7 | io.imshow(eroded_img) 8 | io.show() 9 | -------------------------------------------------------------------------------- /Chapter07/codes/gaussian.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread("image.jpg") 3 | new_img = cv2.GaussianBlur(img,(5,5),0) 4 | cv2.imwrite("gaussian_blur.jpg", new_img) 5 | cv2.imshow("gaussian_blur.jpg", new_img) 6 | -------------------------------------------------------------------------------- /Chapter01/codes/skimage_read_image.py: -------------------------------------------------------------------------------- 1 | #Program to read an image using skimage 2 | 3 | from skimage import io 4 | 5 | #Read image 6 | img = io.imread('image.png') 7 | io.imshow(img) 8 | io.show() 9 
| 10 | -------------------------------------------------------------------------------- /Chapter01/codes/skimage_write_image.py: -------------------------------------------------------------------------------- 1 | #Program to write an image 2 | 3 | from skimage import io 4 | 5 | #Read an image 6 | img = io.imread('image.png') 7 | 8 | #Write an image to a file 9 | io.imsave('new_image.png', img) 10 | -------------------------------------------------------------------------------- /Chapter02/codes/gaussain_blur_pillow.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | from PIL import ImageFilter 3 | img = Image.open("image.jpg") 4 | blur_img = img.filter(ImageFilter.GaussianBlur(5)) 5 | blur_img.save("GaussianBlur.jpg") 6 | blur_img.show() -------------------------------------------------------------------------------- /Chapter07/codes/canny.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread("image.jpg") 3 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 4 | edges = cv2.Canny(gray, 100, 200, 3) 5 | cv2.imwrite("canny_edges.jpg", edges) 6 | cv2.imshow("canny_edges", edges) 7 | -------------------------------------------------------------------------------- /Chapter07/codes/resize.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread("image.jpg") 3 | r,c = img.shape[:2] 4 | new_img = cv2.resize(img, (2*r,2*c), interpolation = cv2.INTER_CUBIC) 5 | cv2.imwrite("resize_image.jpg", new_img) 6 | cv2.imshow("resize", new_img) -------------------------------------------------------------------------------- /Chapter02/codes/gaussian_filter_scikit.py: -------------------------------------------------------------------------------- 1 | from skimage import io 2 | from skimage import filters 3 | img = io.imread("image.jpg") 4 | out = filters.gaussian(img, sigma=5) 5 | 
io.imsave("gaussian_filter_scikit.jpg", out) 6 | io.imshow(out) 7 | io.show() -------------------------------------------------------------------------------- /Chapter07/codes/erosion.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | img = cv2.imread("thresholding.jpg") 4 | ker = np.ones((5,5),np.uint8) 5 | new_img = cv2.erode(img,ker,iterations = 1) 6 | cv2.imwrite("erosion.jpg", new_img) 7 | cv2.imshow("erosion", new_img) 8 | -------------------------------------------------------------------------------- /Chapter07/codes/dilation.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | img = cv2.imread("thresholding.jpg") 4 | ker = np.ones((5,5),np.uint8) 5 | new_img = cv2.dilate(img,ker,iterations = 1) 6 | cv2.imwrite("dilation.jpg", new_img) 7 | cv2.imshow("dilation", new_img) 8 | -------------------------------------------------------------------------------- /Chapter07/codes/rotation.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread("image.jpg") 3 | r,c = img.shape[:2] 4 | M = cv2.getRotationMatrix2D((c/2,r/2),90,1) 5 | new_img = cv2.warpAffine(img,M,(c,r)) 6 | cv2.imwrite("rotate_img.jpg", new_img) 7 | cv2.imshow("rotate", new_img) 8 | -------------------------------------------------------------------------------- /Chapter02/codes/Sobel_scikit.py: -------------------------------------------------------------------------------- 1 | from skimage import io 2 | from skimage import filters 3 | from skimage import color 4 | img = io.imread("image.jpg") 5 | img = color.rgb2gray(img) 6 | edge = filters.sobel(img) 7 | io.imshow(edge) 8 | io.imsave("sobel_edge.jpg", edge) 9 | io.show() -------------------------------------------------------------------------------- /Chapter07/codes/filter.py: 
-------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | img = cv2.imread("image.jpg") 4 | ker = np.array([[1, 1, 1], 5 | [1, 1, 1], 6 | [1, 1, 1]]) 7 | new_img = cv2.filter2D(img,-1,ker) 8 | cv2.imwrite("filter.jpg", new_img) 9 | cv2.imshow("filter", new_img) 10 | -------------------------------------------------------------------------------- /Chapter07/codes/thresholding.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread("image.jpg") 3 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 4 | new_img = cv2.threshold(gray,120,255,cv2.THRESH_BINARY) 5 | cv2.imwrite("thresholding.jpg", new_img[1]) 6 | cv2.imshow("thresholding", new_img[1]) 7 | -------------------------------------------------------------------------------- /Chapter02/codes/canny_edge_scikit.py: -------------------------------------------------------------------------------- 1 | from skimage import io 2 | from skimage import feature 3 | from skimage import color 4 | img = io.imread("image.jpg") 5 | img = color.rgb2gray(img) 6 | edge = feature.canny(img,3) 7 | io.imshow(edge) 8 | io.imsave("canny_edge.jpg", edge) 9 | io.show() -------------------------------------------------------------------------------- /Chapter02/codes/create_filter.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | from PIL import ImageFilter 3 | img = Image.open("image.jpg") 4 | img = img.convert("L") 5 | new_img = img.filter(ImageFilter.Kernel((3,3),[1,0,-1,5,0,-5,1,0,1])) 6 | new_img.save("create_filter_image.jpg") 7 | new_img.show() 8 | -------------------------------------------------------------------------------- /Chapter01/codes/pillow_rotate_image.py: -------------------------------------------------------------------------------- 1 | #Program to rotate an image 2 | 3 | from PIL import Image 4 | 5 | #Read the image 6 | img = 
Image.read('image.png') 7 | 8 | #Rotate the image by 90 degress anti-clockwise 9 | rotated_img = img.rotate(90) 10 | 11 | rotated_img.show() 12 | -------------------------------------------------------------------------------- /Chapter02/codes/gaussian_distribution.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import matplotlib.mlab as mlab 4 | import math 5 | 6 | mu = 0 7 | sigma = 0.1 8 | x = np.linspace(-3, 3, 100) 9 | plt.plot(x,mlab.normpdf(x, mu, sigma)) 10 | 11 | plt.show() 12 | -------------------------------------------------------------------------------- /Chapter01/codes/pillow_resize_image.py: -------------------------------------------------------------------------------- 1 | #Program to resize a given image 2 | 3 | from PIL import Image 4 | 5 | #Read the image that you want to resize 6 | img = Image.open('image.png') 7 | 8 | #Resize the image 9 | resize_img = img.resize((200, 200)) 10 | 11 | resize_img.show() 12 | -------------------------------------------------------------------------------- /Chapter07/codes/translation.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | img = cv2.imread("image.jpg") 4 | r,c = img.shape[:2] 5 | M = np.float32([[1,0,100],[0,1,100]]) 6 | new_img = cv2.warpAffine(img,M,(c,r)) 7 | cv2.imwrite("translation.jpg", new_img) 8 | cv2.imshow("translation", new_img) 9 | -------------------------------------------------------------------------------- /Chapter01/codes/pillow_crop_image.py: -------------------------------------------------------------------------------- 1 | #Program to crop an image 2 | 3 | from PIL import Image 4 | 5 | #Read the image that you want to crop 6 | img = Image.open('image.png') 7 | 8 | #Create a dimension tuple 9 | dim = (100, 100, 400, 400) 10 | crop_img = img.crop(dim) 11 | 12 | crop_img.show() 13 | 
-------------------------------------------------------------------------------- /Chapter01/codes/pillow_image_enhance.py: -------------------------------------------------------------------------------- 1 | #Program to enhance an image 2 | 3 | from PIL import Image 4 | from PIL import ImageEnhance 5 | 6 | #Read an image 7 | img = Image.open('image.png') 8 | 9 | enchancer = ImageEnhance.Brightness(img) 10 | bright_img = enhancer.enhance(2) 11 | 12 | bright_img.show() 13 | -------------------------------------------------------------------------------- /Chapter01/codes/pillow_rgb_gray.py: -------------------------------------------------------------------------------- 1 | #Program to convert RGB images to Gray scale images 2 | 3 | from PIL import Image 4 | 5 | #Read the image that you wnat to convert 6 | img = Image.open('image.png') 7 | 8 | #Convert the image to grayscale 9 | gray_image = img.convert("L") 10 | 11 | gray_image.show() 12 | -------------------------------------------------------------------------------- /Chapter01/codes/skimage_data.py: -------------------------------------------------------------------------------- 1 | #Program to get default images in scikit-image 2 | 3 | from skimage import data 4 | from skimage import io 5 | 6 | #Get the camera image 7 | img_camera = data.camera() 8 | 9 | #Get an image with handwritten text 10 | img_text = data.text() 11 | 12 | io.show(img_text) 13 | -------------------------------------------------------------------------------- /Chapter01/codes/skimage_rgb_gray.py: -------------------------------------------------------------------------------- 1 | #Program to convert a color image to grayscale image 2 | 3 | from skimage import color, io 4 | 5 | #Read an image from a file 6 | img = io.imread('image.png') 7 | 8 | #Convert the image tp grayscale 9 | gray_image = color.rgb2gray(img) 10 | 11 | io.imshow(gray_image) 12 | io.show() 13 | -------------------------------------------------------------------------------- 
/Chapter01/codes/skimage_rgb_hsv.py: -------------------------------------------------------------------------------- 1 | #Program to convert a color image to grayscale image 2 | 3 | from skimage import color, io 4 | 5 | #Read an image from a file 6 | img = io.imread('image.png') 7 | 8 | #Convert the image tp grayscale 9 | hsv_image = color.rgb2hsv(img) 10 | 11 | io.imshow(hsv_image) 12 | io.show() 13 | -------------------------------------------------------------------------------- /Chapter09/bin/pip: -------------------------------------------------------------------------------- 1 | #!/Users/salil/Work/Book_CV_Using_Python3/Chapter9/CVaaS/flask/bin/python2.7 2 | 3 | # -*- coding: utf-8 -*- 4 | import re 5 | import sys 6 | 7 | from pip import main 8 | 9 | if __name__ == '__main__': 10 | sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) 11 | sys.exit(main()) 12 | -------------------------------------------------------------------------------- /Chapter09/bin/pip2: -------------------------------------------------------------------------------- 1 | #!/Users/salil/Work/Book_CV_Using_Python3/Chapter9/CVaaS/flask/bin/python2.7 2 | 3 | # -*- coding: utf-8 -*- 4 | import re 5 | import sys 6 | 7 | from pip import main 8 | 9 | if __name__ == '__main__': 10 | sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) 11 | sys.exit(main()) 12 | -------------------------------------------------------------------------------- /Chapter09/bin/pip2.7: -------------------------------------------------------------------------------- 1 | #!/Users/salil/Work/Book_CV_Using_Python3/Chapter9/CVaaS/flask/bin/python2.7 2 | 3 | # -*- coding: utf-8 -*- 4 | import re 5 | import sys 6 | 7 | from pip import main 8 | 9 | if __name__ == '__main__': 10 | sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) 11 | sys.exit(main()) 12 | -------------------------------------------------------------------------------- /Chapter01/codes/skimage_draw_image.py: 
-------------------------------------------------------------------------------- 1 | #Program to draw a circle on a black image 2 | 3 | import numpy as np 4 | from skimage import io, draw 5 | 6 | #Read an image 7 | img = np.zeros((100, 100), dtype=np.uint8) 8 | 9 | #Draw a circle 10 | x, y = draw.circle(50, 50, 10) 11 | img[x, y] = 1 12 | 13 | io.imshow(img) 14 | io.show() 15 | -------------------------------------------------------------------------------- /Chapter09/bin/flask: -------------------------------------------------------------------------------- 1 | #!/Users/salil/Work/Book_CV_Using_Python3/Chapter9/CVaaS/flask/bin/python2.7 2 | 3 | # -*- coding: utf-8 -*- 4 | import re 5 | import sys 6 | 7 | from flask.cli import main 8 | 9 | if __name__ == '__main__': 10 | sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) 11 | sys.exit(main()) 12 | -------------------------------------------------------------------------------- /Chapter09/bin/wheel: -------------------------------------------------------------------------------- 1 | #!/Users/salil/Work/Book_CV_Using_Python3/Chapter9/CVaaS/flask/bin/python2.7 2 | 3 | # -*- coding: utf-8 -*- 4 | import re 5 | import sys 6 | 7 | from wheel.tool import main 8 | 9 | if __name__ == '__main__': 10 | sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) 11 | sys.exit(main()) 12 | -------------------------------------------------------------------------------- /Chapter01/codes/skimage_draw_eclipse_image.py: -------------------------------------------------------------------------------- 1 | #Program to draw an eclipse on a black image 2 | 3 | import numpy as np 4 | from skimage import io, draw 5 | 6 | #Read an image 7 | img = np.zeros((100, 100), dtype=np.uint8) 8 | 9 | #Draw a circle 10 | x, y = draw.ellipse(50, 50, 10, 20) 11 | img[x, y] = 1 12 | 13 | io.imshow(img) 14 | io.show() 15 | -------------------------------------------------------------------------------- 
/Chapter01/codes/pillow_image_contrast.py: -------------------------------------------------------------------------------- 1 | #Program to change the brightness 2 | 3 | from PIL import Image 4 | from PIL import ImageEnhancer 5 | 6 | #Read the image 7 | img = Image.open('image.png') 8 | 9 | #Change the constrast of the image 10 | enhancer = ImageEnhancer.Contrast(img) 11 | 12 | new_img = enhancer.enchance(2) 13 | 14 | new_img.show() 15 | -------------------------------------------------------------------------------- /Chapter09/bin/easy_install: -------------------------------------------------------------------------------- 1 | #!/Users/salil/Work/Book_CV_Using_Python3/Chapter9/CVaaS/flask/bin/python2.7 2 | 3 | # -*- coding: utf-8 -*- 4 | import re 5 | import sys 6 | 7 | from setuptools.command.easy_install import main 8 | 9 | if __name__ == '__main__': 10 | sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) 11 | sys.exit(main()) 12 | -------------------------------------------------------------------------------- /Chapter09/bin/easy_install-2.7: -------------------------------------------------------------------------------- 1 | #!/Users/salil/Work/Book_CV_Using_Python3/Chapter9/CVaaS/flask/bin/python2.7 2 | 3 | # -*- coding: utf-8 -*- 4 | import re 5 | import sys 6 | 7 | from setuptools.command.easy_install import main 8 | 9 | if __name__ == '__main__': 10 | sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) 11 | sys.exit(main()) 12 | -------------------------------------------------------------------------------- /Chapter07/codes/sobel.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread("image.jpg") 3 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 4 | x_edges = cv2.Sobel(gray,-1,1,0,ksize=5) 5 | cv2.imwrite("sobel_edges_x.jpg", x_edges) 6 | y_edges = cv2.Sobel(gray,-1,0,1,ksize=5) 7 | cv2.imwrite("sobel_edges_y.jpg", y_edges) 8 | cv2.imshow("xedges", x_edges) 9 | 
cv2.imshow("yedges", y_edges) 10 | -------------------------------------------------------------------------------- /Chapter07/codes/contours.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread('image.jpg') 3 | gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 4 | thresh_img = cv2.threshold(gray,127,255,0) 5 | im, contours, hierarchy = cv2.findContours(thresh_img[1],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) 6 | cv2.drawContours(img, contours, -1, (255,0,0), 3) 7 | cv2.imwrite("contours.jpg", img) 8 | cv2.imshow("contours", img) -------------------------------------------------------------------------------- /Chapter07/codes/contour.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | img = cv2.imread('image.jpg') 3 | gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 4 | thresh_img = cv2.threshold(gray,127,255,0) 5 | im, contours, hierarchy = cv2.findContours(thresh_img[1],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) 6 | cv2.drawContours(img, contours, -1, (255,0,0), 3) 7 | cv2.imwrite("contours.jpg", img) 8 | cv2.imshow("contours", img) 9 | -------------------------------------------------------------------------------- /Chapter01/codes/skimage_draw_polygon_image.py: -------------------------------------------------------------------------------- 1 | #Program to draw a polygon on an image 2 | 3 | import numpy as np 4 | from skimage import io, draw 5 | 6 | #Make an empty(blank) image 7 | img = np.zeros((100, 100), dtype=np.uint8) 8 | r = np.array([10, 25, 80, 50]) 9 | c = np.array([10, 60, 40, 10]) 10 | x, y = polygon(r, c) 11 | img[x, y] = 1 12 | 13 | 14 | io.imshow(img) 15 | io.show() 16 | -------------------------------------------------------------------------------- /Chapter08/codes/gray.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | 3 | cam = 
cv2.VideoCapture('/Users/salil/Downloads/VID_20170701_160759.mp4') 4 | 5 | while (cam.isOpened()): 6 | 7 | ret, frame = cam.read() 8 | frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5) 9 | gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 10 | cv2.imshow('gray_frame',gray_frame) 11 | cv2.imshow('original_frame',frame) 12 | 13 | if cv2.waitKey(1) & 0xFF == ord('q'): 14 | break 15 | 16 | cam.release() 17 | cv2.destroyAllWindows() 18 | -------------------------------------------------------------------------------- /Chapter04/CODES/superpixels.py: -------------------------------------------------------------------------------- 1 | from skimage import segmentation, color 2 | from skimage.io import imread 3 | from skimage.future import graph 4 | from matplotlib import pyplot as plt 5 | 6 | img = imread('test.jpeg') 7 | 8 | img_segments = segmentation.slic(img, compactness=20, n_segments=500) 9 | superpixels = color.label2rgb(img_segments, img, kind='avg') 10 | 11 | fig, ax = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True, figsize=(6, 8)) 12 | 13 | ax[0].imshow(img) 14 | ax[1].imshow(superpixels) 15 | 16 | for a in ax: 17 | a.axis('off') 18 | 19 | plt.tight_layout() 20 | plt.show() 21 | -------------------------------------------------------------------------------- /Chapter02/codes/harris.py: -------------------------------------------------------------------------------- 1 | from matplotlib import pyplot as plt 2 | 3 | from skimage import data, io 4 | from skimage.feature import corner_harris, corner_subpix, corner_peaks 5 | 6 | img = io.imread('image.png') 7 | coords = corner_peaks(corner_harris(image), min_distance=5) 8 | coords_subpix = corner_subpix(image, coords, window_size=13) 9 | 10 | fig, ax = plt.subplots() 11 | ax.imshow(image, interpolation='nearest', cmap=plt.cm.gray) 12 | ax.plot(coords[:, 1], coords[:, 0], '.b', markersize=3) 13 | ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15) 14 | ax.axis((0, 350, 350, 0)) 15 | 
plt.show() 16 | -------------------------------------------------------------------------------- /Chapter02/codes/hough_lines.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from skimage.transform import (hough_line, probabilistic_hough_line) 4 | from skimage.feature import canny 5 | 6 | #Read an image 7 | image = io.imread('image.png') 8 | 9 | #Apply your favorite edge detection algorithm. We use 'canny' for this example. 10 | edges = canny(image, 2, 1, 25) 11 | 12 | #Once you have the edges, run the hough transform over the image 13 | lines = hough_lines(image) 14 | probabilistic_lines = probabilistic_hough_line(edges, threshold=10, line_length=5, line_gap=3) 15 | 16 | #As an exercise you can compare the results of both the methods. 17 | -------------------------------------------------------------------------------- /Chapter10/codes/face_detection.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | 4 | face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') 5 | eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml') 6 | 7 | img = cv2.imread('image.jpg') 8 | img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 9 | 10 | faces = face_cascade.detectMultiScale(img_gray, 1.3, 5) 11 | 12 | 13 | for (x,y,w,h) in faces: 14 | cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) 15 | roi_gray = img_gray[y:y+h, x:x+w] 16 | roi_color = img[y:y+h, x:x+w] 17 | eyes = eye_cascade.detectMultiScale(roi_gray) 18 | for (ex,ey,ew,eh) in eyes: 19 | cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) 20 | 21 | cv2.imwrite('output.jpg',img) 22 | -------------------------------------------------------------------------------- /Chapter04/CODES/contours.py: -------------------------------------------------------------------------------- 1 | from skimage import measure 2 | from skimage.io import imread 3 | from skimage.color 
import rgb2gray 4 | from skimage.filters import sobel 5 | import matplotlib.pyplot as plt 6 | 7 | #Read an image 8 | img = imread('contours.png') 9 | 10 | #Convert the image to grayscale 11 | img_gray = rgb2gray(img) 12 | 13 | #Find edges in the image 14 | img_edges = sobel(img_gray) 15 | 16 | #Find contours in the image 17 | contours = measure.find_contours(img_edges, 0.2) 18 | 19 | # Display the image and plot all contours found 20 | fig, ax = plt.subplots() 21 | ax.imshow(img_edges, interpolation='nearest', cmap=plt.cm.gray) 22 | 23 | for n, contour in enumerate(contours): 24 | ax.plot(contour[:, 1], contour[:, 0], linewidth=2) 25 | 26 | ax.axis('image') 27 | ax.set_xticks([]) 28 | ax.set_yticks([]) 29 | plt.show() 30 | -------------------------------------------------------------------------------- /Chapter02/codes/thresholding.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | 3 | from skimage import data 4 | from skimage.filters import threshold_otsu, threshold_local 5 | from skimage.io import imread 6 | from skimage.color import rgb2gray 7 | 8 | image = imread('image.jpg') 9 | image = rgb2gray(image) 10 | 11 | global_thresh = threshold_otsu(image) 12 | binary_global = image > global_thresh 13 | 14 | block_size = 35 15 | binary_adaptive = threshold_local(image, block_size, offset=10) 16 | 17 | fig, axes = plt.subplots(nrows=3, figsize=(7, 8)) 18 | ax0, ax1, ax2 = axes 19 | plt.gray() 20 | 21 | ax0.imshow(image) 22 | ax0.set_title('Image') 23 | 24 | ax1.imshow(binary_global) 25 | ax1.set_title('Global thresholding') 26 | 27 | ax2.imshow(binary_adaptive) 28 | ax2.set_title('Adaptive thresholding') 29 | 30 | for ax in axes: 31 | ax.axis('off') 32 | 33 | plt.show() 34 | 35 | -------------------------------------------------------------------------------- /Chapter05/codes/SVM.py: -------------------------------------------------------------------------------- 1 | from sklearn import 
datasets, metrics, svm 2 | 3 | mnist = datasets.load_digits() 4 | 5 | images = mnist.images 6 | 7 | data_size = len(images) 8 | 9 | #Preprocessing images 10 | images = images.reshape(len(images), -1) 11 | labels = mnist.target 12 | 13 | #Initialize Logistic Regression 14 | SVM_classifier = svm.SVC(gamma=0.001) 15 | #Training the data on only 75% of the dataset. Rest of the 25% will be used in testing the Logistic Regression 16 | SVM_classifier.fit(images[:int((data_size / 4) * 3)], labels[:int((data_size / 4) * 3)]) 17 | 18 | #Testing the data 19 | predictions = SVM_classifier.predict(images[int((data_size / 4)):]) 20 | target = labels[int((data_size/4)):] 21 | 22 | #Print the performance report of the Logistic Regression model that we learnt 23 | print("Performance Report: \n %s \n" % (metrics.classification_report(target, predictions))) 24 | -------------------------------------------------------------------------------- /Chapter05/codes/pca.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from itertools import product 4 | from sklearn.decomposition import RandomizedPCA 5 | from sklearn.datasets import fetch_mldata 6 | from sklearn.utils import shuffle 7 | 8 | #use all digits 9 | mnist = fetch_mldata("MNIST original") 10 | X_train, y_train = mnist.data[:70000] / 255., mnist.target[:70000] 11 | 12 | X_train, y_train = shuffle(X_train, y_train) 13 | X_train, y_train = X_train[:5000], y_train[:5000] # lets subsample a bit for a first impression 14 | 15 | pca = RandomizedPCA(n_components=3) 16 | fig, plot = plt.subplots() 17 | fig.set_size_inches(50, 50) 18 | plt.prism() 19 | 20 | X_transformed = pca.fit_transform(X_train) 21 | plot.scatter(X_transformed[:, 0], X_transformed[:, 1], c=y_train) 22 | plot.set_xticks(()) 23 | plot.set_yticks(()) 24 | 25 | #plt.tight_layout() 26 | plt.savefig("mnist_pca.png") 27 | 
-------------------------------------------------------------------------------- /Chapter10/codes/sift.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import random 4 | 5 | image = cv2.imread('image.jpg') 6 | image_rot = cv2.imread('image_rot.jpg') 7 | gray= cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) 8 | gray_rot = cv2.cvtColor(image_rot,cv2.COLOR_BGR2GRAY) 9 | 10 | surf = cv2.xfeatures2d.SURF_create() 11 | 12 | kp, desc = surf.detectAndCompute(gray,None) 13 | kp_rot, desc_rot = surf.detectAndCompute(gray_rot, None) 14 | 15 | # BFMatcher with default params 16 | bf = cv2.BFMatcher() 17 | matches = bf.knnMatch(desc,desc_rot, k=2) 18 | 19 | # Apply ratio test 20 | good = [] 21 | for m,n in matches: 22 | if m.distance < 0.4*n.distance: 23 | good.append([m]) 24 | random.shuffle(good) 25 | 26 | # cv2.drawMatchesKnn expects list of lists as matches. 27 | image_match = cv2.drawMatchesKnn(image,kp,image_rot,kp_rot,good[:10],flags=2, outImg=None) 28 | 29 | cv2.imwrite('surf_matches.jpg',image_match) 30 | -------------------------------------------------------------------------------- /Chapter05/codes/KMeans.py: -------------------------------------------------------------------------------- 1 | from sklearn import datasets, metrics 2 | from sklearn.cluster import KMeans 3 | 4 | mnist = datasets.load_digits() 5 | 6 | images = mnist.images 7 | 8 | data_size = len(images) 9 | 10 | #Preprocessing images 11 | images = images.reshape(len(images), -1) 12 | labels = mnist.target 13 | 14 | #Initialize Logistic Regression 15 | clustering = KMeans(n_clusters=10, init='k-means++', n_init=10) 16 | 17 | #Training the data on only 75% of the dataset. 
Rest of the 25% will be used in testing the KMeans Clustering 18 | clustering.fit(images[:int((data_size / 4) * 3)]) 19 | 20 | #Print the centers of the different clusters 21 | print(clustering.labels_) 22 | 23 | #Testing the data 24 | predictions = clustering.predict(images[int((data_size / 4)):]) 25 | target = labels[int((data_size/4)):] 26 | 27 | #Print the performance report of the Logistic Regression model that we learnt 28 | print("Performance Report: \n %s \n" % (metrics.classification_report(target, predictions))) 29 | -------------------------------------------------------------------------------- /Chapter03/code/harris.py: -------------------------------------------------------------------------------- 1 | from matplotlib import pyplot as plt 2 | from skimage.io import imread 3 | from skimage.color import rgb2gray 4 | from skimage.feature import corner_harris, corner_subpix, corner_peaks 5 | 6 | #Read an image 7 | image = imread('test.png') 8 | image = rgb2gray(image) 9 | 10 | #Compute the Harris corners in the image. 
This returns a corner measure response for each pixel in the image 11 | corners = corner_harris(image) 12 | 13 | #Using the corner response image we calculate the actual corners in the image 14 | coords = corner_peaks(corners, min_distance=5) 15 | 16 | # This function decides if the corner point is an edge point or an isolated peak 17 | coords_subpix = corner_subpix(image, coords, window_size=13) 18 | 19 | fig, ax = plt.subplots() 20 | ax.imshow(image, interpolation='nearest', cmap=plt.cm.gray) 21 | ax.plot(coords[:, 1], coords[:, 0], '.b', markersize=3) 22 | ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15) 23 | ax.axis((0, 350, 350, 0)) 24 | plt.show() 25 | -------------------------------------------------------------------------------- /Chapter05/codes/LR.py: -------------------------------------------------------------------------------- 1 | from sklearn import datasets, metrics 2 | from sklearn.linear_model import LogisticRegression 3 | from sklearn.preprocessing import StandardScaler 4 | 5 | mnist = datasets.load_digits() 6 | 7 | img_tuple = list(zip(mnist.images, mnist.target)) 8 | 9 | images = mnist.images 10 | 11 | data_size = len(images) 12 | 13 | #Preprocessing images 14 | images = images.reshape(len(images), -1) 15 | labels = mnist.target 16 | 17 | #Initialize Logistic Regression 18 | LR_classifier = LogisticRegression(C=0.01, penalty='l1', tol=0.01) 19 | #Training the data on only 75% of the dataset. 
Rest of the 25% will be used in testing the Logistic Regression 20 | LR_classifier.fit(images[:int((data_size / 4) * 3)], labels[:int((data_size / 4) * 3)]) 21 | 22 | #Testing the data 23 | predictions = LR_classifier.predict(images[int((data_size / 4)):]) 24 | target = labels[int((data_size/4)):] 25 | 26 | #Print the performance report of the Logistic Regression model that we learnt 27 | print("Performance Report: \n %s \n" % (metrics.classification_report(target, predictions))) 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Packt 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Chapter06/codes/mlp.py: -------------------------------------------------------------------------------- 1 | from sklearn.datasets import fetch_mldata 2 | from sklearn.neural_network import MLPClassifier 3 | from sklearn.preprocessing import normalize 4 | from sklearn.model_selection import train_test_split 5 | 6 | #Get MNIST Dataset 7 | print('Getting MNIST Data...') 8 | mnist = fetch_mldata('MNIST original') 9 | print('MNIST Data downloaded!') 10 | 11 | images = mnist.data 12 | labels = mnist.target 13 | 14 | #Preprocess the images 15 | images = normalize(images, norm='l2') #You can use l1 norm too 16 | 17 | #Split the data into training set and test set 18 | images_train, images_test, labels_train, labels_test = train_test_split(images, labels, test_size=0.25, random_state=17) 19 | 20 | #Setup the neural network that we want to train on 21 | nn = MLPClassifier(hidden_layer_sizes=(200), max_iter=20, solver='sgd', learning_rate_init=0.001, verbose=True) 22 | 23 | #Start training the network 24 | print('NN Training started...') 25 | nn.fit(images_train, labels_train) 26 | print('NN Training completed!') 27 | 28 | #Evaluate the performance of the neural network on test data 29 | print('Network Performance: %f' % nn.score(images_test, labels_test)) 30 | -------------------------------------------------------------------------------- /Chapter09/Client/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 16 | 17 |
18 |
19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /Chapter09/bin/activate.csh: -------------------------------------------------------------------------------- 1 | # This file must be used with "source bin/activate.csh" *from csh*. 2 | # You cannot run it directly. 3 | # Created by Davide Di Blasi . 4 | 5 | alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc' 6 | 7 | # Unset irrelevant variables. 8 | deactivate nondestructive 9 | 10 | setenv VIRTUAL_ENV "/Users/salil/Work/Book_CV_Using_Python3/Chapter9/CVaaS/flask" 11 | 12 | set _OLD_VIRTUAL_PATH="$PATH" 13 | setenv PATH "$VIRTUAL_ENV/bin:$PATH" 14 | 15 | 16 | 17 | if ("" != "") then 18 | set env_name = "" 19 | else 20 | set env_name = `basename "$VIRTUAL_ENV"` 21 | endif 22 | 23 | # Could be in a non-interactive environment, 24 | # in which case, $prompt is undefined and we wouldn't 25 | # care about the prompt anyway. 26 | if ( $?prompt ) then 27 | set _OLD_VIRTUAL_PROMPT="$prompt" 28 | set prompt = "[$env_name] $prompt" 29 | endif 30 | 31 | unset env_name 32 | 33 | alias pydoc python -m pydoc 34 | 35 | rehash 36 | 37 | -------------------------------------------------------------------------------- /Chapter04/CODES/plot_ncut.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============== 3 | Normalized Cut 4 | ============== 5 | 6 | This example constructs a Region Adjacency Graph (RAG) and recursively performs 7 | a Normalized Cut on it. 8 | 9 | References 10 | ---------- 11 | .. [1] Shi, J.; Malik, J., "Normalized cuts and image segmentation", 12 | Pattern Analysis and Machine Intelligence, 13 | IEEE Transactions on, vol. 22, no. 8, pp. 
888-905, August 2000. 14 | """ 15 | 16 | from skimage import data, segmentation, color 17 | from skimage.io import imread 18 | from skimage import data 19 | from skimage.future import graph 20 | from matplotlib import pyplot as plt 21 | 22 | 23 | img = data.astronaut() 24 | 25 | img_segments = segmentation.slic(img, compactness=30, n_segments=200) 26 | out1 = color.label2rgb(img_segments, img, kind='avg') 27 | 28 | segment_graph = graph.rag_mean_color(img, img_segments, mode='similarity') 29 | img_cuts = graph.cut_normalized(img_segments, segment_graph) 30 | normalized_cut_segments = color.label2rgb(img_cuts, img, kind='avg') 31 | 32 | fig, ax = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True, figsize=(6, 8)) 33 | 34 | ax[0].imshow(img) 35 | ax[1].imshow(normalized_cut_segments) 36 | 37 | for a in ax: 38 | a.axis('off') 39 | 40 | plt.tight_layout() 41 | plt.show() 42 | -------------------------------------------------------------------------------- /Chapter09/bin/activate_this.py: -------------------------------------------------------------------------------- 1 | """By using execfile(this_file, dict(__file__=this_file)) you will 2 | activate this virtualenv environment. 
3 | 4 | This can be used when you must use an existing Python interpreter, not 5 | the virtualenv bin/python 6 | """ 7 | 8 | try: 9 | __file__ 10 | except NameError: 11 | raise AssertionError( 12 | "You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))") 13 | import sys 14 | import os 15 | 16 | old_os_path = os.environ.get('PATH', '') 17 | os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path 18 | base = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 19 | if sys.platform == 'win32': 20 | site_packages = os.path.join(base, 'Lib', 'site-packages') 21 | else: 22 | site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages') 23 | prev_sys_path = list(sys.path) 24 | import site 25 | site.addsitedir(site_packages) 26 | sys.real_prefix = sys.prefix 27 | sys.prefix = base 28 | # Move the added items to the front of the path: 29 | new_sys_path = [] 30 | for item in list(sys.path): 31 | if item not in prev_sys_path: 32 | new_sys_path.append(item) 33 | sys.path.remove(item) 34 | sys.path[:0] = new_sys_path 35 | -------------------------------------------------------------------------------- /Chapter05/codes/LR_user_input.py: -------------------------------------------------------------------------------- 1 | from sklearn import datasets, metrics 2 | from sklearn.linear_model import LogisticRegression 3 | from sklearn.preprocessing import StandardScaler 4 | from skimage import io, color, feature, transform 5 | 6 | mnist = datasets.load_digits() 7 | 8 | img_tuple = list(zip(mnist.images, mnist.target)) 9 | 10 | images = mnist.images 11 | 12 | data_size = len(images) 13 | 14 | #Preprocessing images 15 | images = images.reshape(len(images), -1) 16 | labels = mnist.target 17 | 18 | #Initialize Logistic Regression 19 | LR_classifier = LogisticRegression(C=0.01, penalty='l1', tol=0.01) 20 | #Training the data on only 75% of the dataset. 
from sklearn import datasets, metrics
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from skimage import io, color, feature, transform

mnist = datasets.load_digits()

img_tuple = list(zip(mnist.images, mnist.target))

images = mnist.images

data_size = len(images)

# Preprocessing: flatten each 8x8 image into a 64-dim pixel-intensity vector.
images = images.reshape(len(images), -1)
labels = mnist.target

# Initialize Logistic Regression
LR_classifier = LogisticRegression(C=0.01, penalty='l1', tol=0.01)
# Train on 75% of the dataset; the remaining 25% is left for testing.
train_size = int((data_size / 4) * 3)
LR_classifier.fit(images[:train_size], labels[:train_size])

# Load a custom image
digit_img = io.imread('digit.png')
# Convert image to grayscale
digit_img = color.rgb2gray(digit_img)

# BUG FIX (comment): resize to 8x8 -- the size load_digits() images have --
# not 28x28 as the old comment claimed.
digit_img = transform.resize(digit_img, (8, 8), mode="wrap")

# Edge detection is kept for visualisation only.  The classifier was trained
# on raw pixel intensities, so predicting on boolean canny edges was a
# train/test feature mismatch.
digit_edge = feature.canny(digit_img, sigma=5)

io.imshow(digit_img)
io.show()

# BUG FIX: predict on pixel features scaled to the 0-16 range used by
# load_digits(), reshaped to (1, n_features) -- scikit-learn requires a 2-D
# array of samples and rejects the old flattened 1-D input.
digit_features = (digit_img * 16).reshape(1, -1)
predictions = LR_classifier.predict(digit_features)

print(predictions)
from matplotlib import pyplot as plt
from skimage.io import imread
from skimage.color import rgb2gray
from skimage.draw import disk
from skimage.feature import corner_harris, corner_subpix, corner_peaks
import math
import numpy as np

# Read an image and convert it to grayscale.
image = imread('test.png')
image = rgb2gray(image)

# Compute the Harris corner measure response for each pixel in the image.
corners = corner_harris(image)

# Using the corner response image we calculate the actual corners in the image.
coords = corner_peaks(corners, min_distance=5)

# Refine each corner to sub-pixel accuracy; rows are NaN where refinement
# failed (e.g. the point is an edge point rather than an isolated peak).
coords_subpix = corner_subpix(image, coords, window_size=13)
image_corner = np.copy(image)

for corner in coords_subpix:
    if math.isnan(corner[0]) or math.isnan(corner[1]):
        continue
    corner = [int(x) for x in corner]
    # BUG FIX: skimage.draw.circle was removed in scikit-image 0.19; disk()
    # is its replacement.  Passing shape= clips the disk at the image border
    # so corners near an edge cannot raise an IndexError.
    rr, cc = disk((corner[0], corner[1]), 5, shape=image_corner.shape)
    image_corner[rr, cc] = 255

print(image)
# Brighten the base image and overlay the drawn corner markers for display.
image = image * 255 + image_corner

fig, ax = plt.subplots()
ax.imshow(image, interpolation='nearest', cmap=plt.cm.gray)
plt.show()
import cv2
import numpy as np
import time

start_time = time.time()

# Open the video and seek past the first 1800 frames.
cap = cv2.VideoCapture("/Users/salil/Work/DeepMagic/Data/DM_Shopping_Data/dm170317_1a/ch03_20170317113600.mp4", cv2.CAP_FFMPEG)
cap.set(cv2.CAP_PROP_POS_FRAMES, 1800)
ret, frame1 = cap.read()
frame1 = cv2.resize(frame1, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)

prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
# HSV canvas for visualising the flow field: hue=direction, value=magnitude,
# saturation fixed at full.
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255

count = 0

while 1:
    print("Processing frame " + str(count) + " ...")
    count = count + 1
    ret, frame2 = cap.read()
    # BUG FIX: cap.read() returns ret=False (not None) past the end of the
    # stream; the old `ret == None` test never fired, so the script crashed
    # inside cvtColor on a None frame instead of exiting cleanly.
    if not ret:
        break
    # BUG FIX: skip every third frame AFTER reading it.  The old code hit
    # `continue` before cap.read(), so no frame was ever actually skipped.
    if count % 3 == 0:
        continue

    frame2 = cv2.resize(frame2, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)

    # Renamed from `next`, which shadowed the builtin.
    next_frame = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    flow = cv2.calcOpticalFlowFarneback(prvs, next_frame, None, pyr_scale=0.5, levels=3, winsize=15, iterations=3, poly_n=5, poly_sigma=1.2, flags=0)

    # Convert flow vectors to polar form and paint them into the HSV canvas.
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    elif k == ord('s'):
        cv2.imwrite('opticalfb.png', frame2)
        cv2.imwrite('opticalhsv.png', bgr)
    prvs = next_frame

# BUG FIX: `"Run time: " % (...)` raised TypeError -- the string carried no
# format specifier.
print("Run time: %s" % (time.time() - start_time))
cap.release()
cv2.destroyAllWindows()
from flask import Flask, request
from flask_cors import CORS, cross_origin
import base64
from PIL import Image
from io import StringIO, BytesIO
import cv2
import numpy as np

app = Flask('CVaaS')
CORS(app)

def cv_engine(img, operation):
    """Run the requested operation on a BGR image; return None for unknown ops."""
    if operation == 'to_grayscale':
        return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    elif operation == 'get_edge_canny':
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return cv2.Canny(gray, 100, 200, 3)
    return None

def read_image(image_data):
    """Decode base64 JPEG bytes straight into an OpenCV BGR image."""
    image_data = base64.decodebytes(image_data)
    # Decode in memory instead of round-tripping through a shared temp file,
    # which was both slower and unsafe under concurrent requests.
    buf = np.frombuffer(image_data, dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)

def encode_image(img):
    """Encode an image as base64 JPEG bytes."""
    ret, data = cv2.imencode('.jpg', img)
    return base64.b64encode(data)

# This is the server to handle requests and get images from the client.
@app.route('/process_image', methods=['POST'])
def process_image():
    # BUG FIX: validate the keys the handler actually reads; the old check
    # only required 'msg' and then crashed with KeyError on missing fields.
    if not request.json or 'image_data' not in request.json or 'operation' not in request.json:
        return 'Server Error!', 500

    # Strip the 'data:image/jpeg;base64,' prefix (23 chars) from the data URL.
    image_data = request.json['image_data'][23:].encode()
    operation = request.json['operation']
    img = read_image(image_data)
    img_out = cv_engine(img, operation)
    if img_out is None:
        # BUG FIX: an unknown operation used to crash inside imencode(None);
        # report it to the caller instead.
        return 'Unsupported operation', 400
    image_data = encode_image(img_out)
    # The client consumes the raw base64 payload directly, so return it as-is
    # (the old unused `result` dict has been dropped).
    return image_data, 200

@app.route('/')
def index():
    return 'Hello World'

if __name__ == '__main__':
    app.run(debug=True)
from skimage import data
from skimage.io import imread
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_harris,
                             corner_peaks, ORB, plot_matches)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt

# Read the test image and convert it to grayscale.
image = imread('test.png')
image_org = rgb2gray(image)

# Prepare a rotated copy, only to demonstrate feature matching.
image_rot = tf.rotate(image_org, 180)

# Prepare an affine-transformed copy as a second matching target.
tform = tf.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
                           translation=(0, -200))
image_aff = tf.warp(image_org, tform)

# Initialize the ORB feature detector/descriptor.
descriptor_extractor = ORB(n_keypoints=200)

# Extract keypoints and descriptors from the original image...
descriptor_extractor.detect_and_extract(image_org)
keypoints_org = descriptor_extractor.keypoints
descriptors_org = descriptor_extractor.descriptors

# ...from the rotated image...
descriptor_extractor.detect_and_extract(image_rot)
keypoints_rot = descriptor_extractor.keypoints
descriptors_rot = descriptor_extractor.descriptors

# ...and from the affine-transformed image.
descriptor_extractor.detect_and_extract(image_aff)
keypoints_aff = descriptor_extractor.keypoints
descriptors_aff = descriptor_extractor.descriptors

matches_org_rot = match_descriptors(descriptors_org, descriptors_rot, cross_check=True)
matches_org_aff = match_descriptors(descriptors_org, descriptors_aff, cross_check=True)

fig, ax = plt.subplots(nrows=2, ncols=1)

plt.gray()

plot_matches(ax[0], image_org, image_rot, keypoints_org, keypoints_rot, matches_org_rot)
ax[0].axis('off')
# BUG FIX: both subplots carried the identical generic title; label each one
# with the transform it actually shows.
ax[0].set_title("Original Image vs. Rotated Image")

plot_matches(ax[1], image_org, image_aff, keypoints_org, keypoints_aff, matches_org_aff)
ax[1].axis('off')
ax[1].set_title("Original Image vs. Affine Transformed Image")

plt.show()
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

# params for ShiTomasi corner detection
feature_params = dict(maxCorners=1000,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=5,
                      useHarrisDetector=1,
                      k=0.04)
# Parameters for Lucas-Kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2)

# Create some random colors, one per potential track
color = np.random.randint(0, 255, (1000, 3))

# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
# Create a mask image for drawing the motion tracks
mask = np.zeros_like(old_frame)

count = 0  # To keep track of how many frames have been read

while 1:
    ret, frame = cap.read()
    # BUG FIX: bail out when the capture stops delivering frames instead of
    # crashing inside cvtColor on a None frame.
    if not ret:
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        # BUG FIX: drawing functions require integer pixel coordinates;
        # ravel() yields floats, which newer OpenCV releases reject.
        a, b = (int(v) for v in new.ravel())
        c, d = (int(v) for v in old.ravel())
        mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
        frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
    img = cv2.add(frame, mask)
    cv2.imshow('frame', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    count = count + 1
    if count % 100 == 0:
        # Recompute goodFeaturesToTrack as the scene may have changed drastically
        p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    else:
        p0 = good_new.reshape(-1, 1, 2)
cv2.destroyAllWindows()
cap.release()
ax = axes.ravel()

ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title("Original")

# BUG FIX: plt.cm.spectral was removed in Matplotlib 3.2; nipy_spectral is
# the direct replacement colormap.
ax[1].imshow(gradient, cmap=plt.cm.nipy_spectral, interpolation='nearest')
ax[1].set_title("Local Gradient")

ax[2].imshow(markers, cmap=plt.cm.nipy_spectral, interpolation='nearest')
ax[2].set_title("Markers")

# Overlay the watershed labels semi-transparently on the original image.
ax[3].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[3].imshow(labels, cmap=plt.cm.nipy_spectral, interpolation='nearest', alpha=.7)
ax[3].set_title("Segmented")

for a in ax:
    a.axis('off')

fig.tight_layout()
plt.show()
wall_rot_lbp = local_binary_pattern(wall_rot, 16, 2, 'uniform')

# We will pick the brick image and try to find its best match among the
# rotated images by comparing LBP histograms with KL divergence.

bins_num = int(brick_lbp.max() + 1)
# BUG FIX: np.histogram returns (counts, bin_edges) -- the original passed
# the whole tuple onward, so the later np.asarray produced garbage.  Also,
# `normed=` was removed from NumPy; `density=` is the replacement.
brick_hist, _ = np.histogram(brick_lbp, density=True, bins=bins_num, range=(0, bins_num))

lbp_features = [brick_rot_lbp, grass_rot_lbp, wall_rot_lbp]
min_score = 1000  # Large sentinel; any real divergence will be smaller
winner = -1  # To keep track of the best-matching image
idx = 0

for feature in lbp_features:
    histogram, _ = np.histogram(feature, density=True, bins=bins_num, range=(0, bins_num))
    p = np.asarray(brick_hist)
    q = np.asarray(histogram)
    # Kullback-Leibler divergence computed over the non-zero bins only.
    filter_idx = np.logical_and(p != 0, q != 0)
    score = np.sum(p[filter_idx] * np.log2(p[filter_idx] / q[filter_idx]))
    if score < min_score:
        min_score = score
        winner = idx
    idx = idx + 1

# BUG FIX: the original tested `idx` here, which is always 3 after the loop,
# so no message was ever printed.  Test the recorded winner instead.
if winner == 0:
    print('Brick matched with Brick Rotated')
elif winner == 1:
    print('Brick matched with Grass Rotated')
elif winner == 2:
    print('Brick matched with Wall Rotated')
[ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then 15 | PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME" 16 | export PYTHONHOME 17 | unset _OLD_VIRTUAL_PYTHONHOME 18 | fi 19 | 20 | # This should detect bash and zsh, which have a hash command that must 21 | # be called to get it to forget past commands. Without forgetting 22 | # past commands the $PATH changes we made may not be respected 23 | if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then 24 | hash -r 2>/dev/null 25 | fi 26 | 27 | if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then 28 | PS1="$_OLD_VIRTUAL_PS1" 29 | export PS1 30 | unset _OLD_VIRTUAL_PS1 31 | fi 32 | 33 | unset VIRTUAL_ENV 34 | if [ ! "${1-}" = "nondestructive" ] ; then 35 | # Self destruct! 36 | unset -f deactivate 37 | fi 38 | } 39 | 40 | # unset irrelevant variables 41 | deactivate nondestructive 42 | 43 | VIRTUAL_ENV="/Users/salil/Work/Book_CV_Using_Python3/Chapter9/CVaaS/flask" 44 | export VIRTUAL_ENV 45 | 46 | _OLD_VIRTUAL_PATH="$PATH" 47 | PATH="$VIRTUAL_ENV/bin:$PATH" 48 | export PATH 49 | 50 | # unset PYTHONHOME if set 51 | if ! [ -z "${PYTHONHOME+_}" ] ; then 52 | _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME" 53 | unset PYTHONHOME 54 | fi 55 | 56 | if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then 57 | _OLD_VIRTUAL_PS1="$PS1" 58 | if [ "x" != x ] ; then 59 | PS1="$PS1" 60 | else 61 | PS1="(`basename \"$VIRTUAL_ENV\"`) $PS1" 62 | fi 63 | export PS1 64 | fi 65 | 66 | # Make sure to unalias pydoc if it's already there 67 | alias pydoc 2>/dev/null >/dev/null && unalias pydoc 68 | 69 | pydoc () { 70 | python -m pydoc "$@" 71 | } 72 | 73 | # This should detect bash and zsh, which have a hash command that must 74 | # be called to get it to forget past commands. 
var fr; // FileReader used to load the selected local image
var is_img_ready = false;

// Function to load the image from a local path into the canvas.
function loadImage() {
    img_src = document.getElementById('img_src');
    if (!img_src.files[0]) {
        alert('Please select an Image first!')
        return;
    }
    fr = new FileReader();
    fr.onload = updateImage;
    fr.readAsDataURL(img_src.files[0])
}

// Draw the freshly read image onto the local canvas.
function updateImage() {
    img = new Image();

    img.onload = function() {
        var canvas = document.getElementById("local_canvas")
        canvas.width = img.width;
        canvas.height = img.height;
        var ctx = canvas.getContext("2d");
        ctx.drawImage(img, 0, 0);
        // BUG FIX: only mark the image ready once it has actually been
        // decoded and drawn.  Previously the flag was set synchronously
        // right after assigning img.src, racing the asynchronous load, so
        // processImage() could run against an empty canvas.
        is_img_ready = true;
    };
    img.src = fr.result;
}

// Render the base64 JPEG returned by the server onto the result canvas.
function loadProcessedImage(data) {
    img = new Image();

    img.onload = function() {
        var processedCanvas = document.getElementById('processed_canvas');
        var localCanvas = document.getElementById('local_canvas');
        processedCanvas.width = localCanvas.width;
        processedCanvas.height = localCanvas.height;
        ctx = processedCanvas.getContext('2d');
        ctx.drawImage(img, 0, 0);
    };
    console.log(data);
    img.src = 'data:image/jpeg;base64,' + data;
}

// Ship the canvas contents plus the chosen operation to the server.
function processImage() {
    if (is_img_ready == false) {
        alert('No image to process!');
        return;
    }

    //Send the image to the server and wait for a response
    canvas = document.getElementById('local_canvas');
    image_data = canvas.toDataURL('image/jpeg');
    img_op = document.getElementById('image_op');
    op = img_op.options[img_op.selectedIndex].value;

    $.ajax({
        url: "http://localhost:5000/process_image",
        method: "POST",
        contentType: 'application/json',
        crossDomain: true,
        data: JSON.stringify({
            image_data: image_data,
            msg: 'This is image data',
            operation: op
        }),
        success: function(data) {
            loadProcessedImage(data);
        },
        error: function(err) {
            console.log(err)
        }
    });
}
from skimage.feature import ORB, match_descriptors
from skimage.io import imread
from skimage.measure import ransac
from skimage.transform import ProjectiveTransform
from skimage.color import rgb2gray
from skimage.io import imsave, show
from skimage.color import gray2rgb
from skimage.exposure import rescale_intensity
from skimage.transform import warp
from skimage.transform import SimilarityTransform
import numpy as np


# Load the two halves of the panorama in grayscale.
image0 = imread('goldengate1.png')
image0 = rgb2gray(image0)

image1 = imread('goldengate2.png')
image1 = rgb2gray(image1)

orb = ORB(n_keypoints=1000, fast_threshold=0.05)

orb.detect_and_extract(image0)
keypoints1 = orb.keypoints
descriptors1 = orb.descriptors

orb.detect_and_extract(image1)
keypoints2 = orb.keypoints
descriptors2 = orb.descriptors

matches12 = match_descriptors(descriptors1,
                              descriptors2,
                              cross_check=True)

# Matched coordinates, flipped to (col, row) order for the transform.
src = keypoints2[matches12[:, 1]][:, ::-1]
dst = keypoints1[matches12[:, 0]][:, ::-1]

# Robustly estimate the projective transform mapping image1 onto image0.
transform_model, inliers = \
    ransac((src, dst), ProjectiveTransform,
           min_samples=4, residual_threshold=2)

r, c = image1.shape[:2]

# Corners of image1 in its own coordinate frame.
corners = np.array([[0, 0],
                    [0, r],
                    [c, 0],
                    [c, r]])

warped_corners = transform_model(corners)

all_corners = np.vstack((warped_corners, corners))

corner_min = np.min(all_corners, axis=0)
corner_max = np.max(all_corners, axis=0)

output_shape = (corner_max - corner_min)
# Integer (rows, cols) output size -- warp rejects a float shape.
output_shape = np.ceil(output_shape[::-1]).astype(int)

offset = SimilarityTransform(translation=-corner_min)

# cval=-1 marks pixels outside each warped image so masks can be built below.
image0_warp = warp(image0, offset.inverse,
                   output_shape=output_shape, cval=-1)

# BUG FIX: the original referenced an undefined name `model_robust`; the
# transform estimated above is `transform_model`.
image1_warp = warp(image1, (transform_model + offset).inverse,
                   output_shape=output_shape, cval=-1)

image0_mask = (image0_warp != -1)
image0_warp[~image0_mask] = 0
image0_alpha = np.dstack((gray2rgb(image0_warp), image0_mask))


image1_mask = (image1_warp != -1)
image1_warp[~image1_mask] = 0
image1_alpha = np.dstack((gray2rgb(image1_warp), image1_mask))


merged = (image0_alpha + image1_alpha)

# BUG FIX: a stray trailing "." after this expression was a syntax error.
alpha = merged[..., 3]
# Average where the two warped images overlap (alpha == 2 there).
merged /= np.maximum(alpha, 1)[..., np.newaxis]

imsave('output.jpg', merged)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import manifold, datasets, decomposition

digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape


# ----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
    """Rescale a 2-D embedding to [0, 1] and plot each sample as its digit,
    colored by class label."""
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)

    plt.figure()
    plt.subplot(111)

    for i in range(X.shape[0]):
        # `y[i]` is the class label (identical to digits.target[i]); using it
        # consistently for both the glyph and the color.
        plt.text(X[i, 0], X[i, 1], str(y[i]),
                 color=plt.cm.Set1(y[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title)


# ----------------------------------------------------------------------
# Plot a 20x20 grid of sample digit images from the dataset
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
    ix = 10 * i + 1
    for j in range(n_img_per_row):
        iy = 10 * j + 1
        img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))

plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')

# ----------------------------------------------------------------------
# Projection on to the first 2 principal components

print("Computing PCA projection")
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca)

# ----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
X_tsne = tsne.fit_transform(X)

plot_embedding(X_tsne)
plt.show()
It contains all the supporting project files necessary to work through the book from start to finish.

## About the Book
This book is a thorough guide for developers who want to get started with building computer vision applications using Python 3. The book is divided into five sections: The Fundamentals of Image Processing, Applied Computer Vision, Making Applications Smarter, Extending Your Capabilities Using OpenCV, and Getting Hands On. Throughout this book, three image processing libraries—Pillow, Scikit-Image, and OpenCV—will be used to implement different computer vision algorithms.

The book aims to equip readers to build computer vision applications that are capable of working effectively in real-world scenarios. Some of the applications that we will look at in the book are Optical Character Recognition, Object Tracking, and building a Computer Vision as a Service platform that works over the internet.

## Instructions and Navigation
All of the code is organized into folders, one per chapter of the book. For example, Chapter02.

<!-- NOTE(review): the code sample and the hardware/IDE instructions below
appear to be boilerplate carried over from a different (Bluetooth/BLE) book.
They should be replaced with Python/computer-vision content; preserved here
pending confirmation. -->
The code will look like the following:
```
private void initialiseBluetooth() {
    bluetoothManager =
        (BluetoothManager)getSystemService
        (Context.BLUETOOTH_SERVICE);
    bluetoothAdapter = bluetoothManager.getAdapter();
    bluetoothLeScanner = bluetoothAdapter.getBluetoothLeScanner();
}
```

This book will guide you through the installation of all the tools that you need to follow the code samples. Code samples introduced in various chapters are for both Android and iOS platforms, so you will need to install the Android Studio and Xcode IDEs. Since simulators lack Bluetooth functionality, you will need physical Android and iOS devices to run the code samples. In terms of hardware, you will need a Raspberry Pi for the code lab specific to Chapter 5, Beacons with Raspberry Pi.
For Chapter 4, Designing a Personal Tracking System, and Chapter 6, Weather Monitoring Using BLE in Warehouses, you will be needing a very low cost iTag and the Texas Instruments Sensor Tag. All of the hardware can be easily procured online. 26 | 27 | ## Related Products 28 | * [Learning OpenCV 3 Computer Vision with Python - Second Edition](https://www.packtpub.com/application-development/learning-opencv-3-computer-vision-python-second-edition?utm_source=github&utm_medium=repository&utm_campaign=9781785283840) 29 | 30 | * [OpenCV: Computer Vision Projects with Python](https://www.packtpub.com/application-development/opencv-computer-vision-projects-python?utm_source=github&utm_medium=repository&utm_campaign=9781787125490) 31 | 32 | * [OpenCV 3 Computer Vision Application Programming Cookbook - Third Edition](https://www.packtpub.com/application-development/opencv-3-computer-vision-application-programming-cookbook-third-edition?utm_source=github&utm_medium=repository&utm_campaign=9781786469717) 33 | 34 | 35 | -------------------------------------------------------------------------------- /Chapter05/codes/plot_kmeans_digits.py: -------------------------------------------------------------------------------- 1 | from time import time 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | 5 | from sklearn import metrics 6 | from sklearn.cluster import KMeans 7 | from sklearn.datasets import load_digits 8 | from sklearn.decomposition import PCA 9 | from sklearn.preprocessing import scale 10 | 11 | np.random.seed(42) 12 | 13 | digits = load_digits() 14 | data = scale(digits.data) 15 | 16 | n_samples, n_features = data.shape 17 | n_digits = len(np.unique(digits.target)) 18 | labels = digits.target 19 | 20 | sample_size = 300 21 | 22 | print("n_digits: %d, \t n_samples %d, \t n_features %d" 23 | % (n_digits, n_samples, n_features)) 24 | 25 | 26 | print(79 * '_') 27 | print('% 9s' % 'init' 28 | ' time inertia homo compl v-meas ARI AMI silhouette') 29 | 30 | 31 | 
def bench_k_means(estimator, name, data): 32 | t0 = time() 33 | estimator.fit(data) 34 | print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f' 35 | % (name, (time() - t0), estimator.inertia_, 36 | metrics.homogeneity_score(labels, estimator.labels_), 37 | metrics.completeness_score(labels, estimator.labels_), 38 | metrics.v_measure_score(labels, estimator.labels_), 39 | metrics.adjusted_rand_score(labels, estimator.labels_), 40 | metrics.adjusted_mutual_info_score(labels, estimator.labels_), 41 | metrics.silhouette_score(data, estimator.labels_, 42 | metric='euclidean', 43 | sample_size=sample_size))) 44 | 45 | bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10), 46 | name="k-means++", data=data) 47 | 48 | bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10), 49 | name="random", data=data) 50 | 51 | # in this case the seeding of the centers is deterministic, hence we run the 52 | # kmeans algorithm only once with n_init=1 53 | pca = PCA(n_components=n_digits).fit(data) 54 | bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1), 55 | name="PCA-based", 56 | data=data) 57 | print(79 * '_') 58 | 59 | ############################################################################### 60 | # Visualize the results on PCA-reduced data 61 | 62 | reduced_data = PCA(n_components=2).fit_transform(data) 63 | kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10) 64 | kmeans.fit(reduced_data) 65 | 66 | # Step size of the mesh. Decrease to increase the quality of the VQ. 67 | h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max]. 68 | 69 | # Plot the decision boundary. For that, we will assign a color to each 70 | x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1 71 | y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1 72 | xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) 73 | 74 | # Obtain labels for each point in mesh. Use last trained model. 
75 | Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()]) 76 | 77 | # Put the result into a color plot 78 | Z = Z.reshape(xx.shape) 79 | plt.figure(1) 80 | plt.clf() 81 | plt.imshow(Z, interpolation='nearest', 82 | extent=(xx.min(), xx.max(), yy.min(), yy.max()), 83 | cmap=plt.cm.Paired, 84 | aspect='auto', origin='lower') 85 | 86 | plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2) 87 | # Plot the centroids as a white X 88 | centroids = kmeans.cluster_centers_ 89 | plt.scatter(centroids[:, 0], centroids[:, 1], 90 | marker='x', s=169, linewidths=3, 91 | color='w', zorder=10) 92 | plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n' 93 | 'Centroids are marked with white cross') 94 | plt.xlim(x_min, x_max) 95 | plt.ylim(y_min, y_max) 96 | plt.xticks(()) 97 | plt.yticks(()) 98 | plt.show() 99 | --------------------------------------------------------------------------------