├── .gitattributes ├── .gitignore ├── Makefile ├── README.md ├── data ├── butterfly.jpg ├── home.jpg ├── left.jpg ├── messi5.jpg ├── readme.txt └── right.jpg ├── indexplan.ods ├── make.bat └── source ├── conf.py ├── index.rst ├── opencv-logo-white.png └── py_tutorials ├── images ├── MachineLearnings.jpg ├── calib3d_icon.jpg ├── core.jpg ├── featureicon.jpg ├── gui.jpg ├── imgproc.jpg ├── intro.png ├── obj_icon.jpg ├── photoicon.jpg └── videoicon.jpg ├── py_bindings ├── py_bindings_basics │ └── py_bindings_basics.rst └── py_table_of_contents_bindings │ ├── images │ ├── inpainticon.jpg │ └── nlm_icon.jpg │ └── py_table_of_contents_bindings.rst ├── py_calib3d ├── py_calibration │ ├── images │ │ ├── calib_pattern.jpg │ │ ├── calib_radial.jpg │ │ └── calib_result.jpg │ └── py_calibration.rst ├── py_depthmap │ ├── images │ │ ├── disparity_map.jpg │ │ └── stereo_depth.jpg │ └── py_depthmap.rst ├── py_epipolar_geometry │ ├── images │ │ ├── epipolar.jpg │ │ ├── epiresult.jpg │ │ └── essential_matrix.jpg │ └── py_epipolar_geometry.rst ├── py_pose │ ├── images │ │ ├── pose_1.jpg │ │ └── pose_2.jpg │ └── py_pose.rst └── py_table_of_contents_calib3d │ ├── images │ ├── calibration_icon.jpg │ ├── depthmap_icon.jpg │ ├── epipolar_icon.jpg │ └── pose_icon.jpg │ └── py_table_of_contents_calib3d.rst ├── py_core ├── py_basic_ops │ ├── images │ │ ├── border.jpg │ │ └── roi.jpg │ └── py_basic_ops.rst ├── py_image_arithmetics │ ├── images │ │ ├── blending.jpg │ │ └── overlay.jpg │ └── py_image_arithmetics.rst ├── py_maths_tools │ └── py_maths_tools.rst ├── py_optimization │ └── py_optimization.rst └── py_table_of_contents_core │ ├── images │ ├── image_arithmetic.jpg │ ├── maths_tools.jpg │ ├── pixel_ops.jpg │ └── speed.jpg │ └── py_table_of_contents_core.rst ├── py_feature2d ├── py_brief │ └── py_brief.rst ├── py_fast │ ├── images │ │ ├── fast_eqns.jpg │ │ ├── fast_kp.jpg │ │ └── fast_speedtest.jpg │ └── py_fast.rst ├── py_feature_homography │ ├── images │ │ └── homography_findobj.jpg │ └── py_feature_homography.rst ├── py_features_harris │ ├── images │ │ ├── harris_region.jpg │ │ ├── harris_result.jpg │ │ └── subpixel3.png │ └── py_features_harris.rst ├── py_features_meaning │ ├── images │ │ ├── feature_building.jpg │ │ └── feature_simple.png │ └── py_features_meaning.rst ├── py_matcher │ ├── images │ │ ├── matcher_flann.jpg │ │ ├── matcher_result1.jpg │ │ └── matcher_result2.jpg │ └── py_matcher.rst ├── py_orb │ ├── images │ │ └── orb_kp.jpg │ └── py_orb.rst ├── py_shi_tomasi │ ├── images │ │ ├── shitomasi_block1.jpg │ │ └── shitomasi_space.png │ └── py_shi_tomasi.rst ├── py_sift_intro │ ├── images │ │ ├── sift_dog.jpg │ │ ├── sift_keypoints.jpg │ │ ├── sift_local_extrema.jpg │ │ └── sift_scale_invariant.jpg │ └── py_sift_intro.rst ├── py_surf_intro │ ├── images │ │ ├── surf_boxfilter.jpg │ │ ├── surf_kp1.jpg │ │ ├── surf_kp2.jpg │ │ ├── surf_matching.jpg │ │ └── surf_orientation.jpg │ └── py_surf_intro.rst └── py_table_of_contents_feature2d │ ├── images │ ├── brief.jpg │ ├── fast_icon.jpg │ ├── features_icon.jpg │ ├── harris_icon.jpg │ ├── homography_icon.jpg │ ├── matching.jpg │ ├── orb.jpg │ ├── shi_icon.jpg │ ├── sift_icon.jpg │ └── surf_icon.jpg │ └── py_table_of_contents_feature2d.rst ├── py_gui ├── py_drawing_functions │ ├── images │ │ └── drawing.jpg │ └── py_drawing_functions.rst ├── py_image_display │ ├── images │ │ ├── matplotlib_screenshot.jpg │ │ └── opencv_screenshot.jpg │ └── py_image_display.rst ├── py_mouse_handling │ └── py_mouse_handling.rst ├── py_table_of_contents_gui │ ├── images │ │ ├── 
drawing.jpg │ │ ├── image_display.jpg │ │ ├── mouse_drawing.jpg │ │ ├── trackbar.jpg │ │ └── video_display.jpg │ └── py_table_of_contents_gui.rst ├── py_trackbar │ ├── images │ │ └── trackbar_screenshot.jpg │ └── py_trackbar.rst └── py_video_display │ └── py_video_display.rst ├── py_imgproc ├── py_canny │ ├── images │ │ ├── canny1.jpg │ │ ├── hysteresis.jpg │ │ └── nms.jpg │ └── py_canny.rst ├── py_colorspaces │ ├── images │ │ └── frame.jpg │ └── py_colorspaces.rst ├── py_contours │ ├── py_contour_features │ │ ├── images │ │ │ ├── approx.jpg │ │ │ ├── boundingrect.png │ │ │ ├── circumcircle.png │ │ │ ├── convexitydefects.jpg │ │ │ ├── fitellipse.png │ │ │ └── fitline.jpg │ │ └── py_contour_features.rst │ ├── py_contour_properties │ │ ├── images │ │ │ └── extremepoints.jpg │ │ └── py_contour_properties.rst │ ├── py_contours_begin │ │ ├── images │ │ │ └── none.jpg │ │ └── py_contours_begin.rst │ ├── py_contours_hierarchy │ │ ├── images │ │ │ ├── ccomp_hierarchy.png │ │ │ ├── hierarchy.png │ │ │ └── tree_hierarchy.png │ │ └── py_contours_hierarchy.rst │ ├── py_contours_more_functions │ │ ├── images │ │ │ ├── defects.jpg │ │ │ └── matchshapes.jpg │ │ └── py_contours_more_functions.rst │ └── py_table_of_contents_contours │ │ ├── images │ │ ├── contour_defects.jpg │ │ ├── contour_features.jpg │ │ ├── contour_hierarchy.jpg │ │ ├── contour_properties.jpg │ │ └── contour_starting.jpg │ │ └── py_table_of_contents_contours.rst ├── py_filtering │ ├── images │ │ ├── bilateral.jpg │ │ ├── blur.jpg │ │ ├── filter.jpg │ │ ├── gaussian.jpg │ │ └── median.jpg │ └── py_filtering.rst ├── py_geometric_transformations │ ├── images │ │ ├── affine.jpg │ │ ├── perspective.jpg │ │ ├── rotation.jpg │ │ └── translation.jpg │ └── py_geometric_transformations.rst ├── py_grabcut │ ├── images │ │ ├── grabcut.jpg │ │ ├── grabcut_mask.jpg │ │ ├── grabcut_output1.jpg │ │ └── grabcut_rect.jpg │ └── py_grabcut.rst ├── py_gradients │ ├── images │ │ ├── double_edge.jpg │ │ └── gradients.jpg │ └── py_gradients.rst ├── py_histograms │ ├── py_2d_histogram │ │ ├── images │ │ │ ├── 2dhist_matplotlib.jpg │ │ │ └── 2dhist_opencv.jpg │ │ └── py_2d_histogram.rst │ ├── py_histogram_backprojection │ │ ├── images │ │ │ └── backproject_opencv.jpg │ │ └── py_histogram_backprojection.rst │ ├── py_histogram_begins │ │ ├── images │ │ │ ├── histogram_masking.jpg │ │ │ ├── histogram_matplotlib.jpg │ │ │ ├── histogram_rgb_plot.jpg │ │ │ └── histogram_sample.jpg │ │ └── py_histogram_begins.rst │ ├── py_histogram_equalization │ │ ├── images │ │ │ ├── clahe_1.jpg │ │ │ ├── clahe_2.jpg │ │ │ ├── equalization_opencv.jpg │ │ │ ├── histeq_numpy1.jpg │ │ │ ├── histeq_numpy2.jpg │ │ │ └── histogram_equalization.png │ │ └── py_histogram_equalization.rst │ └── py_table_of_contents_histograms │ │ ├── images │ │ ├── histograms_1d.jpg │ │ ├── histograms_2d.jpg │ │ ├── histograms_bp.jpg │ │ └── histograms_equ.jpg │ │ └── py_table_of_contents_histograms.rst ├── py_houghcircles │ ├── images │ │ └── houghcircles2.jpg │ └── py_houghcircles.rst ├── py_houghlines │ ├── images │ │ ├── houghlines1.svg │ │ ├── houghlines2.jpg │ │ ├── houghlines3.jpg │ │ ├── houghlines4.png │ │ ├── houghlines5.jpg │ │ └── houghlinesdemo.gif │ └── py_houghlines.rst ├── py_morphological_ops │ ├── images │ │ ├── blackhat.png │ │ ├── closing.png │ │ ├── dilation.png │ │ ├── erosion.png │ │ ├── gradient.png │ │ ├── j.png │ │ ├── opening.png │ │ └── tophat.png │ └── py_morphological_ops.rst ├── py_pyramids │ ├── images │ │ ├── lap.jpg │ │ ├── messipyr.jpg │ │ ├── messiup.jpg │ │ └── orapple.jpg 
│ └── py_pyramids.rst ├── py_table_of_contents_imgproc │ ├── images │ │ ├── blurring.jpg │ │ ├── canny.jpg │ │ ├── colorspace.jpg │ │ ├── contours.jpg │ │ ├── geometric.jpg │ │ ├── grabcut.jpg │ │ ├── gradient.jpg │ │ ├── histogram.jpg │ │ ├── houghcircles.jpg │ │ ├── houghlines.jpg │ │ ├── morphology.jpg │ │ ├── pyramid.png │ │ ├── template.jpg │ │ ├── thresh.jpg │ │ ├── transforms.jpg │ │ └── watershed.jpg │ └── py_table_of_contents_imgproc.rst ├── py_template_matching │ ├── images │ │ ├── messi_face.jpg │ │ ├── res_mario.jpg │ │ ├── template_ccoeff_1.jpg │ │ ├── template_ccoeffn_2.jpg │ │ ├── template_ccorr_3.jpg │ │ ├── template_ccorrn_4.jpg │ │ ├── template_sqdiff_5.jpg │ │ └── template_sqdiffn_6.jpg │ └── py_template_matching.rst ├── py_thresholding │ ├── images │ │ ├── ada_threshold.jpg │ │ ├── otsu.jpg │ │ └── threshold.jpg │ └── py_thresholding.rst ├── py_transforms │ ├── py_fourier_transform │ │ ├── images │ │ │ ├── fft1.jpg │ │ │ ├── fft2.jpg │ │ │ ├── fft4.jpg │ │ │ └── fft5.jpg │ │ └── py_fourier_transform.rst │ └── py_table_of_contents_transforms │ │ ├── images │ │ └── transform_fourier.jpg │ │ └── py_table_of_contents_transforms.rst └── py_watershed │ ├── images │ ├── water_coins.jpg │ ├── water_dt.jpg │ ├── water_fgbg.jpg │ ├── water_marker.jpg │ ├── water_result.jpg │ └── water_thresh.jpg │ └── py_watershed.rst ├── py_ml ├── py_kmeans │ ├── images │ │ ├── kmeans_begin.jpg │ │ └── kmeans_demo.jpg │ ├── py_kmeans_index.rst │ ├── py_kmeans_opencv │ │ ├── images │ │ │ ├── oc_1d_clustered.png │ │ │ ├── oc_1d_testdata.png │ │ │ ├── oc_2d_clustered.jpg │ │ │ ├── oc_color_quantization.jpg │ │ │ └── oc_feature_representation.jpg │ │ └── py_kmeans_opencv.rst │ └── py_kmeans_understanding │ │ ├── images │ │ ├── final_clusters.jpg │ │ ├── initial_labelling.jpg │ │ ├── testdata.jpg │ │ ├── tshirt.jpg │ │ ├── tshirt_grouped.jpg │ │ └── update_centroid.jpg │ │ └── py_kmeans_understanding.rst ├── py_knn │ ├── images │ │ ├── knn_icon1.jpg │ │ └── knn_icon2.jpg │ ├── py_knn_index.rst │ ├── py_knn_opencv │ │ └── py_knn_opencv.rst │ └── py_knn_understanding │ │ ├── images │ │ ├── knn_simple.png │ │ └── knn_theory.png │ │ └── py_knn_understanding.rst ├── py_svm │ ├── images │ │ ├── svm_icon1.jpg │ │ └── svm_icon2.jpg │ ├── py_svm_basics │ │ ├── images │ │ │ ├── svm_basics1.png │ │ │ ├── svm_basics2.png │ │ │ └── svm_basics3.png │ │ └── py_svm_basics.rst │ ├── py_svm_index.rst │ └── py_svm_opencv │ │ ├── images │ │ └── deskew.jpg │ │ └── py_svm_opencv.rst └── py_table_of_contents_ml │ ├── images │ ├── kmeansicon.jpg │ ├── knnicon.png │ └── svmicon.png │ └── py_table_of_contents_ml.rst ├── py_objdetect ├── py_face_detection │ ├── images │ │ ├── face.jpg │ │ ├── haar.png │ │ └── haar_features.jpg │ └── py_face_detection.rst └── py_table_of_contents_objdetect │ ├── images │ └── face_icon.jpg │ └── py_table_of_contents_objdetect.rst ├── py_photo ├── py_inpainting │ ├── images │ │ ├── inpaint_basics.jpg │ │ └── inpaint_result.jpg │ └── py_inpainting.rst ├── py_non_local_means │ ├── images │ │ ├── nlm_multi.jpg │ │ ├── nlm_patch.jpg │ │ └── nlm_result1.jpg │ └── py_non_local_means.rst └── py_table_of_contents_photo │ ├── images │ ├── inpainticon.jpg │ └── nlm_icon.jpg │ └── py_table_of_contents_photo.rst ├── py_setup ├── py_intro │ └── py_intro.rst ├── py_setup_in_fedora │ └── py_setup_in_fedora.rst ├── py_setup_in_windows │ ├── images │ │ ├── Capture1.jpg │ │ ├── Capture2.png │ │ ├── Capture3.png │ │ ├── Capture5.png │ │ ├── Capture6.png │ │ ├── Capture7.png │ │ ├── Capture8.png │ │ └── 
Capture80.png │ └── py_setup_in_windows.rst └── py_table_of_contents_setup │ ├── images │ ├── fedora_logo.jpg │ ├── opencv_logo.jpg │ └── windows_logo.jpg │ └── py_table_of_contents_setup.rst ├── py_tutorials.rst └── py_video ├── py_bg_subtraction ├── images │ ├── resframe.jpg │ ├── resgmg.jpg │ ├── resmog.jpg │ └── resmog2.jpg └── py_bg_subtraction.rst ├── py_lucas_kanade ├── images │ ├── optical_flow_basic1.jpg │ ├── opticalfb.jpg │ └── opticalflow_lk.jpg └── py_lucas_kanade.rst ├── py_meanshift ├── images │ ├── camshift_face.gif │ ├── camshift_result.jpg │ ├── meanshift_basics.jpg │ ├── meanshift_face.gif │ └── meanshift_result.jpg └── py_meanshift.rst └── py_table_of_contents_video ├── images ├── background.jpg ├── camshift.jpg ├── lucas.jpg └── opticalflow.jpeg └── py_table_of_contents_video.rst /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | 4 | # Custom for Visual Studio 5 | *.cs diff=csharp 6 | *.sln merge=union 7 | *.csproj merge=union 8 | *.vbproj merge=union 9 | *.fsproj merge=union 10 | *.dbproj merge=union 11 | 12 | # Standard to msysgit 13 | *.doc diff=astextplain 14 | *.DOC diff=astextplain 15 | *.docx diff=astextplain 16 | *.DOCX diff=astextplain 17 | *.dot diff=astextplain 18 | *.DOT diff=astextplain 19 | *.pdf diff=astextplain 20 | *.PDF diff=astextplain 21 | *.rtf diff=astextplain 22 | *.RTF diff=astextplain 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | build/ 3 | build/* 4 | *~ 5 | 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | OpenCV2-Python-Guide 2 | ==================== 3 | 4 | This repo contains tutorials on OpenCV-Python library using new cv2 interface 5 | 6 | **IMP - This tutorial is meant for OpenCV 3x version. Not OpenCV 2x** 7 | ======================================================================= 8 | 9 | **IMP - This tutorial is meant for OpenCV 3x version. Not OpenCV 2x** 10 | 11 | **IMP - This tutorial is meant for OpenCV 3x version. Not OpenCV 2x** 12 | 13 | Please try the examples with OpenCV 3x before sending any bug reports 14 | 15 | Data files 16 | ----------- 17 | 18 | The input data used in these tutorials are given in **Data** folder 19 | 20 | Online 21 | --------- 22 | 23 | * **For official tutorials, please visit : http://docs.opencv.org/trunk/doc/py_tutorials/py_tutorials.html** 24 | * https://opencv-python-tutroals.readthedocs.org/en/latest/index.html - This is only for checking. May contain lots of errors, please stick to the official tutorials. 
25 | 26 | Offline 27 | --------- 28 | To build docs from source, 29 | * Install sphinx 30 | * Download/Clone this repo and navigate to the base folder 31 | * run command : `make html` , html docs will be available in **build/html/** folder 32 | -------------------------------------------------------------------------------- /data/butterfly.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/data/butterfly.jpg -------------------------------------------------------------------------------- /data/home.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/data/home.jpg -------------------------------------------------------------------------------- /data/left.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/data/left.jpg -------------------------------------------------------------------------------- /data/messi5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/data/messi5.jpg -------------------------------------------------------------------------------- /data/readme.txt: -------------------------------------------------------------------------------- 1 | This folder contains data files used in these tutorials. 2 | 3 | Not all files are available. I had to format my system and lost many files. Some are taken from internet and I have put them in this folder. Some files are my own, so no way to recover them. 4 | 5 | Some video files are also missing. 6 | 7 | Image 8 | ------- 9 | lena.jpg 10 | butterfly.jpg 11 | home.jpg 12 | messi5.jpg 13 | left.jpg 14 | right.jpg 15 | 16 | Feature Matching - https://github.com/Itseez/opencv/blob/master/samples/c/box.png 17 | - https://github.com/Itseez/opencv/blob/master/samples/c/box_in_scene.png 18 | 19 | Background subtraction - https://github.com/Itseez/opencv/blob/master/samples/gpu/768x576.avi 20 | -------------------------------------------------------------------------------- /data/right.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/data/right.jpg -------------------------------------------------------------------------------- /indexplan.ods: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/indexplan.ods -------------------------------------------------------------------------------- /source/index.rst: -------------------------------------------------------------------------------- 1 | .. OpenCV-Python Tutorials documentation master file, created by 2 | sphinx-quickstart on Fri May 31 12:04:12 2013. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to OpenCV-Python Tutorials's documentation! 7 | =================================================== 8 | 9 | Contents: 10 | 11 | .. 
toctree:: 12 | :maxdepth: 2 13 | 14 | py_tutorials/py_tutorials 15 | 16 | Indices and tables 17 | ================== 18 | 19 | * :ref:`genindex` 20 | * :ref:`modindex` 21 | * :ref:`search` 22 | 23 | -------------------------------------------------------------------------------- /source/opencv-logo-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/opencv-logo-white.png -------------------------------------------------------------------------------- /source/py_tutorials/images/MachineLearnings.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/images/MachineLearnings.jpg -------------------------------------------------------------------------------- /source/py_tutorials/images/calib3d_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/images/calib3d_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/images/core.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/images/core.jpg -------------------------------------------------------------------------------- /source/py_tutorials/images/featureicon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/images/featureicon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/images/gui.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/images/gui.jpg -------------------------------------------------------------------------------- /source/py_tutorials/images/imgproc.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/images/imgproc.jpg -------------------------------------------------------------------------------- /source/py_tutorials/images/intro.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/images/intro.png -------------------------------------------------------------------------------- /source/py_tutorials/images/obj_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/images/obj_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/images/photoicon.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/images/photoicon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/images/videoicon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/images/videoicon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_bindings/py_table_of_contents_bindings/images/inpainticon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_bindings/py_table_of_contents_bindings/images/inpainticon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_bindings/py_table_of_contents_bindings/images/nlm_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_bindings/py_table_of_contents_bindings/images/nlm_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_bindings/py_table_of_contents_bindings/py_table_of_contents_bindings.rst: -------------------------------------------------------------------------------- 1 | .. _PY_Table-Of-Content-Bindings: 2 | 3 | 4 | OpenCV-Python Bindings 5 | -------------------------------- 6 | 7 | Here, you will learn how OpenCV-Python bindings are generated. 8 | 9 | 10 | * :ref:`Bindings_Basics` 11 | 12 | .. tabularcolumns:: m{100pt} m{300pt} 13 | .. cssclass:: toctableopencv 14 | 15 | =========== ====================================================== 16 | |bind1| Learn how OpenCV-Python bindings are generated. 17 | 18 | =========== ====================================================== 19 | 20 | .. |bind1| image:: images/nlm_icon.jpg 21 | :height: 90pt 22 | :width: 90pt 23 | 24 | 25 | 26 | 27 | 28 | .. raw:: latex 29 | 30 | \pagebreak 31 | 32 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 33 | .. 
toctree:: 34 | :hidden: 35 | 36 | ../py_bindings_basics/py_bindings_basics 37 | -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_calibration/images/calib_pattern.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_calibration/images/calib_pattern.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_calibration/images/calib_radial.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_calibration/images/calib_radial.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_calibration/images/calib_result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_calibration/images/calib_result.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_depthmap/images/disparity_map.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_depthmap/images/disparity_map.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_depthmap/images/stereo_depth.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_depthmap/images/stereo_depth.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_depthmap/py_depthmap.rst: -------------------------------------------------------------------------------- 1 | .. _py_depthmap: 2 | 3 | 4 | Depth Map from Stereo Images 5 | ****************************** 6 | 7 | Goal 8 | ======= 9 | 10 | In this session, 11 | * We will learn to create depth map from stereo images. 12 | 13 | 14 | Basics 15 | =========== 16 | In last session, we saw basic concepts like epipolar constraints and other related terms. We also saw that if we have two images of same scene, we can get depth information from that in an intuitive way. Below is an image and some simple mathematical formulas which proves that intuition. (Image Courtesy : 17 | 18 | .. image:: images/stereo_depth.jpg 19 | :alt: Calculating depth 20 | :align: center 21 | 22 | The above diagram contains equivalent triangles. Writing their equivalent equations will yield us following result: 23 | 24 | .. math:: 25 | 26 | disparity = x - x' = \frac{Bf}{Z} 27 | 28 | :math:`x` and :math:`x'` are the distance between points in image plane corresponding to the scene point 3D and their camera center. :math:`B` is the distance between two cameras (which we know) and :math:`f` is the focal length of camera (already known). 
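As a quick numerical illustration of the relation above (with made-up values, not numbers from any real calibration), suppose the baseline is :math:`B = 0.1` m, the focal length is :math:`f = 700` pixels and a point is observed with a disparity of 35 pixels; the depth then comes out as :math:`Z = Bf/(x - x') = 700 \times 0.1 / 35 = 2` m:
::

    # Hypothetical numbers, for illustration only
    f = 700.0          # focal length in pixels (assumed)
    B = 0.1            # baseline between the two cameras in metres (assumed)
    disparity = 35.0   # disparity x - x' in pixels (assumed)

    Z = f * B / disparity   # depth from Z = B*f/disparity
    print Z                 # 2.0 (metres)
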
So in short, above equation says that the depth of a point in a scene is inversely proportional to the difference in distance of corresponding image points and their camera centers. So with this information, we can derive the depth of all pixels in an image. 29 | 30 | So it finds corresponding matches between two images. We have already seen how epiline constraint make this operation faster and accurate. Once it finds matches, it finds the disparity. Let's see how we can do it with OpenCV. 31 | 32 | 33 | Code 34 | ======== 35 | 36 | Below code snippet shows a simple procedure to create disparity map. 37 | :: 38 | 39 | import numpy as np 40 | import cv2 41 | from matplotlib import pyplot as plt 42 | 43 | imgL = cv2.imread('tsukuba_l.png',0) 44 | imgR = cv2.imread('tsukuba_r.png',0) 45 | 46 | stereo = cv2.createStereoBM(numDisparities=16, blockSize=15) 47 | disparity = stereo.compute(imgL,imgR) 48 | plt.imshow(disparity,'gray') 49 | plt.show() 50 | 51 | Below image contains the original image (left) and its disparity map (right). As you can see, result is contaminated with high degree of noise. By adjusting the values of numDisparities and blockSize, you can get better results. 52 | 53 | .. image:: images/disparity_map.jpg 54 | :alt: Disparity Map 55 | :align: center 56 | 57 | .. note:: More details to be added 58 | 59 | 60 | Additional Resources 61 | ============================= 62 | 63 | 64 | Exercises 65 | ============ 66 | 67 | 1. OpenCV samples contain an example of generating disparity map and its 3D reconstruction. Check ``stereo_match.py`` in OpenCV-Python samples. 68 | -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_epipolar_geometry/images/epipolar.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_epipolar_geometry/images/epipolar.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_epipolar_geometry/images/epiresult.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_epipolar_geometry/images/epiresult.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_epipolar_geometry/images/essential_matrix.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_epipolar_geometry/images/essential_matrix.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_pose/images/pose_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_pose/images/pose_1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_pose/images/pose_2.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_pose/images/pose_2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_table_of_contents_calib3d/images/calibration_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_table_of_contents_calib3d/images/calibration_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_table_of_contents_calib3d/images/depthmap_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_table_of_contents_calib3d/images/depthmap_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_table_of_contents_calib3d/images/epipolar_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_table_of_contents_calib3d/images/epipolar_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_table_of_contents_calib3d/images/pose_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_calib3d/py_table_of_contents_calib3d/images/pose_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_calib3d/py_table_of_contents_calib3d/py_table_of_contents_calib3d.rst: -------------------------------------------------------------------------------- 1 | .. _PY_Table-Of-Content-Calib: 2 | 3 | 4 | Camera Calibration and 3D Reconstruction 5 | ---------------------------------------------- 6 | 7 | * :ref:`calibration` 8 | 9 | .. tabularcolumns:: m{100pt} m{300pt} 10 | .. cssclass:: toctableopencv 11 | 12 | =========== ====================================================== 13 | |calib_1| Let's find how good is our camera. Is there any distortion in images taken with it? If so how to correct it? 14 | 15 | =========== ====================================================== 16 | 17 | .. |calib_1| image:: images/calibration_icon.jpg 18 | :height: 90pt 19 | :width: 90pt 20 | 21 | 22 | * :ref:`pose_estimation` 23 | 24 | .. tabularcolumns:: m{100pt} m{300pt} 25 | .. cssclass:: toctableopencv 26 | 27 | =========== ====================================================== 28 | |calib_2| This is a small section which will help you to create some cool 3D effects with calib module. 29 | 30 | =========== ====================================================== 31 | 32 | .. |calib_2| image:: images/pose_icon.jpg 33 | :height: 90pt 34 | :width: 90pt 35 | 36 | 37 | * :ref:`epipolar_geometry` 38 | 39 | .. tabularcolumns:: m{100pt} m{300pt} 40 | .. 
cssclass:: toctableopencv 41 | 42 | =========== ====================================================== 43 | |calib_3| Let's understand epipolar geometry and epipolar constraint. 44 | 45 | =========== ====================================================== 46 | 47 | .. |calib_3| image:: images/epipolar_icon.jpg 48 | :height: 90pt 49 | :width: 90pt 50 | 51 | 52 | * :ref:`py_depthmap` 53 | 54 | .. tabularcolumns:: m{100pt} m{300pt} 55 | .. cssclass:: toctableopencv 56 | 57 | =========== ====================================================== 58 | |calib_4| Extract depth information from 2D images. 59 | 60 | =========== ====================================================== 61 | 62 | .. |calib_4| image:: images/depthmap_icon.jpg 63 | :height: 90pt 64 | :width: 90pt 65 | 66 | 67 | 68 | .. raw:: latex 69 | 70 | \pagebreak 71 | 72 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 73 | .. toctree:: 74 | :hidden: 75 | 76 | ../py_calibration/py_calibration 77 | ../py_pose/py_pose 78 | ../py_epipolar_geometry/py_epipolar_geometry 79 | ../py_depthmap/py_depthmap 80 | 81 | -------------------------------------------------------------------------------- /source/py_tutorials/py_core/py_basic_ops/images/border.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_core/py_basic_ops/images/border.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_core/py_basic_ops/images/roi.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_core/py_basic_ops/images/roi.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_core/py_image_arithmetics/images/blending.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_core/py_image_arithmetics/images/blending.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_core/py_image_arithmetics/images/overlay.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_core/py_image_arithmetics/images/overlay.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.rst: -------------------------------------------------------------------------------- 1 | .. _Image_Arithmetics: 2 | 3 | Arithmetic Operations on Images 4 | ********************************* 5 | 6 | Goal 7 | ===== 8 | 9 | * Learn several arithmetic operations on images like addition, subtraction, bitwise operations etc. 10 | * You will learn these functions : **cv2.add()**, **cv2.addWeighted()** etc. 11 | 12 | Image Addition 13 | ================ 14 | 15 | You can add two images by OpenCV function, ``cv2.add()`` or simply by numpy operation, ``res = img1 + img2``. 
Both images should be of the same depth and type, or the second image can just be a scalar value. 16 | 17 | .. note:: There is a difference between OpenCV addition and Numpy addition. OpenCV addition is a saturated operation while Numpy addition is a modulo operation. 18 | 19 | For example, consider the sample below: 20 | :: 21 | 22 | >>> x = np.uint8([250]) 23 | >>> y = np.uint8([10]) 24 | 25 | >>> print cv2.add(x,y) # 250+10 = 260 => 255 26 | [[255]] 27 | 28 | >>> print x+y # 250+10 = 260 % 256 = 4 29 | [4] 30 | 31 | The difference will be more visible when you add two images. The OpenCV function will provide a better result, so it is always better to stick to OpenCV functions. 32 | 33 | Image Blending 34 | ================= 35 | 36 | This is also image addition, but different weights are given to the images so as to give a feeling of blending or transparency. Images are added as per the equation below: 37 | 38 | .. math:: 39 | 40 | g(x) = (1 - \alpha)f_{0}(x) + \alpha f_{1}(x) 41 | 42 | By varying :math:`\alpha` from :math:`0 \rightarrow 1`, you can perform a cool transition from one image to another. 43 | 44 | Here I took two images to blend together. The first image is given a weight of 0.7 and the second a weight of 0.3. ``cv2.addWeighted()`` applies the following equation to the images. 45 | 46 | .. math:: 47 | 48 | dst = \alpha \cdot img1 + \beta \cdot img2 + \gamma 49 | 50 | Here :math:`\gamma` is taken as zero. 51 | :: 52 | 53 | img1 = cv2.imread('ml.png') 54 | img2 = cv2.imread('opencv_logo.jpg') 55 | 56 | dst = cv2.addWeighted(img1,0.7,img2,0.3,0) 57 | 58 | cv2.imshow('dst',dst) 59 | cv2.waitKey(0) 60 | cv2.destroyAllWindows() 61 | 62 | Check the result below: 63 | 64 | .. image:: images/blending.jpg 65 | :alt: Image Blending 66 | :align: center 67 | 68 | Bitwise Operations 69 | =================== 70 | 71 | This includes bitwise AND, OR, NOT and XOR operations. They will be highly useful while extracting any part of the image (as we will see in coming chapters), defining and working with non-rectangular ROIs etc. Below we will see an example of how to change a particular region of an image. 72 | 73 | I want to put the OpenCV logo above an image. If I add two images, it will change the color. If I blend them, I get a transparent effect. But I want it to be opaque. If it were a rectangular region, I could use the ROI as we did in the last chapter. But the OpenCV logo is not a rectangular shape. So you can do it with bitwise operations as below: 74 | :: 75 | 76 | # Load two images 77 | img1 = cv2.imread('messi5.jpg') 78 | img2 = cv2.imread('opencv_logo.png') 79 | 80 | # I want to put the logo in the top-left corner, so I create a ROI 81 | rows,cols,channels = img2.shape 82 | roi = img1[0:rows, 0:cols ] 83 | 84 | # Now create a mask of the logo and also create its inverse mask 85 | img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY) 86 | ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY) 87 | mask_inv = cv2.bitwise_not(mask) 88 | 89 | # Now black-out the area of the logo in the ROI 90 | img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv) 91 | 92 | # Take only the region of the logo from the logo image 93 | img2_fg = cv2.bitwise_and(img2,img2,mask = mask) 94 | 95 | # Put the logo in the ROI and modify the main image 96 | dst = cv2.add(img1_bg,img2_fg) 97 | img1[0:rows, 0:cols ] = dst 98 | 99 | cv2.imshow('res',img1) 100 | cv2.waitKey(0) 101 | cv2.destroyAllWindows() 102 | 103 | See the result below. The left image shows the mask we created. The right image shows the final result. 
For more understanding, display all the intermediate images in the above code, especially ``img1_bg`` and ``img2_fg``. 104 | 105 | .. image:: images/overlay.jpg 106 | :alt: Otsu's Thresholding 107 | :align: center 108 | 109 | 110 | Additional Resources 111 | ====================== 112 | 113 | Exercises 114 | ============ 115 | #. Create a slide show of images in a folder with smooth transition between images using ``cv2.addWeighted`` function 116 | -------------------------------------------------------------------------------- /source/py_tutorials/py_core/py_maths_tools/py_maths_tools.rst: -------------------------------------------------------------------------------- 1 | .. _Mathematical_Tools: 2 | 3 | Mathematical Tools in OpenCV 4 | ******************************** 5 | -------------------------------------------------------------------------------- /source/py_tutorials/py_core/py_table_of_contents_core/images/image_arithmetic.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_core/py_table_of_contents_core/images/image_arithmetic.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_core/py_table_of_contents_core/images/maths_tools.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_core/py_table_of_contents_core/images/maths_tools.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_core/py_table_of_contents_core/images/pixel_ops.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_core/py_table_of_contents_core/images/pixel_ops.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_core/py_table_of_contents_core/images/speed.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_core/py_table_of_contents_core/images/speed.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_core/py_table_of_contents_core/py_table_of_contents_core.rst: -------------------------------------------------------------------------------- 1 | .. _PY_Table-Of-Content-Core: 2 | 3 | Core Operations 4 | ----------------------------------------------------------- 5 | 6 | 7 | * :ref:`Basic_Ops` 8 | 9 | .. tabularcolumns:: m{100pt} m{300pt} 10 | .. cssclass:: toctableopencv 11 | 12 | =========== ====================================================== 13 | |core_1| Learn to read and edit pixel values, working with image ROI and other basic operations. 14 | 15 | =========== ====================================================== 16 | 17 | .. |core_1| image:: images/pixel_ops.jpg 18 | :height: 90pt 19 | :width: 90pt 20 | 21 | * :ref:`Image_Arithmetics` 22 | 23 | .. tabularcolumns:: m{100pt} m{300pt} 24 | .. 
cssclass:: toctableopencv 25 | 26 | =========== ====================================================== 27 | |core_2| Perform arithmetic operations on images 28 | 29 | =========== ====================================================== 30 | 31 | .. |core_2| image:: images/image_arithmetic.jpg 32 | :height: 90pt 33 | :width: 90pt 34 | 35 | * :ref:`Optimization_Techniques` 36 | 37 | .. tabularcolumns:: m{100pt} m{300pt} 38 | .. cssclass:: toctableopencv 39 | 40 | =========== ====================================================== 41 | |core_4| Getting a solution is important. But getting it in the fastest way is more important. Learn to check the speed of your code, optimize the code etc. 42 | 43 | =========== ====================================================== 44 | 45 | .. |core_4| image:: images/speed.jpg 46 | :height: 90pt 47 | :width: 90pt 48 | 49 | * :ref:`Mathematical_Tools` 50 | 51 | .. tabularcolumns:: m{100pt} m{300pt} 52 | .. cssclass:: toctableopencv 53 | 54 | =========== ====================================================== 55 | |core_5| Learn some of the mathematical tools provided by OpenCV like PCA, SVD etc. 56 | 57 | =========== ====================================================== 58 | 59 | .. |core_5| image:: images/maths_tools.jpg 60 | :height: 90pt 61 | :width: 90pt 62 | 63 | 64 | .. raw:: latex 65 | 66 | \pagebreak 67 | 68 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 69 | .. toctree:: 70 | :hidden: 71 | 72 | ../py_basic_ops/py_basic_ops 73 | ../py_image_arithmetics/py_image_arithmetics 74 | ../py_optimization/py_optimization 75 | ../py_maths_tools/py_maths_tools 76 | -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_brief/py_brief.rst: -------------------------------------------------------------------------------- 1 | .. _BRIEF: 2 | 3 | 4 | BRIEF (Binary Robust Independent Elementary Features) 5 | *********************************************************** 6 | 7 | Goal 8 | ======= 9 | In this chapter 10 | * We will see the basics of BRIEF algorithm 11 | 12 | 13 | Theory 14 | ============= 15 | 16 | We know SIFT uses 128-dim vector for descriptors. Since it is using floating point numbers, it takes basically 512 bytes. Similarly SURF also takes minimum of 256 bytes (for 64-dim). Creating such a vector for thousands of features takes a lot of memory which are not feasible for resouce-constraint applications especially for embedded systems. Larger the memory, longer the time it takes for matching. 17 | 18 | But all these dimensions may not be needed for actual matching. We can compress it using several methods like PCA, LDA etc. Even other methods like hashing using LSH (Locality Sensitive Hashing) is used to convert these SIFT descriptors in floating point numbers to binary strings. These binary strings are used to match features using Hamming distance. This provides better speed-up because finding hamming distance is just applying XOR and bit count, which are very fast in modern CPUs with SSE instructions. But here, we need to find the descriptors first, then only we can apply hashing, which doesn't solve our initial problem on memory. 19 | 20 | BRIEF comes into picture at this moment. It provides a shortcut to find the binary strings directly without finding descriptors. It takes smoothened image patch and selects a set of :math:`n_d` (x,y) location pairs in an unique way (explained in paper). 
Then some pixel intensity comparisons are done on these location pairs. For example, let the first location pair be :math:`p` and :math:`q`. If :math:`I(p) < I(q)`, then the result is 1, else it is 0. This is applied for all the :math:`n_d` location pairs to get an :math:`n_d`-dimensional bitstring. 21 | 22 | This :math:`n_d` can be 128, 256 or 512. OpenCV supports all of these, but by default it is 256 (OpenCV represents it in bytes, so the values will be 16, 32 and 64). Once you get this bitstring, you can use the Hamming distance to match these descriptors. 23 | 24 | One important point is that BRIEF is only a feature descriptor; it doesn't provide any method to find the features. So you will have to use another feature detector like SIFT, SURF etc. The paper recommends using CenSurE, which is a fast detector, and BRIEF even works slightly better for CenSurE points than for SURF points. 25 | 26 | In short, BRIEF is a faster method for feature descriptor calculation and matching. It also provides a high recognition rate unless there is large in-plane rotation. 27 | 28 | BRIEF in OpenCV 29 | ===================== 30 | 31 | The code below shows the computation of BRIEF descriptors with the help of the CenSurE detector. (The CenSurE detector is called the STAR detector in OpenCV.) 32 | :: 33 | 34 | import numpy as np 35 | import cv2 36 | from matplotlib import pyplot as plt 37 | 38 | img = cv2.imread('simple.jpg',0) 39 | 40 | # Initiate STAR detector 41 | star = cv2.FeatureDetector_create("STAR") 42 | 43 | # Initiate BRIEF extractor 44 | brief = cv2.DescriptorExtractor_create("BRIEF") 45 | 46 | # find the keypoints with STAR 47 | kp = star.detect(img,None) 48 | 49 | # compute the descriptors with BRIEF 50 | kp, des = brief.compute(img, kp) 51 | 52 | print brief.getInt('bytes') 53 | print des.shape 54 | 55 | The function ``brief.getInt('bytes')`` gives the :math:`n_d` size used in bytes. By default it is 32. The next step is matching, which will be covered in another chapter. 56 | 57 | 58 | Additional Resources 59 | ========================== 60 | 61 | #. Michael Calonder, Vincent Lepetit, Christoph Strecha, and Pascal Fua, "BRIEF: Binary Robust Independent Elementary Features", 11th European Conference on Computer Vision (ECCV), Heraklion, Crete. LNCS Springer, September 2010. 62 | 63 | #. LSH (Locality Sensitive Hashing) at Wikipedia. 
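Note that the listing above uses the old OpenCV 2.4-style factory functions (``cv2.FeatureDetector_create`` / ``cv2.DescriptorExtractor_create``), which are no longer available in OpenCV 3.x. A minimal sketch of the same computation for OpenCV 3.x, assuming the ``opencv_contrib`` (``xfeatures2d``) module is installed, would look roughly like this:
::

    import cv2

    img = cv2.imread('simple.jpg', 0)

    # STAR (CenSurE) detector and BRIEF extractor live in the xfeatures2d contrib module
    star = cv2.xfeatures2d.StarDetector_create()
    brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()

    # find the keypoints with STAR, then compute BRIEF descriptors for them
    kp = star.detect(img, None)
    kp, des = brief.compute(img, kp)

    print brief.descriptorSize()   # descriptor length in bytes (32 by default)
    print des.shape
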
64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_fast/images/fast_eqns.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_fast/images/fast_eqns.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_fast/images/fast_kp.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_fast/images/fast_kp.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_fast/images/fast_speedtest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_fast/images/fast_speedtest.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_feature_homography/images/homography_findobj.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_feature_homography/images/homography_findobj.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.rst: -------------------------------------------------------------------------------- 1 | .. _feature_homography: 2 | 3 | 4 | Feature Matching + Homography to find Objects 5 | *********************************************** 6 | 7 | Goal 8 | ====== 9 | In this chapter, 10 | * We will mix up the feature matching and findHomography from calib3d module to find known objects in a complex image. 11 | 12 | 13 | Basics 14 | ========= 15 | 16 | So what we did in last session? We used a queryImage, found some feature points in it, we took another trainImage, found the features in that image too and we found the best matches among them. In short, we found locations of some parts of an object in another cluttered image. This information is sufficient to find the object exactly on the trainImage. 17 | 18 | For that, we can use a function from calib3d module, ie **cv2.findHomography()**. If we pass the set of points from both the images, it will find the perpective transformation of that object. Then we can use **cv2.perspectiveTransform()** to find the object. It needs atleast four correct points to find the transformation. 19 | 20 | We have seen that there can be some possible errors while matching which may affect the result. To solve this problem, algorithm uses RANSAC or LEAST_MEDIAN (which can be decided by the flags). So good matches which provide correct estimation are called inliers and remaining are called outliers. **cv2.findHomography()** returns a mask which specifies the inlier and outlier points. 21 | 22 | So let's do it !!! 23 | 24 | 25 | Code 26 | ========= 27 | 28 | First, as usual, let's find SIFT features in images and apply the ratio test to find the best matches. 
29 | :: 30 | 31 | import numpy as np 32 | import cv2 33 | from matplotlib import pyplot as plt 34 | 35 | MIN_MATCH_COUNT = 10 36 | 37 | img1 = cv2.imread('box.png',0) # queryImage 38 | img2 = cv2.imread('box_in_scene.png',0) # trainImage 39 | 40 | # Initiate SIFT detector 41 | sift = cv2.SIFT() 42 | 43 | # find the keypoints and descriptors with SIFT 44 | kp1, des1 = sift.detectAndCompute(img1,None) 45 | kp2, des2 = sift.detectAndCompute(img2,None) 46 | 47 | FLANN_INDEX_KDTREE = 0 48 | index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) 49 | search_params = dict(checks = 50) 50 | 51 | flann = cv2.FlannBasedMatcher(index_params, search_params) 52 | 53 | matches = flann.knnMatch(des1,des2,k=2) 54 | 55 | # store all the good matches as per Lowe's ratio test. 56 | good = [] 57 | for m,n in matches: 58 | if m.distance < 0.7*n.distance: 59 | good.append(m) 60 | 61 | 62 | Now we set a condition that at least 10 matches (defined by MIN_MATCH_COUNT) must be present to find the object. Otherwise we simply show a message saying that not enough matches are present. 63 | 64 | If enough matches are found, we extract the locations of the matched keypoints in both images. They are passed to find the perspective transformation. Once we get this 3x3 transformation matrix, we use it to transform the corners of the queryImage to the corresponding points in the trainImage. Then we draw it. 65 | :: 66 | 67 | if len(good)>MIN_MATCH_COUNT: 68 | src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) 69 | dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) 70 | 71 | M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) 72 | matchesMask = mask.ravel().tolist() 73 | 74 | h,w = img1.shape 75 | pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) 76 | dst = cv2.perspectiveTransform(pts,M) 77 | 78 | img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) 79 | 80 | else: 81 | print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT) 82 | matchesMask = None 83 | 84 | 85 | Finally we draw our inliers (if we successfully found the object) or the matching keypoints (if we failed). 86 | :: 87 | 88 | draw_params = dict(matchColor = (0,255,0), # draw matches in green color 89 | singlePointColor = None, 90 | matchesMask = matchesMask, # draw only inliers 91 | flags = 2) 92 | 93 | img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) 94 | 95 | plt.imshow(img3, 'gray'),plt.show() 96 | 97 | 98 | See the result below. The object is marked in white in the cluttered image: 99 | 100 | .. 
image:: images/homography_findobj.jpg 101 | :alt: Finding object with feature homography 102 | :align: center 103 | 104 | 105 | Additional Resources 106 | ============================ 107 | 108 | 109 | Exercises 110 | ================== 111 | 112 | -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_features_harris/images/harris_region.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_features_harris/images/harris_region.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_features_harris/images/harris_result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_features_harris/images/harris_result.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_features_harris/images/subpixel3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_features_harris/images/subpixel3.png -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_features_meaning/images/feature_building.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_features_meaning/images/feature_building.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_features_meaning/images/feature_simple.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_features_meaning/images/feature_simple.png -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_features_meaning/py_features_meaning.rst: -------------------------------------------------------------------------------- 1 | .. _Features_Meaning: 2 | 3 | Understanding Features 4 | ************************ 5 | 6 | Goal 7 | ===== 8 | 9 | In this chapter, we will just try to understand what are features, why are they important, why corners are important etc. 10 | 11 | Explanation 12 | ============== 13 | 14 | Most of you will have played the jigsaw puzzle games. You get a lot of small pieces of a images, where you need to assemble them correctly to form a big real image. **The question is, how you do it?** What about the projecting the same theory to a computer program so that computer can play jigsaw puzzles? If the computer can play jigsaw puzzles, why can't we give a lot of real-life images of a good natural scenery to computer and tell it to stitch all those images to a big single image? 
If the computer can stitch several natural images into one, what about giving it a lot of pictures of a building or any other structure and telling the computer to create a 3D model out of it? 15 | 16 | Well, the questions and imaginations continue. But it all comes back to the most basic question: how do you play jigsaw puzzles? How do you arrange lots of scrambled image pieces into a big single image? How can you stitch a lot of natural images into a single image? 17 | 18 | The answer is, we are looking for specific patterns or specific features which are unique, which can be easily tracked and which can be easily compared. If we go for a definition of such a feature, we may find it difficult to express it in words, but we know what they are. If someone asks you to point out one good feature which can be compared across several images, you can point one out. That is why even small children can simply play these games. We search for these features in an image, we find them, we find the same features in other images and we align them. That's it. (In a jigsaw puzzle, we look more for the continuity between different pieces.) All these abilities are present in us inherently. 19 | 20 | So our one basic question expands into several, but they become more specific. **What are these features?** *(The answer should be understandable to a computer also.)* 21 | 22 | Well, it is difficult to say how humans find these features. It is already programmed in our brain. But if we look deep into some pictures and search for different patterns, we will find something interesting. For example, take the image below: 23 | 24 | .. image:: images/feature_building.jpg 25 | :alt: Understanding features 26 | :align: center 27 | 28 | The image is very simple. At the top of the image, six small image patches are given. Your task is to find the exact location of these patches in the original image. How many correct results can you find? 29 | 30 | A and B are flat surfaces spread over a large area. It is difficult to find the exact location of these patches. 31 | 32 | C and D are much simpler. They are edges of the building. You can find an approximate location, but the exact location is still difficult, because along the edge the patch looks the same everywhere; only normal to the edge is it different. So an edge is a better feature than a flat area, but still not good enough (in a jigsaw puzzle it is good for comparing the continuity of edges). 33 | 34 | Finally, E and F are corners of the building, and they can be found easily, because at a corner, wherever you move the patch, it will look different. So corners can be considered good features. Now let's move to a simpler (and widely used) image for better understanding. 35 | 36 | .. image:: images/feature_simple.png 37 | :alt: Features 38 | :align: center 39 | 40 | Just like above, the blue patch is a flat area and is difficult to find and track: wherever you move the blue patch, it looks the same. The black patch lies on an edge: if you move it in the vertical direction (i.e. along the gradient) it changes, but moved along the edge (parallel to the edge), it looks the same. And the red patch is on a corner: wherever you move the patch, it looks different, which means it is unique. So basically, corners are considered good features in an image. (Not just corners; in some cases blobs are also considered good features.) 41 | 42 | So now we have answered our question, "What are these features?". But the next question arises: how do we find them? Or how do we find the corners?
That also we answered in an intuitive way, i.e., look for the regions in images which have maximum variation when moved (by a small amount) in all regions around it. This would be projected into computer language in coming chapters. So finding these image features is called **Feature Detection**. 43 | 44 | So we found the features in image (Assume you did it). Once you found it, you should find the same in the other images. What we do? We take a region around the feature, we explain it in our own words, like "upper part is blue sky, lower part is building region, on that building there are some glasses etc" and you search for the same area in other images. Basically, you are describing the feature. Similar way, computer also should describe the region around the feature so that it can find it in other images. So called description is called **Feature Description**. Once you have the features and its description, you can find same features in all images and align them, stitch them or do whatever you want. 45 | 46 | So in this module, we are looking to different algorithms in OpenCV to find features, describe them, match them etc. 47 | 48 | Additional Resources 49 | ======================= 50 | 51 | Exercises 52 | =========== 53 | -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_matcher/images/matcher_flann.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_matcher/images/matcher_flann.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_matcher/images/matcher_result1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_matcher/images/matcher_result1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_matcher/images/matcher_result2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_matcher/images/matcher_result2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_orb/images/orb_kp.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_orb/images/orb_kp.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_orb/py_orb.rst: -------------------------------------------------------------------------------- 1 | .. _ORB: 2 | 3 | ORB (Oriented FAST and Rotated BRIEF) 4 | *************************************** 5 | 6 | Goal 7 | ====== 8 | 9 | In this chapter, 10 | * We will see the basics of ORB 11 | 12 | 13 | Theory 14 | ========== 15 | 16 | As an OpenCV enthusiast, the most important thing about the ORB is that it came from "OpenCV Labs". This algorithm was brought up by Ethan Rublee, Vincent Rabaud, Kurt Konolige and Gary R. 
Bradski in their paper **ORB: An efficient alternative to SIFT or SURF** in 2011. As the title says, it is a good alternative to SIFT and SURF in terms of computation cost, matching performance and, mainly, the patents. Yes, SIFT and SURF are patented and you are supposed to pay for their use. But ORB is not! 17 | 18 | ORB is basically a fusion of the FAST keypoint detector and the BRIEF descriptor with many modifications to enhance the performance. First it uses FAST to find keypoints, then applies the Harris corner measure to find the top N points among them. It also uses a pyramid to produce multiscale features. But one problem is that FAST doesn't compute the orientation. So what about rotation invariance? The authors came up with the following modification. 19 | 20 | It computes the intensity-weighted centroid of the patch with the located corner at the center. The direction of the vector from this corner point to the centroid gives the orientation. To improve the rotation invariance, moments are computed with x and y which should be in a circular region of radius :math:`r`, where :math:`r` is the size of the patch. 21 | 22 | Now for descriptors, ORB uses BRIEF descriptors. But we have already seen that BRIEF performs poorly with rotation. So what ORB does is to "steer" BRIEF according to the orientation of the keypoints. For any feature set of :math:`n` binary tests at location 23 | :math:`(x_i, y_i)`, define a :math:`2 \times n` matrix, :math:`S`, which contains the coordinates of these pixels. Then, using the orientation of the patch, :math:`\theta`, its rotation matrix is found and used to rotate :math:`S` to get the steered (rotated) version :math:`S_\theta`. 24 | 25 | ORB discretizes the angle into increments of :math:`2 \pi /30` (12 degrees) and constructs a lookup table of precomputed BRIEF patterns. As long as the keypoint orientation :math:`\theta` is consistent across views, the correct set of points :math:`S_\theta` will be used to compute its descriptor. 26 | 27 | BRIEF has an important property that each bit feature has a large variance and a mean near 0.5. But once it is oriented along the keypoint direction, it loses this property and becomes more distributed. High variance makes a feature more discriminative, since it responds differentially to inputs. Another desirable property is to have the tests uncorrelated, since then each test will contribute to the result. To resolve all these, ORB runs a greedy search among all possible binary tests to find the ones that have both high variance and means close to 0.5, as well as being uncorrelated. The result is called **rBRIEF**. 28 | 29 | For descriptor matching, multi-probe LSH, which improves on the traditional LSH, is used. The paper says ORB is much faster than SURF and SIFT, and the ORB descriptor works better than SURF's. ORB is a good choice for low-power devices, for panorama stitching etc. 30 | 31 | 32 | ORB in OpenCV 33 | ================ 34 | 35 | As usual, we have to create an ORB object with the function **cv2.ORB()**, or using the feature2d common interface. It has a number of optional parameters. The most useful ones are ``nFeatures``, which denotes the maximum number of features to be retained (500 by default), and ``scoreType``, which denotes whether the Harris score or the FAST score is used to rank the features (Harris score by default). Another parameter, ``WTA_K``, decides the number of points that produce each element of the oriented BRIEF descriptor. By default it is two, i.e. it selects two points at a time. In that case, the ``NORM_HAMMING`` distance is used for matching.
If ``WTA_K`` is 3 or 4, which takes 3 or 4 points to produce each element of the BRIEF descriptor, then the matching distance is defined by ``NORM_HAMMING2``. 36 | 37 | Below is a simple piece of code which shows the use of ORB. 38 | :: 39 | 40 | import numpy as np 41 | import cv2 42 | from matplotlib import pyplot as plt 43 | 44 | img = cv2.imread('simple.jpg',0) 45 | 46 | # Initiate ORB detector 47 | orb = cv2.ORB() 48 | 49 | # find the keypoints with ORB 50 | kp = orb.detect(img,None) 51 | 52 | # compute the descriptors with ORB 53 | kp, des = orb.compute(img, kp) 54 | 55 | # draw only keypoint locations, not size and orientation 56 | img2 = cv2.drawKeypoints(img,kp,color=(0,255,0), flags=0) 57 | plt.imshow(img2),plt.show() 58 | 59 | See the result below: 60 | 61 | .. image:: images/orb_kp.jpg 62 | :alt: ORB Keypoints 63 | :align: center 64 | 65 | 66 | ORB feature matching will be covered in another chapter. 67 | 68 | Additional Resources 69 | ========================== 70 | 71 | #. Ethan Rublee, Vincent Rabaud, Kurt Konolige, Gary R. Bradski: ORB: An efficient alternative to SIFT or SURF. ICCV 2011: 2564-2571. 72 | 73 | 74 | Exercises 75 | ============== 76 | -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_shi_tomasi/images/shitomasi_block1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_shi_tomasi/images/shitomasi_block1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_shi_tomasi/images/shitomasi_space.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_shi_tomasi/images/shitomasi_space.png -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.rst: -------------------------------------------------------------------------------- 1 | .. _shi_tomasi: 2 | 3 | Shi-Tomasi Corner Detector & Good Features to Track 4 | ******************************************************* 5 | 6 | Goal 7 | ======= 8 | 9 | In this chapter, 10 | 11 | * We will learn about another corner detector: the Shi-Tomasi Corner Detector 12 | * We will see the function: **cv2.goodFeaturesToTrack()** 13 | 14 | Theory 15 | ========= 16 | 17 | In the last chapter, we saw the Harris Corner Detector. Later, in 1994, J. Shi and C. Tomasi made a small modification to it in their paper **Good Features to Track**, which shows better results compared to the Harris Corner Detector. The scoring function in the Harris Corner Detector was given by: 18 | 19 | .. math:: 20 | 21 | R = \lambda_1 \lambda_2 - k(\lambda_1+\lambda_2)^2 22 | 23 | Instead of this, Shi-Tomasi proposed: 24 | 25 | .. math:: 26 | 27 | R = min(\lambda_1, \lambda_2) 28 | 29 | If it is greater than a threshold value, it is considered a corner. If we plot it in :math:`\lambda_1 - \lambda_2` space as we did for the Harris Corner Detector, we get an image as below: 30 | 31 | ..
image:: images/shitomasi_space.png 32 | :alt: Shi-Tomasi Corner Space 33 | :align: center 34 | 35 | From the figure, you can see that only when :math:`\lambda_1` and :math:`\lambda_2` are above a minimum value, :math:`\lambda_{min}`, is it considered a corner (green region). 36 | 37 | Code 38 | ======= 39 | 40 | OpenCV has a function, **cv2.goodFeaturesToTrack()**. It finds the N strongest corners in the image by the Shi-Tomasi method (or Harris Corner Detection, if you specify it). As usual, the image should be a grayscale image. Then you specify the number of corners you want to find. Then you specify the quality level, a value between 0 and 1 which denotes the minimum quality of corner below which every corner is rejected. Finally, we provide the minimum Euclidean distance between detected corners. 41 | 42 | With all this information, the function finds corners in the image. All corners below the quality level are rejected. Then it sorts the remaining corners by quality in descending order. The function then takes the strongest corner, throws away all nearby corners within the minimum distance and returns the N strongest corners. 43 | 44 | In the example below, we will try to find the 25 best corners: 45 | :: 46 | 47 | import numpy as np 48 | import cv2 49 | from matplotlib import pyplot as plt 50 | 51 | img = cv2.imread('simple.jpg') 52 | gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 53 | 54 | corners = cv2.goodFeaturesToTrack(gray,25,0.01,10) 55 | corners = np.int0(corners) 56 | 57 | for i in corners: 58 | x,y = i.ravel() 59 | cv2.circle(img,(x,y),3,255,-1) 60 | 61 | plt.imshow(img),plt.show() 62 | 63 | See the result below: 64 | 65 | .. image:: images/shitomasi_block1.jpg 66 | :alt: Shi-Tomasi Corners 67 | :align: center 68 | 69 | 70 | This function is more appropriate for tracking. We will see that when its time comes.
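The same function can also rank the corners with the Harris score instead of the Shi-Tomasi score. The short sketch below is not part of the original tutorial; it assumes the optional ``useHarrisDetector`` and ``k`` parameters of **cv2.goodFeaturesToTrack()** and reuses the same 'simple.jpg' image as above:
::

    import numpy as np
    import cv2
    from matplotlib import pyplot as plt

    img = cv2.imread('simple.jpg')
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    # same maxCorners, qualityLevel and minDistance as above,
    # but rank the corners with the Harris score instead
    corners = cv2.goodFeaturesToTrack(gray,25,0.01,10,useHarrisDetector=True,k=0.04)
    corners = np.int0(corners)

    for i in corners:
        x,y = i.ravel()
        cv2.circle(img,(x,y),3,255,-1)

    plt.imshow(img),plt.show()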
71 | 72 | Additional Resources 73 | ====================== 74 | 75 | 76 | Exercises 77 | ============ 78 | 79 | 80 | -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_sift_intro/images/sift_dog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_sift_intro/images/sift_dog.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_sift_intro/images/sift_keypoints.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_sift_intro/images/sift_keypoints.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_sift_intro/images/sift_local_extrema.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_sift_intro/images/sift_local_extrema.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_sift_intro/images/sift_scale_invariant.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_sift_intro/images/sift_scale_invariant.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_surf_intro/images/surf_boxfilter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_surf_intro/images/surf_boxfilter.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_surf_intro/images/surf_kp1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_surf_intro/images/surf_kp1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_surf_intro/images/surf_kp2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_surf_intro/images/surf_kp2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_surf_intro/images/surf_matching.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_surf_intro/images/surf_matching.jpg 
-------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_surf_intro/images/surf_orientation.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_surf_intro/images/surf_orientation.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/brief.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/brief.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/fast_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/fast_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/features_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/features_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/harris_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/harris_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/homography_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/homography_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/matching.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/matching.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/orb.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/orb.jpg 
-------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/shi_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/shi_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/sift_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/sift_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/surf_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_feature2d/py_table_of_contents_feature2d/images/surf_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_drawing_functions/images/drawing.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_gui/py_drawing_functions/images/drawing.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.rst: -------------------------------------------------------------------------------- 1 | .. _Drawing_Functions: 2 | 3 | Drawing Functions in OpenCV 4 | ****************************** 5 | 6 | Goal 7 | ===== 8 | 9 | .. container:: enumeratevisibleitemswithsquare 10 | 11 | * Learn to draw different geometric shapes with OpenCV 12 | * You will learn these functions : **cv2.line()**, **cv2.circle()** , **cv2.rectangle()**, **cv2.ellipse()**, **cv2.putText()** etc. 13 | 14 | Code 15 | ===== 16 | 17 | In all the above functions, you will see some common arguments as given below: 18 | 19 | * img : The image where you want to draw the shapes 20 | * color : Color of the shape. for BGR, pass it as a tuple, eg: ``(255,0,0)`` for blue. For grayscale, just pass the scalar value. 21 | * thickness : Thickness of the line or circle etc. If **-1** is passed for closed figures like circles, it will fill the shape. *default thickness = 1* 22 | * lineType : Type of line, whether 8-connected, anti-aliased line etc. *By default, it is 8-connected.* ``cv2.LINE_AA`` gives anti-aliased line which looks great for curves. 23 | 24 | Drawing Line 25 | ------------- 26 | To draw a line, you need to pass starting and ending coordinates of line. We will create a black image and draw a blue line on it from top-left to bottom-right corners. 
27 | :: 28 | 29 | import numpy as np 30 | import cv2 31 | 32 | # Create a black image 33 | img = np.zeros((512,512,3), np.uint8) 34 | 35 | # Draw a diagonal blue line with thickness of 5 px 36 | img = cv2.line(img,(0,0),(511,511),(255,0,0),5) 37 | 38 | Drawing Rectangle 39 | ------------------- 40 | To draw a rectangle, you need top-left corner and bottom-right corner of rectangle. This time we will draw a green rectangle at the top-right corner of image. 41 | :: 42 | 43 | img = cv2.rectangle(img,(384,0),(510,128),(0,255,0),3) 44 | 45 | Drawing Circle 46 | ---------------- 47 | To draw a circle, you need its center coordinates and radius. We will draw a circle inside the rectangle drawn above. 48 | :: 49 | 50 | img = cv2.circle(img,(447,63), 63, (0,0,255), -1) 51 | 52 | Drawing Ellipse 53 | -------------------- 54 | 55 | To draw the ellipse, we need to pass several arguments. One argument is the center location (x,y). Next argument is axes lengths (major axis length, minor axis length). ``angle`` is the angle of rotation of ellipse in anti-clockwise direction. ``startAngle`` and ``endAngle`` denotes the starting and ending of ellipse arc measured in clockwise direction from major axis. i.e. giving values 0 and 360 gives the full ellipse. For more details, check the documentation of **cv2.ellipse()**. Below example draws a half ellipse at the center of the image. 56 | :: 57 | 58 | img = cv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1) 59 | 60 | 61 | Drawing Polygon 62 | ------------------ 63 | To draw a polygon, first you need coordinates of vertices. Make those points into an array of shape ``ROWSx1x2`` where ROWS are number of vertices and it should be of type ``int32``. Here we draw a small polygon of with four vertices in yellow color. 64 | :: 65 | 66 | pts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32) 67 | pts = pts.reshape((-1,1,2)) 68 | img = cv2.polylines(img,[pts],True,(0,255,255)) 69 | 70 | .. Note:: If third argument is ``False``, you will get a polylines joining all the points, not a closed shape. 71 | 72 | .. Note:: ``cv2.polylines()`` can be used to draw multiple lines. Just create a list of all the lines you want to draw and pass it to the function. All lines will be drawn individually. It is more better and faster way to draw a group of lines than calling ``cv2.line()`` for each line. 73 | 74 | Adding Text to Images: 75 | ------------------------ 76 | To put texts in images, you need specify following things. 77 | * Text data that you want to write 78 | * Position coordinates of where you want put it (i.e. bottom-left corner where data starts). 79 | * Font type (Check **cv2.putText()** docs for supported fonts) 80 | * Font Scale (specifies the size of font) 81 | * regular things like color, thickness, lineType etc. For better look, ``lineType = cv2.LINE_AA`` is recommended. 82 | 83 | We will write **OpenCV** on our image in white color. 84 | :: 85 | 86 | font = cv2.FONT_HERSHEY_SIMPLEX 87 | cv2.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA) 88 | 89 | Result 90 | ---------- 91 | So it is time to see the final result of our drawing. As you studied in previous articles, display the image to see it. 92 | 93 | .. image:: images/drawing.jpg 94 | :alt: Drawing Functions in OpenCV 95 | :align: center 96 | 97 | 98 | Additional Resources 99 | ======================== 100 | 101 | 1. The angles used in ellipse function is not our circular angles. For more details, visit `this discussion `_. 102 | 103 | 104 | Exercises 105 | ============== 106 | #. 
Try to create the logo of OpenCV using drawing functions available in OpenCV 107 | -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_image_display/images/matplotlib_screenshot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_gui/py_image_display/images/matplotlib_screenshot.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_image_display/images/opencv_screenshot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_gui/py_image_display/images/opencv_screenshot.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.rst: -------------------------------------------------------------------------------- 1 | .. _Mouse_Handling: 2 | 3 | Mouse as a Paint-Brush 4 | *********************** 5 | 6 | Goal 7 | ====== 8 | 9 | .. container:: enumeratevisibleitemswithsquare 10 | 11 | * Learn to handle mouse events in OpenCV 12 | * You will learn these functions : **cv2.setMouseCallback()** 13 | 14 | Simple Demo 15 | ============= 16 | 17 | Here, we create a simple application which draws a circle on an image wherever we double-click on it. 18 | 19 | First we create a mouse callback function which is executed when a mouse event take place. Mouse event can be anything related to mouse like left-button down, left-button up, left-button double-click etc. It gives us the coordinates (x,y) for every mouse event. With this event and location, we can do whatever we like. To list all available events available, run the following code in Python terminal: 20 | :: 21 | 22 | >>> import cv2 23 | >>> events = [i for i in dir(cv2) if 'EVENT' in i] 24 | >>> print events 25 | 26 | Creating mouse callback function has a specific format which is same everywhere. It differs only in what the function does. So our mouse callback function does one thing, it draws a circle where we double-click. So see the code below. Code is self-explanatory from comments : 27 | :: 28 | 29 | import cv2 30 | import numpy as np 31 | 32 | # mouse callback function 33 | def draw_circle(event,x,y,flags,param): 34 | if event == cv2.EVENT_LBUTTONDBLCLK: 35 | cv2.circle(img,(x,y),100,(255,0,0),-1) 36 | 37 | # Create a black image, a window and bind the function to window 38 | img = np.zeros((512,512,3), np.uint8) 39 | cv2.namedWindow('image') 40 | cv2.setMouseCallback('image',draw_circle) 41 | 42 | while(1): 43 | cv2.imshow('image',img) 44 | if cv2.waitKey(20) & 0xFF == 27: 45 | break 46 | cv2.destroyAllWindows() 47 | 48 | More Advanced Demo 49 | =================== 50 | 51 | Now we go for much more better application. In this, we draw either rectangles or circles (depending on the mode we select) by dragging the mouse like we do in Paint application. So our mouse callback function has two parts, one to draw rectangle and other to draw the circles. This specific example will be really helpful in creating and understanding some interactive applications like object tracking, image segmentation etc. 
52 | :: 53 | 54 | import cv2 55 | import numpy as np 56 | 57 | drawing = False # true if mouse is pressed 58 | mode = True # if True, draw rectangle. Press 'm' to toggle to curve 59 | ix,iy = -1,-1 60 | 61 | # mouse callback function 62 | def draw_circle(event,x,y,flags,param): 63 | global ix,iy,drawing,mode 64 | 65 | if event == cv2.EVENT_LBUTTONDOWN: 66 | drawing = True 67 | ix,iy = x,y 68 | 69 | elif event == cv2.EVENT_MOUSEMOVE: 70 | if drawing == True: 71 | if mode == True: 72 | cv2.rectangle(img,(ix,iy),(x,y),(0,255,0),-1) 73 | else: 74 | cv2.circle(img,(x,y),5,(0,0,255),-1) 75 | 76 | elif event == cv2.EVENT_LBUTTONUP: 77 | drawing = False 78 | if mode == True: 79 | cv2.rectangle(img,(ix,iy),(x,y),(0,255,0),-1) 80 | else: 81 | cv2.circle(img,(x,y),5,(0,0,255),-1) 82 | 83 | Next we have to bind this mouse callback function to OpenCV window. In the main loop, we should set a keyboard binding for key 'm' to toggle between rectangle and circle. 84 | :: 85 | 86 | img = np.zeros((512,512,3), np.uint8) 87 | cv2.namedWindow('image') 88 | cv2.setMouseCallback('image',draw_circle) 89 | 90 | while(1): 91 | cv2.imshow('image',img) 92 | k = cv2.waitKey(1) & 0xFF 93 | if k == ord('m'): 94 | mode = not mode 95 | elif k == 27: 96 | break 97 | 98 | cv2.destroyAllWindows() 99 | 100 | 101 | Additional Resources 102 | ======================== 103 | 104 | 105 | Exercises 106 | ========== 107 | 108 | #. In our last example, we drew filled rectangle. You modify the code to draw an unfilled rectangle. 109 | -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_table_of_contents_gui/images/drawing.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_gui/py_table_of_contents_gui/images/drawing.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_table_of_contents_gui/images/image_display.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_gui/py_table_of_contents_gui/images/image_display.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_table_of_contents_gui/images/mouse_drawing.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_gui/py_table_of_contents_gui/images/mouse_drawing.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_table_of_contents_gui/images/trackbar.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_gui/py_table_of_contents_gui/images/trackbar.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_table_of_contents_gui/images/video_display.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_gui/py_table_of_contents_gui/images/video_display.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_table_of_contents_gui/py_table_of_contents_gui.rst: -------------------------------------------------------------------------------- 1 | .. _PY_Table-Of-Content-Gui: 2 | 3 | Gui Features in OpenCV 4 | ----------------------------------------------------------- 5 | 6 | 7 | * :ref:`Display_Image` 8 | 9 | .. tabularcolumns:: m{100pt} m{300pt} 10 | .. cssclass:: toctableopencv 11 | 12 | =========== ====================================================== 13 | |gui_1| Learn to load an image, display it and save it back 14 | 15 | =========== ====================================================== 16 | 17 | .. |gui_1| image:: images/image_display.jpg 18 | :height: 90pt 19 | :width: 90pt 20 | 21 | * :ref:`Display_Video` 22 | 23 | .. tabularcolumns:: m{100pt} m{300pt} 24 | .. cssclass:: toctableopencv 25 | 26 | =========== ====================================================== 27 | |gui_2| Learn to play videos, capture videos from Camera and write it as a video 28 | 29 | =========== ====================================================== 30 | 31 | .. |gui_2| image:: images/video_display.jpg 32 | :height: 90pt 33 | :width: 90pt 34 | 35 | * :ref:`Drawing_Functions` 36 | 37 | .. tabularcolumns:: m{100pt} m{300pt} 38 | .. cssclass:: toctableopencv 39 | 40 | =========== ====================================================== 41 | |gui_5| Learn to draw lines, rectangles, ellipses, circles etc with OpenCV 42 | 43 | =========== ====================================================== 44 | 45 | .. |gui_5| image:: images/drawing.jpg 46 | :height: 90pt 47 | :width: 90pt 48 | 49 | * :ref:`Mouse_Handling` 50 | 51 | .. tabularcolumns:: m{100pt} m{300pt} 52 | .. cssclass:: toctableopencv 53 | 54 | =========== ====================================================== 55 | |gui_3| Draw stuffs with your mouse 56 | 57 | =========== ====================================================== 58 | 59 | .. |gui_3| image:: images/mouse_drawing.jpg 60 | :height: 90pt 61 | :width: 90pt 62 | 63 | * :ref:`Trackbar` 64 | 65 | .. tabularcolumns:: m{100pt} m{300pt} 66 | .. cssclass:: toctableopencv 67 | 68 | =========== ====================================================== 69 | |gui_4| Create trackbar to control certain parameters 70 | 71 | =========== ====================================================== 72 | 73 | .. |gui_4| image:: images/trackbar.jpg 74 | :height: 90pt 75 | :width: 90pt 76 | 77 | .. raw:: latex 78 | 79 | \pagebreak 80 | 81 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 82 | .. 
toctree:: 83 | :hidden: 84 | 85 | ../py_image_display/py_image_display 86 | ../py_video_display/py_video_display 87 | ../py_drawing_functions/py_drawing_functions 88 | ../py_mouse_handling/py_mouse_handling 89 | ../py_trackbar/py_trackbar 90 | -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_trackbar/images/trackbar_screenshot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_gui/py_trackbar/images/trackbar_screenshot.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_gui/py_trackbar/py_trackbar.rst: -------------------------------------------------------------------------------- 1 | .. _Trackbar: 2 | 3 | Trackbar as the Color Palette 4 | ******************************** 5 | 6 | Goal 7 | ===== 8 | 9 | .. container:: enumeratevisibleitemswithsquare 10 | 11 | * Learn to bind trackbars to OpenCV windows 12 | * You will learn these functions : **cv2.getTrackbarPos()**, **cv2.createTrackbar()** etc. 13 | 14 | Code Demo 15 | ========== 16 | 17 | Here we will create a simple application which shows the color you specify. You have a window which shows the color, and three trackbars to specify each of the B, G, R colors. You slide the trackbars and the window color changes correspondingly. By default, the initial color is set to black. 18 | 19 | For the cv2.createTrackbar() function, the first argument is the trackbar name, the second one is the window name to which it is attached, the third argument is the default value, the fourth one is the maximum value and the fifth one is the callback function which is executed every time the trackbar value changes. The callback function always has a default argument, which is the trackbar position. (cv2.getTrackbarPos() takes only the trackbar name and the window name, and returns the current position.) In our case, the callback function does nothing, so we simply pass. 20 | 21 | Another important application of a trackbar is to use it as a button or switch. OpenCV, by default, doesn't have button functionality. So you can use a trackbar to get such functionality. In our application, we have created one switch: the application works only if the switch is ON, otherwise the screen is always black. 22 | :: 23 | 24 | import cv2 25 | import numpy as np 26 | 27 | def nothing(x): 28 | pass 29 | 30 | # Create a black image, a window 31 | img = np.zeros((300,512,3), np.uint8) 32 | cv2.namedWindow('image') 33 | 34 | # create trackbars for color change 35 | cv2.createTrackbar('R','image',0,255,nothing) 36 | cv2.createTrackbar('G','image',0,255,nothing) 37 | cv2.createTrackbar('B','image',0,255,nothing) 38 | 39 | # create switch for ON/OFF functionality 40 | switch = '0 : OFF \n1 : ON' 41 | cv2.createTrackbar(switch, 'image',0,1,nothing) 42 | 43 | while(1): 44 | cv2.imshow('image',img) 45 | k = cv2.waitKey(1) & 0xFF 46 | if k == 27: 47 | break 48 | 49 | # get current positions of four trackbars 50 | r = cv2.getTrackbarPos('R','image') 51 | g = cv2.getTrackbarPos('G','image') 52 | b = cv2.getTrackbarPos('B','image') 53 | s = cv2.getTrackbarPos(switch,'image') 54 | 55 | if s == 0: 56 | img[:] = 0 57 | else: 58 | img[:] = [b,g,r] 59 | 60 | cv2.destroyAllWindows() 61 | 62 | The screenshot of the application looks like below: 63 | 64 | .. image:: images/trackbar_screenshot.jpg 65 | :alt: Screenshot of Image with Trackbars 66 | :align: center 67 | 68 | Exercises 69 | =========== 70 | 71 | #.
Create a Paint application with adjustable colors and brush radius using trackbars. For drawing, refer previous tutorial on mouse handling. 72 | -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_canny/images/canny1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_canny/images/canny1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_canny/images/hysteresis.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_canny/images/hysteresis.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_canny/images/nms.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_canny/images/nms.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_canny/py_canny.rst: -------------------------------------------------------------------------------- 1 | .. _Canny: 2 | 3 | Canny Edge Detection 4 | *********************** 5 | 6 | Goal 7 | ====== 8 | 9 | In this chapter, we will learn about 10 | 11 | * Concept of Canny edge detection 12 | * OpenCV functions for that : **cv2.Canny()** 13 | 14 | Theory 15 | ========= 16 | 17 | Canny Edge Detection is a popular edge detection algorithm. It was developed by John F. Canny in 1986. It is a multi-stage algorithm and we will go through each stages. 18 | 19 | 1. **Noise Reduction** 20 | 21 | Since edge detection is susceptible to noise in the image, first step is to remove the noise in the image with a 5x5 Gaussian filter. We have already seen this in previous chapters. 22 | 23 | 2. **Finding Intensity Gradient of the Image** 24 | 25 | Smoothened image is then filtered with a Sobel kernel in both horizontal and vertical direction to get first derivative in horizontal direction (:math:`G_x`) and vertical direction (:math:`G_y`). From these two images, we can find edge gradient and direction for each pixel as follows: 26 | 27 | .. math:: 28 | 29 | Edge\_Gradient \; (G) = \sqrt{G_x^2 + G_y^2} 30 | 31 | Angle \; (\theta) = \tan^{-1} \bigg(\frac{G_y}{G_x}\bigg) 32 | 33 | Gradient direction is always perpendicular to edges. It is rounded to one of four angles representing vertical, horizontal and two diagonal directions. 34 | 35 | 3. **Non-maximum Suppression** 36 | 37 | After getting gradient magnitude and direction, a full scan of image is done to remove any unwanted pixels which may not constitute the edge. For this, at every pixel, pixel is checked if it is a local maximum in its neighborhood in the direction of gradient. Check the image below: 38 | 39 | .. image:: images/nms.jpg 40 | :alt: Non-Maximum Suppression 41 | :align: center 42 | 43 | Point A is on the edge ( in vertical direction). Gradient direction is normal to the edge. Point B and C are in gradient directions. So point A is checked with point B and C to see if it forms a local maximum. 
If so, it is considered for next stage, otherwise, it is suppressed ( put to zero). 44 | 45 | In short, the result you get is a binary image with "thin edges". 46 | 47 | 4. **Hysteresis Thresholding** 48 | 49 | This stage decides which are all edges are really edges and which are not. For this, we need two threshold values, `minVal` and `maxVal`. Any edges with intensity gradient more than `maxVal` are sure to be edges and those below `minVal` are sure to be non-edges, so discarded. Those who lie between these two thresholds are classified edges or non-edges based on their connectivity. If they are connected to "sure-edge" pixels, they are considered to be part of edges. Otherwise, they are also discarded. See the image below: 50 | 51 | .. image:: images/hysteresis.jpg 52 | :alt: Hysteresis Thresholding 53 | :align: center 54 | 55 | The edge A is above the `maxVal`, so considered as "sure-edge". Although edge C is below `maxVal`, it is connected to edge A, so that also considered as valid edge and we get that full curve. But edge B, although it is above `minVal` and is in same region as that of edge C, it is not connected to any "sure-edge", so that is discarded. So it is very important that we have to select `minVal` and `maxVal` accordingly to get the correct result. 56 | 57 | This stage also removes small pixels noises on the assumption that edges are long lines. 58 | 59 | So what we finally get is strong edges in the image. 60 | 61 | Canny Edge Detection in OpenCV 62 | =============================== 63 | 64 | OpenCV puts all the above in single function, **cv2.Canny()**. We will see how to use it. First argument is our input image. Second and third arguments are our `minVal` and `maxVal` respectively. Third argument is `aperture_size`. It is the size of Sobel kernel used for find image gradients. By default it is 3. Last argument is `L2gradient` which specifies the equation for finding gradient magnitude. If it is ``True``, it uses the equation mentioned above which is more accurate, otherwise it uses this function: :math:`Edge\_Gradient \; (G) = |G_x| + |G_y|`. By default, it is ``False``. 65 | :: 66 | 67 | import cv2 68 | import numpy as np 69 | from matplotlib import pyplot as plt 70 | 71 | img = cv2.imread('messi5.jpg',0) 72 | edges = cv2.Canny(img,100,200) 73 | 74 | plt.subplot(121),plt.imshow(img,cmap = 'gray') 75 | plt.title('Original Image'), plt.xticks([]), plt.yticks([]) 76 | plt.subplot(122),plt.imshow(edges,cmap = 'gray') 77 | plt.title('Edge Image'), plt.xticks([]), plt.yticks([]) 78 | 79 | plt.show() 80 | 81 | See the result below: 82 | 83 | .. image:: images/canny1.jpg 84 | :alt: Canny Edge Detection 85 | :align: center 86 | 87 | Additional Resources 88 | ======================= 89 | 90 | #. Canny edge detector at `Wikipedia `_ 91 | #. `Canny Edge Detection Tutorial `_ by Bill Green, 2002. 92 | 93 | 94 | Exercises 95 | =========== 96 | 97 | #. Write a small application to find the Canny edge detection whose threshold values can be varied using two trackbars. This way, you can understand the effect of threshold values. 
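A minimal sketch of this exercise, assuming only functions already covered in these tutorials (**cv2.createTrackbar()**, **cv2.getTrackbarPos()** and **cv2.Canny()**; the window and trackbar names are arbitrary), could look like this:
::

    import cv2

    def nothing(x):
        pass

    img = cv2.imread('messi5.jpg',0)
    cv2.namedWindow('edges')

    # two trackbars controlling the hysteresis thresholds
    cv2.createTrackbar('minVal','edges',100,500,nothing)
    cv2.createTrackbar('maxVal','edges',200,500,nothing)

    while(1):
        minVal = cv2.getTrackbarPos('minVal','edges')
        maxVal = cv2.getTrackbarPos('maxVal','edges')
        edges = cv2.Canny(img,minVal,maxVal)
        cv2.imshow('edges',edges)
        if cv2.waitKey(1) & 0xFF == 27:
            break

    cv2.destroyAllWindows()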
98 | -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_colorspaces/images/frame.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_colorspaces/images/frame.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.rst: -------------------------------------------------------------------------------- 1 | .. _Converting_colorspaces: 2 | 3 | Changing Colorspaces 4 | **************************** 5 | 6 | Goal 7 | ========= 8 | 9 | * In this tutorial, you will learn how to convert images from one color-space to another, like BGR :math:`\leftrightarrow` Gray, BGR :math:`\leftrightarrow` HSV etc. 10 | * In addition to that, we will create an application which extracts a colored object in a video 11 | * You will learn following functions : **cv2.cvtColor()**, **cv2.inRange()** etc. 12 | 13 | Changing Color-space 14 | ====================== 15 | 16 | There are more than 150 color-space conversion methods available in OpenCV. But we will look into only two which are most widely used ones, BGR :math:`\leftrightarrow` Gray and BGR :math:`\leftrightarrow` HSV. 17 | 18 | For color conversion, we use the function ``cv2.cvtColor(input_image, flag)`` where ``flag`` determines the type of conversion. 19 | 20 | For BGR :math:`\rightarrow` Gray conversion we use the flags ``cv2.COLOR_BGR2GRAY``. Similarly for BGR :math:`\rightarrow` HSV, we use the flag ``cv2.COLOR_BGR2HSV``. To get other flags, just run following commands in your Python terminal : 21 | :: 22 | 23 | >>> import cv2 24 | >>> flags = [i for i in dir(cv2) if i.startswith('COLOR_')] 25 | >>> print flags 26 | 27 | 28 | .. note:: For HSV, Hue range is [0,179], Saturation range is [0,255] and Value range is [0,255]. Different softwares use different scales. So if you are comparing OpenCV values with them, you need to normalize these ranges. 29 | 30 | Object Tracking 31 | ================== 32 | 33 | Now we know how to convert BGR image to HSV, we can use this to extract a colored object. In HSV, it is more easier to represent a color than RGB color-space. In our application, we will try to extract a blue colored object. So here is the method: 34 | 35 | * Take each frame of the video 36 | * Convert from BGR to HSV color-space 37 | * We threshold the HSV image for a range of blue color 38 | * Now extract the blue object alone, we can do whatever on that image we want. 
39 | 40 | Below is the code which are commented in detail : 41 | :: 42 | 43 | import cv2 44 | import numpy as np 45 | 46 | cap = cv2.VideoCapture(0) 47 | 48 | while(1): 49 | 50 | # Take each frame 51 | _, frame = cap.read() 52 | 53 | # Convert BGR to HSV 54 | hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) 55 | 56 | # define range of blue color in HSV 57 | lower_blue = np.array([110,50,50]) 58 | upper_blue = np.array([130,255,255]) 59 | 60 | # Threshold the HSV image to get only blue colors 61 | mask = cv2.inRange(hsv, lower_blue, upper_blue) 62 | 63 | # Bitwise-AND mask and original image 64 | res = cv2.bitwise_and(frame,frame, mask= mask) 65 | 66 | cv2.imshow('frame',frame) 67 | cv2.imshow('mask',mask) 68 | cv2.imshow('res',res) 69 | k = cv2.waitKey(5) & 0xFF 70 | if k == 27: 71 | break 72 | 73 | cv2.destroyAllWindows() 74 | 75 | Below image shows tracking of the blue object: 76 | 77 | .. image:: images/frame.jpg 78 | :width: 780 pt 79 | :alt: Blue Object Tracking 80 | :align: center 81 | 82 | .. note:: There are some noises in the image. We will see how to remove them in later chapters. 83 | 84 | .. note:: This is the simplest method in object tracking. Once you learn functions of contours, you can do plenty of things like find centroid of this object and use it to track the object, draw diagrams just by moving your hand in front of camera and many other funny stuffs. 85 | 86 | How to find HSV values to track? 87 | ----------------------------------- 88 | This is a common question found in `stackoverflow.com `_. It is very simple and you can use the same function, `cv2.cvtColor()`. Instead of passing an image, you just pass the BGR values you want. For example, to find the HSV value of Green, try following commands in Python terminal: 89 | :: 90 | 91 | >>> green = np.uint8([[[0,255,0 ]]]) 92 | >>> hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV) 93 | >>> print hsv_green 94 | [[[ 60 255 255]]] 95 | 96 | Now you take [H-10, 100,100] and [H+10, 255, 255] as lower bound and upper bound respectively. Apart from this method, you can use any image editing tools like GIMP or any online converters to find these values, but don't forget to adjust the HSV ranges. 97 | 98 | 99 | Additional Resources 100 | ======================== 101 | 102 | Exercises 103 | ============ 104 | #. Try to find a way to extract more than one colored objects, for eg, extract red, blue, green objects simultaneously. 
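One possible sketch for this exercise is given below. It simply reuses **cv2.inRange()** with one threshold pair per color and combines the masks with **cv2.bitwise_or()**. The green and red ranges here are rough guesses rather than values from this tutorial, and red in particular wraps around the ends of the Hue range, so it may need two ranges in practice:
::

    import cv2
    import numpy as np

    cap = cv2.VideoCapture(0)

    while(1):
        _, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # blue range from this tutorial; green and red ranges are rough guesses
        mask_blue  = cv2.inRange(hsv, np.array([110,50,50]), np.array([130,255,255]))
        mask_green = cv2.inRange(hsv, np.array([50,50,50]),  np.array([70,255,255]))
        mask_red   = cv2.inRange(hsv, np.array([0,50,50]),   np.array([10,255,255]))

        # combine the individual masks and keep only the colored objects
        mask = cv2.bitwise_or(mask_blue, cv2.bitwise_or(mask_green, mask_red))
        res = cv2.bitwise_and(frame, frame, mask=mask)

        cv2.imshow('res', res)
        if cv2.waitKey(5) & 0xFF == 27:
            break

    cv2.destroyAllWindows()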
105 | -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/approx.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/approx.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/boundingrect.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/boundingrect.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/circumcircle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/circumcircle.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/convexitydefects.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/convexitydefects.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/fitellipse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/fitellipse.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/fitline.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contour_features/images/fitline.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contour_properties/images/extremepoints.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contour_properties/images/extremepoints.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.rst: -------------------------------------------------------------------------------- 1 | .. 
_Contour_Properties: 2 | 3 | Contour Properties 4 | ********************* 5 | 6 | Here we will learn to extract some frequently used properties of objects like Solidity, Equivalent Diameter, Mask image, Mean Intensity etc. More features can be found at the `Matlab regionprops documentation `_. 7 | 8 | *(NB: Centroid, Area, Perimeter etc. also belong to this category, but we saw them in the last chapter)* 9 | 10 | 1. Aspect Ratio 11 | ================ 12 | 13 | It is the ratio of width to height of the bounding rect of the object. 14 | 15 | .. math:: 16 | 17 | Aspect \; Ratio = \frac{Width}{Height} 18 | 19 | .. code-block:: python 20 | 21 | x,y,w,h = cv2.boundingRect(cnt) 22 | aspect_ratio = float(w)/h 23 | 24 | 2. Extent 25 | ========== 26 | 27 | Extent is the ratio of contour area to bounding rectangle area. 28 | 29 | .. math:: 30 | Extent = \frac{Object \; Area}{Bounding \; Rectangle \; Area} 31 | 32 | .. code-block:: python 33 | 34 | area = cv2.contourArea(cnt) 35 | x,y,w,h = cv2.boundingRect(cnt) 36 | rect_area = w*h 37 | extent = float(area)/rect_area 38 | 39 | 3. Solidity 40 | ============ 41 | 42 | Solidity is the ratio of contour area to its convex hull area. 43 | 44 | .. math:: 45 | Solidity = \frac{Contour \; Area}{Convex \; Hull \; Area} 46 | 47 | .. code-block:: python 48 | 49 | area = cv2.contourArea(cnt) 50 | hull = cv2.convexHull(cnt) 51 | hull_area = cv2.contourArea(hull) 52 | solidity = float(area)/hull_area 53 | 54 | 4. Equivalent Diameter 55 | ======================= 56 | 57 | Equivalent Diameter is the diameter of the circle whose area is the same as the contour area. 58 | 59 | .. math:: 60 | Equivalent \; Diameter = \sqrt{\frac{4 \times Contour \; Area}{\pi}} 61 | 62 | .. code-block:: python 63 | 64 | area = cv2.contourArea(cnt) 65 | equi_diameter = np.sqrt(4*area/np.pi) 66 | 67 | 5. Orientation 68 | ================ 69 | 70 | Orientation is the angle at which the object is directed. The following method also gives the Major Axis and Minor Axis lengths. 71 | :: 72 | 73 | (x,y),(MA,ma),angle = cv2.fitEllipse(cnt) 74 | 75 | 6. Mask and Pixel Points 76 | ========================= 77 | 78 | In some cases, we may need all the points which comprise that object. It can be done as follows: 79 | :: 80 | 81 | mask = np.zeros(imgray.shape,np.uint8) 82 | cv2.drawContours(mask,[cnt],0,255,-1) 83 | pixelpoints = np.transpose(np.nonzero(mask)) 84 | #pixelpoints = cv2.findNonZero(mask) 85 | 86 | Here, two methods are given to do the same thing: one using Numpy functions, the other using an OpenCV function (the last commented line). The results are the same, with a slight difference: Numpy gives coordinates in **(row, column)** format, while OpenCV gives coordinates in **(x,y)** format, so the answers are interchanged. Note that **row = y** and **column = x**. 87 | 88 | 7. Maximum Value, Minimum Value and their locations 89 | ======================================================= 90 | 91 | We can find these parameters using a mask image. 92 | :: 93 | 94 | min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(imgray,mask = mask) 95 | 96 | 8. Mean Color or Mean Intensity 97 | =================================== 98 | 99 | Here, we can find the average color of an object, or its average intensity in grayscale mode. We again use the same mask to do it. 100 | :: 101 | 102 | mean_val = cv2.mean(im,mask = mask) 103 | 104 | 9. Extreme Points 105 | ================== 106 | 107 | Extreme Points means the topmost, bottommost, rightmost and leftmost points of the object.
108 | :: 109 | 110 | leftmost = tuple(cnt[cnt[:,:,0].argmin()][0]) 111 | rightmost = tuple(cnt[cnt[:,:,0].argmax()][0]) 112 | topmost = tuple(cnt[cnt[:,:,1].argmin()][0]) 113 | bottommost = tuple(cnt[cnt[:,:,1].argmax()][0]) 114 | 115 | For example, if I apply it to a map of India, I get the following result: 116 | 117 | .. image:: images/extremepoints.jpg 118 | :alt: Extreme Points 119 | :align: center 120 | 121 | Additional Resources 122 | ====================== 123 | 124 | Exercises 125 | =========== 126 | #. There are still some features left in the Matlab regionprops documentation. Try to implement them. 127 | -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contours_begin/images/none.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contours_begin/images/none.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.rst: -------------------------------------------------------------------------------- 1 | .. _Contours_Getting_Started: 2 | 3 | Contours : Getting Started 4 | **************************** 5 | 6 | Goal 7 | ====== 8 | 9 | * Understand what contours are. 10 | * Learn to find contours, draw contours, etc. 11 | * You will see these functions : **cv2.findContours()**, **cv2.drawContours()** 12 | 13 | What are contours? 14 | =================== 15 | 16 | Contours can be explained simply as a curve joining all the continuous points (along the boundary) having the same color or intensity. Contours are a useful tool for shape analysis and object detection and recognition. 17 | 18 | * For better accuracy, use binary images. So before finding contours, apply thresholding or Canny edge detection. 19 | * The findContours function modifies the source image. So if you still want the source image after finding contours, store it in another variable first. 20 | * In OpenCV, finding contours is like finding a white object on a black background. So remember, the object to be found should be white and the background should be black. 21 | 22 | Let's see how to find the contours of a binary image: 23 | :: 24 | 25 | import numpy as np 26 | import cv2 27 | 28 | im = cv2.imread('test.jpg') 29 | imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) 30 | ret,thresh = cv2.threshold(imgray,127,255,0) 31 | image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) 32 | 33 | There are three arguments in the **cv2.findContours()** function: the first is the source image, the second is the contour retrieval mode, and the third is the contour approximation method. It outputs the image, the contours and the hierarchy. ``contours`` is a Python list of all the contours in the image. Each individual contour is a Numpy array of (x,y) coordinates of the boundary points of the object. 34 | 35 | .. note:: We will discuss the second and third arguments and the hierarchy in detail later. Until then, the values given to them in the code sample will work fine for all images. 36 | 37 | 38 | How to draw the contours? 39 | =========================== 40 | 41 | To draw the contours, the ``cv2.drawContours`` function is used. It can also be used to draw any shape, provided you have its boundary points.
Its first argument is the source image, the second argument is the contours, which should be passed as a Python list, the third argument is the index of the contour (useful when drawing an individual contour; to draw all contours, pass -1), and the remaining arguments are color, thickness etc. 42 | 43 | To draw all the contours in an image: 44 | :: 45 | 46 | img = cv2.drawContours(img, contours, -1, (0,255,0), 3) 47 | 48 | To draw an individual contour, say the 4th contour: 49 | :: 50 | 51 | img = cv2.drawContours(img, contours, 3, (0,255,0), 3) 52 | 53 | But most of the time, the method below will be useful: 54 | :: 55 | 56 | cnt = contours[3] 57 | img = cv2.drawContours(img, [cnt], 0, (0,255,0), 3) 58 | 59 | .. note:: The last two methods are the same, but as you go forward, you will see that the last one is more useful. 60 | 61 | Contour Approximation Method 62 | ================================ 63 | 64 | This is the third argument of the ``cv2.findContours`` function. What does it actually denote? 65 | 66 | Above, we said that contours are the boundaries of a shape with the same intensity. A contour stores the (x,y) coordinates of the boundary of a shape. But does it store all the coordinates? That is specified by this contour approximation method. 67 | 68 | If you pass ``cv2.CHAIN_APPROX_NONE``, all the boundary points are stored. But do we actually need all the points? For example, suppose you found the contour of a straight line. Do you need all the points on the line to represent that line? No, we need just the two end points of that line. This is what ``cv2.CHAIN_APPROX_SIMPLE`` does. It removes all redundant points and compresses the contour, thereby saving memory. 69 | 70 | The image of a rectangle below demonstrates this technique. Just draw a circle on all the coordinates in the contour array (drawn in blue). The first image shows the points I got with ``cv2.CHAIN_APPROX_NONE`` (734 points) and the second image shows the ones with ``cv2.CHAIN_APPROX_SIMPLE`` (only 4 points). See how much memory it saves! 71 | 72 | ..
image:: images/none.jpg 73 | :alt: Contour Retrieval Method 74 | :align: center 75 | 76 | Additional Resources 77 | ======================== 78 | 79 | Exercises 80 | ============= 81 | -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/images/ccomp_hierarchy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/images/ccomp_hierarchy.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/images/hierarchy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/images/hierarchy.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/images/tree_hierarchy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/images/tree_hierarchy.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/images/defects.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/images/defects.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/images/matchshapes.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/images/matchshapes.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.rst: -------------------------------------------------------------------------------- 1 | .. _Contours_More_Functions: 2 | 3 | Contours : More Functions 4 | ****************************** 5 | 6 | Goal 7 | ====== 8 | 9 | In this chapter, we will learn about 10 | * Convexity defects and how to find them. 11 | * Finding shortest distance from a point to a polygon 12 | * Matching different shapes 13 | 14 | Theory and Code 15 | ================ 16 | 17 | 1. Convexity Defects 18 | ----------------------- 19 | 20 | We saw what is convex hull in second chapter about contours. Any deviation of the object from this hull can be considered as convexity defect. 21 | 22 | OpenCV comes with a ready-made function to find this, **cv2.convexityDefects()**. 
A basic function call would look like below: 23 | :: 24 | 25 | hull = cv2.convexHull(cnt,returnPoints = False) 26 | defects = cv2.convexityDefects(cnt,hull) 27 | 28 | .. note:: Remember, we have to pass ``returnPoints = False`` while finding the convex hull in order to find convexity defects. 29 | 30 | It returns an array where each row contains these values - **[ start point, end point, farthest point, approximate distance to farthest point ]**. We can visualize it using an image. We draw a line joining the start point and end point, then draw a circle at the farthest point. Remember, the first three values returned are indices into ``cnt``, so we have to look those points up in ``cnt``. 31 | :: 32 | 33 | import cv2 34 | import numpy as np 35 | 36 | img = cv2.imread('star.jpg') 37 | img_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 38 | ret, thresh = cv2.threshold(img_gray, 127, 255,0) 39 | contours,hierarchy = cv2.findContours(thresh,2,1) 40 | cnt = contours[0] 41 | 42 | hull = cv2.convexHull(cnt,returnPoints = False) 43 | defects = cv2.convexityDefects(cnt,hull) 44 | 45 | for i in range(defects.shape[0]): 46 | s,e,f,d = defects[i,0] 47 | start = tuple(cnt[s][0]) 48 | end = tuple(cnt[e][0]) 49 | far = tuple(cnt[f][0]) 50 | cv2.line(img,start,end,[0,255,0],2) 51 | cv2.circle(img,far,5,[0,0,255],-1) 52 | 53 | cv2.imshow('img',img) 54 | cv2.waitKey(0) 55 | cv2.destroyAllWindows() 56 | 57 | And see the result: 58 | 59 | .. image:: images/defects.jpg 60 | :alt: Convexity Defects 61 | :align: center 62 | 63 | 2. Point Polygon Test 64 | ----------------------- 65 | 66 | This function finds the shortest distance between a point in the image and a contour. It returns a distance which is negative when the point is outside the contour, positive when the point is inside, and zero if the point is on the contour. 67 | 68 | For example, we can check the point (50,50) as follows: 69 | :: 70 | 71 | dist = cv2.pointPolygonTest(cnt,(50,50),True) 72 | 73 | In this function, the third argument is ``measureDist``. If it is ``True``, it finds the signed distance. If ``False``, it finds whether the point is inside, outside or on the contour (returning +1, -1 or 0 respectively). 74 | 75 | .. note:: If you don't want to find the distance, make sure the third argument is ``False``, because it is a time-consuming process. Making it ``False`` gives about a 2-3X speedup. 76 | 77 | 3. Match Shapes 78 | ----------------- 79 | 80 | OpenCV comes with a function **cv2.matchShapes()** which enables us to compare two shapes, or two contours, and returns a metric showing the similarity. The lower the result, the better the match. It is calculated based on the Hu moment values. The different measurement methods are explained in the docs. 81 | :: 82 | 83 | import cv2 84 | import numpy as np 85 | 86 | img1 = cv2.imread('star.jpg',0) 87 | img2 = cv2.imread('star2.jpg',0) 88 | 89 | ret, thresh = cv2.threshold(img1, 127, 255,0) 90 | ret, thresh2 = cv2.threshold(img2, 127, 255,0) 91 | contours,hierarchy = cv2.findContours(thresh,2,1) 92 | cnt1 = contours[0] 93 | contours,hierarchy = cv2.findContours(thresh2,2,1) 94 | cnt2 = contours[0] 95 | 96 | ret = cv2.matchShapes(cnt1,cnt2,1,0.0) 97 | print ret 98 | 99 | I tried matching shapes with the different shapes given below: 100 | 101 | ..
image:: images/matchshapes.jpg 102 | :alt: Match Shapes 103 | :align: center 104 | 105 | I got following results: 106 | 107 | * Matching Image A with itself = 0.0 108 | * Matching Image A with Image B = 0.001946 109 | * Matching Image A with Image C = 0.326911 110 | 111 | See, even image rotation doesn't affect much on this comparison. 112 | 113 | .. seealso:: `Hu-Moments `_ are seven moments invariant to translation, rotation and scale. Seventh one is skew-invariant. Those values can be found using **cv2.HuMoments()** function. 114 | 115 | Additional Resources 116 | ===================== 117 | 118 | Exercises 119 | ============ 120 | #. Check the documentation for **cv2.pointPolygonTest()**, you can find a nice image in Red and Blue color. It represents the distance from all pixels to the white curve on it. All pixels inside curve is blue depending on the distance. Similarly outside points are red. Contour edges are marked with White. So problem is simple. Write a code to create such a representation of distance. 121 | 122 | #. Compare images of digits or letters using **cv2.matchShapes()**. ( That would be a simple step towards OCR ) 123 | -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_table_of_contents_contours/images/contour_defects.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_table_of_contents_contours/images/contour_defects.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_table_of_contents_contours/images/contour_features.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_table_of_contents_contours/images/contour_features.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_table_of_contents_contours/images/contour_hierarchy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_table_of_contents_contours/images/contour_hierarchy.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_table_of_contents_contours/images/contour_properties.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_table_of_contents_contours/images/contour_properties.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_table_of_contents_contours/images/contour_starting.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_contours/py_table_of_contents_contours/images/contour_starting.jpg 
-------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_contours/py_table_of_contents_contours/py_table_of_contents_contours.rst: -------------------------------------------------------------------------------- 1 | .. _Table-Of-Content-Contours: 2 | 3 | Contours in OpenCV 4 | ----------------------------------------------------------- 5 | 6 | * :ref:`Contours_Getting_Started` 7 | 8 | .. tabularcolumns:: m{100pt} m{300pt} 9 | .. cssclass:: toctableopencv 10 | 11 | =========== =================================================================== 12 | |contour_1| Learn to find and draw Contours 13 | 14 | 15 | =========== =================================================================== 16 | 17 | .. |contour_1| image:: images/contour_starting.jpg 18 | :height: 90pt 19 | :width: 90pt 20 | 21 | * :ref:`Contour_Features` 22 | 23 | .. tabularcolumns:: m{100pt} m{300pt} 24 | .. cssclass:: toctableopencv 25 | 26 | =========== =================================================================== 27 | |contour_2| Learn to find different features of contours like area, perimeter, bounding rectangle etc. 28 | 29 | =========== =================================================================== 30 | 31 | .. |contour_2| image:: images/contour_features.jpg 32 | :height: 90pt 33 | :width: 90pt 34 | 35 | * :ref:`Contour_Properties` 36 | 37 | .. tabularcolumns:: m{100pt} m{300pt} 38 | .. cssclass:: toctableopencv 39 | 40 | =========== =================================================================== 41 | |contour_3| Learn to find different properties of contours like Solidity, Mean Intensity etc. 42 | 43 | =========== =================================================================== 44 | 45 | .. |contour_3| image:: images/contour_properties.jpg 46 | :height: 90pt 47 | :width: 90pt 48 | 49 | * :ref:`Contours_More_Functions` 50 | 51 | .. tabularcolumns:: m{100pt} m{300pt} 52 | .. cssclass:: toctableopencv 53 | 54 | =========== =================================================================== 55 | |contour_4| Learn to find convexity defects, pointPolygonTest, match different shapes etc. 56 | 57 | =========== =================================================================== 58 | 59 | .. |contour_4| image:: images/contour_defects.jpg 60 | :height: 90pt 61 | :width: 90pt 62 | 63 | * :ref:`Contours_Hierarchy` 64 | 65 | .. tabularcolumns:: m{100pt} m{300pt} 66 | .. cssclass:: toctableopencv 67 | 68 | =========== =================================================================== 69 | |contour_5| Learn about Contour Hierarchy 70 | 71 | =========== =================================================================== 72 | 73 | .. |contour_5| image:: images/contour_hierarchy.jpg 74 | :height: 90pt 75 | :width: 90pt 76 | 77 | .. raw:: latex 78 | 79 | \pagebreak 80 | 81 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 82 | .. 
toctree:: 83 | :hidden: 84 | 85 | ../py_contours_begin/py_contours_begin 86 | ../py_contour_features/py_contour_features 87 | ../py_contour_properties/py_contour_properties 88 | ../py_contours_more_functions/py_contours_more_functions 89 | ../py_contours_hierarchy/py_contours_hierarchy 90 | 91 | 92 | -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_filtering/images/bilateral.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_filtering/images/bilateral.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_filtering/images/blur.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_filtering/images/blur.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_filtering/images/filter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_filtering/images/filter.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_filtering/images/gaussian.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_filtering/images/gaussian.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_filtering/images/median.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_filtering/images/median.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_geometric_transformations/images/affine.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_geometric_transformations/images/affine.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_geometric_transformations/images/perspective.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_geometric_transformations/images/perspective.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_geometric_transformations/images/rotation.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_geometric_transformations/images/rotation.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_geometric_transformations/images/translation.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_geometric_transformations/images/translation.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_grabcut/images/grabcut.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_grabcut/images/grabcut.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_grabcut/images/grabcut_mask.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_grabcut/images/grabcut_mask.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_grabcut/images/grabcut_output1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_grabcut/images/grabcut_output1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_grabcut/images/grabcut_rect.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_grabcut/images/grabcut_rect.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_gradients/images/double_edge.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_gradients/images/double_edge.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_gradients/images/gradients.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_gradients/images/gradients.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_gradients/py_gradients.rst: -------------------------------------------------------------------------------- 1 | .. 
_Gradients: 2 | 3 | Image Gradients 4 | ********************** 5 | 6 | Goal 7 | ====== 8 | 9 | In this chapter, we will learn to: 10 | 11 | * Find image gradients, edges, etc. 12 | * We will see the following functions : **cv2.Sobel()**, **cv2.Scharr()**, **cv2.Laplacian()** etc 13 | 14 | Theory 15 | ======= 16 | 17 | OpenCV provides three types of gradient filters, or high-pass filters: Sobel, Scharr and Laplacian. We will see each one of them. 18 | 19 | 1. Sobel and Scharr Derivatives 20 | --------------------------------- 21 | 22 | The Sobel operator is a joint Gaussian smoothing plus differentiation operation, so it is more resistant to noise. You can specify the direction of the derivative to be taken, vertical or horizontal (by the arguments ``yorder`` and ``xorder`` respectively). You can also specify the size of the kernel by the argument ``ksize``. If ksize = -1, a 3x3 Scharr filter is used, which gives better results than a 3x3 Sobel filter. Please see the docs for the kernels used. 23 | 24 | 2. Laplacian Derivatives 25 | -------------------------- 26 | 27 | It calculates the Laplacian of the image given by the relation, :math:`\Delta src = \frac{\partial ^2{src}}{\partial x^2} + \frac{\partial ^2{src}}{\partial y^2}` where each derivative is found using Sobel derivatives. If ``ksize = 1``, then the following kernel is used for filtering: 28 | 29 | .. math:: 30 | 31 | kernel = \begin{bmatrix} 0 & 1 & 0 \\ 1 & -4 & 1 \\ 0 & 1 & 0 \end{bmatrix} 32 | 33 | Code 34 | ======= 35 | 36 | The code below shows all three operators in a single diagram. All kernels are of size 5x5, and the output depth is set to cv2.CV_64F (passing -1 instead would keep the result in the same np.uint8 type as the input). 37 | :: 38 | 39 | import cv2 40 | import numpy as np 41 | from matplotlib import pyplot as plt 42 | 43 | img = cv2.imread('dave.jpg',0) 44 | 45 | laplacian = cv2.Laplacian(img,cv2.CV_64F) 46 | sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5) 47 | sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5) 48 | 49 | plt.subplot(2,2,1),plt.imshow(img,cmap = 'gray') 50 | plt.title('Original'), plt.xticks([]), plt.yticks([]) 51 | plt.subplot(2,2,2),plt.imshow(laplacian,cmap = 'gray') 52 | plt.title('Laplacian'), plt.xticks([]), plt.yticks([]) 53 | plt.subplot(2,2,3),plt.imshow(sobelx,cmap = 'gray') 54 | plt.title('Sobel X'), plt.xticks([]), plt.yticks([]) 55 | plt.subplot(2,2,4),plt.imshow(sobely,cmap = 'gray') 56 | plt.title('Sobel Y'), plt.xticks([]), plt.yticks([]) 57 | 58 | plt.show() 59 | 60 | Result: 61 | 62 | .. image:: images/gradients.jpg 63 | :alt: Image Gradients 64 | :align: center 65 | 66 | One Important Matter! 67 | ======================= 68 | 69 | If the output datatype is cv2.CV_8U or np.uint8, there is a slight problem. A Black-to-White transition is taken as a positive slope (it has a positive value) while a White-to-Black transition is taken as a negative slope (it has a negative value). So when you convert the data to np.uint8, all negative slopes are made zero. In simple words, you miss that edge. 70 | 71 | If you want to detect both edges, the better option is to keep the output datatype in a higher form, like cv2.CV_16S or cv2.CV_64F, take its absolute value and then convert back to cv2.CV_8U. The code below demonstrates this procedure for a horizontal Sobel filter and shows the difference in results. 72 | :: 73 | 74 | import cv2 75 | import numpy as np 76 | from matplotlib import pyplot as plt 77 | 78 | img = cv2.imread('box.png',0) 79 | 80 | # Output dtype = cv2.CV_8U 81 | sobelx8u = cv2.Sobel(img,cv2.CV_8U,1,0,ksize=5) 82 | 83 | # Output dtype = cv2.CV_64F.
Then take its absolute and convert to cv2.CV_8U 84 | sobelx64f = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5) 85 | abs_sobel64f = np.absolute(sobelx64f) 86 | sobel_8u = np.uint8(abs_sobel64f) 87 | 88 | plt.subplot(1,3,1),plt.imshow(img,cmap = 'gray') 89 | plt.title('Original'), plt.xticks([]), plt.yticks([]) 90 | plt.subplot(1,3,2),plt.imshow(sobelx8u,cmap = 'gray') 91 | plt.title('Sobel CV_8U'), plt.xticks([]), plt.yticks([]) 92 | plt.subplot(1,3,3),plt.imshow(sobel_8u,cmap = 'gray') 93 | plt.title('Sobel abs(CV_64F)'), plt.xticks([]), plt.yticks([]) 94 | 95 | plt.show() 96 | 97 | Check the result below: 98 | 99 | .. image:: images/double_edge.jpg 100 | :alt: Double Edges 101 | :align: center 102 | 103 | Additional Resources 104 | ====================== 105 | 106 | Exercises 107 | =========== 108 | 109 | -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/images/2dhist_matplotlib.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/images/2dhist_matplotlib.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/images/2dhist_opencv.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/images/2dhist_opencv.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/images/backproject_opencv.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/images/backproject_opencv.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/images/histogram_masking.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/images/histogram_masking.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/images/histogram_matplotlib.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/images/histogram_matplotlib.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/images/histogram_rgb_plot.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/images/histogram_rgb_plot.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/images/histogram_sample.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/images/histogram_sample.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/clahe_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/clahe_1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/clahe_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/clahe_2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/equalization_opencv.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/equalization_opencv.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/histeq_numpy1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/histeq_numpy1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/histeq_numpy2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/histeq_numpy2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/histogram_equalization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/images/histogram_equalization.png 
-------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_table_of_contents_histograms/images/histograms_1d.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_table_of_contents_histograms/images/histograms_1d.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_table_of_contents_histograms/images/histograms_2d.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_table_of_contents_histograms/images/histograms_2d.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_table_of_contents_histograms/images/histograms_bp.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_table_of_contents_histograms/images/histograms_bp.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_table_of_contents_histograms/images/histograms_equ.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_histograms/py_table_of_contents_histograms/images/histograms_equ.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_histograms/py_table_of_contents_histograms/py_table_of_contents_histograms.rst: -------------------------------------------------------------------------------- 1 | .. _Table-Of-Content-Histograms: 2 | 3 | Histograms in OpenCV 4 | ----------------------------------------------------------- 5 | 6 | * :ref:`Histograms_Getting_Started` 7 | 8 | .. tabularcolumns:: m{100pt} m{300pt} 9 | .. cssclass:: toctableopencv 10 | 11 | =========== =================================================================== 12 | |hist_1| Learn to find and draw Contours 13 | 14 | 15 | =========== =================================================================== 16 | 17 | .. |hist_1| image:: images/histograms_1d.jpg 18 | :height: 90pt 19 | :width: 90pt 20 | 21 | * :ref:`Histogram_Equalization` 22 | 23 | .. tabularcolumns:: m{100pt} m{300pt} 24 | .. cssclass:: toctableopencv 25 | 26 | =========== =================================================================== 27 | |hist_2| Learn to Equalize Histograms to get better contrast for images 28 | 29 | 30 | =========== =================================================================== 31 | 32 | .. |hist_2| image:: images/histograms_equ.jpg 33 | :height: 90pt 34 | :width: 90pt 35 | 36 | * :ref:`TwoD_Histogram` 37 | 38 | .. tabularcolumns:: m{100pt} m{300pt} 39 | .. 
cssclass:: toctableopencv 40 | 41 | =========== =================================================================== 42 | |hist_3| Learn to find and plot 2D Histograms 43 | 44 | 45 | =========== =================================================================== 46 | 47 | .. |hist_3| image:: images/histograms_2d.jpg 48 | :height: 90pt 49 | :width: 90pt 50 | 51 | * :ref:`Histogram_Backprojection` 52 | 53 | .. tabularcolumns:: m{100pt} m{300pt} 54 | .. cssclass:: toctableopencv 55 | 56 | =========== =================================================================== 57 | |hist_4| Learn histogram backprojection to segment colored objects 58 | 59 | 60 | =========== =================================================================== 61 | 62 | .. |hist_4| image:: images/histograms_bp.jpg 63 | :height: 90pt 64 | :width: 90pt 65 | 66 | .. raw:: latex 67 | 68 | \pagebreak 69 | 70 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 71 | .. toctree:: 72 | :hidden: 73 | 74 | ../py_histogram_begins/py_histogram_begins 75 | ../py_histogram_equalization/py_histogram_equalization 76 | ../py_2d_histogram/py_2d_histogram 77 | ../py_histogram_backprojection/py_histogram_backprojection 78 | -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_houghcircles/images/houghcircles2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_houghcircles/images/houghcircles2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.rst: -------------------------------------------------------------------------------- 1 | .. _Hough_Circles: 2 | 3 | Hough Circle Transform 4 | ************************** 5 | 6 | Goal 7 | ===== 8 | 9 | In this chapter, 10 | * We will learn to use Hough Transform to find circles in an image. 11 | * We will see these functions: **cv2.HoughCircles()** 12 | 13 | Theory 14 | ======== 15 | 16 | A circle is represented mathematically as :math:`(x-x_{center})^2 + (y - y_{center})^2 = r^2` where :math:`(x_{center},y_{center})` is the center of the circle, and :math:`r` is the radius of the circle. From equation, we can see we have 3 parameters, so we need a 3D accumulator for hough transform, which would be highly ineffective. So OpenCV uses more trickier method, **Hough Gradient Method** which uses the gradient information of edges. 17 | 18 | The function we use here is **cv2.HoughCircles()**. It has plenty of arguments which are well explained in the documentation. So we directly go to the code. 19 | :: 20 | 21 | import cv2 22 | import numpy as np 23 | 24 | img = cv2.imread('opencv_logo.png',0) 25 | img = cv2.medianBlur(img,5) 26 | cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR) 27 | 28 | circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,20, 29 | param1=50,param2=30,minRadius=0,maxRadius=0) 30 | 31 | circles = np.uint16(np.around(circles)) 32 | for i in circles[0,:]: 33 | # draw the outer circle 34 | cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2) 35 | # draw the center of the circle 36 | cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3) 37 | 38 | cv2.imshow('detected circles',cimg) 39 | cv2.waitKey(0) 40 | cv2.destroyAllWindows() 41 | 42 | Result is shown below: 43 | 44 | .. 
image:: images/houghcircles2.jpg 45 | :alt: Hough Circles 46 | :align: center 47 | 48 | Additional Resources 49 | ===================== 50 | 51 | Exercises 52 | =========== 53 | -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_houghlines/images/houghlines2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_houghlines/images/houghlines2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_houghlines/images/houghlines3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_houghlines/images/houghlines3.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_houghlines/images/houghlines4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_houghlines/images/houghlines4.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_houghlines/images/houghlines5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_houghlines/images/houghlines5.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_houghlines/images/houghlinesdemo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_houghlines/images/houghlinesdemo.gif -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_morphological_ops/images/blackhat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_morphological_ops/images/blackhat.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_morphological_ops/images/closing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_morphological_ops/images/closing.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_morphological_ops/images/dilation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_morphological_ops/images/dilation.png 
-------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_morphological_ops/images/erosion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_morphological_ops/images/erosion.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_morphological_ops/images/gradient.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_morphological_ops/images/gradient.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_morphological_ops/images/j.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_morphological_ops/images/j.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_morphological_ops/images/opening.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_morphological_ops/images/opening.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_morphological_ops/images/tophat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_morphological_ops/images/tophat.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_pyramids/images/lap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_pyramids/images/lap.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_pyramids/images/messipyr.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_pyramids/images/messipyr.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_pyramids/images/messiup.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_pyramids/images/messiup.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_pyramids/images/orapple.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_pyramids/images/orapple.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/blurring.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/blurring.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/canny.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/canny.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/colorspace.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/colorspace.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/contours.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/contours.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/geometric.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/geometric.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/grabcut.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/grabcut.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/gradient.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/gradient.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/histogram.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/histogram.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/houghcircles.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/houghcircles.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/houghlines.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/houghlines.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/morphology.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/morphology.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/pyramid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/pyramid.png -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/template.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/template.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/thresh.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/thresh.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/transforms.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/transforms.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/watershed.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_table_of_contents_imgproc/images/watershed.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_template_matching/images/messi_face.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_template_matching/images/messi_face.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_template_matching/images/res_mario.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_template_matching/images/res_mario.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_template_matching/images/template_ccoeff_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_template_matching/images/template_ccoeff_1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_template_matching/images/template_ccoeffn_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_template_matching/images/template_ccoeffn_2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_template_matching/images/template_ccorr_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_template_matching/images/template_ccorr_3.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_template_matching/images/template_ccorrn_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_template_matching/images/template_ccorrn_4.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_template_matching/images/template_sqdiff_5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_template_matching/images/template_sqdiff_5.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_template_matching/images/template_sqdiffn_6.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_template_matching/images/template_sqdiffn_6.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_thresholding/images/ada_threshold.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_thresholding/images/ada_threshold.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_thresholding/images/otsu.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_thresholding/images/otsu.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_thresholding/images/threshold.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_thresholding/images/threshold.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/images/fft1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/images/fft1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/images/fft2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/images/fft2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/images/fft4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/images/fft4.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/images/fft5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/images/fft5.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_transforms/py_table_of_contents_transforms/images/transform_fourier.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_transforms/py_table_of_contents_transforms/images/transform_fourier.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_transforms/py_table_of_contents_transforms/py_table_of_contents_transforms.rst: -------------------------------------------------------------------------------- 1 | .. _Table-Of-Content-Transforms: 2 | 3 | Image Transforms in OpenCV 4 | ----------------------------------------------------------- 5 | 6 | * :ref:`Fourier_Transform` 7 | 8 | .. tabularcolumns:: m{100pt} m{300pt} 9 | .. cssclass:: toctableopencv 10 | 11 | ============= =================================================================== 12 | |transform_1| Learn to find the Fourier Transform of images 13 | 14 | 15 | ============= =================================================================== 16 | 17 | .. |transform_1| image:: images/transform_fourier.jpg 18 | :height: 90pt 19 | :width: 90pt 20 | 21 | 22 | .. raw:: latex 23 | 24 | \pagebreak 25 | 26 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 27 | .. toctree:: 28 | :hidden: 29 | 30 | ../py_fourier_transform/py_fourier_transform 31 | -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_watershed/images/water_coins.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_watershed/images/water_coins.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_watershed/images/water_dt.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_watershed/images/water_dt.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_watershed/images/water_fgbg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_watershed/images/water_fgbg.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_watershed/images/water_marker.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_watershed/images/water_marker.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_imgproc/py_watershed/images/water_result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_watershed/images/water_result.jpg -------------------------------------------------------------------------------- 
/source/py_tutorials/py_imgproc/py_watershed/images/water_thresh.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_imgproc/py_watershed/images/water_thresh.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/images/kmeans_begin.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/images/kmeans_begin.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/images/kmeans_demo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/images/kmeans_demo.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_index.rst: -------------------------------------------------------------------------------- 1 | .. _KMeans_Clustering: 2 | 3 | K-Means Clustering 4 | ********************* 5 | 6 | * :ref:`KMeans_Clustering_Understanding` 7 | 8 | .. tabularcolumns:: m{100pt} m{300pt} 9 | .. cssclass:: toctableopencv 10 | 11 | =========== =================================================================== 12 | |KM_1| Read to get an intuitive understanding of K-Means Clustering 13 | =========== =================================================================== 14 | 15 | .. |KM_1| image:: images/kmeans_begin.jpg 16 | :height: 90pt 17 | :width: 90pt 18 | 19 | * :ref:`KMeans_OpenCV` 20 | 21 | .. tabularcolumns:: m{100pt} m{300pt} 22 | .. cssclass:: toctableopencv 23 | 24 | =========== =================================================================== 25 | |KM_2| Now let's try K-Means functions in OpenCV 26 | =========== =================================================================== 27 | 28 | .. |KM_2| image:: images/kmeans_demo.jpg 29 | :height: 90pt 30 | :width: 90pt 31 | 32 | .. raw:: latex 33 | 34 | \pagebreak 35 | 36 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 37 | .. 
toctree:: 38 | :hidden: 39 | 40 | py_kmeans_understanding/py_kmeans_understanding 41 | py_kmeans_opencv/py_kmeans_opencv 42 | -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/images/oc_1d_clustered.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/images/oc_1d_clustered.png -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/images/oc_1d_testdata.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/images/oc_1d_testdata.png -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/images/oc_2d_clustered.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/images/oc_2d_clustered.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/images/oc_color_quantization.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/images/oc_color_quantization.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/images/oc_feature_representation.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/images/oc_feature_representation.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/final_clusters.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/final_clusters.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/initial_labelling.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/initial_labelling.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/testdata.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/testdata.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/tshirt.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/tshirt.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/tshirt_grouped.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/tshirt_grouped.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/update_centroid.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/images/update_centroid.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/py_kmeans_understanding.rst: -------------------------------------------------------------------------------- 1 | .. _KMeans_Clustering_Understanding: 2 | 3 | Understanding K-Means Clustering 4 | *********************************** 5 | 6 | Goal 7 | ===== 8 | 9 | In this chapter, we will understand the concepts of K-Means Clustering and how it works. 10 | 11 | Theory 12 | ======= 13 | 14 | We will explain this with a commonly used example. 15 | 16 | T-shirt size problem 17 | ------------------------ 18 | 19 | Consider a company that is going to release a new model of T-shirt to the market. Obviously they will have to manufacture models in different sizes to satisfy people of all sizes. So the company collects data on people's heights and weights, and plots them on a graph, as below: 20 | 21 | .. image:: images/tshirt.jpg 22 | :alt: T-shirt Problem 23 | :align: center 24 | 25 | The company can't create t-shirts in every possible size. Instead, they divide people into Small, Medium and Large, and manufacture only these 3 models, which will fit all the people. This grouping of people into three groups can be done by k-means clustering, and the algorithm provides us the best 3 sizes, which will satisfy all the people. And if it doesn't, the company can divide people into more groups, maybe five, and so on. Check the image below: 26 | 27 | .. image:: images/tshirt_grouped.jpg 28 | :alt: People Grouped into Different Sizes 29 | :align: center 30 | 31 | How does it work ? 32 | ------------------------------ 33 | 34 | This algorithm is an iterative process. We will explain it step-by-step with the help of images. 35 | 36 | Consider a set of data as below (you can consider it as the t-shirt problem). We need to cluster this data into two groups. 37 | 38 | ..
image:: images/testdata.jpg 39 | :alt: Test Data 40 | :align: center 41 | 42 | **Step 1** - The algorithm randomly chooses two centroids, :math:`C1` and :math:`C2` (sometimes, any two data points are taken as the centroids). 43 | 44 | **Step 2** - It calculates the distance from each point to both centroids. If a test point is closer to :math:`C1`, that point is labelled '0'. If it is closer to :math:`C2`, it is labelled '1' (if there are more centroids, points are labelled '2', '3' etc.). 45 | 46 | In our case, we will color all points labelled '0' red and all points labelled '1' blue. So we get the following image after the above operations. 47 | 48 | .. image:: images/initial_labelling.jpg 49 | :alt: Initial Centroid Selection and Data Labelling 50 | :align: center 51 | 52 | **Step 3** - Next we calculate the averages of all blue points and all red points separately, and these averages become our new centroids. That is, :math:`C1` and :math:`C2` shift to the newly calculated centroids. (Remember, the images shown are not true values and are not to scale; they are for demonstration only.) 53 | 54 | And again, perform Step 2 with the new centroids and label the data '0' and '1'. 55 | 56 | So we get the result as below: 57 | 58 | .. image:: images/update_centroid.jpg 59 | :alt: New Centroids Calculated and Data Re-labelled 60 | :align: center 61 | 62 | Now **Step 2** and **Step 3** are iterated until both centroids converge to fixed points *(or iteration may be stopped when the criteria we provide are met, such as a maximum number of iterations or a required accuracy)*. **These points are such that the sum of distances between the test data and their corresponding centroids is minimum**. Or simply, the sum of distances between :math:`C1 \leftrightarrow Red\_Points` and :math:`C2 \leftrightarrow Blue\_Points` is minimum. 63 | 64 | .. math:: 65 | 66 | minimize \; \bigg[ J = \sum_{All\: Red\_Points} distance(C1, Red\_Point) + \sum_{All\: Blue\_Points} distance(C2, Blue\_Point) \bigg] 67 | 68 | The final result looks approximately as below: 69 | 70 | .. image:: images/final_clusters.jpg 71 | :alt: Final Result 72 | :align: center 73 | 74 | So this is just an intuitive understanding of K-Means Clustering; it is only the top layer of the algorithm. For more details and a mathematical explanation, please read any standard machine learning textbook or check the links in Additional Resources. There are many modifications to this algorithm, such as how to choose the initial centroids, how to speed up the iteration process, etc. 75 | 76 | Additional Resources 77 | ===================== 78 | #. `Machine Learning Course `_, video lectures by Prof. Andrew Ng (some of the images are taken from this course)
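As a starting point for the exercises below, here is a minimal NumPy sketch of the two-cluster procedure described above. The random test data, the fixed iteration count and the variable names are illustrative assumptions made only for this sketch; the OpenCV function itself is covered in the next tutorial (:ref:`KMeans_OpenCV`). ::

    import numpy as np

    # Illustrative stand-in for the height/weight points shown above
    X = np.random.randint(25, 100, (50, 2)).astype(np.float32)

    # Step 1: pick two of the data points as the initial centroids C1 and C2
    C = X[np.random.choice(len(X), 2, replace=False)]

    for _ in range(10):   # a fixed iteration count instead of a convergence test
        # Step 2: label each point '0' or '1' according to its nearest centroid
        d = np.linalg.norm(X[:, None, :] - C[None, :, :], axis=2)
        labels = d.argmin(axis=1)
        # Step 3: move each centroid to the mean of the points assigned to it
        C = np.array([X[labels == k].mean(axis=0) for k in (0, 1)])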
79 | 80 | Exercises 81 | =========== 82 | 83 | 84 | -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_knn/images/knn_icon1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_knn/images/knn_icon1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_knn/images/knn_icon2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_knn/images/knn_icon2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_knn/py_knn_index.rst: -------------------------------------------------------------------------------- 1 | .. _KNN: 2 | 3 | K-Nearest Neighbour 4 | ********************** 5 | 6 | * :ref:`KNN_Understanding` 7 | 8 | .. tabularcolumns:: m{100pt} m{300pt} 9 | .. cssclass:: toctableopencv 10 | 11 | =========== =================================================================== 12 | |KNN_1| Get a basic understanding of what kNN is 13 | =========== =================================================================== 14 | 15 | .. |KNN_1| image:: images/knn_icon1.jpg 16 | :height: 90pt 17 | :width: 90pt 18 | 19 | * :ref:`KNN_OpenCV` 20 | 21 | .. tabularcolumns:: m{100pt} m{300pt} 22 | .. cssclass:: toctableopencv 23 | 24 | =========== =================================================================== 25 | |KNN_2| Now let's use kNN in OpenCV for digit recognition OCR 26 | =========== =================================================================== 27 | 28 | .. |KNN_2| image:: images/knn_icon2.jpg 29 | :height: 90pt 30 | :width: 90pt 31 | 32 | .. raw:: latex 33 | 34 | \pagebreak 35 | 36 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 37 | .. toctree:: 38 | :hidden: 39 | 40 | py_knn_understanding/py_knn_understanding 41 | py_knn_opencv/py_knn_opencv 42 | -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.rst: -------------------------------------------------------------------------------- 1 | .. _KNN_OpenCV: 2 | 3 | OCR of Hand-written Data using kNN 4 | *********************************************** 5 | 6 | Goal 7 | ======= 8 | 9 | In this chapter 10 | * We will use our knowledge of kNN to build a basic OCR application. 11 | * We will try it out with the digits and alphabets data that comes with OpenCV. 12 | 13 | 14 | OCR of Hand-written Digits 15 | ============================ 16 | 17 | Our goal is to build an application which can read handwritten digits. For this we need some train_data and test_data. OpenCV comes with an image `digits.png` (in the folder ``opencv/samples/python2/data/``) which has 5000 handwritten digits (500 for each digit). Each digit is a 20x20 image. So our first step is to split this image into 5000 different digits. For each digit, we flatten it into a single row with 400 pixels. That is our feature set, i.e., the intensity values of all pixels. It is the simplest feature set we can create.
We use the first 250 samples of each digit as train_data, and the next 250 samples as test_data. So let's prepare them first. 18 | :: 19 | 20 | import numpy as np 21 | import cv2 22 | from matplotlib import pyplot as plt 23 | 24 | img = cv2.imread('digits.png') 25 | gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 26 | 27 | # Now we split the image into 5000 cells, each of size 20x20 28 | cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)] 29 | 30 | # Make it into a Numpy array. Its size will be (50,100,20,20) 31 | x = np.array(cells) 32 | 33 | # Now we prepare train_data and test_data. 34 | train = x[:,:50].reshape(-1,400).astype(np.float32) # Size = (2500,400) 35 | test = x[:,50:100].reshape(-1,400).astype(np.float32) # Size = (2500,400) 36 | 37 | # Create labels for train and test data 38 | k = np.arange(10) 39 | train_labels = np.repeat(k,250)[:,np.newaxis] 40 | test_labels = train_labels.copy() 41 | 42 | # Initiate kNN, train the data, then test it with the test data for k=5 43 | knn = cv2.KNearest() 44 | knn.train(train,train_labels) 45 | ret,result,neighbours,dist = knn.find_nearest(test,k=5) 46 | 47 | # Now we check the accuracy of classification 48 | # For that, compare the result with test_labels and check which are wrong 49 | matches = result==test_labels 50 | correct = np.count_nonzero(matches) 51 | accuracy = correct*100.0/result.size 52 | print accuracy 53 | 54 | 55 | So our basic OCR app is ready. This particular example gave me an accuracy of 91%. One option to improve accuracy is to add more data for training, especially the misclassified samples. Instead of building this training data every time I start the application, it is better to save it, so that next time I can read the data directly from a file and start classification. You can do this with the help of some Numpy functions like np.savetxt, np.savez, np.load etc. Please check their docs for more details. 56 | :: 57 | 58 | # save the data 59 | np.savez('knn_data.npz',train=train, train_labels=train_labels) 60 | 61 | # Now load the data 62 | with np.load('knn_data.npz') as data: 63 | print data.files 64 | train = data['train'] 65 | train_labels = data['train_labels'] 66 | 67 | In my system, it takes around 4.4 MB of memory. Since we are using intensity values (uint8 data) as features, it would be better to convert the data to np.uint8 first and then save it. It takes only 1.1 MB in that case. Then, while loading, you can convert it back into float32. 68 | 69 | OCR of English Alphabets 70 | =========================== 71 | 72 | Next we will do the same for the English alphabet, but there is a slight change in the data and feature set. Here, instead of images, OpenCV comes with a data file, ``letter-recognition.data``, in the ``opencv/samples/cpp/`` folder. If you open it, you will see 20000 lines which may, at first sight, look like garbage. Actually, in each row, the first column is a letter, which is our label. The 16 numbers following it are its different features. These features are obtained from the `UCI Machine Learning Repository `_. You can find the details of these features in `this page `_. 73 | 74 | There are 20000 samples available, so we take the first 10000 as training samples and the remaining 10000 as test samples. We have to change the letters to numbers, because we can't work with the letter characters directly.
75 | :: 76 | 77 | import cv2 78 | import numpy as np 79 | import matplotlib.pyplot as plt 80 | 81 | # Load the data, converters convert the letter to a number 82 | data= np.loadtxt('letter-recognition.data', dtype= 'float32', delimiter = ',', 83 | converters= {0: lambda ch: ord(ch)-ord('A')}) 84 | 85 | # split the data into two, 10000 each for train and test 86 | train, test = np.vsplit(data,2) 87 | 88 | # split trainData and testData into features and responses 89 | responses, trainData = np.hsplit(train,[1]) 90 | labels, testData = np.hsplit(test,[1]) 91 | 92 | # Initiate the kNN, classify, measure accuracy. 93 | knn = cv2.KNearest() 94 | knn.train(trainData, responses) 95 | ret, result, neighbours, dist = knn.find_nearest(testData, k=5) 96 | 97 | correct = np.count_nonzero(result == labels) 98 | accuracy = correct*100.0/10000 99 | print accuracy 100 | 101 | It gives me an accuracy of 93.22%. Again, if you want to increase accuracy, you can iteratively add the misclassified samples to the training data. 102 | 103 | Additional Resources 104 | ======================= 105 | 106 | Exercises 107 | ============= 108 | 109 | -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_knn/py_knn_understanding/images/knn_simple.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_knn/py_knn_understanding/images/knn_simple.png -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_knn/py_knn_understanding/images/knn_theory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_knn/py_knn_understanding/images/knn_theory.png -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_svm/images/svm_icon1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_svm/images/svm_icon1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_svm/images/svm_icon2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_svm/images/svm_icon2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics1.png -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics2.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics2.png -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics3.png -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_svm/py_svm_index.rst: -------------------------------------------------------------------------------- 1 | .. _SVM: 2 | 3 | Support Vector Machines (SVM) 4 | ******************************** 5 | 6 | * :ref:`SVM_Understanding` 7 | 8 | .. tabularcolumns:: m{100pt} m{300pt} 9 | .. cssclass:: toctableopencv 10 | 11 | =========== =================================================================== 12 | |SVM_1| Get a basic understanding of what SVM is 13 | =========== =================================================================== 14 | 15 | .. |SVM_1| image:: images/svm_icon1.jpg 16 | :height: 90pt 17 | :width: 90pt 18 | 19 | 20 | * :ref:`svm_opencv` 21 | 22 | .. tabularcolumns:: m{100pt} m{300pt} 23 | .. cssclass:: toctableopencv 24 | 25 | =========== =================================================================== 26 | |SVM_2| Let's use SVM functionalities in OpenCV 27 | =========== =================================================================== 28 | 29 | .. |SVM_2| image:: images/svm_icon2.jpg 30 | :height: 90pt 31 | :width: 90pt 32 | 33 | .. raw:: latex 34 | 35 | \pagebreak 36 | 37 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 38 | .. 
toctree:: 39 | :hidden: 40 | 41 | py_svm_basics/py_svm_basics 42 | py_svm_opencv/py_svm_opencv 43 | 44 | 45 | -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_svm/py_svm_opencv/images/deskew.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_svm/py_svm_opencv/images/deskew.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_table_of_contents_ml/images/kmeansicon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_table_of_contents_ml/images/kmeansicon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_table_of_contents_ml/images/knnicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_table_of_contents_ml/images/knnicon.png -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_table_of_contents_ml/images/svmicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_ml/py_table_of_contents_ml/images/svmicon.png -------------------------------------------------------------------------------- /source/py_tutorials/py_ml/py_table_of_contents_ml/py_table_of_contents_ml.rst: -------------------------------------------------------------------------------- 1 | .. _PY_Table-Of-Content-ML: 2 | 3 | Machine Learning 4 | ******************** 5 | 6 | * :ref:`KNN` 7 | 8 | .. tabularcolumns:: m{100pt} m{300pt} 9 | .. cssclass:: toctableopencv 10 | 11 | =========== =================================================================== 12 | |ML_KNN| Learn to use kNN for classification 13 | Plus learn about handwritten digit recognition using kNN 14 | =========== =================================================================== 15 | 16 | .. |ML_KNN| image:: images/knnicon.png 17 | :height: 90pt 18 | :width: 90pt 19 | 20 | 21 | * :ref:`SVM` 22 | 23 | .. tabularcolumns:: m{100pt} m{300pt} 24 | .. cssclass:: toctableopencv 25 | 26 | =========== =================================================================== 27 | |ML_SVM| Understand concepts of SVM 28 | =========== =================================================================== 29 | 30 | .. |ML_SVM| image:: images/svmicon.png 31 | :height: 90pt 32 | :width: 90pt 33 | 34 | * :ref:`KMeans_Clustering` 35 | 36 | .. tabularcolumns:: m{100pt} m{300pt} 37 | .. cssclass:: toctableopencv 38 | 39 | =========== =================================================================== 40 | |ML_KM| Learn to use K-Means Clustering to group data to a number of clusters. 41 | Plus learn to do color quantization using K-Means Clustering 42 | =========== =================================================================== 43 | 44 | .. |ML_KM| image:: images/kmeansicon.jpg 45 | :height: 90pt 46 | :width: 90pt 47 | 48 | 49 | .. 
raw:: latex 50 | 51 | \pagebreak 52 | 53 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 54 | .. toctree:: 55 | :hidden: 56 | 57 | ../py_knn/py_knn_index 58 | ../py_svm/py_svm_index 59 | ../py_kmeans/py_kmeans_index 60 | -------------------------------------------------------------------------------- /source/py_tutorials/py_objdetect/py_face_detection/images/face.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_objdetect/py_face_detection/images/face.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_objdetect/py_face_detection/images/haar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_objdetect/py_face_detection/images/haar.png -------------------------------------------------------------------------------- /source/py_tutorials/py_objdetect/py_face_detection/images/haar_features.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_objdetect/py_face_detection/images/haar_features.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_objdetect/py_table_of_contents_objdetect/images/face_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_objdetect/py_table_of_contents_objdetect/images/face_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_objdetect/py_table_of_contents_objdetect/py_table_of_contents_objdetect.rst: -------------------------------------------------------------------------------- 1 | .. _PY_Table-Of-Content-Objdetection: 2 | 3 | 4 | Object Detection 5 | -------------------------------- 6 | 7 | 8 | 9 | * :ref:`face_detection` 10 | 11 | .. tabularcolumns:: m{100pt} m{300pt} 12 | .. cssclass:: toctableopencv 13 | 14 | =========== ====================================================== 15 | |objdet_1| Face detection using haar-cascades 16 | 17 | =========== ====================================================== 18 | 19 | .. |objdet_1| image:: images/face_icon.jpg 20 | :height: 90pt 21 | :width: 90pt 22 | 23 | 24 | 25 | .. raw:: latex 26 | 27 | \pagebreak 28 | 29 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 30 | .. 
toctree:: 31 | :hidden: 32 | 33 | ../py_face_detection/py_face_detection 34 | -------------------------------------------------------------------------------- /source/py_tutorials/py_photo/py_inpainting/images/inpaint_basics.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_photo/py_inpainting/images/inpaint_basics.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_photo/py_inpainting/images/inpaint_result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_photo/py_inpainting/images/inpaint_result.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_photo/py_inpainting/py_inpainting.rst: -------------------------------------------------------------------------------- 1 | .. _inpainting: 2 | 3 | 4 | Image Inpainting 5 | ********************** 6 | 7 | Goal 8 | ====== 9 | 10 | In this chapter, 11 | * We will learn how to remove small noise, strokes, etc. in old photographs by a method called inpainting 12 | * We will see inpainting functionalities in OpenCV. 13 | 14 | 15 | Basics 16 | =========== 17 | 18 | Most of you will have some old degraded photos at home with black spots, strokes, etc. on them. Have you ever thought of restoring them? We can't simply erase the marks in a paint tool, because that would just replace the black structures with white structures, which is of no use. In these cases, a technique called image inpainting is used. The basic idea is simple: replace those bad marks with their neighbouring pixels so that they look like the neighbourhood. Consider the image shown below (taken from `Wikipedia `_): 19 | 20 | .. image:: images/inpaint_basics.jpg 21 | :alt: Inpainting example 22 | :align: center 23 | 24 | Several algorithms were designed for this purpose and OpenCV provides two of them. Both can be accessed by the same function, **cv2.inpaint()**. 25 | 26 | The first algorithm is based on the paper **"An Image Inpainting Technique Based on the Fast Marching Method"** by Alexandru Telea in 2004. It is based on the Fast Marching Method. Consider a region in the image to be inpainted. The algorithm starts from the boundary of this region and goes inside the region, gradually filling everything near the boundary first. It takes a small neighbourhood around the pixel to be inpainted. This pixel is replaced by a normalized weighted sum of all the known pixels in the neighbourhood. Selection of the weights is an important matter. More weight is given to pixels lying near the point, near the normal of the boundary, and on the boundary contours. Once a pixel is inpainted, the algorithm moves to the next nearest pixel using the Fast Marching Method. FMM ensures that the pixels near the known pixels are inpainted first, so that it works just like a manual heuristic operation. This algorithm is enabled by using the flag ``cv2.INPAINT_TELEA``. 27 | 28 | The second algorithm is based on the paper **"Navier-Stokes, Fluid Dynamics, and Image and Video Inpainting"** by Bertalmio, Marcelo, Andrea L. Bertozzi, and Guillermo Sapiro in 2001. This algorithm is based on fluid dynamics and utilizes partial differential equations.
The basic principle is heuristic. It first travels along the edges from known regions to unknown regions (because edges are meant to be continuous). It continues isophotes (lines joining points of the same intensity, just as contours join points of the same elevation) while matching gradient vectors at the boundary of the inpainting region. For this, some methods from fluid dynamics are used. Once they are obtained, color is filled in so as to minimize the variance in that area. This algorithm is enabled by using the flag ``cv2.INPAINT_NS``. 29 | 30 | 31 | Code 32 | =========== 33 | 34 | We need to create a mask of the same size as the input image, where non-zero pixels correspond to the area to be inpainted. Everything else is simple. My image is degraded with some black strokes (which I added manually), and I created a corresponding mask with a paint tool. 35 | :: 36 | 37 | import numpy as np 38 | import cv2 39 | 40 | img = cv2.imread('messi_2.jpg') 41 | mask = cv2.imread('mask2.png',0) 42 | 43 | dst = cv2.inpaint(img,mask,3,cv2.INPAINT_TELEA) 44 | 45 | cv2.imshow('dst',dst) 46 | cv2.waitKey(0) 47 | cv2.destroyAllWindows() 48 | 49 | 50 | See the result below. The first image shows the degraded input. The second image is the mask. The third image is the result of the first algorithm and the last image is the result of the second algorithm. 51 | 52 | .. image:: images/inpaint_result.jpg 53 | :alt: Inpainting result 54 | :align: center 55 | 56 | 57 | Additional Resources 58 | ========================= 59 | 60 | #. Bertalmio, Marcelo, Andrea L. Bertozzi, and Guillermo Sapiro. "Navier-Stokes, fluid dynamics, and image and video inpainting." In Computer Vision and Pattern Recognition, 2001. CVPR 2001. Proceedings of the 2001 IEEE Computer Society Conference on, vol. 1, pp. I-355. IEEE, 2001. 61 | 62 | #. Telea, Alexandru. "An image inpainting technique based on the fast marching method." Journal of Graphics Tools 9.1 (2004): 23-34. 63 | 64 | 65 | Exercises 66 | ================ 67 | 68 | #. OpenCV comes with an interactive sample on inpainting, ``samples/python2/inpaint.py``, try it. 69 | 70 | #. A few months ago, I watched a video on `Content-Aware Fill `_, an advanced inpainting technique used in Adobe Photoshop. On further searching, I found that the same technique is already available in GIMP under a different name, "Resynthesizer" (you need to install a separate plugin). I am sure you will enjoy the technique.
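As a small follow-up to the exercises: the Code section above only demonstrated ``cv2.INPAINT_TELEA``. The second, Navier-Stokes based algorithm described in the Basics section can be tried by simply swapping the flag; this sketch reuses the same hypothetical file names (``messi_2.jpg``, ``mask2.png``) as the earlier example. ::

    import cv2

    img = cv2.imread('messi_2.jpg')
    mask = cv2.imread('mask2.png',0)

    # Same call as before, but using the Navier-Stokes based algorithm
    dst_ns = cv2.inpaint(img,mask,3,cv2.INPAINT_NS)

    cv2.imshow('dst_ns',dst_ns)
    cv2.waitKey(0)
    cv2.destroyAllWindows()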
71 | -------------------------------------------------------------------------------- /source/py_tutorials/py_photo/py_non_local_means/images/nlm_multi.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_photo/py_non_local_means/images/nlm_multi.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_photo/py_non_local_means/images/nlm_patch.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_photo/py_non_local_means/images/nlm_patch.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_photo/py_non_local_means/images/nlm_result1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_photo/py_non_local_means/images/nlm_result1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_photo/py_table_of_contents_photo/images/inpainticon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_photo/py_table_of_contents_photo/images/inpainticon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_photo/py_table_of_contents_photo/images/nlm_icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_photo/py_table_of_contents_photo/images/nlm_icon.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_photo/py_table_of_contents_photo/py_table_of_contents_photo.rst: -------------------------------------------------------------------------------- 1 | .. _PY_Table-Of-Content-Photo: 2 | 3 | 4 | Computational Photography 5 | -------------------------------- 6 | 7 | Here you will learn different OpenCV functionalities related to Computational Photography, like image denoising etc. 8 | 9 | 10 | * :ref:`non_local_means` 11 | 12 | .. tabularcolumns:: m{100pt} m{300pt} 13 | .. cssclass:: toctableopencv 14 | 15 | =========== ====================================================== 16 | |photo_1| See a good technique to remove noise in images, called Non-Local Means Denoising 17 | 18 | =========== ====================================================== 19 | 20 | .. |photo_1| image:: images/nlm_icon.jpg 21 | :height: 90pt 22 | :width: 90pt 23 | 24 | 25 | * :ref:`inpainting` 26 | 27 | .. tabularcolumns:: m{100pt} m{300pt} 28 | .. cssclass:: toctableopencv 29 | 30 | =========== ====================================================== 31 | |photo_2| Do you have an old degraded photo with many black spots and strokes on it? Take it. Let's try to restore it with a technique called image inpainting. 32 | 33 | =========== ====================================================== 34 | 35 | ..
|photo_2| image:: images/inpainticon.jpg 36 | :height: 90pt 37 | :width: 90pt 38 | 39 | 40 | .. raw:: latex 41 | 42 | \pagebreak 43 | 44 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 45 | .. toctree:: 46 | :hidden: 47 | 48 | ../py_non_local_means/py_non_local_means 49 | ../py_inpainting/py_inpainting 50 | -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_intro/py_intro.rst: -------------------------------------------------------------------------------- 1 | .. _Intro: 2 | 3 | 4 | Introduction to OpenCV-Python Tutorials 5 | ******************************************* 6 | 7 | OpenCV 8 | =============== 9 | 10 | OpenCV was started at Intel in 1999 by **Gary Bradsky** and the first release came out in 2000. **Vadim Pisarevsky** joined Gary Bradsky to manage Intel's Russian software OpenCV team. In 2005, OpenCV was used on Stanley, the vehicle that won the 2005 DARPA Grand Challenge. Later its active development continued under the support of Willow Garage, with Gary Bradsky and Vadim Pisarevsky leading the project. Right now, OpenCV supports a lot of algorithms related to Computer Vision and Machine Learning and it is expanding day by day. 11 | 12 | Currently OpenCV supports a wide variety of programming languages like C++, Python, Java etc. and is available on different platforms including Windows, Linux, OS X, Android, iOS etc. Interfaces based on CUDA and OpenCL are also under active development for high-speed GPU operations. 13 | 14 | OpenCV-Python is the Python API of OpenCV. It combines the best qualities of the OpenCV C++ API and the Python language. 15 | 16 | 17 | OpenCV-Python 18 | =============== 19 | 20 | Python is a general purpose programming language started by **Guido van Rossum**, which became very popular in a short time mainly because of its simplicity and code readability. It enables the programmer to express ideas in fewer lines of code without reducing readability. 21 | 22 | Compared to other languages like C/C++, Python is slower. But another important feature of Python is that it can be easily extended with C/C++. This feature helps us write computationally intensive code in C/C++ and create a Python wrapper for it, so that we can use these wrappers as Python modules. This gives us two advantages: first, our code is as fast as the original C/C++ code (since it is the actual C++ code working in the background) and second, it is very easy to code in Python. This is how OpenCV-Python works: it is a Python wrapper around the original C++ implementation. 23 | 24 | And the support of Numpy makes the task even easier. **Numpy** is a highly optimized library for numerical operations. It gives a MATLAB-style syntax. All the OpenCV array structures are converted to and from Numpy arrays. So whatever operations you can do in Numpy, you can combine with OpenCV, which increases the number of weapons in your arsenal. Besides that, several other libraries like SciPy and Matplotlib, which support Numpy, can be used with it. 25 | 26 | So OpenCV-Python is an appropriate tool for fast prototyping of computer vision problems. 27 | 28 | 29 | OpenCV-Python Tutorials 30 | ============================= 31 | 32 | OpenCV introduces a new set of tutorials which will guide you through the various functions available in OpenCV-Python. **This guide is mainly focused on the OpenCV 3.x version** (although most of the tutorials will also work with OpenCV 2.x).
33 | 34 | A prior knowledge of Python and Numpy is required before starting, because they won't be covered in this guide. **In particular, a good knowledge of Numpy is a must to write optimized code in OpenCV-Python.** 35 | 36 | This tutorial was started by *Abid Rahman K.* as part of the Google Summer of Code 2013 program, under the guidance of *Alexander Mordvintsev*. 37 | 38 | 39 | OpenCV Needs You !!! 40 | ========================== 41 | 42 | Since OpenCV is an open source initiative, all are welcome to make contributions to this library. And the same is true for this tutorial. 43 | 44 | So, if you find any mistake in this tutorial (whether it be a small spelling mistake or a big error in code or concepts), feel free to correct it. 45 | 46 | That is also a good task for newcomers who are beginning to contribute to open source projects. Just fork OpenCV on GitHub, make the necessary corrections and send a pull request to OpenCV. OpenCV developers will check your pull request, give you important feedback and, once it passes the approval of the reviewer, it will be merged into OpenCV. Then you become an open source contributor. The same applies to other tutorials, documentation etc. 47 | 48 | As new modules are added to OpenCV-Python, this tutorial will have to be expanded. So those who know about a particular algorithm can write up a tutorial which includes a basic theory of the algorithm and code showing its basic usage, and submit it to OpenCV. 49 | 50 | Remember, we can make this project a great success **together** !!! 51 | 52 | 53 | Contributors 54 | ================= 55 | 56 | Below is the list of contributors who submitted tutorials to OpenCV-Python. 57 | 58 | 1. Alexander Mordvintsev (GSoC-2013 mentor) 59 | 2. Abid Rahman K. (GSoC-2013 intern) 60 | 61 | 62 | Additional Resources 63 | ======================= 64 | 65 | 1. A Quick guide to Python - `A Byte of Python `_ 66 | 2. `Basic Numpy Tutorials `_ 67 | 3. `Numpy Examples List `_ 68 | 4. `OpenCV Documentation `_ 69 | 5.
`OpenCV Forum `_ 70 | -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_setup_in_windows/images/Capture1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_setup/py_setup_in_windows/images/Capture1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_setup_in_windows/images/Capture2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_setup/py_setup_in_windows/images/Capture2.png -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_setup_in_windows/images/Capture3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_setup/py_setup_in_windows/images/Capture3.png -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_setup_in_windows/images/Capture5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_setup/py_setup_in_windows/images/Capture5.png -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_setup_in_windows/images/Capture6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_setup/py_setup_in_windows/images/Capture6.png -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_setup_in_windows/images/Capture7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_setup/py_setup_in_windows/images/Capture7.png -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_setup_in_windows/images/Capture8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_setup/py_setup_in_windows/images/Capture8.png -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_setup_in_windows/images/Capture80.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_setup/py_setup_in_windows/images/Capture80.png -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_table_of_contents_setup/images/fedora_logo.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_setup/py_table_of_contents_setup/images/fedora_logo.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_table_of_contents_setup/images/opencv_logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_setup/py_table_of_contents_setup/images/opencv_logo.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_table_of_contents_setup/images/windows_logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_setup/py_table_of_contents_setup/images/windows_logo.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_setup/py_table_of_contents_setup/py_table_of_contents_setup.rst: -------------------------------------------------------------------------------- 1 | .. _PY_Table-Of-Content-Setup: 2 | 3 | Introduction to OpenCV 4 | ----------------------------------------------------------- 5 | 6 | * :ref:`Intro` 7 | 8 | .. tabularcolumns:: m{100pt} m{300pt} 9 | .. cssclass:: toctableopencv 10 | 11 | =========== ====================================================== 12 | |Intro_1| Getting Started with OpenCV-Python 13 | 14 | =========== ====================================================== 15 | 16 | .. |Intro_1| image:: images/opencv_logo.jpg 17 | :height: 90pt 18 | :width: 90pt 19 | 20 | 21 | 22 | * :ref:`Install-OpenCV-Python-in-Windows` 23 | 24 | .. tabularcolumns:: m{100pt} m{300pt} 25 | .. cssclass:: toctableopencv 26 | 27 | =========== ====================================================== 28 | |Install_1| Set Up OpenCV-Python in Windows 29 | 30 | =========== ====================================================== 31 | 32 | .. |Install_1| image:: images/windows_logo.jpg 33 | :height: 90pt 34 | :width: 90pt 35 | 36 | * :ref:`Install-OpenCV-Python-in-Fedora` 37 | 38 | .. tabularcolumns:: m{100pt} m{300pt} 39 | .. cssclass:: toctableopencv 40 | 41 | =========== ====================================================== 42 | |Install_2| Set Up OpenCV-Python in Fedora 43 | 44 | =========== ====================================================== 45 | 46 | .. |Install_2| image:: images/fedora_logo.jpg 47 | :height: 90pt 48 | :width: 90pt 49 | 50 | .. raw:: latex 51 | 52 | \pagebreak 53 | 54 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 55 | .. 
toctree:: 56 | :hidden: 57 | 58 | ../py_intro/py_intro 59 | ../py_setup_in_windows/py_setup_in_windows 60 | ../py_setup_in_fedora/py_setup_in_fedora 61 | -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_bg_subtraction/images/resframe.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_bg_subtraction/images/resframe.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_bg_subtraction/images/resgmg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_bg_subtraction/images/resgmg.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_bg_subtraction/images/resmog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_bg_subtraction/images/resmog.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_bg_subtraction/images/resmog2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_bg_subtraction/images/resmog2.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_lucas_kanade/images/optical_flow_basic1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_lucas_kanade/images/optical_flow_basic1.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_lucas_kanade/images/opticalfb.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_lucas_kanade/images/opticalfb.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_lucas_kanade/images/opticalflow_lk.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_lucas_kanade/images/opticalflow_lk.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_meanshift/images/camshift_face.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_meanshift/images/camshift_face.gif -------------------------------------------------------------------------------- 
/source/py_tutorials/py_video/py_meanshift/images/camshift_result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_meanshift/images/camshift_result.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_meanshift/images/meanshift_basics.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_meanshift/images/meanshift_basics.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_meanshift/images/meanshift_face.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_meanshift/images/meanshift_face.gif -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_meanshift/images/meanshift_result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_meanshift/images/meanshift_result.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_table_of_contents_video/images/background.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_table_of_contents_video/images/background.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_table_of_contents_video/images/camshift.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_table_of_contents_video/images/camshift.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_table_of_contents_video/images/lucas.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_table_of_contents_video/images/lucas.jpg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_table_of_contents_video/images/opticalflow.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abidrahmank/OpenCV2-Python-Tutorials/435328569162104db9c14f718f4ba170d1206470/source/py_tutorials/py_video/py_table_of_contents_video/images/opticalflow.jpeg -------------------------------------------------------------------------------- /source/py_tutorials/py_video/py_table_of_contents_video/py_table_of_contents_video.rst: -------------------------------------------------------------------------------- 1 | .. 
_PY_Table-Of-Content-Video: 2 | 3 | Video Analysis 4 | ------------------------------------------ 5 | 6 | * :ref:`meanshift` 7 | 8 | .. tabularcolumns:: m{100pt} m{300pt} 9 | .. cssclass:: toctableopencv 10 | 11 | =========== ====================================================== 12 | |vdo_1| We have already seen an example of color-based tracking, which is simpler. This time, we use better algorithms such as "Meanshift", and its upgraded version, "Camshift", to find and track objects in a video. 13 | 14 | =========== ====================================================== 15 | 16 | .. |vdo_1| image:: images/camshift.jpg 17 | :height: 90pt 18 | :width: 90pt 19 | 20 | 21 | * :ref:`Lucas_Kanade` 22 | 23 | .. tabularcolumns:: m{100pt} m{300pt} 24 | .. cssclass:: toctableopencv 25 | 26 | =========== ====================================================== 27 | |vdo_2| Now let's discuss an important concept, "Optical Flow", which is central to video analysis and has many applications. 28 | =========== ====================================================== 29 | 30 | .. |vdo_2| image:: images/opticalflow.jpeg 31 | :height: 90pt 32 | :width: 90pt 33 | 34 | 35 | * :ref:`background_subtraction` 36 | 37 | .. tabularcolumns:: m{100pt} m{300pt} 38 | .. cssclass:: toctableopencv 39 | 40 | =========== ====================================================== 41 | |vdo_b| In several applications, we need to extract the foreground for further operations like object tracking. Background subtraction is a well-known method for such cases. 42 | =========== ====================================================== 43 | 44 | .. |vdo_b| image:: images/background.jpg 45 | :height: 90pt 46 | :width: 90pt 47 | 48 | 49 | 50 | .. raw:: latex 51 | 52 | \pagebreak 53 | 54 | .. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it. 55 | .. toctree:: 56 | :hidden: 57 | 58 | ../py_meanshift/py_meanshift 59 | ../py_lucas_kanade/py_lucas_kanade 60 | ../py_bg_subtraction/py_bg_subtraction 61 | --------------------------------------------------------------------------------
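The video table of contents above introduces background subtraction only as a one-line blurb, so here is a minimal sketch of what such a script typically looks like. This is not the chapter's own code: the file name ``vtest.avi`` is a placeholder for any video of your own, and the constructor name depends on your OpenCV version (``cv2.BackgroundSubtractorMOG()`` in the 2.4.x Python bindings targeted by these tutorials; newer releases expose ``cv2.createBackgroundSubtractorMOG2()`` instead)::

    import cv2

    # Placeholder input video; substitute any video file or a camera index.
    cap = cv2.VideoCapture('vtest.avi')

    # MOG background subtractor (OpenCV 2.4.x API; use
    # cv2.createBackgroundSubtractorMOG2() on OpenCV 3/4).
    fgbg = cv2.BackgroundSubtractorMOG()

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # apply() returns the foreground mask for this frame and updates
        # the background model internally.
        fgmask = fgbg.apply(frame)

        cv2.imshow('foreground mask', fgmask)
        if cv2.waitKey(30) & 0xFF == 27:  # press Esc to quit
            break

    cap.release()
    cv2.destroyAllWindows()

The per-frame loop is the same pattern the meanshift/camshift and optical-flow chapters follow: read a frame, run the algorithm, display or store the result, and stop when the stream ends or the user quits.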