├── .gitattributes
├── .gitignore
├── docs
│   ├── tutorials
│   │   ├── cv2py.odp
│   │   ├── final doc.odt
│   │   ├── serial commn.docx
│   │   └── tutorials.docx
│   ├── user mnls
│   │   ├── 10.1.1.46.5663.pdf
│   │   ├── CS676Report.pdf
│   │   ├── Paper_11-Gesture_Controlled_Robot_using_Image_Processing.pdf
│   │   ├── Report.pdf
│   │   ├── books.pdf
│   │   └── image processing - How to define the markers for Watershed in OpenCV - Stack Overflow.htm
│   └── work log
│       └── log.docx
└── software
    ├── firmware
    │   ├── backprojection.py
    │   ├── backsub.py
    │   ├── backsub2.py
    │   ├── cam.py
    │   ├── cntrs.py
    │   ├── ex1.PY
    │   ├── ex3.py
    │   ├── ex4.py
    │   ├── exe file
    │   │   └── cam.exe
    │   ├── haar.py
    │   ├── hand_gesture_with_serial.py
    │   ├── img.py
    │   ├── ip1.py
    │   ├── ip2.py
    │   └── yellowtrack.py
    ├── pyserial-2.6.tar.gz
    └── used sw
        ├── matplotlib-1.2.1.win32-py2.7.exe
        ├── numpy-1.6.1-win32-superpack-python2.7.exe
        └── python-2.7.3.msi

--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto

# Custom for Visual Studio
*.cs diff=csharp
*.sln merge=union
*.csproj merge=union
*.vbproj merge=union
*.fsproj merge=union
*.dbproj merge=union

# Standard to msysgit
*.doc diff=astextplain
*.DOC diff=astextplain
*.docx diff=astextplain
*.DOCX diff=astextplain
*.dot diff=astextplain
*.DOT diff=astextplain
*.pdf diff=astextplain
*.PDF diff=astextplain
*.rtf diff=astextplain
*.RTF diff=astextplain

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
#################
## Eclipse
#################

*.pydevproject
.project
.metadata
bin/
tmp/
*.tmp
*.bak
*.swp
*~.nib
local.properties
.classpath
.settings/
.loadpath

# External tool builders
.externalToolBuilders/

# Locally stored "Eclipse launch configurations"
*.launch

# CDT-specific
.cproject

# PDT-specific
.buildpath


#################
## Visual Studio
#################

## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
# User-specific files
*.suo
*.user
*.sln.docstates

# Build results

[Dd]ebug/
[Rr]elease/
x64/
build/
[Bb]in/
[Oo]bj/

# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*

*_i.c
*_p.c
*.ilk
*.meta
*.obj
*.pch
*.pdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.scc

# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opensdf
*.sdf
*.cachefile

# Visual Studio profiler
*.psess
*.vsp
*.vspx

# Guidance Automation Toolkit
*.gpState

# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper

# TeamCity is a build add-in
_TeamCity*

# DotCover is a Code Coverage Tool
*.dotCover

# NCrunch
*.ncrunch*
.*crunch*.local.xml

# Installshield output folder
[Ee]xpress/

# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html

# Click-Once directory
publish/

# Publish Web Output
*.Publish.xml
*.pubxml

# NuGet Packages Directory
## TODO: If you have NuGet Package Restore enabled, uncomment the next line
#packages/

# Windows Azure Build Output
csx
*.build.csdef

# Windows Store app package directory
AppPackages/

# Others
sql/
*.Cache
ClientBin/
[Ss]tyle[Cc]op.*
~$*
*~
*.dbmdl
*.[Pp]ublish.xml
*.pfx
*.publishsettings

# RIA/Silverlight projects
Generated_Code/

# Backup & report files from converting an old project file to a newer
# Visual Studio version.
# Backup files are not needed, because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm

# SQL Server files
App_Data/*.mdf
App_Data/*.ldf

#############
## Windows detritus
#############

# Windows image file caches
Thumbs.db
ehthumbs.db

# Folder config file
Desktop.ini

# Recycle Bin used on file shares
$RECYCLE.BIN/

# Mac crap
.DS_Store


#############
## Python
#############

*.py[co]

# Packages
*.egg
*.egg-info
dist/
build/
eggs/
parts/
var/
sdist/
develop-eggs/
.installed.cfg

# Installer logs
pip-log.txt

# Unit test / coverage reports
.coverage
.tox

# Translations
*.mo

# Mr Developer
.mr.developer.cfg

--------------------------------------------------------------------------------
/docs/tutorials/cv2py.odp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/docs/tutorials/cv2py.odp

--------------------------------------------------------------------------------
/docs/tutorials/final doc.odt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/docs/tutorials/final doc.odt

--------------------------------------------------------------------------------
/docs/tutorials/serial commn.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/docs/tutorials/serial commn.docx

--------------------------------------------------------------------------------
/docs/tutorials/tutorials.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/docs/tutorials/tutorials.docx

--------------------------------------------------------------------------------
/docs/user mnls/10.1.1.46.5663.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/docs/user mnls/10.1.1.46.5663.pdf

--------------------------------------------------------------------------------
/docs/user mnls/CS676Report.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/docs/user mnls/CS676Report.pdf

--------------------------------------------------------------------------------
/docs/user mnls/Paper_11-Gesture_Controlled_Robot_using_Image_Processing.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/docs/user mnls/Paper_11-Gesture_Controlled_Robot_using_Image_Processing.pdf

--------------------------------------------------------------------------------
/docs/user mnls/Report.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/docs/user mnls/Report.pdf

--------------------------------------------------------------------------------
/docs/user mnls/books.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/docs/user mnls/books.pdf

--------------------------------------------------------------------------------
/docs/user mnls/image processing - How to define the markers for Watershed in OpenCV - Stack Overflow.htm:
--------------------------------------------------------------------------------
How to define the markers for Watershed in OpenCV?

I'm writing for Android with OpenCV. I'm segmenting an image similar to
the one below using marker-controlled watershed, without the user manually
marking the image. I'm planning to use the regional maxima as markers.

|minMaxLoc()| would give me the value, but how can I restrict it to the
blobs, which are what I'm interested in? Can I utilize the results from
|findContours()| or cvBlob blobs to restrict the ROI and apply maxima to
each blob?

[input image]

Tags: opencv, image-processing, computer-vision, image-segmentation, watershed

Asked Jul 2 '12 at 13:37 by Tru. Edited Mar 8 at 11:15 by Harriv.


2 Answers


Answer 1 (accepted; answered Jul 11 '12 at 17:30 by Abid Rahman K, edited May 8 at 15:40)

I would like to explain a simple code on how to use watershed here. I am
using OpenCV-Python, but I hope you won't have any difficulty
understanding it.

In this code, I will be using watershed as a tool for
*foreground-background extraction* (this example is the Python
counterpart of the C++ code in the OpenCV cookbook). This is a simple case
for understanding watershed. Apart from that, you can use watershed to
count the number of objects in this image, which would be a slightly more
advanced version of this code.

*1* - First we load our image, convert it to grayscale, and threshold it
with a suitable value. I used *Otsu's binarization*, so it finds the best
threshold value automatically.

|import cv2
import numpy as np

img = cv2.imread('sofwatershed.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
|

Below is the result I got:

[image]

(Even that result is good, because of the great contrast between
foreground and background.)

2 - Now we have to create the marker.
*The marker is an image of the same size as the original image, of type
32SC1 (32-bit signed, single channel).*

Now there will be some regions in the original image where you are simply
sure that the part belongs to the foreground. Mark such regions with 255
in the marker image. Regions where you are sure to be background are
marked with 128. Regions you are not sure about are marked with 0. That
is what we are going to do next.

*A - Foreground region*: We have already got a thresholded image where the
pills are white. We erode them a little, so that we are sure the remaining
region belongs to the foreground.

|fg = cv2.erode(thresh,None,iterations = 2)
|

*fg*:

[image]

*B - Background region*: Here we dilate the thresholded image so that the
background region is reduced. We are sure the remaining black region is
100% background. We set it to 128.

|bgt = cv2.dilate(thresh,None,iterations = 3)
ret,bg = cv2.threshold(bgt,1,128,1)
|

Now we get *bg* as follows:

[image]

*C - Now we add both fg and bg*:

|marker = cv2.add(fg,bg)
|

Below is what we get:

[image]

Now we can clearly see from the above image that the white region is
100% foreground, the gray region is 100% background, and the black region
is what we are not sure about.

Then we convert it into 32SC1:

|marker32 = np.int32(marker)
|

3 - Finally we *apply watershed* and convert the result back into a *uint8* image:

|cv2.watershed(img,marker32)
m = cv2.convertScaleAbs(marker32)
|

*m*:

[image]

*4* - *We threshold it properly to get the mask and perform |bitwise_and|
with the input image:*

|ret,thresh = cv2.threshold(m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
res = cv2.bitwise_and(img,img,mask = thresh)
|

*res*:

[image]

Hope it helps!!!

ARK

Comment (mmgp, Jan 31 at 3:22): Consider rechecking this, because it is
showing a completely incorrect use of watershed.

Comment (Abid Rahman K, Jan 31 at 9:28): @mmgp: maybe you are right. This
is just a Python version of the basic C++ code given in the cookbook,
which shows how to set markers, etc. I thought it would be a nice sample
for freshers, |including me|. Anyway, your answer is great. It fills in
what is lacking in my answer.


Answer 2 (answered Jan 31 at 2:15 by mmgp)

First of all: the function |minMaxLoc| finds only the global minimum and
global maximum for a given input, so it is mostly useless for determining
regional minima and/or regional maxima. But your idea is right:
extracting markers based on regional minima/maxima for performing a
watershed transform based on markers is totally fine.
Let me try to
clarify what the watershed transform is and how you should correctly use
the implementation present in OpenCV.

A decent number of papers that deal with watershed describe it similarly
to what follows (I might miss some detail; if you are unsure, ask).
Consider the surface of some region you know; it contains valleys and
peaks (among other details that are irrelevant for us here). Suppose
below this surface all you have is water, colored water. Now, make holes
in each valley of your surface and the water starts to fill the whole
area. At some point, differently colored waters will meet, and when this
happens, you construct a dam so that they don't touch each other. In the
end you have a collection of dams, which is the watershed separating all
the different colored waters.

Now, if you make too many holes in that surface, you end up with too many
regions: over-segmentation. If you make too few, you get
under-segmentation. So, virtually any paper that suggests using watershed
actually presents techniques to avoid these problems for the application
the paper is dealing with.

I wrote all this (which is possibly too naïve for anyone who knows what
the watershed transform is) because it reflects directly on how you
should use watershed implementations (which the currently accepted answer
is doing in a completely wrong manner). Let us start on the OpenCV
example now, using the Python bindings.

The image presented in the question is composed of many objects that are
mostly too close together and in some instances overlapping. The
usefulness of watershed here is to separate these objects correctly, not
to group them into a single component. So you need at least one marker
for each object and good markers for the background. As an example, first
binarize the input image by Otsu and perform a morphological opening to
remove small objects. The result of this step is shown below in the left
image. Now apply the distance transform to the binary image; the result
is at right.

[image] [image]

With the distance transform result, we can choose a threshold such that
we consider only the regions most distant from the background (left image
below). Doing this, we can obtain a marker for each object by labeling
the different regions after the earlier threshold. Now, we can also
consider the border of a dilated version of the left image above to
complete our marker. The complete marker is shown below at right (some
markers are too dark to be seen, but each white region in the left image
is represented in the right image).

[image] [image]

The marker we have here makes a lot of sense. Each |colored water == one
marker| will start to fill its region, and the watershed transform will
construct dams to prevent the different "colors" from merging. If we do
the transform, we get the image at left. Compositing only the dams onto
the original image, we get the result at right.
[image] [image]

|import sys
import cv2
import numpy
from scipy.ndimage import label

def segment_on_dt(a, img):
    border = cv2.dilate(img, None, iterations=5)
    border = border - cv2.erode(border, None)

    dt = cv2.distanceTransform(img, 2, 3)
    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(numpy.uint8)
    _, dt = cv2.threshold(dt, 180, 255, cv2.THRESH_BINARY)
    lbl, ncc = label(dt)
    lbl = lbl * (255/ncc)
    # Completing the markers now.
    lbl[border == 255] = 255

    lbl = lbl.astype(numpy.int32)
    cv2.watershed(a, lbl)

    lbl[lbl == -1] = 0
    lbl = lbl.astype(numpy.uint8)
    return 255 - lbl


img = cv2.imread(sys.argv[1])

# Pre-processing.
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, img_bin = cv2.threshold(img_gray, 0, 255,
        cv2.THRESH_OTSU)
img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN,
        numpy.ones((3, 3), dtype=int))

result = segment_on_dt(img, img_bin)
cv2.imwrite(sys.argv[2], result)

result[result != 255] = 0
result = cv2.dilate(result, None)
img[result == 255] = (0, 0, 255)
cv2.imwrite(sys.argv[3], img)
|

Comment (Denis, May 23 at 20:42): Thanks for the tutorial. Very well
explained for us unfamiliar with the watershed algorithm. As you
mentioned, the number of segmented objects will mostly be dependent on
how many markers were found in the primary steps and here, it seems the
distance transform followed by thresholding left some pills segmented as
one. Could we have improved the results by changing the parameters of the
thresholding?
(User contributions above licensed under cc-wiki with attribution required.)

--------------------------------------------------------------------------------
/docs/work log/log.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/docs/work log/log.docx

--------------------------------------------------------------------------------
/software/firmware/backprojection.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
from matplotlib import pyplot as plt

# roi is the object or region of object we need to find
roi = cv2.imread('hand.jpg')
hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)
# Model histogram M of the ROI over hue and saturation
M = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
cap = cv2.VideoCapture(0)
# target is the image we search in
while( cap.isOpened() ) :
    ret,target = cap.read()
    hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)

    # Find the histograms. I used calcHist. It can be done with np.histogram2d also
    I = cv2.calcHist([hsvt],[0, 1], None, [180, 256], [0, 180, 0, 256] )
    # Ratio histogram R = M/I, backprojected onto the frame
    R = M/(I+1)
    h,s,v = cv2.split(hsvt)
    B = R[h.ravel(),s.ravel()]
    B = np.minimum(B,1)
    B = B.reshape(hsvt.shape[:2])
    # Smooth with an elliptical disc, then normalize and threshold
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
    cv2.filter2D(B,-1,disc,B)
    B = np.uint8(B)
    cv2.normalize(B,B,0,255,cv2.NORM_MINMAX)
    ret,thresh = cv2.threshold(B,50,255,0)
    thresh = cv2.merge((thresh,thresh,thresh))
    #res = cv2.bitwise_and(target,thresh)
    #res = np.vstack((target,thresh,res))
    cv2.imshow('res.jpg',thresh)
    # waitKey is required for imshow to refresh; Esc exits
    if cv2.waitKey(10) == 27:
        break

cap.release()
cv2.destroyAllWindows()
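Note: the script above builds the ratio histogram by hand. OpenCV exposes the
same operation as cv2.calcBackProject (the exact call used in ip2.py later in
this repo); a minimal sketch of the equivalent loop body, assuming the same
hsv/hsvt variables as in the script above:

# Built-in backprojection equivalent to the manual M/I ratio above.
# Assumes `hsv` (ROI in HSV) and `hsvt` (current frame in HSV) exist,
# computed exactly as in backprojection.py.
roihist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)
dst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)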
--------------------------------------------------------------------------------
/software/firmware/backsub.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

def CannyThreshold(lowThreshold):
    detected_edges = cv2.GaussianBlur(gray,(3,3),0)
    detected_edges = cv2.Canny(detected_edges,lowThreshold,lowThreshold*ratio,apertureSize = kernel_size)
    dst = cv2.bitwise_and(img,img,mask = detected_edges)  # just add some colours to edges from original image
    cv2.imshow('canny demo',dst)

lowThreshold = 0
max_lowThreshold = 100
ratio = 3
kernel_size = 3

# Raw string so backslashes in the Windows path are not treated as escapes
img = cv2.imread(r'C:\MyPic.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

cv2.namedWindow('canny demo')

cv2.createTrackbar('Min threshold','canny demo',lowThreshold, max_lowThreshold, CannyThreshold)

CannyThreshold(0)  # initialization
if cv2.waitKey(0) == 27:
    cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/software/firmware/backsub2.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

# The OpenCV 2.4-era bindings used by this project expose the MOG background
# subtractor; cv2.createBackgroundSubtractorGMG does not exist here (the GMG
# variant lives in cv2.bgsegm in OpenCV 3).
fgbg = cv2.BackgroundSubtractorMOG()

while(1):
    ret, frame = cap.read()

    fgmask = fgbg.apply(frame)

    cv2.imshow('frame',fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/software/firmware/cam.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while( cap.isOpened() ) :
    ret,img = cap.read()
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(5,5),0)
    ret,thresh1 = cv2.threshold(blur,70,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

    contours, hierarchy = cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    drawing = np.zeros(img.shape,np.uint8)

    # Pick the largest contour (assumed to be the hand)
    max_area = 0
    ci = -1
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        if(area > max_area):
            max_area = area
            ci = i

    if ci >= 0:
        cnt = contours[ci]
        hull = cv2.convexHull(cnt)
        moments = cv2.moments(cnt)
        centr = None
        if moments['m00'] != 0:
            cx = int(moments['m10']/moments['m00'])  # cx = M10/M00
            cy = int(moments['m01']/moments['m00'])  # cy = M01/M00
            centr = (cx,cy)
            cv2.circle(img,centr,5,[0,0,255],2)
        cv2.drawContours(drawing,[cnt],0,(0,255,0),2)   # draw contour in green
        cv2.drawContours(drawing,[hull],0,(0,0,255),2)  # draw convex hull in red

        cnt = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
        hull = cv2.convexHull(cnt,returnPoints = False)

        # convexityDefects returns None when there are no defects
        defects = cv2.convexityDefects(cnt,hull)
        if defects is not None and centr is not None:
            for i in range(defects.shape[0]):
                s,e,f,d = defects[i,0]
                start = tuple(cnt[s][0])
                end = tuple(cnt[e][0])
                far = tuple(cnt[f][0])
                dist = cv2.pointPolygonTest(cnt,centr,True)
                cv2.line(img,start,end,[0,255,0],2)
                cv2.circle(img,far,5,[0,0,255],-1)
                print(i)  # defect index

    cv2.imshow('output',drawing)
    cv2.imshow('input',img)

    k = cv2.waitKey(10)
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
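Note: cam.py stops at printing defect indices; the usual next step (and what
hand_gesture_with_serial.py later sends over serial) is turning defects into a
finger count. A minimal sketch; the helper name count_fingers and the depth
threshold are assumptions, not anything in this repo:

# Hypothetical helper, not part of this repo: estimate extended fingers
# from cv2.convexityDefects output. A deep defect (large depth d) is the
# valley between two fingers, so fingers ~= deep defects + 1.
def count_fingers(defects, depth_thresh=10000):
    if defects is None:
        return 0
    deep = sum(1 for i in range(defects.shape[0])
               if defects[i, 0][3] > depth_thresh)  # d is in 1/256-pixel units
    return deep + 1 if deep > 0 else 0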
--------------------------------------------------------------------------------
/software/firmware/cntrs.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

template = cv2.imread('hand.jpg')
cap = cv2.VideoCapture(0)
# A background subtractor must be instantiated once, outside the loop;
# cv2.BackgroundSubtractor.apply() cannot be called on the class itself.
fgbg = cv2.BackgroundSubtractorMOG()
while( cap.isOpened() ) :
    ret,img = cap.read()
    fgmask = fgbg.apply(img)
    cv2.imshow('output',fgmask)
    if cv2.waitKey(10) == 27:
        break

cap.release()
cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/software/firmware/ex1.PY:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/software/firmware/ex1.PY

--------------------------------------------------------------------------------
/software/firmware/ex3.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

# Raw string so backslashes in the Windows path are not treated as escapes
img = cv2.imread(r'C:\Python27\hand1.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
ret,thresh1 = cv2.threshold(blur,70,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

#thresh1 = cv2.adaptiveThreshold(thresh1,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
#cv2.THRESH_BINARY,11,2)
contours, hierarchy = cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
drawing = np.zeros(img.shape,np.uint8)

#print(len(contours))
#cnt=contours[0]

for cnt in contours:
    hull = cv2.convexHull(cnt)
    cv2.drawContours(drawing,[cnt],0,(0,255,0),2)   # draw contours in green
    cv2.drawContours(drawing,[hull],0,(0,0,255),2)  # draw convex hulls in red

cv2.imshow('output',drawing)
cv2.imshow('input',img)
# Without waitKey the windows are never drawn
cv2.waitKey(0)
cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/software/firmware/ex4.py:
--------------------------------------------------------------------------------
import cv
import cv2
import numpy as np
import copy
from optparse import OptionParser
import haar
import os


def loadRawSample(im):
    im = np.asarray(im)
    im = 255-(im/np.max(im)*255).astype('uint8')
    return im

def loadSample(im):
    img = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY).astype('uint8')
    return img

def extractBinary(img):
    element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
    #img = cv2.equalizeHist(img)

    # remove artefacts and noise
    img = cv2.erode(img, element)
    #img = cv2.dilate(img, element)

    # renormalize
    #img = ((img/np.max(img).astype('float'))*255).astype('uint8')

    thresh = findThresh(smoothHist(img))

    if thresh is not None:
        _, imb = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)
    else:
        _, imb = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    return imb

def drawPolygon(im, points, color, thickness=1):
    first = None
    last = None
    prev = None

    for p in points:
        if first is None:
            first = p
        else:
            cv2.line(im, prev, p, color, thickness)

        prev = p
        last = p

    cv2.line(im, last, first, color, thickness)

def drawPoints(im, points, color, radius = 2):
    for p in points:
        cv2.circle(im, p, radius, color, -1)

def drawOrientation(im, ellipse, color, thickness):
    e = ellipse
    cv2.ellipse(im, (e[0], (0, e[1][1]), e[2]), color, thickness)

def bestContourAsInt(contours, minArea = -1):
    maxArea = -1
    contour = None

    for cnt in contours:
        cnt_int = cnt.astype('int')
        area = cv2.contourArea(cnt_int)
        if(area > maxArea and area > minArea):
            contour = cnt_int
            maxArea = area

    return contour

def refineHullDefects(hull, defects, contour, thresh):
    hull_refined = list(hull)
    defects_points = list()

    for d in defects:
        index = hull.index(tuple(contour[d[0][0]][0]))
        value = tuple(contour[d[0][2]][0])

        if(d[0][3] > thresh):
            hull_refined.insert(index, value)
            defects_points.append(value)

    return hull_refined, defects_points

def drawResult(im, features):
    imc = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)

    drawPolygon(imc, features.get('hull'), (0, 255, 255), 2)
    drawPolygon(imc, features.get('shape'), (0, 255, 0), 2)
    drawPoints(imc, features.get('defects'), (255, 0, 0), 4)
    drawPoints(imc, [features.get('centroid')], (255, 0, 255), 6)
    drawOrientation(imc, features.get('boundingellipse'), (0, 0, 255), 1)

    return imc

def packFeatures(contour, hull, defects, shape, rect):
    ellipse = cv2.fitEllipse(contour)

    #(x,y,w,h) = rect if rect is not None else (0,0,shape[1],shape[0])

    M = cv2.moments(contour)
    centroid_x = int(M['m10']/M['m00'])
    centroid_y = int(M['m01']/M['m00'])
    center = (centroid_x, centroid_y)

    return {'contour': contour, 'hull': hull, 'defects_nb': len(defects), 'defects': defects, 'shape': shape, 'boundingellipse': ellipse, 'angle': ellipse[2], 'centroid': center, 'rect': rect}

def findROI(img, haarc):
    hands = haar.haarDetectHands(haarc, img)

    # Keep the largest detected hand rectangle
    maxi = 0
    rect = None
    for (x,y,w,h) in hands:
        if(w*h > maxi):
            maxi = w*h
            rect = (x,y,w,h)

    if rect is not None:
        (x,y,w,h) = rect
        imr = img[y:y+h, x:x+w]
    else:
        imr = img

    cv2.namedWindow("ROI")
    cv2.imshow("ROI", imr)

    return imr, rect

# Grab one frame from the camera and run the pipeline on it. (The original had
# this as stray top-level code with a bare `return`; the name processCapture
# is introduced here for readability.)
def processCapture(haarc=None):
    cap = cv2.VideoCapture(0)
    if( cap.isOpened() ):
        ret,img = cap.read()
        return process(loadSample(img), haarc)
    return None

def process(im, haarc=None, silent=False):
    if haarc is None:
        # Cascade path relative to this script; the original concatenated the
        # image array itself with the path string, which cannot work.
        haarc = haar.haarInit(os.path.dirname(__file__) + '/../haar/cascade.xml')

    img_ref = im
    img, rect = findROI(img_ref, haarc)
    imb = extractBinary(img)
    imb_contours = imb.copy()
    vect = None
    img_tr = np.copy(img_ref)

    if not silent:
        debugThresh(img)

    if rect is None:
        img_ref = cv2.cvtColor(img_ref,cv2.COLOR_GRAY2BGR)
        return img_ref, img_ref, None

    contours, _ = cv2.findContours(imb_contours, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    if contours:
        contour = bestContourAsInt(contours)
        hull = cv2.convexHull(contour, returnPoints=False).astype('int')
        defects = cv2.convexityDefects(contour, hull)

        hull_points = [tuple(p[0]) for p in cv2.convexHull(contour, returnPoints=True)]
        contour_points = [tuple(p[0]) for p in contour]

        hull_refined, defects_points = refineHullDefects(hull_points, defects, contour, 2500)

        features = packFeatures(contour, hull_points, defects_points, hull_refined, rect)

        img = drawResult(img, features)

        img_ref = cv2.cvtColor(img_ref,cv2.COLOR_GRAY2BGR)

        (x,y,w,h) = rect
        img_ref[y:y+h, x:x+w] = img
        cv2.rectangle(img_ref, (x,y), (x+w,y+h), (255,0,0))
        img_tr[y:y+h, x:x+w] = imb

    else:
        img_ref = cv2.cvtColor(img_ref,cv2.COLOR_GRAY2BGR)
        img_tr = imb

    densityVect = zoning(imb)
    img_tr = cv2.cvtColor(img_tr,cv2.COLOR_GRAY2BGR)

    return img_ref, img_tr, densityVect

def smoothHist(im):
    hist_item = cv2.calcHist([im],[0],None,[256],[0,255])
    cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
    hist = np.int32(np.around(hist_item))

    data = [int(x[0]) for x in hist]
    # 5-tap moving average, applied twice, to smooth the histogram
    b = 1/5.
    data = np.convolve([b,b,b,b,b], data, 'same').astype('uint8')
    data = np.convolve([b,b,b,b,b], data, 'same').astype('uint8')

    return data

def findThresh(a):
    peaks = []
    last = -999
    default = None

    if len(a) < 3:
        return default

    # Find local peaks at least 4 bins apart; the threshold sits halfway
    # between the last two peaks.
    for i in range(3, len(a)-3):
        if(a[i] > a[i-3] and a[i] > a[i+3] and i-last > 4 ):
            last = i
            peaks.append(i)

    if len(peaks) > 1:
        return int(peaks[-2] + (peaks[-1] - peaks[-2])/2)
    else:
        return default

def debugThresh(im):
    cv2.namedWindow("dt")
    h = np.zeros((300,256,3))
    bins = np.arange(256).reshape(256,1)
    color = (255,0,0)

    data = smoothHist(im)

    pts = np.column_stack((bins,data))
    cv2.polylines(h,[pts],False,color)
    h = np.flipud(h).astype('uint8')

    thresh = findThresh(data)

    if thresh is not None:
        #print thresh, data[thresh]
        drawPoints(h, [(int(thresh), int(300-data[thresh]))], (0,0,255))

    cv2.imshow("dt", h)

def zoning(imb):
    # Split the cropped hand into cut x cut zones and compute the fraction of
    # foreground pixels in each zone (a simple shape descriptor).
    cut = 7
    imHand = binaryCrop(imb)

    imWidth = imHand.shape[1]
    imHeight = imHand.shape[0]

    stepH = imHeight/cut - 1
    stepW = imWidth/cut - 1

    if(stepH < 1):
        stepH = 1

    if(stepW < 1):
        stepW = 1

    if imWidth <= cut or imHeight <= cut:
        return None

    density = []

    x = 1

    for i in range(0,imWidth-stepW,stepW):

        if x > cut:
            continue

        x = x+1
        y = 1

        for j in range(0,imHeight-stepH,stepH):

            if y > cut:
                continue

            y = y+1

            zone = imHand[j:j+stepH,i:i+stepW]
            zoneSize = zone.shape[0]*zone.shape[1]

            density.append(float(np.count_nonzero(zone))/float(zoneSize))

    if(len(density) != cut*cut):
        print "Cut is too large for ROI"
        return None

    return density

def binaryCrop(imb):
    # Find the bounding box of the nonzero (foreground) region and crop to it
    vertInd = np.where(np.argmax(imb,axis = 0)>0)
    y = vertInd[0][0] if vertInd[0].size else 0
    y2 = vertInd[0][-1] if vertInd[0].size else imb.shape[0]

    horInd = np.where(np.argmax(imb,axis = 1)>0)
    x = horInd[0][0] if horInd[0].size else 0
    x2 = horInd[0][-1] if horInd[0].size else imb.shape[1]

    crop = imb[x:x2,y:y2]
    return crop


if __name__ == '__main__' :
    cv2.namedWindow("Debug")
    cv2.namedWindow("Result")

    # Sample input image (assumed path); the original left `im` undefined here
    im = cv2.imread('hand.jpg')

    img_result, img_debug, density = process(loadSample(im))

    cv2.imshow("Debug", img_debug)
    cv2.imshow("Result", img_result)

    debugThresh(loadSample(im))

    cv2.waitKey(0)
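Note: zoning() reduces the hand silhouette to a 7x7 density vector, which makes
gestures comparable numerically. A minimal sketch of how such a vector could be
matched against stored reference gestures; the helper and the (name, vector)
template format are hypothetical, not part of this repo:

import numpy as np

# Hypothetical gesture matcher, not part of this repo: compare the 7x7
# density vector from zoning() against stored reference vectors and pick
# the nearest one by Euclidean distance.
def match_gesture(density, templates):
    if density is None:
        return None
    v = np.asarray(density)
    best_name, best_dist = None, float('inf')
    for name, ref in templates:
        d = np.linalg.norm(v - np.asarray(ref))
        if d < best_dist:
            best_name, best_dist = name, d
    return best_name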
--------------------------------------------------------------------------------
/software/firmware/exe file/cam.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/software/firmware/exe file/cam.exe

--------------------------------------------------------------------------------
/software/firmware/haar.py:
--------------------------------------------------------------------------------
import cv2
from optparse import OptionParser

def haarInit(cascadeXml):
    return cv2.CascadeClassifier(cascadeXml)

def haarDetectHands(classifier, im):
    return classifier.detectMultiScale(im)

if __name__ == '__main__' :
    parser = OptionParser()
    parser.add_option("-c", "--cascade-xml", dest="cascadeXml", help="Haar cascade xml file", metavar="DIR")
    parser.add_option("-i", "--image", dest="image", help="Image to work on")
    (options, args) = parser.parse_args()

    im = cv2.imread(options.image)
    # Note the argument order: the classifier comes first
    hands = haarDetectHands(haarInit(options.cascadeXml), im)

    for (x,y,w,h) in hands:
        cv2.rectangle(im, (x,y), (x+w,y+h), 255)

    cv2.namedWindow("Hand Detection")
    cv2.imshow("Hand Detection", im)

    cv2.waitKey(0)

--------------------------------------------------------------------------------
/software/firmware/hand_gesture_with_serial.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
import serial

ser = serial.Serial('com17', 9600)
cap = cv2.VideoCapture(0)
while( cap.isOpened() ) :
    ret,img = cap.read()
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(5,5),0)
    ret,thresh1 = cv2.threshold(blur,70,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

    #thresh1 = cv2.adaptiveThreshold(thresh1,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
    #cv2.THRESH_BINARY,11,2)
    contours, hierarchy = cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    drawing = np.zeros(img.shape,np.uint8)

    # Pick the largest contour (assumed to be the hand)
    max_area = 0
    ci = -1
    #print(len(contours))
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        if(area > max_area):
            max_area = area
            ci = i

    if ci >= 0:
        cnt = contours[ci]
        hull = cv2.convexHull(cnt)
        moments = cv2.moments(cnt)
        centr = None
        if moments['m00'] != 0:
            cx = int(moments['m10']/moments['m00'])  # cx = M10/M00
            cy = int(moments['m01']/moments['m00'])  # cy = M01/M00
            #moment_area = moments['m00']  # Contour area from moment
            #contour_area = cv2.contourArea(cnt)
            #rect = cv2.minAreaRect(cnt)
            #box = cv2.cv.BoxPoints(rect)
            #box = np.int0(box)
            centr = (cx,cy)
            cv2.circle(img,centr,5,[0,0,255],2)
        cv2.drawContours(drawing,[cnt],0,(0,255,0),2)   # draw contour in green
        cv2.drawContours(drawing,[hull],0,(0,0,255),2)  # draw convex hull in red
        #cv2.drawContours(drawing,[box],0,(0,0,255),2)
        cnt = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
        hull = cv2.convexHull(cnt,returnPoints = False)

        # convexityDefects returns None when there are no defects
        defects = cv2.convexityDefects(cnt,hull)
        if defects is not None and centr is not None:
            for i in range(defects.shape[0]):
                s,e,f,d = defects[i,0]
                start = tuple(cnt[s][0])
                end = tuple(cnt[e][0])
                far = tuple(cnt[f][0])
                dist = cv2.pointPolygonTest(cnt,centr,True)
                cv2.line(img,start,end,[0,255,0],2)
                cv2.circle(img,far,5,[0,0,255],-1)
                print(i)
                ser.write(str(i))  # pyserial's write() takes a string, not an int

    cv2.imshow('output',drawing)
    cv2.imshow('input',img)

    k = cv2.waitKey(10)
    if k == 27:
        break

ser.close()
cap.release()
cv2.destroyAllWindows()
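Note: hand_gesture_with_serial.py writes one ASCII digit per convexity defect
to the serial port. For testing without the robot, a minimal receiving-side
sketch with pyserial; the port name, baud rate, and one-digit-per-byte framing
are assumptions, not something the repo specifies:

import serial

# Hypothetical test receiver mirroring hand_gesture_with_serial.py's sender.
# Port name and baud rate are assumptions; adjust to your setup.
ser = serial.Serial('com18', 9600, timeout=1)
while True:
    byte = ser.read(1)  # the sender emits one ASCII digit per defect
    if byte:
        print("received defect index: " + byte)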
--------------------------------------------------------------------------------
/software/firmware/img.py:
--------------------------------------------------------------------------------
import cv2

# Let the camera auto-adjust over some throwaway frames before capturing
ramp_frames = 100
camera = cv2.VideoCapture(0)

def get_image():
    retval, im = camera.read()
    return im

for i in xrange(ramp_frames):
    temp = get_image()

camera_capture = get_image()
# Raw string: in "C:\test_image.png" the \t would become a tab character
filename = r"C:\test_image.png"
print("Taking image...")
cv2.imwrite(filename, camera_capture)
cv2.imshow("lll", camera_capture)
cv2.waitKey(0)
camera.release()

--------------------------------------------------------------------------------
/software/firmware/ip1.py:
--------------------------------------------------------------------------------
import cv2

cv2.namedWindow("lll")
cap = cv2.VideoCapture(0)
x = 100
if(cap.isOpened()):
    # Discard a few frames so the camera can adjust
    ret,img2 = cap.read()
    ret,img2 = cap.read()
    ret,img2 = cap.read()
    ret,img2 = cap.read()
    ret,img2 = cap.read()
    img2 = img2[:,:,1]
    # Manual thresholding experiments (commented out)
    #img2[img2[:,:]>x]=255
    #img2[img2[:,:]<x]=0

while( cap.isOpened() ) :
    ret,img = cap.read()
    img = img[:,:,1]
    #img[img[:,:]>x]=255
    #img[img[:,:]<x]=0
    #img[img[:,:]>128]=0
    #image[image[:,:]!=0]=0

    cv2.imshow("lll",img)
    k = cv2.waitKey(10)
    if k == 27:
        break

--------------------------------------------------------------------------------
/software/firmware/ip2.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

# roi is the object we search for; imread returns the image directly, not a
# (ret, image) pair. Raw string so the Windows path backslashes are literal.
roi = cv2.imread(r'C:\Users\KAMAL\Pictures\rose.jpg')
hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)

# calculating object histogram (once, outside the loop)
roihist = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
# normalize histogram
cv2.normalize(roihist,roihist,0,255,cv2.NORM_MINMAX)

while( cap.isOpened() ) :
    ret,target = cap.read()
    hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)

    # apply backprojection
    dst = cv2.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)

    # Now convolve with circular disc
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
    cv2.filter2D(dst,-1,disc,dst)

    # threshold and binary AND
    ret,thresh = cv2.threshold(dst,50,255,0)
    thresh = cv2.merge((thresh,thresh,thresh))
    res = cv2.bitwise_and(target,thresh)

    res = np.vstack((target,thresh,res))
    cv2.imshow("lll",res)
    if cv2.waitKey(10) == 27:
        break

cap.release()
cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/software/firmware/yellowtrack.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while(1):

    # Take each frame
    _, frame = cap.read()

    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # define the HSV range of the colour to track
    lower = np.array([50,100,100])
    upper = np.array([70,255,255])

    # Threshold the HSV image to keep only that colour range
    mask = cv2.inRange(hsv, lower, upper)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame,frame, mask= mask)

    cv2.imshow('frame',frame)
    cv2.imshow('mask',mask)
    cv2.imshow('res',res)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()

# To find the threshold for a given colour, convert a single BGR pixel to HSV:
#green = np.uint8([[[0,255,0 ]]])
#hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)
#print hsv_green

--------------------------------------------------------------------------------
/software/pyserial-2.6.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/software/pyserial-2.6.tar.gz

--------------------------------------------------------------------------------
/software/used sw/matplotlib-1.2.1.win32-py2.7.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/software/used sw/matplotlib-1.2.1.win32-py2.7.exe

--------------------------------------------------------------------------------
/software/used sw/numpy-1.6.1-win32-superpack-python2.7.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/software/used sw/numpy-1.6.1-win32-superpack-python2.7.exe

--------------------------------------------------------------------------------
/software/used sw/python-2.7.3.msi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iftheqhar/opencv2_python/07a13bca99e04bd0ad77f6375560016ca88d880c/software/used sw/python-2.7.3.msi
--------------------------------------------------------------------------------