├── image.png
├── image1.jpg
├── image1_1.jpg
├── a135a347-2d05-4257-9007-2787b9610f62.jpg
├── README.md
└── sizeandcolor_v1.py

/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/A7med01/Documentation/master/image.png
--------------------------------------------------------------------------------
/image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/A7med01/Documentation/master/image1.jpg
--------------------------------------------------------------------------------
/image1_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/A7med01/Documentation/master/image1_1.jpg
--------------------------------------------------------------------------------
/a135a347-2d05-4257-9007-2787b9610f62.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/A7med01/Documentation/master/a135a347-2d05-4257-9007-2787b9610f62.jpg
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Size and color sorting - version one

[image1]: ./image1_1.jpg "first"
[image2]: ./image.png "second"

![alt text][image1]

This code gives the color, diameter and shape of a fruit:
- The background must be white, so we use a white box as shown above.
- An industrial camera sits on top of the box, together with a light source, as shown above.
- First we take a picture with the camera and detect the fruit's outline using classical computer vision techniques; from that outline we get the diameter and the color of the region, as shown below. A minimal sketch of the pipeline follows the notes.

![alt text][image2]

Tools:
- Python 3
- OpenCV 4

A detailed description of the code is in the notebook.

Notes:
- Simple and fast code (about 50 images per second).
- It works poorly if the background color, the light source position or the light intensity change; it works as long as the code is tuned to the background and the lighting, which is the case in our application.
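
Below is a simplified, single-image sketch of the pipeline (the file name `fruit.jpg`, the fixed threshold of 75 and the `pixelsPerMetric` value are placeholders; `sizeandcolor_v1.py` runs the full live version on the camera feed):

```python
import cv2
import numpy as np

# a single frame saved from the camera: fruit on the white background (placeholder file name)
image = cv2.imread("fruit.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# 1. inverse threshold: the bright background becomes 0, the darker fruit becomes 255
_, mask = cv2.threshold(gray, 75, 255, cv2.THRESH_BINARY_INV)

# 2. morphology: remove small specks and close holes inside the fruit
kernel = np.ones((5, 5), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

# 3. the largest external contour is the fruit
cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
fruit = max(cnts, key=cv2.contourArea)

# 4. diameters from the rotated bounding box (pixelsPerMetric must be calibrated for real units)
(_, (w, h), _) = cv2.minAreaRect(fruit)
pixelsPerMetric = 3  # same placeholder value the script uses
print("diameters: {:.1f} x {:.1f}".format(w / pixelsPerMetric, h / pixelsPerMetric))

# 5. mean BGR color inside the fruit region
fruit_mask = np.zeros(mask.shape, np.uint8)
cv2.drawContours(fruit_mask, [fruit], -1, 255, -1)
print("mean BGR color:", cv2.mean(image, mask=fruit_mask)[:3])
```

The full script additionally adds trackbars so the threshold and morphology parameters can be tuned live on the camera feed.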

# This notebook has the code with a description: [Here](https://github.com/A7med01/Documentation/blob/master/size_color_fruits_v1.ipynb)

# This Python file can be run locally: [Here](https://github.com/A7med01/Documentation/blob/master/sizeandcolor_v1.py)
--------------------------------------------------------------------------------
/sizeandcolor_v1.py:
--------------------------------------------------------------------------------

import cv2
import pandas as pd
import numpy as np
import imutils
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import time
import matplotlib.image as mpimg
from PIL import Image
import matplotlib.pyplot as plt


def safe_div(x, y):  # avoid crashing on division by zero
    if y == 0:
        return 0
    return x / y

def nothing(x):  # dummy callback for the trackbars
    pass


def rescale_frame(frame, percent=100):  # make the video windows a bit smaller
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)


#----------------------------------------------------------------------
#----------------------------------------------------------------------

def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

#----------------------------------------------------------------------
#----------------------------------------------------------------------

def thresholding(image, thresh=75, thresh2=200):

    # detect just the fruit (remove the background and the cube)

    # the fruit has a different color from the white background;
    # with cv2.THRESH_BINARY_INV a pixel below the threshold is set to the maximum value
    # and a pixel above it is set to 0, so the dark fruit turns white and the bright background black
    ret, thresholded = cv2.threshold(image, thresh, 255, cv2.THRESH_BINARY_INV)

    # to remove the black cube
    #gray = cv2.cvtColor(thresholded, cv2.COLOR_BGR2GRAY)
    #ret, thresh = cv2.threshold(gray, thresh2, 255, cv2.THRESH_BINARY)
    #thresholded[thresh == 255] = 0

    return thresholded

#---------------------------------------------------------------------
#---------------------------------------------------------------------

def getstruct(thresholded, kern=5, itera=1):

    # remove all small specks and clean up the detected shape

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    # try iterations = 2
    thresh_getstruct = cv2.erode(thresholded, kernel, iterations=1)

    kernel = np.ones((kern, kern), np.uint8)  # square kernel used for erosion and dilation

    dilation = cv2.dilate(thresh_getstruct, kernel, iterations=itera)
    erosion = cv2.erode(dilation, kernel, iterations=itera)  # refines all edges in the binary image

    # opening is erosion followed by dilation; it is useful for removing noise
    opening = cv2.morphologyEx(erosion, cv2.MORPH_OPEN, kernel)

    # closing is the reverse of opening, dilation followed by erosion; it is useful for closing small holes inside the foreground objects
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)

    closing[closing > 0] = 255

    return closing

#------------------------------------------------------------
#------------------------------------------------------------

def toblackandwhite(ttt):

    # invert the cleaned mask, then build an image that is white inside the fruit and black elsewhere
    ttt = cv2.bitwise_not(ttt)
    ccc = cv2.cvtColor(ttt, cv2.COLOR_BGR2GRAY)
    image2 = np.zeros(ttt.shape, np.uint8)
    image2[ccc < 255] = 255

    return image2
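

# Illustrative preview of the three stages above on a single saved frame
# (the file name "fruit.jpg" is a placeholder; the live loop at the bottom of
# this file does the same on every webcam frame):
#
#   img = cv2.imread("fruit.jpg")
#   fruit_mask = toblackandwhite(getstruct(thresholding(img, thresh=75), kern=5, itera=1))
#   cv2.imwrite("mask_preview.png", fruit_mask)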

#------------------------------------------------------------
#------------------------------------------------------------

def findbiggestcontour(closing):
    closing = cv2.cvtColor(closing, cv2.COLOR_BGR2GRAY)
    # find the external contours with full point storage (cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    contours, hierarchy = cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    #print(contours)
    closing = cv2.cvtColor(closing, cv2.COLOR_GRAY2RGB)

    # focus on only the largest outline by area
    areas = []  # list to hold all areas

    for contour in contours:
        ar = cv2.contourArea(contour)
        areas.append(ar)

    if areas:
        max_area = max(areas)
        max_area_index = areas.index(max_area)  # index of the contour with the largest area
        cnt = contours[max_area_index]  # the largest contour is the fruit
        return cnt, areas, contours
    else:
        return 0, areas, contours


def find_box_points(orig, cnt):

    # compute the rotated bounding box of the contour
    box = cv2.minAreaRect(cnt)  # finds the rotated rectangle of minimum area enclosing the input 2D point set
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
    box = np.array(box, dtype="int")

    # order the points so that they appear in top-left, top-right,
    # bottom-right and bottom-left order, then draw the outline of
    # the rotated bounding box
    box = perspective.order_points(box)

    # loop over the ordered points and draw the box corners
    #for (x, y) in box:
    #    cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)

    return box, orig

def Diameterandcenter(box, cnt):

    # unpack the ordered bounding box, then compute the midpoint
    # between the top-left and top-right coordinates, followed by
    # the midpoint between the bottom-left and bottom-right coordinates
    (tl, tr, br, bl) = box
    #print(tl)
    (tltrX, tltrY) = midpoint(tl, tr)
    (blbrX, blbrY) = midpoint(bl, br)

    # compute the midpoint between the top-left and bottom-left points,
    # followed by the midpoint between the top-right and bottom-right
    (tlblX, tlblY) = midpoint(tl, bl)
    (trbrX, trbrY) = midpoint(tr, br)

    # compute the Euclidean distances between the midpoints and between opposite corners
    dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
    dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
    dC = dist.euclidean((tl[0], tl[1]), (br[0], br[1]))
    dD = dist.euclidean((tr[0], tr[1]), (bl[0], bl[1]))

    # compute the size of the object
    pixelsPerMetric = 3  # more to do here to get actual measurements that have meaning in the real world
    dimA = dA / pixelsPerMetric
    dimB = dB / pixelsPerMetric
    dimC = dC / pixelsPerMetric
    dimD = dD / pixelsPerMetric

    # compute the center of the contour
    M = cv2.moments(cnt)
    cX = int(safe_div(M["m10"], M["m00"]))
    cY = int(safe_div(M["m01"], M["m00"]))

    return dimA, dimB, cX, cY, tltrX, tltrY, blbrX, blbrY, tlblX, tlblY, trbrX, trbrY, tl, tr, br, bl, dimC, dimD
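

# Calibration note (illustrative sketch; nothing in this script calls this helper):
# pixelsPerMetric above is hard-coded to 3, so the printed "mm" values are only
# relative. A common way to calibrate it is to photograph a reference object of
# known width at the same camera height and divide its width in pixels by its
# real width in millimetres:
def calibrate_pixels_per_metric(reference_cnt, known_width_mm):
    # width in pixels of the reference object's rotated bounding box
    (_, (w, h), _) = cv2.minAreaRect(reference_cnt)
    return safe_div(max(w, h), known_width_mm)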


def drawings(closing, orig, ctt, cX, cY, mean, contours, box, tltrX, tltrY, blbrX, blbrY, tlblX, tlblY, trbrX, trbrY, dimA, dimB, tl, tr, br, bl, dimC, dimD):

    # draw a swatch filled with the mean color, the contours and the rotated bounding box
    cv2.rectangle(orig, (500, 350), (600, 450), (int(mean[0]), int(mean[1]), int(mean[2])), -1)
    cv2.drawContours(closing, contours, -1, (128, 255, 0), 1)
    cv2.drawContours(closing, [ctt], 0, (0, 0, 255), 1)
    cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 1)

    # draw the midpoints on the image
    cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)

    # draw lines between the midpoints
    cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), (255, 0, 255), 1)
    cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), (255, 0, 255), 1)

    cv2.line(orig, (int(tl[0]), int(tl[1])), (int(br[0]), int(br[1])), (255, 0, 255), 1)
    cv2.line(orig, (int(bl[0]), int(bl[1])), (int(tr[0]), int(tr[1])), (255, 0, 255), 1)

    cv2.drawContours(orig, [ctt], 0, (0, 0, 255), 1)

    # draw the object sizes on the image
    cv2.putText(orig, "{:.1f}mm".format(dimA), (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
    cv2.putText(orig, "{:.1f}mm".format(dimB), (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
    cv2.putText(orig, "{:.1f}mm".format(dimC), (int(tl[0] - 15), int(tl[1] - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
    cv2.putText(orig, "{:.1f}mm".format(dimD), (int(bl[0] + 10), int(bl[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)

    # draw the contour center on the image
    cv2.circle(orig, (cX, cY), 5, (255, 255, 255), -1)
    cv2.putText(orig, "center", (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

    return closing, orig


# capture from the camera (index 1 here; use 0 for the computer's built-in webcam)
videocapture = cv2.VideoCapture(1)


windowName = "Webcam Live video feed"

cv2.namedWindow(windowName)

cv2.createTrackbar("threshold", windowName, 75, 255, nothing)
cv2.createTrackbar("kernel", windowName, 5, 30, nothing)
cv2.createTrackbar("iterations", windowName, 1, 10, nothing)

showLive = True
while showLive:
    t1 = time.time()
    ret, frame = videocapture.read()

    if not ret:
        print("cannot capture the frame")
        exit()

    frame_resize = rescale_frame(frame)

    thresh = cv2.getTrackbarPos("threshold", windowName)
    kern = cv2.getTrackbarPos("kernel", windowName)
    itera = cv2.getTrackbarPos("iterations", windowName)

    thresholded = thresholding(frame_resize, thresh)

    closing = getstruct(thresholded, kern, itera)
    blackandwhite = toblackandwhite(closing)
    ctt, areas, contours = findbiggestcontour(blackandwhite)

    #print(ctt)
    #print(areas)
    orig = frame_resize.copy()

    if areas:

        box, orig = find_box_points(orig, ctt)
        dimA, dimB, cX, cY, tltrX, tltrY, blbrX, blbrY, tlblX, tlblY, trbrX, trbrY, tl, tr, br, bl, dimC, dimD = Diameterandcenter(box, ctt)
        #print('dimA = ', dimA, 'dimB = ', dimB, 'cX = ', cX, 'cY = ', cY)
        print('dimA = ', int(dimA), 'dimB = ', int(dimB))

        if (dimA < 140 and dimA > 40) or (dimB < 140 and dimB > 40):

            # mask of the fruit region, used to compute its mean color
            mask = np.zeros(frame_resize.shape[:2], np.uint8)

            cv2.drawContours(mask, [ctt], -1, 255, -1)
            cv2.fillPoly(mask, pts=[ctt], color=(255, 0, 0))

            mean = cv2.mean(frame_resize, mask=mask)
            #print('mean is', mean)

            origeee = frame_resize.copy()
            origeee[mask == 0] = 255

            closing, orig = drawings(closing, orig, ctt, cX, cY, mean, contours, box, tltrX, tltrY, blbrX, blbrY, tlblX, tlblY, trbrX, trbrY, dimA, dimB, tl, tr, br, bl, dimC, dimD)

        t2 = time.time()
        t = t2 - t1
        print('time = ', t)
        cv2.imshow(windowName, orig)
        cv2.imshow('--', blackandwhite)
        #cv2.imshow('mask', mask)

        if cv2.waitKey(30) >= 0:
            showLive = False
    else:
        cv2.imshow(windowName, frame)
        cv2.imshow('--', blackandwhite)
        if cv2.waitKey(30) >= 0:
            showLive = False


videocapture.release()
cv2.destroyAllWindows()
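
# Usage: run this file directly, e.g. `python sizeandcolor_v1.py`. A window named
# "Webcam Live video feed" opens with three trackbars (threshold, kernel,
# iterations) for tuning the segmentation live; the measured diameters and the
# per-frame processing time are printed to the console, and pressing any key exits.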
--------------------------------------------------------------------------------