├── README.md
├── haarcascades
│   ├── haarcascade_eye.xml
│   ├── haarcascade_eye_tree_eyeglasses.xml
│   ├── haarcascade_frontalface_alt.xml
│   ├── haarcascade_frontalface_alt2.xml
│   ├── haarcascade_frontalface_alt_tree.xml
│   ├── haarcascade_frontalface_default.xml
│   ├── haarcascade_fullbody.xml
│   ├── haarcascade_lefteye_2splits.xml
│   ├── haarcascade_lowerbody.xml
│   ├── haarcascade_mcs_eyepair_big.xml
│   ├── haarcascade_mcs_eyepair_small.xml
│   ├── haarcascade_mcs_leftear.xml
│   ├── haarcascade_mcs_lefteye.xml
│   ├── haarcascade_mcs_mouth.xml
│   ├── haarcascade_mcs_nose.xml
│   ├── haarcascade_mcs_rightear.xml
│   ├── haarcascade_mcs_righteye.xml
│   ├── haarcascade_mcs_upperbody.xml
│   ├── haarcascade_profileface.xml
│   ├── haarcascade_righteye_2splits.xml
│   └── haarcascade_upperbody.xml
├── image
│   ├── 1b-Hello_World.py
│   ├── 1c-Resize.py
│   ├── 1d-transformation.py
│   ├── 1e-Split.py
│   ├── 2a-Filters.py
│   ├── 2b-Slider.py
│   ├── 2c-Operations.py
│   ├── 2d-Arithmetics.py
│   ├── 3a-Scan.py
│   ├── 3b-Noise.py
│   ├── 3c-Pattern.py
│   ├── 4a-histogram.py
│   ├── 4b-backprojecting.py
│   ├── 5a-laplace.py
│   ├── 5b-sobel.py
│   ├── 5c-morph.py
│   ├── 5d-canny.py
│   ├── 5e-contours.py
│   ├── 5f-harris.py
│   ├── 6a-good_features.py
│   ├── 6b-Haar_Face_dectection.py
│   ├── 8b-grabcut.py
│   └── 8meanshift.py
├── img
│   ├── alkaline.jpg
│   ├── build.png
│   ├── fruits.jpg
│   ├── lena.jpg
│   └── road.png
├── officialsamples
│   ├── MinimumArea.py
│   ├── OpticalFlowFarneback.py
│   ├── OpticalFlowPyrLK.py
│   ├── camshift.py
│   ├── motempl.py
│   └── watershed.py
└── video
    ├── 1-Readvideo.py
    ├── 2-WriteVideo.py
    ├── 3-Processvideo_Canny.py
    ├── 4-Haar_webcam.py
    ├── 5-Trackingvideo-easy.py
    ├── 6-Trackingvideo-harder.py
    ├── 7-raw_compare.py
    ├── 8-raw_compare2.py
    ├── 9-Background.py
    └── getFPSWebcam.py

/README.md:
--------------------------------------------------------------------------------
OpenCV-tutorials
================

This project contains the whole source code of my OpenCV tutorial available at http://www.robindavid.fr/opencv-tutorial/

--------------------------------------------------------------------------------
/haarcascades/haarcascade_mcs_leftear.xml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RobinDavid/OpenCV-tutorials/031c92418235e12cb9ce8f8868e38025a7407e5c/haarcascades/haarcascade_mcs_leftear.xml
--------------------------------------------------------------------------------
/haarcascades/haarcascade_mcs_rightear.xml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RobinDavid/OpenCV-tutorials/031c92418235e12cb9ce8f8868e38025a7407e5c/haarcascades/haarcascade_mcs_rightear.xml
--------------------------------------------------------------------------------
/image/1b-Hello_World.py:
--------------------------------------------------------------------------------
import cv2.cv as cv

image=cv.LoadImage('../img/lena.jpg', cv.CV_LOAD_IMAGE_COLOR) #Load the image

font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8) #Create a font

y = image.height / 2 # y position of the text
x = image.width / 4 # x position of the text

cv.PutText(image,"Hello World !", (x,y),font, cv.RGB(255, 255, 255)) #Draw the text

cv.ShowImage('Hello World', image) #Show the image
cv.WaitKey(0)
--------------------------------------------------------------------------------
/image/1c-Resize.py:
--------------------------------------------------------------------------------
import cv2.cv as cv

im = cv.LoadImage("../img/alkaline.jpg") #Get the image

thumb = cv.CreateImage((im.width / 2, im.height / 2), 8, 3) #Create an image half the size of the original
cv.Resize(im, thumb) #Resize the original image into thumb
#cv.PyrDown(im, thumb)
cv.SaveImage("thumb.png", thumb) #Save the thumb image

--------------------------------------------------------------------------------
/image/1d-transformation.py:
--------------------------------------------------------------------------------
import cv2.cv as cv

im=cv.LoadImage('../img/fruits.jpg',cv.CV_LOAD_IMAGE_COLOR)

res = cv.CreateImage(cv.GetSize(im), cv.CV_8UC2, 3) #cv.CV_32F, cv.IPL_DEPTH_16S, ...
cv.Convert(im, res)
cv.ShowImage("Converted",res)

res2 = cv.CreateImage(cv.GetSize(im), cv.CV_8UC2, 3)
cv.CvtColor(im, res2, cv.CV_RGB2BGR) # HLS, HSV, YCrCb, ....
cv.ShowImage("CvtColor", res2)

cv.WaitKey(0)
--------------------------------------------------------------------------------
/image/1e-Split.py:
--------------------------------------------------------------------------------
import cv2.cv as cv

orig = cv.LoadImage('../img/fruits.jpg')

b = cv.CreateImage(cv.GetSize(orig), orig.depth, 1)
g = cv.CloneImage(b)
r = cv.CloneImage(b)
cv.Split(orig, b, g, r, None)

merged = cv.CreateImage(cv.GetSize(orig), 8, 3)
cv.Merge(g, b, r, None, merged)

cv.ShowImage("Image", orig)
cv.ShowImage("Blue", b)
cv.ShowImage("Green", g)
cv.ShowImage("Red", r)
cv.ShowImage("Merged", merged)

cv.WaitKey(0)
--------------------------------------------------------------------------------
/image/2a-Filters.py:
--------------------------------------------------------------------------------
import cv2.cv as cv

image=cv.LoadImage('../img/lena.jpg', cv.CV_LOAD_IMAGE_COLOR) #Load the image
cv.ShowImage("Original", image)

grey = cv.CreateImage((image.width ,image.height),8,1) #8-bit depth, 1 channel, so grayscale
cv.CvtColor(image, grey, cv.CV_RGBA2GRAY) #Convert to grayscale, so it acts as a filter
cv.ShowImage('Greyed', grey)

smoothed = cv.CloneImage(image)
cv.Smooth(image,smoothed,cv.CV_MEDIAN) #Apply a smoothing algorithm with the specified method cv.CV_MEDIAN
cv.ShowImage("Smoothed", smoothed)

cv.EqualizeHist(grey, grey) #Works only on grayscale pictures
cv.ShowImage('Equalized', grey)

threshold1 = cv.CloneImage(grey)
cv.Threshold(threshold1,threshold1, 100, 255, cv.CV_THRESH_BINARY)
cv.ShowImage("Threshold Binary", threshold1)

threshold2 = cv.CloneImage(grey)
cv.Threshold(threshold2,threshold2, 100, 255, cv.CV_THRESH_OTSU)
cv.ShowImage("Threshold OTSU", threshold2)

element_shape = cv.CV_SHAPE_RECT
pos=3
element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, element_shape)
cv.Dilate(grey,grey,element,2) #Replace each pixel value with the maximum value of its neighbors
#There are others like Erode, which takes the lowest value of the neighborhood
#Note: the structuring element is optional
cv.ShowImage("Dilated", grey)

cv.WaitKey(0)
--------------------------------------------------------------------------------
/image/2b-Slider.py:
--------------------------------------------------------------------------------
import cv2.cv as cv

im = cv.LoadImage("../img/lena.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE)
thresholded = cv.CreateImage(cv.GetSize(im), 8, 1)

def onChange(val):
cv.Threshold(im, thresholded, val, 255, cv.CV_THRESH_BINARY) 8 | cv.ShowImage("Image", thresholded) 9 | 10 | 11 | onChange(100) #Call here otherwise at startup. Show nothing until we move the trackbar 12 | cv.CreateTrackbar("Thresh", "Image", 100, 255, onChange) #Threshold value arbitrarily set to 100 13 | 14 | cv.WaitKey(0) 15 | 16 | ''' 17 | capture=cv.CaptureFromCAM(0) 18 | 19 | value = 100 20 | 21 | def onChange(val): 22 | global value 23 | value = val 24 | #cv.Threshold(im, dst, value, 255, cv.CV_THRESH_BINARY) 25 | 26 | cv.NamedWindow("Image") 27 | cv.CreateTrackbar("Mytrack", "Image", 100, 255, onChange) 28 | tmp = cv.QueryFrame(capture) 29 | gray = cv.CreateImage(cv.GetSize(tmp), 8, 1) 30 | 31 | while True: 32 | frame=cv.QueryFrame(capture) 33 | cv.CvtColor(frame, gray, cv.CV_BGR2GRAY) 34 | cv.Threshold(gray, gray, value, 255, cv.CV_THRESH_BINARY) 35 | cv.ShowImage("Image",gray) 36 | c=cv.WaitKey(1) 37 | if c==27: #Break if user enters 'Esc'. 38 | break 39 | ''' -------------------------------------------------------------------------------- /image/2c-Operations.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | im = cv.LoadImage("../img/lena.jpg",3) 4 | 5 | cv.SetImageROI(im, (50,50,150,150)) 6 | 7 | cv.Zero(im) 8 | #cv.Set(im, cv.RGB(100, 100, 100)) 9 | 10 | cv.ResetImageROI(im) 11 | 12 | cv.ShowImage("Image",im) 13 | 14 | cv.WaitKey(0) -------------------------------------------------------------------------------- /image/2d-Arithmetics.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv#or simply import cv 2 | 3 | im = cv.LoadImage("../img/lena.jpg") 4 | im2 = cv.LoadImage("../img/fruits-larger.jpg") 5 | cv.ShowImage("Image1", im) 6 | cv.ShowImage("Image2", im2) 7 | 8 | res = cv.CreateImage(cv.GetSize(im2), 8, 3) 9 | 10 | cv.Add(im, im2, res) #Add every pixels together (black is 0 so low change and white overload anyway) 11 | cv.ShowImage("Add", res) 12 | 13 | cv.AbsDiff(im, im2, res) # Like minus for each pixel im(i) - im2(i) 14 | cv.ShowImage("AbsDiff", res) 15 | 16 | cv.Mul(im, im2, res) #Multiplie each pixels (almost white) 17 | cv.ShowImage("Mult", res) 18 | 19 | cv.Div(im, im2, res) #Values will be low so the image will likely to be almost black 20 | cv.ShowImage("Div", res) 21 | 22 | cv.And(im, im2, res) #Bit and for every pixels 23 | cv.ShowImage("And", res) 24 | 25 | cv.Or(im, im2, res) # Bit or for every pixels 26 | cv.ShowImage("Or", res) 27 | 28 | cv.Not(im, res) # Bit not of an image 29 | cv.ShowImage("Not", res) 30 | 31 | cv.Xor(im, im2, res) #Bit Xor 32 | cv.ShowImage("Xor", res) 33 | 34 | cv.Pow(im, res, 2) #Pow the each pixel with the given value 35 | cv.ShowImage("Pow", res) 36 | 37 | cv.Max(im, im2, res) #Maximum between two pixels 38 | #Same form Min MinS 39 | cv.ShowImage("Max",res) 40 | 41 | cv.WaitKey(0) -------------------------------------------------------------------------------- /image/3a-Scan.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | im = cv.LoadImageM("../img/alkaline.jpg") 4 | 5 | #Access a specific pixel 6 | print im[3,3] 7 | 8 | print cv.Get1D(im, 3) 9 | 10 | print cv.Get2D(im, 3, 3) # etc.. 
11 | 12 | #cv.GetND(im, [3,3,3,3]) for a 4 dimension array 13 | 14 | col0 = cv.GetCol(im, 0) #Return the first column 15 | cols = cv.GetCols(im, 0, 10) # Return a matrix of the ten first column 16 | 17 | row = cv.GetRow(im, 0) #Return the first row (first pixels line) 18 | rows = cv.GetRows(im, 0, 10) # Return the ten first rows of the image 19 | 20 | #--------------------------- 21 | 22 | 23 | #Iterate throught pixels 24 | red_sum = 0 25 | green_sum = 0 26 | blue_sum = 0 27 | c = 0 28 | for i in range(0,im.rows-1): 29 | for j in range(0,im.cols-1): 30 | c= c +1 31 | red_sum += im[i,j][0] 32 | green_sum += im[i,j][1] 33 | blue_sum += im[i,j][2] 34 | print red_sum, green_sum, blue_sum, c 35 | 36 | dur = cv.GetTickCount() #Calculate time between two points 37 | print cv.GetTickCount() - dur 38 | 39 | 40 | 41 | #2 42 | li = cv.InitLineIterator(im, (0, 0), (im.rows, im.cols)) 43 | red_sum = 0 44 | green_sum = 0 45 | blue_sum = 0 46 | c = 0 47 | for (r, g, b) in li: 48 | red_sum += r 49 | green_sum += g 50 | blue_sum += b 51 | c = c + 1 52 | print red_sum, green_sum, blue_sum, c 53 | 54 | 55 | # 3 56 | li = cv.InitLineIterator(im, (0, 0), (im.rows, im.cols)) 57 | print [sum(c) for c in zip(*li)] 58 | 59 | -------------------------------------------------------------------------------- /image/3b-Noise.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv#or simply import cv 2 | 3 | import random 4 | 5 | im = cv.LoadImage("../img/lena.jpg") #or LoadImage and access pixel with Get2D/Set2D 6 | 7 | for k in range(5000): #Create 5000 noisy pixels 8 | i = random.randint(0,im.height-1) 9 | j = random.randint(0,im.width-1) 10 | color = (random.randrange(256),random.randrange(256),random.randrange(256)) 11 | im[i,j] = color 12 | 13 | cv.ShowImage("Noize", im) 14 | cv.WaitKey(0) 15 | -------------------------------------------------------------------------------- /image/3c-Pattern.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | im = cv.LoadImageM("../img/fruits.jpg",cv.CV_32F) 4 | 5 | def getDistance(pixel,refcolor): 6 | return abs( (pixel[0]-refcolor[0]) + (pixel[1]-refcolor[1]) + (pixel[2]-refcolor[2]) ) 7 | 8 | 9 | refcolor = (0,0,0) 10 | minDist = 100 11 | 12 | for row in range(im.rows): 13 | for col in range(im.cols): 14 | if getDistance(im[row,col], refcolor) 0) and (h > 0) 8 | 9 | class CamShiftDemo: 10 | 11 | def __init__(self): 12 | self.capture = cv.CaptureFromCAM(0) 13 | cv.NamedWindow( "CamShiftDemo", 1 ) 14 | cv.NamedWindow("Backprojection", 1) 15 | cv.NamedWindow( "Histogram", 1 ) 16 | 17 | cv.SetMouseCallback( "CamShiftDemo", self.on_mouse) #Instantiate call back for mouse event 18 | 19 | self.drag_start = None # Set to (x,y) when mouse starts drag 20 | self.track_window = None # Set to rect when the mouse drag finishes 21 | 22 | 23 | def hue_histogram_as_image(self, hist): 24 | """ Returns a nice representation of a hue histogram """ 25 | 26 | histimg_hsv = cv.CreateImage( (320,200), 8, 3) 27 | 28 | mybins = cv.CloneMatND(hist.bins) #Contain all values 29 | cv.Log(mybins, mybins) #Calculate logarithm of all values (so there are all above 0) 30 | 31 | (_, hi, _, _) = cv.MinMaxLoc(mybins) 32 | cv.ConvertScale(mybins, mybins, 255. 
/ hi) #Rescale all element to get the highest at 255 33 | 34 | w,h = cv.GetSize(histimg_hsv) 35 | hdims = cv.GetDims(mybins)[0] 36 | for x in range(w): 37 | xh = (180 * x) / (w - 1) # hue sweeps from 0-180 across the image 38 | val = int(mybins[int(hdims * x / w)] * h / 255) 39 | cv.Rectangle( histimg_hsv, (x, 0), (x, h-val), (xh,255,64), -1) 40 | cv.Rectangle( histimg_hsv, (x, h-val), (x, h), (xh,255,255), -1) 41 | 42 | histimg = cv.CreateImage( (320,200), 8, 3) #Convert image from hsv to RGB 43 | cv.CvtColor(histimg_hsv, histimg, cv.CV_HSV2BGR) 44 | return histimg 45 | 46 | def on_mouse(self, event, x, y, flags, param): 47 | if event == cv.CV_EVENT_LBUTTONDOWN: #when start pressing 48 | self.drag_start = (x, y) 49 | if event == cv.CV_EVENT_LBUTTONUP: #when release left click 50 | self.drag_start = None 51 | self.track_window = self.selection 52 | if self.drag_start: #in both cases compute coordinates 53 | xmin = min(x, self.drag_start[0]) 54 | ymin = min(y, self.drag_start[1]) 55 | xmax = max(x, self.drag_start[0]) 56 | ymax = max(y, self.drag_start[1]) 57 | self.selection = (xmin, ymin, xmax - xmin, ymax - ymin) 58 | 59 | 60 | def run(self): 61 | hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 ) 62 | backproject_mode = True 63 | 64 | while True: 65 | frame = cv.QueryFrame( self.capture ) 66 | 67 | # Convert to HSV and keep the hue 68 | hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) 69 | cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) 70 | self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1) 71 | cv.Split(hsv, self.hue, None, None, None) 72 | 73 | # Compute back projection 74 | backproject = cv.CreateImage(cv.GetSize(frame), 8, 1) 75 | cv.CalcArrBackProject( [self.hue], backproject, hist ) 76 | 77 | # Run the cam-shift (if the a window is set and != 0) 78 | if self.track_window and is_rect_nonzero(self.track_window): 79 | crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1) 80 | (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit) #Call the camshift !! 81 | self.track_window = rect #Put the current rectangle as the tracked area 82 | 83 | 84 | # If mouse is pressed, highlight the current selected rectangle and recompute histogram 85 | if self.drag_start and is_rect_nonzero(self.selection): 86 | sub = cv.GetSubRect(frame, self.selection) #Get specified area 87 | 88 | #Make the effect of background shadow when selecting a window 89 | save = cv.CloneMat(sub) 90 | cv.ConvertScale(frame, frame, 0.5) 91 | cv.Copy(save, sub) 92 | 93 | #Draw temporary rectangle 94 | x,y,w,h = self.selection 95 | cv.Rectangle(frame, (x,y), (x+w,y+h), (255,255,255)) 96 | 97 | #Take the same area but in hue image to calculate histogram 98 | sel = cv.GetSubRect(self.hue, self.selection ) 99 | cv.CalcArrHist( [sel], hist, 0) 100 | 101 | #Used to rescale the histogram with the max value (to draw it later on) 102 | (_, max_val, _, _) = cv.GetMinMaxHistValue( hist) 103 | if max_val != 0: 104 | cv.ConvertScale(hist.bins, hist.bins, 255. 
/ max_val) 105 | 106 | elif self.track_window and is_rect_nonzero(self.track_window): #If window set draw an elipseBox 107 | cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 ) 108 | 109 | 110 | cv.ShowImage( "CamShiftDemo", frame ) 111 | cv.ShowImage( "Backprojection", backproject) 112 | cv.ShowImage( "Histogram", self.hue_histogram_as_image(hist)) 113 | 114 | c = cv.WaitKey(7) % 0x100 115 | if c == 27: 116 | break 117 | 118 | 119 | if __name__=="__main__": 120 | demo = CamShiftDemo() 121 | demo.run() 122 | -------------------------------------------------------------------------------- /officialsamples/motempl.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import urllib2 3 | import sys 4 | import time 5 | from math import cos, sin 6 | import cv2.cv as cv 7 | 8 | CLOCKS_PER_SEC = 1.0 9 | MHI_DURATION = 1 10 | MAX_TIME_DELTA = 0.5 11 | MIN_TIME_DELTA = 0.05 12 | N = 4 13 | buf = range(10) 14 | last = 0 15 | mhi = None # MHI 16 | orient = None # orientation 17 | mask = None # valid orientation mask 18 | segmask = None # motion segmentation map 19 | storage = None # temporary storage 20 | 21 | def update_mhi(img, dst, diff_threshold): 22 | global last 23 | global mhi 24 | global storage 25 | global mask 26 | global orient 27 | global segmask 28 | timestamp = time.clock() / CLOCKS_PER_SEC # get current time in seconds 29 | size = cv.GetSize(img) # get current frame size 30 | idx1 = last 31 | if not mhi or cv.GetSize(mhi) != size: 32 | for i in range(N): 33 | buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1) 34 | cv.Zero(buf[i]) 35 | mhi = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1) 36 | cv.Zero(mhi) # clear MHI at the beginning 37 | orient = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1) 38 | segmask = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1) 39 | mask = cv.CreateImage(size,cv. IPL_DEPTH_8U, 1) 40 | 41 | cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale 42 | idx2 = (last + 1) % N # index of (last - (N-1))th frame 43 | last = idx2 44 | silh = buf[idx2] 45 | cv.AbsDiff(buf[idx1], buf[idx2], silh) # get difference between frames 46 | cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY) # and threshold it 47 | cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION) # update MHI 48 | cv.CvtScale(mhi, mask, 255./MHI_DURATION, 49 | (MHI_DURATION - timestamp)*255./MHI_DURATION) 50 | cv.Zero(dst) 51 | cv.Merge(mask, None, None, None, dst) 52 | cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3) 53 | if not storage: 54 | storage = cv.CreateMemStorage(0) 55 | seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA) 56 | for (area, value, comp_rect) in seq: 57 | if comp_rect[2] + comp_rect[3] > 100: # reject very small components 58 | color = cv.CV_RGB(255, 0,0) 59 | silh_roi = cv.GetSubRect(silh, comp_rect) 60 | mhi_roi = cv.GetSubRect(mhi, comp_rect) 61 | orient_roi = cv.GetSubRect(orient, comp_rect) 62 | mask_roi = cv.GetSubRect(mask, comp_rect) 63 | angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION) 64 | 65 | count = cv.Norm(silh_roi, None, cv.CV_L1, None) # calculate number of points within silhouette ROI 66 | if count < (comp_rect[2] * comp_rect[3] * 0.05): 67 | continue 68 | 69 | magnitude = 30. 
70 | center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2)) 71 | cv.Circle(dst, center, cv.Round(magnitude*1.2), color, 3, cv.CV_AA, 0) 72 | cv.Line(dst, 73 | center, 74 | (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)), 75 | cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))), 76 | color, 77 | 3, 78 | cv.CV_AA, 79 | 0) 80 | 81 | if __name__ == "__main__": 82 | motion = 0 83 | capture = 0 84 | 85 | if len(sys.argv)==1: 86 | capture = cv.CreateCameraCapture(0) 87 | elif len(sys.argv)==2 and sys.argv[1].isdigit(): 88 | capture = cv.CreateCameraCapture(int(sys.argv[1])) 89 | elif len(sys.argv)==2: 90 | capture = cv.CreateFileCapture(sys.argv[1]) 91 | 92 | if not capture: 93 | print "Could not initialize capturing..." 94 | sys.exit(-1) 95 | 96 | cv.NamedWindow("Motion", 1) 97 | while True: 98 | image = cv.QueryFrame(capture) 99 | if(image): 100 | if(not motion): 101 | motion = cv.CreateImage((image.width, image.height), 8, 3) 102 | cv.Zero(motion) 103 | #motion.origin = image.origin 104 | update_mhi(image, motion, 30) 105 | cv.ShowImage("Motion", motion) 106 | if(cv.WaitKey(10) != -1): 107 | break 108 | else: 109 | break 110 | cv.DestroyWindow("Motion") 111 | -------------------------------------------------------------------------------- /officialsamples/watershed.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import urllib2 3 | import sys 4 | import cv2.cv as cv 5 | 6 | class Sketcher: 7 | def __init__(self, windowname, dests): 8 | self.prev_pt = None 9 | self.windowname = windowname 10 | self.dests = dests 11 | cv.SetMouseCallback(self.windowname, self.on_mouse) 12 | 13 | def on_mouse(self, event, x, y, flags, param): 14 | pt = (x, y) 15 | if event == cv.CV_EVENT_LBUTTONUP or not (flags & cv.CV_EVENT_FLAG_LBUTTON): 16 | self.prev_pt = None 17 | elif event == cv.CV_EVENT_LBUTTONDOWN: 18 | self.prev_pt = pt 19 | elif event == cv.CV_EVENT_MOUSEMOVE and (flags & cv.CV_EVENT_FLAG_LBUTTON) : 20 | if self.prev_pt: 21 | for dst in self.dests: 22 | cv.Line(dst, self.prev_pt, pt, cv.ScalarAll(255), 5, 8, 0) 23 | self.prev_pt = pt 24 | cv.ShowImage(self.windowname, img) 25 | 26 | if __name__ == "__main__": 27 | 28 | img0 = cv.LoadImage("../img/fruits.jpg") 29 | 30 | rng = cv.RNG(-1) 31 | 32 | print "Hot keys:" 33 | print "\tESC - quit the program" 34 | print "\tr - restore the original image" 35 | print "\tw - run watershed algorithm" 36 | print "\t (before that, roughly outline several markers on the image)" 37 | 38 | cv.NamedWindow("image", 1) 39 | cv.NamedWindow("watershed transform", 1) 40 | 41 | img = cv.CloneImage(img0) 42 | img_gray = cv.CloneImage(img0) 43 | wshed = cv.CloneImage(img0) 44 | marker_mask = cv.CreateImage(cv.GetSize(img), 8, 1) 45 | markers = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_32S, 1) 46 | 47 | cv.CvtColor(img, marker_mask, cv.CV_BGR2GRAY) 48 | cv.CvtColor(marker_mask, img_gray, cv.CV_GRAY2BGR) 49 | 50 | cv.Zero(marker_mask) 51 | cv.Zero(wshed) 52 | 53 | cv.ShowImage("image", img) 54 | cv.ShowImage("watershed transform", wshed) 55 | 56 | sk = Sketcher("image", [img, marker_mask]) 57 | 58 | while True: 59 | c = cv.WaitKey(0) % 0x100 60 | if c == 27 or c == ord('q'): 61 | break 62 | if c == ord('r'): 63 | cv.Zero(marker_mask) 64 | cv.Copy(img0, img) 65 | cv.ShowImage("image", img) 66 | if c == ord('w'): 67 | storage = cv.CreateMemStorage(0) 68 | #cv.SaveImage("wshed_mask.png", marker_mask) 69 | #marker_mask = cv.LoadImage("wshed_mask.png", 0) 70 | contours = 
cv.FindContours(marker_mask, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE) 71 | def contour_iterator(contour): 72 | while contour: 73 | yield contour 74 | contour = contour.h_next() 75 | 76 | cv.Zero(markers) 77 | comp_count = 0 78 | for c in contour_iterator(contours): 79 | cv.DrawContours(markers, c, cv.ScalarAll(comp_count + 1), cv.ScalarAll(comp_count + 1), -1, -1, 8) 80 | comp_count += 1 81 | 82 | cv.Watershed(img0, markers) 83 | 84 | cv.Set(wshed, cv.ScalarAll(255)) 85 | 86 | # paint the watershed image 87 | color_tab = [(cv.RandInt(rng) % 180 + 50, cv.RandInt(rng) % 180 + 50, cv.RandInt(rng) % 180 + 50) for i in range(comp_count)] 88 | for j in range(markers.height): 89 | for i in range(markers.width): 90 | idx = markers[j, i] 91 | if idx != -1: 92 | wshed[j, i] = color_tab[int(idx - 1)] 93 | 94 | cv.AddWeighted(wshed, 0.5, img_gray, 0.5, 0, wshed) 95 | cv.ShowImage("watershed transform", wshed) 96 | -------------------------------------------------------------------------------- /video/1-Readvideo.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | capture = cv.CaptureFromFile('../img/paulvideo.avi') 4 | 5 | nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)) 6 | 7 | #CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream 8 | #CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream 9 | 10 | fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS) 11 | 12 | wait = int(1/fps * 1000/1) 13 | 14 | duration = (nbFrames * fps) / 1000 15 | 16 | print 'Num. Frames = ', nbFrames 17 | print 'Frame Rate = ', fps, 'fps' 18 | print 'Duration = ', duration, 'sec' 19 | 20 | 21 | for f in xrange( nbFrames ): 22 | 23 | frameImg = cv.QueryFrame(capture) 24 | 25 | print cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_POS_FRAMES) # Number of the frame 26 | 27 | cv.ShowImage("The Video", frameImg) 28 | cv.WaitKey(wait) 29 | -------------------------------------------------------------------------------- /video/2-WriteVideo.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | 4 | capture=cv.CaptureFromCAM(0) 5 | temp=cv.QueryFrame(capture) 6 | writer=cv.CreateVideoWriter("output.avi", cv.CV_FOURCC("D", "I", "B", " "), 5, cv.GetSize(temp), 1) 7 | #On linux I used to take "M","J","P","G" as fourcc 8 | 9 | count=0 10 | while count<50: 11 | print count 12 | image=cv.QueryFrame(capture) 13 | cv.WriteFrame(writer, image) 14 | cv.ShowImage('Image_Window',image) 15 | cv.WaitKey(1) 16 | count+=1 17 | ''' 18 | 19 | capture = cv.CaptureFromFile('img/mic.avi') 20 | 21 | nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)) 22 | width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)) 23 | height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)) 24 | fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS) 25 | codec = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FOURCC) 26 | 27 | wait = int(1/fps * 1000/1) #Compute the time to wait between each frame query 28 | 29 | duration = (nbFrames * fps) / 1000 #Compute duration 30 | 31 | print 'Num. 
Frames = ', nbFrames 32 | print 'Frame Rate = ', fps, 'fps' 33 | 34 | writer=cv.CreateVideoWriter("img/new.avi", int(codec), int(fps), (width,height), 1) #Create writer with same parameters 35 | 36 | cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_POS_FRAMES,80) #Set the number of frames 37 | 38 | for f in xrange( nbFrames - 80 ): #Just recorded the 80 first frames of the video 39 | 40 | frame = cv.QueryFrame(capture) 41 | 42 | print cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_POS_FRAMES) 43 | 44 | cv.WriteFrame(writer, frame) 45 | 46 | cv.WaitKey(wait) 47 | ''' -------------------------------------------------------------------------------- /video/3-Processvideo_Canny.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | capture = cv.CaptureFromFile('img/paulvideo.avi') 4 | 5 | nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)) 6 | fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS) 7 | wait = int(1/fps * 1000/1) 8 | 9 | dst = cv.CreateImage((int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)), 10 | int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))), 8, 1) 11 | 12 | for f in xrange( nbFrames ): 13 | 14 | frame = cv.QueryFrame(capture) 15 | 16 | cv.CvtColor(frame, dst, cv.CV_BGR2GRAY) 17 | cv.Canny(dst, dst, 125, 350) 18 | cv.Threshold(dst, dst, 128, 255, cv.CV_THRESH_BINARY_INV) 19 | 20 | cv.ShowImage("The Video", frame) 21 | cv.ShowImage("The Dst", dst) 22 | cv.WaitKey(wait) -------------------------------------------------------------------------------- /video/4-Haar_webcam.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | capture=cv.CaptureFromCAM(0) 4 | 5 | hc = cv.Load("../haarcascades/haarcascade_frontalface_alt.xml") 6 | 7 | while True: 8 | frame=cv.QueryFrame(capture) 9 | faces = cv.HaarDetectObjects(frame, hc, cv.CreateMemStorage(), 1.2,2, cv.CV_HAAR_DO_CANNY_PRUNING, (0,0) ) 10 | 11 | for ((x,y,w,h),stub) in faces: 12 | cv.Rectangle(frame,(int(x),int(y)),(int(x)+w,int(y)+h),(0,255,0),2,0) 13 | 14 | cv.ShowImage("Window",frame) 15 | c=cv.WaitKey(1) 16 | if c==27 or c == 1048603: #If Esc entered 17 | break -------------------------------------------------------------------------------- /video/5-Trackingvideo-easy.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | capture = cv.CaptureFromFile('img/micnew.avi') 4 | 5 | #-- Informations about the video -- 6 | nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)) 7 | fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS) 8 | wait = int(1/fps * 1000/1) 9 | width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)) 10 | height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)) 11 | #For recording 12 | #codec = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FOURCC) 13 | #writer=cv.CreateVideoWriter("img/output.avi", int(codec), int(fps), (width,height), 1) #Create writer with same parameters 14 | #---------------------------------- 15 | 16 | prev_gray = cv.CreateImage((width,height), 8, 1) #Will hold the frame at t-1 17 | gray = cv.CreateImage((width,height), 8, 1) # Will hold the current frame 18 | 19 | prevPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1) #Will hold the pyr frame at t-1 20 | currPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1) # idem at t 21 | 22 | max_count = 500 23 | qLevel= 0.01 24 | minDist = 10 
25 | prev_points = [] #Points at t-1 26 | curr_points = [] #Points at t 27 | lines=[] #To keep all the lines overtime 28 | 29 | for f in xrange( nbFrames ): 30 | 31 | frame = cv.QueryFrame(capture) #Take a frame of the video 32 | 33 | cv.CvtColor(frame, gray, cv.CV_BGR2GRAY) #Convert to gray 34 | output = cv.CloneImage(frame) 35 | 36 | prev_points = cv.GoodFeaturesToTrack(gray, None, None, max_count, qLevel, minDist) #Find points on the image 37 | 38 | #Calculate the movement using the previous and the current frame using the previous points 39 | curr_points, status, err = cv.CalcOpticalFlowPyrLK(prev_gray, gray, prevPyr, currPyr, prev_points, (10, 10), 3, (cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS,20, 0.03), 0) 40 | 41 | 42 | #If points status are ok and distance not negligible keep the point 43 | k = 0 44 | for i in range(len(curr_points)): 45 | nb = abs( int(prev_points[i][0])-int(curr_points[i][0]) ) + abs( int(prev_points[i][1])-int(curr_points[i][1]) ) 46 | if status[i] and nb > 2 : 47 | prev_points[k] = prev_points[i] 48 | curr_points[k] = curr_points[i] 49 | k += 1 50 | 51 | prev_points = prev_points[:k] 52 | curr_points = curr_points[:k] 53 | #At the end only interesting points are kept 54 | 55 | #Draw all the previously kept lines otherwise they would be lost the next frame 56 | for (pt1, pt2) in lines: 57 | cv.Line(frame, pt1, pt2, (255,255,255)) 58 | 59 | #Draw the lines between each points at t-1 and t 60 | for prevpoint, point in zip(prev_points,curr_points): 61 | prevpoint = (int(prevpoint[0]),int(prevpoint[1])) 62 | cv.Circle(frame, prevpoint, 15, 0) 63 | point = (int(point[0]),int(point[1])) 64 | cv.Circle(frame, point, 3, 255) 65 | cv.Line(frame, prevpoint, point, (255,255,255)) 66 | lines.append((prevpoint,point)) #Append current lines to the lines list 67 | 68 | 69 | cv.Copy(gray, prev_gray) #Put the current frame prev_gray 70 | prev_points = curr_points 71 | 72 | cv.ShowImage("The Video", frame) 73 | #cv.WriteFrame(writer, frame) 74 | cv.WaitKey(wait) 75 | 76 | -------------------------------------------------------------------------------- /video/6-Trackingvideo-harder.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | capture = cv.CaptureFromFile('img/micnew.avi') 4 | 5 | #-- Informations about the video -- 6 | nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)) 7 | fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS) 8 | wait = int(1/fps * 1000/1) 9 | width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)) 10 | height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)) 11 | #For recording 12 | #codec = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FOURCC) 13 | #writer=cv.CreateVideoWriter("img/output.avi", int(codec), int(fps), (width,height), 1) #Create writer with same parameters 14 | #---------------------------------- 15 | 16 | prev_gray = cv.CreateImage((width,height), 8, 1) #Will hold the frame at t-1 17 | gray = cv.CreateImage((width,height), 8, 1) # Will hold the current frame 18 | 19 | output = cv.CreateImage((width,height), 8, 3) 20 | 21 | prevPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1) 22 | currPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1) 23 | 24 | max_count = 500 25 | qLevel= 0.01 26 | minDist = 10 27 | 28 | begin = True 29 | 30 | initial = [] 31 | features = [] 32 | prev_points = [] 33 | curr_points = [] 34 | 35 | for f in xrange( nbFrames ): 36 | 37 | frame = cv.QueryFrame(capture) 38 | 39 | 
cv.CvtColor(frame, gray, cv.CV_BGR2GRAY) #Convert to gray 40 | cv.Copy(frame, output) 41 | 42 | 43 | if (len(prev_points) <= 10): #Try to get more points 44 | #Detect points on the image 45 | features = cv.GoodFeaturesToTrack(gray, None, None, max_count, qLevel, minDist) 46 | prev_points.extend(features) #Add the new points to list 47 | initial.extend(features) #Idem 48 | 49 | if begin: 50 | cv.Copy(gray, prev_gray) #Now we have two frames to compare 51 | begin = False 52 | 53 | #Compute movement 54 | curr_points, status, err = cv.CalcOpticalFlowPyrLK(prev_gray, gray, prevPyr, currPyr, prev_points, (10, 10), 3, (cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS,20, 0.03), 0) 55 | 56 | #If points status are ok and distance not negligible keep the point 57 | k = 0 58 | for i in range(len(curr_points)): 59 | nb = abs( int(prev_points[i][0])-int(curr_points[i][0]) ) + abs( int(prev_points[i][1])-int(curr_points[i][1]) ) 60 | if status[i] and nb > 2 : 61 | initial[k] = initial[i] 62 | curr_points[k] = curr_points[i] 63 | k += 1 64 | 65 | curr_points = curr_points[:k] 66 | initial = initial[:k] 67 | #At the end only interesting points are kept 68 | 69 | #Draw the line between the first position of a point and the 70 | #last recorded position of the same point 71 | for i in range(len(curr_points)): 72 | cv.Line(output, (int(initial[i][0]),int(initial[i][1])), (int(curr_points[i][0]),int(curr_points[i][1])), (255,255,255)) 73 | cv.Circle(output, (int(curr_points[i][0]),int(curr_points[i][1])), 3, (255,255,255)) 74 | 75 | 76 | cv.Copy(gray, prev_gray) 77 | prev_points = curr_points 78 | 79 | 80 | cv.ShowImage("The Video", output) 81 | cv.WriteFrame(writer, output) 82 | cv.WaitKey(wait) -------------------------------------------------------------------------------- /video/7-raw_compare.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | capture=cv.CaptureFromCAM(0) 4 | 5 | frame1 = cv.QueryFrame(capture) 6 | frame1gray = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U) 7 | cv.CvtColor(frame1, frame1gray, cv.CV_RGB2GRAY) 8 | 9 | res = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U) 10 | 11 | frame2gray = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U) 12 | 13 | while True: 14 | frame2 = cv.QueryFrame(capture) 15 | cv.CvtColor(frame2, frame2gray, cv.CV_RGB2GRAY) 16 | 17 | cv.Smooth(frame2gray, frame2gray, cv.CV_BLUR, 12,12) 18 | 19 | cv.Cmp(frame1gray, frame2gray, res, cv.CV_CMP_EQ) #Call the compare with the tow frames 20 | 21 | cv.ShowImage("Image", frame2) 22 | cv.ShowImage("Res", res) 23 | 24 | cv.Copy(frame2gray, frame1gray) 25 | c=cv.WaitKey(1) 26 | if c==27: #Break if user enters 'Esc'. 
27 | break 28 | 29 | -------------------------------------------------------------------------------- /video/8-raw_compare2.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | capture=cv.CaptureFromCAM(0) 4 | 5 | frame1 = cv.QueryFrame(capture) 6 | frame1gray = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U) 7 | cv.CvtColor(frame1, frame1gray, cv.CV_RGB2GRAY) 8 | 9 | res = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U) 10 | 11 | frame2gray = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U) 12 | 13 | w= frame2gray.width 14 | h= frame2gray.height 15 | nb_pixels = frame2gray.width * frame2gray.height 16 | 17 | while True: 18 | frame2 = cv.QueryFrame(capture) 19 | cv.CvtColor(frame2, frame2gray, cv.CV_RGB2GRAY) 20 | 21 | cv.AbsDiff(frame1gray, frame2gray, res) 22 | cv.ShowImage("After AbsDiff", res) 23 | 24 | cv.Smooth(res, res, cv.CV_BLUR, 5,5) 25 | element = cv.CreateStructuringElementEx(5*2+1, 5*2+1, 5, 5, cv.CV_SHAPE_RECT) 26 | cv.MorphologyEx(res, res, None, None, cv.CV_MOP_OPEN) 27 | cv.MorphologyEx(res, res, None, None, cv.CV_MOP_CLOSE) 28 | cv.Threshold(res, res, 10, 255, cv.CV_THRESH_BINARY_INV) 29 | 30 | cv.ShowImage("Image", frame2) 31 | cv.ShowImage("Res", res) 32 | 33 | #----------- 34 | nb=0 35 | for y in range(h): 36 | for x in range(w): 37 | if res[y,x] == 0.0: 38 | nb += 1 39 | avg = (nb*100.0)/nb_pixels 40 | #print "Average: ",avg, "%\r", 41 | if avg >= 5: 42 | print "Something is moving !" 43 | #----------- 44 | 45 | 46 | cv.Copy(frame2gray, frame1gray) 47 | c=cv.WaitKey(1) 48 | if c==27: #Break if user enters 'Esc'. 49 | break 50 | 51 | -------------------------------------------------------------------------------- /video/9-Background.py: -------------------------------------------------------------------------------- 1 | import cv2.cv as cv 2 | 3 | capture = cv.CaptureFromFile('img/mic.avi') 4 | nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)) 5 | fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS) 6 | wait = int(1/fps * 1000/1) 7 | width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)) 8 | height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)) 9 | 10 | gray = cv.CreateImage((width,height), cv.IPL_DEPTH_8U, 1) 11 | 12 | background = cv.CreateMat(height, width, cv.CV_32F) 13 | backImage = cv.CreateImage((width,height), cv.IPL_DEPTH_8U, 1) 14 | foreground = cv.CreateImage((width,height), cv.IPL_DEPTH_8U, 1) 15 | output = cv.CreateImage((width,height), 8, 1) 16 | 17 | begin = True 18 | threshold = 10 19 | 20 | for f in xrange( nbFrames ): 21 | frame = cv.QueryFrame( capture ) 22 | 23 | cv.CvtColor(frame, gray, cv.CV_BGR2GRAY) 24 | 25 | if begin: 26 | cv.Convert(gray, background) #Convert gray into background format 27 | begin = False 28 | 29 | cv.Convert(background, backImage) #convert existing background to backImage 30 | 31 | cv.AbsDiff(backImage, gray, foreground) #Absdiff to get differences 32 | 33 | cv.Threshold(foreground, output, threshold, 255, cv.CV_THRESH_BINARY_INV) 34 | 35 | cv.Acc(foreground, background,output) #Accumulate to background 36 | 37 | cv.ShowImage("Output", output) 38 | cv.ShowImage("Gray", gray) 39 | c = cv.WaitKey(wait) 40 | if c==27: #Break if user enters 'Esc'. 
        break

--------------------------------------------------------------------------------
/video/getFPSWebcam.py:
--------------------------------------------------------------------------------
import cv2.cv as cv
from time import time

capture=cv.CaptureFromCAM(0)
temp=cv.QueryFrame(capture)

startat=10
sum = 0
count=0

t1= time()
t2 = 0

while count<30:
    print count
    image=cv.QueryFrame(capture)
    t2 = time()
    val = t2 - t1
    print val
    #Ignore the first ten frames because tests show that the elapsed time is abnormally low at startup
    if count > startat:
        sum += val #Add the current value
        print "Avg: ", sum / (count - startat) #Print the running average
    t1 = t2
    count+=1

avg = 1/ (sum / (count - startat))
fps = cv.Round(avg)

print fps, "fps"
--------------------------------------------------------------------------------
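
Note: getFPSWebcam.py measures the frame rate by timing frame grabs because many webcams report an unreliable value through CV_CAP_PROP_FPS (often 0). A minimal sketch of that idea, assuming the same legacy cv2.cv API and a camera at index 0 (illustrative only, not a file of the repository):

import cv2.cv as cv
from time import time

capture = cv.CaptureFromCAM(0)

reported = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS) #Rate reported by the driver, often 0 or wrong for webcams

start = time()
for i in range(30): #Time 30 frame grabs
    cv.QueryFrame(capture)
measured = 30 / (time() - start) #Frames divided by elapsed seconds

print "Reported FPS:", reported
print "Measured FPS:", cv.Round(measured)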