├── 2cams.py
├── README.md
├── cams-sidebyside.py
├── camtest.cpp
├── colortrack.py
├── display.py
├── face.xml
├── facedetect.py
├── facedetect.sh
├── filter.py
├── make.sh
├── multicam.py
├── piVision.py
├── roverFetch.py
└── stopservos.sh
/2cams.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | cap1 = cv2.VideoCapture(0)
5 | cap2 = cv2.VideoCapture(1)
6 |
7 | while True:
  8 |     _,cam1 = cap1.read()
  9 |     _,cam2 = cap2.read()
 10 |
 11 |     cv2.imshow("Cam1",cam1)
 12 |     cv2.imshow("Cam2",cam2)
 13 |
 14 |     if cv2.waitKey(1) == ord('q'):
 15 |         break
 16 | cap1.release()
 17 | cap2.release()
 18 | cv2.destroyAllWindows()
19 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | piVision
2 | ========
3 |
  4 | Python-based vision tests - face / object detection and tracking with the Pi camera
5 |
6 | opencv: sudo apt-get install python-opencv
7 |
  8 | filter.py - GUI demo of basic Python image filters:
9 |
 10 | takes a photo with the Raspberry Pi camera and gives a choice of
 11 | blur, contour, find_edges, emboss, edge_enhance
12 |
13 |
14 | facedetect.sh - starter script for facedetect.py
15 |
16 |
17 | facedetect.py - face detection using the pi camera
18 |
19 |
20 |
21 | draws a red box around each face detected
 22 | (supports multiple faces per frame)
 23 | currently ~8 fps
24 |
25 | colortrack.py - tracking by colour
26 |
27 |
28 |
29 | blob object tracking based on colour range
 30 | double-click an object in the main window to select its colour
31 |
32 |
33 | display.py - openCV display test
34 |
35 | repeatedly loads frames from the pi camera to test frame rate
36 | using BMP or JPEG
37 |
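 38 | Quick check that python-opencv and a camera are working
 39 | (a minimal sketch - assumes a V4L2 / uv4l camera at device index 0,
 40 | adjust the index for your setup):
 41 |
 42 |     import cv2
 43 |     cap = cv2.VideoCapture(0)      # open the first camera device
 44 |     ok, frame = cap.read()         # grab a single frame
 45 |     print "frame grabbed:", ok
 46 |     cap.release()
 47 |
 48 | The tracking scripts pan a servo (and drive the rover) by writing to
 49 | /dev/servoblaster, e.g. echo "0=100" > /dev/servoblaster, so ServoBlaster
 50 | needs to be installed for facedetect.py, colortrack.py and roverFetch.py.
 51 |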
--------------------------------------------------------------------------------
/cams-sidebyside.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | cap1 = cv2.VideoCapture(0)
5 | cap2 = cv2.VideoCapture(1)
6 |
7 | while True:
  8 |     _,cam1 = cap1.read()
  9 |     _,cam2 = cap2.read()
 10 |     sidebyside = np.hstack((cam1,cam2))
 11 |     cv2.imshow("Multicam",sidebyside)
 12 |
 13 |     if cv2.waitKey(1) == ord('q'):
 14 |         break
 15 | cap1.release()
 16 | cap2.release()
 17 | cv2.destroyAllWindows()
18 |
--------------------------------------------------------------------------------
/camtest.cpp:
--------------------------------------------------------------------------------
1 | /* Compile with
2 |
  3 | g++ -std=c++0x camtest.cpp -o camtest -lopencv_core -lopencv_highgui
  4 | -L/usr/lib/uv4l/uv4lext/armv6l -luv4lext -Wl,-rpath,'/usr/lib/uv4l/uv4lext/armv6l'
  5 | (or just run make.sh)
6 | */
7 |
8 | #include "opencv2/highgui/highgui.hpp"
  9 | #include <iostream>
 10 | #include <chrono>
11 |
12 | using namespace std;
13 | using namespace chrono;
14 | using namespace cv;
15 |
16 | int main(int argc, char** argv){
17 | VideoCapture cap(-1);
18 | if (!cap.isOpened())
19 | {
20 | cout << "Cannot open camera" << endl;
21 | return -1;
22 | }
23 | cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
24 | cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
25 |
26 | namedWindow("Output",CV_WINDOW_AUTOSIZE);
27 |
28 | unsigned long f = 0;
29 | auto start = monotonic_clock::now();
30 | while (1)
31 | {
32 | Mat frame;
33 | bool bSuccess = cap.read(frame);
34 |
35 | if (!bSuccess)
36 | {
37 | cout << "Cannot read a frame from camera" << endl;
38 | break;
39 | }
40 |
 41 | auto elapse = duration_cast<seconds>(monotonic_clock::now() - start).count();
42 | ++f;
43 | auto fps = elapse ? f / elapse : 0;
44 | if (!(elapse % 5) && fps) // print the framerate every 5s, for 1s
45 | cout << "fps: " << fps << ", total frames: " << f
46 | << " elapsed time: " << elapse << "s\n";
47 |
48 | imshow("Output", frame);
49 |
50 | if (waitKey(30) == 27)
51 | {
52 | cout << "Exit, fps: " << fps << endl;
53 | break;
54 | }
55 | }
56 | return 0;
57 | }
58 |
59 |
--------------------------------------------------------------------------------
/colortrack.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | """
  3 | colortrack.py - track an object by its colour
  4 |
  5 | first double-click on the colour to select it,
  6 | then track any blobs with that colour
7 | """
8 |
9 | import os,subprocess,glob,time
10 | #import cv2.cv as cv
11 | import cv
12 |
13 | posx=0
14 | posy=0
15 | global h,s,v,i,im,evente
16 | h,s,v,i,r,g,b,j,evente=0,0,0,0,0,0,0,0,0
17 |
18 | # Mouse callback function
19 | def my_mouse_callback(event,x,y,flags,param):
20 | global evente,h,s,v,i,r,g,b,j
21 | evente=event
22 | if event==cv.CV_EVENT_LBUTTONDBLCLK: # Here event is left mouse button double-clicked
23 | hsv=cv.CreateImage(cv.GetSize(frame),8,3)
24 | cv.CvtColor(frame,hsv,cv.CV_BGR2HSV)
25 | (h,s,v,i)=cv.Get2D(hsv,y,x)
26 | (r,g,b,j)=cv.Get2D(frame,y,x)
27 | print "x,y =",x,y
28 | print "hsv= ",cv.Get2D(hsv,y,x) # Gives you HSV at clicked point
29 | print "im= ",cv.Get2D(frame,y,x) # Gives you RGB at clicked point
30 |
31 | # Thresholding function
32 | def getthresholdedimg(im):
 33 | '''Take a colour frame, convert it to HSV for easier colour detection, threshold it so the selected colour range is white and everything else black, and return the thresholded image'''
34 | imghsv=cv.CreateImage(cv.GetSize(im),8,3)
35 | cv.CvtColor(im,imghsv,cv.CV_BGR2HSV)
36 | imgthreshold=cv.CreateImage(cv.GetSize(im),8,1)
37 | cv.InRangeS(imghsv,cv.Scalar(h,100,10),cv.Scalar(h+20,255,255),imgthreshold)
38 | return imgthreshold
39 |
40 | def getpositions(im):
 41 | '''Return the leftmost, rightmost, topmost and bottommost coordinates of the white blob in the thresholded image'''
42 | leftmost=0
43 | rightmost=0
44 | topmost=0
45 | bottommost=0
46 | temp=0
47 | for i in range(im.width):
48 | col=cv.GetCol(im,i)
49 | if cv.Sum(col)[0]!=0.0:
50 | rightmost=i
51 | if temp==0:
52 | leftmost=i
53 | temp=1
54 | for i in range(im.height):
55 | row=cv.GetRow(im,i)
56 | if cv.Sum(row)[0]!=0.0:
57 | bottommost=i
58 | if temp==1:
59 | topmost=i
60 | temp=2
61 | return (leftmost,rightmost,topmost,bottommost)
62 |
63 | raspicam = 1
64 | if raspicam:
65 |
66 | command = "raspistill -tl 65 -n -rot 180 -hf -o /run/shm/image%d.jpg -w 320 -h 240 -e bmp >/dev/null"
67 | p=subprocess.Popen(command,shell = True)
68 |
69 | # wait until we have at least 2 image files
70 |
71 | for timeout in range (5):
72 | files = filter(os.path.isfile, glob.glob('/run/shm/' + "image*.jpg"))
73 | if len(files) > 1:
74 | break
75 | print "waiting for images"
76 | time.sleep(5)
77 | if ( not len (files) > 1):
78 | print "No images"
79 | exit (1)
80 |
81 | # get (last-1) recent image
82 | files.sort(key=lambda x: os.path.getmtime(x))
83 | imagefile = (files[-2])
84 |
85 | frame=cv.LoadImage(imagefile,cv.CV_LOAD_IMAGE_COLOR)
86 |
87 | else:
88 | #usb cam
89 |
90 | capture=cv.CaptureFromCAM(0)
91 | cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320 );
92 | cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240 );
93 |
94 | frame=cv.QueryFrame(capture)
95 |
96 | test=cv.CreateImage(cv.GetSize(frame),8,3)
97 | imdraw=cv.CreateImage(cv.GetSize(frame),8,3) # We make all drawings on imdraw.
98 |
99 | cv.NamedWindow("pick")
100 | cv.SetMouseCallback("pick",my_mouse_callback)
101 |
102 | cv.NamedWindow("output")
103 | cv.NamedWindow("threshold")
104 |
105 | h = 8; # orange
106 | pan = 100
107 | pandir = 0
108 |
109 | while(1):
110 |
111 | if raspicam:
112 | if p.poll() is not None:
113 | #exit (0)
114 | print "restarting raspistill"
115 | p=subprocess.Popen(command,shell=True)
116 | files = filter(os.path.isfile, glob.glob('/run/shm/' + "image*jpg"))
117 | files.sort(key=lambda x: os.path.getmtime(x))
118 | imagefile = (files[-2])
119 |
120 | frame=cv.LoadImage(imagefile,cv.CV_LOAD_IMAGE_COLOR)
121 | else:
122 | #usb cam
123 | frame=cv.QueryFrame(capture)
124 |
125 | thresh_img=getthresholdedimg(frame) # We get coordinates from thresh_img
126 |
127 | cv.Erode(thresh_img,thresh_img,None,1) # Eroding removes small noises
128 | cv.Dilate(thresh_img,thresh_img,None,1) # Dilate
129 |
130 | cv.ShowImage("threshold",thresh_img)
131 | storage = cv.CreateMemStorage(0)
132 | contour = cv.FindContours(thresh_img, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_NONE)
133 | points = []
134 |
135 | while contour:
136 | # Draw bounding rectangles
137 | bound_rect = cv.BoundingRect(list(contour))
138 |
139 |
140 | if (cv.ContourArea(contour) > 200):
141 | pt1 = (bound_rect[0], bound_rect[1])
142 | pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
143 | points.append(pt1)
144 | points.append(pt2)
145 | cv.Rectangle(frame, pt1, pt2, cv.CV_RGB(255,0,0), 1)
146 |
147 | obj_mid = bound_rect[0] + ( bound_rect[2] /2 )
148 | frame_mid = frame.width / 2
149 | mid = frame_mid - obj_mid
150 |
151 | # only move if not near middle
152 |
153 | offset = abs(mid)
154 | if offset > 20:
155 | pandir= (mid / offset)
156 | else:
157 | pandir=0
158 | contour = contour.h_next()
159 |
160 | (leftmost,rightmost,topmost,bottommost)=getpositions(thresh_img)
161 | if (leftmost-rightmost!=0) or (topmost-bottommost!=0):
162 | lastx=posx
163 | lasty=posy
164 | posx=cv.Round((rightmost+leftmost)/2)
165 | posy=cv.Round((bottommost+topmost)/2)
166 | if lastx!=0 and lasty!=0:
167 | cv.Line(imdraw,(posx,posy),(lastx,lasty),(b,g,r))
168 | cv.Circle(imdraw,(posx,posy),5,(b,g,r),-1)
169 |
170 | cv.Add(test,imdraw,test) # Adding imdraw on test keeps all lines there on the test frame. If not, we don't get full drawing, instead we get only that fraction of line at the moment.
171 |
172 | cv.ShowImage("pick", frame)
173 | cv.ShowImage("output",test)
174 |
175 | pan = int (pan + pandir)
176 | if pan > 180:
177 | pan = 180
178 | if pan < 0:
179 | pan = 0
180 |
181 | os.system('echo "0="' + str(pan) + ' >/dev/servoblaster')
182 |
183 | if cv.WaitKey(1)>= 0:
184 | break
185 | if evente == cv.CV_EVENT_LBUTTONDBLCLK:
186 | print "double click"
187 | cv.Set(test, cv.CV_RGB(0,0,0));
188 |
189 |
--------------------------------------------------------------------------------
/display.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | import os,subprocess,glob,time
4 | import cv2.cv as cv
5 | from optparse import OptionParser
6 |
7 | cv.NamedWindow("result", 1)
8 |
9 | raspicam=0
10 | if raspicam:
11 | command = "raspistill -tl 65 -n -rot 180 -hf -o /run/shm/image%d.jpg -w 640 -h 480 -e jpg"
12 | p=subprocess.Popen(command,shell = True)
13 |
14 | # wait until we have at least 2 image files
15 |
16 | for timeout in range (5):
17 | files = filter(os.path.isfile, glob.glob('/run/shm/' + "image*.jpg"))
18 | if len(files) > 1:
19 | break
20 | print "waiting for images"
21 | time.sleep(1)
22 | if ( not len (files) > 1):
23 | print "No images"
24 | exit (1)
25 | else:
26 | capture=cv.CaptureFromCAM(0)
27 | cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320 );
28 | cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240 );
29 |
30 | oldimage = None
31 | if True:
32 | detected = 0
33 | frame = None
34 | while True:
35 |
36 | t = cv.GetTickCount()
37 |
38 | if raspicam:
39 | if p.poll() is not None:
40 | print "restarting raspistill"
41 | p=subprocess.Popen(command,shell=True)
42 |
43 | files = filter(os.path.isfile, glob.glob('/run/shm/' + "image*jpg"))
44 | files.sort(key=lambda x: os.path.getmtime(x))
45 | imagefile = (files[-2])
46 |
47 | #delete old files
48 | for filename in files:
49 | if (filename == imagefile):
50 | break
51 | # os.remove(filename)
52 |
53 | if (imagefile == oldimage):
54 | #no new image from raspistill
55 | time.sleep (0.1)
56 | else:
57 | # uncomment for spare cpu (reduce frame rate)
58 | # time.sleep (0.1)
59 | frame=cv.LoadImage(imagefile,cv.CV_LOAD_IMAGE_COLOR)
60 | oldimage = imagefile
61 | else:
62 | frame=cv.QueryFrame(capture)
63 | cv.ShowImage("result", frame)
64 | if cv.WaitKey(10) >= 0:
65 | break
66 | t = cv.GetTickCount() - t
67 | print "capture = %gfps" % (1000 / (t/(cv.GetTickFrequency()*1000.)))
 68 | if raspicam: p.kill()
69 | cv.DestroyWindow("result")
70 |
71 | oldtime=0
72 | for filename in files:
73 | mtime = os.path.getmtime(filename)
74 | print mtime - oldtime
75 | oldtime = mtime
76 |
--------------------------------------------------------------------------------
/facedetect.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | """
3 | Face and object detection using haar-like features.
4 | Finds faces in a camera image or video stream and displays a red box around them.
5 |
6 | Based on a python implementation by: Roman Stanchak, James Bowman
7 | """
8 | import os,subprocess,glob,time
9 | import cv2.cv as cv
10 | from optparse import OptionParser
11 |
12 | # Parameters for haar detection
13 | # From the API:
14 | # The default parameters (scale_factor=2, min_neighbors=3, flags=0) are tuned
15 | # for accurate yet slow object detection. For a faster operation on real video
16 | # images the settings are:
17 | # scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
 18 | #       min_size=<minimum possible face size
 19 |
 20 | # detection parameters used by detect_and_draw() below
 21 | # (values here are assumed defaults - tune as needed)
 22 | min_size = (20, 20)
 23 | haar_scale = 1.2
 24 | min_neighbors = 2
 25 | haar_flags = 0
 26 |
 27 | # width the frame is scaled down to before running the detector (assumed)
 28 | smallwidth = 160
 29 |
 30 | # pan servo position, start near the centre of its range
 31 | pan = 100
 32 |
33 |
34 | def detect_and_draw(img, cascade, detected):
35 |
36 | global pan
37 |
38 | # allocate temporary images
39 |
40 | gray = cv.CreateImage((img.width,img.height), 8, 1)
41 | image_scale = img.width / smallwidth
42 |
43 | small_img = cv.CreateImage((cv.Round(img.width / image_scale), cv.Round (img.height / image_scale)), 8, 1)
44 | # gray = cv.CreateImage((img.width,img.height), 8, 1)
45 | image_scale = img.width / smallwidth
46 | # small_img = cv.CreateImage((cv.Round(img.width / image_scale), cv.Round (img.height / image_scale)), 8, 1)
47 |
48 | # convert color input image to grayscale
49 | cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
50 |
51 | # scale input image for faster processing
52 | cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
53 |
54 | cv.EqualizeHist(small_img, small_img)
55 |
56 | if(cascade):
57 | faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0), haar_scale, min_neighbors, haar_flags, min_size)
58 | if faces:
59 | if detected == 0:
60 | # os.system('festival --tts hi &')
61 | detected = 1
62 | for ((x, y, w, h), n) in faces:
63 | # the input to cv.HaarDetectObjects was resized, so scale the
64 | # bounding box of each face and convert it to two CvPoints
65 | pt1 = (int(x * image_scale), int(y * image_scale))
66 | pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
67 | cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
68 | print "Face at: ", pt1[0], ",", pt2[0], "\t", pt1[1], ",", pt2[1]
69 | # find amount needed to pan/tilt
70 | span = pt1[0]
71 | mid = smallwidth /2
72 |
73 | if span < mid:
74 | print "left", mid -span
75 | pandir = -1
76 | else:
77 | print "right", span - mid
78 | pandir = 1
79 | pan = pan + pandir
80 | if pan > 180:
81 | pan = 180
82 | if pan < 0:
83 | pan = 0
84 | os.system('echo "0="' + str(pan) + ' > /dev/servoblaster')
85 | else:
86 | if detected == 1:
87 | #print "Last seen at: ", pt1[0], ",", pt2[0], "\t", pt1[1], ",", pt2[1]
88 | #os.system('festival --tts bye &')
89 | status = "just disappeared"
90 | detected = 0
91 |
92 | # fps = int ( (t/(cv.GetTickFrequency()) / 1000))
93 | # font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX,1,1,0,3,8)
94 | # cv.PutText(img,"% fps" % fps,(img.width/2,img.height-10), font,255)
95 | cv.ShowImage("result", img)
96 | return detected
97 |
98 | if __name__ == '__main__':
99 | parser = OptionParser(usage = "usage: %prog [options] [filename|camera_index]")
100 | parser.add_option("-c", "--cascade", action="store", dest="cascade", type="str", help="Haar cascade file, default %default", default = "../data/haarcascades/haarcascade_frontalface_alt.xml")
101 | (options, args) = parser.parse_args()
102 |
103 | cascade = cv.Load(options.cascade)
104 |
105 | cv.NamedWindow("result", 1)
106 |
107 |
108 | raspicam = 0
109 |
110 | if raspicam:
111 | # fps = 1000 / tl
112 | command = "raspistill -tl 65 -n -rot 180 -hf -o /run/shm/image%d.jpg -w 320 -h 240 -e bmp"
113 | p=subprocess.Popen(command,shell=True)
114 |
115 | # wait until we have at least 2 image files
116 |
117 | while True:
118 | files = filter(os.path.isfile, glob.glob('/run/shm/' + "image*jpg"))
119 | if len(files) > 1:
120 | break
121 | print "waiting for images"
122 | time.sleep(0.5)
123 | else:
124 | #usb cam
125 | capture = cv.CaptureFromCAM(0)
126 | frame = cv.QueryFrame(capture)
127 | cv.SetCaptureProperty( capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320 )
128 | cv.SetCaptureProperty( capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240 )
129 |
130 | if True:
131 | detected = 0
132 | frame = None
133 | while True:
134 |
135 | t = cv.GetTickCount()
136 | if raspicam:
137 | # restart raspistill when it exits
138 | if p.poll() is not None:
139 | print "restarting raspistill"
140 | p=subprocess.Popen(command,shell=True)
141 |
142 | # list most recent images,
143 | # and get the 2nd most recent image
144 | # since this is the last complete one
145 |
146 | files = filter(os.path.isfile, glob.glob('/run/shm/' + "image*jpg"))
147 | files.sort(key=lambda x: os.path.getmtime(x))
148 | imagefile = (files[-2])
149 |
150 | # print imagefile
151 | frame=cv.LoadImage(imagefile,cv.CV_LOAD_IMAGE_COLOR)
152 |
153 |
154 | else:
155 | #usb cam
156 | frame=cv.QueryFrame(capture)
157 |
158 | detected = detect_and_draw(frame, cascade, detected)
159 |
160 | # uncomment if you want some spare cpu - reduced from 7fps to 5fps
161 | # time.sleep(0.1)
162 |
163 | t = cv.GetTickCount() - t
164 | fps = int (1000 / (t/(cv.GetTickFrequency() * 1000)))
165 | # print fps
166 |
167 | #exit when any key pressed
168 | if cv.WaitKey(10) >= 0:
169 | break
170 |
171 | cv.DestroyWindow("result")
172 |
--------------------------------------------------------------------------------
/facedetect.sh:
--------------------------------------------------------------------------------
1 | python facedetect.py --cascade=face.xml 0
2 |
--------------------------------------------------------------------------------
/filter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | # quick demo of some python image filters
4 | # using raspberry pi camera
5 |
6 | import subprocess
7 | import Tkinter as tk
8 | from PIL import Image,ImageFilter,ImageChops,ImageTk
9 |
10 | imagefile = "/dev/shm/image.jpg"
11 | w = 800
12 | h = 600
13 |
14 | def takephoto():
15 | command = "raspistill -n -w %s -h %s -t 0 -o %s" % (w, h, imagefile)
16 | subprocess.check_output(command, shell=True)
17 | image1 = Image.open(imagefile)
18 | return image1
19 |
20 | def newphoto():
21 | global image1
22 | image1 = takephoto()
23 |
24 | tkimage1 = ImageTk.PhotoImage(image1)
25 | panel1.configure(image=tkimage1)
26 | panel1.image = tkimage1
27 |
28 | def invert():
29 | global image1
30 | image1= ImageChops.invert(image1)
31 |
32 | tkimage1 = ImageTk.PhotoImage(image1)
33 | panel1.configure(image=tkimage1)
34 | panel1.image = tkimage1
35 |
36 | def grayscale():
37 | global image1
38 | r, g, b = image1.split()
39 | image1 = Image.merge("RGB", (g,g,g))
40 |
41 | tkimage1 = ImageTk.PhotoImage(image1)
42 | panel1.configure(image=tkimage1)
43 | panel1.image = tkimage1
44 |
45 | def dofilter (theimage,thefilter):
46 | global image1
47 | image1 = image1.filter(thefilter)
48 | tkimage1 = ImageTk.PhotoImage(image1)
49 | panel1.configure(image=tkimage1)
50 | panel1.image = tkimage1
51 |
52 | # Setup a window
53 | root = tk.Tk()
54 | root.title('Image')
55 |
56 | image1 = takephoto()
57 | tkimage1 = ImageTk.PhotoImage(image1)
58 |
59 | w = tkimage1.width()
60 | h = tkimage1.height()
61 | root.geometry("%dx%d+%d+%d" % (w, h, 0, 0))
62 |
63 | # root has no image argument, so use a label as a panel
64 | panel1 = tk.Label(root, image=tkimage1)
65 | panel1.pack(side='top', fill='both', expand='yes')
66 |
67 | # save the panel's image from 'garbage collection'
68 | panel1.image = tkimage1
69 |
70 | # Add some buttons
71 | buttonrow = tk.Frame(root)
72 | buttonrow.place(y=0,x=0)
73 |
74 | button = tk.Button(buttonrow, text='CAMERA',command = newphoto)
75 | button.pack(side='left',)
76 | button = tk.Button(buttonrow, text='INVERT',command = invert)
77 | button.pack(side='left',)
78 | button = tk.Button(buttonrow, text='GRAY',command = grayscale)
79 | button.pack(side='left',)
80 | # add some filter buttons
81 | button = tk.Button(buttonrow, text='BLUR',command = lambda: dofilter(image1,ImageFilter.BLUR))
82 | button.pack(side='left')
83 | button = tk.Button(buttonrow, text='CONTOUR',command = lambda: dofilter(image1,ImageFilter.CONTOUR))
84 | button.pack(side='left')
85 | button = tk.Button(buttonrow, text='FIND_EDGES',command = lambda: dofilter(image1,ImageFilter.FIND_EDGES))
86 | button.pack(side='left')
87 | button = tk.Button(buttonrow, text='EMBOSS',command = lambda: dofilter(image1,ImageFilter.EMBOSS))
88 | button.pack(side='left')
89 | button = tk.Button(buttonrow, text='EDGE_ENHANCE',command = lambda: dofilter(image1,ImageFilter.EDGE_ENHANCE))
90 | button.pack(side='left')
91 |
92 | # resize event:
93 |
94 | def onResizeWindow(event):
95 | global image1
96 | image1 = image1.resize((event.width,event.height),Image.NEAREST)
97 | tkimage1 = ImageTk.PhotoImage(image1)
98 | panel1.configure(image=tkimage1)
99 | panel1.image = tkimage1
 100 | # root.bind( '<Configure>', onResizeWindow )
101 |
102 | root.mainloop()
103 |
--------------------------------------------------------------------------------
/make.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
 2 | g++ -std=c++0x camtest.cpp -o camtest -lopencv_core -lopencv_highgui -L/usr/lib/uv4l/uv4lext/armv6l -luv4lext -Wl,-rpath,'/usr/lib/uv4l/uv4lext/armv6l'
3 |
--------------------------------------------------------------------------------
/multicam.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | cams = []
5 | for i in range(0,9):
  6 |     cap = cv2.VideoCapture(i)
  7 |     if cap.isOpened():
  8 |         cams.append(cap)
  9 |         print "found camera at index " , i
 10 |
 11 | while True:
 12 |     name = "Camera "
 13 |     for cam in cams:
 14 |         name = name + "."
 15 |         _,frame = cam.read()
 16 |         cv2.imshow(name,frame)
 17 |
 18 |     if cv2.waitKey(1) == ord('q'):
 19 |         break
 20 |
 21 | for cam in cams:
 22 |     cam.release()
 23 | cv2.destroyAllWindows()
 24 |
--------------------------------------------------------------------------------
/piVision.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import StringIO
3 | import subprocess
4 | import os
5 | import time
6 | from datetime import datetime
7 | from PIL import Image
8 |
  9 | # Original code written by brainflakes and modified to exit the
 10 | # image-scanning loop as soon as the sensitivity value is exceeded;
 11 | # this can speed up taking the larger photo when motion is detected early in the scan
12 |
13 | # Motion detection settings:
14 | # need future changes to read values dynamically via command line parameter or xml file
15 | # --------------------------
16 | # Threshold - (how much a pixel has to change by to be marked as "changed")
17 | # Sensitivity - (how many changed pixels before capturing an image) needs to be higher if noisy view
18 | # ForceCapture - (whether to force an image to be captured every forceCaptureTime seconds)
19 | # filepath - location of folder to save photos
20 | # filenamePrefix - string that prefixes the file name for easier identification of files.
21 | threshold = 10
22 | sensitivity = 180
23 | forceCapture = True
24 | forceCaptureTime = 60 * 60 # Once an hour
25 | filepath = "/home/pi/"
26 | filenamePrefix = "capture"
27 | # File photo size settings
28 | saveWidth = 1280
29 | saveHeight = 960
30 | diskSpaceToReserve = 40 * 1024 * 1024 # Keep 40 mb free on disk
31 |
32 | # Capture a small test image (for motion detection)
33 | def captureTestImage():
34 | command = "raspistill -n -w %s -h %s -t 0 -e bmp -o -" % (100, 75)
35 | imageData = StringIO.StringIO()
36 | imageData.write(subprocess.check_output(command, shell=True))
37 | imageData.seek(0)
38 | im = Image.open(imageData)
39 | buffer = im.load()
40 | imageData.close()
41 | return im, buffer
42 |
43 | # Save a full size image to disk
44 | def saveImage(width, height, diskSpaceToReserve):
45 | keepDiskSpaceFree(diskSpaceToReserve)
46 | time = datetime.now()
47 | filename = filepath + "/" + filenamePrefix + "-%04d%02d%02d-%02d%02d%02d.jpg" % ( time.year, time.month, time.day, time.hour, time.minute, time.second)
48 | subprocess.call("raspistill -n -hf -w 1296 -h 972 -t 0 -e jpg -q 15 -o %s" % filename, shell=True)
49 | print "Captured %s" % filename
50 |
51 | # Keep free space above given level
52 | def keepDiskSpaceFree(bytesToReserve):
53 | if (getFreeSpace() < bytesToReserve):
54 | for filename in sorted(os.listdir(".")):
 55 | if filename.startswith(filenamePrefix) and filename.endswith(".jpg"):
56 | os.remove(filename)
57 | print "Deleted %s to avoid filling disk" % filename
58 | if (getFreeSpace() > bytesToReserve):
59 | return
60 |
61 | # Get available disk space
62 | def getFreeSpace():
63 | st = os.statvfs(".")
64 | du = st.f_bavail * st.f_frsize
65 | return du
66 |
67 | # Get first image
68 | image1, buffer1 = captureTestImage()
69 |
70 | # Reset last capture time
71 | lastCapture = time.time()
72 |
73 | # added this to give visual feedback of camera motion capture activity. Can be removed as required
74 | os.system('clear')
75 | print " Motion Detection Started"
76 | print " ------------------------"
77 | print "Pixel Threshold (How much) = " + str(threshold)
78 | print "Sensitivity (changed Pixels) = " + str(sensitivity)
79 | print "File Path for Image Save = " + filepath
80 | print "---------- Motion Capture File Activity --------------"
81 |
82 | while (True):
83 |
84 | # Get comparison image
85 | image2, buffer2 = captureTestImage()
86 |
87 | # Count changed pixels
88 | changedPixels = 0
89 | for x in xrange(0, 100):
90 | # Scan one line of image then check sensitivity for movement
91 | for y in xrange(0, 75):
92 | # Just check green channel as it's the highest quality channel
93 | pixdiff = abs(buffer1[x,y][1] - buffer2[x,y][1])
94 | if pixdiff > threshold:
95 | changedPixels += 1
96 |
97 | # Changed logic - If movement sensitivity exceeded then
98 | # Save image and Exit before full image scan complete
99 | if changedPixels > sensitivity:
100 | lastCapture = time.time()
101 | saveImage(saveWidth, saveHeight, diskSpaceToReserve)
102 | break
103 | continue
104 |
105 | # Check force capture
106 | if forceCapture:
107 | if time.time() - lastCapture > forceCaptureTime:
108 | changedPixels = sensitivity + 1
109 |
110 | # Swap comparison buffers
111 | image1 = image2
112 | buffer1 = buffer2
113 |
--------------------------------------------------------------------------------
/roverFetch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | """
  3 | roverFetch.py - track an object by its colour (rover version of colortrack.py)
  4 |
  5 | first double-click on the colour to select it,
  6 | then track any blobs with that colour
7 | """
8 |
9 | import os,subprocess,glob,time
10 | #import cv2.cv as cv
11 | import cv
12 |
13 | posx=0
14 | posy=0
15 | global h,s,v,i,im,evente
16 | h,s,v,i,r,g,b,j,evente=0,0,0,0,0,0,0,0,0
17 |
18 | # Mouse callback function
19 | def my_mouse_callback(event,x,y,flags,param):
20 | global evente,h,s,v,i,r,g,b,j
21 | evente=event
22 | if event==cv.CV_EVENT_LBUTTONDBLCLK: # Here event is left mouse button double-clicked
23 | hsv=cv.CreateImage(cv.GetSize(frame),8,3)
24 | cv.CvtColor(frame,hsv,cv.CV_BGR2HSV)
25 | (h,s,v,i)=cv.Get2D(hsv,y,x)
26 | (r,g,b,j)=cv.Get2D(frame,y,x)
27 | print "x,y =",x,y
28 | print "hsv= ",cv.Get2D(hsv,y,x) # Gives you HSV at clicked point
29 | print "im= ",cv.Get2D(frame,y,x) # Gives you RGB at clicked point
30 |
31 | # Thresholding function
32 | def getthresholdedimg(im):
 33 | '''Take a colour frame, convert it to HSV for easier colour detection, threshold it so the selected colour range is white and everything else black, and return the thresholded image'''
34 | imghsv=cv.CreateImage(cv.GetSize(im),8,3)
35 | cv.CvtColor(im,imghsv,cv.CV_BGR2HSV)
36 | imgthreshold=cv.CreateImage(cv.GetSize(im),8,1)
37 | cv.InRangeS(imghsv,cv.Scalar(h,100,10),cv.Scalar(h+20,255,255),imgthreshold)
38 | return imgthreshold
39 |
40 | def getpositions(im):
 41 | '''Return the leftmost, rightmost, topmost and bottommost coordinates of the white blob in the thresholded image'''
42 | leftmost=0
43 | rightmost=0
44 | topmost=0
45 | bottommost=0
46 | temp=0
47 | for i in range(im.width):
48 | col=cv.GetCol(im,i)
49 | if cv.Sum(col)[0]!=0.0:
50 | rightmost=i
51 | if temp==0:
52 | leftmost=i
53 | temp=1
54 | for i in range(im.height):
55 | row=cv.GetRow(im,i)
56 | if cv.Sum(row)[0]!=0.0:
57 | bottommost=i
58 | if temp==1:
59 | topmost=i
60 | temp=2
61 | return (leftmost,rightmost,topmost,bottommost)
62 |
63 | raspicam = 1
64 | if raspicam:
65 |
66 | command = "raspistill -tl 65 -n -hf -o /run/shm/image%d.jpg -w 320 -h 240 -e bmp >/dev/null"
67 | p=subprocess.Popen(command,shell = True)
68 |
69 | # wait until we have at least 2 image files
70 |
71 | for timeout in range (5):
72 | files = filter(os.path.isfile, glob.glob('/run/shm/' + "image*.jpg"))
73 | if len(files) > 1:
74 | break
75 | print "waiting for images"
76 | time.sleep(5)
77 | if ( not len (files) > 1):
78 | print "No images"
79 | exit (1)
80 |
81 | # get (last-1) recent image
82 | files.sort(key=lambda x: os.path.getmtime(x))
83 | imagefile = (files[-2])
84 |
85 | frame=cv.LoadImage(imagefile,cv.CV_LOAD_IMAGE_COLOR)
86 |
87 | else:
88 | #usb cam
89 |
90 | capture=cv.CaptureFromCAM(0)
91 | cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320 );
92 | cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240 );
93 |
94 | frame=cv.QueryFrame(capture)
95 |
96 | test=cv.CreateImage(cv.GetSize(frame),8,3)
97 | imdraw=cv.CreateImage(cv.GetSize(frame),8,3) # We make all drawings on imdraw.
98 | gui = 0
99 | if (gui):
100 | cv.NamedWindow("pick")
101 | cv.SetMouseCallback("pick",my_mouse_callback)
102 |
103 | cv.NamedWindow("output")
104 | cv.NamedWindow("threshold")
105 |
106 | h = 2 # orange ball
107 | pan = 100
108 | pandir = 0
109 | detected = 0
110 |
111 | print "started"
112 | while(1):
113 |
114 | if raspicam:
115 | if p.poll() is not None:
116 | print "restarting raspistill"
117 | #p=subprocess.Popen(command,shell=True)
118 | os.system('rm /run/shm/image*')
119 | os.system('echo "0=0" >/dev/servoblaster')
120 | os.system('echo "7=0" >/dev/servoblaster')
121 | exit (0)
122 |
123 | files = filter(os.path.isfile, glob.glob('/run/shm/' + "image*jpg"))
124 | files.sort(key=lambda x: os.path.getmtime(x))
125 | imagefile = (files[-2])
126 |
127 | frame=cv.LoadImage(imagefile,cv.CV_LOAD_IMAGE_COLOR)
128 | else:
129 | #usb cam
130 | frame=cv.QueryFrame(capture)
131 |
132 | thresh_img=getthresholdedimg(frame) # We get coordinates from thresh_img
133 |
134 | cv.Erode(thresh_img,thresh_img,None,1) # Eroding removes small noises
135 | cv.Dilate(thresh_img,thresh_img,None,1) # Dilate
136 |
137 | if (gui):
138 | cv.ShowImage("threshold",thresh_img)
139 | storage = cv.CreateMemStorage(0)
140 | contour = cv.FindContours(thresh_img, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_NONE)
141 | points = []
142 |
143 | while contour:
144 | # Draw bounding rectangles
145 | bound_rect = cv.BoundingRect(list(contour))
146 |
147 |
148 | if (cv.ContourArea(contour) > 300):
149 | pt1 = (bound_rect[0], bound_rect[1])
150 | pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
151 | points.append(pt1)
152 | points.append(pt2)
153 | cv.Rectangle(frame, pt1, pt2, cv.CV_RGB(255,0,0), 1)
154 |
155 | obj_mid = bound_rect[0] + ( bound_rect[2] /2 )
156 | frame_mid = frame.width / 2
157 | mid = frame_mid - obj_mid
158 |
159 | # only move if not near middle
160 |
161 | offset = abs(mid)
162 | if offset > 0:
163 | pandir = (mid / offset)
164 | else:
165 | pandir=0
166 | contour = contour.h_next()
167 |
168 | (leftmost,rightmost,topmost,bottommost)=getpositions(thresh_img)
169 | if (leftmost-rightmost!=0) or (topmost-bottommost!=0):
170 | lastx=posx
171 | lasty=posy
172 | posx=cv.Round((rightmost+leftmost)/2)
173 | posy=cv.Round((bottommost+topmost)/2)
174 | if lastx!=0 and lasty!=0:
175 | cv.Line(imdraw,(posx,posy),(lastx,lasty),(b,g,r))
176 | cv.Circle(imdraw,(posx,posy),5,(b,g,r),-1)
177 |
178 | cv.Add(test,imdraw,test) # Adding imdraw on test keeps all lines there on the test frame. If not, we don't get full drawing, instead we get only that fraction of line at the moment.
179 |
180 | if (gui):
181 | cv.ShowImage("pick", frame)
182 | cv.ShowImage("output",test)
183 |
184 | pan = int (pan + pandir)
185 | if pan > 180:
186 | pan = 180
187 | if pan < 0:
188 | pan = 0
189 |
190 | if (pandir == 1):
191 | print "left"
192 | os.system('echo "0=100" >/dev/servoblaster')
193 | os.system('echo "7=100" >/dev/servoblaster')
194 | time.sleep(0.05)
195 | os.system('echo "0=100" >/dev/servoblaster')
196 | os.system('echo "7=180" >/dev/servoblaster')
197 | time.sleep(0.5)
198 | if (pandir == -1):
199 | print "right"
200 | os.system('echo "0=180" >/dev/servoblaster')
201 | os.system('echo "7=180" >/dev/servoblaster')
202 | time.sleep(0.05)
203 | os.system('echo "0=100" >/dev/servoblaster')
204 | os.system('echo "7=180" >/dev/servoblaster')
205 | time.sleep(0.5)
206 |
207 | os.system('echo "0=180" >/dev/servoblaster')
208 | os.system('echo "7=100" >/dev/servoblaster')
209 |
210 | pandir = 0;
211 | if (gui):
212 | if cv.WaitKey(1)>= 0:
213 | break
214 | if evente == cv.CV_EVENT_LBUTTONDBLCLK:
215 | print "double click"
216 | cv.Set(test, cv.CV_RGB(0,0,0));
217 |
218 |
--------------------------------------------------------------------------------
/stopservos.sh:
--------------------------------------------------------------------------------
 1 | echo "0=0" | sudo tee /dev/servoblaster >/dev/null
 2 | echo "7=0" | sudo tee /dev/servoblaster >/dev/null
3 |
--------------------------------------------------------------------------------