├── .bzrignore ├── .gitignore ├── MANIFEST ├── README ├── capture_picture.py ├── capture_picture_delayed.py ├── capture_video.py ├── list_devices.py ├── setup.py └── v4l2capture.c /.bzrignore: -------------------------------------------------------------------------------- 1 | ./build 2 | ./dist 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | dist 3 | -------------------------------------------------------------------------------- /MANIFEST: -------------------------------------------------------------------------------- 1 | README 2 | capture_picture.py 3 | capture_picture_delayed.py 4 | list_devices.py 5 | setup.py 6 | v4l2capture.c 7 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | python-v4l2capture 1.4.x 2 | Python extension to capture video with video4linux2 3 | 4 | 2009, 2010, 2011 Fredrik Portstrom 5 | 2011 Joakim Gebart 6 | 7 | I, the copyright holder of this file, hereby release it into the 8 | public domain. This applies worldwide. In case this is not legally 9 | possible: I grant anyone the right to use this work for any purpose, 10 | without any conditions, unless such conditions are required by law. 11 | 12 | Introduction 13 | ============ 14 | 15 | python-v4l2capture is a slim and easy to use Python extension for 16 | capturing video with video4linux2. It supports libv4l to convert any 17 | image format to RGB or YUV420. 18 | 19 | this fork of python-v4l2capture: https://github.com/gebart/python-v4l2capture 20 | 21 | original python-v4l2capture: http://fredrik.jemla.eu/v4l2capture 22 | 23 | libv4l: http://freshmeat.net/projects/libv4l 24 | 25 | Installation 26 | ============ 27 | 28 | v4l2capture requires libv4l by default. 
You can compile v4l2capture 29 | without libv4l, but that reduces image format support to YUYV input 30 | and RGB output only. You can do so by erasing ', libraries = ["v4l2"]' 31 | in setup.py and erasing '#define USE_LIBV4L' in v4l2capture.c. 32 | 33 | python-v4l2capture uses distutils. 34 | To build: ./setup.py build 35 | To build and install: ./setup.py install 36 | 37 | Example 38 | ======= 39 | 40 | See capture_picture.py, capture_picture_delayed.py and list_devices.py. 41 | 42 | Change log 43 | ========== 44 | 45 | (see git log for latest changes) 46 | 47 | 1.4 (2011-03-18) - Added support for YUV420 output. 48 | 49 | 1.3 (2010-07-21) - Added set of capabilities to the return value of 50 | get_info. Updated list_devices.py. 51 | 52 | 1.2 (2010-04-01) - Forked example script into capture_picture.py and 53 | capture_picture_delayed.py. 54 | 55 | 1.1 (2009-11-03) - Updated URL and documentation. 56 | 57 | 1.0 (2009-02-28) - Initial release. 58 | -------------------------------------------------------------------------------- /capture_picture.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # python-v4l2capture 4 | # 5 | # This file is an example on how to capture a picture with 6 | # python-v4l2capture. 7 | # 8 | # 2009, 2010 Fredrik Portstrom 9 | # 10 | # I, the copyright holder of this file, hereby release it into the 11 | # public domain. This applies worldwide. In case this is not legally 12 | # possible: I grant anyone the right to use this work for any 13 | # purpose, without any conditions, unless such conditions are 14 | # required by law. 15 | 16 | import Image 17 | import select 18 | import v4l2capture 19 | 20 | # Open the video device. 21 | video = v4l2capture.Video_device("/dev/video0") 22 | 23 | # Suggest an image size to the device. The device may choose and 24 | # return another size if it doesn't support the suggested one. 
25 | size_x, size_y = video.set_format(1280, 1024) 26 | 27 | # Create a buffer to store image data in. This must be done before 28 | # calling 'start' if v4l2capture is compiled with libv4l2. Otherwise 29 | # raises IOError. 30 | video.create_buffers(1) 31 | 32 | # Send the buffer to the device. Some devices require this to be done 33 | # before calling 'start'. 34 | video.queue_all_buffers() 35 | 36 | # Start the device. This lights the LED if it's a camera that has one. 37 | video.start() 38 | 39 | # Wait for the device to fill the buffer. 40 | select.select((video,), (), ()) 41 | 42 | # The rest is easy :-) 43 | image_data = video.read() 44 | video.close() 45 | image = Image.fromstring("RGB", (size_x, size_y), image_data) 46 | image.save("image.jpg") 47 | print "Saved image.jpg (Size: " + str(size_x) + " x " + str(size_y) + ")" 48 | -------------------------------------------------------------------------------- /capture_picture_delayed.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # python-v4l2capture 4 | # 5 | # This file is an example on how to capture a picture with 6 | # python-v4l2capture. It waits between starting the video device and 7 | # capturing the picture, to get a good picture from cameras that 8 | # require a delay to get enough brightness. It does not work with some 9 | # devices that require starting to capture pictures immediately when 10 | # the device is started. 11 | # 12 | # 2009, 2010 Fredrik Portstrom 13 | # 14 | # I, the copyright holder of this file, hereby release it into the 15 | # public domain. This applies worldwide. In case this is not legally 16 | # possible: I grant anyone the right to use this work for any 17 | # purpose, without any conditions, unless such conditions are 18 | # required by law. 19 | 20 | import Image 21 | import select 22 | import time 23 | import v4l2capture 24 | 25 | # Open the video device. 
26 | video = v4l2capture.Video_device("/dev/video0") 27 | 28 | # Suggest an image size to the device. The device may choose and 29 | # return another size if it doesn't support the suggested one. 30 | size_x, size_y = video.set_format(1280, 1024) 31 | 32 | # Create a buffer to store image data in. This must be done before 33 | # calling 'start' if v4l2capture is compiled with libv4l2. Otherwise 34 | # raises IOError. 35 | video.create_buffers(1) 36 | 37 | # Start the device. This lights the LED if it's a camera that has one. 38 | video.start() 39 | 40 | # Wait a little. Some cameras take a few seconds to get bright enough. 41 | time.sleep(2) 42 | 43 | # Send the buffer to the device. 44 | video.queue_all_buffers() 45 | 46 | # Wait for the device to fill the buffer. 47 | select.select((video,), (), ()) 48 | 49 | # The rest is easy :-) 50 | image_data = video.read() 51 | video.close() 52 | image = Image.fromstring("RGB", (size_x, size_y), image_data) 53 | image.save("image.jpg") 54 | print "Saved image.jpg (Size: " + str(size_x) + " x " + str(size_y) + ")" 55 | -------------------------------------------------------------------------------- /capture_video.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # python-v4l2capture 4 | # 5 | # This file is an example on how to capture a mjpeg video with 6 | # python-v4l2capture. 7 | # 8 | # 2009, 2010 Fredrik Portstrom 9 | # 10 | # I, the copyright holder of this file, hereby release it into the 11 | # public domain. This applies worldwide. In case this is not legally 12 | # possible: I grant anyone the right to use this work for any 13 | # purpose, without any conditions, unless such conditions are 14 | # required by law. 15 | 16 | import Image 17 | import select 18 | import v4l2capture 19 | import time 20 | 21 | # Open the video device. 22 | video = v4l2capture.Video_device("/dev/video0") 23 | 24 | # Suggest an image size to the device. 
The device may choose and 25 | # return another size if it doesn't support the suggested one. 26 | size_x, size_y = video.set_format(1280, 1024, fourcc='MJPG') 27 | 28 | # Create a buffer to store image data in. This must be done before 29 | # calling 'start' if v4l2capture is compiled with libv4l2. Otherwise 30 | # raises IOError. 31 | video.create_buffers(30) 32 | 33 | # Send the buffer to the device. Some devices require this to be done 34 | # before calling 'start'. 35 | video.queue_all_buffers() 36 | 37 | # Start the device. This lights the LED if it's a camera that has one. 38 | video.start() 39 | 40 | stop_time = time.time() + 10.0 41 | with open('video.mjpg', 'wb') as f: 42 | while stop_time >= time.time(): 43 | # Wait for the device to fill the buffer. 44 | select.select((video,), (), ()) 45 | 46 | # The rest is easy :-) 47 | image_data = video.read_and_queue() 48 | f.write(image_data) 49 | 50 | video.close() 51 | print "Saved video.mjpg (Size: " + str(size_x) + " x " + str(size_y) + ")" 52 | -------------------------------------------------------------------------------- /list_devices.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # python-v4l2capture 4 | # 5 | # 2009, 2010 Fredrik Portstrom 6 | # 7 | # I, the copyright holder of this file, hereby release it into the 8 | # public domain. This applies worldwide. In case this is not legally 9 | # possible: I grant anyone the right to use this work for any 10 | # purpose, without any conditions, unless such conditions are 11 | # required by law. 
12 | 13 | import os 14 | import v4l2capture 15 | file_names = [x for x in os.listdir("/dev") if x.startswith("video")] 16 | file_names.sort() 17 | for file_name in file_names: 18 | path = "/dev/" + file_name 19 | print path 20 | try: 21 | video = v4l2capture.Video_device(path) 22 | driver, card, bus_info, capabilities = video.get_info() 23 | print " driver: %s\n card: %s" \ 24 | "\n bus info: %s\n capabilities: %s" % ( 25 | driver, card, bus_info, ", ".join(capabilities)) 26 | video.close() 27 | except IOError, e: 28 | print " " + str(e) 29 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # python-v4l2capture 4 | # 5 | # 2009, 2010, 2011 Fredrik Portstrom 6 | # 7 | # I, the copyright holder of this file, hereby release it into the 8 | # public domain. This applies worldwide. In case this is not legally 9 | # possible: I grant anyone the right to use this work for any 10 | # purpose, without any conditions, unless such conditions are 11 | # required by law. 
12 | 13 | from distutils.core import Extension, setup 14 | setup( 15 | name = "v4l2capture", 16 | version = "1.5", 17 | author = "Fredrik Portstrom", 18 | author_email = "fredrik@jemla.se", 19 | url = "http://fredrik.jemla.eu/v4l2capture", 20 | description = "Capture video with video4linux2", 21 | long_description = "python-v4l2capture is a slim and easy to use Python " 22 | "extension for capturing video with video4linux2.", 23 | license = "Public Domain", 24 | classifiers = [ 25 | "License :: Public Domain", 26 | "Programming Language :: C"], 27 | ext_modules = [ 28 | Extension("v4l2capture", ["v4l2capture.c"], libraries = ["v4l2"])]) 29 | -------------------------------------------------------------------------------- /v4l2capture.c: -------------------------------------------------------------------------------- 1 | // python-v4l2capture 2 | // Python extension to capture video with video4linux2 3 | // 4 | // 2009, 2010, 2011 Fredrik Portstrom 5 | // 6 | // I, the copyright holder of this file, hereby release it into the 7 | // public domain. This applies worldwide. In case this is not legally 8 | // possible: I grant anyone the right to use this work for any 9 | // purpose, without any conditions, unless such conditions are 10 | // required by law. 
11 | 12 | #define USE_LIBV4L 13 | 14 | #include <Python.h> 15 | #include <fcntl.h> 16 | #include <linux/videodev2.h> 17 | #include <sys/mman.h> 18 | 19 | #ifdef USE_LIBV4L 20 | #include <libv4l2.h> 21 | #else 22 | #include <sys/ioctl.h> 23 | #define v4l2_close close 24 | #define v4l2_ioctl ioctl 25 | #define v4l2_mmap mmap 26 | #define v4l2_munmap munmap 27 | #define v4l2_open open 28 | #endif 29 | 30 | #ifndef Py_TYPE 31 | #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) 32 | #endif 33 | 34 | 35 | #define ASSERT_OPEN if(self->fd < 0) \ 36 | { \ 37 | PyErr_SetString(PyExc_ValueError, \ 38 | "I/O operation on closed file"); \ 39 | return NULL; \ 40 | } 41 | 42 | #define CLEAR(x) memset(&(x), 0, sizeof(x)) 43 | 44 | struct buffer { 45 | void *start; 46 | size_t length; 47 | }; 48 | 49 | typedef struct { 50 | PyObject_HEAD 51 | int fd; 52 | struct buffer *buffers; 53 | int buffer_count; 54 | } Video_device; 55 | 56 | struct capability { 57 | int id; 58 | const char *name; 59 | }; 60 | 61 | static struct capability capabilities[] = { 62 | { V4L2_CAP_ASYNCIO, "asyncio" }, 63 | { V4L2_CAP_AUDIO, "audio" }, 64 | { V4L2_CAP_HW_FREQ_SEEK, "hw_freq_seek" }, 65 | { V4L2_CAP_RADIO, "radio" }, 66 | { V4L2_CAP_RDS_CAPTURE, "rds_capture" }, 67 | { V4L2_CAP_READWRITE, "readwrite" }, 68 | { V4L2_CAP_SLICED_VBI_CAPTURE, "sliced_vbi_capture" }, 69 | { V4L2_CAP_SLICED_VBI_OUTPUT, "sliced_vbi_output" }, 70 | { V4L2_CAP_STREAMING, "streaming" }, 71 | { V4L2_CAP_TUNER, "tuner" }, 72 | { V4L2_CAP_VBI_CAPTURE, "vbi_capture" }, 73 | { V4L2_CAP_VBI_OUTPUT, "vbi_output" }, 74 | { V4L2_CAP_VIDEO_CAPTURE, "video_capture" }, 75 | { V4L2_CAP_VIDEO_OUTPUT, "video_output" }, 76 | { V4L2_CAP_VIDEO_OUTPUT_OVERLAY, "video_output_overlay" }, 77 | { V4L2_CAP_VIDEO_OVERLAY, "video_overlay" } 78 | }; 79 | 80 | static int my_ioctl(int fd, int request, void *arg) 81 | { 82 | // Retry ioctl until it returns without being interrupted. 
83 | 84 | for(;;) 85 | { 86 | int result = v4l2_ioctl(fd, request, arg); 87 | 88 | if(!result) 89 | { 90 | return 0; 91 | } 92 | 93 | if(errno != EINTR) 94 | { 95 | PyErr_SetFromErrno(PyExc_IOError); 96 | return 1; 97 | } 98 | } 99 | } 100 | 101 | static void Video_device_unmap(Video_device *self) 102 | { 103 | int i; 104 | 105 | for(i = 0; i < self->buffer_count; i++) 106 | { 107 | v4l2_munmap(self->buffers[i].start, self->buffers[i].length); 108 | } 109 | } 110 | 111 | static void Video_device_dealloc(Video_device *self) 112 | { 113 | if(self->fd >= 0) 114 | { 115 | if(self->buffers) 116 | { 117 | Video_device_unmap(self); 118 | } 119 | 120 | v4l2_close(self->fd); 121 | } 122 | 123 | Py_TYPE(self)->tp_free((PyObject *)self); 124 | } 125 | 126 | static int Video_device_init(Video_device *self, PyObject *args, 127 | PyObject *kwargs) 128 | { 129 | const char *device_path; 130 | 131 | if(!PyArg_ParseTuple(args, "s", &device_path)) 132 | { 133 | return -1; 134 | } 135 | 136 | int fd = v4l2_open(device_path, O_RDWR | O_NONBLOCK); 137 | 138 | if(fd < 0) 139 | { 140 | PyErr_SetFromErrnoWithFilename(PyExc_IOError, (char *)device_path); 141 | return -1; 142 | } 143 | 144 | self->fd = fd; 145 | self->buffers = NULL; 146 | return 0; 147 | } 148 | 149 | static PyObject *Video_device_close(Video_device *self) 150 | { 151 | if(self->fd >= 0) 152 | { 153 | if(self->buffers) 154 | { 155 | Video_device_unmap(self); 156 | } 157 | 158 | v4l2_close(self->fd); 159 | self->fd = -1; 160 | } 161 | 162 | Py_RETURN_NONE; 163 | } 164 | 165 | static PyObject *Video_device_fileno(Video_device *self) 166 | { 167 | ASSERT_OPEN; 168 | #if PY_MAJOR_VERSION < 3 169 | return PyInt_FromLong(self->fd); 170 | #else 171 | return PyLong_FromLong(self->fd); 172 | #endif 173 | } 174 | 175 | static PyObject *Video_device_get_info(Video_device *self) 176 | { 177 | ASSERT_OPEN; 178 | struct v4l2_capability caps; 179 | 180 | if(my_ioctl(self->fd, VIDIOC_QUERYCAP, &caps)) 181 | { 182 | return NULL; 183 | } 
184 | 185 | PyObject *set = PySet_New(NULL); 186 | 187 | if(!set) 188 | { 189 | return NULL; 190 | } 191 | 192 | struct capability *capability = capabilities; 193 | 194 | while((void *)capability < (void *)capabilities + sizeof(capabilities)) 195 | { 196 | if(caps.capabilities & capability->id) 197 | { 198 | #if PY_MAJOR_VERSION < 3 199 | PyObject *s = PyString_FromString(capability->name); 200 | #else 201 | PyObject *s = PyBytes_FromString(capability->name); 202 | #endif 203 | 204 | if(!s) 205 | { 206 | Py_DECREF(set); 207 | return NULL; 208 | } 209 | 210 | PySet_Add(set, s); 211 | } 212 | 213 | capability++; 214 | } 215 | 216 | return Py_BuildValue("sssO", caps.driver, caps.card, caps.bus_info, set); 217 | } 218 | 219 | static PyObject *Video_device_set_format(Video_device *self, PyObject *args, PyObject *keywds) 220 | { 221 | int size_x; 222 | int size_y; 223 | int yuv420 = 0; 224 | int fourcc; 225 | const char *fourcc_str; 226 | int fourcc_len = 0; 227 | static char *kwlist [] = { 228 | "size_x", 229 | "size_y", 230 | "yuv420", 231 | "fourcc", 232 | NULL 233 | }; 234 | 235 | if (!PyArg_ParseTupleAndKeywords(args, keywds, "ii|is#", kwlist, &size_x, &size_y, &yuv420, &fourcc_str, &fourcc_len)) 236 | { 237 | return NULL; 238 | } 239 | 240 | struct v4l2_format format; 241 | CLEAR(format); 242 | format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 243 | /* Get the current format */ 244 | if(my_ioctl(self->fd, VIDIOC_G_FMT, &format)) 245 | { 246 | return NULL; 247 | } 248 | 249 | #ifdef USE_LIBV4L 250 | format.fmt.pix.pixelformat = 251 | yuv420 ? 
V4L2_PIX_FMT_YUV420 : V4L2_PIX_FMT_RGB24; 252 | #else 253 | format.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; 254 | #endif 255 | format.fmt.pix.field = V4L2_FIELD_INTERLACED; 256 | 257 | if (fourcc_len == 4) { 258 | fourcc = v4l2_fourcc(fourcc_str[0], 259 | fourcc_str[1], 260 | fourcc_str[2], 261 | fourcc_str[3]); 262 | format.fmt.pix.pixelformat = fourcc; 263 | format.fmt.pix.field = V4L2_FIELD_ANY; 264 | } 265 | 266 | format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 267 | format.fmt.pix.width = size_x; 268 | format.fmt.pix.height = size_y; 269 | format.fmt.pix.bytesperline = 0; 270 | 271 | if(my_ioctl(self->fd, VIDIOC_S_FMT, &format)) 272 | { 273 | return NULL; 274 | } 275 | 276 | return Py_BuildValue("ii", format.fmt.pix.width, format.fmt.pix.height); 277 | } 278 | 279 | static PyObject *Video_device_set_fps(Video_device *self, PyObject *args) 280 | { 281 | int fps; 282 | if(!PyArg_ParseTuple(args, "i", &fps)) 283 | { 284 | return NULL; 285 | } 286 | struct v4l2_streamparm setfps; 287 | CLEAR(setfps); 288 | setfps.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 289 | setfps.parm.capture.timeperframe.numerator = 1; 290 | setfps.parm.capture.timeperframe.denominator = fps; 291 | if(my_ioctl(self->fd, VIDIOC_S_PARM, &setfps)){ 292 | return NULL; 293 | } 294 | return Py_BuildValue("i",setfps.parm.capture.timeperframe.denominator); 295 | } 296 | 297 | static void get_fourcc_str(char *fourcc_str, int fourcc) 298 | { 299 | if (fourcc_str == NULL) 300 | return; 301 | fourcc_str[0] = (char)(fourcc & 0xFF); 302 | fourcc_str[1] = (char)((fourcc >> 8) & 0xFF); 303 | fourcc_str[2] = (char)((fourcc >> 16) & 0xFF); 304 | fourcc_str[3] = (char)((fourcc >> 24) & 0xFF); 305 | fourcc_str[4] = 0; 306 | } 307 | 308 | static PyObject *Video_device_get_format(Video_device *self) 309 | { 310 | struct v4l2_format format; 311 | CLEAR(format); 312 | format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 313 | 314 | /* Get the current format */ 315 | if(my_ioctl(self->fd, VIDIOC_G_FMT, &format)) 316 | { 317 | return 
NULL; 318 | } 319 | 320 | char current_fourcc[5]; 321 | get_fourcc_str(current_fourcc, format.fmt.pix.pixelformat); 322 | return Py_BuildValue("iis", format.fmt.pix.width, format.fmt.pix.height, current_fourcc); 323 | } 324 | 325 | static PyObject *Video_device_get_fourcc(Video_device *self, PyObject *args) 326 | { 327 | char *fourcc_str; 328 | int size; 329 | int fourcc; 330 | if (!PyArg_ParseTuple(args, "s#", &fourcc_str, &size)) 331 | { 332 | return NULL; 333 | } 334 | 335 | if(size < 4) 336 | { 337 | return NULL; 338 | } 339 | 340 | fourcc = v4l2_fourcc(fourcc_str[0], 341 | fourcc_str[1], 342 | fourcc_str[2], 343 | fourcc_str[3]); 344 | return Py_BuildValue("i", fourcc); 345 | } 346 | 347 | static PyObject *Video_device_set_auto_white_balance(Video_device *self, PyObject *args) 348 | { 349 | #ifdef V4L2_CID_AUTO_WHITE_BALANCE 350 | int autowb; 351 | if(!PyArg_ParseTuple(args, "i", &autowb)) 352 | { 353 | return NULL; 354 | } 355 | 356 | struct v4l2_control ctrl; 357 | CLEAR(ctrl); 358 | ctrl.id = V4L2_CID_AUTO_WHITE_BALANCE; 359 | ctrl.value = autowb; 360 | if(my_ioctl(self->fd, VIDIOC_S_CTRL, &ctrl)){ 361 | return NULL; 362 | } 363 | return Py_BuildValue("i",ctrl.value); 364 | #else 365 | return NULL; 366 | #endif 367 | } 368 | 369 | static PyObject *Video_device_get_auto_white_balance(Video_device *self) 370 | { 371 | #ifdef V4L2_CID_AUTO_WHITE_BALANCE 372 | struct v4l2_control ctrl; 373 | CLEAR(ctrl); 374 | ctrl.id = V4L2_CID_AUTO_WHITE_BALANCE; 375 | if(my_ioctl(self->fd, VIDIOC_G_CTRL, &ctrl)){ 376 | return NULL; 377 | } 378 | return Py_BuildValue("i",ctrl.value); 379 | #else 380 | return NULL; 381 | #endif 382 | } 383 | 384 | static PyObject *Video_device_set_white_balance_temperature(Video_device *self, PyObject *args) 385 | { 386 | #ifdef V4L2_CID_WHITE_BALANCE_TEMPERATURE 387 | int wb; 388 | if(!PyArg_ParseTuple(args, "i", &wb)) 389 | { 390 | return NULL; 391 | } 392 | 393 | struct v4l2_control ctrl; 394 | CLEAR(ctrl); 395 | ctrl.id = 
V4L2_CID_WHITE_BALANCE_TEMPERATURE; 396 | ctrl.value = wb; 397 | if(my_ioctl(self->fd, VIDIOC_S_CTRL, &ctrl)){ 398 | return NULL; 399 | } 400 | return Py_BuildValue("i",ctrl.value); 401 | #else 402 | return NULL; 403 | #endif 404 | } 405 | 406 | static PyObject *Video_device_get_white_balance_temperature(Video_device *self) 407 | { 408 | #ifdef V4L2_CID_WHITE_BALANCE_TEMPERATURE 409 | struct v4l2_control ctrl; 410 | CLEAR(ctrl); 411 | ctrl.id = V4L2_CID_WHITE_BALANCE_TEMPERATURE; 412 | if(my_ioctl(self->fd, VIDIOC_G_CTRL, &ctrl)){ 413 | return NULL; 414 | } 415 | return Py_BuildValue("i",ctrl.value); 416 | #else 417 | return NULL; 418 | #endif 419 | } 420 | 421 | static PyObject *Video_device_set_exposure_absolute(Video_device *self, PyObject *args) 422 | { 423 | #ifdef V4L2_CID_EXPOSURE_ABSOLUTE 424 | int exposure; 425 | if(!PyArg_ParseTuple(args, "i", &exposure)) 426 | { 427 | return NULL; 428 | } 429 | 430 | struct v4l2_control ctrl; 431 | CLEAR(ctrl); 432 | ctrl.id = V4L2_CID_EXPOSURE_ABSOLUTE; 433 | ctrl.value = exposure; 434 | if(my_ioctl(self->fd, VIDIOC_S_CTRL, &ctrl)){ 435 | return NULL; 436 | } 437 | return Py_BuildValue("i",ctrl.value); 438 | #else 439 | return NULL; 440 | #endif 441 | } 442 | 443 | static PyObject *Video_device_get_exposure_absolute(Video_device *self) 444 | { 445 | #ifdef V4L2_CID_EXPOSURE_ABSOLUTE 446 | struct v4l2_control ctrl; 447 | CLEAR(ctrl); 448 | ctrl.id = V4L2_CID_EXPOSURE_ABSOLUTE; 449 | if(my_ioctl(self->fd, VIDIOC_G_CTRL, &ctrl)){ 450 | return NULL; 451 | } 452 | return Py_BuildValue("i",ctrl.value); 453 | #else 454 | return NULL; 455 | #endif 456 | } 457 | 458 | static PyObject *Video_device_set_exposure_auto(Video_device *self, PyObject *args) 459 | { 460 | #ifdef V4L2_CID_EXPOSURE_AUTO 461 | int autoexposure; 462 | if(!PyArg_ParseTuple(args, "i", &autoexposure)) 463 | { 464 | return NULL; 465 | } 466 | 467 | struct v4l2_control ctrl; 468 | CLEAR(ctrl); 469 | ctrl.id = V4L2_CID_EXPOSURE_AUTO; 470 | ctrl.value = 
autoexposure; 471 | if(my_ioctl(self->fd, VIDIOC_S_CTRL, &ctrl)){ 472 | return NULL; 473 | } 474 | return Py_BuildValue("i",ctrl.value); 475 | #else 476 | return NULL; 477 | #endif 478 | } 479 | 480 | static PyObject *Video_device_get_exposure_auto(Video_device *self) 481 | { 482 | #ifdef V4L2_CID_EXPOSURE_AUTO 483 | struct v4l2_control ctrl; 484 | CLEAR(ctrl); 485 | ctrl.id = V4L2_CID_EXPOSURE_AUTO; 486 | if(my_ioctl(self->fd, VIDIOC_G_CTRL, &ctrl)){ 487 | return NULL; 488 | } 489 | return Py_BuildValue("i",ctrl.value); 490 | #else 491 | return NULL; 492 | #endif 493 | } 494 | 495 | static PyObject *Video_device_set_focus_auto(Video_device *self, PyObject *args) 496 | { 497 | #ifdef V4L2_CID_FOCUS_AUTO 498 | int autofocus; 499 | if(!PyArg_ParseTuple(args, "i", &autofocus)) 500 | { 501 | return NULL; 502 | } 503 | 504 | struct v4l2_control ctrl; 505 | CLEAR(ctrl); 506 | ctrl.id = V4L2_CID_FOCUS_AUTO; 507 | ctrl.value = autofocus; 508 | if(my_ioctl(self->fd, VIDIOC_S_CTRL, &ctrl)){ 509 | return NULL; 510 | } 511 | return Py_BuildValue("i",ctrl.value); 512 | #else 513 | return NULL; 514 | #endif 515 | } 516 | 517 | static PyObject *Video_device_get_focus_auto(Video_device *self) 518 | { 519 | #ifdef V4L2_CID_FOCUS_AUTO 520 | struct v4l2_control ctrl; 521 | CLEAR(ctrl); 522 | ctrl.id = V4L2_CID_FOCUS_AUTO; 523 | if(my_ioctl(self->fd, VIDIOC_G_CTRL, &ctrl)){ 524 | return NULL; 525 | } 526 | return Py_BuildValue("i",ctrl.value); 527 | #else 528 | return NULL; 529 | #endif 530 | } 531 | 532 | static PyObject *Video_device_start(Video_device *self) 533 | { 534 | ASSERT_OPEN; 535 | enum v4l2_buf_type type; 536 | type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 537 | 538 | if(my_ioctl(self->fd, VIDIOC_STREAMON, &type)) 539 | { 540 | return NULL; 541 | } 542 | 543 | Py_RETURN_NONE; 544 | } 545 | 546 | static PyObject *Video_device_stop(Video_device *self) 547 | { 548 | ASSERT_OPEN; 549 | enum v4l2_buf_type type; 550 | type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 551 | 552 | if(my_ioctl(self->fd, 
VIDIOC_STREAMOFF, &type)) 553 | { 554 | return NULL; 555 | } 556 | 557 | Py_RETURN_NONE; 558 | } 559 | 560 | static PyObject *Video_device_create_buffers(Video_device *self, PyObject *args) 561 | { 562 | int buffer_count; 563 | 564 | if(!PyArg_ParseTuple(args, "I", &buffer_count)) 565 | { 566 | return NULL; 567 | } 568 | 569 | ASSERT_OPEN; 570 | 571 | if(self->buffers) 572 | { 573 | PyErr_SetString(PyExc_ValueError, "Buffers are already created"); 574 | return NULL; 575 | } 576 | 577 | struct v4l2_requestbuffers reqbuf; 578 | reqbuf.count = buffer_count; 579 | reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 580 | reqbuf.memory = V4L2_MEMORY_MMAP; 581 | 582 | if(my_ioctl(self->fd, VIDIOC_REQBUFS, &reqbuf)) 583 | { 584 | return NULL; 585 | } 586 | 587 | if(!reqbuf.count) 588 | { 589 | PyErr_SetString(PyExc_IOError, "Not enough buffer memory"); 590 | return NULL; 591 | } 592 | 593 | self->buffers = malloc(reqbuf.count * sizeof(struct buffer)); 594 | 595 | if(!self->buffers) 596 | { 597 | PyErr_NoMemory(); 598 | return NULL; 599 | } 600 | 601 | int i; 602 | 603 | for(i = 0; i < reqbuf.count; i++) 604 | { 605 | struct v4l2_buffer buffer; 606 | buffer.index = i; 607 | buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 608 | buffer.memory = V4L2_MEMORY_MMAP; 609 | 610 | if(my_ioctl(self->fd, VIDIOC_QUERYBUF, &buffer)) 611 | { 612 | return NULL; 613 | } 614 | 615 | self->buffers[i].length = buffer.length; 616 | self->buffers[i].start = v4l2_mmap(NULL, buffer.length, 617 | PROT_READ | PROT_WRITE, MAP_SHARED, self->fd, buffer.m.offset); 618 | 619 | if(self->buffers[i].start == MAP_FAILED) 620 | { 621 | PyErr_SetFromErrno(PyExc_IOError); 622 | return NULL; 623 | } 624 | } 625 | 626 | self->buffer_count = i; 627 | Py_RETURN_NONE; 628 | } 629 | 630 | static PyObject *Video_device_queue_all_buffers(Video_device *self) 631 | { 632 | if(!self->buffers) 633 | { 634 | ASSERT_OPEN; 635 | PyErr_SetString(PyExc_ValueError, "Buffers have not been created"); 636 | return NULL; 637 | } 638 | 639 | int 
i; 640 | int buffer_count = self->buffer_count; 641 | 642 | for(i = 0; i < buffer_count; i++) 643 | { 644 | struct v4l2_buffer buffer; 645 | buffer.index = i; 646 | buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 647 | buffer.memory = V4L2_MEMORY_MMAP; 648 | 649 | if(my_ioctl(self->fd, VIDIOC_QBUF, &buffer)) 650 | { 651 | return NULL; 652 | } 653 | } 654 | 655 | Py_RETURN_NONE; 656 | } 657 | 658 | static PyObject *Video_device_read_internal(Video_device *self, int queue) 659 | { 660 | if(!self->buffers) 661 | { 662 | ASSERT_OPEN; 663 | PyErr_SetString(PyExc_ValueError, "Buffers have not been created"); 664 | return NULL; 665 | } 666 | 667 | struct v4l2_buffer buffer; 668 | buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 669 | buffer.memory = V4L2_MEMORY_MMAP; 670 | 671 | if(my_ioctl(self->fd, VIDIOC_DQBUF, &buffer)) 672 | { 673 | return NULL; 674 | } 675 | 676 | #ifdef USE_LIBV4L 677 | #if PY_MAJOR_VERSION < 3 678 | PyObject *result = PyString_FromStringAndSize( 679 | #else 680 | PyObject *result = PyBytes_FromStringAndSize( 681 | #endif 682 | self->buffers[buffer.index].start, buffer.bytesused); 683 | 684 | if(!result) 685 | { 686 | return NULL; 687 | } 688 | #else 689 | // Convert buffer from YUYV to RGB. 690 | // For the byte order, see: http://v4l2spec.bytesex.org/spec/r4339.htm 691 | // For the color conversion, see: http://v4l2spec.bytesex.org/spec/x2123.htm 692 | int length = buffer.bytesused * 6 / 4; 693 | #if PY_MAJOR_VERSION < 3 694 | PyObject *result = PyString_FromStringAndSize(NULL, length); 695 | #else 696 | PyObject *result = PyBytes_FromStringAndSize(NULL, length); 697 | #endif 698 | 699 | if(!result) 700 | { 701 | return NULL; 702 | } 703 | 704 | char *rgb = PyString_AS_STRING(result); 705 | char *rgb_max = rgb + length; 706 | unsigned char *yuyv = self->buffers[buffer.index].start; 707 | 708 | #define CLAMP(c) ((c) <= 0 ? 0 : (c) >= 65025 ? 
255 : (c) >> 8) 709 | while(rgb < rgb_max) 710 | { 711 | int u = yuyv[1] - 128; 712 | int v = yuyv[3] - 128; 713 | int uv = 100 * u + 208 * v; 714 | u *= 516; 715 | v *= 409; 716 | 717 | int y = 298 * (yuyv[0] - 16); 718 | rgb[0] = CLAMP(y + v); 719 | rgb[1] = CLAMP(y - uv); 720 | rgb[2] = CLAMP(y + u); 721 | 722 | y = 298 * (yuyv[2] - 16); 723 | rgb[3] = CLAMP(y + v); 724 | rgb[4] = CLAMP(y - uv); 725 | rgb[5] = CLAMP(y + u); 726 | 727 | rgb += 6; 728 | yuyv += 4; 729 | } 730 | #undef CLAMP 731 | #endif 732 | 733 | if(queue && my_ioctl(self->fd, VIDIOC_QBUF, &buffer)) 734 | { 735 | return NULL; 736 | } 737 | 738 | return result; 739 | } 740 | 741 | static PyObject *Video_device_read(Video_device *self) 742 | { 743 | return Video_device_read_internal(self, 0); 744 | } 745 | 746 | static PyObject *Video_device_read_and_queue(Video_device *self) 747 | { 748 | return Video_device_read_internal(self, 1); 749 | } 750 | 751 | static PyMethodDef Video_device_methods[] = { 752 | {"close", (PyCFunction)Video_device_close, METH_NOARGS, 753 | "close()\n\n" 754 | "Close video device. 
Subsequent calls to other methods will fail."}, 755 | {"fileno", (PyCFunction)Video_device_fileno, METH_NOARGS, 756 | "fileno() -> integer \"file descriptor\".\n\n" 757 | "This enables video devices to be passed select.select for waiting " 758 | "until a frame is available for reading."}, 759 | {"get_info", (PyCFunction)Video_device_get_info, METH_NOARGS, 760 | "get_info() -> driver, card, bus_info, capabilities\n\n" 761 | "Returns three strings with information about the video device, and one " 762 | "set containing strings identifying the capabilities of the video " 763 | "device."}, 764 | {"get_fourcc", (PyCFunction)Video_device_get_fourcc, METH_VARARGS, 765 | "get_fourcc(fourcc_string) -> fourcc_int\n\n" 766 | "Return the fourcc string encoded as int."}, 767 | {"get_format", (PyCFunction)Video_device_get_format, METH_NOARGS, 768 | "get_format() -> size_x, size_y, fourcc\n\n" 769 | "Request the current video format."}, 770 | {"set_format", (PyCFunction)Video_device_set_format, METH_VARARGS|METH_KEYWORDS, 771 | "set_format(size_x, size_y, yuv420 = 0, fourcc='MJPEG') -> size_x, size_y\n\n" 772 | "Request the video device to set image size and format. The device may " 773 | "choose another size than requested and will return its choice. The " 774 | "image format will be RGB24 if yuv420 is zero (default) or YUV420 if " 775 | "yuv420 is 1, if fourcc keyword is set that will be the fourcc pixel format used."}, 776 | {"set_fps", (PyCFunction)Video_device_set_fps, METH_VARARGS, 777 | "set_fps(fps) -> fps \n\n" 778 | "Request the video device to set frame per seconds.The device may " 779 | "choose another frame rate than requested and will return its choice. " }, 780 | {"set_auto_white_balance", (PyCFunction)Video_device_set_auto_white_balance, METH_VARARGS, 781 | "set_auto_white_balance(autowb) -> autowb \n\n" 782 | "Request the video device to set auto white balance to value. The device may " 783 | "choose another value than requested and will return its choice. 
" }, 784 | {"get_auto_white_balance", (PyCFunction)Video_device_get_auto_white_balance, METH_NOARGS, 785 | "get_auto_white_balance() -> autowb \n\n" 786 | "Request the video device to get auto white balance value. " }, 787 | {"set_white_balance_temperature", (PyCFunction)Video_device_set_white_balance_temperature, METH_VARARGS, 788 | "set_white_balance_temperature(temp) -> temp \n\n" 789 | "Request the video device to set white balance tempature to value. The device may " 790 | "choose another value than requested and will return its choice. " }, 791 | {"get_white_balance_temperature", (PyCFunction)Video_device_get_white_balance_temperature, METH_NOARGS, 792 | "get_white_balance_temperature() -> temp \n\n" 793 | "Request the video device to get white balance temperature value. " }, 794 | {"set_exposure_auto", (PyCFunction)Video_device_set_exposure_auto, METH_VARARGS, 795 | "set_exposure_auto(autoexp) -> autoexp \n\n" 796 | "Request the video device to set auto exposure to value. The device may " 797 | "choose another value than requested and will return its choice. " }, 798 | {"get_exposure_auto", (PyCFunction)Video_device_get_exposure_auto, METH_NOARGS, 799 | "get_exposure_auto() -> autoexp \n\n" 800 | "Request the video device to get auto exposure value. " }, 801 | {"set_exposure_absolute", (PyCFunction)Video_device_set_exposure_absolute, METH_VARARGS, 802 | "set_exposure_absolute(exptime) -> exptime \n\n" 803 | "Request the video device to set exposure time to value. The device may " 804 | "choose another value than requested and will return its choice. " }, 805 | {"get_exposure_absolute", (PyCFunction)Video_device_get_exposure_absolute, METH_NOARGS, 806 | "get_exposure_absolute() -> exptime \n\n" 807 | "Request the video device to get exposure time value. " }, 808 | {"set_focus_auto", (PyCFunction)Video_device_set_focus_auto, METH_VARARGS, 809 | "set_auto_focus_auto(autofocus) -> autofocus \n\n" 810 | "Request the video device to set auto focuse on or off. 
The device may " 811 | "choose another value than requested and will return its choice. " }, 812 | {"get_focus_auto", (PyCFunction)Video_device_get_focus_auto, METH_NOARGS, 813 | "get_focus_auto() -> autofocus \n\n" 814 | "Request the video device to get auto focus value. " }, 815 | {"start", (PyCFunction)Video_device_start, METH_NOARGS, 816 | "start()\n\n" 817 | "Start video capture."}, 818 | {"stop", (PyCFunction)Video_device_stop, METH_NOARGS, 819 | "stop()\n\n" 820 | "Stop video capture."}, 821 | {"create_buffers", (PyCFunction)Video_device_create_buffers, METH_VARARGS, 822 | "create_buffers(count)\n\n" 823 | "Create buffers used for capturing image data. Can only be called once " 824 | "for each video device object."}, 825 | {"queue_all_buffers", (PyCFunction)Video_device_queue_all_buffers, 826 | METH_NOARGS, 827 | "queue_all_buffers()\n\n" 828 | "Let the video device fill all buffers created."}, 829 | {"read", (PyCFunction)Video_device_read, METH_NOARGS, 830 | "read() -> string\n\n" 831 | "Reads image data from a buffer that has been filled by the video " 832 | "device. The image data is in RGB och YUV420 format as decided by " 833 | "'set_format'. The buffer is removed from the queue. Fails if no buffer " 834 | "is filled. 
Use select.select to check for filled buffers."}, 835 | {"read_and_queue", (PyCFunction)Video_device_read_and_queue, METH_NOARGS, 836 | "read_and_queue()\n\n" 837 | "Same as 'read', but adds the buffer back to the queue so the video " 838 | "device can fill it again."}, 839 | {NULL} 840 | }; 841 | 842 | static PyTypeObject Video_device_type = { 843 | #if PY_MAJOR_VERSION < 3 844 | PyObject_HEAD_INIT(NULL) 0, 845 | #else 846 | PyVarObject_HEAD_INIT(NULL, 0) 847 | #endif 848 | "v4l2capture.Video_device", sizeof(Video_device), 0, 849 | (destructor)Video_device_dealloc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 850 | 0, Py_TPFLAGS_DEFAULT, "Video_device(path)\n\nOpens the video device at " 851 | "the given path and returns an object that can capture images. The " 852 | "constructor and all methods except close may raise IOError.", 0, 0, 0, 853 | 0, 0, 0, Video_device_methods, 0, 0, 0, 0, 0, 0, 0, 854 | (initproc)Video_device_init 855 | }; 856 | 857 | static PyMethodDef module_methods[] = { 858 | {NULL} 859 | }; 860 | 861 | #if PY_MAJOR_VERSION < 3 862 | PyMODINIT_FUNC initv4l2capture(void) 863 | #else 864 | PyMODINIT_FUNC PyInit_v4l2capture(void) 865 | #endif 866 | { 867 | Video_device_type.tp_new = PyType_GenericNew; 868 | 869 | if(PyType_Ready(&Video_device_type) < 0) 870 | { 871 | #if PY_MAJOR_VERSION < 3 872 | return; 873 | #else 874 | return NULL; 875 | #endif 876 | } 877 | 878 | PyObject *module; 879 | 880 | #if PY_MAJOR_VERSION < 3 881 | module = Py_InitModule3("v4l2capture", module_methods, 882 | "Capture video with video4linux2."); 883 | #else 884 | static struct PyModuleDef moduledef = { 885 | PyModuleDef_HEAD_INIT, 886 | "v4l2capture", 887 | "Capture video with video4linux2.", 888 | -1, 889 | module_methods, 890 | NULL, 891 | NULL, 892 | NULL, 893 | NULL 894 | }; 895 | module = PyModule_Create(&moduledef); 896 | #endif 897 | 898 | if(!module) 899 | { 900 | #if PY_MAJOR_VERSION < 3 901 | return; 902 | #else 903 | return NULL; 904 | #endif 905 | } 906 | 907 | 
Py_INCREF(&Video_device_type); 908 | PyModule_AddObject(module, "Video_device", (PyObject *)&Video_device_type); 909 | #if PY_MAJOR_VERSION >= 3 910 | return module; 911 | #endif 912 | } 913 | --------------------------------------------------------------------------------