├── .gitignore ├── LICENSE ├── README.md ├── index.md ├── pyproject.toml ├── setup.cfg ├── setup.py └── src ├── __init__.py └── camera4kivy ├── __init__.py ├── based_on_kivy_core ├── __init__.py └── camera │ ├── __init__.py │ ├── camera_gi.py │ ├── camera_opencv.py │ ├── camera_picamera.py │ └── camera_picamera2.py ├── preview.py ├── preview_camerax.py ├── preview_common.py └── preview_kivycamera.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.egg-info 2 | dist/*.tar.gz 3 | dist/*.whl 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Android-for-Python 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Camera4Kivy 2 | =========== 3 | 4 | *Yet Another Camera for Kivy* 5 | 6 | **2023-11-13 This repository is archived.** 7 | 8 | 2023/02/09 : Android users: camerax_provider has been updated to version 0.0.3, and when updating see also [enable_video](#enable_video). 9 | 10 | - [Overview](#overview) 11 | - [Install](#install) 12 | * [Install Camera4Kivy on Desktop](#install-camera4kivy-on-desktop) 13 | * [Install Camera4Kivy on Android](#install-camera4kivy-on-android) 14 | + [buildozer.spec:](#buildozerspec-) 15 | + [Run Time Permissions](#run-time-permissions) 16 | * [Install Camera4Kivy on iOS](#install-camera4kivy-on-ios) 17 | + [Run Time Permissions](#run-time-permissions-1) 18 | - [Examples](#examples) 19 | * [Tested Examples](#tested-examples) 20 | + [C4K Photo Example](#c4k-photo-example) 21 | + [C4K QR Example](#c4k-qr-example) 22 | + [C4K OpenCV Example](#c4k-opencv-example) 23 | + [C4K MLKit Example](#c4k-mlkit-example) 24 | + [C4K TFLite Example](#c4k-tflite-example) 25 | * [Tested Platforms](#tested-platforms) 26 | - [Preview Widget](#preview-widget) 27 | * [Preview Widget Properties](#preview-widget-properties) 28 | + [aspect_ratio](#aspect-ratio) 29 | + [letterbox_color](#letterbox-color) 30 | + [orientation](#orientation) 31 | * [Preview Widget API](#preview-widget-api) 32 | + [Connect Camera](#connect-camera) 33 | - [camera_id](#camera-id) 34 | - [mirrored](#mirrored) 35 | - [filepath_callback](#filepath_callback) 36 | - [sensor_resolution](#sensor_resolution) 37 | - [sensor_rotation](#sensor_rotation) 38 | - [default_zoom](#default_zoom) 39 | - [analyze_pixels_resolution](#analyze_pixels_resolution) 40 | - [enable_analyze_pixels](#enable_analyze_pixels) 41 | - [enable_analyze_imageproxy](#enable_analyze_imageproxy) 42 | - [enable_zoom_gesture](#enable_zoom_gesture) 43 | - 
[enable_focus_gesture](#enable_focus_gesture) 44 | - [enable_video](#enable_video) 45 | - [imageproxy_data_format](#imageproxy_data_format) 46 | + [Disconnect Camera](#disconnect-camera) 47 | + [Capture](#capture) 48 | - [location](#location) 49 | - [subdir](#subdir) 50 | - [name](#name) 51 | + [Select Camera](#select-camera) 52 | + [Zoom](#zoom) 53 | + [Pan/scroll](#panscroll) 54 | + [Flash](#flash) 55 | + [Torch](#torch) 56 | + [Focus](#focus) 57 | + [camera_connected](#camera_connected) 58 | - [Image analysis](#image-analysis) 59 | * [Overview and Examples](#overview-and-examples) 60 | * [User Interaction](#user-interaction) 61 | * [Coordinates and image encoding](#coordinates-and-image-encoding) 62 | * [Analysis Configuration](#analysis-configuration) 63 | * [Debugging](#debugging) 64 | * [Performance](#performance) 65 | - [Camera Behavior](#camera-behavior) 66 | * [A Physical Camera](#a-physical-camera) 67 | * [Resolution](#resolution) 68 | + [Sensor Resolution](#sensor-resolution) 69 | + [Cropped Sensor Resolution](#cropped-sensor-resolution) 70 | + [Preview Resolution](#preview-resolution) 71 | + [Capture Resolution](#capture-resolution) 72 | + [Analysis Resolution](#analysis-resolution) 73 | + [Display Resolution.](#display-resolution) 74 | - [Camera Provider](#camera-provider) 75 | * [Android Camera Provider](#android-camera-provider) 76 | * [OpenCV](#opencv) 77 | * [GStreamer](#gstreamer) 78 | * [Picamera](#picamera) 79 | * [AVFoundation](#avfoundation) 80 | - [Known Behavior](#known-behavior) 81 | * [Behavior: Android .mp4 Orientation](#behavior-android-mp4-orientation) 82 | * [Behavior: Android .jpg Orientation.](#behavior-android-jpg-orientation) 83 | * [Behavior: Android armeabi-v7a build installed on an arm64-v8a device](#behavior-android-armeabi-v7a-build-installed-on-an-arm64-v8a-device) 84 | * [Behavior: Android "No supported surface combination"](#behavior-android-no-supported-surface-combination) 85 | 86 | ## Overview 87 | 88 | Available on all 
the usual platforms. 89 | 90 | ```python 91 | from camera4kivy import Preview 92 | ``` 93 | 94 | Camera4Kivy consists of a `Preview` widget with an api to connect to the physical camera unit. The Preview widget layout is [configured with Kivy properties](https://github.com/Android-for-Python/Camera4Kivy#preview-widget-properties) , the camera unit and image analysis behavior are [configured with an api](https://github.com/Android-for-Python/Camera4Kivy#preview-widget-api). For example: 95 | 96 | In .kv 97 | ``` 98 | Preview: 99 | id: preview 100 | aspect_ratio: '16:9' 101 | ``` 102 | 103 | In Python 104 | ```python 105 | self.preview = Preview(aspect_ratio = '16:9') 106 | ``` 107 | 108 | To connect the camera unit to the Preview call the preview's `connect_camera()` method, **at least one timestep after on_start()**. For example to connect the camera with the image analysis api enabled : 109 | 110 | ```python 111 | self.preview.connect_camera(enable_analyze_pixels = True) 112 | ``` 113 | 114 | Well behaved apps disconnect the camera when it is no longer in use. It is important to be well behaved. 115 | 116 | ```python 117 | self.preview.disconnect_camera() 118 | ``` 119 | 120 | To take a photo: 121 | 122 | ```python 123 | self.preview.capture_photo() 124 | ``` 125 | 126 | The captured file location may be specified and is also reported in a callback. A data analysis api allows per frame analysis and preview annotation or preview image replacement. 127 | 128 | On Android a pinch/spread gesture controls zoom, and a tap overrides any automatic focus and metering (if available). Some `connect_camera()` options are platform specific. 129 | 130 | Be aware Preview operation depends on the performance of the graphics hardware. In general Preview uses the highest available resolution for 30fps performance. 
On devices with low performance graphics hardware such as low end laptops or Raspberry Pi, you will probably have to explicitly set a lower image resolution in order to increase the frame rate.
pip3 install git+https://github.com/kivy/kivy-ios.git
Long press or double click on a highlighted QR code to open a web browser. Illustrates basic analysis, screen annotation, and user interaction. 215 | 216 | #### C4K OpenCV Example 217 | [C4K OpenCV Example](https://github.com/Android-for-Python/c4k_opencv_example) 218 | Edge detect the video stream. Illustrates using OpenCV analysis and replacing the original preview with the transformed image. 219 | 220 | #### C4K MLKit Example 221 | [C4K MLKit Example](https://github.com/Android-for-Python/c4k_mlkit_example) 222 | Face detect, MLKit is Android only. Illustrates using the ImageProxy api. 223 | 224 | #### C4K TFLite Example 225 | [C4K TFLite Example](https://github.com/Android-for-Python/c4k_tflite_example) 226 | Object classification. Illustrates using a large Tensorflow Lite model, and writing text to the Preview image. 227 | 228 | ### Tested Platforms 229 | 230 | | Example | Windows | Macos | Linux | Android | iOS | Coral | 231 | |---------|---------|-------|-------|---------|-----|-------| 232 | | Photo | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | 233 | | QR | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | | 234 | | OpenCV | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | | 235 | | MLKit | | | | :heavy_check_mark: | | | 236 | | TFLite | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | 237 | 238 | - Windows : Windows 11, i7-10 @ 1.1GHz, Python 3.11.1 Kivy==2.2.0.dev0 239 | - Windows : Windows 10, i3-7 @ 2.4GHz, Python 3.9.7 Kivy==2.0.0 240 | - Macos : Big Sur, i5-10 @ @ 1.1GHz, Python 3.9.9 Kivy==2.0.0 241 | - Linux : Raspberry Buster, Cortex-A72 @ 1.5GHz Python3.7.3 Kivy==2.0.0 242 | - Linux : Raspberry Bullseye, Cortex-A72 @ 1.5GHz Python3.9.2 Kivy==2.1.0 243 | - Android : build : arm64-v8a device: Android 12, Pixel 5 3.10.6 Kivy==2.2.0.dev0 244 | - Android : build 
: armeabi-v7a device: Android 6, Nexus 5 Start is somewhat slow. 245 | - iOS : iPhone SE (second generation) 246 | - Coral : [Accelerator](https://coral.ai/products/accelerator) tested with Windows 11 , gave very approximately an order of magnitude speed up. 247 | 248 | ## Preview Widget 249 | 250 | An app can have multiple `Preview` widgets instantiated, but only one can be connected to the physical camera unit at a time. A natural way to implement this is to add a Preview widget to a screen's contents, then connect to the camera unit `on_enter` and disconnect `on_pre_leave`. Or if using a ModalView, Popup, or MDDialog use `on_open` and `on_pre_dismiss`. The C4K-Photo-Example illustrates the screen case. 251 | 252 | The other examples simply connect the camera **after** `on_start()` and disconnect `on_stop()`. The **after** `on_start` is required on Android; both for reliable camera function, and so that the camera is only connected after CAMERA permission has been granted. 253 | 254 | There is one special case. For the **first** screen, Kivy calls `on_enter` before `on_start`. This violates the requirements described in the previous paragraph. For a Preview used in the **first screen only**, connect the camera after `on_start` and after CAMERA permission has been granted, and not from `on_enter`. 255 | 256 | ### Preview Widget Properties 257 | 258 | The widget has these Kivy properties that configure its layout: 259 | 260 | #### aspect_ratio 261 | A string property. Either '4:3' (default) or '16:9'. 262 | 263 | #### letterbox_color 264 | A color property. Geometry tells us that layout of a fixed aspect ratio widget almost always results in letterboxing. Art tells us we can hide letterboxes by filling them with a color that matches the surrounding widgets. 265 | 266 | #### orientation 267 | A string property. One of 'portrait' , 'landscape', 'same', 'opposite'. The default is the 'same' as the device or window. 
This choice modifies effective resolution, [see](https://github.com/Android-for-Python/camera4kivy#cropped-sensor-resolution). The best resolution is always obtained with 'same'. 268 | 269 | ### Preview Widget API 270 | 271 | The widget has these methods: 272 | 273 | #### Connect Camera 274 | 275 | This may only be called after `on_start()`. 276 | 277 | ```python 278 | def connect_camera(self,kwargs): 279 | ``` 280 | 281 | Optional arguments: 282 | 283 | ##### camera_id 284 | Specify which camera to connect to. For example `camera_id = 'front'`. A string containing an integer (default '0'), or on Android or iOS 'back' (default), or 'front'. 285 | 286 | ##### mirrored 287 | Mirrors the Preview image, default is `True`. This option is ignored on Android and iOS where by convention 'front' is always mirrored and 'back' is never mirrored. This option should usually be `True` for any camera facing the user, and `False` for any camera not facing the user. 288 | 289 | Captures are never mirrored, except a screenshot capture if the Preview is mirrored. 290 | 291 | The pixels argument to `image_analysis_callback()` is never mirrored, if a Preview is mirrored a `texture` result of any image analysis will be automatically mirrored in the `canvas_instructions_callback()` by the tex_size and tex_pos arguments. If image analysis genetates image annotation locations, these locations must be adjusted by the app for a mirrored preview. See the [Image Analysis Section](https://github.com/Android-for-Python/camera4kivy#image-analysis) for code fragments and links to examples. 292 | 293 | ##### filepath_callback 294 | On a capture of a photo, video, or screenshot, this argument specifies a method to receive the path and name of the saved file. For example `filepath_callback = my_method`, where `def my_method(self, path):` is an app supplied method with a string argument. 295 | 296 | Photo and Video captures may be implemented in a different thread. 
The only way to know that a capture is complete is a filepath_callback. There may be [latency](https://github.com/Android-for-Python/Camera4Kivy#latency) concequences of disconnecting the camera after initiating a capture and before a filepath_callback. 297 | 298 | The filepath_callback can also be used to reset any 'video recording' indicator in the UI. While video recording is normally terminated by the user, it can also be terminated by app pause, device rotation, or camera selection. In these last cases the any recording indicator can be reset by the callback, which occurs on any capture termination regardless of cause. 299 | 300 | ##### sensor_resolution 301 | Overrides the default sensor resolution, which is the highest resolution available, except Raspberry Pi where it is (1024, 768). Tuple of two integers, for example `sensor_resolution = (640, 480)`. The resulting capture resolution obtained depends on the behavior of the camera provider (for example it is ignored by GStreamer). The capture resolution also depends on the relative orientation and aspect ratio of the Preview. Treat the value specified as a request that may not be exactly honored. 302 | 303 | ##### sensor_rotation 304 | On Picamera2 sensor_rotation can be sepecifed in `[0, 90, 180, 270]`, where `0` is the default landscape orientation. 305 | 306 | ##### default_zoom 307 | Set the default zoom when the camera is connected. On Android `0.5` is the default value. 308 | 309 | ##### analyze_pixels_resolution 310 | Sets the pixels resolution passed by `analyze_pixels_callback()`. A scalar, representing the number of pixels on the long edge, the short edge is determined using the aspect ratio. For example `analyze_pixels_resolution = 720`. The default is the minimum of cropped sensor resolution and 1024. 311 | 312 | As an alternative, sometimes the analysis package will need to set the resolution. 
This is done with `self.auto_analyze_resolution` as described in [Analyze Configuration](https://github.com/Android-for-Python/camera4kivy#analyze-configuration). 313 | 314 | ##### enable_analyze_pixels 315 | Use `enable_analyze_pixels = True` to enable the `analyze_pixels_callback()` 316 | 317 | ##### enable_analyze_imageproxy 318 | Use `enable_analyze_imageproxy = True` to enable the `analyze_imageproxy_callback()` 319 | Android only. 320 | 321 | ##### enable_zoom_gesture 322 | Default True. Android and iOS only. 323 | 324 | ##### enable_focus_gesture 325 | Default True. Android only. 326 | 327 | ##### enable_video 328 | Default True. Android only. 329 | 330 | By default Preview is configured to implement photo and video capture. Some low end Android devices may not behave correctly if video capture is enabled and `enable_analyze_pixels = True`. In this case set `enable_video = False`. In part this is due to changes to the Android camera api implementatiom. 331 | 332 | ##### imageproxy_data_format 333 | Applies only to the Android ImageProxy api. 'yuv420' (default) or 'rgba'. 334 | 335 | 336 | #### Disconnect Camera 337 | 338 | Always do this, it is important to disconnect the camera when not in use. 339 | 340 | ```python 341 | def disconnect_camera(self): 342 | ``` 343 | 344 | A disconnect while a capture is in progress generally results in termination of the capture and saving the result as usual. 345 | 346 | However on Android a disconnect immediately after a capture has be initiated may prevent the start of the file save and nothing is saved. 
A warning message is reported by filepath_callback, this may be filtered as shown in [this example](https://github.com/Android-for-Python/c4k_photo_example/blob/main/applayout/toast.py#L15) 347 | 348 | #### Capture 349 | ```python 350 | def capture_photo(self, kwargs): 351 | def capture_screenshot(self, kwargs): 352 | def capture_video(self, kwargs): # Android only 353 | def stop_capture_video(self): # Android only 354 | ``` 355 | 356 | Video capture is only available on Android, Picamera2, or OpenCV camera providers. Capturing audio with video is available on Android and Picamera2. OpenCV video quality is poor. 357 | 358 | Captures are never mirrored, except a screenshot capture if the Preview is mirrored. Capture resolution is discussed [here](https://github.com/Android-for-Python/Camera4Kivy#capture-resolution). 359 | 360 | Captures are saved to `//.jpg` or `.mp4`. 361 | 362 | 363 | The default values are as follows: 364 | 365 | - On a desktop `` is the current directory `.`, on Android `` is `DCIM/`, and on iOS is the Photos App. 366 | 367 | - The value of `` is the current date, the format is 'YYYY_MM_DD'. 368 | 369 | - The value of `` is the current time, the format is 'hh_mm_ss_xx' (xx is 1/100 sec). 370 | 371 | The [filepath_callback](https://github.com/Android-for-Python/Camera4Kivy#filepath_callback) occurs on capture completion, with an argument that is the actual path for a particular capture. 372 | 373 | Be aware that on Android >= 10 shared storage files are saved in a database, called MediaStore, and not in a file system. The architecture of Android storage is outside the scope of this document. 374 | 375 | The values of ``, ``, and `` can be modified with optional keyword arguments to the three `capture_` methods. 376 | 377 | For example `self.capture_photo(subdir='foo', name='bar')` 378 | 379 | ##### location 380 | 381 | The value replaces the default value of ``. 382 | 383 | - On a desktop the value is a directory that must exist. 
384 | 385 | - On Android and iOS the value can only be `'shared'` or `'private'`, other values default to `'shared'`. 386 | 387 | - On Android the value `'shared'` specifies Android shared storage `DCIM/`. The value `'private'` specifies [app local storage](https://github.com/kivy/python-for-android/blob/develop/doc/source/apis.rst#storage-paths) `app_storage_path()/DCIM`. If you want a different location use `'private'` and move the resulting file based on the path provided by filepath_callback. 388 | 389 | - On iOS the value `'shared'` specifies the iOS Photos App. The value `'private'` specifies app local storage. For `'shared'` the filepath_callback returns an empty string, for `'private'` it returns the paths to the file in app local storage. 390 | 391 | 392 | ##### subdir 393 | 394 | The value replaces the default value of ``. The subdirectory will be created or added to the Android MediaStore path. For iOS when `location='shared'` this is ignored. 395 | 396 | ##### name 397 | 398 | The value replaces the default value of ``, the `.jpg` or `.mp4` extensions will be added automatically. 399 | 400 | Note that it is a characteristic of Android MediaStore that a second capture with the same subdir and name values as the first will not overwrite the first. It will create a second file named `/ (1).jpg`, this name is created by Android MediaStore. The MediaStore may crash if it creates too many (31 ?) such names. 401 | 402 | For iOS when `location='shared'` this is ignored. 403 | 404 | #### Select Camera 405 | 406 | Change the currently connected camera, camera_id must specify a physically connected camera. 407 | 408 | ```python 409 | def select_camera(self, camera_id): 410 | ``` 411 | 412 | #### Zoom 413 | On Android and Picamera2, zoom_delta() is called by pinch/spread gesture unless disabled. 414 | On iOS only, zoom_abs() is called by pinch/spread gesture unless disabled. 
415 | ```python 416 | def zoom_delta(self, delta_scale): 417 | def zoom_abs(self, scale): 418 | ``` 419 | 420 | #### Pan/scroll 421 | On Picamera2 pan/scroll a zoom'd image with a drag gesture. 422 | ```python 423 | def drag(self, delta_x, delta_y): 424 | ``` 425 | 426 | #### Flash 427 | Android only, and for capture photo only, the value is ignored for video and data. The `state` argument must be in `['on', 'auto', 'off']`, alternatively if `state=None` sequential calls sequence through that list. Note that 'on' always turns on the flash around the time a photo is captured, 'auto' only does this if the light level is low enough. 428 | 429 | ```python 430 | def flash(self, state = None) 431 | ``` 432 | 433 | #### Torch 434 | Android only, immediately turns the torch on in any use case. The `state` argument must be in `['on', 'off']` 435 | 436 | ```python 437 | def flash(self, state) 438 | ``` 439 | 440 | #### Focus 441 | Android only, if available on device. Called by a tap gesture unless disabled 442 | ```python 443 | def focus(x, y): 444 | ``` 445 | 446 | #### camera_connected 447 | 448 | This is a boolean variable describing the camera state. It is `True` immediatedal *after* the camera is connected, and `False` immediately *before* the camera is disconnected. 449 | 450 | ## Image analysis 451 | 452 | ### Overview and Examples 453 | 454 | The programming pattern for video data analysis is to create a subclass of `Preview` and implement two predefined methods. One to analyze the frame, the second to modify the Preview image with the analysis result. 
In general like this: 455 | 456 | ```python 457 | class CustomAnalyzer(Preview): 458 | def analyze_pixels_callback(self, pixels, size, image_pos, 459 | image_scale, mirror): 460 | ### Add your pixels analysis code here 461 | ### Add your coordinate transforms here 462 | 463 | def canvas_instructions_callback(self, texture, tex_size, tex_pos): 464 | ### Add your Preview annotation or image replacement code here 465 | ``` 466 | 467 | The `analyze_pixels_callback()` is called each time new pixels are available, and the `canvas_instructions_callback()` is called on each iteration of the Kivy event loop. The availability of new pixels depends on the camera data rate, and the latency of any analysis code included with the previous call of `analyze_pixels_callback()`. Thus `analyze_pixels_callback()` is typically called at a rate less than `canvas_instructions_callback()`, so the annotation update rate is typically less than the image frame rate. 468 | 469 | On Android this is an alternative to analyze_pixels_callback(), it is used for Android only analysis packages. 470 | ```python 471 | def analyze_imageproxy_callback(self, image_proxy, image_pos, 472 | image_scale, mirror, degrees): 473 | ### Add your imageproxy specific analysis code here 474 | ``` 475 | 476 | Keep to this pattern. Perform analysis and coordinate transforms in the 'analyze_pixel_callback' (or imageproxy) method. And in 'canvas_instructions_callback' only display the results of previous calculations. Data passed from the analysis method to the display method must be passed in a thread safe way. 477 | 478 | The `analyze_pixels_callback` method is used to analyze its RGBA `pixels` and `size` arguments. The `pos`, `scale`, and `mirror` arguments enable mapping the analyzed pixels coordinates to Preview coordinates. The `mirror` parameter is required because `pixels` image is never mirrored, but the Preview may be. 
An example: 479 | 480 | ```python 481 | def analyze_pixels_callback(self, pixels, image_size, image_pos, 482 | scale, mirror): 483 | # Convert the image encoding 484 | pil_image = Image.frombytes(mode='RGBA', size=image_size, 485 | data= pixels) 486 | # Analyze the image 487 | barcodes = pyzbar.decode(pil_image, symbols=[ZBarSymbol.QRCODE]) 488 | # Collect the results and transform the coordinates 489 | found = [] 490 | for barcode in barcodes: 491 | text = barcode.data.decode('utf-8') 492 | if 'https://' in text or 'http://' in text: 493 | x, y, w, h = barcode.rect 494 | # Map Zbar coordinates to Kivy coordinates 495 | y = image_size[1] -y -h 496 | # Map Analysis coordinates to Preview coordinates 497 | if mirror: 498 | x = image_size[0] -x -w 499 | x = round(x * scale + image_pos[0]) 500 | y = round(y * scale + image_pos[1]) 501 | w = round(w * scale) 502 | h = round(h * scale) 503 | found.append({'x':x, 'y':y, 'w':w, 'h':h, 't':text}) 504 | # Save the results in a thread safe way 505 | self.make_thread_safe(list(found)) ## A COPY of the list 506 | ``` 507 | 508 | Analysis and canvas annotation callbacks occur on different threads. The result of the analysis must be saved in a thread safe way, so that it is available for the canvas callback. We pass a **copy** of the result to: 509 | 510 | ```python 511 | @mainthread 512 | def make_thread_safe(self, found): 513 | if self.camera_connected: 514 | self.annotations = found 515 | else: 516 | self.annotations = [] 517 | ``` 518 | Note that we null the application state when the camera is not connected. This prevents saved annotations from being shown when a camera is re-connected, due to the multi-threaded implementation. 519 | 520 | Then add the thread safe annotations to the canvas. 521 | 522 | ```python 523 | def canvas_instructions_callback(self, texture, tex_size, tex_pos): 524 | # Add the annotations determinined during analyze callback. 
525 | Color(1,0,0,1) 526 | for r in self.annotations: 527 | Line(rectangle=(r['x'], r['y'], r['w'], r['h']), width = dp(2)) 528 | ``` 529 | We can also replace the existing Preview image with some other texture, positioned with the 'tex_size' and 'tex_pos' arguments. Use a thread safe texture created as a result of some image analysis like this: 530 | 531 | ```python 532 | def canvas_instructions_callback(self, texture, tex_size, tex_pos): 533 | # Add a different preview image, which is a transformed camera image 534 | # this image has 'analyze_pixels_resolution' 535 | if self.analyzed_texture: 536 | # 'self.analyzed_texture' contents created 537 | # by analyze_pixels_callback() 538 | Color(1,1,1,1) 539 | Rectangle(texture= self.analyzed_texture, 540 | size = tex_size, pos = tex_pos) 541 | ``` 542 | The new texture will be automatically mirrored by 'text_size' and 'tex_pos' if required. These 'text_size' and 'tex_pos' arguments are for adding a texture, and not valid for coordinate calculations as they are potentially mirrored. 543 | 544 | See the OpenCV example for details on creating a thread safe texture. 545 | 546 | The above code fragments are fully implemented in two examples: [QR Reader](https://github.com/Android-for-Python/c4k_qr_example/blob/main/qrreader.py), and [OpenCV](https://github.com/Android-for-Python/c4k_opencv_example/blob/main/edgedetect.py). Similar examples exhibiting this pattern are [tflite](https://github.com/Android-for-Python/c4k_tflite_example/blob/main/classifyobject.py) and [mlkit](https://github.com/Android-for-Python/c4k_mlkit_example/blob/main/facedetect.py). 547 | 548 | ### User Interaction 549 | 550 | But wait, there is more, a user can interact with the analysis results in the Preview. The Preview subclass may have multiple inheritance, for example to allow the user to interact with annotations on the screen. 
The QR Reader example illustrates this, by inheriting from a gestures package: 551 | 552 | ```python 553 | class QRReader(Preview, CommonGestures): 554 | ``` 555 | 556 | That package's gesture callbacks, and an annotation location test are used to initiate some action. In this case open a web browser based on a URL in a QR code, and a long press or mouse double click inside the box drawn around the QR code. 557 | 558 | ```python 559 | def cg_long_press(self, touch, x, y): 560 | self.open_browser(x, y) 561 | 562 | def cg_double_tap(self, touch, x, y): 563 | self.open_browser(x, y) 564 | 565 | def open_browser(self, x, y): 566 | for r in self.annotations: 567 | if x >= r['x'] and x <= r['x'] + r['w'] and\ 568 | y >= r['y'] and y <= r['y'] + r['h']: 569 | webbrowser.open_new_tab(r['t']) 570 | ``` 571 | 572 | ### Coordinates and image encoding 573 | 574 | **Important**, be aware of different coordinate systems and image encoding. A test with a print statement of third party analysis code coordinates can be valuable. 575 | 576 | - Kivy image coordinates have their origin at the bottom left. Most other systems use top left (with positive y increaing downwards) as their origin. 577 | 578 | - Kivy image properties are a (width, height) tuple. Some packages, notably numpy images, reverse the order to (height, width). 579 | 580 | - Kivy pixels are encoded RGBA. Third party analysis code may expect some other encoding, both Pillow and OpenCV provide encoding converions. Some image recodings are computationally expensive. 581 | 582 | - The 'canvas_instructions_callback()' arguments 'tex_size' and 'tex_pos' are potentially mirrored and their values are not valid for coordinate mapping. Perform mapping in 'analyze_pixels_callback()' using the 'image_size' and 'image_pos' arguments. 
583 | 584 | 585 | ### Analysis Configuration 586 | 587 | Image analysis is enabled with a parameter to `connect_camera()`: 588 | 589 | `connect_camera(enable_analyze_pixels = True)` 590 | 591 | To change the default analysis resolution specify the number of pixels in the long edge (the default is the smaller of 1024 or the cropped resolution) as shown below. 592 | 593 | Also some low end Android devices may not support being configured to analyze images and record video at the same time. Since by default Preview is configured to record video, if your device has an unexpected issue set `enable_video=False`. 594 | 595 | ```python 596 | connect_camera(enable_analyze_pixels = True, 597 | analyze_pixels_resolution = 720, 598 | enable_video = False) 599 | ``` 600 | 601 | The `analyze_pixels_resolution` option provides analysis images with the same orientation and aspect ratio as the Preview. 602 | 603 | As an alternative the analysis software may set `self.auto_analyze_resolution`, a two element list of [width, height]. In this case the aspect ratio is not necessarily maintained for analysis, the `analyze_pixels_callback()` `image_scale` parameter is a two element list [width, height] allowing scaling of any results. As used in [c4k_tflite_example classifyobject.py](https://github.com/Android-for-Python/c4k_tflite_example/blob/main/classifyobject.py). 604 | 605 | On Android only, the imageproxy api is an alternative to the pixels api. 606 | 607 | `connect_camera(enable_analyze_imageproxy = True)` 608 | 609 | The imageproxy api provides images in landscape, regardless of the preview orientation. A degrees parameter enables adjusting the analysis accordingly. Android implements automatic changes to frame rate and resolution in the case of slow analysis. 610 | 611 | ### Debugging 612 | 613 | Check that the app analysis code is doing what you expect. If the result of this is coordinates (most cases) then check these with a print statement. 
Move whatever you expect to be detected to the four corners of the camera view. Look at the printed values, do they reflect the analysed image pixels size and orientation? Repeat for the coordinates after they are mapped to a Kivy widget. 614 | 615 | Measure the time the analysis algorithm takes to execute on one frame. Do this in the cases of detection and nothing to detect. This along with some overhead will define the maximum analysis frame rate. The [tflite example](https://github.com/Android-for-Python/c4k_tflite_example/blob/main/classifyobject.py) monitors analysis frame rate as part of its normal operation. 616 | 617 | ### Performance 618 | 619 | The camera provides a stream of images for analysis via `analyze_pixels_callback()`. Images arrive at typically 30 fps, so given some overhead the app has probably less than 30 ms to do the analysis. 620 | 621 | The api has a builtin mechanism so that images are analyzed only when the previous analysis is complete. This mechanism does not alter the canvas instructions frame rate. If the analysis results are 'jerky' it is because the analysis algorithm is slow for the hardware. 622 | 623 | Conversely, you can explicitly decrease the analysis frame rate without changing anything else using a flag set using Kivy Clock. Clock rates close to the actual analyze rate will exhibit jitter. For example for a one second analyze interval: 624 | 625 | ```python 626 | self.enable_analyze_frame = True 627 | Clock.schedule_interval(self.analyze_filter,1) 628 | 629 | def analyze_filter(self, dt): 630 | self.enable_analyze_frame = True 631 | 632 | def analyze_pixels_callback(self, pixels, image_size, image_pos, 633 | scale, mirror): 634 | if self.enable_analyze_frame: 635 | self.enable_analyze_frame = False 636 | # place usual analyse code inside this if block 637 | ``` 638 | One could modify this in various ways, for example a single sample after some delay. 
639 | 640 | One way to improve performance is to reduce the `analyze_pixels_resolution` as shown above. This option may alter the qualitative behavior, perhaps because of resolution bias in some third party analyzers. Experiment, some analysis code will work well at much less than VGA resolution. 641 | 642 | The analysis code must be lean. So for example Keras is a complete development environment, a whole bunch of stuff you don't need to run an inference. Port the application to Tensorflow Lite, then use the tflite-runtime not the full Tensorflow Lite. 643 | 644 | ## Camera Behavior 645 | 646 | ### A Physical Camera 647 | 648 | A camera is a single physical device with a software api, it is not a software object though it can look like one. It is a physical object with physical constraints. 649 | 650 | ### Resolution 651 | 652 | In the context of a camera, resolution has several uses. It is always a tuple, in this context (width, height). 653 | 654 | #### Sensor Resolution 655 | 656 | This is a physical property of the sensor module. The default behavior of `connect_camera()` is to use the maximum resolution provided by the camera provider. It can be overridden with the `sensor_resolution` option, but in general camera providers take this as a hint which may be ignored or re-interpreted. `GStreamer` ignores this option, `picamera` may require it. 657 | 658 | #### Cropped Sensor Resolution 659 | 660 | The [sensor resolution](https://github.com/Android-for-Python/Camera4Kivy#sensor-resolution) cropped according to the orientation of the sensor, the orientation of the Preview, and the aspect ratio of the Preview. This will impact the capture resolution, for example a 16:9 aspect image may be cropped from a 4:3 sensor image. Thus the product of width and height will be less for 16:9 than for 4:3 in this case. 
661 | 662 | Rotating a mobile device also rotates the sensor, the highest resolution images are obtained when the Preview widget orientation is the same as the device orientation. Conversely for example a landscape preview with the device in portrait orientation will result in an image width resolution that is the sensor height resolution. 663 | 664 | This behaviour is a characteristic of the camera sensor having physical constraints. Notably that image sensors are usually not square, they are rectangular and give the highest quality results when the sensor has the same orientation as the captured image. This is mostly transparent to the app user unless the sensor resolution is low, or a photo capture has lower than expected resolution. 665 | 666 | #### Preview Resolution 667 | 668 | Is a physical property of the screen ([display resolution](https://github.com/Android-for-Python/Camera4Kivy#display-resolution)) and the Preview widget size on the screen. The preview resolution can be less than or greater than the cropped sensor resolution. 669 | 670 | #### Capture Resolution 671 | 672 | The resolution of a capture. A photo capture resolution is [cropped sensor resolution](https://github.com/Android-for-Python/Camera4Kivy#cropped-sensor-resolution). Video resolution is one of the standard resolutions, depending on the cropped sensor resolution. A screenshot capture size in pixels is the [Preview resolution](https://github.com/Android-for-Python/Camera4Kivy#preview-resolution). 673 | 674 | #### Analysis Resolution 675 | 676 | Analysis resolution is less than or equal to [cropped sensor resolution](https://github.com/Android-for-Python/Camera4Kivy#cropped-sensor-resolution). It may be useful to reduce this in order to decrease analysis time, at the cost of analysis resolution. 677 | 678 | The `analyze_pixels_callback()` resolution may be changed with the `analyze_pixels_resolution` camera connect option. 
The scale parameter allows re-scaling of the analysis results to the Preview resolution. 679 | 680 | The `analyze_imageproxy_callback()` implements a graceful degradation mechanism. This automatically reduces frame rate and/or image resolution. A very slow frame analysis will case the feed to stop. 681 | 682 | #### Display Resolution. 683 | 684 | Nothing to do with a camera, it is a physical property of a screen. A scalar measured in dpi. 685 | 686 | ## Camera Provider 687 | 688 | Camera4Kivy depends on a 'camera provider' to access the OS camera api. On most platforms this uses the same provider as Kivy, with modified defaults. On Android, iOS, and MacOS there is only a single provider. 689 | 690 | | Platform | Provider | Requires | 691 | |-------------|---------------|----------------| 692 | | Windows | [OpenCV](https://github.com/Android-for-Python/camera4kivy#opencv) | 693 | | | [Gstreamer](https://github.com/Android-for-Python/camera4kivy#gstreamer) | 694 | | Macos | [AVFoundation](https://github.com/Android-for-Python/camera4kivy#avfoundation)| OSX >= 10.7 | 695 | | Linux | [Gstreamer](https://github.com/Android-for-Python/camera4kivy#gstreamer) | 696 | | | [OpenCV](https://github.com/Android-for-Python/camera4kivy#opencv) | 697 | | Rasberry | [Picamera](https://github.com/Android-for-Python/camera4kivy#picamera)(2) | | 698 | | | [Gstreamer](https://github.com/Android-for-Python/camera4kivy#gstreamer) | | 699 | | |[OpenCV](https://github.com/Android-for-Python/camera4kivy#opencv) | | 700 | | Android | [CameraX](https://github.com/Android-for-Python/camera4kivy#android-camera-provider) | Android >= 5.0 | 701 | | iOS | [AVFoundation](https://github.com/Android-for-Python/camera4kivy#avfoundation) | 702 | 703 | Like Kivy, the first available provider is selected. Some camera provider specific behavior should be expected. 
For example a switch to a camera that does not exist will be ignored on MacOS and Rasberry Pi, but generate a screen message with OpenCV or GStreamer. Camera resolution defaults to the maximum available camera provider resolution, except on Raspberry Pi where the default is (1024, 768). 704 | 705 | You can remove a camera provider ('picamera' in the example below) from the above lists by inserting this code **before** `from kivy.app import App`. 706 | 707 | ```python 708 | from kivy import kivy_options 709 | providers= list(kivy_options['camera']) 710 | providers.remove('picamera') 711 | kivy_options['camera'] = tuple(providers) 712 | ``` 713 | 714 | You can read back the chosen camera provider with: 715 | 716 | ```python 717 | from camera4kivy import CameraProviderInfo 718 | 719 | provider_string = CameraProviderInfo().get_name() 720 | ``` 721 | 722 | ### Android Camera Provider 723 | 724 | `cd ` 725 | 726 | `git clone https://github.com/Android-for-Python/camerax_provider.git` 727 | 728 | `rm -rf camerax_provider/.git` 729 | 730 | Set `p4a.hook` to enable the app's use of the camera provider. 731 | 732 | `p4a.hook = camerax_provider/gradle_options.py` 733 | 734 | ### OpenCV 735 | 736 | `pip3 install opencv-python` 737 | 738 | Video recording (no audio) is available, but uncompressed, and may be low quality. 739 | 740 | ### GStreamer 741 | 742 | Depends on the Linux flavor, but commonly: 743 | 744 | `sudo apt-get install gstreamer-1.0` 745 | 746 | `sudo apt-get install gstreamer1.0-dev` 747 | 748 | ### Picamera 749 | This uses either the Picamera or Picamera2 package, depending on which is installed (they are mutually exclusive). 750 | 751 | If Picamera2 is installed there are additional features for native Pi cameras: higher resolution photos, video (optionally with audio), zoom, pan when zoom'd, sensor rotation. 752 | 753 | For Picamera2 USB cameras are available with physical rotation support, including for photo and screen shot capture. 
However zoom and video capture is not available. 754 | 755 | ### AVFoundation 756 | Pre-installed 757 | 758 | 759 | ## Known Behavior 760 | 761 | ### Behavior: Android .mp4 Orientation 762 | 763 | Video file orientation is incorrect if the preview orientation is not the same as the device orientation. Do not use this layout configuration when recording video. [Google issue tracker](https://issuetracker.google.com/issues/201085351). 764 | 765 | ### Behavior: Android .jpg Orientation. 766 | 767 | Some image viewers (including Kivy Image widget) will incorrectly display a .jpg as rotated by 90 degrees. This occurs if the capture preview orientation is not the same as the device orientation, and the third party viewer does not use the Exif metadata. 768 | 769 | ### Behavior: Android armeabi-v7a build installed on an arm64-v8a device 770 | 771 | The implementation of Google's camerax gradle dependencies appears to be architecture specific, an app built for armeabi-v7a will crash on an arm64-v8a device. To run on an arm64-v8a device you **must** build for arm64-v8a. 772 | 773 | ### Behavior: Android "No supported surface combination" 774 | 775 | `No supported surface combination is found for camera device - Id : 0. May be attempting to bind too many use cases.` 776 | 777 | On very low end Android devices the camera may have limited hardware resources. So far only one device has exhibited this issue. By default c4k configures connect_camera() for either 'photo and video` or 'photo and image analysis' - this keeps the api as simple as possible. But in this case it is too expensive for the hardware. We can override these configurations. 778 | 779 | If the connection is only used for photo use `connect_camera(enable_video = False, ...other options..)`. If the connection is only used for video or data analysis use `connect_camera(enable_photo = False, ...other options..)`. 
780 | 781 | -------------------------------------------------------------------------------- /index.md: -------------------------------------------------------------------------------- 1 | Camera4Kivy 2 | =========== 3 | 4 | *Yet Another Camera for Kivy* 5 | 6 | **2023-11-13 This repository is archived.** 7 | 8 | 2023/02/09 : Android users: camerax_provider has been updated to version 0.0.3 9 | 10 | - [Overview](https://github.com/Android-for-Python/Camera4Kivy#overview) 11 | - [Install](https://github.com/Android-for-Python/Camera4Kivy#install) 12 | * [Install Camera4Kivy on Desktop](https://github.com/Android-for-Python/Camera4Kivy#install-camera4kivy-on-desktop) 13 | * [Install Camera4Kivy on Android](https://github.com/Android-for-Python/Camera4Kivy#install-camera4kivy-on-android) 14 | + [buildozer.spec:](https://github.com/Android-for-Python/Camera4Kivy#buildozerspec-) 15 | + [Run Time Permissions](https://github.com/Android-for-Python/Camera4Kivy#run-time-permissions) 16 | * [Install Camera4Kivy on iOS](https://github.com/Android-for-Python/Camera4Kivy#install-camera4kivy-on-ios) 17 | + [Run Time Permissions](https://github.com/Android-for-Python/Camera4Kivy#run-time-permissions-1) 18 | - [Examples](https://github.com/Android-for-Python/Camera4Kivy#examples) 19 | * [Tested Examples](https://github.com/Android-for-Python/Camera4Kivy#tested-examples) 20 | + [C4K Photo Example](https://github.com/Android-for-Python/Camera4Kivy#c4k-photo-example) 21 | + [C4K QR Example](https://github.com/Android-for-Python/Camera4Kivy#c4k-qr-example) 22 | + [C4K OpenCV Example](https://github.com/Android-for-Python/Camera4Kivy#c4k-opencv-example) 23 | + [C4K MLKit Example](https://github.com/Android-for-Python/Camera4Kivy#c4k-mlkit-example) 24 | + [C4K TFLite Example](https://github.com/Android-for-Python/Camera4Kivy#c4k-tflite-example) 25 | * [Tested Platforms](https://github.com/Android-for-Python/Camera4Kivy#tested-platforms) 26 | - [Preview 
Widget](https://github.com/Android-for-Python/Camera4Kivy#preview-widget) 27 | * [Preview Widget Properties](https://github.com/Android-for-Python/Camera4Kivy#preview-widget-properties) 28 | + [aspect_ratio](https://github.com/Android-for-Python/Camera4Kivy#aspect-ratio) 29 | + [letterbox_color](https://github.com/Android-for-Python/Camera4Kivy#letterbox-color) 30 | + [orientation](https://github.com/Android-for-Python/Camera4Kivy#orientation) 31 | * [Preview Widget API](https://github.com/Android-for-Python/Camera4Kivy#preview-widget-api) 32 | + [Connect Camera](https://github.com/Android-for-Python/Camera4Kivy#connect-camera) 33 | - [camera_id](https://github.com/Android-for-Python/Camera4Kivy#camera-id) 34 | - [mirrored](https://github.com/Android-for-Python/Camera4Kivy#mirrored) 35 | - [filepath_callback](https://github.com/Android-for-Python/Camera4Kivy#filepath_callback) 36 | - [sensor_resolution](https://github.com/Android-for-Python/Camera4Kivy#sensor_resolution) 37 | - [sensor_rotation](https://github.com/Android-for-Python/Camera4Kivy#sensor_rotation) 38 | - [default_zoom](https://github.com/Android-for-Python/Camera4Kivy#default_zoom) 39 | - [analyze_pixels_resolution](https://github.com/Android-for-Python/Camera4Kivy#analyze_pixels_resolution) 40 | - [enable_analyze_pixels](https://github.com/Android-for-Python/Camera4Kivy#enable_analyze_pixels) 41 | - [enable_analyze_imageproxy](https://github.com/Android-for-Python/Camera4Kivy#enable_analyze_imageproxy) 42 | - [enable_zoom_gesture](https://github.com/Android-for-Python/Camera4Kivy#enable_zoom_gesture) 43 | - [enable_focus_gesture](https://github.com/Android-for-Python/Camera4Kivy#enable_focus_gesture) 44 | - [imageproxy_data_format](https://github.com/Android-for-Python/Camera4Kivy#imageproxy_data_format) 45 | + [Disconnect Camera](https://github.com/Android-for-Python/Camera4Kivy#disconnect-camera) 46 | + [Capture](https://github.com/Android-for-Python/Camera4Kivy#capture) 47 | - 
[location](https://github.com/Android-for-Python/Camera4Kivy#location) 48 | - [subdir](https://github.com/Android-for-Python/Camera4Kivy#subdir) 49 | - [name](https://github.com/Android-for-Python/Camera4Kivy#name) 50 | + [Select Camera](https://github.com/Android-for-Python/Camera4Kivy#select-camera) 51 | + [Zoom](https://github.com/Android-for-Python/Camera4Kivy#zoom) 52 | + [Pan/scroll](https://github.com/Android-for-Python/Camera4Kivy#panscroll) 53 | + [Flash](https://github.com/Android-for-Python/Camera4Kivy#flash) 54 | + [Torch](https://github.com/Android-for-Python/Camera4Kivy#torch) 55 | + [Focus](https://github.com/Android-for-Python/Camera4Kivy#focus) 56 | + [camera_connected](https://github.com/Android-for-Python/Camera4Kivy#camera_connected) 57 | - [Image analysis](https://github.com/Android-for-Python/Camera4Kivy#image-analysis) 58 | * [Overview and Examples](https://github.com/Android-for-Python/Camera4Kivy#overview-and-examples) 59 | * [User Interaction](https://github.com/Android-for-Python/Camera4Kivy#user-interaction) 60 | * [Coordinates and image encoding](https://github.com/Android-for-Python/Camera4Kivy#coordinates-and-image-encoding) 61 | * [Analysis Configuration](https://github.com/Android-for-Python/Camera4Kivy#analysis-configuration) 62 | * [Debugging](https://github.com/Android-for-Python/Camera4Kivy#debugging) 63 | * [Performance](https://github.com/Android-for-Python/Camera4Kivy#performance) 64 | - [Camera Behavior](https://github.com/Android-for-Python/Camera4Kivy#camera-behavior) 65 | * [A Physical Camera](https://github.com/Android-for-Python/Camera4Kivy#a-physical-camera) 66 | * [Resolution](https://github.com/Android-for-Python/Camera4Kivy#resolution) 67 | + [Sensor Resolution](https://github.com/Android-for-Python/Camera4Kivy#sensor-resolution) 68 | + [Cropped Sensor Resolution](https://github.com/Android-for-Python/Camera4Kivy#cropped-sensor-resolution) 69 | + [Preview 
Resolution](https://github.com/Android-for-Python/Camera4Kivy#preview-resolution) 70 | + [Capture Resolution](https://github.com/Android-for-Python/Camera4Kivy#capture-resolution) 71 | + [Analysis Resolution](https://github.com/Android-for-Python/Camera4Kivy#analysis-resolution) 72 | + [Display Resolution.](https://github.com/Android-for-Python/Camera4Kivy#display-resolution) 73 | - [Camera Provider](https://github.com/Android-for-Python/Camera4Kivy#camera-provider) 74 | * [Android Camera Provider](https://github.com/Android-for-Python/Camera4Kivy#android-camera-provider) 75 | * [OpenCV](https://github.com/Android-for-Python/Camera4Kivy#opencv) 76 | * [GStreamer](https://github.com/Android-for-Python/Camera4Kivy#gstreamer) 77 | * [Picamera](https://github.com/Android-for-Python/Camera4Kivy#picamera) 78 | * [AVFoundation](https://github.com/Android-for-Python/Camera4Kivy#avfoundation) 79 | - [Known Behavior](https://github.com/Android-for-Python/Camera4Kivy#known-behavior) 80 | * [Behavior: Android .mp4 Orientation](https://github.com/Android-for-Python/Camera4Kivy#behavior-android-mp4-orientation) 81 | * [Behavior: Android .jpg Orientation.](https://github.com/Android-for-Python/Camera4Kivy#behavior-android-jpg-orientation) 82 | * [Behavior: Android armeabi-v7a build installed on an arm64-v8a device](https://github.com/Android-for-Python/Camera4Kivy#behavior-android-armeabi-v7a-build-installed-on-an-arm64-v8a-device) 83 | * [Behavior: Android "No supported surface combination"](https://github.com/Android-for-Python/Camera4Kivy#behavior-android-no-supported-surface-combination) 84 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=42", 4 | "wheel" 5 | ] 6 | build-backend = "setuptools.build_meta" 7 | -------------------------------------------------------------------------------- 
/setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = camera4kivy 3 | version = 0.3.3 4 | author = Robert Flatt 5 | description = Yet Another Camera for Kivy 6 | long_description = file: index.md 7 | long_description_content_type = text/markdown 8 | url = https://github.com/Android-for-Python/Camera4Kivy 9 | classifiers = 10 | Intended Audience :: Developers 11 | Topic :: Software Development :: Build Tools 12 | Programming Language :: Python :: 3 13 | License :: OSI Approved :: MIT License 14 | Operating System :: OS Independent 15 | 16 | [options] 17 | package_dir = 18 | = src 19 | packages = find: 20 | python_requires = >=3.7 21 | install_requires = 22 | gestures4kivy >= 0.1.3 23 | 24 | [options.packages.find] 25 | where = src 26 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup; 2 | 3 | setup() 4 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Android-for-Python/Camera4Kivy/7ca8901c648630681fb5709439cb92d45748d658/src/__init__.py -------------------------------------------------------------------------------- /src/camera4kivy/__init__.py: -------------------------------------------------------------------------------- 1 | from .preview import Preview 2 | from .preview import CameraProviderInfo 3 | -------------------------------------------------------------------------------- /src/camera4kivy/based_on_kivy_core/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Core Abstraction 3 | ================ 4 | 5 | This module defines the abstraction layers for our core providers and their 6 | implementations. 
For further information, please refer to 7 | :ref:`architecture` and the :ref:`providers` section of the documentation. 8 | 9 | In most cases, you shouldn't directly use a library that's already covered 10 | by the core abstraction. Always try to use our providers first. 11 | In case we are missing a feature or method, please let us know by 12 | opening a new Bug report instead of relying on your library. 13 | 14 | .. warning:: 15 | These are **not** widgets! These are just abstractions of the respective 16 | functionality. For example, you cannot add a core image to your window. 17 | You have to use the image **widget** class instead. If you're really 18 | looking for widgets, please refer to :mod:`kivy.uix` instead. 19 | ''' 20 | 21 | 22 | import os 23 | import sysconfig 24 | import sys 25 | import traceback 26 | import tempfile 27 | import subprocess 28 | import importlib 29 | import kivy 30 | from kivy.logger import Logger 31 | 32 | 33 | class CoreCriticalException(Exception): 34 | pass 35 | 36 | 37 | def core_select_lib(category, llist, create_instance=False, 38 | base='kivy.core', basemodule=None): 39 | if 'KIVY_DOC' in os.environ: 40 | return 41 | category = category.lower() 42 | basemodule = basemodule or category 43 | libs_ignored = [] 44 | errs = [] 45 | for option, modulename, classname in llist: 46 | try: 47 | # module activated in config ? 48 | try: 49 | if option not in kivy.kivy_options[category]: 50 | libs_ignored.append(modulename) 51 | Logger.debug( 52 | '{0}: Provider <{1}> ignored by config'.format( 53 | category.capitalize(), option)) 54 | continue 55 | except KeyError: 56 | pass 57 | 58 | # import module 59 | mod = importlib.__import__(name='{2}.{0}.{1}'.format( 60 | basemodule, modulename, base), 61 | globals=globals(), 62 | locals=locals(), 63 | fromlist=[modulename], level=0) 64 | cls = mod.__getattribute__(classname) 65 | 66 | # ok ! 
67 | Logger.info('{0}: Provider: {1}{2}'.format( 68 | category.capitalize(), option, 69 | '({0} ignored)'.format(libs_ignored) if libs_ignored else '')) 70 | if create_instance: 71 | cls = cls() 72 | return cls 73 | 74 | except ImportError as e: 75 | errs.append((option, e, sys.exc_info()[2])) 76 | libs_ignored.append(modulename) 77 | Logger.debug('{0}: Ignored <{1}> (import error)'.format( 78 | category.capitalize(), option)) 79 | Logger.trace('', exc_info=e) 80 | 81 | except CoreCriticalException as e: 82 | errs.append((option, e, sys.exc_info()[2])) 83 | Logger.error('{0}: Unable to use {1}'.format( 84 | category.capitalize(), option)) 85 | Logger.error( 86 | '{0}: The module raised an important error: {1!r}'.format( 87 | category.capitalize(), e.message)) 88 | raise 89 | 90 | except Exception as e: 91 | errs.append((option, e, sys.exc_info()[2])) 92 | libs_ignored.append(modulename) 93 | Logger.trace('{0}: Unable to use {1}'.format( 94 | category.capitalize(), option)) 95 | Logger.trace('', exc_info=e) 96 | 97 | err = '\n'.join(['{} - {}: {}\n{}'.format(opt, e.__class__.__name__, e, 98 | ''.join(traceback.format_tb(tb))) for opt, e, tb in errs]) 99 | Logger.critical( 100 | '{0}: Unable to find any valuable {0} provider. Please enable ' 101 | 'debug logging (e.g. add -d if running from the command line, or ' 102 | 'change the log level in the config) and re-run your app to ' 103 | 'identify potential causes\n{1}'.format(category.capitalize(), err)) 104 | 105 | def core_register_libs(category, libs, base='kivy.core'): 106 | if 'KIVY_DOC' in os.environ: 107 | return 108 | category = category.lower() 109 | kivy_options = kivy.kivy_options[category] 110 | libs_loadable = {} 111 | libs_ignored = [] 112 | 113 | for option, lib in libs: 114 | # module activated in config ? 
115 | if option not in kivy_options: 116 | Logger.debug('{0}: option <{1}> ignored by config'.format( 117 | category.capitalize(), option)) 118 | libs_ignored.append(lib) 119 | continue 120 | libs_loadable[option] = lib 121 | 122 | libs_loaded = [] 123 | for item in kivy_options: 124 | try: 125 | # import module 126 | try: 127 | lib = libs_loadable[item] 128 | except KeyError: 129 | continue 130 | importlib.__import__(name='{2}.{0}.{1}'.format(category, lib, base), 131 | globals=globals(), 132 | locals=locals(), 133 | fromlist=[lib], 134 | level=0) 135 | 136 | libs_loaded.append(lib) 137 | 138 | except Exception as e: 139 | Logger.trace('{0}: Unable to use <{1}> as loader!'.format( 140 | category.capitalize(), option)) 141 | Logger.trace('', exc_info=e) 142 | libs_ignored.append(lib) 143 | 144 | Logger.info('{0}: Providers: {1} {2}'.format( 145 | category.capitalize(), 146 | ', '.join(libs_loaded), 147 | '({0} ignored)'.format( 148 | ', '.join(libs_ignored)) if libs_ignored else '')) 149 | return libs_loaded 150 | 151 | 152 | def handle_win_lib_import_error(category, provider, mod_name): 153 | if sys.platform != 'win32': 154 | return 155 | 156 | assert mod_name.startswith('kivy.') 157 | kivy_root = os.path.dirname(kivy.__file__) 158 | dirs = mod_name[5:].split('.') 159 | mod_path = os.path.join(kivy_root, *dirs) 160 | 161 | # get the full expected path to the compiled pyd file 162 | # filename is .cp-.pyd 163 | # https://github.com/python/cpython/blob/master/Doc/whatsnew/3.5.rst 164 | if hasattr(sys, 'gettotalrefcount'): # debug 165 | mod_path += '._d' 166 | mod_path += '.cp{}{}-{}.pyd'.format( 167 | sys.version_info.major, sys.version_info.minor, 168 | sysconfig.get_platform().replace('-', '_')) 169 | 170 | # does the compiled pyd exist at all? 171 | if not os.path.exists(mod_path): 172 | Logger.debug( 173 | '{}: Failed trying to import "{}" for provider {}. Compiled file ' 174 | 'does not exist. 
Have you perhaps forgotten to compile Kivy, or ' 175 | 'did not install all required dependencies?'.format( 176 | category, provider, mod_path)) 177 | return 178 | 179 | # tell user to provide dependency walker 180 | env_var = 'KIVY_{}_DEPENDENCY_WALKER'.format(provider.upper()) 181 | if env_var not in os.environ: 182 | Logger.debug( 183 | '{0}: Failed trying to import the "{1}" provider from "{2}". ' 184 | 'This error is often encountered when a dependency is missing,' 185 | ' or if there are multiple copies of the same dependency dll on ' 186 | 'the Windows PATH and they are incompatible with each other. ' 187 | 'This can occur if you are mixing installations (such as different' 188 | ' python installations, like anaconda python and a system python) ' 189 | 'or if another unrelated program added its directory to the PATH. ' 190 | 'Please examine your PATH and python installation for potential ' 191 | 'issues. To further troubleshoot a "DLL load failed" error, ' 192 | 'please download ' 193 | '"Dependency Walker" (64 or 32 bit version - matching your python ' 194 | 'bitness) from dependencywalker.com and set the environment ' 195 | 'variable {3} to the full path of the downloaded depends.exe file ' 196 | 'and rerun your application to generate an error report'. 197 | format(category, provider, mod_path, env_var)) 198 | return 199 | 200 | depends_bin = os.environ[env_var] 201 | if not os.path.exists(depends_bin): 202 | raise ValueError('"{}" provided in {} does not exist'.format( 203 | depends_bin, env_var)) 204 | 205 | # make file for the resultant log 206 | fd, temp_file = tempfile.mkstemp( 207 | suffix='.dwi', prefix='kivy_depends_{}_log_'.format(provider), 208 | dir=os.path.expanduser('~/')) 209 | os.close(fd) 210 | 211 | Logger.info( 212 | '{}: Running dependency walker "{}" on "{}" to generate ' 213 | 'troubleshooting log. 
Please wait for it to complete'.format( 214 | category, depends_bin, mod_path)) 215 | Logger.debug( 216 | '{}: Dependency walker command is "{}"'.format( 217 | category, 218 | [depends_bin, '/c', '/od:{}'.format(temp_file), mod_path])) 219 | 220 | try: 221 | subprocess.check_output([ 222 | depends_bin, '/c', '/od:{}'.format(temp_file), mod_path]) 223 | except subprocess.CalledProcessError as exc: 224 | if exc.returncode >= 0x00010000: 225 | Logger.error( 226 | '{}: Dependency walker failed with error code "{}". No ' 227 | 'error report was generated'. 228 | format(category, exc.returncode)) 229 | return 230 | 231 | Logger.info( 232 | '{}: dependency walker generated "{}" containing troubleshooting ' 233 | 'information about provider {} and its failing file "{} ({})". You ' 234 | 'can open the file in dependency walker to view any potential issues ' 235 | 'and troubleshoot it yourself. ' 236 | 'To share the file with the Kivy developers and request support, ' 237 | 'please contact us at our support channels ' 238 | 'https://kivy.org/doc/master/contact.html (not on github, unless ' 239 | 'it\'s truly a bug). Make sure to provide the generated file as well ' 240 | 'as the *complete* Kivy log being printed here. Keep in mind the ' 241 | 'generated dependency walker log file contains paths to dlls on your ' 242 | 'system used by kivy or its dependencies to help troubleshoot them, ' 243 | 'and these paths may include your name in them. 
Please view the ' 244 | 'log file in dependency walker before sharing to ensure you are not ' 245 | 'sharing sensitive paths'.format( 246 | category, temp_file, provider, mod_name, mod_path)) 247 | -------------------------------------------------------------------------------- /src/camera4kivy/based_on_kivy_core/camera/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Camera 3 | ====== 4 | 5 | Core class for acquiring the camera and converting its input into a 6 | :class:`~kivy.graphics.texture.Texture`. 7 | 8 | .. versionchanged:: 1.10.0 9 | The pygst and videocapture providers have been removed. 10 | 11 | .. versionchanged:: 1.8.0 12 | There is now 2 distinct Gstreamer implementation: one using Gi/Gst 13 | working for both Python 2+3 with Gstreamer 1.0, and one using PyGST 14 | working only for Python 2 + Gstreamer 0.10. 15 | ''' 16 | 17 | __all__ = ('CameraBase', 'Camera') 18 | 19 | 20 | from kivy.utils import platform 21 | from kivy.event import EventDispatcher 22 | from kivy.logger import Logger 23 | from .. 
class CameraBase(EventDispatcher):
    '''Abstract base for camera providers.

    Acquires frames from a backend and publishes them through ``texture``.
    Construction keyword arguments (all optional):
        `stopped`: bool, default False — initial stopped state.
        `resolution`: (width, height) requested from the backend.
        `index`: int, default 0 — backend device index.
        `context`: object or None — when set, frame events are forwarded to
            ``context.on_texture()`` instead of being dispatched as Kivy
            events (see ``_copy_to_gpu``).
    '''

    def __init__(self, **kwargs):
        kwargs.setdefault('stopped', False)
        kwargs.setdefault('index', 0)
        kwargs.setdefault('context', None)
        self.stopped = kwargs.get('stopped')
        # NOTE(review): no default for 'resolution'; _resolution is None if
        # the caller omits it — subclasses appear to rely on it being given.
        self._resolution = kwargs.get('resolution')
        self._index = kwargs.get('index')
        self._context = kwargs.get('context')
        self._buffer = None            # raw frame bytes pending upload
        self._format = 'rgb'           # color format of _buffer
        self._texture = None           # kivy Texture, created lazily
        self.capture_device = None
        super().__init__()
        # Subclass hook: open/probe the actual camera hardware.
        self.init_camera()
        #if not self.stopped and not self._context:
        #    self.start()

    def _get_texture(self):
        return self._texture

    # Indirect through _get_texture so subclasses may override the getter.
    texture = property(lambda self: self._get_texture(),
                       doc='Return the camera texture with the latest capture')

    def init_camera(self):
        '''Initialise the camera (internal)'''
        pass

    def start(self):
        '''Start the camera acquire'''
        self.stopped = False

    def stop(self):
        '''Release the camera'''
        self.stopped = True

    def _update(self, dt):
        '''Update the camera (internal)'''
        pass

    def _copy_to_gpu(self):
        '''Copy the pending buffer into the texture (internal).'''
        if self._texture is None:
            Logger.debug('Camera: copy_to_gpu() failed, _texture is None !')
            return
        self._texture.blit_buffer(self._buffer, colorfmt=self._format)
        self._buffer = None
        # Prefer the camera4kivy context callback over the Kivy event.
        if self._context:
            self._context.on_texture()
        else:
            self.dispatch('on_texture')

    #def on_texture(self):
    #    pass

    #def on_load(self):
    #    pass
def select_provider(category, llist, base='kivy.core'):
    '''Return the first importable provider class for `category`.

    `llist` is a sequence of (option, modulename, classname) tuples tried in
    order; an option is skipped unless it is enabled in
    ``kivy.kivy_options[category]``.  Returns the provider class, or None
    when no provider could be imported.
    '''
    category = category.lower()
    basemodule = category
    for option, modulename, classname in llist:
        if option not in kivy.kivy_options[category]:
            continue
        name = '{0}.{1}.{2}'.format(base, basemodule, modulename)
        try:
            # importlib.import_module is the documented, simpler equivalent
            # of the previous importlib.__import__(..., fromlist=...) call.
            mod = importlib.import_module(name)
            cls = getattr(mod, classname)
        except Exception as e:
            # A provider may legitimately be unavailable (missing backend
            # package); log instead of silently swallowing so that broken
            # providers can be diagnosed from the Kivy log.
            Logger.debug('{0}: Provider {1} unavailable: {2}'.format(
                category.capitalize(), option, e))
            continue
        Logger.info('{0}: Provider: {1}'.format(category.capitalize(),
                                                option))
        return cls

    # Android uses its own CameraX provider, so "none found" is expected.
    if platform not in ['android']:
        Logger.warning('{0}: No Provider found.'.format(category.capitalize()))
    return None

providers = ()

if platform in ['macosx', 'ios']:
    # AVFoundation ships with kivy.core, so use Kivy's own selector.
    providers += (('avfoundation', 'camera_avfoundation',
                   'CameraAVFoundation'), )
    Camera = core_select_lib('camera', (providers))
    providers = ()
elif platform == 'win':
    providers += (('opencv', 'camera_opencv', 'CameraOpenCV'), )
    providers += (('gi', 'camera_gi', 'CameraGi'), )
elif platform == 'android':
    pass
else:
    # Linux / Raspberry Pi: prefer picamera2, then legacy picamera,
    # then GStreamer, then OpenCV.
    providers += (('picamera', 'camera_picamera2', 'CameraPiCamera2'), )
    providers += (('picamera', 'camera_picamera', 'CameraPiCamera'), )
    providers += (('gi', 'camera_gi', 'CameraGi'), )
    providers += (('opencv', 'camera_opencv', 'CameraOpenCV'), )

if providers:
    Camera = select_provider('camera', (providers),
                             base='camera4kivy.based_on_kivy_core')
# initialize the camera/gi. if an older Gstreamer is found, refuse to load.
Gst.init(None)
version = Gst.version()
if version < (1, 0, 0, 0):
    raise Exception('Cannot use camera_gi, Gstreamer < 1.0 is not supported.')
Logger.info('CameraGi: Using Gstreamer {}'.format(
    '.'.join(['{}'.format(x) for x in Gst.version()])))
# Pump the GObject main loop from Kivy's clock so bus signals fire.
install_gobject_iteration()


class _MapInfo(Structure):
    # ctypes mirror of GstMapInfo, used to read mapped buffer memory
    # directly (see the workaround in CameraGi._update).
    _fields_ = [
        ('memory', c_void_p),
        ('flags', c_int),
        ('data', c_void_p)]
    # we don't care about the rest


def _on_cameragi_unref(obj):
    # weakref callback: drop the dead camera from the module registry so
    # camera_gi_clean() does not iterate stale entries.
    if obj in CameraGi._instances:
        CameraGi._instances.remove(obj)


class CameraGi(CameraBase):
    '''Implementation of CameraBase using GStreamer

    :Parameters:
        `video_src`: str, default is 'v4l2src'
            Other tested options are: 'dc1394src' for firewire
            dc camera (e.g. firefly MV). Any gstreamer video source
            should potentially work.
            Theoretically a longer string using "!" can be used
            describing the first part of a gstreamer pipeline.
    '''

    # weakrefs to every live instance, for the atexit cleanup below.
    _instances = []

    def __init__(self, **kwargs):
        self._pipeline = None
        self._camerasink = None
        self._decodebin = None
        self._texturesize = None
        self._callback = None
        self._video_src = kwargs.get('video_src', 'v4l2src')
        # invoked from on_error() when the pipeline reports an error
        self._callback = kwargs.get('callback')
        wk = ref(self, _on_cameragi_unref)
        CameraGi._instances.append(wk)
        super(CameraGi, self).__init__(**kwargs)

    def init_camera(self):
        '''Build and prime the GStreamer pipeline:
        <video_src> ! decodebin ! videoconvert ! appsink (RGB).'''
        # TODO: This doesn't work when camera resolution is resized at
        # runtime. There must be some other way to release the camera?
        if self._pipeline:
            self._pipeline = None

        video_src = self._video_src
        if video_src == 'v4l2src':
            video_src += ' device=/dev/video%d' % self._index
        elif video_src == 'dc1394src':
            video_src += ' camera-number=%d' % self._index

        # NOTE(review): module import already raised for Gstreamer < 1.0,
        # so this legacy 0.10 branch looks unreachable — confirm.
        if Gst.version() < (1, 0, 0, 0):
            caps = ('video/x-raw-rgb,red_mask=(int)0xff0000,'
                    'green_mask=(int)0x00ff00,blue_mask=(int)0x0000ff')
            pl = ('{} ! decodebin name=decoder ! ffmpegcolorspace ! '
                  'appsink name=camerasink emit-signals=True caps={}')
        else:
            caps = 'video/x-raw,format=RGB'
            pl = '{} ! decodebin name=decoder ! videoconvert ! appsink ' + \
                'name=camerasink emit-signals=True caps={}'

        self._pipeline = Gst.parse_launch(pl.format(video_src, caps))
        # Watch for invalid camera id
        bus = self._pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message::error", self.on_error)
        bus.connect("message::state-changed", self.on_state_changed)
        # Force wake up
        ret = self._pipeline.set_state(Gst.State.READY)
        if ret == Gst.StateChangeReturn.FAILURE:
            pass  # Implement retry ?
        self._camerasink = self._pipeline.get_by_name('camerasink')
        self._camerasink.connect('new-sample', self._gst_new_sample)
        self._decodebin = self._pipeline.get_by_name('decoder')

        if self._camerasink and not self.stopped:
            self.start()

    def on_error(self, bus, msg):
        # Exceptions seem to be silenced, so callback
        if self._callback:
            self._callback()

    def on_state_changed(self, bus, msg):
        # Seems to be more responsive if we listen for this!
        pass

    def _gst_new_sample(self, *largs):
        '''appsink 'new-sample' handler (runs on the GStreamer thread):
        stash the sample and schedule _update on the Kivy thread.'''
        sample = self._camerasink.emit('pull-sample')
        if sample is None:
            return False

        self._sample = sample

        if self._texturesize is None:
            # try to get the camera image size
            for pad in self._decodebin.srcpads:
                s = pad.get_current_caps().get_structure(0)
                self._texturesize = (
                    s.get_value('width'),
                    s.get_value('height'))
                Clock.schedule_once(self._update)
                return False

        Clock.schedule_once(self._update)
        return False

    def start(self):
        super(CameraGi, self).start()
        self._pipeline.set_state(Gst.State.PLAYING)

    def stop(self):
        # PAUSED keeps the device open so start() can resume quickly;
        # unload() fully releases it.
        super(CameraGi, self).stop()
        self._pipeline.set_state(Gst.State.PAUSED)

    def unload(self):
        self._pipeline.set_state(Gst.State.NULL)

    def _update(self, dt):
        '''Kivy-thread half of frame delivery: map the latest sample and
        blit it to the texture.'''
        sample, self._sample = self._sample, None
        if sample is None:
            return

        if self._texture is None and self._texturesize is not None:
            self._texture = Texture.create(
                size=self._texturesize, colorfmt='rgb')
            self._texture.flip_vertical()
            self.dispatch('on_load')

        # decode sample
        # read the data from the buffer memory
        # NOTE(review): if sample.get_buffer() or buf.map() raises, 'buf' /
        # 'mapinfo' are unbound and the finally clause raises NameError —
        # consider initialising mapinfo = None before the try.
        try:
            buf = sample.get_buffer()
            result, mapinfo = buf.map(Gst.MapFlags.READ)

            # We cannot get the data out of mapinfo, using Gst 1.0.6 + Gi
            # 3.8.0 related bug report:
            # https://bugzilla.gnome.org/show_bug.cgi?id=6t8663
            # ie: mapinfo.data is normally a char*, but here, we have an int
            # So right now, we use ctypes instead to read the mapinfo
            # ourself. (__hash__ returns the id/address of the boxed struct)
            addr = mapinfo.__hash__()
            c_mapinfo = _MapInfo.from_address(addr)

            # now get the memory
            self._buffer = string_at(c_mapinfo.data, mapinfo.size)
            self._copy_to_gpu()
        finally:
            if mapinfo is not None:
                buf.unmap(mapinfo)


@atexit.register
def camera_gi_clean():
    # if we leave the python process with some video running, we can hit a
    # segfault. This is forcing the stop/unload of all remaining videos
    # before exiting the python process.
    for weakcamera in CameraGi._instances:
        camera = weakcamera()
        if isinstance(camera, CameraGi):
            camera.stop()
            camera.unload()
class CameraOpenCV(CameraBase):
    '''Camera provider backed by cv2.VideoCapture.

    Streams BGR frames to the preview texture and supports photo capture
    and (cropped) video recording via cv2.VideoWriter.
    '''

    def __init__(self, **kwargs):
        self._device = None        # cv2.VideoCapture, opened in init_camera()
        self._update_ev = None     # Clock event driving update()
        super(CameraOpenCV, self).__init__(**kwargs)

    def init_camera(self):
        '''Open the device, probe the real resolution/frame rate and the
        crop rectangle. The camera starts stopped; call start().'''
        self._format = 'bgr'
        if platform == 'win':
            # DirectShow backend: avoids slow MSMF initialisation on Windows.
            self._index = self._index + cv2.CAP_DSHOW
        self._device = cv2.VideoCapture(self._index)
        self._device.set(cv2.CAP_PROP_FRAME_WIDTH, self._resolution[0])
        self._device.set(cv2.CAP_PROP_FRAME_HEIGHT, self._resolution[1])
        # The driver may silently pick another resolution; read one frame
        # and take the real size from it.
        ret, frame = self._device.read()
        self._resolution = (int(frame.shape[1]), int(frame.shape[0]))
        # Convert reported FPS into a Clock period (seconds per frame);
        # 0 and 1 mean "unknown", fall back to 30 fps.
        self.fps = self._device.get(cv2.CAP_PROP_FPS)
        if self.fps == 0 or self.fps == 1:
            self.fps = 1.0 / 30
        elif self.fps > 1:
            self.fps = 1.0 / self.fps
        self.crop = self._context.crop_for_aspect_orientation(*self._resolution)
        self.stopped = True

    def update(self, dt):
        '''Clock callback: read a frame, blit it, and service any pending
        photo/video capture using the aspect crop.'''
        if self.stopped:
            return
        if self._texture is None:
            # Create the texture on first frame
            self._texture = Texture.create(self._resolution)
            self._texture.flip_vertical()
            self._context.on_load()
        try:
            ret, frame = self._device.read()
            if ret:
                self._buffer = frame.reshape(-1)
                self._copy_to_gpu()
                if self.photo_capture:
                    self.photo_capture = False
                    cropped = frame[self.crop[1]: self.crop[1]+self.crop[3],
                                    self.crop[0]: self.crop[0]+self.crop[2], :]
                    cv2.imwrite(self.photo_path, cropped)
                    if self.photo_callback:
                        self.photo_callback(self.photo_path)
                if self.video_capture:
                    cropped = frame[self.crop[1]: self.crop[1]+self.crop[3],
                                    self.crop[0]: self.crop[0]+self.crop[2], :]
                    self.video_stream.write(cropped)
        except Exception as e:
            Logger.exception('OpenCV: Couldn\'t get image from Camera')

    def start(self):
        self.stopped = False
        self.photo_capture = False
        self.video_capture = False
        if self._update_ev is not None:
            self._update_ev.cancel()
        # Fix: schedule at the device's measured frame period (computed in
        # init_camera) instead of a hard-coded 30 fps; self.fps was
        # previously calculated but never used.
        self._update_ev = Clock.schedule_interval(self.update, self.fps)

    def stop(self):
        self.stopped = True
        if self._update_ev is not None:
            self._update_ev.cancel()
            self._update_ev = None
        # Fix: release the capture device so the OS camera handle is not
        # leaked; previously the reference was dropped without release().
        if self._device is not None:
            self._device.release()
        self._device = None

    def photo(self, path, callback):
        '''Request a photo; written by update() on the next frame.'''
        self.photo_capture = True
        self.photo_path = path
        self.photo_callback = callback

    def video_start(self, path, callback):
        '''Begin recording cropped frames to an mp4 file.'''
        self.video_capture = True
        self.video_path = path
        self.video_callback = callback
        size = (self.crop[2], self.crop[3])
        # Record at the UI frame rate, since update() feeds the writer
        # from the Kivy clock.
        rate = Clock.get_fps()
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        self.video_stream = cv2.VideoWriter(path, fourcc, rate, size)

    def video_stop(self):
        self.video_capture = False
        self.video_stream.release()
class CameraPiCamera(CameraBase):
    '''Implementation of CameraBase using PiCamera (legacy Raspberry Pi
    camera stack). Streams BGR frames on a Clock interval.
    '''
    _update_ev = None

    def __init__(self, **kwargs):
        self._camera = None
        self._format = 'bgr'
        self._framerate = kwargs.get('framerate', 30)
        super(CameraPiCamera, self).__init__(**kwargs)

    def init_camera(self):
        '''(Re)open the PiCamera with the requested resolution/framerate.'''
        if self._camera is not None:
            self._camera.close()

        self._camera = PiCamera()
        self._camera.resolution = self.resolution
        self._camera.framerate = self._framerate
        self._camera.iso = 800

        self.fps = 1. / self._framerate   # Clock period, seconds per frame

        if not self.stopped:
            self.start()

    def raw_buffer_size(self):
        '''Round buffer size up to 32x16 blocks.

        Returns (padded_width, padded_height). See
        https://picamera.readthedocs.io/en/release-1.13/recipes2.html#capturing-to-a-numpy-array
        ''' # noqa
        return (
            ceil(self.resolution[0] / 32.) * 32,
            ceil(self.resolution[1] / 16.) * 16
        )

    def _update(self, dt):
        if self.stopped:
            return

        if self._texture is None:
            # Create the texture
            self._texture = Texture.create(self._resolution)
            self._texture.flip_vertical()
            self.dispatch('on_load')

        try:
            bufsize = self.raw_buffer_size()
            output = numpy.empty(
                (bufsize[0] * bufsize[1] * 3,), dtype=numpy.uint8)
            self._camera.capture(output, self._format, use_video_port=True)

            # Trim the padding off the raw capture. Per the picamera numpy
            # recipe the buffer is row-major (padded_height, padded_width, 3).
            # Fix: the previous reshape/trim used (width, height) axis order,
            # which scrambles the image whenever the resolution is padded.
            output = output.reshape((bufsize[1], bufsize[0], 3))
            output = output[:self.resolution[1], :self.resolution[0], :]
            self._buffer = output.reshape(
                (self.resolution[0] * self.resolution[1] * 3,))

            self._copy_to_gpu()
        except KeyboardInterrupt:
            raise
        except Exception:
            Logger.exception('PiCamera: Couldn\'t get image from Camera')

    def start(self):
        super(CameraPiCamera, self).start()
        if self._update_ev is not None:
            self._update_ev.cancel()
        self._update_ev = Clock.schedule_interval(self._update, self.fps)

    def stop(self):
        # NOTE: close() releases the hardware; a subsequent start() without
        # a new init_camera() will fail on the closed PiCamera.
        super(CameraPiCamera, self).stop()
        self._camera.close()
        if self._update_ev is not None:
            self._update_ev.cancel()
            self._update_ev = None
class SensorInterface(NullPreview):
    '''Receives frames on the Picamera2 worker thread and hands the latest
    one to the Kivy main thread via @mainthread setters.  CameraPi2.update()
    polls self.y/u/v (YUV420) or self.mjpeg (MJPEG USB cams).'''

    def __init__(self):
        super().__init__()
        self.mute = False       # rate-limit the unsupported-format error
        self.y = None           # latest Y plane (bytes) or None
        self.u = None           # latest U plane
        self.v = None           # latest V plane
        self.mjpeg = None       # latest MJPEG frame (bytes) or None
        self.stream_size = ()   # (w, h) of the display stream

    # Sync event loops
    ###################
    @mainthread
    def sync_yuv(self,y,u,v):
        # Runs on the Kivy thread: publish the newest YUV planes.
        self.y = y
        self.u = u
        self.v = v

    @mainthread
    def sync_mjpeg(self,mjpeg):
        # Runs on the Kivy thread: publish the newest MJPEG frame.
        self.mjpeg = mjpeg

    # Request Handlers
    ###################
    def handle_request(self, picam2):
        # Refresh cached stream format/size (may change after switch_mode),
        # then let Picamera2 deliver the request to render_request().
        if self.picam2.display_stream_name:
            camera_config = self.picam2.camera_config
            self.display_stream_name = self.picam2.display_stream_name
            stream_config = camera_config[self.display_stream_name]
            self.stream_fmt = stream_config["format"]
            self.stream_size = stream_config['size']
        picam2.process_requests(self)

    def render_request(self, request):
        '''Split the mapped display buffer into planes and forward to the
        main thread. Runs on the Picamera2 thread.'''
        try:
            # The added latency due to array manipulation in some of
            # these, occurs in the Picamera2 thread and is much less
            # than sample period of the Kivy thread. Nyquist is happy.
            with _MappedBuffer(request,self.display_stream_name) as mm:
                if self.stream_fmt == 'YUV420':
                    # Planar YUV420: Y is 2/3 of the buffer, U and V 1/6 each.
                    size = len(mm)
                    end_y = size * 2 // 3
                    end_u = end_y + end_y // 4
                    y = bytes(mm[:end_y])
                    u = bytes(mm[end_y:end_u])
                    v = bytes(mm[end_u:])
                    self.sync_yuv(y,u,v)

                elif self.stream_fmt == 'MJPEG':
                    self.sync_mjpeg(bytes(mm))

                elif self.stream_fmt and not self.mute:
                    self.mute = True
                    Logger.error(
                        "Picamera2 SensorInterface unsupported format " +\
                        self.stream_fmt)

        except Exception as e:
            Logger.error("Picamera2 SensorInterface\n" + str(e))
class FfmpegOutputPlus(Output):
    '''picamera2 Output that pipes the encoded stream to an ffmpeg
    subprocess, optionally muxing pulse audio, and optionally tagging the
    file with rotate metadata (video players honor the tag, so no
    re-encode is needed for rotated captures).
    '''

    def __init__(self, output_filename, audio=False, audio_device="default",
                 audio_sync=-0.3, audio_samplerate=48000, audio_codec="aac",
                 audio_bitrate=128000, pts=None, rotate = None):
        super().__init__(pts=pts)
        self.output_filename = output_filename
        self.audio = audio
        self.audio_device = audio_device
        self.audio_sync = audio_sync          # seconds, compensates a/v skew
        self.audio_samplerate = audio_samplerate
        self.audio_codec = audio_codec
        self.audio_bitrate = audio_bitrate
        self.rotate = rotate                  # degrees for metadata tag
        # Fix: initialise so stop() before start() cannot raise
        # AttributeError on self.ffmpeg.
        self.ffmpeg = None

    def start(self):
        '''Spawn the ffmpeg process and begin accepting frames.'''
        general_options = ['-loglevel', 'warning', '-y']
        video_input = ['-use_wallclock_as_timestamps', '1',
                       '-thread_queue_size', '32',
                       '-i', '-']
        if self.rotate:  # -map_metadata 0 -metadata:s:v rotate="90"
            video_input = video_input + ['-map_metadata', '0',
                                         '-metadata:s:v',
                                         'rotate='+str(self.rotate)]
        video_codec = ['-c:v', 'copy']
        audio_input = []
        audio_codec = []
        if self.audio:
            audio_input = ['-itsoffset', str(self.audio_sync),
                           '-f', 'pulse',
                           # Fix: -sample_rate takes the sample rate; this
                           # previously passed audio_bitrate (copy/paste bug)
                           # and audio_samplerate was never used.
                           '-sample_rate', str(self.audio_samplerate),
                           '-thread_queue_size', '512',
                           '-i', self.audio_device]
            audio_codec = ['-b:a', str(self.audio_bitrate),
                           '-c:a', self.audio_codec]

        command = ['ffmpeg'] + general_options + audio_input + video_input + \
            audio_codec + video_codec + self.output_filename.split()

        # pdeathsig: kill ffmpeg if this process dies, so no orphan encoder.
        self.ffmpeg = subprocess.Popen(command, stdin=subprocess.PIPE,
            preexec_fn=lambda: prctl.set_pdeathsig(signal.SIGKILL))
        super().start()

    def stop(self):
        super().stop()
        if self.ffmpeg is not None:
            self.ffmpeg.stdin.close()  # FFmpeg needs this to shut down tidily
            self.ffmpeg.terminate()
            self.ffmpeg = None

    def outputframe(self, frame, keyframe=True, timestamp=None):
        '''Forward one encoded frame to ffmpeg's stdin.'''
        if self.recording:
            self.ffmpeg.stdin.write(frame)
            # forces every frame to get timestamped individually
            self.ffmpeg.stdin.flush()
            self.outputtimestamp(timestamp)
class CameraPi2():
    '''Owns the Picamera2 lifecycle for the CameraPiCamera2 provider:
    sensor-mode selection, preview/photo/video configurations, zoom/drag
    via ScalerCrop, and YUV->RGBA conversion through an Fbo shader.

    The provider injects _resolution, _framerate, _context, _rotate and
    audio as attributes before calling start().
    '''

    def __init__(self):
        super().__init__()
        self.sensor = None            # SensorInterface, created in start()
        self.picam2 = None            # Picamera2 instance
        self.base_scaler_crop = None  # full-sensor crop rectangle
        self.scaler_crop = None       # current (zoomed/dragged) crop
        self.zoom_level = None
        self._rotate = 0
        self.video_recording = False
        self.audio = False
        self.is_usb = False

    # Start
    # Choose sensor, configure pc2
    ###############################

    def start(self, index):
        '''Open camera `index` (clamped to the available range), build the
        configurations and begin streaming into SensorInterface.'''
        self.zoom_level = 1
        self.previous_fmt = ''
        self.previous_size = (0,0)     # last YUV source size seen
        self.previous_tsize = (0,0)    # last target (texture) size seen
        self.fbo = None

        # Get info about this camera
        num_cameras = len(Picamera2.global_camera_info())
        if num_cameras == 0:
            Logger.error('C4k Picamera2: No camera found.')
            return
        if index <0 or index >= num_cameras:
            Logger.warning('C4k Picamera2: Requested camera '+ str(index) +\
                           ' not found, using ' + str(num_cameras -1) + ' .')
            index = num_cameras -1

        # initialize
        Id = Picamera2.global_camera_info()[index]['Id']
        self.picam2 = Picamera2(index)
        # 'i2c' in the device id indicates a Raspberry (CSI) camera;
        # anything else is treated as a USB camera.
        if 'i2c' in Id.lower():
            self.is_usb = False
            self.create_picam_configurations(index)
        else:
            self.is_usb = True
            self.create_usb_configurations()
        # NOTE(review): if create_picam_configurations() found no matching
        # sensor mode it returns early and self.crop_limits is unset here
        # — TODO confirm this path cannot occur in practice.
        self.base_scaler_crop = self.crop_limits
        self.scaler_crop = self.crop_limits
        self.picam2.configure(self.preview_config)
        self.sensor= SensorInterface()
        self.picam2.start_preview(self.sensor)
        self.picam2.start()

    def create_usb_configurations(self):
        # USB cameras: MJPEG everywhere, no sensor crop (no zoom/drag).
        self.crop_limits = None
        self.preview_config = self.picam2.create_preview_configuration(
            {"format": "MJPEG"})
        self.photo_config = self.picam2.create_still_configuration(
            {"format": "MJPEG"})
        self.video_config = self.picam2.create_video_configuration(
            {"format": "MJPEG"})  # Not supported
        
    def create_picam_configurations(self, index):
        '''Pick the highest-resolution 8-bit sensor mode that satisfies the
        framerate and aspect ratio, then derive preview/photo/video
        configurations from it.'''
        # Sensor configuration
        size_s = (0,0)
        wide = self._context.aspect_ratio == '16:9'
        for m in self.picam2.sensor_modes:
            # Raspberry camera resolution is also field of view.
            # get highest sensor resoluton for this framerate
            # so framerate will set field of view depending on camera
            #
            # Because video players don't support crop metadata well,
            # we can't crop from 4:3 to 16:9
            # So use native 16:9 and shift in fbo
            if 'fps' in m and 'size' in m and 'bit_depth' in m:
                fps = m['fps']
                size = m['size']
                bits = m['bit_depth']
                if fps >= self._framerate and bits == 8:
                    # aspect < 1.5 -> 4:3-ish mode, >= 1.5 -> 16:9-ish mode
                    if not wide and size[0]/size[1] < 1.5:
                        if size[0] > size_s[0]:
                            size_s = size
                            self.crop_limits = m['crop_limits']
                    elif wide and size[0]/size[1] >= 1.5:
                        if size[0] > size_s[0]:
                            size_s = size
                            self.crop_limits = m['crop_limits']

        if not size_s[0]:
            # NOTE(review): CameraPi2 has no self.aspect_ratio attribute
            # (only self._context.aspect_ratio) — this error path looks like
            # it would raise AttributeError; confirm and fix upstream.
            Logger.error('No sensor found in supporting ' +\
                         self.aspect_ratio + ' and ' +\
                         str(self._framerate) + ' fps.')
            return

        # Stream sizes for each configuration
        def align(edge, val):
            # round `edge` to the nearest multiple of `val`
            return val * round(edge / val)

        dw = align(self._resolution[0] , 64)
        if wide:
            dh = align(self._resolution[0] * 9 / 16, 64)
            vh = 720
        else:
            dh = align(self._resolution[1], 64)
            vh = 960
        main = {"size": (align(size_s[0], 16), align(size_s[1], 16)) }
        preview_lores = {"size": (dw, dh)}
        video_lores = {"size": (1280, vh)}

        # Configurations
        self.preview_config = self.picam2.create_preview_configuration(
            main = main,
            lores = preview_lores,
            display = 'lores')
        self.photo_config = self.picam2.create_still_configuration(
            main = main)
        self.video_config = self.picam2.create_video_configuration(
            main = main,
            lores = video_lores,
            encode = 'lores',
            display = 'lores')


    # Stop
    ###############################

    def stop(self):
        if self.sensor:
            self.sensor.stop()
        if self.picam2:
            self.picam2.close()
        self.sensor = None
        self.picam2 = None
        self.photo_config = None
        self.video_config = None

    # Display Update
    ###############################

    def update(self):
        '''Return the newest frame as RGBA bytes sized to _resolution, or
        None when no frame has arrived yet.'''
        ss = self.sensor
        if ss and ss.y:
            return self._yuv_to_rgba('YUV420', ss.y, ss.u, ss.v,
                                     ss.stream_size, self._resolution)
        elif ss and ss.mjpeg:
            # USB path: decode MJPEG with PIL and rotate/resize in software.
            img = Image.open(io.BytesIO(ss.mjpeg))
            img = img.convert('RGBA')
            img = img.rotate(self._rotate)
            img = img.resize(self._resolution)
            return img.tobytes()
        return None

    # Zoom and Drag events
    ###############################

    def zoom(self, scale):
        if self.picam2 and self.base_scaler_crop:
            self.zoom_level /= scale  # wheel on pi is backwards
            self.set_zoom()

    def set_zoom(self):
        '''Clamp zoom_level to [1, 7] and recenter the ScalerCrop window.'''
        max_zoom = 7.0
        if self.zoom_level < 1:
            self.zoom_level = 1.0
        if self.zoom_level > max_zoom:
            self.zoom_level = max_zoom
        factor = 1.0 / self.zoom_level
        full_img = self.base_scaler_crop
        center = (self.scaler_crop[0] + self.scaler_crop[2] // 2,
                  self.scaler_crop[1] + self.scaler_crop[3] // 2)
        w = int(factor * full_img[2])
        h = int(factor * full_img[3])
        x = full_img[0] + center[0] - w // 2
        y = full_img[1] + center[1] - h // 2
        self.limit_and_save([x, y, w, h])

    def drag(self, dx, dy):
        # dx, dy are fractions of the full sensor window.
        if self.picam2 and self.base_scaler_crop:
            full_img = self.base_scaler_crop
            w = self.scaler_crop[2]
            h = self.scaler_crop[3]
            x = self.scaler_crop[0] + int(full_img[2] * dx)
            y = self.scaler_crop[1] + int(full_img[3] * dy)
            self.limit_and_save([x, y, w, h])

    def limit_and_save(self,new_scaler_crop):
        # Keep the crop window inside the sensor, then apply it.
        full_img = self.base_scaler_crop
        new_scaler_crop[1] = min(max(new_scaler_crop[1], full_img[1]),
                                 full_img[1] + full_img[3] - new_scaler_crop[3])
        new_scaler_crop[0] = min(max(new_scaler_crop[0], full_img[0]),
                                 full_img[0] + full_img[2] - new_scaler_crop[2])
        self.scaler_crop = tuple(new_scaler_crop)
        self.picam2.controls.ScalerCrop = self.scaler_crop


    # Photo start/stop capture
    ###############################

    def capture_file(self, file_output, callback):
        '''Grab one full-resolution frame, rotate/crop it to the preview
        aspect, save to file_output and invoke callback(file_output).'''
        request = self.picam2.capture_request()
        size = request.config['main']['size']
        with _MappedBuffer(request,'main') as pixels:
            if self.is_usb:
                img = Image.open(io.BytesIO(pixels))
            else:
                img = Image.frombytes('RGB', size, bytes(pixels))
        request.release()
        if self._rotate in [90,270]:
            # width/height swap after a 90/270 degree rotation
            size = size[::-1]
        crop = self._context.crop_for_aspect_orientation(size[0],
                                                         size[1])
        bottom = crop[3] + crop[1]
        right = crop[2] + crop[0]
        img = img.rotate(self._rotate, expand = True)
        img = img.crop((crop[0], crop[1], right, bottom))
        with open(file_output, 'wb') as fp:
            img.save(fp)
        if callback:
            callback(file_output)

    # picam2.switch_mode loses ScalarCrop
    def switch_config(self, new_config):
        # Stop/configure/start while re-applying the zoom window, because
        # switch_mode resets ScalerCrop.
        self.picam2.stop()
        self.picam2.configure(new_config)
        if self.is_usb:
            self.picam2.start()
        else:
            self.picam2.controls.ScalerCrop = self.scaler_crop
            self.picam2.start()
            self.picam2.controls.ScalerCrop = self.scaler_crop

    def photo(self, path, callback):
        if self.picam2 and self.sensor and not self.video_recording:
            self.switch_config(self.photo_config)
            self.capture_file(path, callback)
            self.switch_config(self.preview_config)

    # Video start/stop
    ###############################

    def video_start(self, filepath, callback):
        if self.is_usb:
            Logger.error('Camera4Kivy, USB video recording not supported.')
            return
        self.video_filepath = filepath
        self.video_callback = callback
        if self.picam2 and self.sensor:
            self.video_recording = True
            self.picam2.switch_mode(self.video_config)
            encoder = H264Encoder()
            # rotate metadata tags the file instead of re-encoding frames.
            output = FfmpegOutputPlus(filepath, rotate = self._rotate,
                                      audio= self.audio, audio_sync = 0)
            self.picam2.start_encoder(encoder, output)

    def video_stop(self):
        if self.is_usb:
            return
        self.picam2.stop_encoder()
        self.picam2.switch_mode(self.preview_config)
        self.video_recording = False
        if self.video_callback:
            self.video_callback(self.video_filepath)

    # YUV reformatting
    ###############################

    # Fragment shader converting three luminance textures (Y, U, V planes)
    # to RGB; only the JPEG (full-range) matrix is used below.
    YUV_RGB_FS = '''
    $HEADER$
    uniform sampler2D tex_y;
    uniform sampler2D tex_u;
    uniform sampler2D tex_v;
    mat3 YUV2RGB_JPEG = mat3(1.0, 1.0, 1.0 ,
                             0.0, -0.344, 1.772,
                             1.402, -0.714, 0.0);
    mat3 YUV2RGB_SMPTE170M = mat3(1.164, 1.164, 1.164,
                                  0.0, -0.392, 2.017,
                                  1.596, -0.813, 0.0);
    mat3 YUV2RGB_REC709 = mat3(1.164, 1.164, 1.164,
                               0.0, -0.213, 2.112,
                               1.793, -0.533, 0.0);
    void main(void) {
        vec3 yuv;
        yuv.r = texture2D(tex_y, tex_coord0).r;
        yuv.g = texture2D(tex_u, tex_coord0).r -0.5;
        yuv.b = texture2D(tex_v, tex_coord0).r -0.5;
        gl_FragColor = vec4(YUV2RGB_JPEG * yuv, 1.0);
    }
    '''

    def _yuv_to_rgba(self, fmt, y, u, v, size, tsize):
        '''GPU-convert YUV planes of `size` into RGBA bytes of `tsize`,
        applying rotation and 16:9 letterbox shift inside an Fbo. Textures
        and the Fbo are rebuilt only when the sizes change.'''
        if self._context.aspect_ratio == '16:9':
            isize = [tsize[0], round(tsize[0] * 9 / 16)]
            translate = (tsize[1] - isize[1]) // 2
        else:
            isize = tsize
            translate = 0
        origin = (tsize[0]//2, tsize[1]//2)
        if fmt == 'YUV420':
            uv_size = (size[0]//2, size[1]//2 )
        else:
            uv_size = (size[0]//2, size[1])

        if self.previous_size[0] != size[0] or self.previous_size[1] != size[1]:
            # Source size changed: rebuild the three plane textures.
            self.tex_y = Texture.create(size= size, colorfmt='luminance')
            self.tex_u = Texture.create(size= uv_size, colorfmt='luminance')
            self.tex_v = Texture.create(size= uv_size, colorfmt='luminance')

        if self.previous_tsize[0] != tsize[0] or\
           self.previous_tsize[1] != tsize[1] or\
           self.fbo == None:
            self.previous_tsize = tsize
            self.fbo = Fbo(size=tsize)  # size for blit to self._texture
            self.fbo.texture.flip_vertical()
            with self.fbo:
                self.b_u = BindTexture(texture=self.tex_u, index=1)
                self.b_v = BindTexture(texture=self.tex_v, index=2)
                Rotate(origin = origin, angle = 360-self._rotate,
                       axis = (0, 0, 1))
                Translate(0, translate)
                self.r_y = Rectangle(size=isize, texture=self.tex_y)
            self.fbo.shader.fs = self.YUV_RGB_FS
            self.fbo['tex_y'] = 0
            self.fbo['tex_u'] = 1
            self.fbo['tex_v'] = 2

        if self.previous_size[0] != size[0] or self.previous_size[1] != size[1]:
            self.previous_size = size
            self.r_y.size = isize
            self.r_y.texture = self.tex_y
            self.b_u.texture = self.tex_u
            self.b_v.texture = self.tex_v
            # Repeat previous pixels to prevent flicker on change
            return bytes(self.fbo.texture.pixels)

        self.tex_y.blit_buffer(y, colorfmt='luminance')
        self.tex_u.blit_buffer(u, colorfmt='luminance')
        self.tex_v.blit_buffer(v, colorfmt='luminance')
        self.fbo.ask_update()
        self.fbo.draw()
        return self.fbo.texture.pixels
class CameraPiCamera2(CameraBase):
    '''Implementation of CameraBase using PiCamera2

    Thin Kivy provider: delegates capture, zoom, drag and recording to a
    CameraPi2 lifecycle object (created in start(), torn down in stop())
    and blits its RGBA frames on a Clock interval.
    '''

    def __init__(self, **kwargs):
        self._update_ev = None     # Clock event driving update()
        self._camera = None        # CameraPi2 instance, created in start()
        self._framerate = kwargs.get('framerate', 30)
        self.started = False
        self.fbo = None
        self._rotate = kwargs.get('rotation', 0)
        self.audio = kwargs.get('audio', False)
        super().__init__(**kwargs)

    # Lifecycle
    ################################

    def init_camera(self):
        '''Prepare provider state; the hardware is not opened until
        start().'''
        self._format = 'rgba'
        if self._camera is not None:
            # Fix: CameraPi2 defines stop(), not close(); the previous
            # self._camera.close() call raised AttributeError when a
            # camera was already present.
            self._camera.stop()
        self._texture = None
        self.stopped = True
        self.fps = 1. / self._framerate   # Clock period, seconds per frame

    def update(self, dt):
        '''Clock callback: create the texture on first use, then blit the
        newest frame (if any) from CameraPi2.'''
        if self.stopped:
            return
        if self._texture is None:
            self._texture = Texture.create(self._resolution)
            self._texture.flip_vertical()
            self._context.on_load()
        try:
            self._buffer = self._camera.update()
            if self._buffer:
                self._copy_to_gpu()
        except Exception as e:
            Logger.error('CameraPiCamera2\n' + str(e))

    def start(self):
        if not self.started:
            self.started = True
            super().start()
            # Build a fresh lifecycle object and inject the provider
            # settings it reads (resolution, framerate, context, rotation,
            # audio) before opening the hardware.
            self._camera = CameraPi2()
            self._camera._resolution = self._resolution
            self._camera._framerate = self._framerate
            self._camera._context = self._context
            self._camera._rotate = self._rotate
            self._camera.audio = self.audio
            self._texture = None
            if self._update_ev is not None:
                self._update_ev.cancel()
            self._camera.start(self._index)
            self._update_ev = Clock.schedule_interval(self.update, self.fps)

    def stop(self):
        super().stop()
        self.started = False
        if self._update_ev is not None:
            self._update_ev.cancel()
            self._update_ev = None
        if self._camera:
            self._camera.stop()
        self._texture = None
        self.fbo = None

    def photo(self,filepath, callback):
        if self._camera:
            self._camera.photo(filepath, callback)

    def video_start(self,filepath, callback):
        if self._camera:
            self._camera.video_start(filepath, callback)

    def video_stop(self):
        if self._camera:
            self._camera.video_stop()

    def zoom(self, scale):
        if self._camera:
            self._camera.zoom(scale)

    def drag(self, dx, dy):
        if self._camera:
            self._camera.drag(dx, dy)
-------------------------------------------------------------------------------- /src/camera4kivy/preview.py: -------------------------------------------------------------------------------- 1 | from kivy.uix.anchorlayout import AnchorLayout 2 | from kivy.uix.label import Label 3 | from kivy.graphics import Fbo, Color, Rectangle, Scale 4 | from kivy.properties import ColorProperty, StringProperty, ObjectProperty 5 | from kivy.utils import platform 6 | from threading import Thread, Event 7 | 8 | 9 | if platform == 'android': 10 | from .preview_camerax import PreviewCameraX as CameraPreview 11 | else: 12 | from .preview_kivycamera import PreviewKivyCamera as CameraPreview 13 | from .preview_kivycamera import KivyCameraProviderInfo 14 | 15 | class CameraProviderInfo(): 16 | def get_name(self): 17 | if platform == 'android': 18 | provider = 'android' 19 | else: 20 | provider = KivyCameraProviderInfo().get_name() 21 | return provider 22 | 23 | class Preview(AnchorLayout): 24 | 25 | ########################################## 26 | # Layout Properties 27 | ########################################## 28 | 29 | aspect_ratio = StringProperty() 30 | orientation = StringProperty() 31 | letterbox_color = ColorProperty('black') 32 | filepath_callback = ObjectProperty() 33 | inhibit_property = False 34 | preview = None 35 | 36 | ########################################## 37 | # Camera Events 38 | ########################################## 39 | 40 | def __init__(self, **kwargs): 41 | super().__init__(**kwargs) 42 | self.anchor_x = 'center' 43 | self.anchor_y = 'center' 44 | self.label = Label() 45 | self.preview = CameraPreview() 46 | self.add_widget(self.label) 47 | self.add_widget(self.preview) 48 | self.inhibit_property = False 49 | for key in ['letterbox_color', 'aspect_ratio', 50 | 'orientation']: 51 | if key in kwargs: 52 | setattr(self, key, kwargs[key]) 53 | if key == 'aspect_ratio': 54 | self.preview.set_aspect_ratio(kwargs[key]) 55 | if key == 'orientation': 56 | 
self.preview.set_orientation(kwargs[key]) 57 | self._fbo = None 58 | self._busy = False 59 | self.camera_connected = False 60 | self._image_available = Event() 61 | self.analyze_resolution = 1024 62 | self.auto_analyze_resolution = [] 63 | 64 | def on_orientation(self,instance,orientation): 65 | if self.preview and not self.inhibit_property: 66 | self.preview.set_orientation(orientation) 67 | 68 | def on_aspect_ratio(self,instance, aspect_ratio): 69 | if self.preview and not self.inhibit_property: 70 | self.preview.set_aspect_ratio(aspect_ratio) 71 | 72 | def on_size(self, layout, size): 73 | self.label.canvas.clear() 74 | with self.label.canvas: 75 | Color (*self.letterbox_color) 76 | Rectangle(pos = self.pos, size = self.size) 77 | 78 | ########################################## 79 | # User Events - All Platforms 80 | ########################################## 81 | 82 | def connect_camera(self, analyze_pixels_resolution = 1024, 83 | enable_analyze_pixels = False, **kwargs): 84 | self.analyze_resolution = analyze_pixels_resolution 85 | self.inhibit_property = True 86 | self.camera_connected = True 87 | self._fbo = None 88 | if enable_analyze_pixels: 89 | Thread(target=self.image_scheduler, daemon=True).start() 90 | self.preview.connect_camera(analyze_callback = 91 | self.analyze_image_callback_schedule, 92 | analyze_proxy_callback = 93 | self.analyze_imageproxy_callback, 94 | canvas_callback = 95 | self.possible_canvas_callback, 96 | **kwargs) 97 | 98 | def disconnect_camera(self): 99 | self._image_available.set() 100 | self.camera_connected = False 101 | self.preview.disconnect_camera() 102 | self.inhibit_property = False 103 | 104 | def capture_screenshot(self, **kwargs): 105 | self.preview.capture_screenshot(**kwargs) 106 | 107 | def select_camera(self, camera_id): 108 | return self.preview.select_camera(camera_id) 109 | 110 | ########################################## 111 | # User Events - some platforms 112 | ########################################## 113 | 
114 | def capture_photo(self, **kwargs): 115 | self.preview.capture_photo(**kwargs) 116 | 117 | def capture_video(self, **kwargs): 118 | self.preview.capture_video(**kwargs) 119 | 120 | def stop_capture_video(self): 121 | self.preview.stop_capture_video() 122 | 123 | ########################################## 124 | # User Events - Android Only 125 | ########################################## 126 | 127 | def flash(self, state = None): 128 | return self.preview.flash(state) 129 | 130 | def torch(self, state): 131 | return self.preview.torch(state) 132 | 133 | def focus(self, x, y): 134 | self.preview.focus(x, y) 135 | 136 | def zoom(self, delta_scale): 137 | self.preview.zoom(delta_scale) 138 | 139 | ########################################## 140 | # Data Analysis, Image Size and Schedule 141 | ########################################## 142 | 143 | def analyze_image_callback_schedule(self, texture, tpos, tscale, mirror): 144 | # texture : Kivy Texture with same orientation as the Preview 145 | # tpos : location of texture in Preview 146 | # tscale : scale from oriented Texture resolution to Preview resolution 147 | # mirror : true if preview is mirrored 148 | if not self._busy: 149 | self._busy = True 150 | # Create a texture with lower resolution 151 | if self.auto_analyze_resolution: 152 | # resolution set by the analyzer [w,h] regardless of 153 | # Preview orientation or aspect ratio. 154 | # If the aspect ratio is not the same the Fbo is distorted. 155 | # self.scale is a two element array 156 | fbo_size = self.auto_analyze_resolution 157 | scale = [tscale * texture.width / fbo_size[0], 158 | tscale * texture.height / fbo_size[1]] 159 | else: 160 | # resolution is 'self.analyze_resolution' along the long edge 161 | # default value is 1024 162 | # Optionally set as a connect option. 163 | # Value is never greater that the sensor resolution. 
164 | # The aspect ratio is always the same as the Preview 165 | # self.scale is a scalar 166 | fbo_scale = max(max(texture.size) / self.analyze_resolution, 1) 167 | fbo_size = (round(texture.size[0]/fbo_scale), 168 | round(texture.size[1]/fbo_scale)) 169 | scale = tscale * fbo_scale 170 | origin = (round(fbo_size[0]/2), round(fbo_size[1]/2)) 171 | # new or resized texture 172 | if not self._fbo or self._fbo.size[0] != fbo_size[0] or\ 173 | self._fbo.size[1] != fbo_size[1]: 174 | self._fbo = Fbo(size = fbo_size) 175 | self._fbo.clear() 176 | with self._fbo: 177 | Color(1,1,1,1) 178 | Scale(1,-1,1, origin = origin) 179 | Rectangle(texture= texture, size = fbo_size) 180 | self._fbo.draw() 181 | 182 | # save these for self.analyze_pixels_callback() 183 | self.pixels = self._fbo.texture.pixels 184 | self.im_size = self._fbo.texture.size 185 | self.scale = scale # 2 ele list , or scalar 186 | self.tpos = tpos 187 | self.mirror = mirror 188 | # ready 189 | self._image_available.set() 190 | 191 | def image_scheduler(self): 192 | while True: 193 | self._image_available.wait() 194 | self._image_available.clear() 195 | if not self.camera_connected: 196 | break 197 | # Must pass pixels not Texture, becuase we are in a different 198 | # Thread 199 | self.analyze_pixels_callback(self.pixels, self.im_size, self.tpos, 200 | self.scale, self.mirror) 201 | self._busy = False 202 | 203 | def possible_canvas_callback(self, texture, tex_size, tex_pos): 204 | if self.camera_connected: 205 | self.canvas_instructions_callback(texture, tex_size, tex_pos) 206 | 207 | ########################################## 208 | # Data Analysis Callbacks 209 | ########################################## 210 | 211 | # analyze_pixels_callback() 212 | # 213 | # pixels : Kivy Texture pixels, always RGBA 214 | # image_size : size of pixels 215 | # image_pos : Bottom left corner of analysis Texture inside the 216 | # Preview. AKA the letterbox size plus modified aspect ratio adjustment. 
217 | # image_scale : Ratio between the analyzed Texture resolution and 218 | # screen image resolution. 219 | # mirror : True if Preview is mirrored 220 | 221 | def analyze_pixels_callback(self, pixels, image_size, image_pos, 222 | image_scale, mirror): 223 | pass 224 | 225 | # canvas_instructions_callback() 226 | # 227 | # texture : the default texture to be displayed in the Priview 228 | # tex_size : texture size with mirror information 229 | # tex_pos : texture pos with mirror information 230 | def canvas_instructions_callback(self, texture, tex_size, tex_pos): 231 | pass 232 | 233 | # analyze_imageproxy_callback() 234 | # Android only 235 | # 236 | # image_proxy : 237 | # https://developer.android.com/reference/androidx/camera/core/ImageProxy 238 | # image_pos : Bottom left corner of screen image insie the Preview, the 239 | # letterbox size. 240 | # image_scale : Scale image_proxy size to screen image size. 241 | # mirror : True if Preview is mirrored 242 | # degrees : clockwise rotation required to make image_proxy the same 243 | # orientation as the screen. 244 | def analyze_imageproxy_callback(self, image_proxy, image_pos, image_scale, 245 | mirror, degrees): 246 | pass 247 | 248 | 249 | -------------------------------------------------------------------------------- /src/camera4kivy/preview_camerax.py: -------------------------------------------------------------------------------- 1 | # An implementation of Android CameraX called from a Kivy Preview widget. 
2 | # 3 | # About CameraX: 4 | # https://developer.android.com/training/camerax 5 | # Tested devices: 6 | # https://developer.android.com/training/camerax/devices 7 | # 8 | # Source 9 | # https://github.com/Android-for-Python/Camera4Kivy/preview_camerax.py 10 | # 11 | 12 | from kivy.clock import Clock, mainthread 13 | from kivy.graphics import Fbo, Callback, Rectangle, Rotate, Scale, Translate,\ 14 | Color 15 | from kivy.graphics.texture import Texture 16 | from kivy.logger import Logger 17 | 18 | from datetime import datetime 19 | from os.path import exists, join 20 | from os import mkdir, remove 21 | from pathlib import Path 22 | from threading import Thread 23 | 24 | from gestures4kivy import CommonGestures 25 | from camera4kivy.preview_common import PreviewCommon 26 | 27 | from android.storage import app_storage_path, primary_external_storage_path 28 | from android.runnable import run_on_ui_thread 29 | from android import mActivity, api_version 30 | from jnius import autoclass, PythonJavaClass, java_method 31 | 32 | GL_TEXTURE_EXTERNAL_OES = autoclass( 33 | 'android.opengl.GLES11Ext').GL_TEXTURE_EXTERNAL_OES 34 | Environment = autoclass('android.os.Environment') 35 | CameraX = autoclass('org.kivy.camerax.CameraX') 36 | if api_version >= 29: 37 | ContentValues = autoclass('android.content.ContentValues') 38 | MediaStoreMediaColumns =\ 39 | autoclass('android.provider.MediaStore$MediaColumns') 40 | MediaStoreImagesMedia =\ 41 | autoclass('android.provider.MediaStore$Images$Media') 42 | FileInputStream = autoclass('java.io.FileInputStream') 43 | FileUtils = autoclass('android.os.FileUtils') 44 | 45 | class PreviewCameraX(PreviewCommon, CommonGestures): 46 | 47 | def __init__(self, **kwargs): 48 | super().__init__(**kwargs) 49 | self._camera = None 50 | self.enable_zoom_gesture = False 51 | self.enable_focus_gesture = False 52 | self.block_pipeline = True 53 | self._fbo = None 54 | 55 | ############################## 56 | # Lifecycle events 57 | 
############################## 58 | 59 | def connect_camera(self, 60 | enable_photo = True, 61 | enable_video = True, 62 | enable_analyze_imageproxy = False, 63 | camera_id = 'back', 64 | optimize = 'quality', 65 | sensor_resolution = None, 66 | default_flash = 'off', 67 | default_zoom = 0.5, 68 | enable_zoom_gesture = True, 69 | enable_focus_gesture = True, 70 | data_format = 'yuv420', 71 | filepath_callback = None, 72 | analyze_proxy_callback = None, 73 | analyze_callback = None, 74 | canvas_callback = None, 75 | **kwargs): 76 | 77 | self._camera = None 78 | self._update_ev = None 79 | self._name_pipe = [] 80 | self.texture_size = [] 81 | self.rotation = 0 82 | self.capture_in_progress = False 83 | 84 | # uniform case 85 | self.flash_state = default_flash.lower() 86 | data_format = data_format.lower() 87 | optimize = optimize.lower() 88 | 89 | self.canvas_callback = canvas_callback 90 | self.set_filepath_callback(filepath_callback) 91 | self.set_facing(camera_id) 92 | self.set_resolution(sensor_resolution) 93 | self.enable_data = enable_analyze_imageproxy 94 | 95 | # flash 96 | if self.flash_state not in ['on','off','auto']: 97 | self.flash_state = 'off' 98 | 99 | # optimize 100 | if optimize not in ['latency','quality']: 101 | optimize = 'quality' 102 | 103 | # zoom and focus 104 | default_zoom = min(max(default_zoom,0),1) 105 | self.enable_zoom_gesture = enable_zoom_gesture 106 | self.enable_focus_gesture = enable_focus_gesture 107 | 108 | # Analyse Image format 109 | if data_format not in ['rgba', 'yuv420']: 110 | data_format = 'yuv420' 111 | 112 | self._analyze_callback = analyze_callback 113 | self._analyze_proxy_callback = analyze_proxy_callback 114 | 115 | 116 | # These Java callbacks will execute in Java Main Thread 117 | self.cb_wrapper = CallbackWrapper(self._filename_callback, 118 | self._analyze_image_proxy, 119 | self._configure_pipeline) 120 | 121 | # Create an Android camera with the required behavior 122 | self._camera = CameraX( 123 | 
enable_photo, 124 | enable_video, 125 | self.enable_data, 126 | self.facing, 127 | self._sensor_resolution, 128 | self.aspect_ratio, 129 | self.cb_wrapper, 130 | self.flash_state, 131 | optimize, 132 | default_zoom, 133 | data_format) 134 | 135 | # check camerax_provider version 136 | # Set in org/kivy/camerax/CameraX.java 137 | latest = '0.0.3' 138 | try: 139 | if self._camera.providerVersion() < latest: 140 | Logger.warning('Update camerax_provider to the latest version, this is ' + latest) 141 | except: 142 | Logger.warning('Update camerax_provider to the latest version, this is ' + latest) 143 | 144 | # Configure the camera for the Kivy view port 145 | self._configure_camera(True) 146 | 147 | def disconnect_camera(self): 148 | self.destroy_camera() 149 | self.block_and_clear_pipeline() 150 | 151 | @run_on_ui_thread 152 | def destroy_camera(self): 153 | self.stop_capture_video() 154 | self._deschedule_pipeline() 155 | if self._camera: 156 | self._camera.unbind_camera() 157 | self._camera = None 158 | 159 | # configure camera 160 | def _configure_camera(self, start): 161 | self.configure_viewport() 162 | if self._camera: 163 | self._camera.setViewPort(self.view_size) 164 | self._camera.startCamera() 165 | else: 166 | self.canvas.clear() 167 | with self.canvas: 168 | Color(1,1,1,1) 169 | Rectangle(size = self.view_size, pos = self.view_pos) 170 | 171 | # Device Rotate 172 | def on_size(self, instance, size): 173 | if self._camera: 174 | self.stop_capture_video() 175 | self._configure_camera(False) 176 | 177 | ################################## 178 | # Parse options 179 | ################################## 180 | 181 | def set_facing(self, facing): 182 | facing = facing.lower() 183 | if facing == '0': 184 | facing = 'back' 185 | elif facing == '1': 186 | facing = 'front' 187 | elif facing not in ['back','front']: 188 | facing = 'back' 189 | self.facing = facing 190 | 191 | ############################## 192 | # Preview Widget Touch Events 193 | 
############################## 194 | 195 | # CommonGestures Touch Events 196 | # tap for focus 197 | def cgb_primary(self, touch, x, y): 198 | if self._camera and self.enable_focus_gesture: 199 | self.focus(x, y) 200 | 201 | # pinch/spread for zoom 202 | def cgb_zoom(self, touch0, touch1, x, y, scale): 203 | if self._camera and self.enable_zoom_gesture: 204 | self.zoom_delta(scale) 205 | 206 | ############################## 207 | # User events 208 | ############################## 209 | 210 | def capture_photo(self, location = '', subdir = '', name = ''): 211 | if self._camera: 212 | self.capture_in_progress = True 213 | self._set_location(location) 214 | subdir = self._default_subdir_android(subdir) 215 | name = self._default_file_name(name, '.jpg') 216 | if self.file_storage: 217 | self._name_pipe.append(join(subdir, name)) 218 | self._camera.capture_photo(subdir, name, self.file_storage) 219 | 220 | def capture_video(self, location = '', subdir = '', name = ''): 221 | if self._camera: 222 | self.capture_in_progress = True 223 | self._set_location(location) 224 | subdir = self._default_subdir_android(subdir) 225 | name = self._default_file_name(name,'.mp4') 226 | if self.file_storage: 227 | self._name_pipe.append(join(subdir, name)) 228 | self._camera.capture_video(subdir, name, self.file_storage) 229 | 230 | def stop_capture_video(self): 231 | if self._camera: 232 | self._camera.stop_capture_video() 233 | 234 | def capture_screenshot(self, location = '.', subdir = '', name = ''): 235 | view_crop = self.screenshot_crop() 236 | self._set_location(location) 237 | subdir = self._default_subdir_android(subdir) 238 | name = self._default_file_name(name, '.jpg') 239 | tex = self.export_as_image().texture.get_region(*view_crop) 240 | path = join(subdir, name) 241 | if self.file_storage: 242 | # local or, shared and api<=29 243 | tex.save(path, flipped = True) 244 | if self.callback: 245 | self.callback(path) 246 | else: 247 | # MediaStore 248 | cache = self.cache_path() 
249 | if cache: 250 | # write to cache 251 | cachefile = join(cache, name) 252 | tex.save(cachefile, flipped = True) 253 | # create MediaStore entry 254 | cv = ContentValues() 255 | cv.put(MediaStoreMediaColumns.DISPLAY_NAME, name) 256 | cv.put(MediaStoreMediaColumns.MIME_TYPE, 'image/jpeg') 257 | cv.put(MediaStoreMediaColumns.RELATIVE_PATH, subdir) 258 | root_uri = MediaStoreImagesMedia.getContentUri('external') 259 | context = mActivity.getApplicationContext() 260 | uri = context.getContentResolver().insert(root_uri, cv) 261 | # copy cache file to MediaStore 262 | rs = FileInputStream(cachefile) 263 | ws = context.getContentResolver().openOutputStream(uri) 264 | FileUtils.copy(rs,ws) 265 | ws.flush() 266 | ws.close() 267 | rs.close() 268 | remove(cachefile) 269 | if self.callback: 270 | self.callback(path) 271 | 272 | 273 | # Select back, front camera 274 | def select_camera(self, facing): 275 | if self._camera: 276 | facing = facing.lower() 277 | if facing == 'toggle': 278 | if self.facing == 'back': 279 | self.facing = 'front' 280 | else: 281 | self.facing = 'back' 282 | elif facing == 'front' or facing == '1': 283 | self.facing = 'front' 284 | else: 285 | self.facing = 'back' 286 | 287 | # may have to wait for a capture to complete 288 | if not self.capture_in_progress: 289 | self.block_and_clear_pipeline() 290 | self.do_select_camera() 291 | else: 292 | self.stop_capture_video() 293 | self._facing_ev = Clock.schedule_interval( 294 | self.can_select_camera, 1 / 30) 295 | facing = self.facing 296 | return facing 297 | 298 | def can_select_camera(self,dt): 299 | if not self.capture_in_progress: 300 | self.block_and_clear_pipeline() 301 | self.do_select_camera() 302 | Clock.unschedule(self._facing_ev) 303 | 304 | @run_on_ui_thread 305 | def do_select_camera(self): 306 | self._camera.select_camera(self.facing) 307 | 308 | # Sequence flash : off, on, auto, ... 
309 | def flash(self, state = None): 310 | # None, auto sequence, 0 ->2 set state 311 | if self._camera: 312 | if state == None: 313 | if self.flash_state == 'off': 314 | self.flash_state = 'on' 315 | elif self.flash_state == 'on': 316 | self.flash_state = 'auto' 317 | else: 318 | self.flash_state = 'off' 319 | elif state in ['off', 'on', 'auto']: 320 | self.flash_state = state 321 | self.flash_state = self._camera.flash(self.flash_state) 322 | return self.flash_state 323 | return "off" 324 | 325 | def torch(self, state = None): 326 | if self._camera: 327 | if state in ['off', 'on']: 328 | try: 329 | return self._camera.torch(state) 330 | except: 331 | Logger.warning('Update camerax_provider to >= 0.0.3') 332 | return 'off' 333 | else: 334 | return 'off' 335 | 336 | # if enable_focus_gesture == True, then this is called by a tap gesture 337 | def focus(self, x, y): 338 | if self._camera: 339 | self._camera.focus(x, y) 340 | 341 | # if enable_zoom_gesture == True, then this called by pinch/spread gesture 342 | def zoom_delta(self, delta_scale): 343 | if self._camera: 344 | self._camera.zoom(delta_scale, False) 345 | 346 | def zoom_abs(self, scale): 347 | if self._camera: 348 | self._camera.zoom(scale, True) 349 | 350 | ############################## 351 | # Create Preview Pipeline 352 | ############################## 353 | 354 | def _create_texture(self, size): 355 | self._camera_texture = Texture(width = size[0], 356 | height= size[1], 357 | target=GL_TEXTURE_EXTERNAL_OES, 358 | colorfmt='rgba') 359 | return int(self._camera_texture.id) 360 | 361 | def _create_fbo(self, texture_size, rotation): 362 | long_edge = max(texture_size) 363 | short_edge = min(texture_size) 364 | origin = (texture_size[0]/2, texture_size[1]/2) 365 | translate = 0 366 | scalex = 1 367 | scaley = 1 368 | 369 | if rotation == 90: 370 | translate = -(long_edge - short_edge) /2 371 | elif rotation == 270: 372 | translate = (long_edge - short_edge) /2 373 | 374 | if texture_size[0] < 
texture_size[1]: 375 | translate = -translate 376 | scalex = -scalex 377 | scaley = -scaley 378 | 379 | if rotation in [90 , 270]: 380 | fbo_size = (texture_size[1],texture_size[0]) 381 | else: 382 | fbo_size = texture_size 383 | 384 | self._fbo = Fbo(size=fbo_size) 385 | self._fbo.shader.fs = ''' 386 | #extension GL_OES_EGL_image_external : require 387 | $HEADER$ 388 | uniform samplerExternalOES texture1; 389 | void main() 390 | { 391 | gl_FragColor = texture2D(texture1, tex_coord0); 392 | } 393 | ''' 394 | 395 | with self._fbo.before: 396 | Rotate(origin = origin, angle = 360 - rotation, axis = (0, 0, 1)) 397 | Translate(translate, translate) 398 | Scale(scalex, scaley, 1, origin = origin ) 399 | Rectangle(size = texture_size) 400 | 401 | with self._fbo: 402 | self._camera_texture_cb = Callback(lambda instr: 403 | self._camera_texture.bind) 404 | 405 | # Run on Kivy main thread because required by FBO. 406 | @mainthread 407 | def _create_pipeline(self, texture_size, rotation): 408 | id = self._create_texture(texture_size) 409 | self._create_fbo(texture_size, rotation) 410 | self._camera.setTexture(id,texture_size) 411 | self._schedule_pipeline() 412 | 413 | ############################## 414 | # Fill Preview Pipeline 415 | ############################## 416 | 417 | def block_and_clear_pipeline(self): 418 | self.block_pipeline = True 419 | if self._camera and self._fbo and self._fbo.texture: 420 | tex_size = self._fbo.texture.size 421 | buf = bytes([255] * tex_size[0] * tex_size[1] * 4) 422 | self._fbo.texture.blit_buffer(buf, colorfmt='rgba', 423 | bufferfmt='ubyte') 424 | self._analyze_texture() 425 | self._update_canvas() 426 | 427 | def _schedule_pipeline(self): 428 | self._deschedule_pipeline() 429 | if self._camera and self._camera_texture and self._fbo and\ 430 | self._fbo.texture: 431 | self._set_surface_provider(True) 432 | self.block_pipeline = False 433 | self._update_ev = Clock.schedule_interval(self._update_pipeline, 434 | 1 / 30) 435 | 436 | def 
_deschedule_pipeline(self): 437 | if self._update_ev is not None: 438 | self._set_surface_provider(False) 439 | self._update_ev.cancel() 440 | self._update_ev = None 441 | 442 | def _update_pipeline(self, dt): 443 | if self._camera.imageReady() and self._fbo and not self.block_pipeline: 444 | self._camera_texture_cb.ask_update() 445 | self._fbo.draw() 446 | self._analyze_texture() 447 | self._update_canvas() 448 | 449 | # Run on UI thread because required by CameraX 450 | @run_on_ui_thread 451 | def _set_surface_provider(self, enable): 452 | self._camera.setSurfaceProvider(enable) 453 | 454 | # Run on mainthread because required by Kivy canvas 455 | @mainthread 456 | def _update_canvas(self): 457 | if self._fbo: 458 | tex = self._fbo.texture.get_region(*self.crop) 459 | 460 | # moved from create_fbo 461 | if self.facing == 'front': 462 | view_size = (-self.view_size[0], self.view_size[1]) 463 | view_pos = (self.view_pos[0] + self.view_size[0], 464 | self.view_pos[1]) 465 | else: 466 | view_size = self.view_size 467 | view_pos = self.view_pos 468 | 469 | self.canvas.clear() 470 | with self.canvas: 471 | Color(1,1,1,1) 472 | Rectangle(texture= tex, size = view_size, pos = view_pos) 473 | if self.canvas_callback: 474 | self.canvas_callback(tex, view_size, view_pos) 475 | 476 | ####################################### 477 | # Storage Location 478 | ####################################### 479 | 480 | def _set_location(self, location): 481 | storage = location.lower() 482 | if storage not in ['private', 'shared']: 483 | storage = 'shared' 484 | self.private_storage = storage == 'private' 485 | self.file_storage = self.private_storage or api_version < 29 486 | 487 | def _default_location(self): 488 | if self.private_storage: 489 | root = join(app_storage_path(),Environment.DIRECTORY_DCIM) 490 | if not exists(root): 491 | mkdir(root) 492 | else: 493 | if api_version < 29: 494 | root = join(primary_external_storage_path(), 495 | Environment.DIRECTORY_DCIM, 496 | 
self._app_name()) 497 | if not exists(root): 498 | mkdir(root) 499 | else: 500 | root = join(Environment.DIRECTORY_DCIM, self._app_name()) 501 | return root 502 | 503 | def _default_subdir_android(self, subdir=''): 504 | root = self._default_location() 505 | if not subdir: 506 | # Today's date 507 | subdir = datetime.now().strftime("%Y_%m_%d") 508 | path = join(root,subdir) 509 | if self.private_storage or api_version < 29: 510 | if not exists(path): 511 | mkdir(path) 512 | return path 513 | 514 | def _app_name(self): 515 | context = mActivity.getApplicationContext() 516 | appinfo = context.getApplicationInfo() 517 | if appinfo.labelRes: 518 | name = context.getString(appinfo.labelRes) 519 | else: 520 | name = appinfo.nonLocalizedLabel.toString() 521 | return name 522 | 523 | def cache_path(self): 524 | context = mActivity.getApplicationContext() 525 | cache = context.getExternalCacheDir() 526 | return str(cache.toString()) 527 | 528 | ####################################### 529 | # Callbacks 530 | ####################################### 531 | 532 | # Runs in Java Main Thread 533 | def _configure_pipeline(self, croprect, resolution, rotation): 534 | if rotation in [ 90, 270]: 535 | self.crop = [croprect.top, croprect.left, 536 | croprect.bottom - croprect.top, 537 | croprect.right - croprect.left] 538 | else: 539 | self.crop = [croprect.left, croprect.top, 540 | croprect.right - croprect.left, 541 | croprect.bottom - croprect.top] 542 | texture_size = [resolution.getWidth(), resolution.getHeight()] 543 | self.texture_size = texture_size 544 | self.tscale = self.view_size[1] / self.crop[3] 545 | self.rotation = rotation 546 | self._create_pipeline(texture_size, rotation) 547 | 548 | # Runs in some Java thread 549 | def _filename_callback(self, file_id): 550 | if not file_id: 551 | # The callback returns "" for non-MediaStore saves 552 | if self._name_pipe: 553 | file_id = self._name_pipe[0] 554 | self._name_pipe = self._name_pipe[1:] 555 | self.capture_in_progress = 
False 556 | if self.callback: 557 | self.callback(str(file_id)) 558 | 559 | def _analyze_texture(self): 560 | if not self.enable_data and self._fbo and self._analyze_callback: 561 | tex = self._fbo.texture.get_region(*self.crop) 562 | self._analyze_callback(tex, self.view_pos, 563 | self.tscale, self.facing=='front') 564 | 565 | def _analyze_image_proxy(self, image_proxy): 566 | if self.enable_data and self._analyze_proxy_callback: 567 | if self.rotation in [0, 180]: 568 | tscale = self.view_size[1] / image_proxy.getHeight() 569 | else: 570 | tscale = self.view_size[1] / image_proxy.getWidth() 571 | self._analyze_proxy_callback(image_proxy, self.view_pos, 572 | tscale, self.facing == 'front', 573 | self.rotation) 574 | 575 | 576 | class CallbackWrapper(PythonJavaClass): 577 | __javacontext__ = 'app' 578 | __javainterfaces__ = ['org/kivy/camerax/CallbackWrapper'] 579 | 580 | def __init__(self, callback0, callback1, callback2): 581 | super().__init__() 582 | self.callback0 = callback0 583 | self.callback1 = callback1 584 | self.callback2 = callback2 585 | 586 | @java_method('(Ljava/lang/String;)V') 587 | def callback_string(self, filepath): 588 | if self.callback0: 589 | self.callback0(filepath) 590 | 591 | @java_method('(Landroidx/camera/core/ImageProxy;)V') 592 | def callback_image(self, image): 593 | if self.callback1: 594 | self.callback1(image) 595 | 596 | @java_method('(Landroid/graphics/Rect;Landroid/util/Size;I)V') 597 | def callback_config(self, croprect, resolution, rotation): 598 | if self.callback2: 599 | self.callback2(croprect, resolution, rotation) 600 | 601 | -------------------------------------------------------------------------------- /src/camera4kivy/preview_common.py: -------------------------------------------------------------------------------- 1 | from kivy.core.window import Window 2 | from kivy.uix.widget import Widget 3 | from kivy.utils import platform 4 | 5 | from os import mkdir 6 | from os.path import exists, join 7 | from pathlib 
import Path
from datetime import datetime
from inspect import ismethod, signature

class PreviewCommon(Widget):
    """Base widget shared by the platform Preview implementations.

    Provides argument parsing for connect_camera() options, viewport
    geometry (aspect-correct letterboxing inside the widget), and
    capture-file path helpers.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._camera = None              # backend camera instance
        self._camera_texture = None
        self.view_size = (10, 10)        # on-screen preview rectangle size
        self.view_pos = (0, 0)           # on-screen preview rectangle pos
        self.tex_crop = (0, 0, 10, 10)   # texture crop (x, y, w, h)
        self.tscale = 1                  # texture -> view scale factor
        self.orientation = 'same'
        self.aspect_ratio = '4:3'
        self.callback = None             # filepath callback
        self._sensor_resolution = []

    #############################################
    # Parse Arguments
    #############################################

    def set_aspect_ratio(self, aspect_ratio):
        """Accept '4:3' or '16:9'; silently ignore anything else."""
        if aspect_ratio in ['4:3', '16:9']:
            self.aspect_ratio = aspect_ratio

    def set_orientation(self, orientation):
        """Accept one of 'landscape', 'portrait', 'same', 'opposite'
        (case-insensitive); silently ignore anything else."""
        orientation = orientation.lower()
        if orientation in ['landscape', 'portrait','same','opposite']:
            self.orientation = orientation

    def set_rotation(self, rotation):
        """Accept a sensor rotation of 0/90/180/270 degrees; default 0."""
        self._sensor_rotation = 0
        if rotation in [0,90,180,270]:
            self._sensor_rotation = rotation

    def set_resolution(self, resolution):
        """Store a 2-element tuple/list as (long side, short side);
        ignore any other value."""
        if resolution and\
           (type(resolution) is tuple or type(resolution) is list) and\
           len(resolution) == 2:
            self._sensor_resolution = (max(resolution), min(resolution))

    def set_filepath_callback(self,callback):
        """Store the filepath callback if it is a bound method taking
        exactly one argument; otherwise store None.

        NOTE(review): ismethod() is False for plain functions and
        lambdas, so those are silently discarded -- confirm intended.
        """
        if callback:
            if not ismethod(callback) or\
               len(signature(callback).parameters) !=1:
                callback = None
        self.callback = callback

    #############################################
    # Viewport
    #############################################

    def configure_viewport(self):
        """Compute view_size/view_pos: the largest rectangle of the
        configured aspect ratio that fits centered inside the widget
        (letterboxed on the short axis)."""
        orientation = self.decode_orientation()
        width_self, height_self = self.size

        if self.aspect_ratio == "4:3":
            aspect = 4/3
        else:
            aspect = 16/9

        if orientation == 'portrait':
            # Fit to height first, then shrink if wider than the widget.
            width_view = height_self / aspect
            height_view = height_self
            if self.width < width_view:
                width_view = self.width
                height_view = self.width * aspect
            pos_x = round((self.width - width_view)/2)
            pos_y = round((self.height - height_view)/2)
            width_view = round(width_view)
            height_view = round(height_view)
        else:
            # Fit to width first, then shrink if taller than the widget.
            width_view = width_self
            height_view = width_self / aspect
            if self.height < height_view:
                width_view = self.height * aspect
                height_view = self.height
            pos_x = round((self.width - width_view)/2)
            pos_y = round((self.height - height_view)/2)
            width_view = round(width_view)
            height_view = round(height_view)

        self.view_size = (width_view, height_view)
        self.view_pos = [self.pos[0] + pos_x, self.pos[1] + pos_y]

    def decode_orientation(self):
        """Resolve 'same'/'opposite' against the current Window
        orientation; return 'landscape' or 'portrait'."""
        orientation = self.orientation
        if orientation == 'same':
            if Window.width > Window.height:
                orientation = 'landscape'
            else:
                orientation = 'portrait'
        elif orientation == 'opposite':
            if Window.width > Window.height:
                orientation = 'portrait'
            else:
                orientation = 'landscape'
        return orientation

    def screenshot_crop(self):
        """Return (x, y, w, h): the widget-local rectangle of the
        letterboxed preview, offset on whichever axis is letterboxed."""
        pos_x = 0
        pos_y = 0
        if self.view_size[0] == round(self.width) and\
           self.view_size[1] != round(self.height):
            pos_y = (self.height - self.view_size[1])/2
        elif self.view_size[1] == round(self.height) and\
             self.view_size[0] != round(self.width):
            pos_x = (self.width - self.view_size[0])/2
        return (pos_x,pos_y, self.view_size[0],self.view_size[1])

    #############################################
    # File Utilities
    #############################################

    def capture_path(self,location, subdir ,name, ext):
        """Build the full capture path location/subdir/name+ext.

        On iOS, 'shared' storage (the default for unrecognized values)
        returns '' -- presumably the backend saves to the photo library
        itself in that case; confirm against the iOS camera provider.
        """
        if platform == 'ios':
            storage = location.lower()
            if storage not in ['private', 'shared']:
                storage = 'shared'
            if storage == 'shared':
                return ''
            location = self._camera.get_app_documents_directory()
        # (continuation of capture_path(): join directory and file name)
        return join(self._default_subdir(location, subdir),
                    self._default_file_name(name, ext))

    def _default_subdir(self, location = '.', subdir=''):
        """Return location/subdir, creating subdir if needed.

        Falls back to '.' when location does not exist; subdir defaults
        to today's date as YYYY_MM_DD.
        """
        if not exists(location):
            location = '.'
        if not subdir:
            # Today's date
            subdir = datetime.now().strftime("%Y_%m_%d")
        path = join(location,subdir)
        if not exists(path):
            mkdir(path)
        return path

    def _default_file_name(self, name='', ext = '.jpg'):
        """Return name+ext; a given name is reduced to its stem, else a
        time-of-day name HH_MM_SS_cc (centisecond precision) is used."""
        if name:
            name = Path(name).stem
        else:
            # [:-4] trims microseconds down to two digits.
            name = datetime.now().strftime("%H_%M_%S_%f")[:-4]
        return name + ext

# --------------------------------------------------------------------------
# File: src/camera4kivy/preview_kivycamera.py
# --------------------------------------------------------------------------
from kivy.app import App
from kivy.core.window import Window
from threading import Thread
from kivy.clock import mainthread
from kivy.utils import platform
from kivy.core import core_select_lib
from kivy.graphics import Rectangle, Color
from kivy.graphics.texture import Texture
from kivy.core.text import Label as CoreLabel
from kivy.metrics import sp
from gestures4kivy import CommonGestures
from camera4kivy.preview_common import PreviewCommon
if platform in ['macosx', 'ios']:
    from kivy.core.camera import Camera
else:
    from camera4kivy.based_on_kivy_core.camera import Camera

from kivy.logger import Logger

class KivyCameraProviderInfo():
    """Reports which camera provider module was selected at import."""

    def get_name(self):
        """Derive the provider name from the Camera class's module name,
        e.g. '...camera_opencv...' -> 'opencv'; '' when no provider."""
        if Camera:
            provider = str(Camera).split('.')[-2].split('_')[-1]
        else:
            provider = ""
        return provider


class PreviewKivyCamera(PreviewCommon, CommonGestures):
    """Camera preview widget backed by a Kivy-core-style Camera provider
    (gi, opencv, picamera, picamera2, or the macosx/ios core camera)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.error_message = ''
        self.mirror = True
        self.switching_camera = False    # re-entrancy guard for select_camera
        self.starting_camera = False     # re-entrancy guard for start_camera
        self.abort_camera_start = False
        self.enable_zoom_gesture = False
        self.enable_focus_gesture = False
        self.audio = False
        self.cg_zoom_level = [1 , 1]     # per-camera (back, front) zoom level
        self.window_width = Window.width
        if platform == 'ios':
            self._enable_on_resume()
        self.provider = KivyCameraProviderInfo().get_name()

    def __del__(self):
        self.disconnect_camera()

    @mainthread
    def _enable_on_resume(self):
        """Bind the app's on_resume event (iOS only) to refresh the view."""
        app = App.get_running_app()
        app.bind(on_resume = self.on_resume)

    def on_resume(self, arg):
        Window.update_viewport()

    def on_size(self, instance, size):
        """Recompute viewport and texture crop on resize, then redraw:
        error text, the current frame, or a white placeholder."""
        self.configure_viewport()
        self.configure_texture_crop(None)
        if platform == 'ios' and self.window_width != Window.width:
            # Window aspect changed: tell the iOS backend about the new
            # device orientation.
            if self._camera:
                orientation = self._camera.get_device_orientation()
                if orientation in [1,2,3,4]:
                    self._camera.set_video_orientation(orientation)
        self.canvas.clear()
        if self.error_message:
            self.canvas_text(self.error_message)
        elif self._camera and self._camera._texture:
            self.on_tex(None)
        else:
            with self.canvas:
                Color(1,1,1,1)
                Rectangle(size = self.view_size, pos = self.view_pos)
        self.window_width = Window.width

    #############################################
    # User Events
    #############################################

    def connect_camera(self,
                       camera_id = '0',
                       mirrored = True,
                       audio = False,
                       sensor_resolution = [],
                       sensor_rotation = 0,
                       default_zoom = 1.0,
                       enable_zoom_gesture = True,
                       enable_focus_gesture = True,
                       filepath_callback = None,
                       analyze_callback = None,
                       canvas_callback = None,
                       **kwargs):
        """Configure and start the camera.

        camera_id: '0'/'back', '1'/'front', a numeric id, or 'toggle'.
        default_zoom is clamped to [0, 1]. Extra kwargs accepted by other
        backends are ignored here.
        """
        self.set_index(camera_id)
        if audio == True:
            self.audio = True
        self.set_resolution(sensor_resolution)
        self.set_rotation(sensor_rotation)
        self.set_filepath_callback(filepath_callback)
        self.data_callback = analyze_callback
        self.canvas_callback = canvas_callback
        # (continuation of connect_camera())
        self.default_zoom = min(max(default_zoom,0),1)
        self.enable_zoom_gesture = enable_zoom_gesture
        self.enable_focus_gesture = enable_focus_gesture
        self.cg_zoom_level = [self.default_zoom, self.default_zoom]
        if platform == 'ios':
            # iOS: mirror only the front camera (index != 0).
            self.mirror = self.index != 0
        else:
            self.mirror = mirrored
        self.stop_camera()
        #Thread(target=self.start_camera, daemon=True).start()
        self.start_camera()

    def disconnect_camera(self):
        """Stop the camera, or flag an in-progress start to abort."""
        if self.starting_camera:
            # This was related to the Thread above, now redundant?
            self.abort_camera_start = True
        else:
            self.stop_camera()

    def select_camera(self, index):
        """Switch to another camera; guarded against re-entry while a
        switch or start is already in progress."""
        if self.switching_camera or self.starting_camera:
            return self.index
        self.switching_camera = True
        if platform == 'ios':
            # iOS can swap inputs without a full stop/start cycle.
            if self._camera:
                self.set_index(index)
                self.mirror = self.index != 0
                self._camera.change_camera_input(self.index)
                self.zoom_abs(self.cg_zoom_level[self.index])
        else:
            self.stop_camera()
            self.set_index(index)
            self.start_camera()
        self.switching_camera = False
        return index

    # Screenshot
    ######################
    def capture_screenshot(self, location = '.', subdir = '', name = ''):
        """Save the on-screen preview (widget render) as a jpg and invoke
        the filepath callback."""
        view_crop = self.screenshot_crop()
        path = self.capture_path(location, subdir, name, '.jpg')
        tex = self.export_as_image().texture.get_region(*view_crop)
        tex.flip_vertical()
        if platform == 'ios':
            self._camera.save_texture(tex, path)
        else:
            tex.save(path, flipped = False)
        if self.callback:
            self.callback(path)

    # Photo
    ######################
    def capture_photo(self, location = '.', subdir = '', name = ''):
        """Save the current camera frame as a jpg.

        picamera2/opencv providers take the photo themselves and invoke
        the callback asynchronously, hence the early return.
        """
        if self._camera and self._camera.texture:
            path = self.capture_path(location, subdir, name, '.jpg')
            tex = self._camera.texture.get_region(*self.tex_crop)
            if platform == 'ios':
                self._camera.save_texture(tex, path)
            elif self.provider in ['picamera2', 'opencv']:
                self._camera.photo(path, self.callback)
                return
            else:
                tex.save(path, flipped = False)
            if self.callback:
                self.callback(path)

    # Video
    ######################

    def capture_video(self, location = '', subdir = '', name = ''):
        """Start mp4 video capture; picamera2/opencv providers only."""
        if self._camera and self._camera.texture:
            if self.provider in ['picamera2', 'opencv']:
                path = self.capture_path(location, subdir, name, '.mp4')
                self._camera.video_start(path, self.callback)

    def stop_capture_video(self):
        """Stop an in-progress video capture; picamera2/opencv only."""
        if self._camera and self._camera.texture:
            if self.provider in ['picamera2', 'opencv']:
                self._camera.video_stop()

    ##############################
    # Preview Widget Touch Events
    ##############################

    # pinch/spread for zoom
    def cgb_zoom(self, touch0, touch1, x, y, scale):
        """Pinch/spread gesture: absolute zoom on iOS (floor 1.0),
        relative zoom on picamera2."""
        if self._camera and self.enable_zoom_gesture:
            if platform == 'ios':
                level = max(self.cg_zoom_level[self.index] * scale, 1)
                self.cg_zoom_level[self.index] = level
                self.zoom_abs(level)
            elif self.provider in ['picamera2']:
                self._camera.zoom(scale)

    # drag
    def cgb_drag(self, touch, x, y, dx, dy):
        """Drag gesture: pan the (zoomed) picamera2 sensor crop.

        NOTE(review): gated on enable_zoom_gesture, not a separate
        drag/pan flag -- confirm intended.
        """
        if self._camera and self.enable_zoom_gesture:
            if self.provider in ['picamera2']:
                # normalize to preview image
                crop = self.screenshot_crop()
                dx = dx / crop[2]
                dy = dy / crop[3]
                self._camera.drag(dx, dy)

    #############################################
    # iOS only User Events
    #############################################

    def zoom_abs(self, level):
        """Set an absolute zoom level (iOS backend only)."""
        if platform == 'ios' and self._camera:
            self._camera.zoom_level(level)

    #############################################
    # Picamera2 only User Events
    #############################################

    def
    zoom_delta(self, delta_scale):
        # (completes the "def" on the previous source line)
        """Apply a relative zoom change; picamera2 provider only."""
        if self._camera and self.provider in ['picamera2']:
            self._camera.zoom(delta_scale)

    def drag(self, delta_x, delta_y):
        """Pan the sensor crop by a pixel delta, normalized to the
        preview rectangle; picamera2 provider only."""
        if self._camera and self.provider in ['picamera2']:
            crop = self.screenshot_crop()
            dx = delta_x / crop[2]
            dy = delta_y / crop[3]
            self._camera.drag(dx, dy)

    #############################################
    # Ignored User Events
    #############################################

    def flash(self, state):
        """Flash is unsupported by these providers; always 'off'."""
        return 'off'

    def torch(self, state):
        """Torch is unsupported by these providers; always 'off'."""
        return 'off'

    def focus(self, x, y):
        """Tap-to-focus is unsupported by these providers; no-op."""
        pass

    #############################################
    # Parse Arguments
    #############################################

    def set_index(self, index):
        """Parse a camera_id string into self.index.

        Accepts a numeric string, 'front' (1), 'back' (0), or 'toggle'.
        NOTE(review): 'toggle' reads self.index, which is only set by a
        previous call -- an AttributeError is possible if 'toggle' is the
        first id ever passed; confirm. The bare except around int() also
        swallows more than ValueError.
        """
        index = index.lower()
        try:
            int(index)
            isint = True
        except:
            isint = False
        if isint:
            self.index = int(index)
        elif index == 'front':
            self.index = 1
        elif index == 'back':
            self.index = 0
        elif index == 'toggle' and self.index == 0:
            self.index = 1
        elif index == 'toggle' and self.index == 1:
            self.index = 0
        else:
            self.index = 0;

    #############################################
    # Camera Events
    #############################################

    def start_camera(self):
        """Instantiate and start the provider Camera.

        Fills in a per-provider default sensor resolution when none was
        requested, sets error_message on failure, and honors a pending
        abort_camera_start flag set by disconnect_camera().
        """
        self.starting_camera = True
        try:
            if not self._sensor_resolution:
                # a max resolution will fall back to the highest available
                # except picamera
                if platform in ['macosx', 'ios']:
                    # default 16:9
                    self._sensor_resolution = [3840, 2160]
                elif self.provider in ['picamera','opencv']:
                    self._sensor_resolution = [1280 , 960]
                elif self.provider in ['picamera2']:
                    self._sensor_resolution = [800 , 600]
                else:
                    #default 4:3 , value ignored by gi
                    self._sensor_resolution = [6400, 4800]

            # picamera2/opencv providers call back into this widget.
            if self.provider in ['picamera2', 'opencv']:
                context = self
            else:
                context = None
            self._camera = Camera(index= self.index,
                                  resolution = self._sensor_resolution,
                                  rotation = self._sensor_rotation,
                                  callback = self.camera_error,
                                  context = context)
            self.error_message = ""
        except AttributeError as e:
            #Logger.warning(str(e))
            self.camera_error_message()
        except Exception as e:
            #Logger.warning(str(e))
            if self._camera:
                self.error_message = 'ERROR: Camera internal error.'
            else:
                self.error_message = 'ERROR: No camera provider found.'
            self._camera = None

        if self.error_message:
            self.canvas_text(self.error_message)

        if self._camera:
            self._camera.bind(on_load=self.configure_texture_crop)
            self._camera.bind(on_texture=self.on_tex)
            self._camera.start()
            self.zoom_delta(self.default_zoom)
        if self.abort_camera_start:
            # disconnect_camera() was called while we were starting.
            self.stop_camera()
            self._camera = None
        self.abort_camera_start = False
        self.starting_camera = False

    def on_load(self):
        self.configure_texture_crop(None)

    def on_texture(self):
        self.on_tex(None)

    def camera_error(self):
        """Provider error callback: record and display the message."""
        self.camera_error_message()
        self.canvas_text(self.error_message)

    def camera_error_message(self):
        """Set the cannot-connect warning text and drop the camera."""
        self.error_message = "WARNING: Unable to connect to camera_id '" +\
            str(self.index)+"'.\n" +\
            'Check that the camera is connected.'
        # (continuation of camera_error_message())
        self._camera = None

    def stop_camera(self):
        """Stop, unbind, blank, and release the provider camera."""
        if self._camera:
            self._camera.stop()
            self._camera.unbind(on_texture=self.on_tex)
            self.clear_texture()
            # The gi provider needs an explicit pipeline unload.
            if self._camera.__class__.__name__ == 'CameraGi':
                self._camera.unload()
            del self._camera
            self._camera = None

    #############################################
    # Texture
    #############################################

    def clear_texture(self):
        """Blit an all-white frame into the camera texture and redraw,
        so the last camera frame is not left on screen."""
        if self._camera and self._camera.texture:
            tex_size = self._camera.texture.size
            buf = bytes([255] * tex_size[0] * tex_size[1] * 3)
            fmt = self._camera._format ## all the providers are 3 byte
            self._camera.texture.blit_buffer(buf, colorfmt= fmt,
                                             bufferfmt='ubyte')
            self.on_tex(None)


    def on_tex(self, camera):
        """Per-frame handler: crop the camera texture, feed the analysis
        callback, and draw the (optionally mirrored) frame."""
        if self._camera and self._camera.texture:
            tex = self._camera.texture.get_region(*self.tex_crop)

            if self.data_callback:
                self.data_callback(tex, self.view_pos,
                                   self.tscale, self.mirror)
            if self.mirror:
                # Mirror horizontally by drawing with negative width from
                # the right edge.
                view_size = (-self.view_size[0], self.view_size[1])
                view_pos = (self.view_pos[0] + self.view_size[0],
                            self.view_pos[1])
            else:
                view_size = self.view_size
                view_pos = self.view_pos
            self.canvas.clear()
            with self.canvas:
                Color(1,1,1,1)
                Rectangle(texture= tex, size = view_size, pos = view_pos)
                if self.canvas_callback:
                    self.canvas_callback(tex, view_size, view_pos)

    def configure_texture_crop(self, dontcare):
        """Recompute tex_crop and the texture->view scale for the current
        texture size; 'dontcare' absorbs the event-dispatch argument."""
        if not self._camera or not self._camera.texture:
            return
        width_tex, height_tex = self._camera.texture.size
        self.tex_crop = self.crop_for_aspect_orientation(width_tex, height_tex)
        self.tscale = self.view_size[1] / self.tex_crop[3]

    def crop_for_aspect_orientation(self, width_tex, height_tex):
        """Return [x, y, w, h]: a centered crop of the texture matching
        the configured aspect ratio and resolved orientation.

        A texture long/short ratio > 1.5 is treated as 16:9, otherwise
        as 4:3.
        """
        orientation = self.decode_orientation()
        if self.aspect_ratio == "4:3":
            aspect = 4 / 3
        else:
            aspect = 16 / 9
        crop_pos_x = 0
        crop_pos_y = 0
        crop_siz_x = width_tex
        crop_siz_y = height_tex
        if orientation == 'portrait':
            if width_tex < height_tex:
                # Portrait texture
                if height_tex / width_tex > 1.5:
                    # texture is 16:9
                    if self.aspect_ratio == '4:3':
                        crop_siz_y = width_tex * aspect #1
                        crop_pos_y = (height_tex - crop_siz_y) // 2
                else:
                    # texture is 4:3
                    if self.aspect_ratio == '16:9':
                        crop_siz_x = height_tex // aspect
                        crop_pos_x = (width_tex - crop_siz_x) // 2
            else:
                # Landscape texture
                crop_siz_x = height_tex // aspect
                crop_pos_x = (width_tex - crop_siz_x) // 2
        else: # Landscape
            if width_tex < height_tex:
                # Portrait texture
                crop_siz_y = width_tex // aspect
                # NOTE(review): every other y-offset here uses height_tex;
                # width_tex below looks like a copy/paste slip -- confirm.
                crop_pos_y = (width_tex - crop_siz_y) // 2
            else:
                # Landscape texture
                if width_tex / height_tex > 1.5:
                    # texture is 16:9
                    if self.aspect_ratio == '4:3':
                        crop_siz_x = height_tex * aspect
                        crop_pos_x = (width_tex - crop_siz_x) // 2
                else:
                    # texture is 4:3
                    if self.aspect_ratio == '16:9':
                        crop_siz_y = width_tex // aspect
                        crop_pos_y = (height_tex - crop_siz_y) // 2
        return [ int(crop_pos_x), int(crop_pos_y),
                 int(crop_siz_x), int(crop_siz_y)]

    @mainthread
    def canvas_text(self,text):
        """Draw 'text' in red, horizontally centered, over a grey
        rectangle filling the preview area; runs on the main thread."""
        label = CoreLabel(font_size = sp(16))
        label.text = text
        label.refresh()
        if label.texture:
            pos = [self.view_pos[0] +\
                   (self.view_size[0] - label.texture.size[0]) / 2,
                   self.view_pos[1] + self.view_size[1] / 2]
            with self.canvas:
                Color(0.6,0.6,0.6,1)
                Rectangle(size = self.view_size, pos = self.view_pos)
                Color(1,0,0,1)
                Rectangle(size=label.texture.size,
                          pos=pos,
                          texture=label.texture)

# --------------------------------------------------------------------------
# (end of chunk)
# --------------------------------------------------------------------------