├── .gitignore
├── example_snapshots
│   ├── 2017-02-21T10_52_21.227244_on.jpg
│   ├── 2017-02-21T10_52_23.287836_on.jpg
│   ├── 2017-02-21T10_52_22.553099_off.jpg
│   └── 2017-02-21T10_52_24.915270_off.jpg
├── config.py
├── LICENSE
├── vision.py
├── device
│   └── D2CMsgSender.py
├── README.md
└── room-glimpse.py

/.gitignore:
--------------------------------------------------------------------------------
__pycache__/
**/*.ipynb
runnb.sh
creds/**
data/**
iothub_client.so

--------------------------------------------------------------------------------
/example_snapshots/2017-02-21T10_52_21.227244_on.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahirner/room-glimpse/HEAD/example_snapshots/2017-02-21T10_52_21.227244_on.jpg
--------------------------------------------------------------------------------
/example_snapshots/2017-02-21T10_52_23.287836_on.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahirner/room-glimpse/HEAD/example_snapshots/2017-02-21T10_52_23.287836_on.jpg
--------------------------------------------------------------------------------
/example_snapshots/2017-02-21T10_52_22.553099_off.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahirner/room-glimpse/HEAD/example_snapshots/2017-02-21T10_52_22.553099_off.jpg
--------------------------------------------------------------------------------
/example_snapshots/2017-02-21T10_52_24.915270_off.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahirner/room-glimpse/HEAD/example_snapshots/2017-02-21T10_52_24.915270_off.jpg
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
#Config Camera
RESOLUTION = (640, 480)
FPS = 30
ROTATION = 180
MD_BLOCK_FRACTION = 0.008  #Fraction of motion blocks that must show movement
MD_SPEED = 2.0             #How many screen widths per second those blocks must move
MD_FALLOFF = 0.75          #How many seconds without motion trigger the completion of a scene

#Config Persistence
DATA_FOLDER = './data'

#Config Azure Cognition
AZURE_COG_HOST = 'https://westus.api.cognitive.microsoft.com/vision/v1.0/analyze'
AZURE_COG_RETRIES = 3
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2017 Alexander Hirner

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/vision.py:
--------------------------------------------------------------------------------
from config import AZURE_COG_RETRIES, AZURE_COG_HOST
from creds.credentials import AZURE_COG_KEY

import time
import requests

def processRequest(json, data, headers, params):
    #From example code of project Oxford
    """
    Parameters:
    json: Used when processing images from its URL. See API Documentation
    data: Used when processing image read from disk. See API Documentation
    headers: Used to pass the key information and the data type request
    params: Used to pass the query parameters, e.g. the requested visualFeatures
    """
    retries = 0
    result = None

    while True:
        response = requests.request('post', AZURE_COG_HOST, json=json, data=data, headers=headers, params=params)
        if response.status_code == 429:
            print("Message: %s" % (response.json()['error']['message']))
            if retries <= AZURE_COG_RETRIES:
                time.sleep(1)
                retries += 1
                continue
            else:
                print('Error: failed after retrying!')
                break

        elif response.status_code == 200 or response.status_code == 201:
            if 'content-length' in response.headers and int(response.headers['content-length']) == 0:
                result = None
            elif 'content-type' in response.headers and isinstance(response.headers['content-type'], str):
                if 'application/json' in response.headers['content-type'].lower():
                    result = response.json() if response.content else None
                elif 'image' in response.headers['content-type'].lower():
                    result = response.content
        else:
            print("Error code: %d" % (response.status_code))
            print("Message: %s" % (response.json()['error']['message']))

        break

    return result

def analyze_img(jpg, features='Color,Categories,Tags,Description'):
    params = {'visualFeatures': features}
    headers = dict()
    headers['Ocp-Apim-Subscription-Key'] = AZURE_COG_KEY
    headers['Content-Type'] = 'application/octet-stream'
    result = processRequest(None, jpg, headers, params)

    return result
--------------------------------------------------------------------------------
/device/D2CMsgSender.py:
--------------------------------------------------------------------------------
"""
Module Name: D2CMsgSender.py
Project: IoTHubRestSample
Copyright (c) Microsoft Corporation.
Using [Send device-to-cloud message](https://msdn.microsoft.com/en-US/library/azure/mt590784.aspx) API to send device-to-cloud message from the simulated device application to IoT Hub.
This source is subject to the Microsoft Public License.
See http://www.microsoft.com/en-us/openness/licenses.aspx#MPL
All other rights reserved.
THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
from __future__ import print_function
import base64
import hmac
import hashlib
import time
import requests
import urllib.parse

#Adapted to work with python3
class D2CMsgSender:

    API_VERSION = '2016-02-03'
    TOKEN_VALID_SECS = 10
    TOKEN_FORMAT = 'SharedAccessSignature sig=%s&se=%s&skn=%s&sr=%s'

    def __init__(self, connectionString=None):
        if connectionString is not None:
            iotHost, keyName, keyValue = [sub[sub.index('=') + 1:] for sub in connectionString.split(";")]
            self.iotHost = iotHost
            self.keyName = keyName
            self.keyValue = keyValue

    def _buildExpiryOn(self):
        return '%d' % (time.time() + self.TOKEN_VALID_SECS)

    def _buildIoTHubSasToken(self, deviceId):
        resourceUri = '%s/devices/%s' % (self.iotHost, deviceId)
        targetUri = resourceUri.lower()
        expiryTime = self._buildExpiryOn()
        toSign = '%s\n%s' % (targetUri, expiryTime)
        key = base64.b64decode(self.keyValue.encode('utf-8'))
        signature = urllib.parse.quote(
            base64.b64encode(
                hmac.HMAC(key, toSign.encode('utf-8'), hashlib.sha256).digest()
            )
        ).replace('/', '%2F')
        return self.TOKEN_FORMAT % (signature, expiryTime, self.keyName, targetUri)

    def sendD2CMsg(self, deviceId, message):
        sasToken = self._buildIoTHubSasToken(deviceId)
        url = 'https://%s/devices/%s/messages/events?api-version=%s' % (self.iotHost, deviceId, self.API_VERSION)
        r = requests.post(url, headers={'Authorization': sasToken}, data=message)
        return r.text, r.status_code

if __name__ == '__main__':
    connectionString = 'HostName=.azure-devices.net;SharedAccessKeyName=device;SharedAccessKey='
    d2cMsgSender = D2CMsgSender(connectionString)
    deviceId = 'iotdevice1'
    message = 'Hello, IoT Hub'
    print(d2cMsgSender.sendD2CMsg(deviceId, message))

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
**A fun IoT app using a Raspberry Pi + camera.**
The app detects [motion from the h264 encoder](http://picamera.readthedocs.io/en/release-1.12/recipes2.html#recording-motion-vector-data) with little CPU drain. A first snapshot is taken once the total motion in the video stream exceeds a certain threshold. A second snapshot is taken after the scene becomes static again. Finally, the second snapshot is analyzed. Thus, this _Thing of the Internet_ is a (wonky) surveillance camera and a selfie machine at the same time, however you want to view it. The purpose was to demo Azure IoT and Cognitive Services on top of building an image acquisition framework for the RPi.

## Automatic Image Captioning
The importance of (data) privacy grows daily, but having an NN talk about its observations might just be OK... Snapshots are therefore only persisted on the local file system. The gist of the second snapshot is extracted by Microsoft's [computer vision API](https://www.microsoft.com/cognitive-services/en-us/computer-vision-api): tags, categories and a caption. Only this gist is passed on to the cloud.

## IoT Telemetry
In addition to the description and other features extracted at the end of a scene, telemetry includes the motion vectors of every frame during a scene. Learning gestures from this dataset would be even more fun! I wanted to try Azure's IoT Hub for data ingestion. All data mentioned above is forwarded via device-to-cloud messages.
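For illustration, here is roughly what one of these messages looks like for a finished scene, i.e. a `SceneEvent` from `room-glimpse.py` serialized to JSON (the values are borrowed from the example below and the tag list is shortened):

```javascript
{
  "timestamp_on": "2017-02-21T10:52:21.227244",
  "timestamp_off": "2017-02-21T10:52:22.553099",
  "caption": "a man that is standing in the living room",
  "caption_conf": 0.1240666986256891,
  "tags": ["floor", "indoor", "person", "window", "table", "room"]
}
```

While a scene is active, a `MotionEvent` message is additionally sent for every frame; it carries the flattened x/y motion vectors and SAD values of all macroblocks plus a few summary statistics.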

# Example
**I'm entering the living room from the left**
![alt-text](https://raw.githubusercontent.com/ahirner/room-glimpse/master/example_snapshots/2017-02-21T10_52_21.227244_on.jpg)

The motion detector triggers the first snapshot, which is stored on the RPi. At the same time, motion vector data from each video frame is forwarded to the cloud asynchronously.

**I pause to complete the scene**
![alt-text](https://raw.githubusercontent.com/ahirner/room-glimpse/master/example_snapshots/2017-02-21T10_52_22.553099_off.jpg)

```javascript
caption: 'a man that is standing in the living room'
confidence: 0.1240666986256891
tags: 'floor', 'indoor', 'person', 'window', 'table', 'room', 'man', 'living', 'holding', 'young', 'black', 'standing', 'woman', 'dog', 'kitchen', 'remote', 'playing', 'white'
```
This is how the second snapshot is described by Azure's cognitive API. Fair enough... Unfortunately, the caption doesn't mention my awesome guitar performance. The scene description and meta-information like timestamps are dispatched to the cloud, and the recording of motion data stops.

**I leave the room after much applause** :clap::clap::clap: (snapshot omitted)...

After no motion has been detected for a set amount of time (0.75 secs in this case), the next scene completes and is analyzed.

**Now it's just the bare room**
![alt-text](https://raw.githubusercontent.com/ahirner/room-glimpse/master/example_snapshots/2017-02-21T10_52_24.915270_off.jpg)
```javascript
caption: 'a living room with hard wood floor'
confidence: 0.9247661343688557
tags: 'floor', 'indoor', 'room', 'living', 'table', 'building', 'window', 'wood', 'hard', 'wooden', 'sitting', 'television', 'black', 'furniture', 'kitchen', 'small', 'large', 'open', 'area', 'computer', 'view', 'home', 'white', 'modern', 'door', 'screen', 'desk', 'laptop', 'dog', 'refrigerator', 'bedroom'
```
This time, the description is pretty accurate (and confident).

# Installation
- [Set up](https://azure.microsoft.com/en-us/resources/samples/iot-hub-c-raspberrypi-getstartedkit/) an Azure IoT Hub and add the RPi as a device.
- `git clone https://github.com/ahirner/room-glimpse.git`
- Create `credentials.py` in `./creds` with the Azure Cognitive API key, the IoT device ID and a device connection string.
```python
AZURE_COG_KEY = 'xxx'
AZURE_DEV_ID = 'yyy'
AZURE_DEV_CONNECTION_STRING = 'HostName=zzz.azure-devices.net;SharedAccessKeyName=zzz;SharedAccessKey=zzz='
```
- Install missing modules (_`requirements.txt` tbd_)
- Start with `python3 room-glimpse.py`

Only the HTTP API is used for now. The dedicated [azure-iot-python SDK](https://github.com/azure/azure-iot-sdk-python) can [control batching more effectively](https://github.com/Azure/azure-iot-sdk-python/issues/15) and use MQTT for less overhead, but it is not yet available via pip3 on Unix.

Configuration for the video stream, motion thresholds and cloud endpoints is in `config.py`.
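As a worked example, this is how the default `config.py` values translate into the thresholds that are derived at the top of `room-glimpse.py`:

```python
# Derived from RESOLUTION = (640, 480), FPS = 30, MD_BLOCK_FRACTION = 0.008, MD_SPEED = 2.0
BLOCKSIZE = 16                       # the h264 encoder yields one 2D motion vector per 16x16 macroblock
MOTION_W = 640 // BLOCKSIZE + 1      # 41 block columns
MOTION_H = 480 // BLOCKSIZE + 1      # 31 block rows
BLOCKS = MOTION_W * MOTION_H         # 1271 blocks per frame
MD_BLOCKS = int(0.008 * BLOCKS)      # more than 10 blocks have to move to count as motion
MD_MAGNITUDE = int(2.0 / 30 * 640)   # each by more than 42 pixels/frame, i.e. 2 screen widths per second
```

At start-up the script therefore prints: `Motion detected if >10 out of 1271 blocks show >42 pixel movement in a 640 wide frame`.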

# More ideas
1) Of course, nothing prevents you from running/training your own version of a [talking NN](https://github.com/tensorflow/models/tree/master/im2txt). In fact, this project is a good vantage point for pushing computation to the edge. Sam Abrahams maintains a [pip wheel to install TensorFlow](https://github.com/samjabrahams/tensorflow-on-raspberry-pi) on the little RPi. [Pete Warden](https://petewarden.com/2016/12/30/rewriting-tensorflow-graphs-with-the-gtt/) has done amazing work recently to trim down NNs in a principled way (e.g. quantization for fixed-point math).

2) In general, make use of the spare cores. Most of the time, the CPU idles at around 15% (remember the h264 motion detection), so there is plenty of room left for [beefier tasks](http://www.pyimagesearch.com/2016/04/18/install-guide-raspberry-pi-3-raspbian-jessie-opencv-3/) on the edge.

3) Overlay motion vectors in a live web view (there is a 2D vector for each 16x16 macroblock).

--------------------------------------------------------------------------------
/room-glimpse.py:
--------------------------------------------------------------------------------
# coding: utf-8


from __future__ import division

from config import *
from creds.credentials import *
from vision import *

from device.D2CMsgSender import D2CMsgSender
import json
import io, os
import socket
import time, datetime

import numpy as np
import PIL.Image
import picamera
import picamera.array

from queue import Queue
from collections import namedtuple  #Forgo typing to maintain vanilla python 3.4 compatibility on RPi

#Schema
Motion = namedtuple('Motion', 'timestamp, triggered, vectors_x, vectors_y, sad, magnitude')
Snapshot = namedtuple('Snapshot', 'timestamp, img_rgb, motion')
PictureEvent = namedtuple('PictureEvent', 'timestamp, type, on, data')
SceneCapture = namedtuple('SceneCapture', 'pic_on, pic_off')



def to_jpg(rgb):
    f = io.BytesIO()
    PIL.Image.fromarray(rgb).save(f, 'jpeg')
    return f.getvalue()

def to_ISO(timestamp):
    return datetime.datetime.fromtimestamp(timestamp).isoformat()

def to_ID(timestamp, on):
    return str(to_ISO(timestamp).replace(':', '_')) + ('_on' if on else '_off')

def get_convert_jpg(pic: PictureEvent, modify=True):
    jpg = pic.data
    if pic.type == 'rgb':
        jpg = to_jpg(pic.data)
        if modify:
            #PictureEvent is an immutable namedtuple; _replace only rebinds the local copy (callers currently pass modify=False)
            pic = pic._replace(data=jpg, type='jpg')
    #Todo: Exception handling
    return jpg

def save_jpg(jpg, _id):
    if DATA_FOLDER is not None:
        with open(os.path.join(DATA_FOLDER, _id + '.jpg'), "wb") as f:
            f.write(jpg)



#Derive more constants
BLOCKSIZE = 16
MOTION_W = RESOLUTION[0] // BLOCKSIZE + 1
MOTION_H = RESOLUTION[1] // BLOCKSIZE + 1
BLOCKS = (MOTION_W)*(MOTION_H)
MD_BLOCKS = int(MD_BLOCK_FRACTION * BLOCKS)
MD_MAGNITUDE = int(MD_SPEED / FPS * RESOLUTION[0])
print("Motion detected if >%i out of %i blocks show >%i pixel movement in a %i wide frame" % (MD_BLOCKS, BLOCKS, MD_MAGNITUDE, RESOLUTION[0]))



class MyRGBAnalysis(picamera.array.PiRGBAnalysis):
    def __init__(self, camera):
        super().__init__(camera)
        self.rgb = None

    def analyse(self, a):
        self.rgb = a

class MyMotionDetector(picamera.array.PiMotionAnalysis):

    def __init__(self, camera, rgb_detect: MyRGBAnalysis, scene_queue, motion_queue, picture_queue):
        super().__init__(camera)

        self.last_md_time_false = time.time()
        self.last_md_time_true = None
        self.rgb_detect = rgb_detect
        self.md = False

        self.scene_queue = scene_queue
        self.motion_queue = motion_queue
        self.picture_queue = picture_queue

    def analyse(self, a):
        m = np.sqrt(
            np.square(a['x'].astype(float)) +
            np.square(a['y'].astype(float))
            ).clip(0, 255).astype(np.uint8)

        #If there are more than MD_BLOCKS vectors with a magnitude greater
        #than MD_MAGNITUDE, then say we've detected motion
        md = ((m > MD_MAGNITUDE).sum() > MD_BLOCKS)

        now = time.time()
        motion = Motion(now, md, a['x'], a['y'], a['sad'], m)

        #Todo: does motion or RGB analysis come first? In the former case, current_state['rgb'] lags one frame
        # --> use separate picamera capture
        snap = Snapshot(now, self.rgb_detect.rgb, motion)

        self.md_update(snap)

    def md_update(self, snap: Snapshot):
        now = snap.timestamp
        before = self.last_md_time_true
        is_motion = snap.motion.triggered

        #Test if motion detection flipped over
        if is_motion:
            self.last_md_time_true = now
            if not self.md:
                self.md_rising(snap)
                self.md = True
        else:
            self.last_md_time_false = now
            if self.md is True and before is not None and (now - before) > MD_FALLOFF:
                self.md_falling(snap)
                self.md = False

        #Queue motion data
        if self.md:
            if (self.motion_queue): self.motion_queue.put(snap.motion)


    #Attention: runs in blocking sync with motion detection
    def md_rising(self, snap: Snapshot):
        now = snap.timestamp
        motion = snap.motion

        #Calculate summary statistics only for debugging purposes
        avg_x = motion.vectors_x.sum() / RESOLUTION[0]
        avg_y = motion.vectors_y.sum() / RESOLUTION[1]
        avg_m = motion.magnitude.sum() / (RESOLUTION[0] * RESOLUTION[1])

        print('Motion detected, avg_x: %i, avg_y: %i, mag: %i' % (avg_x, avg_y, avg_m))

        pic = PictureEvent(now, 'rgb', True, snap.img_rgb)
        self.last_pic_on = pic
        if (self.picture_queue): self.picture_queue.put(pic)

    def md_falling(self, snap: Snapshot):
        now = snap.timestamp
        print("Motion vanished after %f secs" % (now - self.last_md_time_true))

        pic = PictureEvent(now, 'jpg', False, to_jpg(snap.img_rgb))

        if (self.picture_queue): self.picture_queue.put(pic)
        if (self.scene_queue): self.scene_queue.put(SceneCapture(self.last_pic_on, pic))



#Custom encoder for objects containing numpy
class MsgEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(MsgEncoder, self).default(obj)

#Normalized versions with summary stats to be sent to the cloud
MotionEvent = namedtuple('MotionEvent', 'timestamp, triggered, blocks_x, blocks_y, vectors_x, vectors_y, avg_x, avg_y, mag, sad')
SceneEvent = namedtuple('SceneEvent', 'timestamp_on, timestamp_off, caption, caption_conf, tags')

def dispatch_scene(scene_queue, azure_msg):
    while True:
        scene = scene_queue.get()

        jpg_off = get_convert_jpg(scene.pic_off, False)

        result = analyze_img(jpg_off)  #Todo: guard against a None result if the vision API call failed
        caption = result['description']['captions'][0]['text']
        caption_confidence = result['description']['captions'][0]['confidence']
        tags = result['description']['tags']
        on = to_ISO(scene.pic_on.timestamp)
        off = to_ISO(scene.pic_off.timestamp)

        event = SceneEvent(on, off, caption, caption_confidence, tags)
        print(event)
        azure_msg.sendD2CMsg(AZURE_DEV_ID, json.dumps(event._asdict(), cls=MsgEncoder))

        scene_queue.task_done()

def dispatch_motiondata(motion_queue, azure_msg):

    #Todo: we could reduce overhead by enabling batch transfer (with the caveat of interleaving low- and high-latency messages).
    #This is only feasible with the official Python SDK. On *nix you still need to manually compile it.
    #Therefore, we go with HTTP and REST for now.
    #Discussion here: https://github.com/Azure/azure-iot-sdk-python/issues/15
    nr_motion = 0
    while True:
        m = motion_queue.get()

        #MotionEvent = namedtuple('MotionEvent', 'timestamp, triggered, blocks_x, blocks_y, vectors_x, vectors_y, avg_x, avg_y, min_x, min_y, mag')
        avg_x = m.vectors_x.sum() / RESOLUTION[0]
        avg_y = m.vectors_y.sum() / RESOLUTION[1]
        avg_m = m.magnitude.sum() / (RESOLUTION[0] * RESOLUTION[1])

        me = MotionEvent(to_ISO(m.timestamp), m.triggered, MOTION_W, MOTION_H, list(m.vectors_x.flatten()), list(m.vectors_y.flatten()), avg_x, avg_y, avg_m, list(m.sad.flatten()))

        azure_msg.sendD2CMsg(AZURE_DEV_ID, json.dumps(me._asdict(), cls=MsgEncoder))

        nr_motion += 1
        print(nr_motion, to_ISO(m.timestamp), to_ISO(time.time()), avg_m)

        motion_queue.task_done()

def publish_pictures(picture_queue):
    while True:
        p = picture_queue.get()

        jpg = get_convert_jpg(p, False)
        _id = to_ID(p.timestamp, p.on)
        save_jpg(jpg, _id)

        print("File saved: " + _id)
        picture_queue.task_done()



import _thread

if __name__ == "__main__":

    #Create queues
    #Todo: make cloud telemetrics and saving pictures optional
    scene_queue = Queue(3)          #Queue three full scenes
    motion_queue = Queue(FPS * 10)  #Queue a maximum of 10 seconds of motion data
    picture_queue = Queue(4)        #Queue at most two pairs of on/off snapshots

    with picamera.PiCamera() as camera:
        camera.resolution = RESOLUTION
        camera.framerate = FPS
        camera.rotation = ROTATION

        rgb_detect = MyRGBAnalysis(camera)
        motion_detect = MyMotionDetector(camera, rgb_detect, scene_queue, motion_queue, picture_queue)

        print("Starting camera and motion detection...")
        #Set up motion and video stream analyzer
        camera.start_recording(
            '/dev/null',
            format='h264',
            motion_output=motion_detect
        )
        #Set up RGB capture on a splitter port
        camera.start_recording(
            rgb_detect,
            format='rgb',
            splitter_port=2
        )
        camera.wait_recording(0.5)

        #Todo: make cloud telemetrics and saving pictures optional
        print("Starting threads for data dispatch...")
        azure_msg = D2CMsgSender(AZURE_DEV_CONNECTION_STRING)
        _thread.start_new_thread(dispatch_scene, (scene_queue, azure_msg))
        _thread.start_new_thread(dispatch_motiondata, (motion_queue, azure_msg))
        _thread.start_new_thread(publish_pictures, (picture_queue,))

        print("--- Exit with Ctrl-C ---")
        while True:
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                break

        camera.stop_recording(splitter_port=2)
        camera.stop_recording()

    print("Camera stopped, waiting for queued data to dispatch")

    scene_queue.join()
    motion_queue.join()
    picture_queue.join()

--------------------------------------------------------------------------------