├── .env ├── .gitignore ├── .vscode ├── launch.json └── settings.json ├── README.md ├── config └── deployment.arm32v7.json ├── deployment.template.json ├── docker ├── jetson-nano-l4t-cuda-cudnn-opencv-darknet │ └── Dockerfile ├── jetson-nano-l4t-cuda-cudnn-opencv │ └── Dockerfile ├── jetson-nano-l4t-cuda-cudnn │ └── Dockerfile ├── jetson-nano-l4t-cuda │ └── Dockerfile └── jetson-nano-l4t │ └── Dockerfile └── modules └── YoloModule ├── .gitignore ├── Dockerfile.arm64v8 ├── app ├── AppState.py ├── ImageServer.py ├── VideoCapture.py ├── VideoStream.py ├── YoloInference.py ├── darknet │ └── darknet.py ├── main.py ├── templates │ └── index.html └── yolo │ ├── coco.data │ ├── coco.names │ ├── yolov3-tiny.cfg │ └── yolov3-tiny.weights ├── build └── requirements.txt └── module.json /.env: -------------------------------------------------------------------------------- 1 | CONTAINER_REGISTRY_URL=toolboc 2 | CONTAINER_REGISTRY_USERNAME= 3 | CONTAINER_REGISTRY_PASSWORD= 4 | CONTAINER_MODULE_VERSION=latest 5 | CONTAINER_VIDEO_SOURCE=https://www.youtube.com/watch?v=XJ735krOiPo 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "YoloModule Remote Debug (Python)", 6 | "type": "python", 7 | "request": "attach", 8 | "port": 5678, 9 | "host": "localhost", 10 | "logToFile": true, 11 | "redirectOutput": true, 12 | "pathMappings": [ 13 | { 14 | "localRoot": "${workspaceFolder}/modules/YoloModule", 15 | "remoteRoot": "/app" 16 | } 17 | ], 18 | "windows": { 19 | "pathMappings": [ 20 | { 21 | "localRoot": "${workspaceFolder}\\modules\\YoloModule", 22 | "remoteRoot": "/app" 23 | } 24 | ] 25 | } 26 | } 27 | ] 28 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "azure-iot-edge.defaultPlatform": { 3 | "platform": "arm32v7", 4 | "alias": null 5 | } 6 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | ![](https://pbs.twimg.com/media/D_ANZnbWsAA4EVK.jpg) 4 | 5 | The IntelligentEdgeHOL walks through the process of deploying an [IoT Edge](https://docs.microsoft.com/azure/iot-edge/about-iot-edge?WT.mc_id=iot-0000-pdecarlo) module to an Nvidia Jetson Nano device to allow for detection of objects in YouTube videos, RTSP streams, Hololens Mixed Reality Capture, or an attached web cam. It achieves performance of around 10 frames per second for most video data. 6 | 7 | The module ships as a fully self-contained docker image totalling around 5.5GB. This image contains all necessary dependencies including the [Nvidia Linux for Tegra Drivers](https://developer.nvidia.com/embedded/linux-tegra) for Jetson Nano, [CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit), [NVIDIA CUDA Deep Neural Network library (CUDNN)](https://developer.nvidia.com/cudnn), [OpenCV](https://github.com/opencv/opencv), and [Darknet](https://github.com/AlexeyAB/darknet). For details on how the base images are built, see the included `docker` folder. 
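For reference, the image is assembled as a chain of base images, each built from a Dockerfile in the `docker` folder and adding one layer of the stack (image names as published in this repo):

```
balenalib/jetson-tx2-ubuntu:bionic
  └── jetson-nano-l4t                                  (L4T R32.1 drivers)
        └── jetson-nano-l4t-cuda                       (CUDA Toolkit 10.0)
              └── jetson-nano-l4t-cuda-cudnn           (cuDNN 7.3.1)
                    └── jetson-nano-l4t-cuda-cudnn-opencv          (OpenCV 4.1.0)
                          └── jetson-nano-l4t-cuda-cudnn-opencv-darknet   (Darknet)
                                └── YoloModule  (modules/YoloModule/Dockerfile.arm64v8)
```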
8 | 9 | Object Detection is accomplished using YOLOv3-tiny with [Darknet](https://github.com/AlexeyAB/darknet), which supports detection of the following: 10 | 11 | 12 | *person, bicycle, car, motorbike, aeroplane, bus, train, truck, boat, traffic light, fire hydrant, stop sign, parking meter, bench, bird, cat, dog, horse, sheep, cow, elephant, bear, zebra, giraffe, backpack, umbrella, handbag, tie, suitcase, frisbee, skis, snowboard, sports ball, kite, baseball bat, baseball glove, skateboard, surfboard, tennis racket, bottle, wine glass, cup, fork, knife, spoon, bowl, banana, apple, sandwich, orange, broccoli, carrot, hot dog, pizza, donut, cake, chair, sofa, pottedplant, bed, diningtable, toilet, tv monitor, laptop, mouse, remote, keyboard, cell phone, microwave, oven, toaster, sink, refrigerator, book, clock, vase, scissors, teddy bear, hair drier, toothbrush* 13 | 14 | # Demos 15 | 16 | * [Yolo Object Detection with Nvidia Jetson and Hololens](https://www.youtube.com/watch?v=zxGcUmcl1qo&feature=youtu.be) 17 | * [The Intelligent Edge on the IoT Show @ Microsoft Ignite 2019](https://channel9.msdn.com/Shows/Internet-of-Things-Show/The-Intelligent-Edge-by-Microsoft?WT.mc_id=iot-0000-pdecarlo) 18 | 19 | # Hands-On Lab Materials 20 | 21 | * [Presentation Deck](http://aka.ms/intelligentedgeholdeck) 22 | * [Presentation Video](http://aka.ms/intelligentedgeholvideo) 23 | - Note: If you want to view a full walkthrough of this lab, skip to 38:00 24 | 25 | 26 | # Getting Started 27 | This lab requires that you have the following: 28 | 29 | Hardware: 30 | * [Nvidia Jetson Nano Device](https://amzn.to/2WFE5zF) 31 | * A [cooling fan](https://amzn.to/2ZI2ki9) installed on or pointed at the Nvidia Jetson Nano device 32 | * USB Webcam (Optional) 33 | - Note: The added power consumption will require that your device is configured to use a [5V/4A barrel adapter](https://amzn.to/32DFsTq) as mentioned [here](https://www.jetsonhacks.com/2019/04/10/jetson-nano-use-more-power/), and that you use an [OpenCV-compatible camera](https://web.archive.org/web/20120815172655/http://opencv.willowgarage.com/wiki/Welcome/OS/). 34 | 35 | Development Environment: 36 | - [Visual Studio Code (VSCode)](https://code.visualstudio.com/Download?WT.mc_id=iot-0000-pdecarlo) 37 | - VSCode Extensions 38 | - [Azure Account Extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode.azure-account&WT.mc_id=iot-0000-pdecarlo) 39 | - [Azure IoT Edge Extension](https://marketplace.visualstudio.com/items?itemName=vsciot-vscode.azure-iot-edge&WT.mc_id=iot-0000-pdecarlo) 40 | - [Docker Extension](https://marketplace.visualstudio.com/items?itemName=PeterJausovec.vscode-docker&WT.mc_id=iot-0000-pdecarlo) 41 | - [Azure IoT Toolkit Extension](https://marketplace.visualstudio.com/items?itemName=vsciot-vscode.azure-iot-toolkit&WT.mc_id=iot-0000-pdecarlo) 42 | - Git tool(s) 43 | - [Git command line](https://git-scm.com/) 44 | 45 | # Installing IoT Edge onto the Jetson Nano Device 46 | 47 | Before we install IoT Edge, we need to install a few utilities onto the Nvidia Jetson Nano device with: 48 | 49 | ``` 50 | sudo apt-get install -y curl nano python3-pip 51 | ``` 52 | 53 | ARM64 builds of IoT Edge are currently offered in preview and will eventually go into General Availability. We will make use of the ARM64 builds to ensure that we get the best performance out of our IoT Edge solution. 54 | 55 | These builds are provided starting in the [1.0.8 release tag](https://github.com/Azure/azure-iotedge/releases/tag/1.0.8). 
To install the 1.0.8 release of IoT Edge, run the following from a terminal on your Nvidia Jetson device: 56 | 57 | ``` 58 | # You can copy the entire text from this code block and 59 | # paste in terminal. The comment lines will be ignored. 60 | 61 | # Install the IoT Edge repository configuration 62 | curl https://packages.microsoft.com/config/ubuntu/18.04/multiarch/prod.list > ./microsoft-prod.list 63 | 64 | # Copy the generated list 65 | sudo cp ./microsoft-prod.list /etc/apt/sources.list.d/ 66 | 67 | # Install the Microsoft GPG public key 68 | curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > microsoft.gpg 69 | sudo cp ./microsoft.gpg /etc/apt/trusted.gpg.d/ 70 | 71 | # Perform apt update 72 | sudo apt-get update 73 | 74 | # Install IoT Edge and the Security Daemon 75 | sudo apt-get install iotedge 76 | 77 | ``` 78 | 79 | # Provisioning the IoT Edge Runtime on the Jetson Nano Device 80 | 81 | To manually provision a device, you need to provide it with a device connection string that you can create by registering a new IoT Edge device in your IoT hub. You can create a new device connection string to accomplish this by following the documentation for [Registering an IoT Edge device in the Azure Portal](https://docs.microsoft.com/azure/iot-edge/how-to-register-device-portal?WT.mc_id=iot-0000-pdecarlo) or by [Registering an IoT Edge device with the Azure-CLI](https://docs.microsoft.com/azure/iot-edge/how-to-register-device-cli?WT.mc_id=iot-0000-pdecarlo). 82 | 83 | Once you have obtained a connection string, open the configuration file: 84 | 85 | ``` 86 | sudo nano /etc/iotedge/config.yaml 87 | ``` 88 | 89 | Find the provisioning section of the file and uncomment the manual provisioning mode. Update the value of `device_connection_string` with the connection string from your IoT Edge device. 90 | 91 | ``` 92 | provisioning: 93 | source: "manual" 94 | device_connection_string: "" 95 | 96 | # provisioning: 97 | # source: "dps" 98 | # global_endpoint: "https://global.azure-devices-provisioning.net" 99 | # scope_id: "{scope_id}" 100 | # registration_id: "{registration_id}" 101 | 102 | ``` 103 | 104 | After you have updated the value of `device_connection_string`, restart the iotedge service with: 105 | 106 | ``` 107 | sudo service iotedge restart 108 | ``` 109 | 110 | You can check the status of the IoT Edge Daemon using: 111 | 112 | ``` 113 | systemctl status iotedge 114 | ``` 115 | 116 | Examine daemon logs using: 117 | ``` 118 | journalctl -u iotedge --no-pager --no-full 119 | ``` 120 | 121 | And, list running modules with: 122 | 123 | ``` 124 | sudo iotedge list 125 | ``` 126 | 127 | # Configuring the YoloModule Video Source 128 | 129 | Clone or download a copy of [this repo](https://github.com/toolboc/IntelligentEdgeHOL) and open the `IntelligentEdgeHOL` folder in Visual Studio Code. Next, press `F1` and select `Azure IoT Hub: Select IoT Hub`. Next, choose the IoT Hub you created when provisioning the IoT Edge Runtime on the Jetson Nano Device and follow the prompts to complete the process. 
130 | 131 | In VS Code, navigate to the `.env` file and modify the following value: 132 | 133 | `CONTAINER_VIDEO_SOURCE` 134 | 135 | To use a YouTube video, provide a YouTube URL, ex: https://www.youtube.com/watch?v=YZkp0qBBmpw 136 | 137 | For an RTSP stream, provide a link to the stream in the format rtsp:// 138 | 139 | To use a HoloLens video stream, see this [article](https://blog.kloud.com.au/2016/09/01/streaming-hololens-video-to-your-web-browser/) to enable a user account in the HoloLens Web Portal. Once this is configured, provide the URL to the HoloLens video streaming endpoint, ex: 140 | https://[USERNAME]:[PASSWORD]@[HOLOLENS_IP]/api/holographic/stream/live_high.mp4?holo=true&pv=true&mic=true&loopback=true 141 | 142 | If you have an attached USB web cam, provide the V4L device path (this can be obtained from the terminal with `ls -ltrh /dev/video*`), ex: /dev/video0. Then open the included `deployment.template.json` and look for: 143 | 144 | ``` 145 | { 146 | "PathOnHost": "/dev/tegra_dc_ctrl", 147 | "PathInContainer":"/dev/tegra_dc_ctrl", 148 | "CgroupPermissions":"rwm" 149 | } 150 | ``` 151 | 152 | Then, add the following (including the comma) directly beneath it: 153 | 154 | ``` 155 | , 156 | { 157 | "PathOnHost": "/dev/video0", 158 | "PathInContainer":"/dev/video0", 159 | "CgroupPermissions":"rwm" 160 | } 161 | ``` 162 | 163 | 164 | # Deploy the YoloModule to the Jetson Nano device 165 | 166 | Create a deployment for the Jetson Nano device by right-clicking `deployment.template.json` and selecting `Generate IoT Edge Deployment Manifest`. This will create a file under the config folder named `deployment.arm32v7.json`. Right-click that file, select `Create Deployment for Single Device`, and select the device you created when provisioning the IoT Edge Runtime on the Jetson Nano Device. 167 | 168 | It may take a few minutes for the module to begin running on the device as it needs to pull an approximately 5.5GB docker image. You can check the progress on the Nvidia Jetson device by monitoring the iotedge agent logs with: 169 | 170 | ``` 171 | sudo docker logs -f edgeAgent 172 | ``` 173 | 174 | Example output: 175 | 176 | ``` 177 | 2019-05-15 01:34:09.314 +00:00 [INF] - Executing command: "Command Group: ( 178 | [Create module YoloModule] 179 | [Start module YoloModule] 180 | )" 181 | 2019-05-15 01:34:09.314 +00:00 [INF] - Executing command: "Create module YoloModule" 182 | 2019-05-15 01:34:09.886 +00:00 [INF] - Executing command: "Start module YoloModule" 183 | 2019-05-15 01:34:10.356 +00:00 [INF] - Plan execution ended for deployment 10 184 | 2019-05-15 01:34:10.506 +00:00 [INF] - Updated reported properties 185 | 2019-05-15 01:34:15.666 +00:00 [INF] - Updated reported properties 186 | ``` 187 | 188 | # Verify the deployment results 189 | 190 | Confirm the module is working as expected by accessing the web server that the YoloModule exposes. 191 | 192 | You can open this web server using the IP address or host name of the Nvidia Jetson device. 193 | 194 | Example: 195 | 196 | http://jetson-nano-00 197 | 198 | or 199 | 200 | http://`` 201 | 202 | You should see an unaltered video stream from the video source you configured. In the next section, we will enable the object detection feature by modifying a value in the associated module twin. 
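For reference, the `YoloModule` module twin desired properties are defined in `deployment.template.json` as shown below; the sections that follow modify these values at runtime:

```
"YoloModule": {
  "properties.desired": {
    "ConfidenceLevel": "0.3",
    "VerboseMode": 0,
    "Inference": 1,
    "VideoSource": ""
  }
}
```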
203 | 204 | ![](https://pbs.twimg.com/media/D_ANYjHWwAECM-L.jpg) 205 | 206 | Monitor the YoloModule logs with: 207 | 208 | ``` 209 | sudo docker logs -f YoloModule 210 | ``` 211 | 212 | Example output: 213 | 214 | ``` 215 | toolboc@JetsonNano:~$ sudo docker logs -f YoloModule 216 | [youtube] unPK61Hz3Rw: Downloading webpage 217 | [youtube] unPK61Hz3Rw: Downloading video info webpage 218 | [download] Destination: /app/video.mp4 219 | [download] 100% of 43.10MiB in 00:0093MiB/s ETA 00:00known ETA 220 | Download Complete 221 | =============================================================== 222 | videoCapture::__Run__() 223 | - Stream : False 224 | - useMovieFile : True 225 | Camera frame size : 1280x720 226 | frame size : 1280x720 227 | Frame rate (FPS) : 29 228 | 229 | device_twin_callback() 230 | - status : COMPLETE 231 | - payload : 232 | { 233 | "$version": 4, 234 | "Inference": 1, 235 | "VerboseMode": 0, 236 | "ConfidenceLevel": "0.3", 237 | "VideoSource": "https://www.youtube.com/watch?v=tYcvF8o5GXE" 238 | } 239 | - ConfidenceLevel : 0.3 240 | - Verbose : 0 241 | - Inference : 1 242 | - VideoSource : https://www.youtube.com/watch?v=tYcvF8o5GXE 243 | 244 | ===> YouTube Video Source 245 | Start downloading video 246 | WARNING: Assuming --restrict-filenames since file system encoding cannot encode all characters. Set the LC_ALL environment variable to fix this. 247 | [youtube] tYcvF8o5GXE: Downloading webpage 248 | [youtube] tYcvF8o5GXE: Downloading video info webpage 249 | [download] Destination: /app/video.mp4 250 | [download] 100% of 48.16MiB in 00:0080MiB/s ETA 00:00known ETA 251 | Download Complete 252 | ``` 253 | 254 | # Monitor the GPU utilization stats 255 | 256 | On the Jetson device, you can monitor the GPU utilization by installing `jetson-stats` with: 257 | 258 | ``` 259 | sudo -H pip3 install jetson-stats 260 | ``` 261 | 262 | Once installed, run: 263 | 264 | ``` 265 | sudo jtop 266 | ``` 267 | 268 | # Update the Video Source by modifying the Module Twin 269 | 270 | While in VSCode, select the Azure IoT Hub Devices window, find your IoT Edge device, and expand the modules section until you see the `YoloModule` entry. 271 | 272 | Right-click `YoloModule` and select `Edit Module Twin`. 273 | 274 | A new window named `azure-iot-module-twin.json` should open. 275 | 276 | Edit `properties -> desired -> VideoSource` with the URL of another video. 277 | 278 | Right-click anywhere in the editor window, then select `Update Module Twin`. 279 | 280 | It may take some time depending on the size of the video, but the new video should begin playing in your browser. 281 | 282 | # Controlling/Managing the Module 283 | You can change the following settings via the Module Twin after the container has started running. 284 | 285 | `ConfidenceLevel` : (float) 286 | Confidence level threshold. The module ignores any inference results below this threshold. 287 | 288 | `VerboseMode` : (bool) Allows for more verbose output, useful for debugging issues. 289 | 290 | `Inference` : (bool) Allows for toggling object detection via Yolo inference. 291 | 292 | `VideoSource` : (string) 293 | Source of the video stream or capture device. 294 | 295 | # Pushing Detected Object Data into Azure Time Series Insights 296 | 297 | [Azure Time Series Insights](https://docs.microsoft.com/azure/time-series-insights/time-series-insights-overview?WT.mc_id=iot-0000-pdecarlo) is built to store, visualize, and query large amounts of time series data, such as that generated by IoT devices. 
This service allows us to extract insights from the detected object data and build something very interesting with them. For example, imagine getting an alert when the mail truck is actually in the driveway, counting wildlife species using camera feeds from the National Park Service, or detecting that people are in a place they should not be and counting them over time! 298 | 299 | To begin, navigate to the resource group that contains the IoT Hub created in the previous steps. Add a new Time Series Insights environment to the resource group and select the `S1` tier for deployment. Be sure to place the Time Series Insights instance in the same geographical region as your IoT Hub to minimize latency and egress charges. 300 | 301 | ![](https://hackster.imgix.net/uploads/attachments/939871/image_11Mggcf7p3.png?auto=compress) 302 | 303 | Next, choose a unique name for your Event Source and configure the Event Source to point to the IoT Hub you created in the previous steps. Set the `IoT Hub Access Policy Name` to "iothubowner", be sure to create a new IoT Hub Consumer Group named "tsi", and leave the `TimeStamp Property Name` empty as shown below: 304 | 305 | ![](https://hackster.imgix.net/uploads/attachments/939872/image_4DsJXUVxvt.png?auto=compress) 306 | 307 | Complete the steps to "Review and Create" your deployment of Time Series Insights. Once the instance has finished deploying, you can navigate to the Time Series Insights explorer by viewing the newly deployed Time Series Insights Environment resource, selecting "Overview", and clicking the "Time Series Insights explorer URL". Once you have clicked the link, you may begin working with your detected object data. 308 | 309 | For details on how to explore and query your data in the Azure Time Series Insights explorer, you may consult the [Time Series Insights documentation](https://docs.microsoft.com/azure/time-series-insights/time-series-insights-explorer?WT.mc_id=iot-0000-pdecarlo). 
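For context, the data that arrives in Time Series Insights comes from the YoloModule's `output1`, routed upstream by the `YoloModuleToIoTHub` route in the deployment manifest. Each message is a JSON map of detected class labels to counts, sent at most once per second (see `YoloInference.py`). An illustrative example of one message (the labels and counts will depend on your video source):

```
{
  "person": 2,
  "car": 1,
  "truck": 1
}
```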
310 | 311 | ![](https://hackster.imgix.net/uploads/attachments/939873/image_JWWcQszXsh.png?auto=compress) 312 | 313 | -------------------------------------------------------------------------------- /config/deployment.arm32v7.json: -------------------------------------------------------------------------------- 1 | { 2 | "modulesContent": { 3 | "$edgeAgent": { 4 | "properties.desired": { 5 | "schemaVersion": "1.0", 6 | "runtime": { 7 | "type": "docker", 8 | "settings": { 9 | "minDockerVersion": "v1.25", 10 | "loggingOptions": "", 11 | "registryCredentials": { 12 | "bootcampfy19acr": { 13 | "username": "$CONTAINER_REGISTRY_USERNAME", 14 | "password": "$CONTAINER_REGISTRY_PASSWORD", 15 | "address": "toolboc" 16 | } 17 | } 18 | } 19 | }, 20 | "systemModules": { 21 | "edgeAgent": { 22 | "type": "docker", 23 | "settings": { 24 | "image": "mcr.microsoft.com/azureiotedge-agent:1.0.8", 25 | "createOptions": "{}" 26 | } 27 | }, 28 | "edgeHub": { 29 | "type": "docker", 30 | "status": "running", 31 | "restartPolicy": "always", 32 | "settings": { 33 | "image": "mcr.microsoft.com/azureiotedge-hub:1.0.8", 34 | "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"5671/tcp\":[{\"HostPort\":\"5671\"}],\"8883/tcp\":[{\"HostPort\":\"8883\"}],\"443/tcp\":[{\"HostPort\":\"443\"}]}}}" 35 | } 36 | } 37 | }, 38 | "modules": { 39 | "YoloModule": { 40 | "version": "1.0", 41 | "type": "docker", 42 | "status": "running", 43 | "restartPolicy": "always", 44 | "settings": { 45 | "image": "toolboc/yolomodule:latest-arm32v7", 46 | "createOptions": "{\"Env\":[\"VIDEO_PATH=https://www.youtube.com/watch?v=XJ735krOiPo\",\"VIDEO_WIDTH=0\",\"VIDEO_HEIGHT=0\",\"FONT_SCALE=0.8\"],\"HostConfig\":{\"Devices\":[{\"PathOnHost\":\"/dev/nvhost-ctrl\",\"PathInContainer\":\"/dev/nvhost-ctrl\",\"CgroupPermissions\":\"rwm\"},{\"PathOnHost\":\"/dev/nvhost-ctrl-gpu\",\"PathInContainer\":\"dev/nvhost-ctrl-gpu\",\"CgroupPermissions\":\"rwm\"},{\"PathOnHost\":\"/dev/nvhost-prof-gpu\",\"PathInContainer\":\"dev/nvhost-prof-gpu \",\"CgroupPermissions\":\"rwm\"},{\"PathOnHost\":\"/dev/nvmap\",\"PathInContainer\":\"/dev/nvmap\",\"Cgroup", 47 | "createOptions01": "Permissions\":\"rwm\"},{\"PathOnHost\":\"dev/nvhost-gpu\",\"PathInContainer\":\"dev/nvhost-gpu\",\"CgroupPermissions\":\"rwm\"},{\"PathOnHost\":\"/dev/nvhost-as-gpu\",\"PathInContainer\":\"/dev/nvhost-as-gpu\",\"CgroupPermissions\":\"rwm\"},{\"PathOnHost\":\"/dev/nvhost-vic\",\"PathInContainer\":\"/dev/nvhost-vic\",\"CgroupPermissions\":\"rwm\"},{\"PathOnHost\":\"/dev/tegra_dc_ctrl\",\"PathInContainer\":\"/dev/tegra_dc_ctrl\",\"CgroupPermissions\":\"rwm\"}],\"PortBindings\":{\"80/tcp\":[{\"HostPort\":\"80\"}]}}}" 48 | } 49 | } 50 | } 51 | } 52 | }, 53 | "$edgeHub": { 54 | "properties.desired": { 55 | "schemaVersion": "1.0", 56 | "routes": { 57 | "YoloModuleToIoTHub": "FROM /messages/modules/YoloModule/outputs/* INTO $upstream" 58 | }, 59 | "storeAndForwardConfiguration": { 60 | "timeToLiveSecs": 7200 61 | } 62 | } 63 | }, 64 | "YoloModule": { 65 | "properties.desired": { 66 | "ConfidenceLevel": "0.3", 67 | "VerboseMode": 0, 68 | "Inference": 1, 69 | "VideoSource": "" 70 | } 71 | } 72 | } 73 | } -------------------------------------------------------------------------------- /deployment.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema-template": "1.0.0", 3 | "modulesContent": { 4 | "$edgeAgent": { 5 | "properties.desired": { 6 | "schemaVersion": "1.0", 7 | "runtime": { 8 | "type": "docker", 9 | "settings": { 10 | 
"minDockerVersion": "v1.25", 11 | "loggingOptions": "", 12 | "registryCredentials": { 13 | "bootcampfy19acr": { 14 | "username": "$CONTAINER_REGISTRY_USERNAME", 15 | "password": "$CONTAINER_REGISTRY_PASSWORD", 16 | "address": "$CONTAINER_REGISTRY_URL" 17 | } 18 | } 19 | } 20 | }, 21 | "systemModules": { 22 | "edgeAgent": { 23 | "type": "docker", 24 | "settings": { 25 | "image": "mcr.microsoft.com/azureiotedge-agent:1.0", 26 | "createOptions": {} 27 | } 28 | }, 29 | "edgeHub": { 30 | "type": "docker", 31 | "status": "running", 32 | "restartPolicy": "always", 33 | "settings": { 34 | "image": "mcr.microsoft.com/azureiotedge-hub:1.0", 35 | "createOptions": { 36 | "HostConfig": { 37 | "PortBindings": { 38 | "5671/tcp": [ 39 | { 40 | "HostPort": "5671" 41 | } 42 | ], 43 | "8883/tcp": [ 44 | { 45 | "HostPort": "8883" 46 | } 47 | ], 48 | "443/tcp": [ 49 | { 50 | "HostPort": "443" 51 | } 52 | ] 53 | } 54 | } 55 | } 56 | } 57 | } 58 | }, 59 | "modules": { 60 | "YoloModule": { 61 | "version": "1.0", 62 | "type": "docker", 63 | "status": "running", 64 | "restartPolicy": "always", 65 | "settings": { 66 | "image": "${MODULES.YoloModule}", 67 | "createOptions": { 68 | "Env": [ 69 | "VIDEO_PATH=$CONTAINER_VIDEO_SOURCE", 70 | "VIDEO_WIDTH=0", 71 | "VIDEO_HEIGHT=0", 72 | "FONT_SCALE=0.8" 73 | ], 74 | "HostConfig": { 75 | "Devices": [ 76 | { 77 | "PathOnHost": "/dev/nvhost-ctrl", 78 | "PathInContainer":"/dev/nvhost-ctrl", 79 | "CgroupPermissions":"rwm" 80 | }, 81 | { 82 | "PathOnHost": "/dev/nvhost-ctrl-gpu", 83 | "PathInContainer":"dev/nvhost-ctrl-gpu", 84 | "CgroupPermissions":"rwm" 85 | }, 86 | { 87 | "PathOnHost": "/dev/nvhost-prof-gpu", 88 | "PathInContainer":"dev/nvhost-prof-gpu ", 89 | "CgroupPermissions":"rwm" 90 | }, 91 | { 92 | "PathOnHost": "/dev/nvmap", 93 | "PathInContainer":"/dev/nvmap", 94 | "CgroupPermissions":"rwm" 95 | }, 96 | { 97 | "PathOnHost": "dev/nvhost-gpu", 98 | "PathInContainer":"dev/nvhost-gpu", 99 | "CgroupPermissions":"rwm" 100 | }, 101 | { 102 | "PathOnHost": "/dev/nvhost-as-gpu", 103 | "PathInContainer":"/dev/nvhost-as-gpu", 104 | "CgroupPermissions":"rwm" 105 | }, 106 | { 107 | "PathOnHost": "/dev/nvhost-vic", 108 | "PathInContainer":"/dev/nvhost-vic", 109 | "CgroupPermissions":"rwm" 110 | }, 111 | { 112 | "PathOnHost": "/dev/tegra_dc_ctrl", 113 | "PathInContainer":"/dev/tegra_dc_ctrl", 114 | "CgroupPermissions":"rwm" 115 | } 116 | ], 117 | "PortBindings": { 118 | "80/tcp": [ 119 | { 120 | "HostPort": "80" 121 | } 122 | ] 123 | } 124 | } 125 | } 126 | } 127 | } 128 | } 129 | } 130 | }, 131 | "$edgeHub": { 132 | "properties.desired": { 133 | "schemaVersion": "1.0", 134 | "routes": { 135 | "YoloModuleToIoTHub": "FROM /messages/modules/YoloModule/outputs/* INTO $upstream" 136 | }, 137 | "storeAndForwardConfiguration": { 138 | "timeToLiveSecs": 7200 139 | } 140 | } 141 | }, 142 | "YoloModule": { 143 | "properties.desired": { 144 | "ConfidenceLevel": "0.3", 145 | "VerboseMode": 0, 146 | "Inference": 1, 147 | "VideoSource": "" 148 | } 149 | } 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /docker/jetson-nano-l4t-cuda-cudnn-opencv-darknet/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM toolboc/jetson-nano-l4t-cuda-cudnn-opencv 2 | 3 | RUN apt update && apt install -y libcanberra-gtk-module && \ 4 | rm -rf /var/lib/apt/lists/* 5 | 6 | #GET Darknet sources 7 | WORKDIR /usr/local/src 8 | RUN git clone https://github.com/AlexeyAB/darknet.git && \ 9 | cd darknet && \ 10 | sed -i 
's/GPU=0/GPU=1/g' Makefile && \ 11 | sed -i 's/CUDNN=0/CUDNN=1/g' Makefile && \ 12 | sed -i 's/CUDNN_HALF=0/CUDNN_HALF=1/g' Makefile && \ 13 | sed -i 's/OPENCV=0/OPENCV=1/g' Makefile && \ 14 | sed -i 's/LIBSO=0/LIBSO=1/g' Makefile && \ 15 | sed -i '/arch=compute_53/s/^#.//g' Makefile && \ 16 | make 17 | 18 | RUN cd darknet && LD_LIBRARY_PATH=./:$LD_LIBRARY_PATH 19 | -------------------------------------------------------------------------------- /docker/jetson-nano-l4t-cuda-cudnn-opencv/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM toolboc/jetson-nano-l4t-cuda-cudnn 2 | 3 | #Required for libjasper-dev 4 | RUN echo "deb http://ports.ubuntu.com/ubuntu-ports/ xenial-security main restricted" | sudo tee -a /etc/apt/sources.list 5 | 6 | #INSTALL OPENCV dependencies 7 | RUN apt update && apt purge *libopencv* && apt install -y build-essential cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev \ 8 | libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev \ 9 | python2.7-dev python3.6-dev python-dev python-numpy python3-numpy \ 10 | libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev \ 11 | libv4l-dev v4l-utils qv4l2 v4l2ucp \ 12 | curl unzip && \ 13 | rm -rf /var/lib/apt/lists/* 14 | 15 | #GET OPENCV sources 16 | WORKDIR /usr/local/src 17 | RUN curl -L https://github.com/opencv/opencv/archive/4.1.0.zip -o opencv-4.1.0.zip && \ 18 | curl -L https://github.com/opencv/opencv_contrib/archive/4.1.0.zip -o opencv_contrib-4.1.0.zip && \ 19 | unzip opencv-4.1.0.zip && \ 20 | unzip opencv_contrib-4.1.0.zip && \ 21 | rm -rf opencv*.zip 22 | 23 | #INSTALL OPENCV 24 | RUN cd opencv-4.1.0/ && mkdir release && cd release/ && \ 25 | cmake -D OPENCV_GENERATE_PKGCONFIG=ON -D OPENCV_PC_FILE_NAME=opencv.pc -D WITH_CUDA=ON -D CUDA_ARCH_BIN="5.3" -D CUDA_ARCH_PTX="" -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-4.1.0/modules -D WITH_GSTREAMER=ON -D WITH_LIBV4L=ON -D BUILD_opencv_python2=ON -D BUILD_opencv_python3=ON -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_EXAMPLES=OFF -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local .. 
&& \ 26 | make -j3 && \ 27 | make install && \ 28 | cp unix-install/opencv.pc /usr/local/lib/pkgconfig && \ 29 | rm -rf /usr/local/src/opencv-4.1.0 30 | 31 | RUN ldconfig 32 | -------------------------------------------------------------------------------- /docker/jetson-nano-l4t-cuda-cudnn/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM toolboc/jetson-nano-l4t-cuda 2 | 3 | # NVIDIA CUDA Deep Neural Network library (cuDNN) 4 | 5 | ENV CUDNN_VERSION 7.3.1.28 6 | 7 | ENV CUDNN_PKG_VERSION=${CUDA_VERSION}-1 8 | 9 | LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}" 10 | 11 | ARG libcudnn7_URL=https://onedrive.live.com/download?cid=54AD8562A32D8752&resid=54AD8562A32D8752%21376196&authkey=ADTDdL0bhMWq4vM 12 | ARG libcudnn7_dev_URL=https://onedrive.live.com/download?cid=54AD8562A32D8752&resid=54AD8562A32D8752%21376197&authkey=APizXm-di7JPR0Y 13 | ARG libcudnn7_doc_URL=https://onedrive.live.com/download?cid=54AD8562A32D8752&resid=54AD8562A32D8752%21376195&authkey=ADqH53K9oRnkO-8 14 | 15 | RUN curl -sL $libcudnn7_URL -o libcudnn7_$CUDNN_VERSION-1+cuda10.0_arm64.deb && \ 16 | echo "92867c0a495f84ec11d108f84b776620 libcudnn7_$CUDNN_VERSION-1+cuda10.0_arm64.deb" | md5sum -c - && \ 17 | dpkg -i libcudnn7_$CUDNN_VERSION-1+cuda10.0_arm64.deb && \ 18 | rm libcudnn7_$CUDNN_VERSION-1+cuda10.0_arm64.deb 19 | 20 | RUN curl -sL $libcudnn7_dev_URL -o libcudnn7-dev_$CUDNN_VERSION-1+cuda10.0_arm64.deb && \ 21 | echo "dd0fbfa225b2374b946febc98e2cdec4 libcudnn7-dev_$CUDNN_VERSION-1+cuda10.0_arm64.deb" | md5sum -c - && \ 22 | dpkg -i libcudnn7-dev_$CUDNN_VERSION-1+cuda10.0_arm64.deb && \ 23 | rm libcudnn7-dev_$CUDNN_VERSION-1+cuda10.0_arm64.deb 24 | 25 | RUN curl -sL $libcudnn7_doc_URL -o libcudnn7-doc_$CUDNN_VERSION-1+cuda10.0_arm64.deb && \ 26 | echo "9478c16ceeaaca937d4d26b982e48bd1 libcudnn7-doc_$CUDNN_VERSION-1+cuda10.0_arm64.deb" | md5sum -c - && \ 27 | dpkg -i libcudnn7-doc_$CUDNN_VERSION-1+cuda10.0_arm64.deb && \ 28 | rm libcudnn7-doc_$CUDNN_VERSION-1+cuda10.0_arm64.deb -------------------------------------------------------------------------------- /docker/jetson-nano-l4t-cuda/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM toolboc/jetson-nano-l4t 2 | 3 | #INSTALL CUDA Toolkit for L4T 4 | ARG URL=https://onedrive.live.com/download?cid=54AD8562A32D8752&resid=54AD8562A32D8752%21376191&authkey=APwtvHgdqlgnJzo 5 | ARG CUDA_TOOLKIT_PKG="cuda-repo-l4t-10-0-local-10.0.166_1.0-1_arm64.deb" 6 | 7 | RUN apt-get update && \ 8 | apt-get install -y --no-install-recommends curl && \ 9 | curl -sL ${URL} -o ${CUDA_TOOLKIT_PKG} && \ 10 | echo "5e3eedc3707305f9022d41754d6becde ${CUDA_TOOLKIT_PKG}" | md5sum -c - && \ 11 | dpkg --force-all -i ${CUDA_TOOLKIT_PKG} && \ 12 | rm ${CUDA_TOOLKIT_PKG} && \ 13 | apt-key add var/cuda-repo-*-local*/*.pub && \ 14 | apt-get update && \ 15 | apt-get install -y --allow-downgrades cuda-toolkit-10-0 libgomp1 libfreeimage-dev libopenmpi-dev openmpi-bin && \ 16 | dpkg --purge cuda-repo-l4t-10-0-local-10.0.166 && \ 17 | apt-get clean && \ 18 | rm -rf /var/lib/apt/lists/* 19 | 20 | ENV CUDA_HOME=/usr/local/cuda 21 | ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64 22 | ENV PATH=$PATH:$CUDA_HOME/bin 23 | -------------------------------------------------------------------------------- /docker/jetson-nano-l4t/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM balenalib/jetson-tx2-ubuntu:bionic 2 | 3 
| ARG URL=https://onedrive.live.com/download?cid=54AD8562A32D8752&resid=54AD8562A32D8752%21376194&authkey=ADUfVNPnEHviFoU 4 | ARG DRIVER_PACK=Jetson-210_Linux_R32.1.0_aarch64.tbz2 5 | 6 | RUN apt-get update && apt-get install -y --no-install-recommends \ 7 | bzip2 \ 8 | ca-certificates \ 9 | curl \ 10 | lbzip2 \ 11 | sudo \ 12 | && \ 13 | curl -sSL $URL -o ${DRIVER_PACK} && \ 14 | echo "9138c7dd844eb290a20b31446b757e1781080f63 *./${DRIVER_PACK}" | sha1sum -c --strict - && \ 15 | tar -xpj --overwrite -f ./${DRIVER_PACK} && \ 16 | sed -i '/.*tar -I lbzip2 -xpmf ${LDK_NV_TEGRA_DIR}\/config\.tbz2.*/c\tar -I lbzip2 -xpm --overwrite -f ${LDK_NV_TEGRA_DIR}\/config.tbz2' ./Linux_for_Tegra/apply_binaries.sh && \ 17 | ./Linux_for_Tegra/apply_binaries.sh -r / && \ 18 | rm -rf ./Linux_for_Tegra && \ 19 | rm ./${DRIVER_PACK} \ 20 | && \ 21 | apt-get purge --autoremove -y bzip2 curl lbzip2 && \ 22 | apt-get clean && \ 23 | rm -rf /var/lib/apt/lists/* 24 | 25 | ENV LD_LIBRARY_PATH=/usr/lib/aarch64-linux-gnu/tegra:/usr/lib/aarch64-linux-gnu/tegra-egl:${LD_LIBRARY_PATH} 26 | 27 | RUN ln -s /usr/lib/aarch64-linux-gnu/tegra/libnvidia-ptxjitcompiler.so.32.1.0 /usr/lib/aarch64-linux-gnu/tegra/libnvidia-ptxjitcompiler.so && \ 28 | ln -s /usr/lib/aarch64-linux-gnu/tegra/libnvidia-ptxjitcompiler.so.32.1.0 /usr/lib/aarch64-linux-gnu/tegra/libnvidia-ptxjitcompiler.so.1 && \ 29 | ln -sf /usr/lib/aarch64-linux-gnu/tegra/libGL.so /usr/lib/aarch64-linux-gnu/libGL.so && \ 30 | ln -s /usr/lib/aarch64-linux-gnu/libcuda.so /usr/lib/aarch64-linux-gnu/libcuda.so.1 && \ 31 | ln -sf /usr/lib/aarch64-linux-gnu/tegra-egl/libEGL.so /usr/lib/aarch64-linux-gnu/libEGL.so 32 | -------------------------------------------------------------------------------- /modules/YoloModule/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | develop-eggs/ 12 | dist/ 13 | downloads/ 14 | eggs/ 15 | .eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | wheels/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | MANIFEST 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *.cover 46 | .hypothesis/ 47 | .pytest_cache/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | db.sqlite3 57 | 58 | # Flask stuff: 59 | instance/ 60 | .webassets-cache 61 | 62 | # Scrapy stuff: 63 | .scrapy 64 | 65 | # Sphinx documentation 66 | docs/_build/ 67 | 68 | # PyBuilder 69 | target/ 70 | 71 | # Jupyter Notebook 72 | .ipynb_checkpoints 73 | 74 | # pyenv 75 | .python-version 76 | 77 | # celery beat schedule file 78 | celerybeat-schedule 79 | 80 | # SageMath parsed files 81 | *.sage.py 82 | 83 | # Environments 84 | .env 85 | .venv 86 | env/ 87 | venv/ 88 | ENV/ 89 | env.bak/ 90 | venv.bak/ 91 | 92 | # Spyder project settings 93 | .spyderproject 94 | .spyproject 95 | 96 | # Rope project settings 97 | .ropeproject 98 | 99 | # mkdocs documentation 100 | /site 101 | 102 | # mypy 103 | .mypy_cache/ 104 | -------------------------------------------------------------------------------- /modules/YoloModule/Dockerfile.arm64v8: -------------------------------------------------------------------------------- 1 | FROM balenalib/jetson-tx2-ubuntu:bionic as iot-sdk-python-builder 2 | 3 | # Update image 4 | SHELL ["/bin/bash", "-c"] 5 | 6 | RUN apt-key adv --fetch-key https://repo.download.nvidia.com/jetson/jetson-ota-public.asc 7 | 8 | RUN apt-get update && apt-get install -y ca-certificates cmake build-essential curl libcurl4-openssl-dev \ 9 | libssl-dev uuid-dev apt-utils python python-pip python-virtualenv python3 python3-pip python3-virtualenv \ 10 | libboost-python-dev pkg-config valgrind sudo git software-properties-common && \ 11 | rm -rf /var/lib/apt/lists/* 12 | 13 | WORKDIR /usr/sdk 14 | 15 | RUN python -m virtualenv --python=python3 env3 16 | RUN source env3/bin/activate && pip install --upgrade pip && pip install -U setuptools wheel 17 | 18 | RUN git clone --recursive --branch release_2019_01_03 --depth=1 https://github.com/Azure/azure-iot-sdk-python.git src 19 | 20 | # Build for Python 3 21 | RUN add-apt-repository ppa:deadsnakes/ppa 22 | RUN source env3/bin/activate && ./src/build_all/linux/setup.sh --python-version 3.6 23 | RUN source env3/bin/activate && ./src/build_all/linux/release.sh --build-python 3.6 24 | 25 | # Build for Python 2 26 | #RUN pip install --upgrade pip==10.0.1 && python -m pip install -U setuptools wheel 27 | #RUN ./src/build_all/linux/setup.sh 28 | #RUN ./src/build_all/linux/release.sh 29 | 30 | FROM toolboc/jetson-nano-l4t-cuda-cudnn-opencv-darknet 31 | 32 | WORKDIR /app 33 | 34 | RUN apt-get update && \ 35 | DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends libcurl4-openssl-dev python3-pip libboost-python-dev libgtk2.0-dev python3-setuptools python3-numpy python3-opencv python-opencv && \ 36 | rm -rf /var/lib/apt/lists/* 37 | 38 | COPY --from=iot-sdk-python-builder /usr/sdk/src/device/doc/package-readme.md /src/device/doc/package-readme.md 39 | COPY --from=iot-sdk-python-builder /usr/sdk/src/build_all/linux/release_device_client /src/build_all/linux/release_device_client 40 | RUN cd /src/build_all/linux/release_device_client && python3 setup.py install 41 | COPY --from=iot-sdk-python-builder /usr/sdk/src/device/samples/iothub_client.so /app/iothub_client.so 42 | 43 | RUN cp /usr/local/src/darknet/libdarknet.so 
/app/libdarknet.so 44 | 45 | COPY /build/requirements.txt ./ 46 | RUN pip3 install --upgrade pip 47 | RUN pip3 install --no-cache-dir -r requirements.txt 48 | RUN pip3 install tornado==4.5.3 trollius 49 | 50 | RUN apt-get update && \ 51 | apt-get install -y --no-install-recommends zip pandoc && \ 52 | rm -rf /var/lib/apt/lists/* 53 | 54 | RUN git clone --depth=1 https://github.com/ytdl-org/youtube-dl.git && \ 55 | cd youtube-dl && \ 56 | make && \ 57 | make install 58 | 59 | ADD /app/ . 60 | 61 | # Expose the port 62 | EXPOSE 80 63 | 64 | ENTRYPOINT [ "python3", "-u", "./main.py" ] 65 | -------------------------------------------------------------------------------- /modules/YoloModule/app/AppState.py: -------------------------------------------------------------------------------- 1 | def init(hubManager): 2 | global HubManager 3 | HubManager = hubManager -------------------------------------------------------------------------------- /modules/YoloModule/app/ImageServer.py: -------------------------------------------------------------------------------- 1 | # Base on work from https://github.com/Bronkoknorb/PyImageStream 2 | import trollius as asyncio 3 | import tornado.ioloop 4 | import tornado.web 5 | import tornado.websocket 6 | import threading 7 | import base64 8 | import os 9 | 10 | class ImageStreamHandler(tornado.websocket.WebSocketHandler): 11 | 12 | def initialize(self, videoCapture): 13 | self.clients = [] 14 | self.videoCapture = videoCapture 15 | 16 | def check_origin(self, origin): 17 | return True 18 | 19 | def open(self): 20 | self.clients.append(self) 21 | print("Image Server Connection::opened") 22 | 23 | def on_message(self, msg): 24 | if msg == 'next': 25 | frame = self.videoCapture.get_display_frame() 26 | if frame != None: 27 | encoded = base64.b64encode(frame) 28 | self.write_message(encoded, binary=False) 29 | 30 | def on_close(self): 31 | self.clients.remove(self) 32 | print("Image Server Connection::closed") 33 | 34 | class ImageServer(threading.Thread): 35 | 36 | def __init__(self, port, videoCapture): 37 | threading.Thread.__init__(self) 38 | self.setDaemon(True) 39 | self.port = port 40 | self.videoCapture = videoCapture 41 | 42 | def run(self): 43 | print ('ImageServer::run() : Started Image Server') 44 | try: 45 | loop = asyncio.new_event_loop() 46 | asyncio.set_event_loop( loop ) 47 | 48 | indexPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates') 49 | app = tornado.web.Application([ 50 | (r"/stream", ImageStreamHandler, {'videoCapture': self.videoCapture}), 51 | (r"/(.*)", tornado.web.StaticFileHandler, {'path': indexPath, 'default_filename': 'index.html'}) 52 | ]) 53 | app.listen(self.port) 54 | print ('ImageServer::Started.') 55 | 56 | tornado.ioloop.IOLoop.instance().start() 57 | except Exception as e: 58 | print('ImageServer::exited run loop. 
Exception - '+ str(e)) 59 | 60 | def close(self): 61 | print ('ImageServer::close()') -------------------------------------------------------------------------------- /modules/YoloModule/app/VideoCapture.py: -------------------------------------------------------------------------------- 1 | #To make python 2 and python 3 compatible code 2 | from __future__ import division 3 | from __future__ import absolute_import 4 | 5 | import cv2 6 | import numpy as np 7 | import requests 8 | import time 9 | import json 10 | import os 11 | import signal 12 | 13 | import ImageServer 14 | from ImageServer import ImageServer 15 | import VideoStream 16 | from VideoStream import VideoStream 17 | 18 | import YoloInference 19 | from YoloInference import YoloInference 20 | 21 | class VideoCapture(object): 22 | 23 | def __init__( 24 | self, 25 | videoPath = "", 26 | verbose = True, 27 | videoW = 0, 28 | videoH = 0, 29 | fontScale = 1.0, 30 | inference = True, 31 | confidenceLevel = 0.5): 32 | 33 | self.videoPath = videoPath 34 | self.verbose = verbose 35 | self.videoW = videoW 36 | self.videoH = videoH 37 | self.inference = inference 38 | self.confidenceLevel = confidenceLevel 39 | self.useStream = False 40 | self.useMovieFile = False 41 | self.frameCount = 0 42 | self.vStream = None 43 | self.vCapture = None 44 | self.displayFrame = None 45 | self.fontScale = float(fontScale) 46 | self.captureInProgress = False 47 | 48 | print("VideoCapture::__init__()") 49 | print("OpenCV Version : %s" % (cv2.__version__)) 50 | print("===============================================================") 51 | print("Initialising Video Capture with the following parameters: ") 52 | print(" - Video path : " + self.videoPath) 53 | print(" - Video width : " + str(self.videoW)) 54 | print(" - Video height : " + str(self.videoH)) 55 | print(" - Font Scale : " + str(self.fontScale)) 56 | print(" - Inference? 
: " + str(self.inference)) 57 | print(" - ConficenceLevel : " + str(self.confidenceLevel)) 58 | print("") 59 | 60 | self.imageServer = ImageServer(80, self) 61 | self.imageServer.start() 62 | 63 | self.yoloInference = YoloInference(self.fontScale) 64 | 65 | def __IsCaptureDev(self, videoPath): 66 | try: 67 | return '/dev/video' in videoPath.lower() 68 | except ValueError: 69 | return False 70 | 71 | def __IsRtsp(self, videoPath): 72 | try: 73 | if 'rtsp:' in videoPath.lower() or '/api/holographic/stream' in videoPath.lower(): 74 | return True 75 | except ValueError: 76 | return False 77 | 78 | def __IsYoutube(self, videoPath): 79 | try: 80 | if 'www.youtube.com' in videoPath.lower() or 'youtu.be' in videoPath.lower(): 81 | return True 82 | else: 83 | return False 84 | except ValueError: 85 | return False 86 | 87 | def __enter__(self): 88 | 89 | if self.verbose: 90 | print("videoCapture::__enter__()") 91 | 92 | self.setVideoSource(self.videoPath) 93 | 94 | return self 95 | 96 | def setVideoSource(self, newVideoPath): 97 | 98 | if self.captureInProgress: 99 | self.captureInProgress = False 100 | time.sleep(1.0) 101 | if self.vCapture: 102 | self.vCapture.release() 103 | self.vCapture = None 104 | elif self.vStream: 105 | self.vStream.stop() 106 | self.vStream = None 107 | 108 | if self.__IsRtsp(newVideoPath): 109 | print("\r\n===> RTSP Video Source") 110 | 111 | self.useStream = True 112 | self.useMovieFile = False 113 | self.videoPath = newVideoPath 114 | 115 | if self.vStream: 116 | self.vStream.start() 117 | self.vStream = None 118 | 119 | if self.vCapture: 120 | self.vCapture.release() 121 | self.vCapture = None 122 | 123 | self.vStream = VideoStream(newVideoPath).start() 124 | # Needed to load at least one frame into the VideoStream class 125 | time.sleep(1.0) 126 | self.captureInProgress = True 127 | 128 | elif self.__IsYoutube(newVideoPath): 129 | print("\r\n===> YouTube Video Source") 130 | self.useStream = False 131 | self.useMovieFile = True 132 | # This is video file 133 | self.downloadVideo(newVideoPath) 134 | self.videoPath = newVideoPath 135 | if self.vCapture.isOpened(): 136 | self.captureInProgress = True 137 | else: 138 | print("===========================\r\nWARNING : Failed to Open Video Source\r\n===========================\r\n") 139 | 140 | elif self.__IsCaptureDev(newVideoPath): 141 | print("===> Webcam Video Source") 142 | if self.vStream: 143 | self.vStream.start() 144 | self.vStream = None 145 | 146 | if self.vCapture: 147 | self.vCapture.release() 148 | self.vCapture = None 149 | 150 | self.videoPath = newVideoPath 151 | self.useMovieFile = False 152 | self.useStream = False 153 | self.vCapture = cv2.VideoCapture(newVideoPath) 154 | if self.vCapture.isOpened(): 155 | self.captureInProgress = True 156 | else: 157 | print("===========================\r\nWARNING : Failed to Open Video Source\r\n===========================\r\n") 158 | else: 159 | print("===========================\r\nWARNING : No Video Source\r\n===========================\r\n") 160 | self.useStream = False 161 | self.useYouTube = False 162 | self.vCapture = None 163 | self.vStream = None 164 | return self 165 | 166 | def downloadVideo(self, videoUrl): 167 | if self.captureInProgress: 168 | bRestartCapture = True 169 | time.sleep(1.0) 170 | if self.vCapture: 171 | print("Relase vCapture") 172 | self.vCapture.release() 173 | self.vCapture = None 174 | else: 175 | bRestartCapture = False 176 | 177 | if os.path.isfile('/app/video.mp4'): 178 | os.remove("/app/video.mp4") 179 | 180 | print("Start downloading 
video") 181 | os.system("youtube-dl -o /app/video.mp4 -f mp4 " + videoUrl) 182 | print("Download Complete") 183 | self.vCapture = cv2.VideoCapture("/app/video.mp4") 184 | time.sleep(1.0) 185 | self.frameCount = int(self.vCapture.get(cv2.CAP_PROP_FRAME_COUNT)) 186 | 187 | if bRestartCapture: 188 | self.captureInProgress = True 189 | 190 | def get_display_frame(self): 191 | return self.displayFrame 192 | 193 | def videoStreamReadTimeoutHandler(signum, frame): 194 | raise Exception("VideoStream Read Timeout") 195 | 196 | def start(self): 197 | while True: 198 | if self.captureInProgress: 199 | self.__Run__() 200 | 201 | if not self.captureInProgress: 202 | time.sleep(1.0) 203 | 204 | def __Run__(self): 205 | 206 | print("===============================================================") 207 | print("videoCapture::__Run__()") 208 | print(" - Stream : " + str(self.useStream)) 209 | print(" - useMovieFile : " + str(self.useMovieFile)) 210 | 211 | cameraH = 0 212 | cameraW = 0 213 | frameH = 0 214 | frameW = 0 215 | 216 | if self.useStream and self.vStream: 217 | cameraH = int(self.vStream.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)) 218 | cameraW = int(self.vStream.stream.get(cv2.CAP_PROP_FRAME_WIDTH)) 219 | elif self.useStream == False and self.vCapture: 220 | cameraH = int(self.vCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)) 221 | cameraW = int(self.vCapture.get(cv2.CAP_PROP_FRAME_WIDTH)) 222 | else: 223 | print("Error : No Video Source") 224 | return 225 | 226 | if self.videoW != 0 and self.videoH != 0 and self.videoH != cameraH and self.videoW != cameraW: 227 | needResizeFrame = True 228 | frameH = self.videoH 229 | frameW = self.videoW 230 | else: 231 | needResizeFrame = False 232 | frameH = cameraH 233 | frameW = cameraW 234 | 235 | if needResizeFrame: 236 | print("Original frame size : " + str(cameraW) + "x" + str(cameraH)) 237 | print(" New frame size : " + str(frameW) + "x" + str(frameH)) 238 | print(" Resize : " + str(needResizeFrame)) 239 | else: 240 | print("Camera frame size : " + str(cameraW) + "x" + str(cameraH)) 241 | print(" frame size : " + str(frameW) + "x" + str(frameH)) 242 | 243 | # Check camera's FPS 244 | if self.useStream: 245 | cameraFPS = int(self.vStream.stream.get(cv2.CAP_PROP_FPS)) 246 | else: 247 | cameraFPS = int(self.vCapture.get(cv2.CAP_PROP_FPS)) 248 | 249 | if cameraFPS == 0: 250 | print("Error : Could not get FPS") 251 | raise Exception("Unable to acquire FPS for Video Source") 252 | return 253 | 254 | print("Frame rate (FPS) : " + str(cameraFPS)) 255 | 256 | currentFPS = cameraFPS 257 | perFrameTimeInMs = 1000 / cameraFPS 258 | 259 | signal.signal(signal.SIGALRM, self.videoStreamReadTimeoutHandler) 260 | 261 | while True: 262 | 263 | # Get current time before we capture a frame 264 | tFrameStart = time.time() 265 | 266 | if not self.captureInProgress: 267 | break 268 | 269 | if self.useMovieFile: 270 | currentFrame = int(self.vCapture.get(cv2.CAP_PROP_POS_FRAMES)) 271 | if currentFrame >= self.frameCount: 272 | self.vCapture.set(cv2.CAP_PROP_POS_FRAMES, 0) 273 | 274 | try: 275 | # Read a frame 276 | if self.useStream: 277 | # Timeout after 10s 278 | signal.alarm(10) 279 | frame = self.vStream.read() 280 | signal.alarm(0) 281 | else: 282 | frame = self.vCapture.read()[1] 283 | except Exception as e: 284 | print("ERROR : Exception during capturing") 285 | raise(e) 286 | 287 | # Resize frame if flagged 288 | if needResizeFrame: 289 | frame = cv2.resize(frame, (self.videoW, self.videoH)) 290 | 291 | # Run Object Detection 292 | if self.inference: 293 | 
self.yoloInference.runInference(frame, frameW, frameH, self.confidenceLevel) 294 | 295 | # Calculate FPS 296 | timeElapsedInMs = (time.time() - tFrameStart) * 1000 297 | currentFPS = 1000.0 / timeElapsedInMs 298 | 299 | if (currentFPS > cameraFPS): 300 | # Cannot go faster than Camera's FPS 301 | currentFPS = cameraFPS 302 | 303 | # Add FPS Text to the frame 304 | cv2.putText( frame, "FPS " + str(round(currentFPS, 1)), (10, int(30 * self.fontScale)), cv2.FONT_HERSHEY_SIMPLEX, self.fontScale, (0,0,255), 2) 305 | 306 | self.displayFrame = cv2.imencode( '.jpg', frame )[1].tobytes() 307 | 308 | timeElapsedInMs = (time.time() - tFrameStart) * 1000 309 | 310 | if (1000 / cameraFPS) > timeElapsedInMs: 311 | # This is faster than image source (e.g. camera) can feed. 312 | waitTimeBetweenFrames = perFrameTimeInMs - timeElapsedInMs 313 | #if self.verbose: 314 | #print(" Wait time between frames :" + str(int(waitTimeBetweenFrames))) 315 | time.sleep(waitTimeBetweenFrames/1000.0) 316 | 317 | def __exit__(self, exception_type, exception_value, traceback): 318 | 319 | if self.vCapture: 320 | self.vCapture.release() 321 | 322 | self.imageServer.close() 323 | cv2.destroyAllWindows() 324 | -------------------------------------------------------------------------------- /modules/YoloModule/app/VideoStream.py: -------------------------------------------------------------------------------- 1 | #To make python 2 and python 3 compatible code 2 | from __future__ import absolute_import 3 | 4 | from threading import Thread 5 | import sys 6 | if sys.version_info[0] < 3:#e.g python version <3 7 | import cv2 8 | else: 9 | import cv2 10 | # from cv2 import cv2 11 | # pylint: disable=E1101 12 | # pylint: disable=E0401 13 | # Disabling linting that is not supported by Pylint for C extensions such as OpenCV. 
See issue https://github.com/PyCQA/pylint/issues/1955 14 | 15 | 16 | # import the Queue class from Python 3 17 | if sys.version_info >= (3, 0): 18 | from queue import Queue 19 | # otherwise, import the Queue class for Python 2.7 20 | else: 21 | from Queue import Queue 22 | 23 | #This class reads all the video frames in a separate thread and always has the keeps only the latest frame in its queue to be grabbed by another thread 24 | class VideoStream(object): 25 | def __init__(self, path, queueSize=3): 26 | print("===============================================================\r\nVideoStream::__init__()") 27 | self.stream = cv2.VideoCapture(path) 28 | self.stopped = False 29 | self.Q = Queue(maxsize=queueSize) 30 | 31 | def start(self): 32 | # start a thread to read frames from the video stream 33 | t = Thread(target=self.update, args=()) 34 | t.daemon = True 35 | t.start() 36 | return self 37 | 38 | def update(self): 39 | try: 40 | while True: 41 | if self.stopped: 42 | return 43 | 44 | if not self.Q.full(): 45 | (grabbed, frame) = self.stream.read() 46 | 47 | # if the `grabbed` boolean is `False`, then we have 48 | # reached the end of the video file 49 | if not grabbed: 50 | self.stop() 51 | return 52 | 53 | self.Q.put(frame) 54 | 55 | #Clean the queue to keep only the latest frame 56 | while self.Q.qsize() > 1: 57 | self.Q.get() 58 | except Exception as e: 59 | print("got error: "+str(e)) 60 | 61 | def read(self): 62 | return self.Q.get() 63 | 64 | def more(self): 65 | return self.Q.qsize() > 0 66 | 67 | def stop(self): 68 | self.stopped = True 69 | 70 | def __exit__(self, exception_type, exception_value, traceback): 71 | self.stream.release() -------------------------------------------------------------------------------- /modules/YoloModule/app/YoloInference.py: -------------------------------------------------------------------------------- 1 | #To make python 2 and python 3 compatible code 2 | from __future__ import division 3 | from __future__ import absolute_import 4 | 5 | from darknet import darknet 6 | 7 | import AppState 8 | 9 | import iothub_client 10 | # pylint: disable=E0611 11 | # Disabling linting that is not supported by Pylint for C extensions such as iothub_client. 
See issue https://github.com/PyCQA/pylint/issues/1955 12 | from iothub_client import (IoTHubMessage) 13 | 14 | import cv2 15 | #import cv2.cv as cv 16 | import numpy as np 17 | import time 18 | import os 19 | import json 20 | from datetime import datetime 21 | 22 | yolocfg = r'yolo/yolov3-tiny.cfg' 23 | yoloweight = r'yolo/yolov3-tiny.weights' 24 | classesFile = r'yolo/coco.names' 25 | dataFile = r'yolo/coco.data' 26 | 27 | encoding = 'utf-8' 28 | 29 | 30 | class YoloInference(object): 31 | 32 | def __init__( 33 | self, 34 | fontScale = 1.0): 35 | 36 | print("YoloInference::__init__()") 37 | print("===============================================================") 38 | print("Initialising Yolo Inference with the following parameters: ") 39 | print("") 40 | 41 | self.classLabels = None 42 | self.colors = None 43 | self.nmsThreshold = 0.35 44 | self.fontScale = float(fontScale) 45 | self.fontThickness = 2 46 | self.net = None 47 | self.rgb = True 48 | self.verbose = False 49 | self.lastMessageSentTime = datetime.now() 50 | 51 | # Read class names from text file 52 | print(" - Setting Classes") 53 | with open(classesFile, 'r') as f: 54 | self.classLabels = [line.strip() for line in f.readlines()] 55 | 56 | # Generate colors for different classes 57 | print(" - Setting Colors") 58 | self.colors = np.random.uniform(0, 255, size=(len(self.classLabels), 3)) 59 | 60 | # Read pre-trained model and config file 61 | print(" - Loading Model and Config") 62 | darknet.performDetect( configPath = yolocfg, weightPath = yoloweight, metaPath= dataFile, initOnly= True ) 63 | 64 | def __get_output_layers(self, net): 65 | layerNames = net.getLayerNames() 66 | output_layer = [layerNames[i[0] - 1] for i in net.getUnconnectedOutLayers()] 67 | return output_layer 68 | 69 | def __draw_rect(self, image, class_id, confidence, x, y, w, h): 70 | 71 | if self.verbose: 72 | print("draw_rect x :" + str(x)) 73 | print("draw_rect x :" + str(y)) 74 | print("draw_rect w :" + str(w)) 75 | print("draw_rect h :" + str(h)) 76 | 77 | label = '%.2f' % confidence 78 | label = '%s:%s' % (class_id, label) 79 | color = self.colors[self.classLabels.index(class_id)] 80 | 81 | labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, self.fontScale, self.fontThickness) 82 | 83 | cv2.rectangle(image, (x, y), (w, h), color, self.fontThickness) 84 | 85 | # draw text inside the bounding box 86 | cv2.putText(image, label, (x + self.fontThickness + 2, y + labelSize[1] + baseLine + self.fontThickness + 2), cv2.FONT_HERSHEY_SIMPLEX, self.fontScale, color, self.fontThickness) 87 | 88 | def runInference(self, frame, frameW, frameH, confidenceLevel): 89 | try: 90 | 91 | detections = darknet.detect(darknet.netMain, darknet.metaMain, frame, confidenceLevel) 92 | 93 | countsByClassId = {}; 94 | 95 | for detection in detections: 96 | 97 | classLabel = detection[0] 98 | classID = str(detection[0], encoding) 99 | confidence = detection[1] 100 | 101 | if confidence > confidenceLevel: 102 | 103 | if classID not in countsByClassId: 104 | countsByClassId[classID] = 1 105 | else: 106 | countsByClassId[classID] = countsByClassId[classID] + 1 107 | 108 | bounds = detection[2] 109 | 110 | xEntent = int(bounds[2]) 111 | yExtent = int(bounds[3]) 112 | # Coordinates are around the center 113 | xCoord = int(bounds[0] - bounds[2]/2) 114 | yCoord = int(bounds[1] - bounds[3]/2) 115 | 116 | self.__draw_rect(frame, classID, confidence, xCoord, yCoord, xCoord + xEntent, yCoord + yExtent) 117 | 118 | if len(countsByClassId) > 0 and (datetime.now() - 
self.lastMessageSentTime).total_seconds() >= 1 : 119 | strMessage = json.dumps(countsByClassId) 120 | message = IoTHubMessage(strMessage) 121 | print(strMessage) 122 | AppState.HubManager.send_event_to_output("output1", message, 0) 123 | self.lastMessageSentTime=datetime.now() 124 | 125 | except Exception as e: 126 | print("Exception during AI Inference") 127 | print(e) -------------------------------------------------------------------------------- /modules/YoloModule/app/darknet/darknet.py: -------------------------------------------------------------------------------- 1 | #!python3 2 | """ 3 | Python 3 wrapper for identifying objects in images 4 | 5 | Requires DLL compilation 6 | 7 | Both the GPU and no-GPU version should be compiled; the no-GPU version should be renamed "yolo_cpp_dll_nogpu.dll". 8 | 9 | On a GPU system, you can force CPU evaluation by any of: 10 | 11 | - Set global variable DARKNET_FORCE_CPU to True 12 | - Set environment variable CUDA_VISIBLE_DEVICES to -1 13 | - Set environment variable "FORCE_CPU" to "true" 14 | 15 | 16 | To use, either run performDetect() after import, or modify the end of this file. 17 | 18 | See the docstring of performDetect() for parameters. 19 | 20 | Directly viewing or returning bounding-boxed images requires scikit-image to be installed (`pip install scikit-image`) 21 | 22 | 23 | Original *nix 2.7: https://github.com/pjreddie/darknet/blob/0f110834f4e18b30d5f101bf8f1724c34b7b83db/python/darknet.py 24 | Windows Python 2.7 version: https://github.com/AlexeyAB/darknet/blob/fc496d52bf22a0bb257300d3c79be9cd80e722cb/build/darknet/x64/darknet.py 25 | 26 | @author: Philip Kahn 27 | @date: 20180503 28 | """ 29 | #pylint: disable=R, W0401, W0614, W0703 30 | from ctypes import * 31 | import math 32 | import random 33 | import os 34 | import numpy as np 35 | 36 | def sample(probs): 37 | s = sum(probs) 38 | probs = [a/s for a in probs] 39 | r = random.uniform(0, 1) 40 | for i in range(len(probs)): 41 | r = r - probs[i] 42 | if r <= 0: 43 | return i 44 | return len(probs)-1 45 | 46 | def c_array(ctype, values): 47 | arr = (ctype*len(values))() 48 | arr[:] = values 49 | return arr 50 | 51 | class BOX(Structure): 52 | _fields_ = [("x", c_float), 53 | ("y", c_float), 54 | ("w", c_float), 55 | ("h", c_float)] 56 | 57 | class DETECTION(Structure): 58 | _fields_ = [("bbox", BOX), 59 | ("classes", c_int), 60 | ("prob", POINTER(c_float)), 61 | ("mask", POINTER(c_float)), 62 | ("objectness", c_float), 63 | ("sort_class", c_int)] 64 | 65 | 66 | class IMAGE(Structure): 67 | _fields_ = [("w", c_int), 68 | ("h", c_int), 69 | ("c", c_int), 70 | ("data", POINTER(c_float))] 71 | 72 | class METADATA(Structure): 73 | _fields_ = [("classes", c_int), 74 | ("names", POINTER(c_char_p))] 75 | 76 | 77 | 78 | #lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL) 79 | #lib = CDLL("libdarknet.so", RTLD_GLOBAL) 80 | hasGPU = True 81 | if os.name == "nt": 82 | cwd = os.path.dirname(__file__) 83 | os.environ['PATH'] = cwd + ';' + os.environ['PATH'] 84 | winGPUdll = os.path.join(cwd, "yolo_cpp_dll.dll") 85 | winNoGPUdll = os.path.join(cwd, "yolo_cpp_dll_nogpu.dll") 86 | envKeys = list() 87 | for k, v in os.environ.items(): 88 | envKeys.append(k) 89 | try: 90 | try: 91 | tmp = os.environ["FORCE_CPU"].lower() 92 | if tmp in ["1", "true", "yes", "on"]: 93 | raise ValueError("ForceCPU") 94 | else: 95 | print("Flag value '"+tmp+"' not forcing CPU mode") 96 | except KeyError: 97 | # We never set the flag 98 | if 'CUDA_VISIBLE_DEVICES' in envKeys: 99 | if 
int(os.environ['CUDA_VISIBLE_DEVICES']) < 0: 100 | raise ValueError("ForceCPU") 101 | try: 102 | global DARKNET_FORCE_CPU 103 | if DARKNET_FORCE_CPU: 104 | raise ValueError("ForceCPU") 105 | except NameError: 106 | pass 107 | # print(os.environ.keys()) 108 | # print("FORCE_CPU flag undefined, proceeding with GPU") 109 | if not os.path.exists(winGPUdll): 110 | raise ValueError("NoDLL") 111 | lib = CDLL(winGPUdll, RTLD_GLOBAL) 112 | except (KeyError, ValueError): 113 | hasGPU = False 114 | if os.path.exists(winNoGPUdll): 115 | lib = CDLL(winNoGPUdll, RTLD_GLOBAL) 116 | print("Notice: CPU-only mode") 117 | else: 118 | # Try the other way, in case no_gpu was 119 | # compile but not renamed 120 | lib = CDLL(winGPUdll, RTLD_GLOBAL) 121 | print("Environment variables indicated a CPU run, but we didn't find `"+winNoGPUdll+"`. Trying a GPU run anyway.") 122 | else: 123 | lib = CDLL("./libdarknet.so", RTLD_GLOBAL) 124 | lib.network_width.argtypes = [c_void_p] 125 | lib.network_width.restype = c_int 126 | lib.network_height.argtypes = [c_void_p] 127 | lib.network_height.restype = c_int 128 | 129 | copy_image_from_bytes = lib.copy_image_from_bytes 130 | copy_image_from_bytes.argtypes = [IMAGE,c_char_p] 131 | 132 | def network_width(net): 133 | return lib.network_width(net) 134 | 135 | def network_height(net): 136 | return lib.network_height(net) 137 | 138 | predict = lib.network_predict_ptr 139 | predict.argtypes = [c_void_p, POINTER(c_float)] 140 | predict.restype = POINTER(c_float) 141 | 142 | if hasGPU: 143 | set_gpu = lib.cuda_set_device 144 | set_gpu.argtypes = [c_int] 145 | 146 | make_image = lib.make_image 147 | make_image.argtypes = [c_int, c_int, c_int] 148 | make_image.restype = IMAGE 149 | 150 | get_network_boxes = lib.get_network_boxes 151 | get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int), c_int] 152 | get_network_boxes.restype = POINTER(DETECTION) 153 | 154 | make_network_boxes = lib.make_network_boxes 155 | make_network_boxes.argtypes = [c_void_p] 156 | make_network_boxes.restype = POINTER(DETECTION) 157 | 158 | free_detections = lib.free_detections 159 | free_detections.argtypes = [POINTER(DETECTION), c_int] 160 | 161 | free_ptrs = lib.free_ptrs 162 | free_ptrs.argtypes = [POINTER(c_void_p), c_int] 163 | 164 | network_predict = lib.network_predict_ptr 165 | network_predict.argtypes = [c_void_p, POINTER(c_float)] 166 | 167 | reset_rnn = lib.reset_rnn 168 | reset_rnn.argtypes = [c_void_p] 169 | 170 | load_net = lib.load_network 171 | load_net.argtypes = [c_char_p, c_char_p, c_int] 172 | load_net.restype = c_void_p 173 | 174 | load_net_custom = lib.load_network_custom 175 | load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int] 176 | load_net_custom.restype = c_void_p 177 | 178 | do_nms_obj = lib.do_nms_obj 179 | do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float] 180 | 181 | do_nms_sort = lib.do_nms_sort 182 | do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float] 183 | 184 | free_image = lib.free_image 185 | free_image.argtypes = [IMAGE] 186 | 187 | letterbox_image = lib.letterbox_image 188 | letterbox_image.argtypes = [IMAGE, c_int, c_int] 189 | letterbox_image.restype = IMAGE 190 | 191 | load_meta = lib.get_metadata 192 | lib.get_metadata.argtypes = [c_char_p] 193 | lib.get_metadata.restype = METADATA 194 | 195 | load_image = lib.load_image_color 196 | load_image.argtypes = [c_char_p, c_int, c_int] 197 | load_image.restype = IMAGE 198 | 199 | rgbgr_image = lib.rgbgr_image 200 | 
rgbgr_image.argtypes = [IMAGE] 201 | 202 | predict_image = lib.network_predict_image 203 | predict_image.argtypes = [c_void_p, IMAGE] 204 | predict_image.restype = POINTER(c_float) 205 | 206 | predict_image_letterbox = lib.network_predict_image_letterbox 207 | predict_image_letterbox.argtypes = [c_void_p, IMAGE] 208 | predict_image_letterbox.restype = POINTER(c_float) 209 | 210 | def array_to_image(arr): 211 | # need to return old values to avoid python freeing memory 212 | arr = arr.transpose(2,0,1) 213 | c = arr.shape[0] 214 | h = arr.shape[1] 215 | w = arr.shape[2] 216 | arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0 217 | data = arr.ctypes.data_as(POINTER(c_float)) 218 | im = IMAGE(w,h,c,data) 219 | return im, arr 220 | 221 | def classify(net, meta, im): 222 | out = predict_image(net, im) 223 | res = [] 224 | for i in range(meta.classes): 225 | if altNames is None: 226 | nameTag = meta.names[i] 227 | else: 228 | nameTag = altNames[i] 229 | res.append((nameTag, out[i])) 230 | res = sorted(res, key=lambda x: -x[1]) 231 | return res 232 | 233 | def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45): 234 | if isinstance(image, bytes): 235 | # image is a filename 236 | # i.e. image = b'/darknet/data/dog.jpg' 237 | im = load_image(image, 0, 0) 238 | else: 239 | # image is an nparray 240 | # i.e. image = cv2.imread('/darknet/data/dog.jpg') 241 | im, image = array_to_image(image) 242 | rgbgr_image(im) 243 | num = c_int(0) 244 | pnum = pointer(num) 245 | letter_box = 0 246 | predict_image(net, im) 247 | dets = get_network_boxes(net, im.w, im.h, thresh, 248 | hier_thresh, None, 0, pnum, letter_box) 249 | num = pnum[0] 250 | if nms: do_nms_obj(dets, num, meta.classes, nms) 251 | 252 | res = [] 253 | for j in range(num): 254 | a = dets[j].prob[0:meta.classes] 255 | if any(a): 256 | ai = np.array(a).nonzero()[0] 257 | for i in ai: 258 | b = dets[j].bbox 259 | res.append((meta.names[i], dets[j].prob[i], 260 | (b.x, b.y, b.w, b.h))) 261 | 262 | res = sorted(res, key=lambda x: -x[1]) 263 | if isinstance(image, bytes): free_image(im) 264 | free_detections(dets, num) 265 | return res 266 | 267 | def detect_image(net, meta, im, thresh=.5, hier_thresh=.5, nms=.45, debug= False): 268 | #import cv2 269 | #custom_image_bgr = cv2.imread(image) # use: detect(,,imagePath,) 270 | #custom_image = cv2.cvtColor(custom_image_bgr, cv2.COLOR_BGR2RGB) 271 | #custom_image = cv2.resize(custom_image,(lib.network_width(net), lib.network_height(net)), interpolation = cv2.INTER_LINEAR) 272 | #import scipy.misc 273 | #custom_image = scipy.misc.imread(image) 274 | #im, arr = array_to_image(custom_image) # you should comment line below: free_image(im) 275 | num = c_int(0) 276 | if debug: print("Assigned num") 277 | pnum = pointer(num) 278 | if debug: print("Assigned pnum") 279 | predict_image(net, im) 280 | letter_box = 0 281 | #predict_image_letterbox(net, im) 282 | #letter_box = 1 283 | if debug: print("did prediction") 284 | #dets = get_network_boxes(net, custom_image_bgr.shape[1], custom_image_bgr.shape[0], thresh, hier_thresh, None, 0, pnum, letter_box) # OpenCV 285 | dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum, letter_box) 286 | if debug: print("Got dets") 287 | num = pnum[0] 288 | if debug: print("got zeroth index of pnum") 289 | if nms: 290 | do_nms_sort(dets, num, meta.classes, nms) 291 | if debug: print("did sort") 292 | res = [] 293 | if debug: print("about to range") 294 | for j in range(num): 295 | if debug: print("Ranging on "+str(j)+" of "+str(num)) 296 | 
if debug: print("Classes: "+str(meta), meta.classes, meta.names) 297 | for i in range(meta.classes): 298 | if debug: print("Class-ranging on "+str(i)+" of "+str(meta.classes)+"= "+str(dets[j].prob[i])) 299 | if dets[j].prob[i] > 0: 300 | b = dets[j].bbox 301 | if altNames is None: 302 | nameTag = meta.names[i] 303 | else: 304 | nameTag = altNames[i] 305 | if debug: 306 | print("Got bbox", b) 307 | print(nameTag) 308 | print(dets[j].prob[i]) 309 | print((b.x, b.y, b.w, b.h)) 310 | res.append((nameTag, dets[j].prob[i], (b.x, b.y, b.w, b.h))) 311 | if debug: print("did range") 312 | res = sorted(res, key=lambda x: -x[1]) 313 | if debug: print("did sort") 314 | free_detections(dets, num) 315 | if debug: print("freed detections") 316 | return res 317 | 318 | 319 | netMain = None 320 | metaMain = None 321 | altNames = None 322 | 323 | def performDetect(imagePath="data/dog.jpg", thresh= 0.25, configPath = "./cfg/yolov3.cfg", weightPath = "yolov3.weights", metaPath= "./cfg/coco.data", showImage= True, makeImageOnly = False, initOnly= False): 324 | """ 325 | Convenience function to handle the detection and returns of objects. 326 | 327 | Displaying bounding boxes requires libraries scikit-image and numpy 328 | 329 | Parameters 330 | ---------------- 331 | imagePath: str 332 | Path to the image to evaluate. Raises ValueError if not found 333 | 334 | thresh: float (default= 0.25) 335 | The detection threshold 336 | 337 | configPath: str 338 | Path to the configuration file. Raises ValueError if not found 339 | 340 | weightPath: str 341 | Path to the weights file. Raises ValueError if not found 342 | 343 | metaPath: str 344 | Path to the data file. Raises ValueError if not found 345 | 346 | showImage: bool (default= True) 347 | Compute (and show) bounding boxes. Changes return. 348 | 349 | makeImageOnly: bool (default= False) 350 | If showImage is True, this won't actually *show* the image, but will create the array and return it. 351 | 352 | initOnly: bool (default= False) 353 | Only initialize globals. Don't actually run a prediction. 354 | 355 | Returns 356 | ---------------------- 357 | 358 | 359 | When showImage is False, list of tuples like 360 | ('obj_label', confidence, (bounding_box_x_px, bounding_box_y_px, bounding_box_width_px, bounding_box_height_px)) 361 | The X and Y coordinates are from the center of the bounding box. Subtract half the width or height to get the lower corner. 362 | 363 | Otherwise, a dict with 364 | { 365 | "detections": as above 366 | "image": a numpy array representing an image, compatible with scikit-image 367 | "caption": an image caption 368 | } 369 | """ 370 | # Import the global variables. 
This lets us instance Darknet once, then just call performDetect() again without instancing again 371 | global metaMain, netMain, altNames #pylint: disable=W0603 372 | assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)" 373 | if not os.path.exists(configPath): 374 | raise ValueError("Invalid config path `"+os.path.abspath(configPath)+"`") 375 | if not os.path.exists(weightPath): 376 | raise ValueError("Invalid weight path `"+os.path.abspath(weightPath)+"`") 377 | if not os.path.exists(metaPath): 378 | raise ValueError("Invalid data file path `"+os.path.abspath(metaPath)+"`") 379 | if netMain is None: 380 | netMain = load_net_custom(configPath.encode("ascii"), weightPath.encode("ascii"), 0, 1) # batch size = 1 381 | if metaMain is None: 382 | metaMain = load_meta(metaPath.encode("ascii")) 383 | if altNames is None: 384 | # In Python 3, the metafile default access craps out on Windows (but not Linux) 385 | # Read the names file and create a list to feed to detect 386 | try: 387 | with open(metaPath) as metaFH: 388 | metaContents = metaFH.read() 389 | import re 390 | match = re.search("names *= *(.*)$", metaContents, re.IGNORECASE | re.MULTILINE) 391 | if match: 392 | result = match.group(1) 393 | else: 394 | result = None 395 | try: 396 | if os.path.exists(result): 397 | with open(result) as namesFH: 398 | namesList = namesFH.read().strip().split("\n") 399 | altNames = [x.strip() for x in namesList] 400 | except TypeError: 401 | pass 402 | except Exception: 403 | pass 404 | if initOnly: 405 | print("Initialized detector") 406 | return None 407 | if not os.path.exists(imagePath): 408 | raise ValueError("Invalid image path `"+os.path.abspath(imagePath)+"`") 409 | # Do the detection 410 | #detections = detect(netMain, metaMain, imagePath, thresh) # if is used cv2.imread(image) 411 | detections = detect(netMain, metaMain, imagePath.encode("ascii"), thresh) 412 | if showImage: 413 | try: 414 | from skimage import io, draw 415 | image = io.imread(imagePath) 416 | print("*** "+str(len(detections))+" Results, color coded by confidence ***") 417 | imcaption = [] 418 | for detection in detections: 419 | label = detection[0] 420 | confidence = detection[1] 421 | pstring = label+": "+str(np.rint(100 * confidence))+"%" 422 | imcaption.append(pstring) 423 | print(pstring) 424 | bounds = detection[2] 425 | shape = image.shape 426 | # x = shape[1] 427 | # xExtent = int(x * bounds[2] / 100) 428 | # y = shape[0] 429 | # yExtent = int(y * bounds[3] / 100) 430 | yExtent = int(bounds[3]) 431 | xEntent = int(bounds[2]) 432 | # Coordinates are around the center 433 | xCoord = int(bounds[0] - bounds[2]/2) 434 | yCoord = int(bounds[1] - bounds[3]/2) 435 | boundingBox = [ 436 | [xCoord, yCoord], 437 | [xCoord, yCoord + yExtent], 438 | [xCoord + xEntent, yCoord + yExtent], 439 | [xCoord + xEntent, yCoord] 440 | ] 441 | # Wiggle it around to make a 3px border 442 | rr, cc = draw.polygon_perimeter([x[1] for x in boundingBox], [x[0] for x in boundingBox], shape= shape) 443 | rr2, cc2 = draw.polygon_perimeter([x[1] + 1 for x in boundingBox], [x[0] for x in boundingBox], shape= shape) 444 | rr3, cc3 = draw.polygon_perimeter([x[1] - 1 for x in boundingBox], [x[0] for x in boundingBox], shape= shape) 445 | rr4, cc4 = draw.polygon_perimeter([x[1] for x in boundingBox], [x[0] + 1 for x in boundingBox], shape= shape) 446 | rr5, cc5 = draw.polygon_perimeter([x[1] for x in boundingBox], [x[0] - 1 for x in boundingBox], shape= shape) 447 | boxColor = (int(255 * (1 - (confidence ** 2))), 
int(255 * (confidence ** 2)), 0) 448 | draw.set_color(image, (rr, cc), boxColor, alpha= 0.8) 449 | draw.set_color(image, (rr2, cc2), boxColor, alpha= 0.8) 450 | draw.set_color(image, (rr3, cc3), boxColor, alpha= 0.8) 451 | draw.set_color(image, (rr4, cc4), boxColor, alpha= 0.8) 452 | draw.set_color(image, (rr5, cc5), boxColor, alpha= 0.8) 453 | if not makeImageOnly: 454 | io.imshow(image) 455 | io.show() 456 | detections = { 457 | "detections": detections, 458 | "image": image, 459 | "caption": "\n".join(imcaption) 460 | } 461 | except Exception as e: 462 | print("Unable to show image: "+str(e)) 463 | return detections 464 | 465 | if __name__ == "__main__": 466 | print(performDetect()) 467 | -------------------------------------------------------------------------------- /modules/YoloModule/app/main.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft. All rights reserved. 2 | # Licensed under the MIT license. See LICENSE file in the project root for 3 | # full license information. 4 | 5 | import os 6 | import random 7 | import sys 8 | import time 9 | import json 10 | 11 | import iothub_client 12 | # pylint: disable=E0611 13 | # Disabling linting that is not supported by Pylint for C extensions such as iothub_client. See issue https://github.com/PyCQA/pylint/issues/1955 14 | from iothub_client import (IoTHubModuleClient, IoTHubClientError, IoTHubError, 15 | IoTHubMessage, IoTHubMessageDispositionResult, 16 | IoTHubTransportProvider) 17 | 18 | import VideoCapture 19 | from VideoCapture import VideoCapture 20 | 21 | import AppState 22 | 23 | def send_to_Hub_callback(strMessage): 24 | message = IoTHubMessage(bytearray(strMessage, 'utf8')) 25 | print("\r\nsend_to_Hub_callback()") 26 | print(" - message : %s" % message) 27 | hubManager.send_event_to_output("output1", message, 0) 28 | 29 | # Callback received when the message that we're forwarding is processed. 30 | def send_confirmation_callback(message, result, user_context): 31 | print("\r\nsend_confirmation_callback()") 32 | print(" - result : %s" % result) 33 | 34 | def device_twin_callback(update_state, payload, user_context): 35 | global hubManager 36 | global videoCapture 37 | 38 | if (("%s"%(update_state)) == "PARTIAL"): 39 | jsonData = json.loads(payload) 40 | else: 41 | jsonData = json.loads(payload).get('desired') 42 | 43 | print("\r\ndevice_twin_callback()") 44 | print(" - status : %s" % update_state ) 45 | print(" - payload : \r\n%s" % json.dumps(jsonData, indent=4)) 46 | 47 | if "ConfidenceLevel" in jsonData: 48 | print(" - ConfidenceLevel : " + str(jsonData['ConfidenceLevel'])) 49 | videoCapture.confidenceLevel = float(jsonData['ConfidenceLevel']) 50 | 51 | if "VerboseMode" in jsonData: 52 | print(" - Verbose : " + str(jsonData['VerboseMode'])) 53 | if jsonData['VerboseMode'] == 0: 54 | videoCapture.verbose = False 55 | else: 56 | videoCapture.verbose = True 57 | 58 | if "Inference" in jsonData: 59 | print(" - Inference : " + str(jsonData['Inference'])) 60 | if jsonData['Inference'] == 0: 61 | videoCapture.inference = False 62 | else: 63 | videoCapture.inference = True 64 | 65 | if "VideoSource" in jsonData: 66 | strUrl = str(jsonData['VideoSource']) 67 | print(" - VideoSource : " + strUrl) 68 | if strUrl.lower() != videoCapture.videoPath.lower() and strUrl != "": 69 | videoCapture.setVideoSource(strUrl) 70 | 71 | device_twin_send_reported(hubManager) 72 | 73 | def device_twin_send_reported(hubManager): 74 | global videoCapture 75 | 76 | jsonTemplate = "{\"ConfidenceLevel\": \"%s\",\"VerboseMode\": %d,\"Inference\": %d, \"VideoSource\":\"%s\"}" 77 | 78 | strUrl = videoCapture.videoPath 79 | 80 | jsonData = jsonTemplate % ( 81 | str(videoCapture.confidenceLevel), 82 | videoCapture.verbose, 83 | videoCapture.inference, 84 | strUrl) 85 | 86 | print("\r\ndevice_twin_send_reported()") 87 | print(" - payload : \r\n%s" % json.dumps(jsonData, indent=4)) 88 | 89 | hubManager.send_reported_state(jsonData, len(jsonData), 1002) 90 | 91 | def
send_reported_state_callback(status_code, user_context): 92 | print("\r\nsend_reported_state_callback()") 93 | print(" - status_code : [%d]" % (status_code) ) 94 | 95 | class HubManager(object): 96 | 97 | def __init__( 98 | self, 99 | messageTimeout, 100 | protocol, 101 | verbose): 102 | 103 | # Communicate with the Edge Hub 104 | 105 | self.messageTimeout = messageTimeout 106 | self.client_protocol = protocol 107 | self.client = IoTHubModuleClient() 108 | self.client.create_from_environment(protocol) 109 | self.client.set_option("messageTimeout", self.messageTimeout) 110 | self.client.set_option("product_info","edge-yolo-capture") 111 | if verbose: 112 | self.client.set_option("logtrace", 1)#enables MQTT logging 113 | 114 | self.client.set_module_twin_callback( 115 | device_twin_callback, None) 116 | 117 | def send_reported_state(self, reported_state, size, user_context): 118 | self.client.send_reported_state( 119 | reported_state, size, 120 | send_reported_state_callback, user_context) 121 | 122 | def send_event_to_output(self, outputQueueName, event, send_context): 123 | self.client.send_event_async(outputQueueName, event, send_confirmation_callback, send_context) 124 | 125 | def main( 126 | videoPath ="", 127 | verbose = False, 128 | videoWidth = 0, 129 | videoHeight = 0, 130 | fontScale = 1.0, 131 | inference = False, 132 | confidenceLevel = 0.8 133 | ): 134 | 135 | global hubManager 136 | global videoCapture 137 | 138 | try: 139 | print("\nPython %s\n" % sys.version ) 140 | print("Yolo Capture Azure IoT Edge Module. Press Ctrl-C to exit." ) 141 | 142 | with VideoCapture(videoPath, 143 | verbose, 144 | videoWidth, 145 | videoHeight, 146 | fontScale, 147 | inference, 148 | confidenceLevel) as videoCapture: 149 | 150 | try: 151 | hubManager = HubManager(10000, IoTHubTransportProvider.MQTT, False) 152 | AppState.init(hubManager) 153 | except IoTHubError as iothub_error: 154 | print("Unexpected error %s from IoTHub" % iothub_error ) 155 | return 156 | 157 | videoCapture.start() 158 | 159 | except KeyboardInterrupt: 160 | print("Camera capture module stopped" ) 161 | 162 | 163 | def __convertStringToBool(env): 164 | if env in ['True', 'TRUE', '1', 'y', 'YES', 'Y', 'Yes']: 165 | return True 166 | elif env in ['False', 'FALSE', '0', 'n', 'NO', 'N', 'No']: 167 | return False 168 | else: 169 | raise ValueError('Could not convert string to bool.') 170 | 171 | if __name__ == '__main__': 172 | try: 173 | VIDEO_PATH = os.environ['VIDEO_PATH'] 174 | VERBOSE = __convertStringToBool(os.getenv('VERBOSE', 'False')) 175 | VIDEO_WIDTH = int(os.getenv('VIDEO_WIDTH', 0)) 176 | VIDEO_HEIGHT = int(os.getenv('VIDEO_HEIGHT',0)) 177 | FONT_SCALE = os.getenv('FONT_SCALE', 1) 178 | INFERENCE = __convertStringToBool(os.getenv('INFERENCE', 'False')) 179 | CONFIDENCE_LEVEL = float(os.getenv('CONFIDENCE_LEVEL', "0.8")) 180 | 181 | except ValueError as error: 182 | print(error ) 183 | sys.exit(1) 184 | 185 | main(VIDEO_PATH, VERBOSE, VIDEO_WIDTH, VIDEO_HEIGHT, FONT_SCALE, INFERENCE, CONFIDENCE_LEVEL) 186 | 187 | 188 | 189 | -------------------------------------------------------------------------------- /modules/YoloModule/app/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | Video Stream 4 | 5 | 6 | 7 | 8 | 9 | 27 | 28 | -------------------------------------------------------------------------------- /modules/YoloModule/app/yolo/coco.data: -------------------------------------------------------------------------------- 1 | classes= 80 2 | train = 
/home/pjreddie/data/coco/trainvalno5k.txt 3 | valid = coco_testdev 4 | #valid = data/coco_val_5k.list 5 | names = /app/yolo/coco.names 6 | backup = /home/pjreddie/backup/ 7 | eval=coco -------------------------------------------------------------------------------- /modules/YoloModule/app/yolo/coco.names: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorbike 5 | aeroplane 6 | bus 7 | train 8 | truck 9 | boat 10 | traffic light 11 | fire hydrant 12 | stop sign 13 | parking meter 14 | bench 15 | bird 16 | cat 17 | dog 18 | horse 19 | sheep 20 | cow 21 | elephant 22 | bear 23 | zebra 24 | giraffe 25 | backpack 26 | umbrella 27 | handbag 28 | tie 29 | suitcase 30 | frisbee 31 | skis 32 | snowboard 33 | sports ball 34 | kite 35 | baseball bat 36 | baseball glove 37 | skateboard 38 | surfboard 39 | tennis racket 40 | bottle 41 | wine glass 42 | cup 43 | fork 44 | knife 45 | spoon 46 | bowl 47 | banana 48 | apple 49 | sandwich 50 | orange 51 | broccoli 52 | carrot 53 | hot dog 54 | pizza 55 | donut 56 | cake 57 | chair 58 | sofa 59 | pottedplant 60 | bed 61 | diningtable 62 | toilet 63 | tvmonitor 64 | laptop 65 | mouse 66 | remote 67 | keyboard 68 | cell phone 69 | microwave 70 | oven 71 | toaster 72 | sink 73 | refrigerator 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy bear 79 | hair drier 80 | toothbrush 81 | -------------------------------------------------------------------------------- /modules/YoloModule/app/yolo/yolov3-tiny.cfg: -------------------------------------------------------------------------------- 1 | [net] 2 | # Testing 3 | batch=1 4 | subdivisions=1 5 | # Training 6 | # batch=64 7 | # subdivisions=2 8 | width=416 9 | height=416 10 | channels=3 11 | momentum=0.9 12 | decay=0.0005 13 | angle=0 14 | saturation = 1.5 15 | exposure = 1.5 16 | hue=.1 17 | 18 | learning_rate=0.001 19 | burn_in=1000 20 | max_batches = 500200 21 | policy=steps 22 | steps=400000,450000 23 | scales=.1,.1 24 | 25 | [convolutional] 26 | batch_normalize=1 27 | filters=16 28 | size=3 29 | stride=1 30 | pad=1 31 | activation=leaky 32 | 33 | [maxpool] 34 | size=2 35 | stride=2 36 | 37 | [convolutional] 38 | batch_normalize=1 39 | filters=32 40 | size=3 41 | stride=1 42 | pad=1 43 | activation=leaky 44 | 45 | [maxpool] 46 | size=2 47 | stride=2 48 | 49 | [convolutional] 50 | batch_normalize=1 51 | filters=64 52 | size=3 53 | stride=1 54 | pad=1 55 | activation=leaky 56 | 57 | [maxpool] 58 | size=2 59 | stride=2 60 | 61 | [convolutional] 62 | batch_normalize=1 63 | filters=128 64 | size=3 65 | stride=1 66 | pad=1 67 | activation=leaky 68 | 69 | [maxpool] 70 | size=2 71 | stride=2 72 | 73 | [convolutional] 74 | batch_normalize=1 75 | filters=256 76 | size=3 77 | stride=1 78 | pad=1 79 | activation=leaky 80 | 81 | [maxpool] 82 | size=2 83 | stride=2 84 | 85 | [convolutional] 86 | batch_normalize=1 87 | filters=512 88 | size=3 89 | stride=1 90 | pad=1 91 | activation=leaky 92 | 93 | [maxpool] 94 | size=2 95 | stride=1 96 | 97 | [convolutional] 98 | batch_normalize=1 99 | filters=1024 100 | size=3 101 | stride=1 102 | pad=1 103 | activation=leaky 104 | 105 | ########### 106 | 107 | [convolutional] 108 | batch_normalize=1 109 | filters=256 110 | size=1 111 | stride=1 112 | pad=1 113 | activation=leaky 114 | 115 | [convolutional] 116 | batch_normalize=1 117 | filters=512 118 | size=3 119 | stride=1 120 | pad=1 121 | activation=leaky 122 | 123 | [convolutional] 124 | size=1 125 | stride=1 126 | pad=1 127 | filters=255 128 | 
activation=linear 129 | 130 | 131 | 132 | [yolo] 133 | mask = 3,4,5 134 | anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 135 | classes=80 136 | num=6 137 | jitter=.3 138 | ignore_thresh = .7 139 | truth_thresh = 1 140 | random=1 141 | 142 | [route] 143 | layers = -4 144 | 145 | [convolutional] 146 | batch_normalize=1 147 | filters=128 148 | size=1 149 | stride=1 150 | pad=1 151 | activation=leaky 152 | 153 | [upsample] 154 | stride=2 155 | 156 | [route] 157 | layers = -1, 8 158 | 159 | [convolutional] 160 | batch_normalize=1 161 | filters=256 162 | size=3 163 | stride=1 164 | pad=1 165 | activation=leaky 166 | 167 | [convolutional] 168 | size=1 169 | stride=1 170 | pad=1 171 | filters=255 172 | activation=linear 173 | 174 | [yolo] 175 | mask = 0,1,2 176 | anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 177 | classes=80 178 | num=6 179 | jitter=.3 180 | ignore_thresh = .7 181 | truth_thresh = 1 182 | random=1 183 | -------------------------------------------------------------------------------- /modules/YoloModule/app/yolo/yolov3-tiny.weights: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/IntelligentEdgeHOL/cc58c584fcdab0115efe74885c100222d2184104/modules/YoloModule/app/yolo/yolov3-tiny.weights -------------------------------------------------------------------------------- /modules/YoloModule/build/requirements.txt: -------------------------------------------------------------------------------- 1 | #azure-iothub-device-client~=1.4.3 2 | #setuptools 3 | #numpy 4 | requests 5 | #opencv-contrib-python 6 | 7 | -------------------------------------------------------------------------------- /modules/YoloModule/module.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema-version": "0.0.1", 3 | "description": "", 4 | "image": { 5 | "repository": "$CONTAINER_REGISTRY_URL/yolomodule", 6 | "tag": { 7 | "version": "$CONTAINER_MODULE_VERSION", 8 | "platforms": { 9 | "arm32v7": "./Dockerfile.arm64v8" 10 | } 11 | }, 12 | "buildOptions": [] 13 | }, 14 | "language": "python" 15 | } --------------------------------------------------------------------------------
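
The sketches below do not ship with the repository; they are illustrative only and assume the module's own runtime environment (Python with cv2, the iothub_client SDK, and libdarknet.so available, as provided by the Docker images above).

VideoStream.py keeps only the newest frame in its queue so that slow inference never falls behind a live source. A minimal consumer, with a hypothetical RTSP URL standing in for a real camera or YouTube stream:

```
# Hypothetical consumer of VideoStream: the reader thread discards stale
# frames, so read() always returns something close to "now".
import time
from VideoStream import VideoStream

stream = VideoStream("rtsp://example.local/stream").start()  # illustrative URL
time.sleep(1.0)                      # let the reader thread fill the queue
for _ in range(100):                 # consume up to 100 frames, then stop
    if stream.more():                # queue currently holds a frame
        frame = stream.read()
        # ... hand the frame to inference / display here ...
stream.stop()
```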
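YoloInference.py drives the Darknet wrapper in two steps: performDetect(initOnly=True) loads the network and metadata into the module-level netMain/metaMain globals, then detect() is called once per frame. A condensed sketch of that flow, assuming the module's working directory (/app) so the yolo/ paths and libdarknet.so resolve, and with a file read standing in for a live frame:

```
import cv2
from darknet import darknet

# One-time initialisation: populates darknet.netMain and darknet.metaMain.
darknet.performDetect(configPath="yolo/yolov3-tiny.cfg",
                      weightPath="yolo/yolov3-tiny.weights",
                      metaPath="yolo/coco.data",
                      initOnly=True)

frame = cv2.imread("dog.jpg")        # any BGR numpy array, e.g. a VideoStream frame
detections = darknet.detect(darknet.netMain, darknet.metaMain, frame, 0.5)

for label, confidence, (cx, cy, w, h) in detections:
    # Darknet reports centre-based boxes; convert to a top-left corner,
    # exactly as YoloInference.runInference() does before drawing.
    x, y = int(cx - w / 2), int(cy - h / 2)
    print(str(label, "utf-8"), round(float(confidence), 2), (x, y, int(w), int(h)))
```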
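At runtime the module is reconfigured through its module twin rather than by redeploying: device_twin_callback() in main.py reads ConfidenceLevel, VerboseMode, Inference and VideoSource from the desired properties (handling both full and PARTIAL updates) and echoes the applied values back as reported properties. A sketch of a desired-properties patch those handlers accept; the values are illustrative, not project defaults:

```
# Illustrative desired-properties patch for the YoloModule module twin.
desired_properties = {
    "ConfidenceLevel": 0.6,                        # coerced with float() before use
    "VerboseMode": 0,                              # 0 disables verbose logging; anything else enables it
    "Inference": 1,                                # 0 pauses YOLO inference; anything else runs it
    "VideoSource": "rtsp://example.local/stream"   # illustrative URL; compared case-insensitively to the current source
}
```

Pushing such a patch from IoT Hub (for example through the VS Code Azure IoT tooling's module twin editor) typically arrives as a PARTIAL update, which the callback applies directly before calling device_twin_send_reported().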