├── Darknet_YOLOv3_Cigarette_Smoking_Detection.ipynb
├── Darknet_YOLOv3_Guns_Detection.ipynb
├── Darknet_YOLOv4_Google_Colab_(Firearm_Detection).ipynb
├── ImageColorizerColab.ipynb
├── README.md
├── VideoColorizerColab.ipynb
├── img
│   ├── BRICS.gif
│   ├── Guns.gif
│   ├── Smoking.gif
│   ├── colorize_photo.jpg
│   ├── filename.md
│   └── handgun_yolov4.gif
└── pyannote_video - Face Detection, Tracking & clustering in Videos.ipynb
/Darknet_YOLOv3_Cigarette_Smoking_Detection.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Darknet_YOLOv3_Cigarette_Smoking_Detection.ipynb",
7 | "version": "0.3.2",
8 | "provenance": [],
9 | "collapsed_sections": [],
10 | "include_colab_link": true
11 | },
12 | "kernelspec": {
13 | "display_name": "Python 3",
14 | "language": "python",
15 | "name": "python3"
16 | },
17 | "accelerator": "GPU"
18 | },
19 | "cells": [
20 | {
21 | "cell_type": "markdown",
22 | "metadata": {
23 | "id": "view-in-github",
24 | "colab_type": "text"
25 | },
26 | "source": [
27 | "
"
28 | ]
29 | },
30 | {
31 | "metadata": {
32 | "id": "CHhR2wyYizjO",
33 | "colab_type": "text"
34 | },
35 | "cell_type": "markdown",
36 | "source": [
37 | "Cigarette Smoking detection using YOLOv3 darknet\n",
38 | "---"
39 | ]
40 | },
41 | {
42 | "metadata": {
43 | "colab_type": "code",
44 | "id": "zuKrDkml1o8Y",
45 | "colab": {}
46 | },
47 | "cell_type": "code",
48 | "source": [
49 | "!apt update\n",
50 | "!apt upgrade -y\n",
51 | "!uname -m && cat /etc/*release\n",
52 | "!gcc --version\n",
53 | "!uname -r"
54 | ],
55 | "execution_count": 0,
56 | "outputs": []
57 | },
58 | {
59 | "metadata": {
60 | "colab_type": "code",
61 | "id": "CcSxk0nhr-Ct",
62 | "colab": {}
63 | },
64 | "cell_type": "code",
65 | "source": [
66 | "ls"
67 | ],
68 | "execution_count": 0,
69 | "outputs": []
70 | },
71 | {
72 | "metadata": {
73 | "colab_type": "text",
74 | "id": "HVe0hQzc83GY"
75 | },
76 | "cell_type": "markdown",
77 | "source": [
78 | "To access Google Drive Folder and Files"
79 | ]
80 | },
81 | {
82 | "metadata": {
83 | "colab_type": "code",
84 | "id": "8eSazWMO82eu",
85 | "colab": {}
86 | },
87 | "cell_type": "code",
88 | "source": [
89 | "# Load the Drive helper and mount\n",
90 | "from google.colab import drive\n",
91 | "\n",
92 | "# This will prompt for authorization.\n",
93 | "drive.mount('/content/drive')"
94 | ],
95 | "execution_count": 0,
96 | "outputs": []
97 | },
98 | {
99 | "metadata": {
100 | "colab_type": "code",
101 | "id": "b5ywvjkQ9BMn",
102 | "colab": {}
103 | },
104 | "cell_type": "code",
105 | "source": [
106 | "# After executing the cell above, Drive\n",
107 | "# files will be present in \"/content/drive/My Drive\".\n",
108 | "!ls -a \"/content/drive/My Drive/GColab/\""
109 | ],
110 | "execution_count": 0,
111 | "outputs": []
112 | },
113 | {
114 | "metadata": {
115 | "colab_type": "text",
116 | "id": "Gh91u1MqyDLO"
117 | },
118 | "cell_type": "markdown",
119 | "source": [
120 | "**Original Version of Darknet**"
121 | ]
122 | },
123 | {
124 | "metadata": {
125 | "colab_type": "code",
126 | "id": "ojeQnomQue0M",
127 | "colab": {}
128 | },
129 | "cell_type": "code",
130 | "source": [
131 | "%rm -r darknet\n",
132 | "!git clone https://github.com/pjreddie/darknet\n",
133 | "%cd darknet/"
134 | ],
135 | "execution_count": 0,
136 | "outputs": []
137 | },
138 | {
139 | "metadata": {
140 | "colab_type": "text",
141 | "id": "Wd86EGSsx6RA"
142 | },
143 | "cell_type": "markdown",
144 | "source": [
145 | "**Modified Version of Darknet**"
146 | ]
147 | },
148 | {
149 | "metadata": {
150 | "colab_type": "code",
151 | "id": "AxsXDMvXxvSW",
152 | "outputId": "59adc515-a893-4702-89f8-b7fb2205d587",
153 | "colab": {
154 | "base_uri": "https://localhost:8080/",
155 | "height": 153
156 | }
157 | },
158 | "cell_type": "code",
159 | "source": [
160 | "#%rm -r darknet\n",
161 | "!git clone https://github.com/AlexeyAB/darknet/\n",
162 | "%cd darknet/"
163 | ],
164 | "execution_count": 0,
165 | "outputs": [
166 | {
167 | "output_type": "stream",
168 | "text": [
169 | "Cloning into 'darknet'...\n",
170 | "remote: Enumerating objects: 35, done.\u001b[K\n",
171 | "remote: Counting objects: 2% (1/35) \u001b[K\rremote: Counting objects: 5% (2/35) \u001b[K\rremote: Counting objects: 8% (3/35) \u001b[K\rremote: Counting objects: 11% (4/35) \u001b[K\rremote: Counting objects: 14% (5/35) \u001b[K\rremote: Counting objects: 17% (6/35) \u001b[K\rremote: Counting objects: 20% (7/35) \u001b[K\rremote: Counting objects: 22% (8/35) \u001b[K\rremote: Counting objects: 25% (9/35) \u001b[K\rremote: Counting objects: 28% (10/35) \u001b[K\rremote: Counting objects: 31% (11/35) \u001b[K\rremote: Counting objects: 34% (12/35) \u001b[K\rremote: Counting objects: 37% (13/35) \u001b[K\rremote: Counting objects: 40% (14/35) \u001b[K\rremote: Counting objects: 42% (15/35) \u001b[K\rremote: Counting objects: 45% (16/35) \u001b[K\rremote: Counting objects: 48% (17/35) \u001b[K\rremote: Counting objects: 51% (18/35) \u001b[K\rremote: Counting objects: 54% (19/35) \u001b[K\rremote: Counting objects: 57% (20/35) \u001b[K\rremote: Counting objects: 60% (21/35) \u001b[K\rremote: Counting objects: 62% (22/35) \u001b[K\rremote: Counting objects: 65% (23/35) \u001b[K\rremote: Counting objects: 68% (24/35) \u001b[K\rremote: Counting objects: 71% (25/35) \u001b[K\rremote: Counting objects: 74% (26/35) \u001b[K\rremote: Counting objects: 77% (27/35) \u001b[K\rremote: Counting objects: 80% (28/35) \u001b[K\rremote: Counting objects: 82% (29/35) \u001b[K\rremote: Counting objects: 85% (30/35) \u001b[K\rremote: Counting objects: 88% (31/35) \u001b[K\rremote: Counting objects: 91% (32/35) \u001b[K\rremote: Counting objects: 94% (33/35) \u001b[K\rremote: Counting objects: 97% (34/35) \u001b[K\rremote: Counting objects: 100% (35/35) \u001b[K\rremote: Counting objects: 100% (35/35), done.\u001b[K\n",
172 | "remote: Compressing objects: 100% (19/19), done.\u001b[K\n",
173 | "remote: Total 8238 (delta 15), reused 27 (delta 14), pack-reused 8203\u001b[K\n",
174 | "Receiving objects: 100% (8238/8238), 8.54 MiB | 16.57 MiB/s, done.\n",
175 | "Resolving deltas: 100% (5527/5527), done.\n",
176 | "/content/darknet\n"
177 | ],
178 | "name": "stdout"
179 | }
180 | ]
181 | },
182 | {
183 | "metadata": {
184 | "colab_type": "code",
185 | "id": "cIltg5kZtiVm",
186 | "outputId": "2ac0b6ef-2774-4c33-9a3f-7580e1d8ad22",
187 | "colab": {
188 | "base_uri": "https://localhost:8080/",
189 | "height": 306
190 | }
191 | },
192 | "cell_type": "code",
193 | "source": [
194 | "!apt install libopencv-dev python-opencv ffmpeg"
195 | ],
196 | "execution_count": 0,
197 | "outputs": [
198 | {
199 | "output_type": "stream",
200 | "text": [
201 | "Reading package lists... Done\n",
202 | "Building dependency tree \n",
203 | "Reading state information... Done\n",
204 | "ffmpeg is already the newest version (7:3.4.4-0ubuntu0.18.04.1).\n",
205 | "libopencv-dev is already the newest version (3.2.0+dfsg-4ubuntu0.1).\n",
206 | "The following NEW packages will be installed:\n",
207 | " python-opencv\n",
208 | "0 upgraded, 1 newly installed, 0 to remove and 3 not upgraded.\n",
209 | "Need to get 535 kB of archives.\n",
210 | "After this operation, 2,944 kB of additional disk space will be used.\n",
211 | "Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 python-opencv amd64 3.2.0+dfsg-4ubuntu0.1 [535 kB]\n",
212 | "Fetched 535 kB in 1s (466 kB/s)\n",
213 | "Selecting previously unselected package python-opencv.\n",
214 | "(Reading database ... 110852 files and directories currently installed.)\n",
215 | "Preparing to unpack .../python-opencv_3.2.0+dfsg-4ubuntu0.1_amd64.deb ...\n",
216 | "Unpacking python-opencv (3.2.0+dfsg-4ubuntu0.1) ...\n",
217 | "Setting up python-opencv (3.2.0+dfsg-4ubuntu0.1) ...\n"
218 | ],
219 | "name": "stdout"
220 | }
221 | ]
222 | },
223 | {
224 | "metadata": {
225 | "colab_type": "text",
226 | "id": "FHSTeIFJvEl8"
227 | },
228 | "cell_type": "markdown",
229 | "source": [
230 | "Changing the variables to include OpenCV and GPU in the Makefile"
231 | ]
232 | },
233 | {
234 | "metadata": {
235 | "colab_type": "code",
236 | "id": "INUiWQphvFF_",
237 | "colab": {}
238 | },
239 | "cell_type": "code",
240 | "source": [
241 | "!sed -i 's/OPENCV=0/OPENCV=1/g' Makefile\n",
242 | "!sed -i 's/GPU=0/GPU=1/g' Makefile\n",
243 | "#!sed -i 's/CUDNN=0/CUDNN=1/g' Makefile\n",
244 | "%pycat Makefile"
245 | ],
246 | "execution_count": 0,
247 | "outputs": []
248 | },
249 | {
250 | "metadata": {
251 | "colab_type": "code",
252 | "id": "kQDhvwpxWRs1",
253 | "colab": {}
254 | },
255 | "cell_type": "code",
256 | "source": [
257 | "!make"
258 | ],
259 | "execution_count": 0,
260 | "outputs": []
261 | },
262 | {
263 | "metadata": {
264 | "colab_type": "text",
265 | "id": "aCLysbFOL2UF"
266 | },
267 | "cell_type": "markdown",
268 | "source": [
269 | "**How to train (to detect your custom objects):**\n",
270 | "---"
271 | ]
272 | },
273 | {
274 | "metadata": {
275 | "colab_type": "text",
276 | "id": "Uk5TTCBWL5Ea"
277 | },
278 | "cell_type": "markdown",
279 | "source": [
280 | "**Training Yolo v3:**\n",
281 | "\n",
282 | "1.Create file yolo-obj.cfg with the same content as in yolov3.cfg (or copy yolov3.cfg to yolo-obj.cfg) and:\n",
283 | "\n",
284 | "* change line batch to batch=64\n",
285 | "* change line subdivisions to subdivisions=8\n",
286 | "* change line classes=80 to your number of objects in each of 3 [yolo]-layers:\n",
287 | " * yolov3.cfg#L610\n",
288 | " * yolov3.cfg#L696\n",
289 | " * yolov3.cfg#L783\n",
290 | "* change [filters=255] to filters=(classes + 5)x3 in the 3 [convolutional] before each [yolo] layer\n",
291 | " * yolov3.cfg#L603\n",
292 | " * yolov3.cfg#L689\n",
293 | " * yolov3.cfg#L776\n",
294 | " \n",
295 | "So if classes=1 then should be filters=18. If classes=2 then write filters=21."
296 | ]
297 | },
298 | {
299 | "metadata": {
300 | "colab_type": "code",
301 | "id": "ePe4Qu8QN0Ii",
302 | "colab": {}
303 | },
304 | "cell_type": "code",
305 | "source": [
306 | "%cp cfg/yolov3.cfg cfg/yolo-obj.cfg\n",
307 | "!sed -i 's/batch=1/batch=64/g' cfg/yolo-obj.cfg\n",
308 | "!sed -i 's/subdivisions=1/subdivisions=32/g' cfg/yolo-obj.cfg\n",
309 | "!sed -i 's/classes=80/classes=1/g' cfg/yolo-obj.cfg\n",
310 | "!sed -i 's/filters=255/filters=18/g' cfg/yolo-obj.cfg\n",
311 | "!sed -i 's/width=416/width=608/g' cfg/yolo-obj.cfg\n",
312 | "!sed -i 's/height=416/height=608/g' cfg/yolo-obj.cfg"
313 | ],
314 | "execution_count": 0,
315 | "outputs": []
316 | },
317 | {
318 | "metadata": {
319 | "colab_type": "code",
320 | "id": "hE5EGwcSQYbO",
321 | "colab": {}
322 | },
323 | "cell_type": "code",
324 | "source": [
325 | "%pycat cfg/yolo-obj.cfg"
326 | ],
327 | "execution_count": 0,
328 | "outputs": []
329 | },
330 | {
331 | "metadata": {
332 | "colab_type": "text",
333 | "id": "0Tum3Y4HRoqx"
334 | },
335 | "cell_type": "markdown",
336 | "source": [
337 | "2.Create file obj.names in the directory `build\\darknet\\x64\\data\\` with objects names - each in new line\n",
338 | "\n"
339 | ]
340 | },
341 | {
342 | "metadata": {
343 | "colab_type": "code",
344 | "id": "L5M9X03jR0Vl",
345 | "colab": {}
346 | },
347 | "cell_type": "code",
348 | "source": [
349 | "all_classes = \"\"\"Smoking\n",
350 | "\"\"\"\n",
351 | "\n",
352 | "file = \"\"\"text_file = open(\"build/darknet/x64/data/obj.names\", \"w\");text_file.write(all_classes);text_file.close()\"\"\" \n",
353 | "\n",
354 | "exec(file)\n",
355 | "%pycat build/darknet/x64/data/obj.names"
356 | ],
357 | "execution_count": 0,
358 | "outputs": []
359 | },
360 | {
361 | "metadata": {
362 | "colab_type": "text",
363 | "id": "iBAEunM0Ry7b"
364 | },
365 | "cell_type": "markdown",
366 | "source": [
367 | "3.Create file obj.data in the directory `build\\darknet\\x64\\data\\` containing (where classes = number of objects):"
368 | ]
369 | },
370 | {
371 | "metadata": {
372 | "colab_type": "code",
373 | "id": "Bqbe2_uFUzFU",
374 | "colab": {}
375 | },
376 | "cell_type": "code",
377 | "source": [
378 | "obj_data = \"\"\"classes= 1\n",
379 | "train = build/darknet/x64/data/train.txt\n",
380 | "valid = build/darknet/x64/data/valid.txt\n",
381 | "names = build/darknet/x64/data/obj.names\n",
382 | "backup = build/darknet/x64/backup/\n",
383 | "\"\"\"\n",
384 | "\n",
385 | "file = \"\"\"text_file = open(\"build/darknet/x64/data/obj.data\", \"w\");text_file.write(obj_data);text_file.close()\"\"\" \n",
386 | "\n",
387 | "exec(file)\n",
388 | "%pycat build/darknet/x64/data/obj.data"
389 | ],
390 | "execution_count": 0,
391 | "outputs": []
392 | },
393 | {
394 | "metadata": {
395 | "colab_type": "text",
396 | "id": "FUy55xwhUyVZ"
397 | },
398 | "cell_type": "markdown",
399 | "source": [
400 | "4.Put image-files (.jpg) of your objects in the directory build/darknet/x64/data/obj/\n"
401 | ]
402 | },
403 | {
404 | "metadata": {
405 | "colab_type": "code",
406 | "id": "5TRsi9sS178e",
407 | "colab": {}
408 | },
409 | "cell_type": "code",
410 | "source": [
411 | "#%mkdir build/darknet/x64/data/obj\n",
412 | "%cp -r \"/content/drive/My Drive/GColab/Smoking/.\" build/darknet/x64/data/obj/"
413 | ],
414 | "execution_count": 0,
415 | "outputs": []
416 | },
417 | {
418 | "metadata": {
419 | "colab_type": "code",
420 | "id": "_pEomLo6yvVT",
421 | "colab": {}
422 | },
423 | "cell_type": "code",
424 | "source": [
425 | "%ls -1 build/darknet/x64/data/obj/*.jpg | wc -l\n",
426 | "%ls -1 build/darknet/x64/data/obj/*.txt | wc -l"
427 | ],
428 | "execution_count": 0,
429 | "outputs": []
430 | },
431 | {
432 | "metadata": {
433 | "colab_type": "text",
434 | "id": "hiusyk1m_VEe"
435 | },
436 | "cell_type": "markdown",
437 | "source": [
438 | "5.You should label each object on images from your dataset. Use this visual GUI-software for marking bounded boxes of objects and generating annotation files for Yolo v2 & v3: \n",
439 | "\n",
440 | "---\n",
441 | "**LabelImg**\n",
442 | " \n",
443 | "LabelImg is a graphical image annotation tool.: \n",
444 | "https://github.com/tzutalin/labelImg\n",
445 | "\n",
446 | "\n",
447 | "---\n",
448 | "\n",
449 | "**Yolo_mark**\n",
450 | "\n",
451 | "Windows & Linux GUI for marking bounded boxes of objects in images for training Yolo v3 and v2\n",
452 | "\n",
453 | "https://github.com/AlexeyAB/Yolo_mark\n",
454 | "\n",
455 | "It will create `.txt`-file for each `.jpg`-image-file - in the same directory and with the same name, but with `.txt`-extension, and put to file: object number and object coordinates on this image, for each object in new line: `<object-class> <x_center> <y_center> <width> <height>`\n",
456 | "\n",
457 | "Where:\n",
458 | "\n",
459 | "\n",
460 | "* `<object-class>` - integer object number from 0 to (classes-1)\n",
461 | "* `<x_center> <y_center> <width> <height>` - float values relative to width and height of image, it can be equal from (0.0 to 1.0]\n",
462 | "* for example: `<x> = <absolute_x> / <image_width>` or `<height> = <absolute_height> / <image_height>`\n",
463 | "* attention: `<x_center> <y_center>` - are center of rectangle (are not top-left corner)\n",
464 | "\n",
465 | "\n",
466 | "For example, for img1.jpg a file img1.txt will be created, containing:\n",
467 | "\n",
468 | "\n",
469 | "```\n",
470 | "1 0.716797 0.395833 0.216406 0.147222\n",
471 | "0 0.687109 0.379167 0.255469 0.158333\n",
472 | "1 0.420312 0.395833 0.140625 0.166667\n",
473 | "```\n",
474 | "\n"
475 | ]
476 | },
477 | {
478 | "metadata": {
479 | "colab_type": "text",
480 | "id": "P0FXqQ7Gds4A"
481 | },
482 | "cell_type": "markdown",
483 | "source": [
484 | "6.Create file train.txt and valid.txt in directory `build\\darknet\\x64\\data\\` with filenames of your images, each filename in new line, with path relative to darknet, for example containing:\n"
485 | ]
486 | },
487 | {
488 | "metadata": {
489 | "colab_type": "code",
490 | "id": "EGq614FhX6K8",
491 | "colab": {}
492 | },
493 | "cell_type": "code",
494 | "source": [
495 | "import os, fnmatch\n",
496 | "import numpy as np\n",
497 | "\n",
498 | "train_file = open(\"build/darknet/x64/data/train.txt\", \"w\")\n",
499 | "valid_file = open(\"build/darknet/x64/data/valid.txt\", \"w\")\n",
500 | "listOfFiles = os.listdir('build/darknet/x64/data/obj/') \n",
501 | "pattern = \"*.jpg\" \n",
502 | "for f_name in listOfFiles: \n",
503 | " if fnmatch.fnmatch(f_name, pattern):\n",
504 | " if np.random.rand(1) < 0.8:\n",
505 | " train_file.write(\"build/darknet/x64/data/obj/\"+f_name+\"\\n\")\n",
506 | " #print (\"data/obj/\"+f_name)\n",
507 | " else:\n",
508 | " valid_file.write(\"build/darknet/x64/data/obj/\"+f_name+\"\\n\") \n",
509 | " \n",
510 | "train_file.close()\n",
511 | "valid_file.close()"
512 | ],
513 | "execution_count": 0,
514 | "outputs": []
515 | },
516 | {
517 | "metadata": {
518 | "colab_type": "code",
519 | "id": "ml2I-ABUTQ6m",
520 | "outputId": "1185d003-516e-4e02-dd95-0679919138bc",
521 | "colab": {
522 | "base_uri": "https://localhost:8080/",
523 | "height": 51
524 | }
525 | },
526 | "cell_type": "code",
527 | "source": [
528 | "#Count number of files \n",
529 | "!wc -l build/darknet/x64/data/train.txt\n",
530 | "!wc -l build/darknet/x64/data/valid.txt"
531 | ],
532 | "execution_count": 0,
533 | "outputs": [
534 | {
535 | "output_type": "stream",
536 | "text": [
537 | "556 build/darknet/x64/data/train.txt\n",
538 | "137 build/darknet/x64/data/valid.txt\n"
539 | ],
540 | "name": "stdout"
541 | }
542 | ]
543 | },
544 | {
545 | "metadata": {
546 | "colab_type": "code",
547 | "id": "kj-pb3-UqSwd",
548 | "colab": {}
549 | },
550 | "cell_type": "code",
551 | "source": [
552 | "%pycat build/darknet/x64/data/valid.txt"
553 | ],
554 | "execution_count": 0,
555 | "outputs": []
556 | },
557 | {
558 | "metadata": {
559 | "colab_type": "text",
560 | "id": "K8pPboneqydm"
561 | },
562 | "cell_type": "markdown",
563 | "source": [
564 | "7.Download pre-trained weights for the convolutional layers (154 MB): https://pjreddie.com/media/files/darknet53.conv.74 and put to the directory `build\\darknet\\x64`"
565 | ]
566 | },
567 | {
568 | "metadata": {
569 | "colab_type": "code",
570 | "id": "h_H7qCo9qylB",
571 | "outputId": "fd1b90c2-d246-4183-ca2c-1b3bde1d434b",
572 | "colab": {
573 | "base_uri": "https://localhost:8080/",
574 | "height": 204
575 | }
576 | },
577 | "cell_type": "code",
578 | "source": [
579 | "!wget -P build/darknet/x64/ https://pjreddie.com/media/files/darknet53.conv.74\n",
580 | "#%ls build/darknet/x64/"
581 | ],
582 | "execution_count": 0,
583 | "outputs": [
584 | {
585 | "output_type": "stream",
586 | "text": [
587 | "--2019-01-10 04:14:23-- https://pjreddie.com/media/files/darknet53.conv.74\n",
588 | "Resolving pjreddie.com (pjreddie.com)... 128.208.3.39\n",
589 | "Connecting to pjreddie.com (pjreddie.com)|128.208.3.39|:443... connected.\n",
590 | "HTTP request sent, awaiting response... 200 OK\n",
591 | "Length: 162482580 (155M) [application/octet-stream]\n",
592 | "Saving to: ‘build/darknet/x64/darknet53.conv.74’\n",
593 | "\n",
594 | "darknet53.conv.74 100%[===================>] 154.96M 63.7MB/s in 2.4s \n",
595 | "\n",
596 | "2019-01-10 04:14:26 (63.7 MB/s) - ‘build/darknet/x64/darknet53.conv.74’ saved [162482580/162482580]\n",
597 | "\n"
598 | ],
599 | "name": "stdout"
600 | }
601 | ]
602 | },
603 | {
604 | "metadata": {
605 | "colab_type": "text",
606 | "id": "9MTqLH85DfV9"
607 | },
608 | "cell_type": "markdown",
609 | "source": [
610 | "8.training by using the command line:\n",
611 | "\n",
612 | "\n",
613 | "```\n",
614 | "./darknet detector train build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/darknet53.conv.74 -dont_show\n",
615 | "```\n",
616 | "\n",
617 | "8.1. For training with mAP (mean average precisions) calculation for each 4 Epochs (set valid=valid.txt or train.txt in obj.data file) and run: \n",
618 | "\n",
619 | "\n",
620 | "\n",
621 | "```\n",
622 | "./darknet detector train build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/darknet53.conv.74 -dont_show -map\n",
623 | "```\n",
624 | "\n"
625 | ]
626 | },
627 | {
628 | "metadata": {
629 | "colab_type": "code",
630 | "id": "C5VW-CkMsTUp",
631 | "colab": {}
632 | },
633 | "cell_type": "code",
634 | "source": [
635 | "!./darknet detector train build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/darknet53.conv.74 -dont_show"
636 | ],
637 | "execution_count": 0,
638 | "outputs": []
639 | },
640 | {
641 | "metadata": {
642 | "colab_type": "text",
643 | "id": "4f_TRrcCEMtN"
644 | },
645 | "cell_type": "markdown",
646 | "source": [
647 | "9.After training is complete - get result yolo-obj_final.weights from path `build\\darknet\\x64\\backup\\`\n",
648 | "\n",
649 | "\n",
650 | "* After each 100 iterations you can stop and later start training from this point. For example, after 2000 iterations you can stop training, and later just copy yolo-obj_2000.weights from `build\\darknet\\x64\\backup\\` to` build\\darknet\\x64\\ `and start training using: \n",
651 | "\n",
652 | "\n",
653 | "```\n",
654 | "./darknet detector train data/obj.data yolo-obj.cfg yolo-obj_2000.weights\n",
655 | "```\n",
656 | "\n",
657 | "\n",
658 | "\n"
659 | ]
660 | },
661 | {
662 | "metadata": {
663 | "colab_type": "code",
664 | "id": "9GHn2iDhgCum",
665 | "colab": {}
666 | },
667 | "cell_type": "code",
668 | "source": [
669 | "!./darknet detector train build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/yolo-obj_1000.weights -dont_show"
670 | ],
671 | "execution_count": 0,
672 | "outputs": []
673 | },
674 | {
675 | "metadata": {
676 | "colab_type": "text",
677 | "id": "p2eWyFXYG06P"
678 | },
679 | "cell_type": "markdown",
680 | "source": [
681 | "Custom object detection:\n",
682 | "---\n",
683 | "Example of Cigarette Smoking detection:\n",
684 | "\n",
685 | "**Image:**\n",
686 | "\n",
687 | "```\n",
688 | "./darknet detector test build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/smoking_1000it.weights -thresh 0.20 data/cigarette.jpg\n",
689 | "```\n",
690 | "\n",
691 | "\n",
692 | "**Video:**\n",
693 | "\n",
694 | "```\n",
695 | "./darknet detector demo build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/smoking_1000it.weights -thresh 0.20 -dont_show Smoking.mp4 -out_filename Smoking_output.mp4\n",
696 | "```\n",
697 | "\n"
698 | ]
699 | },
700 | {
701 | "metadata": {
702 | "colab_type": "code",
703 | "id": "Wu3cwRauF2Kv",
704 | "colab": {}
705 | },
706 | "cell_type": "code",
707 | "source": [
708 | "!./darknet detector demo build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/smoking_800it_1avgLoss.weights -thresh 0.20 -dont_show Smoking.mp4 -out_filename Smoking_20%.mp4"
709 | ],
710 | "execution_count": 0,
711 | "outputs": []
712 | },
713 | {
714 | "metadata": {
715 | "colab_type": "code",
716 | "id": "EDFD5OyR82mL",
717 | "colab": {}
718 | },
719 | "cell_type": "code",
720 | "source": [
721 | "ls -lh *.mp4"
722 | ],
723 | "execution_count": 0,
724 | "outputs": []
725 | },
726 | {
727 | "metadata": {
728 | "id": "-Fnf5QO2ftfN",
729 | "colab_type": "text"
730 | },
731 | "cell_type": "markdown",
732 | "source": [
733 | "**Playing videos on Google Colab**"
734 | ]
735 | },
736 | {
737 | "metadata": {
738 | "colab_type": "code",
739 | "id": "qT9TFs4ikb1L",
740 | "colab": {}
741 | },
742 | "cell_type": "code",
743 | "source": [
744 | "import io\n",
745 | "import base64\n",
746 | "from IPython.display import HTML\n",
747 | "video = io.open('Smoking_20%.mp4', 'r+b').read()\n",
748 | "encoded = base64.b64encode(video)\n",
749 | "HTML(data='''<video width=400 controls><source src=\"data:video/mp4;base64,{0}\" type=\"video/mp4\" /></video>'''.format(encoded.decode('ascii')))"
750 | ],
751 | "execution_count": 0,
752 | "outputs": []
753 | },
754 | {
755 | "metadata": {
756 | "id": "x0iH6F4viDHq",
757 | "colab_type": "text"
758 | },
759 | "cell_type": "markdown",
760 | "source": [
761 | "Extra Tutorial\n",
762 | "---"
763 | ]
764 | },
765 | {
766 | "metadata": {
767 | "id": "0WV3v9fmdM9u",
768 | "colab_type": "text"
769 | },
770 | "cell_type": "markdown",
771 | "source": [
772 | "Copying data from Google Colab to Google Drive"
773 | ]
774 | },
775 | {
776 | "metadata": {
777 | "colab_type": "code",
778 | "id": "WlfyY-YBfZ0E",
779 | "colab": {}
780 | },
781 | "cell_type": "code",
782 | "source": [
783 | "%cp -r build/darknet/x64/yolo-obj_1500up_05avgLoss.weights \"/content/drive/My Drive/GColab/\"\n",
784 | "%cp -r Smoking_20%.mp4 \"/content/drive/My Drive/GColab/\""
785 | ],
786 | "execution_count": 0,
787 | "outputs": []
788 | },
789 | {
790 | "metadata": {
791 | "id": "xLvR5EI5dZd-",
792 | "colab_type": "text"
793 | },
794 | "cell_type": "markdown",
795 | "source": [
796 | "Downloading the data from Colab"
797 | ]
798 | },
799 | {
800 | "metadata": {
801 | "id": "IhTDrB9odYk_",
802 | "colab_type": "code",
803 | "colab": {}
804 | },
805 | "cell_type": "code",
806 | "source": [
807 | "from google.colab import files\n",
808 | "files.download('build/darknet/x64/yolo-obj_1500up_05avgLoss.weights')"
809 | ],
810 | "execution_count": 0,
811 | "outputs": []
812 | },
813 | {
814 | "metadata": {
815 | "id": "oFhXKJyidnUr",
816 | "colab_type": "text"
817 | },
818 | "cell_type": "markdown",
819 | "source": [
820 | "Copying data from Google Drive to Google Colab"
821 | ]
822 | },
823 | {
824 | "metadata": {
825 | "colab_type": "code",
826 | "id": "AvorXG2D8v9a",
827 | "colab": {}
828 | },
829 | "cell_type": "code",
830 | "source": [
831 | "%cp -r \"/content/drive/My Drive/GColab/yolo-obj_1500up_05avgLoss.weights\" build/darknet/x64/\n",
832 | "%cp -r \"/content/drive/My Drive/GColab/SmokingDEMO.mp4\" ."
833 | ],
834 | "execution_count": 0,
835 | "outputs": []
836 | },
837 | {
838 | "metadata": {
839 | "id": "o3Yp1VgeAvX1",
840 | "colab_type": "text"
841 | },
842 | "cell_type": "markdown",
843 | "source": [
844 | "Youtube Link: \n",
845 | "---\n",
846 | "https://youtu.be/vEnQIptZzyI"
847 | ]
848 | }
849 | ]
850 | }
--------------------------------------------------------------------------------
/Darknet_YOLOv3_Guns_Detection.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Darknet_YOLOv3_Guns_Detection.ipynb",
7 | "version": "0.3.2",
8 | "provenance": [],
9 | "collapsed_sections": [],
10 | "include_colab_link": true
11 | },
12 | "kernelspec": {
13 | "display_name": "Python 3",
14 | "language": "python",
15 | "name": "python3"
16 | },
17 | "accelerator": "GPU"
18 | },
19 | "cells": [
20 | {
21 | "cell_type": "markdown",
22 | "metadata": {
23 | "id": "view-in-github",
24 | "colab_type": "text"
25 | },
26 | "source": [
27 | "
"
28 | ]
29 | },
30 | {
31 | "metadata": {
32 | "id": "CHhR2wyYizjO",
33 | "colab_type": "text"
34 | },
35 | "cell_type": "markdown",
36 | "source": [
37 | "Guns detection using YOLOv3 darknet\n",
38 | "---"
39 | ]
40 | },
41 | {
42 | "metadata": {
43 | "colab_type": "code",
44 | "id": "zuKrDkml1o8Y",
45 | "colab": {}
46 | },
47 | "cell_type": "code",
48 | "source": [
49 | "!apt update\n",
50 | "!apt upgrade -y\n",
51 | "!uname -m && cat /etc/*release\n",
52 | "!gcc --version\n",
53 | "!uname -r"
54 | ],
55 | "execution_count": 0,
56 | "outputs": []
57 | },
58 | {
59 | "metadata": {
60 | "colab_type": "code",
61 | "id": "CcSxk0nhr-Ct",
62 | "colab": {}
63 | },
64 | "cell_type": "code",
65 | "source": [
66 | "ls"
67 | ],
68 | "execution_count": 0,
69 | "outputs": []
70 | },
71 | {
72 | "metadata": {
73 | "colab_type": "text",
74 | "id": "HVe0hQzc83GY"
75 | },
76 | "cell_type": "markdown",
77 | "source": [
78 | "To access Google Drive Folder and Files"
79 | ]
80 | },
81 | {
82 | "metadata": {
83 | "colab_type": "code",
84 | "id": "8eSazWMO82eu",
85 | "colab": {}
86 | },
87 | "cell_type": "code",
88 | "source": [
89 | "# Load the Drive helper and mount\n",
90 | "from google.colab import drive\n",
91 | "\n",
92 | "# This will prompt for authorization.\n",
93 | "drive.mount('/content/drive')"
94 | ],
95 | "execution_count": 0,
96 | "outputs": []
97 | },
98 | {
99 | "metadata": {
100 | "colab_type": "code",
101 | "id": "b5ywvjkQ9BMn",
102 | "colab": {}
103 | },
104 | "cell_type": "code",
105 | "source": [
106 | "# After executing the cell above, Drive\n",
107 | "# files will be present in \"/content/drive/My Drive\".\n",
108 | "!ls -a \"/content/drive/My Drive/GColab/\""
109 | ],
110 | "execution_count": 0,
111 | "outputs": []
112 | },
113 | {
114 | "metadata": {
115 | "colab_type": "text",
116 | "id": "Gh91u1MqyDLO"
117 | },
118 | "cell_type": "markdown",
119 | "source": [
120 | "**Original Version of Darknet**"
121 | ]
122 | },
123 | {
124 | "metadata": {
125 | "colab_type": "code",
126 | "id": "ojeQnomQue0M",
127 | "colab": {}
128 | },
129 | "cell_type": "code",
130 | "source": [
131 | "%rm -r darknet\n",
132 | "!git clone https://github.com/pjreddie/darknet\n",
133 | "%cd darknet/"
134 | ],
135 | "execution_count": 0,
136 | "outputs": []
137 | },
138 | {
139 | "metadata": {
140 | "colab_type": "text",
141 | "id": "Wd86EGSsx6RA"
142 | },
143 | "cell_type": "markdown",
144 | "source": [
145 | "**Modified Version of Darknet**"
146 | ]
147 | },
148 | {
149 | "metadata": {
150 | "colab_type": "code",
151 | "id": "AxsXDMvXxvSW",
152 | "outputId": "59adc515-a893-4702-89f8-b7fb2205d587",
153 | "colab": {
154 | "base_uri": "https://localhost:8080/",
155 | "height": 153
156 | }
157 | },
158 | "cell_type": "code",
159 | "source": [
160 | "#%rm -r darknet\n",
161 | "!git clone https://github.com/AlexeyAB/darknet/\n",
162 | "%cd darknet/"
163 | ],
164 | "execution_count": 0,
165 | "outputs": [
166 | {
167 | "output_type": "stream",
168 | "text": [
169 | "Cloning into 'darknet'...\n",
170 | "remote: Enumerating objects: 35, done.\u001b[K\n",
171 | "remote: Counting objects: 2% (1/35) \u001b[K\rremote: Counting objects: 5% (2/35) \u001b[K\rremote: Counting objects: 8% (3/35) \u001b[K\rremote: Counting objects: 11% (4/35) \u001b[K\rremote: Counting objects: 14% (5/35) \u001b[K\rremote: Counting objects: 17% (6/35) \u001b[K\rremote: Counting objects: 20% (7/35) \u001b[K\rremote: Counting objects: 22% (8/35) \u001b[K\rremote: Counting objects: 25% (9/35) \u001b[K\rremote: Counting objects: 28% (10/35) \u001b[K\rremote: Counting objects: 31% (11/35) \u001b[K\rremote: Counting objects: 34% (12/35) \u001b[K\rremote: Counting objects: 37% (13/35) \u001b[K\rremote: Counting objects: 40% (14/35) \u001b[K\rremote: Counting objects: 42% (15/35) \u001b[K\rremote: Counting objects: 45% (16/35) \u001b[K\rremote: Counting objects: 48% (17/35) \u001b[K\rremote: Counting objects: 51% (18/35) \u001b[K\rremote: Counting objects: 54% (19/35) \u001b[K\rremote: Counting objects: 57% (20/35) \u001b[K\rremote: Counting objects: 60% (21/35) \u001b[K\rremote: Counting objects: 62% (22/35) \u001b[K\rremote: Counting objects: 65% (23/35) \u001b[K\rremote: Counting objects: 68% (24/35) \u001b[K\rremote: Counting objects: 71% (25/35) \u001b[K\rremote: Counting objects: 74% (26/35) \u001b[K\rremote: Counting objects: 77% (27/35) \u001b[K\rremote: Counting objects: 80% (28/35) \u001b[K\rremote: Counting objects: 82% (29/35) \u001b[K\rremote: Counting objects: 85% (30/35) \u001b[K\rremote: Counting objects: 88% (31/35) \u001b[K\rremote: Counting objects: 91% (32/35) \u001b[K\rremote: Counting objects: 94% (33/35) \u001b[K\rremote: Counting objects: 97% (34/35) \u001b[K\rremote: Counting objects: 100% (35/35) \u001b[K\rremote: Counting objects: 100% (35/35), done.\u001b[K\n",
172 | "remote: Compressing objects: 100% (19/19), done.\u001b[K\n",
173 | "remote: Total 8238 (delta 15), reused 27 (delta 14), pack-reused 8203\u001b[K\n",
174 | "Receiving objects: 100% (8238/8238), 8.54 MiB | 16.57 MiB/s, done.\n",
175 | "Resolving deltas: 100% (5527/5527), done.\n",
176 | "/content/darknet\n"
177 | ],
178 | "name": "stdout"
179 | }
180 | ]
181 | },
182 | {
183 | "metadata": {
184 | "colab_type": "code",
185 | "id": "cIltg5kZtiVm",
186 | "outputId": "2ac0b6ef-2774-4c33-9a3f-7580e1d8ad22",
187 | "colab": {
188 | "base_uri": "https://localhost:8080/",
189 | "height": 306
190 | }
191 | },
192 | "cell_type": "code",
193 | "source": [
194 | "!apt install libopencv-dev python-opencv ffmpeg"
195 | ],
196 | "execution_count": 0,
197 | "outputs": [
198 | {
199 | "output_type": "stream",
200 | "text": [
201 | "Reading package lists... Done\n",
202 | "Building dependency tree \n",
203 | "Reading state information... Done\n",
204 | "ffmpeg is already the newest version (7:3.4.4-0ubuntu0.18.04.1).\n",
205 | "libopencv-dev is already the newest version (3.2.0+dfsg-4ubuntu0.1).\n",
206 | "The following NEW packages will be installed:\n",
207 | " python-opencv\n",
208 | "0 upgraded, 1 newly installed, 0 to remove and 3 not upgraded.\n",
209 | "Need to get 535 kB of archives.\n",
210 | "After this operation, 2,944 kB of additional disk space will be used.\n",
211 | "Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 python-opencv amd64 3.2.0+dfsg-4ubuntu0.1 [535 kB]\n",
212 | "Fetched 535 kB in 1s (466 kB/s)\n",
213 | "Selecting previously unselected package python-opencv.\n",
214 | "(Reading database ... 110852 files and directories currently installed.)\n",
215 | "Preparing to unpack .../python-opencv_3.2.0+dfsg-4ubuntu0.1_amd64.deb ...\n",
216 | "Unpacking python-opencv (3.2.0+dfsg-4ubuntu0.1) ...\n",
217 | "Setting up python-opencv (3.2.0+dfsg-4ubuntu0.1) ...\n"
218 | ],
219 | "name": "stdout"
220 | }
221 | ]
222 | },
223 | {
224 | "metadata": {
225 | "colab_type": "text",
226 | "id": "FHSTeIFJvEl8"
227 | },
228 | "cell_type": "markdown",
229 | "source": [
230 | "Changing the variables to include OpenCV and GPU in the Makefile"
231 | ]
232 | },
233 | {
234 | "metadata": {
235 | "colab_type": "code",
236 | "id": "INUiWQphvFF_",
237 | "colab": {}
238 | },
239 | "cell_type": "code",
240 | "source": [
241 | "!sed -i 's/OPENCV=0/OPENCV=1/g' Makefile\n",
242 | "!sed -i 's/GPU=0/GPU=1/g' Makefile\n",
243 | "#!sed -i 's/CUDNN=0/CUDNN=1/g' Makefile\n",
244 | "%pycat Makefile"
245 | ],
246 | "execution_count": 0,
247 | "outputs": []
248 | },
249 | {
250 | "metadata": {
251 | "colab_type": "code",
252 | "id": "kQDhvwpxWRs1",
253 | "colab": {}
254 | },
255 | "cell_type": "code",
256 | "source": [
257 | "!make"
258 | ],
259 | "execution_count": 0,
260 | "outputs": []
261 | },
262 | {
263 | "metadata": {
264 | "colab_type": "text",
265 | "id": "aCLysbFOL2UF"
266 | },
267 | "cell_type": "markdown",
268 | "source": [
269 | "**How to train (to detect your custom objects):**\n",
270 | "---"
271 | ]
272 | },
273 | {
274 | "metadata": {
275 | "colab_type": "text",
276 | "id": "Uk5TTCBWL5Ea"
277 | },
278 | "cell_type": "markdown",
279 | "source": [
280 | "**Training Yolo v3:**\n",
281 | "\n",
282 | "1.Create file yolo-obj.cfg with the same content as in yolov3.cfg (or copy yolov3.cfg to yolo-obj.cfg) and:\n",
283 | "\n",
284 | "* change line batch to batch=64\n",
285 | "* change line subdivisions to subdivisions=8\n",
286 | "* change line classes=80 to your number of objects in each of 3 [yolo]-layers:\n",
287 | " * yolov3.cfg#L610\n",
288 | " * yolov3.cfg#L696\n",
289 | " * yolov3.cfg#L783\n",
290 | "* change [filters=255] to filters=(classes + 5)x3 in the 3 [convolutional] before each [yolo] layer\n",
291 | " * yolov3.cfg#L603\n",
292 | " * yolov3.cfg#L689\n",
293 | " * yolov3.cfg#L776\n",
294 | " \n",
295 | "So if classes=1 then should be filters=18. If classes=2 then write filters=21."
296 | ]
297 | },
298 | {
299 | "metadata": {
300 | "colab_type": "code",
301 | "id": "ePe4Qu8QN0Ii",
302 | "colab": {}
303 | },
304 | "cell_type": "code",
305 | "source": [
306 | "%cp cfg/yolov3.cfg cfg/yolo-obj.cfg\n",
307 | "!sed -i 's/batch=1/batch=64/g' cfg/yolo-obj.cfg\n",
308 | "!sed -i 's/subdivisions=1/subdivisions=32/g' cfg/yolo-obj.cfg\n",
309 | "!sed -i 's/classes=80/classes=1/g' cfg/yolo-obj.cfg\n",
310 | "!sed -i 's/filters=255/filters=18/g' cfg/yolo-obj.cfg\n",
311 | "!sed -i 's/width=416/width=608/g' cfg/yolo-obj.cfg\n",
312 | "!sed -i 's/height=416/height=608/g' cfg/yolo-obj.cfg"
313 | ],
314 | "execution_count": 0,
315 | "outputs": []
316 | },
317 | {
318 | "metadata": {
319 | "colab_type": "code",
320 | "id": "hE5EGwcSQYbO",
321 | "colab": {}
322 | },
323 | "cell_type": "code",
324 | "source": [
325 | "%pycat cfg/yolo-obj.cfg"
326 | ],
327 | "execution_count": 0,
328 | "outputs": []
329 | },
330 | {
331 | "metadata": {
332 | "colab_type": "text",
333 | "id": "0Tum3Y4HRoqx"
334 | },
335 | "cell_type": "markdown",
336 | "source": [
337 | "2.Create file obj.names in the directory `build\\darknet\\x64\\data\\` with objects names - each in new line\n",
338 | "\n"
339 | ]
340 | },
341 | {
342 | "metadata": {
343 | "colab_type": "code",
344 | "id": "L5M9X03jR0Vl",
345 | "colab": {}
346 | },
347 | "cell_type": "code",
348 | "source": [
349 | "all_classes = \"\"\"Gun\n",
350 | "\"\"\"\n",
351 | "\n",
352 | "file = \"\"\"text_file = open(\"build/darknet/x64/data/obj.names\", \"w\");text_file.write(all_classes);text_file.close()\"\"\" \n",
353 | "\n",
354 | "exec(file)\n",
355 | "%pycat build/darknet/x64/data/obj.names"
356 | ],
357 | "execution_count": 0,
358 | "outputs": []
359 | },
360 | {
361 | "metadata": {
362 | "colab_type": "text",
363 | "id": "iBAEunM0Ry7b"
364 | },
365 | "cell_type": "markdown",
366 | "source": [
367 | "3.Create file obj.data in the directory `build\\darknet\\x64\\data\\` containing (where classes = number of objects):"
368 | ]
369 | },
370 | {
371 | "metadata": {
372 | "colab_type": "code",
373 | "id": "Bqbe2_uFUzFU",
374 | "colab": {}
375 | },
376 | "cell_type": "code",
377 | "source": [
378 | "obj_data = \"\"\"classes= 1\n",
379 | "train = build/darknet/x64/data/train.txt\n",
380 | "valid = build/darknet/x64/data/valid.txt\n",
381 | "names = build/darknet/x64/data/obj.names\n",
382 | "backup = build/darknet/x64/backup/\n",
383 | "\"\"\"\n",
384 | "\n",
385 | "file = \"\"\"text_file = open(\"build/darknet/x64/data/obj.data\", \"w\");text_file.write(obj_data);text_file.close()\"\"\" \n",
386 | "\n",
387 | "exec(file)\n",
388 | "%pycat build/darknet/x64/data/obj.data"
389 | ],
390 | "execution_count": 0,
391 | "outputs": []
392 | },
393 | {
394 | "metadata": {
395 | "colab_type": "text",
396 | "id": "FUy55xwhUyVZ"
397 | },
398 | "cell_type": "markdown",
399 | "source": [
400 | "4.Put image-files (.jpg) of your objects in the directory build/darknet/x64/data/obj/\n"
401 | ]
402 | },
403 | {
404 | "metadata": {
405 | "colab_type": "code",
406 | "id": "5TRsi9sS178e",
407 | "colab": {}
408 | },
409 | "cell_type": "code",
410 | "source": [
411 | "#%mkdir build/darknet/x64/data/obj\n",
412 | "%cp -r \"/content/drive/My Drive/GColab_ML_DL/Guns/.\" build/darknet/x64/data/obj/\n",
413 | "#%cp -r \"/content/drive/My Drive/GColab_ML_DL/Smoking/.\" build/darknet/x64/data/obj/"
414 | ],
415 | "execution_count": 0,
416 | "outputs": []
417 | },
418 | {
419 | "metadata": {
420 | "colab_type": "code",
421 | "id": "JAeCqqUD7GkE",
422 | "outputId": "6ae9479e-2cda-483a-ef68-58359e73b407",
423 | "colab": {
424 | "base_uri": "https://localhost:8080/",
425 | "height": 34
426 | }
427 | },
428 | "cell_type": "code",
429 | "source": [
430 | "ls -1 \"/content/drive/My Drive/GColab_ML_DL/Guns/\" | wc -l"
431 | ],
432 | "execution_count": 0,
433 | "outputs": [
434 | {
435 | "output_type": "stream",
436 | "text": [
437 | "1386\n"
438 | ],
439 | "name": "stdout"
440 | }
441 | ]
442 | },
443 | {
444 | "metadata": {
445 | "colab_type": "code",
446 | "id": "_pEomLo6yvVT",
447 | "outputId": "d51b7109-561b-4c05-bda0-28550c0f8aad",
448 | "colab": {
449 | "base_uri": "https://localhost:8080/",
450 | "height": 51
451 | }
452 | },
453 | "cell_type": "code",
454 | "source": [
455 | "%ls -1 build/darknet/x64/data/obj/*.jpg | wc -l\n",
456 | "%ls -1 build/darknet/x64/data/obj/*.txt | wc -l"
457 | ],
458 | "execution_count": 0,
459 | "outputs": [
460 | {
461 | "output_type": "stream",
462 | "text": [
463 | "693\n",
464 | "693\n"
465 | ],
466 | "name": "stdout"
467 | }
468 | ]
469 | },
470 | {
471 | "metadata": {
472 | "colab_type": "text",
473 | "id": "hiusyk1m_VEe"
474 | },
475 | "cell_type": "markdown",
476 | "source": [
477 | "5.You should label each object on images from your dataset. Use this visual GUI-software for marking bounded boxes of objects and generating annotation files for Yolo v2 & v3: \n",
478 | "\n",
479 | "---\n",
480 | "**LabelImg**\n",
481 | " \n",
482 |     "LabelImg is a graphical image annotation tool: \n",
483 | "https://github.com/tzutalin/labelImg\n",
484 | "\n",
485 | "\n",
486 | "---\n",
487 | "\n",
488 | "**Yolo_mark**\n",
489 | "\n",
490 | "Windows & Linux GUI for marking bounded boxes of objects in images for training Yolo v3 and v2\n",
491 | "\n",
492 | "https://github.com/AlexeyAB/Yolo_mark\n",
493 | "\n",
494 | "It will create `.txt`-file for each `.jpg`-image-file - in the same directory and with the same name, but with `.txt`-extension, and put to file: object number and object coordinates on this image, for each object in new line: ` `\n",
495 | "\n",
496 | "Where:\n",
497 | "\n",
498 | "\n",
499 | "* `` - integer object number from 0 to (classes-1)\n",
500 | "* ` ` - float values relative to width and height of image, it can be equal from (0.0 to 1.0]\n",
501 | "* for example: ` = / ` or ` = / `\n",
502 |     "* attention: ` ` - are center of rectangle (are not top-left corner)\n",
503 | "\n",
504 | "\n",
505 |     "For example, for img1.jpg a corresponding img1.txt will be created, containing:\n",
506 | "\n",
507 | "\n",
508 | "```\n",
509 | "1 0.716797 0.395833 0.216406 0.147222\n",
510 | "0 0.687109 0.379167 0.255469 0.158333\n",
511 | "1 0.420312 0.395833 0.140625 0.166667\n",
512 | "```\n",
513 | "\n"
514 | ]
515 | },
516 | {
517 | "metadata": {
518 | "colab_type": "text",
519 | "id": "P0FXqQ7Gds4A"
520 | },
521 | "cell_type": "markdown",
522 | "source": [
523 | "6.Create file train.txt and valid.txt in directory `build\\darknet\\x64\\data\\` with filenames of your images, each filename in new line, with path relative to darknet, for example containing:\n"
524 | ]
525 | },
526 | {
527 | "metadata": {
528 | "colab_type": "code",
529 | "id": "EGq614FhX6K8",
530 | "colab": {}
531 | },
532 | "cell_type": "code",
533 | "source": [
534 | "import os, fnmatch\n",
535 | "import numpy as np\n",
536 | "\n",
537 | "train_file = open(\"build/darknet/x64/data/train.txt\", \"w\")\n",
538 | "valid_file = open(\"build/darknet/x64/data/valid.txt\", \"w\")\n",
539 | "listOfFiles = os.listdir('build/darknet/x64/data/obj/') \n",
540 | "pattern = \"*.jpg\" \n",
541 | "for f_name in listOfFiles: \n",
542 | " if fnmatch.fnmatch(f_name, pattern):\n",
543 | " if np.random.rand(1) < 0.8:\n",
544 | " train_file.write(\"build/darknet/x64/data/obj/\"+f_name+\"\\n\")\n",
545 | " #print (\"data/obj/\"+f_name)\n",
546 | " else:\n",
547 | " valid_file.write(\"build/darknet/x64/data/obj/\"+f_name+\"\\n\") \n",
548 | " \n",
549 | "train_file.close()\n",
550 | "valid_file.close()"
551 | ],
552 | "execution_count": 0,
553 | "outputs": []
554 | },
555 | {
556 | "metadata": {
557 | "colab_type": "code",
558 | "id": "ml2I-ABUTQ6m",
559 | "outputId": "1185d003-516e-4e02-dd95-0679919138bc",
560 | "colab": {
561 | "base_uri": "https://localhost:8080/",
562 | "height": 51
563 | }
564 | },
565 | "cell_type": "code",
566 | "source": [
567 | "#Count number of files \n",
568 | "!wc -l build/darknet/x64/data/train.txt\n",
569 | "!wc -l build/darknet/x64/data/valid.txt"
570 | ],
571 | "execution_count": 0,
572 | "outputs": [
573 | {
574 | "output_type": "stream",
575 | "text": [
576 | "556 build/darknet/x64/data/train.txt\n",
577 | "137 build/darknet/x64/data/valid.txt\n"
578 | ],
579 | "name": "stdout"
580 | }
581 | ]
582 | },
583 | {
584 | "metadata": {
585 | "colab_type": "code",
586 | "id": "kj-pb3-UqSwd",
587 | "colab": {}
588 | },
589 | "cell_type": "code",
590 | "source": [
591 | "%pycat build/darknet/x64/data/valid.txt"
592 | ],
593 | "execution_count": 0,
594 | "outputs": []
595 | },
596 | {
597 | "metadata": {
598 | "colab_type": "text",
599 | "id": "K8pPboneqydm"
600 | },
601 | "cell_type": "markdown",
602 | "source": [
603 | "7.Download pre-trained weights for the convolutional layers (154 MB): https://pjreddie.com/media/files/darknet53.conv.74 and put to the directory `build\\darknet\\x64`"
604 | ]
605 | },
606 | {
607 | "metadata": {
608 | "colab_type": "code",
609 | "id": "h_H7qCo9qylB",
610 | "outputId": "fd1b90c2-d246-4183-ca2c-1b3bde1d434b",
611 | "colab": {
612 | "base_uri": "https://localhost:8080/",
613 | "height": 204
614 | }
615 | },
616 | "cell_type": "code",
617 | "source": [
618 | "!wget -P build/darknet/x64/ https://pjreddie.com/media/files/darknet53.conv.74\n",
619 | "#%ls build/darknet/x64/"
620 | ],
621 | "execution_count": 0,
622 | "outputs": [
623 | {
624 | "output_type": "stream",
625 | "text": [
626 | "--2019-01-10 04:14:23-- https://pjreddie.com/media/files/darknet53.conv.74\n",
627 | "Resolving pjreddie.com (pjreddie.com)... 128.208.3.39\n",
628 | "Connecting to pjreddie.com (pjreddie.com)|128.208.3.39|:443... connected.\n",
629 | "HTTP request sent, awaiting response... 200 OK\n",
630 | "Length: 162482580 (155M) [application/octet-stream]\n",
631 | "Saving to: ‘build/darknet/x64/darknet53.conv.74’\n",
632 | "\n",
633 | "darknet53.conv.74 100%[===================>] 154.96M 63.7MB/s in 2.4s \n",
634 | "\n",
635 | "2019-01-10 04:14:26 (63.7 MB/s) - ‘build/darknet/x64/darknet53.conv.74’ saved [162482580/162482580]\n",
636 | "\n"
637 | ],
638 | "name": "stdout"
639 | }
640 | ]
641 | },
642 | {
643 | "metadata": {
644 | "colab_type": "text",
645 | "id": "9MTqLH85DfV9"
646 | },
647 | "cell_type": "markdown",
648 | "source": [
649 |     "8. Start training by using the command line:\n",
650 | "\n",
651 | "\n",
652 | "```\n",
653 | "./darknet detector train build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/darknet53.conv.74 -dont_show\n",
654 | "```\n",
655 | "\n",
656 |     "8.1. For training with mAP (mean average precision) calculation every 4 epochs (set valid=valid.txt or train.txt in the obj.data file), run: \n",
657 | "\n",
658 | "\n",
659 | "\n",
660 | "```\n",
661 | "./darknet detector train build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/darknet53.conv.74 -dont_show -map\n",
662 | "```\n",
663 | "\n"
664 | ]
665 | },
666 | {
667 | "metadata": {
668 | "colab_type": "code",
669 | "id": "C5VW-CkMsTUp",
670 | "colab": {}
671 | },
672 | "cell_type": "code",
673 | "source": [
674 | "!./darknet detector train build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/darknet53.conv.74 -dont_show"
675 | ],
676 | "execution_count": 0,
677 | "outputs": []
678 | },
679 | {
680 | "metadata": {
681 | "colab_type": "text",
682 | "id": "4f_TRrcCEMtN"
683 | },
684 | "cell_type": "markdown",
685 | "source": [
686 | "9.After training is complete - get result yolo-obj_final.weights from path `build\\darknet\\x64\\backup\\`\n",
687 | "\n",
688 | "\n",
689 | "* After each 100 iterations you can stop and later start training from this point. For example, after 2000 iterations you can stop training, and later just copy yolo-obj_2000.weights from `build\\darknet\\x64\\backup\\` to` build\\darknet\\x64\\ `and start training using: \n",
690 | "\n",
691 | "\n",
692 | "```\n",
693 | "./darknet detector train data/obj.data yolo-obj.cfg yolo-obj_2000.weights\n",
694 | "```\n",
695 | "\n",
696 | "\n",
697 | "\n"
698 | ]
699 | },
700 | {
701 | "metadata": {
702 | "colab_type": "code",
703 | "id": "9GHn2iDhgCum",
704 | "colab": {}
705 | },
706 | "cell_type": "code",
707 | "source": [
708 | "!./darknet detector train build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/yolo-obj_1000.weights -dont_show"
709 | ],
710 | "execution_count": 0,
711 | "outputs": []
712 | },
713 | {
714 | "metadata": {
715 | "colab_type": "text",
716 | "id": "p2eWyFXYG06P"
717 | },
718 | "cell_type": "markdown",
719 | "source": [
720 | "Custom object detection:\n",
721 | "---\n",
722 | "Example of Guns detection:\n",
723 | "\n",
724 |     "**Image:**\n",
725 | "\n",
726 | "```\n",
727 | "./darknet detector test build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/guns_1000it.weights -thresh 0.20 data/ak47.jpg\n",
728 | "```\n",
729 | "\n",
730 | "\n",
731 |     "**Video:**\n",
732 | "\n",
733 | "```\n",
734 | "./darknet detector demo build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/guns_1000it.weights -thresh 0.20 -dont_show Guns.mp4 -out_filename Guns_output.mp4\n",
735 | "```\n",
736 | "\n"
737 | ]
738 | },
739 | {
740 | "metadata": {
741 | "colab_type": "code",
742 | "id": "Wu3cwRauF2Kv",
743 | "colab": {}
744 | },
745 | "cell_type": "code",
746 | "source": [
747 | "!./darknet detector demo build/darknet/x64/data/obj.data cfg/yolo-obj.cfg build/darknet/x64/smoking_800it_1avgLoss.weights -thresh 0.20 -dont_show Guns.mp4 -out_filename Guns_1class_20%.mp4"
748 | ],
749 | "execution_count": 0,
750 | "outputs": []
751 | },
752 | {
753 | "metadata": {
754 | "colab_type": "code",
755 | "id": "EDFD5OyR82mL",
756 | "colab": {}
757 | },
758 | "cell_type": "code",
759 | "source": [
760 | "ls -lh *.mp4"
761 | ],
762 | "execution_count": 0,
763 | "outputs": []
764 | },
765 | {
766 | "metadata": {
767 | "id": "-Fnf5QO2ftfN",
768 | "colab_type": "text"
769 | },
770 | "cell_type": "markdown",
771 | "source": [
772 | "**Playing videos on google colab**"
773 | ]
774 | },
775 | {
776 | "metadata": {
777 | "colab_type": "code",
778 | "id": "qT9TFs4ikb1L",
779 | "colab": {}
780 | },
781 | "cell_type": "code",
782 | "source": [
783 | "import io\n",
784 | "import base64\n",
785 | "from IPython.display import HTML\n",
786 | "video = io.open('Guns_output.mp4', 'r+b').read()\n",
787 | "encoded = base64.b64encode(video)\n",
788 | "HTML(data=''''''.format(encoded.decode('ascii')))"
789 | ],
790 | "execution_count": 0,
791 | "outputs": []
792 | },
793 | {
794 | "metadata": {
795 | "id": "x0iH6F4viDHq",
796 | "colab_type": "text"
797 | },
798 | "cell_type": "markdown",
799 | "source": [
800 | "Extra Tutorial\n",
801 | "---"
802 | ]
803 | },
804 | {
805 | "metadata": {
806 | "id": "0WV3v9fmdM9u",
807 | "colab_type": "text"
808 | },
809 | "cell_type": "markdown",
810 | "source": [
811 | "Copying data from google colab to google drive"
812 | ]
813 | },
814 | {
815 | "metadata": {
816 | "colab_type": "code",
817 | "id": "WlfyY-YBfZ0E",
818 | "colab": {}
819 | },
820 | "cell_type": "code",
821 | "source": [
822 | "%cp -r build/darknet/x64/yolo-obj_1500up_05avgLoss.weights \"/content/drive/My Drive/GColab/\"\n",
823 | "%cp -r Guns_output.mp4 \"/content/drive/My Drive/GColab/\""
824 | ],
825 | "execution_count": 0,
826 | "outputs": []
827 | },
828 | {
829 | "metadata": {
830 | "id": "xLvR5EI5dZd-",
831 | "colab_type": "text"
832 | },
833 | "cell_type": "markdown",
834 | "source": [
835 | "Downloading the data from the colab"
836 | ]
837 | },
838 | {
839 | "metadata": {
840 | "id": "IhTDrB9odYk_",
841 | "colab_type": "code",
842 | "colab": {}
843 | },
844 | "cell_type": "code",
845 | "source": [
846 | "from google.colab import files\n",
847 | "files.download('build/darknet/x64/yolo-obj_1500up_05avgLoss.weights')"
848 | ],
849 | "execution_count": 0,
850 | "outputs": []
851 | },
852 | {
853 | "metadata": {
854 | "id": "oFhXKJyidnUr",
855 | "colab_type": "text"
856 | },
857 | "cell_type": "markdown",
858 | "source": [
859 | "Copying data from google drive to google colab"
860 | ]
861 | },
862 | {
863 | "metadata": {
864 | "colab_type": "code",
865 | "id": "AvorXG2D8v9a",
866 | "colab": {}
867 | },
868 | "cell_type": "code",
869 | "source": [
870 | "%cp -r \"/content/drive/My Drive/GColab/yolo-obj_1500up_05avgLoss.weights\" build/darknet/x64/\n",
871 | "#%cp -r \"/content/drive/My Drive/GColab/SmokingDEMO.mp4\" .\n",
872 | "%cp -r \"/content/drive/My Drive/GColab/Guns.mp4\" ."
873 | ],
874 | "execution_count": 0,
875 | "outputs": []
876 | },
877 | {
878 | "metadata": {
879 | "id": "o3Yp1VgeAvX1",
880 | "colab_type": "text"
881 | },
882 | "cell_type": "markdown",
883 | "source": [
884 | "Youtube Link: \n",
885 | "---\n",
886 | "https://youtu.be/Uzty5hRWSQs"
887 | ]
888 | }
889 | ]
890 | }
891 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deep-Learning-with-GoogleColab
2 | Develop Deep Learning Applications with Google Colaboratory - on the free Tesla K80/Tesla T4/Tesla P100 GPU - using Keras, Tensorflow and PyTorch.
3 |
4 | **Darknet YOLOv4 - Google Colab (Firearms Detection)**
5 | ---
6 | Firearms Detection [
](https://colab.research.google.com/github/hardik0/Deep-Learning-with-GoogleColab/blob/master/Darknet_YOLOv4_Google_Colab_(Firearm_Detection).ipynb)
7 |
8 | Darknet YOLOv4: Real-Time Object Detection
9 |
10 | You only look once (YOLOv4) is a state-of-the-art, real-time Firearms detection system for firearms, pistol, rifle detection.
11 |
12 | 
13 |
14 |
15 | Youtube Link:
16 | https://youtu.be/XgvDXT0fme4
17 |
18 |
19 | **Cigarette/Smoking Detection using YOLOv3 Darknet**
20 | ---
21 | Cigarette/Smoking Detection [
](https://colab.research.google.com/github/hardik0/Deep-Learning-with-GoogleColab/blob/master/Darknet_YOLOv3_Cigarette_Smoking_Detection.ipynb)
22 |
23 | YOLO: Real-Time Object Detection
24 |
25 | You only look once (YOLO) is a state-of-the-art, real-time object detection system for cigarette & smoking detection.
26 |
27 | 
28 |
29 |
30 | Youtube Link:
31 | https://youtu.be/vEnQIptZzyI
32 |
33 |
34 | **Guns Detection using YOLOv3 Darknet**
35 | ---
36 | Guns Detection [
](https://colab.research.google.com/github/hardik0/Deep-Learning-with-GoogleColab/blob/master/Darknet_YOLOv3_Guns_Detection.ipynb)
37 |
38 | YOLO: Real-Time Object Detection
39 |
40 | You only look once (YOLO) is a state-of-the-art, real-time object detection system for firearms, pistol, rifle detection.
41 |
42 | 
43 |
44 |
45 | Youtube Link:
46 | https://youtu.be/Uzty5hRWSQs
47 |
48 | Note:
49 | + Why some guns were not detected?
50 |     - I had only a few images of pistols when I trained the YOLO model.
51 |
52 |
53 | **Pyannote-Video**
54 | ---
55 | Pyannote-Video [
](https://colab.research.google.com/github/hardik0/Deep-Learning-with-GoogleColab/blob/master/pyannote_video%20-%20Face%20Detection%2C%20Tracking%20%26%20clustering%20in%20Videos.ipynb)
56 |
57 | A Toolkit for Face Detection, Tracking, and Clustering in Videos
58 |
59 | 
60 |
61 |
62 | Youtube Link:
63 | https://youtu.be/8hepsfDWzFA
64 |
65 |
66 | **DeOldify**
67 | ---
68 | Image [
](https://colab.research.google.com/github/hardik0/Deep-Learning-with-GoogleColab/blob/master/ImageColorizerColab.ipynb) |
69 | Video [
](https://colab.research.google.com/github/hardik0/Deep-Learning-with-GoogleColab/blob/master/VideoColorizerColab.ipynb)
70 |
71 | To Colorizing and Restoring your old Black and White Photos/Videos
72 |
73 | 
74 |
75 |
76 | Youtube Channel Link:
77 | https://www.youtube.com/channel/UC0L-Z0hP3_39dSNNNgpdiOQ
78 |
79 | **Acknowledgements**
80 | ---
81 |
82 | + [@pjreddie](https://www.github.com/pjreddie) for Original Darknet(YOLOv3) Version.
83 | + [@AlexeyAB](https://www.github.com/AlexeyAB) for Modify Darknet(YOLOv3) Version.
84 | + [@pyannote](https://www.github.com/pyannote) for pyannote-video.
85 | + [@jantic](https://github.com/jantic) for DeOldify.
86 |
--------------------------------------------------------------------------------
/VideoColorizerColab.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "VideoColorizerColab.ipynb",
7 | "provenance": [],
8 | "collapsed_sections": [],
9 | "toc_visible": true,
10 | "include_colab_link": true
11 | },
12 | "kernelspec": {
13 | "name": "python3",
14 | "display_name": "Python 3"
15 | },
16 | "language_info": {
17 | "codemirror_mode": {
18 | "name": "ipython",
19 | "version": 3
20 | },
21 | "file_extension": ".py",
22 | "mimetype": "text/x-python",
23 | "name": "python",
24 | "nbconvert_exporter": "python",
25 | "pygments_lexer": "ipython3",
26 | "version": "3.7.0"
27 | },
28 | "accelerator": "GPU"
29 | },
30 | "cells": [
31 | {
32 | "cell_type": "markdown",
33 | "metadata": {
34 | "id": "view-in-github",
35 | "colab_type": "text"
36 | },
37 | "source": [
38 | "
"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "metadata": {
44 | "colab_type": "text",
45 | "id": "663IVxfrpIAb"
46 | },
47 | "source": [
48 | "#◢ DeOldify - Colorize your own Videos/Images!\n",
49 | "\n",
50 | "\n",
51 | "_FYI: This notebook is intended as a tool to colorize GIFs and short Videos and Images, if you are trying to convert longer video you may hit the limit on processing space. Running the Jupyter notebook on your own machine is recommended (and faster) for larger video sizes._\n",
52 | "\n",
53 | "####**Credits:**\n",
54 | "\n",
55 | "Big special thanks to:\n",
56 | "\n",
57 | "**Robert Bell** for all his work on the video Colab notebook, and paving the way to video in DeOldify!\n",
58 | "\n",
59 | "**Matt Robinson** and **María Benavente** for pioneering the DeOldify image colab notebook.\n",
60 | "\n",
61 | "**Dana Kelley** for doing things, breaking stuff & having an opinion on everything."
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "metadata": {
67 | "id": "rl_kLYcV_wmJ",
68 | "colab_type": "code",
69 | "colab": {}
70 | },
71 | "source": [
72 | "from google.colab import drive\n",
73 | "drive.mount('/content/drive')"
74 | ],
75 | "execution_count": 0,
76 | "outputs": []
77 | },
78 | {
79 | "cell_type": "markdown",
80 | "metadata": {
81 | "colab_type": "text",
82 | "id": "ZjPqTBNoohK9"
83 | },
84 | "source": [
85 | "\n",
86 | "\n",
87 | "---\n",
88 | "\n",
89 | "\n",
90 | "#◢ Verify Correct Runtime Settings\n",
91 | "\n",
92 |     "** IMPORTANT **: \n",
93 | "\n",
94 | "In the \"Runtime\" menu for the notebook window, select \"Change runtime type.\" Ensure that the following are selected:\n",
95 | "* Runtime Type = Python 3\n",
96 | "* Hardware Accelerator = GPU \n",
97 | "\n",
98 | "Your instance must have following gpu to process and render the video\n",
99 | "* Tesla T4 \n",
100 | "* Tesla P100\n"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "metadata": {
106 | "id": "NxPtdDjO9mG7",
107 | "colab_type": "code",
108 | "outputId": "4a5badec-b8f7-4476-e5bd-250edb0815af",
109 | "colab": {
110 | "base_uri": "https://localhost:8080/",
111 | "height": 306
112 | }
113 | },
114 | "source": [
115 | "!nvidia-smi"
116 | ],
117 | "execution_count": 0,
118 | "outputs": [
119 | {
120 | "output_type": "stream",
121 | "text": [
122 | "Tue Nov 19 03:15:03 2019 \n",
123 | "+-----------------------------------------------------------------------------+\n",
124 | "| NVIDIA-SMI 430.50 Driver Version: 418.67 CUDA Version: 10.1 |\n",
125 | "|-------------------------------+----------------------+----------------------+\n",
126 | "| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
127 | "| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
128 | "|===============================+======================+======================|\n",
129 | "| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |\n",
130 | "| N/A 33C P0 25W / 250W | 0MiB / 16280MiB | 0% Default |\n",
131 | "+-------------------------------+----------------------+----------------------+\n",
132 | " \n",
133 | "+-----------------------------------------------------------------------------+\n",
134 | "| Processes: GPU Memory |\n",
135 | "| GPU PID Type Process name Usage |\n",
136 | "|=============================================================================|\n",
137 | "| No running processes found |\n",
138 | "+-----------------------------------------------------------------------------+\n"
139 | ],
140 | "name": "stdout"
141 | }
142 | ]
143 | },
144 | {
145 | "cell_type": "markdown",
146 | "metadata": {
147 | "id": "1xFVQ2dcBavg",
148 | "colab_type": "text"
149 | },
150 | "source": [
151 | "**Check GPU Type**"
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "metadata": {
157 | "id": "gJcLSMag97Yz",
158 | "colab_type": "code",
159 | "outputId": "203bf4df-9dc9-4a08-a6bf-84586d0e50a6",
160 | "colab": {
161 | "base_uri": "https://localhost:8080/",
162 | "height": 51
163 | }
164 | },
165 | "source": [
166 | "import pynvml\n",
167 | "\n",
168 | "\n",
169 | "pynvml.nvmlInit()\n",
170 | "handle = pynvml.nvmlDeviceGetHandleByIndex(0)\n",
171 | "device_name = pynvml.nvmlDeviceGetName(handle)\n",
172 | "print(device_name)\n",
173 | "\n",
174 | "if device_name != b'Tesla T4' and device_name != b'Tesla P100-PCIE-16GB':\n",
175 | " raise Exception(\"\"\"\n",
176 | " Unfortunately this instance does not have a Tesla T4 /Tesla P100 GPU.\n",
177 | " \n",
178 | " Please make sure you've configured Colab to request a GPU instance type.\n",
179 | " \n",
180 | " Sometimes Colab allocates a Tesla K80 instead of a T4. Resetting the instance.\n",
181 | "\n",
182 | " If you get a K80 GPU, try Runtime -> Reset all runtimes... \n",
183 | " \"\"\")\n",
184 | "else:\n",
185 | " print('Woo! You got the right kind of GPU!')"
186 | ],
187 | "execution_count": 0,
188 | "outputs": [
189 | {
190 | "output_type": "stream",
191 | "text": [
192 | "b'Tesla P100-PCIE-16GB'\n",
193 | "Woo! You got the right kind of GPU!\n"
194 | ],
195 | "name": "stdout"
196 | }
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "metadata": {
202 | "colab_type": "code",
203 | "id": "00_GcC_trpdE",
204 | "colab": {}
205 | },
206 | "source": [
207 | "from os import path\n",
208 | "import torch"
209 | ],
210 | "execution_count": 0,
211 | "outputs": []
212 | },
213 | {
214 | "cell_type": "markdown",
215 | "metadata": {
216 | "colab_type": "text",
217 | "id": "gaEJBGDlptEo"
218 | },
219 | "source": [
220 | "#◢ Git clone and install DeOldify"
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "metadata": {
226 | "colab_type": "code",
227 | "id": "-T-svuHytJ-8",
228 | "colab": {}
229 | },
230 | "source": [
231 | "!git clone https://github.com/jantic/DeOldify.git DeOldify"
232 | ],
233 | "execution_count": 0,
234 | "outputs": []
235 | },
236 | {
237 | "cell_type": "code",
238 | "metadata": {
239 | "id": "5GMmgoEzJu8H",
240 | "colab_type": "code",
241 | "outputId": "4fafd594-f770-40fc-f6be-6ffb1d041d46",
242 | "colab": {
243 | "base_uri": "https://localhost:8080/",
244 | "height": 34
245 | }
246 | },
247 | "source": [
248 | "cd DeOldify"
249 | ],
250 | "execution_count": 0,
251 | "outputs": [
252 | {
253 | "output_type": "stream",
254 | "text": [
255 | "/content/DeOldify\n"
256 | ],
257 | "name": "stdout"
258 | }
259 | ]
260 | },
261 | {
262 | "cell_type": "markdown",
263 | "metadata": {
264 | "colab_type": "text",
265 | "id": "BDFjbNxaadNJ"
266 | },
267 | "source": [
268 | "#◢ Setup"
269 | ]
270 | },
271 | {
272 | "cell_type": "code",
273 | "metadata": {
274 | "colab_type": "code",
275 | "id": "Lsx7xCXNSVt6",
276 | "colab": {}
277 | },
278 | "source": [
279 | "!pip install -r requirements.txt"
280 | ],
281 | "execution_count": 0,
282 | "outputs": []
283 | },
284 | {
285 | "cell_type": "code",
286 | "metadata": {
287 | "colab_type": "code",
288 | "id": "MsJa69CMwj3l",
289 | "colab": {}
290 | },
291 | "source": [
292 | "import fastai\n",
293 | "from deoldify.visualize import *\n",
294 | "from pathlib import Path\n",
295 | "torch.backends.cudnn.benchmark=True"
296 | ],
297 | "execution_count": 0,
298 | "outputs": []
299 | },
300 | {
301 | "cell_type": "code",
302 | "metadata": {
303 | "id": "Wo0FL51WJCS_",
304 | "colab_type": "code",
305 | "colab": {}
306 | },
307 | "source": [
308 | "!mkdir 'models'\n",
309 | "#Download Pretrained Weights for video\n",
310 | "!wget https://www.dropbox.com/s/336vn9y4qwyg9yz/ColorizeVideo_gen.pth?dl=0 -O ./models/ColorizeVideo_gen.pth\n",
311 | "#Download Pretrained Weights for image\n",
312 | "!wget https://www.dropbox.com/s/zkehq1uwahhbc2o/ColorizeArtistic_gen.pth?dl=0 -O ./models/ColorizeArtistic_gen.pth"
313 | ],
314 | "execution_count": 0,
315 | "outputs": []
316 | },
317 | {
318 | "cell_type": "code",
319 | "metadata": {
320 | "colab_type": "code",
321 | "id": "tzHVnegp21hC",
322 | "colab": {}
323 | },
324 | "source": [
325 | "colorizer = get_video_colorizer()\n",
326 | "colorizer_img = get_image_colorizer(artistic=True)"
327 | ],
328 | "execution_count": 0,
329 | "outputs": []
330 | },
331 | {
332 | "cell_type": "markdown",
333 | "metadata": {
334 | "id": "ZnkfekAxJCTS",
335 | "colab_type": "text"
336 | },
337 | "source": [
338 | "#◢ Instructions"
339 | ]
340 | },
341 | {
342 | "cell_type": "markdown",
343 | "metadata": {
344 | "id": "EZ8oJw-bJCTV",
345 | "colab_type": "text"
346 | },
347 | "source": [
348 | "### source_url (video)\n",
349 | "Type in a url hosting a video from YouTube, Imgur, Twitter, Reddit, Vimeo, etc. Many sources work! GIFs also work. Full list here: https://ytdl-org.github.io/youtube-dl/supportedsites.html NOTE: If you want to use your own video, upload it first to a site like YouTube. \n",
350 | "\n",
351 | "### source_url (image)\n",
352 | "Type in a url to a direct link of an image. Usually that means they'll end in .png, .jpg, etc. NOTE: If you want to use your own image, upload it first to a site like Imgur. \n",
353 | "\n",
354 | "### render_factor\n",
355 |         "The default value of 21 (for video) and 35 (for image) has been carefully chosen and should work -ok- for most scenarios (but probably won't be the -best-). This determines resolution at which the color portion of the video is rendered. Lower resolution will render faster, and colors also tend to look more vibrant. Older and lower quality film in particular will generally benefit by lowering the render factor. Higher render factors are often better for higher quality videos and inconsistencies (flashy render) will generally be reduced, but the colors may get slightly washed out. \n",
356 | "\n",
357 | "### How to Download a Copy\n",
358 | "Simply right click on the displayed video/image and click \"Save video as...\"!\n",
359 | "\n",
360 | "## Pro Tips\n",
361 | "1. If a video takes a long time to render and you're wondering how well the frames will actually be colorized, you can preview how well the frames will be rendered at each render_factor by using the code at the bottom. Just stop the video rendering by hitting the stop button on the cell, then run that bottom cell under \"See how well render_factor values perform on a frame here\". It's not perfect and you may still need to experiment a bit especially when it comes to figuring out how to reduce frame inconsistency. But it'll go a long way in narrowing down what actually works.\n",
362 | "2. If videos are taking way too much time for your liking, running the Jupyter notebook VideoColorizer.ipynb on your own machine (with DeOldify installed) will generally be much faster (as long as you have the hardware for it). \n",
363 |         "3. You can evaluate how well the image is rendered at each render_factor by using the code at the bottom (that cell under \"See how well render_factor values perform on a frame here\").\n",
364 | "\n",
365 | "## Troubleshooting\n",
366 |         "The video player may wind up not showing up, in which case, make sure to wait for the Jupyter cell to complete processing first (the play button will stop spinning). Then follow these alternative download instructions:\n",
367 | "\n",
368 | "1. In the menu to the left, click Files\n",
369 | "2. If you don't see the 'DeOldify' folder, click \"Refresh\"\n",
370 | "3. By default, rendered video will be in /DeOldify/video/result/\n",
371 | "\n",
372 | "If a video you downloaded doesn't play, it's probably because the cell didn't complete processing and the video is in a half-finished state.\n",
373 | "\n",
374 | "If you get a 'CUDA out of memory' error, you probably have the render_factor too high. The max is 44 on 11GB video cards."
375 | ]
376 | },
377 | {
378 | "cell_type": "markdown",
379 | "metadata": {
380 | "id": "r2EImy_7Eo4d",
381 | "colab_type": "text"
382 | },
383 | "source": [
384 | "#◢ Download Youtube Videos\n",
385 | "make directory and cd into it"
386 | ]
387 | },
388 | {
389 | "cell_type": "code",
390 | "metadata": {
391 | "id": "u-GKzdebO55s",
392 | "colab_type": "code",
393 | "outputId": "17ecce90-8274-428c-80a4-d48f3d2c145e",
394 | "colab": {
395 | "base_uri": "https://localhost:8080/",
396 | "height": 51
397 | }
398 | },
399 | "source": [
400 | "%cd ..\n",
401 | "%mkdir youtube_videos\n",
402 | "%cd youtube_videos"
403 | ],
404 | "execution_count": 0,
405 | "outputs": [
406 | {
407 | "output_type": "stream",
408 | "text": [
409 | "/content\n",
410 | "/content/youtube_videos\n"
411 | ],
412 | "name": "stdout"
413 | }
414 | ]
415 | },
416 | {
417 | "cell_type": "markdown",
418 | "metadata": {
419 | "id": "W8zVkAuYRxE3",
420 | "colab_type": "text"
421 | },
422 | "source": [
423 |         "Download all playlists of a YouTube channel/user, keeping each playlist in a separate directory, in the best mp4 format available (or the best other format if no mp4 is available)"
424 | ]
425 | },
426 | {
427 | "cell_type": "code",
428 | "metadata": {
429 | "id": "lqBNKUf1OzGW",
430 | "colab_type": "code",
431 | "colab": {}
432 | },
433 | "source": [
434 | "!youtube-dl -f 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best' -o '%(uploader)s/%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s' --restrict-filenames -v https://www.youtube.com/user/Tommydan333"
435 | ],
436 | "execution_count": 0,
437 | "outputs": []
438 | },
439 | {
440 | "cell_type": "markdown",
441 | "metadata": {
442 | "id": "f5oYSS5IQTjK",
443 | "colab_type": "text"
444 | },
445 | "source": [
446 |         "Download only the video description to a .description file (optional)"
447 | ]
448 | },
449 | {
450 | "cell_type": "code",
451 | "metadata": {
452 | "id": "t00Iez7YNYIY",
453 | "colab_type": "code",
454 | "colab": {}
455 | },
456 | "source": [
457 | "!youtube-dl --skip-download --youtube-skip-dash-manifest --write-description -o '%(uploader)s/%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s' --restrict-filenames -v https://www.youtube.com/user/Tommydan333"
458 | ],
459 | "execution_count": 0,
460 | "outputs": []
461 | },
462 | {
463 | "cell_type": "code",
464 | "metadata": {
465 | "id": "UsZ_gW7mN_Ut",
466 | "colab_type": "code",
467 | "colab": {}
468 | },
469 | "source": [
470 | "!ls Tommydan333/Uploads_from_Tommydan333/"
471 | ],
472 | "execution_count": 0,
473 | "outputs": []
474 | },
475 | {
476 | "cell_type": "markdown",
477 | "metadata": {
478 | "id": "i2kHonBeWQdu",
479 | "colab_type": "text"
480 | },
481 | "source": [
482 |         "Zip the downloaded .description files"
483 | ]
484 | },
485 | {
486 | "cell_type": "code",
487 | "metadata": {
488 | "id": "Ro-MIwP4VPGI",
489 | "colab_type": "code",
490 | "colab": {}
491 | },
492 | "source": [
493 | "!zip -r all_video_desc.zip Tommydan333/Uploads_from_Tommydan333/"
494 | ],
495 | "execution_count": 0,
496 | "outputs": []
497 | },
498 | {
499 | "cell_type": "code",
500 | "metadata": {
501 | "id": "TefiiJvwbBIa",
502 | "colab_type": "code",
503 | "colab": {}
504 | },
505 | "source": [
506 | "%ls '../youtube_videos/Tommydan333/'"
507 | ],
508 | "execution_count": 0,
509 | "outputs": []
510 | },
511 | {
512 | "cell_type": "markdown",
513 | "metadata": {
514 | "id": "f-riW8H3F44-",
515 | "colab_type": "text"
516 | },
517 | "source": [
518 | "Backup Videos to Google Drive (optional)"
519 | ]
520 | },
521 | {
522 | "cell_type": "code",
523 | "metadata": {
524 | "id": "VyAVdmG4F5G9",
525 | "colab_type": "code",
526 | "outputId": "a9558cad-8add-468c-b014-9ae8bb2b419b",
527 | "colab": {
528 | "base_uri": "https://localhost:8080/",
529 | "height": 34
530 | }
531 | },
532 | "source": [
533 | "%cp -r '/Tommydan333/.' '../drive/My Drive/Colab Notebooks/Old_BackupVideos/' "
534 | ],
535 | "execution_count": 0,
536 | "outputs": [
537 | {
538 | "output_type": "execute_result",
539 | "data": {
540 | "text/plain": [
541 | "'/content/DeOldify'"
542 | ]
543 | },
544 | "metadata": {
545 | "tags": []
546 | },
547 | "execution_count": 46
548 | }
549 | ]
550 | },
551 | {
552 | "cell_type": "markdown",
553 | "metadata": {
554 | "id": "kUsP_hBRHQWv",
555 | "colab_type": "text"
556 | },
557 | "source": [
558 | "Copy Videos from Google Drive - You can use the drag and drop method to copy data using files manager. (to enable **View > Table of contents**)"
559 | ]
560 | },
561 | {
562 | "cell_type": "code",
563 | "metadata": {
564 | "id": "kU6pMoIQrbxp",
565 | "colab_type": "code",
566 | "colab": {}
567 | },
568 | "source": [
569 | "%mkdir video\n",
570 | "%mkdir video/source\n",
571 | "%cp -r '../drive/My Drive/Colab Notebooks/Old_BackupVideos/.' video/source/\n",
572 | "#%ls 'video/source/'"
573 | ],
574 | "execution_count": 0,
575 | "outputs": []
576 | },
577 | {
578 | "cell_type": "markdown",
579 | "metadata": {
580 | "colab_type": "text",
581 | "id": "sUQrbSYipiJn"
582 | },
583 | "source": [
584 | "#◢ Colorize!!! - Image/Photo"
585 | ]
586 | },
587 | {
588 | "cell_type": "markdown",
589 | "metadata": {
590 | "id": "mYppbaJ9JjxU",
591 | "colab_type": "text"
592 | },
593 | "source": [
594 | "### Colorize from URL"
595 | ]
596 | },
597 | {
598 | "cell_type": "code",
599 | "metadata": {
600 | "id": "gKI8mG2SRsqI",
601 | "colab_type": "code",
602 | "colab": {}
603 | },
604 | "source": [
605 | "source_url = '' #@param {type:\"string\"}\n",
606 | "render_factor = 34 #@param {type: \"slider\", min: 7, max: 45}\n",
607 | "\n",
608 | "if source_url is not None and source_url !='':\n",
609 | " image_path = colorizer_img.plot_transformed_image_from_url(url=source_url, render_factor=render_factor, compare=True)\n",
610 | " show_image_in_notebook(image_path)\n",
611 | "else:\n",
612 | " print('Provide an image url and try again.')"
613 | ],
614 | "execution_count": 0,
615 | "outputs": []
616 | },
617 | {
618 | "cell_type": "markdown",
619 | "metadata": {
620 | "id": "olQwWSaiKLar",
621 | "colab_type": "text"
622 | },
623 | "source": [
624 | "## See how well render_factor values perform on the image here"
625 | ]
626 | },
627 | {
628 | "cell_type": "code",
629 | "metadata": {
630 | "id": "JtlTayn1J6F2",
631 | "colab_type": "code",
632 | "colab": {}
633 | },
634 | "source": [
635 | "for i in range(10,46,2):\n",
636 | " colorizer_img.plot_transformed_image('test_images/image.png', render_factor=i, display_render_factor=True, figsize=(7,7))"
637 | ],
638 | "execution_count": 0,
639 | "outputs": []
640 | },
641 | {
642 | "cell_type": "markdown",
643 | "metadata": {
644 | "id": "C5aoAXSTRyWd",
645 | "colab_type": "text"
646 | },
647 | "source": [
648 | "#◢ Colorize!!! - Video "
649 | ]
650 | },
651 | {
652 | "cell_type": "markdown",
653 | "metadata": {
654 | "id": "5fVnl-YosCad",
655 | "colab_type": "text"
656 | },
657 | "source": [
658 | "### Colorize from URL"
659 | ]
660 | },
661 | {
662 | "cell_type": "code",
663 | "metadata": {
664 | "id": "9yopMdDYJCTd",
665 | "colab_type": "code",
666 | "colab": {}
667 | },
668 | "source": [
669 | "source_url = '' #@param {type:\"string\"}\n",
670 | "render_factor = 21 #@param {type: \"slider\", min: 5, max: 44}\n",
671 | "\n",
672 | "if source_url is not None and source_url !='':\n",
673 | " video_path = colorizer.colorize_from_url(source_url, 'video.mp4', render_factor)\n",
674 | " #show_video_in_notebook(video_path)\n",
675 | "else:\n",
676 | " print('Provide a video url and try again.')"
677 | ],
678 | "execution_count": 0,
679 | "outputs": []
680 | },
681 | {
682 | "cell_type": "markdown",
683 | "metadata": {
684 | "id": "8xhlKlh4sYGF",
685 | "colab_type": "text"
686 | },
687 | "source": [
688 | "### Colorize from File"
689 | ]
690 | },
691 | {
692 | "cell_type": "code",
693 | "metadata": {
694 | "id": "EuwH_hGYd0MZ",
695 | "colab_type": "code",
696 | "colab": {}
697 | },
698 | "source": [
699 | "class color:\n",
700 | " BLUE = '\\033[94m'\n",
701 | " GREEN = '\\033[92m'\n",
702 | " RED = '\\033[91m'\n",
703 | " BOLD = '\\033[1m' \n",
704 | " UNDERLINE = '\\033[4m'\n",
705 | " END = '\\033[0m' \n",
706 | " #The above code is just for fun only!\n",
707 | "\n",
708 | "import os\n",
709 | "import shutil\n",
710 | "from os import listdir\n",
711 | "from os.path import isfile, join\n",
712 | "\n",
713 | "render_factor = 21\n",
714 | "\n",
715 | "#This is the default directory, first you have to copy the video here\n",
716 | "old_video_source = 'video/source/'\n",
717 | "\n",
718 | " \n",
719 | "fileName = []\n",
720 | "i = [] \n",
721 | "fileNames = [f for f in listdir(old_video_source) if isfile(join(old_video_source, f))]\n",
722 | "\n",
723 | "for fileName in fileNames:\n",
724 | " \n",
725 | " try:\n",
726 | " \n",
727 | " #Video proccessing & rendering\n",
728 | " print(color.BOLD + str(fileName) + color.END + color.BLUE + ' ready for proccessig.' + color.END)\n",
729 | " video_path = colorizer.colorize_from_file_name(str(fileName), render_factor) \n",
730 | " print(color.GREEN + 'Video rendering done, Now ' + color.END + color.BOLD + fileName + color.END + ' file ready for copy.')\n",
731 | " \n",
732 | " #Copying file \n",
733 | " build_video_dir = 'video/result/'\n",
734 | " new_build_video_path = build_video_dir + str(fileName)\n",
735 | " target_dir = '../drive/My Drive/Colab Notebooks/Old_ColorizeVideos'\n",
736 | "\n",
737 | " assert not os.path.isabs(new_build_video_path)\n",
738 | " target = os.path.join(target_dir, os.path.dirname(new_build_video_path))\n",
739 | "\n",
740 | " # Create the folders if not already exists\n",
741 | " #os.makedirs(target_dir)\n",
742 | "\n",
743 | " # adding exception handling\n",
744 | " try:\n",
745 | " shutil.copy(new_build_video_path, target_dir)\n",
746 | " print(color.BOLD + fileName + color.RED + \" Successfully Copied to \" + color.END + target_dir + \"\\n\")\n",
747 | " \n",
748 | " except IOError as e: \n",
749 | " print(\"Unable to copy file. %s\" % e)\n",
750 | " \n",
751 | " except:\n",
752 | " print(\"Unexpected error:\", sys.exc_info())\n",
753 | " "
754 | ],
755 | "execution_count": 0,
756 | "outputs": []
757 | },
758 | {
759 | "cell_type": "markdown",
760 | "metadata": {
761 | "id": "ZQGv4GC8JCTi",
762 | "colab_type": "text"
763 | },
764 | "source": [
765 | "### See how well render_factor values perform on a frame here"
766 | ]
767 | },
768 | {
769 | "cell_type": "code",
770 | "metadata": {
771 | "id": "YCm5n1noJCTj",
772 | "colab_type": "code",
773 | "colab": {}
774 | },
775 | "source": [
776 | "for i in range(10,45,2):\n",
777 | " colorizer.vis.plot_transformed_image('video/bwframes/video/00001.jpg', render_factor=i, display_render_factor=True, figsize=(5,5))"
778 | ],
779 | "execution_count": 0,
780 | "outputs": []
781 | },
782 | {
783 | "cell_type": "markdown",
784 | "metadata": {
785 | "colab_type": "text",
786 | "id": "X7Ycv_Y9xAHp"
787 | },
788 | "source": [
789 | "---\n",
790 | "#⚙ Recommended video and gif sources \n",
791 | "* [/r/Nickelodeons/](https://www.reddit.com/r/Nickelodeons/)\n",
792 | "* [r/silentmoviegifs](https://www.reddit.com/r/silentmoviegifs/)\n",
793 | "* https://twitter.com/silentmoviegifs \n",
794 | "\n",
795 | "---\n",
796 | "#⚙ Recommended image sources \n",
797 | "* [/r/TheWayWeWere](https://www.reddit.com/r/TheWayWeWere/)"
798 | ]
799 | }
800 | ]
801 | }
--------------------------------------------------------------------------------
/img/BRICS.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hardik0/Deep-Learning-with-GoogleColab/5a2c5b1ee88f71e9f40c44395b75359db6f80c65/img/BRICS.gif
--------------------------------------------------------------------------------
/img/Guns.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hardik0/Deep-Learning-with-GoogleColab/5a2c5b1ee88f71e9f40c44395b75359db6f80c65/img/Guns.gif
--------------------------------------------------------------------------------
/img/Smoking.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hardik0/Deep-Learning-with-GoogleColab/5a2c5b1ee88f71e9f40c44395b75359db6f80c65/img/Smoking.gif
--------------------------------------------------------------------------------
/img/colorize_photo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hardik0/Deep-Learning-with-GoogleColab/5a2c5b1ee88f71e9f40c44395b75359db6f80c65/img/colorize_photo.jpg
--------------------------------------------------------------------------------
/img/filename.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/img/handgun_yolov4.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hardik0/Deep-Learning-with-GoogleColab/5a2c5b1ee88f71e9f40c44395b75359db6f80c65/img/handgun_yolov4.gif
--------------------------------------------------------------------------------