├── tmp_task.py ├── stream ├── __init__.py ├── __pycache__ │ ├── urls.cpython-35.pyc │ ├── urls.cpython-36.pyc │ ├── urls.cpython-37.pyc │ ├── wsgi.cpython-35.pyc │ ├── wsgi.cpython-36.pyc │ ├── wsgi.cpython-37.pyc │ ├── __init__.cpython-35.pyc │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-37.pyc │ ├── settings.cpython-35.pyc │ ├── settings.cpython-36.pyc │ └── settings.cpython-37.pyc ├── wsgi.py ├── urls.py └── settings.py ├── yolov3_weight └── put_yolo_weight_here.txt ├── webcam ├── models.py ├── tests.py ├── admin.py ├── apps.py ├── templates │ ├── images │ │ ├── japan.jpg │ │ ├── ocst.jpg │ │ ├── team │ │ │ ├── 1.jpg │ │ │ ├── 2.jpg │ │ │ └── 3.jpg │ │ ├── fujifilm.jpg │ │ ├── puppydog.jpg │ │ ├── wooden-desk.jpg │ │ ├── gutman-island.jpg │ │ ├── testimonial01.jpg │ │ ├── testimonial02.jpg │ │ ├── testimonial03.jpg │ │ └── testimonial04.jpg │ ├── fonts │ │ ├── FontAwesome.otf │ │ ├── fontawesome-webfont.eot │ │ ├── fontawesome-webfont.ttf │ │ └── fontawesome-webfont.woff │ ├── camera2.html │ ├── index.html │ ├── camera1.html │ └── css │ │ ├── templatemo_style.css │ │ └── font-awesome.min.css ├── __pycache__ │ ├── apps.cpython-37.pyc │ ├── admin.cpython-35.pyc │ ├── admin.cpython-36.pyc │ ├── admin.cpython-37.pyc │ ├── models.cpython-35.pyc │ ├── models.cpython-36.pyc │ ├── models.cpython-37.pyc │ ├── views.cpython-35.pyc │ ├── views.cpython-36.pyc │ ├── views.cpython-37.pyc │ ├── __init__.cpython-35.pyc │ └── __init__.cpython-36.pyc ├── migrations │ └── __pycache__ │ │ ├── __init__.cpython-35.pyc │ │ └── __init__.cpython-36.pyc └── views.py ├── images ├── cam1.png ├── cam2.png └── home.png ├── data ├── anchors │ ├── coco_anchors.txt │ └── basline_anchors.txt ├── classes │ ├── voc.names │ └── coco.names └── dataset │ ├── voc_test.txt │ └── voc_train.txt ├── static ├── images │ ├── japan.jpg │ ├── ocst.jpg │ ├── team │ │ ├── 1.jpg │ │ ├── 2.jpg │ │ └── 3.jpg │ ├── fujifilm.jpg │ ├── puppydog.jpg │ ├── wooden-desk.jpg │ ├── gutman-island.jpg │ ├── 
testimonial01.jpg │ ├── testimonial02.jpg │ ├── testimonial03.jpg │ └── testimonial04.jpg └── css │ ├── templatemo_style.css │ └── font-awesome.min.css ├── core ├── __pycache__ │ ├── common.cpython-36.pyc │ ├── config.cpython-36.pyc │ ├── utils.cpython-36.pyc │ ├── yolov3.cpython-36.pyc │ ├── __init__.cpython-36.pyc │ └── backbone.cpython-36.pyc ├── config.py ├── backbone.py ├── common.py ├── utils.py ├── dataset.py └── yolov3.py ├── manage.py ├── requirements.txt ├── LICENSE ├── README.md ├── webcam_detection.py └── train.py /tmp_task.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /stream/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /yolov3_weight/put_yolo_weight_here.txt: -------------------------------------------------------------------------------- 1 | put_yolo_weight_here!!! -------------------------------------------------------------------------------- /webcam/models.py: -------------------------------------------------------------------------------- 1 | from django.db import models 2 | 3 | # Create your models here. 4 | -------------------------------------------------------------------------------- /webcam/tests.py: -------------------------------------------------------------------------------- 1 | from django.test import TestCase 2 | 3 | # Create your tests here. 4 | -------------------------------------------------------------------------------- /webcam/admin.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | 3 | # Register your models here. 
4 | -------------------------------------------------------------------------------- /images/cam1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/images/cam1.png -------------------------------------------------------------------------------- /images/cam2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/images/cam2.png -------------------------------------------------------------------------------- /images/home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/images/home.png -------------------------------------------------------------------------------- /data/anchors/coco_anchors.txt: -------------------------------------------------------------------------------- 1 | 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 2 | -------------------------------------------------------------------------------- /static/images/japan.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/japan.jpg -------------------------------------------------------------------------------- /static/images/ocst.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/ocst.jpg -------------------------------------------------------------------------------- /static/images/team/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/team/1.jpg 
-------------------------------------------------------------------------------- /static/images/team/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/team/2.jpg -------------------------------------------------------------------------------- /static/images/team/3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/team/3.jpg -------------------------------------------------------------------------------- /static/images/fujifilm.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/fujifilm.jpg -------------------------------------------------------------------------------- /static/images/puppydog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/puppydog.jpg -------------------------------------------------------------------------------- /webcam/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | 4 | class WebcamConfig(AppConfig): 5 | name = 'webcam' 6 | -------------------------------------------------------------------------------- /static/images/wooden-desk.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/wooden-desk.jpg -------------------------------------------------------------------------------- /static/images/gutman-island.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/gutman-island.jpg -------------------------------------------------------------------------------- /static/images/testimonial01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/testimonial01.jpg -------------------------------------------------------------------------------- /static/images/testimonial02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/testimonial02.jpg -------------------------------------------------------------------------------- /static/images/testimonial03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/testimonial03.jpg -------------------------------------------------------------------------------- /static/images/testimonial04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/static/images/testimonial04.jpg -------------------------------------------------------------------------------- /webcam/templates/images/japan.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/japan.jpg -------------------------------------------------------------------------------- /webcam/templates/images/ocst.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/ocst.jpg -------------------------------------------------------------------------------- 
/webcam/templates/images/team/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/team/1.jpg -------------------------------------------------------------------------------- /webcam/templates/images/team/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/team/2.jpg -------------------------------------------------------------------------------- /webcam/templates/images/team/3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/team/3.jpg -------------------------------------------------------------------------------- /core/__pycache__/common.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/core/__pycache__/common.cpython-36.pyc -------------------------------------------------------------------------------- /core/__pycache__/config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/core/__pycache__/config.cpython-36.pyc -------------------------------------------------------------------------------- /core/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/core/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /core/__pycache__/yolov3.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/core/__pycache__/yolov3.cpython-36.pyc -------------------------------------------------------------------------------- /stream/__pycache__/urls.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/urls.cpython-35.pyc -------------------------------------------------------------------------------- /stream/__pycache__/urls.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/urls.cpython-36.pyc -------------------------------------------------------------------------------- /stream/__pycache__/urls.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/urls.cpython-37.pyc -------------------------------------------------------------------------------- /stream/__pycache__/wsgi.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/wsgi.cpython-35.pyc -------------------------------------------------------------------------------- /stream/__pycache__/wsgi.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/wsgi.cpython-36.pyc -------------------------------------------------------------------------------- /stream/__pycache__/wsgi.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/wsgi.cpython-37.pyc 
-------------------------------------------------------------------------------- /webcam/__pycache__/apps.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/apps.cpython-37.pyc -------------------------------------------------------------------------------- /webcam/templates/fonts/FontAwesome.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/fonts/FontAwesome.otf -------------------------------------------------------------------------------- /webcam/templates/images/fujifilm.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/fujifilm.jpg -------------------------------------------------------------------------------- /webcam/templates/images/puppydog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/puppydog.jpg -------------------------------------------------------------------------------- /core/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/core/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /core/__pycache__/backbone.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/core/__pycache__/backbone.cpython-36.pyc -------------------------------------------------------------------------------- 
/webcam/__pycache__/admin.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/admin.cpython-35.pyc -------------------------------------------------------------------------------- /webcam/__pycache__/admin.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/admin.cpython-36.pyc -------------------------------------------------------------------------------- /webcam/__pycache__/admin.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/admin.cpython-37.pyc -------------------------------------------------------------------------------- /webcam/__pycache__/models.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/models.cpython-35.pyc -------------------------------------------------------------------------------- /webcam/__pycache__/models.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/models.cpython-36.pyc -------------------------------------------------------------------------------- /webcam/__pycache__/models.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/models.cpython-37.pyc -------------------------------------------------------------------------------- /webcam/__pycache__/views.cpython-35.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/views.cpython-35.pyc -------------------------------------------------------------------------------- /webcam/__pycache__/views.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/views.cpython-36.pyc -------------------------------------------------------------------------------- /webcam/__pycache__/views.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/views.cpython-37.pyc -------------------------------------------------------------------------------- /webcam/templates/images/wooden-desk.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/wooden-desk.jpg -------------------------------------------------------------------------------- /stream/__pycache__/__init__.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/__init__.cpython-35.pyc -------------------------------------------------------------------------------- /stream/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /stream/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /stream/__pycache__/settings.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/settings.cpython-35.pyc -------------------------------------------------------------------------------- /stream/__pycache__/settings.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/settings.cpython-36.pyc -------------------------------------------------------------------------------- /stream/__pycache__/settings.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/stream/__pycache__/settings.cpython-37.pyc -------------------------------------------------------------------------------- /webcam/__pycache__/__init__.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/__init__.cpython-35.pyc -------------------------------------------------------------------------------- /webcam/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /webcam/templates/images/gutman-island.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/gutman-island.jpg -------------------------------------------------------------------------------- /webcam/templates/images/testimonial01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/testimonial01.jpg -------------------------------------------------------------------------------- /webcam/templates/images/testimonial02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/testimonial02.jpg -------------------------------------------------------------------------------- /webcam/templates/images/testimonial03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/testimonial03.jpg -------------------------------------------------------------------------------- /webcam/templates/images/testimonial04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/images/testimonial04.jpg -------------------------------------------------------------------------------- /data/anchors/basline_anchors.txt: -------------------------------------------------------------------------------- 1 | 1.25,1.625, 2.0,3.75, 4.125,2.875, 1.875,3.8125, 3.875,2.8125, 3.6875,7.4375, 3.625,2.8125, 4.875,6.1875, 11.65625,10.1875 2 | -------------------------------------------------------------------------------- /webcam/templates/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /webcam/templates/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /webcam/templates/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/templates/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /webcam/migrations/__pycache__/__init__.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/migrations/__pycache__/__init__.cpython-35.pyc -------------------------------------------------------------------------------- /webcam/migrations/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranleanh/yolo-django-streaming/HEAD/webcam/migrations/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /data/classes/voc.names: -------------------------------------------------------------------------------- 1 | aeroplane 2 | bicycle 3 | bird 4 | boat 5 | bottle 6 | bus 7 | car 8 | cat 9 | chair 10 | cow 11 | diningtable 12 | dog 13 | horse 14 | motorbike 15 | person 16 | pottedplant 17 | sheep 18 | sofa 19 | train 20 | tvmonitor -------------------------------------------------------------------------------- /stream/wsgi.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for stream project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stream.settings') 15 | 16 | application = get_wsgi_application() 17 | -------------------------------------------------------------------------------- /manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Django's command-line utility for administrative tasks.""" 3 | import os 4 | import sys 5 | 6 | 7 | def main(): 8 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stream.settings') 9 | try: 10 | from django.core.management import execute_from_command_line 11 | except ImportError as exc: 12 | raise ImportError( 13 | "Couldn't import Django. Are you sure it's installed and " 14 | "available on your PYTHONPATH environment variable? Did you " 15 | "forget to activate a virtual environment?" 
16 | ) from exc 17 | execute_from_command_line(sys.argv) 18 | 19 | 20 | if __name__ == '__main__': 21 | main() 22 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.13.0 2 | asgiref==3.4.1 3 | astor==0.8.1 4 | cached-property==1.5.2 5 | Django==3.2.5 6 | easydict==1.9 7 | gast==0.2.2 8 | google-pasta==0.2.0 9 | grpcio==1.39.0 10 | h5py==3.3.0 11 | importlib-metadata==4.6.1 12 | Keras-Applications==1.0.8 13 | Keras-Preprocessing==1.1.2 14 | Markdown==3.3.4 15 | mysqlclient==2.0.3 16 | numpy==1.21.1 17 | opencv-python==4.5.3.56 18 | opt-einsum==3.3.0 19 | Pillow==8.3.1 20 | protobuf==3.17.3 21 | pytz==2021.1 22 | six==1.16.0 23 | sqlparse==0.4.1 24 | tensorboard==1.15.0 25 | tensorflow==1.15.0 26 | tensorflow-estimator==1.15.1 27 | termcolor==1.1.0 28 | typing-extensions==3.10.0.0 29 | Werkzeug==2.0.1 30 | wrapt==1.12.1 31 | zipp==3.5.0 32 | -------------------------------------------------------------------------------- /data/classes/coco.names: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorbike 5 | aeroplane 6 | bus 7 | train 8 | truck 9 | boat 10 | traffic light 11 | fire hydrant 12 | stop sign 13 | parking meter 14 | bench 15 | bird 16 | cat 17 | dog 18 | horse 19 | sheep 20 | cow 21 | elephant 22 | bear 23 | zebra 24 | giraffe 25 | backpack 26 | umbrella 27 | handbag 28 | tie 29 | suitcase 30 | frisbee 31 | skis 32 | snowboard 33 | sports ball 34 | kite 35 | baseball bat 36 | baseball glove 37 | skateboard 38 | surfboard 39 | tennis racket 40 | bottle 41 | wine glass 42 | cup 43 | fork 44 | knife 45 | spoon 46 | bowl 47 | banana 48 | apple 49 | sandwich 50 | orange 51 | broccoli 52 | carrot 53 | hot dog 54 | pizza 55 | donut 56 | cake 57 | chair 58 | sofa 59 | pottedplant 60 | bed 61 | diningtable 62 | toilet 63 | tvmonitor 64 | laptop 65 | 
mouse 66 | remote 67 | keyboard 68 | cell phone 69 | microwave 70 | oven 71 | toaster 72 | sink 73 | refrigerator 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy bear 79 | hair drier 80 | toothbrush 81 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 LA Tran 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /stream/urls.py: -------------------------------------------------------------------------------- 1 | """stream URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/2.2/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. 
Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | # 2019-07-23 Modified by Tran Le Anh 17 | 18 | from django.contrib import admin 19 | from django.urls import path 20 | from webcam.views import index, video_feed_1, video_feed_2, camera_1, camera_2 21 | # from webcam.views import database, search 22 | 23 | urlpatterns = [ 24 | path('admin/', admin.site.urls), 25 | path('index/', index), 26 | path('video_feed_1/', video_feed_1, name="video-feed-1"), 27 | path('video_feed_2/', video_feed_2, name="video-feed-2"), 28 | path('index/camera1/', camera_1), 29 | path('index/camera2/', camera_2), 30 | # path('index/database/', database), 31 | # path('index/database/50latest', database), 32 | # path('index/database/search', search), 33 | ] 34 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # yolov3-django-streaming 2 | 3 | This project is to stream object detection (yolov3) with 2 cameras (2 switchable channels) on web browser using Django framework. 4 | 5 | The project can be deployed on Ubuntu and Windows. 6 | 7 | Watch final result: https://www.youtube.com/watch?v=SDnpNd7xRbE&t=10s 8 | 9 | ## 1. Steps to use 10 | 1. Download file "yolov3_coco.pb" from the link below and locate it in folder "yolov3_weight": ([gotolink](https://drive.google.com/drive/u/1/folders/1apB-yPIxxzC9D6_iAaQrXWuGpbWIK6Lp)) 11 | ```bashrc 12 | https://drive.google.com/drive/u/1/folders/1apB-yPIxxzC9D6_iAaQrXWuGpbWIK6Lp 13 | ``` 14 | 2. 
Create your virtual environment and install required packages: 15 | ```bashrc 16 | $ pip install -r requirements.txt 17 | ``` 18 | 3. Run program: 19 | ```bashrc 20 | $ python manage.py runserver 21 | ``` 22 | 4. Open any web browser and navigate to URL (home page): 23 | ```bashrc 24 | http://127.0.0.1:8000/index 25 | ``` 26 | ## 2. What should happen then? 27 | (don't worry, my face will not be on your screen) 28 | - Camera 1: 29 | 30 | ![picture](images/cam1.png) 31 | 32 | - Camera 2: 33 | 34 | ![picture](images/cam2.png) 35 | 36 | ## 3. Be careful 37 | - The 2 camera ids in the source code are "0" and "2" (for my computer). 38 | - You should change them for running on any other computers. Go to webcam/views.py then find and change "cam_id" parameter. 39 | 40 | 41 | ## Acknowledgement 42 | - The yolov3 implementation was borrowed from [YunYang1994](https://github.com/YunYang1994/tensorflow-yolov3) 43 | 44 | 45 | ## Cite This Project 46 | ```bashrc 47 | @article{tran2020yolostream, 48 | title={Object Detection Streaming and Data Management on Web Browser}, 49 | author={Tran, Le-Anh}, 50 | journal={Technical Report}, 51 | year={2020} 52 | } 53 | ``` 54 | 55 | Good luck. 56 | 57 | Created on July 11, 2019. 58 | 59 | Last update on July 22, 2021. 
(because I had received tons of emails for fixing this project, thanks) 60 | 61 | Tran Le Anh ([LA Tran](https://sites.google.com/view/leanhtran/)) 62 | 63 | -------------------------------------------------------------------------------- /data/dataset/voc_test.txt: -------------------------------------------------------------------------------- 1 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000001.jpg 48,240,195,371,11 8,12,352,498,14 2 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000002.jpg 139,200,207,301,18 3 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000003.jpg 123,155,215,195,17 239,156,307,205,8 4 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000004.jpg 13,311,84,362,6 362,330,500,389,6 235,328,334,375,6 175,327,252,364,6 139,320,189,359,6 108,325,150,353,6 84,323,121,350,6 5 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000006.jpg 187,135,282,242,15 154,209,369,375,10 255,207,366,375,8 138,211,249,375,8 6 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000008.jpg 192,16,364,249,8 7 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000010.jpg 87,97,258,427,12 133,72,245,284,14 8 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000011.jpg 126,51,330,308,7 9 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000013.jpg 299,160,446,252,9 10 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000014.jpg 72,163,302,228,5 185,194,500,316,6 416,180,500,222,6 314,8,344,65,14 331,4,361,61,14 357,8,401,61,14 11 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000015.jpg 77,136,360,358,1 12 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000018.jpg 31,30,358,279,11 13 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000022.jpg 68,103,368,283,12 186,44,255,230,14 14 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000025.jpg 2,84,59,248,9 68,115,233,279,9 64,173,377,373,9 320,2,496,375,14 221,4,341,374,14 135,14,220,148,14 69,43,156,177,9 58,54,104,139,14 
279,1,331,86,14 320,22,344,96,14 15 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000027.jpg 174,101,349,351,14 16 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000028.jpg 63,18,374,500,7 17 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000029.jpg 56,63,284,290,11 18 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000031.jpg 41,77,430,255,18 19 | /home/yang/test/VOC/test/VOCdevkit/VOC2007/JPEGImages/000037.jpg 61,96,464,339,11 20 | -------------------------------------------------------------------------------- /data/dataset/voc_train.txt: -------------------------------------------------------------------------------- 1 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000005.jpg 263,211,324,339,8 165,264,253,372,8 241,194,295,299,8 2 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000007.jpg 141,50,500,330,6 3 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000009.jpg 69,172,270,330,12 150,141,229,284,14 285,201,327,331,14 258,198,297,329,14 4 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000012.jpg 156,97,351,270,6 5 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000016.jpg 92,72,305,473,1 6 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000017.jpg 185,62,279,199,14 90,78,403,336,12 7 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000019.jpg 231,88,483,256,7 11,113,266,259,7 8 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000020.jpg 33,148,371,416,6 9 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000021.jpg 1,235,182,388,11 210,36,336,482,14 46,82,170,365,14 11,181,142,419,14 10 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000023.jpg 9,230,245,500,1 230,220,334,500,1 2,1,117,369,14 3,2,243,462,14 225,1,334,486,14 11 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000024.jpg 196,165,489,247,18 12 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000026.jpg 90,125,337,212,6 13 | 
/home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000030.jpg 36,205,180,289,1 51,160,150,292,14 295,138,450,290,14 14 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000032.jpg 104,78,375,183,0 133,88,197,123,0 195,180,213,229,14 26,189,44,238,14 15 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000033.jpg 9,107,499,263,0 421,200,482,226,0 325,188,411,223,0 16 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000034.jpg 116,167,360,400,18 141,153,333,229,18 17 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000035.jpg 1,96,191,361,14 218,98,465,318,14 18 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000036.jpg 27,79,319,344,11 19 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000039.jpg 156,89,344,279,19 20 | /home/yang/test/VOC/train/VOCdevkit/VOC2007/JPEGImages/000041.jpg 363,47,432,107,19 216,92,307,302,14 164,148,227,244,14 21 | -------------------------------------------------------------------------------- /core/config.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # coding=utf-8 3 | #================================================================ 4 | # Copyright (C) 2019 * Ltd. All rights reserved. 
#
#   Editor      : VIM
#   File name   : config.py
#   Author      : YunYang1994
#   Created date: 2019-02-28 13:06:54
#   Description : central YOLOv3 configuration; import as `from core.config import cfg`
#
#================================================================

from easydict import EasyDict as edict


__C = edict()
# Consumers can get config by: from config import cfg
cfg = __C

# YOLO options ---------------------------------------------------
__C.YOLO = edict()

__C.YOLO.CLASSES = "./data/classes/coco.names"            # class-name file (80 COCO classes)
__C.YOLO.ANCHORS = "./data/anchors/basline_anchors.txt"   # anchor file ("basline" matches the on-disk name)
__C.YOLO.MOVING_AVE_DECAY = 0.9995
__C.YOLO.STRIDES = [8, 16, 32]                            # output strides of the 3 detection scales
__C.YOLO.ANCHOR_PER_SCALE = 3
__C.YOLO.IOU_LOSS_THRESH = 0.5
__C.YOLO.UPSAMPLE_METHOD = "resize"                       # "resize" or "deconv" (see core/common.upsample)
__C.YOLO.ORIGINAL_WEIGHT = "./checkpoint/yolov3_coco.ckpt"
__C.YOLO.DEMO_WEIGHT = "./checkpoint/yolov3_coco_demo.ckpt"

# Train options --------------------------------------------------
__C.TRAIN = edict()

__C.TRAIN.ANNOT_PATH = "./data/dataset/voc_train.txt"
__C.TRAIN.BATCH_SIZE = 6
__C.TRAIN.INPUT_SIZE = [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]  # multi-scale training sizes
__C.TRAIN.DATA_AUG = True
__C.TRAIN.LEARN_RATE_INIT = 1e-4
__C.TRAIN.LEARN_RATE_END = 1e-6
__C.TRAIN.WARMUP_EPOCHS = 2
# (sic) misspelled key kept unchanged: existing callers read FISRT_STAGE_EPOCHS.
__C.TRAIN.FISRT_STAGE_EPOCHS = 20
# Correctly spelled alias for the key above; new code should read this one.
__C.TRAIN.FIRST_STAGE_EPOCHS = __C.TRAIN.FISRT_STAGE_EPOCHS
__C.TRAIN.SECOND_STAGE_EPOCHS = 30
__C.TRAIN.INITIAL_WEIGHT = "./checkpoint/yolov3_coco_demo.ckpt"

# Test options ---------------------------------------------------
__C.TEST = edict()

__C.TEST.ANNOT_PATH = "./data/dataset/voc_test.txt"
__C.TEST.BATCH_SIZE = 2
__C.TEST.INPUT_SIZE = 544
__C.TEST.DATA_AUG = False
__C.TEST.WRITE_IMAGE = True
__C.TEST.WRITE_IMAGE_PATH = "./data/detection/"
__C.TEST.WRITE_IMAGE_SHOW_LABEL = False
__C.TEST.WEIGHT_FILE = "./checkpoint/yolov3_test_loss=9.2099.ckpt-5"
__C.TEST.SHOW_LABEL = False
__C.TEST.SCORE_THRESHOLD = 0.3
__C.TEST.IOU_THRESHOLD = 0.45
-------------------------------------------------------------------------------- /core/backbone.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # coding=utf-8 3 | #================================================================ 4 | # Copyright (C) 2019 * Ltd. All rights reserved. 5 | # 6 | # Editor : VIM 7 | # File name : backbone.py 8 | # Author : YunYang1994 9 | # Created date: 2019-02-17 11:03:35 10 | # Description : 11 | # 12 | #================================================================ 13 | 14 | import core.common as common 15 | import tensorflow as tf 16 | 17 | 18 | def darknet53(input_data, trainable): 19 | 20 | with tf.variable_scope('darknet'): 21 | 22 | input_data = common.convolutional(input_data, filters_shape=(3, 3, 3, 32), trainable=trainable, name='conv0') 23 | input_data = common.convolutional(input_data, filters_shape=(3, 3, 32, 64), 24 | trainable=trainable, name='conv1', downsample=True) 25 | 26 | for i in range(1): 27 | input_data = common.residual_block(input_data, 64, 32, 64, trainable=trainable, name='residual%d' %(i+0)) 28 | 29 | input_data = common.convolutional(input_data, filters_shape=(3, 3, 64, 128), 30 | trainable=trainable, name='conv4', downsample=True) 31 | 32 | for i in range(2): 33 | input_data = common.residual_block(input_data, 128, 64, 128, trainable=trainable, name='residual%d' %(i+1)) 34 | 35 | input_data = common.convolutional(input_data, filters_shape=(3, 3, 128, 256), 36 | trainable=trainable, name='conv9', downsample=True) 37 | 38 | for i in range(8): 39 | input_data = common.residual_block(input_data, 256, 128, 256, trainable=trainable, name='residual%d' %(i+3)) 40 | 41 | route_1 = input_data 42 | input_data = common.convolutional(input_data, filters_shape=(3, 3, 256, 512), 43 | trainable=trainable, name='conv26', downsample=True) 44 | 45 | for i in range(8): 46 | input_data = common.residual_block(input_data, 512, 256, 512, trainable=trainable, 
name='residual%d' %(i+11)) 47 | 48 | route_2 = input_data 49 | input_data = common.convolutional(input_data, filters_shape=(3, 3, 512, 1024), 50 | trainable=trainable, name='conv43', downsample=True) 51 | 52 | for i in range(4): 53 | input_data = common.residual_block(input_data, 1024, 512, 1024, trainable=trainable, name='residual%d' %(i+19)) 54 | 55 | return route_1, route_2, input_data 56 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /webcam/templates/camera2.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 8 | Camera 2 9 | 10 | 11 | 12 | 13 | 14 | 15 | {% load static %} 16 | 17 | 18 | 19 | 20 | 21 | 22 | 30 |
31 |
32 |

Object Detection

33 |
34 | 42 |
43 |
44 |
45 |

Streaming App

46 |
47 | 48 |

49 | 50 | 53 | 54 |
55 |

Camera 2

56 | 57 |

58 |

59 |

60 | 61 |

62 | 63 |
64 | 65 |
66 | 67 | 68 |
69 |
70 |
71 |
72 | 73 | -------------------------------------------------------------------------------- /webcam/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 8 | Home 9 | 10 | 11 | 12 | 13 | 14 | 15 | {% load static %} 16 | 17 | 18 | 19 | 20 | 21 | 22 | 30 |
31 |
32 |

Object Detection

33 |
34 | 41 |
42 |
43 |
44 |

Streaming App

45 |
46 | 47 |

48 | 49 | 52 | 53 |
54 |

Camera 1 (Default)

55 | 56 |

57 |

58 |

59 | 60 |

61 | 62 |
63 | 64 |
65 | 66 | 67 |
68 |
69 |
70 |
71 | 72 | -------------------------------------------------------------------------------- /webcam/templates/camera1.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 8 | Camera 1 9 | 10 | 11 | 12 | 13 | 14 | 15 | {% load static %} 16 | 17 | 18 | 19 | 20 | 21 | 22 | 30 |
31 |
32 |

Object Detection

33 |
34 | 43 |
44 |
45 |
46 |

Streaming App

47 |
48 | 49 |

50 | 51 | 54 | 55 |
56 |

Camera 1

57 |

58 |

59 |

60 | 61 |

62 | 63 |
64 | 65 |
66 | 67 | 68 |
69 |
70 |
71 |
72 | 73 | -------------------------------------------------------------------------------- /webcam_detection.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import time 3 | import numpy as np 4 | import core.utils as utils 5 | import tensorflow as tf 6 | from PIL import Image 7 | 8 | obj_classes = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"] 9 | 10 | return_elements = ["input/input_data:0", "pred_sbbox/concat_2:0", "pred_mbbox/concat_2:0", "pred_lbbox/concat_2:0"] 11 | pb_file = "./yolov3_coco.pb" 12 | # video_path = "./docs/images/road.mp4" 13 | video_path = 0 14 | num_classes = 80 15 | input_size = 416 16 | graph = tf.Graph() 17 | return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements) 18 | 19 | vid = cv2.VideoCapture(video_path) 20 | 21 | def detection(vid): 22 | with tf.Session(graph=graph) as sess: 23 | 24 | return_value, frame = vid.read() 25 | if return_value: 26 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) 27 | image = Image.fromarray(frame) 28 | else: 29 | raise ValueError("No image!") 30 | 31 | 32 | frame_size = frame.shape[:2] 33 | image_data = 
utils.image_preporcess(np.copy(frame), [input_size, input_size]) 34 | image_data = image_data[np.newaxis, ...] 35 | prev_time = time.time() 36 | 37 | pred_sbbox, pred_mbbox, pred_lbbox = sess.run( 38 | [return_tensors[1], return_tensors[2], return_tensors[3]], 39 | feed_dict={ return_tensors[0]: image_data}) 40 | 41 | pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)), 42 | np.reshape(pred_mbbox, (-1, 5 + num_classes)), 43 | np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0) 44 | 45 | bboxes = utils.postprocess_boxes(pred_bbox, frame_size, input_size, 0.3) 46 | bboxes = utils.nms(bboxes, 0.45, method='nms') 47 | image, detected = utils.draw_bbox(frame, bboxes) 48 | 49 | 50 | detected = np.asarray(detected) 51 | 52 | print("------- frame i ---------") 53 | 54 | class_count = [] 55 | 56 | for i in range(len(obj_classes)): # 80 57 | obj_count = 0 58 | for j in range(len(detected)): 59 | if int(detected[j][5]) == i: obj_count += 1 60 | 61 | class_count = np.append(class_count, obj_count) 62 | 63 | 64 | 65 | curr_time = time.time() 66 | exec_time = curr_time - prev_time 67 | result = np.asarray(image) 68 | info = "time: %.2f ms" %(1000*exec_time) 69 | # cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE) 70 | result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) 71 | 72 | return result, class_count 73 | 74 | # if __name__ == "__main__": 75 | 76 | # while True: 77 | # result = detection(vid) 78 | 79 | # cv2.imshow("result", result) 80 | # if cv2.waitKey(1) & 0xFF == ord('q'): break -------------------------------------------------------------------------------- /core/common.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # coding=utf-8 3 | #================================================================ 4 | # Copyright (C) 2019 * Ltd. All rights reserved. 
5 | # 6 | # Editor : VIM 7 | # File name : common.py 8 | # Author : YunYang1994 9 | # Created date: 2019-02-28 09:56:29 10 | # Description : 11 | # 12 | #================================================================ 13 | 14 | import tensorflow as tf 15 | 16 | 17 | def convolutional(input_data, filters_shape, trainable, name, downsample=False, activate=True, bn=True): 18 | 19 | with tf.variable_scope(name): 20 | if downsample: 21 | pad_h, pad_w = (filters_shape[0] - 2) // 2 + 1, (filters_shape[1] - 2) // 2 + 1 22 | paddings = tf.constant([[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]]) 23 | input_data = tf.pad(input_data, paddings, 'CONSTANT') 24 | strides = (1, 2, 2, 1) 25 | padding = 'VALID' 26 | else: 27 | strides = (1, 1, 1, 1) 28 | padding = "SAME" 29 | 30 | weight = tf.get_variable(name='weight', dtype=tf.float32, trainable=True, 31 | shape=filters_shape, initializer=tf.random_normal_initializer(stddev=0.01)) 32 | conv = tf.nn.conv2d(input=input_data, filter=weight, strides=strides, padding=padding) 33 | 34 | if bn: 35 | conv = tf.layers.batch_normalization(conv, beta_initializer=tf.zeros_initializer(), 36 | gamma_initializer=tf.ones_initializer(), 37 | moving_mean_initializer=tf.zeros_initializer(), 38 | moving_variance_initializer=tf.ones_initializer(), training=trainable) 39 | else: 40 | bias = tf.get_variable(name='bias', shape=filters_shape[-1], trainable=True, 41 | dtype=tf.float32, initializer=tf.constant_initializer(0.0)) 42 | conv = tf.nn.bias_add(conv, bias) 43 | 44 | if activate == True: conv = tf.nn.leaky_relu(conv, alpha=0.1) 45 | 46 | return conv 47 | 48 | 49 | def residual_block(input_data, input_channel, filter_num1, filter_num2, trainable, name): 50 | 51 | short_cut = input_data 52 | 53 | with tf.variable_scope(name): 54 | input_data = convolutional(input_data, filters_shape=(1, 1, input_channel, filter_num1), 55 | trainable=trainable, name='conv1') 56 | input_data = convolutional(input_data, filters_shape=(3, 3, filter_num1, filter_num2), 
57 | trainable=trainable, name='conv2') 58 | 59 | residual_output = input_data + short_cut 60 | 61 | return residual_output 62 | 63 | 64 | 65 | def route(name, previous_output, current_output): 66 | 67 | with tf.variable_scope(name): 68 | output = tf.concat([current_output, previous_output], axis=-1) 69 | 70 | return output 71 | 72 | 73 | def upsample(input_data, name, method="deconv"): 74 | assert method in ["resize", "deconv"] 75 | 76 | if method == "resize": 77 | with tf.variable_scope(name): 78 | input_shape = tf.shape(input_data) 79 | output = tf.image.resize_nearest_neighbor(input_data, (input_shape[1] * 2, input_shape[2] * 2)) 80 | 81 | if method == "deconv": 82 | # replace resize_nearest_neighbor with conv2d_transpose To support TensorRT optimization 83 | numm_filter = input_data.shape.as_list()[-1] 84 | output = tf.layers.conv2d_transpose(input_data, numm_filter, kernel_size=2, padding='same', 85 | strides=(2,2), kernel_initializer=tf.random_normal_initializer()) 86 | 87 | return output 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /stream/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for stream project. 3 | 4 | Generated by 'django-admin startproject' using Django 2.2.3. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/2.2/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/2.2/ref/settings/ 11 | """ 12 | 13 | # 2019-07-23 Modified by Tran Le Anh 14 | 15 | import os 16 | 17 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 
18 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 19 | 20 | 21 | # Quick-start development settings - unsuitable for production 22 | # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ 23 | 24 | # SECURITY WARNING: keep the secret key used in production secret! 25 | SECRET_KEY = 'os*hqvscy)s#9m!u0kcr-^_$)8@upg94&-bi9oso*=rrvx6$&x' 26 | 27 | # SECURITY WARNING: don't run with debug turned on in production! 28 | DEBUG = True 29 | 30 | ALLOWED_HOSTS = ['192.168.11.232', '127.0.0.1'] 31 | 32 | 33 | # Application definition 34 | 35 | INSTALLED_APPS = [ 36 | 'django.contrib.admin', 37 | 'django.contrib.auth', 38 | 'django.contrib.contenttypes', 39 | 'django.contrib.sessions', 40 | 'django.contrib.messages', 41 | 'django.contrib.staticfiles', 42 | 'webcam' 43 | ] 44 | 45 | MIDDLEWARE = [ 46 | 'django.middleware.security.SecurityMiddleware', 47 | 'django.contrib.sessions.middleware.SessionMiddleware', 48 | 'django.middleware.common.CommonMiddleware', 49 | 'django.middleware.csrf.CsrfViewMiddleware', 50 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 51 | 'django.contrib.messages.middleware.MessageMiddleware', 52 | 'django.middleware.clickjacking.XFrameOptionsMiddleware', 53 | ] 54 | 55 | ROOT_URLCONF = 'stream.urls' 56 | 57 | TEMPLATES = [ 58 | { 59 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 60 | 'DIRS': [], 61 | # 'DIRS': [os.path.join(BASE_DIR, 'templates')], 62 | 'APP_DIRS': True, 63 | 'OPTIONS': { 64 | 'context_processors': [ 65 | 'django.template.context_processors.debug', 66 | 'django.template.context_processors.request', 67 | 'django.contrib.auth.context_processors.auth', 68 | 'django.contrib.messages.context_processors.messages', 69 | ], 70 | }, 71 | }, 72 | ] 73 | 74 | WSGI_APPLICATION = 'stream.wsgi.application' 75 | 76 | 77 | # Database 78 | # https://docs.djangoproject.com/en/2.2/ref/settings/#databases 79 | 80 | # DATABASES = { 81 | # # 'default': { 82 | # # 'ENGINE': 
'django.db.backends.sqlite3', 83 | # # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 84 | # # } 85 | 86 | # 'default': { 87 | # # MySQL database engine class. 88 | # 'ENGINE': 'django.db.backends.mysql', 89 | # # MySQL database host ip. 90 | # 'HOST': '127.0.0.1', 91 | # # port number. 92 | # 'PORT': '3306', 93 | # # database name. 94 | # 'NAME': 'ocst', 95 | # # user name. 96 | # 'USER': 'root', 97 | # # password 98 | # 'PASSWORD': 'password', 99 | # # connect options 100 | # 'OPTIONS': {'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",}, 101 | # } 102 | # } 103 | 104 | 105 | # Password validation 106 | # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators 107 | 108 | AUTH_PASSWORD_VALIDATORS = [ 109 | { 110 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 111 | }, 112 | { 113 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 114 | }, 115 | { 116 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 117 | }, 118 | { 119 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 120 | }, 121 | ] 122 | 123 | 124 | # Internationalization 125 | # https://docs.djangoproject.com/en/2.2/topics/i18n/ 126 | 127 | LANGUAGE_CODE = 'en-us' 128 | 129 | # TIME_ZONE = 'UTC' 130 | TIME_ZONE = 'Asia/Seoul' 131 | 132 | USE_I18N = True 133 | 134 | USE_L10N = True 135 | 136 | # USE_TZ = True 137 | USE_TZ = False 138 | 139 | 140 | # Static files (CSS, JavaScript, Images) 141 | # https://docs.djangoproject.com/en/2.2/howto/static-files/ 142 | 143 | STATIC_URL = '/static/' 144 | 145 | STATICFILES_DIRS = [ 146 | os.path.join(BASE_DIR, "static"), 147 | ] 148 | -------------------------------------------------------------------------------- /webcam/views.py: -------------------------------------------------------------------------------- 1 | from django.shortcuts import render 2 | 3 | # Create your views here. 
from django.http import HttpResponse
from django.template import RequestContext, loader
from django.http.response import StreamingHttpResponse

import cv2
import numpy as np
import datetime
import time

import core.utils as utils
import tensorflow as tf
from PIL import Image

# HOME PAGE -------------------------
def index(request):
    """Render the home page (shows the camera-1 stream by default)."""
    template = loader.get_template('index.html')
    return HttpResponse(template.render({}, request))
# -----------------------------------

# CAMERA 1 PAGE ---------------------
def camera_1(request):
    """Render the page that embeds the camera-1 stream."""
    template = loader.get_template('camera1.html')
    return HttpResponse(template.render({}, request))
# -----------------------------------

# CAMERA 2 PAGE ---------------------
def camera_2(request):
    """Render the page that embeds the camera-2 stream."""
    template = loader.get_template('camera2.html')
    return HttpResponse(template.render({}, request))
# -----------------------------------

# SHARED STREAMING LOOP -------------
def _stream(cam_id):
    """Yield an endless multipart-JPEG stream of detection frames for one camera.

    cam_id is the OpenCV capture-device index; change it to match the host
    machine (see README section 3). Each yielded part is one JPEG frame
    annotated with a per-class object-count overlay.
    """
    vid = cv2.VideoCapture(cam_id)

    while True:
        frame, class_count = detection(vid)
        frame = cv2.resize(frame, (1000, 700))

        print("\nObjects in frame:")
        row = 0
        for k in range(len(class_count)):
            if class_count[k] > 0:
                row += 1
                infor = str(obj_classes[k]) + ": " + str(int(class_count[k]))
                print("    " + infor)
                frame = cv2.putText(frame, infor, (20, (row + 1) * 35),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1,
                                    (255, 255, 255), 2, cv2.LINE_AA)

        # Encode the frame in memory. The previous implementation wrote a
        # shared 'currentframe.jpg' to disk and read it back: the file was
        # clobbered whenever both camera streams ran at once (race), and the
        # handle from open(...).read() was never closed (resource leak).
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue  # skip a frame that fails to encode instead of killing the stream
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')
# -----------------------------------

# DISPLAY CAMERA 1 ------------------
def stream_1():
    """Multipart frame generator for camera 1 (device id 0)."""
    return _stream(0)

def video_feed_1(request):
    """Streaming endpoint for camera 1 (multipart/x-mixed-replace)."""
    return StreamingHttpResponse(stream_1(), content_type='multipart/x-mixed-replace; boundary=frame')
# -----------------------------------


# DISPLAY CAMERA 2 ------------------
def stream_2():
    """Multipart frame generator for camera 2 (device id 2 on the author's machine)."""
    return _stream(2)

def video_feed_2(request):
    """Streaming endpoint for camera 2 (multipart/x-mixed-replace)."""
    return StreamingHttpResponse(stream_2(), content_type='multipart/x-mixed-replace; boundary=frame')
# -----------------------------------

# PARAMETERS FOR YOLO----------------
# The 80 COCO class names, index-aligned with the model's class ids.
obj_classes = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]

return_elements = ["input/input_data:0", "pred_sbbox/concat_2:0", "pred_mbbox/concat_2:0", "pred_lbbox/concat_2:0"]
pb_file = "./yolov3_weight/yolov3_coco.pb"
num_classes = 80
input_size = 416
graph = tf.Graph()
# The frozen graph is loaded once at import time and reused by every request.
return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)
# -----------------------------------

# YOLO DETECTION --------------------
def detection(vid):
    """Grab one frame from `vid`, run YOLOv3 on it, and return the result.

    Returns:
        result: BGR image (ndarray) with detection boxes drawn, ready for display.
        class_count: float ndarray of length len(obj_classes); entry i is the
            number of detected objects of class i in this frame.

    Raises:
        ValueError: if the capture device returns no frame.
    """
    # NOTE(review): opening a new tf.Session per frame is expensive; kept
    # as-is to preserve the existing session lifecycle — confirm before
    # hoisting the session out of the per-frame path.
    with tf.Session(graph=graph) as sess:

        return_value, frame = vid.read()
        if not return_value:
            raise ValueError("No image!")
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        frame_size = frame.shape[:2]
        image_data = utils.image_preporcess(np.copy(frame), [input_size, input_size])
        image_data = image_data[np.newaxis, ...]

        pred_sbbox, pred_mbbox, pred_lbbox = sess.run(
            [return_tensors[1], return_tensors[2], return_tensors[3]],
            feed_dict={return_tensors[0]: image_data})

        # Flatten the three scale outputs into one (N, 5 + num_classes) array.
        pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),
                                    np.reshape(pred_mbbox, (-1, 5 + num_classes)),
                                    np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)

        bboxes = utils.postprocess_boxes(pred_bbox, frame_size, input_size, 0.3)
        bboxes = utils.nms(bboxes, 0.45, method='nms')
        image, detected = utils.draw_bbox(frame, bboxes)

        detected = np.asarray(detected)

        # Count detections per class; kept as a float ndarray, matching what
        # the stream loop formats with int(...). Column 5 of each detection
        # row is the class id (as used by the original counting loop).
        class_count = np.array([sum(1 for det in detected if int(det[5]) == i)
                                for i in range(len(obj_classes))], dtype=float)

        # Convert back to BGR for OpenCV display/encoding.
        result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        return result, class_count
# -----------------------------------

-------------------------------------------------------------------------------- /static/css/templatemo_style.css: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | templatemo 419 black white 4 | 5 | http://www.templatemo.com/preview/templatemo_419_black_white 6 | 7 | http://stackoverflow.com/questions/10525744/css-table-cell-equal-width 8 | 9 | */ 10 | * { font-family: 'Open Sans', sans-serif; } 11 | 12 | html, body { height: 100%; } 13 | 14 | .green { color: #3B3; } 15 | .blue { color: #09E; } 16 | 17 | h1, h2 { margin-bottom: 20px; } 18 | 19 | h2 { font-size: 24px; } 20 | .templatemo-container { 21 | display: table; 22 | width: 100%; 23 | height: 100%; 24 | table-layout: fixed; 25 | } 26 | .left-container { 27 | display: table-cell; 28 | float: none; 29 | width: 25%; 30 | padding-bottom: 20px; 31 | padding-left: 30px; 32 | padding-right: 0; 33 | } 34 | .right-container { 35 | display: table-cell; 36 | float: none; 37 | width: 75%; 38 | padding-bottom: 20px; 39 | padding-left: 0; 40 | padding-right: 30px; 41 | vertical-align: top; 42 | } 43 | .black-bg { 44 | color: white; 45 | background-color: black; 46 | } 47 | .white-bg { background-color: white; } 48 | .tm-left-inner-container { margin-right: 30px; } 49 | .tm-right-inner-container { 50 | margin-left: 30px; 51 | /*max-width: 710px;*/ 52 | max-width: 1200px; 53 | } 54 | .templatemo-logo { 55 | display: table; 56 | overflow: hidden; 57 | table-layout: fixed; 58 | width: 100% 59 | } 60 | .logo-left-container { 61 | display: table-cell; 62 | width: 38%; 63 | } 64 | .logo-right-container { 65 | display: table-cell; 66 | width: 62%; 67 | } 68 | .templatemo-logo h1 { 69 | margin-top: 0; 70 | margin-bottom: 0; 71 | max-width: 250px; 72 | } 73 | .templatemo-logo .col-lg-6, .templatemo-logo .col-md-6, .templatemo-logo .col-sm-6 { 74 | padding-left: 0; 75 | padding-right: 0; 76 | padding-top: 20px; 77 | } 78 | .logo-left { 79 | border-top: 1px solid gray; 80 | border-right: 
none; 81 | border-bottom: 1px solid gray; 82 | border-left: 1px solid gray; 83 | float: right; 84 | padding: 20px 30px 20px 100px; 85 | text-align: right; 86 | } 87 | .logo-right { 88 | border-top: 1px solid gray; 89 | border-right: 1px solid gray; 90 | border-bottom: 1px solid gray; 91 | border-left: none; 92 | float: left; 93 | padding: 20px 100px 20px 30px; 94 | } 95 | 96 | .templatemo-nav { 97 | clear: both; 98 | font-size: 16px; 99 | font-weight: 300; 100 | float: right; 101 | min-width: 250px; 102 | } 103 | .templatemo-nav>li>a { 104 | border: 2px solid #8C8C8C; 105 | border-radius: 8px; 106 | color: white; 107 | padding: 15px 50px; 108 | margin-bottom: 20px; 109 | transition: background-color 0.3s; 110 | } 111 | .templatemo-nav>li>a:hover, .templatemo-nav>li>a:focus, .templatemo-nav>li>a.active { 112 | background-color: #fff; 113 | color: #000; 114 | font-weight: 700; 115 | } 116 | .templatemo-nav>li>a>.fa { margin-right: 10px; } 117 | .templatemo-header { 118 | clear: both; 119 | font-size: 30px; 120 | margin-top: 0; 121 | } 122 | article { margin-top: 40px; } 123 | article p { 124 | margin-bottom: 15px; 125 | text-align: justify; 126 | } 127 | footer { 128 | margin-top: 40px; 129 | overflow: hidden; 130 | } 131 | footer p { margin-bottom: 0; } 132 | footer .col-lg-6, footer .col-md-6 { 133 | padding-left: 0; 134 | padding-right: 0; 135 | } 136 | .templatemo-social { text-align: right; } 137 | .templatemo-social i { 138 | border: 1px solid #ccc; 139 | padding-top: 7px; 140 | padding-bottom: 5px; 141 | text-align: center; 142 | width: 30px; 143 | height: 30px; 144 | transition: background-color 0.3s; 145 | } 146 | .templatemo-social a { color: #757575; } 147 | .templatemo-social a:hover i { 148 | background-color: #000; 149 | color: #fff; 150 | } 151 | .row .templatemo-item { margin-top: 0; } 152 | .templatemo-item p { margin-top: 20px; } 153 | .templatemo-item ul { margin-bottom: 20px; } 154 | .templatemo-item ul li { list-style: none; } 155 | 
.fa.templatemo-service-icon { font-size: 100px; } 156 | 157 | .templatemo-testimonial { overflow: hidden; } 158 | .templatemo-testimonial .fa { 159 | color: #C8B55E; 160 | padding: 5px; 161 | } 162 | .templatemo-border-left { 163 | border-left: 3px solid #C8B55E; 164 | padding-left: 10px; 165 | } 166 | .templatemo-border-right { 167 | border-right: 3px solid #C8B55E; 168 | padding-right: 10px; 169 | text-align: right; 170 | } 171 | .templatemo-testimonial footer { color: #777; } 172 | .templatemo-testimonial footer:before { content: '\2014 \00A0'; } 173 | .form-control:focus { 174 | border-color: #DEB215; 175 | box-shadow: inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(234, 231, 60, 0.6); 176 | } 177 | iframe { border: none; } 178 | #map-canvas { 179 | height: 350px; 180 | margin-bottom: 30px; 181 | } 182 | .margin-bottom-60 { margin-bottom: 60px; } 183 | 184 | /* Media Queries 185 | ----------------------------------------*/ 186 | @media screen and (max-width: 991px) { 187 | .templatemo-social { 188 | margin-top: 15px; 189 | text-align: left; 190 | } 191 | } 192 | @media screen and (max-width: 767px) { 193 | .templatemo-logo, .templatemo-container { display: block; } 194 | .logo-left-container, .logo-right-container, .left-container, .right-container { 195 | display: block; 196 | width: 100%; 197 | } 198 | .left-container, .right-container { 199 | padding-left: 20px; 200 | padding-right: 20px; 201 | } 202 | .logo-left, .logo-right, .templatemo-nav { float: none; } 203 | .logo-left { 204 | border-right: 1px solid gray; 205 | text-align: center; 206 | margin-bottom: 30px; 207 | padding: 20px; 208 | } 209 | .logo-right { 210 | border-left: 1px solid gray; 211 | text-align: center; 212 | margin-bottom: 30px; 213 | padding: 20px; 214 | } 215 | .tm-left-inner-container { margin-right: 0; } 216 | .tm-right-inner-container { margin-left: 0; } 217 | .templatemo-nav { max-width: 100%; } 218 | .templatemo-nav>li>a { 219 | margin-bottom: 5px; 220 | text-align: center; 221 | } 
222 | h1 { 223 | margin-top: 0; 224 | margin-bottom: 20px; 225 | } 226 | .templatemo-logo h1 { 227 | border: none; 228 | max-width: none; 229 | } 230 | .left-container, .right-container { padding-top: 40px; } 231 | .templatemo-logo .col-lg-6, .templatemo-logo .col-md-6, .templatemo-logo .col-sm-6 { padding-top: 0; } 232 | } 233 | -------------------------------------------------------------------------------- /webcam/templates/css/templatemo_style.css: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | templatemo 419 black white 4 | 5 | http://www.templatemo.com/preview/templatemo_419_black_white 6 | 7 | http://stackoverflow.com/questions/10525744/css-table-cell-equal-width 8 | 9 | */ 10 | * { font-family: 'Open Sans', sans-serif; } 11 | 12 | html, body { height: 100%; } 13 | 14 | .green { color: #3B3; } 15 | .blue { color: #09E; } 16 | 17 | h1, h2 { margin-bottom: 20px; } 18 | 19 | h2 { font-size: 24px; } 20 | .templatemo-container { 21 | display: table; 22 | width: 100%; 23 | height: 100%; 24 | table-layout: fixed; 25 | } 26 | .left-container { 27 | display: table-cell; 28 | float: none; 29 | width: 25%; 30 | padding-bottom: 20px; 31 | padding-left: 30px; 32 | padding-right: 0; 33 | } 34 | .right-container { 35 | display: table-cell; 36 | float: none; 37 | width: 75%; 38 | padding-bottom: 20px; 39 | padding-left: 0; 40 | padding-right: 30px; 41 | vertical-align: top; 42 | } 43 | .black-bg { 44 | color: white; 45 | background-color: black; 46 | } 47 | .white-bg { background-color: white; } 48 | .tm-left-inner-container { margin-right: 30px; } 49 | .tm-right-inner-container { 50 | margin-left: 30px; 51 | /*max-width: 710px;*/ 52 | max-width: 1200px; 53 | } 54 | .templatemo-logo { 55 | display: table; 56 | overflow: hidden; 57 | table-layout: fixed; 58 | width: 100% 59 | } 60 | .logo-left-container { 61 | display: table-cell; 62 | width: 38%; 63 | } 64 | .logo-right-container { 65 | display: table-cell; 66 | width: 62%; 
67 | } 68 | .templatemo-logo h1 { 69 | margin-top: 0; 70 | margin-bottom: 0; 71 | max-width: 250px; 72 | } 73 | .templatemo-logo .col-lg-6, .templatemo-logo .col-md-6, .templatemo-logo .col-sm-6 { 74 | padding-left: 0; 75 | padding-right: 0; 76 | padding-top: 20px; 77 | } 78 | .logo-left { 79 | border-top: 1px solid gray; 80 | border-right: none; 81 | border-bottom: 1px solid gray; 82 | border-left: 1px solid gray; 83 | float: right; 84 | padding: 20px 30px 20px 100px; 85 | text-align: right; 86 | } 87 | .logo-right { 88 | border-top: 1px solid gray; 89 | border-right: 1px solid gray; 90 | border-bottom: 1px solid gray; 91 | border-left: none; 92 | float: left; 93 | padding: 20px 100px 20px 30px; 94 | } 95 | 96 | .templatemo-nav { 97 | clear: both; 98 | font-size: 16px; 99 | font-weight: 300; 100 | float: right; 101 | min-width: 250px; 102 | } 103 | .templatemo-nav>li>a { 104 | border: 2px solid #8C8C8C; 105 | border-radius: 8px; 106 | color: white; 107 | padding: 15px 50px; 108 | margin-bottom: 20px; 109 | transition: background-color 0.3s; 110 | } 111 | .templatemo-nav>li>a:hover, .templatemo-nav>li>a:focus, .templatemo-nav>li>a.active { 112 | background-color: #fff; 113 | color: #000; 114 | font-weight: 700; 115 | } 116 | .templatemo-nav>li>a>.fa { margin-right: 10px; } 117 | .templatemo-header { 118 | clear: both; 119 | font-size: 30px; 120 | margin-top: 0; 121 | } 122 | article { margin-top: 40px; } 123 | article p { 124 | margin-bottom: 15px; 125 | text-align: justify; 126 | } 127 | footer { 128 | margin-top: 40px; 129 | overflow: hidden; 130 | } 131 | footer p { margin-bottom: 0; } 132 | footer .col-lg-6, footer .col-md-6 { 133 | padding-left: 0; 134 | padding-right: 0; 135 | } 136 | .templatemo-social { text-align: right; } 137 | .templatemo-social i { 138 | border: 1px solid #ccc; 139 | padding-top: 7px; 140 | padding-bottom: 5px; 141 | text-align: center; 142 | width: 30px; 143 | height: 30px; 144 | transition: background-color 0.3s; 145 | } 146 | 
.templatemo-social a { color: #757575; } 147 | .templatemo-social a:hover i { 148 | background-color: #000; 149 | color: #fff; 150 | } 151 | .row .templatemo-item { margin-top: 0; } 152 | .templatemo-item p { margin-top: 20px; } 153 | .templatemo-item ul { margin-bottom: 20px; } 154 | .templatemo-item ul li { list-style: none; } 155 | .fa.templatemo-service-icon { font-size: 100px; } 156 | 157 | .templatemo-testimonial { overflow: hidden; } 158 | .templatemo-testimonial .fa { 159 | color: #C8B55E; 160 | padding: 5px; 161 | } 162 | .templatemo-border-left { 163 | border-left: 3px solid #C8B55E; 164 | padding-left: 10px; 165 | } 166 | .templatemo-border-right { 167 | border-right: 3px solid #C8B55E; 168 | padding-right: 10px; 169 | text-align: right; 170 | } 171 | .templatemo-testimonial footer { color: #777; } 172 | .templatemo-testimonial footer:before { content: '\2014 \00A0'; } 173 | .form-control:focus { 174 | border-color: #DEB215; 175 | box-shadow: inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(234, 231, 60, 0.6); 176 | } 177 | iframe { border: none; } 178 | #map-canvas { 179 | height: 350px; 180 | margin-bottom: 30px; 181 | } 182 | .margin-bottom-60 { margin-bottom: 60px; } 183 | 184 | /* Media Queries 185 | ----------------------------------------*/ 186 | @media screen and (max-width: 991px) { 187 | .templatemo-social { 188 | margin-top: 15px; 189 | text-align: left; 190 | } 191 | } 192 | @media screen and (max-width: 767px) { 193 | .templatemo-logo, .templatemo-container { display: block; } 194 | .logo-left-container, .logo-right-container, .left-container, .right-container { 195 | display: block; 196 | width: 100%; 197 | } 198 | .left-container, .right-container { 199 | padding-left: 20px; 200 | padding-right: 20px; 201 | } 202 | .logo-left, .logo-right, .templatemo-nav { float: none; } 203 | .logo-left { 204 | border-right: 1px solid gray; 205 | text-align: center; 206 | margin-bottom: 30px; 207 | padding: 20px; 208 | } 209 | .logo-right { 210 | 
border-left: 1px solid gray; 211 | text-align: center; 212 | margin-bottom: 30px; 213 | padding: 20px; 214 | } 215 | .tm-left-inner-container { margin-right: 0; } 216 | .tm-right-inner-container { margin-left: 0; } 217 | .templatemo-nav { max-width: 100%; } 218 | .templatemo-nav>li>a { 219 | margin-bottom: 5px; 220 | text-align: center; 221 | } 222 | h1 { 223 | margin-top: 0; 224 | margin-bottom: 20px; 225 | } 226 | .templatemo-logo h1 { 227 | border: none; 228 | max-width: none; 229 | } 230 | .left-container, .right-container { padding-top: 40px; } 231 | .templatemo-logo .col-lg-6, .templatemo-logo .col-md-6, .templatemo-logo .col-sm-6 { padding-top: 0; } 232 | } 233 | -------------------------------------------------------------------------------- /core/utils.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # coding=utf-8 3 | #================================================================ 4 | # Copyright (C) 2019 * Ltd. All rights reserved. 
#
#   Editor      : VIM
#   File name   : utils.py
#   Author      : YunYang1994
#   Created date: 2019-02-28 13:14:19
#   Description : 2019-07-23 Modified by Tran Le Anh
#
#================================================================

import cv2
import random
import colorsys
import numpy as np
import tensorflow as tf
from core.config import cfg

def read_class_names(class_file_name):
    '''loads class name from a file

    Returns a dict mapping the 0-based line index to the class name
    (trailing newline stripped).
    '''
    names = {}
    with open(class_file_name, 'r') as data:
        for ID, name in enumerate(data):
            names[ID] = name.strip('\n')
    return names

def get_anchors(anchors_path):
    '''loads the anchors from a file

    The file holds one comma-separated line of 18 floats, reshaped to
    (3 scales, 3 anchors per scale, width/height).
    '''
    with open(anchors_path) as f:
        anchors = f.readline()
    anchors = np.array(anchors.split(','), dtype=np.float32)
    return anchors.reshape(3, 3, 2)


def image_preporcess(image, target_size, gt_boxes=None):
    """Letterbox-resize a BGR image to target_size (ih, iw): keep aspect
    ratio, pad with gray (128), scale pixels to [0, 1].

    If gt_boxes (array of pixel xmin/ymin/xmax/ymax in columns 0-3) is
    given, the boxes are mapped into the resized frame and returned too.
    NOTE: the historical "preporcess" spelling is kept because external
    callers import the function under this name.
    """
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)

    ih, iw = target_size
    h, w, _ = image.shape

    scale = min(iw/w, ih/h)
    nw, nh = int(scale * w), int(scale * h)
    image_resized = cv2.resize(image, (nw, nh))

    image_paded = np.full(shape=[ih, iw, 3], fill_value=128.0)
    dw, dh = (iw - nw) // 2, (ih-nh) // 2
    image_paded[dh:nh+dh, dw:nw+dw, :] = image_resized
    image_paded = image_paded / 255.

    if gt_boxes is None:
        return image_paded

    else:
        gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
        gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
        return image_paded, gt_boxes


def draw_bbox(image, bboxes, classes=None, show_label=True):
    """
    bboxes: [x_min, y_min, x_max, y_max, probability, cls_id] format coordinates.

    `classes` defaults to the class map read from cfg.YOLO.CLASSES; the
    lookup is now done lazily inside the call instead of as a computed
    default in the signature, so importing this module no longer opens the
    class file.  The stray `data = open(cfg.YOLO.CLASSES, 'r')` -- a file
    handle that was opened, never read, and never closed -- has been
    removed.
    """
    if classes is None:
        classes = read_class_names(cfg.YOLO.CLASSES)

    num_classes = len(classes)
    image_h, image_w, _ = image.shape
    hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))

    # Fixed seed so every class keeps a stable color across calls; seed(None)
    # restores nondeterminism for other users of the random module.
    random.seed(0)
    random.shuffle(colors)
    random.seed(None)

    for i, bbox in enumerate(bboxes):
        coor = np.array(bbox[:4], dtype=np.int32)
        fontScale = 0.5
        score = bbox[4]
        class_ind = int(bbox[5])
        bbox_color = colors[class_ind]
        bbox_thick = int(0.6 * (image_h + image_w) / 600)
        c1, c2 = (coor[0], coor[1]), (coor[2], coor[3])
        cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)

        if show_label:
            bbox_mess = '%s: %.2f' % (classes[class_ind], score)
            t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick//2)[0]
            cv2.rectangle(image, c1, (c1[0] + t_size[0], c1[1] - t_size[1] - 3), bbox_color, -1)  # filled

            cv2.putText(image, bbox_mess, (c1[0], c1[1]-2), cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale, (0, 0, 0), bbox_thick//2, lineType=cv2.LINE_AA)

    return image, bboxes



def bboxes_iou(boxes1, boxes2):
    """Elementwise (broadcastable) IoU of xmin/ymin/xmax/ymax boxes.

    The result is floored at float32 eps so a zero-overlap pair never
    yields an exact zero (avoids divide-by-zero downstream weighting).
    """
    boxes1 = np.array(boxes1)
    boxes2 = np.array(boxes2)

    boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
    boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])

    left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
    right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])

    inter_section = np.maximum(right_down - left_up, 0.0)
    inter_area = inter_section[..., 0] * inter_section[..., 1]
    union_area = boxes1_area + boxes2_area - inter_area
    ious = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)

    return ious


def read_pb_return_tensors(graph, pb_file, return_elements):
    # Load a frozen GraphDef (.pb) and import it into `graph`, returning the
    # tensors named in `return_elements`.  The commented-out lines are the
    # TF1-only spellings; the active tf.io / tf.compat.v1 forms also run
    # under TF2.

    # with tf.gfile.FastGFile(pb_file, 'rb') as f:
    with tf.io.gfile.GFile(pb_file, 'rb') as f:
        # frozen_graph_def = tf.GraphDef()
        frozen_graph_def = tf.compat.v1.GraphDef()
        frozen_graph_def.ParseFromString(f.read())

    with graph.as_default():
        return_elements = tf.import_graph_def(frozen_graph_def,
                                              return_elements=return_elements)
    return return_elements


def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
    """
    :param bboxes: (xmin, ymin, xmax, ymax, score, class)

    Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
          https://github.com/bharatsingh430/soft-nms

    Greedy per-class non-max suppression: repeatedly keep the
    highest-scoring box of each class and down-weight the remaining boxes
    of that class by their IoU with it (hard NMS zeroes them; soft-NMS
    applies a Gaussian decay).  Returns the kept rows as a list.
    """
    classes_in_img = list(set(bboxes[:, 5]))
    best_bboxes = []

    for cls in classes_in_img:
        cls_mask = (bboxes[:, 5] == cls)
        cls_bboxes = bboxes[cls_mask]

        while len(cls_bboxes) > 0:
            max_ind = np.argmax(cls_bboxes[:, 4])
            best_bbox = cls_bboxes[max_ind]
            best_bboxes.append(best_bbox)
            # Remove the winner from the pool, then rate the rest against it.
            cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
            iou = bboxes_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
            weight = np.ones((len(iou),), dtype=np.float32)

            assert method in ['nms', 'soft-nms']

            if method == 'nms':
                # Hard suppression: drop every box overlapping above threshold.
                iou_mask = iou > iou_threshold
                weight[iou_mask] = 0.0

            if method == 'soft-nms':
                # Soft suppression: exponentially decay overlapping scores.
                weight = np.exp(-(1.0 * iou ** 2 / sigma))

            cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
            # Boxes whose score was zeroed (or decayed to 0) leave the pool.
            score_mask = cls_bboxes[:, 4] > 0.
            cls_bboxes = cls_bboxes[score_mask]

    return best_bboxes


def postprocess_boxes(pred_bbox, org_img_shape, input_size, score_threshold):
    # Convert raw network output rows (x, y, w, h, conf, class probs...) into
    # final (xmin, ymin, xmax, ymax, score, class) detections expressed in
    # original-image coordinates, discarding degenerate and low-score boxes.
    valid_scale=[0, np.inf]
    pred_bbox = np.array(pred_bbox)

    pred_xywh = pred_bbox[:, 0:4]
    pred_conf = pred_bbox[:, 4]
    pred_prob = pred_bbox[:, 5:]

    # # (1) (x, y, w, h) --> (xmin, ymin, xmax, ymax)
    pred_coor = np.concatenate([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5,
                                pred_xywh[:, :2] + pred_xywh[:, 2:] * 0.5], axis=-1)
    # # (2) (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
    org_h, org_w = org_img_shape
    resize_ratio = min(input_size / org_w, input_size / org_h)

    dw = (input_size - resize_ratio * org_w) / 2
    dh = (input_size - resize_ratio * org_h) / 2

    # Undo the letterbox scaling/padding applied by image_preporcess.
    pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
    pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio

    # # (3) clip some boxes those are out of range
    pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
                                np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
    invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
    pred_coor[invalid_mask] = 0

    # # (4) discard some invalid boxes
    bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
    scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))

    # # (5) discard some boxes with low scores
    classes = np.argmax(pred_prob, axis=-1)
    scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
    score_mask = scores > score_threshold
    mask = np.logical_and(scale_mask, score_mask)
    coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]

    return np.concatenate([coors, scores[:, np.newaxis],
                           classes[:, np.newaxis]], axis=-1)


# ------------------------------------------------------------------ /train.py:
#! /usr/bin/env python
# coding=utf-8
#================================================================
#   Copyright (C) 2019 * Ltd. All rights reserved.
#
#   Editor      : VIM
#   File name   : train.py
#   Author      : YunYang1994
#   Created date: 2019-02-28 17:50:26
#   Description :
#
#================================================================

import os
import time
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from tqdm import tqdm
from core.dataset import Dataset
from core.yolov3 import YOLOV3
from core.config import cfg


class YoloTrain(object):
    """Two-stage YOLOv3 trainer (TF1 graph mode).

    Stage 1 trains only the three detection heads (conv_sbbox/mbbox/lbbox)
    with the backbone frozen; stage 2 fine-tunes every trainable variable.
    The whole graph is built in __init__; train() runs the session loop.
    """
    def __init__(self):
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        # NOTE(review): "FISRT" typo must match the key declared in
        # core/config.py -- do not correct it here alone.
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150
        self.train_logdir = "./data/log/train"
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

        # Placeholders have no static shape: batch/spatial dims are fed at
        # run time (multi-scale training varies the input size per batch).
        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                                                    self.label_sbbox, self.label_mbbox, self.label_lbbox,
                                                    self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        # Linear warmup to learn_rate_init, then cosine decay to
        # learn_rate_end over the remaining training steps.
        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
            warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
                                       dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant( (self.first_stage_epochs + self.second_stage_epochs)* self.steps_per_period,
                                       dtype=tf.float64, name='train_steps')
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                                    (1 + tf.cos(
                                        (self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
            )
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            # Exponential moving average of all trainable variables; applied
            # as a control dependency of every train op below.
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_first_stage_train"):
            # Stage 1 only updates the three detection-head branches.
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                      var_list=self.first_stage_trainable_var_list)
            # Order: batch-norm updates -> optimizer step + step counter -> EMA.
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                      var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            # NOTE(review): wipes the whole log directory on every run.
            logdir = "./data/log/"
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)


    def train(self):
        """Run the two-stage training loop and checkpoint after each epoch."""
        self.sess.run(tf.global_variables_initializer())
        try:
            print('=> Restoring weights from: %s ... ' % self.initial_weight)
            self.loader.restore(self.sess, self.initial_weight)
        # NOTE(review): bare except -- any restore failure (not only a
        # missing checkpoint) silently falls back to training from scratch
        # and skips stage 1.
        except:
            print('=> %s does not exist !!!' % self.initial_weight)
            print('=> Now it starts to train YOLOV3 from scratch ...')
            self.first_stage_epochs = 0

        for epoch in range(1, 1+self.first_stage_epochs+self.second_stage_epochs):
            if epoch <= self.first_stage_epochs:
                train_op = self.train_op_with_frozen_variables
            else:
                train_op = self.train_op_with_all_variables

            pbar = tqdm(self.trainset)
            train_epoch_loss, test_epoch_loss = [], []

            for train_data in pbar:
                _, summary, train_step_loss, global_step_val = self.sess.run(
                    [train_op, self.write_op, self.loss, self.global_step],feed_dict={
                                                self.input_data: train_data[0],
                                                self.label_sbbox: train_data[1],
                                                self.label_mbbox: train_data[2],
                                                self.label_lbbox: train_data[3],
                                                self.true_sbboxes: train_data[4],
                                                self.true_mbboxes: train_data[5],
                                                self.true_lbboxes: train_data[6],
                                                self.trainable: True,
                })

                train_epoch_loss.append(train_step_loss)
                self.summary_writer.add_summary(summary, global_step_val)
                pbar.set_description("train loss: %.2f" %train_step_loss)

            for test_data in self.testset:
                test_step_loss = self.sess.run( self.loss, feed_dict={
                                                self.input_data: test_data[0],
                                                self.label_sbbox: test_data[1],
                                                self.label_mbbox: test_data[2],
                                                self.label_lbbox: test_data[3],
                                                self.true_sbboxes: test_data[4],
                                                self.true_mbboxes: test_data[5],
                                                self.true_lbboxes: test_data[6],
                                                self.trainable: False,
                })

                test_epoch_loss.append(test_step_loss)

            train_epoch_loss, test_epoch_loss = np.mean(train_epoch_loss), np.mean(test_epoch_loss)
            ckpt_file = "./checkpoint/yolov3_test_loss=%.4f.ckpt" % test_epoch_loss
            log_time = time.strftime('%Y-%m-%d
%H:%M:%S', time.localtime(time.time()))
            print("=> Epoch: %2d Time: %s Train loss: %.2f Test loss: %.2f Saving %s ..."
                            %(epoch, log_time, train_epoch_loss, test_epoch_loss, ckpt_file))
            self.saver.save(self.sess, ckpt_file, global_step=epoch)



if __name__ == '__main__': YoloTrain().train()




# ----------------------------------------------------------- /core/dataset.py:
#! /usr/bin/env python
# coding=utf-8
#================================================================
#   Copyright (C) 2019 * Ltd. All rights reserved.
#
#   Editor      : VIM
#   File name   : dataset.py
#   Author      : YunYang1994
#   Created date: 2019-03-15 18:05:03
#   Description :
#
#================================================================

import os
import cv2
import random
import numpy as np
import tensorflow as tf
import core.utils as utils
from core.config import cfg



class Dataset(object):
    """implement Dataset here

    Iterable batch loader: each __next__ yields one multi-scale training
    batch (images plus per-scale label tensors and raw boxes).
    """
    def __init__(self, dataset_type):
        # dataset_type: 'train' or 'test' -- selects the matching cfg section.
        self.annot_path = cfg.TRAIN.ANNOT_PATH if dataset_type == 'train' else cfg.TEST.ANNOT_PATH
        self.input_sizes = cfg.TRAIN.INPUT_SIZE if dataset_type == 'train' else cfg.TEST.INPUT_SIZE
        self.batch_size = cfg.TRAIN.BATCH_SIZE if dataset_type == 'train' else cfg.TEST.BATCH_SIZE
        self.data_aug = cfg.TRAIN.DATA_AUG if dataset_type == 'train' else cfg.TEST.DATA_AUG

        # Candidate input sizes for multi-scale training (one picked per batch).
        self.train_input_sizes = cfg.TRAIN.INPUT_SIZE
        self.strides = np.array(cfg.YOLO.STRIDES)
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.max_bbox_per_scale = 150

        self.annotations = self.load_annotations(dataset_type)
        self.num_samples = len(self.annotations)
        self.num_batchs = int(np.ceil(self.num_samples / self.batch_size))
        self.batch_count = 0


    def load_annotations(self, dataset_type):
        # One line per image: "<path> x1,y1,x2,y2,cls ...".  Lines with no
        # boxes are dropped; order is shuffled once up front.
        with open(self.annot_path, 'r') as f:
            txt = f.readlines()
            annotations = [line.strip() for line in txt if len(line.strip().split()[1:]) != 0]
        np.random.shuffle(annotations)
        return annotations

    def __iter__(self):
        return self

    def __next__(self):
        # Assemble one batch; raises StopIteration when the epoch is done.
        with tf.device('/cpu:0'):
            # Pick this batch's input resolution (multi-scale training).
            self.train_input_size = random.choice(self.train_input_sizes)
            self.train_output_sizes = self.train_input_size // self.strides

            batch_image = np.zeros((self.batch_size, self.train_input_size, self.train_input_size, 3))

            batch_label_sbbox = np.zeros((self.batch_size, self.train_output_sizes[0], self.train_output_sizes[0],
                                          self.anchor_per_scale, 5 + self.num_classes))
            batch_label_mbbox = np.zeros((self.batch_size, self.train_output_sizes[1], self.train_output_sizes[1],
                                          self.anchor_per_scale, 5 + self.num_classes))
            batch_label_lbbox = np.zeros((self.batch_size, self.train_output_sizes[2], self.train_output_sizes[2],
                                          self.anchor_per_scale, 5 + self.num_classes))

            batch_sbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))
            batch_mbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))
            batch_lbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))

            num = 0
            if self.batch_count < self.num_batchs:
                while num < self.batch_size:
                    index = self.batch_count * self.batch_size + num
                    # Wrap around so the last batch is always full.
                    if index >= self.num_samples: index -= self.num_samples
                    annotation = self.annotations[index]
                    image, bboxes = self.parse_annotation(annotation)
                    label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = self.preprocess_true_boxes(bboxes)

                    batch_image[num, :, :, :] = image
batch_label_sbbox[num, :, :, :, :] = label_sbbox 86 | batch_label_mbbox[num, :, :, :, :] = label_mbbox 87 | batch_label_lbbox[num, :, :, :, :] = label_lbbox 88 | batch_sbboxes[num, :, :] = sbboxes 89 | batch_mbboxes[num, :, :] = mbboxes 90 | batch_lbboxes[num, :, :] = lbboxes 91 | num += 1 92 | self.batch_count += 1 93 | return batch_image, batch_label_sbbox, batch_label_mbbox, batch_label_lbbox, \ 94 | batch_sbboxes, batch_mbboxes, batch_lbboxes 95 | else: 96 | self.batch_count = 0 97 | np.random.shuffle(self.annotations) 98 | raise StopIteration 99 | 100 | def random_horizontal_flip(self, image, bboxes): 101 | 102 | if random.random() < 0.5: 103 | _, w, _ = image.shape 104 | image = image[:, ::-1, :] 105 | bboxes[:, [0,2]] = w - bboxes[:, [2,0]] 106 | 107 | return image, bboxes 108 | 109 | def random_crop(self, image, bboxes): 110 | 111 | if random.random() < 0.5: 112 | h, w, _ = image.shape 113 | max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1) 114 | 115 | max_l_trans = max_bbox[0] 116 | max_u_trans = max_bbox[1] 117 | max_r_trans = w - max_bbox[2] 118 | max_d_trans = h - max_bbox[3] 119 | 120 | crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans))) 121 | crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans))) 122 | crop_xmax = max(w, int(max_bbox[2] + random.uniform(0, max_r_trans))) 123 | crop_ymax = max(h, int(max_bbox[3] + random.uniform(0, max_d_trans))) 124 | 125 | image = image[crop_ymin : crop_ymax, crop_xmin : crop_xmax] 126 | 127 | bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - crop_xmin 128 | bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - crop_ymin 129 | 130 | return image, bboxes 131 | 132 | def random_translate(self, image, bboxes): 133 | 134 | if random.random() < 0.5: 135 | h, w, _ = image.shape 136 | max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1) 137 | 138 | max_l_trans = max_bbox[0] 139 | max_u_trans = max_bbox[1] 140 | 
max_r_trans = w - max_bbox[2] 141 | max_d_trans = h - max_bbox[3] 142 | 143 | tx = random.uniform(-(max_l_trans - 1), (max_r_trans - 1)) 144 | ty = random.uniform(-(max_u_trans - 1), (max_d_trans - 1)) 145 | 146 | M = np.array([[1, 0, tx], [0, 1, ty]]) 147 | image = cv2.warpAffine(image, M, (w, h)) 148 | 149 | bboxes[:, [0, 2]] = bboxes[:, [0, 2]] + tx 150 | bboxes[:, [1, 3]] = bboxes[:, [1, 3]] + ty 151 | 152 | return image, bboxes 153 | 154 | def parse_annotation(self, annotation): 155 | 156 | line = annotation.split() 157 | image_path = line[0] 158 | if not os.path.exists(image_path): 159 | raise KeyError("%s does not exist ... " %image_path) 160 | image = np.array(cv2.imread(image_path)) 161 | bboxes = np.array([list(map(int, box.split(','))) for box in line[1:]]) 162 | 163 | if self.data_aug: 164 | image, bboxes = self.random_horizontal_flip(np.copy(image), np.copy(bboxes)) 165 | image, bboxes = self.random_crop(np.copy(image), np.copy(bboxes)) 166 | image, bboxes = self.random_translate(np.copy(image), np.copy(bboxes)) 167 | 168 | image, bboxes = utils.image_preporcess(np.copy(image), [self.train_input_size, self.train_input_size], np.copy(bboxes)) 169 | return image, bboxes 170 | 171 | def bbox_iou(self, boxes1, boxes2): 172 | 173 | boxes1 = np.array(boxes1) 174 | boxes2 = np.array(boxes2) 175 | 176 | boxes1_area = boxes1[..., 2] * boxes1[..., 3] 177 | boxes2_area = boxes2[..., 2] * boxes2[..., 3] 178 | 179 | boxes1 = np.concatenate([boxes1[..., :2] - boxes1[..., 2:] * 0.5, 180 | boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1) 181 | boxes2 = np.concatenate([boxes2[..., :2] - boxes2[..., 2:] * 0.5, 182 | boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1) 183 | 184 | left_up = np.maximum(boxes1[..., :2], boxes2[..., :2]) 185 | right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:]) 186 | 187 | inter_section = np.maximum(right_down - left_up, 0.0) 188 | inter_area = inter_section[..., 0] * inter_section[..., 1] 189 | union_area = boxes1_area + boxes2_area 
- inter_area 190 | 191 | return inter_area / union_area 192 | 193 | def preprocess_true_boxes(self, bboxes): 194 | 195 | label = [np.zeros((self.train_output_sizes[i], self.train_output_sizes[i], self.anchor_per_scale, 196 | 5 + self.num_classes)) for i in range(3)] 197 | bboxes_xywh = [np.zeros((self.max_bbox_per_scale, 4)) for _ in range(3)] 198 | bbox_count = np.zeros((3,)) 199 | 200 | for bbox in bboxes: 201 | bbox_coor = bbox[:4] 202 | bbox_class_ind = bbox[4] 203 | 204 | onehot = np.zeros(self.num_classes, dtype=np.float) 205 | onehot[bbox_class_ind] = 1.0 206 | uniform_distribution = np.full(self.num_classes, 1.0 / self.num_classes) 207 | deta = 0.01 208 | smooth_onehot = onehot * (1 - deta) + deta * uniform_distribution 209 | 210 | bbox_xywh = np.concatenate([(bbox_coor[2:] + bbox_coor[:2]) * 0.5, bbox_coor[2:] - bbox_coor[:2]], axis=-1) 211 | bbox_xywh_scaled = 1.0 * bbox_xywh[np.newaxis, :] / self.strides[:, np.newaxis] 212 | 213 | iou = [] 214 | exist_positive = False 215 | for i in range(3): 216 | anchors_xywh = np.zeros((self.anchor_per_scale, 4)) 217 | anchors_xywh[:, 0:2] = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32) + 0.5 218 | anchors_xywh[:, 2:4] = self.anchors[i] 219 | 220 | iou_scale = self.bbox_iou(bbox_xywh_scaled[i][np.newaxis, :], anchors_xywh) 221 | iou.append(iou_scale) 222 | iou_mask = iou_scale > 0.3 223 | 224 | if np.any(iou_mask): 225 | xind, yind = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32) 226 | 227 | label[i][yind, xind, iou_mask, :] = 0 228 | label[i][yind, xind, iou_mask, 0:4] = bbox_xywh 229 | label[i][yind, xind, iou_mask, 4:5] = 1.0 230 | label[i][yind, xind, iou_mask, 5:] = smooth_onehot 231 | 232 | bbox_ind = int(bbox_count[i] % self.max_bbox_per_scale) 233 | bboxes_xywh[i][bbox_ind, :4] = bbox_xywh 234 | bbox_count[i] += 1 235 | 236 | exist_positive = True 237 | 238 | if not exist_positive: 239 | best_anchor_ind = np.argmax(np.array(iou).reshape(-1), axis=-1) 240 | best_detect = int(best_anchor_ind / 
self.anchor_per_scale) 241 | best_anchor = int(best_anchor_ind % self.anchor_per_scale) 242 | xind, yind = np.floor(bbox_xywh_scaled[best_detect, 0:2]).astype(np.int32) 243 | 244 | label[best_detect][yind, xind, best_anchor, :] = 0 245 | label[best_detect][yind, xind, best_anchor, 0:4] = bbox_xywh 246 | label[best_detect][yind, xind, best_anchor, 4:5] = 1.0 247 | label[best_detect][yind, xind, best_anchor, 5:] = smooth_onehot 248 | 249 | bbox_ind = int(bbox_count[best_detect] % self.max_bbox_per_scale) 250 | bboxes_xywh[best_detect][bbox_ind, :4] = bbox_xywh 251 | bbox_count[best_detect] += 1 252 | label_sbbox, label_mbbox, label_lbbox = label 253 | sbboxes, mbboxes, lbboxes = bboxes_xywh 254 | return label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes 255 | 256 | def __len__(self): 257 | return self.num_batchs 258 | 259 | 260 | 261 | 262 | -------------------------------------------------------------------------------- /core/yolov3.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # coding=utf-8 3 | #================================================================ 4 | # Copyright (C) 2019 * Ltd. All rights reserved. 
5 | # 6 | # Editor : VIM 7 | # File name : yolov3.py 8 | # Author : YunYang1994 9 | # Created date: 2019-02-28 10:47:03 10 | # Description : 11 | # 12 | #================================================================ 13 | 14 | import numpy as np 15 | import tensorflow as tf 16 | import core.utils as utils 17 | import core.common as common 18 | import core.backbone as backbone 19 | from core.config import cfg 20 | 21 | 22 | class YOLOV3(object): 23 | """Implement tensoflow yolov3 here""" 24 | def __init__(self, input_data, trainable): 25 | 26 | self.trainable = trainable 27 | self.classes = utils.read_class_names(cfg.YOLO.CLASSES) 28 | self.num_class = len(self.classes) 29 | self.strides = np.array(cfg.YOLO.STRIDES) 30 | self.anchors = utils.get_anchors(cfg.YOLO.ANCHORS) 31 | self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE 32 | self.iou_loss_thresh = cfg.YOLO.IOU_LOSS_THRESH 33 | self.upsample_method = cfg.YOLO.UPSAMPLE_METHOD 34 | 35 | try: 36 | self.conv_lbbox, self.conv_mbbox, self.conv_sbbox = self.__build_nework(input_data) 37 | except: 38 | raise NotImplementedError("Can not build up yolov3 network!") 39 | 40 | with tf.variable_scope('pred_sbbox'): 41 | self.pred_sbbox = self.decode(self.conv_sbbox, self.anchors[0], self.strides[0]) 42 | 43 | with tf.variable_scope('pred_mbbox'): 44 | self.pred_mbbox = self.decode(self.conv_mbbox, self.anchors[1], self.strides[1]) 45 | 46 | with tf.variable_scope('pred_lbbox'): 47 | self.pred_lbbox = self.decode(self.conv_lbbox, self.anchors[2], self.strides[2]) 48 | 49 | def __build_nework(self, input_data): 50 | 51 | route_1, route_2, input_data = backbone.darknet53(input_data, self.trainable) 52 | 53 | input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv52') 54 | input_data = common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, 'conv53') 55 | input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv54') 56 | input_data = 
common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, 'conv55') 57 | input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv56') 58 | 59 | conv_lobj_branch = common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, name='conv_lobj_branch') 60 | conv_lbbox = common.convolutional(conv_lobj_branch, (1, 1, 1024, 3*(self.num_class + 5)), 61 | trainable=self.trainable, name='conv_lbbox', activate=False, bn=False) 62 | 63 | input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv57') 64 | input_data = common.upsample(input_data, name='upsample0', method=self.upsample_method) 65 | 66 | with tf.variable_scope('route_1'): 67 | input_data = tf.concat([input_data, route_2], axis=-1) 68 | 69 | input_data = common.convolutional(input_data, (1, 1, 768, 256), self.trainable, 'conv58') 70 | input_data = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, 'conv59') 71 | input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv60') 72 | input_data = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, 'conv61') 73 | input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv62') 74 | 75 | conv_mobj_branch = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, name='conv_mobj_branch' ) 76 | conv_mbbox = common.convolutional(conv_mobj_branch, (1, 1, 512, 3*(self.num_class + 5)), 77 | trainable=self.trainable, name='conv_mbbox', activate=False, bn=False) 78 | 79 | input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv63') 80 | input_data = common.upsample(input_data, name='upsample1', method=self.upsample_method) 81 | 82 | with tf.variable_scope('route_2'): 83 | input_data = tf.concat([input_data, route_1], axis=-1) 84 | 85 | input_data = common.convolutional(input_data, (1, 1, 384, 128), self.trainable, 'conv64') 86 | input_data = common.convolutional(input_data, (3, 3, 128, 
256), self.trainable, 'conv65') 87 | input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv66') 88 | input_data = common.convolutional(input_data, (3, 3, 128, 256), self.trainable, 'conv67') 89 | input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv68') 90 | 91 | conv_sobj_branch = common.convolutional(input_data, (3, 3, 128, 256), self.trainable, name='conv_sobj_branch') 92 | conv_sbbox = common.convolutional(conv_sobj_branch, (1, 1, 256, 3*(self.num_class + 5)), 93 | trainable=self.trainable, name='conv_sbbox', activate=False, bn=False) 94 | 95 | return conv_lbbox, conv_mbbox, conv_sbbox 96 | 97 | def decode(self, conv_output, anchors, stride): 98 | """ 99 | return tensor of shape [batch_size, output_size, output_size, anchor_per_scale, 5 + num_classes] 100 | contains (x, y, w, h, score, probability) 101 | """ 102 | 103 | conv_shape = tf.shape(conv_output) 104 | batch_size = conv_shape[0] 105 | output_size = conv_shape[1] 106 | anchor_per_scale = len(anchors) 107 | 108 | conv_output = tf.reshape(conv_output, (batch_size, output_size, output_size, anchor_per_scale, 5 + self.num_class)) 109 | 110 | conv_raw_dxdy = conv_output[:, :, :, :, 0:2] 111 | conv_raw_dwdh = conv_output[:, :, :, :, 2:4] 112 | conv_raw_conf = conv_output[:, :, :, :, 4:5] 113 | conv_raw_prob = conv_output[:, :, :, :, 5: ] 114 | 115 | y = tf.tile(tf.range(output_size, dtype=tf.int32)[:, tf.newaxis], [1, output_size]) 116 | x = tf.tile(tf.range(output_size, dtype=tf.int32)[tf.newaxis, :], [output_size, 1]) 117 | 118 | xy_grid = tf.concat([x[:, :, tf.newaxis], y[:, :, tf.newaxis]], axis=-1) 119 | xy_grid = tf.tile(xy_grid[tf.newaxis, :, :, tf.newaxis, :], [batch_size, 1, 1, anchor_per_scale, 1]) 120 | xy_grid = tf.cast(xy_grid, tf.float32) 121 | 122 | pred_xy = (tf.sigmoid(conv_raw_dxdy) + xy_grid) * stride 123 | pred_wh = (tf.exp(conv_raw_dwdh) * anchors) * stride 124 | pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1) 125 | 126 | 
pred_conf = tf.sigmoid(conv_raw_conf) 127 | pred_prob = tf.sigmoid(conv_raw_prob) 128 | 129 | return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1) 130 | 131 | def focal(self, target, actual, alpha=1, gamma=2): 132 | focal_loss = alpha * tf.pow(tf.abs(target - actual), gamma) 133 | return focal_loss 134 | 135 | def bbox_giou(self, boxes1, boxes2): 136 | 137 | boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5, 138 | boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1) 139 | boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5, 140 | boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1) 141 | 142 | boxes1 = tf.concat([tf.minimum(boxes1[..., :2], boxes1[..., 2:]), 143 | tf.maximum(boxes1[..., :2], boxes1[..., 2:])], axis=-1) 144 | boxes2 = tf.concat([tf.minimum(boxes2[..., :2], boxes2[..., 2:]), 145 | tf.maximum(boxes2[..., :2], boxes2[..., 2:])], axis=-1) 146 | 147 | boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1]) 148 | boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1]) 149 | 150 | left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2]) 151 | right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:]) 152 | 153 | inter_section = tf.maximum(right_down - left_up, 0.0) 154 | inter_area = inter_section[..., 0] * inter_section[..., 1] 155 | union_area = boxes1_area + boxes2_area - inter_area 156 | iou = inter_area / union_area 157 | 158 | enclose_left_up = tf.minimum(boxes1[..., :2], boxes2[..., :2]) 159 | enclose_right_down = tf.maximum(boxes1[..., 2:], boxes2[..., 2:]) 160 | enclose = tf.maximum(enclose_right_down - enclose_left_up, 0.0) 161 | enclose_area = enclose[..., 0] * enclose[..., 1] 162 | giou = iou - 1.0 * (enclose_area - union_area) / enclose_area 163 | 164 | return giou 165 | 166 | def bbox_iou(self, boxes1, boxes2): 167 | 168 | boxes1_area = boxes1[..., 2] * boxes1[..., 3] 169 | boxes2_area = boxes2[..., 2] * boxes2[..., 3] 170 | 171 | boxes1 = tf.concat([boxes1[..., :2] - 
boxes1[..., 2:] * 0.5, 172 | boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1) 173 | boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5, 174 | boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1) 175 | 176 | left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2]) 177 | right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:]) 178 | 179 | inter_section = tf.maximum(right_down - left_up, 0.0) 180 | inter_area = inter_section[..., 0] * inter_section[..., 1] 181 | union_area = boxes1_area + boxes2_area - inter_area 182 | iou = 1.0 * inter_area / union_area 183 | 184 | return iou 185 | 186 | def loss_layer(self, conv, pred, label, bboxes, anchors, stride): 187 | 188 | conv_shape = tf.shape(conv) 189 | batch_size = conv_shape[0] 190 | output_size = conv_shape[1] 191 | input_size = stride * output_size 192 | conv = tf.reshape(conv, (batch_size, output_size, output_size, 193 | self.anchor_per_scale, 5 + self.num_class)) 194 | conv_raw_conf = conv[:, :, :, :, 4:5] 195 | conv_raw_prob = conv[:, :, :, :, 5:] 196 | 197 | pred_xywh = pred[:, :, :, :, 0:4] 198 | pred_conf = pred[:, :, :, :, 4:5] 199 | 200 | label_xywh = label[:, :, :, :, 0:4] 201 | respond_bbox = label[:, :, :, :, 4:5] 202 | label_prob = label[:, :, :, :, 5:] 203 | 204 | giou = tf.expand_dims(self.bbox_giou(pred_xywh, label_xywh), axis=-1) 205 | input_size = tf.cast(input_size, tf.float32) 206 | 207 | bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2) 208 | giou_loss = respond_bbox * bbox_loss_scale * (1- giou) 209 | 210 | iou = self.bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :]) 211 | max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1) 212 | 213 | respond_bgd = (1.0 - respond_bbox) * tf.cast( max_iou < self.iou_loss_thresh, tf.float32 ) 214 | 215 | conf_focal = self.focal(respond_bbox, pred_conf) 216 | 217 | conf_loss = conf_focal * ( 218 | respond_bbox * 
tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf) 219 | + 220 | respond_bgd * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf) 221 | ) 222 | 223 | prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=conv_raw_prob) 224 | 225 | giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis=[1,2,3,4])) 226 | conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1,2,3,4])) 227 | prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1,2,3,4])) 228 | 229 | return giou_loss, conf_loss, prob_loss 230 | 231 | 232 | 233 | def compute_loss(self, label_sbbox, label_mbbox, label_lbbox, true_sbbox, true_mbbox, true_lbbox): 234 | 235 | with tf.name_scope('smaller_box_loss'): 236 | loss_sbbox = self.loss_layer(self.conv_sbbox, self.pred_sbbox, label_sbbox, true_sbbox, 237 | anchors = self.anchors[0], stride = self.strides[0]) 238 | 239 | with tf.name_scope('medium_box_loss'): 240 | loss_mbbox = self.loss_layer(self.conv_mbbox, self.pred_mbbox, label_mbbox, true_mbbox, 241 | anchors = self.anchors[1], stride = self.strides[1]) 242 | 243 | with tf.name_scope('bigger_box_loss'): 244 | loss_lbbox = self.loss_layer(self.conv_lbbox, self.pred_lbbox, label_lbbox, true_lbbox, 245 | anchors = self.anchors[2], stride = self.strides[2]) 246 | 247 | with tf.name_scope('giou_loss'): 248 | giou_loss = loss_sbbox[0] + loss_mbbox[0] + loss_lbbox[0] 249 | 250 | with tf.name_scope('conf_loss'): 251 | conf_loss = loss_sbbox[1] + loss_mbbox[1] + loss_lbbox[1] 252 | 253 | with tf.name_scope('prob_loss'): 254 | prob_loss = loss_sbbox[2] + loss_mbbox[2] + loss_lbbox[2] 255 | 256 | return giou_loss, conf_loss, prob_loss 257 | 258 | 259 | -------------------------------------------------------------------------------- /static/css/font-awesome.min.css: -------------------------------------------------------------------------------- 1 | /*! 
2 | * Font Awesome 4.1.0 by @davegandy - http://fontawesome.io - @fontawesome 3 | * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) 4 | */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.1.0');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.1.0') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff?v=4.1.0') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.1.0') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.1.0#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:spin 2s infinite linear;-moz-animation:spin 2s infinite linear;-o-animation:spin 2s infinite linear;animation:spin 2s infinite linear}@-moz-keyframes spin{0%{-moz-transform:rotate(0deg)}100%{-moz-transform:rotate(359deg)}}@-webkit-keyframes spin{0%{-webkit-transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg)}}@-o-keyframes spin{0%{-o-transform:rotate(0deg)}100%{-o-transform:rotate(359deg)}}@keyframes 
spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);-webkit-transform:scale(-1, 1);-moz-transform:scale(-1, 1);-ms-transform:scale(-1, 1);-o-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);-webkit-transform:scale(1, -1);-moz-transform:scale(1, -1);-ms-transform:scale(1, -1);-o-transform:scale(1, -1);transform:scale(1, 
-1)}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:bef
ore{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before
{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certi
ficate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"
\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.f
a-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f
150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.
fa-gittip:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper-square:before,.fa-pied-piper:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa
-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"} -------------------------------------------------------------------------------- /webcam/templates/css/font-awesome.min.css: -------------------------------------------------------------------------------- 1 | /*! 
2 | * Font Awesome 4.1.0 by @davegandy - http://fontawesome.io - @fontawesome 3 | * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) 4 | */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.1.0');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.1.0') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff?v=4.1.0') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.1.0') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.1.0#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:spin 2s infinite linear;-moz-animation:spin 2s infinite linear;-o-animation:spin 2s infinite linear;animation:spin 2s infinite linear}@-moz-keyframes spin{0%{-moz-transform:rotate(0deg)}100%{-moz-transform:rotate(359deg)}}@-webkit-keyframes spin{0%{-webkit-transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg)}}@-o-keyframes spin{0%{-o-transform:rotate(0deg)}100%{-o-transform:rotate(359deg)}}@keyframes 
spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);-webkit-transform:scale(-1, 1);-moz-transform:scale(-1, 1);-ms-transform:scale(-1, 1);-o-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);-webkit-transform:scale(1, -1);-moz-transform:scale(1, -1);-ms-transform:scale(1, -1);-o-transform:scale(1, -1);transform:scale(1, 
-1)}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:bef
ore{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before
{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certi
ficate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"
\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.f
a-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f
150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.
fa-gittip:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper-square:before,.fa-pied-piper:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa
-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"} --------------------------------------------------------------------------------