├── Business_value_marketing
│   ├── .ipynb_checkpoints
│   │   └── marketing_data-checkpoint.ipynb
│   ├── camera_1.csv
│   ├── marketing_data.ipynb
│   ├── time_average_age_correlation.png
│   ├── time_count_correlation.png
│   └── time_count_gender_correlation.png
├── JunctionAPI
│   ├── TinyFaces
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── tiny_face_det.py
│   │   ├── tiny_face_eval.py
│   │   ├── tiny_face_model.py
│   │   ├── tinyface_tf
│   │   │   ├── selfie.jpg
│   │   │   └── selfie_detection.jpg
│   │   ├── uploaded_imgs
│   │   │   ├── img1.jpg
│   │   │   └── img1_detection.jpg
│   │   └── util.py
│   ├── __init__.py
│   ├── api.py
│   ├── app
│   │   ├── __init__.py
│   │   ├── detect
│   │   │   ├── __init__.py
│   │   │   ├── face_detector.py
│   │   │   ├── forms.py
│   │   │   ├── routes.py
│   │   │   ├── routes_opencv.py
│   │   │   └── run_model_server.py
│   │   ├── main
│   │   │   ├── __init__.py
│   │   │   ├── forms.py
│   │   │   └── routes.py
│   │   └── templates
│   │       ├── base.html
│   │       └── detect
│   │           ├── results.html
│   │           └── upload.html
│   ├── config.py
│   └── flask_requirements.txt
├── LICENSE
├── README.md
├── api
│   ├── __pycache__
│   │   ├── api.cpython-35.pyc
│   │   ├── api.cpython-36.pyc
│   │   ├── config.cpython-35.pyc
│   │   └── config.cpython-36.pyc
│   ├── api.py
│   ├── app
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-35.pyc
│   │   │   └── __init__.cpython-36.pyc
│   │   ├── detect
│   │   │   ├── README.md
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-35.pyc
│   │   │   │   ├── __init__.cpython-36.pyc
│   │   │   │   ├── camera.cpython-35.pyc
│   │   │   │   ├── customerservices.cpython-35.pyc
│   │   │   │   ├── forms.cpython-35.pyc
│   │   │   │   ├── forms.cpython-36.pyc
│   │   │   │   ├── geo_info_caller.cpython-35.pyc
│   │   │   │   ├── routes.cpython-35.pyc
│   │   │   │   ├── routes.cpython-36.pyc
│   │   │   │   ├── run_model_server.cpython-35.pyc
│   │   │   │   └── run_model_server.cpython-36.pyc
│   │   │   ├── camera.py
│   │   │   ├── camera.pyc
│   │   │   ├── customerservices.py
│   │   │   ├── customerservices.pyc
│   │   │   ├── face_detector.py
│   │   │   ├── facedata.csv
│   │   │   ├── facedata.ods
│   │   │   ├── facedata.xls
│   │   │   ├── forms.py
│   │   │   ├── geo_info_caller.py
│   │   │   ├── morerealdata.csv
│   │   │   ├── outputgenerator.py
│   │   │   ├── routes.py
│   │   │   ├── run_model_server.py
│   │   │   ├── savedheatmap
│   │   │   │   └── heatmap.png
│   │   │   ├── staduim1.png
│   │   │   ├── test_customerService.py
│   │   │   └── tweetGenerator.py
│   │   ├── main
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-35.pyc
│   │   │   │   ├── __init__.cpython-36.pyc
│   │   │   │   ├── routes.cpython-35.pyc
│   │   │   │   └── routes.cpython-36.pyc
│   │   │   ├── forms.py
│   │   │   └── routes.py
│   │   ├── model
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-35.pyc
│   │   │   │   ├── __init__.cpython-36.pyc
│   │   │   │   ├── age_model.cpython-35.pyc
│   │   │   │   ├── age_model.cpython-36.pyc
│   │   │   │   ├── emo_model.cpython-35.pyc
│   │   │   │   ├── emo_model.cpython-36.pyc
│   │   │   │   ├── smallervggnet.cpython-35.pyc
│   │   │   │   └── smallervggnet.cpython-36.pyc
│   │   │   ├── age_model.py
│   │   │   ├── emo_model.py
│   │   │   └── smallervggnet.py
│   │   ├── records
│   │   │   └── camera1.csv
│   │   ├── static
│   │   │   ├── age_status.png
│   │   │   ├── emotion_status.png
│   │   │   ├── gender_status.png
│   │   │   ├── heatmap.png
│   │   │   ├── selfytest_detect.jpeg
│   │   │   ├── test2_detect.jpeg
│   │   │   └── test_detect.jpg
│   │   └── templates
│   │       ├── base.html
│   │       └── detect
│   │           ├── index.html
│   │           ├── public.html
│   │           ├── results.html
│   │           └── upload.html
│   ├── config.py
│   └── requirements.txt
├── face_detection_statistics.png
├── junction.jpg
├── out_putgeneration
│   ├── README.md
│   ├── camera.py
│   ├── camera.pyc
│   ├── customerservices.py
│   ├── customerservices.pyc
│   ├── facedata.csv
│   ├── facedata.ods
│   ├── facedata.xls
│   ├── geo_info_caller.py
│   ├── geo_info_caller.pyc
│   ├── morerealdata.csv
│   ├── outputgenerator.py
│   ├── savedheatmap
│   │   └── heatmap.png
│   ├── staduim1.png
│   ├── test_customerService.py
│   └── tweetGenerator.py
└── web_and_mobile.png
/Business_value_marketing/.ipynb_checkpoints/marketing_data-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import pandas as pd\n",
10 | "import numpy as np"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "data = pd.read_csv('camera_1.csv', encoding='utf8')"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": 3,
25 | "metadata": {},
26 | "outputs": [
27 | {
28 | "data": {
29 | "text/html": [
30 | "
\n",
31 | "\n",
44 | "
\n",
45 | " \n",
46 | " \n",
47 | " \n",
48 | " Time \n",
49 | " Face_count \n",
50 | " Age \n",
51 | " Male \n",
52 | " Female \n",
53 | " Emotion \n",
54 | " \n",
55 | " \n",
56 | " \n",
57 | " \n",
58 | " 0 \n",
59 | " 7:00 \n",
60 | " 20 \n",
61 | " 25 \n",
62 | " 80 \n",
63 | " 20 \n",
64 | " neutral \n",
65 | " \n",
66 | " \n",
67 | " 1 \n",
68 | " 7:15 \n",
69 | " 25 \n",
70 | " 25 \n",
71 | " 80 \n",
72 | " 20 \n",
73 | " happy \n",
74 | " \n",
75 | " \n",
76 | " 2 \n",
77 | " 7:30 \n",
78 | " 30 \n",
79 | " 25 \n",
80 | " 80 \n",
81 | " 20 \n",
82 | " neutral \n",
83 | " \n",
84 | " \n",
85 | " 3 \n",
86 | " 7:45 \n",
87 | " 25 \n",
88 | " 22 \n",
89 | " 80 \n",
90 | " 20 \n",
91 | " neutral \n",
92 | " \n",
93 | " \n",
94 | " 4 \n",
95 | " 8:00 \n",
96 | " 30 \n",
97 | " 28 \n",
98 | " 80 \n",
99 | " 20 \n",
100 | " happy \n",
101 | " \n",
102 | " \n",
103 | "
\n",
104 | "
"
105 | ],
106 | "text/plain": [
107 | " Time Face_count Age Male Female Emotion\n",
108 | "0 7:00 20 25 80 20 neutral\n",
109 | "1 7:15 25 25 80 20 happy\n",
110 | "2 7:30 30 25 80 20 neutral\n",
111 | "3 7:45 25 22 80 20 neutral\n",
112 | "4 8:00 30 28 80 20 happy"
113 | ]
114 | },
115 | "execution_count": 3,
116 | "metadata": {},
117 | "output_type": "execute_result"
118 | }
119 | ],
120 | "source": [
121 | "data.head()"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": 5,
127 | "metadata": {},
128 | "outputs": [
129 | {
130 | "name": "stdout",
131 | "output_type": "stream",
132 | "text": [
133 | "Emotion\n",
134 | "angry 3\n",
135 | "happy 29\n",
136 | "neutral 38\n",
137 | "none 3\n",
138 | "dtype: int64\n"
139 | ]
140 | },
141 | {
142 | "data": {
143 | "text/plain": [
144 | ""
145 | ]
146 | },
147 | "execution_count": 5,
148 | "metadata": {},
149 | "output_type": "execute_result"
150 | },
151 | {
152 | "data": {
153 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYsAAAD8CAYAAACGsIhGAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAADzlJREFUeJzt3X+s3XV9x/HnixZpRQcoHVZ0XmVdwFYp0jJ0TtEowbkMDLLg3FKmobpMQc0S3a/INCxu06nJmKQoAxeYAwQxLhPJBoomCLfSUmpBna2bwETRItUJWt7743ybnV1v7+dyz7n3nN48Hwnhe773e8553296z/N+v997z01VIUnSTA4a9QCSpPFnLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktS0dNQDDMuRRx5ZExMTox5Dkg4omzdv/l5VrWhtt2hiMTExweTk5KjHkKQDSpJvzWY7T0NJkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpadG8keBP7trOjmOPm9fnOO7uHfP6+JI0rjyykCQ1GQtJUpOxkCQ1GQtJUpOxkCQ1GQtJUpOxkCQ1GQtJUpOxkCQ1GQtJUpOxkCQ1DTUWSSaS7EhySZLtST6XZHmStUluTXJnkuuSHNFtf3OSv0pyW5KvJfn1bv2SJH+T5PbuPm8a5pySpMdnPo4sVgEXVdVqYDdwJvBx4J1V9XxgG/Duvu2XVtVJwNv61r8ReKiq1gPrgXOTPHseZpUkzcJ8vOvszqra0i1vBo4BDq+qz3frLgeu7tv+2r5tJ7rlU4HnJ3ltd/swehHa2f9ESTYCGwFWLl00b6ArSWNnPl5hH+lb3gscPsvt9/bNE+CtVXXDTHesqk3AJoA1y5bX4x9VkjQbC3GB+yHgB/uuRwC/B3x+hu0BbgD+IMnBAEl+Jcmh8zijJGkGC3XuZgNwcZInAt8Efr+x/UfpnZL6SpIA3wXOmNcJJUn7larFcfZmzbLldfXExLw+h38pT9Jik2RzVa1rbefvWUiSmoyFJKnJWEiSmoyFJKnJWEiSmoyFJKnJWEiSmoyFJKlp0bz73rI1qzlucnLUY0jSouSRhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqMhSSpyVhIkpqWjnqAYdn+4Haed/nz5vU5tm3YNq+PL0njyiMLSVKTsZAkNRkLSVKTsZAkNRkLSVKTsZAkNRkLSVKTsZAkNRkLSVKTsZAkNY1dLJIsGfUMkqT/b+BYJPlUks1JtifZ2K3bk+TCJFuT3JrkqG79Md3t25O8J8mebv0pSW5KciWwLcl7k5zf9xwXJjlv0FklSXMzjCOLN1TVicA64LwkTwUOBW6tquOBLwDndtt+GPhwVa0H7pvyOCcBf1pVzwU+BmwASHIQcDZwxRBmlSTNwTBicV6SrcCtwDOBVcCjwGe6j28GJrrlFwJXd8tXTnmc26pqJ0BV7QIeTHICcCpwR1U9OPWJk2xMMplkcu/De4fwqUiSpjPQW5QnOQV4BfDCqvpxkpuBZcBPq6q6zfbO8nl+NOX2R4FzgKcBl053h6raBGwCWP7s5TXdNpKkwQ16ZHEY8IMuFMcCJze2vxU4s1s+u7HtdcBpwHrghoGmlCQNZNBYfBZYmuRO4L30YjCTtwHvSHIbsBJ4aH8bVtWjwE3AVVXlOSZJGqGBTkNV1SPAq6b50JP6trkGuKa7eS9wclVVkrOByW6bm4Gb+x+gu7B9MnDWIDNKkga30H9W9UTg75IE2A28YbqNkjyX3gXy66rq6ws4nyRpGgsai6q6BTh+Ftt9FXjO/E8kSZqNsfsNbknS+DEWkqQmYyFJajIWkqQmYyFJajIWkqSmhf49i3mz+qmrmdwwOeoxJGlR8shCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktS0dNQDDM19d8AFh416CkmDuuChUU+gaXhkIUlqMhaSpCZjIUlqMhaSpCZjIUlqMhaSpCZjIUlqMhaSpCZjIUlqmlMskkwkuWvYw0iSxpNHFpKkpkFisSTJJUm2J/lckuVJzk1ye5KtST6Z5IkASS5LcnGSW5J8LclvduvPSXJ9ks8muSfJu7v1701y/r4nSnJhkvMG+kwlSXM2SCxWARdV1WpgN3AmcG1Vra+q44EdwBv7tp8AXgq8Grg4ybJu/UnA64G1wFlJ1gEfAzYAJDkIOBu4YuoASTYmmUwy+d0f1wCfiiRpJoPEYmdVbemWN9OLwZru6GEbvQCs7tv+qqp6rKq+DnwTOLZbf2NVPVhV/wNcC7y4qnYBDyY5ATgVuKOqHpw6QFVtqqp1VbVuxRMzwKciSZrJIG9R/kjf8l5gOXAZcEZVbU1yDnBK3zZTv/WvxvqPAucATwMuHWBOSdKAhn2B+8nA/UkOpndk0e+sJAclOQZ4DnBPt/6VSZ6SZDlwBvClbv11wGnAeuCGIc8pSXochv3Hj/4c+DLwLWAbvXjscw/weeAo4M1V9ZMkAF8E/hH4ZeDKqpoEqKpHk9wE7K6qvUOeU5L0OMwpFt01hTV9t9/f9+GP7OduX6qqt0+z/oGqesvUld2F7ZOBs+YyoyRpeMby9yySPBf4BvBv3QVxSdIILcjf4K6qc/az/jJ6F8Wnrv8qvesakqQxMJZHFpKk8WIsJElNxkKS1GQsJElNxkKS1GQsJElNC/Kjswvi6SfABZOjnkKSFiWPLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktS0dNQDDMu2ex9i4l3/MuoxJGlB7XrfqxfkeTyykCQ1GQtJUpOxkCQ1GQtJUpOxkCQ1GQtJUpOxkCQ1GQtJUpOxkCQ1LVgskkwk+Z053nfPsOeRJM3eQh5ZTADTxiLJonnbEUlajJov0kkmgH8Fvgi8CLgXOB14OnARsAL4MXBuVd2d5DLgM1V1TXf/PVX1JOB9wHFJtgCXAz8AXg0sAw5N8lvA9cARwMHAn1XV9UP7TCVJczbbI4tVwEVVtRrYDZwJbALeWlUnAn8E/H3jMd4F3FJVa6vqg926FwIbqurlwE+A11TVC4CXAR9Iksf36UiS5sNsT//srKot3fJmeqeUXgRc3fd6fsgcnv/Gqvp+txzgL5O8BHgMOBo4Cvjv/d0
5yUZgI8CSX1gxh6eXJM3GbGPxSN/yXnov4rurau002/6M7oilOzJ4wgyP+6O+5dfTO6V1YlX9NMkueqeo9quqNtE7wuGQlauq8TlIkuZorhe4fwjsTHIW9KKQ5PjuY7uAE7vl0+ldfwB4GHjyDI95GPBAF4qXAc+a42ySpCEb5KehXg+8MclWYDu9MABcArw0yW3Ar/J/Rw93Aj9LsjXJ26d5vCuAdUkmu8e+e4DZJElDlKrFcfbmkJWrauWGD416DElaUIP+pbwkm6tqXWs7f4NbktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTcZCktRkLCRJTYvm70g87+jDmBzwl1MkSdPzyEKS1GQsJElNxkKS1GQsJElNxkKS1GQsJElNxkKS1GQsJElNxkKS1GQsJElNxkKS1GQsJElNxkKS1JSqGvUMQ5HkYeCeUc8xC0cC3xv1ELPgnMPlnMPlnMPzrKpa0dpo0bxFOXBPVa0b9RAtSSadc3icc7icc7gOlDlnw9NQkqQmYyFJalpMsdg06gFmyTmHyzmHyzmH60CZs2nRXOCWJM2fxXRkIUmaJ4siFklOS3JPkm8kedeo59mfJLuSbEuyJcnkqOfZJ8mlSR5IclffuqckuTHJ17v/HzHKGbuZppvzgiT3dvt0S5LfGPGMz0xyU5IdSbYnOb9bP1b7c4Y5x21/LktyW5Kt3Zx/0a1/dpIvd/vzn5M8YUznvCzJzr79uXaUcw7igD8NlWQJ8DXglcC3gduB11XVV0c62DSS7ALWVdVY/dx1kpcAe4CPV9Wabt1fA9+vqvd1AT6iqt45hnNeAOypqvePcrZ9kqwEVlbVV5I8GdgMnAGcwxjtzxnm/G3Ga38GOLSq9iQ5GPgicD7wDuDaqvpEkouBrVX1kTGc883AZ6rqmlHNNiyL4cjiJOAbVfXNqnoU+ARw+ohnOqBU1ReA709ZfTpwebd8Ob0XkpHaz5xjparur6qvdMsPAzuAoxmz/TnDnGOlevZ0Nw/u/ivg5cC+F+Bx2J/7m3PRWAyxOBr4r77b32YM/9F3Cvhcks1JNo56mIajqup+6L2wAL844nlm8pYkd3anqUZ+umyfJBPACcCXGeP9OWVOGLP9mWRJki3AA8CNwH8Au6vqZ90mY/E1P3XOqtq3Py/s9ucHkxwywhEHshhikWnWjWvRf62qXgC8CvjD7rSKBvMR4BhgLXA/8IHRjtOT5EnAJ4G3VdUPRz3P/kwz59jtz6raW1VrgWfQO5Nw3HSbLexU0wwwZc4ka4A/Bo4F1gNPAUZ6KncQiyEW3wae2Xf7GcB9I5plRlV1X/f/B4Dr6P3DH1ff6c5r7zu//cCI55lWVX2n+yJ9DLiEMdin3TnrTwJXVNW13eqx25/TzTmO+3OfqtoN3AycDByeZN/bFY3V13zfnKd1p/uqqh4B/oEx2p+P12KIxe3Aqu6nI54AnA18esQz/Zwkh3YXEklyKHAqcNfM9xqpTwMbuuUNwPUjnGW/9r0Ad17DiPdpd6HzY8COqvrbvg+N1f7c35xjuD9XJDm8W14OvILe9ZWbgNd2m43D/pxuzrv7vkEIvesq4/w1P6MD/qehALof7/sQsAS4tKouHPFIPyfJc+gdTUDvDRyvHJc5k/wTcAq9d8j8DvBu4FPAVcAvAf8JnFVVI724vJ85T6F3yqSAXcCb9l0bGIUkLwZuAbYBj3Wr/4Te9YCx2Z8zzPk6xmt/Pp/eBewl9L65vaqq3tN9PX2C3qmdO4Df7b57H7c5/x1YQe90+RbgzX0Xwg8oiyIWkqT5tRhOQ0mS5pmxkCQ1GQtJUpOxkCQ1GQtJUpOxkCQ1GQtJUpOxkCQ1/S/UgiSSUUzCnAAAAABJRU5ErkJggg==\n",
154 | "text/plain": [
155 | ""
156 | ]
157 | },
158 | "metadata": {},
159 | "output_type": "display_data"
160 | }
161 | ],
162 | "source": [
163 | "print(data.groupby('Emotion').size())\n",
164 | "%matplotlib inline\n",
165 | "import matplotlib as plt\n",
166 | "data['Emotion'].value_counts().plot(kind=\"barh\")"
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": null,
172 | "metadata": {},
173 | "outputs": [],
174 | "source": []
175 | }
176 | ],
177 | "metadata": {
178 | "kernelspec": {
179 | "display_name": "Python 3",
180 | "language": "python",
181 | "name": "python3"
182 | },
183 | "language_info": {
184 | "codemirror_mode": {
185 | "name": "ipython",
186 | "version": 3
187 | },
188 | "file_extension": ".py",
189 | "mimetype": "text/x-python",
190 | "name": "python",
191 | "nbconvert_exporter": "python",
192 | "pygments_lexer": "ipython3",
193 | "version": "3.6.4"
194 | }
195 | },
196 | "nbformat": 4,
197 | "nbformat_minor": 2
198 | }
199 |
--------------------------------------------------------------------------------
/Business_value_marketing/camera_1.csv:
--------------------------------------------------------------------------------
1 | Time,Face_count,Age,Male,Female,Emotion
2 | 7:00,0,0,0,0,none
3 | 7:15,0,0,0,0,none
4 | 7:30,0,0,0,0,none
5 | 7:45,0,0,0,0,none
6 | 8:00,30,20,24,6,happy
7 | 8:15,30,20,24,6,happy
8 | 8:30,30,20,24,6,neutral
9 | 8:45,50,20,40,10,happy
10 | 9:00,60,20,48,12,neutral
11 | 9:15,100,20,75,25,neutral
12 | 9:30,150,20,105,45,neutral
13 | 9:45,250,20,175,75,angry
14 | 10:00,250,20,175,75,neutral
15 | 10:15,170,20,105,65,neutral
16 | 10:30,170,40,105,65,neutral
17 | 10:45,160,40,96,24,happy
18 | 11:00,120,40,96,24,happy
19 | 11:15,120,40,96,24,happy
20 | 11:30,100,40,80,20,neutral
21 | 11:45,100,40,80,20,neutral
22 | 12:00,80,20,40,40,neutral
23 | 12:15,80,20,40,40,neutral
24 | 12:30,80,20,40,40,neutral
25 | 12:45,50,20,35,15,neutral
26 | 13:00,50,20,35,15,neutral
27 | 13:15,50,20,35,15,happy
28 | 13:30,40,20,30,10,happy
29 | 13:45,40,20,30,10,happy
30 | 14:00,50,20,35,15,happy
31 | 14:15,60,20,40,20,happy
32 | 14:30,80,20,60,20,neutral
33 | 14:45,100,20,60,40,neutral
34 | 15:00,120,20,60,60,neutral
35 | 15:15,120,20,60,60,neutral
36 | 15:30,140,20,70,70,neutral
37 | 15:45,150,20,80,70,neutral
38 | 16:00,160,20,130,30,neutral
39 | 16:15,180,20,140,40,neutral
40 | 16:30,200,20,160,40,neutral
41 | 16:45,200,20,160,40,neutral
42 | 17:00,200,20,160,40,neutral
43 | 17:15,300,20,240,60,angry
44 | 17:30,300,30,240,60,angry
45 | 17:45,220,30,180,40,neutral
46 | 18:00,200,40,160,40,neutral
47 | 18:15,200,40,160,40,neutral
48 | 18:30,160,40,120,40,neutral
49 | 18:45,160,40,120,40,neutral
50 | 19:00,100,40,80,20,happy
51 | 19:15,100,40,80,20,happy
52 | 19:30,60,50,40,20,happy
53 | 19:45,50,50,40,10,happy
54 | 20:00,60,50,40,10,happy
55 | 20:15,60,50,40,10,happy
56 | 20:30,60,50,40,10,happy
57 | 20:45,50,50,40,10,happy
58 | 21:00,40,50,36,4,happy
59 | 21:15,30,50,27,3,neutral
60 | 21:30,20,50,18,2,neutral
61 | 21:45,10,50,9,1,neutral
62 | 22:00,10,50,9,1,neutral
63 | 22:15,10,50,9,1,neutral
64 | 22:30,10,60,9,1,happy
65 | 22:45,10,60,9,1,happy
66 | 23:00,10,60,9,1,happy
67 | 23:15,10,60,9,1,happy
68 | 23:30,10,60,9,1,happy
69 | 23:45,10,60,9,1,happy
70 | 0:00,10,60,9,1,happy
71 | 0:15,10,60,9,1,happy
72 | 0:30,0,0,0,0,none
73 | 0:45,0,0,0,0,none
74 | 1:00,0,0,0,0,none
--------------------------------------------------------------------------------
/Business_value_marketing/time_average_age_correlation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/Business_value_marketing/time_average_age_correlation.png
--------------------------------------------------------------------------------
/Business_value_marketing/time_count_correlation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/Business_value_marketing/time_count_correlation.png
--------------------------------------------------------------------------------
/Business_value_marketing/time_count_gender_correlation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/Business_value_marketing/time_count_gender_correlation.png
--------------------------------------------------------------------------------
/JunctionAPI/TinyFaces/README.md:
--------------------------------------------------------------------------------
1 | # Tiny Face Detector in TensorFlow
2 |
3 | A TensorFlow port (inference only) of the Tiny Face Detector from the [authors' MatConvNet code](https://github.com/peiyunh/tiny)[1].
4 |
5 | # Usage
6 | ## Converting a pretrained model
7 |
8 | `matconvnet_hr101_to_pickle` reads the weights of the MatConvNet pretrained model and
9 | writes them back to a pickle file, which the TensorFlow model then loads as its initial weights.
10 |
11 | 1. Download the [ResNet101-based pretrained model (hr_res101.mat)](https://www.cs.cmu.edu/%7Epeiyunh/tiny/hr_res101.mat)
12 | from the authors' repo.
13 |
14 | 2. Convert the model to a pickle file by:
15 | ```
16 | python matconvnet_hr101_to_pickle.py
17 | --matlab_model_path /path/to/pretrained_model
18 | --weight_file_path ./tinyface_tf/tf.pkl
19 | ```
20 | 3. Don't forget to change the model path in the "tiny_face_det.py" file (L:34); see the usage sketch below.
21 |
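Once the pickle path is set, detection can be driven from `tiny_face_det.py`. The following is a minimal, hypothetical driver sketch (not one of the repo's scripts); it assumes both the repository root and the `TinyFaces` directory are on `PYTHONPATH` so that `tiny_face_det` and `JunctionAPI.TinyFaces.util` import, and that `tinyface_tf/tf.pkl` is the converted weight file.

```python
# Hypothetical driver: detect faces in one image and save a visualization.
import cv2
from tiny_face_det import detect_from_img_path, overlay_bounding_boxes

# Returns an (N, 5) array of [x1, y1, x2, y2, score] rows.
bboxes = detect_from_img_path("tinyface_tf/selfie.jpg")
print("faces detected:", len(bboxes))

# Draw the boxes (color encodes confidence) and write the result next to the input.
img = cv2.cvtColor(cv2.imread("tinyface_tf/selfie.jpg"), cv2.COLOR_BGR2RGB)
overlay_bounding_boxes(img, bboxes, lw=3)
cv2.imwrite("tinyface_tf/selfie_detection.jpg", cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
```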
22 | # Examples
23 | ### selfie with many people
24 | This is the same image as the one in [the authors' repo](https://github.com/peiyunh/tiny)[1].
25 |
26 | 
27 |
28 | [Original image](https://github.com/peiyunh/tiny/blob/master/data/demo/selfie.jpg)
29 |
--------------------------------------------------------------------------------
/JunctionAPI/TinyFaces/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/JunctionAPI/TinyFaces/__init__.py
--------------------------------------------------------------------------------
/JunctionAPI/TinyFaces/tiny_face_det.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import tensorflow as tf
7 | import tiny_face_model
8 | import JunctionAPI.TinyFaces.util
9 | import numpy as np
10 | import cv2
11 |
12 | import time
13 | from scipy.special import expit
14 |
15 |
16 | def overlay_bounding_boxes(raw_img, refined_bboxes, lw):
17 | # Overlay bounding boxes on an image with the color based on the confidence.
18 | for r in refined_bboxes:
19 | _score = expit(r[4])
20 | cm_idx = int(np.ceil(_score * 255))
21 | rect_color = [int(np.ceil(x * 255)) for x in JunctionAPI.TinyFaces.util.cm_data[cm_idx]] # parula
22 | _lw = lw
23 | if lw == 0: # line width of each bounding box is adaptively determined.
24 | bw, bh = r[2] - r[0] + 1, r[3] - r[1] + 1
25 | _lw = 1 if min(bw, bh) <= 20 else max(2, min(3, min(bh / 20, bw / 20)))
26 | _lw = int(np.ceil(_lw * _score))
27 |
28 | _r = [int(x) for x in r[:4]]
29 | cv2.rectangle(raw_img, (_r[0], _r[1]), (_r[2], _r[3]), rect_color, _lw)
30 |
31 |
32 | def evaluate(img_path, prob_thresh=0.5, nms_thresh=0.1, lw=3, display=False):
33 | x = tf.placeholder(tf.float32, [1, None, None, 3]) # n, h, w, c
34 | model = tiny_face_model.Model('/path/to/pkl/file/')
35 | score_final = model.tiny_face(x)
36 |
37 |
38 | average_image = model.get_data_by_key("average_image")
39 | clusters = model.get_data_by_key("clusters")
40 |
41 | # main
42 | with tf.Session() as sess:
43 | sess.run(tf.global_variables_initializer())
44 | fname = img_path
45 | raw_img = cv2.imread(img_path)
46 | raw_img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2RGB)
47 | raw_img_f = raw_img.astype(np.float32)
48 |
49 | scales = [0.5, 1, 1.5, 2.0]
50 | start = time.time()
51 |
52 | # initialize output
53 | bboxes = np.empty(shape=(0, 5))
54 |
55 | # process input at different scales
56 | for s in scales:
57 | print("Processing {} at scale {:.4f}".format(fname, s))
58 | img = cv2.resize(raw_img_f, (0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
59 | img = img - average_image
60 | img = img[np.newaxis, :]
61 |
62 | # we don't run every template on every scale; compute ids of templates to ignore
63 | tids = list(range(4, 12)) + ([] if s <= 1.0 else list(range(18, 25)))
64 | ignoredTids = list(set(range(0, clusters.shape[0])) - set(tids))
65 |
66 | # run through the net
67 | score_final_tf = sess.run(score_final, feed_dict={x: img})
68 |
69 | # collect scores
70 | score_cls_tf, score_reg_tf = score_final_tf[:, :, :, :25], score_final_tf[:, :, :, 25:125]
71 | prob_cls_tf = expit(score_cls_tf)
72 | prob_cls_tf[0, :, :, ignoredTids] = 0.0
73 |
74 | def _calc_bounding_boxes():
75 | # threshold for detection
76 | _, fy, fx, fc = np.where(prob_cls_tf > prob_thresh)
77 |
78 | # interpret heatmap into bounding boxes
79 | cy = fy * 8 - 1
80 | cx = fx * 8 - 1
81 | ch = clusters[fc, 3] - clusters[fc, 1] + 1
82 | cw = clusters[fc, 2] - clusters[fc, 0] + 1
83 |
84 | # extract bounding box refinement
85 | Nt = clusters.shape[0]
86 | tx = score_reg_tf[0, :, :, 0:Nt]
87 | ty = score_reg_tf[0, :, :, Nt:2 * Nt]
88 | tw = score_reg_tf[0, :, :, 2 * Nt:3 * Nt]
89 | th = score_reg_tf[0, :, :, 3 * Nt:4 * Nt]
90 |
91 | # refine bounding boxes
92 | dcx = cw * tx[fy, fx, fc]
93 | dcy = ch * ty[fy, fx, fc]
94 | rcx = cx + dcx
95 | rcy = cy + dcy
96 | rcw = cw * np.exp(tw[fy, fx, fc])
97 | rch = ch * np.exp(th[fy, fx, fc])
98 |
99 | scores = score_cls_tf[0, fy, fx, fc]
100 | tmp_bboxes = np.vstack((rcx - rcw / 2, rcy - rch / 2, rcx + rcw / 2, rcy + rch / 2))
101 | tmp_bboxes = np.vstack((tmp_bboxes / s, scores))
102 | tmp_bboxes = tmp_bboxes.transpose()
103 | return tmp_bboxes
104 |
105 | tmp_bboxes = _calc_bounding_boxes()
106 | bboxes = np.vstack((bboxes, tmp_bboxes)) # : (5265, 5)
107 |
108 | print("time {:.2f} secs for {}".format(time.time() - start, fname))
109 |
110 | # non maximum suppression
111 | # refind_idx = util.nms(bboxes, nms_thresh)
112 | refind_idx = tf.image.non_max_suppression(tf.convert_to_tensor(bboxes[:, :4], dtype=tf.float32),
113 | tf.convert_to_tensor(bboxes[:, 4], dtype=tf.float32),
114 | max_output_size=bboxes.shape[0], iou_threshold=nms_thresh)
115 | refind_idx = sess.run(refind_idx)
116 | refined_bboxes = bboxes[refind_idx]
117 | return refined_bboxes
118 |
119 |
120 | def detect_from_img_path(img_path):
121 | with tf.Graph().as_default():
122 | detections = evaluate(img_path, prob_thresh=0.5, nms_thresh=0.1,lw=3)
123 | return detections
124 |
--------------------------------------------------------------------------------
/JunctionAPI/TinyFaces/tiny_face_eval.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 | import os
6 | os.environ["CUDA_VISIBLE_DEVICES"]="0"
7 |
8 |
9 | import tensorflow as tf
10 | import tiny_face_model
11 | import JunctionAPI.TinyFaces.util
12 | from argparse import ArgumentParser
13 | import numpy as np
14 | import matplotlib.pyplot as plt
15 | import cv2
16 | import pickle
17 |
18 | import pylab as pl
19 | import time
20 | import os
21 | from scipy.special import expit
22 | import glob
23 |
24 | MAX_INPUT_DIM = 5000.0
25 |
26 | def overlay_bounding_boxes(raw_img, refined_bboxes, lw):
27 | """Overlay bounding boxes of face on images.
28 | Args:
29 | raw_img:
30 | A target image.
31 | refined_bboxes:
32 | Bounding boxes of detected faces.
33 | lw:
34 | Line width of bounding boxes. If zero specified,
35 | this is determined based on confidence of each detection.
36 | Returns:
37 | None.
38 | """
39 |
40 | # Overlay bounding boxes on an image with the color based on the confidence.
41 | for r in refined_bboxes:
42 | _score = expit(r[4])
43 | cm_idx = int(np.ceil(_score * 255))
44 | rect_color = [int(np.ceil(x * 255)) for x in JunctionAPI.TinyFaces.util.cm_data[cm_idx]] # parula
45 | _lw = lw
46 | if lw == 0: # line width of each bounding box is adaptively determined.
47 | bw, bh = r[2] - r[0] + 1, r[3] - r[1] + 1
48 | _lw = 1 if min(bw, bh) <= 20 else max(2, min(3, min(bh / 20, bw / 20)))
49 | _lw = int(np.ceil(_lw * _score))
50 |
51 | _r = [int(x) for x in r[:4]]
52 | cv2.rectangle(raw_img, (_r[0], _r[1]), (_r[2], _r[3]), rect_color, _lw)
53 |
54 |
55 | def evaluate(weight_file_path, data_dir, output_dir, prob_thresh=0.5, nms_thresh=0.1, lw=3, display=False):
56 | """Detect faces in images.
57 | Args:
58 | prob_thresh:
59 | The threshold of detection confidence.
60 | nms_thresh:
61 | The overlap threshold of non maximum suppression
62 | weight_file_path:
63 | A pretrained weight file in the pickle format
64 | generated by matconvnet_hr101_to_tf.py.
65 | data_dir:
66 | A directory which contains images.
67 | output_dir:
68 | A directory into which images with detected faces are output.
69 | lw:
70 | Line width of bounding boxes. If zero specified,
71 | this is determined based on confidence of each detection.
72 | display:
73 | Display tiny face images on window.
74 | Returns:
75 | None.
76 | """
77 |
78 | # placeholder of input images. Currently batch size of one is supported.
79 | x = tf.placeholder(tf.float32, [1, None, None, 3]) # n, h, w, c
80 |
81 | # Create the tiny face model which weights are loaded from a pretrained model.
82 | model = tiny_face_model.Model(weight_file_path)
83 | score_final = model.tiny_face(x)
84 |
85 | # Find image files in data_dir.
86 | filenames = []
87 | for ext in ('*.png', '*.gif', '*.jpg', '*.jpeg'):
88 | filenames.extend(glob.glob(os.path.join(data_dir, ext)))
89 |
90 | # Load an average image and clusters(reference boxes of templates).
91 | with open(weight_file_path, "rb") as f:
92 | _, mat_params_dict = pickle.load(f)
93 |
94 | average_image = model.get_data_by_key("average_image")
95 | clusters = model.get_data_by_key("clusters")
96 | clusters_h = clusters[:, 3] - clusters[:, 1] + 1
97 | clusters_w = clusters[:, 2] - clusters[:, 0] + 1
98 | normal_idx = np.where(clusters[:, 4] == 1)
99 |
100 | # main
101 | with tf.Session() as sess:
102 | sess.run(tf.global_variables_initializer())
103 |
104 | for filename in filenames:
105 | fname = filename.split(os.sep)[-1]
106 | raw_img = cv2.imread(filename)
107 | raw_img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2RGB)
108 | raw_img_f = raw_img.astype(np.float32)
109 |
110 | def _calc_scales():
111 | raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
112 | min_scale = min(np.floor(np.log2(np.max(clusters_w[normal_idx] / raw_w))),
113 | np.floor(np.log2(np.max(clusters_h[normal_idx] / raw_h))))
114 | max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))
115 | scales_down = pl.frange(min_scale, 0, 1.)
116 | scales_up = pl.frange(0.5, max_scale, 0.5)
117 | scales_pow = np.hstack((scales_down, scales_up))
118 | scales = np.power(2.0, scales_pow)
119 | return scales
120 |
121 | scales = [0.5, 1, 1.2] # 0.5000, 1.0000, 1.4142, 2.0000] # _calc_scales()
122 | start = time.time()
123 |
124 | # initialize output
125 | bboxes = np.empty(shape=(0, 5))
126 |
127 | # process input at different scales
128 | for s in scales:
129 | print("Processing {} at scale {:.4f}".format(fname, s))
130 | img = cv2.resize(raw_img_f, (0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
131 | img = img - average_image
132 | img = img[np.newaxis, :]
133 |
134 | # we don't run every template on every scale; compute ids of templates to ignore
135 | tids = list(range(4, 12)) + ([] if s <= 1.0 else list(range(18, 25)))
136 | ignoredTids = list(set(range(0, clusters.shape[0])) - set(tids))
137 |
138 | # run through the net
139 | score_final_tf = sess.run(score_final, feed_dict={x: img})
140 |
141 | # collect scores
142 | score_cls_tf, score_reg_tf = score_final_tf[:, :, :, :25], score_final_tf[:, :, :, 25:125]
143 | prob_cls_tf = expit(score_cls_tf)
144 | prob_cls_tf[0, :, :, ignoredTids] = 0.0
145 |
146 | def _calc_bounding_boxes():
147 | # threshold for detection
148 | _, fy, fx, fc = np.where(prob_cls_tf > prob_thresh)
149 |
150 | # interpret heatmap into bounding boxes
151 | cy = fy * 8 - 1
152 | cx = fx * 8 - 1
153 | ch = clusters[fc, 3] - clusters[fc, 1] + 1
154 | cw = clusters[fc, 2] - clusters[fc, 0] + 1
155 |
156 | # extract bounding box refinement
157 | Nt = clusters.shape[0]
158 | tx = score_reg_tf[0, :, :, 0:Nt]
159 | ty = score_reg_tf[0, :, :, Nt:2*Nt]
160 | tw = score_reg_tf[0, :, :, 2*Nt:3*Nt]
161 | th = score_reg_tf[0, :, :, 3*Nt:4*Nt]
162 |
163 | # refine bounding boxes
164 | dcx = cw * tx[fy, fx, fc]
165 | dcy = ch * ty[fy, fx, fc]
166 | rcx = cx + dcx
167 | rcy = cy + dcy
168 | rcw = cw * np.exp(tw[fy, fx, fc])
169 | rch = ch * np.exp(th[fy, fx, fc])
170 |
171 | scores = score_cls_tf[0, fy, fx, fc]
172 | tmp_bboxes = np.vstack((rcx - rcw / 2, rcy - rch / 2, rcx + rcw / 2, rcy + rch / 2))
173 | tmp_bboxes = np.vstack((tmp_bboxes / s, scores))
174 | tmp_bboxes = tmp_bboxes.transpose()
175 | return tmp_bboxes
176 |
177 | tmp_bboxes = _calc_bounding_boxes()
178 | bboxes = np.vstack((bboxes, tmp_bboxes)) # : (5265, 5)
179 |
180 |
181 | print("time {:.2f} secs for {}".format(time.time() - start, fname))
182 |
183 | # non maximum suppression
184 | # refind_idx = util.nms(bboxes, nms_thresh)
185 | refind_idx = tf.image.non_max_suppression(tf.convert_to_tensor(bboxes[:, :4], dtype=tf.float32),
186 | tf.convert_to_tensor(bboxes[:, 4], dtype=tf.float32),
187 | max_output_size=bboxes.shape[0], iou_threshold=nms_thresh)
188 | refind_idx = sess.run(refind_idx)
189 | refined_bboxes = bboxes[refind_idx]
190 | overlay_bounding_boxes(raw_img, refined_bboxes, lw)
191 |
192 | if display:
193 | # plt.axis('off')
194 | plt.imshow(raw_img)
195 | plt.show()
196 |
197 | # save image with bounding boxes
198 | raw_img = cv2.cvtColor(raw_img, cv2.COLOR_RGB2BGR)
199 | cv2.imwrite(os.path.join(output_dir, fname), raw_img)
200 |
201 | def main():
202 |
203 | argparse = ArgumentParser()
204 | argparse.add_argument('--weight_file_path', type=str, help='Pretrained weight file.', default="/path/to/mat2tf.pkl")
205 | argparse.add_argument('--data_dir', type=str, help='Image data directory.', default="/path/to/input_image_directory")
206 | argparse.add_argument('--output_dir', type=str, help='Output directory for images with faces detected.', default="/path/to/output_directory")
207 | argparse.add_argument('--prob_thresh', type=float, help='The threshold of detection confidence(default: 0.5).', default=0.5)
208 | argparse.add_argument('--nms_thresh', type=float, help='The overlap threshold of non maximum suppression(default: 0.1).', default=0.1)
209 | argparse.add_argument('--line_width', type=int, help='Line width of bounding boxes(0: auto).', default=3)
210 | argparse.add_argument('--display', type=bool, help='Display each image on window.', default=False)
211 |
212 | args = argparse.parse_args()
213 |
214 | # check arguments
215 | assert os.path.exists(args.weight_file_path), "weight file: " + args.weight_file_path + " not found."
216 | assert os.path.exists(args.data_dir), "data directory: " + args.data_dir + " not found."
217 | assert os.path.exists(args.output_dir), "output directory: " + args.output_dir + " not found."
218 | assert args.line_width >= 0, "line_width should be >= 0."
219 |
220 | with tf.Graph().as_default():
221 | evaluate(
222 | weight_file_path=args.weight_file_path, data_dir=args.data_dir, output_dir=args.output_dir,
223 | prob_thresh=args.prob_thresh, nms_thresh=args.nms_thresh,
224 | lw=args.line_width, display=args.display)
225 |
226 | if __name__ == '__main__':
227 | main()
228 |
--------------------------------------------------------------------------------
/JunctionAPI/TinyFaces/tiny_face_model.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import tensorflow as tf
7 | import numpy as np
8 | import pickle
9 |
10 | class Model():
11 | def __init__(self, weight_file_path):
12 | """Overlay bounding boxes of face on images.
13 | Args:
14 | weight_file_path:
15 | A pretrained weight file in the pickle format
16 | generated by matconvnet_hr101_to_tf.py.
17 | Returns:
18 | None.
19 | """
20 | self.dtype = tf.float32
21 | self.weight_file_path = weight_file_path
22 | with open(self.weight_file_path, "rb") as f:
23 | self.mat_blocks_dict, self.mat_params_dict = pickle.load(f)
24 |
25 | def get_data_by_key(self, key):
26 | """Helper to access a pretrained model data through a key."""
27 | assert key in self.mat_params_dict, "key: " + key + " not found."
28 | return self.mat_params_dict[key]
29 |
30 | def _weight_variable_on_cpu(self, name, shape):
31 | """Helper to create a weight Variable stored on CPU memory.
32 |
33 | Args:
34 | name: name of the variable.
35 | shape: list of ints: (height, width, channel, filter).
36 |
37 | Returns:
38 | initializer for Variable.
39 | """
40 | assert len(shape) == 4
41 |
42 | weights = self.get_data_by_key(name + "_filter") # (h, w, channel, filter)
43 | assert list(weights.shape) == shape
44 | initializer = tf.constant_initializer(weights, dtype=self.dtype)
45 |
46 | with tf.device('/cpu:0'):
47 | var = tf.get_variable(name + "_w", shape, initializer=initializer, dtype=self.dtype)
48 | return var
49 |
50 | def _bias_variable_on_cpu(self, name, shape):
51 | """Helper to create a bias Variable stored on CPU memory.
52 |
53 | Args:
54 | name: name of the variable.
55 | shape: int, filter size.
56 |
57 | Returns:
58 | initializer for Variable.
59 | """
60 | assert isinstance(shape, int)
61 | bias = self.get_data_by_key(name + "_bias")
62 | assert len(bias) == shape
63 | initializer = tf.constant_initializer(bias, dtype=self.dtype)
64 |
65 | with tf.device('/cpu:0'):
66 | var = tf.get_variable(name + "_b", shape, initializer=initializer, dtype=self.dtype)
67 | return var
68 |
69 |
70 | def _bn_variable_on_cpu(self, name, shape):
71 | """Helper to create a batch normalization Variable stored on CPU memory.
72 |
73 | Args:
74 | name: name of the variable.
75 | shape: int, filter size.
76 |
77 | Returns:
78 | initializer for Variable.
79 | """
80 | assert isinstance(shape, int)
81 |
82 | name2 = "bn" + name[3:]
83 | if name.startswith("conv"):
84 | name2 = "bn_" + name
85 |
86 | scale = self.get_data_by_key(name2 + '_scale')
87 | offset = self.get_data_by_key(name2 + '_offset')
88 | mean = self.get_data_by_key(name2 + '_mean')
89 | variance = self.get_data_by_key(name2 + '_variance')
90 |
91 | with tf.device('/cpu:0'):
92 | initializer = tf.constant_initializer(scale, dtype=self.dtype)
93 | scale = tf.get_variable(name2 + "_scale", shape, initializer=initializer, dtype=self.dtype)
94 | initializer = tf.constant_initializer(offset, dtype=self.dtype)
95 | offset = tf.get_variable(name2 + "_offset", shape, initializer=initializer, dtype=self.dtype)
96 | initializer = tf.constant_initializer(mean, dtype=self.dtype)
97 | mean = tf.get_variable(name2 + "_mean", shape, initializer=initializer, dtype=self.dtype)
98 | initializer = tf.constant_initializer(variance, dtype=self.dtype)
99 | variance = tf.get_variable(name2 + "_variance", shape, initializer=initializer, dtype=self.dtype)
100 |
101 | return scale, offset, mean, variance
102 |
103 |
104 | def conv_block(self, bottom, name, shape, strides=[1,1,1,1], padding="SAME",
105 | has_bias=False, add_relu=True, add_bn=True, eps=1.0e-5):
106 | """Create a block composed of multiple layers:
107 | a conv layer
108 | a batch normalization layer
109 | an activation layer
110 |
111 | Args:
112 | bottom: A layer before this block.
113 | name: Name of the block.
114 | shape: List of ints: (height, width, channel, filter).
115 | strides: Strides of conv layer.
116 | padding: Padding of conv layer.
117 | has_bias: Whether a bias term is added.
118 | add_relu: Whether a ReLU layer is added.
119 | add_bn: Whether a batch normalization layer is added.
120 | eps: A small float number to avoid dividing by 0, used in a batch normalization layer.
121 | Returns:
122 | a block of layers
123 | """
124 | assert len(shape) == 4
125 |
126 | weight = self._weight_variable_on_cpu(name, shape)
127 | conv = tf.nn.conv2d(bottom, weight, strides, padding=padding)
128 | if has_bias:
129 | bias = self._bias_variable_on_cpu(name, shape[3])
130 |
131 | pre_activation = tf.nn.bias_add(conv, bias) if has_bias else conv
132 |
133 | if add_bn:
134 | # scale, offset, mean, variance = self._bn_variable_on_cpu("bn_" + name, shape[-1])
135 | scale, offset, mean, variance = self._bn_variable_on_cpu(name, shape[-1])
136 | pre_activation = tf.nn.batch_normalization(pre_activation, mean, variance, offset, scale, variance_epsilon=eps)
137 |
138 | relu = tf.nn.relu(pre_activation) if add_relu else pre_activation
139 |
140 | return relu
141 |
142 |
143 | def conv_trans_layer(self, bottom, name, shape, strides=[1,1,1,1], padding="SAME", has_bias=False):
144 | """Create a block composed of multiple layers:
145 | a transpose of conv layer
146 | an activation layer
147 |
148 | Args:
149 | bottom: A layer before this block.
150 | name: Name of the block.
151 | shape: List of ints: (height, width, channel, filter).
152 | strides: Strides of conv layer.
153 | padding: Padding of conv layer.
154 | has_bias: Whether a bias term is added.
155 | add_relu: Whether a ReLU layer is added.
156 | Returns:
157 | a block of layers
158 | """
159 | assert len(shape) == 4
160 |
161 | weight = self._weight_variable_on_cpu(name, shape)
162 | nb, h, w, nc = tf.split(tf.shape(bottom), num_or_size_splits=4)
163 | output_shape = tf.stack([nb, (h - 1) * strides[1] - 3 + shape[0], (w - 1) * strides[2] - 3 + shape[1], nc])[:, 0]
164 | conv = tf.nn.conv2d_transpose(bottom, weight, output_shape, strides, padding=padding)
165 | if has_bias:
166 | bias = self._bias_variable_on_cpu(name, shape[3])
167 |
168 | conv = tf.nn.bias_add(conv, bias) if has_bias else conv
169 |
170 | return conv
171 |
172 | def residual_block(self, bottom, name, in_channel, neck_channel, out_channel, trunk):
173 | """Create a residual block.
174 |
175 | Args:
176 | bottom: A layer before this block.
177 | name: Name of the block.
178 | in_channel: number of channels in a input tensor.
179 | neck_channel: number of channels in a bottleneck block.
180 | out_channel: number of channels in a output tensor.
181 | trunk: a tensor in a identity path.
182 | Returns:
183 | a block of layers
184 | """
185 | _strides = [1, 2, 2, 1] if name.startswith("res3a") or name.startswith("res4a") else [1, 1, 1, 1]
186 | res = self.conv_block(bottom, name + '_branch2a', shape=[1, 1, in_channel, neck_channel],
187 | strides=_strides, padding="VALID", add_relu=True)
188 | res = self.conv_block(res, name + '_branch2b', shape=[3, 3, neck_channel, neck_channel],
189 | padding="SAME", add_relu=True)
190 | res = self.conv_block(res, name + '_branch2c', shape=[1, 1, neck_channel, out_channel],
191 | padding="VALID", add_relu=False)
192 |
193 | res = trunk + res
194 | res = tf.nn.relu(res)
195 |
196 | return res
197 |
198 | def tiny_face(self, image):
199 | """Create a tiny face model.
200 |
201 | Args:
202 | image: an input image.
203 | Returns:
204 | a score tensor
205 | """
206 | img = tf.pad(image, [[0, 0], [3, 3], [3, 3], [0, 0]], "CONSTANT")
207 | conv = self.conv_block(img, 'conv1', shape=[7, 7, 3, 64], strides=[1, 2, 2, 1], padding="VALID", add_relu=True)
208 | pool1 = tf.nn.max_pool(conv, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
209 |
210 | res2a_branch1 = self.conv_block(pool1, 'res2a_branch1', shape=[1, 1, 64, 256], padding="VALID", add_relu=False)
211 | res2a = self.residual_block(pool1, 'res2a', 64, 64, 256, res2a_branch1)
212 | res2b = self.residual_block(res2a, 'res2b', 256, 64, 256, res2a)
213 | res2c = self.residual_block(res2b, 'res2c', 256, 64, 256, res2b)
214 |
215 | res3a_branch1 = self.conv_block(res2c, 'res3a_branch1', shape=[1, 1, 256, 512], strides=[1, 2, 2, 1], padding="VALID", add_relu=False)
216 | res3a = self.residual_block(res2c, 'res3a', 256, 128, 512, res3a_branch1)
217 |
218 | res3b1 = self.residual_block(res3a, 'res3b1', 512, 128, 512, res3a)
219 | res3b2 = self.residual_block(res3b1, 'res3b2', 512, 128, 512, res3b1)
220 | res3b3 = self.residual_block(res3b2, 'res3b3', 512, 128, 512, res3b2)
221 |
222 | res4a_branch1 = self.conv_block(res3b3, 'res4a_branch1', shape=[1, 1, 512, 1024], strides=[1, 2, 2, 1], padding="VALID", add_relu=False)
223 | res4a = self.residual_block(res3b3, 'res4a', 512, 256, 1024, res4a_branch1)
224 |
225 | res4b = res4a
226 | for i in range(1, 23):
227 | res4b = self.residual_block(res4b, 'res4b' + str(i), 1024, 256, 1024, res4b)
228 |
229 | score_res4 = self.conv_block(res4b, 'score_res4', shape=[1, 1, 1024, 125], padding="VALID",
230 | has_bias=True, add_relu=False, add_bn=False)
231 | score4 = self.conv_trans_layer(score_res4, 'score4', shape=[4, 4, 125, 125], strides=[1, 2, 2, 1], padding="SAME")
232 | score_res3 = self.conv_block(res3b3, 'score_res3', shape=[1, 1, 512, 125], padding="VALID",
233 | has_bias=True, add_bn=False, add_relu=False)
234 |
235 | bs, height, width = tf.split(tf.shape(score4), num_or_size_splits=4)[0:3]
236 | _size = tf.convert_to_tensor([height[0], width[0]])
237 | _offsets = tf.zeros([bs[0], 2])
238 | score_res3c = tf.image.extract_glimpse(score_res3, _size, _offsets, centered=True, normalized=False)
239 |
240 | score_final = score4 + score_res3c
241 | return score_final
242 |
--------------------------------------------------------------------------------
/JunctionAPI/TinyFaces/tinyface_tf/selfie.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/JunctionAPI/TinyFaces/tinyface_tf/selfie.jpg
--------------------------------------------------------------------------------
/JunctionAPI/TinyFaces/tinyface_tf/selfie_detection.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/JunctionAPI/TinyFaces/tinyface_tf/selfie_detection.jpg
--------------------------------------------------------------------------------
/JunctionAPI/TinyFaces/uploaded_imgs/img1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/JunctionAPI/TinyFaces/uploaded_imgs/img1.jpg
--------------------------------------------------------------------------------
/JunctionAPI/TinyFaces/uploaded_imgs/img1_detection.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/JunctionAPI/TinyFaces/uploaded_imgs/img1_detection.jpg
--------------------------------------------------------------------------------
/JunctionAPI/TinyFaces/util.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import numpy as np
7 |
8 | def nms(dets, prob_thresh):
9 | x1 = dets[:, 0]
10 | y1 = dets[:, 1]
11 | x2 = dets[:, 2]
12 | y2 = dets[:, 3]
13 | scores = dets[:, 4]
14 |
15 | areas = (x2 - x1 + 1) * (y2 - y1 + 1)
16 |
17 | order = scores.argsort()[::-1]
18 |
19 | keep = []
20 | while order.size > 0:
21 | i = order[0]
22 | keep.append(i)
23 | xx1 = np.maximum(x1[i], x1[order[1:]])
24 | yy1 = np.maximum(y1[i], y1[order[1:]])
25 | xx2 = np.minimum(x2[i], x2[order[1:]])
26 | yy2 = np.minimum(y2[i], y2[order[1:]])
27 | w = np.maximum(0.0, xx2 - xx1 + 1)
28 | h = np.maximum(0.0, yy2 - yy1 + 1)
29 | inter = w * h
30 |
31 | ovr = inter / (areas[i] + areas[order[1:]] - inter)
32 | inds = np.where(ovr <= prob_thresh)[0]
33 |
34 | order = order[inds + 1]
35 | return keep
36 |
37 | # colormap parula borrowed from
38 | # https://github.com/BIDS/colormap/blob/master/fake_parula.py
39 | cm_data = [[ 0.26710521, 0.03311059, 0.6188155 ],
40 | [ 0.26493929, 0.04780926, 0.62261795],
41 | [ 0.26260545, 0.06084214, 0.62619176],
42 | [ 0.26009691, 0.07264411, 0.62951561],
43 | [ 0.25740785, 0.08360391, 0.63256745],
44 | [ 0.25453369, 0.09395358, 0.63532497],
45 | [ 0.25147146, 0.10384228, 0.6377661 ],
46 | [ 0.24822014, 0.11337029, 0.6398697 ],
47 | [ 0.24478105, 0.12260661, 0.64161629],
48 | [ 0.24115816, 0.131599 , 0.6429888 ],
49 | [ 0.23735836, 0.14038009, 0.64397346],
50 | [ 0.23339166, 0.14897137, 0.64456048],
51 | [ 0.22927127, 0.15738602, 0.64474476],
52 | [ 0.22501278, 0.16563165, 0.64452595],
53 | [ 0.22063349, 0.17371215, 0.64390834],
54 | [ 0.21616055, 0.18162302, 0.64290515],
55 | [ 0.21161851, 0.18936156, 0.64153295],
56 | [ 0.20703353, 0.19692415, 0.63981287],
57 | [ 0.20243273, 0.20430706, 0.63776986],
58 | [ 0.19784363, 0.211507 , 0.63543183],
59 | [ 0.19329361, 0.21852157, 0.63282872],
60 | [ 0.18880937, 0.2253495 , 0.62999156],
61 | [ 0.18442119, 0.23198815, 0.62695569],
62 | [ 0.18014936, 0.23844124, 0.62374886],
63 | [ 0.17601569, 0.24471172, 0.62040016],
64 | [ 0.17204028, 0.25080356, 0.61693715],
65 | [ 0.16824123, 0.25672163, 0.6133854 ],
66 | [ 0.16463462, 0.26247158, 0.60976836],
67 | [ 0.16123449, 0.26805963, 0.60610723],
68 | [ 0.15805279, 0.27349243, 0.60242099],
69 | [ 0.15509948, 0.27877688, 0.59872645],
70 | [ 0.15238249, 0.28392004, 0.59503836],
71 | [ 0.14990781, 0.28892902, 0.59136956],
72 | [ 0.14767951, 0.29381086, 0.58773113],
73 | [ 0.14569979, 0.29857245, 0.58413255],
74 | [ 0.1439691 , 0.30322055, 0.58058191],
75 | [ 0.14248613, 0.30776167, 0.57708599],
76 | [ 0.14124797, 0.31220208, 0.57365049],
77 | [ 0.14025018, 0.31654779, 0.57028011],
78 | [ 0.13948691, 0.32080454, 0.5669787 ],
79 | [ 0.13895174, 0.32497744, 0.56375063],
80 | [ 0.13863958, 0.32907012, 0.56060453],
81 | [ 0.138537 , 0.3330895 , 0.55753513],
82 | [ 0.13863384, 0.33704026, 0.55454374],
83 | [ 0.13891931, 0.34092684, 0.55163126],
84 | [ 0.13938212, 0.34475344, 0.54879827],
85 | [ 0.14001061, 0.34852402, 0.54604503],
86 | [ 0.14079292, 0.35224233, 0.54337156],
87 | [ 0.14172091, 0.35590982, 0.54078769],
88 | [ 0.14277848, 0.35953205, 0.53828312],
89 | [ 0.14395358, 0.36311234, 0.53585661],
90 | [ 0.1452346 , 0.36665374, 0.5335074 ],
91 | [ 0.14661019, 0.3701591 , 0.5312346 ],
92 | [ 0.14807104, 0.37363011, 0.52904278],
93 | [ 0.1496059 , 0.3770697 , 0.52692951],
94 | [ 0.15120289, 0.3804813 , 0.52488853],
95 | [ 0.15285214, 0.38386729, 0.52291854],
96 | [ 0.15454421, 0.38722991, 0.52101815],
97 | [ 0.15627225, 0.39056998, 0.5191937 ],
98 | [ 0.15802555, 0.39389087, 0.5174364 ],
99 | [ 0.15979549, 0.39719482, 0.51574311],
100 | [ 0.16157425, 0.40048375, 0.51411214],
101 | [ 0.16335571, 0.40375871, 0.51254622],
102 | [ 0.16513234, 0.40702178, 0.51104174],
103 | [ 0.1668964 , 0.41027528, 0.50959299],
104 | [ 0.16864151, 0.41352084, 0.50819797],
105 | [ 0.17036277, 0.41675941, 0.50685814],
106 | [ 0.1720542 , 0.41999269, 0.50557008],
107 | [ 0.17370932, 0.42322271, 0.50432818],
108 | [ 0.17532301, 0.42645082, 0.50313007],
109 | [ 0.17689176, 0.42967776, 0.50197686],
110 | [ 0.17841013, 0.43290523, 0.5008633 ],
111 | [ 0.17987314, 0.43613477, 0.49978492],
112 | [ 0.18127676, 0.43936752, 0.49873901],
113 | [ 0.18261885, 0.44260392, 0.49772638],
114 | [ 0.18389409, 0.44584578, 0.49673978],
115 | [ 0.18509911, 0.44909409, 0.49577605],
116 | [ 0.18623135, 0.4523496 , 0.494833 ],
117 | [ 0.18728844, 0.45561305, 0.49390803],
118 | [ 0.18826671, 0.45888565, 0.49299567],
119 | [ 0.18916393, 0.46216809, 0.49209268],
120 | [ 0.18997879, 0.46546084, 0.49119678],
121 | [ 0.19070881, 0.46876472, 0.49030328],
122 | [ 0.19135221, 0.47208035, 0.48940827],
123 | [ 0.19190791, 0.47540815, 0.48850845],
124 | [ 0.19237491, 0.47874852, 0.4876002 ],
125 | [ 0.19275204, 0.48210192, 0.48667935],
126 | [ 0.19303899, 0.48546858, 0.48574251],
127 | [ 0.19323526, 0.48884877, 0.48478573],
128 | [ 0.19334062, 0.49224271, 0.48380506],
129 | [ 0.19335574, 0.49565037, 0.4827974 ],
130 | [ 0.19328143, 0.49907173, 0.48175948],
131 | [ 0.19311664, 0.50250719, 0.48068559],
132 | [ 0.192864 , 0.50595628, 0.47957408],
133 | [ 0.19252521, 0.50941877, 0.47842186],
134 | [ 0.19210087, 0.51289469, 0.47722441],
135 | [ 0.19159194, 0.516384 , 0.47597744],
136 | [ 0.19100267, 0.51988593, 0.47467988],
137 | [ 0.19033595, 0.52340005, 0.47332894],
138 | [ 0.18959113, 0.5269267 , 0.47191795],
139 | [ 0.18877336, 0.530465 , 0.47044603],
140 | [ 0.18788765, 0.53401416, 0.46891178],
141 | [ 0.18693822, 0.53757359, 0.46731272],
142 | [ 0.18592276, 0.54114404, 0.46563962],
143 | [ 0.18485204, 0.54472367, 0.46389595],
144 | [ 0.18373148, 0.5483118 , 0.46207951],
145 | [ 0.18256585, 0.55190791, 0.4601871 ],
146 | [ 0.18135481, 0.55551253, 0.45821002],
147 | [ 0.18011172, 0.55912361, 0.45615277],
148 | [ 0.17884392, 0.56274038, 0.45401341],
149 | [ 0.17755858, 0.56636217, 0.45178933],
150 | [ 0.17625543, 0.56998972, 0.44946971],
151 | [ 0.174952 , 0.57362064, 0.44706119],
152 | [ 0.17365805, 0.57725408, 0.44456198],
153 | [ 0.17238403, 0.58088916, 0.4419703 ],
154 | [ 0.17113321, 0.58452637, 0.43927576],
155 | [ 0.1699221 , 0.58816399, 0.43648119],
156 | [ 0.1687662 , 0.5918006 , 0.43358772],
157 | [ 0.16767908, 0.59543526, 0.43059358],
158 | [ 0.16667511, 0.59906699, 0.42749697],
159 | [ 0.16575939, 0.60269653, 0.42428344],
160 | [ 0.16495764, 0.6063212 , 0.42096245],
161 | [ 0.16428695, 0.60993988, 0.41753246],
162 | [ 0.16376481, 0.61355147, 0.41399151],
163 | [ 0.16340924, 0.61715487, 0.41033757],
164 | [ 0.16323549, 0.62074951, 0.40656329],
165 | [ 0.16326148, 0.62433443, 0.40266378],
166 | [ 0.16351136, 0.62790748, 0.39864431],
167 | [ 0.16400433, 0.63146734, 0.39450263],
168 | [ 0.16475937, 0.63501264, 0.39023638],
169 | [ 0.16579502, 0.63854196, 0.38584309],
170 | [ 0.16712921, 0.64205381, 0.38132023],
171 | [ 0.168779 , 0.64554661, 0.37666513],
172 | [ 0.17075915, 0.64901912, 0.37186962],
173 | [ 0.17308572, 0.65246934, 0.36693299],
174 | [ 0.1757732 , 0.65589512, 0.36185643],
175 | [ 0.17883344, 0.65929449, 0.3566372 ],
176 | [ 0.18227669, 0.66266536, 0.35127251],
177 | [ 0.18611159, 0.66600553, 0.34575959],
178 | [ 0.19034516, 0.66931265, 0.34009571],
179 | [ 0.19498285, 0.67258423, 0.3342782 ],
180 | [ 0.20002863, 0.67581761, 0.32830456],
181 | [ 0.20548509, 0.67900997, 0.3221725 ],
182 | [ 0.21135348, 0.68215834, 0.31587999],
183 | [ 0.2176339 , 0.68525954, 0.30942543],
184 | [ 0.22432532, 0.68831023, 0.30280771],
185 | [ 0.23142568, 0.69130688, 0.29602636],
186 | [ 0.23893914, 0.69424565, 0.28906643],
187 | [ 0.2468574 , 0.69712255, 0.28194103],
188 | [ 0.25517514, 0.69993351, 0.27465372],
189 | [ 0.26388625, 0.70267437, 0.26720869],
190 | [ 0.27298333, 0.70534087, 0.25961196],
191 | [ 0.28246016, 0.70792854, 0.25186761],
192 | [ 0.29232159, 0.71043184, 0.2439642 ],
193 | [ 0.30253943, 0.71284765, 0.23594089],
194 | [ 0.31309875, 0.71517209, 0.22781515],
195 | [ 0.32399522, 0.71740028, 0.21959115],
196 | [ 0.33520729, 0.71952906, 0.21129816],
197 | [ 0.3467003 , 0.72155723, 0.20298257],
198 | [ 0.35846225, 0.72348143, 0.19466318],
199 | [ 0.3704552 , 0.72530195, 0.18639333],
200 | [ 0.38264126, 0.72702007, 0.17822762],
201 | [ 0.39499483, 0.72863609, 0.17020921],
202 | [ 0.40746591, 0.73015499, 0.1624122 ],
203 | [ 0.42001969, 0.73158058, 0.15489659],
204 | [ 0.43261504, 0.73291878, 0.14773267],
205 | [ 0.44521378, 0.73417623, 0.14099043],
206 | [ 0.45777768, 0.73536072, 0.13474173],
207 | [ 0.47028295, 0.73647823, 0.1290455 ],
208 | [ 0.48268544, 0.73753985, 0.12397794],
209 | [ 0.49497773, 0.73854983, 0.11957878],
210 | [ 0.5071369 , 0.73951621, 0.11589589],
211 | [ 0.51913764, 0.74044827, 0.11296861],
212 | [ 0.53098624, 0.74134823, 0.11080237],
213 | [ 0.5426701 , 0.74222288, 0.10940411],
214 | [ 0.55417235, 0.74308049, 0.10876749],
215 | [ 0.56550904, 0.74392086, 0.10885609],
216 | [ 0.57667994, 0.74474781, 0.10963233],
217 | [ 0.58767906, 0.74556676, 0.11105089],
218 | [ 0.59850723, 0.74638125, 0.1130567 ],
219 | [ 0.609179 , 0.74719067, 0.11558918],
220 | [ 0.61969877, 0.74799703, 0.11859042],
221 | [ 0.63007148, 0.74880206, 0.12200388],
222 | [ 0.64030249, 0.74960714, 0.12577596],
223 | [ 0.65038997, 0.75041586, 0.12985641],
224 | [ 0.66034774, 0.75122659, 0.1342004 ],
225 | [ 0.67018264, 0.75203968, 0.13876817],
226 | [ 0.67990043, 0.75285567, 0.14352456],
227 | [ 0.68950682, 0.75367492, 0.14843886],
228 | [ 0.69900745, 0.75449768, 0.15348445],
229 | [ 0.70840781, 0.75532408, 0.15863839],
230 | [ 0.71771325, 0.75615416, 0.16388098],
231 | [ 0.72692898, 0.75698787, 0.1691954 ],
232 | [ 0.73606001, 0.75782508, 0.17456729],
233 | [ 0.74511119, 0.75866562, 0.17998443],
234 | [ 0.75408719, 0.75950924, 0.18543644],
235 | [ 0.76299247, 0.76035568, 0.19091446],
236 | [ 0.77183123, 0.76120466, 0.19641095],
237 | [ 0.78060815, 0.76205561, 0.20191973],
238 | [ 0.78932717, 0.76290815, 0.20743538],
239 | [ 0.79799213, 0.76376186, 0.21295324],
240 | [ 0.8066067 , 0.76461631, 0.21846931],
241 | [ 0.81517444, 0.76547101, 0.22398014],
242 | [ 0.82369877, 0.76632547, 0.2294827 ],
243 | [ 0.832183 , 0.7671792 , 0.2349743 ],
244 | [ 0.8406303 , 0.76803167, 0.24045248],
245 | [ 0.84904371, 0.76888236, 0.24591492],
246 | [ 0.85742615, 0.76973076, 0.25135935],
247 | [ 0.86578037, 0.77057636, 0.25678342],
248 | [ 0.87410891, 0.77141875, 0.2621846 ],
249 | [ 0.88241406, 0.77225757, 0.26755999],
250 | [ 0.89070781, 0.77308772, 0.27291122],
251 | [ 0.89898836, 0.77391069, 0.27823228],
252 | [ 0.90725475, 0.77472764, 0.28351668],
253 | [ 0.91550775, 0.77553893, 0.28875751],
254 | [ 0.92375722, 0.7763404 , 0.29395046],
255 | [ 0.9320227 , 0.77712286, 0.29909267],
256 | [ 0.94027715, 0.7779011 , 0.30415428],
257 | [ 0.94856742, 0.77865213, 0.3091325 ],
258 | [ 0.95686038, 0.7793949 , 0.31397459],
259 | [ 0.965222 , 0.7800975 , 0.31864342],
260 | [ 0.97365189, 0.78076521, 0.32301107],
261 | [ 0.98227405, 0.78134549, 0.32678728],
262 | [ 0.99136564, 0.78176999, 0.3281624 ],
263 | [ 0.99505988, 0.78542889, 0.32106514],
264 | [ 0.99594185, 0.79046888, 0.31648808],
265 | [ 0.99646635, 0.79566972, 0.31244662],
266 | [ 0.99681528, 0.80094905, 0.30858532],
267 | [ 0.9970578 , 0.80627441, 0.30479247],
268 | [ 0.99724883, 0.81161757, 0.30105328],
269 | [ 0.99736711, 0.81699344, 0.29725528],
270 | [ 0.99742254, 0.82239736, 0.29337235],
271 | [ 0.99744736, 0.82781159, 0.28943391],
272 | [ 0.99744951, 0.83323244, 0.28543062],
273 | [ 0.9973953 , 0.83867931, 0.2812767 ],
274 | [ 0.99727248, 0.84415897, 0.27692897],
275 | [ 0.99713953, 0.84963903, 0.27248698],
276 | [ 0.99698641, 0.85512544, 0.26791703],
277 | [ 0.99673736, 0.86065927, 0.26304767],
278 | [ 0.99652358, 0.86616957, 0.25813608],
279 | [ 0.99622774, 0.87171946, 0.25292044],
280 | [ 0.99590494, 0.87727931, 0.24750009],
281 | [ 0.99555225, 0.88285068, 0.2418514 ],
282 | [ 0.99513763, 0.8884501 , 0.23588062],
283 | [ 0.99471252, 0.89405076, 0.2296837 ],
284 | [ 0.99421873, 0.89968246, 0.2230963 ],
285 | [ 0.99370185, 0.90532165, 0.21619768],
286 | [ 0.99313786, 0.91098038, 0.2088926 ],
287 | [ 0.99250707, 0.91666811, 0.20108214],
288 | [ 0.99187888, 0.92235023, 0.19290417],
289 | [ 0.99110991, 0.92809686, 0.18387963],
290 | [ 0.99042108, 0.93379995, 0.17458127],
291 | [ 0.98958484, 0.93956962, 0.16420166],
292 | [ 0.98873988, 0.94533859, 0.15303117],
293 | [ 0.98784836, 0.95112482, 0.14074826],
294 | [ 0.98680727, 0.95697596, 0.12661626]]
295 |
--------------------------------------------------------------------------------
/JunctionAPI/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/JunctionAPI/__init__.py
--------------------------------------------------------------------------------
/JunctionAPI/api.py:
--------------------------------------------------------------------------------
1 | from app import create_app
2 |
3 | app = create_app()
4 |
--------------------------------------------------------------------------------
/JunctionAPI/app/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/JunctionAPI/app/__init__.py
--------------------------------------------------------------------------------
/JunctionAPI/app/detect/__init__.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint
2 |
3 | bp = Blueprint('detect', __name__)
4 |
5 | from app.detect import routes
6 | from app.detect import run_model_server
7 | run_model_server = run_model_server.pre_load_model()
8 |
--------------------------------------------------------------------------------
/JunctionAPI/app/detect/face_detector.py:
--------------------------------------------------------------------------------
1 | import time
2 | import cv2
3 |
4 |
5 | class FaceDetector():
6 | def __init__(self):
7 | from ..hr101_mxnet.tiny_fd import TinyFacesDetector
8 | # from tiny_fd import TinyFacesDetector
9 | self.tiny_faces = TinyFacesDetector(model_root='/home/paperspace/Desktop/Junction_ver2/api/app/weights', prob_thresh=0.5, gpu_idx=0)
10 |
11 | def detect(self, fname, txtfname):
12 | self.img = cv2.imread(fname)
13 | tic = time.time()
14 |         boxes = self.tiny_faces.detect(self.img)  # fixed: the detector attribute is self.tiny_faces
15 | toc = time.time()
16 | elapsed_time = toc - tic
17 |
18 | self.write_to_txt_file(fname, boxes, txtfname)
19 |
20 | return boxes.shape[0], boxes, elapsed_time
21 |
22 |     def get_face_bboxes(self, img):
23 |         boxes = self.tiny_faces.detect(img)  # fixed: use the image passed in, not self.img
24 |         # return boxes.shape[0], boxes
25 |         return 127, [1,2,3,4]  # hard-coded stub return; the real values are commented out above
26 | def write_to_txt_file(self, fname, boxes, txtfname):
27 | txtfile = open(txtfname, 'w')
28 | txtfile.write(fname + '\n')
29 | txtfile.write(str(boxes.shape[0]) + '\n')
30 | for box in boxes:
31 | line = ''
32 |
33 |             x1 = min(max(0, int(box[0])), self.img.shape[1])  # clamp x to image width
34 |             y1 = min(max(0, int(box[1])), self.img.shape[0])  # clamp y to image height
35 |             x2 = min(max(0, int(box[2])), self.img.shape[1])
36 |             y2 = min(max(0, int(box[3])), self.img.shape[0])
37 | line += str(x1) + ' ' + str(y1) + ' ' + str(x2) + ' ' + str(y2) + '\n'
38 |
39 | txtfile.write(line)
40 |
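A minimal usage sketch of the `FaceDetector` wrapper above, assuming the TinyFaces weights referenced in `__init__` are available at the hard-coded `model_root`; the image and output file names are hypothetical:

# Hypothetical usage of FaceDetector; file names are illustrative only.
from app.detect.face_detector import FaceDetector

detector = FaceDetector()
n_faces, boxes, elapsed = detector.detect("uploaded_imgs/img1.jpg", "uploaded_imgs/img1_boxes.txt")
print("detected %d faces in %.2f s" % (n_faces, elapsed))

`detect()` also writes one clamped `x1 y1 x2 y2` line per box to the text file via `write_to_txt_file`.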
--------------------------------------------------------------------------------
/JunctionAPI/app/detect/forms.py:
--------------------------------------------------------------------------------
1 | from flask_wtf import FlaskForm
2 |
3 | class DetectForm(FlaskForm):
4 | pass
5 |
--------------------------------------------------------------------------------
/JunctionAPI/app/detect/routes.py:
--------------------------------------------------------------------------------
1 | from flask import redirect, url_for, request, render_template, flash, session
2 | from flask import current_app as current_app
3 | from PIL import Image
4 | import cv2
5 | from app.detect import bp
6 | from app.detect import run_model_server
7 | import numpy as np
8 | from app.detect.forms import DetectForm
9 | from keras.preprocessing.image import img_to_array
10 | import matplotlib.pyplot as plt
11 | import os
12 | import io
13 | import imutils
14 | import copy
15 | import csv
16 | import shutil
17 | import sys
18 | sys.path.append('../../TinyFaces')
19 | from tiny_face_det import detect_from_img_path
20 | #import tiny_face_eval
21 |
22 | @bp.route('/detect', methods=['GET', 'POST'])
23 | def detect():
24 | if "processed_data" not in session:
25 | session["processed_data"] = {}
26 | bucket = session["processed_data"]
27 | emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
28 | gender_dict = {0: 'man', 1:'woman'}
29 |
30 | # For data analysis
31 | emo_count = {"Angry":0, "Disgusted":0, "Fearful":0, "Happy":0, "Neutral":0, "Sad":0, "Surprised":0}
32 | gender_count = {"man":0, "woman":0}
33 | age_class = {}
34 |
35 | if "processed_data" not in session:
36 | # To store the processed_data
37 | session["processed_data"] = {}
38 | form = DetectForm()
39 | current_app.config.update(DROPZONE_UPLOAD_ON_CLICK=True)
40 |
41 | if request.method == "POST":
42 | for (key, f) in (request.files.items()):
43 | image = f.read()
44 | image = Image.open(io.BytesIO(image)).convert('RGB')
45 | opencv_img = np.array(image)
46 | opencv_img = opencv_img[:, :, ::-1].copy()
47 | age_img_size = run_model_server.age_mdl.input.shape.as_list()[1]
48 | frame = imutils.resize(opencv_img, width=400)
49 | (h, w) = frame.shape[:2]
50 |             img_path = os.path.join('../../TinyFaces/uploaded_imgs', f.filename)  # write to a file inside the folder, not to the folder path itself
51 |             cv2.imwrite(img_path, frame)
52 | detections = detect_from_img_path(img_path)
53 | print(detections)
54 | count = 0
55 |
56 | history_data = []
57 | # loop over the detections
58 | for det in detections:
59 | count += 1
60 | box = det[:-1]
61 | startX = int(box[0])
62 | startY = int(box[1])
63 | endX = int(box[2])
64 | endY = int(box[3])
65 |
66 | # Extract the patches
67 | img_patch = frame[startY:startY + (endY-startY), startX:startX + (endX - startX)]
68 |
69 | # Patch for Emo detect
70 | emo_patch = cv2.cvtColor(img_patch, cv2.COLOR_RGB2GRAY)
71 | emo_patch = np.expand_dims(np.expand_dims(cv2.resize(
72 | emo_patch, (48, 48)), -1), 0)
73 |
74 | # Patch for gender detect
75 | gender_patch = cv2.resize(img_patch, (96, 96))
76 | gender_patch = gender_patch.astype("float") / 255.0
77 | gender_patch = img_to_array(gender_patch)
78 | gender_patch = np.expand_dims(gender_patch, axis=0)
79 |
80 | # Path for age detect
81 | age_patch = cv2.cvtColor(img_patch, cv2.COLOR_BGR2RGB)
82 | age_patch = cv2.resize(age_patch, (age_img_size, age_img_size))
83 | age_patch = np.expand_dims(age_patch, axis=0)
84 |
85 | graph = run_model_server.graph
86 | with graph.as_default():
87 | predicted_age = run_model_server.age_mdl.predict(age_patch)
88 | ages = np.arange(0, 101).reshape(101, 1)
89 | predicted_age = int(predicted_age.dot(ages).flatten())
90 |
91 | detected_gender = run_model_server.gender_mdl.predict(gender_patch)[0]
92 | gender_index = int(np.argmax(detected_gender))
93 |
94 | predicted_emo = run_model_server.emo_mdl.predict(emo_patch)
95 |
96 | emo_index = int(np.argmax(predicted_emo))
97 | emo_count[emotion_dict[emo_index]] += 1
98 | gender_count[gender_dict[gender_index]] += 1
99 | if str(predicted_age) in age_class:
100 | age_class[str(predicted_age)] +=1
101 | else:
102 | age_class[str(predicted_age)] = 1
103 | history_data.append([emotion_dict[emo_index],gender_dict[gender_index],predicted_age,(startX,startY, endX, endY)])
104 |
105 | # draw the bounding box of the face along with the associated
106 | # probability
107 | y = startY - 10 if startY - 10 > 10 else startY + 10
108 | cv2.rectangle(frame, (startX, startY), (endX, endY),(0, 255, 0), 2)
109 |
110 | static_file_path = os.path.join(os.getcwd(), "app", "static")
111 | # Generate Emotion Status
112 | xs = ['Angry', 'Disgusted', 'Fearful', 'Happy', 'Neutral', 'Sad', 'Surprised']
113 | ys = []
114 | data = copy.deepcopy(emo_count)
115 | print(data)
116 | ys = [data['Angry'], data['Disgusted'], data['Fearful'], data['Happy'], data['Neutral'], data['Sad'], data['Surprised']]
117 | plt.bar(list(xs), ys, align='center', alpha=0.5)
118 |
119 | plt.ylabel('Number of people')
120 | plt.title('Emotion Classification')
121 | plt.savefig(os.path.join(static_file_path,'emotion_status.png'))
122 | plt.cla()
123 | plt.clf()
124 | # Generate Gender Count
125 | xs = ['MAN', 'WOMAN']
126 | ys = []
127 | data = copy.deepcopy(gender_count)
128 | ys = [data['man'], data['woman']]
129 | plt.bar(list(xs), ys, align='center', alpha=0.5)
130 |
131 | plt.ylabel('Number of people')
132 | plt.title('Gender Classification')
133 | plt.savefig(os.path.join(static_file_path,'gender_status.png'))
134 | plt.cla()
135 | plt.clf()
136 | # Age Classification
137 | data = copy.deepcopy(age_class)
138 | xs = []
139 | ys = []
140 | for item, lst in data.items():
141 | xs.append(item)
142 | ys.append(data[item])
143 | #plt.plot(list(xs), ys, label='Crowd Count', color='blue')
144 | plt.plot(list(xs), ys, 'bo', label='Crowd Count')
145 | plt.title("Age Classification Status")
146 | plt.xlabel("Age")
147 | plt.ylabel("Number of people")
148 | plt.legend(loc='upper right')
149 | plt.savefig(os.path.join(static_file_path,'age_status.png'))
150 | plt.cla()
151 | plt.clf()
152 | file_name = "{}_detect.{}".format(f.filename.split('.')[0],f.filename.split('.')[1])
153 | cv2.imwrite(os.path.join(static_file_path, file_name), frame)
154 | # Note we must return 200 HTTP OK response.
155 | # Saving csv for historical data analysis.
156 | # TODO: Change to database.
157 | with open(os.path.join(os.getcwd(),'app', 'records', 'camera1.csv'), mode='a') as data_file:
158 | data_file= csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
159 | for i in range(len(history_data)):
160 | data_file.writerow(list(history_data[i]))
161 |
162 | bucket['file_name'] = file_name
163 | bucket['detected_person'] = count
164 | session['processed_data'] = bucket
165 | return "Successfully processed.", 200
166 | else:
167 | return render_template('detect/../templates/detect/upload.html', title="Tokyo Junction",
168 | form=form)
169 |
170 | @bp.route('/result')
171 | def result():
172 | if "processed_data" not in session:
173 | return redirect(url_for('detect.detect'))
174 | processed_data = session["processed_data"]
175 | file_name = processed_data['file_name']
176 | person_count = processed_data['detected_person']
177 | session.pop('processed_data', None)
178 | # TO DO
179 | # Display the processed data on the page
180 | print("Here result")
181 | return render_template('detect/../templates/detect/results.html', file_name=file_name, people_count=person_count)
182 |
183 | @bp.route('/public')
184 | def public():
185 | print("Public...")
186 | return render_template('detect/../templates/detect/public.html')
187 |
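On the age estimate used in the route above: `run_model_server.age_mdl` is treated as returning a probability distribution over the integer ages 0-100, and `predicted_age.dot(ages)` takes its expected value. A small self-contained illustration with invented probabilities (NumPy only):

# Illustration of the expected-value age estimate from routes.py; the probabilities are made up.
import numpy as np

probs = np.zeros((1, 101))      # one face, 101 age bins (0..100)
probs[0, 24] = 0.3
probs[0, 25] = 0.5
probs[0, 26] = 0.2

ages = np.arange(0, 101).reshape(101, 1)
expected_age = int(probs.dot(ages).flatten())   # 0.3*24 + 0.5*25 + 0.2*26 = 24.9 -> truncated to 24
print(expected_age)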
--------------------------------------------------------------------------------
/JunctionAPI/app/detect/routes_opencv.py:
--------------------------------------------------------------------------------
1 | from flask import redirect, url_for, request, render_template, flash, session
2 | from flask import current_app as current_app
3 | from PIL import Image
4 | import cv2
5 | from app.detect import bp
6 | from app.detect import run_model_server
7 | import numpy as np
8 | from app.detect.forms import DetectForm
9 | from keras.preprocessing.image import img_to_array
10 | import matplotlib.pyplot as plt
11 | import os
12 | import io
13 | import imutils
14 | import csv
15 | import sys
16 | sys.path.append('/home/paperspace/Desktop/MLT/Tiny_Faces_in_Tensorflow')
17 | from tiny_face_det import get_img_path
18 | #import tiny_face_eval
19 |
20 | @bp.route('/detect', methods=['GET', 'POST'])
21 | def detect():
22 | if "processed_data" not in session:
23 | session["processed_data"] = {}
24 | bucket = session["processed_data"]
25 | emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
26 | gender_dict = {0: 'man', 1:'woman'}
27 |
28 | # For data analysis
29 | emo_count = {"Angry":0, "Disgusted":0, "Fearful":0, "Happy":0, "Neutral":0,
30 | "Sad":0, "Surprised":0}
31 | gender_count = {"man":0, "woman":0}
32 | age_class = {}
33 |
34 | if "processed_data" not in session:
35 | # To store the processed_data
36 | session["processed_data"] = {}
37 | form = DetectForm()
38 | current_app.config.update(DROPZONE_UPLOAD_ON_CLICK=True)
39 | # if request.method == 'GET':
40 | # print("Here visited")
41 | # return render_template('detect/upload.html', title="Upload Files")
42 | # else:
43 | # print("Posted Done")
44 | # return "Successfully processed.", 200
45 | if request.method == "POST":
46 | for (key, f) in (request.files.items()):
47 | # f.save(os.path.join(os.getcwd(),"app","uploads", f.filename))
48 | image = f.read()
49 | image = Image.open(io.BytesIO(image)).convert('RGB')
50 | opencv_img = np.array(image)
51 | opencv_img = opencv_img[:, :, ::-1].copy()
52 | age_img_size = run_model_server.age_mdl.input.shape.as_list()[1]
53 | frame = imutils.resize(opencv_img, width=400)
54 | (h, w) = frame.shape[:2]
55 | blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,(300, 300), (104.0, 177.0, 123.0))
56 | # pass the blob through the network and obtain the detections and
57 | # predictions
58 | run_model_server.net.setInput(blob)
59 | detections = run_model_server.net.forward()
60 | print(detections)
61 | #img_path = '/home/paperspace/img.jpg'
62 | #cv2.imwrite(img_path, frame)
63 | #detections = get_img_path(img_path)
64 | count = 0
65 |
66 | history_data = []
67 | # loop over the detections
68 | for i in range(0, detections.shape[2]):
69 | # extract the confidence (i.e., probability) associated with the
70 | # prediction
71 | confidence = detections[0, 0, i, 2]
72 |
73 | # filter out weak detections by ensuring the `confidence` is
74 | # greater than the minimum confidence
75 | if confidence < 0.5:
76 | continue
77 | count += 1
78 | # compute the (x, y)-coordinates of the bounding box for the
79 | # object
80 | box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
81 | (startX, startY, endX, endY) = box.astype("int")
82 |
83 | # Extract the patches
84 | img_patch = frame[startY:startY + (endY-startY), startX:startX + (endX - startX)]
85 |
86 | # Patch for Emo detect
87 | emo_patch = cv2.cvtColor(img_patch, cv2.COLOR_RGB2GRAY)
88 | emo_patch = np.expand_dims(np.expand_dims(cv2.resize(
89 | emo_patch, (48, 48)), -1), 0)
90 |
91 | # Patch for gender detect
92 | gender_patch = cv2.resize(img_patch, (96, 96))
93 | gender_patch = gender_patch.astype("float") / 255.0
94 | gender_patch = img_to_array(gender_patch)
95 | gender_patch = np.expand_dims(gender_patch, axis=0)
96 |
97 | # Path for age detect
98 | age_patch = cv2.cvtColor(img_patch, cv2.COLOR_BGR2RGB)
99 | age_patch = cv2.resize(age_patch, (age_img_size, age_img_size))
100 | age_patch = np.expand_dims(age_patch, axis=0)
101 |
102 | graph = run_model_server.graph
103 | with graph.as_default():
104 | predicted_age = run_model_server.age_mdl.predict(age_patch)
105 | ages = np.arange(0, 101).reshape(101, 1)
106 | predicted_age = int(predicted_age.dot(ages).flatten())
107 |
108 | detected_gender = run_model_server.gender_mdl.predict(gender_patch)[0]
109 | gender_index = int(np.argmax(detected_gender))
110 |
111 | predicted_emo = run_model_server.emo_mdl.predict(emo_patch)
112 |
113 | emo_index = int(np.argmax(predicted_emo))
114 | emo_count[emotion_dict[emo_index]] += 1
115 | gender_count[gender_dict[gender_index]] += 1
116 | if str(predicted_age) in age_class:
117 | age_class[str(predicted_age)] +=1
118 | else:
119 | age_class[str(predicted_age)] = 1
120 | history_data.append([emotion_dict[emo_index],gender_dict[gender_index],predicted_age,(startX,startY, endX, endY)])
121 |
122 | # draw the bounding box of the face along with the associated
123 | # probability
124 | y = startY - 10 if startY - 10 > 10 else startY + 10
125 | cv2.rectangle(frame, (startX, startY), (endX, endY),(0, 255, 0), 2)
126 |
127 | static_file_path = os.path.join(os.getcwd(), "app", "static")
128 | # Generate Emotion Status
129 | xs = ['Angry', 'Disgusted', 'Fearful', 'Happy', 'Neutral', 'Sad', 'Surprised']
130 | ys = []
131 | data = emo_count
132 |         ys = [data['Angry'], data['Disgusted'], data['Fearful'], data['Happy'], data['Neutral'], data['Sad'], data['Surprised']]
133 | plt.bar(list(xs), ys, align='center', alpha=0.5)
134 |
135 | plt.ylabel('Number of people')
136 | plt.title('Emotion Classification')
137 | plt.savefig(os.path.join(static_file_path,'emotion_status.png'))
138 | plt.cla()
139 |
140 | # Generate Gender Count
141 | xs = ['MAN', 'WOMAN']
142 | ys = []
143 | data = gender_count
144 | ys = [data['man'], data['woman']]
145 | plt.bar(list(xs), ys, align='center', alpha=0.5)
146 |
147 | plt.ylabel('Number of people')
148 | plt.title('Gender Classification')
149 | plt.savefig(os.path.join(static_file_path,'gender_status.png'))
150 | plt.cla()
151 |
152 | # Age Classification
153 | data = age_class
154 | xs = []
155 | ys = []
156 | for item, lst in data.items():
157 | xs.append(item)
158 | ys.append(data[item])
159 | plt.plot(list(xs), ys, label='Crowd Count', color='blue')
160 | plt.title("Age Classification Status")
161 | plt.xlabel("Age")
162 | plt.ylabel("Number of people")
163 | plt.legend(loc='upper right')
164 | plt.savefig(os.path.join(static_file_path,'age_status.png'))
165 |
166 | file_name = "{}_detect.{}".format(f.filename.split('.')[0],f.filename.split('.')[1])
167 | cv2.imwrite(os.path.join(static_file_path, file_name), frame)
168 | # Note we must return 200 HTTP OK response.
169 | # Saving csv for historical data analysis.
170 | # TODO: Change to database.
171 | with open(os.path.join(os.getcwd(),'app', 'records', 'camera1.csv'), mode='a') as data_file:
172 | data_file= csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
173 | for i in range(len(history_data)):
174 | data_file.writerow(list(history_data[i]))
175 |
176 | bucket['file_name'] = file_name
177 | bucket['detected_person'] = count
178 | session['processed_data'] = bucket
179 | return "Successfully processed.", 200
180 | else:
181 | return render_template('detect/../templates/detect/upload.html', title="Tokyo Junction",
182 | form=form)
183 |
184 | @bp.route('/result')
185 | def result():
186 | if "processed_data" not in session:
187 | return redirect(url_for('detect.detect'))
188 | processed_data = session["processed_data"]
189 | file_name = processed_data['file_name']
190 | person_count = processed_data['detected_person']
191 | session.pop('processed_data', None)
192 | # TO DO
193 | # Display the processed data on the page
194 | print("Here result")
195 | return render_template('detect/../templates/detect/results.html', file_name=file_name, people_count=person_count)
196 |
197 | @bp.route('/public')
198 | def public():
199 | print("Public...")
200 | return render_template('detect/../templates/detect/public.html')
201 |
--------------------------------------------------------------------------------
/JunctionAPI/app/detect/run_model_server.py:
--------------------------------------------------------------------------------
1 | # import the necessary packages
2 | # from keras.applications import ResNet50
3 | from keras.preprocessing.image import img_to_array
4 | from keras.preprocessing.image import array_to_img
5 | from keras.models import load_model
6 | from app.model import emo_model, age_model
7 | import numpy as np
8 | import time
9 | import cv2
10 | from PIL import Image
11 | import os
12 | import cv2
13 | import tensorflow as tf
14 |
15 | # Initialize Global Variable
16 | emo_mdl = None
17 | gender_mdl = None
18 | age_mdl = None
19 | graph = None
20 |
21 |
22 | args = {'prototxt': "deploy.prototxt.txt",
23 | 'face_model': "res10_300x300_ssd_iter_140000.caffemodel",
24 | 'emo_model': "emo_model.h5",
25 | 'gender_model': "gender_detection.model",
26 | 'age_model': "age_only_resnet50_weights.061-3.300-4.410.hdf5",
27 | 'confidence': 0.5}
28 |
29 | def pre_load_model():
30 | global emo_mdl
31 | global gender_mdl
32 | global age_mdl
33 | global net
34 | global graph
35 |
36 | # load the Keras Model
37 | emo_mdl = emo_model.build()
38 | emo_mdl.load_weights(os.path.join(os.getcwd(),'app', 'weights', args["emo_model"]))
39 | gender_mdl = load_model(os.path.join(os.getcwd(),'app', 'weights', args["gender_model"]))
40 | age_mdl = age_model.get_model(model_name="ResNet50")
41 | age_mdl.load_weights(os.path.join(os.getcwd(), 'app', 'weights', args["age_model"]))
42 | net = cv2.dnn.readNetFromCaffe(
43 | os.path.join(os.path.join(os.getcwd(), 'app', 'weights', args['prototxt'])),
44 | os.path.join(os.path.join(os.getcwd(),'app','weights', args['face_model']))
45 | )
46 | graph = tf.get_default_graph()
47 |
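`pre_load_model()` fills the module-level globals once at import time (see `app/detect/__init__.py`), and the TF1 graph captured here is what the request handlers re-enter before predicting. A hedged sketch of that calling pattern, mirroring `routes.py`; it assumes the weight files listed in `args` exist under `app/weights` and that the code runs inside the app package, and the dummy input is illustrative only:

# Calling pattern only, not a standalone script.
import numpy as np

pre_load_model()                                         # fills emo_mdl, gender_mdl, age_mdl, net, graph
dummy_face = np.zeros((1, 48, 48, 1), dtype="float32")   # input shape the emotion model expects
with graph.as_default():                                 # re-enter the graph captured at load time
    emo_scores = emo_mdl.predict(dummy_face)             # one row of emotion scores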
--------------------------------------------------------------------------------
/JunctionAPI/app/main/__init__.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint
2 |
3 | bp = Blueprint('main', __name__)
4 |
5 | from app.main import routes
6 |
--------------------------------------------------------------------------------
/JunctionAPI/app/main/forms.py:
--------------------------------------------------------------------------------
1 | from flask import request
2 |
--------------------------------------------------------------------------------
/JunctionAPI/app/main/routes.py:
--------------------------------------------------------------------------------
1 | from flask import redirect, url_for, request, render_template, flash
2 | from app.main import bp
3 | from flask import current_app as app
4 |
5 | @bp.route('/', methods=['GET'])
6 | def index():
7 | return "Hello"
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/JunctionAPI/app/templates/base.html:
--------------------------------------------------------------------------------
1 | {% extends 'bootstrap/base.html' %}
2 |
3 | {% block title %}
4 | {% if title %}{{ title }}{% else %}{{ ('Junction Tokyo') }}{% endif %}
5 | {% endblock %}
6 |
7 | {% block content %}
8 |
9 | {% with messages = get_flashed_messages() %}
10 | {% if messages %}
11 | {% for message in messages %}
12 | {{ message }}
13 | {% endfor %}
14 | {% endif %}
15 | {% endwith %}
16 | {# application content needs to be provided in the app_content block #}
17 | {% block app_content %}{% endblock %}
18 |
19 | {% endblock %}
20 | {% block styles %}
21 | {{ super() }}
22 |
25 | {% endblock %}
26 |
--------------------------------------------------------------------------------
/JunctionAPI/app/templates/detect/results.html:
--------------------------------------------------------------------------------
1 | {% extends "../base.html" %}
2 | {% import 'bootstrap/wtf.html' as wtf %}
3 |
4 | {% block app_content %}
5 |
6 | Device ID : Camera 1
7 | Location : FLOOR3, Building A3
8 | Ticket Counter 1
9 | {% if people_count %}Total Number of People Detected :{{ people_count }} {% endif %}
10 |
11 | {% if file_name %}
12 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
59 |
60 |
61 |
62 |
63 |
64 |
65 | {% endif %}
66 |
67 | {% endblock %}
68 |
--------------------------------------------------------------------------------
/JunctionAPI/app/templates/detect/upload.html:
--------------------------------------------------------------------------------
1 | {% extends "../base.html" %}
2 | {% import 'bootstrap/wtf.html' as wtf %}
3 |
4 | {% block head %}
5 | {{ super() }}
6 | {{ dropzone.load_css() }}
7 | {{ dropzone.style('border: 2px dashed #0087F7; margin: 10px 0 10px; min-height: 400px;') }}
8 | {% endblock %}
9 | {% block app_content %}
10 |
11 | Device ID : Camera 1
12 | Location : FLOOR3, Building A3
13 | Ticket Counter 1
14 | {{ form.hidden_tag() }}
15 |
16 |
17 | {{ dropzone.create(action="detect.detect") }}
18 |
19 |
20 | Upload
21 |
22 |
23 | {{ dropzone.load_js() }}
24 | {{ dropzone.config(
25 | custom_init='
26 | this.on("success", function(file, response){
27 | window.location="' + url_for("detect.result") +'"
28 | });
29 | this.on("sending", function(file, xhr, formData) {
30 | csrf_name=$("#csrf_token").attr("name");
31 | csrf_value=$("#csrf_token").attr("value");
32 | formData.append(csrf_name, csrf_value);
33 | });
34 | ',
35 | custom_options="addRemoveLinks: true"
36 | ) }}
37 | {% endblock %}
38 |
--------------------------------------------------------------------------------
/JunctionAPI/config.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | basedir = os.path.abspath(os.path.dirname(__file__))
4 |
5 |
6 | class Config(object):
7 | SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
8 |
--------------------------------------------------------------------------------
/JunctionAPI/flask_requirements.txt:
--------------------------------------------------------------------------------
1 | alembic==0.9.6
2 | astroid==2.0.4
3 | Babel==2.5.1
4 | beautifulsoup4==4.6.3
5 | blinker==1.4
6 | certifi==2017.7.27.1
7 | chardet==3.0.4
8 | click==6.7
9 | dominate==2.3.1
10 | elasticsearch==6.1.1
11 | Flask==1.0.2
12 | Flask-Babel==0.11.2
13 | Flask-Blogging==1.0.2
14 | Flask-Bootstrap==3.3.7.1
15 | Flask-Cache==0.13.1
16 | Flask-Dropzone==1.5.3
17 | Flask-FileUpload==0.5.0
18 | Flask-HTTPAuth==3.2.3
19 | Flask-Login==0.4.0
20 | Flask-Mail==0.9.1
21 | Flask-Migrate==2.1.1
22 | Flask-Moment==0.5.2
23 | Flask-Principal==0.4.0
24 | Flask-SQLAlchemy==2.3.2
25 | Flask-Uploads==0.2.1
26 | Flask-WTF==0.14.2
27 | guess-language-spirit==0.5.3
28 | idna==2.6
29 | isort==4.3.4
30 | itsdangerous==0.24
31 | Jinja2==2.10
32 | lazy-object-proxy==1.3.1
33 | Mako==1.0.7
34 | Markdown==2.6.11
35 | MarkupSafe==1.0
36 | mccabe==0.6.1
37 | micawber==0.3.5
38 | peewee==3.7.0
39 | Pygments==2.2.0
40 | PyJWT==1.5.3
41 | pylint==2.1.1
42 | python-dateutil==2.6.1
43 | python-dotenv==0.7.1
44 | python-editor==1.0.3
45 | python-slugify==1.2.6
46 | pytz==2017.2
47 | redis==2.10.6
48 | requests==2.18.4
49 | rq==0.9.2
50 | shortuuid==0.5.0
51 | six==1.11.0
52 | SQLAlchemy==1.1.14
53 | typed-ast==1.1.0
54 | Unidecode==1.0.22
55 | urllib3==1.22
56 | visitor==0.1.3
57 | Werkzeug==0.14.1
58 | wrapt==1.10.11
59 | WTForms==2.1
60 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Machine Learning Tokyo
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MLT x 2020
2 |
3 | More than half a million people are expected to come to Tokyo for the Olympics 2020. We built a highly scalable system for face detection and counting, and for age, gender and emotion prediction, to be used for
4 |
5 | * managing crowds,
6 | * making personalized recommendations for users, and
7 | * optimizing marketing campaigns and ad placement (e.g. ads on screens based on average age or gender).
8 |
9 | We combined 3 Deep Learning Models with 3 APIs (Twitter, Google Maps, Google Translate). Together these support Safety, Efficiency and Business Value for the Tokyo Olympics 2020.
10 |
11 | **Presentation slides:** https://goo.gl/hBwi4U
12 |
13 | 
14 |
15 |
16 | # MLT Teams at Junction Tokyo
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/api/__pycache__/api.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/__pycache__/api.cpython-35.pyc
--------------------------------------------------------------------------------
/api/__pycache__/api.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/__pycache__/api.cpython-36.pyc
--------------------------------------------------------------------------------
/api/__pycache__/config.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/__pycache__/config.cpython-35.pyc
--------------------------------------------------------------------------------
/api/__pycache__/config.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/__pycache__/config.cpython-36.pyc
--------------------------------------------------------------------------------
/api/api.py:
--------------------------------------------------------------------------------
1 | from app import create_app
2 |
3 | app = create_app()
4 |
--------------------------------------------------------------------------------
/api/app/__init__.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, request, current_app
2 | from flask_bootstrap import Bootstrap
3 | from flask_dropzone import Dropzone
4 | from flask_uploads import UploadSet, configure_uploads, IMAGES, \
5 | patch_request_class
6 | from flask_wtf.csrf import CSRFProtect
7 | from config import Config
8 |
9 | bootstrap = Bootstrap()
10 | dropzone = Dropzone()
11 | csrf = CSRFProtect()
12 |
13 | def create_app(config_class=Config):
14 | app = Flask(__name__)
15 | app.config.from_object(config_class)
16 | bootstrap.init_app(app)
17 | dropzone.init_app(app)
18 | csrf.init_app(app)
19 |
20 | from app.main import bp as main_bp
21 | app.register_blueprint(main_bp)
22 |
23 | from app.detect import bp as detect_bp
24 | app.register_blueprint(detect_bp)
25 |
26 | return app
27 |
--------------------------------------------------------------------------------
/api/app/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/api/app/detect/README.md:
--------------------------------------------------------------------------------
1 | # Output generation for the frontend
2 | Currently using a dummy dataset.
3 |
4 | Run as: `python outputgenerator.py`
5 |
6 | # Output:
7 | * a tweet in 3 languages based on the stadium video
8 | * a heatmap of the stadium
9 | * can be used to generate dataset statistics
10 |
11 | # Dependencies:
12 | pip install:
13 | * tweepy
14 | * googletrans
15 | * matplotlib
16 | * pandas
17 |
18 | # Error:
19 | * duplicate tweet error -> delete the Twitter post directly, or add the current time (minutes) to the tweet text so each post is unique (see the sketch below)
20 |
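As a possible workaround for the duplicate-tweet error above, a minimal sketch that appends the current time to the tweet text so repeated runs post distinct statuses. It assumes an already-authenticated `tweepy.API` object; `post_unique_tweet` is a hypothetical helper, not part of this repo:

# Hypothetical helper: make each tweet unique by suffixing the current time (HH:MM).
import datetime
import tweepy

def post_unique_tweet(api, text):
    stamp = datetime.datetime.now().strftime("%H:%M")
    api.update_status(status="{} ({})".format(text, stamp))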
--------------------------------------------------------------------------------
/api/app/detect/__init__.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint
2 |
3 | bp = Blueprint('detect', __name__)
4 |
5 | from app.detect import routes
6 | from app.detect import run_model_server
7 | run_model_server.pre_load_model()  # pre_load_model() returns None; call it without rebinding the module name
8 |
--------------------------------------------------------------------------------
/api/app/detect/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/detect/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/api/app/detect/__pycache__/camera.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/__pycache__/camera.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/detect/__pycache__/customerservices.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/__pycache__/customerservices.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/detect/__pycache__/forms.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/__pycache__/forms.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/detect/__pycache__/forms.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/__pycache__/forms.cpython-36.pyc
--------------------------------------------------------------------------------
/api/app/detect/__pycache__/geo_info_caller.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/__pycache__/geo_info_caller.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/detect/__pycache__/routes.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/__pycache__/routes.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/detect/__pycache__/routes.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/__pycache__/routes.cpython-36.pyc
--------------------------------------------------------------------------------
/api/app/detect/__pycache__/run_model_server.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/__pycache__/run_model_server.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/detect/__pycache__/run_model_server.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/__pycache__/run_model_server.cpython-36.pyc
--------------------------------------------------------------------------------
/api/app/detect/camera.py:
--------------------------------------------------------------------------------
1 | class camera():
2 |
3 | def __init__(self, cameraID, X, Y, databaseID, name, Ctype):
4 | self.cameraID = cameraID
5 | self.coorX = X
6 | self.coorY = Y
7 | self.databaseID = databaseID
8 | self.name = name
9 | self.Ctype = Ctype
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/api/app/detect/camera.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/camera.pyc
--------------------------------------------------------------------------------
/api/app/detect/customerservices.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 | import pandas as pd
3 | from app.detect.geo_info_caller import get_geo_info
4 | from app.detect import camera
5 | import os
6 |
7 | GeoCamera = namedtuple('GeoCamera', 'camera address distance duration')
8 |
9 | class customerservices:
10 | def __init__(self, dur_thr=200):
11 | self.dur_thr = dur_thr * 60
12 | self.serviceType = "return closest camera based on user input and position"
13 | file_path = os.path.join(os.getcwd(), "app", "detect", "morerealdata.csv")
14 | self.database = pd.read_csv(file_path,delimiter=";")
15 | self.result = pd.DataFrame()
16 |
17 | def getcamIDfromCtype(self, Camlist, type):
18 | goodTypeCam = []
19 | for cam in Camlist:
20 | if(cam.Ctype == type ):
21 | goodTypeCam.append(cam)
22 | return(goodTypeCam)
23 |
24 | def get_geo_cameras(self, goodTypeCam, geo_info):
25 | geo_cams = []
26 | for i, v_dur in enumerate(geo_info.duration_values):
27 | print(v_dur)
28 | if v_dur <= self.dur_thr:
29 | geo_cam = GeoCamera(goodTypeCam[i], geo_info.destinations[i], geo_info.distance_texts[i], geo_info.duration_texts[i])
30 | geo_cams.append(geo_cam)
31 |
32 | return geo_cams
33 |
34 | def findclosestCam(self, goodTypeCam, userPosition):
35 | dest_coords = [tuple([c.coorX, c.coorY]) for c in goodTypeCam]
36 | geo_info = get_geo_info(userPosition, dest_coords)
37 | geo_cams = self.get_geo_cameras(goodTypeCam, geo_info)
38 | return geo_cams
39 |
40 | def returnAllcorrespondingData(self, geo_cameras):
41 | for geo_cam in geo_cameras:
42 | self.result = self.result.append(self.database.loc[self.database['camera'] == geo_cam.camera.databaseID], ignore_index = True)
43 | return self.result, geo_cameras
44 |
45 | def generateRecommandationList(self, results, geocameras):
46 | # Generate the recommandation list:
47 | recommandation = results
48 | recommandation["name"] = ""
49 | recommandation["distance"] = ""
50 | recommandation["duration"] = ""
51 | recommandation["address"] = ""
52 |
53 | #print(recommandation)
54 |
55 | for c in geocameras:
56 | # Get the good index:
57 | index = recommandation.index[recommandation.camera == c.camera.databaseID]
58 | # Set the values:
59 | recommandation.set_value(index,'distance', c.distance)
60 | recommandation.set_value(index,'duration', c.duration)
61 | recommandation.set_value(index,'name', c.camera.name)
62 | recommandation.set_value(index,'address', c.address)
63 |
64 | recommandation.set_index('camera', inplace = True)
65 | recommandation.sort_values(by='waiting', inplace = True)
66 | #print(recommandation)
67 | return(recommandation)
68 |
69 | def solve_request(self, Camlist, Ctype, userPosition):
70 | goodTypeCam = self.getcamIDfromCtype(Camlist, Ctype)
71 |
72 | geo_cameras = self.findclosestCam(goodTypeCam, userPosition)
73 | results, geocameras = self.returnAllcorrespondingData(geo_cameras)
74 | recommandation = self.generateRecommandationList(results, geocameras)
75 |
76 | return(recommandation)
77 |
78 |
--------------------------------------------------------------------------------
/api/app/detect/customerservices.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/customerservices.pyc
--------------------------------------------------------------------------------
/api/app/detect/face_detector.py:
--------------------------------------------------------------------------------
1 | import time
2 | import cv2
3 |
4 |
5 | class FaceDetector():
6 | def __init__(self):
7 | from ..hr101_mxnet.tiny_fd import TinyFacesDetector
8 | # from tiny_fd import TinyFacesDetector
9 | self.tiny_faces = TinyFacesDetector(model_root='/home/paperspace/Desktop/Junction/api/app/hr101_mxnet/', prob_thresh=0.5, gpu_idx=0)
10 |
11 | def detect(self, fname, txtfname):
12 | self.img = cv2.imread(fname)
13 | tic = time.time()
14 |         boxes = self.tiny_faces.detect(self.img)  # fixed: the detector attribute is self.tiny_faces
15 | toc = time.time()
16 | elapsed_time = toc - tic
17 |
18 | self.write_to_txt_file(fname, boxes, txtfname)
19 |
20 | return boxes.shape[0], boxes, elapsed_time
21 |
22 |     def get_face_bboxes(self, img):
23 |         boxes = self.tiny_faces.detect(img)  # fixed: use the image passed in, not self.img
24 |         # return boxes.shape[0], boxes
25 |         return 127, [1,2,3,4]  # hard-coded stub return; the real values are commented out above
26 | def write_to_txt_file(self, fname, boxes, txtfname):
27 | txtfile = open(txtfname, 'w')
28 | txtfile.write(fname + '\n')
29 | txtfile.write(str(boxes.shape[0]) + '\n')
30 | for box in boxes:
31 | line = ''
32 |
33 |             x1 = min(max(0, int(box[0])), self.img.shape[1])  # clamp x to image width
34 |             y1 = min(max(0, int(box[1])), self.img.shape[0])  # clamp y to image height
35 |             x2 = min(max(0, int(box[2])), self.img.shape[1])
36 |             y2 = min(max(0, int(box[3])), self.img.shape[0])
37 | line += str(x1) + ' ' + str(y1) + ' ' + str(x2) + ' ' + str(y2) + '\n'
38 |
39 | txtfile.write(line)
--------------------------------------------------------------------------------
/api/app/detect/facedata.csv:
--------------------------------------------------------------------------------
1 | camera;time;nbpeople;waiting;age;gender;emotion
2 | C1;10:00:00;10;50;25;guy;happy
3 | C1;12:00:00;40;200;35;guy;happy
4 | C1;02:00:00;40;200;30;mix;happy
5 | C1;04:00:00;20;100;22;guy;happy
6 | C1;06:00:00;0;0;0;mix;neutral
7 | C2;10:00:00;3;15;40;girl;happy
8 | C2;12:00:00;6;30;45;girl;angry
9 | C2;02:00:00;3;15;50;girl;angry
10 | C2;04:00:00;6;30;45;guy;angry
11 | C2;06:00:00;8;40;45;guy;happy
12 | C3;10:00:00;12;60;15;girl;happy
13 | C3;12:00:00;34;170;18;girl;angry
14 | C3;02:00:00;34;170;16;girl;happy
15 | C3;04:00:00;12;60;25;girl;happy
16 | C3;06:00:00;5;25;22;guy;angry
17 | C4;10:00:00;24;120;35;mix;angry
18 | C4;12:00:00;56;280;35;mix;angry
19 | C4;02:00:00;80;400;35;mix;angry
20 | C4;04:00:00;43;215;35;guy;angry
21 | C4;06:00:00;12;60;35;mix;angry
22 |
--------------------------------------------------------------------------------
/api/app/detect/facedata.ods:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/facedata.ods
--------------------------------------------------------------------------------
/api/app/detect/facedata.xls:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/facedata.xls
--------------------------------------------------------------------------------
/api/app/detect/forms.py:
--------------------------------------------------------------------------------
1 | from flask_wtf import FlaskForm
2 |
3 | class DetectForm(FlaskForm):
4 | pass
5 |
--------------------------------------------------------------------------------
/api/app/detect/geo_info_caller.py:
--------------------------------------------------------------------------------
1 | import googlemaps
2 | from datetime import datetime
3 | from collections import namedtuple
4 | import os, sys
5 |
6 | GeoObj = namedtuple('geo_object', ['destinations', 'distance_texts', 'distance_values',
7 | 'duration_texts', 'duration_values'])
8 |
9 | def get_geo_info(user_coords, dest_coords, mode='walking'):
10 | user_coords = tuple(user_coords)
11 | dest_coords = list(map(tuple, dest_coords))
12 |     gmaps = googlemaps.Client(key=os.environ.get('GMAPS_API_KEY'))  # read the key from the environment (variable name chosen here) instead of hard-coding credentials
13 | distance_result = gmaps.distance_matrix(origins=user_coords, destinations=dest_coords, mode=mode)
14 |
15 | dests = distance_result['destination_addresses']
16 | dists = [elem['distance']['text'] for elem in distance_result['rows'][0]['elements']]
17 | durs = [elem['duration']['text'] for elem in distance_result['rows'][0]['elements']]
18 |
19 | v_dists = [elem['distance']['value'] for elem in distance_result['rows'][0]['elements']]
20 | v_durs = [elem['duration']['value'] for elem in distance_result['rows'][0]['elements']]
21 |
22 | geo_info = GeoObj(dests, dists, v_dists, durs, v_durs)
23 | return geo_info
24 |
25 |
26 | if __name__ == '__main__':
27 | user_coords = sys.argv[1]
28 | user_coords = tuple(map(float, user_coords.split(',')))
29 | dest_coords = sys.argv[2:]
30 | # dest_coords = dest_coords.split(' ')
31 | dest_coords = [tuple(map(float, dest_coord.split(','))) for dest_coord in dest_coords]
32 | geo_info = get_geo_info(user_coords, dest_coords, mode='walking')
33 |
34 |
--------------------------------------------------------------------------------
/api/app/detect/morerealdata.csv:
--------------------------------------------------------------------------------
1 | camera;nbpeople;waiting;age;gender;emotion
2 | C1;12;48;23;girl;happy
3 | C2;20;80;32;mixed;angry
4 | C3;3;12;34;boy;angry
5 | C4;6;24;18;girl;happy
6 | C5;2;8;67;girl;happy
7 |
--------------------------------------------------------------------------------
/api/app/detect/outputgenerator.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import matplotlib.pyplot as plt
3 |
4 | # dedicated object
5 | import tweetGenerator as tw
6 | import camera
7 |
8 | # Create Camera object (obtained from registration)
9 | camera1 = camera.camera(1, 80, 200, "C1", "entrance A", "stadium")
10 | camera2 = camera.camera(2, 500, 200,"C2", "entrance B", "stadium")
11 |
12 | # Load the database (currently dummy data)
13 | dummydatabase = pd.read_csv('facedata.csv',delimiter=";")
14 | print(dummydatabase.head())
15 |
16 | # Get the number of people and waiting time at each camera (time can be set in the query)
17 | TIME = '10:00:00'
18 |
19 | # Camera1
20 | query = dummydatabase.loc[dummydatabase['camera'] == 'C1'].loc[dummydatabase['time']== TIME]
21 | waitingtimeCamera1 = query.waiting
22 | waitingtimeCamera1 = waitingtimeCamera1.reset_index(drop=True)[0]
23 | nbpeopleC1 = query.nbpeople
24 | nbpeopleC1 = nbpeopleC1.reset_index(drop=True)[0]
25 | print(waitingtimeCamera1)
26 | print(nbpeopleC1)
27 |
28 | # Camera 2
29 | query = dummydatabase.loc[dummydatabase['camera'] == 'C2'].loc[dummydatabase['time']== TIME]
30 | waitingtimeCamera2 = query.waiting
31 | waitingtimeCamera2 = waitingtimeCamera2.reset_index(drop=True)[0]
32 | nbpeopleC2 = query.nbpeople
33 | nbpeopleC2 = nbpeopleC2.reset_index(drop=True)[0]
34 | print(waitingtimeCamera2)
35 | print(nbpeopleC2)
36 |
37 | ### Generate the heatmap of the stadium (the numerical constants are chosen to make the display look better)
38 | heatmapC1 = plt.Circle((camera1.coorX, camera1.coorY), nbpeopleC1*8, color='r')
39 | heatmapC2 = plt.Circle((camera2.coorX, camera2.coorY), nbpeopleC2*8, color='g')
40 |
41 | img = plt.imread("staduim1.png")  # match the actual lower-case file name in this folder
42 | fig, ax = plt.subplots()
43 | ax.add_artist(heatmapC1)
44 | ax.add_artist(heatmapC2)
45 | plt.axis('off')
46 |
47 | plt.text(camera1.coorX - 20, camera1.coorY, str(waitingtimeCamera1) + "min", fontsize=12)
48 | plt.text(camera2.coorX - 20, camera2.coorY, str(waitingtimeCamera2) + "min", fontsize=12)
49 | ax.imshow(img)
50 |
51 | plt.savefig('savedheatmap/heatmap.png')
52 |
53 | ### Tweet generation
54 | tweetAPI = tw.TweetGenerator()
55 | englishText = tweetAPI.ruleBaseGenerator(camera1, nbpeopleC1, camera2, nbpeopleC2)
56 | tweetAPI.multilanguageTweet(englishText)
57 |
58 | ### Business statistics: for demography / live advertisement tuning based on gender / age
59 | dummydatabase.groupby("camera").plot()
60 |
61 |
--------------------------------------------------------------------------------
/api/app/detect/routes.py:
--------------------------------------------------------------------------------
1 | from flask import redirect, url_for, request, render_template, flash, session
2 | from flask import current_app as current_app
3 | from PIL import Image
4 | import cv2
5 | from app.detect import bp
6 | from app.detect import run_model_server
7 | import numpy as np
8 | from app.detect.forms import DetectForm
9 | from keras.preprocessing.image import img_to_array
10 | import matplotlib.pyplot as plt
11 | import os
12 | import io
13 | import imutils
14 | import csv
15 |
16 | from app.detect import camera
17 | from app.detect import customerservices as cs
18 |
19 | from app.detect.geo_info_caller import get_geo_info
20 |
21 | @bp.route('/detect', methods=['GET', 'POST'])
22 | def detect():
23 | if "processed_data" not in session:
24 | session["processed_data"] = {}
25 | bucket = session["processed_data"]
26 | emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
27 | gender_dict = {0: 'man', 1:'woman'}
28 |
29 | # For data analysis
30 |     emo_count = {"Angry":0, "Disgusted":0, "Fearful":0, "Happy":0, "Neutral":0,
31 |                  "Sad":0, "Surprised":0}  # "Surprised" added to match emotion_dict and avoid a KeyError
32 | gender_count = {"man":0, "woman":0}
33 | age_class = {}
34 |
35 | if "processed_data" not in session:
36 | # To store the processed_data
37 | session["processed_data"] = {}
38 | form = DetectForm()
39 | current_app.config.update(DROPZONE_UPLOAD_ON_CLICK=True)
40 | # if request.method == 'GET':
41 | # print("Here visited")
42 | # return render_template('detect/upload.html', title="Upload Files")
43 | # else:
44 | # print("Posted Done")
45 | # return "Successfully processed.", 200
46 | if request.method == "POST":
47 | for (key, f) in (request.files.items()):
48 | # f.save(os.path.join(os.getcwd(),"app","uploads", f.filename))
49 | image = f.read()
50 | image = Image.open(io.BytesIO(image)).convert('RGB')
51 | opencv_img = np.array(image)
52 | opencv_img = opencv_img[:, :, ::-1].copy()
53 | age_img_size = run_model_server.age_mdl.input.shape.as_list()[1]
54 | frame = imutils.resize(opencv_img, width=400)
55 | (h, w) = frame.shape[:2]
56 | blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,(300, 300), (104.0, 177.0, 123.0))
57 | # pass the blob through the network and obtain the detections and
58 | # predictions
59 | run_model_server.net.setInput(blob)
60 | detections = run_model_server.net.forward()
61 | count = 0
62 |
63 | history_data = []
64 | # loop over the detections
65 | for i in range(0, detections.shape[2]):
66 | # extract the confidence (i.e., probability) associated with the
67 | # prediction
68 | confidence = detections[0, 0, i, 2]
69 |
70 | # filter out weak detections by ensuring the `confidence` is
71 | # greater than the minimum confidence
72 | if confidence < 0.5:
73 | continue
74 | count += 1
75 | # compute the (x, y)-coordinates of the bounding box for the
76 | # object
77 | box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
78 | (startX, startY, endX, endY) = box.astype("int")
79 |
80 | # Extract the patches
81 | img_patch = frame[startY:startY + (endY-startY), startX:startX + (endX - startX)]
82 |
83 | # Patch for Emo detect
84 | emo_patch = cv2.cvtColor(img_patch, cv2.COLOR_RGB2GRAY)
85 | emo_patch = np.expand_dims(np.expand_dims(cv2.resize(
86 | emo_patch, (48, 48)), -1), 0)
87 |
88 | # Patch for gender detect
89 | gender_patch = cv2.resize(img_patch, (96, 96))
90 | gender_patch = gender_patch.astype("float") / 255.0
91 | gender_patch = img_to_array(gender_patch)
92 | gender_patch = np.expand_dims(gender_patch, axis=0)
93 |
94 | # Path for age detect
95 | age_patch = cv2.cvtColor(img_patch, cv2.COLOR_BGR2RGB)
96 | age_patch = cv2.resize(age_patch, (age_img_size, age_img_size))
97 | age_patch = np.expand_dims(age_patch, axis=0)
98 |
99 | graph = run_model_server.graph
100 | with graph.as_default():
101 | predicted_age = run_model_server.age_mdl.predict(age_patch)
102 | ages = np.arange(0, 101).reshape(101, 1)
103 | predicted_age = int(predicted_age.dot(ages).flatten())
104 |
105 | detected_gender = run_model_server.gender_mdl.predict(gender_patch)[0]
106 | gender_index = int(np.argmax(detected_gender))
107 |
108 | predicted_emo = run_model_server.emo_mdl.predict(emo_patch)
109 |
110 | emo_index = int(np.argmax(predicted_emo))
111 | emo_count[emotion_dict[emo_index]] += 1
112 | gender_count[gender_dict[gender_index]] += 1
113 | if str(predicted_age) in age_class:
114 | age_class[str(predicted_age)] +=1
115 | else:
116 | age_class[str(predicted_age)] = 1
117 | history_data.append([emotion_dict[emo_index],gender_dict[gender_index],predicted_age,(startX,startY, endX, endY)])
118 |
119 | # draw the bounding box of the face along with the associated
120 | # probability
121 | y = startY - 10 if startY - 10 > 10 else startY + 10
122 | cv2.rectangle(frame, (startX, startY), (endX, endY),(0, 255, 0), 2)
123 | history_data = [history_data]
124 | static_file_path = os.path.join(os.getcwd(), "app", "static")
125 | # Generate Emotion Status
126 |         xs = ['Angry', 'Disgusted', 'Fearful', 'Happy', 'Neutral', 'Sad', 'Surprised']
127 | ys = []
128 | data = emo_count
129 |         ys = [data['Angry'], data['Disgusted'], data['Fearful'], data['Happy'], data['Neutral'], data['Sad'], data['Surprised']]
130 | plt.bar(list(xs), ys, align='center', alpha=0.5)
131 |
132 | plt.ylabel('Number of people')
133 |         plt.title('Emotion Classification')  # fixed: this chart shows emotions, not gender
134 | plt.savefig(os.path.join(static_file_path,'emotion_status.png'))
135 | plt.cla()
136 |
137 | # Generate Gender Count
138 | xs = ['MAN', 'WOMAN']
139 | ys = []
140 | data = gender_count
141 | ys = [data['man'], data['woman']]
142 | plt.bar(list(xs), ys, align='center', alpha=0.5)
143 |
144 | plt.ylabel('Number of people')
145 | plt.title('Gender Classification')
146 | plt.savefig(os.path.join(static_file_path,'gender_status.png'))
147 | plt.cla()
148 |
149 | # Age Classification
150 | data = age_class
151 | xs = []
152 | ys = []
153 | for item, lst in data.items():
154 | xs.append(item)
155 | ys.append(data[item])
156 | plt.plot(list(xs), ys, label='Crowd Count', color='blue')
157 | plt.title("Age Classification Status")
158 | plt.xlabel("Age")
159 | plt.ylabel("Number of people")
160 | plt.legend(loc='upper right')
161 | plt.savefig(os.path.join(static_file_path,'age_status.png'))
162 |
163 | file_name = "{}_detect.{}".format(f.filename.split('.')[0],f.filename.split('.')[1])
164 | cv2.imwrite(os.path.join(static_file_path, file_name), frame)
165 | # Note we must return 200 HTTP OK response.
166 | # Saving csv for historical data analysis.
167 | # TODO: Change to database.
168 | with open(os.path.join(os.getcwd(),'app', 'records', 'camera1.csv'), mode='a') as data_file:
169 | data_file= csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
170 | for i in range(len(history_data)):
171 | data_file.writerow(list(history_data[i]))
172 |
173 | bucket['file_name'] = file_name
174 | bucket['detected_person'] = count
175 | session['processed_data'] = bucket
176 | return "Successfully processed.", 200
177 | else:
178 | return render_template('detect/upload.html', title="Tokyo Junction",
179 | form=form)
180 |
181 | @bp.route('/result')
182 | def result():
183 | if "processed_data" not in session:
184 | return redirect(url_for('detect.detect'))
185 | processed_data = session["processed_data"]
186 | file_name = processed_data['file_name']
187 | person_count = processed_data['detected_person']
188 | session.pop('processed_data', None)
189 | # TO DO
190 | # Display the processed data on the page
191 | return render_template('detect/results.html', file_name=file_name, people_count=person_count)
192 |
193 | @bp.route('/heatmap')
194 | def public():
195 | print("Heatmap...")
196 | return render_template('detect/public.html')
197 |
198 | camera1 = camera.camera(1, 35.679221, 139.776093, "C1", "Restaurant A", "sushi")
199 | camera2 = camera.camera(2, 35.670262, 139.775106,"C2", "Restaurant B", "sushi")
200 | camera3 = camera.camera(3, 35.670259, 139.7751036,"C3", "Restaurant C", "sushi")
201 | camera4 = camera.camera(4, 35.670249, 139.7751056,"C4", "Entrance A", "nightclub")
202 | camlist =[camera1, camera2, camera3, camera4]
203 | place_types = ['caffe', 'restaurant', 'sushi', 'nightclub', 'bar', 'toilet']
204 | service = cs.customerservices()
205 |
206 | @bp.route("/get_places", methods=["GET"])
207 | def get_places():
208 |     params = request.json
209 |     if params is None:
210 |         params = request.args
211 |     if not params:
212 |         return 'Error: Please insert type and coordinates!'
213 |
214 | place_type = params.get('type')
215 |
216 | if place_type not in place_types:
217 | msg = '%s type not \nsupported...' % place_type
218 | msg = msg + '\nSupported types: %s' % ','.join(place_types)
219 | return msg
220 |
221 | coordinates = params.get("coordinates")
222 |
223 | if coordinates is None:
224 | return 'Please enter your coordinates'
225 |
226 | try:
227 | coordinates = list(map(float, coordinates.split(',')))
228 |     except ValueError:  # non-numeric coordinates
229 | return 'Wrong coordinates format'
230 | msg = None
231 | msg = service.solve_request(camlist, place_type, coordinates)
232 | msg = msg.reset_index(drop=True)
233 | n_cols = {'nbpeople': 'Queue size', 'waiting': 'waiting time',
234 | 'age': 'average age', 'gender': 'average gender',
235 | 'emotion': 'average emotion', 'duration': 'eta'}
236 |
237 | msg = msg.rename(index=str, columns=n_cols)
238 | args = dict()
239 | args.clear()
240 | args['place_type'] = place_type
241 | args['msg'] = msg
242 | print(args)
243 | return render_template('detect/index.html', **args)
244 |
245 |
246 |
247 |
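A minimal client-side sketch of calling the /get_places endpoint above with the requests library. The host/port and the absence of a URL prefix on the detect blueprint are assumptions; adjust them to the actual deployment.

import requests

# Hypothetical client call; assumes the app runs at http://localhost:5000
# and the detect blueprint is registered without a URL prefix.
params = {
    "type": "sushi",                       # must be one of place_types
    "coordinates": "35.654697,139.781929"  # "lat,lng" in a single string
}
resp = requests.get("http://localhost:5000/get_places", params=params)
print(resp.status_code)
print(resp.text)  # rendered detect/index.html containing the recommendation table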
--------------------------------------------------------------------------------
/api/app/detect/run_model_server.py:
--------------------------------------------------------------------------------
1 | # import the necessary packages
2 | # from keras.applications import ResNet50
3 | from keras.preprocessing.image import img_to_array
4 | from keras.preprocessing.image import array_to_img
5 | from keras.models import load_model
6 | from app.model import emo_model, age_model
7 | import numpy as np
8 | import time
9 | import cv2
10 | from PIL import Image
11 | import os
12 |
13 | import tensorflow as tf
14 |
15 | # Initialize Global Variable
16 | emo_mdl = None
17 | gender_mdl = None
18 | age_mdl = None
19 | graph = None
20 |
21 |
22 | args = {'prototxt': "deploy.prototxt.txt",
23 | 'face_model': "res10_300x300_ssd_iter_140000.caffemodel",
24 | 'emo_model': "emo_model.h5",
25 | 'gender_model': "gender_detection.model",
26 | 'age_model': "age_only_resnet50_weights.061-3.300-4.410.hdf5",
27 | 'confidence': 0.5}
28 |
29 | def pre_load_model():
30 | global emo_mdl
31 | global gender_mdl
32 | global age_mdl
33 | global net
34 | global graph
35 |
36 | # load the Keras Model
37 | emo_mdl = emo_model.build()
38 | emo_mdl.load_weights(os.path.join(os.getcwd(),'app', 'weights', args["emo_model"]))
39 | gender_mdl = load_model(os.path.join(os.getcwd(),'app', 'weights', args["gender_model"]))
40 | age_mdl = age_model.get_model(model_name="ResNet50")
41 | age_mdl.load_weights(os.path.join(os.getcwd(), 'app', 'weights', args["age_model"]))
42 | net = cv2.dnn.readNetFromCaffe(
43 | os.path.join(os.getcwd(), 'app', 'weights', args['prototxt']),
44 | os.path.join(os.getcwd(), 'app', 'weights', args['face_model'])
45 | )
46 | graph = tf.get_default_graph()
47 |
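pre_load_model() loads the models once into module-level globals and captures the TensorFlow 1.x default graph, so request handlers running in other threads must re-enter that graph before calling predict. Below is a minimal sketch of how a handler might use the preloaded emotion model after pre_load_model() has run; the label order is an assumption (FER-2013 style) and the real preprocessing lives in routes.py.

import cv2
import numpy as np

# Assumed FER-2013 label order; verify against the training setup.
EMOTIONS = ["Angry", "Disgusted", "Fearful", "Happy", "Neutral", "Sad", "Surprised"]

def predict_emotion(face_bgr):
    # emo_model.build() expects 48x48x1 grayscale input scaled to [0, 1]
    gray = cv2.cvtColor(face_bgr, cv2.COLOR_BGR2GRAY)
    x = cv2.resize(gray, (48, 48)).reshape(1, 48, 48, 1).astype("float32") / 255.0
    with graph.as_default():   # reuse the graph captured in pre_load_model()
        probs = emo_mdl.predict(x)[0]
    return EMOTIONS[int(np.argmax(probs))]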
--------------------------------------------------------------------------------
/api/app/detect/savedheatmap/heatmap.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/savedheatmap/heatmap.png
--------------------------------------------------------------------------------
/api/app/detect/staduim1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/detect/staduim1.png
--------------------------------------------------------------------------------
/api/app/detect/test_customerService.py:
--------------------------------------------------------------------------------
1 | # Test the customerservices class
2 | import camera
3 | import customerservices as cs
4 |
5 | # Generate a list of Camera objects
6 | # Create Camera objects (obtained from registration)
7 | camera1 = camera.camera(1, 35.679221, 139.776093, "C1", "Restaurant A", "sushi")
8 | camera2 = camera.camera(2, 35.670262, 139.775106,"C2", "Restaurant B", "sushi")
9 | camera3 = camera.camera(3, 35.670259, 139.7751036,"C3", "Restaurant C", "sushi")
10 | camera4 = camera.camera(4, 35.670249, 139.7751056,"C4", "Entrance A", "nightclub")
11 |
12 | camlist =[camera1, camera2, camera3, camera4]
13 |
14 | service = cs.customerservices()
15 |
16 | Ctype = 'sushi'
17 | UserPosition = [35.654697, 139.781929]
18 | result = service.solve_request(camlist, Ctype, UserPosition)
19 | print(result)
--------------------------------------------------------------------------------
/api/app/detect/tweetGenerator.py:
--------------------------------------------------------------------------------
1 | # Currently using tweepy, but will try the Twitter API from Rakuten
2 | # Need to try the translation API
3 |
4 | import tweepy
5 | from googletrans import Translator
6 | import datetime
7 |
8 | #import unirest not supported on python3
9 |
10 | class TweetGenerator:
11 |
12 | def __init__(self):
13 | # Setting the key for identification
14 | self.CONSUMER_KEY ="hEejKZrYXMbN2lsQPmHYnCpvY"
15 | self.CONSUMER_SECRET = "k9a8nFVaDbmUJyDZBAwwdmc1miqh8sDWjJu1AohJw03oiUnPn2"
16 | self.ACCESS_KEY = "1096442165448720385-TYxgNnoYL3z5GmKavNjFgcqvXw4ViA"
17 | self.ACCESS_SECRET = "1jNRID3iJhdDqL0Yq2hAlhJCMlA7AubodvZ0997gY5Wfy"
18 |
19 | # Set the access:
20 | self.auth = tweepy.OAuthHandler(self.CONSUMER_KEY, self.CONSUMER_SECRET)
21 | self.auth.set_access_token(self.ACCESS_KEY, self.ACCESS_SECRET)
22 |
23 | # Set up the api:
24 | self.api = tweepy.API(self.auth)
25 |
26 | # Set up the translator:
27 | self.translator = Translator()
28 |
29 | def send_tweet(self, tweetText):
30 | # Send the status
31 | self.api.update_status(tweetText)
32 |
33 | def trnsl_JP(self, englishText):
34 | # Translate the English text to Japanese
35 | jpText = self.translator.translate(englishText, dest='ja').text
36 | jpText = "[JAPANESE] " + jpText
37 | return jpText
38 |
39 | def trnsl_KO(self, englishText):
40 | # Translate the English text to Korean
41 | koText = self.translator.translate(englishText, dest='ko').text
42 | koText = "[KOREAN] " + koText
43 | return koText
44 |
45 | def multilanguageTweet(self, englishText):
46 | # Translate and send the different tweet
47 | if(englishText ==''):
48 | return(0)
49 |
50 | koText = self.trnsl_KO(englishText)
51 | self.send_tweet(koText)
52 | print("Korean tweet sent")
53 |
54 | jpText = self.trnsl_JP(englishText)
55 | self.send_tweet(jpText)
56 | print("Japanese tweet sent")
57 |
58 | self.send_tweet("[ENGLISH] " + englishText)
59 | print("English tweet sent")
60 |
61 | def ruleBaseGenerator(self, camera1, nbpeopleC1, camera2, nbpeopleC2):
62 | # Generate the message to tweet based on the camera
63 | currentDT = datetime.datetime.now()
64 | englishText =""
65 | if(nbpeopleC1 > nbpeopleC2):
66 | englishText = str(currentDT.day) +'/'+ str(currentDT.hour) +" | Please go to " + camera2.name
67 |
68 | if(nbpeopleC2 > nbpeopleC1):
69 | englishText = str(currentDT.day) +'/'+ str(currentDT.hour) +" | Please go to " + camera1.name
70 |
71 | return englishText
72 |
73 |
74 |
--------------------------------------------------------------------------------
/api/app/main/__init__.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint
2 |
3 | bp = Blueprint('main', __name__)
4 |
5 | from app.main import routes
6 |
--------------------------------------------------------------------------------
/api/app/main/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/main/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/main/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/main/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/api/app/main/__pycache__/routes.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/main/__pycache__/routes.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/main/__pycache__/routes.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/main/__pycache__/routes.cpython-36.pyc
--------------------------------------------------------------------------------
/api/app/main/forms.py:
--------------------------------------------------------------------------------
1 | from flask import request
2 |
--------------------------------------------------------------------------------
/api/app/main/routes.py:
--------------------------------------------------------------------------------
1 | from flask import redirect, url_for, request, render_template, flash
2 | from app.main import bp
3 | from flask import current_app as app
4 |
5 | @bp.route('/', methods=['GET'])
6 | def index():
7 | return "Hello"
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/api/app/model/__init__.py:
--------------------------------------------------------------------------------
1 | from .emo_model import emo_model
2 | from .smallervggnet import SmallerVGGNet
3 |
--------------------------------------------------------------------------------
/api/app/model/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/model/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/model/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/model/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/api/app/model/__pycache__/age_model.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/model/__pycache__/age_model.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/model/__pycache__/age_model.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/model/__pycache__/age_model.cpython-36.pyc
--------------------------------------------------------------------------------
/api/app/model/__pycache__/emo_model.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/model/__pycache__/emo_model.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/model/__pycache__/emo_model.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/model/__pycache__/emo_model.cpython-36.pyc
--------------------------------------------------------------------------------
/api/app/model/__pycache__/smallervggnet.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/model/__pycache__/smallervggnet.cpython-35.pyc
--------------------------------------------------------------------------------
/api/app/model/__pycache__/smallervggnet.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/model/__pycache__/smallervggnet.cpython-36.pyc
--------------------------------------------------------------------------------
/api/app/model/age_model.py:
--------------------------------------------------------------------------------
1 | import better_exceptions
2 | from keras.applications import ResNet50, InceptionResNetV2
3 | from keras.layers import Dense
4 | from keras.models import Model
5 | from keras import backend as K
6 |
7 |
8 | def age_mae(y_true, y_pred):
9 | true_age = K.sum(y_true * K.arange(0, 101, dtype="float32"), axis=-1)
10 | pred_age = K.sum(y_pred * K.arange(0, 101, dtype="float32"), axis=-1)
11 | mae = K.mean(K.abs(true_age - pred_age))
12 | return mae
13 |
14 |
15 | def get_model(model_name="ResNet50"):
16 | base_model = None
17 |
18 | if model_name == "ResNet50":
19 | base_model = ResNet50(include_top=False, weights='imagenet', input_shape=(224, 224, 3), pooling="avg")
20 | elif model_name == "InceptionResNetV2":
21 | base_model = InceptionResNetV2(include_top=False, weights='imagenet', input_shape=(299, 299, 3), pooling="avg")
22 |
23 | prediction = Dense(units=101, kernel_initializer="he_normal", use_bias=False, activation="softmax",
24 | name="pred_age")(base_model.output)
25 |
26 | model = Model(inputs=base_model.input, outputs=prediction)
27 |
28 | return model
29 |
30 |
31 | def main():
32 | model = get_model("InceptionResNetV2")
33 | model.summary()
34 |
35 |
36 | if __name__ == '__main__':
37 | main()
38 |
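The model's head is a 101-way softmax over ages 0-100, and age_mae compares expected values of that distribution, so a scalar age prediction is usually recovered the same way. A minimal numpy sketch:

import numpy as np

def expected_age(probs):
    # probs: one softmax output row of shape (101,), e.g. model.predict(x)[0]
    ages = np.arange(0, 101)           # bin i corresponds to age i
    return float(np.dot(ages, probs))  # expected value, mirroring age_mae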
--------------------------------------------------------------------------------
/api/app/model/emo_model.py:
--------------------------------------------------------------------------------
1 | from keras.models import Sequential
2 | from keras.layers.core import Dense, Dropout, Flatten
3 | from keras.layers.convolutional import Conv2D
4 | from keras.optimizers import Adam
5 | from keras.layers.pooling import MaxPooling2D
6 |
7 | class emo_model:
8 | @staticmethod
9 | def build():
10 | model = Sequential()
11 |
12 | model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,1)))
13 | model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
14 | model.add(MaxPooling2D(pool_size=(2, 2)))
15 | model.add(Dropout(0.25))
16 |
17 | model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
18 | model.add(MaxPooling2D(pool_size=(2, 2)))
19 | model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
20 | model.add(MaxPooling2D(pool_size=(2, 2)))
21 | model.add(Dropout(0.25))
22 |
23 | model.add(Flatten())
24 | model.add(Dense(1024, activation='relu'))
25 | model.add(Dropout(0.5))
26 | model.add(Dense(7, activation='softmax'))
27 | return model
28 |
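build() only defines the architecture; Adam is imported but unused here, presumably because compilation happens in a training script. A hedged compile sketch with illustrative hyperparameters (not taken from this repo):

from keras.optimizers import Adam
from app.model.emo_model import emo_model

model = emo_model.build()
model.compile(optimizer=Adam(lr=1e-4),
              loss="categorical_crossentropy",  # 7 emotion classes, softmax output
              metrics=["accuracy"])
# model.fit(x_train, y_train, batch_size=64, epochs=30, validation_split=0.1)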
--------------------------------------------------------------------------------
/api/app/model/smallervggnet.py:
--------------------------------------------------------------------------------
1 | from keras.models import Sequential
2 | from keras.layers.normalization import BatchNormalization
3 | from keras.layers.convolutional import Conv2D
4 | from keras.layers.convolutional import MaxPooling2D
5 | from keras.layers.core import Activation
6 | from keras.layers.core import Flatten
7 | from keras.layers.core import Dropout
8 | from keras.layers.core import Dense
9 | from keras import backend as K
10 |
11 |
12 | class SmallerVGGNet:
13 | @staticmethod
14 | def build(width, height, depth, classes):
15 | model = Sequential()
16 | inputShape = (height, width, depth)
17 | chanDim = -1
18 |
19 | if K.image_data_format() == "channels_first":
20 | inputShape = (depth, height, width)
21 | chanDim = 1
22 |
23 | model.add(Conv2D(32, (3,3), padding="same", input_shape=inputShape))
24 | model.add(Activation("relu"))
25 | model.add(BatchNormalization(axis=chanDim))
26 | model.add(MaxPooling2D(pool_size=(3,3)))
27 | model.add(Dropout(0.25))
28 |
29 | model.add(Conv2D(64, (3,3), padding="same"))
30 | model.add(Activation("relu"))
31 | model.add(BatchNormalization(axis=chanDim))
32 | model.add(Conv2D(64, (3,3), padding="same"))
33 | model.add(Activation("relu"))
34 | model.add(BatchNormalization(axis=chanDim))
35 | model.add(MaxPooling2D(pool_size=(2,2)))
36 | model.add(Dropout(0.25))
37 |
38 | model.add(Conv2D(128, (3,3), padding="same"))
39 | model.add(Activation("relu"))
40 | model.add(BatchNormalization(axis=chanDim))
41 | model.add(Conv2D(128, (3,3), padding="same"))
42 | model.add(Activation("relu"))
43 | model.add(BatchNormalization(axis=chanDim))
44 | model.add(MaxPooling2D(pool_size=(2,2)))
45 | model.add(Dropout(0.25))
46 |
47 | model.add(Flatten())
48 | model.add(Dense(1024))
49 | model.add(Activation("relu"))
50 | model.add(BatchNormalization())
51 | model.add(Dropout(0.5))
52 |
53 | model.add(Dense(classes))
54 | model.add(Activation("sigmoid"))
55 |
56 | return model
57 |
--------------------------------------------------------------------------------
/api/app/records/camera1.csv:
--------------------------------------------------------------------------------
1 | "['Angry', 'man', 55, (218, 28, 277, 112)]","['Happy', 'man', 25, (15, 31, 76, 107)]","['Angry', 'woman', 30, (324, 25, 375, 90)]","['Fearful', 'man', 5, (275, 196, 328, 278)]","['Angry', 'man', 10, (127, 38, 176, 115)]","['Happy', 'woman', 5, (63, 204, 109, 270)]","['Neutral', 'woman', 8, (166, 223, 214, 288)]"
2 | "['Happy', 'man', 13, (110, 55, 141, 93)]","['Happy', 'man', 33, (213, 43, 238, 83)]","['Happy', 'man', 4, (193, 71, 218, 105)]","['Neutral', 'woman', 10, (69, 76, 95, 112)]"
3 | "['Happy', 'man', 13, (110, 55, 141, 93)]","['Happy', 'man', 33, (213, 43, 238, 83)]","['Happy', 'man', 4, (193, 71, 218, 105)]","['Neutral', 'woman', 10, (69, 76, 95, 112)]"
4 |
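Each row of camera1.csv is one processed frame and each cell is the string repr of a per-face record [emotion, gender, age, (x1, y1, x2, y2)], as appended by routes.py. A hedged sketch of reading it back for analysis (path is relative to the api/ directory):

import ast
import csv

faces = []
with open("app/records/camera1.csv") as f:
    for row in csv.reader(f):
        for cell in row:
            emotion, gender, age, box = ast.literal_eval(cell)
            faces.append({"emotion": emotion, "gender": gender, "age": age, "box": box})

print(len(faces), "face records loaded")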
--------------------------------------------------------------------------------
/api/app/static/age_status.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/static/age_status.png
--------------------------------------------------------------------------------
/api/app/static/emotion_status.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/static/emotion_status.png
--------------------------------------------------------------------------------
/api/app/static/gender_status.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/static/gender_status.png
--------------------------------------------------------------------------------
/api/app/static/heatmap.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/static/heatmap.png
--------------------------------------------------------------------------------
/api/app/static/selfytest_detect.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/static/selfytest_detect.jpeg
--------------------------------------------------------------------------------
/api/app/static/test2_detect.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/static/test2_detect.jpeg
--------------------------------------------------------------------------------
/api/app/static/test_detect.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/api/app/static/test_detect.jpg
--------------------------------------------------------------------------------
/api/app/templates/base.html:
--------------------------------------------------------------------------------
1 | {% extends 'bootstrap/base.html' %}
2 |
3 | {% block title %}
4 | {% if title %}{{ title }}{% else %}{{ ('Junction Tokyo') }}{% endif %}
5 | {% endblock %}
6 |
7 | {% block content %}
8 |
9 | {% with messages = get_flashed_messages() %}
10 | {% if messages %}
11 | {% for message in messages %}
12 | {{ message }}
13 | {% endfor %}
14 | {% endif %}
15 | {% endwith %}
16 | {# application content needs to be provided in the app_content block #}
17 | {% block app_content %}{% endblock %}
18 |
19 | {% endblock %}
20 | {% block styles %}
21 | {{ super() }}
22 |
25 | {% endblock %}
26 |
--------------------------------------------------------------------------------
/api/app/templates/detect/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Recommendations for {{ place_type }}
4 | {% block content %}
5 | {{ msg.to_html() | safe }}
6 | {% endblock content %}
7 |
8 |
--------------------------------------------------------------------------------
/api/app/templates/detect/public.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 | {% import 'bootstrap/wtf.html' as wtf %}
3 |
4 | {% block app_content %}
5 |
11 | {% endblock %}
12 |
--------------------------------------------------------------------------------
/api/app/templates/detect/results.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 | {% import 'bootstrap/wtf.html' as wtf %}
3 |
4 | {% block app_content %}
5 |
6 | Device ID : Camera 1
7 | Location : FLOOR3, Building A3
8 | Ticket Counter 1
9 | {% if people_count %}Total Number of People Detected :{{ people_count }} {% endif %}
10 |
11 | {% if file_name %}
12 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
59 |
60 |
61 |
62 |
63 |
64 |
65 | {% endif %}
66 |
67 | {% endblock %}
68 |
--------------------------------------------------------------------------------
/api/app/templates/detect/upload.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 | {% import 'bootstrap/wtf.html' as wtf %}
3 |
4 | {% block head %}
5 | {{ super() }}
6 | {{ dropzone.load_css() }}
7 | {{ dropzone.style('border: 2px dashed #0087F7; margin: 10px 0 10px; min-height: 400px;') }}
8 | {% endblock %}
9 | {% block app_content %}
10 |
11 | Device ID : Camera 1
12 | Location : FLOOR3, Building A3
13 | Ticket Counter 1
14 | {{ form.hidden_tag() }}
15 |
16 |
17 | {{ dropzone.create(action="detect.detect") }}
18 |
19 |
20 | Upload
21 |
22 |
23 | {{ dropzone.load_js() }}
24 | {{ dropzone.config(
25 | custom_init='
26 | this.on("success", function(file, response){
27 | window.location="' + url_for("detect.result") +'"
28 | });
29 | this.on("sending", function(file, xhr, formData) {
30 | csrf_name=$("#csrf_token").attr("name");
31 | csrf_value=$("#csrf_token").attr("value");
32 | formData.append(csrf_name, csrf_value);
33 | });
34 | ',
35 | custom_options="addRemoveLinks: true"
36 | ) }}
37 | {% endblock %}
38 |
--------------------------------------------------------------------------------
/api/config.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | basedir = os.path.abspath(os.path.dirname(__file__))
4 |
5 |
6 | class Config(object):
7 | SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
8 |
--------------------------------------------------------------------------------
/api/requirements.txt:
--------------------------------------------------------------------------------
1 | absl-py==0.4.0
2 | alembic==0.9.6
3 | asn1crypto==0.24.0
4 | astor==0.7.1
5 | astroid==2.0.4
6 | atomicwrites==1.3.0
7 | attrs==18.2.0
8 | Babel==2.5.1
9 | backcall==0.1.0
10 | beautifulsoup4==4.6.3
11 | better-exceptions==0.2.2
12 | bleach==1.5.0
13 | blinker==1.4
14 | cairocffi==1.0.2
15 | CairoSVG==2.3.0
16 | certifi==2017.7.27.1
17 | cffi==1.11.5
18 | chardet==3.0.4
19 | click==6.7
20 | cloudpickle==0.5.6
21 | commentjson==0.7.1
22 | cryptography==2.3.1
23 | cssselect2==0.2.1
24 | cycler==0.10.0
25 | dask==0.19.2
26 | decorator==4.3.0
27 | defusedxml==0.5.0
28 | dlib==19.16.0
29 | dominate==2.3.1
30 | elasticsearch==6.1.1
31 | entrypoints==0.2.3
32 | Flask==1.0.2
33 | Flask-Babel==0.11.2
34 | Flask-Blogging==1.0.2
35 | Flask-Bootstrap==3.3.7.1
36 | Flask-Cache==0.13.1
37 | Flask-Dropzone==1.5.3
38 | Flask-FileUpload==0.5.0
39 | Flask-HTTPAuth==3.2.3
40 | Flask-Login==0.4.0
41 | Flask-Mail==0.9.1
42 | Flask-Migrate==2.1.1
43 | Flask-Moment==0.5.2
44 | Flask-Principal==0.4.0
45 | Flask-SQLAlchemy==2.3.2
46 | Flask-Uploads==0.2.1
47 | Flask-WTF==0.14.2
48 | gast==0.2.2
49 | gevent==1.3.5
50 | graphviz==0.10.1
51 | greenlet==0.4.14
52 | grpcio==1.18.0
53 | guess-language-spirit==0.5.3
54 | gunicorn==19.9.0
55 | h5py==2.8.0
56 | html5lib==1.0.1
57 | idna==2.6
58 | imutils==0.5.2
59 | ipykernel==5.1.0
60 | ipython==7.1.1
61 | ipython-genutils==0.2.0
62 | ipywidgets==7.4.2
63 | isort==4.3.4
64 | itsdangerous==0.24
65 | jedi==0.13.1
66 | Jinja2==2.10
67 | jsonschema==2.6.0
68 | jupyter==1.0.0
69 | jupyter-client==5.2.3
70 | jupyter-console==6.0.0
71 | jupyter-core==4.4.0
72 | Keras==2.2.2
73 | Keras-Applications==1.0.4
74 | Keras-Preprocessing==1.0.2
75 | kiwisolver==1.0.1
76 | Lasagne==0.1
77 | lazy-object-proxy==1.3.1
78 | mahotas==1.4.4
79 | Mako==1.0.7
80 | Markdown==2.6.11
81 | MarkupSafe==1.0
82 | matplotlib==3.0.0
83 | mccabe==0.6.1
84 | micawber==0.3.5
85 | mistune==0.8.4
86 | more-itertools==6.0.0
87 | nbconvert==5.4.0
88 | nbformat==4.4.0
89 | networkx==2.2
90 | notebook==5.7.2
91 | numpy==1.14.5
92 | opencv-contrib-python==3.4.4.19
93 | opencv-python==3.4.4.19
94 | pandas==0.24.1
95 | pandocfilters==1.4.2
96 | parso==0.3.1
97 | pdf-reports==0.2.3
98 | peewee==3.7.0
99 | pexpect==4.6.0
100 | pickleshare==0.7.5
101 | Pillow==5.2.0
102 | pluggy==0.8.1
103 | prometheus-client==0.4.2
104 | prompt-toolkit==2.0.7
105 | protobuf==3.6.1
106 | ptyprocess==0.6.0
107 | py==1.7.0
108 | pycparser==2.19
109 | pydot==1.4.1
110 | Pygments==2.2.0
111 | PyJWT==1.5.3
112 | pylint==2.2.2
113 | PyMySQL==0.9.2
114 | pyparsing==2.2.1
115 | Pyphen==0.9.5
116 | pypugjs==5.7.2
117 | pytest==4.2.1
118 | python-dateutil==2.6.1
119 | python-dotenv==0.7.1
120 | python-editor==1.0.3
121 | python-slugify==1.2.6
122 | pytz==2017.2
123 | PyWavelets==1.0.0
124 | PyYAML==3.13
125 | pyzmq==17.1.2
126 | qtconsole==4.4.3
127 | redis==2.10.6
128 | requests==2.18.4
129 | rq==0.9.2
130 | scikit-image==0.14.0
131 | scikit-learn==0.19.2
132 | scikit-neuralnetwork==0.7
133 | scipy==1.1.0
134 | Send2Trash==1.5.0
135 | shortuuid==0.5.0
136 | six==1.11.0
137 | SQLAlchemy==1.1.14
138 | tensorboard==1.12.2
139 | tensorflow==1.5.0
140 | tensorflow-tensorboard==1.5.1
141 | termcolor==1.1.0
142 | terminado==0.8.1
143 | testpath==0.4.2
144 | Theano==1.0.4
145 | tinycss2==0.6.1
146 | toolz==0.9.0
147 | tornado==5.1.1
148 | traitlets==4.3.2
149 | typed-ast==1.1.0
150 | Unidecode==1.0.22
151 | urllib3==1.22
152 | visitor==0.1.3
153 | wcwidth==0.1.7
154 | WeasyPrint==44
155 | webencodings==0.5.1
156 | Werkzeug==0.14.1
157 | widgetsnbextension==3.4.2
158 | wrapt==1.10.11
159 | WTForms==2.1
160 | xlrd==1.2.0
161 |
--------------------------------------------------------------------------------
/face_detection_statistics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/face_detection_statistics.png
--------------------------------------------------------------------------------
/junction.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/junction.jpg
--------------------------------------------------------------------------------
/out_putgeneration/README.md:
--------------------------------------------------------------------------------
1 | # Output generation for the frontend
2 | Currently using a dummy dataset.
3 |
4 | Run as: python outputgenerator.py
5 |
6 | # Output:
7 | * tweets in 3 languages based on the stadium video
8 | * heatmap of the stadium
9 | * can be used to generate dataset statistics
10 |
11 | # Dependencies:
12 | pip install:
13 | * tweepy
14 | * googletrans
15 | * matplotlib
16 | * pandas
17 |
18 | # Errors:
19 | * duplicate tweet -> delete the Twitter post directly, or append the current minute to the tweet (see the sketch below)
20 |
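A minimal sketch of the duplicate-tweet workaround mentioned above: appending the current time down to the minute so Twitter does not reject the status as a duplicate. The suffix format is illustrative.

import datetime
import tweetGenerator as tw

api = tw.TweetGenerator()
text = "Please go to entrance B"
text = "{} | {:%H:%M}".format(text, datetime.datetime.now())  # minute suffix keeps statuses unique
api.multilanguageTweet(text)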
--------------------------------------------------------------------------------
/out_putgeneration/camera.py:
--------------------------------------------------------------------------------
1 | # Camera class
2 |
3 | class camera():
4 |
5 | def __init__(self, cameraID, X, Y, databaseID, name, Ctype):
6 | self.cameraID = cameraID
7 | self.coorX = X
8 | self.coorY = Y
9 | self.databaseID = databaseID
10 | self.name = name
11 | self.Ctype = Ctype
12 |
13 |
14 |
--------------------------------------------------------------------------------
/out_putgeneration/camera.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/out_putgeneration/camera.pyc
--------------------------------------------------------------------------------
/out_putgeneration/customerservices.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 | import pandas as pd
3 | from geo_info_caller import get_geo_info
4 | import camera
5 |
6 | GeoCamera = namedtuple('GeoCamera', 'camera address distance duration')
7 |
8 | class customerservices:
9 | def __init__(self, dur_thr=200):
10 | self.dur_thr = dur_thr * 60
11 | self.serviceType = "return closest camera based on user input and position"
12 | self.database = pd.read_csv('morerealdata.csv',delimiter=";")
13 | self.result = pd.DataFrame()
14 |
15 | def getcamIDfromCtype(self, Camlist, type):
16 | goodTypeCam = []
17 | for cam in Camlist:
18 | if(cam.Ctype == type ):
19 | goodTypeCam.append(cam)
20 | return(goodTypeCam)
21 |
22 | def get_geo_cameras(self, goodTypeCam, geo_info):
23 | geo_cams = []
24 | for i, v_dur in enumerate(geo_info.duration_values):
25 | print(v_dur)
26 | if v_dur <= self.dur_thr:
27 | geo_cam = GeoCamera(goodTypeCam[i], geo_info.destinations[i], geo_info.distance_texts[i], geo_info.duration_texts[i])
28 | geo_cams.append(geo_cam)
29 |
30 | return geo_cams
31 |
32 | def findclosestCam(self, goodTypeCam, userPosition):
33 | dest_coords = [tuple([c.coorX, c.coorY]) for c in goodTypeCam]
34 | geo_info = get_geo_info(userPosition, dest_coords)
35 | geo_cams = self.get_geo_cameras(goodTypeCam, geo_info)
36 | return geo_cams
37 |
38 | def returnAllcorrespondingData(self, geo_cameras):
39 | for geo_cam in geo_cameras:
40 | self.result = self.result.append(self.database.loc[self.database['camera'] == geo_cam.camera.databaseID], ignore_index = True)
41 | return self.result, geo_cameras
42 |
43 | def generateRecommandationList(self, results, geocameras):
44 | # Generate the recommandation list:
45 | recommandation = results
46 | recommandation["name"] = ""
47 | recommandation["distance"] = ""
48 | recommandation["duration"] = ""
49 | recommandation["address"] = ""
50 |
51 | #print(recommandation)
52 |
53 | for c in geocameras:
54 | # Get the good index:
55 | index = recommandation.index[recommandation.camera == c.camera.databaseID]
56 | # Set the values:
57 | recommandation.set_value(index,'distance', c.distance)
58 | recommandation.set_value(index,'duration', c.duration)
59 | recommandation.set_value(index,'name', c.camera.name)
60 | recommandation.set_value(index,'address', c.address)
61 |
62 | recommandation.set_index('camera', inplace = True)
63 | recommandation.sort_values(by='waiting', inplace = True)
64 | #print(recommandation)
65 | return(recommandation)
66 |
67 | def solve_request(self, Camlist, Ctype, userPosition):
68 | goodTypeCam = self.getcamIDfromCtype(Camlist, Ctype)
69 |
70 | geo_cameras = self.findclosestCam(goodTypeCam, userPosition)
71 | results, geocameras = self.returnAllcorrespondingData(geo_cameras)
72 | recommandation = self.generateRecommandationList(results, geocameras)
73 |
74 | return(recommandation)
75 |
76 |
--------------------------------------------------------------------------------
/out_putgeneration/customerservices.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/out_putgeneration/customerservices.pyc
--------------------------------------------------------------------------------
/out_putgeneration/facedata.csv:
--------------------------------------------------------------------------------
1 | camera;time;nbpeople;waiting;age;gender;emotion
2 | C1;10:00:00;10;50;25;guy;happy
3 | C1;12:00:00;40;200;35;guy;happy
4 | C1;02:00:00;40;200;30;mix;happy
5 | C1;04:00:00;20;100;22;guy;happy
6 | C1;06:00:00;0;0;0;mix;neutral
7 | C2;10:00:00;3;15;40;girl;happy
8 | C2;12:00:00;6;30;45;girl;angry
9 | C2;02:00:00;3;15;50;girl;angry
10 | C2;04:00:00;6;30;45;guy;angry
11 | C2;06:00:00;8;40;45;guy;happy
12 | C3;10:00:00;12;60;15;girl;happy
13 | C3;12:00:00;34;170;18;girl;angry
14 | C3;02:00:00;34;170;16;girl;happy
15 | C3;04:00:00;12;60;25;girl;happy
16 | C3;06:00:00;5;25;22;guy;angry
17 | C4;10:00:00;24;120;35;mix;angry
18 | C4;12:00:00;56;280;35;mix;angry
19 | C4;02:00:00;80;400;35;mix;angry
20 | C4;04:00:00;43;215;35;guy;angry
21 | C4;06:00:00;12;60;35;mix;angry
22 |
--------------------------------------------------------------------------------
/out_putgeneration/facedata.ods:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/out_putgeneration/facedata.ods
--------------------------------------------------------------------------------
/out_putgeneration/facedata.xls:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/out_putgeneration/facedata.xls
--------------------------------------------------------------------------------
/out_putgeneration/geo_info_caller.py:
--------------------------------------------------------------------------------
1 | import googlemaps
2 | from datetime import datetime
3 | from collections import namedtuple
4 | import sys
5 |
6 | GeoObj = namedtuple('geo_object', ['destinations', 'distance_texts', 'distance_values',
7 | 'duration_texts', 'duration_values'])
8 |
9 | def get_geo_info(user_coords, dest_coords, mode='walking'):
10 | user_coords = tuple(user_coords)
11 | dest_coords = list(map(tuple, dest_coords))
12 | gmaps = googlemaps.Client(key='AIzaSyDjm1LEi97iX2-DBTu0d2xnAQrO9ElYDE8')
13 | distance_result = gmaps.distance_matrix(origins=user_coords, destinations=dest_coords, mode=mode)
14 |
15 | dests = distance_result['destination_addresses']
16 | dists = [elem['distance']['text'] for elem in distance_result['rows'][0]['elements']]
17 | durs = [elem['duration']['text'] for elem in distance_result['rows'][0]['elements']]
18 |
19 | v_dists = [elem['distance']['value'] for elem in distance_result['rows'][0]['elements']]
20 | v_durs = [elem['duration']['value'] for elem in distance_result['rows'][0]['elements']]
21 |
22 | geo_info = GeoObj(dests, dists, v_dists, durs, v_durs)
23 | return geo_info
24 |
25 |
26 | if __name__ == '__main__':
27 | user_coords = sys.argv[1]
28 | user_coords = tuple(map(float, user_coords.split(',')))
29 | dest_coords = sys.argv[2:]
30 | # dest_coords = dest_coords.split(' ')
31 | dest_coords = [tuple(map(float, dest_coord.split(','))) for dest_coord in dest_coords]
32 | geo_info = get_geo_info(user_coords, dest_coords, mode='walking')
33 |
34 |
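A minimal sketch of calling get_geo_info directly and reading the GeoObj fields; the coordinates reuse the values from test_customerService.py, and a Google Maps key with the Distance Matrix API enabled is required.

from geo_info_caller import get_geo_info

user = (35.654697, 139.781929)
dests = [(35.679221, 139.776093), (35.670262, 139.775106)]
info = get_geo_info(user, dests, mode="walking")

for addr, dist, dur in zip(info.destinations, info.distance_texts, info.duration_texts):
    print(addr, dist, dur)  # address, distance text, walking duration text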
--------------------------------------------------------------------------------
/out_putgeneration/geo_info_caller.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/out_putgeneration/geo_info_caller.pyc
--------------------------------------------------------------------------------
/out_putgeneration/morerealdata.csv:
--------------------------------------------------------------------------------
1 | camera;nbpeople;waiting;age;gender;emotion
2 | C1;12;48;23;girl;happy
3 | C2;20;80;32;mixed;angry
4 | C3;3;12;34;boy;angry
5 | C4;6;24;18;girl;happy
6 | C5;2;8;67;girl;happy
7 |
--------------------------------------------------------------------------------
/out_putgeneration/outputgenerator.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import matplotlib.pyplot as plt
3 |
4 | # dedicated object
5 | import tweetGenerator as tw
6 | import camera
7 |
8 | # Create Camera object (obtained from registration)
9 | camera1 = camera.camera(1, 80, 200, "C1", "entrance A", "stadium")
10 | camera2 = camera.camera(2, 500, 200,"C2", "entrance B", "stadium")
11 |
12 | # Load the database (currently dummy data)
13 | dummydatabase = pd.read_csv('facedata.csv',delimiter=";")
14 | print(dummydatabase.head())
15 |
16 | # Get the number of people and waiting time at each camera (time can be set in the query)
17 | TIME = '10:00:00'
18 |
19 | # Camera1
20 | query = dummydatabase.loc[dummydatabase['camera'] == 'C1'].loc[dummydatabase['time']== TIME]
21 | waitingtimeCamera1 = query.waiting
22 | waitingtimeCamera1 = waitingtimeCamera1.reset_index(drop=True)[0]
23 | nbpeopleC1 = query.nbpeople
24 | nbpeopleC1 = nbpeopleC1.reset_index(drop=True)[0]
25 | print(waitingtimeCamera1)
26 | print(nbpeopleC1)
27 |
28 | # Camera 2
29 | query = dummydatabase.loc[dummydatabase['camera'] == 'C2'].loc[dummydatabase['time']== TIME]
30 | waitingtimeCamera2 = query.waiting
31 | waitingtimeCamera2 = waitingtimeCamera2.reset_index(drop=True)[0]
32 | nbpeopleC2 = query.nbpeople
33 | nbpeopleC2 = nbpeopleC2.reset_index(drop=True)[0]
34 | print(waitingtimeCamera2)
35 | print(nbpeopleC2)
36 |
37 | ### Generate the heatmap of the stadium (the numerical values are tuned to make the display look better)
38 | heatmapC1 = plt.Circle((camera1.coorX, camera1.coorY), nbpeopleC1*8, color='r')
39 | heatmapC2 = plt.Circle((camera2.coorX, camera2.coorY), nbpeopleC2*8, color='g')
40 |
41 | img = plt.imread("staduim1.png")
42 | fig, ax = plt.subplots()
43 | ax.add_artist(heatmapC1)
44 | ax.add_artist(heatmapC2)
45 | plt.axis('off')
46 |
47 | plt.text(camera1.coorX - 20, camera1.coorY, str(waitingtimeCamera1) + "min", fontsize=12)
48 | plt.text(camera2.coorX - 20, camera2.coorY, str(waitingtimeCamera2) + "min", fontsize=12)
49 | ax.imshow(img)
50 |
51 | plt.savefig('savedheatmap/heatmap.png')
52 |
53 | ### Tweet generation
54 | tweetAPI = tw.TweetGenerator()
55 | englishText = tweetAPI.ruleBaseGenerator(camera1, nbpeopleC1, camera2, nbpeopleC2)
56 | tweetAPI.multilanguageTweet(englishText)
57 |
58 | ### Business statistics: demography / live advertisement tuning based on gender / age
59 | dummydatabase.groupby("camera").plot()
60 |
61 |
--------------------------------------------------------------------------------
/out_putgeneration/savedheatmap/heatmap.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/out_putgeneration/savedheatmap/heatmap.png
--------------------------------------------------------------------------------
/out_putgeneration/staduim1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/out_putgeneration/staduim1.png
--------------------------------------------------------------------------------
/out_putgeneration/test_customerService.py:
--------------------------------------------------------------------------------
1 | # Test the customerservices class
2 | import camera
3 | import customerservices as cs
4 |
5 | # Generate a list of Camera objects
6 | # Create Camera objects (obtained from registration)
7 | camera1 = camera.camera(1, 35.679221, 139.776093, "C1", "Restaurant A", "sushi")
8 | camera2 = camera.camera(2, 35.670262, 139.775106,"C2", "Restaurant B", "sushi")
9 | camera3 = camera.camera(3, 35.670259, 139.7751036,"C3", "Restaurant C", "sushi")
10 | camera4 = camera.camera(4, 35.670249, 139.7751056,"C4", "Entrance A", "nightclub")
11 |
12 | camlist =[camera1, camera2, camera3, camera4]
13 |
14 | service = cs.customerservices()
15 |
16 | Ctype = 'sushi'
17 | UserPosition = [35.654697, 139.781929]
18 | result = service.solve_request(camlist, Ctype, UserPosition)
19 | print(result)
--------------------------------------------------------------------------------
/out_putgeneration/tweetGenerator.py:
--------------------------------------------------------------------------------
1 | # Currently using tweepy, but will try the Twitter API from Rakuten
2 | # Need to try the translation API
3 |
4 | import tweepy
5 | from googletrans import Translator
6 | import datetime
7 |
8 | #import unirest not supported on python3
9 |
10 | class TweetGenerator:
11 |
12 | def __init__(self):
13 | # Setting the key for identification
14 | self.CONSUMER_KEY ="hEejKZrYXMbN2lsQPmHYnCpvY"
15 | self.CONSUMER_SECRET = "k9a8nFVaDbmUJyDZBAwwdmc1miqh8sDWjJu1AohJw03oiUnPn2"
16 | self.ACCESS_KEY = "1096442165448720385-TYxgNnoYL3z5GmKavNjFgcqvXw4ViA"
17 | self.ACCESS_SECRET = "1jNRID3iJhdDqL0Yq2hAlhJCMlA7AubodvZ0997gY5Wfy"
18 |
19 | # Set the access:
20 | self.auth = tweepy.OAuthHandler(self.CONSUMER_KEY, self.CONSUMER_SECRET)
21 | self.auth.set_access_token(self.ACCESS_KEY, self.ACCESS_SECRET)
22 |
23 | # Set up the api:
24 | self.api = tweepy.API(self.auth)
25 |
26 | # Set up the translator:
27 | self.translator = Translator()
28 |
29 | def send_tweet(self, tweetText):
30 | # Send the status
31 | self.api.update_status(tweetText)
32 |
33 | def trnsl_JP(self, englishText):
34 | # Translate the English text to Japanese
35 | jpText = self.translator.translate(englishText, dest='ja').text
36 | jpText = "[JAPANESE] " + jpText
37 | return jpText
38 |
39 | def trnsl_KO(self, englishText):
40 | # Translate the English text to Korean
41 | koText = self.translator.translate(englishText, dest='ko').text
42 | koText = "[KOREAN] " + koText
43 | return koText
44 |
45 | def multilanguageTweet(self, englishText):
46 | # Translate and send the different tweet
47 | if(englishText ==''):
48 | return(0)
49 |
50 | koText = self.trnsl_KO(englishText)
51 | self.send_tweet(koText)
52 | print("Korean tweet sent")
53 |
54 | jpText = self.trnsl_JP(englishText)
55 | self.send_tweet(jpText)
56 | print("Japanese tweet sent")
57 |
58 | self.send_tweet("[ENGLISH] " + englishText)
59 | print("English tweet sent")
60 |
61 | def ruleBaseGenerator(self, camera1, nbpeopleC1, camera2, nbpeopleC2):
62 | # Generate the message to tweet based on the camera
63 | currentDT = datetime.datetime.now()
64 | englishText =""
65 | if(nbpeopleC1 > nbpeopleC2):
66 | englishText = str(currentDT.day) +'/'+ str(currentDT.hour) +" | Please go to " + camera2.name
67 |
68 | if(nbpeopleC2 > nbpeopleC1):
69 | englishText = str(currentDT.day) +'/'+ str(currentDT.hour) +" | Please go to " + camera1.name
70 |
71 | return englishText
72 |
73 |
74 |
--------------------------------------------------------------------------------
/web_and_mobile.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Machine-Learning-Tokyo/MLTx2020/0b9a48ad46ec5310780c410f01cb11beaf8ee36b/web_and_mobile.png
--------------------------------------------------------------------------------