├── Age Prediction Usinge Image
│   ├── Age_Prediction_Using_Face_Image.ipynb
│   └── FaceAgePrediction.pth
├── CelebA_DCGAN.ipynb
├── DCGAN
│   ├── CelebA_DCGAN.ipynb
│   ├── CelebA_DCGAN_.ipynb
│   └── MNIST_DCGAN.ipynb
├── Expert Mode
│   ├── Face_Recognition_Expert_Mode.ipynb
│   └── Mnist_Expert_Mode.ipynb
├── FAMnist_PyTorch
│   ├── FAMnist.jpg
│   ├── FAMnist.pth
│   ├── FAMnist_PyTorch.ipynb
│   ├── Inference.py
│   ├── Model.py
│   ├── Test.py
│   ├── Train.py
│   └── requirements.txt
├── Face Mask Detection
│   └── Face_Mask_Dataset.ipynb
├── Gan.ipynb
├── Gender Detection
│   └── GenderClassification.ipynb
├── House Price
│   ├── CNN_HousePrices.h5
│   ├── Inference.py
│   ├── TestData
│   │   ├── bathroom.jpg
│   │   ├── bedroom.jpg
│   │   ├── frontal.jpg
│   │   ├── kitchen.jpg
│   │   └── s
│   ├── cnn_regression.py
│   └── pyimagesearch
│       ├── datasets.py
│       └── models.py
├── Kaggle 17 Flowers
│   └── Kaggle_17_Flowers.ipynb
├── MLP vs MLP + Deep
│   ├── Deep_FAMNIST.ipynb
│   ├── Deep_MNIST.ipynb
│   ├── Deep_cifar10.ipynb
│   ├── Deep_cifar100.ipynb
│   ├── MLP_FAMNIST.ipynb
│   ├── MLP_cifar10.ipynb
│   ├── MLP_cifar100.ipynb
│   └── MNIST_MLP.ipynb
├── MLP weather szeged hungary
│   └── MLP_weather_szeged_hungary.ipynb
├── PersianRecognition
│   ├── PersianRecogBot.py
│   └── Persian_Recognition.ipynb
├── Pix2Pix
│   ├── Inference.py
│   ├── Input
│   │   ├── 0.jpg
│   │   ├── 1.jpg
│   │   ├── 2.jpg
│   │   ├── 3.jpg
│   │   ├── 4.jpg
│   │   └── s
│   ├── inference.ipynb
│   └── pix2pix.ipynb
├── PyTorch Age Prediction Using Face Image TL
│   ├── Inference.py
│   ├── Model.py
│   ├── PyTorch_Age_Prediction_Using_Face_Image_Transfer_Learning.ipynb
│   ├── Test.py
│   ├── Train.py
│   ├── pytorch_age_prediction_using_face_image_transfer_learning.py
│   ├── requirements.txt
│   └── sajjad.jpg
├── PyTorch Age Prediction Using Face Image
│   ├── FaceAgePrediction.pth
│   ├── Inference.py
│   ├── Model.py
│   ├── PyTorch_Age_Prediction_Using_Face_Image.ipynb
│   ├── Test.py
│   ├── Train.py
│   └── requirements.txt
├── PyTorch Human Parsing
│   └── Pose.ipynb
├── PyTorch Mnist Persian
│   ├── 3.JPG
│   ├── Inference.py
│   ├── Mnist_Persian.ipynb
│   ├── Model.py
│   ├── PersianMnistFinal.pth
│   ├── Test.py
│   ├── Train.py
│   └── requirements.txt
├── PyTorch Persian Mnist TL
│   ├── 3.JPG
│   ├── Inference.py
│   ├── Mnist_PersianTL.ipynb
│   ├── Model.py
│   ├── Persian_Mnist_Sweep.ipynb
│   ├── Test.py
│   ├── Train.py
│   └── requirements.txt
├── README.md
├── Recurrent Neural Network
│   ├── 58_3.ipynb
│   ├── GRU_From_Scratch_F.ipynb
│   ├── GRU_RNN_Comparison_Classification.ipynb
│   ├── Joon_Del.ipynb
│   ├── Joon_Del_Update.ipynb
│   ├── LSTM_From_Scratch_F.ipynb
│   ├── PyTorch_RNN_Prediction.ipynb
│   ├── SimpleRNN.ipynb
│   ├── TensorFlow_RNN_GRU_Prediction_Comparison.ipynb
│   ├── TensorFlow_RNN_Prediction.ipynb
│   └── ball_movement.ipynb
├── Text_Classification_Using_Word_Embedding.ipynb
├── YoloV5 Traffic Sign TensorRT
│   └── YoloV5_Traffic_Sign_TensorRT_Compration.ipynb
└── YoloV5 Traffic Sign
    └── YoloV5.ipynb
/Age Prediction Usinge Image/FaceAgePrediction.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/Age Prediction Usinge Image/FaceAgePrediction.pth
--------------------------------------------------------------------------------
/Expert Mode/Mnist_Expert_Mode.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Mnist_Expert_Mode.ipynb",
7 | "provenance": [],
8 | "authorship_tag": "ABX9TyOxiunXar5Y1P3e2PMoCTLP",
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | }
18 | },
19 | "cells": [
20 | {
21 | "cell_type": "markdown",
22 | "metadata": {
23 | "id": "view-in-github",
24 | "colab_type": "text"
25 | },
26 | "source": [
 27 |         ""
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "metadata": {
33 | "id": "VWQD1JlOHnMr"
34 | },
35 | "source": [
36 | "import tensorflow as tf\n",
37 | "from tensorflow.keras.layers import Dense, Flatten, Conv2D"
38 | ],
39 | "execution_count": 2,
40 | "outputs": []
41 | },
42 | {
43 | "cell_type": "code",
44 | "metadata": {
45 | "colab": {
46 | "base_uri": "https://localhost:8080/"
47 | },
48 | "id": "8kkN49D6HpbU",
49 | "outputId": "d1f1babb-4775-45b5-9652-fe61d936262e"
50 | },
51 | "source": [
52 | "print(tf.__version__)"
53 | ],
54 | "execution_count": 5,
55 | "outputs": [
56 | {
57 | "output_type": "stream",
58 | "name": "stdout",
59 | "text": [
60 | "2.6.0\n"
61 | ]
62 | }
63 | ]
64 | },
65 | {
66 | "cell_type": "markdown",
67 | "metadata": {
68 | "id": "PD_OHkafH_gc"
69 | },
70 | "source": [
71 | "# Preparing Dataset\n",
72 | " "
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "metadata": {
78 | "colab": {
79 | "base_uri": "https://localhost:8080/"
80 | },
81 | "id": "iGti9kAEH3Eb",
82 | "outputId": "ca9f4753-d7f4-46c0-a50e-05589e1160d2"
83 | },
84 | "source": [
85 | "mnist = tf.keras.datasets.mnist\n",
86 | "\n",
87 | "(x_train, y_train), (x_test, y_test) = mnist.load_data()\n",
88 | "x_train = x_train/255.0\n",
89 | "x_test = x_test/255.0"
90 | ],
91 | "execution_count": 7,
92 | "outputs": [
93 | {
94 | "output_type": "stream",
95 | "name": "stdout",
96 | "text": [
97 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n",
98 | "11493376/11490434 [==============================] - 0s 0us/step\n",
99 | "11501568/11490434 [==============================] - 0s 0us/step\n"
100 | ]
101 | }
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "metadata": {
107 | "colab": {
108 | "base_uri": "https://localhost:8080/"
109 | },
110 | "id": "PEcH4m4NIUXj",
111 | "outputId": "08624520-0e3a-4554-a5de-2dd361f3aa1f"
112 | },
113 | "source": [
 114 |         "x_train.shape"
115 | ],
116 | "execution_count": 9,
117 | "outputs": [
118 | {
119 | "output_type": "execute_result",
120 | "data": {
121 | "text/plain": [
122 | "(60000, 28, 28)"
123 | ]
124 | },
125 | "metadata": {},
126 | "execution_count": 9
127 | }
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "metadata": {
133 | "id": "s1NNbf3sIZPb"
134 | },
135 | "source": [
136 | "x_train = x_train[...,tf.newaxis].astype('float32')\n",
137 | "x_test = x_test[...,tf.newaxis].astype('float32')"
138 | ],
139 | "execution_count": 10,
140 | "outputs": []
141 | },
142 | {
143 | "cell_type": "code",
144 | "metadata": {
145 | "colab": {
146 | "base_uri": "https://localhost:8080/"
147 | },
148 | "id": "kNEsJagsIo_j",
149 | "outputId": "7717be1c-88df-4c73-b7f0-b1df8bf254f4"
150 | },
151 | "source": [
152 | "x_train.shape"
153 | ],
154 | "execution_count": 11,
155 | "outputs": [
156 | {
157 | "output_type": "execute_result",
158 | "data": {
159 | "text/plain": [
160 | "(60000, 28, 28, 1)"
161 | ]
162 | },
163 | "metadata": {},
164 | "execution_count": 11
165 | }
166 | ]
167 | },
168 | {
169 | "cell_type": "code",
170 | "metadata": {
171 | "id": "pwD2oiY9Iqyy"
172 | },
173 | "source": [
174 | "train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(32)\n",
 175 |         "test_data = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)"
176 | ],
177 | "execution_count": 12,
178 | "outputs": []
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {
183 | "id": "tKhJheizJATJ"
184 | },
185 | "source": [
186 | "# Define Model"
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "metadata": {
192 | "id": "e1eTIT52I-3D"
193 | },
194 | "source": [
195 | "class MyModel(tf.keras.Model):\n",
196 | "\n",
197 | " def __init__(self,number_of_classes):\n",
198 | " super().__init__()\n",
199 | " self.conv1=Conv2D(32,(3,3),activation='relu')\n",
200 | " self.flatten=Flatten()\n",
201 | " self.fc1=Dense(128,activation='relu')\n",
202 | " self.fc2=Dense(number_of_classes, activation='softmax')\n",
203 | "\n",
204 | " def call(self,x):\n",
205 | "\n",
206 | " y = self.conv1(x)\n",
207 | "\n",
208 | " w = self.flatten(y)\n",
209 | "\n",
210 | " z = self.fc1(w)\n",
211 | "\n",
212 | " output = self.fc2(z)\n",
213 | " \n",
214 | " return output\n",
215 | " \n",
216 | "model = MyModel(10)"
217 | ],
218 | "execution_count": 15,
219 | "outputs": []
220 | },
221 | {
222 | "cell_type": "code",
223 | "metadata": {
224 | "id": "rn-7_RfRKWzM"
225 | },
226 | "source": [
 227 |         "loss_function = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)\n",
 228 |         "\n",
 229 |         "optimizer = tf.keras.optimizers.Adam()"
230 | ],
231 | "execution_count": 16,
232 | "outputs": []
233 | },
234 | {
235 | "cell_type": "code",
236 | "metadata": {
237 | "id": "44I3DgfrKpnT"
238 | },
239 | "source": [
240 | "train_loss = tf.keras.metrics.Mean(name = 'train_loss')\n",
 241 |         "train_acc = tf.keras.metrics.SparseCategoricalAccuracy(name = 'train_acc')\n",
242 | "\n",
243 | "test_loss = tf.keras.metrics.Mean(name = 'test_loss')\n",
 244 |         "test_acc = tf.keras.metrics.SparseCategoricalAccuracy(name = 'test_acc')"
245 | ],
246 | "execution_count": 18,
247 | "outputs": []
248 | },
249 | {
250 | "cell_type": "code",
251 | "metadata": {
252 | "id": "TyX23lRTK9bD"
253 | },
254 | "source": [
255 | "def train_step(images, y):\n",
256 | " with tf.GradientTape() as tape:\n",
257 | " y_pred = model(images, training=True)\n",
258 | " loss = loss_function(y, y_pred)\n",
259 | "\n",
260 | " gradients = tape.gradient(loss, model.trainable_variables)\n",
261 | "\n",
262 | " optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n",
263 | "\n",
264 | " train_loss(loss)\n",
265 | " train_acc(y,y_pred)"
266 | ],
267 | "execution_count": 30,
268 | "outputs": []
269 | },
270 | {
271 | "cell_type": "code",
272 | "metadata": {
273 | "id": "hbOvMm3RLrEc"
274 | },
275 | "source": [
276 | "def test_step(images, y):\n",
277 | " y_pred = model(images, training=False)\n",
278 | " loss = loss_function(y, y_pred)\n",
279 | "\n",
280 | " test_loss(loss)\n",
281 | " test_acc(y, y_pred)"
282 | ],
283 | "execution_count": 31,
284 | "outputs": []
285 | },
286 | {
287 | "cell_type": "code",
288 | "metadata": {
289 | "id": "UJ6ZGVWsMGKT"
290 | },
291 | "source": [
292 | "from tqdm import tqdm"
293 | ],
294 | "execution_count": 32,
295 | "outputs": []
296 | },
297 | {
298 | "cell_type": "code",
299 | "metadata": {
300 | "id": "jWPT1BZDMK8K"
301 | },
302 | "source": [
303 | "def train():\n",
304 | " epochs = 5\n",
305 | "\n",
306 | " for epoch in range(epochs):\n",
307 | " train_loss.reset_states()\n",
308 | " train_acc.reset_states()\n",
309 | " test_loss.reset_states()\n",
310 | " test_acc.reset_states()\n",
311 | "\n",
312 | " for images, labels in tqdm(train_data):\n",
313 | " train_step(images, labels)\n",
314 | "\n",
315 | " for images, labels in tqdm(test_data):\n",
316 | " test_step(images, labels)\n",
317 | "\n",
318 | " print('epoch:', epoch + 1)\n",
319 | " print('loss:', train_loss.result())\n",
320 | " print('accuracy:', train_acc.result())\n",
321 | " print('val loss:', test_loss.result())\n",
322 | " print('val accuracy:', test_acc.result())"
323 | ],
324 | "execution_count": 33,
325 | "outputs": []
326 | },
327 | {
328 | "cell_type": "markdown",
329 | "metadata": {
330 | "id": "RvyzKI7sNUZC"
331 | },
332 | "source": [
333 | "# Fit"
334 | ]
335 | },
336 | {
337 | "cell_type": "code",
338 | "metadata": {
339 | "colab": {
340 | "base_uri": "https://localhost:8080/"
341 | },
342 | "id": "xpvJPZisNTmo",
343 | "outputId": "725b0544-1fd7-415d-bdd6-5bc5667a2123"
344 | },
345 | "source": [
346 | "train()"
347 | ],
348 | "execution_count": 34,
349 | "outputs": [
350 | {
351 | "output_type": "stream",
352 | "name": "stderr",
353 | "text": [
354 | " 0%| | 0/1875 [00:00, ?it/s]/usr/local/lib/python3.7/dist-packages/keras/backend.py:4907: UserWarning: \"`sparse_categorical_crossentropy` received `from_logits=True`, but the `output` argument was produced by a sigmoid or softmax activation and thus does not represent logits. Was this intended?\"\n",
355 | " '\"`sparse_categorical_crossentropy` received `from_logits=True`, but '\n",
356 | "100%|██████████| 1875/1875 [01:22<00:00, 22.84it/s]\n",
357 | "100%|██████████| 1875/1875 [00:41<00:00, 45.59it/s]\n"
358 | ]
359 | },
360 | {
361 | "output_type": "stream",
362 | "name": "stdout",
363 | "text": [
364 | "epoch: 1\n",
365 | "loss: tf.Tensor(0.13839418, shape=(), dtype=float32)\n",
366 | "accuracy: tf.Tensor(0.9579833, shape=(), dtype=float32)\n",
367 | "val loss: tf.Tensor(0.051397692, shape=(), dtype=float32)\n",
368 | "val accuracy: tf.Tensor(0.9848667, shape=(), dtype=float32)\n"
369 | ]
370 | },
371 | {
372 | "output_type": "stream",
373 | "name": "stderr",
374 | "text": [
375 | "100%|██████████| 1875/1875 [01:20<00:00, 23.28it/s]\n",
376 | "100%|██████████| 1875/1875 [00:23<00:00, 80.32it/s]\n"
377 | ]
378 | },
379 | {
380 | "output_type": "stream",
381 | "name": "stdout",
382 | "text": [
383 | "epoch: 2\n",
384 | "loss: tf.Tensor(0.044776816, shape=(), dtype=float32)\n",
385 | "accuracy: tf.Tensor(0.98628336, shape=(), dtype=float32)\n",
386 | "val loss: tf.Tensor(0.03450815, shape=(), dtype=float32)\n",
387 | "val accuracy: tf.Tensor(0.98915, shape=(), dtype=float32)\n"
388 | ]
389 | },
390 | {
391 | "output_type": "stream",
392 | "name": "stderr",
393 | "text": [
394 | "100%|██████████| 1875/1875 [01:20<00:00, 23.33it/s]\n",
395 | "100%|██████████| 1875/1875 [00:41<00:00, 45.67it/s]\n"
396 | ]
397 | },
398 | {
399 | "output_type": "stream",
400 | "name": "stdout",
401 | "text": [
402 | "epoch: 3\n",
403 | "loss: tf.Tensor(0.024143366, shape=(), dtype=float32)\n",
404 | "accuracy: tf.Tensor(0.99226665, shape=(), dtype=float32)\n",
405 | "val loss: tf.Tensor(0.013415097, shape=(), dtype=float32)\n",
406 | "val accuracy: tf.Tensor(0.99558336, shape=(), dtype=float32)\n"
407 | ]
408 | },
409 | {
410 | "output_type": "stream",
411 | "name": "stderr",
412 | "text": [
413 | "100%|██████████| 1875/1875 [01:19<00:00, 23.48it/s]\n",
414 | "100%|██████████| 1875/1875 [00:23<00:00, 80.35it/s]\n"
415 | ]
416 | },
417 | {
418 | "output_type": "stream",
419 | "name": "stdout",
420 | "text": [
421 | "epoch: 4\n",
422 | "loss: tf.Tensor(0.014977431, shape=(), dtype=float32)\n",
423 | "accuracy: tf.Tensor(0.99506664, shape=(), dtype=float32)\n",
424 | "val loss: tf.Tensor(0.00744355, shape=(), dtype=float32)\n",
425 | "val accuracy: tf.Tensor(0.9975833, shape=(), dtype=float32)\n"
426 | ]
427 | },
428 | {
429 | "output_type": "stream",
430 | "name": "stderr",
431 | "text": [
432 | "100%|██████████| 1875/1875 [01:20<00:00, 23.24it/s]\n",
433 | "100%|██████████| 1875/1875 [00:23<00:00, 81.26it/s]"
434 | ]
435 | },
436 | {
437 | "output_type": "stream",
438 | "name": "stdout",
439 | "text": [
440 | "epoch: 5\n",
441 | "loss: tf.Tensor(0.010906981, shape=(), dtype=float32)\n",
442 | "accuracy: tf.Tensor(0.99635, shape=(), dtype=float32)\n",
443 | "val loss: tf.Tensor(0.0036947704, shape=(), dtype=float32)\n",
444 | "val accuracy: tf.Tensor(0.99876666, shape=(), dtype=float32)\n"
445 | ]
446 | },
447 | {
448 | "output_type": "stream",
449 | "name": "stderr",
450 | "text": [
451 | "\n"
452 | ]
453 | }
454 | ]
455 | },
456 | {
457 | "cell_type": "code",
458 | "metadata": {
459 | "id": "f0JwyIIcNPxd"
460 | },
461 | "source": [
462 | ""
463 | ],
464 | "execution_count": null,
465 | "outputs": []
466 | }
467 | ]
468 | }
--------------------------------------------------------------------------------
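
A side note on the custom loop in Mnist_Expert_Mode.ipynb: TensorFlow's expert-mode tutorials usually wrap the train and test steps in @tf.function so each step runs as a compiled graph rather than eager Python. A minimal sketch of that variant, assuming model, loss_function, optimizer, train_loss and train_acc are defined exactly as in the notebook:

    import tensorflow as tf

    @tf.function  # trace once, then execute the step as a graph
    def train_step(images, y):
        with tf.GradientTape() as tape:
            y_pred = model(images, training=True)
            loss = loss_function(y, y_pred)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        train_loss(loss)
        train_acc(y, y_pred)

The loop body stays unchanged; the decorator alone usually shortens per-epoch times of the kind logged in the output above.
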
/FAMnist_PyTorch/FAMnist.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/FAMnist_PyTorch/FAMnist.jpg
--------------------------------------------------------------------------------
/FAMnist_PyTorch/FAMnist.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/FAMnist_PyTorch/FAMnist.pth
--------------------------------------------------------------------------------
/FAMnist_PyTorch/FAMnist_PyTorch.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "FAMnist_PyTorch.ipynb",
7 | "provenance": [],
8 | "collapsed_sections": [],
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | },
18 | "accelerator": "GPU"
19 | },
20 | "cells": [
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "view-in-github",
25 | "colab_type": "text"
26 | },
27 | "source": [
 28 |         ""
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "source": [
34 | "!pip install wandb\n",
35 | "import wandb"
36 | ],
37 | "metadata": {
38 | "id": "9J7wBG6iek4r"
39 | },
40 | "execution_count": null,
41 | "outputs": []
42 | },
43 | {
44 | "cell_type": "code",
45 | "source": [
46 | "wandb.init(project=\"FAMNist\", entity=\"ma_heravi\")"
47 | ],
48 | "metadata": {
49 | "colab": {
50 | "base_uri": "https://localhost:8080/",
51 | "height": 125
52 | },
53 | "id": "rrxwunsaey6p",
54 | "outputId": "1a645003-ded6-4342-a8c7-747a9b7e3711"
55 | },
56 | "execution_count": 99,
57 | "outputs": [
58 | {
59 | "output_type": "display_data",
60 | "data": {
61 | "application/javascript": [
62 | "\n",
63 | " window._wandbApiKey = new Promise((resolve, reject) => {\n",
64 | " function loadScript(url) {\n",
65 | " return new Promise(function(resolve, reject) {\n",
66 | " let newScript = document.createElement(\"script\");\n",
67 | " newScript.onerror = reject;\n",
68 | " newScript.onload = resolve;\n",
69 | " document.body.appendChild(newScript);\n",
70 | " newScript.src = url;\n",
71 | " });\n",
72 | " }\n",
73 | " loadScript(\"https://cdn.jsdelivr.net/npm/postmate/build/postmate.min.js\").then(() => {\n",
74 | " const iframe = document.createElement('iframe')\n",
75 | " iframe.style.cssText = \"width:0;height:0;border:none\"\n",
76 | " document.body.appendChild(iframe)\n",
77 | " const handshake = new Postmate({\n",
78 | " container: iframe,\n",
79 | " url: 'https://wandb.ai/authorize'\n",
80 | " });\n",
81 | " const timeout = setTimeout(() => reject(\"Couldn't auto authenticate\"), 5000)\n",
82 | " handshake.then(function(child) {\n",
83 | " child.on('authorize', data => {\n",
84 | " clearTimeout(timeout)\n",
85 | " resolve(data)\n",
86 | " });\n",
87 | " });\n",
88 | " })\n",
89 | " });\n",
90 | " "
91 | ],
92 | "text/plain": [
93 | ""
94 | ]
95 | },
96 | "metadata": {}
97 | },
98 | {
99 | "output_type": "stream",
100 | "name": "stderr",
101 | "text": [
102 | "\u001b[34m\u001b[1mwandb\u001b[0m: You can find your API key in your browser here: https://wandb.ai/authorize\n"
103 | ]
104 | },
105 | {
106 | "name": "stdout",
107 | "output_type": "stream",
108 | "text": [
109 | "wandb: Paste an API key from your profile and hit enter, or press ctrl+c to quit: ··········\n"
110 | ]
111 | },
112 | {
113 | "output_type": "stream",
114 | "name": "stderr",
115 | "text": [
116 | "\u001b[34m\u001b[1mwandb\u001b[0m: Appending key for api.wandb.ai to your netrc file: /root/.netrc\n"
117 | ]
118 | },
119 | {
120 | "output_type": "display_data",
121 | "data": {
122 | "text/html": [
123 | "\n",
 124 |               "Syncing run fiery-frost-1 to Weights & Biases (docs).\n",
125 | "\n",
126 | " "
127 | ],
128 | "text/plain": [
129 | ""
130 | ]
131 | },
132 | "metadata": {}
133 | },
134 | {
135 | "output_type": "execute_result",
136 | "data": {
137 | "text/plain": [
138 | ""
139 | ],
140 | "text/html": [
141 | ""
142 | ]
143 | },
144 | "metadata": {},
145 | "execution_count": 99
146 | }
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "metadata": {
152 | "id": "bAltkQau1eOm"
153 | },
154 | "source": [
155 | "import torch\n",
156 | "from torch import nn\n",
157 | "from torch.utils.data import DataLoader\n",
 158 |         "import torchvision\n", "from torchvision.datasets import FashionMNIST"
159 | ],
160 | "execution_count": 26,
161 | "outputs": []
162 | },
163 | {
164 | "cell_type": "code",
165 | "source": [
166 | "# hyperparameters\n",
167 | "latent_size = 10\n",
168 | "disc_inp_sz = 28*28\n",
169 | "img_size = 28\n",
170 | "epochs = 10\n",
171 | "batch_size = 32\n",
172 | "lr = 0.001\n",
173 | "wandb.config = {\n",
174 | " \"learning_rate\": lr,\n",
175 | " \"epochs\": epochs,\n",
176 | " \"batch_size\": 32\n",
177 | "}"
178 | ],
179 | "metadata": {
180 | "id": "7fUp_60YTSUx"
181 | },
182 | "execution_count": 100,
183 | "outputs": []
184 | },
185 | {
186 | "cell_type": "code",
187 | "metadata": {
188 | "id": "WH1WREd42OM7"
189 | },
190 | "source": [
191 | "class MyModel(nn.Module):\n",
192 | " def __init__(self, input_dims, output_dims):\n",
193 | " super(MyModel, self).__init__()\n",
194 | " self.fc = nn.Sequential(\n",
195 | " nn.Linear(input_dims, 256),\n",
196 | " nn.ReLU(),\n",
197 | " nn.BatchNorm1d(256),\n",
198 | " nn.Dropout(0.25),\n",
199 | " \n",
200 | " nn.Linear(256, 128),\n",
201 | " nn.ReLU(),\n",
202 | " nn.BatchNorm1d(128),\n",
203 | " nn.Dropout(0.5),\n",
204 | "\n",
205 | " nn.Linear(128, 64),\n",
206 | " nn.ReLU(),\n",
207 | " nn.BatchNorm1d(64),\n",
208 | " nn.Dropout(0.5),\n",
209 | " \n",
210 | " nn.Linear(64, output_dims),\n",
211 | " nn.Sigmoid(),\n",
212 | " )\n",
213 | " \n",
214 | " def forward(self, input_t):\n",
215 | " input_t = input_t.reshape((input_t.shape[0], 784))\n",
216 | " return self.fc(input_t)"
217 | ],
218 | "execution_count": 101,
219 | "outputs": []
220 | },
221 | {
222 | "cell_type": "code",
223 | "metadata": {
224 | "id": "ISS2sL_Y7U7u",
225 | "colab": {
226 | "base_uri": "https://localhost:8080/"
227 | },
228 | "outputId": "77590ca8-46d8-47c3-e0f4-9c9b200f5c3c"
229 | },
230 | "source": [
 231 |         "device = torch.device(\"cuda\")\n",
232 | "# device = torch.device(\"cpu\")\n",
233 | "\n",
234 | "model = MyModel(disc_inp_sz, latent_size).to(device)\n",
235 | "\n",
236 | "model = model.to(device)\n",
237 | "model.train(True)"
238 | ],
239 | "execution_count": 89,
240 | "outputs": [
241 | {
242 | "output_type": "execute_result",
243 | "data": {
244 | "text/plain": [
245 | "MyModel(\n",
246 | " (fc): Sequential(\n",
247 | " (0): Linear(in_features=784, out_features=256, bias=True)\n",
248 | " (1): ReLU()\n",
249 | " (2): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
250 | " (3): Dropout(p=0.25, inplace=False)\n",
251 | " (4): Linear(in_features=256, out_features=128, bias=True)\n",
252 | " (5): ReLU()\n",
253 | " (6): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
254 | " (7): Dropout(p=0.5, inplace=False)\n",
255 | " (8): Linear(in_features=128, out_features=64, bias=True)\n",
256 | " (9): ReLU()\n",
257 | " (10): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
258 | " (11): Dropout(p=0.5, inplace=False)\n",
259 | " (12): Linear(in_features=64, out_features=10, bias=True)\n",
260 | " (13): Sigmoid()\n",
261 | " )\n",
262 | ")"
263 | ]
264 | },
265 | "metadata": {},
266 | "execution_count": 89
267 | }
268 | ]
269 | },
270 | {
271 | "cell_type": "code",
272 | "metadata": {
273 | "id": "FvjhjgX-JWGH"
274 | },
275 | "source": [
276 | "def calc_acc(preds, labels):\n",
277 | " _, preds_max = torch.max(preds, 1)\n",
278 | " acc = torch.sum(preds_max == labels.data, dtype=torch.float64) / len(preds)\n",
279 | " return acc"
280 | ],
281 | "execution_count": 90,
282 | "outputs": []
283 | },
284 | {
285 | "cell_type": "code",
286 | "metadata": {
287 | "id": "fRwKLl1H8lNu"
288 | },
289 | "source": [
290 | "# Data Preparing\n",
291 | "\n",
292 | "transform = torchvision.transforms.Compose([\n",
293 | " torchvision.transforms.ToTensor(),\n",
294 | " torchvision.transforms.Normalize((0), (1))\n",
295 | "])\n",
296 | "\n",
297 | "dataset = torchvision.datasets.FashionMNIST(\"./dataset\", train=True, download=True, transform=transform)\n",
298 | "train_data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)"
299 | ],
300 | "execution_count": 91,
301 | "outputs": []
302 | },
303 | {
304 | "cell_type": "code",
305 | "metadata": {
306 | "id": "_FXbdTDBC1AZ"
307 | },
308 | "source": [
309 | " # compile\n",
310 | "optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n",
311 | "loss_function = torch.nn.CrossEntropyLoss()"
312 | ],
313 | "execution_count": 92,
314 | "outputs": []
315 | },
316 | {
317 | "cell_type": "code",
318 | "metadata": {
319 | "colab": {
320 | "base_uri": "https://localhost:8080/"
321 | },
322 | "id": "08p9Lp_kD4tW",
323 | "outputId": "3f651c24-cefb-4b22-8055-ad1713a2a975"
324 | },
325 | "source": [
326 | "# train\n",
327 | "\n",
328 | "for epoch in range(1, epochs+1):\n",
329 | " train_loss = 0.0\n",
330 | " train_acc = 0.0\n",
331 | " for images, labels in train_data_loader:\n",
332 | " images = images.to(device)\n",
333 | " labels = labels.to(device)\n",
334 | " optimizer.zero_grad()\n",
335 | " # 1- forwarding\n",
336 | " preds = model(images)\n",
337 | " # 2- backwarding \n",
338 | " loss = loss_function(preds, labels)\n",
339 | " loss.backward()\n",
340 | " # 3- Update\n",
341 | " optimizer.step()\n",
342 | "\n",
 343 |         "        train_loss += loss.item()\n",
344 | " train_acc += calc_acc(preds, labels)\n",
345 | " \n",
346 | " total_loss = train_loss / len(train_data_loader)\n",
347 | " total_acc = train_acc / len(train_data_loader)\n",
348 | "\n",
349 | " print(f\"Epoch: {epoch}, Loss: {total_loss}, Acc: {total_acc}\")\n",
 350 |         "    wandb.log({'epochs': epoch,\n",
351 | " 'loss': total_loss,\n",
352 | " 'acc': total_acc\n",
353 | " })"
354 | ],
355 | "execution_count": 103,
356 | "outputs": [
357 | {
358 | "output_type": "stream",
359 | "name": "stdout",
360 | "text": [
361 | "Epoch: 1, Loss: 1.5897778272628784, Acc: 0.8096666666666668\n",
362 | "Epoch: 2, Loss: 1.5875996351242065, Acc: 0.8134666666666667\n",
363 | "Epoch: 3, Loss: 1.5891987085342407, Acc: 0.8096166666666667\n",
364 | "Epoch: 4, Loss: 1.587140440940857, Acc: 0.8137500000000001\n",
365 | "Epoch: 5, Loss: 1.5859978199005127, Acc: 0.8131\n",
366 | "Epoch: 6, Loss: 1.5859174728393555, Acc: 0.8155333333333333\n",
367 | "Epoch: 7, Loss: 1.5866419076919556, Acc: 0.8155333333333333\n",
368 | "Epoch: 8, Loss: 1.586602807044983, Acc: 0.8125833333333333\n",
369 | "Epoch: 9, Loss: 1.5848454236984253, Acc: 0.8156166666666667\n",
370 | "Epoch: 10, Loss: 1.5847679376602173, Acc: 0.8157000000000001\n"
371 | ]
372 | }
373 | ]
374 | },
375 | {
376 | "cell_type": "code",
377 | "metadata": {
378 | "id": "YvK38m2ALXRi"
379 | },
380 | "source": [
381 | "# save\n",
382 | "torch.save(model.state_dict(), \"FAMnist.pth\")"
383 | ],
384 | "execution_count": 104,
385 | "outputs": []
386 | },
387 | {
388 | "cell_type": "code",
389 | "metadata": {
390 | "colab": {
391 | "base_uri": "https://localhost:8080/"
392 | },
393 | "id": "7c20DbHl8TH2",
394 | "outputId": "a1547faa-87c4-420b-e960-5aed114fa1e0"
395 | },
396 | "source": [
397 | "# inference\n",
398 | "\n",
399 | "import cv2\n",
400 | "import numpy as np\n",
401 | "\n",
402 | "\n",
403 | "# model.train(False)\n",
404 | "model.eval()\n",
405 | "\n",
406 | "# preprocess\n",
407 | "img = cv2.imread(\"3.jpg\")\n",
408 | "img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
409 | "img = cv2.resize(img, (28, 28))\n",
410 | "tensor = transform(img).unsqueeze(0).to(device)\n",
411 | "\n",
412 | "# process\n",
413 | "preds = model(tensor)\n",
414 | "\n",
415 | "# postprocess\n",
416 | "preds = preds.cpu().detach().numpy()\n",
417 | "output = np.argmax(preds)\n",
418 | "output"
419 | ],
420 | "execution_count": null,
421 | "outputs": [
422 | {
423 | "output_type": "execute_result",
424 | "data": {
425 | "text/plain": [
426 | "3"
427 | ]
428 | },
429 | "metadata": {},
430 | "execution_count": 35
431 | }
432 | ]
433 | },
434 | {
435 | "cell_type": "code",
436 | "metadata": {
437 | "id": "xliTDWEROZ97"
438 | },
439 | "source": [
440 | ""
441 | ],
442 | "execution_count": null,
443 | "outputs": []
444 | }
445 | ]
446 | }
--------------------------------------------------------------------------------
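
One caveat about the wandb usage in this notebook: assigning a plain dict to wandb.config after wandb.init only rebinds the attribute and, depending on the client version, may not sync the hyperparameters to the run. The documented pattern is to pass config to wandb.init (or call wandb.config.update). A sketch with the same project and values as above:

    import wandb

    run = wandb.init(
        project="FAMNist",
        entity="ma_heravi",
        config={"learning_rate": 0.001, "epochs": 10, "batch_size": 32},
    )
    lr = run.config["learning_rate"]  # read the synced value back
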
/FAMnist_PyTorch/Inference.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | from Model import MyModel
4 | import torchvision
5 | import torch
6 | import argparse
7 |
8 | parser = argparse.ArgumentParser()
9 | parser.add_argument("--device", default='cpu', type=str)
10 | parser.add_argument("--Image", type=str)
11 | args = parser.parse_args()
12 |
13 | latent_size = 10
14 | disc_inp_sz = 28*28
15 |
16 | device = torch.device(args.device)
17 | model = MyModel(disc_inp_sz, latent_size).to(device)
18 |
19 | model.load_state_dict(torch.load('FAMnist.pth', map_location=device))
20 | model.eval()
21 |
22 | transform = torchvision.transforms.Compose([
23 | torchvision.transforms.ToTensor(),
24 | torchvision.transforms.Normalize((0), (1))
25 |     # torchvision.transforms.RandomHorizontalFlip(),
26 | ])
27 |
28 | img = cv2.imread(args.Image)
29 | img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
30 | img = cv2.resize(img, (28, 28))
31 | tensor = transform(img).unsqueeze(0).to(device)
32 |
33 | preds = model(tensor)
34 |
35 | preds = preds.cpu().detach().numpy()
36 |
37 | output = np.argmax(preds)
38 | print(output)
39 |
--------------------------------------------------------------------------------
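
A usage sketch for this script (the flag names come from the argparse setup above; the checkpoint is the FAMnist.pth produced by Train.py, and the image path is an assumption):

    python Inference.py --device cpu --Image 3.jpg

It prints the argmax class index (0-9) over the ten Fashion-MNIST categories.
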
/FAMnist_PyTorch/Model.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 |
4 | class MyModel(nn.Module):
5 | def __init__(self, input_dims, output_dims):
6 | super(MyModel, self).__init__()
7 | self.fc = nn.Sequential(
8 | nn.Linear(input_dims, 256),
9 | nn.ReLU(),
10 | nn.BatchNorm1d(256),
11 | nn.Dropout(0.25),
12 |
13 | nn.Linear(256, 128),
14 | nn.ReLU(),
15 | nn.BatchNorm1d(128),
16 | nn.Dropout(0.5),
17 |
18 | nn.Linear(128, 64),
19 | nn.ReLU(),
20 | nn.BatchNorm1d(64),
21 | nn.Dropout(0.5),
22 |
23 | nn.Linear(64, output_dims),
24 | nn.Sigmoid(),
25 | )
26 |
27 | def forward(self, input_t):
28 | input_t = input_t.reshape((input_t.shape[0], 784))
29 | return self.fc(input_t)
--------------------------------------------------------------------------------
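
A quick smoke test for MyModel: the forward pass flattens any (N, 1, 28, 28) batch to (N, 784), so output shapes are easy to verify. A minimal sketch:

    import torch
    from Model import MyModel

    model = MyModel(28 * 28, 10)
    model.eval()  # BatchNorm1d uses running stats in eval mode
    x = torch.randn(4, 1, 28, 28)  # four dummy grayscale 28x28 images
    print(model(x).shape)  # torch.Size([4, 10])
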
/FAMnist_PyTorch/Test.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | from torch import nn
4 | from torch.utils.data import DataLoader
5 | from torchvision.datasets import FashionMNIST
6 | from Model import MyModel
7 |
8 | import argparse
9 |
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument("--device", default='cpu', type=str)
12 | args = parser.parse_args()
13 |
14 | # hyperparameters
15 | latent_size = 10
16 | disc_inp_sz = 28 * 28
17 | img_size = 28
18 | epochs = 10
19 | batch_size = 32
20 | lr = 0.001
21 |
22 | device = torch.device(args.device)
23 |
24 | 
25 | # build the model on the requested device; the trained weights are loaded below
26 | model = MyModel(disc_inp_sz, latent_size).to(device)
27 | 
28 |
29 |
30 | def calc_acc(preds, labels):
31 | _, preds_max = torch.max(preds, 1)
32 | acc = torch.sum(preds_max == labels.data, dtype=torch.float64) / len(preds)
33 | return acc
34 |
35 |
36 | # Data Preparing
37 |
38 | transform = torchvision.transforms.Compose([
39 | torchvision.transforms.ToTensor(),
40 | torchvision.transforms.Normalize((0), (1))
41 | ])
42 |
43 | test_set = torchvision.datasets.FashionMNIST('./test_data', download=True, train=False, transform=transform)
44 | test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=True)
45 |
46 | 
47 | # load the weights saved by Train.py; map_location keeps CPU-only runs working
48 | model.load_state_dict(torch.load('FAMnist.pth', map_location=device))
49 | model.eval()
50 | 
51 |
52 | test_acc = 0.0
53 | for img, label in test_loader:
54 | img = img.to(device)
55 | label = label.to(device)
56 |
57 | pred = model(img)
58 | test_acc += calc_acc(pred, label)
59 |
60 | total_acc = test_acc / len(test_loader)
61 | print(f"test accuracy: {total_acc}")
62 |
--------------------------------------------------------------------------------
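
To evaluate the checkpoint on the Fashion-MNIST test split (10,000 images), run the script with the flag its parser defines, e.g.:

    python Test.py --device cpu

FAMnist.pth (produced by Train.py) must sit in the working directory.
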
/FAMnist_PyTorch/Train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | from torch import nn
4 | from torch.utils.data import DataLoader
5 | from torchvision.datasets import FashionMNIST
6 | from Model import MyModel
7 |
8 | import argparse
9 |
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument("--device", default='cpu', type=str)
12 | args = parser.parse_args()
13 |
14 | # hyperparameters
15 | latent_size = 10
16 | disc_inp_sz = 28 * 28
17 | img_size = 28
18 | epochs = 10
19 | batch_size = 32
20 | lr = 0.001
21 |
22 | device = torch.device(args.device)
23 | # pass --device cuda to train on a GPU
24 |
25 | model = MyModel(disc_inp_sz, latent_size).to(device)
26 |
27 | model = model.to(device)
28 | model.train(True)
29 |
30 |
31 | def calc_acc(preds, labels):
32 | _, preds_max = torch.max(preds, 1)
33 | acc = torch.sum(preds_max == labels.data, dtype=torch.float64) / len(preds)
34 | return acc
35 |
36 |
37 | # Data Preparing
38 |
39 | transform = torchvision.transforms.Compose([
40 | torchvision.transforms.ToTensor(),
41 | torchvision.transforms.Normalize((0), (1))
42 | ])
43 |
44 | dataset = torchvision.datasets.FashionMNIST("./dataset", train=True, download=True, transform=transform)
45 | train_data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
46 |
47 | # compile
48 | optimizer = torch.optim.Adam(model.parameters(), lr=lr)
49 | loss_function = torch.nn.CrossEntropyLoss()
50 |
51 | # train
52 |
53 | for epoch in range(1, epochs + 1):
54 | train_loss = 0.0
55 | train_acc = 0.0
56 | for images, labels in train_data_loader:
57 | images = images.to(device)
58 | labels = labels.to(device)
59 | optimizer.zero_grad()
60 | # 1- forwarding
61 | preds = model(images)
62 | # 2- backwarding
63 | loss = loss_function(preds, labels)
64 | loss.backward()
65 | # 3- Update
66 | optimizer.step()
67 |
68 |         train_loss += loss.item()
69 | train_acc += calc_acc(preds, labels)
70 |
71 | total_loss = train_loss / len(train_data_loader)
72 | total_acc = train_acc / len(train_data_loader)
73 |
74 | print(f"Epoch: {epoch}, Loss: {total_loss}, Acc: {total_acc}")
75 |
76 | # save
77 | torch.save(model.state_dict(), "FAMnist.pth")
78 |
--------------------------------------------------------------------------------
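
One detail worth knowing here: torch.nn.CrossEntropyLoss applies log-softmax internally and therefore expects raw logits, while MyModel ends in a Sigmoid that squashes its outputs into (0, 1). Training still works (argmax is unaffected), but it keeps the cross-entropy floor high, which matches the ~1.58 losses logged in the notebook. A sketch of the conventional pairing:

    import torch
    from torch import nn

    logits = torch.randn(4, 10)           # raw scores straight from the last Linear layer
    labels = torch.randint(0, 10, (4,))
    loss_fn = nn.CrossEntropyLoss()       # log-softmax + NLL applied internally
    loss = loss_fn(logits, labels)        # no explicit activation on the model output
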
/FAMnist_PyTorch/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
3 | opencv-python
4 |
--------------------------------------------------------------------------------
/House Price/CNN_HousePrices.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/House Price/CNN_HousePrices.h5
--------------------------------------------------------------------------------
/House Price/Inference.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | import numpy as np
4 | from tensorflow.keras.models import load_model
5 |
6 | model = load_model('CNN_HousePrices.h5')
7 | Images = []
8 | outputImage = np.zeros((64, 64, 3), dtype="uint8")
9 |
10 | for image_name in sorted(f for f in os.listdir('TestData') if f.endswith('.jpg')):  # fixed order, images only
11 | image = cv2.imread('TestData/' + image_name)
12 | image = cv2.resize(image, (32, 32))
13 | Images.append(image)
14 |
15 | outputImage[0:32, 0:32] = Images[0]
16 | outputImage[0:32, 32:64] = Images[1]
17 | outputImage[32:64, 32:64] = Images[2]
18 | outputImage[32:64, 0:32] = Images[3]
19 |
20 | outputImage = outputImage/255
21 | outputImage = outputImage.reshape(1, 64, 64, 3)
22 | prediction = model.predict([outputImage])
23 | print('The predicted house price (as a fraction of the training-set maximum) is: ', prediction)
24 |
--------------------------------------------------------------------------------
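
Because cnn_regression.py trains on prices divided by the training-set maximum, the value printed above is a fraction in [0, 1], not dollars. Converting it back requires the same maxPrice computed during training; a sketch with a placeholder value:

    maxPrice = 4_000_000  # placeholder: use trainAttrX["price"].max() from the training run
    dollars = float(prediction[0][0]) * maxPrice
    print(f"Estimated price: ${dollars:,.0f}")
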
/House Price/TestData/bathroom.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/House Price/TestData/bathroom.jpg
--------------------------------------------------------------------------------
/House Price/TestData/bedroom.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/House Price/TestData/bedroom.jpg
--------------------------------------------------------------------------------
/House Price/TestData/frontal.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/House Price/TestData/frontal.jpg
--------------------------------------------------------------------------------
/House Price/TestData/kitchen.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/House Price/TestData/kitchen.jpg
--------------------------------------------------------------------------------
/House Price/TestData/s:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/House Price/cnn_regression.py:
--------------------------------------------------------------------------------
1 | # import the necessary packages
2 | from tensorflow.keras.optimizers import Adam
3 | from sklearn.model_selection import train_test_split
4 | from pyimagesearch import models
5 | from pyimagesearch import datasets
6 | import numpy as np
7 | import argparse
8 | import locale
9 | import os
10 | # construct the argument parser and parse the arguments
11 | ap = argparse.ArgumentParser()
12 | ap.add_argument("-d", "--dataset", type=str, required=True,
13 | help="path to input dataset of house images")
14 | args = vars(ap.parse_args())
15 |
16 |
17 | # construct the path to the input .txt file that contains information
18 | # on each house in the dataset and then load the dataset
19 | print("[INFO] loading house attributes...")
20 | inputPath = os.path.sep.join([args["dataset"], "HousesInfo.txt"])
21 | # inputPath = r'C:\Users\M. Ali\Desktop\House Price\HousesDataset\HousesInfo.txt'
22 | df = datasets.load_house_attributes(inputPath)
23 | # load the house images and then scale the pixel intensities to the
24 | # range [0, 1]
25 | print("[INFO] loading house images...")
26 | images = datasets.load_house_images(df, args["dataset"])
27 |
28 | # images = datasets.load_house_images(df, r'HouseDataset')
29 | images = images / 255.0
30 | # partition the data into training and testing splits using 75% of
31 | # the data for training and the remaining 25% for testing
32 | print(df.shape)
33 | print(images.shape)
34 | split = train_test_split(df, images, test_size=0.25, random_state=42)
35 | (trainAttrX, testAttrX, trainImagesX, testImagesX) = split
36 |
37 |
38 | # find the largest house price in the training set and use it to
39 | # scale our house prices to the range [0, 1] (will lead to better
40 | # training and convergence)
41 | maxPrice = trainAttrX["price"].max()
42 | trainY = trainAttrX["price"] / maxPrice
43 | testY = testAttrX["price"] / maxPrice
44 | # create our Convolutional Neural Network and then compile the model
45 | # using mean absolute percentage error as our loss, implying that we
46 | # seek to minimize the absolute percentage difference between our
47 | # price *predictions* and the *actual prices*
48 | model = models.create_cnn(64, 64, 3, regress=True)
49 | opt = Adam(lr=1e-3, decay=1e-3 / 200)
50 | model.compile(loss="mean_absolute_percentage_error", optimizer=opt)
51 | # train the model
52 | print("[INFO] training model...")
53 | model.fit(x=trainImagesX, y=trainY, validation_data=(testImagesX, testY), epochs=200, batch_size=8)
54 |
55 |
56 | # make predictions on the testing data
57 | print("[INFO] predicting house prices...")
58 | preds = model.predict(testImagesX)
59 | # compute the difference between the *predicted* house prices and the
60 | # *actual* house prices, then compute the percentage difference and
61 | # the absolute percentage difference
62 | diff = preds.flatten() - testY
63 | percentDiff = (diff / testY) * 100
64 | absPercentDiff = np.abs(percentDiff)
65 | # compute the mean and standard deviation of the absolute percentage
66 | # difference
67 | mean = np.mean(absPercentDiff)
68 | std = np.std(absPercentDiff)
69 | # finally, show some statistics on our model
70 | locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
71 | print("[INFO] avg. house price: {}, std house price: {}".format(
72 | locale.currency(df["price"].mean(), grouping=True),
73 | locale.currency(df["price"].std(), grouping=True)))
74 | print("[INFO] mean: {:.2f}%, std: {:.2f}%".format(mean, std))
75 |
76 | model.save('CNN_HousePrices.h5')
77 |
78 |
--------------------------------------------------------------------------------
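
The script expects the pyimagesearch Houses dataset layout: a HousesInfo.txt file plus four images per house matching the "{index}_*" glob used in datasets.py (bathroom, bedroom, frontal, kitchen), all in one directory. A usage sketch with an assumed local path:

    python cnn_regression.py --dataset HousesDataset
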
/House Price/pyimagesearch/datasets.py:
--------------------------------------------------------------------------------
1 | # import the necessary packages
2 | from sklearn.preprocessing import LabelBinarizer
3 | from sklearn.preprocessing import MinMaxScaler
4 | import pandas as pd
5 | import numpy as np
6 | import glob
7 | import cv2
8 | import os
9 |
10 |
11 | def load_house_attributes(inputPath):
12 | # initialize the list of column names in the CSV file and then
13 | # load it using Pandas
14 | cols = ["bedrooms", "bathrooms", "area", "zipcode", "price"]
15 | df = pd.read_csv(inputPath, sep=" ", header=None, names=cols)
16 |
17 | # determine (1) the unique zip codes and (2) the number of data
18 | # points with each zip code
19 | zipcodes = df["zipcode"].value_counts().keys().tolist()
20 | counts = df["zipcode"].value_counts().tolist()
21 | # loop over each of the unique zip codes and their corresponding
22 | # count
23 | for (zipcode, count) in zip(zipcodes, counts):
24 | # the zip code counts for our housing dataset is *extremely*
25 | # unbalanced (some only having 1 or 2 houses per zip code)
26 | # so let's sanitize our data by removing any houses with less
27 | # than 25 houses per zip code
28 | if count < 25:
29 | idxs = df[df["zipcode"] == zipcode].index
30 | df.drop(idxs, inplace=True)
31 | # return the data frame
32 | return df
33 |
34 |
35 | def process_house_attributes(df, train, test):
36 | # initialize the column names of the continuous data
37 | continuous = ["bedrooms", "bathrooms", "area"]
38 |     # perform min-max scaling on each continuous feature column to
39 | # the range [0, 1]
40 | cs = MinMaxScaler()
41 | trainContinuous = cs.fit_transform(train[continuous])
42 | testContinuous = cs.transform(test[continuous])
43 |
44 | # one-hot encode the zip code categorical data (by definition of
45 |     # one-hot encoding, all output features are now in the range [0, 1])
46 | zipBinarizer = LabelBinarizer().fit(df["zipcode"])
47 | trainCategorical = zipBinarizer.transform(train["zipcode"])
48 | testCategorical = zipBinarizer.transform(test["zipcode"])
49 | # construct our training and testing data points by concatenating
50 | # the categorical features with the continuous features
51 | trainX = np.hstack([trainCategorical, trainContinuous])
52 | testX = np.hstack([testCategorical, testContinuous])
53 | # return the concatenated training and testing data
54 | return (trainX, testX)
55 |
56 |
57 | def load_house_images(df, inputPath):
58 | # initialize our images array (i.e., the house images themselves)
59 | images = []
60 | # loop over the indexes of the houses
61 | for i in df.index.values:
62 | # find the four images for the house and sort the file paths,
63 | # ensuring the four are always in the *same order*
64 | basePath = os.path.sep.join([inputPath, "{}_*".format(i + 1)])
65 | housePaths = sorted(list(glob.glob(basePath)))
66 |
67 | # initialize our list of input images along with the output image
68 | # after *combining* the four input images
69 | inputImages = []
70 | outputImage = np.zeros((64, 64, 3), dtype="uint8")
71 | # loop over the input house paths
72 | for housePath in housePaths:
73 |         # load the input image, resize it to 32 x 32, and then
74 | # update the list of input images
75 | image = cv2.imread(housePath)
76 | image = cv2.resize(image, (32, 32))
77 | inputImages.append(image)
78 |
79 |         # tile the four input images in the output image such that the first
80 |         # image goes in the top-left corner, the second image in the
81 |         # top-right corner, the third image in the bottom-right corner,
82 |         # and the final image in the bottom-left corner
83 | # print(outputImage.shape, len(inputImages))
84 | # outputImage = inputImages[0]
85 | outputImage[0:32, 0:32] = inputImages[0]
86 | outputImage[0:32, 32:64] = inputImages[1]
87 | outputImage[32:64, 32:64] = inputImages[2]
88 | outputImage[32:64, 0:32] = inputImages[3]
89 | # add the tiled image to our set of images the network will be
90 | # trained on
91 | images.append(outputImage)
92 | # return our set of images
93 | return np.array(images)
--------------------------------------------------------------------------------
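
The tiling in load_house_images is plain quadrant assignment on a 64x64 canvas; the same idea in isolation, with dummy arrays standing in for the four room photos:

    import numpy as np

    tiles = [np.full((32, 32, 3), 60 * i, dtype="uint8") for i in range(4)]  # four dummy 32x32 images
    montage = np.zeros((64, 64, 3), dtype="uint8")
    montage[0:32, 0:32] = tiles[0]    # top-left
    montage[0:32, 32:64] = tiles[1]   # top-right
    montage[32:64, 32:64] = tiles[2]  # bottom-right
    montage[32:64, 0:32] = tiles[3]   # bottom-left
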
/House Price/pyimagesearch/models.py:
--------------------------------------------------------------------------------
1 | # import the necessary packages
2 | from tensorflow.keras.models import Sequential
3 | from tensorflow.keras.layers import BatchNormalization
4 | from tensorflow.keras.layers import Conv2D
5 | from tensorflow.keras.layers import MaxPooling2D
6 | from tensorflow.keras.layers import Activation
7 | from tensorflow.keras.layers import Dropout
8 | from tensorflow.keras.layers import Dense
9 | from tensorflow.keras.layers import Flatten
10 | from tensorflow.keras.layers import Input
11 | from tensorflow.keras.models import Model
12 | def create_mlp(dim, regress=False):
13 | # define our MLP network
14 | model = Sequential()
15 | model.add(Dense(8, input_dim=dim, activation="relu"))
16 | model.add(Dense(4, activation="relu"))
17 | # check to see if the regression node should be added
18 | if regress:
19 | model.add(Dense(1, activation="linear"))
20 | # return our model
21 | return model
22 |
23 | def create_cnn(width, height, depth, filters=(16, 32, 64), regress=False):
24 | # initialize the input shape and channel dimension, assuming
25 | # TensorFlow/channels-last ordering
26 | inputShape = (height, width, depth)
27 | chanDim = -1
28 |
29 | # define the model input
30 | inputs = Input(shape=inputShape)
31 | # loop over the number of filters
32 | for (i, f) in enumerate(filters):
33 | # if this is the first CONV layer then set the input
34 | # appropriately
35 | if i == 0:
36 | x = inputs
37 | # CONV => RELU => BN => POOL
38 | x = Conv2D(f, (3, 3), padding="same")(x)
39 | x = Activation("relu")(x)
40 | x = BatchNormalization(axis=chanDim)(x)
41 | x = MaxPooling2D(pool_size=(2, 2))(x)
42 |
43 | # flatten the volume, then FC => RELU => BN => DROPOUT
44 | x = Flatten()(x)
45 | x = Dense(16)(x)
46 | x = Activation("relu")(x)
47 | x = BatchNormalization(axis=chanDim)(x)
48 | x = Dropout(0.5)(x)
49 | # apply another FC layer, this one to match the number of nodes
50 | # coming out of the MLP
51 | x = Dense(4)(x)
52 | x = Activation("relu")(x)
53 | # check to see if the regression node should be added
54 | if regress:
55 | x = Dense(1, activation="linear")(x)
56 | # construct the CNN
57 | model = Model(inputs, x)
58 | # return the CNN
59 | return model
60 |
61 |
--------------------------------------------------------------------------------
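
A quick way to inspect the CNN that cnn_regression.py builds is to instantiate the regression variant and print its summary; a minimal sketch:

    from pyimagesearch.models import create_cnn

    model = create_cnn(64, 64, 3, regress=True)  # 64x64 RGB montage in, one linear unit out
    model.summary()
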
/MLP vs MLP + Deep/Deep_FAMNIST.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Deep_FAMNIST.ipynb",
7 | "provenance": [],
8 | "authorship_tag": "ABX9TyMDgru8V0K9ijgZSbSYCUhA",
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | },
18 | "accelerator": "GPU"
19 | },
20 | "cells": [
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "view-in-github",
25 | "colab_type": "text"
26 | },
27 | "source": [
 28 |         ""
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "metadata": {
34 | "id": "ywTl6H60hsJ1"
35 | },
36 | "source": [
37 | "import tensorflow as tf\n",
38 | "from tensorflow.keras import layers"
39 | ],
40 | "execution_count": 1,
41 | "outputs": []
42 | },
43 | {
44 | "cell_type": "code",
45 | "metadata": {
46 | "id": "2NJ230MshuWX"
47 | },
48 | "source": [
49 | "dataset = tf.keras.datasets.fashion_mnist"
50 | ],
51 | "execution_count": 2,
52 | "outputs": []
53 | },
54 | {
55 | "cell_type": "code",
56 | "metadata": {
57 | "colab": {
58 | "base_uri": "https://localhost:8080/"
59 | },
60 | "id": "_9artKrhhus-",
61 | "outputId": "a1432196-f287-429f-e006-c158e6120b42"
62 | },
63 | "source": [
64 | "(X_train, Y_train), (X_test, Y_test) = dataset.load_data()"
65 | ],
66 | "execution_count": 3,
67 | "outputs": [
68 | {
69 | "output_type": "stream",
70 | "name": "stdout",
71 | "text": [
72 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz\n",
73 | "32768/29515 [=================================] - 0s 0us/step\n",
74 | "40960/29515 [=========================================] - 0s 0us/step\n",
75 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz\n",
76 | "26427392/26421880 [==============================] - 0s 0us/step\n",
77 | "26435584/26421880 [==============================] - 0s 0us/step\n",
78 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz\n",
79 | "16384/5148 [===============================================================================================] - 0s 0us/step\n",
80 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz\n",
81 | "4423680/4422102 [==============================] - 0s 0us/step\n",
82 | "4431872/4422102 [==============================] - 0s 0us/step\n"
83 | ]
84 | }
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "metadata": {
90 | "id": "0sSQZ8HNkE4f"
91 | },
92 | "source": [
93 | "X_train, X_test = X_train/255.0, X_test/255.0"
94 | ],
95 | "execution_count": 4,
96 | "outputs": []
97 | },
98 | {
99 | "cell_type": "code",
100 | "metadata": {
101 | "colab": {
102 | "base_uri": "https://localhost:8080/"
103 | },
104 | "id": "XtCXCoC2pqIH",
105 | "outputId": "f3e8c2eb-8be4-4a14-a20f-68f698e451e0"
106 | },
107 | "source": [
108 | "X_train.shape"
109 | ],
110 | "execution_count": 5,
111 | "outputs": [
112 | {
113 | "output_type": "execute_result",
114 | "data": {
115 | "text/plain": [
116 | "(60000, 28, 28)"
117 | ]
118 | },
119 | "metadata": {},
120 | "execution_count": 5
121 | }
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "metadata": {
127 | "id": "p7Vw_mILl1He"
128 | },
129 | "source": [
130 | "X_train = X_train.reshape(X_train.shape[0],X_train.shape[1], X_train.shape[2],1)\n",
131 | "X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2],1)"
132 | ],
133 | "execution_count": 6,
134 | "outputs": []
135 | },
136 | {
137 | "cell_type": "code",
138 | "metadata": {
139 | "id": "BwLGRng2hwoG"
140 | },
141 | "source": [
142 | "model = tf.keras.models.Sequential([ \n",
143 | " layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)),\n",
144 | " layers.MaxPooling2D((2,2)),\n",
145 | " layers.Conv2D(64, (3,3), activation='relu'),\n",
146 | " layers.MaxPooling2D((2,2)),\n",
147 | " layers.Conv2D(128, (3,3), activation='relu'),\n",
148 | " \n",
149 | " layers.Flatten(),\n",
150 | " layers.Dense(128, activation='relu'),\n",
151 | " layers.Dense(64, activation='relu'),\n",
 152 |         "    layers.Dense(10, activation='softmax'),\n",
153 | "])"
154 | ],
155 | "execution_count": 7,
156 | "outputs": []
157 | },
158 | {
159 | "cell_type": "code",
160 | "metadata": {
161 | "id": "umHHXStjhxuW"
162 | },
163 | "source": [
164 | "model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])\n"
165 | ],
166 | "execution_count": 8,
167 | "outputs": []
168 | },
169 | {
170 | "cell_type": "code",
171 | "metadata": {
172 | "colab": {
173 | "base_uri": "https://localhost:8080/"
174 | },
175 | "id": "Zp_gMvSHhy7u",
176 | "outputId": "23dba69e-7686-484f-fa33-f16178d2601d"
177 | },
178 | "source": [
179 | "model.fit(X_train, Y_train, epochs=10)"
180 | ],
181 | "execution_count": 9,
182 | "outputs": [
183 | {
184 | "output_type": "stream",
185 | "name": "stdout",
186 | "text": [
187 | "Epoch 1/10\n",
188 | "1875/1875 [==============================] - 38s 6ms/step - loss: 0.5525 - accuracy: 0.7965\n",
189 | "Epoch 2/10\n",
190 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.3262 - accuracy: 0.8795\n",
191 | "Epoch 3/10\n",
192 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.2754 - accuracy: 0.8986\n",
193 | "Epoch 4/10\n",
194 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.2431 - accuracy: 0.9101\n",
195 | "Epoch 5/10\n",
196 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.2158 - accuracy: 0.9209\n",
197 | "Epoch 6/10\n",
198 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.1946 - accuracy: 0.9269\n",
199 | "Epoch 7/10\n",
200 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.1749 - accuracy: 0.9350\n",
201 | "Epoch 8/10\n",
202 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.1553 - accuracy: 0.9421\n",
203 | "Epoch 9/10\n",
204 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.1393 - accuracy: 0.9484\n",
205 | "Epoch 10/10\n",
206 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.1275 - accuracy: 0.9525\n"
207 | ]
208 | },
209 | {
210 | "output_type": "execute_result",
211 | "data": {
212 | "text/plain": [
213 | ""
214 | ]
215 | },
216 | "metadata": {},
217 | "execution_count": 9
218 | }
219 | ]
220 | },
221 | {
222 | "cell_type": "code",
223 | "metadata": {
224 | "id": "p3Djxh85h0fm",
225 | "outputId": "831b0e11-bbc4-408f-82ec-a266339117ae",
226 | "colab": {
227 | "base_uri": "https://localhost:8080/"
228 | }
229 | },
230 | "source": [
231 | "model.evaluate(X_test, Y_test)"
232 | ],
233 | "execution_count": 10,
234 | "outputs": [
235 | {
236 | "output_type": "stream",
237 | "name": "stdout",
238 | "text": [
239 | "313/313 [==============================] - 2s 4ms/step - loss: 0.2879 - accuracy: 0.9093\n"
240 | ]
241 | },
242 | {
243 | "output_type": "execute_result",
244 | "data": {
245 | "text/plain": [
246 | "[0.28787940740585327, 0.9093000292778015]"
247 | ]
248 | },
249 | "metadata": {},
250 | "execution_count": 10
251 | }
252 | ]
253 | },
254 | {
255 | "cell_type": "code",
256 | "metadata": {
257 | "id": "ppogFvdhBsNT"
258 | },
259 | "source": [
260 | ""
261 | ],
262 | "execution_count": null,
263 | "outputs": []
264 | }
265 | ]
266 | }
--------------------------------------------------------------------------------
/MLP vs MLP + Deep/Deep_MNIST.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Deep_MNIST.ipynb",
7 | "provenance": [],
8 | "authorship_tag": "ABX9TyMcjec8tk9wzc5HN8YmlnHR",
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | },
18 | "accelerator": "GPU"
19 | },
20 | "cells": [
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "view-in-github",
25 | "colab_type": "text"
26 | },
27 | "source": [
 28 |         ""
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "metadata": {
34 | "id": "ywTl6H60hsJ1"
35 | },
36 | "source": [
37 | "import tensorflow as tf\n",
38 | "from tensorflow.keras import layers"
39 | ],
40 | "execution_count": 1,
41 | "outputs": []
42 | },
43 | {
44 | "cell_type": "code",
45 | "metadata": {
46 | "id": "2NJ230MshuWX"
47 | },
48 | "source": [
49 | "dataset = tf.keras.datasets.mnist"
50 | ],
51 | "execution_count": 2,
52 | "outputs": []
53 | },
54 | {
55 | "cell_type": "code",
56 | "metadata": {
57 | "id": "_9artKrhhus-",
58 | "outputId": "b23a7214-953a-4133-dadb-f0cdbf17865d",
59 | "colab": {
60 | "base_uri": "https://localhost:8080/"
61 | }
62 | },
63 | "source": [
64 | "(X_train, Y_train), (X_test, Y_test) = dataset.load_data()"
65 | ],
66 | "execution_count": 3,
67 | "outputs": [
68 | {
69 | "output_type": "stream",
70 | "name": "stdout",
71 | "text": [
72 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n",
73 | "11493376/11490434 [==============================] - 0s 0us/step\n",
74 | "11501568/11490434 [==============================] - 0s 0us/step\n"
75 | ]
76 | }
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "metadata": {
82 | "id": "0sSQZ8HNkE4f"
83 | },
84 | "source": [
85 | "X_train, X_test = X_train/255.0, X_test/255.0"
86 | ],
87 | "execution_count": 4,
88 | "outputs": []
89 | },
90 | {
91 | "cell_type": "code",
92 | "metadata": {
93 | "colab": {
94 | "base_uri": "https://localhost:8080/"
95 | },
96 | "id": "XtCXCoC2pqIH",
97 | "outputId": "f56b9f47-feac-4475-8b55-5044911b25b1"
98 | },
99 | "source": [
100 | "X_train.shape"
101 | ],
102 | "execution_count": 5,
103 | "outputs": [
104 | {
105 | "output_type": "execute_result",
106 | "data": {
107 | "text/plain": [
108 | "(60000, 28, 28)"
109 | ]
110 | },
111 | "metadata": {},
112 | "execution_count": 5
113 | }
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "metadata": {
119 | "id": "p7Vw_mILl1He"
120 | },
121 | "source": [
122 | "X_train = X_train.reshape(X_train.shape[0],X_train.shape[1], X_train.shape[2],1)\n",
123 | "X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2],1)"
124 | ],
125 | "execution_count": 6,
126 | "outputs": []
127 | },
128 | {
129 | "cell_type": "code",
130 | "metadata": {
131 | "id": "BwLGRng2hwoG"
132 | },
133 | "source": [
134 | "model = tf.keras.models.Sequential([ \n",
135 | " layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)),\n",
136 | " layers.MaxPooling2D((2,2)),\n",
137 | " layers.Conv2D(64, (3,3), activation='relu'),\n",
138 | " layers.MaxPooling2D((2,2)),\n",
139 | " layers.Conv2D(128, (3,3), activation='relu'),\n",
140 | " \n",
141 | " layers.Flatten(),\n",
142 | " layers.Dense(128, activation='relu'),\n",
143 | " layers.Dense(64, activation='relu'),\n",
144 | " layers.Dense(100, activation='softmax'),\n",
145 | "])"
146 | ],
147 | "execution_count": 7,
148 | "outputs": []
149 | },
150 | {
151 | "cell_type": "code",
152 | "metadata": {
153 | "id": "umHHXStjhxuW"
154 | },
155 | "source": [
156 | "model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])"
157 | ],
158 | "execution_count": 8,
159 | "outputs": []
160 | },
161 | {
162 | "cell_type": "code",
163 | "metadata": {
164 | "colab": {
165 | "base_uri": "https://localhost:8080/"
166 | },
167 | "id": "Zp_gMvSHhy7u",
168 | "outputId": "0e54bcae-434a-4655-f9da-ae81907dc84a"
169 | },
170 | "source": [
171 | "model.fit(X_train, Y_train, epochs=10)"
172 | ],
173 | "execution_count": 9,
174 | "outputs": [
175 | {
176 | "output_type": "stream",
177 | "name": "stdout",
178 | "text": [
179 | "Epoch 1/10\n",
180 | "1875/1875 [==============================] - 38s 6ms/step - loss: 0.1996 - accuracy: 0.9410\n",
181 | "Epoch 2/10\n",
182 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.0512 - accuracy: 0.9840\n",
183 | "Epoch 3/10\n",
184 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.0383 - accuracy: 0.9880\n",
185 | "Epoch 4/10\n",
186 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.0295 - accuracy: 0.9911\n",
187 | "Epoch 5/10\n",
188 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.0226 - accuracy: 0.9927\n",
189 | "Epoch 6/10\n",
190 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.0182 - accuracy: 0.9940\n",
191 | "Epoch 7/10\n",
192 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.0157 - accuracy: 0.9950\n",
193 | "Epoch 8/10\n",
194 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.0132 - accuracy: 0.9959\n",
195 | "Epoch 9/10\n",
196 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.0120 - accuracy: 0.9962\n",
197 | "Epoch 10/10\n",
198 | "1875/1875 [==============================] - 10s 5ms/step - loss: 0.0114 - accuracy: 0.9967\n"
199 | ]
200 | },
201 | {
202 | "output_type": "execute_result",
203 | "data": {
204 | "text/plain": [
205 | ""
206 | ]
207 | },
208 | "metadata": {},
209 | "execution_count": 9
210 | }
211 | ]
212 | },
213 | {
214 | "cell_type": "code",
215 | "metadata": {
216 | "id": "p3Djxh85h0fm",
217 | "outputId": "10efbe7a-643d-487f-896a-ddf2db519648",
218 | "colab": {
219 | "base_uri": "https://localhost:8080/"
220 | }
221 | },
222 | "source": [
223 | "model.evaluate(X_test, Y_test)"
224 | ],
225 | "execution_count": 10,
226 | "outputs": [
227 | {
228 | "output_type": "stream",
229 | "name": "stdout",
230 | "text": [
231 | "313/313 [==============================] - 2s 4ms/step - loss: 0.0307 - accuracy: 0.9932\n"
232 | ]
233 | },
234 | {
235 | "output_type": "execute_result",
236 | "data": {
237 | "text/plain": [
238 | "[0.03065897524356842, 0.9932000041007996]"
239 | ]
240 | },
241 | "metadata": {},
242 | "execution_count": 10
243 | }
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "metadata": {
249 | "id": "5I1OSzfbAVtb"
250 | },
251 | "source": [
252 | ""
253 | ],
254 | "execution_count": null,
255 | "outputs": []
256 | }
257 | ]
258 | }
--------------------------------------------------------------------------------
/MLP vs MLP + Deep/Deep_cifar10.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Deep_cifar10.ipynb",
7 | "provenance": [],
8 | "authorship_tag": "ABX9TyPK+74gS4XnrOV22TbFxR80",
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | },
18 | "accelerator": "GPU"
19 | },
20 | "cells": [
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "view-in-github",
25 | "colab_type": "text"
26 | },
27 | "source": [
28 | "
"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "metadata": {
34 | "id": "ywTl6H60hsJ1"
35 | },
36 | "source": [
37 | "import tensorflow as tf\n",
38 | "from tensorflow.keras import layers"
39 | ],
40 | "execution_count": 1,
41 | "outputs": []
42 | },
43 | {
44 | "cell_type": "code",
45 | "metadata": {
46 | "id": "2NJ230MshuWX"
47 | },
48 | "source": [
49 | "dataset = tf.keras.datasets.cifar10"
50 | ],
51 | "execution_count": 2,
52 | "outputs": []
53 | },
54 | {
55 | "cell_type": "code",
56 | "metadata": {
57 | "colab": {
58 | "base_uri": "https://localhost:8080/"
59 | },
60 | "id": "_9artKrhhus-",
61 | "outputId": "fddf7d71-70bf-48dd-a80e-ab6612541fc2"
62 | },
63 | "source": [
64 | "(X_train, Y_train), (X_test, Y_test) = dataset.load_data()"
65 | ],
66 | "execution_count": 3,
67 | "outputs": [
68 | {
69 | "output_type": "stream",
70 | "name": "stdout",
71 | "text": [
72 | "Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\n",
73 | "170500096/170498071 [==============================] - 12s 0us/step\n",
74 | "170508288/170498071 [==============================] - 12s 0us/step\n"
75 | ]
76 | }
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "metadata": {
82 | "id": "0sSQZ8HNkE4f"
83 | },
84 | "source": [
85 | "X_train, X_test = X_train/255.0, X_test/255.0"
86 | ],
87 | "execution_count": 4,
88 | "outputs": []
89 | },
90 | {
91 | "cell_type": "code",
92 | "metadata": {
93 | "colab": {
94 | "base_uri": "https://localhost:8080/"
95 | },
96 | "id": "XtCXCoC2pqIH",
97 | "outputId": "3339afbc-8cd8-470e-c9ad-0951cacf50ea"
98 | },
99 | "source": [
100 | "X_train.shape"
101 | ],
102 | "execution_count": 5,
103 | "outputs": [
104 | {
105 | "output_type": "execute_result",
106 | "data": {
107 | "text/plain": [
108 | "(50000, 32, 32, 3)"
109 | ]
110 | },
111 | "metadata": {},
112 | "execution_count": 5
113 | }
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "metadata": {
119 | "id": "p7Vw_mILl1He"
120 | },
121 | "source": [
122 | "X_train = X_train.reshape(X_train.shape[0],X_train.shape[1], X_train.shape[2],3)\n",
123 | "X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2],3)"
124 | ],
125 | "execution_count": 6,
126 | "outputs": []
127 | },
128 | {
129 | "cell_type": "code",
130 | "metadata": {
131 | "id": "BwLGRng2hwoG"
132 | },
133 | "source": [
134 | "model = tf.keras.models.Sequential([ \n",
135 | " layers.Conv2D(32, (3,3), activation='relu', input_shape=(32, 32, 3)),\n",
136 | " layers.MaxPooling2D((2,2)),\n",
137 | " layers.Conv2D(64, (3,3), activation='relu'),\n",
138 | " layers.MaxPooling2D((2,2)),\n",
139 | " layers.Conv2D(128, (3,3), activation='relu'),\n",
140 | " \n",
141 | " layers.Flatten(),\n",
142 | " layers.Dense(128, activation='relu'),\n",
143 | " layers.Dense(64, activation='relu'),\n",
144 | " layers.Dense(100, activation='softmax'),\n",
145 | "])"
146 | ],
147 | "execution_count": 7,
148 | "outputs": []
149 | },
150 | {
151 | "cell_type": "code",
152 | "metadata": {
153 | "id": "umHHXStjhxuW"
154 | },
155 | "source": [
156 | "model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])"
157 | ],
158 | "execution_count": 8,
159 | "outputs": []
160 | },
161 | {
162 | "cell_type": "code",
163 | "metadata": {
164 | "colab": {
165 | "base_uri": "https://localhost:8080/"
166 | },
167 | "id": "Zp_gMvSHhy7u",
168 | "outputId": "10a38d8a-91b1-4ac5-9a35-13dbc47d0cb7"
169 | },
170 | "source": [
171 | "model.fit(X_train, Y_train, epochs=10)"
172 | ],
173 | "execution_count": 9,
174 | "outputs": [
175 | {
176 | "output_type": "stream",
177 | "name": "stdout",
178 | "text": [
179 | "Epoch 1/10\n",
180 | "1563/1563 [==============================] - 39s 7ms/step - loss: 1.6184 - accuracy: 0.4160\n",
181 | "Epoch 2/10\n",
182 | "1563/1563 [==============================] - 11s 7ms/step - loss: 1.1921 - accuracy: 0.5778\n",
183 | "Epoch 3/10\n",
184 | "1563/1563 [==============================] - 11s 7ms/step - loss: 1.0199 - accuracy: 0.6435\n",
185 | "Epoch 4/10\n",
186 | "1563/1563 [==============================] - 11s 7ms/step - loss: 0.9057 - accuracy: 0.6804\n",
187 | "Epoch 5/10\n",
188 | "1563/1563 [==============================] - 11s 7ms/step - loss: 0.8144 - accuracy: 0.7132\n",
189 | "Epoch 6/10\n",
190 | "1563/1563 [==============================] - 11s 7ms/step - loss: 0.7501 - accuracy: 0.7370\n",
191 | "Epoch 7/10\n",
192 | "1563/1563 [==============================] - 11s 7ms/step - loss: 0.6812 - accuracy: 0.7594\n",
193 | "Epoch 8/10\n",
194 | "1563/1563 [==============================] - 11s 7ms/step - loss: 0.6300 - accuracy: 0.7768\n",
195 | "Epoch 9/10\n",
196 | "1563/1563 [==============================] - 11s 7ms/step - loss: 0.5778 - accuracy: 0.7961\n",
197 | "Epoch 10/10\n",
198 | "1563/1563 [==============================] - 11s 7ms/step - loss: 0.5285 - accuracy: 0.8134\n"
199 | ]
200 | },
201 | {
202 | "output_type": "execute_result",
203 | "data": {
204 | "text/plain": [
205 | ""
206 | ]
207 | },
208 | "metadata": {},
209 | "execution_count": 9
210 | }
211 | ]
212 | },
213 | {
214 | "cell_type": "code",
215 | "metadata": {
216 | "id": "p3Djxh85h0fm"
217 | },
218 | "source": [
219 | ""
220 | ],
221 | "execution_count": null,
222 | "outputs": []
223 | }
224 | ]
225 | }
--------------------------------------------------------------------------------
/MLP vs MLP + Deep/Deep_cifar100.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Deep_cifar100.ipynb",
7 | "provenance": [],
8 | "authorship_tag": "ABX9TyPpUOLzhVphTv1r9IpEl1Pn",
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | },
18 | "accelerator": "GPU"
19 | },
20 | "cells": [
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "view-in-github",
25 | "colab_type": "text"
26 | },
27 | "source": [
28 | "
"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "metadata": {
34 | "id": "ywTl6H60hsJ1"
35 | },
36 | "source": [
37 | "import tensorflow as tf\n",
38 | "from tensorflow.keras import layers"
39 | ],
40 | "execution_count": 22,
41 | "outputs": []
42 | },
43 | {
44 | "cell_type": "code",
45 | "metadata": {
46 | "id": "2NJ230MshuWX"
47 | },
48 | "source": [
49 | "dataset = tf.keras.datasets.cifar100"
50 | ],
51 | "execution_count": 23,
52 | "outputs": []
53 | },
54 | {
55 | "cell_type": "code",
56 | "metadata": {
57 | "id": "_9artKrhhus-"
58 | },
59 | "source": [
60 | "(X_train, Y_train), (X_test, Y_test) = dataset.load_data()"
61 | ],
62 | "execution_count": 24,
63 | "outputs": []
64 | },
65 | {
66 | "cell_type": "code",
67 | "metadata": {
68 | "id": "0sSQZ8HNkE4f"
69 | },
70 | "source": [
71 | "X_train, X_test = X_train/255.0, X_test/255.0"
72 | ],
73 | "execution_count": 25,
74 | "outputs": []
75 | },
76 | {
77 | "cell_type": "code",
78 | "metadata": {
79 | "colab": {
80 | "base_uri": "https://localhost:8080/"
81 | },
82 | "id": "XtCXCoC2pqIH",
83 | "outputId": "356d88c8-0ff5-4991-90e1-464aa709fdcd"
84 | },
85 | "source": [
86 | "X_train.shape"
87 | ],
88 | "execution_count": 26,
89 | "outputs": [
90 | {
91 | "output_type": "execute_result",
92 | "data": {
93 | "text/plain": [
94 | "(50000, 32, 32, 3)"
95 | ]
96 | },
97 | "metadata": {},
98 | "execution_count": 26
99 | }
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "metadata": {
105 | "id": "p7Vw_mILl1He"
106 | },
107 | "source": [
108 | "X_train = X_train.reshape(X_train.shape[0],X_train.shape[1], X_train.shape[2],3)\n",
109 | "X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2],3)"
110 | ],
111 | "execution_count": 27,
112 | "outputs": []
113 | },
114 | {
115 | "cell_type": "code",
116 | "metadata": {
117 | "id": "BwLGRng2hwoG"
118 | },
119 | "source": [
120 | "model = tf.keras.models.Sequential([ \n",
121 | " layers.Conv2D(32, (3,3), activation='relu', input_shape=(32, 32, 3)),\n",
122 | " layers.MaxPooling2D((2,2)),\n",
123 | " layers.Conv2D(64, (3,3), activation='relu'),\n",
124 | " layers.MaxPooling2D((2,2)),\n",
125 | " layers.Conv2D(128, (3,3), activation='relu'),\n",
126 | " \n",
127 | " layers.Flatten(),\n",
128 | " layers.Dense(128, activation='relu'),\n",
129 | " layers.Dense(64, activation='relu'),\n",
130 | " layers.Dense(100, activation='softmax'),\n",
131 | "])"
132 | ],
133 | "execution_count": 32,
134 | "outputs": []
135 | },
136 | {
137 | "cell_type": "code",
138 | "metadata": {
139 | "id": "umHHXStjhxuW"
140 | },
141 | "source": [
142 | "model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])"
143 | ],
144 | "execution_count": 33,
145 | "outputs": []
146 | },
147 | {
148 | "cell_type": "code",
149 | "metadata": {
150 | "colab": {
151 | "base_uri": "https://localhost:8080/"
152 | },
153 | "id": "Zp_gMvSHhy7u",
154 | "outputId": "f585492f-e3c0-45dc-8a03-659e88faec26"
155 | },
156 | "source": [
157 | "model.fit(X_train, Y_train, epochs=10)"
158 | ],
159 | "execution_count": 34,
160 | "outputs": [
161 | {
162 | "output_type": "stream",
163 | "name": "stdout",
164 | "text": [
165 | "Epoch 1/10\n",
166 | "1563/1563 [==============================] - 13s 8ms/step - loss: 4.0354 - accuracy: 0.0749\n",
167 | "Epoch 2/10\n",
168 | "1563/1563 [==============================] - 12s 8ms/step - loss: 3.3768 - accuracy: 0.1812\n",
169 | "Epoch 3/10\n",
170 | "1563/1563 [==============================] - 12s 7ms/step - loss: 3.0443 - accuracy: 0.2418\n",
171 | "Epoch 4/10\n",
172 | "1563/1563 [==============================] - 12s 7ms/step - loss: 2.8115 - accuracy: 0.2910\n",
173 | "Epoch 5/10\n",
174 | "1563/1563 [==============================] - 12s 7ms/step - loss: 2.6284 - accuracy: 0.3298\n",
175 | "Epoch 6/10\n",
176 | "1563/1563 [==============================] - 12s 7ms/step - loss: 2.4854 - accuracy: 0.3586\n",
177 | "Epoch 7/10\n",
178 | "1563/1563 [==============================] - 12s 7ms/step - loss: 2.3576 - accuracy: 0.3847\n",
179 | "Epoch 8/10\n",
180 | "1563/1563 [==============================] - 12s 7ms/step - loss: 2.2404 - accuracy: 0.4115\n",
181 | "Epoch 9/10\n",
182 | "1563/1563 [==============================] - 12s 8ms/step - loss: 2.1448 - accuracy: 0.4315\n",
183 | "Epoch 10/10\n",
184 | "1563/1563 [==============================] - 12s 7ms/step - loss: 2.0459 - accuracy: 0.4522\n"
185 | ]
186 | },
187 | {
188 | "output_type": "execute_result",
189 | "data": {
190 | "text/plain": [
191 | ""
192 | ]
193 | },
194 | "metadata": {},
195 | "execution_count": 34
196 | }
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "metadata": {
202 | "id": "p3Djxh85h0fm"
203 | },
204 | "source": [
205 | ""
206 | ],
207 | "execution_count": null,
208 | "outputs": []
209 | }
210 | ]
211 | }
--------------------------------------------------------------------------------
/MLP vs MLP + Deep/MLP_FAMNIST.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "MLP_FAMNIST.ipynb",
7 | "provenance": [],
8 | "authorship_tag": "ABX9TyMaRRekSZ3lh78CFwrlnurY",
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | }
18 | },
19 | "cells": [
20 | {
21 | "cell_type": "markdown",
22 | "metadata": {
23 | "id": "view-in-github",
24 | "colab_type": "text"
25 | },
26 | "source": [
27 | "
"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "metadata": {
33 | "id": "ywTl6H60hsJ1"
34 | },
35 | "source": [
36 | "import tensorflow as tf\n",
37 | "from tensorflow.keras.layers import Dense"
38 | ],
39 | "execution_count": 1,
40 | "outputs": []
41 | },
42 | {
43 | "cell_type": "code",
44 | "metadata": {
45 | "id": "2NJ230MshuWX"
46 | },
47 | "source": [
48 | "dataset = tf.keras.datasets.fashion_mnist"
49 | ],
50 | "execution_count": 2,
51 | "outputs": []
52 | },
53 | {
54 | "cell_type": "code",
55 | "metadata": {
56 | "colab": {
57 | "base_uri": "https://localhost:8080/"
58 | },
59 | "id": "_9artKrhhus-",
60 | "outputId": "cc4cccec-4027-440e-fe96-98f3a9379ea4"
61 | },
62 | "source": [
63 | "(X_train, Y_train), (X_test, Y_test) = dataset.load_data()"
64 | ],
65 | "execution_count": 3,
66 | "outputs": [
67 | {
68 | "output_type": "stream",
69 | "name": "stdout",
70 | "text": [
71 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz\n",
72 | "32768/29515 [=================================] - 0s 0us/step\n",
73 | "40960/29515 [=========================================] - 0s 0us/step\n",
74 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz\n",
75 | "26427392/26421880 [==============================] - 0s 0us/step\n",
76 | "26435584/26421880 [==============================] - 0s 0us/step\n",
77 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz\n",
78 | "16384/5148 [===============================================================================================] - 0s 0us/step\n",
79 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz\n",
80 | "4423680/4422102 [==============================] - 0s 0us/step\n",
81 | "4431872/4422102 [==============================] - 0s 0us/step\n"
82 | ]
83 | }
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "metadata": {
89 | "id": "2BJvNfjthvq2"
90 | },
91 | "source": [
92 | "X_train = X_train.reshape(60000,784)"
93 | ],
94 | "execution_count": 4,
95 | "outputs": []
96 | },
97 | {
98 | "cell_type": "code",
99 | "metadata": {
100 | "id": "BwLGRng2hwoG"
101 | },
102 | "source": [
103 | "model = tf.keras.models.Sequential([\n",
104 | " Dense(64, input_dim = 784, activation='relu'),\n",
105 | " Dense(32, activation='relu'),\n",
106 | " Dense(10, activation='softmax'),\n",
107 | "])"
108 | ],
109 | "execution_count": 5,
110 | "outputs": []
111 | },
112 | {
113 | "cell_type": "code",
114 | "metadata": {
115 | "id": "umHHXStjhxuW"
116 | },
117 | "source": [
118 | "model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])"
119 | ],
120 | "execution_count": 6,
121 | "outputs": []
122 | },
123 | {
124 | "cell_type": "code",
125 | "metadata": {
126 | "colab": {
127 | "base_uri": "https://localhost:8080/"
128 | },
129 | "id": "Zp_gMvSHhy7u",
130 | "outputId": "d74fd090-0451-42e0-ed51-893d55df9568"
131 | },
132 | "source": [
133 | "model.fit(X_train, Y_train, epochs=10)"
134 | ],
135 | "execution_count": 7,
136 | "outputs": [
137 | {
138 | "output_type": "stream",
139 | "name": "stdout",
140 | "text": [
141 | "Epoch 1/10\n",
142 | "1875/1875 [==============================] - 5s 2ms/step - loss: 2.3990 - accuracy: 0.6955\n",
143 | "Epoch 2/10\n",
144 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.6420 - accuracy: 0.7861\n",
145 | "Epoch 3/10\n",
146 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.5442 - accuracy: 0.8118\n",
147 | "Epoch 4/10\n",
148 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.4904 - accuracy: 0.8282\n",
149 | "Epoch 5/10\n",
150 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.4551 - accuracy: 0.8406\n",
151 | "Epoch 6/10\n",
152 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.4282 - accuracy: 0.8480\n",
153 | "Epoch 7/10\n",
154 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.4176 - accuracy: 0.8522\n",
155 | "Epoch 8/10\n",
156 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.4125 - accuracy: 0.8538\n",
157 | "Epoch 9/10\n",
158 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.4029 - accuracy: 0.8547\n",
159 | "Epoch 10/10\n",
160 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.3943 - accuracy: 0.8609\n"
161 | ]
162 | },
163 | {
164 | "output_type": "execute_result",
165 | "data": {
166 | "text/plain": [
167 | ""
168 | ]
169 | },
170 | "metadata": {},
171 | "execution_count": 7
172 | }
173 | ]
174 | },
175 | {
176 | "cell_type": "code",
177 | "metadata": {
178 | "id": "p3Djxh85h0fm"
179 | },
180 | "source": [
181 | ""
182 | ],
183 | "execution_count": null,
184 | "outputs": []
185 | }
186 | ]
187 | }
188 |
--------------------------------------------------------------------------------
/MLP vs MLP + Deep/MLP_cifar10.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "MLP_cifar10.ipynb",
7 | "provenance": [],
8 | "authorship_tag": "ABX9TyPJ996uZ2vqwB0U5lWuUKFm",
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | }
18 | },
19 | "cells": [
20 | {
21 | "cell_type": "markdown",
22 | "metadata": {
23 | "id": "view-in-github",
24 | "colab_type": "text"
25 | },
26 | "source": [
27 | "
"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "metadata": {
33 | "id": "ywTl6H60hsJ1"
34 | },
35 | "source": [
36 | "import tensorflow as tf\n",
37 | "from tensorflow.keras.layers import Dense"
38 | ],
39 | "execution_count": 1,
40 | "outputs": []
41 | },
42 | {
43 | "cell_type": "code",
44 | "metadata": {
45 | "id": "2NJ230MshuWX"
46 | },
47 | "source": [
48 | "dataset = tf.keras.datasets.cifar10"
49 | ],
50 | "execution_count": 29,
51 | "outputs": []
52 | },
53 | {
54 | "cell_type": "code",
55 | "metadata": {
56 | "id": "_9artKrhhus-"
57 | },
58 | "source": [
59 | "(X_train, Y_train), (X_test, Y_test) = dataset.load_data()"
60 | ],
61 | "execution_count": 30,
62 | "outputs": []
63 | },
64 | {
65 | "cell_type": "code",
66 | "metadata": {
67 | "colab": {
68 | "base_uri": "https://localhost:8080/"
69 | },
70 | "id": "0sSQZ8HNkE4f",
71 | "outputId": "ecc70047-a3f1-4ef8-9bde-681bfeac2885"
72 | },
73 | "source": [
74 | "X_train.shape"
75 | ],
76 | "execution_count": 31,
77 | "outputs": [
78 | {
79 | "output_type": "execute_result",
80 | "data": {
81 | "text/plain": [
82 | "(50000, 32, 32, 3)"
83 | ]
84 | },
85 | "metadata": {},
86 | "execution_count": 31
87 | }
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "metadata": {
93 | "id": "2BJvNfjthvq2"
94 | },
95 | "source": [
96 | "X_train = X_train.reshape(50000,3072)"
97 | ],
98 | "execution_count": 32,
99 | "outputs": []
100 | },
101 | {
102 | "cell_type": "code",
103 | "metadata": {
104 | "id": "BwLGRng2hwoG"
105 | },
106 | "source": [
107 | "model = tf.keras.models.Sequential([\n",
108 | " Dense(64, input_dim = 3072, activation='relu'),\n",
109 | " Dense(32, activation='relu'),\n",
110 | " Dense(10, activation='softmax'),\n",
111 | "])"
112 | ],
113 | "execution_count": 33,
114 | "outputs": []
115 | },
116 | {
117 | "cell_type": "code",
118 | "metadata": {
119 | "id": "umHHXStjhxuW"
120 | },
121 | "source": [
122 | "model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])"
123 | ],
124 | "execution_count": 34,
125 | "outputs": []
126 | },
127 | {
128 | "cell_type": "code",
129 | "metadata": {
130 | "colab": {
131 | "base_uri": "https://localhost:8080/"
132 | },
133 | "id": "Zp_gMvSHhy7u",
134 | "outputId": "9b0188e6-fca7-42f7-85d7-b4934b8e6428"
135 | },
136 | "source": [
137 | "model.fit(X_train, Y_train, epochs=10)"
138 | ],
139 | "execution_count": 35,
140 | "outputs": [
141 | {
142 | "output_type": "stream",
143 | "name": "stdout",
144 | "text": [
145 | "Epoch 1/10\n",
146 | "1563/1563 [==============================] - 7s 4ms/step - loss: 4.0352 - accuracy: 0.0991\n",
147 | "Epoch 2/10\n",
148 | "1563/1563 [==============================] - 6s 4ms/step - loss: 2.3036 - accuracy: 0.0975\n",
149 | "Epoch 3/10\n",
150 | "1563/1563 [==============================] - 6s 4ms/step - loss: 2.5180 - accuracy: 0.0979\n",
151 | "Epoch 4/10\n",
152 | "1563/1563 [==============================] - 6s 4ms/step - loss: 2.3028 - accuracy: 0.0980\n",
153 | "Epoch 5/10\n",
154 | "1563/1563 [==============================] - 6s 4ms/step - loss: 2.3028 - accuracy: 0.0985\n",
155 | "Epoch 6/10\n",
156 | "1563/1563 [==============================] - 6s 4ms/step - loss: 2.3027 - accuracy: 0.0995\n",
157 | "Epoch 7/10\n",
158 | "1563/1563 [==============================] - 6s 4ms/step - loss: 2.3028 - accuracy: 0.0958\n",
159 | "Epoch 8/10\n",
160 | "1563/1563 [==============================] - 6s 4ms/step - loss: 2.3027 - accuracy: 0.0982\n",
161 | "Epoch 9/10\n",
162 | "1563/1563 [==============================] - 6s 4ms/step - loss: 2.3028 - accuracy: 0.1005\n",
163 | "Epoch 10/10\n",
164 | "1563/1563 [==============================] - 6s 4ms/step - loss: 2.3028 - accuracy: 0.1005\n"
165 | ]
166 | },
167 | {
168 | "output_type": "execute_result",
169 | "data": {
170 | "text/plain": [
171 | ""
172 | ]
173 | },
174 | "metadata": {},
175 | "execution_count": 35
176 | }
177 | ]
178 | },
179 | {
180 | "cell_type": "code",
181 | "metadata": {
182 | "id": "p3Djxh85h0fm"
183 | },
184 | "source": [
185 | ""
186 | ],
187 | "execution_count": null,
188 | "outputs": []
189 | }
190 | ]
191 | }
192 |
--------------------------------------------------------------------------------
/MLP vs MLP + Deep/MLP_cifar100.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "MLP_cifar100.ipynb",
7 | "provenance": [],
8 | "authorship_tag": "ABX9TyO0X/tzRAuC2TLmGoRV4aqt",
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | }
18 | },
19 | "cells": [
20 | {
21 | "cell_type": "markdown",
22 | "metadata": {
23 | "id": "view-in-github",
24 | "colab_type": "text"
25 | },
26 | "source": [
27 | "
"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "metadata": {
33 | "id": "ywTl6H60hsJ1"
34 | },
35 | "source": [
36 | "import tensorflow as tf\n",
37 | "from tensorflow.keras.layers import Dense"
38 | ],
39 | "execution_count": null,
40 | "outputs": []
41 | },
42 | {
43 | "cell_type": "code",
44 | "metadata": {
45 | "id": "2NJ230MshuWX"
46 | },
47 | "source": [
48 | "dataset = tf.keras.datasets.cifar100"
49 | ],
50 | "execution_count": null,
51 | "outputs": []
52 | },
53 | {
54 | "cell_type": "code",
55 | "metadata": {
56 | "colab": {
57 | "base_uri": "https://localhost:8080/"
58 | },
59 | "id": "_9artKrhhus-",
60 | "outputId": "68269cf1-6596-4c80-a1d0-509a624bb432"
61 | },
62 | "source": [
63 | "(X_train, Y_train), (X_test, Y_test) = dataset.load_data()"
64 | ],
65 | "execution_count": null,
66 | "outputs": [
67 | {
68 | "output_type": "stream",
69 | "name": "stdout",
70 | "text": [
71 | "Downloading data from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz\n",
72 | "169009152/169001437 [==============================] - 2s 0us/step\n",
73 | "169017344/169001437 [==============================] - 2s 0us/step\n"
74 | ]
75 | }
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "metadata": {
81 | "colab": {
82 | "base_uri": "https://localhost:8080/"
83 | },
84 | "id": "0sSQZ8HNkE4f",
85 | "outputId": "b2ea2186-90a2-467c-c228-4b293291b160"
86 | },
87 | "source": [
88 | "X_train.shape"
89 | ],
90 | "execution_count": null,
91 | "outputs": [
92 | {
93 | "output_type": "execute_result",
94 | "data": {
95 | "text/plain": [
96 | "(50000, 1)"
97 | ]
98 | },
99 | "metadata": {},
100 | "execution_count": 20
101 | }
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "metadata": {
107 | "id": "2BJvNfjthvq2"
108 | },
109 | "source": [
110 | "X_train = X_train.reshape(50000,3072)"
111 | ],
112 | "execution_count": null,
113 | "outputs": []
114 | },
115 | {
116 | "cell_type": "code",
117 | "metadata": {
118 | "id": "BwLGRng2hwoG"
119 | },
120 | "source": [
121 | "model = tf.keras.models.Sequential([\n",
122 | " Dense(64, input_dim = 3072, activation='relu'),\n",
123 | " Dense(32, activation='relu'),\n",
124 | " Dense(100, activation='softmax'),\n",
125 | "])"
126 | ],
127 | "execution_count": null,
128 | "outputs": []
129 | },
130 | {
131 | "cell_type": "code",
132 | "metadata": {
133 | "id": "umHHXStjhxuW"
134 | },
135 | "source": [
136 | "model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])"
137 | ],
138 | "execution_count": null,
139 | "outputs": []
140 | },
141 | {
142 | "cell_type": "code",
143 | "metadata": {
144 | "colab": {
145 | "base_uri": "https://localhost:8080/"
146 | },
147 | "id": "Zp_gMvSHhy7u",
148 | "outputId": "c304f1ce-173c-4212-f15f-61015871e1af"
149 | },
150 | "source": [
151 | "model.fit(X_train, Y_train, epochs=10)"
152 | ],
153 | "execution_count": null,
154 | "outputs": [
155 | {
156 | "output_type": "stream",
157 | "name": "stdout",
158 | "text": [
159 | "Epoch 1/10\n",
160 | "1563/1563 [==============================] - 7s 4ms/step - loss: 5.4727 - accuracy: 0.0083\n",
161 | "Epoch 2/10\n",
162 | "1563/1563 [==============================] - 7s 4ms/step - loss: 4.6059 - accuracy: 0.0082\n",
163 | "Epoch 3/10\n",
164 | "1563/1563 [==============================] - 7s 4ms/step - loss: 4.6059 - accuracy: 0.0085\n",
165 | "Epoch 4/10\n",
166 | "1563/1563 [==============================] - 6s 4ms/step - loss: 4.6059 - accuracy: 0.0090\n",
167 | "Epoch 5/10\n",
168 | "1563/1563 [==============================] - 7s 4ms/step - loss: 4.6059 - accuracy: 0.0090\n",
169 | "Epoch 6/10\n",
170 | "1563/1563 [==============================] - 6s 4ms/step - loss: 4.6059 - accuracy: 0.0088\n",
171 | "Epoch 7/10\n",
172 | "1563/1563 [==============================] - 7s 4ms/step - loss: 4.6059 - accuracy: 0.0087\n",
173 | "Epoch 8/10\n",
174 | "1563/1563 [==============================] - 7s 4ms/step - loss: 4.6059 - accuracy: 0.0089\n",
175 | "Epoch 9/10\n",
176 | "1563/1563 [==============================] - 6s 4ms/step - loss: 4.6059 - accuracy: 0.0091\n",
177 | "Epoch 10/10\n",
178 | "1563/1563 [==============================] - 7s 4ms/step - loss: 4.6059 - accuracy: 0.0086\n"
179 | ]
180 | },
181 | {
182 | "output_type": "execute_result",
183 | "data": {
184 | "text/plain": [
185 | ""
186 | ]
187 | },
188 | "metadata": {},
189 | "execution_count": 25
190 | }
191 | ]
192 | },
193 | {
194 | "cell_type": "code",
195 | "metadata": {
196 | "id": "p3Djxh85h0fm"
197 | },
198 | "source": [
199 | ""
200 | ],
201 | "execution_count": null,
202 | "outputs": []
203 | }
204 | ]
205 | }
206 |
--------------------------------------------------------------------------------
/MLP vs MLP + Deep/MNIST_MLP.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "MNIST_MLP.ipynb",
7 | "provenance": [],
8 | "authorship_tag": "ABX9TyNCJRR5OktKYezmExXXXVBe",
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | }
18 | },
19 | "cells": [
20 | {
21 | "cell_type": "markdown",
22 | "metadata": {
23 | "id": "view-in-github",
24 | "colab_type": "text"
25 | },
26 | "source": [
27 | "
"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "metadata": {
33 | "id": "DJxOjGD1cwJB"
34 | },
35 | "source": [
36 | "import tensorflow as tf\n",
37 | "from tensorflow.keras.layers import Dense"
38 | ],
39 | "execution_count": 7,
40 | "outputs": []
41 | },
42 | {
43 | "cell_type": "code",
44 | "metadata": {
45 | "id": "gBNUS51ScydX"
46 | },
47 | "source": [
48 | "dataset = tf.keras.datasets.mnist"
49 | ],
50 | "execution_count": 2,
51 | "outputs": []
52 | },
53 | {
54 | "cell_type": "code",
55 | "metadata": {
56 | "colab": {
57 | "base_uri": "https://localhost:8080/"
58 | },
59 | "id": "cfN9veCqc3J9",
60 | "outputId": "46484c04-fe80-4ab7-9a18-1ddb67b9643c"
61 | },
62 | "source": [
63 | "(X_train, Y_train), (X_test, Y_test) = dataset.load_data()"
64 | ],
65 | "execution_count": 5,
66 | "outputs": [
67 | {
68 | "output_type": "stream",
69 | "name": "stdout",
70 | "text": [
71 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n",
72 | "11493376/11490434 [==============================] - 0s 0us/step\n",
73 | "11501568/11490434 [==============================] - 0s 0us/step\n"
74 | ]
75 | }
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "metadata": {
81 | "id": "Z8FZ2ThpdIaW"
82 | },
83 | "source": [
84 | "X_train = X_train.reshape(60000,784)"
85 | ],
86 | "execution_count": 6,
87 | "outputs": []
88 | },
89 | {
90 | "cell_type": "code",
91 | "metadata": {
92 | "id": "3kgV5_uSdmpt"
93 | },
94 | "source": [
95 | "model = tf.keras.models.Sequential([\n",
96 | " Dense(64, input_dim = 784, activation='relu'),\n",
97 | " Dense(32, activation='relu'),\n",
98 | " Dense(10, activation='softmax'),\n",
99 | "])"
100 | ],
101 | "execution_count": 11,
102 | "outputs": []
103 | },
104 | {
105 | "cell_type": "code",
106 | "metadata": {
107 | "id": "aYEkK0tAejvc"
108 | },
109 | "source": [
110 | "model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])"
111 | ],
112 | "execution_count": 13,
113 | "outputs": []
114 | },
115 | {
116 | "cell_type": "code",
117 | "metadata": {
118 | "colab": {
119 | "base_uri": "https://localhost:8080/"
120 | },
121 | "id": "eyzB-uzvfBM0",
122 | "outputId": "25c14a2a-e810-4948-86d0-72c47c3b0cc9"
123 | },
124 | "source": [
125 | "model.fit(X_train, Y_train, epochs=10)"
126 | ],
127 | "execution_count": 14,
128 | "outputs": [
129 | {
130 | "output_type": "stream",
131 | "name": "stdout",
132 | "text": [
133 | "Epoch 1/10\n",
134 | "1875/1875 [==============================] - 4s 2ms/step - loss: 1.5332 - accuracy: 0.7429\n",
135 | "Epoch 2/10\n",
136 | "1875/1875 [==============================] - 3s 2ms/step - loss: 0.4768 - accuracy: 0.8849\n",
137 | "Epoch 3/10\n",
138 | "1875/1875 [==============================] - 3s 2ms/step - loss: 0.3283 - accuracy: 0.9181\n",
139 | "Epoch 4/10\n",
140 | "1875/1875 [==============================] - 3s 2ms/step - loss: 0.2432 - accuracy: 0.9360\n",
141 | "Epoch 5/10\n",
142 | "1875/1875 [==============================] - 3s 2ms/step - loss: 0.1883 - accuracy: 0.9493\n",
143 | "Epoch 6/10\n",
144 | "1875/1875 [==============================] - 3s 2ms/step - loss: 0.1624 - accuracy: 0.9563\n",
145 | "Epoch 7/10\n",
146 | "1875/1875 [==============================] - 3s 2ms/step - loss: 0.1422 - accuracy: 0.9610\n",
147 | "Epoch 8/10\n",
148 | "1875/1875 [==============================] - 3s 2ms/step - loss: 0.1272 - accuracy: 0.9646\n",
149 | "Epoch 9/10\n",
150 | "1875/1875 [==============================] - 3s 2ms/step - loss: 0.1214 - accuracy: 0.9672\n",
151 | "Epoch 10/10\n",
152 | "1875/1875 [==============================] - 3s 2ms/step - loss: 0.1078 - accuracy: 0.9706\n"
153 | ]
154 | },
155 | {
156 | "output_type": "execute_result",
157 | "data": {
158 | "text/plain": [
159 | ""
160 | ]
161 | },
162 | "metadata": {},
163 | "execution_count": 14
164 | }
165 | ]
166 | },
167 | {
168 | "cell_type": "code",
169 | "metadata": {
170 | "id": "XcDN_YcVfHb9"
171 | },
172 | "source": [
173 | ""
174 | ],
175 | "execution_count": null,
176 | "outputs": []
177 | }
178 | ]
179 | }
180 |
--------------------------------------------------------------------------------
/PersianRecognition/PersianRecogBot.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | from tensorflow.python.keras.models import load_model
3 | import numpy as np
4 | import telebot
5 | from telebot import types
6 | from retinaface import RetinaFace
7 | bot = telebot.TeleBot('Input Your Bot Token')
8 |
9 |
10 | @bot.message_handler(content_types=['photo'])
11 | def photo(message):
12 | print('message.photo =', message.photo)
13 | fileID = message.photo[-1].file_id
14 | print('fileID =', fileID)
15 | file_info = bot.get_file(fileID)
16 | print('file.file_path =', file_info.file_path)
17 | downloaded_file = bot.download_file(file_info.file_path)
18 |
19 | with open(f"BotPhotos/{fileID}.jpg", 'wb') as new_file:
20 | new_file.write(downloaded_file)
21 |
22 | model = load_model('persian.h5')  # the training notebook saves this as 'Persian.h5'; keep the case consistent on case-sensitive filesystems
23 |
24 | image = cv2.imread(f"BotPhotos/{fileID}.jpg")
25 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
26 | faces = RetinaFace.extract_faces(image, align=True)
27 |
28 | for face in faces:
29 | # classify each extracted, aligned face
30 | face = cv2.resize(face, (224, 224))
31 | face = face / 255
32 | face = face.reshape(1, 224, 224, 3)
33 |
34 | pred = model.predict(face)
35 |
36 | result = np.argmax(pred)
37 |
38 | if result == 1:
39 | print('One of us')
40 | bot.reply_to(message, 'One of us')
41 |
42 | elif result == 0:
43 | print('Not one of us')
44 | bot.reply_to(message, 'Not one of us')
45 |
46 |
47 | @bot.message_handler(commands=['SendPic'])
48 | def send_pic(message):
49 | bot.send_message(message.chat.id, 'Thanks')
50 |
51 |
52 | @bot.message_handler(commands=['start'])
53 | def say_hello(message):
54 | bot.send_message(message.chat.id, f'Welcome, dear {message.from_user.first_name}!')
55 |
56 |
57 | @bot.message_handler(func=lambda message: True)
58 | def send_unknown(message):
59 | bot.reply_to(message, "I don't understand what you're saying!")
60 |
61 |
62 | bot.polling()
63 |
64 |
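65 | # A minimal run sketch (assumptions: a valid bot token pasted above, the trained
66 | # 'persian.h5' model next to this script, and an existing BotPhotos/ directory
67 | # for the downloaded photos):
68 | #
69 | #   pip install pyTelegramBotAPI retina-face opencv-python tensorflow
70 | #   python PersianRecogBot.py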
--------------------------------------------------------------------------------
/PersianRecognition/Persian_Recognition.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Persian_Recognition.ipynb",
7 | "provenance": [],
8 | "mount_file_id": "1hBw6u5Z1L18jJpdfPJRyszZJPgVe7Nc3",
9 | "authorship_tag": "ABX9TyPHns0Be2TmD3bVoShV90we",
10 | "include_colab_link": true
11 | },
12 | "kernelspec": {
13 | "name": "python3",
14 | "display_name": "Python 3"
15 | },
16 | "language_info": {
17 | "name": "python"
18 | },
19 | "accelerator": "GPU"
20 | },
21 | "cells": [
22 | {
23 | "cell_type": "markdown",
24 | "metadata": {
25 | "id": "view-in-github",
26 | "colab_type": "text"
27 | },
28 | "source": [
29 | "
"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "metadata": {
35 | "id": "DhYh6V4qo3YY"
36 | },
37 | "source": [
38 | "import tensorflow as tf\n",
39 | "import numpy as np\n",
40 | "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
41 | "from tensorflow.keras.layers import Conv2D, Dense, MaxPool2D, Flatten\n",
42 | "from tensorflow.keras.optimizers import SGD, Adam\n",
43 | "from sklearn.model_selection import train_test_split"
44 | ],
45 | "execution_count": 11,
46 | "outputs": []
47 | },
48 | {
49 | "cell_type": "code",
50 | "metadata": {
51 | "id": "rs9iyqx3mIEx"
52 | },
53 | "source": [
54 | "image_data_generator = ImageDataGenerator(rescale=1./255,\n",
55 | " horizontal_flip=True,\n",
56 | " rotation_range=45,\n",
57 | " zoom_range=0.1,\n",
58 | " validation_split=0.2)\n",
59 | "width, height = 224, 224\n",
60 | "batch_size = 32"
61 | ],
62 | "execution_count": 12,
63 | "outputs": []
64 | },
65 | {
66 | "cell_type": "code",
67 | "metadata": {
68 | "colab": {
69 | "base_uri": "https://localhost:8080/"
70 | },
71 | "id": "6D_fB7ZknzvT",
72 | "outputId": "223270ce-68f6-45db-8016-4ee8b333132c"
73 | },
74 | "source": [
75 | "train_data = image_data_generator.flow_from_directory('/content/drive/MyDrive/Persian_Recognition',\n",
76 | " target_size=(width,height),\n",
77 | " class_mode='categorical',\n",
78 | " batch_size=batch_size,\n",
79 | " shuffle=True,\n",
80 | " subset='training' ,\n",
81 | ")\n",
82 | "\n",
83 | "val_data = image_data_generator.flow_from_directory('/content/drive/MyDrive/Persian_Recognition',\n",
84 | " target_size=(width,height),\n",
85 | " class_mode='categorical',\n",
86 | " batch_size=batch_size,\n",
87 | " shuffle=True,\n",
88 | " subset='validation',\n",
89 | ")"
90 | ],
91 | "execution_count": 13,
92 | "outputs": [
93 | {
94 | "output_type": "stream",
95 | "name": "stdout",
96 | "text": [
97 | "Found 2302 images belonging to 2 classes.\n",
98 | "Found 575 images belonging to 2 classes.\n"
99 | ]
100 | }
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "metadata": {
106 | "id": "QDW0aEJip_r_"
107 | },
108 | "source": [
109 | "base_model = tf.keras.applications.VGG16(\n",
110 | " input_shape=(width, height, 3),\n",
111 | " include_top=False,\n",
112 | " weights='imagenet')"
113 | ],
114 | "execution_count": 14,
115 | "outputs": []
116 | },
117 | {
118 | "cell_type": "code",
119 | "metadata": {
120 | "colab": {
121 | "base_uri": "https://localhost:8080/"
122 | },
123 | "id": "zDErTQsarUSZ",
124 | "outputId": "a2484064-a0bb-4a67-f8fa-73165be43ca3"
125 | },
126 | "source": [
127 | "base_model.summary()"
128 | ],
129 | "execution_count": 15,
130 | "outputs": [
131 | {
132 | "output_type": "stream",
133 | "name": "stdout",
134 | "text": [
135 | "Model: \"vgg16\"\n",
136 | "_________________________________________________________________\n",
137 | "Layer (type) Output Shape Param # \n",
138 | "=================================================================\n",
139 | "input_2 (InputLayer) [(None, 224, 224, 3)] 0 \n",
140 | "_________________________________________________________________\n",
141 | "block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 \n",
142 | "_________________________________________________________________\n",
143 | "block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 \n",
144 | "_________________________________________________________________\n",
145 | "block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 \n",
146 | "_________________________________________________________________\n",
147 | "block2_conv1 (Conv2D) (None, 112, 112, 128) 73856 \n",
148 | "_________________________________________________________________\n",
149 | "block2_conv2 (Conv2D) (None, 112, 112, 128) 147584 \n",
150 | "_________________________________________________________________\n",
151 | "block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 \n",
152 | "_________________________________________________________________\n",
153 | "block3_conv1 (Conv2D) (None, 56, 56, 256) 295168 \n",
154 | "_________________________________________________________________\n",
155 | "block3_conv2 (Conv2D) (None, 56, 56, 256) 590080 \n",
156 | "_________________________________________________________________\n",
157 | "block3_conv3 (Conv2D) (None, 56, 56, 256) 590080 \n",
158 | "_________________________________________________________________\n",
159 | "block3_pool (MaxPooling2D) (None, 28, 28, 256) 0 \n",
160 | "_________________________________________________________________\n",
161 | "block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 \n",
162 | "_________________________________________________________________\n",
163 | "block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 \n",
164 | "_________________________________________________________________\n",
165 | "block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808 \n",
166 | "_________________________________________________________________\n",
167 | "block4_pool (MaxPooling2D) (None, 14, 14, 512) 0 \n",
168 | "_________________________________________________________________\n",
169 | "block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 \n",
170 | "_________________________________________________________________\n",
171 | "block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 \n",
172 | "_________________________________________________________________\n",
173 | "block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808 \n",
174 | "_________________________________________________________________\n",
175 | "block5_pool (MaxPooling2D) (None, 7, 7, 512) 0 \n",
176 | "=================================================================\n",
177 | "Total params: 14,714,688\n",
178 | "Trainable params: 14,714,688\n",
179 | "Non-trainable params: 0\n",
180 | "_________________________________________________________________\n"
181 | ]
182 | }
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "metadata": {
188 | "id": "V3JqAiY9rv2T"
189 | },
190 | "source": [
191 | "for layer in base_model.layers:\n",
192 | " layer.trainable=False"
193 | ],
194 | "execution_count": 16,
195 | "outputs": []
196 | },
197 | {
198 | "cell_type": "code",
199 | "metadata": {
200 | "id": "bYHRkiX4ngZx"
201 | },
202 | "source": [
203 | "model = tf.keras.models.Sequential([\n",
204 | " base_model,\n",
205 | " Flatten(),\n",
206 | " Dense(128, activation='relu'),\n",
207 | " Dense(32, activation='relu'),\n",
208 | " Dense(2, activation='softmax') \n",
209 | "])"
210 | ],
211 | "execution_count": 17,
212 | "outputs": []
213 | },
214 | {
215 | "cell_type": "code",
216 | "metadata": {
217 | "id": "MYQbt3jbrCKp"
218 | },
219 | "source": [
220 | "model.compile(optimizer=Adam(),\n",
221 | " loss=tf.keras.losses.categorical_crossentropy,\n",
222 | " metrics='accuracy')"
223 | ],
224 | "execution_count": 18,
225 | "outputs": []
226 | },
227 | {
228 | "cell_type": "code",
229 | "metadata": {
230 | "colab": {
231 | "base_uri": "https://localhost:8080/"
232 | },
233 | "id": "KJUwTvhQuAeS",
234 | "outputId": "634f410a-c551-49f1-a589-bde5ed41d49d"
235 | },
236 | "source": [
237 | "train_data.samples"
238 | ],
239 | "execution_count": 19,
240 | "outputs": [
241 | {
242 | "output_type": "execute_result",
243 | "data": {
244 | "text/plain": [
245 | "2302"
246 | ]
247 | },
248 | "metadata": {},
249 | "execution_count": 19
250 | }
251 | ]
252 | },
253 | {
254 | "cell_type": "code",
255 | "metadata": {
256 | "colab": {
257 | "base_uri": "https://localhost:8080/"
258 | },
259 | "id": "rMr9e50hrMuw",
260 | "outputId": "23c52317-d0d7-4e8d-a91b-68fd0e26a93a"
261 | },
262 | "source": [
263 | "model.fit(train_data,\n",
264 | " steps_per_epoch=train_data.samples/batch_size,\n",
265 | " validation_data=val_data,\n",
266 | " validation_steps=val_data.samples/batch_size,\n",
267 | " epochs=10,\n",
268 | " )"
269 | ],
270 | "execution_count": 20,
271 | "outputs": [
272 | {
273 | "output_type": "stream",
274 | "name": "stdout",
275 | "text": [
276 | "Epoch 1/10\n",
277 | "71/71 [==============================] - 1183s 16s/step - loss: 0.4762 - accuracy: 0.8388 - val_loss: 0.3227 - val_accuracy: 0.8800\n",
278 | "Epoch 2/10\n",
279 | "71/71 [==============================] - 58s 806ms/step - loss: 0.1916 - accuracy: 0.9279 - val_loss: 0.2661 - val_accuracy: 0.8922\n",
280 | "Epoch 3/10\n",
281 | "71/71 [==============================] - 58s 807ms/step - loss: 0.1745 - accuracy: 0.9353 - val_loss: 0.2464 - val_accuracy: 0.9026\n",
282 | "Epoch 4/10\n",
283 | "71/71 [==============================] - 58s 805ms/step - loss: 0.1349 - accuracy: 0.9509 - val_loss: 0.4937 - val_accuracy: 0.8296\n",
284 | "Epoch 5/10\n",
285 | "71/71 [==============================] - 58s 810ms/step - loss: 0.1345 - accuracy: 0.9483 - val_loss: 0.2808 - val_accuracy: 0.8870\n",
286 | "Epoch 6/10\n",
287 | "71/71 [==============================] - 59s 813ms/step - loss: 0.1570 - accuracy: 0.9370 - val_loss: 0.3183 - val_accuracy: 0.8783\n",
288 | "Epoch 7/10\n",
289 | "71/71 [==============================] - 58s 810ms/step - loss: 0.1213 - accuracy: 0.9548 - val_loss: 0.2474 - val_accuracy: 0.8922\n",
290 | "Epoch 8/10\n",
291 | "71/71 [==============================] - 58s 808ms/step - loss: 0.1307 - accuracy: 0.9470 - val_loss: 0.2562 - val_accuracy: 0.8904\n",
292 | "Epoch 9/10\n",
293 | "71/71 [==============================] - 58s 811ms/step - loss: 0.1053 - accuracy: 0.9561 - val_loss: 0.2532 - val_accuracy: 0.8939\n",
294 | "Epoch 10/10\n",
295 | "71/71 [==============================] - 58s 812ms/step - loss: 0.1062 - accuracy: 0.9609 - val_loss: 0.3058 - val_accuracy: 0.8957\n"
296 | ]
297 | },
298 | {
299 | "output_type": "execute_result",
300 | "data": {
301 | "text/plain": [
302 | ""
303 | ]
304 | },
305 | "metadata": {},
306 | "execution_count": 20
307 | }
308 | ]
309 | },
310 | {
311 | "cell_type": "code",
312 | "metadata": {
313 | "id": "jKBKy91MzJmZ",
314 | "colab": {
315 | "base_uri": "https://localhost:8080/"
316 | },
317 | "outputId": "128b3873-731e-4e76-a8b8-4fc3ec827ed8"
318 | },
319 | "source": [
320 | "from google.colab import drive\n",
321 | "drive.mount('/content/drive')"
322 | ],
323 | "execution_count": 21,
324 | "outputs": [
325 | {
326 | "output_type": "stream",
327 | "name": "stdout",
328 | "text": [
329 | "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
330 | ]
331 | }
332 | ]
333 | },
334 | {
335 | "cell_type": "code",
336 | "metadata": {
337 | "id": "rKBeZq2hsWuh"
338 | },
339 | "source": [
340 | "model.save('Persian.h5')"
341 | ],
342 | "execution_count": 22,
343 | "outputs": []
344 | },
345 | {
346 | "cell_type": "code",
347 | "metadata": {
348 | "id": "CvtlHiWas2qY",
349 | "colab": {
350 | "base_uri": "https://localhost:8080/"
351 | },
352 | "outputId": "c5d69070-e666-471a-f84f-eda0f931e042"
353 | },
354 | "source": [
355 | "print(tf. __version__)"
356 | ],
357 | "execution_count": 23,
358 | "outputs": [
359 | {
360 | "output_type": "stream",
361 | "name": "stdout",
362 | "text": [
363 | "2.6.0\n"
364 | ]
365 | }
366 | ]
367 | },
368 | {
369 | "cell_type": "code",
370 | "metadata": {
371 | "id": "EFK2Cdmzx8LL"
372 | },
373 | "source": [
374 | ""
375 | ],
376 | "execution_count": null,
377 | "outputs": []
378 | }
379 | ]
380 | }
--------------------------------------------------------------------------------
/Pix2Pix/Inference.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorflow.keras.models import load_model
3 | import os
4 | import pathlib
5 | import time
6 | import datetime
7 | import cv2
8 |
9 | import numpy as np
10 | import argparse
11 |
12 | from matplotlib import pyplot as plt
13 | from IPython import display
14 | from tensorflow.keras.preprocessing.image import ImageDataGenerator
15 |
16 | parser = argparse.ArgumentParser()
17 | parser.add_argument("--Input", default='Input', type=str)
18 | args = parser.parse_args()
19 |
20 | model = load_model('facades.h5')
21 |
22 |
23 |
24 | IMG_WIDTH = 256
25 | IMG_HEIGHT = 256
26 |
27 |
28 | def load_images_from_folder(folder):
29 | images = []
30 | for filename in os.listdir(folder):
31 | img = tf.io.read_file(os.path.join(folder, filename))
32 | img = tf.image.decode_jpeg(img)
33 | img = tf.cast(img, tf.float32)
34 | img = (img / 127.5) - 1  # scale [0, 255] to [-1, 1], the generator's expected input range
35 | img = tf.image.resize(img, [IMG_HEIGHT, IMG_WIDTH],
36 | method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
37 | if img is not None:
38 | images.append(img)
39 | return images
40 |
41 |
42 | input_images = load_images_from_folder(args.Input)
43 |
44 | predict = []
45 | for image in input_images:
46 | # image = tf.image.convert_image_dtype(image, dtype=tf.float32, saturate=False, name=None)
47 | image = np.expand_dims(image, axis=0)
48 | prediction = model(image, training=True)  # training=True keeps dropout/batch norm active, as in the pix2pix paper
49 | prediction = prediction * 0.5 + 0.5  # map the generator output from [-1, 1] back to [0, 1]
50 | prediction = tf.image.convert_image_dtype(prediction, dtype=tf.uint8, saturate=False, name=None)
51 | prediction = np.squeeze(prediction, axis=0)
52 | predict.append(prediction)
53 |
54 | for pred in predict:
55 | plt.imshow(pred)
56 | plt.show()
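57 |
58 | # Usage sketch (assumes the trained pix2pix generator was saved as facades.h5
59 | # next to this script):
60 | #   python Inference.py --Input Input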
57 |
--------------------------------------------------------------------------------
/Pix2Pix/Input/0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/Pix2Pix/Input/0.jpg
--------------------------------------------------------------------------------
/Pix2Pix/Input/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/Pix2Pix/Input/1.jpg
--------------------------------------------------------------------------------
/Pix2Pix/Input/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/Pix2Pix/Input/2.jpg
--------------------------------------------------------------------------------
/Pix2Pix/Input/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/Pix2Pix/Input/3.jpg
--------------------------------------------------------------------------------
/Pix2Pix/Input/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/Pix2Pix/Input/4.jpg
--------------------------------------------------------------------------------
/Pix2Pix/Input/s:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image TL/Inference.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import torchvision
4 | from torch.utils.data import Dataset, DataLoader
5 | import pandas as pd
6 | from matplotlib import pyplot as plt
7 | from sklearn.model_selection import train_test_split
8 | import os
9 | import cv2
10 | import numpy as np
11 | import argparse
12 | from Model import MyModel
13 | from google_drive_downloader import GoogleDriveDownloader as gdd
14 |
15 |
16 | gdd.download_file_from_google_drive(file_id='1lIF9rV5LaBdrYNc5GN3mkzdFL2k6hlJL',
17 | dest_path='./FaceAgePredictionTL.pth')
18 |
19 | parser = argparse.ArgumentParser()
20 | parser.add_argument("--device", default='cpu', type=str)
21 | parser.add_argument("--Image", type=str)
22 | args = parser.parse_args()
23 |
24 | # hyperparameters
25 | latent_size = 10
26 | disc_inp_sz = 224*224
27 | img_size = 224
28 | epochs = 10
29 | batch_size = 32
30 | lr = 0.001
31 | width = height = 224
32 |
33 | device = torch.device(args.device)
34 | model = MyModel()
35 | model = model.to(device)
36 |
37 | model.load_state_dict(torch.load('FaceAgePredictionTL.pth'))
38 | model.eval()
39 |
40 | transform = torchvision.transforms.Compose([
41 | torchvision.transforms.ToPILImage(),
42 | torchvision.transforms.Resize((28, 28)),  # Model.py's nn.Linear(3136, ...) assumes 28x28 inputs (64 * 7 * 7)
43 | # no random flip at inference time, so predictions stay deterministic
44 | torchvision.transforms.ToTensor(),
45 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
46 | ])
47 |
48 | img = cv2.imread(args.Image)
49 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
50 | tensor = transform(img).unsqueeze(0).to(device)
51 | # tensor = tensor.permute((0, 3, 2, 1))
52 | preds = model(tensor)
53 |
54 | preds = preds.cpu().detach().numpy()
55 |
56 | output = float(preds.squeeze())  # single-output regression: the predicted age
57 |
58 | print(output)
59 |
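60 | # Example usage (a sketch; sajjad.jpg ships in this folder):
61 | #   python Inference.py --device cpu --Image sajjad.jpg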
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image TL/Model.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 |
4 | class MyModel(nn.Module):
5 | def __init__(self):
6 | super(MyModel, self).__init__()
7 | self.conv2d = nn.Sequential(
8 | nn.Conv2d(3, 32, (3, 3), stride=(1, 1), padding=(1, 1)),
9 | nn.ReLU(),
10 | nn.BatchNorm2d(32),
11 | nn.MaxPool2d((2, 2)),
12 | nn.Conv2d(32, 32, (3, 3), stride=(1, 1), padding=(1, 1)),
13 | nn.ReLU(),
14 | nn.BatchNorm2d(32),
15 | nn.MaxPool2d((2, 2)),
16 | nn.Conv2d(32, 64, (3, 3), stride=(1, 1), padding=(1, 1)),
17 | nn.ReLU(),
18 | nn.BatchNorm2d(64),
19 | )
20 |
21 | self.fc = nn.Sequential(
22 | nn.Flatten(start_dim=1),
23 | nn.Linear(3136, 256),
24 | nn.ReLU(),
25 | nn.Linear(256, 1),
26 | nn.ReLU(),
27 | )
28 |
29 | def forward(self, input_t):
30 | x = self.conv2d(input_t)
31 | # print(x.shape)
32 | return self.fc(x)
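33 |
34 | # Shape sanity check (a sketch, assuming the 28x28 inputs produced by the
35 | # Resize((28, 28)) transform in Inference.py): the two MaxPool2d layers reduce
36 | # 28 -> 14 -> 7, so the flattened feature size is 64 * 7 * 7 = 3136, matching
37 | # nn.Linear(3136, 256) above.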
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image TL/Test.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | from torch.utils.data import Dataset, DataLoader
4 | from Model import MyModel
5 | from google_drive_downloader import GoogleDriveDownloader as gdd
6 | import argparse
7 |
8 | gdd.download_file_from_google_drive(file_id='1lIF9rV5LaBdrYNc5GN3mkzdFL2k6hlJL',
9 | dest_path='./FaceAgePredictionTL.pth')
10 |
11 | parser = argparse.ArgumentParser()
12 | parser.add_argument("--device", default='cpu', type=str)
13 | parser.add_argument("--dataset", type=str)
14 | args = parser.parse_args()
15 |
16 | # hyperparameters
17 | latent_size = 10
18 | disc_inp_sz = 224*224
19 | img_size = 224
20 | epochs = 10
21 | batch_size = 32
22 | lr = 0.001
23 | width = height = 224
24 |
25 | device = torch.device(args.device)
26 |
27 | model = MyModel().to(device)
28 |
29 | model.load_state_dict(torch.load("FaceAgePredictionTL.pth", map_location=device))
30 | model.eval()
31 |
32 | # Data Preparing
33 |
34 |
35 | transform = torchvision.transforms.Compose([
36 | torchvision.transforms.ToPILImage(),
37 | torchvision.transforms.Resize((28, 28)),
38 | torchvision.transforms.RandomHorizontalFlip(),
39 | torchvision.transforms.ToTensor(),
40 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
41 | ])
42 |
43 | dataset = torchvision.datasets.ImageFolder(args.dataset, transform=transform)
44 | n = len(dataset)
45 | n_test = int(0.1 * n)
46 | test_set = torch.utils.data.Subset(dataset, range(n_test))
47 | test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=True)
48 |
49 | # loss (no optimizer is needed for evaluation)
50 | loss_function = torch.nn.MSELoss()
58 |
59 | test_loss = 0.0
60 | with torch.no_grad():
61 |     for img, label in test_loader:
62 |         img = img.to(device)
63 |         label = label.to(device)
64 |
65 |         pred = model(img)
66 |         loss = loss_function(pred.squeeze(1), label.float())
67 |
68 |         test_loss += loss.item()
69 |
70 | total_loss = test_loss / len(test_loader)
71 | print(f"test MSE loss: {total_loss}")
72 |
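73 | # Example usage (a sketch; the dataset path is hypothetical and must follow
74 | # torchvision's ImageFolder layout, as the code above assumes):
75 | #   python Test.py --device cpu --dataset ./data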
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image TL/Train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import torchvision
4 | from torch.utils.data import Dataset, DataLoader
5 | import pandas as pd
6 | from matplotlib import pyplot as plt
7 | from sklearn.model_selection import train_test_split
8 | import os
9 | import cv2
10 | import numpy as np
11 | import argparse
12 | from Model import MyModel
13 | import wandb
13 |
14 | wandb.init(project="AgePredictionTL", entity="ma_heravi")
15 |
16 | parser = argparse.ArgumentParser()
17 | parser.add_argument("--device", default='cpu', type=str)
18 | parser.add_argument("--dataset", type=str)
19 | args = parser.parse_args()
20 |
21 | # hyperparameters
22 | latent_size = 10
23 | disc_inp_sz = 224 * 224
24 | img_size = 224
25 | epochs = 10
26 | batch_size = 32
27 | lr = 0.001
28 | width = height = 224
29 | wandb.config = {
30 | "learning_rate": lr,
31 | "epochs": epochs,
32 | "batch_size": batch_size
33 | }
34 |
35 | images = []
36 | ages = []
37 |
38 | for image_name in os.listdir(args.dataset)[0:9000]:
39 | part = image_name.split('_')
40 | ages.append(int(part[0]))
41 |
42 |     image = cv2.imread(os.path.join(args.dataset, image_name))
43 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
44 | images.append(image)
45 |
46 | images = pd.Series(images, name='Images')
47 | ages = pd.Series(ages, name='Ages')
48 |
49 | df = pd.concat([images, ages], axis=1)
50 | df.head()
51 |
52 | plt.figure(figsize=(24, 8))
53 | plt.hist(df['Ages'], bins=116)
54 | plt.show()
55 |
56 | under4 = []
57 |
58 | for i in range(len(df)):
59 | if df['Ages'].iloc[i] <= 4:
60 | under4.append(df.iloc[i])
61 |
62 | under4 = pd.DataFrame(under4)
63 | under4 = under4.sample(frac=0.3)
64 |
65 | up4 = df[df['Ages'] > 4]
66 |
67 | df = pd.concat([under4, up4])
68 |
69 | df = df[df['Ages'] < 90]
70 |
71 | plt.figure(figsize=(24, 8))
72 | plt.hist(df['Ages'], bins=89)
73 | plt.show()
74 |
75 | X = []
76 | Y = []
77 |
78 | for i in range(len(df)):
79 | df['Images'].iloc[i] = cv2.resize(df['Images'].iloc[i], (width, height))
80 |
81 | X.append(df['Images'].iloc[i])
82 | Y.append(df['Ages'].iloc[i])
83 |
84 | X = np.array(X)
85 | Y = np.array(Y)
86 |
87 | X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2)
88 |
89 | # X_train = X_train.astype(np.float32)
90 |
91 | X_train = torch.tensor(X_train)
92 | Y_train = torch.tensor(Y_train)
93 | X_train = torch.permute(X_train, (0, 3, 2, 1))
94 |
95 |
96 | # from torch.utils.data import TensorDataset
97 |
98 | class MyDataset(Dataset):
99 | def __init__(self, X, y, transform=None):
100 | self.data = X
101 | self.target = y
102 | self.transform = transform
103 |
104 | def __getitem__(self, index):
105 | x = self.data[index]
106 | y = self.target[index]
107 |
108 | # Normalize your data here
109 | if self.transform:
110 | x = self.transform(x)
111 |
112 | return x, y
113 |
114 | def __len__(self):
115 | return len(self.data)
116 |
117 |
118 | transform = torchvision.transforms.Compose([
119 | torchvision.transforms.ToPILImage(),
120 | torchvision.transforms.Resize((28, 28)),
121 | torchvision.transforms.RandomHorizontalFlip(),
122 | torchvision.transforms.ToTensor(),
123 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
124 | ])
125 |
126 | dataset = MyDataset(X_train, Y_train, transform)
127 | train_data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
128 |
129 | device = torch.device(args.device)
130 |
131 | model = MyModel().to(device)
132 |
133 | model.train(True)
134 |
135 | # compile
136 | optimizer = torch.optim.Adam(model.parameters(), lr=lr)
137 | loss_function = torch.nn.MSELoss()
138 |
139 | # train
140 |
141 | for epoch in range(1, epochs + 1):
142 | train_loss = 0.0
143 | train_acc = 0.0
144 | for images, labels in train_data_loader:
145 | images = images.to(device)
146 | labels = labels.to(device)
147 | optimizer.zero_grad()
148 | # 1- forwarding
149 | preds = model(images)
150 |
151 | # 2- backwarding
152 | loss = loss_function(preds, labels.float())
153 | loss.backward()
154 |
155 | # 3- Update
156 | optimizer.step()
157 |
158 |         train_loss += loss.item()
159 |
160 | total_loss = train_loss / len(train_data_loader)
161 |
162 | print(f"Epoch: {epoch}, Loss: {total_loss}")
163 | wandb.log({'epochs': epoch,
164 | 'loss': total_loss,
165 | })
166 |
167 | # save
168 | torch.save(model.state_dict(), "FaceAgePredictionTL.pth")
169 |
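170 | # Example usage (a sketch; ages are parsed from the UTKFace-style file-name
171 | # prefix before the first "_", so a dataset like crop_part1 is assumed):
172 | #   python Train.py --device cuda --dataset crop_part1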
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image TL/pytorch_age_prediction_using_face_image_transfer_learning.py:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | import torch
5 | from torch import nn
6 | import torchvision
7 | from torch.utils.data import Dataset, DataLoader
8 | import pandas as pd
9 | from matplotlib import pyplot as plt
10 | from sklearn.model_selection import train_test_split
11 | import os
12 | import cv2
13 | import numpy as np
14 |
15 | import wandb
16 |
17 | wandb.init(project="AgePredictionTL", entity="ma_heravi")
18 |
19 | # hyperparameters
20 | latent_size = 10
21 | disc_inp_sz = 224*224
22 | img_size = 224
23 | epochs = 10
24 | batch_size = 32
25 | lr = 0.001
26 | width = height = 224
27 | wandb.config = {
28 | "learning_rate": lr,
29 | "epochs": epochs,
30 | "batch_size": batch_size
31 | }
32 |
33 | images = []
34 | ages = []
35 |
36 | for image_name in os.listdir('crop_part1')[0:9000]:
37 | part = image_name.split('_')
38 | ages.append(int(part[0]))
39 |
40 | image = cv2.imread(f'crop_part1/{image_name}')
41 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
42 | images.append(image)
43 |
44 | images = pd.Series(images, name='Images')
45 | ages = pd.Series(ages, name='Ages')
46 |
47 | df = pd.concat([images, ages], axis=1)
48 | df.head()
49 |
50 | plt.figure(figsize=(24, 8))
51 | plt.hist(df['Ages'], bins=116)
52 | plt.show()
53 |
54 | under4 = []
55 |
56 | for i in range(len(df)):
57 | if df['Ages'].iloc[i] <= 4:
58 | under4.append(df.iloc[i])
59 |
60 | under4 = pd.DataFrame(under4)
61 | under4 = under4.sample(frac=0.3)
62 |
63 | up4 = df[df['Ages'] > 4]
64 |
65 | df = pd.concat([under4, up4])
66 |
67 | df = df[df['Ages'] < 90]
68 |
69 | plt.figure(figsize=(24, 8))
70 | plt.hist(df['Ages'], bins=89)
71 | plt.show()
72 |
73 | X = []
74 | Y = []
75 |
76 | for i in range(len(df)):
77 | df['Images'].iloc[i] = cv2.resize(df['Images'].iloc[i], (width, height))
78 |
79 | X.append(df['Images'].iloc[i])
80 | Y.append(df['Ages'].iloc[i])
81 |
82 | X = np.array(X)
83 | Y = np.array(Y)
84 |
85 | X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2)
86 |
87 | # X_train = X_train.astype(np.float32)
88 |
89 | X_train = torch.tensor(X_train)
90 | Y_train = torch.tensor(Y_train)
91 | X_train = torch.permute(X_train, (0, 3, 2, 1))
92 |
93 | # from torch.utils.data import TensorDataset
94 |
95 | class MyDataset(Dataset):
96 | def __init__(self, X, y, transform=None):
97 | self.data = X
98 | self.target = y
99 | self.transform = transform
100 |
101 | def __getitem__(self, index):
102 | x = self.data[index]
103 | y = self.target[index]
104 |
105 | # Normalize your data here
106 | if self.transform:
107 | x = self.transform(x)
108 |
109 | return x, y
110 |
111 | def __len__(self):
112 | return len(self.data)
113 |
114 | transform = torchvision.transforms.Compose([
115 | torchvision.transforms.ToPILImage(),
116 | torchvision.transforms.Resize((28, 28)),
117 | torchvision.transforms.RandomHorizontalFlip(),
118 | torchvision.transforms.ToTensor(),
119 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
120 | ])
121 |
122 | dataset = MyDataset(X_train, Y_train, transform)
123 | train_data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
124 |
125 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
126 |
127 | model = torchvision.models.resnet50(pretrained=True)
128 | in_features = model.fc.in_features
129 | model.fc = nn.Linear(in_features, 1)
130 |
131 | ct = 0
132 | for child in model.children():
133 | ct += 1
134 | if ct < 7:
135 | for param in child.parameters():
136 | param.requires_grad = False
137 |
138 | model = model.to(device)
139 |
140 | # compile
141 | optimizer = torch.optim.Adam(model.parameters(), lr=lr)
142 | loss_function = torch.nn.MSELoss()
143 |
144 | # train
145 | wandb.watch(model)
146 |
147 | for epoch in range(1, epochs+1):
148 | train_loss = 0.0
149 | train_acc = 0.0
150 | for images, labels in train_data_loader:
151 | images = images.to(device)
152 | labels = labels.to(device)
153 | optimizer.zero_grad()
154 | # 1- forwarding
155 | preds = model(images)
156 | # print(labels)
157 | # print(preds)
158 | # 2- backwarding
159 | loss = loss_function(preds, labels.float())
160 | loss.backward()
161 | # 3- Update
162 | optimizer.step()
163 |
164 |         train_loss += loss.item()
165 |
166 | total_loss = train_loss / len(train_data_loader)
167 |
168 | print(f"Epoch: {epoch}, Loss: {total_loss}")
169 | wandb.log({'epochs': epoch,
170 | 'loss': total_loss,
171 | })
172 |
173 | # save
174 | torch.save(model.state_dict(), "FaceAgePredictionTL.pth")
175 |
176 | torch.save(model.state_dict(), "/content/drive/MyDrive/FaceAgePredictionTL.pth")
177 |
178 |
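179 | # Note on the freezing loop above: resnet50's top-level children are
180 | # conv1, bn1, relu, maxpool, layer1, layer2, layer3, layer4, avgpool, fc;
181 | # "ct < 7" therefore freezes everything up to and including layer2, so only
182 | # layer3, layer4 and the new fc head are fine-tuned.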
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image TL/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
3 | opencv-python
4 | numpy
5 | pandas
6 | matplotlib
7 | scikit-learn
8 | wandb
9 | googledrivedownloader
10 |
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image TL/sajjad.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/PyTorch Age Prediction Using Face Image TL/sajjad.jpg
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image/FaceAgePrediction.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/PyTorch Age Prediction Using Face Image/FaceAgePrediction.pth
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image/Inference.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import torchvision
4 | from torch.utils.data import Dataset, DataLoader
5 | import pandas as pd
6 | from matplotlib import pyplot as plt
7 | from sklearn.model_selection import train_test_split
8 | import os
9 | import cv2
10 | import numpy as np
11 | import argparse
12 | from Model import MyModel
13 |
14 | parser = argparse.ArgumentParser()
15 | parser.add_argument("--device", default='cpu', type=str)
16 | parser.add_argument("--Image", type=str)
17 | args = parser.parse_args()
18 |
19 | # hyperparameters
20 | latent_size = 10
21 | disc_inp_sz = 224*224
22 | img_size = 224
23 | epochs = 10
24 | batch_size = 32
25 | lr = 0.001
26 | width = height = 224
27 |
28 | device = torch.device(args.device)
29 | model = MyModel().to(device)
30 |
31 | model.load_state_dict(torch.load('FaceAgePrediction.pth', map_location=device))
32 | model.eval()
33 |
34 | transform = torchvision.transforms.Compose([
35 | torchvision.transforms.ToPILImage(),
36 |     torchvision.transforms.Resize((28, 28)),
37 |     # no random augmentation at inference time
38 | torchvision.transforms.ToTensor(),
39 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
40 | ])
41 |
42 | img = cv2.imread(args.Image)
43 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
44 | tensor = transform(img).unsqueeze(0).to(device)
45 | # tensor = tensor.permute((0, 3, 2, 1))
46 | preds = model(tensor)
47 |
48 | preds = preds.cpu().detach().numpy()
49 |
50 | output = float(preds.squeeze())  # single-output regression: the predicted age
51 |
52 | print(output)
53 |
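54 | # Example usage (a sketch; face.jpg is a hypothetical input path):
55 | #   python Inference.py --device cpu --Image face.jpg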
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image/Model.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 |
4 | class MyModel(nn.Module):
5 | def __init__(self):
6 | super(MyModel, self).__init__()
7 | self.conv2d = nn.Sequential(
8 | nn.Conv2d(3, 32, (3, 3), stride=(1, 1), padding=(1, 1)),
9 | nn.ReLU(),
10 | nn.BatchNorm2d(32),
11 | nn.MaxPool2d((2, 2)),
12 | nn.Conv2d(32, 32, (3, 3), stride=(1, 1), padding=(1, 1)),
13 | nn.ReLU(),
14 | nn.BatchNorm2d(32),
15 | nn.MaxPool2d((2, 2)),
16 | nn.Conv2d(32, 64, (3, 3), stride=(1, 1), padding=(1, 1)),
17 | nn.ReLU(),
18 | nn.BatchNorm2d(64),
19 | )
20 |
21 | self.fc = nn.Sequential(
22 | nn.Flatten(start_dim=1),
23 | nn.Linear(3136, 256),
24 | nn.ReLU(),
25 | nn.Linear(256, 1),
26 | nn.ReLU(),
27 | )
28 |
29 | def forward(self, input_t):
30 | x = self.conv2d(input_t)
31 | # print(x.shape)
32 | return self.fc(x)
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image/Test.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | from torch.utils.data import Dataset, DataLoader
4 | from Model import MyModel
5 |
6 | import argparse
7 |
8 | parser = argparse.ArgumentParser()
9 | parser.add_argument("--device", default='cpu', type=str)
10 | parser.add_argument("--dataset", type=str)
11 | args = parser.parse_args()
12 |
13 | # hyperparameters
14 | latent_size = 10
15 | disc_inp_sz = 224*224
16 | img_size = 224
17 | epochs = 10
18 | batch_size = 32
19 | lr = 0.001
20 | width = height = 224
21 |
22 | device = torch.device(args.device)
23 |
24 | model = MyModel().to(device)
25 |
26 | model.load_state_dict(torch.load("FaceAgePrediction.pth", map_location=device))
27 | model.eval()
28 |
29 | # Data Preparing
30 |
31 |
32 | transform = torchvision.transforms.Compose([
33 | torchvision.transforms.ToPILImage(),
34 | torchvision.transforms.Resize((28, 28)),
35 | torchvision.transforms.RandomHorizontalFlip(),
36 | torchvision.transforms.ToTensor(),
37 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
38 | ])
39 |
40 | dataset = torchvision.datasets.ImageFolder(args.dataset, transform=transform)
41 | n = len(dataset)
42 | n_test = int(0.1 * n)
43 | test_set = torch.utils.data.Subset(dataset, range(n_test))
44 | test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=True)
45 |
46 | # loss (no optimizer is needed for evaluation)
47 | loss_function = torch.nn.MSELoss()
55 |
56 | test_loss = 0.0
57 | with torch.no_grad():
58 |     for img, label in test_loader:
59 |         img = img.to(device)
60 |         label = label.to(device)
61 |
62 |         pred = model(img)
63 |         loss = loss_function(pred.squeeze(1), label.float())
64 |
65 |         test_loss += loss.item()
66 |
67 | total_loss = test_loss / len(test_loader)
68 | print(f"test MSE loss: {total_loss}")
69 |
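70 | # Example usage (a sketch; the dataset path is hypothetical and must follow
71 | # torchvision's ImageFolder layout, as the code above assumes):
72 | #   python Test.py --device cpu --dataset ./data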
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image/Train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import torchvision
4 | from torch.utils.data import Dataset, DataLoader
5 | import pandas as pd
6 | from matplotlib import pyplot as plt
7 | from sklearn.model_selection import train_test_split
8 | import os
9 | import cv2
10 | import numpy as np
11 | import argparse
12 | from Model import MyModel
13 | import wandb
13 |
14 | wandb.init(project="AgePrediction", entity="ma_heravi")
15 |
16 | parser = argparse.ArgumentParser()
17 | parser.add_argument("--device", default='cpu', type=str)
18 | parser.add_argument("--dataset", type=str)
19 | args = parser.parse_args()
20 |
21 | # hyperparameters
22 | latent_size = 10
23 | disc_inp_sz = 224 * 224
24 | img_size = 224
25 | epochs = 10
26 | batch_size = 32
27 | lr = 0.001
28 | width = height = 224
29 | wandb.config = {
30 | "learning_rate": lr,
31 | "epochs": epochs,
32 | "batch_size": batch_size
33 | }
34 |
35 | images = []
36 | ages = []
37 |
38 | for image_name in os.listdir(args.dataset)[0:9000]:
39 | part = image_name.split('_')
40 | ages.append(int(part[0]))
41 |
42 |     image = cv2.imread(os.path.join(args.dataset, image_name))
43 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
44 | images.append(image)
45 |
46 | images = pd.Series(images, name='Images')
47 | ages = pd.Series(ages, name='Ages')
48 |
49 | df = pd.concat([images, ages], axis=1)
50 | df.head()
51 |
52 | plt.figure(figsize=(24, 8))
53 | plt.hist(df['Ages'], bins=116)
54 | plt.show()
55 |
56 | under4 = []
57 |
58 | for i in range(len(df)):
59 | if df['Ages'].iloc[i] <= 4:
60 | under4.append(df.iloc[i])
61 |
62 | under4 = pd.DataFrame(under4)
63 | under4 = under4.sample(frac=0.3)
64 |
65 | up4 = df[df['Ages'] > 4]
66 |
67 | df = pd.concat([under4, up4])
68 |
69 | df = df[df['Ages'] < 90]
70 |
71 | plt.figure(figsize=(24, 8))
72 | plt.hist(df['Ages'], bins=89)
73 | plt.show()
74 |
75 | X = []
76 | Y = []
77 |
78 | for i in range(len(df)):
79 | df['Images'].iloc[i] = cv2.resize(df['Images'].iloc[i], (width, height))
80 |
81 | X.append(df['Images'].iloc[i])
82 | Y.append(df['Ages'].iloc[i])
83 |
84 | X = np.array(X)
85 | Y = np.array(Y)
86 |
87 | X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2)
88 |
89 | # X_train = X_train.astype(np.float32)
90 |
91 | X_train = torch.tensor(X_train)
92 | Y_train = torch.tensor(Y_train)
93 | X_train = torch.permute(X_train, (0, 3, 2, 1))
94 |
95 |
96 | # from torch.utils.data import TensorDataset
97 |
98 | class MyDataset(Dataset):
99 | def __init__(self, X, y, transform=None):
100 | self.data = X
101 | self.target = y
102 | self.transform = transform
103 |
104 | def __getitem__(self, index):
105 | x = self.data[index]
106 | y = self.target[index]
107 |
108 | # Normalize your data here
109 | if self.transform:
110 | x = self.transform(x)
111 |
112 | return x, y
113 |
114 | def __len__(self):
115 | return len(self.data)
116 |
117 |
118 | transform = torchvision.transforms.Compose([
119 | torchvision.transforms.ToPILImage(),
120 | torchvision.transforms.Resize((28, 28)),
121 | torchvision.transforms.RandomHorizontalFlip(),
122 | torchvision.transforms.ToTensor(),
123 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
124 | ])
125 |
126 | dataset = MyDataset(X_train, Y_train, transform)
127 | train_data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
128 |
129 | device = torch.device(args.device)
130 |
131 | model = MyModel().to(device)
132 |
133 | model.train(True)
134 |
135 |
136 | # compile
137 | optimizer = torch.optim.Adam(model.parameters(), lr=lr)
138 | loss_function = torch.nn.MSELoss()
139 |
140 | # train
141 |
142 | for epoch in range(1, epochs + 1):
143 | train_loss = 0.0
144 | train_acc = 0.0
145 | for images, labels in train_data_loader:
146 | images = images.to(device)
147 | labels = labels.to(device)
148 | optimizer.zero_grad()
149 | # 1- forwarding
150 | preds = model(images)
151 |
152 | # 2- backwarding
153 | loss = loss_function(preds, labels.float())
154 | loss.backward()
155 |
156 | # 3- Update
157 | optimizer.step()
158 |
159 |         train_loss += loss.item()
160 |
161 | total_loss = train_loss / len(train_data_loader)
162 |
163 | print(f"Epoch: {epoch}, Loss: {total_loss}")
164 | wandb.log({'epochs': epoch,
165 | 'loss': total_loss,
166 | })
167 |
168 | # save
169 | torch.save(model.state_dict(), "FaceAgePrediction.pth")
170 |
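171 | # Example usage (a sketch; assumes UTKFace-style "age_..." file names in the
172 | # dataset folder):
173 | #   python Train.py --device cuda --dataset crop_part1
174 | # The trained weights are written to FaceAgePrediction.pth for Test.py/Inference.py.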
--------------------------------------------------------------------------------
/PyTorch Age Prediction Using Face Image/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
3 | opencv-python
4 | numpy
5 | pandas
6 | matplotlib
7 | scikit-learn
8 | wandb
9 |
--------------------------------------------------------------------------------
/PyTorch Mnist Persian/3.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/PyTorch Mnist Persian/3.JPG
--------------------------------------------------------------------------------
/PyTorch Mnist Persian/Inference.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | from Model import MyModel
4 | import torchvision
5 | import torch
6 | import argparse
7 |
8 | parser = argparse.ArgumentParser()
9 | parser.add_argument("--device", default='cpu', type=str)
10 | parser.add_argument("--Image", type=str)
11 | args = parser.parse_args()
12 |
13 | disc_inp_sz = 28*28
14 |
15 | device = torch.device(args.device)
16 | model = MyModel(disc_inp_sz).to(device)
17 |
18 | model.load_state_dict(torch.load('PersianMnistFinal.pth', map_location=device))
19 | model.eval()
20 |
21 | transform = torchvision.transforms.Compose([
22 | torchvision.transforms.ToPILImage(),
23 | torchvision.transforms.Resize((28, 28)),
24 | torchvision.transforms.ToTensor(),
25 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
26 | ])
27 |
28 | img = cv2.imread(args.Image)
29 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
30 | print(img.shape)
31 | tensor = transform(img).unsqueeze(0).to(device)
32 |
33 | preds = model(tensor)
34 |
35 | preds = preds.cpu().detach().numpy()
36 |
37 | output = np.argmax(preds)
38 |
39 | print(output)
40 |
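41 | # Example usage (a sketch; 3.JPG ships in this folder):
42 | #   python Inference.py --device cpu --Image 3.JPG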
--------------------------------------------------------------------------------
/PyTorch Mnist Persian/Model.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 |
4 | class MyModel(nn.Module):
5 | def __init__(self, input_dims):
6 | super(MyModel, self).__init__()
7 | self.conv2d = nn.Sequential(
8 | nn.Conv2d(3, 32, (3, 3), stride=(1, 1), padding=(1, 1)),
9 | nn.ReLU(),
10 | nn.BatchNorm2d(32),
11 | nn.MaxPool2d((2, 2)),
12 | nn.Conv2d(32, 32, (3, 3), stride=(1, 1), padding=(1, 1)),
13 | nn.ReLU(),
14 | nn.BatchNorm2d(32),
15 | nn.MaxPool2d((2, 2)),
16 | nn.Conv2d(32, 64, (3, 3), stride=(1, 1), padding=(1, 1)),
17 | nn.ReLU(),
18 | nn.BatchNorm2d(64),
19 |         )
20 |
21 |         self.fc = nn.Sequential(
22 |             nn.Flatten(start_dim=1),
23 |             nn.Linear(64 * 7 * 7, 512),
24 |             nn.ReLU(),
25 |             nn.Linear(512, 10),
26 |             # no Softmax here: CrossEntropyLoss in Train.py expects raw logits,
27 |             # and np.argmax in Inference.py gives the same result on logits
28 |         )
30 |
31 | def forward(self, input_t):
32 | x = self.conv2d(input_t)
33 | # print(x.shape)
34 | return self.fc(x)
35 |
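36 | # Shape sanity check (a sketch, assuming the 28x28 inputs produced by the
37 | # Resize((28, 28)) transform in Train.py): the two MaxPool2d layers reduce
38 | # 28 -> 14 -> 7, so the flattened feature size is 64 * 7 * 7, matching the
39 | # first Linear layer above.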
--------------------------------------------------------------------------------
/PyTorch Mnist Persian/PersianMnistFinal.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/PyTorch Mnist Persian/PersianMnistFinal.pth
--------------------------------------------------------------------------------
/PyTorch Mnist Persian/Test.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | from torch import nn
4 | from torch.utils.data import DataLoader
5 | from torchvision.datasets import FashionMNIST
6 | from Model import MyModel
7 |
8 | import argparse
9 |
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument("--device", default='cpu', type=str)
12 | parser.add_argument("--dataset", type=str)
13 | args = parser.parse_args()
14 |
15 | # hyperparameters
16 | latent_size = 10
17 | disc_inp_sz = 28 * 28
18 | img_size = 28
19 | epochs = 10
20 | batch_size = 32
21 | lr = 0.001
22 |
23 | device = torch.device(args.device)
24 |
25 | model = MyModel(disc_inp_sz).to(device)
26 |
27 | model.load_state_dict(torch.load("PersianMnistFinal.pth", map_location=device))
28 | model.eval()
29 |
30 |
31 | def calc_acc(preds, labels):
32 | _, preds_max = torch.max(preds, 1)
33 | acc = torch.sum(preds_max == labels.data, dtype=torch.float64) / len(preds)
34 | return acc
35 |
36 | # Data Preparing
37 |
38 |
39 | transform = torchvision.transforms.Compose([
40 | torchvision.transforms.Resize((28, 28)),
41 | torchvision.transforms.ToTensor(),
42 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
43 | ])
44 |
45 | dataset = torchvision.datasets.ImageFolder(args.dataset, transform=transform)
46 | n = len(dataset)
47 | n_test = int(0.1 * n)
48 | test_set = torch.utils.data.Subset(dataset, range(n_test))  # take first 10%
49 | test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=True)
56 |
57 | test_acc = 0.0
58 | with torch.no_grad():
59 |     for img, label in test_loader:
60 |         img = img.to(device)
61 |         label = label.to(device)
62 |
63 |         pred = model(img)
64 |         test_acc += calc_acc(pred, label)
64 |
65 | total_acc = test_acc / len(test_loader)
66 | print(f"test accuracy: {total_acc}")
67 |
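68 | # Example usage (a sketch; hypothetical dataset path in ImageFolder layout,
69 | # one sub-folder per digit class):
70 | #   python Test.py --device cpu --dataset ./MNIST_persian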
--------------------------------------------------------------------------------
/PyTorch Mnist Persian/Train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | from torch import nn
4 | from torch.utils.data import DataLoader
5 | from torchvision.datasets import FashionMNIST
6 | from Model import MyModel
7 |
8 | import argparse
9 |
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument("--device", default='cpu', type=str)
12 | parser.add_argument("--dataset", type=str)
13 |
14 | args = parser.parse_args()
15 |
16 | latent_size = 10
17 | disc_inp_sz = 28*28
18 | img_size = 28
19 | epochs = 10
20 | batch_size = 32
21 | lr = 0.001
22 |
23 | device = torch.device(args.device)
24 |
25 | model = MyModel(disc_inp_sz).to(device)
26 |
27 | model.train(True)
28 |
29 |
30 |
31 | def calc_acc(preds, labels):
32 | _, preds_max = torch.max(preds, 1)
33 | acc = torch.sum(preds_max == labels.data, dtype=torch.float64) / len(preds)
34 | return acc
35 |
36 |
37 | # Data Preparing
38 |
39 | transform = torchvision.transforms.Compose([
40 | torchvision.transforms.Resize((28, 28)),
41 | torchvision.transforms.ToTensor(),
42 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
43 | ])
44 |
45 | dataset = torchvision.datasets.ImageFolder(root=args.dataset, transform=transform)
46 | train_data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
47 |
48 | # compile
49 | optimizer = torch.optim.Adam(model.parameters(), lr=lr)
50 | loss_function = torch.nn.CrossEntropyLoss()
51 |
52 | # train
53 |
54 | for epoch in range(1, epochs + 1):
55 | train_loss = 0.0
56 | train_acc = 0.0
57 | for images, labels in train_data_loader:
58 | images = images.to(device)
59 | labels = labels.to(device)
60 | optimizer.zero_grad()
61 | # 1- forwarding
62 | preds = model(images)
63 | # 2- backwarding
64 | loss = loss_function(preds, labels)
65 | loss.backward()
66 | # 3- Update
67 | optimizer.step()
68 |
69 |         train_loss += loss.item()
70 | train_acc += calc_acc(preds, labels)
71 |
72 | total_loss = train_loss / len(train_data_loader)
73 | total_acc = train_acc / len(train_data_loader)
74 |
75 | print(f"Epoch: {epoch}, Loss: {total_loss}, Acc: {total_acc}")
76 |
77 | # save
78 | torch.save(model.state_dict(), "PersianMnistFinal.pth")
79 |
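80 | # Example usage (a sketch; hypothetical dataset path in ImageFolder layout):
81 | #   python Train.py --device cuda --dataset ./MNIST_persian
82 | # The trained weights are written to PersianMnistFinal.pth for Test.py/Inference.py.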
--------------------------------------------------------------------------------
/PyTorch Mnist Persian/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
3 | opencv-python
4 | numpy
5 |
--------------------------------------------------------------------------------
/PyTorch Persian Mnist TL/3.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maheravi/Deep-Learning/8cf17a2798f8a93790647f699473955c322a47c3/PyTorch Persian Mnist TL/3.JPG
--------------------------------------------------------------------------------
/PyTorch Persian Mnist TL/Inference.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | from Model import MyModel
4 | import torchvision
5 | import torch
6 | from google_drive_downloader import GoogleDriveDownloader as gdd
7 | import argparse
8 |
9 | gdd.download_file_from_google_drive(file_id='1k_ZWFxK8-tvong8umtcTcLa8Dk1i1o6i',
10 | dest_path='./PersianMnistTL.pth')
11 |
12 |
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument("--device", default='cpu', type=str)
15 | parser.add_argument("--Image", type=str)
16 | args = parser.parse_args()
17 |
18 | disc_inp_sz = 28*28
19 |
20 | device = torch.device(args.device)
21 | model = MyModel().to(device)
22 |
23 | model.load_state_dict(torch.load('PersianMnistTL.pth', map_location=device))
24 | model.eval()
25 |
26 | transform = torchvision.transforms.Compose([
27 | torchvision.transforms.ToPILImage(),
28 | torchvision.transforms.Resize((28, 28)),
29 | torchvision.transforms.ToTensor(),
30 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
31 | ])
32 |
33 | img = cv2.imread(args.Image)
34 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
35 | print(img.shape)
36 | tensor = transform(img).unsqueeze(0).to(device)
37 |
38 | preds = model(tensor)
39 |
40 | preds = preds.cpu().detach().numpy()
41 |
42 | output = np.argmax(preds)
43 |
44 | print(output)
45 |
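46 | # Example usage (a sketch; the checkpoint is downloaded from Google Drive above
47 | # and 3.JPG ships in this folder):
48 | #   python Inference.py --device cpu --Image 3.JPG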
--------------------------------------------------------------------------------
/PyTorch Persian Mnist TL/Mnist_PersianTL.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Mnist_PersianTL.ipynb",
7 | "provenance": [],
8 | "collapsed_sections": [],
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | },
18 | "accelerator": "GPU"
19 | },
20 | "cells": [
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "view-in-github",
25 | "colab_type": "text"
26 | },
27 | "source": [
28 |         ""
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "source": [
34 | "!pip install wandb\n",
35 | "import wandb"
36 | ],
37 | "metadata": {
38 | "id": "9J7wBG6iek4r",
39 | "colab": {
40 | "base_uri": "https://localhost:8080/"
41 | },
42 | "outputId": "1b5137b0-a191-44e0-d293-7a8d944b7c6a"
43 | },
44 | "execution_count": 1,
45 | "outputs": [
46 | {
47 | "output_type": "stream",
48 | "name": "stdout",
49 | "text": [
50 | "Requirement already satisfied: wandb in /usr/local/lib/python3.7/dist-packages (0.12.9)\n",
51 | "Requirement already satisfied: GitPython>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (3.1.26)\n",
52 | "Requirement already satisfied: requests<3,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (2.23.0)\n",
53 | "Requirement already satisfied: subprocess32>=3.5.3 in /usr/local/lib/python3.7/dist-packages (from wandb) (3.5.4)\n",
54 | "Requirement already satisfied: Click!=8.0.0,>=7.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (7.1.2)\n",
55 | "Requirement already satisfied: sentry-sdk>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (1.5.2)\n",
56 | "Requirement already satisfied: shortuuid>=0.5.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (1.0.8)\n",
57 | "Requirement already satisfied: protobuf>=3.12.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (3.17.3)\n",
58 | "Requirement already satisfied: configparser>=3.8.1 in /usr/local/lib/python3.7/dist-packages (from wandb) (5.2.0)\n",
59 | "Requirement already satisfied: psutil>=5.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (5.4.8)\n",
60 | "Requirement already satisfied: docker-pycreds>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (0.4.0)\n",
61 | "Requirement already satisfied: PyYAML in /usr/local/lib/python3.7/dist-packages (from wandb) (3.13)\n",
62 | "Requirement already satisfied: pathtools in /usr/local/lib/python3.7/dist-packages (from wandb) (0.1.2)\n",
63 | "Requirement already satisfied: promise<3,>=2.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (2.3)\n",
64 | "Requirement already satisfied: yaspin>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (2.1.0)\n",
65 | "Requirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.7/dist-packages (from wandb) (2.8.2)\n",
66 | "Requirement already satisfied: six>=1.13.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (1.15.0)\n",
67 | "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.7/dist-packages (from GitPython>=1.0.0->wandb) (3.10.0.2)\n",
68 | "Requirement already satisfied: gitdb<5,>=4.0.1 in /usr/local/lib/python3.7/dist-packages (from GitPython>=1.0.0->wandb) (4.0.9)\n",
69 | "Requirement already satisfied: smmap<6,>=3.0.1 in /usr/local/lib/python3.7/dist-packages (from gitdb<5,>=4.0.1->GitPython>=1.0.0->wandb) (5.0.0)\n",
70 | "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.0.0->wandb) (3.0.4)\n",
71 | "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.0.0->wandb) (2021.10.8)\n",
72 | "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.0.0->wandb) (1.24.3)\n",
73 | "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.0.0->wandb) (2.10)\n",
74 | "Requirement already satisfied: termcolor<2.0.0,>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from yaspin>=1.0.0->wandb) (1.1.0)\n"
75 | ]
76 | }
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "source": [
82 | "wandb.init(project=\"PersianMnistTL\", entity=\"ma_heravi\")"
83 | ],
84 | "metadata": {
85 | "colab": {
86 | "base_uri": "https://localhost:8080/",
87 | "height": 93
88 | },
89 | "id": "rrxwunsaey6p",
90 | "outputId": "bf882709-0f08-4122-c29e-911fe18e2493"
91 | },
92 | "execution_count": 2,
93 | "outputs": [
94 | {
95 | "output_type": "stream",
96 | "name": "stderr",
97 | "text": [
98 | "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mma_heravi\u001b[0m (use `wandb login --relogin` to force relogin)\n"
99 | ]
100 | },
101 | {
102 | "output_type": "display_data",
103 | "data": {
104 | "text/html": [
105 |               "Syncing run spring-glitter-2 to Weights & Biases"
109 | ],
110 | "text/plain": [
111 | ""
112 | ]
113 | },
114 | "metadata": {}
115 | },
116 | {
117 | "output_type": "execute_result",
118 | "data": {
119 | "text/plain": [
120 | ""
121 | ],
122 | "text/html": [
123 | ""
124 | ]
125 | },
126 | "metadata": {},
127 | "execution_count": 2
128 | }
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "metadata": {
134 | "id": "bAltkQau1eOm"
135 | },
136 | "source": [
137 | "import torch\n",
138 | "from torch import nn\n",
139 | "import torchvision\n",
140 | "from torch.utils.data import DataLoader\n",
141 | "from torchvision.datasets import FashionMNIST"
142 | ],
143 | "execution_count": 3,
144 | "outputs": []
145 | },
146 | {
147 | "cell_type": "code",
148 | "source": [
149 | "# hyperparameters\n",
150 | "latent_size = 10\n",
151 | "disc_inp_sz = 28*28\n",
152 | "img_size = 28\n",
153 | "epochs = 10\n",
154 | "batch_size = 32\n",
155 | "lr = 0.001\n",
156 | "wandb.config = {\n",
157 | " \"learning_rate\": lr,\n",
158 | " \"epochs\": epochs,\n",
159 | " \"batch_size\": batch_size\n",
160 | "}"
161 | ],
162 | "metadata": {
163 | "id": "7fUp_60YTSUx"
164 | },
165 | "execution_count": 4,
166 | "outputs": []
167 | },
168 | {
169 | "cell_type": "code",
170 | "metadata": {
171 | "id": "ISS2sL_Y7U7u"
172 | },
173 | "source": [
174 | "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
175 | "\n",
176 | "model = torchvision.models.resnet50(pretrained=True)\n",
177 | "in_features = model.fc.in_features\n",
178 | "model.fc = nn.Linear(in_features,10)\n",
179 | "\n",
180 | "ct = 0\n",
181 | "for child in model.children():\n",
182 | " ct +=1\n",
183 | " if ct < 7:\n",
184 | " for param in child.parameters():\n",
185 | " param.requires_grad = False\n",
186 | "\n",
187 | "model = model.to(device)"
188 | ],
189 | "execution_count": 7,
190 | "outputs": []
191 | },
192 | {
193 | "cell_type": "code",
194 | "metadata": {
195 | "id": "FvjhjgX-JWGH"
196 | },
197 | "source": [
198 | "def calc_acc(preds, labels):\n",
199 | " _, preds_max = torch.max(preds, 1)\n",
200 | " acc = torch.sum(preds_max == labels.data, dtype=torch.float64) / len(preds)\n",
201 | " return acc"
202 | ],
203 | "execution_count": 8,
204 | "outputs": []
205 | },
206 | {
207 | "cell_type": "code",
208 | "source": [
209 | "from google.colab import drive\n",
210 | "drive.mount('/content/drive')"
211 | ],
212 | "metadata": {
213 | "colab": {
214 | "base_uri": "https://localhost:8080/"
215 | },
216 | "id": "JLfmH_txG51d",
217 | "outputId": "7f934ade-308f-467c-852f-9b01fe0138b3"
218 | },
219 | "execution_count": 9,
220 | "outputs": [
221 | {
222 | "output_type": "stream",
223 | "name": "stdout",
224 | "text": [
225 | "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
226 | ]
227 | }
228 | ]
229 | },
230 | {
231 | "cell_type": "code",
232 | "metadata": {
233 | "id": "fRwKLl1H8lNu"
234 | },
235 | "source": [
236 | "# Data Preparing\n",
237 | "\n",
238 | "transform = torchvision.transforms.Compose([\n",
239 | " torchvision.transforms.Resize((28, 28)),\n",
240 | " torchvision.transforms.ToTensor(),\n",
241 | " torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n",
242 | "])\n",
243 | "\n",
244 | "\n",
245 | "dataset = torchvision.datasets.ImageFolder(\"/content/drive/MyDrive/MNIST_persian\", transform=transform)\n",
246 | "train_data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)"
247 | ],
248 | "execution_count": 10,
249 | "outputs": []
250 | },
251 | {
252 | "cell_type": "code",
253 | "metadata": {
254 | "id": "_FXbdTDBC1AZ"
255 | },
256 | "source": [
257 | " # compile\n",
258 | "optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n",
259 | "loss_function = torch.nn.CrossEntropyLoss()"
260 | ],
261 | "execution_count": 11,
262 | "outputs": []
263 | },
264 | {
265 | "cell_type": "code",
266 | "metadata": {
267 | "colab": {
268 | "base_uri": "https://localhost:8080/"
269 | },
270 | "id": "08p9Lp_kD4tW",
271 | "outputId": "c14ac372-2f6b-4a21-c872-2ebe36bb14d8"
272 | },
273 | "source": [
274 | "# train\n",
275 | "wandb.watch(model)\n",
276 | "\n",
277 | "for epoch in range(1, epochs+1):\n",
278 | " train_loss = 0.0\n",
279 | " train_acc = 0.0\n",
280 | " for images, labels in train_data_loader:\n",
281 | " images = images.to(device)\n",
282 | " labels = labels.to(device)\n",
283 | " optimizer.zero_grad()\n",
284 | " # 1- forwarding\n",
285 | " preds = model(images)\n",
286 | " # 2- backwarding \n",
287 | " loss = loss_function(preds, labels)\n",
288 | " loss.backward()\n",
289 | " # 3- Update\n",
290 | " optimizer.step()\n",
291 | "\n",
292 | " train_loss += loss\n",
293 | " train_acc += calc_acc(preds, labels)\n",
294 | " \n",
295 | " total_loss = train_loss / len(train_data_loader)\n",
296 | " total_acc = train_acc / len(train_data_loader)\n",
297 | "\n",
298 | " print(f\"Epoch: {epoch}, Loss: {total_loss}, Acc: {total_acc}\")\n",
299 | " wandb.log({'epochs': epoch + 1,\n",
300 | " 'loss': total_loss,\n",
301 | " 'acc': total_acc\n",
302 | " })"
303 | ],
304 | "execution_count": 12,
305 | "outputs": [
306 | {
307 | "output_type": "stream",
308 | "name": "stdout",
309 | "text": [
310 | "Epoch: 1, Loss: 0.9431494474411011, Acc: 0.7179276315789473\n",
311 | "Epoch: 2, Loss: 0.42344897985458374, Acc: 0.881578947368421\n",
312 | "Epoch: 3, Loss: 0.313978374004364, Acc: 0.9152960526315789\n",
313 | "Epoch: 4, Loss: 0.22633948922157288, Acc: 0.9391447368421052\n",
314 | "Epoch: 5, Loss: 0.2635694146156311, Acc: 0.9226973684210525\n",
315 | "Epoch: 6, Loss: 0.21754202246665955, Acc: 0.9383223684210525\n",
316 | "Epoch: 7, Loss: 0.17074167728424072, Acc: 0.9613486842105262\n",
317 | "Epoch: 8, Loss: 0.30612286925315857, Acc: 0.9473684210526315\n",
318 | "Epoch: 9, Loss: 0.16001418232917786, Acc: 0.9720394736842105\n",
319 | "Epoch: 10, Loss: 0.11227972060441971, Acc: 0.9671052631578947\n"
320 | ]
321 | }
322 | ]
323 | },
324 | {
325 | "cell_type": "code",
326 | "metadata": {
327 | "id": "YvK38m2ALXRi"
328 | },
329 | "source": [
330 | "# save\n",
331 | "torch.save(model.state_dict(), \"/content/drive/MyDrive/PersianMnistTL.pth\")"
332 | ],
333 | "execution_count": 14,
334 | "outputs": []
335 | },
336 | {
337 | "cell_type": "code",
338 | "metadata": {
339 | "id": "xliTDWEROZ97"
340 | },
341 | "source": [
342 | ""
343 | ],
344 | "execution_count": null,
345 | "outputs": []
346 | }
347 | ]
348 | }
--------------------------------------------------------------------------------
/PyTorch Persian Mnist TL/Model.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 | import torchvision
3 |
4 |
5 | def MyModel():
6 |     """Build a ResNet-50 with a 10-class head; the first six child blocks are frozen."""
7 |     model = torchvision.models.resnet50(pretrained=True)
8 |     in_features = model.fc.in_features
9 |     model.fc = nn.Linear(in_features, 10)
10 |
11 |     ct = 0
12 |     for child in model.children():
13 |         ct += 1
14 |         if ct < 7:
15 |             for param in child.parameters():
16 |                 param.requires_grad = False
17 |
18 |     return model
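19 |
20 | # Example (a sketch): build the network and load the trained checkpoint.
21 | # Keeping MyModel as a factory that returns a plain torchvision ResNet-50
22 | # means state dicts saved from the training notebook load without any
23 | # key-prefix mismatch:
24 | #   model = MyModel()
25 | #   model.load_state_dict(torch.load("PersianMnistTL.pth", map_location="cpu"))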
--------------------------------------------------------------------------------
/PyTorch Persian Mnist TL/Test.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | from torch import nn
4 | from torch.utils.data import DataLoader
5 | from torchvision.datasets import FashionMNIST
6 | from Model import MyModel
7 | from google_drive_downloader import GoogleDriveDownloader as gdd
8 | import argparse
9 |
10 | gdd.download_file_from_google_drive(file_id='1k_ZWFxK8-tvong8umtcTcLa8Dk1i1o6i',
11 | dest_path='./PersianMnistTL.pth')
12 |
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument("--device", default='cpu', type=str)
15 | parser.add_argument("--dataset", type=str)
16 | args = parser.parse_args()
17 |
18 | # hyperparameters
19 | latent_size = 10
20 | disc_inp_sz = 28 * 28
21 | img_size = 28
22 | epochs = 10
23 | batch_size = 32
24 | lr = 0.001
25 |
26 | def calc_acc(preds, labels):
27 | _, preds_max = torch.max(preds, 1)
28 | acc = torch.sum(preds_max == labels.data, dtype=torch.float64) / len(preds)
29 | return acc
30 |
31 | # Data Preparing
32 |
33 | transform = torchvision.transforms.Compose([
34 | torchvision.transforms.Resize((28, 28)),
35 | torchvision.transforms.ToTensor(),
36 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
37 | ])
38 |
39 | dataset = torchvision.datasets.ImageFolder(args.dataset, transform=transform)
40 | n = len(dataset)  # total number of examples
41 | n_test = int(0.1 * n)  # take ~10% for test
42 | test_set = torch.utils.data.Subset(dataset, range(n_test))  # take first 10%
43 | test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=True)
44 |
45 | device = torch.device(args.device)
46 | model = MyModel().to(device)
47 |
48 | model.load_state_dict(torch.load("PersianMnistTL.pth"))
49 | model.eval()
50 |
51 | test_acc = 0.0
52 | with torch.no_grad():
53 |     for img, label in test_loader:
54 |         img = img.to(device)
55 |         label = label.to(device)
56 |
57 |         pred = model(img)
58 |         test_acc += calc_acc(pred, label)
58 |
59 | total_acc = test_acc / len(test_loader)
60 | print(f"test accuracy: {total_acc}")
61 |
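62 | # Example usage (a sketch; hypothetical dataset path in ImageFolder layout):
63 | #   python Test.py --device cpu --dataset ./MNIST_persian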
--------------------------------------------------------------------------------
/PyTorch Persian Mnist TL/Train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | from torch import nn
4 | from torch.utils.data import DataLoader
5 | from torchvision.datasets import FashionMNIST
6 | from Model import MyModel
7 |
8 | import argparse
9 |
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument("--device", default='cpu', type=str)
12 | parser.add_argument("--dataset", type=str)
13 |
14 | args = parser.parse_args()
15 |
16 | latent_size = 10
17 | disc_inp_sz = 28*28
18 | img_size = 28
19 | epochs = 10
20 | batch_size = 32
21 | lr = 0.001
22 |
23 |
24 | device = torch.device(args.device)
25 | model = MyModel().to(device)
26 |
27 |
28 | def calc_acc(preds, labels):
29 | _, preds_max = torch.max(preds, 1)
30 | acc = torch.sum(preds_max == labels.data, dtype=torch.float64) / len(preds)
31 | return acc
32 |
33 |
34 | # Data Preparing
35 |
36 | transform = torchvision.transforms.Compose([
37 | torchvision.transforms.Resize((28, 28)),
38 | torchvision.transforms.ToTensor(),
39 | torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
40 | ])
41 |
42 | dataset = torchvision.datasets.ImageFolder(root=args.dataset, transform=transform)
43 | train_data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
44 |
45 | # compile
46 | optimizer = torch.optim.Adam(model.parameters(), lr=lr)
47 | loss_function = torch.nn.CrossEntropyLoss()
48 |
49 | # train
50 |
51 | for epoch in range(1, epochs + 1):
52 | train_loss = 0.0
53 | train_acc = 0.0
54 | for images, labels in train_data_loader:
55 | images = images.to(device)
56 | labels = labels.to(device)
57 | optimizer.zero_grad()
58 | # 1- forwarding
59 | preds = model(images)
60 | # 2- backwarding
61 | loss = loss_function(preds, labels)
62 | loss.backward()
63 | # 3- Update
64 | optimizer.step()
65 |
66 |         train_loss += loss.item()
67 | train_acc += calc_acc(preds, labels)
68 |
69 | total_loss = train_loss / len(train_data_loader)
70 | total_acc = train_acc / len(train_data_loader)
71 |
72 | print(f"Epoch: {epoch}, Loss: {total_loss}, Acc: {total_acc}")
73 |
74 | # save
75 | torch.save(model.state_dict(), "PersianMnistTL.pth")
76 |
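77 | # Example usage (a sketch; hypothetical dataset path in ImageFolder layout):
78 | #   python Train.py --device cuda --dataset ./MNIST_persian
79 | # Weights are saved to PersianMnistTL.pth, which Test.py and Inference.py load.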
--------------------------------------------------------------------------------
/PyTorch Persian Mnist TL/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
3 | opencv-python
4 | numpy
5 | googledrivedownloader
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deep-Learning
2 |
3 | - ## MLP vs Deep:
4 |
5 | Here, a simple and common comparison between a multilayer perceptron and a deep neural network is presented on 4 benchmark datasets: MNIST, Fashion-MNIST, CIFAR-10, and CIFAR-100.
6 |
7 | *Results of model accuracy are tabulated below.*
8 | | Benchmark Name | MLP | Deep Neural Network |
9 | | :--- | :---: | :---: |
10 | | MNIST | 0.9706 | 0.9967 |
11 | | Fashion MNIST | 0.8609 | 0.9525 |
12 | | CIFAR-10 | 0.1005 | 0.8134 |
13 | | CIFAR-100 | 0.0086 | 0.4522 |
14 |
15 | - ## Rouhaniat Detection 👳♂️
16 |
17 | A Telegram bot that classifies images of Rouhaniat (clergy) versus other people, using TensorFlow and Keras.
18 |
19 | - ## Persian Detection:
20 |
21 | An image classifier that tries to distinguish Iranian from foreign people using TensorFlow and Keras; its results are erroneous due to a deficiency of data.
22 |
23 | - ## Szeged, Hungary Weather Prediction Using MLP:
24 |
25 | Temperature prediction for a given day of the year using a multilayer perceptron with TensorFlow and Keras.
26 |
27 | *Test data loss is 3.2215388*
28 |
29 | - ## Kaggle 17 Flowers:
30 |
31 | Classification of 17 flower species using a fine-tuned VGG16 with TensorFlow and Keras.
32 |
33 | - ## With Mask or Without Mask? 😷
34 |
35 | #### Don't forget your mask!
36 |
37 | A simple classification of wearing or forgetting a mask, using a fine-tuned ResNet50V2 with TensorFlow and Keras.
38 |
39 | Dataset prepared in: [Kaggle mask 12k dataset](https://kaggle.com/ashishjangra27/face-mask-12k-images-dataset)
40 |
41 | *Results of the confusion matrix after one epoch are tabulated below.*
42 | | Classes/Classes | WithMask | WithoutMask |
43 | | :--- | :---: | :---: |
44 | | WithMask | 474 | 9 |
45 | | WithoutMask | 10 | 499 |
46 |
47 | - ## Gender Detection 👨👩
48 |
49 | Male or female classification using a fine-tuned ResNet50V2 with TensorFlow and Keras.
50 |
51 | Dataset prepared in: [Kaggle Gender 200K dataset](https://kaggle.com/ashishjangra27/gender-recognition-200k-images-celeba)
52 |
53 | *Results of the confusion matrix after five epochs are tabulated below.*
54 | | Classes/Classes | Male | Female |
55 | | :--- | :---: | :---: |
56 | | Male | 11470 | 72 |
57 | | Female | 1284 | 7175 |
58 |
59 | - ## Age Prediction Using Image
60 |
61 | Dataset prepared in: [Kaggle UTKFace dataset](https://kaggle.com/jangedoo/utkface-new)
62 |
63 | - ## House price
64 |
65 | House price estimation from visual and textual features.
66 |
67 | Dataset prepared in: [Houses Dataset](https://github.com/emanhamed/Houses-dataset)
68 |
69 | # Deep-Learning with PyTorch
70 |
71 | - ## Fashion Mnist PyTorch
72 |
73 | Fashion-MNIST classification with PyTorch. The model is defined by fully connected linear layers. The resulting accuracy is 0.8157.
74 |
75 | - [x] Train.py
76 | - [x] Test.py (for evaluate)
77 | - [x] Model.py
78 | - [x] Inference.py
79 | - [x] requirements.txt
80 | - [x] FAMnist_PyTorch.ipynb
81 |
82 | - ## Mnist Persian
83 |
84 | A simple Persian MNIST classification using PyTorch.
85 |
86 | - [x] Train.py
87 | - [x] Test.py
88 | - [x] Model.py
89 | - [x] Inference.py
90 | - [x] requirements.txt
91 | - [x] Mnist_Persian.ipynb
92 | - [x] PersianMnistFinal.pth
93 |
94 | - ## PyTorch Age Prediction Using Face Image
95 |
96 | Dataset prepared in: [Kaggle UTKFace dataset](https://kaggle.com/jangedoo/utkface-new)
97 |
98 | - [x] Train.py
99 | - [x] Test.py
100 | - [x] Model.py
101 | - [x] Inference.py
102 | - [x] requirements.txt
103 | - [x] PyTorch_Age_Prediction_Using_Face_Image.ipynb
104 |
105 | - ## Mnist Persian TL
106 |
107 | A simple Persian MNIST classification using transfer learning from ResNet50 with PyTorch.
108 |
109 | - [x] Train.py
110 | - [x] Test.py
111 | - [x] Model.py
112 | - [x] Inference.py
113 | - [x] requirements.txt
114 | - [x] Mnist_PersianTL.ipynb
115 | - [x] Persian_Mnist_Sweep.ipynb
116 |
117 | - ## PyTorch Age Prediction Using Face Image TL
118 |
119 | PyTorch transfer learning using ResNet50 on the Kaggle face-image age prediction dataset.
120 |
121 | Dataset prepared in: [Kaggle UTKFace dataset](https://kaggle.com/jangedoo/utkface-new)
122 |
123 | - [x] Train.py
124 | - [x] Test.py
125 | - [x] Model.py
126 | - [x] Inference.py
127 | - [x] requirements.txt
128 | - [x] PyTorch_Age_Prediction_Using_Face_Image_Transfer_Learning.ipynb
129 |
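130 | Both transfer-learning projects swap the classifier head of a pretrained ResNet50 and freeze its early layers. A minimal sketch of that pattern (mirroring the Train scripts; the head size is 10 for Persian MNIST and 1 for age regression):
131 |
132 | ```python
133 | from torch import nn
134 | import torchvision
135 |
136 | model = torchvision.models.resnet50(pretrained=True)
137 | model.fc = nn.Linear(model.fc.in_features, 10)  # 10 classes; use 1 for age regression
138 |
139 | # freeze the first six child blocks (stem through layer2); only layer3,
140 | # layer4 and the new head are fine-tuned
141 | for ct, child in enumerate(model.children(), start=1):
142 |     if ct < 7:
143 |         for param in child.parameters():
144 |             param.requires_grad = False
145 | ```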
--------------------------------------------------------------------------------
/Recurrent Neural Network/Joon_Del.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Joon_Del.ipynb",
7 | "provenance": [],
8 | "mount_file_id": "1ZvD8wCMHE1XtjrtnMKk2QazHLDD_YeMy",
9 | "authorship_tag": "ABX9TyPTAb9dx78EzHY0Hf6lzoJc",
10 | "include_colab_link": true
11 | },
12 | "kernelspec": {
13 | "name": "python3",
14 | "display_name": "Python 3"
15 | },
16 | "language_info": {
17 | "name": "python"
18 | },
19 | "accelerator": "GPU"
20 | },
21 | "cells": [
22 | {
23 | "cell_type": "markdown",
24 | "metadata": {
25 | "id": "view-in-github",
26 | "colab_type": "text"
27 | },
28 | "source": [
29 |         ""
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": 1,
35 | "metadata": {
36 | "id": "-v7ARgTK2zqf"
37 | },
38 | "outputs": [],
39 | "source": [
40 | "import numpy as np\n",
41 | "import tensorflow as tf\n",
42 | "import cv2\n",
43 | "import os\n",
44 | "from tensorflow.keras.layers import Conv2D, MaxPooling2D, SimpleRNN, GRU, LSTM, Dense, Flatten, TimeDistributed\n",
45 | "from matplotlib import pyplot as plt"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "source": [
51 | "batch_size = 12\n",
52 | "epoch = 10\n",
53 | "lr = 0.001\n",
54 | "width = height = 50"
55 | ],
56 | "metadata": {
57 | "id": "SVGPUtgE3Jy-"
58 | },
59 | "execution_count": 2,
60 | "outputs": []
61 | },
62 | {
63 | "cell_type": "code",
64 | "source": [
65 | "path = \"/content/drive/MyDrive/joon_del/\"\n",
66 | "dirs = os.listdir(path)\n",
67 | "dirs"
68 | ],
69 | "metadata": {
70 | "colab": {
71 | "base_uri": "https://localhost:8080/"
72 | },
73 | "id": "7ZpKkF1WYcXp",
74 | "outputId": "c4ebd22c-1604-446a-d434-728b3d3b0088"
75 | },
76 | "execution_count": 3,
77 | "outputs": [
78 | {
79 | "output_type": "execute_result",
80 | "data": {
81 | "text/plain": [
82 | "['0', '1']"
83 | ]
84 | },
85 | "metadata": {},
86 | "execution_count": 3
87 | }
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "source": [
93 | "path = \"/content/drive/MyDrive/joon_del/\"\n",
94 | "dirs = os.listdir(path)\n",
95 | "\n",
96 | "File=[]\n",
97 | "label=[]\n",
98 | "\n",
99 | "for dir in dirs:\n",
100 | " subfolder = f\"/content/drive/MyDrive/joon_del/{dir}\"\n",
101 | " files = os.listdir(subfolder)\n",
102 | " for f in files:\n",
103 | " cap = cv2.VideoCapture(f'/content/drive/MyDrive/joon_del/{dir}/{f}')\n",
104 | " video=[]\n",
105 |         "    while True:\n",
106 |         "      ret, frame = cap.read()\n",
107 |         "      if not ret:  # stop at the end of the video\n",
108 | " break\n",
109 | " frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n",
110 | " img = cv2.resize(frame_gray, dsize=(width, height))\n",
111 | " video.append(img)\n",
112 | " File.append(video)\n",
113 | " label.append(float(dir))\n",
114 | "\n",
115 | "\n"
116 | ],
117 | "metadata": {
118 | "id": "Bx1FwV5CYu3k"
119 | },
120 | "execution_count": 4,
121 | "outputs": []
122 | },
123 | {
124 | "cell_type": "code",
125 | "source": [
126 | "longest_list = max(len(f) for f in File)\n",
127 | "longest_list"
128 | ],
129 | "metadata": {
130 | "colab": {
131 | "base_uri": "https://localhost:8080/"
132 | },
133 | "id": "_trfqRQuUAS9",
134 | "outputId": "1acf902d-67dd-4080-fd36-0acc7760e1ef"
135 | },
136 | "execution_count": 5,
137 | "outputs": [
138 | {
139 | "output_type": "execute_result",
140 | "data": {
141 | "text/plain": [
142 | "313"
143 | ]
144 | },
145 | "metadata": {},
146 | "execution_count": 5
147 | }
148 | ]
149 | },
150 | {
151 | "cell_type": "code",
152 | "source": [
153 |         "for i, f in enumerate(File):\n",
154 |         "  if len(f) < longest_list:  # pad shorter videos to the longest length\n",
155 |         "    for j in range(longest_list - len(f)):\n",
156 |         "      File[i].append(np.zeros((width, height)))  # blank grayscale frame"
157 | ],
158 | "metadata": {
159 | "id": "0oBAwR7pUSeW"
160 | },
161 | "execution_count": 6,
162 | "outputs": []
163 | },
164 | {
165 | "cell_type": "code",
166 | "source": [
167 | "File = np.asarray(File)\n",
168 | "label = np.asarray(label)\n",
169 | "File = File[..., np.newaxis]\n",
170 | "label = label[..., np.newaxis]"
171 | ],
172 | "metadata": {
173 | "id": "B55aisduU-1P"
174 | },
175 | "execution_count": 7,
176 | "outputs": []
177 | },
178 | {
179 | "cell_type": "code",
180 | "source": [
181 | "from sklearn.model_selection import train_test_split\n",
182 | "X_train, X_test, y_train, y_test = train_test_split(File, label, test_size=0.2, random_state=42)"
183 | ],
184 | "metadata": {
185 | "id": "ncDGNRqkUlNF"
186 | },
187 | "execution_count": 8,
188 | "outputs": []
189 | },
190 | {
191 | "cell_type": "code",
192 | "source": [
193 | "print('train data size:', X_train.shape)\n",
194 | "print('train label size:', y_train.shape)\n"
195 | ],
196 | "metadata": {
197 | "colab": {
198 | "base_uri": "https://localhost:8080/"
199 | },
200 | "id": "AJ3P8GJQUxzl",
201 | "outputId": "dd2d99c2-f66c-47c9-ee86-ae3bc5ff9258"
202 | },
203 | "execution_count": 9,
204 | "outputs": [
205 | {
206 | "output_type": "stream",
207 | "name": "stdout",
208 | "text": [
209 | "train data size: (50, 313, 50, 50, 1)\n",
210 | "train label size: (50, 1)\n"
211 | ]
212 | }
213 | ]
214 | },
215 | {
216 | "cell_type": "code",
217 | "source": [
218 | "crnn = tf.keras.models.Sequential([\n",
219 | " # CNN\n",
220 | " TimeDistributed(Conv2D(16, (3, 3), activation=\"relu\", input_shape=(None, width, height, 1))),\n",
221 | " TimeDistributed(MaxPooling2D(pool_size=(2, 2))),\n",
222 | " TimeDistributed(Conv2D(8, (3, 3), activation=\"relu\")),\n",
223 | " TimeDistributed(MaxPooling2D(pool_size=(2, 2))),\n",
224 | " TimeDistributed(Conv2D(4, (3, 3), activation=\"relu\")),\n",
225 | " TimeDistributed(MaxPooling2D(pool_size=(2, 2))),\n",
226 | " TimeDistributed(Conv2D(2, (3, 3), activation=\"relu\")),\n",
227 | " TimeDistributed(MaxPooling2D(pool_size=(2, 2))),\n",
228 | "\n",
229 | " TimeDistributed(Flatten()),\n",
230 | "\n",
231 | " # RNN\n",
232 | " SimpleRNN(50),\n",
233 | " Dense(2, activation=\"softmax\")\n",
234 | "])"
235 | ],
236 | "metadata": {
237 | "id": "iuRXn8gUU6jt"
238 | },
239 | "execution_count": 10,
240 | "outputs": []
241 | },
242 | {
243 | "cell_type": "code",
244 | "source": [
245 | "crnn.compile(optimizer=tf.keras.optimizers.Adam(),\n",
246 | " loss=tf.keras.losses.sparse_categorical_crossentropy,\n",
247 | " metrics=[\"accuracy\"])\n",
248 | "\n",
249 | "crnn.fit(X_train, y_train, batch_size=batch_size, epochs=epoch)"
250 | ],
251 | "metadata": {
252 | "colab": {
253 | "base_uri": "https://localhost:8080/"
254 | },
255 | "id": "EXwysUTpXF3y",
256 | "outputId": "b6215ff8-6c11-4c46-e9b9-a060613689e5"
257 | },
258 | "execution_count": 11,
259 | "outputs": [
260 | {
261 | "output_type": "stream",
262 | "name": "stdout",
263 | "text": [
264 | "Epoch 1/10\n",
265 | "5/5 [==============================] - 16s 427ms/step - loss: 0.6775 - accuracy: 0.5000\n",
266 | "Epoch 2/10\n",
267 | "5/5 [==============================] - 2s 363ms/step - loss: 0.7626 - accuracy: 0.4400\n",
268 | "Epoch 3/10\n",
269 | "5/5 [==============================] - 2s 378ms/step - loss: 0.7609 - accuracy: 0.4400\n",
270 | "Epoch 4/10\n",
271 | "5/5 [==============================] - 2s 394ms/step - loss: 0.6854 - accuracy: 0.5600\n",
272 | "Epoch 5/10\n",
273 | "5/5 [==============================] - 2s 366ms/step - loss: 0.6975 - accuracy: 0.5200\n",
274 | "Epoch 6/10\n",
275 | "5/5 [==============================] - 2s 339ms/step - loss: 0.6957 - accuracy: 0.5400\n",
276 | "Epoch 7/10\n",
277 | "5/5 [==============================] - 2s 351ms/step - loss: 0.6902 - accuracy: 0.5000\n",
278 | "Epoch 8/10\n",
279 | "5/5 [==============================] - 2s 386ms/step - loss: 0.6902 - accuracy: 0.5600\n",
280 | "Epoch 9/10\n",
281 | "5/5 [==============================] - 2s 365ms/step - loss: 0.6909 - accuracy: 0.5400\n",
282 | "Epoch 10/10\n",
283 | "5/5 [==============================] - 2s 360ms/step - loss: 0.6958 - accuracy: 0.5400\n"
284 | ]
285 | },
286 | {
287 | "output_type": "execute_result",
288 | "data": {
289 | "text/plain": [
290 | ""
291 | ]
292 | },
293 | "metadata": {},
294 | "execution_count": 11
295 | }
296 | ]
297 | },
298 | {
299 | "cell_type": "code",
300 | "source": [
301 | "crnn_eval = crnn.evaluate(X_test, y_test)\n"
302 | ],
303 | "metadata": {
304 | "colab": {
305 | "base_uri": "https://localhost:8080/"
306 | },
307 | "id": "bYtl8BdoXbyN",
308 | "outputId": "b16e5ad7-8692-4a0b-f022-913fb30cc5fb"
309 | },
310 | "execution_count": 12,
311 | "outputs": [
312 | {
313 | "output_type": "stream",
314 | "name": "stdout",
315 | "text": [
316 | "1/1 [==============================] - 1s 782ms/step - loss: 0.7065 - accuracy: 0.4615\n"
317 | ]
318 | }
319 | ]
320 | },
321 | {
322 | "cell_type": "code",
323 | "source": [
324 | ""
325 | ],
326 | "metadata": {
327 | "id": "YSfXNPMCYArl"
328 | },
329 | "execution_count": null,
330 | "outputs": []
331 | }
332 | ]
333 | }
--------------------------------------------------------------------------------
/Recurrent Neural Network/Joon_Del_Update.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "view-in-github",
7 | "colab_type": "text"
8 | },
9 | "source": [
10 |         ""
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 1,
16 | "metadata": {
17 | "id": "-v7ARgTK2zqf"
18 | },
19 | "outputs": [],
20 | "source": [
21 | "import numpy as np\n",
22 | "import tensorflow as tf\n",
23 | "import cv2\n",
24 | "import os\n",
25 | "from tensorflow.keras.layers import Conv2D, MaxPooling2D, SimpleRNN, GRU, LSTM, Dense, Flatten, TimeDistributed\n",
26 | "from matplotlib import pyplot as plt"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": 20,
32 | "metadata": {
33 | "id": "SVGPUtgE3Jy-"
34 | },
35 | "outputs": [],
36 | "source": [
37 | "batch_size = 12\n",
38 | "epoch = 10\n",
39 | "lr = 0.001\n",
40 | "width = height = 50"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": 21,
46 | "metadata": {
47 | "colab": {
48 | "base_uri": "https://localhost:8080/"
49 | },
50 | "id": "7ZpKkF1WYcXp",
51 | "outputId": "d6fdf2ac-6d5f-4c25-aa8e-404ca491d18c"
52 | },
53 | "outputs": [
54 | {
55 | "output_type": "execute_result",
56 | "data": {
57 | "text/plain": [
58 | "['0', '1']"
59 | ]
60 | },
61 | "metadata": {},
62 | "execution_count": 21
63 | }
64 | ],
65 | "source": [
66 | "path = \"/content/drive/MyDrive/joon_del/\"\n",
67 | "dirs = os.listdir(path)\n",
68 | "dirs"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": 22,
74 | "metadata": {
75 | "id": "Bx1FwV5CYu3k"
76 | },
77 | "outputs": [],
78 | "source": [
79 | "path = \"/content/drive/MyDrive/joon_del/\"\n",
80 | "dirs = os.listdir(path)\n",
81 | "\n",
82 | "File=[]\n",
83 | "label=[]\n",
84 | "\n",
85 | "for dir in dirs:\n",
86 | " subfolder = f\"/content/drive/MyDrive/joon_del/{dir}\"\n",
87 | " files = os.listdir(subfolder)\n",
88 | " for f in files:\n",
89 | " cap = cv2.VideoCapture(f'/content/drive/MyDrive/joon_del/{dir}/{f}')\n",
90 | " video=[]\n",
91 |         "    while True:\n",
92 |         "      ret, frame = cap.read()\n",
93 |         "      if not ret:  # stop at the end of the video\n",
94 | " break\n",
95 | " frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
96 | " img = cv2.resize(frame, dsize=(width, height))\n",
97 | " img = img / 255.\n",
98 | " video.append(img)\n",
99 | " File.append(video)\n",
100 | " label.append(float(dir))\n",
101 | "\n",
102 | "\n"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": 23,
108 | "metadata": {
109 | "colab": {
110 | "base_uri": "https://localhost:8080/"
111 | },
112 | "id": "_trfqRQuUAS9",
113 | "outputId": "84575505-f7a5-43f7-e3e8-2ec4d7a33f75"
114 | },
115 | "outputs": [
116 | {
117 | "output_type": "execute_result",
118 | "data": {
119 | "text/plain": [
120 | "313"
121 | ]
122 | },
123 | "metadata": {},
124 | "execution_count": 23
125 | }
126 | ],
127 | "source": [
128 | "longest_list = max(len(f) for f in File)\n",
129 | "longest_list"
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": 24,
135 | "metadata": {
136 | "id": "0oBAwR7pUSeW"
137 | },
138 | "outputs": [],
139 | "source": [
140 |         "for i, f in enumerate(File):\n",
141 |         "  if len(f) < longest_list:  # pad shorter videos to the longest length\n",
142 |         "    for j in range(longest_list - len(f)):\n",
143 |         "      File[i].append(np.zeros((width, height, 3)))  # blank RGB frame"
144 | ]
145 | },
146 | {
147 | "cell_type": "code",
148 | "source": [
149 | "len(File)"
150 | ],
151 | "metadata": {
152 | "colab": {
153 | "base_uri": "https://localhost:8080/"
154 | },
155 | "id": "jisAymzW8uom",
156 | "outputId": "54ef3707-d5f2-4500-f6e0-26741579af8f"
157 | },
158 | "execution_count": 25,
159 | "outputs": [
160 | {
161 | "output_type": "execute_result",
162 | "data": {
163 | "text/plain": [
164 | "68"
165 | ]
166 | },
167 | "metadata": {},
168 | "execution_count": 25
169 | }
170 | ]
171 | },
172 | {
173 | "cell_type": "code",
174 | "execution_count": 26,
175 | "metadata": {
176 | "id": "B55aisduU-1P"
177 | },
178 | "outputs": [],
179 | "source": [
180 | "newFile = np.array(File)\n",
181 | "label = np.array(label)\n",
182 | "label = label[..., np.newaxis]"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": 27,
188 | "metadata": {
189 | "id": "ncDGNRqkUlNF"
190 | },
191 | "outputs": [],
192 | "source": [
193 | "from sklearn.model_selection import train_test_split\n",
194 | "X_train, X_test, y_train, y_test = train_test_split(newFile, label, test_size=0.2, random_state=42)"
195 | ]
196 | },
197 | {
198 | "cell_type": "code",
199 | "execution_count": 28,
200 | "metadata": {
201 | "colab": {
202 | "base_uri": "https://localhost:8080/"
203 | },
204 | "id": "AJ3P8GJQUxzl",
205 | "outputId": "f16fff33-ed97-4863-c221-cd4e07a37234"
206 | },
207 | "outputs": [
208 | {
209 | "output_type": "stream",
210 | "name": "stdout",
211 | "text": [
212 | "train data size: (54, 313, 50, 50, 3)\n",
213 | "train label size: (54, 1)\n"
214 | ]
215 | }
216 | ],
217 | "source": [
218 | "print('train data size:', X_train.shape)\n",
219 | "print('train label size:', y_train.shape)\n"
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": 29,
225 | "metadata": {
226 | "id": "iuRXn8gUU6jt"
227 | },
228 | "outputs": [],
229 | "source": [
230 | "crnn = tf.keras.models.Sequential([\n",
231 | " # CNN\n",
232 | " TimeDistributed(Conv2D(16, (3, 3), activation=\"relu\", input_shape=(None, width, height, 3))),\n",
233 | " TimeDistributed(MaxPooling2D(pool_size=(2, 2))),\n",
234 | "\n",
235 | " TimeDistributed(Flatten()),\n",
236 | "\n",
237 | " # RNN\n",
238 | " SimpleRNN(50, return_sequences=True),\n",
239 | " SimpleRNN(20),\n",
240 | " Dense(2, activation=\"softmax\")\n",
241 | "])"
242 | ]
243 | },
244 | {
245 | "cell_type": "code",
246 | "execution_count": 30,
247 | "metadata": {
248 | "colab": {
249 | "base_uri": "https://localhost:8080/"
250 | },
251 | "id": "EXwysUTpXF3y",
252 | "outputId": "02ba89d7-b0af-4791-d2a4-58a782f1e4ad"
253 | },
254 | "outputs": [
255 | {
256 | "output_type": "stream",
257 | "name": "stdout",
258 | "text": [
259 | "Epoch 1/10\n",
260 | "5/5 [==============================] - 9s 1s/step - loss: 0.7511 - accuracy: 0.5185\n",
261 | "Epoch 2/10\n",
262 | "5/5 [==============================] - 6s 1s/step - loss: 0.6312 - accuracy: 0.7593\n",
263 | "Epoch 3/10\n",
264 | "5/5 [==============================] - 7s 1s/step - loss: 0.8721 - accuracy: 0.3148\n",
265 | "Epoch 4/10\n",
266 | "5/5 [==============================] - 6s 1s/step - loss: 0.7566 - accuracy: 0.4259\n",
267 | "Epoch 5/10\n",
268 | "5/5 [==============================] - 6s 1s/step - loss: 0.7430 - accuracy: 0.5185\n",
269 | "Epoch 6/10\n",
270 | "5/5 [==============================] - 6s 1s/step - loss: 0.6940 - accuracy: 0.5926\n",
271 | "Epoch 7/10\n",
272 | "5/5 [==============================] - 6s 1s/step - loss: 0.7414 - accuracy: 0.5370\n",
273 | "Epoch 8/10\n",
274 | "5/5 [==============================] - 6s 1s/step - loss: 0.6902 - accuracy: 0.5556\n",
275 | "Epoch 9/10\n",
276 | "5/5 [==============================] - 6s 1s/step - loss: 0.6945 - accuracy: 0.6111\n",
277 | "Epoch 10/10\n",
278 | "5/5 [==============================] - 6s 1s/step - loss: 0.7772 - accuracy: 0.4630\n"
279 | ]
280 | },
281 | {
282 | "output_type": "execute_result",
283 | "data": {
284 | "text/plain": [
285 | ""
286 | ]
287 | },
288 | "metadata": {},
289 | "execution_count": 30
290 | }
291 | ],
292 | "source": [
293 | "crnn.compile(optimizer=tf.keras.optimizers.Adam(),\n",
294 | " loss=tf.keras.losses.sparse_categorical_crossentropy,\n",
295 | " metrics=[\"accuracy\"])\n",
296 | "\n",
297 | "crnn.fit(X_train, y_train, batch_size=batch_size, epochs=epoch)"
298 | ]
299 | },
300 | {
301 | "cell_type": "code",
302 | "execution_count": 18,
303 | "metadata": {
304 | "colab": {
305 | "base_uri": "https://localhost:8080/"
306 | },
307 | "id": "bYtl8BdoXbyN",
308 | "outputId": "cdeb0336-53cb-4128-9d4e-11995f2954f0"
309 | },
310 | "outputs": [
311 | {
312 | "output_type": "stream",
313 | "name": "stdout",
314 | "text": [
315 | "1/1 [==============================] - 1s 643ms/step - loss: 0.8415 - accuracy: 0.2143\n"
316 | ]
317 | }
318 | ],
319 | "source": [
320 | "crnn_eval = crnn.evaluate(X_test, y_test)\n"
321 | ]
322 | },
323 | {
324 | "cell_type": "markdown",
325 | "source": [
326 |         "# Inference"
327 | ],
328 | "metadata": {
329 | "id": "Sb8hJ6cmHpLV"
330 | }
331 | },
332 | {
333 | "cell_type": "code",
334 | "execution_count": 43,
335 | "metadata": {
336 | "id": "YSfXNPMCYArl"
337 | },
338 | "outputs": [],
339 | "source": [
340 |         "cap = cv2.VideoCapture('/content/drive/MyDrive/joon_del/1/012.mp4')\n",
341 |         "video = []\n",
342 |         "while True:\n",
343 |         "  ret, frame = cap.read()\n",
344 |         "  if not ret:  # stop at the end of the video\n",
345 | " break\n",
346 | " frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
347 | " img = cv2.resize(frame, dsize=(width, height))\n",
348 | " img = img / 255.\n",
349 | " video.append(img)\n"
350 | ]
351 | },
352 | {
353 | "cell_type": "code",
354 | "source": [
355 |         "if len(video) < longest_list:  # pad to the training sequence length\n",
356 | " for j in range(longest_list-len(video)):\n",
357 | " video.append(np.zeros((width, height, 3)))"
358 | ],
359 | "metadata": {
360 | "id": "gL-ifbdXIFNs"
361 | },
362 | "execution_count": 44,
363 | "outputs": []
364 | },
365 | {
366 | "cell_type": "code",
367 | "source": [
368 | "Video = np.array(video)\n",
369 | "Video = Video[np.newaxis, ...]"
370 | ],
371 | "metadata": {
372 | "id": "qP-wH18OIZnM"
373 | },
374 | "execution_count": 45,
375 | "outputs": []
376 | },
377 | {
378 | "cell_type": "code",
379 | "source": [
380 | "pred = crnn.predict(Video)\n",
381 | "prediction = np.argmax(pred)\n",
382 |         "if prediction == 1:\n",
383 |         "  print('Khodeshe')  # Persian: it's him\n",
384 |         "else:\n",
385 |         "  print('khodesh nist')  # Persian: it's not him"
386 | ],
387 | "metadata": {
388 | "colab": {
389 | "base_uri": "https://localhost:8080/"
390 | },
391 | "id": "7_Tnio9eIeKM",
392 | "outputId": "5a472e0d-82ca-4809-e130-2e15d9b50400"
393 | },
394 | "execution_count": 46,
395 | "outputs": [
396 | {
397 | "output_type": "stream",
398 | "name": "stdout",
399 | "text": [
400 | "Khodeshe\n"
401 | ]
402 | }
403 | ]
404 | },
405 | {
406 | "cell_type": "code",
407 | "source": [
408 | ""
409 | ],
410 | "metadata": {
411 | "id": "NmrNT4djJQs8"
412 | },
413 | "execution_count": null,
414 | "outputs": []
415 | }
416 | ],
417 | "metadata": {
418 | "accelerator": "GPU",
419 | "colab": {
420 | "name": "Joon_Del_Update.ipynb",
421 | "provenance": [],
422 | "mount_file_id": "114Uv9Ob_xRzJWkBjymVxQs2d0m0ksTxq",
423 | "authorship_tag": "ABX9TyPWcM0nfpEExiCTXMQ37jPb",
424 | "include_colab_link": true
425 | },
426 | "kernelspec": {
427 | "display_name": "Python 3",
428 | "name": "python3"
429 | },
430 | "language_info": {
431 | "name": "python"
432 | }
433 | },
434 | "nbformat": 4,
435 | "nbformat_minor": 0
436 | }
--------------------------------------------------------------------------------
/Recurrent Neural Network/ball_movement.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "ball_movement.ipynb",
7 | "provenance": [],
8 | "authorship_tag": "ABX9TyP2NJ1toYeZPJ8ylpy6Kj/a",
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | },
18 | "accelerator": "GPU"
19 | },
20 | "cells": [
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "view-in-github",
25 | "colab_type": "text"
26 | },
27 | "source": [
28 |         ""
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": 1,
34 | "metadata": {
35 | "id": "-U1BxfUa3ZYI"
36 | },
37 | "outputs": [],
38 | "source": [
39 | "import numpy as np\n",
40 | "import cv2\n",
41 | "import random\n",
42 | "import matplotlib.pyplot as plt\n",
43 | "import tensorflow as tf\n",
44 | "from tensorflow.keras.layers import Conv2D, MaxPooling2D, SimpleRNN, GRU, LSTM, Dense, Flatten, TimeDistributed"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "source": [
50 | "from google.colab.patches import cv2_imshow"
51 | ],
52 | "metadata": {
53 | "id": "LhrkW4qq5MoX"
54 | },
55 | "execution_count": 2,
56 | "outputs": []
57 | },
58 | {
59 | "cell_type": "code",
60 | "source": [
61 | "def balldata(height, width, dataset_size, frame_num):\n",
62 |         "    # label 0: ball rises from bottom-left; label 1: ball falls from top-right\n",
63 |         "    data = []\n",
64 |         "    labels = []\n",
65 |         "    # dx, dy: the ball's per-frame displacement\n",
66 |         "    dx, dy = 10, 5\n",
67 | " # x = random.randint(0, width)\n",
68 | "\n",
69 | " for b in range(dataset_size):\n",
70 | " label = random.choice((0, 1))\n",
71 | " frames = []\n",
72 | "\n",
73 | " if label == 0:\n",
74 | "\n",
75 | " x = random.randint(0, 10)\n",
76 | " y = height\n",
77 | "\n",
78 | " for i in range(frame_num):\n",
79 | " img = np.zeros((height, width))\n",
80 | " cv2.circle(img, (x, y), 10, (255), -1)\n",
81 | " x = x + dx\n",
82 | " y = y - dy\n",
83 | " # labels.append(label)\n",
84 | " frames.append(img)\n",
85 | "\n",
86 | " elif label == 1:\n",
87 | "\n",
88 | " x = random.randint(width-10, width)\n",
89 | " y = 10\n",
90 | "\n",
91 | " for i in range(frame_num):\n",
92 | " img = np.zeros((height, width))\n",
93 | " cv2.circle(img, (x, y), 10, (255), -1)\n",
94 | " x = x - dx\n",
95 | " y = y + dy\n",
96 | " # labels.append(label)\n",
97 | " frames.append(img)\n",
98 | " labels.append(label)\n",
99 | " data.append(frames)\n",
100 | "\n",
101 | " return data, labels"
102 | ],
103 | "metadata": {
104 | "id": "xz1ANzhAP7UQ"
105 | },
106 | "execution_count": 3,
107 | "outputs": []
108 | },
109 | {
110 | "cell_type": "code",
111 | "source": [
112 | "data, labels = balldata(50, 100, 100, 50)\n",
113 | "\n",
114 | "data = np.array(data)\n",
115 | "labels = np.array(labels)\n",
116 | "data = data[..., np.newaxis]\n",
117 | "labels = labels[..., np.newaxis]\n",
118 | "\n",
119 | "print(data.shape)\n",
120 | "print(labels.shape)"
121 | ],
122 | "metadata": {
123 | "colab": {
124 | "base_uri": "https://localhost:8080/"
125 | },
126 | "id": "AqRVi8Mz8DMs",
127 | "outputId": "6d6062e9-5904-4f30-b145-629f97e14bf6"
128 | },
129 | "execution_count": 4,
130 | "outputs": [
131 | {
132 | "output_type": "stream",
133 | "name": "stdout",
134 | "text": [
135 | "(100, 50, 50, 100, 1)\n",
136 | "(100, 1)\n"
137 | ]
138 | }
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "source": [
144 |         "# RNN Models"
145 | ],
146 | "metadata": {
147 | "id": "aaNR21SUH7G_"
148 | }
149 | },
150 | {
151 | "cell_type": "code",
152 | "source": [
153 | "rnn = tf.keras.models.Sequential([\n",
154 | " # CNN\n",
155 | " TimeDistributed(Conv2D(2, (3, 3), activation=\"relu\", input_shape=(None, 50, 100, 1))),\n",
156 | " TimeDistributed(MaxPooling2D(pool_size=(2, 2))),\n",
157 | "\n",
158 | " TimeDistributed(Flatten()),\n",
159 | "\n",
160 | " # RNN\n",
161 | " SimpleRNN(20),\n",
162 | " Dense(2, activation=\"softmax\")\n",
163 | "])"
164 | ],
165 | "metadata": {
166 | "id": "8AojlmJmSDbe"
167 | },
168 | "execution_count": 5,
169 | "outputs": []
170 | },
171 | {
172 | "cell_type": "code",
173 | "source": [
174 | "rnn.compile(optimizer=tf.keras.optimizers.Adam(),\n",
175 | " loss=tf.keras.losses.sparse_categorical_crossentropy,\n",
176 | " metrics=[\"accuracy\"])\n",
177 | "\n",
178 | "rnn.fit(data, labels, batch_size=32, epochs=5)"
179 | ],
180 | "metadata": {
181 | "colab": {
182 | "base_uri": "https://localhost:8080/"
183 | },
184 | "id": "_AkE8It48ZzW",
185 | "outputId": "188b561a-7ddd-4519-c16b-bc309eea17a0"
186 | },
187 | "execution_count": 6,
188 | "outputs": [
189 | {
190 | "output_type": "stream",
191 | "name": "stdout",
192 | "text": [
193 | "Epoch 1/5\n",
194 | "4/4 [==============================] - 5s 108ms/step - loss: 0.6240 - accuracy: 0.7400\n",
195 | "Epoch 2/5\n",
196 | "4/4 [==============================] - 0s 74ms/step - loss: 0.4678 - accuracy: 1.0000\n",
197 | "Epoch 3/5\n",
198 | "4/4 [==============================] - 0s 76ms/step - loss: 0.3500 - accuracy: 1.0000\n",
199 | "Epoch 4/5\n",
200 | "4/4 [==============================] - 0s 64ms/step - loss: 0.2634 - accuracy: 1.0000\n",
201 | "Epoch 5/5\n",
202 | "4/4 [==============================] - 0s 80ms/step - loss: 0.2113 - accuracy: 1.0000\n"
203 | ]
204 | },
205 | {
206 | "output_type": "execute_result",
207 | "data": {
208 | "text/plain": [
209 | ""
210 | ]
211 | },
212 | "metadata": {},
213 | "execution_count": 6
214 | }
215 | ]
216 | },
217 | {
218 | "cell_type": "markdown",
219 | "source": [
220 |         "# GRU Models"
221 | ],
222 | "metadata": {
223 | "id": "BjzRvU_h9JZe"
224 | }
225 | },
226 | {
227 | "cell_type": "code",
228 | "source": [
229 | "gru = tf.keras.models.Sequential([\n",
230 | " # CNN\n",
231 | " TimeDistributed(Conv2D(2, (3, 3), activation=\"relu\", input_shape=(None, 50, 100, 1))),\n",
232 | " TimeDistributed(MaxPooling2D(pool_size=(2, 2))),\n",
233 | "\n",
234 | " TimeDistributed(Flatten()),\n",
235 | "\n",
236 | " # GRU\n",
237 | " GRU(20),\n",
238 | " Dense(2, activation=\"softmax\")\n",
239 | "])"
240 | ],
241 | "metadata": {
242 | "id": "J1es4JmX8mwh"
243 | },
244 | "execution_count": 7,
245 | "outputs": []
246 | },
247 | {
248 | "cell_type": "code",
249 | "source": [
250 | "gru.compile(optimizer=tf.keras.optimizers.Adam(),\n",
251 | " loss=tf.keras.losses.sparse_categorical_crossentropy,\n",
252 | " metrics=[\"accuracy\"])\n",
253 | "\n",
254 | "gru.fit(data, labels, batch_size=32, epochs=5)"
255 | ],
256 | "metadata": {
257 | "id": "FecgIWFa9aN4",
258 | "outputId": "ad9b5487-7eb2-4918-f836-e26fc485cfc3",
259 | "colab": {
260 | "base_uri": "https://localhost:8080/"
261 | }
262 | },
263 | "execution_count": 8,
264 | "outputs": [
265 | {
266 | "output_type": "stream",
267 | "name": "stdout",
268 | "text": [
269 | "Epoch 1/5\n",
270 | "4/4 [==============================] - 2s 41ms/step - loss: 0.6928 - accuracy: 0.5900\n",
271 | "Epoch 2/5\n",
272 | "4/4 [==============================] - 0s 33ms/step - loss: 0.6907 - accuracy: 0.5900\n",
273 | "Epoch 3/5\n",
274 | "4/4 [==============================] - 0s 34ms/step - loss: 0.6873 - accuracy: 0.5900\n",
275 | "Epoch 4/5\n",
276 | "4/4 [==============================] - 0s 31ms/step - loss: 0.6841 - accuracy: 0.5900\n",
277 | "Epoch 5/5\n",
278 | "4/4 [==============================] - 0s 25ms/step - loss: 0.6818 - accuracy: 0.5900\n"
279 | ]
280 | },
281 | {
282 | "output_type": "execute_result",
283 | "data": {
284 | "text/plain": [
285 | ""
286 | ]
287 | },
288 | "metadata": {},
289 | "execution_count": 8
290 | }
291 | ]
292 | },
293 | {
294 | "cell_type": "markdown",
295 | "source": [
296 |         "# LSTM Models"
297 | ],
298 | "metadata": {
299 | "id": "5INCxrMG9hZY"
300 | }
301 | },
302 | {
303 | "cell_type": "code",
304 | "source": [
305 | "lstm = tf.keras.models.Sequential([\n",
306 | " # CNN\n",
307 | " TimeDistributed(Conv2D(2, (3, 3), activation=\"relu\", input_shape=(None, 50, 100, 1))),\n",
308 | " TimeDistributed(MaxPooling2D(pool_size=(2, 2))),\n",
309 | "\n",
310 | " TimeDistributed(Flatten()),\n",
311 | "\n",
312 | " # LSTM\n",
313 | " LSTM(20),\n",
314 | " Dense(2, activation=\"softmax\")\n",
315 | "])"
316 | ],
317 | "metadata": {
318 | "id": "3a91hyXh9kCB"
319 | },
320 | "execution_count": 9,
321 | "outputs": []
322 | },
323 | {
324 | "cell_type": "code",
325 | "source": [
326 | "lstm.compile(optimizer=tf.keras.optimizers.Adam(),\n",
327 | " loss=tf.keras.losses.sparse_categorical_crossentropy,\n",
328 | " metrics=[\"accuracy\"])\n",
329 | "\n",
330 | "lstm.fit(data, labels, batch_size=32, epochs=5)"
331 | ],
332 | "metadata": {
333 | "id": "HNDbeouy9tkJ",
334 | "outputId": "30d9669a-4c45-48b1-b30b-be203d08d230",
335 | "colab": {
336 | "base_uri": "https://localhost:8080/"
337 | }
338 | },
339 | "execution_count": 10,
340 | "outputs": [
341 | {
342 | "output_type": "stream",
343 | "name": "stdout",
344 | "text": [
345 | "Epoch 1/5\n",
346 | "4/4 [==============================] - 2s 44ms/step - loss: 0.6930 - accuracy: 0.5500\n",
347 | "Epoch 2/5\n",
348 | "4/4 [==============================] - 0s 36ms/step - loss: 0.6905 - accuracy: 0.5900\n",
349 | "Epoch 3/5\n",
350 | "4/4 [==============================] - 0s 34ms/step - loss: 0.6894 - accuracy: 0.5900\n",
351 | "Epoch 4/5\n",
352 | "4/4 [==============================] - 0s 30ms/step - loss: 0.6867 - accuracy: 0.5900\n",
353 | "Epoch 5/5\n",
354 | "4/4 [==============================] - 0s 25ms/step - loss: 0.6822 - accuracy: 0.5900\n"
355 | ]
356 | },
357 | {
358 | "output_type": "execute_result",
359 | "data": {
360 | "text/plain": [
361 | ""
362 | ]
363 | },
364 | "metadata": {},
365 | "execution_count": 10
366 | }
367 | ]
368 | },
369 | {
370 | "cell_type": "markdown",
371 | "source": [
372 | "# Inference"
373 | ],
374 | "metadata": {
375 | "id": "yatW9LIq_6ZN"
376 | }
377 | },
378 | {
379 | "cell_type": "code",
380 | "source": [
381 | "data, labels = balldata(50, 100, 10, 50)\n",
382 | "\n",
383 | "data = np.array(data)\n",
384 | "labels = np.array(labels)\n",
385 | "data = data[..., np.newaxis]\n",
386 | "labels = labels[..., np.newaxis]\n",
387 | "\n",
388 | "print(data.shape)\n",
389 | "print(labels.shape)"
390 | ],
391 | "metadata": {
392 | "id": "YpPG-pQR-3nB",
393 | "outputId": "95bcf99d-a284-4b4b-f501-77b2e97e013a",
394 | "colab": {
395 | "base_uri": "https://localhost:8080/"
396 | }
397 | },
398 | "execution_count": 11,
399 | "outputs": [
400 | {
401 | "output_type": "stream",
402 | "name": "stdout",
403 | "text": [
404 | "(10, 50, 50, 100, 1)\n",
405 | "(10, 1)\n"
406 | ]
407 | }
408 | ]
409 | },
410 | {
411 | "cell_type": "code",
412 | "source": [
413 | "rnn_eval = rnn.evaluate(data, labels)\n",
414 | "gru_eval = gru.evaluate(data, labels)\n",
415 | "lstm_eval = lstm.evaluate(data, labels)"
416 | ],
417 | "metadata": {
418 | "id": "m2phhe2I_FeA",
419 | "outputId": "78669dac-a55a-48e0-915d-b20d3a8bb877",
420 | "colab": {
421 | "base_uri": "https://localhost:8080/"
422 | }
423 | },
424 | "execution_count": 12,
425 | "outputs": [
426 | {
427 | "output_type": "stream",
428 | "name": "stdout",
429 | "text": [
430 | "1/1 [==============================] - 0s 298ms/step - loss: 0.1792 - accuracy: 1.0000\n",
431 | "1/1 [==============================] - 0s 425ms/step - loss: 0.6791 - accuracy: 0.6000\n",
432 | "1/1 [==============================] - 0s 473ms/step - loss: 0.6746 - accuracy: 0.6000\n"
433 | ]
434 | }
435 | ]
436 | },
437 | {
438 | "cell_type": "code",
439 | "source": [
440 | ""
441 | ],
442 | "metadata": {
443 | "id": "hCW7dqHICrCw"
444 | },
445 | "execution_count": null,
446 | "outputs": []
447 | }
448 | ]
449 | }
--------------------------------------------------------------------------------