├── CNN_with_DTD_dataset_glcm.ipynb ├── CNN_with_DTD_dataset_with_image_gradient.ipynb ├── CNN_with_KTH_dataset_LBP.ipynb ├── CNN_with_KTH_dataset_original_image.ipynb ├── Copy_of_CNN_with_DTD_dataset_glcm_image_with_gradient_and_LBP.ipynb ├── KTH_dataset_CNN_original_and_original_plus_lbp.ipynb ├── README.md └── waveletCNN_DTD.ipynb /README.md: -------------------------------------------------------------------------------- 1 | # Texture Classification 2 | 3 | This repository stores the notebooks having code written for those experiments that are mentioned in the blog _**"Texture Analysis with Deep Learning for improved Computer Vision"**_ (https://medium.com/@trapti.kalra_ibm/texture-analysis-with-deep-learning-for-improved-computer-vision-aa627c8bb133) 4 | -------------------------------------------------------------------------------- /waveletCNN_DTD.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "waveletCNN_DTD.ipynb", 7 | "provenance": [] 8 | }, 9 | "kernelspec": { 10 | "name": "python3", 11 | "display_name": "Python 3" 12 | }, 13 | "accelerator": "GPU" 14 | }, 15 | "cells": [ 16 | { 17 | "cell_type": "code", 18 | "metadata": { 19 | "id": "zQyX8ddJk32y" 20 | }, 21 | "source": [ 22 | "import numpy as np\n", 23 | "from matplotlib import pyplot as plt\n", 24 | "\n", 25 | "from keras import backend as K\n", 26 | "from keras.models import Model\n", 27 | "from keras.layers import Input\n", 28 | "from keras.layers import Dense\n", 29 | "from keras.layers import Conv2D\n", 30 | "from keras.layers import Lambda\n", 31 | "from keras.layers import Flatten\n", 32 | "from keras.layers import Reshape\n", 33 | "from keras.layers import Dropout\n", 34 | "from keras.layers import Activation\n", 35 | "from keras.layers import AveragePooling2D\n", 36 | "from keras.layers import BatchNormalization\n", 37 | "from keras.layers.merge import add, 
# batch operation using tensor slices
def WaveletTransformAxisY(batch_img):
    """One decomposition level along the row axis.

    Splits rows into even/odd pairs and returns the pairwise average
    (low-pass ``L``) and absolute difference (high-pass ``H``); each
    output has half the rows of the input.
    """
    odd_img = batch_img[:, 0::2]
    even_img = batch_img[:, 1::2]
    L = (odd_img + even_img) / 2.0
    H = K.abs(odd_img - even_img)
    return L, H


def WaveletTransformAxisX(batch_img):
    """Same transform as WaveletTransformAxisY, applied along columns.

    Implemented by transposing (plus a flip) so columns become rows,
    reusing WaveletTransformAxisY, then transposing back.
    """
    # transpose + fliplr
    tmp_batch = K.permute_dimensions(batch_img, [0, 2, 1])[:, :, ::-1]
    _dst_L, _dst_H = WaveletTransformAxisY(tmp_batch)
    # transpose + flipud
    dst_L = K.permute_dimensions(_dst_L, [0, 2, 1])[:, ::-1, ...]
    dst_H = K.permute_dimensions(_dst_H, [0, 2, 1])[:, ::-1, ...]
    return dst_L, dst_H


def _decompose_channel(channel):
    """One full 2-D decomposition level for a single colour plane.

    Row pass first, then a column pass on each of the two row outputs,
    yielding the four sub-bands (LL, LH, HL, HH).
    """
    w_L, w_H = WaveletTransformAxisY(channel)
    LL, LH = WaveletTransformAxisX(w_L)
    HL, HH = WaveletTransformAxisX(w_H)
    return LL, LH, HL, HH


def Wavelet(batch_image):
    """Four-level wavelet decomposition of a channels-last RGB batch.

    For each of the R, G, B planes and each level, the LL band of the
    previous level is decomposed into LL/LH/HL/HH; the 12 sub-bands per
    level (r_LL, r_LH, r_HL, r_HH, g_..., b_...) are stacked and returned
    channels-last.

    Returns a list of four tensors; for a (b, 224, 224, 3) input the
    shapes are (b, 112, 112, 12), (b, 56, 56, 12), (b, 28, 28, 12),
    (b, 14, 14, 12), matching Wavelet_out_shape.

    BUG FIX vs. the original: in the level-4 pass the blue channel's row
    transform was assigned to ``wavelet_L3, wavelet_H3`` while the column
    transforms consumed the stale green-channel ``wavelet_L4, wavelet_H4``,
    so the blue level-4 sub-bands were copies of the green ones. The loop
    below uses each channel's own transform at every level.
    """
    # make channel first so each colour plane is a 2-D (rows, cols) slice
    batch_image = K.permute_dimensions(batch_image, [0, 3, 1, 2])
    r = batch_image[:, 0]
    g = batch_image[:, 1]
    b = batch_image[:, 2]

    decom_levels = []
    channels = (r, g, b)
    for _level in range(4):
        bands = []
        next_channels = []
        for channel in channels:
            LL, LH, HL, HH = _decompose_channel(channel)
            bands.extend([LL, LH, HL, HH])
            # the next level decomposes this channel's LL band
            next_channels.append(LL)
        transform_batch = K.stack(bands, axis=1)
        # back to channels-last for the downstream Conv2D layers
        decom_levels.append(K.permute_dimensions(transform_batch, [0, 2, 3, 1]))
        channels = tuple(next_channels)

    return decom_levels


def Wavelet_out_shape(input_shapes):
    """Static output shapes of the Wavelet Lambda layer for 224x224 input."""
    return [tuple([None, 112, 112, 12]), tuple([None, 56, 56, 12]),
            tuple([None, 28, 28, 12]), tuple([None, 14, 14, 12])]
# Download the DTD archive and collect (image, label) pairs.
# NOTE: the whole archive (~600 MB) is buffered in memory before untarring;
# fine on Colab, but stream to disk for memory-constrained environments.
url = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"

image_data = []
image_label = []
label_list = []
response = requests.get(url, stream=True)
# fail fast on a bad download instead of a confusing tarfile error later
response.raise_for_status()
with tarfile.open(fileobj=BytesIO(response.raw.read()), mode="r:gz") as tar_file:
    for member in tar_file.getmembers():
        # raw string: '\.' inside a plain string literal is a deprecated
        # escape sequence in modern Python
        if re.match(r'dtd/images/.*/.*\.jpg', member.name):
            dir_list = member.name.split('/')
            # the parent directory name is the texture class
            label = dir_list[-2]
            if label not in label_list:
                label_list.append(label)

            f = tar_file.extractfile(member)
            if f is not None:
                image = Image.open(io.BytesIO(f.read()))
                # crop (not resize) a fixed 224x224 top-left window
                image = image.crop((0, 0, 224, 224))
                # guard against grayscale/CMYK JPEGs so every sample
                # stacks to (224, 224, 3)
                image = image.convert('RGB')
                image_data.append(numpy.array(image))
                image_label.append(label)

# Encode string labels as integer ids. Sorting makes the label -> id
# mapping deterministic across runs (set() iteration order is not).
mapping = {}
total_image_label = sorted(set(image_label))
for x in range(len(total_image_label)):
    mapping[total_image_label[x]] = x

for x in range(len(image_label)):
    image_label[x] = mapping[image_label[x]]

image_data, image_label = numpy.array(image_data), numpy.array(image_label)

# 40% train / 30% validation / 30% test, stratified by class.
train_images, rem_images, train_labels, rem_labels = train_test_split(
    image_data, image_label, test_size=0.6, random_state=47, stratify=image_label)
validation_images, test_images, validation_labels, test_labels = train_test_split(
    rem_images, rem_labels, test_size=0.5, random_state=47, stratify=rem_labels)
def get_wavelet_cnn_model(num_classes=47):
    """Build the Wavelet-CNN for texture classification.

    Four wavelet decomposition levels of the input image are injected at
    successively deeper stages of the network and fused by concatenation.

    Args:
        num_classes: size of the softmax head; defaults to 47 (the DTD
            category count), keeping the original behaviour.

    Returns:
        An uncompiled keras Model mapping (224, 224, 3) images to
        ``num_classes`` class probabilities.
    """
    input_shape = 224, 224, 3

    input_ = Input(input_shape, name='the_input')
    # The output_shape callback is required: Wavelet is built from backend
    # ops, so Keras cannot infer the four output shapes of the Lambda layer.
    wavelet = Lambda(Wavelet, Wavelet_out_shape, name='wavelet')
    input_l1, input_l2, input_l3, input_l4 = wavelet(input_)

    # level one decomposition starts
    conv_1 = Conv2D(64, kernel_size=(3, 3), padding='same', name='conv_1')(input_l1)
    norm_1 = BatchNormalization(name='norm_1')(conv_1)
    relu_1 = Activation('relu', name='relu_1')(norm_1)

    # strided conv downsamples 112 -> 56 to match the level-2 bands
    conv_1_2 = Conv2D(64, kernel_size=(3, 3), strides=(2, 2), padding='same', name='conv_1_2')(relu_1)
    norm_1_2 = BatchNormalization(name='norm_1_2')(conv_1_2)
    relu_1_2 = Activation('relu', name='relu_1_2')(norm_1_2)

    # level two decomposition starts
    conv_a = Conv2D(filters=64, kernel_size=(3, 3), padding='same', name='conv_a')(input_l2)
    norm_a = BatchNormalization(name='norm_a')(conv_a)
    relu_a = Activation('relu', name='relu_a')(norm_a)

    # concatenate level one and level two decomposition
    concate_level_2 = concatenate([relu_1_2, relu_a])
    conv_2 = Conv2D(128, kernel_size=(3, 3), padding='same', name='conv_2')(concate_level_2)
    norm_2 = BatchNormalization(name='norm_2')(conv_2)
    relu_2 = Activation('relu', name='relu_2')(norm_2)

    conv_2_2 = Conv2D(128, kernel_size=(3, 3), strides=(2, 2), padding='same', name='conv_2_2')(relu_2)
    norm_2_2 = BatchNormalization(name='norm_2_2')(conv_2_2)
    relu_2_2 = Activation('relu', name='relu_2_2')(norm_2_2)

    # level three decomposition starts
    conv_b = Conv2D(filters=64, kernel_size=(3, 3), padding='same', name='conv_b')(input_l3)
    norm_b = BatchNormalization(name='norm_b')(conv_b)
    relu_b = Activation('relu', name='relu_b')(norm_b)

    conv_b_2 = Conv2D(128, kernel_size=(3, 3), padding='same', name='conv_b_2')(relu_b)
    norm_b_2 = BatchNormalization(name='norm_b_2')(conv_b_2)
    relu_b_2 = Activation('relu', name='relu_b_2')(norm_b_2)

    # concatenate level two and level three decomposition
    concate_level_3 = concatenate([relu_2_2, relu_b_2])
    conv_3 = Conv2D(256, kernel_size=(3, 3), padding='same', name='conv_3')(concate_level_3)
    # typo fix: this layer was originally named 'nomr_3'
    norm_3 = BatchNormalization(name='norm_3')(conv_3)
    relu_3 = Activation('relu', name='relu_3')(norm_3)

    conv_3_2 = Conv2D(256, kernel_size=(3, 3), strides=(2, 2), padding='same', name='conv_3_2')(relu_3)
    norm_3_2 = BatchNormalization(name='norm_3_2')(conv_3_2)
    relu_3_2 = Activation('relu', name='relu_3_2')(norm_3_2)

    # level four decomposition starts
    conv_c = Conv2D(64, kernel_size=(3, 3), padding='same', name='conv_c')(input_l4)
    norm_c = BatchNormalization(name='norm_c')(conv_c)
    relu_c = Activation('relu', name='relu_c')(norm_c)

    conv_c_2 = Conv2D(256, kernel_size=(3, 3), padding='same', name='conv_c_2')(relu_c)
    norm_c_2 = BatchNormalization(name='norm_c_2')(conv_c_2)
    relu_c_2 = Activation('relu', name='relu_c_2')(norm_c_2)

    conv_c_3 = Conv2D(256, kernel_size=(3, 3), padding='same', name='conv_c_3')(relu_c_2)
    norm_c_3 = BatchNormalization(name='norm_c_3')(conv_c_3)
    relu_c_3 = Activation('relu', name='relu_c_3')(norm_c_3)

    # concatenate level three and level four decomposition
    concate_level_4 = concatenate([relu_3_2, relu_c_3])
    conv_4 = Conv2D(256, kernel_size=(3, 3), padding='same', name='conv_4')(concate_level_4)
    norm_4 = BatchNormalization(name='norm_4')(conv_4)
    relu_4 = Activation('relu', name='relu_4')(norm_4)

    conv_4_2 = Conv2D(256, kernel_size=(3, 3), strides=(2, 2), padding='same', name='conv_4_2')(relu_4)
    norm_4_2 = BatchNormalization(name='norm_4_2')(conv_4_2)
    relu_4_2 = Activation('relu', name='relu_4_2')(norm_4_2)

    conv_5_1 = Conv2D(128, kernel_size=(3, 3), padding='same', name='conv_5_1')(relu_4_2)
    norm_5_1 = BatchNormalization(name='norm_5_1')(conv_5_1)
    relu_5_1 = Activation('relu', name='relu_5_1')(norm_5_1)

    # stride-1 'same' pooling keeps the 7x7 spatial size (smoothing only)
    pool_5_1 = AveragePooling2D(pool_size=(7, 7), strides=1, padding='same', name='avg_pool_5_1')(relu_5_1)
    flat_5_1 = Flatten(name='flat_5_1')(pool_5_1)

    fc_5 = Dense(2048, name='fc_5')(flat_5_1)
    norm_5 = BatchNormalization(name='norm_5')(fc_5)
    relu_5 = Activation('relu', name='relu_5')(norm_5)
    drop_5 = Dropout(0.5, name='drop_5')(relu_5)

    fc_6 = Dense(2048, name='fc_6')(drop_5)
    norm_6 = BatchNormalization(name='norm_6')(fc_6)
    relu_6 = Activation('relu', name='relu_6')(norm_6)
    drop_6 = Dropout(0.5, name='drop_6')(relu_6)

    output = Dense(num_classes, activation='softmax', name='fc_7')(drop_6)

    model = Model(inputs=input_, outputs=output)
    return model
# Instantiate the Wavelet-CNN and inspect its architecture.
model = get_wavelet_cnn_model()
# plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
model.summary()
# Integer labels + sparse categorical loss, so no one-hot encoding is needed.
model.compile(
    # `lr` is deprecated in tf.keras (the previous run logged a UserWarning
    # for it); `learning_rate` is the supported keyword.
    optimizer=tf.optimizers.Adam(learning_rate=0.01),
    loss='sparse_categorical_crossentropy',
    metrics=['sparse_categorical_accuracy'])
# Train on the 40% split, monitoring the 30% validation split each epoch.
# (Consistent 4-space indentation; the original cell mixed tabs and spaces.)
history = model.fit(
    train_images.astype('float32'), train_labels,
    epochs=300,
    batch_size=32,
    validation_data=(validation_images.astype('float32'), validation_labels),
    validation_batch_size=64,
)
loss: 6.3227 - sparse_categorical_accuracy: 0.0284 - val_loss: 100.3341 - val_sparse_categorical_accuracy: 0.0195\n", 1886 | "Epoch 3/300\n", 1887 | "71/71 [==============================] - 22s 308ms/step - loss: 5.9046 - sparse_categorical_accuracy: 0.0310 - val_loss: 78.2508 - val_sparse_categorical_accuracy: 0.0171\n", 1888 | "Epoch 4/300\n", 1889 | "71/71 [==============================] - 22s 307ms/step - loss: 5.0310 - sparse_categorical_accuracy: 0.0337 - val_loss: 9.5732 - val_sparse_categorical_accuracy: 0.0230\n", 1890 | "Epoch 5/300\n", 1891 | "71/71 [==============================] - 22s 307ms/step - loss: 4.6823 - sparse_categorical_accuracy: 0.0470 - val_loss: 6.2962 - val_sparse_categorical_accuracy: 0.0378\n", 1892 | "Epoch 6/300\n", 1893 | "71/71 [==============================] - 22s 309ms/step - loss: 4.2734 - sparse_categorical_accuracy: 0.0532 - val_loss: 5.4194 - val_sparse_categorical_accuracy: 0.0467\n", 1894 | "Epoch 7/300\n", 1895 | "71/71 [==============================] - 22s 307ms/step - loss: 4.0415 - sparse_categorical_accuracy: 0.0501 - val_loss: 4.7230 - val_sparse_categorical_accuracy: 0.0609\n", 1896 | "Epoch 8/300\n", 1897 | "71/71 [==============================] - 22s 308ms/step - loss: 3.9167 - sparse_categorical_accuracy: 0.0501 - val_loss: 4.2933 - val_sparse_categorical_accuracy: 0.0656\n", 1898 | "Epoch 9/300\n", 1899 | "71/71 [==============================] - 23s 324ms/step - loss: 3.7572 - sparse_categorical_accuracy: 0.0581 - val_loss: 4.0301 - val_sparse_categorical_accuracy: 0.0561\n", 1900 | "Epoch 10/300\n", 1901 | "71/71 [==============================] - 22s 308ms/step - loss: 3.6905 - sparse_categorical_accuracy: 0.0559 - val_loss: 3.8335 - val_sparse_categorical_accuracy: 0.0691\n", 1902 | "Epoch 11/300\n", 1903 | "71/71 [==============================] - 22s 307ms/step - loss: 3.6587 - sparse_categorical_accuracy: 0.0683 - val_loss: 4.0433 - val_sparse_categorical_accuracy: 0.0656\n", 1904 | "Epoch 12/300\n", 
1905 | "71/71 [==============================] - 22s 307ms/step - loss: 3.6114 - sparse_categorical_accuracy: 0.0718 - val_loss: 3.6855 - val_sparse_categorical_accuracy: 0.0686\n", 1906 | "Epoch 13/300\n", 1907 | "71/71 [==============================] - 22s 307ms/step - loss: 3.5955 - sparse_categorical_accuracy: 0.0780 - val_loss: 3.6928 - val_sparse_categorical_accuracy: 0.0638\n", 1908 | "Epoch 14/300\n", 1909 | "71/71 [==============================] - 22s 308ms/step - loss: 3.5572 - sparse_categorical_accuracy: 0.0807 - val_loss: 3.9635 - val_sparse_categorical_accuracy: 0.0656\n", 1910 | "Epoch 15/300\n", 1911 | "71/71 [==============================] - 22s 307ms/step - loss: 3.5318 - sparse_categorical_accuracy: 0.0833 - val_loss: 5.5147 - val_sparse_categorical_accuracy: 0.0455\n", 1912 | "Epoch 16/300\n", 1913 | "71/71 [==============================] - 22s 306ms/step - loss: 3.5552 - sparse_categorical_accuracy: 0.0891 - val_loss: 4.0628 - val_sparse_categorical_accuracy: 0.0757\n", 1914 | "Epoch 17/300\n", 1915 | "71/71 [==============================] - 22s 308ms/step - loss: 3.5229 - sparse_categorical_accuracy: 0.0864 - val_loss: 3.8679 - val_sparse_categorical_accuracy: 0.0762\n", 1916 | "Epoch 18/300\n", 1917 | "71/71 [==============================] - 22s 308ms/step - loss: 3.4912 - sparse_categorical_accuracy: 0.0975 - val_loss: 3.7719 - val_sparse_categorical_accuracy: 0.0751\n", 1918 | "Epoch 19/300\n", 1919 | "71/71 [==============================] - 22s 308ms/step - loss: 3.4755 - sparse_categorical_accuracy: 0.0851 - val_loss: 4.0138 - val_sparse_categorical_accuracy: 0.0650\n", 1920 | "Epoch 20/300\n", 1921 | "71/71 [==============================] - 22s 307ms/step - loss: 3.4562 - sparse_categorical_accuracy: 0.0957 - val_loss: 3.9489 - val_sparse_categorical_accuracy: 0.0745\n", 1922 | "Epoch 21/300\n", 1923 | "71/71 [==============================] - 23s 323ms/step - loss: 3.4349 - sparse_categorical_accuracy: 0.1064 - val_loss: 4.1794 
- val_sparse_categorical_accuracy: 0.0485\n", 1924 | "Epoch 22/300\n", 1925 | "71/71 [==============================] - 22s 312ms/step - loss: 3.4038 - sparse_categorical_accuracy: 0.1051 - val_loss: 3.5716 - val_sparse_categorical_accuracy: 0.0857\n", 1926 | "Epoch 23/300\n", 1927 | "71/71 [==============================] - 22s 308ms/step - loss: 3.3705 - sparse_categorical_accuracy: 0.1121 - val_loss: 3.6451 - val_sparse_categorical_accuracy: 0.0881\n", 1928 | "Epoch 24/300\n", 1929 | "71/71 [==============================] - 22s 307ms/step - loss: 3.3946 - sparse_categorical_accuracy: 0.0997 - val_loss: 4.0075 - val_sparse_categorical_accuracy: 0.0798\n", 1930 | "Epoch 25/300\n", 1931 | "71/71 [==============================] - 22s 308ms/step - loss: 3.3805 - sparse_categorical_accuracy: 0.1139 - val_loss: 3.6845 - val_sparse_categorical_accuracy: 0.0881\n", 1932 | "Epoch 26/300\n", 1933 | "71/71 [==============================] - 22s 308ms/step - loss: 3.3534 - sparse_categorical_accuracy: 0.1082 - val_loss: 3.7873 - val_sparse_categorical_accuracy: 0.0810\n", 1934 | "Epoch 27/300\n", 1935 | "71/71 [==============================] - 22s 308ms/step - loss: 3.3223 - sparse_categorical_accuracy: 0.1241 - val_loss: 3.6280 - val_sparse_categorical_accuracy: 0.1017\n", 1936 | "Epoch 28/300\n", 1937 | "71/71 [==============================] - 22s 307ms/step - loss: 3.3309 - sparse_categorical_accuracy: 0.1144 - val_loss: 3.5389 - val_sparse_categorical_accuracy: 0.1005\n", 1938 | "Epoch 29/300\n", 1939 | "71/71 [==============================] - 22s 308ms/step - loss: 3.3002 - sparse_categorical_accuracy: 0.1334 - val_loss: 3.8253 - val_sparse_categorical_accuracy: 0.1040\n", 1940 | "Epoch 30/300\n", 1941 | "71/71 [==============================] - 22s 307ms/step - loss: 3.2604 - sparse_categorical_accuracy: 0.1414 - val_loss: 3.7762 - val_sparse_categorical_accuracy: 0.1011\n", 1942 | "Epoch 31/300\n", 1943 | "71/71 [==============================] - 22s 307ms/step - 
loss: 3.2983 - sparse_categorical_accuracy: 0.1348 - val_loss: 3.9602 - val_sparse_categorical_accuracy: 0.0857\n", 1944 | "Epoch 32/300\n", 1945 | "71/71 [==============================] - 22s 308ms/step - loss: 3.2837 - sparse_categorical_accuracy: 0.1281 - val_loss: 3.4023 - val_sparse_categorical_accuracy: 0.1212\n", 1946 | "Epoch 33/300\n", 1947 | "71/71 [==============================] - 22s 307ms/step - loss: 3.2597 - sparse_categorical_accuracy: 0.1476 - val_loss: 3.4694 - val_sparse_categorical_accuracy: 0.1217\n", 1948 | "Epoch 34/300\n", 1949 | "71/71 [==============================] - 22s 308ms/step - loss: 3.2062 - sparse_categorical_accuracy: 0.1463 - val_loss: 4.2289 - val_sparse_categorical_accuracy: 0.0780\n", 1950 | "Epoch 35/300\n", 1951 | "71/71 [==============================] - 23s 323ms/step - loss: 3.2248 - sparse_categorical_accuracy: 0.1374 - val_loss: 3.6176 - val_sparse_categorical_accuracy: 0.0952\n", 1952 | "Epoch 36/300\n", 1953 | "71/71 [==============================] - 22s 306ms/step - loss: 3.2158 - sparse_categorical_accuracy: 0.1476 - val_loss: 3.4235 - val_sparse_categorical_accuracy: 0.1294\n", 1954 | "Epoch 37/300\n", 1955 | "71/71 [==============================] - 22s 307ms/step - loss: 3.1837 - sparse_categorical_accuracy: 0.1463 - val_loss: 3.4746 - val_sparse_categorical_accuracy: 0.1283\n", 1956 | "Epoch 38/300\n", 1957 | "71/71 [==============================] - 22s 308ms/step - loss: 3.2038 - sparse_categorical_accuracy: 0.1387 - val_loss: 3.4483 - val_sparse_categorical_accuracy: 0.1371\n", 1958 | "Epoch 39/300\n", 1959 | "71/71 [==============================] - 22s 308ms/step - loss: 3.1708 - sparse_categorical_accuracy: 0.1494 - val_loss: 3.4886 - val_sparse_categorical_accuracy: 0.1147\n", 1960 | "Epoch 40/300\n", 1961 | "71/71 [==============================] - 22s 307ms/step - loss: 3.1960 - sparse_categorical_accuracy: 0.1556 - val_loss: 3.6675 - val_sparse_categorical_accuracy: 0.1117\n", 1962 | "Epoch 
41/300\n", 1963 | "71/71 [==============================] - 22s 308ms/step - loss: 3.1769 - sparse_categorical_accuracy: 0.1476 - val_loss: 3.3898 - val_sparse_categorical_accuracy: 0.1324\n", 1964 | "Epoch 42/300\n", 1965 | "71/71 [==============================] - 22s 308ms/step - loss: 3.1637 - sparse_categorical_accuracy: 0.1720 - val_loss: 3.6598 - val_sparse_categorical_accuracy: 0.1123\n", 1966 | "Epoch 43/300\n", 1967 | "71/71 [==============================] - 22s 307ms/step - loss: 3.1188 - sparse_categorical_accuracy: 0.1649 - val_loss: 3.7410 - val_sparse_categorical_accuracy: 0.1117\n", 1968 | "Epoch 44/300\n", 1969 | "71/71 [==============================] - 22s 307ms/step - loss: 3.1449 - sparse_categorical_accuracy: 0.1574 - val_loss: 3.7459 - val_sparse_categorical_accuracy: 0.1141\n", 1970 | "Epoch 45/300\n", 1971 | "71/71 [==============================] - 22s 307ms/step - loss: 3.1067 - sparse_categorical_accuracy: 0.1587 - val_loss: 4.1176 - val_sparse_categorical_accuracy: 0.0845\n", 1972 | "Epoch 46/300\n", 1973 | "71/71 [==============================] - 22s 307ms/step - loss: 3.1110 - sparse_categorical_accuracy: 0.1605 - val_loss: 3.7363 - val_sparse_categorical_accuracy: 0.1087\n", 1974 | "Epoch 47/300\n", 1975 | "71/71 [==============================] - 22s 308ms/step - loss: 3.0604 - sparse_categorical_accuracy: 0.1596 - val_loss: 3.5147 - val_sparse_categorical_accuracy: 0.1294\n", 1976 | "Epoch 48/300\n", 1977 | "71/71 [==============================] - 22s 307ms/step - loss: 3.0725 - sparse_categorical_accuracy: 0.1720 - val_loss: 3.4113 - val_sparse_categorical_accuracy: 0.1383\n", 1978 | "Epoch 49/300\n", 1979 | "71/71 [==============================] - 22s 307ms/step - loss: 3.0741 - sparse_categorical_accuracy: 0.1715 - val_loss: 3.3805 - val_sparse_categorical_accuracy: 0.1371\n", 1980 | "Epoch 50/300\n", 1981 | "71/71 [==============================] - 22s 307ms/step - loss: 3.0548 - sparse_categorical_accuracy: 0.1831 - 
val_loss: 3.4663 - val_sparse_categorical_accuracy: 0.1395\n", 1982 | "Epoch 51/300\n", 1983 | "71/71 [==============================] - 22s 307ms/step - loss: 3.0620 - sparse_categorical_accuracy: 0.1786 - val_loss: 3.6021 - val_sparse_categorical_accuracy: 0.1359\n", 1984 | "Epoch 52/300\n", 1985 | "71/71 [==============================] - 22s 308ms/step - loss: 3.0341 - sparse_categorical_accuracy: 0.1707 - val_loss: 3.5400 - val_sparse_categorical_accuracy: 0.1454\n", 1986 | "Epoch 53/300\n", 1987 | "71/71 [==============================] - 22s 308ms/step - loss: 3.0408 - sparse_categorical_accuracy: 0.1773 - val_loss: 3.6476 - val_sparse_categorical_accuracy: 0.1212\n", 1988 | "Epoch 54/300\n", 1989 | "71/71 [==============================] - 22s 307ms/step - loss: 3.0088 - sparse_categorical_accuracy: 0.1835 - val_loss: 3.8170 - val_sparse_categorical_accuracy: 0.1288\n", 1990 | "Epoch 55/300\n", 1991 | "71/71 [==============================] - 22s 307ms/step - loss: 2.9596 - sparse_categorical_accuracy: 0.1986 - val_loss: 3.6509 - val_sparse_categorical_accuracy: 0.1277\n", 1992 | "Epoch 56/300\n", 1993 | "71/71 [==============================] - 22s 307ms/step - loss: 2.9687 - sparse_categorical_accuracy: 0.1871 - val_loss: 3.4933 - val_sparse_categorical_accuracy: 0.1312\n", 1994 | "Epoch 57/300\n", 1995 | "71/71 [==============================] - 22s 307ms/step - loss: 3.0099 - sparse_categorical_accuracy: 0.1950 - val_loss: 3.5811 - val_sparse_categorical_accuracy: 0.1336\n", 1996 | "Epoch 58/300\n", 1997 | "71/71 [==============================] - 22s 308ms/step - loss: 2.9756 - sparse_categorical_accuracy: 0.1946 - val_loss: 3.5744 - val_sparse_categorical_accuracy: 0.1407\n", 1998 | "Epoch 59/300\n", 1999 | "71/71 [==============================] - 22s 308ms/step - loss: 2.9659 - sparse_categorical_accuracy: 0.1910 - val_loss: 3.8492 - val_sparse_categorical_accuracy: 0.1312\n", 2000 | "Epoch 60/300\n", 2001 | "71/71 [==============================] - 
22s 307ms/step - loss: 2.9546 - sparse_categorical_accuracy: 0.1986 - val_loss: 3.6462 - val_sparse_categorical_accuracy: 0.1342\n", 2002 | "Epoch 61/300\n", 2003 | "71/71 [==============================] - 22s 308ms/step - loss: 2.9387 - sparse_categorical_accuracy: 0.2057 - val_loss: 3.3727 - val_sparse_categorical_accuracy: 0.1684\n", 2004 | "Epoch 62/300\n", 2005 | "71/71 [==============================] - 22s 308ms/step - loss: 2.9244 - sparse_categorical_accuracy: 0.1933 - val_loss: 3.5693 - val_sparse_categorical_accuracy: 0.1099\n", 2006 | "Epoch 63/300\n", 2007 | "71/71 [==============================] - 22s 308ms/step - loss: 2.9499 - sparse_categorical_accuracy: 0.1950 - val_loss: 3.4940 - val_sparse_categorical_accuracy: 0.1513\n", 2008 | "Epoch 64/300\n", 2009 | "71/71 [==============================] - 22s 307ms/step - loss: 2.8953 - sparse_categorical_accuracy: 0.2008 - val_loss: 3.5402 - val_sparse_categorical_accuracy: 0.1312\n", 2010 | "Epoch 65/300\n", 2011 | "71/71 [==============================] - 22s 307ms/step - loss: 2.8985 - sparse_categorical_accuracy: 0.2079 - val_loss: 3.3525 - val_sparse_categorical_accuracy: 0.1619\n", 2012 | "Epoch 66/300\n", 2013 | "71/71 [==============================] - 22s 308ms/step - loss: 2.8722 - sparse_categorical_accuracy: 0.2123 - val_loss: 3.5927 - val_sparse_categorical_accuracy: 0.1448\n", 2014 | "Epoch 67/300\n", 2015 | "71/71 [==============================] - 22s 307ms/step - loss: 2.8936 - sparse_categorical_accuracy: 0.2172 - val_loss: 3.4085 - val_sparse_categorical_accuracy: 0.1625\n", 2016 | "Epoch 68/300\n", 2017 | "71/71 [==============================] - 22s 308ms/step - loss: 2.8737 - sparse_categorical_accuracy: 0.2035 - val_loss: 4.0690 - val_sparse_categorical_accuracy: 0.1217\n", 2018 | "Epoch 69/300\n", 2019 | "71/71 [==============================] - 22s 308ms/step - loss: 2.8342 - sparse_categorical_accuracy: 0.2230 - val_loss: 3.6521 - val_sparse_categorical_accuracy: 0.1519\n", 
2020 | "Epoch 70/300\n", 2021 | "71/71 [==============================] - 22s 308ms/step - loss: 2.7953 - sparse_categorical_accuracy: 0.2305 - val_loss: 3.3418 - val_sparse_categorical_accuracy: 0.1820\n", 2022 | "Epoch 71/300\n", 2023 | "71/71 [==============================] - 22s 307ms/step - loss: 2.8107 - sparse_categorical_accuracy: 0.2482 - val_loss: 3.3238 - val_sparse_categorical_accuracy: 0.1649\n", 2024 | "Epoch 72/300\n", 2025 | "71/71 [==============================] - 22s 307ms/step - loss: 2.8458 - sparse_categorical_accuracy: 0.2332 - val_loss: 3.5640 - val_sparse_categorical_accuracy: 0.1661\n", 2026 | "Epoch 73/300\n", 2027 | "71/71 [==============================] - 22s 307ms/step - loss: 2.8541 - sparse_categorical_accuracy: 0.2252 - val_loss: 3.7177 - val_sparse_categorical_accuracy: 0.1519\n", 2028 | "Epoch 74/300\n", 2029 | "71/71 [==============================] - 22s 308ms/step - loss: 2.7844 - sparse_categorical_accuracy: 0.2340 - val_loss: 3.7733 - val_sparse_categorical_accuracy: 0.1383\n", 2030 | "Epoch 75/300\n", 2031 | "71/71 [==============================] - 22s 307ms/step - loss: 2.7743 - sparse_categorical_accuracy: 0.2349 - val_loss: 3.3696 - val_sparse_categorical_accuracy: 0.1684\n", 2032 | "Epoch 76/300\n", 2033 | "71/71 [==============================] - 23s 323ms/step - loss: 2.7930 - sparse_categorical_accuracy: 0.2363 - val_loss: 3.4344 - val_sparse_categorical_accuracy: 0.1767\n", 2034 | "Epoch 77/300\n", 2035 | "71/71 [==============================] - 22s 306ms/step - loss: 2.7857 - sparse_categorical_accuracy: 0.2274 - val_loss: 3.3522 - val_sparse_categorical_accuracy: 0.1714\n", 2036 | "Epoch 78/300\n", 2037 | "71/71 [==============================] - 22s 308ms/step - loss: 2.7545 - sparse_categorical_accuracy: 0.2420 - val_loss: 3.3605 - val_sparse_categorical_accuracy: 0.1631\n", 2038 | "Epoch 79/300\n", 2039 | "71/71 [==============================] - 22s 308ms/step - loss: 2.7239 - sparse_categorical_accuracy: 
0.2504 - val_loss: 3.4101 - val_sparse_categorical_accuracy: 0.1696\n", 2040 | "Epoch 80/300\n", 2041 | "71/71 [==============================] - 22s 308ms/step - loss: 2.7140 - sparse_categorical_accuracy: 0.2456 - val_loss: 3.6859 - val_sparse_categorical_accuracy: 0.1442\n", 2042 | "Epoch 81/300\n", 2043 | "71/71 [==============================] - 22s 307ms/step - loss: 2.7078 - sparse_categorical_accuracy: 0.2478 - val_loss: 3.2753 - val_sparse_categorical_accuracy: 0.1909\n", 2044 | "Epoch 82/300\n", 2045 | "71/71 [==============================] - 22s 307ms/step - loss: 2.6559 - sparse_categorical_accuracy: 0.2593 - val_loss: 3.5518 - val_sparse_categorical_accuracy: 0.1820\n", 2046 | "Epoch 83/300\n", 2047 | "71/71 [==============================] - 23s 322ms/step - loss: 2.6594 - sparse_categorical_accuracy: 0.2677 - val_loss: 4.2011 - val_sparse_categorical_accuracy: 0.1099\n", 2048 | "Epoch 84/300\n", 2049 | "71/71 [==============================] - 22s 307ms/step - loss: 2.7181 - sparse_categorical_accuracy: 0.2482 - val_loss: 3.4587 - val_sparse_categorical_accuracy: 0.1625\n", 2050 | "Epoch 85/300\n", 2051 | "71/71 [==============================] - 22s 307ms/step - loss: 2.6539 - sparse_categorical_accuracy: 0.2699 - val_loss: 3.5047 - val_sparse_categorical_accuracy: 0.1684\n", 2052 | "Epoch 86/300\n", 2053 | "71/71 [==============================] - 23s 322ms/step - loss: 2.6754 - sparse_categorical_accuracy: 0.2624 - val_loss: 3.6456 - val_sparse_categorical_accuracy: 0.1596\n", 2054 | "Epoch 87/300\n", 2055 | "71/71 [==============================] - 22s 307ms/step - loss: 2.6444 - sparse_categorical_accuracy: 0.2624 - val_loss: 3.2898 - val_sparse_categorical_accuracy: 0.1891\n", 2056 | "Epoch 88/300\n", 2057 | "71/71 [==============================] - 22s 307ms/step - loss: 2.6681 - sparse_categorical_accuracy: 0.2699 - val_loss: 3.4222 - val_sparse_categorical_accuracy: 0.1879\n", 2058 | "Epoch 89/300\n", 2059 | "71/71 
[==============================] - 22s 308ms/step - loss: 2.6190 - sparse_categorical_accuracy: 0.2704 - val_loss: 3.2880 - val_sparse_categorical_accuracy: 0.1879\n", 2060 | "Epoch 90/300\n", 2061 | "71/71 [==============================] - 22s 307ms/step - loss: 2.6231 - sparse_categorical_accuracy: 0.2713 - val_loss: 3.5953 - val_sparse_categorical_accuracy: 0.1608\n", 2062 | "Epoch 91/300\n", 2063 | "71/71 [==============================] - 22s 309ms/step - loss: 2.6260 - sparse_categorical_accuracy: 0.2633 - val_loss: 3.1735 - val_sparse_categorical_accuracy: 0.2027\n", 2064 | "Epoch 92/300\n", 2065 | "71/71 [==============================] - 22s 306ms/step - loss: 2.5931 - sparse_categorical_accuracy: 0.2850 - val_loss: 3.6664 - val_sparse_categorical_accuracy: 0.1868\n", 2066 | "Epoch 93/300\n", 2067 | "71/71 [==============================] - 22s 308ms/step - loss: 2.5666 - sparse_categorical_accuracy: 0.2934 - val_loss: 3.9269 - val_sparse_categorical_accuracy: 0.1572\n", 2068 | "Epoch 94/300\n", 2069 | "71/71 [==============================] - 22s 306ms/step - loss: 2.5676 - sparse_categorical_accuracy: 0.2855 - val_loss: 3.4037 - val_sparse_categorical_accuracy: 0.1874\n", 2070 | "Epoch 95/300\n", 2071 | "71/71 [==============================] - 22s 307ms/step - loss: 2.5267 - sparse_categorical_accuracy: 0.2974 - val_loss: 3.3667 - val_sparse_categorical_accuracy: 0.1921\n", 2072 | "Epoch 96/300\n", 2073 | "71/71 [==============================] - 22s 307ms/step - loss: 2.5629 - sparse_categorical_accuracy: 0.2815 - val_loss: 3.3811 - val_sparse_categorical_accuracy: 0.2033\n", 2074 | "Epoch 97/300\n", 2075 | "71/71 [==============================] - 22s 308ms/step - loss: 2.5703 - sparse_categorical_accuracy: 0.2775 - val_loss: 4.3641 - val_sparse_categorical_accuracy: 0.1407\n", 2076 | "Epoch 98/300\n", 2077 | "71/71 [==============================] - 22s 308ms/step - loss: 2.5653 - sparse_categorical_accuracy: 0.2863 - val_loss: 3.7637 - 
val_sparse_categorical_accuracy: 0.1820\n", 2078 | "Epoch 99/300\n", 2079 | "71/71 [==============================] - 22s 307ms/step - loss: 2.4919 - sparse_categorical_accuracy: 0.2961 - val_loss: 3.5381 - val_sparse_categorical_accuracy: 0.1956\n", 2080 | "Epoch 100/300\n", 2081 | "71/71 [==============================] - 22s 308ms/step - loss: 2.4869 - sparse_categorical_accuracy: 0.3067 - val_loss: 3.7825 - val_sparse_categorical_accuracy: 0.1596\n", 2082 | "Epoch 101/300\n", 2083 | "71/71 [==============================] - 22s 308ms/step - loss: 2.5224 - sparse_categorical_accuracy: 0.2908 - val_loss: 3.4249 - val_sparse_categorical_accuracy: 0.1826\n", 2084 | "Epoch 102/300\n", 2085 | "71/71 [==============================] - 22s 308ms/step - loss: 2.4357 - sparse_categorical_accuracy: 0.3010 - val_loss: 3.4763 - val_sparse_categorical_accuracy: 0.2021\n", 2086 | "Epoch 103/300\n", 2087 | "71/71 [==============================] - 22s 307ms/step - loss: 2.4287 - sparse_categorical_accuracy: 0.3134 - val_loss: 3.5915 - val_sparse_categorical_accuracy: 0.1838\n", 2088 | "Epoch 104/300\n", 2089 | "71/71 [==============================] - 22s 308ms/step - loss: 2.4642 - sparse_categorical_accuracy: 0.3103 - val_loss: 3.6273 - val_sparse_categorical_accuracy: 0.1897\n", 2090 | "Epoch 105/300\n", 2091 | "71/71 [==============================] - 22s 307ms/step - loss: 2.4356 - sparse_categorical_accuracy: 0.3156 - val_loss: 3.2782 - val_sparse_categorical_accuracy: 0.2151\n", 2092 | "Epoch 106/300\n", 2093 | "71/71 [==============================] - 22s 308ms/step - loss: 2.4298 - sparse_categorical_accuracy: 0.3147 - val_loss: 3.5830 - val_sparse_categorical_accuracy: 0.1921\n", 2094 | "Epoch 107/300\n", 2095 | "71/71 [==============================] - 22s 307ms/step - loss: 2.3574 - sparse_categorical_accuracy: 0.3214 - val_loss: 3.4212 - val_sparse_categorical_accuracy: 0.2110\n", 2096 | "Epoch 108/300\n", 2097 | "71/71 [==============================] - 22s 
307ms/step - loss: 2.4311 - sparse_categorical_accuracy: 0.3178 - val_loss: 3.9655 - val_sparse_categorical_accuracy: 0.1543\n", 2098 | "Epoch 109/300\n", 2099 | "71/71 [==============================] - 22s 307ms/step - loss: 2.3560 - sparse_categorical_accuracy: 0.3324 - val_loss: 4.0234 - val_sparse_categorical_accuracy: 0.1874\n", 2100 | "Epoch 110/300\n", 2101 | "71/71 [==============================] - 22s 307ms/step - loss: 2.2850 - sparse_categorical_accuracy: 0.3488 - val_loss: 3.4923 - val_sparse_categorical_accuracy: 0.1974\n", 2102 | "Epoch 111/300\n", 2103 | "71/71 [==============================] - 22s 308ms/step - loss: 2.3620 - sparse_categorical_accuracy: 0.3196 - val_loss: 4.0235 - val_sparse_categorical_accuracy: 0.1619\n", 2104 | "Epoch 112/300\n", 2105 | "71/71 [==============================] - 22s 307ms/step - loss: 2.2928 - sparse_categorical_accuracy: 0.3373 - val_loss: 3.2872 - val_sparse_categorical_accuracy: 0.1980\n", 2106 | "Epoch 113/300\n", 2107 | "71/71 [==============================] - 22s 307ms/step - loss: 2.2949 - sparse_categorical_accuracy: 0.3453 - val_loss: 3.5569 - val_sparse_categorical_accuracy: 0.2015\n", 2108 | "Epoch 114/300\n", 2109 | "71/71 [==============================] - 22s 308ms/step - loss: 2.2711 - sparse_categorical_accuracy: 0.3648 - val_loss: 3.4658 - val_sparse_categorical_accuracy: 0.2122\n", 2110 | "Epoch 115/300\n", 2111 | "71/71 [==============================] - 22s 308ms/step - loss: 2.1951 - sparse_categorical_accuracy: 0.3657 - val_loss: 3.4993 - val_sparse_categorical_accuracy: 0.2128\n", 2112 | "Epoch 116/300\n", 2113 | "71/71 [==============================] - 22s 308ms/step - loss: 2.2116 - sparse_categorical_accuracy: 0.3506 - val_loss: 3.7455 - val_sparse_categorical_accuracy: 0.2015\n", 2114 | "Epoch 117/300\n", 2115 | "71/71 [==============================] - 23s 322ms/step - loss: 2.2627 - sparse_categorical_accuracy: 0.3630 - val_loss: 3.4933 - val_sparse_categorical_accuracy: 
0.2092\n", 2116 | "Epoch 118/300\n", 2117 | "71/71 [==============================] - 23s 323ms/step - loss: 2.1986 - sparse_categorical_accuracy: 0.3670 - val_loss: 3.5735 - val_sparse_categorical_accuracy: 0.2145\n", 2118 | "Epoch 119/300\n", 2119 | "71/71 [==============================] - 22s 310ms/step - loss: 2.1630 - sparse_categorical_accuracy: 0.3816 - val_loss: 3.8103 - val_sparse_categorical_accuracy: 0.1962\n", 2120 | "Epoch 120/300\n", 2121 | "71/71 [==============================] - 22s 310ms/step - loss: 2.1555 - sparse_categorical_accuracy: 0.3834 - val_loss: 3.9379 - val_sparse_categorical_accuracy: 0.2116\n", 2122 | "Epoch 121/300\n", 2123 | "71/71 [==============================] - 22s 308ms/step - loss: 2.0786 - sparse_categorical_accuracy: 0.3945 - val_loss: 4.4570 - val_sparse_categorical_accuracy: 0.1939\n", 2124 | "Epoch 122/300\n", 2125 | "71/71 [==============================] - 22s 309ms/step - loss: 2.1050 - sparse_categorical_accuracy: 0.3967 - val_loss: 3.7601 - val_sparse_categorical_accuracy: 0.1998\n", 2126 | "Epoch 123/300\n", 2127 | "71/71 [==============================] - 22s 309ms/step - loss: 2.1397 - sparse_categorical_accuracy: 0.3914 - val_loss: 3.6435 - val_sparse_categorical_accuracy: 0.2210\n", 2128 | "Epoch 124/300\n", 2129 | "71/71 [==============================] - 22s 308ms/step - loss: 2.1302 - sparse_categorical_accuracy: 0.3874 - val_loss: 3.8100 - val_sparse_categorical_accuracy: 0.1921\n", 2130 | "Epoch 125/300\n", 2131 | "71/71 [==============================] - 22s 310ms/step - loss: 2.0559 - sparse_categorical_accuracy: 0.3963 - val_loss: 3.6734 - val_sparse_categorical_accuracy: 0.2163\n", 2132 | "Epoch 126/300\n", 2133 | "71/71 [==============================] - 22s 308ms/step - loss: 2.0335 - sparse_categorical_accuracy: 0.4136 - val_loss: 3.5595 - val_sparse_categorical_accuracy: 0.2305\n", 2134 | "Epoch 127/300\n", 2135 | "71/71 [==============================] - 22s 309ms/step - loss: 2.0092 - 
sparse_categorical_accuracy: 0.4176 - val_loss: 3.7933 - val_sparse_categorical_accuracy: 0.1927\n", 2136 | "Epoch 128/300\n", 2137 | "71/71 [==============================] - 22s 308ms/step - loss: 2.0331 - sparse_categorical_accuracy: 0.4176 - val_loss: 3.9397 - val_sparse_categorical_accuracy: 0.2086\n", 2138 | "Epoch 129/300\n", 2139 | "71/71 [==============================] - 22s 308ms/step - loss: 1.9514 - sparse_categorical_accuracy: 0.4300 - val_loss: 4.0182 - val_sparse_categorical_accuracy: 0.2063\n", 2140 | "Epoch 130/300\n", 2141 | "71/71 [==============================] - 22s 309ms/step - loss: 1.9285 - sparse_categorical_accuracy: 0.4402 - val_loss: 4.1122 - val_sparse_categorical_accuracy: 0.1998\n", 2142 | "Epoch 131/300\n", 2143 | "71/71 [==============================] - 22s 308ms/step - loss: 1.9200 - sparse_categorical_accuracy: 0.4459 - val_loss: 4.5235 - val_sparse_categorical_accuracy: 0.1696\n", 2144 | "Epoch 132/300\n", 2145 | "71/71 [==============================] - 22s 307ms/step - loss: 1.8061 - sparse_categorical_accuracy: 0.4694 - val_loss: 4.2841 - val_sparse_categorical_accuracy: 0.1921\n", 2146 | "Epoch 133/300\n", 2147 | "71/71 [==============================] - 22s 308ms/step - loss: 1.8945 - sparse_categorical_accuracy: 0.4583 - val_loss: 3.8858 - val_sparse_categorical_accuracy: 0.2246\n", 2148 | "Epoch 134/300\n", 2149 | "71/71 [==============================] - 22s 308ms/step - loss: 1.8350 - sparse_categorical_accuracy: 0.4641 - val_loss: 4.5790 - val_sparse_categorical_accuracy: 0.1655\n", 2150 | "Epoch 135/300\n", 2151 | "71/71 [==============================] - 22s 310ms/step - loss: 1.8474 - sparse_categorical_accuracy: 0.4659 - val_loss: 3.8186 - val_sparse_categorical_accuracy: 0.2210\n", 2152 | "Epoch 136/300\n", 2153 | "71/71 [==============================] - 22s 308ms/step - loss: 1.8036 - sparse_categorical_accuracy: 0.4730 - val_loss: 3.9353 - val_sparse_categorical_accuracy: 0.2039\n", 2154 | "Epoch 137/300\n", 
2155 | "71/71 [==============================] - 22s 308ms/step - loss: 1.7038 - sparse_categorical_accuracy: 0.5004 - val_loss: 4.4350 - val_sparse_categorical_accuracy: 0.1874\n", 2156 | "Epoch 138/300\n", 2157 | "71/71 [==============================] - 22s 309ms/step - loss: 1.8234 - sparse_categorical_accuracy: 0.4632 - val_loss: 4.5829 - val_sparse_categorical_accuracy: 0.1885\n", 2158 | "Epoch 139/300\n", 2159 | "71/71 [==============================] - 22s 309ms/step - loss: 1.7067 - sparse_categorical_accuracy: 0.5013 - val_loss: 3.8322 - val_sparse_categorical_accuracy: 0.2145\n", 2160 | "Epoch 140/300\n", 2161 | "71/71 [==============================] - 22s 308ms/step - loss: 1.7004 - sparse_categorical_accuracy: 0.5106 - val_loss: 4.1037 - val_sparse_categorical_accuracy: 0.1980\n", 2162 | "Epoch 141/300\n", 2163 | "71/71 [==============================] - 22s 309ms/step - loss: 1.7233 - sparse_categorical_accuracy: 0.5058 - val_loss: 3.8477 - val_sparse_categorical_accuracy: 0.2252\n", 2164 | "Epoch 142/300\n", 2165 | "71/71 [==============================] - 22s 308ms/step - loss: 1.5950 - sparse_categorical_accuracy: 0.5328 - val_loss: 4.9668 - val_sparse_categorical_accuracy: 0.1838\n", 2166 | "Epoch 143/300\n", 2167 | "71/71 [==============================] - 22s 308ms/step - loss: 1.5766 - sparse_categorical_accuracy: 0.5341 - val_loss: 4.2591 - val_sparse_categorical_accuracy: 0.2145\n", 2168 | "Epoch 144/300\n", 2169 | "71/71 [==============================] - 22s 309ms/step - loss: 1.5677 - sparse_categorical_accuracy: 0.5297 - val_loss: 4.3978 - val_sparse_categorical_accuracy: 0.2163\n", 2170 | "Epoch 145/300\n", 2171 | "71/71 [==============================] - 22s 309ms/step - loss: 1.5495 - sparse_categorical_accuracy: 0.5248 - val_loss: 4.7838 - val_sparse_categorical_accuracy: 0.2015\n", 2172 | "Epoch 146/300\n", 2173 | "71/71 [==============================] - 22s 309ms/step - loss: 1.4787 - sparse_categorical_accuracy: 0.5545 - 
val_loss: 4.7021 - val_sparse_categorical_accuracy: 0.2145\n", 2174 | "Epoch 147/300\n", 2175 | "71/71 [==============================] - 22s 310ms/step - loss: 1.4780 - sparse_categorical_accuracy: 0.5634 - val_loss: 4.2973 - val_sparse_categorical_accuracy: 0.2252\n", 2176 | "Epoch 148/300\n", 2177 | "71/71 [==============================] - 22s 308ms/step - loss: 1.4284 - sparse_categorical_accuracy: 0.5754 - val_loss: 4.3958 - val_sparse_categorical_accuracy: 0.2352\n", 2178 | "Epoch 149/300\n", 2179 | "71/71 [==============================] - 22s 310ms/step - loss: 1.3446 - sparse_categorical_accuracy: 0.5922 - val_loss: 5.6285 - val_sparse_categorical_accuracy: 0.1974\n", 2180 | "Epoch 150/300\n", 2181 | "71/71 [==============================] - 22s 308ms/step - loss: 1.5036 - sparse_categorical_accuracy: 0.5532 - val_loss: 4.1659 - val_sparse_categorical_accuracy: 0.2429\n", 2182 | "Epoch 151/300\n", 2183 | "71/71 [==============================] - 22s 308ms/step - loss: 1.3492 - sparse_categorical_accuracy: 0.5949 - val_loss: 4.5681 - val_sparse_categorical_accuracy: 0.2051\n", 2184 | "Epoch 152/300\n", 2185 | "71/71 [==============================] - 22s 308ms/step - loss: 1.3736 - sparse_categorical_accuracy: 0.5864 - val_loss: 4.7519 - val_sparse_categorical_accuracy: 0.2181\n", 2186 | "Epoch 153/300\n", 2187 | "71/71 [==============================] - 22s 307ms/step - loss: 1.3232 - sparse_categorical_accuracy: 0.6020 - val_loss: 4.3704 - val_sparse_categorical_accuracy: 0.2063\n", 2188 | "Epoch 154/300\n", 2189 | "71/71 [==============================] - 22s 308ms/step - loss: 1.2946 - sparse_categorical_accuracy: 0.6192 - val_loss: 5.3466 - val_sparse_categorical_accuracy: 0.2098\n", 2190 | "Epoch 155/300\n", 2191 | "71/71 [==============================] - 22s 310ms/step - loss: 1.2640 - sparse_categorical_accuracy: 0.6113 - val_loss: 4.7126 - val_sparse_categorical_accuracy: 0.2228\n", 2192 | "Epoch 156/300\n", 2193 | "71/71 
[==============================] - 22s 308ms/step - loss: 1.2073 - sparse_categorical_accuracy: 0.6485 - val_loss: 4.9604 - val_sparse_categorical_accuracy: 0.2204\n", 2194 | "Epoch 157/300\n", 2195 | "71/71 [==============================] - 22s 309ms/step - loss: 1.1358 - sparse_categorical_accuracy: 0.6543 - val_loss: 5.5710 - val_sparse_categorical_accuracy: 0.2145\n", 2196 | "Epoch 158/300\n", 2197 | "71/71 [==============================] - 22s 309ms/step - loss: 1.1639 - sparse_categorical_accuracy: 0.6352 - val_loss: 5.6296 - val_sparse_categorical_accuracy: 0.2045\n", 2198 | "Epoch 159/300\n", 2199 | "71/71 [==============================] - 22s 309ms/step - loss: 1.1957 - sparse_categorical_accuracy: 0.6396 - val_loss: 4.9912 - val_sparse_categorical_accuracy: 0.2128\n", 2200 | "Epoch 160/300\n", 2201 | "71/71 [==============================] - 22s 308ms/step - loss: 1.0771 - sparse_categorical_accuracy: 0.6698 - val_loss: 5.3881 - val_sparse_categorical_accuracy: 0.1933\n", 2202 | "Epoch 161/300\n", 2203 | "71/71 [==============================] - 22s 308ms/step - loss: 1.0700 - sparse_categorical_accuracy: 0.6777 - val_loss: 5.3703 - val_sparse_categorical_accuracy: 0.2092\n", 2204 | "Epoch 162/300\n", 2205 | "71/71 [==============================] - 22s 309ms/step - loss: 1.0829 - sparse_categorical_accuracy: 0.6786 - val_loss: 4.9954 - val_sparse_categorical_accuracy: 0.2193\n", 2206 | "Epoch 163/300\n", 2207 | "71/71 [==============================] - 22s 310ms/step - loss: 1.0939 - sparse_categorical_accuracy: 0.6782 - val_loss: 5.1679 - val_sparse_categorical_accuracy: 0.2074\n", 2208 | "Epoch 164/300\n", 2209 | "71/71 [==============================] - 22s 306ms/step - loss: 1.0879 - sparse_categorical_accuracy: 0.6733 - val_loss: 5.2026 - val_sparse_categorical_accuracy: 0.2270\n", 2210 | "Epoch 165/300\n", 2211 | "71/71 [==============================] - 22s 307ms/step - loss: 0.9356 - sparse_categorical_accuracy: 0.7114 - val_loss: 5.4145 - 
val_sparse_categorical_accuracy: 0.1909\n", 2212 | "Epoch 166/300\n", 2213 | "71/71 [==============================] - 22s 307ms/step - loss: 0.9632 - sparse_categorical_accuracy: 0.7021 - val_loss: 5.2643 - val_sparse_categorical_accuracy: 0.1968\n", 2214 | "Epoch 167/300\n", 2215 | "71/71 [==============================] - 22s 308ms/step - loss: 0.8781 - sparse_categorical_accuracy: 0.7371 - val_loss: 5.7516 - val_sparse_categorical_accuracy: 0.2110\n", 2216 | "Epoch 168/300\n", 2217 | "71/71 [==============================] - 22s 307ms/step - loss: 0.8600 - sparse_categorical_accuracy: 0.7438 - val_loss: 5.9095 - val_sparse_categorical_accuracy: 0.2128\n", 2218 | "Epoch 169/300\n", 2219 | "71/71 [==============================] - 22s 308ms/step - loss: 0.9149 - sparse_categorical_accuracy: 0.7199 - val_loss: 6.2048 - val_sparse_categorical_accuracy: 0.2210\n", 2220 | "Epoch 170/300\n", 2221 | "71/71 [==============================] - 22s 307ms/step - loss: 0.8780 - sparse_categorical_accuracy: 0.7398 - val_loss: 5.8846 - val_sparse_categorical_accuracy: 0.2004\n", 2222 | "Epoch 171/300\n", 2223 | " 2/71 [..............................] 
- ETA: 17s - loss: 0.7076 - sparse_categorical_accuracy: 0.7969" 2224 | ] 2225 | } 2226 | ] 2227 | }, 2228 | { 2229 | "cell_type": "code", 2230 | "metadata": { 2231 | "id": "MI3d6ONirH6h" 2232 | }, 2233 | "source": [ 2234 | "acc = history.history['sparse_categorical_accuracy']  # per-epoch training accuracy\n", 2235 | "val_acc = history.history['val_sparse_categorical_accuracy']  # per-epoch validation accuracy\n", 2236 | "epochs = range(len(acc))  # x-axis: one point per completed epoch\n", 2237 | "\n", 2238 | "plt.plot(epochs, acc, label='training accuracy')\n", 2239 | "plt.plot(epochs, val_acc, label='validation accuracy')\n", 2240 | "plt.title('Wavelet_DTD_Accuracy')\n", 2241 | "plt.xlabel('epochs')\n", 2242 | "plt.ylabel('accuracy')\n", 2243 | "plt.legend()" 2244 | ], 2245 | "execution_count": null, 2246 | "outputs": [] 2247 | }, 2248 | { 2249 | "cell_type": "code", 2250 | "metadata": { 2251 | "id": "FjlFssvfrIwO" 2252 | }, 2253 | "source": [ 2254 | "loss = history.history['loss']\n", 2255 | "val_loss = history.history['val_loss']\n", 2256 | "\n", 2257 | "plt.plot(loss, label='training loss')\n", 2258 | "plt.plot(val_loss, label='validation loss')\n", 2259 | "plt.title('Wavelet_DTD_Loss')\n", 2260 | "plt.xlabel('epochs')\n", 2261 | "plt.ylabel('loss')\n", 2262 | "plt.legend()" 2263 | ], 2264 | "execution_count": null, 2265 | "outputs": [] 2266 | } 2267 | ] 2268 | } --------------------------------------------------------------------------------