├── MEDIA
│   ├── Demo-Draw.gif
│   ├── demo-device.gif
│   └── Upload-Via-Link.gif
├── Requirements.txt
├── README.md
├── app.py
└── HandwrittenDigitRecognition.ipynb

/MEDIA/Demo-Draw.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/srijarkoroy/Handwritten-Digit-Recognition/HEAD/MEDIA/Demo-Draw.gif
--------------------------------------------------------------------------------
/MEDIA/demo-device.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/srijarkoroy/Handwritten-Digit-Recognition/HEAD/MEDIA/demo-device.gif
--------------------------------------------------------------------------------
/MEDIA/Upload-Via-Link.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/srijarkoroy/Handwritten-Digit-Recognition/HEAD/MEDIA/Upload-Via-Link.gif
--------------------------------------------------------------------------------
/Requirements.txt:
--------------------------------------------------------------------------------
numpy
opencv-python
tensorflow
keras
streamlit
streamlit-drawable-canvas==0.1.1
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Handwritten Digit Recognition

Here's a web-based application that lets a user upload an image of a handwritten digit, or draw one in the app itself, and get the model's prediction of the digit entered. The MNIST dataset (loaded via `tf.keras.datasets`) has been used to train a neural network with an input layer, two hidden layers and an output layer of ten units, one for each digit (0-9).
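
For reference, the network described above is built roughly as follows (a condensed sketch of the training code that lives in `app.py` and `HandwrittenDigitRecognition.ipynb`):

```python
import tensorflow as tf

# Load and normalize MNIST (28x28 grayscale digit images)
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)

# Input layer (flattened 28x28), two hidden layers, 10-unit softmax output
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(units=128, activation=tf.nn.relu),
    tf.keras.layers.Dense(units=128, activation=tf.nn.relu),
    tf.keras.layers.Dense(units=10, activation=tf.nn.softmax),
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
```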

# Steps for using the Web App
- Setting up the Python Environment with the dependencies:

      pip install -r Requirements.txt

- Cloning the Repository:

      git clone https://github.com/srijarkoroy/HandwrittenDigitRecognitionWebApp.git

- Entering the directory:

      cd HandwrittenDigitRecognitionWebApp

- Running the Web App:

      streamlit run app.py

- Stopping the web app from the terminal:

      Ctrl+C

# Demonstration

Here's a demo of how the Web App may be used effectively:

## Uploading Image from Device
![](MEDIA/demo-device.gif)
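
Under the hood, this option simply reads the uploaded file with Streamlit's file uploader and opens it with PIL, as `app.py` does:

```python
import streamlit as st
from PIL import Image

file = st.file_uploader('Select', type=['jpg', 'png', 'jpeg'])
if file is not None:
    image = Image.open(file)
```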

## Uploading Image via Link
![](MEDIA/Upload-Via-Link.gif)
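
The link option fetches the image over HTTP with `urllib` and opens it the same way (mirroring `app.py`, which wraps this in a try/except to show an error for invalid addresses):

```python
import urllib.request

import streamlit as st
from PIL import Image

img = st.text_input('Enter the Image Address')
if img:  # app.py handles bad addresses with try/except and st.error
    image = Image.open(urllib.request.urlopen(img))
```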

## Drawing the Image within the Web App
![](MEDIA/Demo-Draw.gif)
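
Drawing is handled by the `streamlit-drawable-canvas` component pinned in `Requirements.txt`; the drawn pixel array is written to a temporary file with OpenCV and re-opened as a PIL image. A short sketch mirroring `app.py` (with a `None` check in place of its try/except):

```python
import cv2
import streamlit as st
from PIL import Image
from streamlit_drawable_canvas import st_canvas

b_width = st.slider('Brush Width: ', 1, 50, 10)
drawing_mode = st.checkbox("Draw", True)
image_data = st_canvas(b_width, '#000', '#EEE', height=200, width=300,
                       drawing_mode=drawing_mode, key="canvas")
if image_data is not None:
    cv2.imwrite("test.jpg", image_data)  # save the canvas pixels
    image = Image.open("test.jpg")       # reload as a PIL image for prediction
```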

--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
import streamlit as st
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from PIL import Image
import urllib.request
from streamlit_drawable_canvas import st_canvas
import time

html_temp = '''
<div align="center">
<h1>Handwritten Digit Recognition</h1>
</div>
'''
st.markdown(html_temp, unsafe_allow_html=True)
html_temp = '''
<div align="center">
<h3>Please upload Image for Classification</h3>
</div>
'''
st.set_option('deprecation.showfileUploaderEncoding', False)
st.markdown(html_temp, unsafe_allow_html=True)
opt = st.selectbox("How do you want to upload the image for classification?\n", ('Please Select', 'Upload image via link', 'Upload image from device', 'Draw the Digit!'))

if opt == 'Upload image from device':
    file = st.file_uploader('Select', type = ['jpg', 'png', 'jpeg'])
    st.set_option('deprecation.showfileUploaderEncoding', False)
    if file is not None:
        image = Image.open(file)

elif opt == 'Upload image via link':

    try:
        img = st.text_input('Enter the Image Address')
        image = Image.open(urllib.request.urlopen(img))

    except:
        if st.button('Submit'):
            show = st.error("Please Enter a valid Image Address!")
            time.sleep(4)
            show.empty()

elif opt == 'Draw the Digit!':
    b_width = st.slider('Brush Width: ',1,50,10)
    drawing_mode = st.checkbox("Draw",True)
    image_data = st_canvas(b_width, '#000', '#EEE', height=200, width=300, drawing_mode=drawing_mode, key="canvas")
    try:
        cv2.imwrite("test.jpg", image_data)
        image = Image.open("test.jpg")
    except:
        pass

try:
    if image is not None:
        st.image(image, width = 300, caption = 'Uploaded Image')
        if st.button('Predict'):
            mnist = tf.keras.datasets.mnist  # importing the dataset from mnist
            (x_train, y_train), (x_test, y_test) = mnist.load_data()
            # normalizing the data
            x_train = tf.keras.utils.normalize(x_train, axis=1)
            x_test = tf.keras.utils.normalize(x_test, axis=1)
            # defining the model with an input layer, two hidden layers and an output layer
            model = tf.keras.models.Sequential([
                tf.keras.layers.Flatten(input_shape=(28, 28)),
                tf.keras.layers.Dense(units=128, activation=tf.nn.relu),
                tf.keras.layers.Dense(units=128, activation=tf.nn.relu),
                tf.keras.layers.Dense(units=10, activation=tf.nn.softmax)
            ])

            model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
            model.fit(x_train, y_train, epochs=5)
            loss, accuracy = model.evaluate(x_test, y_test)
            model.save('digits.model')
            # resize to 28x28, keep a single channel and invert so the digit is white on black, as in MNIST
            image = np.array(image.resize((28, 28), Image.ANTIALIAS))
            image = np.array(image, dtype='uint8')
            image = image[:, :, 0]
            image = np.invert(np.array([image]))
            prediction = model.predict(image)
            st.success('Hey! 
The uploaded digit has been predicted as {} with Accuracy {:.3f}'.format(np.argmax(prediction),accuracy)) 81 | 82 | except: 83 | pass 84 | -------------------------------------------------------------------------------- /HandwrittenDigitRecognition.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "HandwrittenDigitRecognition.ipynb", 7 | "provenance": [], 8 | "toc_visible": true 9 | }, 10 | "kernelspec": { 11 | "name": "python3", 12 | "display_name": "Python 3" 13 | } 14 | }, 15 | "cells": [ 16 | { 17 | "cell_type": "markdown", 18 | "metadata": { 19 | "id": "UYDNUpVh56EU" 20 | }, 21 | "source": [ 22 | "# **Importing required libraries**" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "metadata": { 28 | "id": "Y4EG8Fwe57rj" 29 | }, 30 | "source": [ 31 | "import numpy as np\n", 32 | "import matplotlib.pyplot as plt\n", 33 | "import tensorflow as tf" 34 | ], 35 | "execution_count": 2, 36 | "outputs": [] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "metadata": { 41 | "id": "YVycPHyf6ov5" 42 | }, 43 | "source": [ 44 | "# **Importing the MNIST Dataset for Handwritten Digit Recognition**" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "metadata": { 50 | "id": "1RHMH6-G62GU" 51 | }, 52 | "source": [ 53 | "mnist=tf.keras.datasets.mnist" 54 | ], 55 | "execution_count": 4, 56 | "outputs": [] 57 | }, 58 | { 59 | "cell_type": "markdown", 60 | "metadata": { 61 | "id": "_HyxMbJM6__P" 62 | }, 63 | "source": [ 64 | "# **Splitting the data into Train and Test Set and normalizing them**" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "metadata": { 70 | "id": "ZcZ_WRbN7QqR", 71 | "outputId": "2bec8a8c-beed-42fb-ae1e-289f2701c7c7", 72 | "colab": { 73 | "base_uri": "https://localhost:8080/", 74 | "height": 51 75 | } 76 | }, 77 | "source": [ 78 | "(x_train, y_train), (x_test, y_test) = mnist.load_data() \n", 79 | "x_train = tf.keras.utils.normalize(x_train, axis=1)\n", 80 | "x_test = tf.keras.utils.normalize(x_test, axis=1)" 81 | ], 82 | "execution_count": 5, 83 | "outputs": [ 84 | { 85 | "output_type": "stream", 86 | "text": [ 87 | "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n", 88 | "11493376/11490434 [==============================] - 0s 0us/step\n" 89 | ], 90 | "name": "stdout" 91 | } 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "metadata": { 97 | "id": "v2zrmv6B7f6J" 98 | }, 99 | "source": [ 100 | "# **Defining the neural network with an input layer, two hidden layers and an output layer**" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "metadata": { 106 | "id": "Kj8aji0K7qYt" 107 | }, 108 | "source": [ 109 | "model=tf.keras.models.Sequential([\n", 110 | " tf.keras.layers.Flatten(input_shape=(28,28)), \n", 111 | " tf.keras.layers.Dense(units=128,activation=tf.nn.relu), \n", 112 | " tf.keras.layers.Dense(units=128,activation=tf.nn.relu), \n", 113 | " tf.keras.layers.Dense(units=10,activation=tf.nn.softmax)\n", 114 | " ])\n", 115 | ], 116 | "execution_count": 14, 117 | "outputs": [] 118 | }, 119 | { 120 | "cell_type": "markdown", 121 | "metadata": { 122 | "id": "shDB_KxJ749A" 123 | }, 124 | "source": [ 125 | "# **Training the Model**" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "metadata": { 131 | "id": "wYoEyeJA7-Cq", 132 | "outputId": "0d4a8693-11cf-4dc1-c42d-40055e4de1a8", 133 | "colab": { 134 | "base_uri": "https://localhost:8080/", 135 | "height": 204 136 | } 137 | 
}, 138 | "source": [ 139 | "model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])\n", 140 | "model.fit(x_train,y_train,epochs=5)" 141 | ], 142 | "execution_count": 15, 143 | "outputs": [ 144 | { 145 | "output_type": "stream", 146 | "text": [ 147 | "Epoch 1/5\n", 148 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.2665 - accuracy: 0.9212\n", 149 | "Epoch 2/5\n", 150 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.1075 - accuracy: 0.9665\n", 151 | "Epoch 3/5\n", 152 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.0731 - accuracy: 0.9771\n", 153 | "Epoch 4/5\n", 154 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.0547 - accuracy: 0.9831\n", 155 | "Epoch 5/5\n", 156 | "1875/1875 [==============================] - 4s 2ms/step - loss: 0.0401 - accuracy: 0.9870\n" 157 | ], 158 | "name": "stdout" 159 | }, 160 | { 161 | "output_type": "execute_result", 162 | "data": { 163 | "text/plain": [ 164 | "" 165 | ] 166 | }, 167 | "metadata": { 168 | "tags": [] 169 | }, 170 | "execution_count": 15 171 | } 172 | ] 173 | }, 174 | { 175 | "cell_type": "markdown", 176 | "metadata": { 177 | "id": "Tb3zzR7c8ZgW" 178 | }, 179 | "source": [ 180 | "# **Testing our model on x_test and calculating performance metrics on the Test Set**" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "metadata": { 186 | "id": "A4g_nw1F89sK", 187 | "outputId": "f123fd32-8c1c-4a39-fc4a-cfae04e98bd0", 188 | "colab": { 189 | "base_uri": "https://localhost:8080/", 190 | "height": 238 191 | } 192 | }, 193 | "source": [ 194 | "y_pred = model.predict(x_test)\n", 195 | "y_pred" 196 | ], 197 | "execution_count": 16, 198 | "outputs": [ 199 | { 200 | "output_type": "execute_result", 201 | "data": { 202 | "text/plain": [ 203 | "array([[1.4795325e-10, 1.7486268e-10, 1.0466767e-07, ..., 9.9999988e-01,\n", 204 | " 4.2716729e-11, 3.2030443e-09],\n", 205 | " [6.0774934e-11, 9.3260474e-05, 9.9990654e-01, ..., 5.4021698e-10,\n", 206 | " 9.9329514e-09, 8.1218495e-13],\n", 207 | " [4.3179924e-10, 9.9999273e-01, 1.0487961e-06, ..., 4.7345516e-06,\n", 208 | " 6.8220920e-08, 4.5402391e-08],\n", 209 | " ...,\n", 210 | " [3.2844753e-13, 3.8346002e-08, 1.1769129e-10, ..., 2.6874857e-06,\n", 211 | " 6.5114385e-08, 8.7992747e-05],\n", 212 | " [5.0371515e-08, 9.0370180e-05, 5.3776166e-09, ..., 1.4873401e-06,\n", 213 | " 1.4615327e-02, 5.1012472e-07],\n", 214 | " [4.2338417e-09, 4.8646456e-08, 5.5530806e-11, ..., 7.1753242e-13,\n", 215 | " 2.5254001e-09, 1.1791937e-10]], dtype=float32)" 216 | ] 217 | }, 218 | "metadata": { 219 | "tags": [] 220 | }, 221 | "execution_count": 16 222 | } 223 | ] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "metadata": { 228 | "id": "wdOkdZK68gcv", 229 | "outputId": "59c14a3f-3f8c-42dd-e46d-8f4066394a32", 230 | "colab": { 231 | "base_uri": "https://localhost:8080/", 232 | "height": 34 233 | } 234 | }, 235 | "source": [ 236 | "loss,accuracy=model.evaluate(x_test,y_test)" 237 | ], 238 | "execution_count": 17, 239 | "outputs": [ 240 | { 241 | "output_type": "stream", 242 | "text": [ 243 | "313/313 [==============================] - 0s 1ms/step - loss: 0.0882 - accuracy: 0.9756\n" 244 | ], 245 | "name": "stdout" 246 | } 247 | ] 248 | } 249 | ] 250 | } 251 | --------------------------------------------------------------------------------
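
A note on reuse: `app.py` retrains the network and saves it as `digits.model` on every prediction, so a saved copy from a previous run can be reloaded instead of retraining. A minimal sketch, assuming a `digits.model` saved by `app.py` and a digit image `test.jpg` (both names taken from `app.py`), preprocessed the same way the app does:

```python
import numpy as np
import tensorflow as tf
from PIL import Image

# Reload the network saved by app.py instead of retraining it
model = tf.keras.models.load_model('digits.model')

# Preprocess as app.py does: resize, keep one channel, invert to white-on-black
img = Image.open('test.jpg').resize((28, 28))
arr = np.array(img, dtype='uint8')
if arr.ndim == 3:
    arr = arr[:, :, 0]
arr = np.invert(np.array([arr]))

prediction = model.predict(arr)
print(np.argmax(prediction))
```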