├── Data └── ss.md ├── FaceMask-Detection.ipynb ├── README.md ├── cnn_facemask.png ├── demo.gif └── haarcascade_frontalface_default.xml /Data/ss.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /FaceMask-Detection.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "import zipfile\n", 11 | "import random\n", 12 | "import shutil\n", 13 | "import tensorflow as tf\n", 14 | "from tensorflow.keras.optimizers import RMSprop\n", 15 | "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n", 16 | "from shutil import copyfile\n", 17 | "from os import getcwd\n", 18 | "from os import listdir\n", 19 | "import cv2\n", 20 | "from tensorflow.keras.layers import Conv2D, Input, ZeroPadding2D, BatchNormalization, Activation, MaxPooling2D, Flatten, Dense\n", 21 | "from tensorflow.keras.models import Model, load_model\n", 22 | "from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint\n", 23 | "from sklearn.model_selection import train_test_split\n", 24 | "from sklearn.metrics import f1_score\n", 25 | "from sklearn.utils import shuffle\n", 26 | "import imutils\n", 27 | "import numpy as np\n", 28 | "import matplotlib.pyplot as plt\n", 29 | "import matplotlib.image as mpimg" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 2, 35 | "metadata": {}, 36 | "outputs": [ 37 | { 38 | "name": "stdout", 39 | "output_type": "stream", 40 | "text": [ 41 | "The number of images with facemask labelled 'yes': 690\n", 42 | "The number of images with facemask labelled 'no': 686\n" 43 | ] 44 | } 45 | ], 46 | "source": [ 47 | "print(\"The number of images with facemask labelled 'yes':\",len(os.listdir('facemask-dataset/yes')))\n", 48 | "print(\"The number of images with facemask labelled 'no':\",len(os.listdir('facemask-dataset/no')))" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": 3, 54 | "metadata": {}, 55 | "outputs": [ 56 | { 57 | "name": "stdout", 58 | "output_type": "stream", 59 | "text": [ 60 | "Number of examples: 2751\n", 61 | "Percentage of positive examples: 50.163576881134134%, number of pos examples: 1380\n", 62 | "Percentage of negative examples: 49.836423118865866%, number of neg examples: 1371\n" 63 | ] 64 | } 65 | ], 66 | "source": [ 67 | "def data_summary(main_path):\n", 68 | "    \n", 69 | "    yes_path = main_path+'yesreal'\n", 70 | "    no_path = main_path+'noreal'\n", 71 | "    \n", 72 | "    # number of files (images) in the folder named 'yes', i.e. masked faces (positive examples)\n", 73 | "    m_pos = len(listdir(yes_path))\n", 74 | "    # number of files (images) in the folder named 'no', i.e. unmasked faces (negative examples)\n", 75 | "    m_neg = len(listdir(no_path))\n", 76 | "    # number of all examples\n", 77 | "    m = (m_pos+m_neg)\n", 78 | "    \n", 79 | "    pos_prec = (m_pos* 100.0)/ m\n", 80 | "    neg_prec = (m_neg* 100.0)/ m\n", 81 | "    \n", 82 | "    print(f\"Number of examples: {m}\")\n", 83 | "    print(f\"Percentage of positive examples: {pos_prec}%, number of pos examples: {m_pos}\") \n", 84 | "    print(f\"Percentage of negative examples: {neg_prec}%, number of neg examples: {m_neg}\") \n", 85 | "    \n", 86 | "augmented_data_path = 'facemask-dataset/trial1/augmented data1/' \n", 87 |
"data_summary(augmented_data_path)" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": 4, 93 | "metadata": {}, 94 | "outputs": [], 95 | "source": [ 96 | "def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE):\n", 97 | " dataset = []\n", 98 | " \n", 99 | " for unitData in os.listdir(SOURCE):\n", 100 | " data = SOURCE + unitData\n", 101 | " if(os.path.getsize(data) > 0):\n", 102 | " dataset.append(unitData)\n", 103 | " else:\n", 104 | " print('Skipped ' + unitData)\n", 105 | " print('Invalid file i.e zero size')\n", 106 | " \n", 107 | " train_set_length = int(len(dataset) * SPLIT_SIZE)\n", 108 | " test_set_length = int(len(dataset) - train_set_length)\n", 109 | " shuffled_set = random.sample(dataset, len(dataset))\n", 110 | " train_set = dataset[0:train_set_length]\n", 111 | " test_set = dataset[-test_set_length:]\n", 112 | " \n", 113 | " for unitData in train_set:\n", 114 | " temp_train_set = SOURCE + unitData\n", 115 | " final_train_set = TRAINING + unitData\n", 116 | " copyfile(temp_train_set, final_train_set)\n", 117 | " \n", 118 | " for unitData in test_set:\n", 119 | " temp_test_set = SOURCE + unitData\n", 120 | " final_test_set = TESTING + unitData\n", 121 | " copyfile(temp_test_set, final_test_set)\n", 122 | " \n", 123 | " \n", 124 | "YES_SOURCE_DIR = \"facemask-dataset/trial1/augmented data1/yesreal/\"\n", 125 | "TRAINING_YES_DIR = \"facemask-dataset/trial1/augmented data1/training/yes1/\"\n", 126 | "TESTING_YES_DIR = \"facemask-dataset/trial1/augmented data1/testing/yes1/\"\n", 127 | "NO_SOURCE_DIR = \"facemask-dataset/trial1/augmented data1/noreal/\"\n", 128 | "TRAINING_NO_DIR = \"facemask-dataset/trial1/augmented data1/training/no1/\"\n", 129 | "TESTING_NO_DIR = \"facemask-dataset/trial1/augmented data1/testing/no1/\"\n", 130 | "split_size = .8\n", 131 | "split_data(YES_SOURCE_DIR, TRAINING_YES_DIR, TESTING_YES_DIR, split_size)\n", 132 | "split_data(NO_SOURCE_DIR, TRAINING_NO_DIR, TESTING_NO_DIR, split_size)" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 5, 138 | "metadata": {}, 139 | "outputs": [ 140 | { 141 | "name": "stdout", 142 | "output_type": "stream", 143 | "text": [ 144 | "The number of images with facemask in the training set labelled 'yes': 1104\n", 145 | "The number of images with facemask in the test set labelled 'yes': 276\n", 146 | "The number of images without facemask in the training set labelled 'no': 1096\n", 147 | "The number of images without facemask in the test set labelled 'no': 275\n" 148 | ] 149 | } 150 | ], 151 | "source": [ 152 | "print(\"The number of images with facemask in the training set labelled 'yes':\", len(os.listdir('facemask-dataset/trial1/augmented data1/training/yes1')))\n", 153 | "print(\"The number of images with facemask in the test set labelled 'yes':\", len(os.listdir('facemask-dataset/trial1/augmented data1/testing/yes1')))\n", 154 | "print(\"The number of images without facemask in the training set labelled 'no':\", len(os.listdir('facemask-dataset/trial1/augmented data1/training/no1')))\n", 155 | "print(\"The number of images without facemask in the test set labelled 'no':\", len(os.listdir('facemask-dataset/trial1/augmented data1/testing/no1')))" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": 6, 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "model = tf.keras.models.Sequential([\n", 165 | " tf.keras.layers.Conv2D(100, (3,3), activation='relu', input_shape=(150, 150, 3)),\n", 166 | " tf.keras.layers.MaxPooling2D(2,2),\n", 167 | " 
\n", 168 | " tf.keras.layers.Conv2D(100, (3,3), activation='relu'),\n", 169 | " tf.keras.layers.MaxPooling2D(2,2),\n", 170 | " \n", 171 | " tf.keras.layers.Flatten(),\n", 172 | " tf.keras.layers.Dropout(0.5),\n", 173 | " tf.keras.layers.Dense(50, activation='relu'),\n", 174 | " tf.keras.layers.Dense(2, activation='softmax')\n", 175 | "])\n", 176 | "model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": 7, 182 | "metadata": {}, 183 | "outputs": [ 184 | { 185 | "name": "stdout", 186 | "output_type": "stream", 187 | "text": [ 188 | "Found 2200 images belonging to 2 classes.\n", 189 | "Found 551 images belonging to 2 classes.\n" 190 | ] 191 | } 192 | ], 193 | "source": [ 194 | "TRAINING_DIR = \"facemask-dataset/trial1/augmented data1/training\"\n", 195 | "train_datagen = ImageDataGenerator(rescale=1.0/255,\n", 196 | " rotation_range=40,\n", 197 | " width_shift_range=0.2,\n", 198 | " height_shift_range=0.2,\n", 199 | " shear_range=0.2,\n", 200 | " zoom_range=0.2,\n", 201 | " horizontal_flip=True,\n", 202 | " fill_mode='nearest')\n", 203 | "\n", 204 | "train_generator = train_datagen.flow_from_directory(TRAINING_DIR, \n", 205 | " batch_size=10, \n", 206 | " target_size=(150, 150))\n", 207 | "VALIDATION_DIR = \"facemask-dataset/trial1/augmented data1/testing\"\n", 208 | "validation_datagen = ImageDataGenerator(rescale=1.0/255)\n", 209 | "\n", 210 | "validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR, \n", 211 | " batch_size=10, \n", 212 | " target_size=(150, 150))\n", 213 | "checkpoint = ModelCheckpoint('model-{epoch:03d}.model',monitor='val_loss',verbose=0,save_best_only=True,mode='auto')" 214 | ] 215 | }, 216 | { 217 | "cell_type": "code", 218 | "execution_count": 13, 219 | "metadata": {}, 220 | "outputs": [ 221 | { 222 | "name": "stdout", 223 | "output_type": "stream", 224 | "text": [ 225 | "WARNING:tensorflow:sample_weight modes were coerced from\n", 226 | " ...\n", 227 | " to \n", 228 | " ['...']\n", 229 | "WARNING:tensorflow:sample_weight modes were coerced from\n", 230 | " ...\n", 231 | " to \n", 232 | " ['...']\n", 233 | "Train for 220 steps, validate for 56 steps\n", 234 | "Epoch 1/30\n", 235 | "220/220 [==============================] - 237s 1s/step - loss: 0.0922 - acc: 0.9682 - val_loss: 0.1251 - val_acc: 0.9546\n", 236 | "Epoch 2/30\n", 237 | "219/220 [============================>.] - ETA: 1s - loss: 0.1030 - acc: 0.9685INFO:tensorflow:Assets written to: model-002.model\\assets\n", 238 | "220/220 [==============================] - 247s 1s/step - loss: 0.1026 - acc: 0.9686 - val_loss: 0.1050 - val_acc: 0.9619\n", 239 | "Epoch 3/30\n", 240 | "220/220 [==============================] - 263s 1s/step - loss: 0.0731 - acc: 0.9786 - val_loss: 0.5988 - val_acc: 0.7550\n", 241 | "Epoch 4/30\n", 242 | "220/220 [==============================] - 231s 1s/step - loss: 0.0733 - acc: 0.9741 - val_loss: 0.1133 - val_acc: 0.9528\n", 243 | "Epoch 5/30\n", 244 | "220/220 [==============================] - 231s 1s/step - loss: 0.1414 - acc: 0.9541 - val_loss: 0.1225 - val_acc: 0.9564\n", 245 | "Epoch 6/30\n", 246 | "220/220 [==============================] - 233s 1s/step - loss: 0.1031 - acc: 0.9645 - val_loss: 0.1323 - val_acc: 0.9583\n", 247 | "Epoch 7/30\n", 248 | "219/220 [============================>.] 
- ETA: 0s - loss: 0.0958 - acc: 0.9699INFO:tensorflow:Assets written to: model-007.model\\assets\n", 249 | "220/220 [==============================] - 238s 1s/step - loss: 0.0967 - acc: 0.9695 - val_loss: 0.0932 - val_acc: 0.9655\n", 250 | "Epoch 8/30\n", 251 | "220/220 [==============================] - 232s 1s/step - loss: 0.1023 - acc: 0.9655 - val_loss: 0.1128 - val_acc: 0.9564\n", 252 | "Epoch 9/30\n", 253 | "220/220 [==============================] - 234s 1s/step - loss: 0.0886 - acc: 0.9691 - val_loss: 0.0934 - val_acc: 0.9601\n", 254 | "Epoch 10/30\n", 255 | "220/220 [==============================] - 234s 1s/step - loss: 0.0940 - acc: 0.9686 - val_loss: 0.1654 - val_acc: 0.9401\n", 256 | "Epoch 11/30\n", 257 | "219/220 [============================>.] - ETA: 1s - loss: 0.1115 - acc: 0.9680INFO:tensorflow:Assets written to: model-011.model\\assets\n", 258 | "220/220 [==============================] - 238s 1s/step - loss: 0.1111 - acc: 0.9682 - val_loss: 0.0861 - val_acc: 0.9637\n", 259 | "Epoch 12/30\n", 260 | "220/220 [==============================] - 234s 1s/step - loss: 0.0612 - acc: 0.9809 - val_loss: 0.1659 - val_acc: 0.9365\n", 261 | "Epoch 13/30\n", 262 | "220/220 [==============================] - 234s 1s/step - loss: 0.0995 - acc: 0.9709 - val_loss: 0.1097 - val_acc: 0.9583\n", 263 | "Epoch 14/30\n", 264 | "220/220 [==============================] - 233s 1s/step - loss: 0.0572 - acc: 0.9782 - val_loss: 0.1252 - val_acc: 0.9510\n", 265 | "Epoch 15/30\n", 266 | "220/220 [==============================] - 234s 1s/step - loss: 0.1090 - acc: 0.9650 - val_loss: 0.1397 - val_acc: 0.9583\n", 267 | "Epoch 16/30\n", 268 | "220/220 [==============================] - 235s 1s/step - loss: 0.0510 - acc: 0.9809 - val_loss: 0.1068 - val_acc: 0.9528\n", 269 | "Epoch 17/30\n", 270 | "220/220 [==============================] - 232s 1s/step - loss: 0.0508 - acc: 0.9805 - val_loss: 0.1558 - val_acc: 0.9419\n", 271 | "Epoch 18/30\n", 272 | "220/220 [==============================] - 232s 1s/step - loss: 0.0497 - acc: 0.9850 - val_loss: 0.1015 - val_acc: 0.9619\n", 273 | "Epoch 19/30\n", 274 | "220/220 [==============================] - 232s 1s/step - loss: 0.1081 - acc: 0.9677 - val_loss: 0.1414 - val_acc: 0.9365\n", 275 | "Epoch 20/30\n", 276 | "220/220 [==============================] - 233s 1s/step - loss: 0.0913 - acc: 0.9686 - val_loss: 0.1166 - val_acc: 0.9528\n", 277 | "Epoch 21/30\n", 278 | "220/220 [==============================] - 236s 1s/step - loss: 0.0708 - acc: 0.9755 - val_loss: 0.1102 - val_acc: 0.9601\n", 279 | "Epoch 22/30\n", 280 | "220/220 [==============================] - 232s 1s/step - loss: 0.0564 - acc: 0.9782 - val_loss: 0.1580 - val_acc: 0.9419\n", 281 | "Epoch 23/30\n", 282 | "220/220 [==============================] - 233s 1s/step - loss: 0.0493 - acc: 0.9823 - val_loss: 0.1048 - val_acc: 0.9637\n", 283 | "Epoch 24/30\n", 284 | "220/220 [==============================] - 231s 1s/step - loss: 0.0620 - acc: 0.9809 - val_loss: 0.1151 - val_acc: 0.9528\n", 285 | "Epoch 25/30\n", 286 | "220/220 [==============================] - 233s 1s/step - loss: 0.0828 - acc: 0.9768 - val_loss: 0.1617 - val_acc: 0.9437\n", 287 | "Epoch 26/30\n", 288 | "220/220 [==============================] - 233s 1s/step - loss: 0.0438 - acc: 0.9886 - val_loss: 0.2205 - val_acc: 0.9292\n", 289 | "Epoch 27/30\n", 290 | "220/220 [==============================] - 287s 1s/step - loss: 0.0608 - acc: 0.9805 - val_loss: 0.1159 - val_acc: 0.9528\n", 291 | "Epoch 28/30\n", 292 | "220/220 
[==============================] - 276s 1s/step - loss: 0.0536 - acc: 0.9850 - val_loss: 0.1131 - val_acc: 0.9601\n", 293 | "Epoch 29/30\n", 294 | "220/220 [==============================] - 257s 1s/step - loss: 0.0495 - acc: 0.9882 - val_loss: 0.0949 - val_acc: 0.9655\n", 295 | "Epoch 30/30\n", 296 | "220/220 [==============================] - 231s 1s/step - loss: 0.0368 - acc: 0.9886 - val_loss: 0.1072 - val_acc: 0.9619\n" 297 | ] 298 | } 299 | ], 300 | "source": [ 301 | "history = model.fit_generator(train_generator,\n", 302 | " epochs=30,\n", 303 | " validation_data=validation_generator,\n", 304 | " callbacks=[checkpoint])" 305 | ] 306 | }, 307 | { 308 | "cell_type": "code", 309 | "execution_count": 14, 310 | "metadata": {}, 311 | "outputs": [], 312 | "source": [ 313 | "face_clsfr=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": 16, 319 | "metadata": {}, 320 | "outputs": [], 321 | "source": [ 322 | "labels_dict={0:'without_mask',1:'with_mask'}\n", 323 | "color_dict={0:(0,0,255),1:(0,255,0)}\n", 324 | "\n", 325 | "size = 4\n", 326 | "webcam = cv2.VideoCapture(0) #Use camera 0\n", 327 | "\n", 328 | "# We load the xml file\n", 329 | "classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n", 330 | "\n", 331 | "while True:\n", 332 | " (rval, im) = webcam.read()\n", 333 | " im=cv2.flip(im,1,1) #Flip to act as a mirror\n", 334 | "\n", 335 | " # Resize the image to speed up detection\n", 336 | " mini = cv2.resize(im, (im.shape[1] // size, im.shape[0] // size))\n", 337 | "\n", 338 | " # detect MultiScale / faces \n", 339 | " faces = classifier.detectMultiScale(mini)\n", 340 | "\n", 341 | " # Draw rectangles around each face\n", 342 | " for f in faces:\n", 343 | " (x, y, w, h) = [v * size for v in f] #Scale the shapesize backup\n", 344 | " #Save just the rectangle faces in SubRecFaces\n", 345 | " face_img = im[y:y+h, x:x+w]\n", 346 | " resized=cv2.resize(face_img,(150,150))\n", 347 | " normalized=resized/255.0\n", 348 | " reshaped=np.reshape(normalized,(1,150,150,3))\n", 349 | " reshaped = np.vstack([reshaped])\n", 350 | " result=model.predict(reshaped)\n", 351 | " #print(result)\n", 352 | " \n", 353 | " label=np.argmax(result,axis=1)[0]\n", 354 | " \n", 355 | " cv2.rectangle(im,(x,y),(x+w,y+h),color_dict[label],2)\n", 356 | " cv2.rectangle(im,(x,y-40),(x+w,y),color_dict[label],-1)\n", 357 | " cv2.putText(im, labels_dict[label], (x, y-10),cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,255,255),2)\n", 358 | " \n", 359 | " # Show the image\n", 360 | " cv2.imshow('LIVE', im)\n", 361 | " key = cv2.waitKey(10)\n", 362 | " # if Esc key is press then break out of the loop \n", 363 | " if key == 27: #The Esc key\n", 364 | " break\n", 365 | "# Stop video\n", 366 | "webcam.release()\n", 367 | "\n", 368 | "# Close all started windows\n", 369 | "cv2.destroyAllWindows()" 370 | ] 371 | } 372 | ], 373 | "metadata": { 374 | "kernelspec": { 375 | "display_name": "Python 3", 376 | "language": "python", 377 | "name": "python3" 378 | }, 379 | "language_info": { 380 | "codemirror_mode": { 381 | "name": "ipython", 382 | "version": 3 383 | }, 384 | "file_extension": ".py", 385 | "mimetype": "text/x-python", 386 | "name": "python", 387 | "nbconvert_exporter": "python", 388 | "pygments_lexer": "ipython3", 389 | "version": "3.7.7" 390 | } 391 | }, 392 | "nbformat": 4, 393 | "nbformat_minor": 4 394 | } 395 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # Face-Mask-Detection 2 | In order to protect ourselves from the COVID-19 pandemic, almost every one of us tends to wear a face mask. It has become increasingly necessary to check that people in a crowd are wearing face masks at most public gatherings such as malls, theatres, and parks. The development of an AI solution that detects whether a person is wearing a face mask before allowing their entry would be of great help to society. In this project, a simple face mask detection system is built using the deep learning technique of Convolutional Neural Networks (CNN). The CNN model is built using the TensorFlow framework and the OpenCV library, which is widely used for real-time applications. The model can also serve as the basis of full-fledged software that scans every person before they enter a public gathering. Using this model, an accuracy of over 96% is obtained, and it can be tuned further to achieve even higher accuracy. 3 | 4 | Medium Article - [Click Here!](https://towardsdatascience.com/covid-19-face-mask-detection-using-tensorflow-and-opencv-702dd833515b) 5 | 6 | ### Data - 7 | I have used the face mask dataset provided by [Prajna Bhandary](https://www.linkedin.com/feed/update/urn%3Ali%3Aactivity%3A6655711815361761280/) available at [Github](https://github.com/prajnasb/observations/tree/master/experiements/data) 8 | 9 | ### CNN Architecture - 10 | In the proposed method, the face mask detection model is built using the Sequential API of the Keras library, which allows us to add the layers of our model step by step. The various layers used in the CNN model are described below. 11 | 12 | The first layer is a Conv2D layer with 100 filters and a kernel (filter) size of 3x3. The activation function used in this first step is ‘ReLU’ (Rectified Linear Unit), which outputs the input directly if it is positive and zero otherwise. The input shape is also initialized as 150x150x3 for all the images to be trained and tested with this model. 13 | 14 | In the second layer, a MaxPooling2D layer is used with a pool size of 2x2. 15 | 16 | The next layer is again a Conv2D layer with another 100 filters of the same kernel size 3x3 and the ‘ReLU’ activation function. This Conv2D layer is followed by another MaxPooling2D layer with a pool size of 2x2. 17 | 18 | In the next step, the Flatten() layer is used to flatten the convolutional feature maps into a single 1D vector. 19 | 20 | After the Flatten layer, a Dropout(0.5) layer is used to prevent the model from overfitting. 21 | 22 | Finally, towards the end, we use a Dense layer with 50 units and the ‘ReLU’ activation function. 23 | 24 | The last layer of the model is another Dense layer with only two units, and the activation function used is the ‘Softmax’ function. The softmax function normalizes the two outputs into a probability distribution, so the model produces a vector of two probabilities, one for each class. 25 | 26 | ![alt text](https://github.com/mk-gurucharan/Face-Mask-Detection/blob/master/cnn_facemask.png) 27 | 28 | After building the model, we compile it and define the loss function and optimizer. In this model, we use the ‘Adam’ optimizer and ‘Binary Cross Entropy’ as the loss function for training purposes. The corresponding Keras code from the notebook is reproduced below.
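For reference, the layer stack and compilation step described above appear in `FaceMask-Detection.ipynb` as the following Keras code:

```python
import tensorflow as tf

# Two Conv2D + MaxPooling2D blocks, followed by Flatten, Dropout and two Dense layers.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(100, (3, 3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),

    tf.keras.layers.Conv2D(100, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),

    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(50, activation='relu'),    # fully connected layer with 50 units
    tf.keras.layers.Dense(2, activation='softmax')   # two-unit softmax output (mask / no mask)
])

# 'Adam' optimizer and binary cross-entropy loss, as described above.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
```

Because the final layer has two softmax units, each prediction is a two-element probability vector; `np.argmax` is used later to pick the predicted class.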
29 | 30 | For face detection, the Haar feature-based cascade classifier is used in this experiment. It is a machine learning based object detection algorithm used to identify objects in an image or video, built on the concept of features proposed by Paul Viola and Michael Jones. A cascade function is trained from a large number of positive and negative images and is then used to detect objects in other images. 31 | 32 | The cascade classifier used in this experiment is the frontal face detection cascade classifier, a model pre-trained on frontal facial features, which is used here to detect faces in real time. 33 | 34 | Finally, the CNN model is trained for 30 epochs on the two classes, one denoting images with face masks and the other denoting images without face masks, and is then combined with the pre-trained cascade classifier for real-time detection (a condensed sketch of this detection loop is included at the end of this document). 35 | 36 | ### Demo - 37 | ![](demo.gif) 38 | -------------------------------------------------------------------------------- /cnn_facemask.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gurucharan-marthi/Face-Mask-Detection/7b28abb2ab69269a39f086d9101e552997c2daee/cnn_facemask.png -------------------------------------------------------------------------------- /demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gurucharan-marthi/Face-Mask-Detection/7b28abb2ab69269a39f086d9101e552997c2daee/demo.gif --------------------------------------------------------------------------------
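The real-time detection loop referenced in the README above, condensed from the final cell of `FaceMask-Detection.ipynb`. It is an illustrative sketch rather than a verbatim copy of the notebook cell, and it assumes the trained Keras `model` from the notebook is in scope and that `haarcascade_frontalface_default.xml` is in the working directory:

```python
import cv2
import numpy as np

labels_dict = {0: 'without_mask', 1: 'with_mask'}
color_dict = {0: (0, 0, 255), 1: (0, 255, 0)}
size = 4  # downscale factor used to speed up face detection

classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
webcam = cv2.VideoCapture(0)  # camera 0

while True:
    rval, im = webcam.read()
    if not rval:
        break
    im = cv2.flip(im, 1)  # mirror the frame

    # Detect faces on a downscaled copy, then rescale the boxes to the full frame.
    mini = cv2.resize(im, (im.shape[1] // size, im.shape[0] // size))
    faces = classifier.detectMultiScale(mini)

    for f in faces:
        x, y, w, h = [v * size for v in f]
        face_img = cv2.resize(im[y:y + h, x:x + w], (150, 150)) / 255.0
        result = model.predict(np.reshape(face_img, (1, 150, 150, 3)))
        label = np.argmax(result, axis=1)[0]

        cv2.rectangle(im, (x, y), (x + w, y + h), color_dict[label], 2)
        cv2.putText(im, labels_dict[label], (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)

    cv2.imshow('LIVE', im)
    if cv2.waitKey(10) == 27:  # Esc key stops the loop
        break

webcam.release()
cv2.destroyAllWindows()
```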