├── README.md
└── Pain_Detection.ipynb
/README.md:
--------------------------------------------------------------------------------
1 | # Pain-Detection-From-Facial-Expressions
2 |
3 | * The aim of this project is to build an automatic pain detection system based on facial expressions, which can be used in smart hospitals.
4 | * The [Dataset](https://sites.pitt.edu/~emotion/um-spread.htm) used in this project was gathered by the University of Pittsburgh, Carnegie Mellon University, the University of Northern British Columbia, and McMaster University for research purposes only.
5 | * The data preprocessing was done using the OpenCV library, and the pain detection model was built and trained using PyTorch (a minimal preprocessing sketch is shown below).
6 | * The main findings of this project were published in the Journal of Software Engineering and Applications (JSEA): [Karamitsos, I., Seladji, I. and Modak, S. (2021) A Modified CNN Network for Automatic Pain Identification Using Facial Expressions. Journal of Software Engineering and Applications, 14, 400-417. doi: 10.4236/jsea.2021.148024](https://www.scirp.org/journal/paperinformation.aspx?paperid=111428)
7 |
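8 | As a quick orientation before opening the notebook, below is a minimal sketch of the OpenCV preprocessing step (face detection, grayscale conversion, histogram equalization, resizing to 224x224, and mean filtering). It is only an illustration: the cascade path taken from `cv2.data.haarcascades` and the example image path are assumptions, not part of the notebook, which contains the full pipeline and the PyTorch model.
9 |
10 | ```python
11 | import cv2
12 | import numpy as np
13 |
14 | # Haar cascade bundled with opencv-python (assumed; the notebook loads the XML by filename)
15 | face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
16 | kernel = np.ones((5, 5), np.float32) / 25  # 5x5 mean (averaging) filter
17 |
18 | def preprocess_face(img_bgr, img_size=224):
19 |     """Detect a face, crop it, then equalize, resize, and smooth it."""
20 |     gray = cv2.equalizeHist(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY))
21 |     faces = face_cascade.detectMultiScale(gray, 1.1, 4)
22 |     if len(faces) == 0:
23 |         return gray  # fall back to the full frame when no face is found
24 |     x, y, w, h = faces[0]
25 |     face = cv2.resize(gray[y:y + h, x:x + w], (img_size, img_size))
26 |     return cv2.filter2D(face, -1, kernel)
27 |
28 | # Example (hypothetical path):
29 | # face = preprocess_face(cv2.imread("frames/example.png"))
30 | ```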
--------------------------------------------------------------------------------
/Pain_Detection.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "kernelspec": {
6 | "display_name": "Python 3",
7 | "language": "python",
8 | "name": "python3"
9 | },
10 | "language_info": {
11 | "codemirror_mode": {
12 | "name": "ipython",
13 | "version": 3
14 | },
15 | "file_extension": ".py",
16 | "mimetype": "text/x-python",
17 | "name": "python",
18 | "nbconvert_exporter": "python",
19 | "pygments_lexer": "ipython3",
20 | "version": "3.7.10"
21 | },
22 | "colab": {
23 | "name": "Pain Detection.ipynb",
24 | "provenance": [],
25 | "include_colab_link": true
26 | }
27 | },
28 | "cells": [
29 | {
30 | "cell_type": "markdown",
31 | "metadata": {
32 | "id": "view-in-github",
33 | "colab_type": "text"
34 | },
35 | "source": [
36 | "
"
37 | ]
38 | },
39 | {
40 | "cell_type": "markdown",
41 | "metadata": {
42 | "id": "vWhxlD7E7a92"
43 | },
44 | "source": [
45 | "#
Pain Detection Through Facial Expressions - Ilham Seladji, MS. Data Analytics"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "metadata": {
51 | "scrolled": true,
52 | "id": "TONrOKcB7a96"
53 | },
54 | "source": [
55 | "#Load Libraries\n",
56 | "import cv2\n",
57 | "import os\n",
58 | "import numpy as np\n",
59 | "import matplotlib.pyplot as plt\n",
60 | "import torch\n",
61 | "import torchvision\n",
62 | "from torchvision import datasets, transforms\n",
63 | "import torch.nn as nn\n",
64 | "import torch.nn.functional as F\n",
65 | "import torch.optim as optim\n",
66 | "import tkinter as tk\n",
67 | "import re\n",
68 | "from tkinter import filedialog\n",
69 | "from sklearn.metrics import confusion_matrix\n",
70 | "import seaborn as sn\n",
71 | "import pandas as pd\n",
72 | "import RegscorePy as rp"
73 | ],
74 | "execution_count": null,
75 | "outputs": []
76 | },
77 | {
78 | "cell_type": "code",
79 | "metadata": {
80 | "id": "ZKvLUEGo7a98"
81 | },
82 | "source": [
83 | "# Load the OpenCV face cascade\n",
84 | "face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n",
85 | "kernel = np.ones((5,5),np.float32)/25\n",
86 | "def preprocess_img(img, img_size):\n",
87 | " #Face Detection & Cropping\n",
88 | " faces = face_cascade.detectMultiScale(img, 1.1, 4)\n",
89 | " # Grayscale conversion\n",
90 | " img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
91 | " # Histogram equalization\n",
92 | " img = cv2.equalizeHist(img)\n",
93 | " for (x, y, w, h) in faces: \n",
94 | " img_ = img[y:y+h, x:x+w]\n",
95 | " try:\n",
96 | " #Resizing/Normalizing\n",
97 | " img_ = cv2.resize(img_,(img_size, img_size))\n",
98 | " #Mean Filtering\n",
99 | " img_ = cv2.filter2D(img_,-1,kernel)\n",
100 | " except Exception as e:\n",
101 | " #print(\"Exception: \"+str(e))\n",
102 | " return img\n",
103 | " return img_"
104 | ],
105 | "execution_count": null,
106 | "outputs": []
107 | },
108 | {
109 | "cell_type": "code",
110 | "metadata": {
111 | "id": "br4X7kWo7a99"
112 | },
113 | "source": [
114 | "def preprocess(class_path, img_size):\n",
115 | " for filename in os.listdir(class_path):\n",
116 | " #img = cv2.imread(os.path.join(class_path,filename), cv2.IMREAD_GRAYSCALE)\n",
117 | " img = cv2.imread(os.path.join(class_path,filename))\n",
118 | " img = preprocess_img(img, img_size)\n",
119 | " cv2.imwrite(os.path.join(\"Processed \"+class_path,filename), img)"
120 | ],
121 | "execution_count": null,
122 | "outputs": []
123 | },
124 | {
125 | "cell_type": "markdown",
126 | "metadata": {
127 | "id": "TG3ub5w57a99"
128 | },
129 | "source": [
130 | "# Preprocessing:"
131 | ]
132 | },
133 | {
134 | "cell_type": "code",
135 | "metadata": {
136 | "scrolled": true,
137 | "id": "KJ_-D7F57a99"
138 | },
139 | "source": [
140 | "preprocess(\"Binary Classified Pain Images/0\", 224)\n",
141 | "preprocess(\"Binary Classified Pain Images/1\", 224)"
142 | ],
143 | "execution_count": null,
144 | "outputs": []
145 | },
146 | {
147 | "cell_type": "markdown",
148 | "metadata": {
149 | "id": "zRMbjowX7a9-"
150 | },
151 | "source": [
152 | "## Print Sample"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "metadata": {
158 | "id": "OdYje6u67a9-"
159 | },
160 | "source": [
161 | "for filename in os.listdir(\"Processed Binary Classified Pain Images/0\"):\n",
162 | " #Conversion to grayscale\n",
163 | " img = cv2.imread(os.path.join(\"Processed Binary Classified Pain Images/0\",filename), cv2.IMREAD_GRAYSCALE)\n",
164 | " tran = transforms.ToTensor()\n",
165 | " img = tran(img)\n",
166 | " break"
167 | ],
168 | "execution_count": null,
169 | "outputs": []
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "metadata": {
174 | "id": "f8D_6BOI7a9_"
175 | },
176 | "source": [
177 | "# Importing Data"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "metadata": {
183 | "scrolled": true,
184 | "id": "6I01J09B7a-A"
185 | },
186 | "source": [
187 | "data_transform = transforms.Compose(\n",
188 | " [transforms.ToTensor(),\n",
189 | " transforms.Normalize((0.5,), (0.5,)),\n",
190 | " transforms.Grayscale(num_output_channels=1)])\n",
191 | "trainset = datasets.ImageFolder(root=\"train\", transform=data_transform)\n",
192 | "trainloader = torch.utils.data.DataLoader(trainset, batch_size = 10, shuffle = True)"
193 | ],
194 | "execution_count": null,
195 | "outputs": []
196 | },
197 | {
198 | "cell_type": "code",
199 | "metadata": {
200 | "id": "YFdM7iEd7a-A"
201 | },
202 | "source": [
203 | "testset = datasets.ImageFolder(root=\"test\", transform=data_transform)\n",
204 | "testloader = torch.utils.data.DataLoader(testset)"
205 | ],
206 | "execution_count": null,
207 | "outputs": []
208 | },
209 | {
210 | "cell_type": "markdown",
211 | "metadata": {
212 | "id": "EdRsS8XP7a-B"
213 | },
214 | "source": [
215 | "# Processing:\n",
216 | "\n",
217 | "### CNN Architecture:"
218 | ]
219 | },
220 | {
221 | "cell_type": "code",
222 | "metadata": {
223 | "id": "fLJVL1l47a-B"
224 | },
225 | "source": [
226 | "num_epochs = 20\n",
227 | "batch_size = 10\n",
228 | "learning_rate = 0.0001\n",
229 | "class NN(nn.Module):\n",
230 | " def __init__(self):\n",
231 | " super(NN, self).__init__()\n",
232 | " self.layer1 = nn.Sequential(\n",
233 | " nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),\n",
234 | " nn.ReLU(),\n",
235 | " nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),\n",
236 | " nn.ReLU(),\n",
237 | " nn.MaxPool2d(kernel_size=2, stride=2))\n",
238 | " self.layer2 = nn.Sequential(\n",
239 | " nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),\n",
240 | " nn.ReLU(),\n",
241 | " nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),\n",
242 | " nn.ReLU(),\n",
243 | " nn.MaxPool2d(kernel_size=2, stride=2))\n",
244 | " self.layer3 = nn.Sequential(\n",
245 | " nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),\n",
246 | " nn.ReLU(),\n",
247 | " nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),\n",
248 | " nn.ReLU(),\n",
249 | " nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),\n",
250 | " nn.ReLU(),\n",
251 | " nn.MaxPool2d(kernel_size=2, stride=2))\n",
252 | " self.layer4 = nn.Sequential(\n",
253 | " nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),\n",
254 | " nn.ReLU(),\n",
255 | " nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n",
256 | " nn.ReLU(),\n",
257 | " nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n",
258 | " nn.ReLU(),\n",
259 | " nn.MaxPool2d(kernel_size=2, stride=2))\n",
260 | " self.layer5 = nn.Sequential(\n",
261 | " nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n",
262 | " nn.ReLU(),\n",
263 | " nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n",
264 | " nn.ReLU(),\n",
265 | " nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n",
266 | " nn.ReLU(),\n",
267 | " nn.MaxPool2d(kernel_size=2, stride=2))\n",
268 | " self.drop_out = nn.Dropout()\n",
269 | " self.fc1 = nn.Linear(7 * 7 * 512, 1000)\n",
270 | " self.fc2 = nn.Linear(1000, 2)\n",
271 | " def forward(self, x):\n",
272 | " out = self.layer1(x)\n",
273 | " out = self.layer2(out)\n",
274 | " out = self.layer3(out)\n",
275 | " out = self.layer4(out)\n",
276 | " out = self.layer5(out)\n",
277 | " out = out.reshape(out.size(0), -1)\n",
278 | " out = self.drop_out(out)\n",
279 | " out = F.relu(self.fc1(out))\n",
280 | " out = self.fc2(out)\n",
281 | " return out"
282 | ],
283 | "execution_count": null,
284 | "outputs": []
285 | },
286 | {
287 | "cell_type": "markdown",
288 | "metadata": {
289 | "id": "PtzuVPET7a-B"
290 | },
291 | "source": [
292 | "### Computing Loss:"
293 | ]
294 | },
295 | {
296 | "cell_type": "code",
297 | "metadata": {
298 | "id": "Ojo5VJch7a-C"
299 | },
300 | "source": [
301 | "model = NN()\n",
302 | "criterion = nn.CrossEntropyLoss()\n",
303 | "optimizer = optim.Adam(model.parameters(), lr=learning_rate)"
304 | ],
305 | "execution_count": null,
306 | "outputs": []
307 | },
308 | {
309 | "cell_type": "markdown",
310 | "metadata": {
311 | "id": "-JIDxXYb7a-D"
312 | },
313 | "source": [
314 | "### Train Network:"
315 | ]
316 | },
317 | {
318 | "cell_type": "code",
319 | "metadata": {
320 | "id": "GCFPdr397a-D"
321 | },
322 | "source": [
323 | "total_step = len(trainloader)\n",
324 | "loss_list = []\n",
325 | "acc_list = []\n",
326 | "for epoch in range(num_epochs):\n",
327 | " for i, (images, labels) in enumerate(trainloader):\n",
328 | " outputs = model(images)\n",
329 | " loss = criterion(outputs, labels)\n",
330 | " optimizer.zero_grad()\n",
331 | " loss.backward()\n",
332 | " optimizer.step()\n",
333 | " total = labels.size(0)\n",
334 | " _, predicted = torch.max(outputs.data, 1)\n",
335 | " correct = (predicted == labels).sum().item()\n",
336 | " print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'.format(epoch + 1, num_epochs, i + 1, total_step, loss.item(),(correct / total) * 100))\n",
337 | " loss_list.append(loss.item())\n",
338 | " acc_list.append(correct / total)"
339 | ],
340 | "execution_count": null,
341 | "outputs": []
342 | },
343 | {
344 | "cell_type": "code",
345 | "metadata": {
346 | "id": "2IYTKsuc7a-E"
347 | },
348 | "source": [
349 | "pd.DataFrame(acc_list).to_excel('accuracies.xlsx')\n",
350 | "pd.DataFrame(loss_list).to_excel('losses.xlsx')"
351 | ],
352 | "execution_count": null,
353 | "outputs": []
354 | },
355 | {
356 | "cell_type": "markdown",
357 | "metadata": {
358 | "id": "dkqrOc_d7a-E"
359 | },
360 | "source": [
361 | "### Save Trained Model"
362 | ]
363 | },
364 | {
365 | "cell_type": "code",
366 | "metadata": {
367 | "id": "1sl_1dWQ7a-E"
368 | },
369 | "source": [
370 | "torch.save(model.state_dict(), \"./pain_detection_nn.pth\")"
371 | ],
372 | "execution_count": null,
373 | "outputs": []
374 | },
375 | {
376 | "cell_type": "markdown",
377 | "metadata": {
378 | "id": "gg8n9Mn57a-E"
379 | },
380 | "source": [
381 | "### Test Network Performance"
382 | ]
383 | },
384 | {
385 | "cell_type": "code",
386 | "metadata": {
387 | "id": "ReqMDyb_7a-E",
388 | "outputId": "9cfa213e-e12e-4fe6-b4fc-4da7a7d9da7b"
389 | },
390 | "source": [
391 | "#load Model\n",
392 | "model = NN()\n",
393 | "model.load_state_dict(torch.load(\"pain_detection_nn.pth\"))"
394 | ],
395 | "execution_count": null,
396 | "outputs": [
397 | {
398 | "data": {
399 | "text/plain": [
400 | ""
401 | ]
402 | },
403 | "execution_count": 16,
404 | "metadata": {},
405 | "output_type": "execute_result"
406 | }
407 | ]
408 | },
409 | {
410 | "cell_type": "code",
411 | "metadata": {
412 | "id": "CDUZ0DFj7a-F",
413 | "outputId": "8783111b-8b7a-4a74-af9b-b7341cb5b3bb"
414 | },
415 | "source": [
416 | "model.eval()\n",
417 | "actual_outputs=[]\n",
418 | "predicted_outputs=[]\n",
419 | "with torch.no_grad():\n",
420 | " correct = 0\n",
421 | " total = 0\n",
422 | " for images, labels in testloader:\n",
423 | " actual_outputs.append(labels.item())\n",
424 | " outputs = model(images)\n",
425 | " _, predicted = torch.max(outputs.data, 1)\n",
426 | " predicted_outputs.append(predicted.item())\n",
427 | " total += labels.size(0)\n",
428 | " correct += (predicted == labels).sum().item()\n",
429 | "accuracy = (correct / total) * 100\n",
430 | "print('The test accuracy is: {} %'.format(accuracy))"
431 | ],
432 | "execution_count": null,
433 | "outputs": [
434 | {
435 | "name": "stdout",
436 | "output_type": "stream",
437 | "text": [
438 | "The test accuracy is: 92.25 %\n"
439 | ]
440 | }
441 | ]
442 | },
443 | {
444 | "cell_type": "code",
445 | "metadata": {
446 | "id": "_KbqQi9W7a-F"
447 | },
448 | "source": [
449 | "cm = confusion_matrix(actual_outputs, predicted_outputs)\n",
450 | "df_cm = pd.DataFrame(cm, index = [\"No Pain\", \"Pain\"],\n",
451 | " columns = [\"No Pain\", \"Pain\"])\n",
452 | "plt.figure(figsize = (10,7))\n",
453 | "sn.heatmap(df_cm, annot=True)"
454 | ],
455 | "execution_count": null,
456 | "outputs": []
457 | },
458 | {
459 | "cell_type": "markdown",
460 | "metadata": {
461 | "id": "FDjEAiXg7a-F"
462 | },
463 | "source": [
464 | "### Test Network with new images"
465 | ]
466 | },
467 | {
468 | "cell_type": "code",
469 | "metadata": {
470 | "scrolled": false,
471 | "id": "gCOedGjA7a-F"
472 | },
473 | "source": [
474 | "#Open Image\n",
475 | "root = tk.Tk()\n",
476 | "root.withdraw()\n",
477 | "path = filedialog.askopenfilename()\n",
478 | "new_img = cv2.imread(path)\n",
479 | "processed_img = preprocess_img(new_img, 224)\n",
480 | "plt.imshow(cv2.cvtColor(processed_img, cv2.COLOR_BGR2RGB))"
481 | ],
482 | "execution_count": null,
483 | "outputs": []
484 | },
485 | {
486 | "cell_type": "code",
487 | "metadata": {
488 | "scrolled": true,
489 | "id": "It5KVG0w7a-G"
490 | },
491 | "source": [
492 | "#Preprocess Image\n",
493 | "tran = transforms.Compose(\n",
494 | " [transforms.ToTensor(),\n",
495 | " transforms.Normalize((0.5,), (0.5,))])\n",
496 | "processed_img = tran(processed_img)"
497 | ],
498 | "execution_count": null,
499 | "outputs": []
500 | },
501 | {
502 | "cell_type": "code",
503 | "metadata": {
504 | "id": "TbTYjC6T7a-G"
505 | },
506 | "source": [
507 | "#Assess Pain\n",
508 | "output = model(processed_img[None,...])"
509 | ],
510 | "execution_count": null,
511 | "outputs": []
512 | },
513 | {
514 | "cell_type": "code",
515 | "metadata": {
516 | "scrolled": true,
517 | "id": "XVfOLQfM7a-G"
518 | },
519 | "source": [
520 | "value, index = torch.max(output, 1)\n",
521 | "if index==0:\n",
522 | " print(\"No Pain\")\n",
523 | "else:\n",
524 | " print(\"Pain\")\n"
525 | ],
526 | "execution_count": null,
527 | "outputs": []
528 | }
529 | ]
530 | }
--------------------------------------------------------------------------------