250 | {!this.state.hasAnalyzeData ? (
251 | this.getFinalData()
252 | ) : (
253 |
254 |
255 |
256 |
257 |
258 | {user.name}님의 최종감정입니다.
259 |
260 |
261 |
262 |
263 |
264 |
277 | {this.state.signalData.map((entry, index) => (
278 | |
282 | ))}
283 |
284 |
285 |
286 |
287 |
294 |
307 | {this.state.signalData.map((entry, index) => (
308 | |
312 | ))}
313 |
314 |
315 |
316 |
317 |
318 |
330 | {this.state.signalData.map((entry, index) => (
331 | |
335 | ))}
336 |
337 |
338 |
339 |
340 |
341 |
342 | {' '}
343 |
344 | Face
345 |
346 |
347 |
348 | {' '}
349 |
350 | Face&EEG
351 |
352 |
353 |
354 | {' '}
355 |
356 | EEG
357 |
358 |
359 |
360 |
361 |
362 | )}
363 |
364 | );
365 | }
366 | }
367 |
368 | export default withRouter(Result);
369 |
--------------------------------------------------------------------------------
/src/back-end/FBI/api/views.py:
--------------------------------------------------------------------------------
1 | from .models import *
2 | from django.db.models import Count
3 | from django.utils import timezone
4 | from django.urls import reverse
5 | from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
6 | from rest_framework import status
7 | from rest_framework.decorators import api_view
8 | from rest_framework.response import Response
9 | from rest_framework.views import APIView
10 | from . import serializers
11 | from .customLogin import *
12 | import random, os, pickle, sys, shutil, time
13 | from PIL import Image
14 |
15 | sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(
16 | os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))))))
17 | # from src.analyze.face.predict_face_emotion_faceapi import predict_emotion
18 | from src.analyzeModule import detectEmotion
19 |
20 | ROOT_DIR = os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(
21 | os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))))))))
22 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
23 | # Directory path for saving real-time data.
24 | dirPath = os.path.join(ROOT_DIR, 'FBI-data')
25 | dataDirPath = ''
26 | dateDirPath = ''
27 | # Path for saving userFace images.
28 | path = os.path.join(BASE_DIR, 'media')
29 | # Temporarily save encoded image of new user for signup.
30 | encodedImage = []
31 | # Dict for saving accumulated real time data.
32 | resultsDic = {}
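# resultsDic[emotion] accumulates [face_sum, eeg_sum, fused_sum]; see realTimeAnalyze() and finalResult() below.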
33 |
34 | @api_view(['POST'])
35 | def signup(request):
36 | serializer = serializers.UserSerializer(data=request.data)
37 | if serializer.is_valid():
38 | # Save new user to db.
39 | newUser = User.objects.create_user(username=serializer.data['username'],
40 | userFace='default')
41 | newUser.save()
42 | # Update userFace file name.
43 | newUser.userFace = request.FILES['userFace']
44 | newUser.save()
45 | payload = {
46 | 'id': newUser.id,
47 | 'username': newUser.username,
48 | }
49 | # Save encoded image of user.
50 | current_dir = os.getcwd()
51 | userInfo = [(newUser.id, newUser.username), encodedImage[0]]
52 | del encodedImage[0]
53 | if 'encoded_users' not in os.listdir(current_dir):
54 | with open('encoded_users', "wb") as fw:
55 | pickle.dump(userInfo, fw)
56 | fw.close()
57 | else:
58 | with open('encoded_users', "ab") as fi:
59 | pickle.dump(userInfo, fi)
60 | fi.close()
61 | request.session['id'] = newUser.id
62 | # Create data directory for saving real-time data.
63 | # Path : capstone-2020-2/FBI-data
64 | if not os.path.isdir(dirPath):
65 | os.mkdir(dirPath)
66 | # Create subdirectory for user.
67 | global dataDirPath
68 | dataDirPath = os.path.join(dirPath, newUser.userFace.name.split("/")[1].split(".")[0])
69 | if not os.path.isdir(dataDirPath):
70 | os.mkdir(dataDirPath)
71 | return JsonResponse(payload)
72 | else:
73 |         return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
74 |
75 | @api_view(['POST'])
76 | def login(request):
77 | img = Image.open(request.FILES['userFace'])
78 | imgPath = os.path.join(BASE_DIR, 'temp.jpg')
79 | img.save(imgPath, 'JPEG')
80 |
81 | try:
82 | img = face_recognition.load_image_file(imgPath)
83 | login_face_encoding = face_recognition.face_encodings(img, num_jitters=10, model="large")[0]
84 | os.remove(imgPath)
85 | except IndexError:
86 | return HttpResponse("Please take another picture.", status=status.HTTP_409_CONFLICT)
87 |
88 | current_dir = os.getcwd()
89 | if 'encoded_users' not in os.listdir(current_dir):
90 | encodedImage.append(login_face_encoding)
91 | return HttpResponse("First user.", status=status.HTTP_406_NOT_ACCEPTABLE)
92 |
93 | encodeUsers = []
94 | with open('encoded_users', 'rb') as fr:
95 | while True:
96 | try:
97 | encodeUsers.append(pickle.load(fr))
98 | except EOFError:
99 | break
100 |
101 | user = isUser(login_face_encoding, encodeUsers)
102 | if user is None:
103 | encodedImage.append(login_face_encoding)
104 | return HttpResponse("Please sign up first", status=status.HTTP_404_NOT_FOUND)
105 | else:
106 | request.session['id'] = user[0]
107 | # Set user data directory path.
108 | global dataDirPath
109 | dataDirPath = os.path.join(dirPath, '{}_{}'.format(user[1], user[0]))
110 | payload = {
111 | 'id': user[0],
112 | 'username': user[1],
113 | }
114 | return JsonResponse(payload)
115 |
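# `isUser` and `face_recognition` come from the wildcard import of .customLogin above (that
# module is not shown here). A minimal sketch of what isUser is assumed to do, given the
# pickled [(id, username), encoding] entries written in signup() and how login() uses the
# return value:
#
#   def isUser(login_face_encoding, encodeUsers, tolerance=0.6):
#       for (userId, username), knownEncoding in encodeUsers:
#           if face_recognition.compare_faces([knownEncoding], login_face_encoding,
#                                             tolerance=tolerance)[0]:
#               return (userId, username)
#       return None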
116 | @api_view(['POST'])
117 | def logout(request):
118 | try:
119 | request.session.flush()
120 | except KeyError:
121 | pass
122 | global dataDirPath
123 | dataDirPath = ''
124 | return HttpResponse("You're logged out.")
125 |
126 | class getAnalyzingVideo(APIView):
127 | def get(self, request, id, emotionTag):
128 | viewedVideos = request.session.get('viewedVideos', {})
129 | if emotionTag not in viewedVideos:
130 | viewedVideos[emotionTag] = []
131 | videoObjects = Video.objects.filter(tag=emotionTag)
132 | numOfVideos = videoObjects.aggregate(count=Count('videoId')).get('count')
133 | videos = videoObjects.values_list('videoId', flat=True)
134 | if numOfVideos == 0:
135 | return HttpResponse("No videos.")
136 | if len(viewedVideos[emotionTag]) == numOfVideos:
137 | return HttpResponse("You've seen every video.", status=status.HTTP_404_NOT_FOUND)
138 | while True:
139 | randId = random.sample(list(videos), 1)[0]
140 | if randId in viewedVideos[emotionTag]:
141 | continue
142 | video = Video.objects.filter(pk=randId).first()
143 | if video:
144 | viewedVideos[emotionTag].append(randId)
145 | request.session['viewedVideos'] = viewedVideos
146 | request.session.modified = True
147 | # Create subdirectory for played videos.
148 | videoInfo = '{}_{}'.format(video.title, video.videoId)
149 | global dataDirPath
150 | videoDirPath = os.path.join(dataDirPath, videoInfo)
151 | if not os.path.isdir(videoDirPath):
152 | os.makedirs(videoDirPath)
153 | # Create directories based on the datetime the video was played
154 | # since each video might be played multiple times.
155 | global dateDirPath
156 | now = timezone.localtime()
157 | dateDirPath = os.path.join(videoDirPath, now.strftime('%Y-%m-%d %H:%M:%S'))
158 | os.mkdir(dateDirPath)
159 | # Create directories separately for face, eeg data.
160 | os.mkdir(os.path.join(dateDirPath, 'face'))
161 | os.mkdir(os.path.join(dateDirPath, 'eeg'))
162 | # Save result
163 | result = Result.objects.create(user=User.objects.filter(pk=id).first(),
164 | video=Video.objects.filter(pk=randId).first(),
165 | viewedDate=now,
166 | dataPath=dateDirPath)
167 | request.session['resultId'] = result.resultId
168 | return JsonResponse({
169 | 'user' : id,
170 | 'link' : video.link,
171 | 'id' : video.videoId,
172 | 'startTime' : video.startTime,
173 | 'duration' : video.duration,
174 | 'tag' : video.tag,
175 | 'dateDirPath': dateDirPath,
176 | })
177 | else:
178 | continue
179 | def post(self, request, id, emotionTag):
180 | return HttpResponseRedirect(reverse('realTimeResult'))
181 |
182 | @api_view(['POST'])
183 | def realTimeAnalyze(request):
184 | img = Image.open(request.FILES['image'])
185 | # Set image path and eeg path.
186 | imgName = request.data['image'].name
187 | eegName = 'test_signal.txt'
188 | #imgPath = os.path.join(request.data['dateDirPath'], 'face', imgName)
189 | global dateDirPath
190 |     imgPath = os.path.join(dateDirPath, 'face', imgName)
191 | eegTempPath = os.path.join(dirPath, eegName)
192 | # Save image to corresponding dir path.
193 | img.save(imgPath, "JPEG")
194 |
195 | emotionTag = request.data['videoTag']
196 |     if emotionTag == "happy":
197 |         emotionTag = "happiness"
198 |     elif emotionTag == "sad":
199 |         emotionTag = "sadness"
200 | highestEmotion, multiResult, faceResult, eegResult, sensorStatus = detectEmotion(imgPath, eegTempPath, emotionTag)
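    # The values unpacked above come from detectEmotion (src/analyzeModule, not shown here).
    # Judging from how they are used below, multiResult/faceResult/eegResult are assumed to be
    # dicts keyed by the five emotions (fused, face-only, and EEG-only scores), highestEmotion
    # the top fused label, and sensorStatus an 8-element electrode-connection sequence.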
201 | # Accumulate results.
202 | global resultsDic
203 | emotions = ["happiness", "sadness", "disgust", "fear", "neutral"]
204 | for emotion in emotions:
205 | if emotion not in resultsDic:
206 | resultsDic[emotion] = [0, 0, 0]
207 | resultsDic[emotion][0] += faceResult[emotion]
208 | resultsDic[emotion][1] += eegResult[emotion]
209 | resultsDic[emotion][2] += multiResult[emotion]
210 |
211 | payload = {
212 | 'emotionTag': highestEmotion,
213 | 'emotionValues': multiResult,
214 | 'faceValues': faceResult,
215 | 'eegValues': eegResult,
216 | 'eegConnections' : {
217 | "eeg1": int(sensorStatus[0]),
218 | "eeg2" : int(sensorStatus[1]),
219 | "eeg3" : int(sensorStatus[2]),
220 | "eeg4" : int(sensorStatus[3]),
221 | "eeg5" : int(sensorStatus[4]),
222 | "eeg6" : int(sensorStatus[5]),
223 | "eeg7" : int(sensorStatus[6]),
224 | "eeg8" : int(sensorStatus[7]),
225 | }
226 | }
227 | return JsonResponse(payload)
228 |
229 | @api_view(['GET'])
230 | def finalResult(request):
231 | # Create text to send signal to save accumulated EEG signal text.
232 | file = open(os.path.join(dirPath, "save.txt"), "w")
233 | file.write("Save accumulated EEG signals")
234 | file.close()
235 | # Save final result to DB.
236 | global resultsDic
237 | resultId = request.session.get('resultId')
238 | result = Result.objects.filter(pk=resultId).first()
239 | result.happiness = resultsDic['happiness'][2]
240 | result.sadness = resultsDic['sadness'][2]
241 | result.disgust = resultsDic['disgust'][2]
242 | result.fear = resultsDic['fear'][2]
243 | result.neutral = resultsDic['neutral'][2]
244 | result.save()
245 | payload = {
246 | "faceResult" : {
247 | 'happiness' : resultsDic['happiness'][0],
248 | 'sadness' : resultsDic['sadness'][0],
249 | 'disgust' : resultsDic['disgust'][0],
250 | 'fear' : resultsDic['fear'][0],
251 | 'neutral' : resultsDic['neutral'][0],
252 | },
253 | "eegResult" : {
254 | 'happiness' : resultsDic['happiness'][1],
255 | 'sadness' : resultsDic['sadness'][1],
256 | 'disgust' : resultsDic['disgust'][1],
257 | 'fear' : resultsDic['fear'][1],
258 | 'neutral' : resultsDic['neutral'][1],
259 | },
260 | "multiResult" : {
261 | 'happiness' : resultsDic['happiness'][2],
262 | 'sadness' : resultsDic['sadness'][2],
263 | 'disgust' : resultsDic['disgust'][2],
264 | 'fear' : resultsDic['fear'][2],
265 | 'neutral' : resultsDic['neutral'][2],
266 | }
267 | }
268 | resultsDic = {}
269 | # Save accumulated EEG signal.
270 | filePath = os.path.join(dirPath, "all_signal.txt")
271 | destination = os.path.join(dateDirPath, "eeg")
272 |     # Poll until the collector has written the file, sleeping briefly to avoid a busy-wait.
273 |     while not os.path.isfile(filePath):
274 |         time.sleep(0.1)
275 |     shutil.move(filePath, destination)
276 | return JsonResponse(payload)
--------------------------------------------------------------------------------
/src/analyze/face/face_emotion.py:
--------------------------------------------------------------------------------
1 | # import
2 | import os
3 | import copy
4 | import numpy as np
5 | import cv2
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 | import torchvision
10 | import torchvision.transforms as transforms
11 | from torch.utils.data import TensorDataset, DataLoader, Dataset
12 | import pandas as pd
13 | from sklearn.model_selection import train_test_split
14 | from sklearn.metrics import classification_report
15 | from sklearn.metrics import plot_confusion_matrix
16 | # import seaborn as sns
17 | # import matplotlib.pyplot as plt
18 | from pandas import DataFrame
19 |
20 |
21 |
22 |
23 | dataset_path = "dataset/FER2013/fer2013/fer2013.csv"
24 | image_size = (48,48)
25 |
26 | # Hyperparameter settings
27 | batch_size = 64
28 | num_epochs = 100
29 | # num_epochs = 10
30 | input_shape = (1, 48, 48)
31 | validation_split = 0.2
32 | verbose = 1
33 | num_classes = 7
34 | patience = 50
35 |
36 | learning_rate = 0.001
37 | l2_regularization = 0.01
38 |
39 |
40 |
41 | class MyDataset(Dataset):
42 | def __init__(self, df_data, transform=None):
43 | super().__init__()
44 | pixels = df_data['pixels'].tolist()
45 | width, height = 48, 48
46 | faces = []
47 | for pixel_sequence in pixels:
48 | face = [int(pixel) for pixel in pixel_sequence.split(' ')]
49 | face = np.asarray(face).reshape(1, width, height)
50 | # face = cv2.resize(face.astype('uint8'), image_size)
51 | faces.append(face.astype('float32'))
52 |
53 | self.faces = np.asarray(faces)
54 | # self.faces = np.expand_dims(faces, -1)
55 | # self.emotions = pd.get_dummies(df_data['emotion']).values
56 | self.emotions = df_data['emotion'].values
57 | self.transform = transform
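        # Note: the transform is stored but never applied in __getitem__, so the
        # augmentations defined in loading_dataset() currently have no effect.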
58 |
59 | def __len__(self):
60 | return self.emotions.shape[0]
61 |
62 | def __getitem__(self, index):
63 | return self.faces[index], self.emotions[index]
64 |
65 |
66 |
67 | class FaceEmotion(nn.Module):
68 | def __init__(self, num_classes=num_classes):
69 | super(FaceEmotion, self).__init__()
70 |
71 | # base block
72 | self.base_block = nn.Sequential(
73 | nn.Conv2d(in_channels=1, out_channels=8, kernel_size=3, stride=1, padding=1, bias=False),
74 | nn.BatchNorm2d(8),
75 | nn.ReLU(inplace=True),
76 | nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3, stride=1, padding=1, bias=False),
77 | nn.BatchNorm2d(8),
78 | nn.ReLU(inplace=True))
79 |
80 |         # Residual block 1 - shortcut
81 | self.shortcut_1 = nn.Sequential(
82 | nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1, bias=False),
83 | nn.BatchNorm2d(16))
84 |
85 | # Residual block 1
86 | self.residual_block_1 = nn.Sequential(
87 | nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1, bias=False),
88 | nn.BatchNorm2d(16),
89 | nn.ReLU(inplace=True),
90 |
91 | nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1, bias=False),
92 | nn.BatchNorm2d(16),
93 |
94 | # nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
95 | )
96 |
97 |         # Residual block 2 - shortcut
98 | self.shortcut_2 = nn.Sequential(
99 | nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1, bias=False),
100 | nn.BatchNorm2d(32))
101 |
102 | # Residual block 2
103 | self.residual_block_2 = nn.Sequential(
104 | nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1, bias=False),
105 | nn.BatchNorm2d(32),
106 | nn.ReLU(inplace=True),
107 |
108 | nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1, bias=False),
109 | nn.BatchNorm2d(32),
110 |
111 | # nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
112 | )
113 |
114 |         # Residual block 3 - shortcut
115 | self.shortcut_3 = nn.Sequential(
116 | nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
117 | nn.BatchNorm2d(64))
118 |
119 | # Residual block 3
120 | self.residual_block_3 = nn.Sequential(
121 | nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
122 | nn.BatchNorm2d(64),
123 | nn.ReLU(inplace=True),
124 |
125 | nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
126 | nn.BatchNorm2d(64),
127 |
128 | # nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
129 | )
130 |
131 |         # Residual block 4 - shortcut
132 | self.shortcut_4 = nn.Sequential(
133 | nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False),
134 | nn.BatchNorm2d(128))
135 |
136 | # Residual block 4
137 | self.residual_block_4 = nn.Sequential(
138 | nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1, bias=False),
139 | nn.BatchNorm2d(128),
140 | nn.ReLU(inplace=True),
141 |
142 | nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1, bias=False),
143 | nn.BatchNorm2d(128),
144 |
145 | # nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
146 | )
147 |
148 |         # Output channels equal the number of classes (7).
149 | self.last_block = nn.Sequential(
150 | nn.Conv2d(in_channels=128, out_channels=num_classes, kernel_size=3),
151 |             nn.AdaptiveAvgPool2d((1, 1))  # argument is the output H x W: global average pooling
152 | )
153 |
154 | def forward(self, x):
155 | x = self.base_block(x)
156 | x = self.residual_block_1(x) + self.shortcut_1(x)
157 | x = self.residual_block_2(x) + self.shortcut_2(x)
158 | x = self.residual_block_3(x) + self.shortcut_3(x)
159 | x = self.residual_block_4(x) + self.shortcut_4(x)
160 |
161 | x = self.last_block(x)
162 |
163 |         # Flatten to a vector.
164 | x = x.view(x.size(0), -1)
165 |
166 | # output = F.softmax(x, dim=0)
167 |
168 | return x
169 |
170 | def predict(self, x):
171 | x = self.forward(x)
172 | output = F.softmax(x, dim=1)
173 |
174 | return output
175 |
176 |
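# A minimal single-image inference sketch (not part of the original file): it assumes a trained
# "FaceEmotionModel.pt" checkpoint and a 48x48 grayscale face crop, matching the (1, 48, 48)
# input shape built in MyDataset above. The helper name below is hypothetical.
def predict_single_face(face_48x48_gray, weights_path="FaceEmotionModel.pt"):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = FaceEmotion()
    model.load_state_dict(torch.load(weights_path, map_location=device))
    model.to(device)
    model.eval()
    # Shape the input as (batch, channel, H, W) and run the softmax head defined in predict().
    x = torch.as_tensor(face_48x48_gray, dtype=torch.float32).reshape(1, 1, 48, 48).to(device)
    with torch.no_grad():
        probs = model.predict(x)
    return probs.squeeze(0).cpu().numpy()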
177 |
178 | def loading_dataset():
179 |     # Load the dataset.
180 | print("Dataset Loading...")
181 | trans_train = transforms.Compose([transforms.ToTensor(),
182 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
183 | transforms.RandomRotation(10),
184 | transforms.RandomHorizontalFlip(),
185 | ])
186 | trans_val = transforms.Compose([transforms.ToTensor(),
187 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
188 | ])
189 |
190 | data = pd.read_csv(dataset_path)
191 | print(f"Dataset size : {len(data)}")
192 | train, test = train_test_split(data, test_size=0.2, shuffle=True)
193 | train, val = train_test_split(train, test_size=validation_split, shuffle=False)
194 | print(f"train : {len(train)} + val {len(val)} / test : {len(test)}")
195 |
196 | dataset_train = MyDataset(df_data=train, transform=trans_train)
197 | dataset_val = MyDataset(df_data=val, transform=trans_val)
198 | dataset_test = MyDataset(df_data=test,transform=None)
199 |
200 | loader_train = DataLoader(dataset=dataset_train, batch_size=batch_size, shuffle=True, num_workers=0)
201 | loader_val = DataLoader(dataset=dataset_val, batch_size=batch_size, shuffle=True, num_workers=0)
202 | loader_test = DataLoader(dataset=dataset_test, batch_size=batch_size, shuffle=True, num_workers=0)
203 |
204 | return loader_train, loader_val, loader_test
205 |
206 |
207 |
208 | def training_model():
209 |     # GPU setup
210 |     device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
211 |     # Check GPU availability
212 |     print("GPU : ", torch.cuda.is_available())
213 |
214 |     # Create the model instance
215 |     model = FaceEmotion()
216 |     print("Network created")
217 |     model.to(device)
218 |
219 | criterion = nn.CrossEntropyLoss()
220 | # optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
221 | optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
222 |
223 | best_acc = 0
224 | best_model = model
225 |
226 | for epoch in range(0, num_epochs):
227 | # Training
228 | model.train(True)
229 | running_loss = 0.0
230 | running_corrects = 0
231 | running_total = 0
232 | total_step = len(loader_train)
233 | for i, (images, labels) in enumerate(loader_train):
234 | images = images.to(device)
235 | labels = labels.to(device)
236 | outputs = model(images)
237 |
238 | y_true = labels.cpu().numpy()
239 | _, predicted = torch.max(outputs.data, 1)
240 | # predicted = predicted.cpu().numpy()
241 |
242 | optimizer.zero_grad()
243 | loss = criterion(outputs, labels)
244 | loss.backward()
245 | optimizer.step()
246 |
247 | running_loss += loss.item()
248 | # running_corrects += torch.sum(predicted == labels.data)
249 | # running_corrects += (predicted == labels).sum().item()
250 | running_corrects += sum(1 for a, b in zip(predicted, labels) if a == b)
251 | running_total += len(y_true)
252 |
253 |         # One training epoch finished.
254 | epoch_loss = running_loss / len(loader_train)
255 | epoch_acc = 100 * running_corrects / running_total
256 |
257 | # if (i%100) == 0:
258 | # print(outputs[0])
259 | # print(f"labels shape : {labels.shape}, outputs shape : {outputs.shape}")
260 |
261 |         print(f"Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{total_step}], Train Accuracy : {epoch_acc}, Train Loss : {epoch_loss}")
262 |
263 | # validation
264 | model.train(False)
265 | running_loss = 0.0
266 | running_corrects = 0
267 | running_total = 0
268 | total_step = len(loader_val)
269 | for i, (images, labels) in enumerate(loader_val):
270 | images = images.to(device)
271 | labels = labels.to(device)
272 | outputs = model(images)
273 |
274 | y_true = labels.cpu().numpy()
275 | _, predicted = torch.max(outputs.data, 1)
276 | predicted = predicted.cpu().numpy()
277 |
278 |
279 | loss = criterion(outputs, labels)
280 |
281 | running_loss += loss.item()
282 | # running_corrects += torch.sum(predicted == labels.data)
283 | # running_corrects += (predicted == labels).sum().item()
284 | running_corrects += sum(1 for a, b in zip(predicted, labels) if a == b)
285 | running_total += len(y_true)
286 |
287 |         # One validation epoch finished.
288 |         # Compute the average loss and accuracy.
289 | epoch_loss = running_loss / len(loader_val)
290 | epoch_acc = 100 * running_corrects / running_total
291 |
292 | print(f"Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{total_step}], Val accuracy : {epoch_acc}")
293 |         # Keep the model from the best-performing epoch.
294 | if epoch_acc > best_acc:
295 | print("Best Model is saved...")
296 | best_acc = epoch_acc
297 | best_model = copy.deepcopy(model)
298 | # class_names = ['Angry', 'Fear','Disgust', 'Happy', 'Sad', 'Surprised', 'Neutral']
299 | # print(classification_report(y_true, predicted, zero_division=0))
300 |
301 |     print("Training finished. Saving model.")
302 | torch.save(best_model.state_dict(), "FaceEmotionModel.pt")
303 |
304 |
305 |
306 | def confusion_matrix(preds, labels, conf_matrix):
307 | preds = torch.argmax(preds, 1)
308 | for p, t in zip(preds, labels):
309 | conf_matrix[p, t] += 1
310 | return conf_matrix
311 |
312 |
313 |
314 | def test_model():
315 |     # GPU setup
316 |     device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
317 |     # Check GPU availability
318 |     print("GPU : ", torch.cuda.is_available())
319 |
320 |     model = FaceEmotion()
321 |     model.load_state_dict(torch.load("FaceEmotionModel.pt", map_location=device))
322 |     model.to(device)
323 |     model.train(False)
324 |
325 | conf_matrix = np.zeros((7, 7))
326 |
327 | class_total = [0, 0, 0, 0, 0, 0, 0]
328 | class_correct = [0, 0, 0, 0, 0, 0, 0]
329 |
330 | running_loss = 0.0
331 | running_corrects = 0
332 | running_total = 0
333 |
334 | total_step = len(loader_test)
335 | for i, (images, labels) in enumerate(loader_test):
336 | images = images.to(device)
337 | labels = labels.to(device)
338 | outputs = model(images)
339 |
340 | conf_matrix = confusion_matrix(outputs, labels, conf_matrix)
341 |
342 | y_true = labels.cpu().numpy()
343 | _, predicted = torch.max(outputs.data, 1)
344 |
345 | c = (predicted == labels).squeeze()
346 | for idx in range(len(labels)):
347 | label = labels[idx]
348 | pred = predicted[idx]
349 | if label == pred:
350 | class_correct[label] += 1
351 | class_total[label] += 1
352 |
353 | if (i % 10 == 0):
354 | print (f"Step : {i+1} / {total_step}")
355 |
356 |
357 | class_names = ['Angry', 'Fear','Disgust', 'Happy', 'Sad', 'Surprised', 'Neutral']
358 | for emotion in range(0, 7):
359 | print(f"Accuracy of {class_names[emotion]} : {100 * class_correct[emotion] / class_total[emotion]}")
360 |
361 | df = DataFrame(conf_matrix, index=class_names, columns=class_names)
362 | print(df)
363 | # plt.figure(figsize=(15, 15))
364 | # sns.heatmap(df, annot=True)
365 |
366 |
367 | if __name__ == "__main__":
368 |     loader_train, loader_val, loader_test = loading_dataset()
369 | training_model()
--------------------------------------------------------------------------------