├── bin └── README.md ├── doc ├── README.md ├── images │ ├── FBI_logo.png │ ├── KJ Kim.jpg │ ├── MH Seo.jpg │ ├── SJ Kim.jpg │ ├── SM Yoon.jpg │ ├── YJ Lee.jpg │ └── YL Lee.jpg ├── mid eval 1 │ ├── 답변서 요약_2조.pdf │ ├── 중간보고서_2조.pdf │ └── 수행계획서_ver2_2조.pdf ├── mid eval 2 │ ├── 2차 중간보고서.pdf │ └── 중간자문피드백응답.pdf ├── final eval │ └── 수행결과보고서_FBI.pdf └── implementation plan │ ├── 발표자료_2조.pdf │ └── 수행계획서_2조.pdf ├── src ├── analyze │ ├── eeg │ │ ├── README.md │ │ ├── sensorModule.pyc │ │ ├── eeg_classifier.pth │ │ ├── test_signal.pickle │ │ ├── Test_EEG_Classifier.pth │ │ ├── sensorModule.py │ │ ├── transformModule.py │ │ ├── Models.py │ │ ├── preprocessModule.py │ │ ├── meta_data.py │ │ ├── record_signal.py │ │ ├── CustomDatasetClass.py │ │ ├── test_only_eeg_Classifier.py │ │ ├── eegAnalyzeModule.py │ │ ├── plotModule.py │ │ ├── TrainTestModule.py │ │ └── .ipynb_checkpoints │ │ │ └── Train EEG based Emotion Classifier-checkpoint.ipynb │ └── face │ │ ├── README.md │ │ ├── FaceEmotionModel.pt │ │ ├── predict_face_emotion.py │ │ ├── multimodal_decision.py │ │ ├── face_login_test.py │ │ ├── face_login.py │ │ ├── predict_face_emotion_faceapi.py │ │ ├── xception.py │ │ └── face_emotion.py ├── back-end │ ├── FBI │ │ ├── FBI │ │ │ ├── __init__.py │ │ │ ├── asgi.py │ │ │ ├── wsgi.py │ │ │ ├── urls.py │ │ │ └── settings.py │ │ ├── api │ │ │ ├── __init__.py │ │ │ ├── tests.py │ │ │ ├── admin.py │ │ │ ├── apps.py │ │ │ ├── serializers.py │ │ │ ├── urls.py │ │ │ ├── customLogin.py │ │ │ ├── models.py │ │ │ └── views.py │ │ └── manage.py │ └── README.md ├── front-end │ ├── .eslintrc.json │ ├── public │ │ ├── robots.txt │ │ ├── favicon.ico │ │ ├── logo192.png │ │ ├── logo512.png │ │ ├── manifest.json │ │ └── index.html │ ├── src │ │ ├── railed.png │ │ ├── components │ │ │ ├── Image │ │ │ │ ├── carouselImage1 │ │ │ │ ├── carouselImage2 │ │ │ │ ├── carouselImage3 │ │ │ │ └── carouselImage4 │ │ │ ├── NavBar.js │ │ │ ├── IntroCarousel.js │ │ │ ├── loginSuccessAlert.js │ │ │ └── IntroCard.js │ │ ├── index.css │ │ ├── index.js │ │ ├── UserContext.js │ │ ├── App.js │ │ ├── pages │ │ │ ├── Main.js │ │ │ ├── Signup.js │ │ │ ├── Login.js │ │ │ ├── AnalyzeOption.js │ │ │ ├── test │ │ │ │ └── VideoPlay.js │ │ │ ├── VideoPlay.js │ │ │ └── Result.js │ │ ├── App.css │ │ └── serviceWorker.js │ ├── .vscode │ │ └── settings.json │ ├── .prettierrc │ ├── package.json │ └── README.md ├── .ipynb_checkpoints │ └── 분석모듈 실제 동작 테스트-checkpoint.ipynb ├── analyzeModule.py └── README.md ├── _config.yml ├── .gitignore ├── README.md └── index.md /bin/README.md: -------------------------------------------------------------------------------- 1 | bin 2 | -------------------------------------------------------------------------------- /doc/README.md: -------------------------------------------------------------------------------- 1 | doc 2 | -------------------------------------------------------------------------------- /src/analyze/eeg/README.md: -------------------------------------------------------------------------------- 1 | eeg 2 | -------------------------------------------------------------------------------- /src/back-end/FBI/FBI/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/back-end/FBI/api/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /_config.yml: 
-------------------------------------------------------------------------------- 1 | theme: jekyll-theme-slate -------------------------------------------------------------------------------- /src/back-end/README.md: -------------------------------------------------------------------------------- 1 | back-end 2 | -------------------------------------------------------------------------------- /src/analyze/face/README.md: -------------------------------------------------------------------------------- 1 | face 2 | 3 | -------------------------------------------------------------------------------- /src/front-end/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": ["plugin:prettier/recommended"] 3 | } -------------------------------------------------------------------------------- /src/back-end/FBI/api/tests.py: -------------------------------------------------------------------------------- 1 | from django.test import TestCase 2 | 3 | # Create your tests here. 4 | -------------------------------------------------------------------------------- /doc/images/FBI_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/images/FBI_logo.png -------------------------------------------------------------------------------- /doc/images/KJ Kim.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/images/KJ Kim.jpg -------------------------------------------------------------------------------- /doc/images/MH Seo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/images/MH Seo.jpg -------------------------------------------------------------------------------- /doc/images/SJ Kim.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/images/SJ Kim.jpg -------------------------------------------------------------------------------- /doc/images/SM Yoon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/images/SM Yoon.jpg -------------------------------------------------------------------------------- /doc/images/YJ Lee.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/images/YJ Lee.jpg -------------------------------------------------------------------------------- /doc/images/YL Lee.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/images/YL Lee.jpg -------------------------------------------------------------------------------- /src/back-end/FBI/api/admin.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | 3 | # Register your models here. 
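Note: api/admin.py above is left as the default Django stub. A minimal, hypothetical sketch of how this app's models could be exposed in the Django admin — assuming the Video and User models defined in api/models.py are meant to be managed there (no such registration exists in the repository):

from django.contrib import admin
from .models import Video, User  # models defined in api/models.py

# Hypothetical sketch only — the repo's admin.py contains no registrations.
admin.site.register(Video)
admin.site.register(User)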
4 | -------------------------------------------------------------------------------- /src/front-end/public/robots.txt: -------------------------------------------------------------------------------- 1 | # https://www.robotstxt.org/robotstxt.html 2 | User-agent: * 3 | Disallow: 4 | -------------------------------------------------------------------------------- /doc/mid eval 1/답변서 요약_2조.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/mid eval 1/답변서 요약_2조.pdf -------------------------------------------------------------------------------- /doc/mid eval 1/중간보고서_2조.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/mid eval 1/중간보고서_2조.pdf -------------------------------------------------------------------------------- /doc/mid eval 2/2차 중간보고서.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/mid eval 2/2차 중간보고서.pdf -------------------------------------------------------------------------------- /doc/mid eval 2/중간자문피드백응답.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/mid eval 2/중간자문피드백응답.pdf -------------------------------------------------------------------------------- /src/front-end/src/railed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/front-end/src/railed.png -------------------------------------------------------------------------------- /doc/final eval/수행결과보고서_FBI.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/final eval/수행결과보고서_FBI.pdf -------------------------------------------------------------------------------- /doc/mid eval 1/수행계획서_ver2_2조.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/mid eval 1/수행계획서_ver2_2조.pdf -------------------------------------------------------------------------------- /src/analyze/eeg/sensorModule.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/analyze/eeg/sensorModule.pyc -------------------------------------------------------------------------------- /src/front-end/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/front-end/public/favicon.ico -------------------------------------------------------------------------------- /src/front-end/public/logo192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/front-end/public/logo192.png -------------------------------------------------------------------------------- /src/front-end/public/logo512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/front-end/public/logo512.png 
-------------------------------------------------------------------------------- /doc/implementation plan/발표자료_2조.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/implementation plan/발표자료_2조.pdf -------------------------------------------------------------------------------- /src/analyze/eeg/eeg_classifier.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/analyze/eeg/eeg_classifier.pth -------------------------------------------------------------------------------- /src/analyze/eeg/test_signal.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/analyze/eeg/test_signal.pickle -------------------------------------------------------------------------------- /src/back-end/FBI/api/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | 4 | class ApiConfig(AppConfig): 5 | name = 'api' 6 | -------------------------------------------------------------------------------- /doc/implementation plan/수행계획서_2조.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/doc/implementation plan/수행계획서_2조.pdf -------------------------------------------------------------------------------- /src/analyze/face/FaceEmotionModel.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/analyze/face/FaceEmotionModel.pt -------------------------------------------------------------------------------- /src/analyze/eeg/Test_EEG_Classifier.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/analyze/eeg/Test_EEG_Classifier.pth -------------------------------------------------------------------------------- /src/.ipynb_checkpoints/분석모듈 실제 동작 테스트-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [], 3 | "metadata": {}, 4 | "nbformat": 4, 5 | "nbformat_minor": 2 6 | } 7 | -------------------------------------------------------------------------------- /src/front-end/src/components/Image/carouselImage1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/front-end/src/components/Image/carouselImage1 -------------------------------------------------------------------------------- /src/front-end/src/components/Image/carouselImage2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/front-end/src/components/Image/carouselImage2 -------------------------------------------------------------------------------- /src/front-end/src/components/Image/carouselImage3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/front-end/src/components/Image/carouselImage3 -------------------------------------------------------------------------------- /src/front-end/src/components/Image/carouselImage4: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kookmin-sw/capstone-2020-2/HEAD/src/front-end/src/components/Image/carouselImage4 -------------------------------------------------------------------------------- /src/front-end/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "javascript.format.enable": false, 3 | "python.pythonPath": "C:\\Users\\sjk14.DESKTOP-4VMRRKQ\\AppData\\Local\\Programs\\Python\\Python38-32\\python.exe" 4 | } -------------------------------------------------------------------------------- /src/front-end/.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "semi": true, 4 | "useTabs": false, 5 | "tabWidth": 2, 6 | "trailingComma": "all", 7 | "printWidth": 80, 8 | "bracketSpacing": true 9 | } -------------------------------------------------------------------------------- /src/back-end/FBI/api/serializers.py: -------------------------------------------------------------------------------- 1 | from .models import User 2 | from rest_framework import serializers 3 | 4 | class UserSerializer(serializers.ModelSerializer): 5 | class Meta: 6 | model = User 7 | fields = ['id', 'username', 'userFace'] 8 | 9 | class UserLoginSerializer(serializers.ModelSerializer): 10 | class Meta: 11 | model = User 12 | fields = ['userFace'] -------------------------------------------------------------------------------- /src/front-end/src/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | margin: 0; 3 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 4 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', 5 | sans-serif; 6 | -webkit-font-smoothing: antialiased; 7 | -moz-osx-font-smoothing: grayscale; 8 | } 9 | 10 | code { 11 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', 12 | monospace; 13 | } 14 | -------------------------------------------------------------------------------- /src/back-end/FBI/FBI/asgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | ASGI config for FBI project. 3 | 4 | It exposes the ASGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.asgi import get_asgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FBI.settings') 15 | 16 | application = get_asgi_application() 17 | -------------------------------------------------------------------------------- /src/back-end/FBI/FBI/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for FBI project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 
5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FBI.settings') 15 | 16 | application = get_wsgi_application() 17 | -------------------------------------------------------------------------------- /src/front-end/src/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom'; 3 | import './index.css'; 4 | import App from './App'; 5 | import * as serviceWorker from './serviceWorker'; 6 | import 'bootstrap/dist/css/bootstrap.css'; 7 | 8 | ReactDOM.render(, document.getElementById('root')); 9 | 10 | // If you want your app to work offline and load faster, you can change 11 | // unregister() to register() below. Note this comes with some pitfalls. 12 | // Learn more about service workers: https://bit.ly/CRA-PWA 13 | serviceWorker.unregister(); 14 | 15 | export { default as Main } from './pages/Main'; 16 | -------------------------------------------------------------------------------- /src/front-end/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "React App", 3 | "name": "Create React App Sample", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "64x64 32x32 24x24 16x16", 8 | "type": "image/x-icon" 9 | }, 10 | { 11 | "src": "logo192.png", 12 | "type": "image/png", 13 | "sizes": "192x192" 14 | }, 15 | { 16 | "src": "logo512.png", 17 | "type": "image/png", 18 | "sizes": "512x512" 19 | } 20 | ], 21 | "start_url": ".", 22 | "display": "standalone", 23 | "theme_color": "#000000", 24 | "background_color": "#ffffff" 25 | } 26 | -------------------------------------------------------------------------------- /src/back-end/FBI/api/urls.py: -------------------------------------------------------------------------------- 1 | from django.urls import path 2 | from rest_framework.urlpatterns import format_suffix_patterns 3 | from . 
import views 4 | 5 | urlpatterns = [ 6 | path('signup/', views.signup, name="signup"), 7 | path('login/', views.login, name="login"), 8 | path('logout/', views.logout, name="logout"), 9 | path('user//analyze//', views.getAnalyzingVideo.as_view(), name='analyze'), 10 | path('user/analyze/real-time-result/', views.realTimeAnalyze, name='realTimeResult'), 11 | path('analyze/final-result/', views.finalResult, name='finalResult'), 12 | ] 13 | 14 | #urlpatterns = format_suffix_patterns(urlpatterns) -------------------------------------------------------------------------------- /src/analyze/face/predict_face_emotion.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import torch 3 | from torch.autograd import Variable 4 | # import torch.nn.functional as F 5 | import torchvision.transforms as transforms 6 | 7 | from face_emotion import FaceEmotion 8 | 9 | model = FaceEmotion() 10 | model.load_state_dict(torch.load("FaceEmotionModel.pt")) 11 | 12 | 13 | image = cv2.imread('10.jpg',cv2.IMREAD_GRAYSCALE) 14 | transformation = transforms.Compose([transforms.ToTensor()]) 15 | image_tensor = transformation(image).float() 16 | inp = Variable(image_tensor) 17 | 18 | output = model.predict(inp.unsqueeze_(0)) 19 | print(output.data) 20 | 21 | emotion = torch.max(output.data, 1) 22 | print(emotion) -------------------------------------------------------------------------------- /src/back-end/FBI/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Django's command-line utility for administrative tasks.""" 3 | import os 4 | import sys 5 | 6 | 7 | def main(): 8 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FBI.settings') 9 | try: 10 | from django.core.management import execute_from_command_line 11 | except ImportError as exc: 12 | raise ImportError( 13 | "Couldn't import Django. Are you sure it's installed and " 14 | "available on your PYTHONPATH environment variable? Did you " 15 | "forget to activate a virtual environment?" 
16 | ) from exc 17 | execute_from_command_line(sys.argv) 18 | 19 | 20 | if __name__ == '__main__': 21 | main() 22 | -------------------------------------------------------------------------------- /src/front-end/src/UserContext.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | 3 | const UserContext = React.createContext({ 4 | user: { id: '', name: '', loggedIn: false }, 5 | }); 6 | 7 | class UserProvider extends Component { 8 | // Context state 9 | state = { 10 | user: '', 11 | setUser: this.setUser.bind(this), 12 | unsetUser: this.unsetUser.bind(this), 13 | }; 14 | 15 | // Method to update state 16 | setUser(newUser) { 17 | // console.log('hihihi setUser'); 18 | this.setState({ user: newUser }); 19 | console.log('hey setUser success'); 20 | } 21 | 22 | unsetUser() { 23 | this.setState({ user: { id: 0, name: '', loggedIn: false } }); 24 | } 25 | 26 | render() { 27 | const { children } = this.props; 28 | return ( 29 | {children} 30 | ); 31 | } 32 | } 33 | 34 | export { UserProvider }; 35 | export const UserConsumer = UserContext.Consumer; 36 | export default UserContext; 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Real-time data 2 | /FBI-data 3 | 4 | # back-end 5 | # Ignore settings 6 | /src/back-end/FBI/mysql.cnf 7 | 8 | # Ignore migrations 9 | /src/back-end/FBI/api/migrations 10 | /src/back-end/FBI/media 11 | 12 | # Ignore idea 13 | /src/back-end/FBI/.idea 14 | .idea/ 15 | 16 | # Ignore pycache 17 | **/__pycache__ 18 | /src/back-end/FBI/FBI/__pycache__ 19 | /src/back-end/FBI/api/__pycache__ 20 | 21 | # Ignore encoded users file 22 | /src/back-end/FBI/encoded_users 23 | 24 | 25 | # face 26 | # Ignore dataset 27 | /src/face/dataset/ 28 | 29 | #frontend 30 | # dependencies 31 | /src/front-end/node_modules 32 | /src/front-end/.pnp 33 | /src/front-end/.pnp.js 34 | 35 | # testing 36 | /src/front-end/coverage 37 | 38 | # production 39 | /src/front-end/build 40 | 41 | /src/front-end/npm-debug.log* 42 | /src/front-end/yarn-debug.log* 43 | /src/front-end/yarn-error.log* 44 | 45 | # misc 46 | *.env.local 47 | *.env.development.local 48 | *.env.test.local 49 | *.env.production.local 50 | 51 | # for Mac 52 | *.DS_store 53 | -------------------------------------------------------------------------------- /src/back-end/FBI/api/customLogin.py: -------------------------------------------------------------------------------- 1 | import face_recognition 2 | import numpy as np 3 | import pickle 4 | 5 | def getEncodedUsersList(): 6 | encodeUsers = [] 7 | with open('encoded_users', 'rb') as fr: 8 | while True: 9 | try: 10 | encodeUsers.append(pickle.load(fr)) 11 | except EOFError: 12 | break 13 | return encodeUsers 14 | 15 | def isUser(login_face_encoding, encodedUsers): 16 | user_images_encoding = [] 17 | for user in encodedUsers: 18 | user_images_encoding.append(user[1]) 19 | 20 | matches = face_recognition.compare_faces(user_images_encoding, login_face_encoding, 0.42) 21 | face_distances = face_recognition.face_distance(user_images_encoding, login_face_encoding) 22 | 23 | if not True in matches: 24 | return None 25 | else: 26 | best_match_index = np.argmin(face_distances) 27 | if matches[best_match_index]: 28 | matched_user = encodedUsers[best_match_index][0] 29 | return matched_user 30 | else: 31 | return None 
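A hypothetical usage sketch (not part of the repository) of how a login view might combine the customLogin.py helpers above. It assumes the uploaded login photo has already been decoded into an image array that face_recognition can consume, and it reuses the encoding parameters seen in face_login.py:

import face_recognition
from .customLogin import getEncodedUsersList, isUser

def authenticate_by_face(login_image):
    # Encode the face in the uploaded login photo (same parameters as face_login.py).
    encodings = face_recognition.face_encodings(login_image, num_jitters=10, model="large")
    if not encodings:
        return None  # no face detected in the uploaded image
    encoded_users = getEncodedUsersList()
    if not encoded_users:
        return None  # no registered users yet
    # isUser returns the matched user's stored identifier, or None below the match threshold.
    return isUser(encodings[0], encoded_users)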
-------------------------------------------------------------------------------- /src/back-end/FBI/FBI/urls.py: -------------------------------------------------------------------------------- 1 | """FBI URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/3.0/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | from django.contrib import admin 17 | from django.urls import path, include 18 | from django.conf import settings 19 | from django.conf.urls.static import static 20 | 21 | urlpatterns = [ 22 | path('admin/', admin.site.urls), 23 | path('api/v1/', include('api.urls')) 24 | ] 25 | 26 | if settings.DEBUG: 27 | urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) 28 | 29 | -------------------------------------------------------------------------------- /src/front-end/src/components/NavBar.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import '../App.css'; 3 | import { IconButton,AppBar,Toolbar,Typography,Breadcrumbs, } from '@material-ui/core'; 4 | import MenuIcon from '@material-ui/icons/Menu'; 5 | import { Link, BrowserRouter as Router, Route } from 'react-router-dom'; 6 | class NavBar extends Component { 7 | 8 | 9 | render() { 10 | return ( 11 | 12 | 13 | 14 | 15 | 16 | 17 | FBI Emotion 18 | 19 | 20 | 21 | 22 | Home 23 | 24 | 25 | Logout 26 | 27 | 28 | 29 | 30 | ); 31 | } 32 | } 33 | 34 | export default NavBar; 35 | -------------------------------------------------------------------------------- /src/analyze/eeg/sensorModule.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import time 3 | import brainflow 4 | from brainflow.board_shim import BoardShim, BrainFlowInputParams, BoardIds 5 | from brainflow.data_filter import DataFilter, FilterTypes, AggOperations 6 | 7 | def set_board(): 8 | params = BrainFlowInputParams() 9 | params.serial_port = '/dev/ttyUSB0' 10 | BoardShim.enable_dev_board_logger() 11 | return BoardShim(0, params) 12 | 13 | def start_record(board): 14 | board.prepare_session() 15 | # while not board.is_prepared(): 16 | board.start_stream() 17 | 18 | def stop_record(board): 19 | board.stop_stream() 20 | board.release_session() 21 | 22 | def rail_test(signal): 23 | n_railed = 0 24 | is_railed = [{x:0} for x in range(0, signal.shape[0])] 25 | for ch in range(0, signal.shape[0]): 26 | val1,val2,val3 = signal[ch][1:4] 27 | 28 | if val1 == val2 and val2 == val3: 29 | n_railed += 1 30 | is_railed[ch] = True 31 | else: is_railed[ch] = False; 32 | return is_railed, n_railed 33 | 34 | if __name__ == "__main__": 35 | print("Complete setting") 36 | -------------------------------------------------------------------------------- /src/front-end/src/components/IntroCarousel.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import '../App.css'; 3 | import Carousel from 'react-bootstrap/Carousel'; 4 
| import carouselImage1 from './Image/carouselImage1'; 5 | import carouselImage2 from './Image/carouselImage2'; 6 | import carouselImage3 from './Image/carouselImage3'; 7 | import carouselImage4 from './Image/carouselImage4'; 8 | 9 | const carouselStyle = { 10 | height: '98%', 11 | marginTop: '4px', 12 | }; 13 | class IntroCarousel extends Component { 14 | render() { 15 | return ( 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | ); 31 | } 32 | } 33 | 34 | export default IntroCarousel; 35 | -------------------------------------------------------------------------------- /src/front-end/src/components/loginSuccessAlert.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import '../App.css'; 3 | import { IconButton } from '@material-ui/core'; 4 | import { Alert, AlertTitle } from '@material-ui/lab/'; 5 | import Collapse from '@material-ui/core/Collapse'; 6 | import CloseIcon from '@material-ui/icons/Close'; 7 | 8 | class LoginSuccessAlert extends Component { 9 | constructor(props) { 10 | super(props); 11 | this.state = { 12 | close: true, 13 | }; 14 | } 15 | 16 | render() { 17 | return ( 18 | 19 | { 27 | this.setState({ 28 | close: false, 29 | }); 30 | }} 31 | > 32 | 33 | 34 | } 35 | > 36 | Login! 37 | {this.props.userName}님 안녕하세요! 38 | 39 | 40 | 41 | ); 42 | } 43 | } 44 | 45 | export default LoginSuccessAlert; 46 | -------------------------------------------------------------------------------- /src/analyze/eeg/transformModule.py: -------------------------------------------------------------------------------- 1 | # 필요한 라이브러리들 2 | from collections import defaultdict 3 | import numpy as np 4 | 5 | # Signal Processing Library 6 | from scipy import signal 7 | import pyeeg as pe 8 | from brainflow.data_filter import DataFilter, FilterTypes, AggOperations 9 | # ======================================================= 10 | 11 | # eeg signal을 변환하는 다양한 함수들을 정의 12 | def computeFD(total_signal, chosen_channels): # 채널 인덱스 리스트 13 | fd_values = [] 14 | 15 | for ch in chosen_channels: 16 | input_signal = total_signal[ch] 17 | 18 | fd = pe.hfd(input_signal, 5) 19 | fd_values.append(fd) 20 | return fd_values 21 | 22 | def computefftMap(total_signal, chosen_channels, freqs, sf=128): 23 | # Init 24 | fftMap = False 25 | 26 | for ch in chosen_channels: 27 | input_signal = total_signal[ch] 28 | 29 | # Fast Fourier Transform ======================== 30 | ffts = pe.bin_power(input_signal, freqs, sf) 31 | fft = ffts[1] 32 | # =============================================== 33 | 34 | # FFT Map 35 | if type(fftMap) == bool: 36 | fftMap = fft 37 | else: 38 | fftMap = np.vstack((fftMap, fft)) 39 | return fftMap 40 | 41 | if __name__ == "__main__": 42 | print("환경설정 완료") -------------------------------------------------------------------------------- /src/analyze/face/multimodal_decision.py: -------------------------------------------------------------------------------- 1 | import operator 2 | 3 | def prob_distribution(dic): 4 | total = sum(list(dic.values())) 5 | for key, val in dic.items(): 6 | dic[key] /= total 7 | 8 | return dic 9 | 10 | 11 | def multimodal_emotion(face_emotions, eeg_emotions, high_enhance, low_enhance): 12 | max_face_emotions = max(face_emotions.items(), key=operator.itemgetter(1))[0] 13 | 14 | # EEG 값이 스트링인 경우 15 | if isinstance(eeg_emotions, str): 16 | enhance = high_enhance if max_face_emotions == "neutral" else low_enhance 17 | face_emotions[eeg_emotions] *= enhance # 이 값만큼 eeg 결과 감정에 
가중치를 줌. 18 | 19 | emotions = prob_distribution(face_emotions) # 확률값으로 만들어주기. 20 | 21 | 22 | # EEG 값이 확률값인 경우 23 | else: 24 | # 무표정일 때 25 | if max_face_emotions == "neutral": 26 | for key, val in face_emotions.items(): 27 | face_emotions[key] += (low_enhance*face_emotions[key] + high_enhance*eeg_emotions[key]) 28 | 29 | if max_face_emotions == "neutral": 30 | for key, val in face_emotions.items(): 31 | face_emotions[key] += (high_enhance*face_emotions[key] + low_enhance*eeg_emotions[key]) 32 | 33 | emotions = prob_distribution(face_emotions) 34 | 35 | return emotions -------------------------------------------------------------------------------- /src/analyze/face/face_login_test.py: -------------------------------------------------------------------------------- 1 | from face_login import login 2 | import os 3 | import pickle 4 | import face_recognition 5 | import cv2 6 | 7 | # 유저 목록 이진파일이 있는지 없는지 확인. 8 | current_dir = os.getcwd() 9 | 10 | if "user_list.txt" not in os.listdir(current_dir): 11 | print("No User Data.") 12 | user_list = [] 13 | else: 14 | user_list_file = open('user_list.txt', 'rb') 15 | user_list = pickle.load(user_list_file) 16 | user_list_file.close() 17 | user_name_list = [] 18 | for user in user_list: 19 | user_name_list.append(user[0]) 20 | print("---------- User List ----------") 21 | print(len(user_name_list)) 22 | print(user_name_list) 23 | 24 | user_image_path = input("Enter a User image path : ") 25 | user_image = cv2.imread(user_image_path) 26 | 27 | uid = login(user_image, user_list, 0.4) 28 | 29 | if uid is None: 30 | print("You're Not on the user list.") 31 | answer = input("Would you like to sign up? (Y / N) : ") 32 | 33 | if answer in ['y', 'Y']: 34 | user_image_encoding = face_recognition.face_encodings(user_image, num_jitters=10, model="large")[0] 35 | uid = input("Enter the your name : ") 36 | user_list.append([uid, user_image_encoding]) 37 | user_list_file = open('user_list.txt', 'wb') 38 | pickle.dump(user_list, user_list_file) 39 | user_list_file.close() 40 | else: 41 | print(f"Hello, {uid} !") -------------------------------------------------------------------------------- /src/front-end/src/App.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import Login from './pages/Login'; 3 | import Main from './pages/Main'; 4 | import Signup from './pages/Signup'; 5 | import Analyze from './pages/AnalyzeOption'; 6 | import VideoPlay from './pages/VideoPlay'; 7 | import Result from './pages/Result'; 8 | import './App.css'; 9 | import axios from 'axios'; 10 | import { BrowserRouter as Router, Route, Switch } from 'react-router-dom'; 11 | import { UserProvider } from './UserContext'; 12 | 13 | axios.defaults.xsrfCookieName = 'csrftoken'; 14 | axios.defaults.xsrfHeaderName = 'X-CSRFToken'; 15 | 16 | class App extends Component { 17 | constructor() { 18 | super(); 19 | this.state = { 20 | user: { id: 0, name: 'appUser', loggedIn: false }, 21 | }; 22 | } 23 | 24 | render() { 25 | return ( 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | ); 40 | } 41 | } 42 | 43 | export default App; 44 | -------------------------------------------------------------------------------- /src/analyze/eeg/Models.py: -------------------------------------------------------------------------------- 1 | # Model class 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | 6 | 7 | class CNN(nn.Module): 8 | def __init__(self, n_channel, lin_len, out_len, 
n_electrodes, model_type): 9 | super(CNN, self).__init__() 10 | self.pool = nn.MaxPool2d(2) 11 | 12 | self.conv1 = nn.Conv2d(n_channel, 4, 3) # 8 => 4* 13 | self.batch1 = nn.BatchNorm2d(4) # 8=>4* 14 | 15 | self.conv2 = nn.Conv2d(4, 4, 3) 16 | self.batch2 = nn.BatchNorm2d(4) 17 | 18 | self.fc1 = nn.Linear(lin_len, 8) # 죽어라 이연지** 64=>8* 19 | self.fc2 = nn.Linear(8, out_len) 20 | 21 | self.n_electrodes = n_electrodes 22 | self.model_type = model_type 23 | 24 | self.lin_len = lin_len 25 | 26 | def forward(self, x): 27 | # conv => batch => pool => relu 28 | if self.n_electrodes == 32: 29 | x = F.relu(self.pool(self.batch1(self.conv1(x)))) 30 | elif self.n_electrodes == 8: 31 | x = F.relu(self.batch1(self.conv1(x))) # ch 8 32 | x = self.conv2(x) 33 | x = x.view(-1, self.lin_len) 34 | x = F.relu(self.fc1(x)) 35 | x = self.fc2(x) 36 | 37 | if self.model_type == "reg": 38 | return x 39 | else: 40 | return F.softmax(x, dim=1) 41 | 42 | if __name__ == "__main__": 43 | print("환경설정 완료") -------------------------------------------------------------------------------- /src/front-end/src/pages/Main.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import { withRouter } from 'react-router-dom'; 3 | import { 4 | createMuiTheme, 5 | Grid, 6 | responsiveFontSizes, 7 | ThemeProvider, 8 | } from '@material-ui/core'; 9 | import IntroCarousel from '../components/IntroCarousel'; 10 | import IntroCard from '../components/IntroCard'; 11 | import '../App.css'; 12 | import NavBar from '../components/NavBar'; 13 | 14 | let theme = createMuiTheme(); 15 | theme = responsiveFontSizes(theme); 16 | 17 | class Main extends Component { 18 | render() { 19 | return ( 20 | 21 |
22 | 23 | 29 | 30 | 31 | {' '} 32 | 42 | 43 | {' '} 44 | {' '} 45 |
{' '} 46 |
47 | ); 48 | } 49 | } 50 | 51 | export default withRouter(Main); 52 | -------------------------------------------------------------------------------- /src/analyze/eeg/preprocessModule.py: -------------------------------------------------------------------------------- 1 | from brainflow.data_filter import DataFilter, FilterTypes, AggOperations 2 | import numpy as np 3 | import copy 4 | 5 | def down_sampling(signal, chosen_channels): 6 | down_signal = [] 7 | for ch in chosen_channels: 8 | copy_signal = copy.deepcopy(signal[ch]) # ch 단위로 카피해야함.. (이상함..아무튼 그럼..) 9 | down_signal.append(DataFilter.perform_downsampling(copy_signal, 2, AggOperations.MEAN.value)) 10 | down_signal = np.array(down_signal) 11 | return down_signal 12 | 13 | def filtering(signal, sf, chosen_channels): 14 | for ch in chosen_channels: 15 | copy_signal = copy.deepcopy(signal[ch]) 16 | 17 | DataFilter.perform_lowpass(copy_signal, sf, 50.0, 5, FilterTypes.CHEBYSHEV_TYPE_1.value, 1) 18 | DataFilter.perform_highpass (copy_signal, sf, 3.0, 4, FilterTypes.BUTTERWORTH.value, 0) 19 | 20 | signal[ch] = copy_signal 21 | return signal 22 | 23 | 24 | # 0과 1 사이로 scailing 25 | def scale(signal): 26 | minVal = np.min(signal) 27 | maxVal = np.max(signal) 28 | 29 | signal = (signal - minVal) / (maxVal - minVal) 30 | return signal 31 | 32 | # 정규화 33 | def normalize(signal): 34 | n = np.linalg.norm(signal); 35 | signal = signal/n 36 | 37 | return signal 38 | 39 | # 표준화 (평균이 0, 분산이 1 이 되도록.) 40 | def standardize(signal): 41 | M = np.mean(signal); 42 | S = np.std(signal); 43 | signal = (signal - M) / S 44 | 45 | return signal 46 | 47 | if __name__ == "__main__": 48 | print("환경설정 완료") -------------------------------------------------------------------------------- /src/analyze/face/face_login.py: -------------------------------------------------------------------------------- 1 | import face_recognition 2 | import numpy as np 3 | 4 | # 로그인 함수 5 | # 사용자 이미지랑, 유저 목록 리스트 [(uid_1, 이미지_1), (uid_2, 이미지_2), ... ] 를 넘겨받음. 6 | # 등록된 유저면 uid 리턴 7 | # 등록되지 않은 유저면 None 값을 리턴 8 | def login(login_face, user_list, threshold): 9 | """Face Login Function 10 | 11 | Arguments: 12 | login_face {opencv image format} -- 로그인 하려는 사용자의 얼굴 이미지 13 | user_list {[(uid, opencv image format), ...]} -- DB 에서 불러온 기존 유저들의 uid 와 인코딩 이미지 쌍 리스트 14 | threshold {float} -- 매칭되는 얼굴과의 distance 차이. 이 값보다 작으면 매칭된다고 판단하는 것. 
15 | 16 | Returns: 17 | string -- 등록된 유저이면 uid, 등록되지 않은 유저이면 None 값 리턴 18 | """ 19 | 20 | # user_list 가 빈 리스트면 None 리턴 21 | if not user_list: 22 | return None 23 | 24 | login_face_encoding = face_recognition.face_encodings(login_face, num_jitters=10, model="large")[0] 25 | 26 | # 전달받은 (uid, 인코딩 이미지) 리스트에서 이미지만 따로 분리 27 | user_images_encoding = [] 28 | for user in user_list: 29 | user_images_encoding.append(user[1]) 30 | 31 | matches = face_recognition.compare_faces(user_images_encoding, login_face_encoding, threshold) 32 | face_distances = face_recognition.face_distance(user_images_encoding, login_face_encoding) 33 | print("Distance :", face_distances) # 로그인 하려는 사람의 얼굴과, 등록된 전체 회원들의 얼굴 별 유사도 거리를 출력 34 | 35 | # 유저 리스트에 매칭되는 사람이 없을 경우 36 | if not True in matches: 37 | return None 38 | # 유저 리스트에 매칭되는 사람이 있을 경우 39 | else: 40 | best_match_index = np.argmin(face_distances) 41 | if matches[best_match_index]: 42 | matched_uid = user_list[best_match_index][0] 43 | return matched_uid 44 | else: 45 | return None -------------------------------------------------------------------------------- /src/analyze/eeg/meta_data.py: -------------------------------------------------------------------------------- 1 | # Channel 2 | SEED_channels = {'FP1': 0, 'FPZ': 1, 'FP2': 2, 'AF3': 3, 3 | 'AF4': 4, 'F7': 5, 'F5': 6, 'F3': 7, 4 | 'F1': 8, 'FZ': 9, 'F2': 10, 'F4': 11, 5 | 'F6': 12, 'F8': 13, 'FT7': 14, 'FC5': 15, 6 | 'FC3': 16, 'FC1': 17, 'FCZ': 18, 'FC2': 19, 7 | 'FC4': 20, 'FC6': 21, 'FT8': 22, 'T7': 23, 8 | 'C5': 24, 'C3': 25, 'C1': 26, 'CZ': 27, 9 | 'C2': 28, 'C4': 29, 'C6': 30, 'T8': 31, 10 | 'TP7': 32, 'CP5': 33, 'CP3': 34, 'CP1': 35, 11 | 'CPZ': 36, 'CP2': 37, 'CP4': 38, 'CP6': 39, 12 | 'TP8': 40, 'P7': 41, 'P5': 42, 'P3': 43, 13 | 'P1': 44, 'PZ': 45, 'P2': 46, 'P4': 47, 14 | 'P6': 48, 'P8': 49, 'PO7': 50, 'PO5': 51, 15 | 'PO3': 52, 'POZ': 53, 'PO4': 54, 'PO6': 55, 16 | 'PO8': 56, 'CB1': 57, 'O1': 58, 'OZ': 59, 17 | 'O2': 60, 'CB2': 61} 18 | 19 | SEED_all_channel_names = list(SEED_channels.keys()) 20 | SEED_all_channel_values = list(SEED_channels.values()) 21 | 22 | DEAP_channels = {"FP1":0, "AF3":1, "F3":2, "F7":3, 23 | "FC5":4, "FC1":5, "C3":6, "T7":7, 24 | "CP5":8, "CP1":9, "P3":10, 25 | "P7":11, "PO3":12, "O1":13, 26 | "OZ":14, "PZ":15, "FP2":16, 27 | "AF4":17, "FZ":18, "F4":19, 28 | "F8":20, "FC6":21, "FC2":22, 29 | "CZ":23, "C4":24, "T8":25, 30 | "CP6":26, "CP2":27, "P4":28, 31 | "P8":29, "PO4":30, "O2":31} 32 | 33 | DEAP_all_channel_names = list(DEAP_channels.keys()) 34 | DEAP_all_channel_values = list(DEAP_channels.values()) -------------------------------------------------------------------------------- /src/analyze/eeg/record_signal.py: -------------------------------------------------------------------------------- 1 | from sensorModule import * 2 | import pickle 3 | import numpy as np 4 | # import keyboard 5 | import sys 6 | 7 | n_sec = 6 8 | n_ch = 8 9 | sf = 256 #board.get_sampling_rate(0) 10 | eeg_channels = [i for i in range(0,8)] 11 | splitted_signal = [] 12 | 13 | # ======================================= 14 | if __name__ == '__main__': 15 | has_sensor = True if sys.argv[1] == 'y' else False 16 | 17 | if has_sensor: 18 | try: 19 | board = set_board() 20 | start_record(board) 21 | except: 22 | print("Can't connect to an EEG sensor") 23 | sys.exit(1) 24 | 25 | while True: 26 | # if keyboard.is_pressed('q') : 27 | # break 28 | time.sleep(1) # save recent n_seconds signal for every 1 second. 
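        # Note: get_current_board_data(n) returns the most recent n samples without
        # clearing BrainFlow's internal buffer, so each 1-second pass re-reads an
        # overlapping 6-second window (get_board_data() at the end is what drains the buffer).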
29 | if has_sensor: 30 | temp_signal = board.get_current_board_data(n_sec * sf) # latest data from a board ** 31 | else: 32 | temp_signal = np.random.rand(8, n_sec * sf) 33 | 34 | temp_signal = temp_signal[eeg_channels, :] 35 | print(temp_signal.shape) 36 | 37 | railed_channels, n_railed = rail_test(temp_signal) 38 | if n_railed != 0: 39 | print("Railed Channels = ", railed_channels) 40 | continue # do not saved railed signals? 41 | splitted_signal.append(temp_signal) 42 | 43 | # save 44 | with open("../../../FBI-data/test_signal.txt", 'wb') as f: 45 | pickle.dump(temp_signal, f) 46 | 47 | # whole signal (Warn : could contain railed signals) 48 | if has_sensor: 49 | total_signal = board.get_board_data() # get all data and remove it from internal buffer 50 | total_signal = total_signal[eeg_channels, :] 51 | print("Total signal length = ", total_signal.shape[1] // sf) 52 | 53 | stop_record(board) 54 | print("Connection closed") 55 | sys.exit(0) -------------------------------------------------------------------------------- /src/front-end/src/components/IntroCard.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import '../App.css'; 3 | import { 4 | Button, 5 | Card, 6 | CardContent, 7 | Grid, 8 | Hidden, 9 | Typography, 10 | } from '@material-ui/core'; 11 | 12 | class IntroCard extends Component { 13 | render() { 14 | return ( 15 | 16 | 23 | 29 | 35 | Get started! 36 | {' '} 37 | 38 | 39 | 이 곳에 당신의 얼굴을 보여주세요.
40 | EEG와 표정을 이용한 감정인식이 가능합니다.{' '} 41 |
{' '} 42 |
{' '} 43 |
{' '} 44 | 45 | 46 |
47 | 53 | * 수집된 데이터는 삭제되지 않습니다.{' '} 54 | {' '} 55 |
{' '} 56 | {' '} 67 |
{' '} 68 |
{' '} 69 |
70 | ); 71 | } 72 | } 73 | 74 | export default IntroCard; 75 | -------------------------------------------------------------------------------- /src/analyze/face/predict_face_emotion_faceapi.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import base64 3 | import requests 4 | import json 5 | import numpy as np 6 | import os 7 | 8 | BASE_DIR = os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))) 9 | xmlPath = os.path.join(BASE_DIR, 'src/analyze/face/haarcascade_frontalface_default.xml') 10 | 11 | def predict_emotion(image_path): 12 | # Key 13 | subscription_key = "ac039e4790244804a34be1b1afa4e4ee" 14 | # Endpoint 15 | endpoint = "https://capstone-faceapi.cognitiveservices.azure.com/" 16 | base_uri = "https://koreacentral.api.cognitive.microsoft.com" 17 | 18 | face_cascade = cv2.CascadeClassifier(xmlPath) 19 | 20 | image = cv2.imread(image_path) 21 | 22 | # face detection 을 위해 흑백으로 변환 23 | gray = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2GRAY) 24 | 25 | # face detection 26 | faces = face_cascade.detectMultiScale(gray, 1.1, 4) 27 | 28 | # 사진에 얼굴이 있는 경우 29 | if len(faces): 30 | buffer = cv2.imencode('.jpg', image)[1].tostring() 31 | ''' 32 | with open(image_path, 'rb') as f: 33 | image_data = f.read() 34 | ''' 35 | 36 | # header 설정 37 | headers = { 38 | 'Content-Type': 'application/octet-stream', 39 | 'Ocp-Apim-Subscription-Key': subscription_key, 40 | } 41 | 42 | # parameter 설정 43 | params = { 44 | 'returnFaceId': 'true', 45 | 'returnFaceLandmarks': 'false', 46 | 'returnFaceAttributes': 'emotion', 47 | } 48 | 49 | # 리퀘스트 요청 50 | response = requests.request('POST', base_uri + '/face/v1.0/detect', json=None, data=buffer, headers=headers, params=params) 51 | emotion_dicts = response.json()[0]['faceAttributes']['emotion'] 52 | #print(emotion_dicts) 53 | 54 | emotions = list(emotion_dicts.items()) 55 | emotions = [e[1] for e in emotions] 56 | 57 | return True, emotion_dicts 58 | 59 | # 사진에 얼굴이 없는 경우. 60 | else: 61 | return False, None -------------------------------------------------------------------------------- /src/front-end/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 12 | 13 | 17 | 18 | 19 | 20 | 29 | React App 30 | 31 | 32 | 33 |
34 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /src/analyze/eeg/CustomDatasetClass.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | 4 | 5 | # label preprocessing for DEAP dataset 6 | def process_label(val, numOfClass=2): 7 | # 2 class 8 | if numOfClass == 2: 9 | if val > 5: 10 | return 1 11 | else: 12 | return 0 13 | # 3 class 14 | elif numOfClass == 3: 15 | if val > 6: 16 | return 2 # high 17 | elif val < 4: 18 | return 0 # low 19 | else: 20 | return 1 # neutral 21 | 22 | # Custom dataset class 23 | import torchvision.transforms as transforms 24 | from torch.utils.data import Dataset, DataLoader 25 | 26 | transform = transforms.Compose([transforms.ToTensor()]) 27 | 28 | class EEG_Dataset(Dataset): 29 | # load, split 30 | def __init__(self, data_list, dataset, target=-1, transform=None): 31 | self.data_list = data_list 32 | self.transform = transform 33 | self.dataset = dataset 34 | self.target = target 35 | 36 | def __len__(self): 37 | return len(self.data_list) 38 | 39 | def __getitem__(self, idx): 40 | if self.dataset == 'DEAP': 41 | spectro, label = self.data_list[idx][0], self.data_list[idx][1][self.target] 42 | elif self.dataset == 'SEED' or self.dataset == 'BCI': 43 | spectro, label = self.data_list[idx][0], self.data_list[idx][1] 44 | if self.transform: 45 | spectro = self.transform(spectro) 46 | return spectro, label 47 | 48 | # Convert loaded dataset into a custom dataset instance 49 | def get_train_test_set(train, test, bs, dataset): 50 | trainset = EEG_Dataset(train, dataset, target=-1, transform=transform) 51 | trainloader = torch.utils.data.DataLoader(trainset, batch_size=bs, 52 | shuffle=True, num_workers=2) 53 | testset = EEG_Dataset(test, dataset, target=-1, transform=transform) 54 | testloader = torch.utils.data.DataLoader(testset, batch_size=bs, 55 | shuffle=False, num_workers=2) 56 | return trainset, trainloader, testset, testloader 57 | 58 | if __name__ == "__main__": 59 | print("환경설정 완료") -------------------------------------------------------------------------------- /src/front-end/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "fbi_front", 3 | "version": "0.1.0", 4 | "private": true, 5 | "dependencies": { 6 | "@material-ui/core": "^4.9.11", 7 | "@material-ui/icons": "^4.9.1", 8 | "@material-ui/lab": "^4.0.0-alpha.50", 9 | "@testing-library/jest-dom": "^4.2.4", 10 | "@testing-library/react": "^9.5.0", 11 | "@testing-library/user-event": "^7.2.1", 12 | "@types/jest": "^25.2.1", 13 | "@types/node": "^13.11.0", 14 | "@types/react": "^16.9.32", 15 | "@types/react-dom": "^16.9.6", 16 | "axios": "^0.19.2", 17 | "base64-to-image": "^1.0.2", 18 | "bootstrap": "^4.4.1", 19 | "browserslist": "^4.11.1", 20 | "eslint-config-airbnb": "^18.1.0", 21 | "face-api.js": "file:face-api.js", 22 | "kill-port": "^1.6.0", 23 | "lodash": "^4.17.15", 24 | "ract": "0.0.1", 25 | "react": "^16.13.1", 26 | "react-bootstrap": "^1.0.0", 27 | "react-dom": "^16.13.1", 28 | "react-player": "^1.15.3", 29 | "react-redux": "^7.2.0", 30 | "react-router-dom": "^5.1.2", 31 | "react-scripts": "3.4.1", 32 | "react-webcam": "^5.0.1", 33 | "reactstrap": "^8.4.1", 34 | "recharts": "^1.8.5", 35 | "redux": "^4.0.5", 36 | "redux-form": "^8.3.5", 37 | "typescript": "^3.8.3", 38 | "video-react": "^0.14.1" 39 | }, 40 | "scripts": { 41 | "start": "react-scripts --max_old_space_size=4096 start", 42 | "build": "react-scripts 
build", 43 | "test": "react-scripts test", 44 | "eject": "react-scripts eject" 45 | }, 46 | "eslintConfig": { 47 | "extends": [ 48 | "airbnb", 49 | "prettier" 50 | ], 51 | "rules": { 52 | "react/jsx-filename-extension": 0, 53 | "react/prefer-stateless-function": 0, 54 | "react/jsx-one-expression-per-line": 0 55 | } 56 | }, 57 | "browserslist": [ 58 | "defaults" 59 | ], 60 | "development": [ 61 | "last 1 chrome version", 62 | "last 1 firefox version", 63 | "last 1 safari version" 64 | ], 65 | "proxy": "http://localhost:8000", 66 | "devDependencies": { 67 | "babel-eslint": "^10.1.0", 68 | "eslint-config-prettier": "^6.11.0", 69 | "eslint-plugin-prettier": "^3.1.3", 70 | "eslint-plugin-react": "^7.19.0", 71 | "prettier": "^2.0.5" 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/analyze/eeg/test_only_eeg_Classifier.py: -------------------------------------------------------------------------------- 1 | from eegAnalyzeModule import * # ** 2 | from sensorModule import * 3 | import pickle 4 | import numpy as np 5 | import keyboard 6 | import sys 7 | 8 | n_sec = 6 9 | n_ch = 8 10 | sf = 256 #board.get_sampling_rate(0) 11 | eeg_channels = [i for i in range(0,8)] 12 | splitted_signal = [] 13 | 14 | # ======================================= 15 | if __name__ == '__main__': 16 | has_sensor = True if sys.argv[1] == 'y' else False 17 | 18 | if has_sensor: 19 | try: 20 | board = set_board() 21 | start_record(board) 22 | except: 23 | print("Can't connect to an EEG sensor") 24 | sys.exit(1) 25 | 26 | while True: 27 | if keyboard.is_pressed('q') : 28 | break 29 | time.sleep(1) # save recent n_seconds signal for every 1 second. 30 | if has_sensor: 31 | temp_signal = board.get_current_board_data(n_sec * sf) # latest data from a board ** 32 | else: 33 | temp_signal = np.random.rand(8, n_sec * sf) 34 | 35 | temp_signal = temp_signal[eeg_channels, :] 36 | splitted_signal.append(temp_signal) 37 | 38 | # save 39 | eeg_save_path = "test_signal.pickle" 40 | with open(eeg_save_path, 'wb') as f: 41 | pickle.dump(temp_signal, f) 42 | 43 | # 여기다 넣어본다.. 
=============================== *** 44 | eeg_emotion, n_railed, is_railed = predict_emotion_EEG(model, "test_signal.pickle", chosen_channels, freqs, sf=256) 45 | max_idx = np.argmax(eeg_emotion[0,:].detach().numpy()) 46 | result_emo = emo_map[max_idx] 47 | 48 | if n_railed != 0: 49 | print("Railed Channels = ", railed_channels) 50 | print("Result Emotion = ", result_emo) 51 | # =============================================== *** 52 | 53 | # whole signal (Warn : could contain railed signals) 54 | if has_sensor: 55 | total_signal = board.get_board_data() # get all data and remove it from internal buffer 56 | total_signal = total_signal[eeg_channels, :] 57 | print("Total signal length = ", total_signal.shape[1] // sf) 58 | 59 | stop_record(board) 60 | print("Connection closed") 61 | sys.exit(0) -------------------------------------------------------------------------------- /src/analyze/eeg/eegAnalyzeModule.py: -------------------------------------------------------------------------------- 1 | #-*- coding: utf-8 -*- 2 | from .preprocessModule import * # Signal Preprocessing Methods 3 | from .transformModule import * # Signal => Input form 4 | import numpy as np 5 | import torch 6 | import pickle 7 | 8 | # setting 9 | freqs = [freq for freq in range(4,46,1)] 10 | chosen_channels = [i for i in range(0, 8)] 11 | sf = 256 12 | emo_map = {0 : "neutral", 1 : "sadness", 2 : "happiness", 13 | 3 : "disgust", 4 : "fear"}#, 5 : "anger", 6:"surprise"} 14 | 15 | # load trained model 16 | from .Models import * # CNN 17 | 18 | model = CNN(n_channel=1, lin_len=592, out_len=5, 19 | n_electrodes=8, model_type='cla') 20 | model.load_state_dict(torch.load('/home/myounghee/Documents/Django/test_fbi/capstone-2020-2/src/analyze/eeg/eeg_classifier.pth')) 21 | model.eval() 22 | 23 | 24 | def rail_test(signal): 25 | n_railed = 0 26 | is_railed = [{x:0} for x in range(0, signal.shape[0])] 27 | # print("Railed_channels : ", end = '') 28 | for ch in range(0, signal.shape[0]): 29 | val1,val2,val3 = signal[ch][1:4] 30 | 31 | if val1 == val2 and val2 == val3: 32 | n_railed += 1 33 | is_railed[ch] = True 34 | print(ch+1, ", ", end = '') 35 | else: is_railed[ch] = False; 36 | return is_railed, n_railed 37 | 38 | 39 | def predict_emotion_EEG(model, signal_path, chosen_channels, freqs, sf=256): 40 | # signal = read_signal_from_txt(signal_path) 41 | with open(signal_path, 'rb') as f: 42 | signal = pickle.load(f) 43 | 44 | is_railed, n_railed = rail_test(signal) 45 | 46 | # preprocessing 47 | copied_signal = copy.deepcopy(signal) 48 | copied_signal = down_sampling(copied_signal, chosen_channels) 49 | sf = sf // 2 50 | copied_signal = filtering(copied_signal, sf, chosen_channels) 51 | signal = copied_signal 52 | 53 | # transform into an input form 54 | fftMap = computefftMap(signal, chosen_channels, freqs, sf) 55 | fftMap = np.expand_dims(fftMap, axis=0) 56 | fftMap = np.expand_dims(fftMap, axis=0) 57 | fftMap_tensor = torch.from_numpy(fftMap) 58 | 59 | outputs = model(fftMap_tensor.float()) 60 | #max_idx = np.argmax(outputs[0,:].detach().numpy()) 61 | 62 | emo_dict = {} 63 | for idx, emo_class in emo_map.items(): 64 | emo_dict[emo_class] = outputs[0][idx].item() 65 | 66 | return emo_dict, n_railed, is_railed 67 | 68 | if __name__ == "__main__": 69 | print("환경설정 완료") -------------------------------------------------------------------------------- /src/analyzeModule.py: -------------------------------------------------------------------------------- 1 | from src.analyze.eeg.eegAnalyzeModule import * 2 | from 
src.analyze.face.predict_face_emotion_faceapi import * 3 | import operator 4 | 5 | def prob_distribution_method(dic): 6 | total = sum(list(dic.values())) 7 | for key, val in dic.items(): 8 | dic[key] /= total 9 | return dic 10 | 11 | def multimodal_emotion(face_emotions, face_detected, eeg_emotions, n_railed, high_enhance=3, low_enhance=1): 12 | unused_emotions = ["anger", "contempt", "surprise"] 13 | 14 | if face_detected: 15 | for emo in unused_emotions: 16 | del face_emotions[emo] 17 | max_face_emotions = max(face_emotions.items(), key=operator.itemgetter(1))[0] 18 | 19 | # 둘다없음 20 | if n_railed > 0 and not face_detected: 21 | return None, None 22 | 23 | # 얼굴만 감지 24 | elif face_detected and n_railed > 0: 25 | emotions = face_emotions 26 | 27 | # 센서만 감지 28 | elif not face_detected and n_railed == 0: 29 | emotions = eeg_emotions 30 | 31 | # 둘다 감지 32 | else: 33 | # EEG 값이 스트링인 경우 34 | if isinstance(eeg_emotions, str): 35 | enhance = high_enhance if max_face_emotions == "neutral" else low_enhance 36 | face_emotions[eeg_emotions] *= enhance # 이 값만큼 eeg 결과 감정에 가중치를 줌. 37 | emotions = prob_distribution(face_emotions) # 확률값으로 만들어주기. 38 | # EEG 값이 확률값인 경우 39 | else: 40 | # 무표정일 때 41 | if max_face_emotions == "neutral": 42 | for key, val in face_emotions.items(): 43 | face_emotions[key] += (low_enhance*face_emotions[key] + high_enhance*eeg_emotions[key]) 44 | if max_face_emotions == "neutral": 45 | for key, val in face_emotions.items(): 46 | face_emotions[key] += (high_enhance*face_emotions[key] + low_enhance*eeg_emotions[key]) 47 | emotions = prob_distribution_method(face_emotions) 48 | return max_face_emotions, emotions 49 | 50 | def detectEmotion(facePath, eegPath, RM): 51 | face_detected, face_emotion = predict_emotion(facePath) 52 | eeg_emotion, n_railed, sensor_status = predict_emotion_EEG(model, eegPath, chosen_channels, freqs, sf=256) 53 | # RM ========================================================= 54 | emotion, prob_distribution = multimodal_emotion(face_emotion, face_detected, eeg_emotion, n_railed) 55 | return emotion, prob_distribution, face_emotion, eeg_emotion, sensor_status -------------------------------------------------------------------------------- /src/front-end/README.md: -------------------------------------------------------------------------------- 1 | This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app). 2 | 3 | ## Available Scripts 4 | 5 | In the project directory, you can run: 6 | 7 | ### `npm start` 8 | 9 | Runs the app in the development mode.
10 | Open [http://localhost:3000](http://localhost:3000) to view it in the browser. 11 | 12 | The page will reload if you make edits.
13 | You will also see any lint errors in the console. 14 | 15 | ### `npm test` 16 | 17 | Launches the test runner in the interactive watch mode.
18 | See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information. 19 | 20 | ### `npm run build` 21 | 22 | Builds the app for production to the `build` folder.
23 | It correctly bundles React in production mode and optimizes the build for the best performance. 24 | 25 | The build is minified and the filenames include the hashes.
26 | Your app is ready to be deployed! 27 | 28 | See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information. 29 | 30 | ### `npm run eject` 31 | 32 | **Note: this is a one-way operation. Once you `eject`, you can’t go back!** 33 | 34 | If you aren’t satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project. 35 | 36 | Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you’re on your own. 37 | 38 | You don’t have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn’t feel obligated to use this feature. However we understand that this tool wouldn’t be useful if you couldn’t customize it when you are ready for it. 39 | 40 | ## Learn More 41 | 42 | You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started). 43 | 44 | To learn React, check out the [React documentation](https://reactjs.org/). 45 | 46 | ### Code Splitting 47 | 48 | This section has moved here: https://facebook.github.io/create-react-app/docs/code-splitting 49 | 50 | ### Analyzing the Bundle Size 51 | 52 | This section has moved here: https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size 53 | 54 | ### Making a Progressive Web App 55 | 56 | This section has moved here: https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app 57 | 58 | ### Advanced Configuration 59 | 60 | This section has moved here: https://facebook.github.io/create-react-app/docs/advanced-configuration 61 | 62 | ### Deployment 63 | 64 | This section has moved here: https://facebook.github.io/create-react-app/docs/deployment 65 | 66 | ### `npm run build` fails to minify 67 | 68 | This section has moved here: https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify 69 | -------------------------------------------------------------------------------- /src/back-end/FBI/api/models.py: -------------------------------------------------------------------------------- 1 | from django.db import models 2 | from django.contrib.auth.models import ( 3 | BaseUserManager, AbstractBaseUser 4 | ) 5 | 6 | def rename_and_upload(instance, filename): 7 | filebase, extension = filename.split('.') 8 | return 'user/{}_{}.{}'.format(instance.username, instance.id, extension) 9 | 10 | class Video(models.Model): 11 | videoId = models.AutoField(primary_key=True) 12 | title = models.CharField(max_length=50) 13 | artist = models.CharField(max_length=50) 14 | link = models.CharField(max_length=100) 15 | tag = models.CharField(max_length=50) 16 | startTime = models.TimeField(default=':00:00') 17 | duration = models.IntegerField(default=60) 18 | 19 | class Meta: 20 | db_table = 'Video' 21 | 22 | def __str__(self): 23 | return self.title 24 | 25 | class UserManager(BaseUserManager): 26 | def create_user(self, username, userFace): 27 | if not username: 28 | raise ValueError('Must be registered with a username') 29 | 30 | user = self.model( 31 | username=username, 32 | userFace=userFace, 33 | ) 34 | user.save(using=self._db) 35 | return user 36 | 37 | def create_superuser(self, username, userFace,): 38 | 
user = self.create_user( 39 | username=username, 40 | userFace=userFace, 41 | ) 42 | user.is_admin = True 43 | user.save(using=self._db) 44 | return user 45 | 46 | class User(AbstractBaseUser): 47 | username = models.CharField( 48 | max_length=100, 49 | unique=False, 50 | ) 51 | userFace = models.ImageField(unique=True, upload_to=rename_and_upload) 52 | # ManyToMany 53 | videosSeen = models.ManyToManyField(Video, through='Result') 54 | is_active = models.BooleanField(default=True) 55 | is_admin = models.BooleanField(default=False) 56 | 57 | objects = UserManager() 58 | 59 | USERNAME_FIELD = 'userFace' 60 | REQUIRED_FIELDS = ['username'] 61 | 62 | class Meta: 63 | db_table = 'User' 64 | 65 | def __str__(self): 66 | return self.username 67 | 68 | def has_perm(self, perm, obj=None): 69 | return True 70 | 71 | def has_module_perms(self, app_label): 72 | return True 73 | 74 | @property 75 | def is_staff(self): 76 | return self.is_admin 77 | 78 | class Result(models.Model): 79 | resultId = models.AutoField(primary_key=True) 80 | user = models.ForeignKey( 81 | User, 82 | on_delete=models.CASCADE 83 | ) 84 | video = models.ForeignKey( 85 | Video, 86 | on_delete=models.CASCADE 87 | ) 88 | viewedDate = models.DateTimeField() 89 | dataPath = models.CharField(max_length=1000) 90 | emotion = models.CharField(max_length=10) 91 | happiness = models.FloatField(null=True) 92 | sadness = models.FloatField(null=True) 93 | anger = models.FloatField(null=True) 94 | contempt = models.FloatField(null=True) 95 | disgust = models.FloatField(null=True) 96 | fear = models.FloatField(null=True) 97 | neutral = models.FloatField(null=True) 98 | surprise = models.FloatField(null=True) 99 | 100 | class Meta: 101 | db_table = 'Result' 102 | 103 | def __str__(self): 104 | return self.emotion -------------------------------------------------------------------------------- /src/analyze/eeg/plotModule.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from __future__ import division 3 | from __future__ import absolute_import 4 | import scipy.interpolate 5 | import numpy as np 6 | import matplotlib 7 | import matplotlib.pyplot as plt 8 | 9 | # plot 2d array as an image 10 | def show_image(im, color = 'rainbow'): #'YlOrRd', 'gray' 11 | fig = plt.imshow(im, cmap = color) 12 | fig.axes.get_xaxis().set_visible(False) 13 | fig.axes.get_yaxis().set_visible(False) 14 | 15 | # plot 1d array as a head contour 16 | def head_plot(values, color = 'rainbow'): 17 | meanR = values 18 | 19 | # parameters 20 | N = 300 # number of points for interpolation 21 | xy_center = [4,4] 22 | radius = 5 23 | 24 | koord = [[3,8],[5,8],# 2 25 | [3,7],[5,7],# 2 26 | [0,6],[2,6],[4,6],[6,6],[8,6], # 5 27 | [1,5],[3,5],[5,5],[7,5],# 4 28 | [0,4],[2,4],[4,4],[6,4],[8,4], # 5 29 | [1,3],[3,3],[5,3],[7,3],# 4 30 | [0,2],[2,2],[4,2],[6,2],[8,2],# 4 31 | [3,1],[5,1], 32 | [3,0],[4,0],[5,0]] 33 | 34 | ch = ["FP1", "FP2", 35 | "AF3", "AF4", 36 | "F7", "F3", "F2", "F4", "F8", 37 | "FC5", "FC1", "FC2", "FC6", 38 | "T7", "C3", "CZ", "C4", "T8", 39 | "CP5", "CP1", "CP2", "CP6", 40 | "P7", "P3", "PZ", "P4", "P8", 41 | "PO3", "PO4", 42 | "O1", "OZ", "O2"] 43 | 44 | xi = np.linspace(0, 8, N) 45 | yi = np.linspace(0, 8, N) 46 | 47 | x,y = [],[] 48 | for i in koord: 49 | x.append(i[0]) 50 | y.append(i[1]) 51 | z = meanR 52 | zi = scipy.interpolate.griddata((x, y), z, (xi[None,:], yi[:,None]), method='cubic') 53 | 54 | # set points > radius to not-a-number. They will not be plotted. 
55 | # the dr/2 makes the edges a bit smoother 56 | dr = xi[1] - xi[0] 57 | for i in range(N): 58 | for j in range(N): 59 | r = np.sqrt((xi[i] - xy_center[0])**2 + (yi[j] - xy_center[1])**2) 60 | if (r - dr/2) > radius: 61 | zi[j,i] = "nan" 62 | 63 | # make figure 64 | fig = plt.figure() 65 | 66 | # set aspect = 1 to make it a circle 67 | ax = fig.add_subplot(111, aspect = 1) 68 | 69 | # use different number of levels for the fill and the lines 70 | #CS = ax.contourf(xi, yi, zi, 60, cmap = plt.cm.jet, zorder = 1) 71 | CS = ax.contourf(xi, yi, zi, 60, cmap = color, zorder = 1) 72 | 73 | # hide ============================================== 74 | # 경계선 75 | ax.contour(xi, yi, zi, 15, colors = "grey", zorder = 2) 76 | 77 | # electrode 위치 78 | # ax.scatter(x, y, marker = 'o', c = 'b', s = 15, zorder = 3) 79 | 80 | # circle 81 | # circle = matplotlib.patches.Circle(xy = xy_center, radius = radius, edgecolor = "k", facecolor = "none") 82 | # ax.add_patch(circle) 83 | 84 | # color bar 85 | #cbar = fig.colorbar(CS, ax=ax) 86 | 87 | # axis invisible 88 | for loc, spine in ax.spines.items(): 89 | spine.set_linewidth(0) 90 | 91 | # remove the ticks 92 | ax.set_xticks([]) 93 | ax.set_yticks([]) 94 | # =================================================== 95 | # set axes limits 96 | #ax.set_xlim(-0.5, 8.5); ax.set_ylim(-0.5, 8.5) 97 | 98 | fig.canvas.draw() 99 | plt.show() 100 | 101 | if __name__ == "__main__": 102 | print("환경설정 완료") -------------------------------------------------------------------------------- /src/front-end/src/pages/Signup.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import axios from 'axios'; 3 | import Webcam from 'react-webcam'; 4 | import '../App.css'; 5 | import { withRouter } from 'react-router-dom'; 6 | import { Grid, Button, TextField, InputAdornment } from '@material-ui/core'; 7 | import AccountCircle from '@material-ui/icons/AccountCircle'; 8 | import UserContext from '../UserContext'; 9 | import NavBar from '../components/NavBar'; 10 | 11 | class Signup extends Component { 12 | constructor(props) { 13 | super(props); 14 | this.state = { 15 | userName: '', 16 | }; 17 | } 18 | 19 | static contextType = UserContext; 20 | 21 | userNameChange(event) { 22 | this.setState({ 23 | userName: event.target.value, 24 | }); 25 | } 26 | 27 | signupSubmit() { 28 | console.log('User Name: ' + this.state.userName); 29 | console.log(this.props.location.state); 30 | // Todo: post username and userFace 31 | this.signUpRequest(); 32 | } 33 | 34 | signUpRequest = async () => { 35 | console.log(this.props.location.state.userFace); 36 | console.log(this.state.userName); 37 | let user_form_data = new FormData(); 38 | user_form_data.append('userFace', this.props.location.state.userFace); 39 | user_form_data.append('username', this.state.userName); 40 | console.log(user_form_data); 41 | try { 42 | const response = await axios.post('api/v1/signup/', user_form_data, { 43 | headers: { 44 | 'content-type': 'multipart/form-data', 45 | }, 46 | }); 47 | console.log(response); 48 | console.log('Sign up 성공'); 49 | const { user, setUser } = this.context; 50 | const newUser = { 51 | id: response.data.id, 52 | name: response.data.username, 53 | loggedIn: true, 54 | }; 55 | console.log(newUser); 56 | setUser(newUser); 57 | this.props.history.push('/Analyze'); 58 | } catch (error) { 59 | console.error(error.content); 60 | console.log('Sign up 실패 - 사진 다시찍어야함'); 61 | this.props.history.push('/Login'); 62 | } 63 | }; 64 | 65 | 
render() { 66 | return ( 67 |
68 | 69 | 70 | 79 |

Sign Up

80 | 81 |
82 | 88 | 89 | 90 | ), 91 | }} 92 | value={this.state.userName} 93 | onChange={this.userNameChange.bind(this)} 94 | />{' '} 95 | {' '} 107 |
{' '} 108 |
{' '} 109 |
{' '} 110 |
111 | ); 112 | } 113 | } 114 | 115 | export default withRouter(Signup); 116 | -------------------------------------------------------------------------------- /src/README.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 |
3 | 4 | ### 1. Installations for Face Analyze 5 |
6 | 7 | - Move to the src/analyze/face folder and enter the python commands below (a quick verification sketch follows this list)
8 |      $ pip install pillow
9 |      - Setup for using face recognition
10 |          $ pip install opencv-python
11 |          $ pip install opencv-contrib-python
12 |          $ pip install cmake
13 |          $ pip install dlib
14 |          $ pip install face_recognition
15 |      $ pip install requests
16 |
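A minimal sketch to verify the installs above, assuming the pip commands succeeded; `sample_face.jpg` is a placeholder image you supply yourself, not a file shipped with this repository:

```python
# Smoke test for the face-analysis dependencies installed above.
# sample_face.jpg is a hypothetical test image, not part of this repo.
import cv2
import face_recognition

img = cv2.imread("sample_face.jpg")           # OpenCV loads images as BGR
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)    # face_recognition expects RGB
locations = face_recognition.face_locations(rgb)
print("detected faces:", len(locations))
```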
17 | 18 | ### 2. Installations for EEG analyze 19 |
20 | 21 | - Move to the src/analyze/eeg folder, then enter the python commands below (a usage sketch follows this list)
22 | - When "환경설정 완료" (environment setup complete) is printed, that script is done! Move straight on to the next one
23 | - If an error occurs, install the libraries with the commands below (if you are not using anaconda, replace conda with pip)

24 |      $ python CustomDatasetClass.py // or Models.py
25 |        $ conda install pytorch (pip install torch)
26 |        $ conda install -c pytorch torchvision (pip install torchvision)
27 |      $ python preprocessModule.py // or sensorModule.py
28 |        $ pip install brainflow
29 |      $ python transformModule.py
30 |        $ conda install scipy
31 |        - Install pyeeg
32 |            $ git clone https://github.com/forrestbao/pyeeg.git
33 |            $ cd pyeeg
34 |            $ python setup.py install // add the --user option at the end to install into your home directory
35 |      $ python eegAnalyzeModule.py
36 |      $ python plotModule.py
37 |        $ conda install matplotlib
38 |      $ python record_signal.py n // the trailing n argument is required.
39 |        $ pip install keyboard
40 |        - Pass y instead of n at the end to save the signal streamed from an actually connected sensor

41 |
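Once the scripts above print "환경설정 완료", the EEG classifier can be exercised directly from Python. The sketch below mirrors how src/analyzeModule.py calls the module; it assumes you run from the repository root, and note that eegAnalyzeModule.py loads eeg_classifier.pth from a hard-coded absolute path, so that path may need to be adjusted to your checkout first:

```python
# Sketch: run the trained EEG classifier on the bundled test signal.
# Assumes the eeg_classifier.pth path inside eegAnalyzeModule.py points at
# this checkout and that this script is launched from the repository root.
from src.analyze.eeg.eegAnalyzeModule import (
    model, predict_emotion_EEG, chosen_channels, freqs,
)

emo_dict, n_railed, is_railed = predict_emotion_EEG(
    model, "src/analyze/eeg/test_signal.pickle", chosen_channels, freqs, sf=256,
)
print("railed channels:", n_railed)
print("emotion scores:", emo_dict)
print("most likely emotion:", max(emo_dict, key=emo_dict.get))
```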
42 | 43 | ### 3. Installations for Server&DB (Django, MySQL) 44 |
45 | 46 | - Install django
47 |      $ pip install django

48 | - DB configuration (a data-seeding sketch follows this section)
49 |      - Edit the src/back-end/FBI/mysql.cnf file
50 |          [client]
51 |          database = 'db_name'
52 |          host = localhost
53 |          user = root
54 |          password = 'mypassword'
55 |      - Comment out 'api' inside INSTALLED_APPS in src/back-end/FBI/settings.py
56 |      - In src/back-end/FBI, run
57 |          $ ./manage.py migrate
58 |      - Uncomment 'api' inside INSTALLED_APPS in src/back-end/FBI/settings.py, then run
59 |          $ ./manage.py makemigrations
60 |          $ ./manage.py migrate
61 |
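The analysis page requests a Video row matching the chosen emotion tag, so the Video table needs at least one entry before the Start step below. A sketch for seeding one via the Django shell; the field names follow api/models.py, while the title, link, and tag values here are placeholders:

```python
# Run inside `python manage.py shell` from src/back-end/FBI.
# Field names come from api/models.py; the values below are placeholders.
from api.models import Video

Video.objects.create(
    title="Sample clip",
    artist="Unknown",
    link="https://www.youtube.com/watch?v=XXXXXXXXXXX",
    tag="happiness",
    startTime="00:00:00",
    duration=60,
)
print(Video.objects.count(), "video(s) registered")
```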
62 | 63 | ### 4. Installations for Client (React) 64 |
65 | 66 | - Move to the src/front-end folder, then
67 |      $ npm install
68 |
69 | 70 | ### 5. Start 71 |
72 | 73 | - Power on the EEG sensor and plug its USB receiver into the computer that runs the service
74 | - Move to the src/analyze/eeg/ folder, then
75 |      $ python record_signal.py y
76 | - Move to the src/back-end/FBI folder, then (see the request sketch after this section)
77 |      $ python manage.py runserver
78 | - Move to the src/front-end folder, then
79 |      $ npm start
80 |
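With all three processes running, the login API can be smoke-tested without the browser. A sketch using `requests`; it assumes the Django dev server is on its default http://localhost:8000, and `my_face.jpg` is a placeholder photo of the user:

```python
# Sketch: call the face-login endpoint directly.
# Assumes Django runs on its default port 8000 and my_face.jpg exists.
import requests

with open("my_face.jpg", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/api/v1/login/",
        files={"userFace": f},
    )

print(resp.status_code)
# Per src/front-end/src/pages/Login.js: 404/406 mean no matching user
# (sign-up is needed), 409 means no face was recognized in the image.
if resp.ok:
    print(resp.json())
```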
81 | -------------------------------------------------------------------------------- /src/back-end/FBI/FBI/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for FBI project. 3 | 4 | Generated by 'django-admin startproject' using Django 3.0.4. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/3.0/ref/settings/ 11 | """ 12 | 13 | import os 14 | 15 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 16 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 17 | 18 | 19 | # Quick-start development settings - unsuitable for production 20 | # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ 21 | 22 | # SECURITY WARNING: keep the secret key used in production secret! 23 | SECRET_KEY = '4!xhc0+k4yt3qb^hg@%%ijxumr=ccjqaa%i*&dxps0@3*kq6b(' 24 | 25 | # SECURITY WARNING: don't run with debug turned on in production! 26 | DEBUG = True 27 | 28 | ALLOWED_HOSTS = [] 29 | 30 | 31 | # Application definition 32 | 33 | INSTALLED_APPS = [ 34 | 'django.contrib.admin', 35 | 'django.contrib.auth', 36 | 'django.contrib.contenttypes', 37 | 'django.contrib.sessions', 38 | 'django.contrib.messages', 39 | 'django.contrib.staticfiles', 40 | 'rest_framework', 41 | 'corsheaders', 42 | 'api', 43 | ] 44 | 45 | AUTH_USER_MODEL = 'api.User' 46 | 47 | MIDDLEWARE = [ 48 | 'corsheaders.middleware.CorsMiddleware', 49 | 'django.middleware.security.SecurityMiddleware', 50 | 'django.contrib.sessions.middleware.SessionMiddleware', 51 | 'django.middleware.common.CommonMiddleware', 52 | 'django.middleware.csrf.CsrfViewMiddleware', 53 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 54 | 'django.contrib.messages.middleware.MessageMiddleware', 55 | 'django.middleware.clickjacking.XFrameOptionsMiddleware', 56 | ] 57 | 58 | CORS_ORIGIN_WHITELIST = ( 59 | 'http://localhost:3000', 60 | ) 61 | 62 | ROOT_URLCONF = 'FBI.urls' 63 | 64 | TEMPLATES = [ 65 | { 66 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 67 | 'DIRS': [], 68 | 'APP_DIRS': True, 69 | 'OPTIONS': { 70 | 'context_processors': [ 71 | 'django.template.context_processors.debug', 72 | 'django.template.context_processors.request', 73 | 'django.contrib.auth.context_processors.auth', 74 | 'django.contrib.messages.context_processors.messages', 75 | ], 76 | }, 77 | }, 78 | ] 79 | 80 | WSGI_APPLICATION = 'FBI.wsgi.application' 81 | 82 | 83 | # Database 84 | # https://docs.djangoproject.com/en/3.0/ref/settings/#databases 85 | 86 | DATABASES = { 87 | 'default': { 88 | 'ENGINE': 'django.db.backends.mysql', 89 | 'OPTIONS': { 90 | 'read_default_file': os.path.join(BASE_DIR, 'mysql.cnf') 91 | } 92 | } 93 | } 94 | 95 | 96 | # Password validation 97 | # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators 98 | 99 | AUTH_PASSWORD_VALIDATORS = [ 100 | { 101 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 102 | }, 103 | { 104 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 105 | }, 106 | { 107 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 108 | }, 109 | { 110 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 111 | }, 112 | ] 113 | 114 | 115 | # Internationalization 116 | # https://docs.djangoproject.com/en/3.0/topics/i18n/ 117 | 118 | 
LANGUAGE_CODE = 'en-us' 119 | 120 | TIME_ZONE = 'UTC' 121 | 122 | USE_I18N = True 123 | 124 | USE_L10N = True 125 | 126 | USE_TZ = True 127 | 128 | 129 | # Static files (CSS, JavaScript, Images) 130 | # https://docs.djangoproject.com/en/3.0/howto/static-files/ 131 | 132 | STATIC_URL = '/static/' 133 | 134 | MEDIA_ROOT = os.path.join(BASE_DIR, 'media') 135 | MEDIA_URL = '/media/' 136 | -------------------------------------------------------------------------------- /src/front-end/src/pages/Login.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import axios from 'axios'; 3 | import Webcam from 'react-webcam'; 4 | import '../App.css'; 5 | import { withRouter } from 'react-router-dom'; 6 | import 'base64-to-image'; 7 | import { Grid } from '@material-ui/core'; 8 | import UserContext from '../UserContext'; 9 | import NavBar from '../components/NavBar'; 10 | 11 | class Login extends Component { 12 | constructor(props) { 13 | super(props); 14 | this.state = { 15 | userFace: null, 16 | }; 17 | } 18 | 19 | static contextType = UserContext; 20 | 21 | componentWillMount(Webcam) { 22 | this.getLogin(); 23 | } 24 | componentWillUnmount() { 25 | clearTimeout(this.captureImg); 26 | } 27 | 28 | setRef = (webcam) => { 29 | this.webcam = webcam; 30 | }; 31 | 32 | faceDetected(data) { 33 | console.log(data); 34 | const { user, setUser } = this.context; 35 | const newUser = { 36 | id: data.id, 37 | name: data.username, 38 | loggedIn: true, 39 | }; 40 | setUser(newUser); 41 | this.props.history.push('/Analyze'); 42 | } 43 | 44 | faceNotDetected() { 45 | this.props.history.push('/Signup', { userFace: this.state.userFace }); 46 | console.log('얼굴 정보 없음, 로그인 3 페이지로 넘어감'); 47 | } 48 | 49 | faceNotFound() { 50 | window.location.reload(false); 51 | } 52 | 53 | getLogin = async () => { 54 | console.log('캡처되고있음'); 55 | 56 | const captureImg = setTimeout(() => { 57 | var base64Str = this.webcam.getScreenshot(); 58 | var file = dataURLtoFile(base64Str, 'hello.jpg'); 59 | console.log(file); 60 | console.log('캡처됨'); 61 | this.setState({ 62 | userFace: file, 63 | }); 64 | this.userFace(); 65 | }, 2300); 66 | const dataURLtoFile = (dataurl, filename) => { 67 | var arr = dataurl.split(','), 68 | mime = arr[0].match(/:(.*?);/)[1], 69 | bstr = atob(arr[1]), 70 | n = bstr.length, 71 | u8arr = new Uint8Array(n); 72 | 73 | while (n--) { 74 | u8arr[n] = bstr.charCodeAt(n); 75 | } 76 | 77 | return new File([u8arr], filename, { type: mime }); 78 | }; 79 | }; 80 | 81 | userFace = async () => { 82 | let form_data = new FormData(); 83 | form_data.append('userFace', this.state.userFace); 84 | try { 85 | const response = await axios.post('api/v1/login/', form_data, { 86 | headers: { 87 | 'content-type': 'multipart/form-data', 88 | }, 89 | }); 90 | console.log(response); 91 | this.setState({ 92 | userName: response.data.username, 93 | }); 94 | this.faceDetected(response.data); 95 | } catch (error) { 96 | console.log(error); 97 | console.error(error.response); 98 | if (error.response.status == 404) { 99 | // No user info. 100 | this.faceNotDetected(); 101 | } else if (error.response.status == 406) { 102 | // First user error occured. 103 | this.faceNotDetected(); 104 | } else if (error.response.status == 409) { 105 | // Cannot recognize face on image. 106 | this.faceNotFound(); 107 | } 108 | } 109 | }; 110 | 111 | render() { 112 | return ( 113 |
114 | 115 | 122 | 131 |

Face Login

132 |

가만히 화면을 응시해주세요.

133 |
134 |
135 | ); 136 | } 137 | } 138 | 139 | export default withRouter(Login); 140 | -------------------------------------------------------------------------------- /src/front-end/src/pages/AnalyzeOption.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import '../App.css'; 3 | import { Grid, Typography } from '@material-ui/core/'; 4 | import { Link, withRouter } from 'react-router-dom'; 5 | 6 | import MenuItem from '@material-ui/core/MenuItem'; 7 | import Select from '@material-ui/core/Select'; 8 | import InputLabel from '@material-ui/core/InputLabel'; 9 | import FormControl from '@material-ui/core/FormControl'; 10 | import LoginAlert from '../components/loginSuccessAlert'; 11 | import UserContext from '../UserContext'; 12 | import NavBar from '../components/NavBar'; 13 | 14 | class Analyze extends Component { 15 | state = { 16 | emotionTag: '', 17 | close: true, 18 | }; 19 | 20 | redirectToLogin() { 21 | return this.props.history.push(`/Login`); 22 | } 23 | 24 | static contextType = UserContext; 25 | render() { 26 | const { classes } = this.props; 27 | const { user } = this.context; 28 | console.log(user); 29 | return ( 30 | <> 31 | {user.loggedIn ? ( 32 |
33 | 34 | 35 | 36 | 37 | Choose an emotion you want! 38 | 39 | 40 | 41 | 42 | Emotion 43 | 44 | 107 | 108 | 109 |
110 | ) : ( 111 | this.redirectToLogin() 112 | )} 113 | 114 | ); 115 | } 116 | } 117 | 118 | export default withRouter(Analyze); 119 | -------------------------------------------------------------------------------- /src/front-end/src/App.css: -------------------------------------------------------------------------------- 1 | .App-logo { 2 | height: 40vmin; 3 | pointer-events: none; 4 | } 5 | body { 6 | width: 100%; 7 | height: 100%; 8 | } 9 | 10 | html { 11 | width: 100%; 12 | height: 100% !important; 13 | } 14 | 15 | .full-container { 16 | width: 100%; 17 | height: 100%; 18 | } 19 | 20 | @media (prefers-reduced-motion: no-preference) { 21 | .App-logo { 22 | animation: App-logo-spin infinite 20s linear; 23 | } 24 | } 25 | 26 | .App-header { 27 | background-color: #282c34; 28 | min-height: 100vh; 29 | display: flex; 30 | flex-direction: column; 31 | align-items: center; 32 | justify-content: center; 33 | font-size: calc(10px + 2vmin); 34 | color: white; 35 | } 36 | 37 | .App-link { 38 | color: #61dafb; 39 | } 40 | 41 | @keyframes App-logo-spin { 42 | from { 43 | transform: rotate(0deg); 44 | } 45 | to { 46 | transform: rotate(360deg); 47 | } 48 | } 49 | .carousel { 50 | height: 100%; 51 | } 52 | .carousel-caption { 53 | top: 500px; 54 | z-index: 1; 55 | } 56 | 57 | .carousel-inner { 58 | height: 100%; 59 | } 60 | 61 | .videoWebcam { 62 | width: 100%; 63 | height: 100%; 64 | object-fit: cover; 65 | } 66 | .webcam { 67 | width: 55%; 68 | height: 80%; 69 | left: -12%; 70 | position: relative; 71 | object-fit: cover; 72 | } 73 | 74 | #menu { 75 | position: absolute; 76 | right: 10px; 77 | color: black; 78 | } 79 | 80 | .menuLink { 81 | color: black; 82 | } 83 | 84 | .recharts-wrapper#realtimeChart { 85 | text-align: center; 86 | } 87 | 88 | @keyframes blinker { 89 | 50% { 90 | opacity: 0; 91 | } 92 | } 93 | #faceLogin { 94 | position: absolute; 95 | top: 15%; 96 | left: 70%; 97 | color: white; 98 | font-size: 60px; 99 | font-weight: bold; 100 | } 101 | #faceLogin2 { 102 | position: absolute; 103 | top: 28%; 104 | left: 70%; 105 | color: white; 106 | font-size: 20px; 107 | } 108 | #root { 109 | width: 100%; 110 | height: 100%; 111 | } 112 | #userInput { 113 | position: absolute; 114 | color: white !important; 115 | top: 29%; 116 | left: 69%; 117 | width: 350px; 118 | justify-content: center; 119 | } 120 | .MuiInputBase-root, 121 | .MuiInput-root, 122 | label { 123 | color: white !important; 124 | border: white !important; 125 | } 126 | .MuiButton-outlinedPrimary { 127 | border: 1px solid white !important; 128 | color: white !important; 129 | } 130 | .MuiButton-outlinedPrimary#startCardBtn { 131 | width: 60%; 132 | height: 50%; 133 | color: blue !important; 134 | border: 1px solid blue !important; 135 | } 136 | 137 | .MuiInput-underline::before, 138 | .MuiInput-underline::after { 139 | border-bottom: 1px solid white !important; 140 | } 141 | 142 | #loginBox { 143 | background-color: black; 144 | align-items: center; 145 | text-align: center; 146 | height: 95%; 147 | } 148 | #loginBox1 { 149 | background-color: rgba(255, 255, 255, 0.904); 150 | align-items: center; 151 | text-align: center; 152 | height: 100%; 153 | } 154 | #loginBox2 { 155 | background-color: black; 156 | align-items: center; 157 | text-align: center; 158 | height: 87%; 159 | } 160 | #startCard { 161 | background-color: #d1e2e7; 162 | position: relative; 163 | width: 70%; 164 | height: 40%; 165 | } 166 | #startCardBtn { 167 | width: 60%; 168 | height: 50%; 169 | } 170 | #AnalyzeText { 171 | position: absolute; 172 | top: 40%; 
173 | left: 30%; 174 | color: white; 175 | } 176 | #resultText { 177 | margin-top: 7%; 178 | color: white; 179 | } 180 | #AnalyzeOption { 181 | position: relative; 182 | text-align: center; 183 | } 184 | #emotionSelect { 185 | position: relative; 186 | left: 35%; 187 | 188 | width: 25%; 189 | z-index: 1; 190 | background-color: white; 191 | } 192 | #railed { 193 | margin-left: 4%; 194 | width: 80%; 195 | height: 100%; 196 | } 197 | #connection { 198 | margin-top: 12%; 199 | } 200 | 201 | #connections { 202 | color: red; 203 | } 204 | 205 | .recharts-layer.recharts-pie { 206 | position: absolute; 207 | width: 100% !important; 208 | height: 100% !important; 209 | } 210 | 211 | .recharts-wrapper#PieEEG { 212 | margin-top: 200px; 213 | } 214 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FBI (Face Brain of I(e:)motion) 2 | 3 | 4 | Capstone Design Project for Kookmin University, 2020 5 | 6 | [**https://kookmin-sw.github.io/capstone-2020-2/**](https://kookmin-sw.github.io/capstone-2020-2/) 7 | 8 | 9 | ### **1. 프로젝트 소개** 10 | --- 11 |

12 | 13 |

14 | 15 | **얼굴 표정 & 뇌파 신호 기반 감정 인식 및 분석 웹 서비스, FBI**
16 | FBI는 표정과 뇌파에 기반한 멀티 모달 감정 인식기술을 제공하는 웹 어플리케이션이다. 17 | 사용자가 웹 어플리케이션에서 재생되는 영상을 시청하는 동안 모자에 달린 뇌파측정 센서와 카메라로 사용자의 뇌파와 표정을 측정하고, 실시간으로 사용자의 감정을 분석하여 시각화한다. 또한 실시간으로 측정된 감정 분석 데이터를 누적하여 최종 감정 분석 결과를 제공한다. 분석 결과에 대해서는 표정, 뇌파, 멀티모달(표정+뇌파)에 대한 각 결과를 모두 반영함으로써 보다 명확하고 신뢰성 있는 분석 서비스를 제공한다. 18 | FBI는 기존의 단일 패턴 감정 분석 시스템에서 벗어나, 두 가지 신호 데이터를 기반으로 유저의 감정을 분석하는 멀티 모달 감정인식 시스템을 개발함으로써 각 데이터의 한계점을 보완하여 체계적이고 정확한 감정인식 기술을 제공하는 것을 목표로 한다. 특수환경(교도소, 병원)에서 맞춤형 심리 치료나 감정표현에 어려움을 겪는 사용자의 감정인식이 필요한 곳에서 FBI의 감정 인식 및 분석 서비스가 해결책이 될 수 있을 것이다. 19 |

20 | **Facial expression & brainwave signals based emotion recognition and analysis web service, FBI**
21 | FBI is a web application that provides multi-modal emotion recognition technology based on facial expressions and brain waves. 22 | While the user is watching the video played in the web application, the EEG sensor on the hat and camera are used to measure the user's brainwaves and facial expressions. Based on this, the user's emotions are analyzed and visualized in real-time. In addition, the real-time emotion analysis data is accumulated to provide the final emotion analysis result. For analysis results, we reflect all the results for facial expression, brain wave, and multi-modal (expression + brain wave) in UI which provide more clear and reliable analysis services. 23 | FBI aims to provide a systematic and accurate emotion recognition technology by complementing the limitations of each data by developing a multi-modal emotion recognition system which analyzes the user's emotion based on two signal data, not single pattern emotion analysis system which is existing. FBI's emotion recognition and analysis service can be a solution in places where special environment (prison, hospital) needs customized psychological treatment or emotion recognition of users who have difficulty expressing emotions. 24 |

25 | 26 | ### **2. 소개 영상** 27 | --- 28 | **수행 계획 소개 영상** 29 |
30 | Link: https://www.youtube.com/watch?v=4n9E5AUYYvU?t=0s 31 |

32 | **1차 중간평가 발표 영상** 33 |
34 | Link: https://www.youtube.com/watch?v=10gp0o0CV0o?t=0s 35 |

36 | **1차 중간평가 시연 영상** 37 |
38 | Link: https://www.youtube.com/watch?v=JV1dCH-8sd8?t=0s 39 |

40 | **2차 중간평가 발표 영상** 41 |
42 | Link: https://www.youtube.com/watch?v=9Aiapp7FzLo&feature=youtu.be&t=0s 43 |

44 | **2차 중간평가 시연 영상** 45 |
46 | Link: https://www.youtube.com/watch?v=CUAWhUwhbk0&t=0s 47 |

48 | **최종 평가 발표 영상** 49 |
50 | [![youtube_img](https://img.youtube.com/vi/SnbIfh3Ph5c/0.jpg)](https://www.youtube.com/watch?v=SnbIfh3Ph5c&feature=youtu.be) 51 |

52 | **최종 평가 시연 영상(Full.ver)** 53 |
54 | [![youtube_img](https://img.youtube.com/vi/cNzKsm_rX5A/0.jpg)](https://www.youtube.com/watch?v=cNzKsm_rX5A&t=0s) 55 |

56 | **최종 평가 시연 영상(Short.ver)** 57 |
58 | [![youtube_img](https://img.youtube.com/vi/i19ny52nIEM/0.jpg)](https://www.youtube.com/watch?v=i19ny52nIEM&t=0s) 59 |

60 | 61 | ### **3. 팀 소개** 62 | --- 63 | 64 | 65 | 66 | ### 윤상민 교수님 (Prof. Sang-Min Yoon) 67 | 68 |   - Role: 지도교수 & 멘토
69 |   - Email: smyoon@kookmin.ac.kr
70 |






71 | 72 | 73 | 74 | ### 이연지 (Yeon-Ji Lee) 75 | 76 |   - Student ID: 20171668
77 |   - Role: 팀장, 뇌파 기반 감정 인식 모델 학습, multi-modal 분석모듈 개발
78 |   - Email: nelumbotus@kookmin.ac.kr
79 |





80 | 81 | 82 | 83 | ### 김경진 (Kyoung-Jin Kim) 84 | 85 |   - Student ID: 20170292
86 |   - Role: Server 및 DB 구축, back-end API 개발
87 |   - Email: jinnymini@gmail.com
88 |





89 | 90 | 91 | 92 | ### 김소정 (So-Jung Kim) 93 | 94 |   - Student ID: 20162753
95 |   - Role: Front-end UI 설계 및 구현
96 |   - Email: sjk1708@gmail.com
97 |





98 | 99 | 100 | 101 | ### 서명희 (Myoung-Hee Seo) 102 | 103 |   - Student ID: 20171627
104 |   - Role: Server 및 시스템 연동, Front-end UI 구현, Git 관리
105 |   - Email: myounghee.seo2@gmail.com
106 |





107 | 108 | 109 | 110 | 111 | ### 이예림 (Ye-Rim Lee) 112 | 113 |   - Student ID: 20171670
114 |   - Role: 표정 기반 감정 인식 모델 학습, multi-modal 분석모듈 개발
115 |   - Email: lylim5050@kookmin.ac.kr
116 |





117 |

118 | 119 | 120 | -------------------------------------------------------------------------------- /index.md: -------------------------------------------------------------------------------- 1 | # FBI (Face Brain of I(e:)motion) 2 | 3 | 4 | Capstone Design Project for Kookmin University, 2020 5 | 6 | [**https://kookmin-sw.github.io/capstone-2020-2/**](https://kookmin-sw.github.io/capstone-2020-2/) 7 | 8 | 9 | ### **1. 프로젝트 소개** 10 | --- 11 |

12 | 13 |

14 | 15 | **얼굴 표정 & 뇌파 신호 기반 감정 인식 및 분석 웹 서비스, FBI**
16 | FBI는 표정과 뇌파에 기반한 멀티 모달 감정 인식기술을 제공하는 웹 어플리케이션이다. 17 | 사용자가 웹 어플리케이션에서 재생되는 영상을 시청하는 동안 모자에 달린 뇌파측정 센서와 카메라로 사용자의 뇌파와 표정을 측정하고, 실시간으로 사용자의 감정을 분석하여 시각화한다. 또한 실시간으로 측정된 감정 분석 데이터를 누적하여 최종 감정 분석 결과를 제공한다. 분석 결과에 대해서는 표정, 뇌파, 멀티모달(표정+뇌파)에 대한 각 결과를 모두 반영함으로써 보다 명확하고 신뢰성 있는 분석 서비스를 제공한다. 18 | FBI는 기존의 단일 패턴 감정 분석 시스템에서 벗어나, 두 가지 신호 데이터를 기반으로 유저의 감정을 분석하는 멀티 모달 감정인식 시스템을 개발함으로써 각 데이터의 한계점을 보완하여 체계적이고 정확한 감정인식 기술을 제공하는 것을 목표로 한다. 특수환경(교도소, 병원)에서 맞춤형 심리 치료나 감정표현에 어려움을 겪는 사용자의 감정인식이 필요한 곳에서 FBI의 감정 인식 및 분석 서비스가 해결책이 될 수 있을 것이다. 19 |

20 | **Facial expression & brainwave signals based emotion recognition and analysis web service, FBI**
21 | FBI is a web application that provides multi-modal emotion recognition technology based on facial expressions and brain waves. 22 | While the user is watching the video played in the web application, the EEG sensor on the hat and camera are used to measure the user's brainwaves and facial expressions. Based on this, the user's emotions are analyzed and visualized in real-time. In addition, the real-time emotion analysis data is accumulated to provide the final emotion analysis result. For analysis results, we reflect all the results for facial expression, brain wave, and multi-modal (expression + brain wave) in UI which provide more clear and reliable analysis services. 23 | FBI aims to provide a systematic and accurate emotion recognition technology by complementing the limitations of each data by developing a multi-modal emotion recognition system which analyzes the user's emotion based on two signal data, not single pattern emotion analysis system which is existing. FBI's emotion recognition and analysis service can be a solution in places where special environment (prison, hospital) needs customized psychological treatment or emotion recognition of users who have difficulty expressing emotions. 24 |

25 | 26 | ### **2. 소개 영상** 27 | --- 28 | **수행 계획 소개 영상** 29 |
30 | Link: https://www.youtube.com/watch?v=4n9E5AUYYvU?t=0s 31 |

32 | **1차 중간평가 발표 영상** 33 |
34 | Link: https://www.youtube.com/watch?v=10gp0o0CV0o?t=0s 35 |

36 | **1차 중간평가 시연 영상** 37 |
38 | Link: https://www.youtube.com/watch?v=JV1dCH-8sd8?t=0s 39 |

40 | **2차 중간평가 발표 영상** 41 |
42 | Link: https://www.youtube.com/watch?v=9Aiapp7FzLo&feature=youtu.be&t=0s 43 |

44 | **2차 중간평가 시연 영상** 45 |
46 | Link: https://www.youtube.com/watch?v=CUAWhUwhbk0&t=0s 47 |

48 | **최종 평가 발표 영상** 49 |
50 | [![youtube_img](https://img.youtube.com/vi/SnbIfh3Ph5c/0.jpg)](https://www.youtube.com/watch?v=SnbIfh3Ph5c&feature=youtu.be) 51 |

52 | **최종 평가 시연 영상(Full.ver)** 53 |
54 | [![youtube_img](https://img.youtube.com/vi/cNzKsm_rX5A/0.jpg)](https://www.youtube.com/watch?v=cNzKsm_rX5A&t=0s) 55 |

56 | **최종 평가 시연 영상(Short.ver)** 57 |
58 | [![youtube_img](https://img.youtube.com/vi/i19ny52nIEM/0.jpg)](https://www.youtube.com/watch?v=i19ny52nIEM&t=0s) 59 |

60 | 61 | ### **3. 팀 소개** 62 | --- 63 | 64 | 65 | 66 | ### 윤상민 교수님 (Prof. Sang-Min Yoon) 67 | 68 |   - Role: 지도교수 & 멘토
69 |   - Email: smyoon@kookmin.ac.kr
70 |






71 | 72 | 73 | 74 | ### 이연지 (Yeon-Ji Lee) 75 | 76 |   - Student ID: 20171668
77 |   - Role: 팀장, 뇌파 기반 감정 인식 모델 학습, multi-modal 분석모듈 개발
78 |   - Email: nelumbotus@kookmin.ac.kr
79 |





80 | 81 | 82 | 83 | ### 김경진 (Kyoung-Jin Kim) 84 | 85 |   - Student ID: 20170292
86 |   - Role: Server 및 DB 구축, back-end API 개발
87 |   - Email: jinnymini@gmail.com
88 |





89 | 90 | 91 | 92 | ### 김소정 (So-Jung Kim) 93 | 94 |   - Student ID: 20162753
95 |   - Role: Front-end UI 설계 및 구현
96 |   - Email: sjk1708@gmail.com
97 |





98 | 99 | 100 | 101 | ### 서명희 (Myoung-Hee Seo) 102 | 103 |   - Student ID: 20171627
104 |   - Role: Server 및 시스템 연동, Front-end UI 구현, Git 관리
105 |   - Email: myounghee.seo2@gmail.com
106 |





107 | 108 | 109 | 110 | 111 | ### 이예림 (Ye-Rim Lee) 112 | 113 |   - Student ID: 20171670
114 |   - Role: 표정 기반 감정 인식 모델 학습, multi-modal 분석모듈 개발
115 |   - Email: lylim5050@kookmin.ac.kr
116 |





117 |

118 | 119 | 120 | -------------------------------------------------------------------------------- /src/front-end/src/serviceWorker.js: -------------------------------------------------------------------------------- 1 | // This optional code is used to register a service worker. 2 | // register() is not called by default. 3 | 4 | // This lets the app load faster on subsequent visits in production, and gives 5 | // it offline capabilities. However, it also means that developers (and users) 6 | // will only see deployed updates on subsequent visits to a page, after all the 7 | // existing tabs open on the page have been closed, since previously cached 8 | // resources are updated in the background. 9 | 10 | // To learn more about the benefits of this model and instructions on how to 11 | // opt-in, read https://bit.ly/CRA-PWA 12 | 13 | const isLocalhost = Boolean( 14 | window.location.hostname === 'localhost' || 15 | // [::1] is the IPv6 localhost address. 16 | window.location.hostname === '[::1]' || 17 | // 127.0.0.0/8 are considered localhost for IPv4. 18 | window.location.hostname.match( 19 | /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/ 20 | ) 21 | ); 22 | 23 | export function register(config) { 24 | if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) { 25 | // The URL constructor is available in all browsers that support SW. 26 | const publicUrl = new URL(process.env.PUBLIC_URL, window.location.href); 27 | if (publicUrl.origin !== window.location.origin) { 28 | // Our service worker won't work if PUBLIC_URL is on a different origin 29 | // from what our page is served on. This might happen if a CDN is used to 30 | // serve assets; see https://github.com/facebook/create-react-app/issues/2374 31 | return; 32 | } 33 | 34 | window.addEventListener('load', () => { 35 | const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`; 36 | 37 | if (isLocalhost) { 38 | // This is running on localhost. Let's check if a service worker still exists or not. 39 | checkValidServiceWorker(swUrl, config); 40 | 41 | // Add some additional logging to localhost, pointing developers to the 42 | // service worker/PWA documentation. 43 | navigator.serviceWorker.ready.then(() => { 44 | console.log( 45 | 'This web app is being served cache-first by a service ' + 46 | 'worker. To learn more, visit https://bit.ly/CRA-PWA' 47 | ); 48 | }); 49 | } else { 50 | // Is not localhost. Just register service worker 51 | registerValidSW(swUrl, config); 52 | } 53 | }); 54 | } 55 | } 56 | 57 | function registerValidSW(swUrl, config) { 58 | navigator.serviceWorker 59 | .register(swUrl) 60 | .then(registration => { 61 | registration.onupdatefound = () => { 62 | const installingWorker = registration.installing; 63 | if (installingWorker == null) { 64 | return; 65 | } 66 | installingWorker.onstatechange = () => { 67 | if (installingWorker.state === 'installed') { 68 | if (navigator.serviceWorker.controller) { 69 | // At this point, the updated precached content has been fetched, 70 | // but the previous service worker will still serve the older 71 | // content until all client tabs are closed. 72 | console.log( 73 | 'New content is available and will be used when all ' + 74 | 'tabs for this page are closed. See https://bit.ly/CRA-PWA.' 75 | ); 76 | 77 | // Execute callback 78 | if (config && config.onUpdate) { 79 | config.onUpdate(registration); 80 | } 81 | } else { 82 | // At this point, everything has been precached. 
83 | // It's the perfect time to display a 84 | // "Content is cached for offline use." message. 85 | console.log('Content is cached for offline use.'); 86 | 87 | // Execute callback 88 | if (config && config.onSuccess) { 89 | config.onSuccess(registration); 90 | } 91 | } 92 | } 93 | }; 94 | }; 95 | }) 96 | .catch(error => { 97 | console.error('Error during service worker registration:', error); 98 | }); 99 | } 100 | 101 | function checkValidServiceWorker(swUrl, config) { 102 | // Check if the service worker can be found. If it can't reload the page. 103 | fetch(swUrl, { 104 | headers: { 105 | 'Service-Worker': 'script' 106 | }, 107 | }) 108 | .then(response => { 109 | // Ensure service worker exists, and that we really are getting a JS file. 110 | const contentType = response.headers.get('content-type'); 111 | if ( 112 | response.status === 404 || 113 | (contentType != null && contentType.indexOf('javascript') === -1) 114 | ) { 115 | // No service worker found. Probably a different app. Reload the page. 116 | navigator.serviceWorker.ready.then(registration => { 117 | registration.unregister().then(() => { 118 | window.location.reload(); 119 | }); 120 | }); 121 | } else { 122 | // Service worker found. Proceed as normal. 123 | registerValidSW(swUrl, config); 124 | } 125 | }) 126 | .catch(() => { 127 | console.log( 128 | 'No internet connection found. App is running in offline mode.' 129 | ); 130 | }); 131 | } 132 | 133 | export function unregister() { 134 | if ('serviceWorker' in navigator) { 135 | navigator.serviceWorker.ready 136 | .then(registration => { 137 | registration.unregister(); 138 | }) 139 | .catch(error => { 140 | console.error(error.message); 141 | }); 142 | } 143 | } -------------------------------------------------------------------------------- /src/analyze/eeg/TrainTestModule.py: -------------------------------------------------------------------------------- 1 | # Test method 2 | from sklearn.metrics import classification_report 3 | from sklearn.metrics import confusion_matrix 4 | import numpy as np 5 | import torch 6 | import torch.nn as nn 7 | import torch.optim as optim 8 | 9 | def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None): 10 | """pretty print for confusion matrixes""" 11 | columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length 12 | empty_cell = " " * columnwidth 13 | 14 | # Begin CHANGES 15 | fst_empty_cell = (columnwidth-3)//2 * " " + "t/p" + (columnwidth-3)//2 * " " 16 | 17 | if len(fst_empty_cell) < len(empty_cell): 18 | fst_empty_cell = " " * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell 19 | # Print header 20 | print(" " + fst_empty_cell, end=" ") 21 | # End CHANGES 22 | 23 | for label in labels: 24 | print("%{0}s".format(columnwidth) % label, end=" ") 25 | 26 | print() 27 | # Print rows 28 | for i, label1 in enumerate(labels): 29 | print(" %{0}s".format(columnwidth) % label1, end=" ") 30 | for j in range(len(labels)): 31 | cell = "%{0}.1f".format(columnwidth) % cm[i, j] 32 | if hide_zeroes: 33 | cell = cell if float(cm[i, j]) != 0 else empty_cell 34 | if hide_diagonal: 35 | cell = cell if i != j else empty_cell 36 | if hide_threshold: 37 | cell = cell if cm[i, j] > hide_threshold else empty_cell 38 | print(cell, end=" ") 39 | print() 40 | 41 | def testModel(net, testloader, target_names, criterion, model_type, detail=False): 42 | numOfClass = len(target_names) 43 | co = 0 # 맞은 개수 44 | to = 0 # 테스트셋 개수 45 | 46 | # [[0,0,0], [0,0,0], [0,0,0]] 47 | conf_mat = [[0 for i in range(numOfClass)] for j 
in range(numOfClass)] 48 | 49 | 50 | all_label = [] 51 | all_out = [] 52 | with torch.no_grad(): 53 | running_loss = 0 54 | for data in testloader: 55 | images, labels = data 56 | outputs = net(images.float()) 57 | loss = criterion(outputs, labels) 58 | running_loss += loss.item() 59 | 60 | # Update Confusion Matrix ============================== 61 | for i in range(len(outputs)): 62 | to += 1 63 | # ===============================print 64 | if detail: 65 | if to <= 10: 66 | print("Output : ", outputs[i]) 67 | print("Label : ", labels[i]) 68 | else: break 69 | # =================================== 70 | 71 | if model_type == "cla": 72 | predicted = np.argmax(np.array(outputs[i])); 73 | label = labels[i].item() 74 | elif model_type == "reg": 75 | predicted = 0 if outputs[i] < 5 else 1 76 | label = 0 if labels[i].item() < 5 else 1 77 | 78 | all_label.append(label) 79 | all_out.append(predicted) 80 | 81 | conf_mat[predicted][label]+=1 82 | 83 | if predicted == label: 84 | co += 1 85 | # =========================================================== 86 | loss = running_loss/len(testloader.dataset) 87 | acc = 100 * (co / to) 88 | 89 | cm = confusion_matrix(all_label, all_out, labels = [i for i in range(0,numOfClass)]) 90 | print_cm(cm, target_names) 91 | 92 | print("V_loss = %.06f Acc = %.04f "%(loss, acc)) 93 | return loss, acc, 1 #f1_score 94 | 95 | 96 | # Train method 97 | def train_model(net, model_type, trainloader, testloader, target_names, epoch=100, detail=True): 98 | t_loss_list = [] 99 | v_loss_list = [] 100 | f1_list = [] 101 | acc_list = [] 102 | criterion = None; optimizer = None 103 | 104 | # optimizer, loss function 105 | if model_type == "cla": 106 | criterion = nn.CrossEntropyLoss() 107 | optimizer = optim.Adam(net.parameters(), lr=1e-4, weight_decay = 1e-5) 108 | elif model_type == "reg": 109 | criterion = nn.MSELoss() 110 | criterion = nn.SmoothL1Loss() # 111 | optimizer = optim.Adam(net.parameters(), lr=1e-5, weight_decay = 1e-5) 112 | 113 | prev_acc = 0 114 | 115 | for epoch in range(epoch): 116 | running_loss = 0.0 117 | 118 | for i, data in enumerate(trainloader, 0): 119 | # get the inputs; data is a list of [inputs, labels] 120 | inputs, labels = data 121 | 122 | optimizer.zero_grad() 123 | outputs = net(inputs.float()) 124 | loss = criterion(outputs, labels) 125 | loss.backward() 126 | optimizer.step() 127 | 128 | running_loss += loss.item() 129 | t_loss = running_loss/len(trainloader.dataset) 130 | if detail: 131 | print("\n[ Epoch %d ] T_Loss = %f \n"%(epoch+1, t_loss), end='' ) 132 | 133 | try: 134 | val_loss, acc, f1 = testModel(net, testloader, target_names, criterion, model_type, False) 135 | except: continue 136 | 137 | if acc > prev_acc: 138 | #model_path = "torch_models/SEED_fftMap_ch8_10sec_5over/"+str(epoch)+"epoch.pth" 139 | #torch.save(net.state_dict(), model_path) 140 | prev_acc = acc 141 | 142 | t_loss_list.append(running_loss/len(trainloader.dataset)) 143 | v_loss_list.append(val_loss) 144 | acc_list.append(acc) 145 | f1_list.append(f1) 146 | print('Finished Training') 147 | return net, t_loss_list, v_loss_list, f1_list, acc_list -------------------------------------------------------------------------------- /src/analyze/face/xception.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import cv2 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import torchvision 8 | import torchvision.transforms as transforms 9 | from torch.utils.data import 
TensorDataset, DataLoader, Dataset 10 | 11 | 12 | 13 | transform_train = transforms.Compose([ 14 | transforms.Resize(299), 15 | transforms.RandomCrop(299, padding=38), 16 | transforms.RandomHorizontalFlip(), 17 | transforms.ToTensor(), 18 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))]) 19 | 20 | transform_validation = transforms.Compose([ 21 | transforms.Resize(299), 22 | transforms.ToTensor(), 23 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))]) 24 | 25 | 26 | transform_test = transforms.Compose([ 27 | transforms.Resize(299), 28 | transforms.ToTensor(), 29 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))]) 30 | 31 | 32 | class depthwise_separable_conv(nn.Module): 33 | def __init__(self, nin, nout, kernel_size, padding, bias=False): 34 | super(depthwise_separable_conv, self).__init__() 35 | self.depthwise = nn.Conv2d(nin, nin, kernel_size=kernel_size, padding=padding, groups=nin, bias=bias) 36 | self.pointwise = nn.Conv2d(nin, nout, kernel_size=1, bias=bias) 37 | 38 | def forward(self, x): 39 | out = self.depthwise(x) 40 | out = self.pointwise(out) 41 | return out 42 | 43 | 44 | class Xception(nn.Module): 45 | def __init__(self, input_channel, num_classes=10): 46 | super(Xception, self).__init__() 47 | 48 | # Entry Flow 49 | self.entry_flow_1 = nn.Sequential( 50 | nn.Conv2d(input_channel, 32, kernel_size=3, stride=2, padding=1, bias=False), 51 | nn.BatchNorm2d(32), 52 | nn.ReLU(True), 53 | 54 | nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1), 55 | nn.BatchNorm2d(64), 56 | nn.ReLU(True) 57 | ) 58 | 59 | self.entry_flow_2 = nn.Sequential( 60 | depthwise_separable_conv(64, 128, 3, 1), 61 | nn.BatchNorm2d(128), 62 | nn.ReLU(True), 63 | 64 | depthwise_separable_conv(128, 128, 3, 1), 65 | nn.BatchNorm2d(128), 66 | nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 67 | ) 68 | 69 | self.entry_flow_2_residual = nn.Conv2d(64, 128, kernel_size=1, stride=2, padding=0) 70 | 71 | self.entry_flow_3 = nn.Sequential( 72 | nn.ReLU(True), 73 | depthwise_separable_conv(128, 256, 3, 1), 74 | nn.BatchNorm2d(256), 75 | 76 | nn.ReLU(True), 77 | depthwise_separable_conv(256, 256, 3, 1), 78 | nn.BatchNorm2d(256), 79 | 80 | nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 81 | ) 82 | 83 | self.entry_flow_3_residual = nn.Conv2d(128, 256, kernel_size=1, stride=2, padding=0) 84 | 85 | self.entry_flow_4 = nn.Sequential( 86 | nn.ReLU(True), 87 | depthwise_separable_conv(256, 728, 3, 1), 88 | nn.BatchNorm2d(728), 89 | 90 | nn.ReLU(True), 91 | depthwise_separable_conv(728, 728, 3, 1), 92 | nn.BatchNorm2d(728), 93 | 94 | nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 95 | ) 96 | 97 | self.entry_flow_4_residual = nn.Conv2d(256, 728, kernel_size=1, stride=2, padding=0) 98 | 99 | # Middle Flow 100 | self.middle_flow = nn.Sequential( 101 | nn.ReLU(True), 102 | depthwise_separable_conv(728, 728, 3, 1), 103 | nn.BatchNorm2d(728), 104 | 105 | nn.ReLU(True), 106 | depthwise_separable_conv(728, 728, 3, 1), 107 | nn.BatchNorm2d(728), 108 | 109 | nn.ReLU(True), 110 | depthwise_separable_conv(728, 728, 3, 1), 111 | nn.BatchNorm2d(728) 112 | ) 113 | 114 | # Exit Flow 115 | self.exit_flow_1 = nn.Sequential( 116 | nn.ReLU(True), 117 | depthwise_separable_conv(728, 728, 3, 1), 118 | nn.BatchNorm2d(728), 119 | 120 | nn.ReLU(True), 121 | depthwise_separable_conv(728, 1024, 3, 1), 122 | nn.BatchNorm2d(1024), 123 | 124 | nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 125 | ) 126 | self.exit_flow_1_residual = nn.Conv2d(728, 1024, kernel_size=1, stride=2, 
padding=0) 127 | self.exit_flow_2 = nn.Sequential( 128 | depthwise_separable_conv(1024, 1536, 3, 1), 129 | nn.BatchNorm2d(1536), 130 | nn.ReLU(True), 131 | 132 | depthwise_separable_conv(1536, 2048, 3, 1), 133 | nn.BatchNorm2d(2048), 134 | nn.ReLU(True) 135 | ) 136 | 137 | self.linear = nn.Linear(2048, num_classes) 138 | 139 | def forward(self, x): 140 | entry_out1 = self.entry_flow_1(x) 141 | print("flow_1 : ", entry_out1.shape) 142 | entry_out2 = self.entry_flow_2(entry_out1) + self.entry_flow_2_residual(entry_out1) 143 | print("flow_2 : ", entry_out2.shape) 144 | entry_out3 = self.entry_flow_3(entry_out2) + self.entry_flow_3_residual(entry_out2) 145 | print("flow_3 : ", entry_out3.shape) 146 | entry_out = self.entry_flow_4(entry_out3) + self.entry_flow_4_residual(entry_out3) 147 | print("flow_4 : ", entry_out.shape) 148 | 149 | middle_out = self.middle_flow(entry_out) + entry_out 150 | print("middle_flow : ", middle_out.shape) 151 | 152 | for i in range(7): 153 | middle_out = self.middle_flow(middle_out) + middle_out 154 | 155 | exit_out1 = self.exit_flow_1(middle_out) + self.exit_flow_1_residual(middle_out) 156 | print("exit_flow_1 : ", exit_out1.shape) 157 | exit_out2 = self.exit_flow_2(exit_out1) 158 | print("exit_flow_2 : ", exit_out2.shape) 159 | 160 | exit_avg_pool = F.adaptive_avg_pool2d(exit_out2, (1, 1)) 161 | exit_avg_pool_flat = exit_avg_pool.view(exit_avg_pool.size(0), -1) 162 | 163 | output = self.linear(exit_avg_pool_flat) 164 | 165 | return output 166 | 167 | 168 | 169 | # GPU setup 170 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 171 | # confirm GPU availability 172 | print(torch.cuda.is_available()) 173 | 174 | # hyperparameter settings 175 | num_epochs = 10 176 | num_classes = 7 177 | batch_size = 128 178 | learning_rate = 0.001 179 | 180 | trans1 = transforms.Compose([transforms.ToTensor(), 181 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) 182 | ]) 183 | 184 | train_dataset_path = "dataset/Face_expression_recognition_dataset/images/train" 185 | train_dataset = torchvision.datasets.ImageFolder(root=train_dataset_path, transform=trans1) 186 | train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=0) 187 | 188 | 189 | 190 | # create the model instance 191 | model = Xception(3, 7) 192 | print("Network 생성") 193 | print(model) 194 | model.cuda() 195 | criterion = nn.CrossEntropyLoss() 196 | optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) 197 | 198 | total_step = len(train_loader) 199 | for epoch in range(0, num_epochs): 200 | for i, (images, labels) in enumerate(train_loader): 201 | images = images.to(device) 202 | labels = labels.to(device) 203 | 204 | outputs = model(images) 205 | loss = criterion(outputs, labels) 206 | 207 | optimizer.zero_grad() 208 | loss.backward() 209 | optimizer.step() 210 | 211 | if (i%10) == 0: 212 | print(f"Epoch [{epoch}/{num_epochs}], Step [{i+1}/{total_step}], Loss : {loss.item()}") 213 | 214 | print("학습 끝.
모델 저장.") 215 | torch.save(model, "FaceEmotionModel.pt") -------------------------------------------------------------------------------- /src/front-end/src/pages/test/VideoPlay.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import axios from 'axios'; 3 | import '../App.css'; 4 | import ReactPlayer from 'react-player'; 5 | import { Link, BrowserRouter as Router, Route } from 'react-router-dom'; 6 | import Webcam from 'react-webcam'; 7 | import { 8 | Radar, 9 | RadarChart, 10 | PolarGrid, 11 | PolarAngleAxis, 12 | PolarRadiusAxis, 13 | Legend, 14 | } from 'recharts'; 15 | import AppBar from '@material-ui/core/AppBar'; 16 | import Toolbar from '@material-ui/core/Toolbar'; 17 | import Typography from '@material-ui/core/Typography'; 18 | import IconButton from '@material-ui/core/IconButton'; 19 | import MenuIcon from '@material-ui/icons/Menu'; 20 | import Breadcrumbs from '@material-ui/core/Breadcrumbs'; 21 | import UserContext from '../UserContext'; 22 | import { updateArrayBindingPattern } from 'typescript'; 23 | 24 | class VideoPlay extends Component { 25 | constructor(props) { 26 | super(props); 27 | this.state = { 28 | realtimeUserFace: null, 29 | realtimeStart: 0, 30 | video: {}, 31 | signalData: [ 32 | { 33 | emotionTag: 'happy', 34 | A: 1.0, 35 | fullMark: 1.0, 36 | }, 37 | { 38 | emotionTag: 'sad', 39 | A: 0.0, 40 | fullMark: 1.0, 41 | }, 42 | { 43 | emotionTag: 'disgust', 44 | A: 0.0, 45 | fullMark: 1.0, 46 | }, 47 | { 48 | emotionTag: 'contempt', 49 | A: 0.0, 50 | fullMark: 1.0, 51 | }, 52 | { 53 | emotionTag: 'surprise', 54 | A: 0.0, 55 | fullMark: 1.0, 56 | }, 57 | { 58 | emotionTag: 'fear', 59 | A: 0.0, 60 | fullMark: 1.0, 61 | }, 62 | { 63 | emotionTag: 'neutral', 64 | A: 0.0, 65 | fullMark: 1.0, 66 | }, 67 | ], 68 | user: { 69 | id: 0, 70 | name: '', 71 | loggedIn: false, 72 | }, 73 | emotionTag: null, 74 | imageIndex: 1, 75 | }; 76 | } 77 | 78 | redirectToLogin() { 79 | return this.props.history.push(`/Login`); 80 | } 81 | 82 | static contextType = UserContext; 83 | 84 | componentWillMount() { 85 | try { 86 | const selectedEmotionTag = this.props.location.state.emotionTag; 87 | console.log(selectedEmotionTag); 88 | this.setState({ 89 | emotionTag: selectedEmotionTag, 90 | }); 91 | const { user } = this.context; 92 | this.setState({ 93 | user: this.context.user, 94 | }); 95 | console.log('user is', user); 96 | if (user) { 97 | console.log('user id', user.id); 98 | console.log('selectedEmotion', selectedEmotionTag); 99 | this.getVideo(user.id, selectedEmotionTag); 100 | this.setState({ realtimeStart: this.state.realtimeStart + 1 }); 101 | } else { 102 | this.redirectToLogin(); 103 | } 104 | } catch (error) { 105 | console.log(error); 106 | this.props.history.push('/Option'); 107 | } 108 | } 109 | componentWillUnmount() { 110 | this.getUserImg = null; 111 | // this.props.isLast = true; 112 | } 113 | componentDidMount() {} 114 | 115 | setRef = (webcam) => { 116 | this.webcam = webcam; 117 | }; 118 | 119 | getVideo = async (id, emotionTag) => { 120 | console.log(id, emotionTag); 121 | try { 122 | const res = await axios.get(`api/v1/user/${id}/analyze/${emotionTag}/`); 123 | console.log(res.data); 124 | const videoData = res.data; 125 | this.setState({ video: videoData }); 126 | console.log('video is', this.state.video); 127 | } catch (error) { 128 | console.log(error.response.message); 129 | } 130 | }; 131 | 132 | getUserImg = () => { 133 | const captureImg = setInterval(() => { 134 | var 
base64Str = this.webcam.getScreenshot(); 135 | var file = dataURLtoFile( 136 | base64Str, 137 | `${this.state.user.id}-${this.state.video.id}-${( 138 | '000' + this.state.imageIndex 139 | ).slice(-3)}.jpg`, 140 | ); 141 | console.log('getUserImg 실행중'); 142 | this.setState({ 143 | realtimeUserFace: file, 144 | imageIndex: this.state.imageIndex + 1, 145 | realtimeStart: this.state.realtimeStart + 1, 146 | }); 147 | this.realtimeUserFace(file); 148 | }, 2000); 149 | 150 | const dataURLtoFile = (dataurl, filename) => { 151 | var arr = dataurl.split(','), 152 | mime = arr[0].match(/:(.*?);/)[1], 153 | bstr = atob(arr[1]), 154 | n = bstr.length, 155 | u8arr = new Uint8Array(n); 156 | 157 | while (n--) { 158 | u8arr[n] = bstr.charCodeAt(n); 159 | } 160 | 161 | return new File([u8arr], filename, { type: mime }); 162 | }; 163 | }; 164 | 165 | realtimeUserFace = (file) => { 166 | try { 167 | let realtimeData = new FormData(); 168 | realtimeData.append('image', file); 169 | realtimeData.append('imgPath', this.state.video.imgPath); 170 | console.log('realtimeUserFace image file', file); 171 | console.log(this.state.video.imgPath); 172 | // console.log('testing....', this.state.realtimeUserFace); 173 | return ( 174 | axios 175 | // .get(`api/v1/user/${id}/analyze/real-time-result/`, image, { 176 | .post( 177 | `api/v1/user/${this.state.user.id}/analyze/real-time-result/`, 178 | realtimeData, 179 | { 180 | headers: { 181 | 'content-type': 'multipart/form-data', 182 | }, 183 | }, 184 | ) 185 | .then((response) => { 186 | let values = response.emotionValues; 187 | console.log(response); 188 | // console.log(response.data); 189 | let newSignalData = this.state.signalData; 190 | console.log(newSignalData); 191 | const emotionList = [ 192 | 'happy', 193 | 'sad', 194 | 'disgust', 195 | 'contempt', 196 | 'surprise', 197 | 'fear', 198 | 'neutral', 199 | ]; 200 | for (let emotionIdx = 0; emotionIdx < 7; emotionIdx++) { 201 | newSignalData[emotionIdx].A = 202 | response.data.emotionValues[emotionList[emotionIdx]]; 203 | } 204 | }) 205 | ); 206 | } catch (error) { 207 | console.log(error); 208 | } 209 | }; 210 | 211 | getEmotions = async (id, emotionTag) => { 212 | const response = await axios 213 | .get(`api/v1/user/${id}/analyze/${emotionTag}/result/`) 214 | .then((response) => console.log(response)) 215 | .catch((error) => console.log(error)); 216 | this.append(response); 217 | }; 218 | 219 | render() { 220 | if (this.state.realtimeStart == 1) { 221 | this.getUserImg(); 222 | } 223 | 224 | return ( 225 |
226 |
227 | 228 | 229 | 230 | 231 | 232 | 233 | RealTime Emotion 234 | 235 | 236 | 237 | 238 | Home 239 | 240 | 241 | Logout 242 | 243 | 244 | 245 | 246 |
247 | 254 | 255 | 264 | 265 | 271 | 272 | 273 | 274 | 281 | 282 |
283 | ); 284 | } 285 | } 286 | 287 | export default VideoPlay; 288 | -------------------------------------------------------------------------------- /src/front-end/src/pages/VideoPlay.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import axios from 'axios'; 3 | import '../App.css'; 4 | import ReactPlayer from 'react-player'; 5 | import { withRouter } from 'react-router-dom'; 6 | import Webcam from 'react-webcam'; 7 | import { 8 | ComposedChart, 9 | XAxis, 10 | YAxis, 11 | Tooltip, 12 | CartesianGrid, 13 | Area, 14 | Bar, 15 | Legend, 16 | } from 'recharts'; 17 | 18 | import UserContext from '../UserContext'; 19 | 20 | import NavBar from '../components/NavBar'; 21 | import railed from '../railed.png'; 22 | import { Typography, Grid } from '@material-ui/core'; 23 | 24 | class VideoPlay extends Component { 25 | constructor(props) { 26 | super(props); 27 | this.state = { 28 | realtimeUserFace: null, 29 | realtimeStart: 0, 30 | video: {}, 31 | signalData: [ 32 | { 33 | emotionTag: 'happiness', 34 | multi: 0.0, 35 | face: 0.0, 36 | eeg: 0.0, 37 | }, 38 | { 39 | emotionTag: 'sadness', 40 | multi: 0.0, 41 | face: 0.0, 42 | eeg: 0.0, 43 | }, 44 | { 45 | emotionTag: 'disgust', 46 | multi: 0.0, 47 | face: 0.0, 48 | eeg: 0.0, 49 | }, 50 | { 51 | emotionTag: 'fear', 52 | multi: 0.0, 53 | face: 0.0, 54 | eeg: 0.0, 55 | }, 56 | { 57 | emotionTag: 'neutral', 58 | multi: 0.0, 59 | face: 0.0, 60 | eeg: 0.0, 61 | }, 62 | ], 63 | user: { 64 | id: 0, 65 | name: '', 66 | loggedIn: false, 67 | }, 68 | emotionTag: null, 69 | imageIndex: 1, 70 | fullConnected: false, 71 | badConnection: { 72 | eeg1: 1, 73 | eeg2: 1, 74 | eeg3: 1, 75 | eeg4: 1, 76 | eeg5: 1, 77 | eeg6: 1, 78 | eeg7: 1, 79 | eeg8: 1, 80 | }, 81 | }; 82 | } 83 | 84 | redirectToLogin() { 85 | return this.props.history.push(`/Login`); 86 | } 87 | 88 | static contextType = UserContext; 89 | 90 | componentWillMount() { 91 | try { 92 | const selectedEmotionTag = this.props.location.state.emotionTag; 93 | console.log(selectedEmotionTag); 94 | this.setState({ 95 | emotionTag: selectedEmotionTag, 96 | }); 97 | const { user } = this.context; 98 | this.setState({ 99 | user: this.context.user, 100 | }); 101 | console.log('user is', user); 102 | if (user) { 103 | console.log('user id', user.id); 104 | console.log('selectedEmotion', selectedEmotionTag); 105 | this.getVideo(user.id, selectedEmotionTag); 106 | this.setState({ realtimeStart: this.state.realtimeStart + 1 }); 107 | } else { 108 | this.redirectToLogin(); 109 | } 110 | } catch (error) { 111 | console.log(error); 112 | this.props.history.push('/Analyze'); 113 | } 114 | } 115 | componentDidMount() {} 116 | 117 | setRef = (webcam) => { 118 | this.webcam = webcam; 119 | }; 120 | 121 | componentWillUnmount() { 122 | clearInterval(this.captureImg); 123 | } 124 | 125 | getVideo = async (id, emotionTag) => { 126 | console.log(id, emotionTag); 127 | try { 128 | const res = await axios.get(`api/v1/user/${id}/analyze/${emotionTag}/`); 129 | console.log(res.data); 130 | const videoData = res.data; 131 | this.setState({ video: videoData }); 132 | console.log('video is', this.state.video); 133 | } catch (error) { 134 | console.log(error.response.message); 135 | } 136 | }; 137 | 138 | getUserImg = () => { 139 | var cnt = 0; 140 | this.captureImg = setInterval(() => { 141 | try { 142 | var base64Str = this.webcam.getScreenshot(); 143 | 144 | var file = dataURLtoFile( 145 | base64Str, 146 | 
`${this.state.user.id}-${this.state.video.id}-${( 147 | '000' + this.state.imageIndex 148 | ).slice(-3)}.jpg`, 149 | ); 150 | console.log('getUserImg 실행중'); 151 | this.setState({ 152 | realtimeUserFace: file, 153 | imageIndex: this.state.imageIndex + 1, 154 | realtimeStart: this.state.realtimeStart + 1, 155 | }); 156 | this.realtimeUserFace(file); 157 | cnt++; 158 | console.log('cnt is', cnt); 159 | if (cnt == 77) clearInterval(this.captureImg); 160 | if (cnt == 80) { 161 | console.log('종료합니다.', cnt); 162 | return this.props.history.push(`/Result`); 163 | } 164 | } catch { 165 | if ( 166 | this.props.location == '/Result' || 167 | this.props.location == 'Analyze' 168 | ) { 169 | this.props.history.push(this.props.location); 170 | } 171 | } 172 | }, 1000); 173 | 174 | const dataURLtoFile = (dataurl, filename) => { 175 | var arr = dataurl.split(','), 176 | mime = arr[0].match(/:(.*?);/)[1], 177 | bstr = atob(arr[1]), 178 | n = bstr.length, 179 | u8arr = new Uint8Array(n); 180 | 181 | while (n--) { 182 | u8arr[n] = bstr.charCodeAt(n); 183 | } 184 | 185 | return new File([u8arr], filename, { type: mime }); 186 | }; 187 | }; 188 | 189 | realtimeUserFace = (file) => { 190 | try { 191 | let realtimeData = new FormData(); 192 | realtimeData.append('image', file); 193 | realtimeData.append('dateDirPath', this.state.video.dateDirPath); 194 | realtimeData.append('videoTag', this.state.video.tag); 195 | console.log('realtimeUserFace image file', file); 196 | console.log(this.state.video.dateDirPath); 197 | console.log(this.state.badConnection); 198 | return axios 199 | .post(`api/v1/user/analyze/real-time-result/`, realtimeData, { 200 | headers: { 201 | 'content-type': 'multipart/form-data', 202 | }, 203 | }) 204 | .then((response) => { 205 | if (response.data.emotionValues) { 206 | let values = response.emotionValues; 207 | console.log(response); 208 | 209 | let newSignalData = []; 210 | console.log(newSignalData); 211 | const emotionList = [ 212 | 'happiness', 213 | 'sadness', 214 | 'disgust', 215 | 'fear', 216 | 'neutral', 217 | ]; 218 | for (let emotionIdx = 0; emotionIdx < 5; emotionIdx++) { 219 | newSignalData.push({ 220 | emotionTag: emotionList[emotionIdx], 221 | multi: response.data.emotionValues[emotionList[emotionIdx]], 222 | face: response.data.faceValues[emotionList[emotionIdx]], 223 | eeg: response.data.eegValues[emotionList[emotionIdx]], 224 | }); 225 | } 226 | let _badConnection = response.data.eegConnections; 227 | let eegCheck = true; 228 | for (let eegIdx = 1; eegIdx < 8; eegIdx++) { 229 | if (response.data.eegConnections['eeg' + eegIdx] == 1) { 230 | eegCheck = false; 231 | } 232 | } 233 | console.log(this.state.badConnection); 234 | this.setState({ 235 | signalData: newSignalData, 236 | badConnection: _badConnection, 237 | fullConnected: eegCheck, 238 | }); 239 | } 240 | }); 241 | } catch (error) { 242 | console.log(error); 243 | } 244 | }; 245 | 246 | render() { 247 | if (this.state.realtimeStart == 1) { 248 | this.getUserImg(); 249 | } 250 | console.log(this.state.signalData); 251 | let connection = this.state.badConnection; 252 | const varFromState = this.state.signalChange; 253 | return ( 254 |
255 | 256 | 257 | 258 | 259 | 266 | 267 | 268 | 269 | 278 | 279 | 280 | 281 | 282 | 283 | {this.state.fullConnected ? ( 284 | 285 | 286 | All sensors are connected! 287 | 288 | 289 | ) : ( 290 | 291 | 292 | BadConnection Railed : 293 | 294 | 295 | {connection.eeg1 ? '1 ' : ''} 296 | {connection.eeg2 ? '2 ' : ''} 297 | {connection.eeg3 ? '3 ' : ''} 298 | {connection.eeg4 ? '4 ' : ''} 299 | {connection.eeg5 ? '5 ' : ''} 300 | {connection.eeg6 ? '6 ' : ''} 301 | {connection.eeg7 ? '7 ' : ''} 302 | {connection.eeg8 ? '8 ' : ''} 303 | 304 | 305 | )} 306 | 307 | 308 | 314 | 315 | 316 | 317 | 318 | 319 | 320 | 327 | 334 | 335 | 336 | 337 | 338 |
339 | ); 340 | } 341 | } 342 | 343 | export default withRouter(VideoPlay); 344 | -------------------------------------------------------------------------------- /src/analyze/eeg/.ipynb_checkpoints/Train EEG based Emotion Classifier-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import torch\n", 10 | "import numpy as np\n", 11 | "import matplotlib.pyplot as plt\n", 12 | "\n", 13 | "from CustomDatasetClass import * # EEG_Dataset, get_train_test_set()\n", 14 | "from Models import * # CNN \n", 15 | "from TrainTestModule import * # train_model(), test_model()" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 2, 21 | "metadata": {}, 22 | "outputs": [ 23 | { 24 | "data": { 25 | "text/plain": [ 26 | "True" 27 | ] 28 | }, 29 | "execution_count": 2, 30 | "metadata": {}, 31 | "output_type": "execute_result" 32 | } 33 | ], 34 | "source": [ 35 | "torch.cuda.is_available()" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "metadata": {}, 41 | "source": [ 42 | "### Setting\n", 43 | "* lin_len 지우고, 자동 계산으로 바꾸기..**" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": 3, 49 | "metadata": {}, 50 | "outputs": [], 51 | "source": [ 52 | "# dataset setting ===================\n", 53 | "data_type = \"fftMap\" # \"fftMap\", \"fd\"\n", 54 | "dataset = \"SEED\" # \"DEAP\", \"SEED\"\n", 55 | "split = \"10sec_5over\" # window sliding\n", 56 | "n_channel = 1 # input depth\n", 57 | "n_electrodes = 32 # (num of eeg sensor's electrodes)\n", 58 | "numOfClass = 3 # num of label\n", 59 | "# ==================================\n", 60 | "\n", 61 | "# model parameter ==================\n", 62 | "model_type =\"cla\" # \"reg\"\n", 63 | "lin_len = 884 if n_electrodes == 32 else 592 # vector's length after flattening # => 아 이건 계산하는걸로 바꾸자;\n", 64 | "out_len = 1 if model_type == \"reg\" else numOfClass\n", 65 | "# ===================================\n", 66 | "\n", 67 | "bs = 64 # batch size\n", 68 | "\n", 69 | "# not used for SEED dataset\n", 70 | "target = 0 # (V=0, A=1, D=2, L=3)" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "metadata": {}, 76 | "source": [ 77 | "### Load dataset" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 4, 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "import pickle\n", 87 | "\n", 88 | "data_path = \"InputData/\" + dataset + \"_\" + data_type + \"_\" + split + \".pickle\"\n", 89 | "meta_path = \"InputData/SEED_10sec_5over\"\n", 90 | "\n", 91 | "with open(data_path, 'rb') as f:\n", 92 | " eeg_dataset = pickle.load(f)\n", 93 | "\n", 94 | "if dataset == \"SEED\":\n", 95 | " with open(meta_path + \"_ptc_to_idx\", 'rb') as f:\n", 96 | " ptc_to_idx = pickle.load(f)\n", 97 | " with open(meta_path + \"_idx_to_ptc\", 'rb') as f:\n", 98 | " idx_to_ptc = pickle.load(f)" 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": 5, 104 | "metadata": {}, 105 | "outputs": [ 106 | { 107 | "name": "stdout", 108 | "output_type": "stream", 109 | "text": [ 110 | "전체 데이터 개수 = 9675\n", 111 | "데이터 하나의 shape = (32, 41)\n", 112 | "레이블 example = 1\n" 113 | ] 114 | } 115 | ], 116 | "source": [ 117 | "print(\"전체 데이터 개수 = \", len(eeg_dataset))\n", 118 | "print(\"데이터 하나의 shape = \", eeg_dataset[0][0].shape)\n", 119 | "print(\"레이블 example = \", eeg_dataset[0][1])" 120 | ] 121 | }, 122 | { 123 | "cell_type": "markdown", 124 | 
"metadata": {}, 125 | "source": [ 126 | "#### Label processing\n", 127 | "\n", 128 | "* if value > 5 then class = 1(High)\n", 129 | "* if value <= 5 then class = 0(Low)" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": 6, 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [ 138 | "if model_type == \"cla\": \n", 139 | " for i in range(len(eeg_dataset)):\n", 140 | " if dataset == \"DEAP\":\n", 141 | " V, A, D, L = eeg_dataset[i][1]\n", 142 | " V = process_label(V, numOfClass); D = process_label(D, numOfClass); \n", 143 | " A = process_label(A, numOfClass); L = process_label(L, numOfClass);\n", 144 | " eeg_dataset[i][1] = [V,A,D,L]\n", 145 | " elif dataset == \"SEED\": # already integer value\n", 146 | " eeg_dataset[i][1] += 1" 147 | ] 148 | }, 149 | { 150 | "cell_type": "markdown", 151 | "metadata": {}, 152 | "source": [ 153 | "#### Traverse method\n", 154 | "* DEAP도 ptc_to_idx, idx_to_ptc 파일을 만들자,,코드 개더럽;; **" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 7, 160 | "metadata": {}, 161 | "outputs": [], 162 | "source": [ 163 | "if dataset == \"DEAP\":\n", 164 | " noc = len(eeg_dataset)//1280 # num of chunks\n", 165 | " print(noc)\n", 166 | "\n", 167 | " def get_data_index(p_id, t_id, c_id=0, noc=1):\n", 168 | " return (noc * 40) * p_id + noc * t_id + c_id\n", 169 | "\n", 170 | " def get_base_index(p_id, t_id):\n", 171 | " return 40 * p_id + t_id\n", 172 | "\n", 173 | " # get p_id, t_id, c_id\n", 174 | " def from_data_index(index, noc=1):\n", 175 | " p_id,remain = index // (noc*40), index % (noc*40)\n", 176 | " return p_id, remain//noc, remain%noc\n", 177 | "\n", 178 | "elif dataset == \"SEED\":\n", 179 | " def get_data_index(p_id, t_id, c_id=0, noc=0): # noc is not used\n", 180 | " return ptc_to_idx[str(p_id+1) + \"-\" + str(t_id) + \"-\" + str(c_id)]\n", 181 | "\n", 182 | " def get_base_index(p_id, t_id):\n", 183 | " return 15 * p_id + t_id\n", 184 | "\n", 185 | " # get p_id, t_id, c_id\n", 186 | " def from_data_index(index, noc=0): # noc is not used\n", 187 | " return idx_to_ptc[index]" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": 8, 193 | "metadata": {}, 194 | "outputs": [], 195 | "source": [ 196 | "# Basic train-test split\n", 197 | "from sklearn.model_selection import train_test_split" 198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": 9, 203 | "metadata": {}, 204 | "outputs": [], 205 | "source": [ 206 | "def LOOCV_split(test_subject, eeg_dataset, target=0, noc=0):\n", 207 | " # test_subject = Leave this participant out\n", 208 | " train = []; test = []\n", 209 | " \n", 210 | " for idx in range(len(eeg_dataset)): \n", 211 | " p_id, t_id, c_id = from_data_index(idx, noc) # *\n", 212 | " \n", 213 | " if p_id == test_subject:\n", 214 | " test.append(eeg_dataset[idx])\n", 215 | " else:\n", 216 | " train.append(eeg_dataset[idx])\n", 217 | " return train, test" 218 | ] 219 | }, 220 | { 221 | "cell_type": "code", 222 | "execution_count": 10, 223 | "metadata": { 224 | "collapsed": true 225 | }, 226 | "outputs": [ 227 | { 228 | "name": "stdout", 229 | "output_type": "stream", 230 | "text": [ 231 | "\n", 232 | "[ Epoch 1 ] T_Loss = 0.017046 \n", 233 | " t/p Low Neutral High \n", 234 | " Low 320.0 0.0 310.0 \n", 235 | " Neutral 323.0 0.0 288.0 \n", 236 | " High 53.0 0.0 641.0 \n", 237 | "V_loss = 0.017119 Acc = 49.6641 \n", 238 | "\n", 239 | "[ Epoch 2 ] T_Loss = 0.016426 \n", 240 | " t/p Low Neutral High \n", 241 | " Low 350.0 19.0 261.0 \n", 242 | " Neutral 329.0 96.0 186.0 
\n", 243 | " High 80.0 13.0 601.0 \n", 244 | "V_loss = 0.016384 Acc = 54.1085 \n", 245 | "\n", 246 | "[ Epoch 3 ] T_Loss = 0.015800 \n", 247 | " t/p Low Neutral High \n", 248 | " Low 155.0 257.0 218.0 \n", 249 | " Neutral 68.0 415.0 128.0 \n", 250 | " High 27.0 104.0 563.0 \n", 251 | "V_loss = 0.015812 Acc = 58.5530 \n", 252 | "\n", 253 | "[ Epoch 4 ] T_Loss = 0.015314 \n", 254 | " t/p Low Neutral High \n", 255 | " Low 233.0 193.0 204.0 \n", 256 | " Neutral 104.0 379.0 128.0 \n", 257 | " High 43.0 76.0 575.0 \n", 258 | "V_loss = 0.015377 Acc = 61.3437 \n", 259 | "\n", 260 | "[ Epoch 5 ] T_Loss = 0.014904 \n", 261 | " t/p Low Neutral High \n", 262 | " Low 283.0 158.0 189.0 \n", 263 | " Neutral 135.0 365.0 111.0 \n", 264 | " High 58.0 58.0 578.0 \n", 265 | "V_loss = 0.015025 Acc = 63.3592 \n", 266 | "Finished Training\n" 267 | ] 268 | } 269 | ], 270 | "source": [ 271 | "import torch\n", 272 | "# 1. get neural network\n", 273 | "net = CNN(n_channel, lin_len, out_len, n_electrodes, model_type)\n", 274 | "\n", 275 | "# 2. train : test split\n", 276 | "# train, test = LOOCV_split(test_subject, eeg_dataset) # LOOCV\n", 277 | "train, test = train_test_split(eeg_dataset, test_size=0.2) # 8 : 2\n", 278 | "\n", 279 | "# 3. get dataset instance\n", 280 | "trainset, trainloader, testset, testloader = get_train_test_set(train, test, bs, dataset)\n", 281 | "\n", 282 | "# 4. train a network\n", 283 | "net, t_loss_list, v_loss_list, f1_list, acc_list = train_model(net, model_type, trainloader, testloader, numOfClass, epoch=5, detail=True)" 284 | ] 285 | }, 286 | { 287 | "cell_type": "markdown", 288 | "metadata": {}, 289 | "source": [ 290 | "### Leave-one-out-cross-validation over all participants" 291 | ] 292 | }, 293 | { 294 | "cell_type": "code", 295 | "execution_count": 12, 296 | "metadata": {}, 297 | "outputs": [], 298 | "source": [ 299 | "numOfParticipants = 32 if dataset == \"DEAP\" else 15\n", 300 | "\n", 301 | "for test_subject in range(0, numOfParticipants):\n", 302 | " print(test_subject, \"에 대한 LOOCV 학습\")\n", 303 | " net = CNN(n_channel, lin_len, out_len, n_electrodes, model_type)\n", 304 | " \n", 305 | " train, test = LOOCV_split(test_subject, eeg_dataset)\n", 306 | " trainset, trainloader, testset, testloader = get_train_test_set(train, test, bs, dataset)\n", 307 | " net, t_loss_list, v_loss_list, f1_list, acc_list = train_model(net, model_type, trainloader, testloader, numOfClass, epoch=5, detail=True)" 308 | ] 309 | } 310 | ], 311 | "metadata": { 312 | "kernelspec": { 313 | "display_name": "Python 3", 314 | "language": "python", 315 | "name": "python3" 316 | }, 317 | "language_info": { 318 | "codemirror_mode": { 319 | "name": "ipython", 320 | "version": 3 321 | }, 322 | "file_extension": ".py", 323 | "mimetype": "text/x-python", 324 | "name": "python", 325 | "nbconvert_exporter": "python", 326 | "pygments_lexer": "ipython3", 327 | "version": "3.6.5" 328 | } 329 | }, 330 | "nbformat": 4, 331 | "nbformat_minor": 4 332 | } 333 | -------------------------------------------------------------------------------- /src/front-end/src/pages/Result.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react'; 2 | import axios from 'axios'; 3 | import '../App.css'; 4 | import { withRouter } from 'react-router-dom'; 5 | import { Grid, Typography } from '@material-ui/core'; 6 | import { PieChart, Pie, Sector, Cell } from 'recharts'; 7 | import UserContext from '../UserContext'; 8 | import NavBar from '../components/NavBar'; 9 | 10 | 
const COLORS = ['#ffcc5c', '#87bdd8', '#96ceb4', '#6b5b95', '#e6e2d3']; 11 | 12 | const renderActiveShape = (props) => { 13 | const RADIAN = Math.PI / 180; 14 | const { 15 | cx, 16 | cy, 17 | midAngle, 18 | innerRadius, 19 | outerRadius, 20 | startAngle, 21 | endAngle, 22 | fill, 23 | percent, 24 | } = props; 25 | const sin = Math.sin(-RADIAN * midAngle); 26 | const cos = Math.cos(-RADIAN * midAngle); 27 | const sx = cx + (outerRadius + 10) * cos; 28 | const sy = cy + (outerRadius + 10) * sin; 29 | const mx = cx + (outerRadius + 30) * cos; 30 | const my = cy + (outerRadius + 30) * sin; 31 | const ex = mx + (cos >= 0 ? 1 : -1) * 22; 32 | const ey = my; 33 | const textAnchor = cos >= 0 ? 'start' : 'end'; 34 | return ( 35 | 36 | 45 | 54 | 59 | 60 | 61 | = 0 ? 1 : -1) * 12} 63 | y={ey} 64 | dy={18} 65 | textAnchor={textAnchor} 66 | fill="#999" 67 | > 68 | {`${(percent * 100).toFixed(0)}%`} 69 | 70 | 71 | ); 72 | }; 73 | 74 | const renderActiveShapeM = (props) => { 75 | const RADIAN = Math.PI / 180; 76 | const { 77 | cx, 78 | cy, 79 | midAngle, 80 | innerRadius, 81 | outerRadius, 82 | startAngle, 83 | endAngle, 84 | fill, 85 | payload, 86 | percent, 87 | multi, 88 | eeg, 89 | face, 90 | data, 91 | } = props; 92 | const sin = Math.sin(-RADIAN * midAngle); 93 | const cos = Math.cos(-RADIAN * midAngle); 94 | const sx = cx + (outerRadius + 10) * cos; 95 | const sy = cy + (outerRadius + 10) * sin; 96 | const mx = cx + (outerRadius + 30) * cos; 97 | const my = cy + (outerRadius + 30) * sin; 98 | const ex = mx + (cos >= 0 ? 1 : -1) * 22; 99 | const ey = my; 100 | const textAnchor = cos >= 0 ? 'start' : 'end'; 101 | return ( 102 | 103 | 111 | {payload.emotionTag} 112 | 113 | 114 | 123 | 132 | 137 | 138 | 139 | = 0 ? 1 : -1) * 12} 141 | y={ey} 142 | dy={18} 143 | textAnchor={textAnchor} 144 | fill="#999" 145 | > 146 | {`${(percent * 100).toFixed(0)}%`} 147 | 148 | 149 | ); 150 | }; 151 | 152 | class Result extends Component { 153 | constructor(props) { 154 | super(props); 155 | this.state = { 156 | signalData: [ 157 | { 158 | emotionTag: 'happiness', 159 | multi: 0.0, 160 | face: 0.0, 161 | eeg: 0.0, 162 | fullMark: 1.0, 163 | }, 164 | { 165 | emotionTag: 'sadness', 166 | multi: 0.0, 167 | face: 0.0, 168 | eeg: 0.0, 169 | fullMark: 1.0, 170 | }, 171 | { 172 | emotionTag: 'disgust', 173 | multi: 0.0, 174 | face: 0.0, 175 | eeg: 0.0, 176 | fullMark: 1.0, 177 | }, 178 | { 179 | emotionTag: 'fear', 180 | multi: 0.0, 181 | face: 0.0, 182 | eeg: 0.0, 183 | fullMark: 1.0, 184 | }, 185 | { 186 | emotionTag: 'neutral', 187 | multi: 0.0, 188 | face: 0.0, 189 | eeg: 0.0, 190 | fullMark: 1.0, 191 | }, 192 | ], 193 | 194 | user: { 195 | id: 0, 196 | name: '', 197 | loggedIn: false, 198 | }, 199 | emotionTag: null, 200 | imageIndex: 1, 201 | activeIndex: 0, 202 | }; 203 | } 204 | 205 | static contextType = UserContext; 206 | onPieEnter = (data, index) => { 207 | this.setState({ 208 | activeIndex: index, 209 | }); 210 | }; 211 | 212 | getFinalData = () => { 213 | axios 214 | .get(`api/v1/analyze/final-result/`, { 215 | // headers: { 216 | // 'content-type': 'multipart/form-data', 217 | // }, 218 | }) 219 | .then((response) => { 220 | if (response.data.multiResult) { 221 | let newSignalData = []; 222 | const emotionList = [ 223 | 'happiness', 224 | 'sadness', 225 | 'disgust', 226 | 'fear', 227 | 'neutral', 228 | ]; 229 | for (let emotionIdx = 0; emotionIdx < 5; emotionIdx++) { 230 | newSignalData.push({ 231 | emotionTag: emotionList[emotionIdx], 232 | multi: response.data.multiResult[emotionList[emotionIdx]], 233 | 
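// multi is the fused face+EEG probability; the face and eeg fields that follow
// carry the single-modality probabilities returned by the final-result endpoint.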
face: response.data.faceResult[emotionList[emotionIdx]], 234 | eeg: response.data.eegResult[emotionList[emotionIdx]], 235 | fullMark: 1.0, 236 | }); 237 | } 238 | this.setState({ 239 | signalData: newSignalData, 240 | hasAnalyzeData: true, 241 | }); 242 | } 243 | }); 244 | }; 245 | 246 | render() { 247 | const { user } = this.context; 248 | return ( 249 |
250 | {!this.state.hasAnalyzeData ? ( 251 | this.getFinalData() 252 | ) : ( 253 |
254 | 255 | 256 | 257 | 258 | {user.name}님의 최종감정입니다. 259 | 260 | 261 | 262 | 263 | 264 | 277 | {this.state.signalData.map((entry, index) => ( 278 | 282 | ))} 283 | 284 | 285 | 286 | 287 | 294 | 307 | {this.state.signalData.map((entry, index) => ( 308 | 312 | ))} 313 | 314 | 315 | 316 | 317 | 318 | 330 | {this.state.signalData.map((entry, index) => ( 331 | 335 | ))} 336 | 337 | 338 | 339 | 340 | 341 | 342 | {' '} 343 | 344 | Face 345 | 346 | 347 | 348 | {' '} 349 | 350 | Face&EEG 351 | 352 | 353 | 354 | {' '} 355 | 356 | EEG 357 | 358 | 359 | 360 | 361 |
362 | )} 363 |
364 | ); 365 | } 366 | } 367 | 368 | export default withRouter(Result); 369 | -------------------------------------------------------------------------------- /src/back-end/FBI/api/views.py: -------------------------------------------------------------------------------- 1 | from .models import * 2 | from django.db.models import Count 3 | from django.utils import timezone 4 | from django.urls import reverse 5 | from django.http import HttpResponse, JsonResponse, HttpResponseRedirect 6 | from rest_framework import status 7 | from rest_framework.decorators import api_view 8 | from rest_framework.response import Response 9 | from rest_framework.views import APIView 10 | from . import serializers 11 | from .customLogin import * 12 | import random, os, pickle, sys, shutil 13 | from PIL import Image 14 | 15 | sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname( 16 | os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))))))))) 17 | # from src.analyze.face.predict_face_emotion_faceapi import predict_emotion 18 | from src.analyzeModule import detectEmotion 19 | 20 | ROOT_DIR = os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname( 21 | os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))))) 22 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 23 | # Directory path for saving real-time data. 24 | dirPath = os.path.join(ROOT_DIR, 'FBI-data') 25 | dataDirPath = '' 26 | dateDirPath = '' 27 | # Path for saving userFace images. 28 | path = os.path.join(BASE_DIR, 'media') 29 | # Temporarily save encoded image of new user for signup. 30 | encodedImage = [] 31 | # Dict for saving accumulated real time data. 32 | resultsDic = {} 33 | 34 | @api_view(['POST']) 35 | def signup(request): 36 | serializer = serializers.UserSerializer(data=request.data) 37 | if serializer.is_valid(): 38 | # Save new user to db. 39 | newUser = User.objects.create_user(username=serializer.data['username'], 40 | userFace='default') 41 | newUser.save() 42 | # Update userFace file name. 43 | newUser.userFace = request.FILES['userFace'] 44 | newUser.save() 45 | payload = { 46 | 'id': newUser.id, 47 | 'username': newUser.username, 48 | } 49 | # Save encoded image of user. 50 | current_dir = os.getcwd() 51 | userInfo = [(newUser.id, newUser.username), encodedImage[0]] 52 | del encodedImage[0] 53 | if 'encoded_users' not in os.listdir(current_dir): 54 | with open('encoded_users', "wb") as fw: 55 | pickle.dump(userInfo, fw) 56 | fw.close() 57 | else: 58 | with open('encoded_users', "ab") as fi: 59 | pickle.dump(userInfo, fi) 60 | fi.close() 61 | request.session['id'] = newUser.id 62 | # Create data directory for saving real-time data. 63 | # Path : capstone-2020-2/FBI-data 64 | if not os.path.isdir(dirPath): 65 | os.mkdir(dirPath) 66 | # Create subdirectory for user. 
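# The per-user folder is named after the uploaded userFace file (file name
# without its extension); login() rebuilds the same path as "<username>_<id>",
# so the two naming schemes are assumed to agree.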
67 | global dataDirPath 68 | dataDirPath = os.path.join(dirPath, newUser.userFace.name.split("/")[1].split(".")[0]) 69 | if not os.path.isdir(dataDirPath): 70 | os.mkdir(dataDirPath) 71 | return JsonResponse(payload) 72 | else: 73 | return Response(serializer.errors) 74 | 75 | @api_view(['POST']) 76 | def login(request): 77 | img = Image.open(request.FILES['userFace']) 78 | imgPath = os.path.join(BASE_DIR, 'temp.jpg') 79 | img.save(imgPath, 'JPEG') 80 | 81 | try: 82 | img = face_recognition.load_image_file(imgPath) 83 | login_face_encoding = face_recognition.face_encodings(img, num_jitters=10, model="large")[0] 84 | os.remove(imgPath) 85 | except IndexError: 86 | return HttpResponse("Please take another picture.", status=status.HTTP_409_CONFLICT) 87 | 88 | current_dir = os.getcwd() 89 | if 'encoded_users' not in os.listdir(current_dir): 90 | encodedImage.append(login_face_encoding) 91 | return HttpResponse("First user.", status=status.HTTP_406_NOT_ACCEPTABLE) 92 | 93 | encodeUsers = [] 94 | with open('encoded_users', 'rb') as fr: 95 | while True: 96 | try: 97 | encodeUsers.append(pickle.load(fr)) 98 | except EOFError: 99 | break 100 | 101 | user = isUser(login_face_encoding, encodeUsers) 102 | if user is None: 103 | encodedImage.append(login_face_encoding) 104 | return HttpResponse("Please sign up first", status=status.HTTP_404_NOT_FOUND) 105 | else: 106 | request.session['id'] = user[0] 107 | # Set user data directory path. 108 | global dataDirPath 109 | dataDirPath = os.path.join(dirPath, '{}_{}'.format(user[1], user[0])) 110 | payload = { 111 | 'id': user[0], 112 | 'username': user[1], 113 | } 114 | return JsonResponse(payload) 115 | 116 | @api_view(['POST']) 117 | def logout(request): 118 | try: 119 | request.session.flush() 120 | except KeyError: 121 | pass 122 | global dataDirPath 123 | dataDirPath = '' 124 | return HttpResponse("You're logged out.") 125 | 126 | class getAnalyzingVideo(APIView): 127 | def get(self, request, id, emotionTag): 128 | viewedVideos = request.session.get('viewedVideos', {}) 129 | if emotionTag not in viewedVideos: 130 | viewedVideos[emotionTag] = [] 131 | videoObjects = Video.objects.filter(tag=emotionTag) 132 | numOfVideos = videoObjects.aggregate(count=Count('videoId')).get('count') 133 | videos = videoObjects.values_list('videoId', flat=True) 134 | if numOfVideos == 0: 135 | return HttpResponse("No videos.") 136 | if len(viewedVideos[emotionTag]) == numOfVideos: 137 | return HttpResponse("You've seen every video.", status=status.HTTP_404_NOT_FOUND) 138 | while True: 139 | randId = random.sample(list(videos), 1)[0] 140 | if randId in viewedVideos[emotionTag]: 141 | continue 142 | video = Video.objects.filter(pk=randId).first() 143 | if video: 144 | viewedVideos[emotionTag].append(randId) 145 | request.session['viewedVideos'] = viewedVideos 146 | request.session.modified = True 147 | # Create subdirectory for played videos. 148 | videoInfo = '{}_{}'.format(video.title, video.videoId) 149 | global dataDirPath 150 | videoDirPath = os.path.join(dataDirPath, videoInfo) 151 | if not os.path.isdir(videoDirPath): 152 | os.makedirs(videoDirPath) 153 | # Create directories based on the datetime the video was played 154 | # since each video might be played multiple times. 155 | global dateDirPath 156 | now = timezone.localtime() 157 | dateDirPath = os.path.join(videoDirPath, now.strftime('%Y-%m-%d %H:%M:%S')) 158 | os.mkdir(dateDirPath) 159 | # Create directories separately for face, eeg data. 
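# face/ receives the webcam captures posted to realTimeAnalyze();
# eeg/ receives the accumulated all_signal.txt moved there by finalResult().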
160 | os.mkdir(os.path.join(dateDirPath, 'face')) 161 | os.mkdir(os.path.join(dateDirPath, 'eeg')) 162 | # Save result 163 | result = Result.objects.create(user=User.objects.filter(pk=id).first(), 164 | video=Video.objects.filter(pk=randId).first(), 165 | viewedDate=now, 166 | dataPath=dateDirPath) 167 | request.session['resultId'] = result.resultId 168 | return JsonResponse({ 169 | 'user' : id, 170 | 'link' : video.link, 171 | 'id' : video.videoId, 172 | 'startTime' : video.startTime, 173 | 'duration' : video.duration, 174 | 'tag' : video.tag, 175 | 'dateDirPath': dateDirPath, 176 | }) 177 | else: 178 | continue 179 | def post(self, request, id, emotionTag): 180 | return HttpResponseRedirect(reverse('realTimeResult')) 181 | 182 | @api_view(['POST']) 183 | def realTimeAnalyze(request): 184 | img = Image.open(request.FILES['image']) 185 | # Set image path and eeg path. 186 | imgName = request.data['image'].name 187 | eegName = 'test_signal.txt' 188 | #imgPath = os.path.join(request.data['dateDirPath'], 'face', imgName) 189 | global dateDirPath 190 | imgPath = os.path.join(os.path.join(dateDirPath, 'face'), imgName) 191 | eegTempPath = os.path.join(dirPath, eegName) 192 | # Save image to corresponding dir path. 193 | img.save(imgPath, "JPEG") 194 | 195 | emotionTag = request.data['videoTag'] 196 | if(emotionTag =="happy"): 197 | emotionTag="happiness" 198 | elif(emotionTag =="sad"): 199 | emotionTag="sadness" 200 | highestEmotion, multiResult, faceResult, eegResult, sensorStatus = detectEmotion(imgPath, eegTempPath, emotionTag) 201 | # Accumulate results. 202 | global resultsDic 203 | emotions = ["happiness", "sadness", "disgust", "fear", "neutral"] 204 | for emotion in emotions: 205 | if emotion not in resultsDic: 206 | resultsDic[emotion] = [0, 0, 0] 207 | resultsDic[emotion][0] += faceResult[emotion] 208 | resultsDic[emotion][1] += eegResult[emotion] 209 | resultsDic[emotion][2] += multiResult[emotion] 210 | 211 | payload = { 212 | 'emotionTag': highestEmotion, 213 | 'emotionValues': multiResult, 214 | 'faceValues': faceResult, 215 | 'eegValues': eegResult, 216 | 'eegConnections' : { 217 | "eeg1": int(sensorStatus[0]), 218 | "eeg2" : int(sensorStatus[1]), 219 | "eeg3" : int(sensorStatus[2]), 220 | "eeg4" : int(sensorStatus[3]), 221 | "eeg5" : int(sensorStatus[4]), 222 | "eeg6" : int(sensorStatus[5]), 223 | "eeg7" : int(sensorStatus[6]), 224 | "eeg8" : int(sensorStatus[7]), 225 | } 226 | } 227 | return JsonResponse(payload) 228 | 229 | @api_view(['GET']) 230 | def finalResult(request): 231 | # Create text to send signal to save accumulated EEG signal text. 232 | file = open(os.path.join(dirPath, "save.txt"), "w") 233 | file.write("Save accumulated EEG signals") 234 | file.close() 235 | # Save final result to DB. 
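# resultsDic[emotion] holds [face_sum, eeg_sum, multi_sum] accumulated per frame
# in realTimeAnalyze(); only the fused values (index 2) are written to the Result row.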
236 | global resultsDic 237 | resultId = request.session.get('resultId') 238 | result = Result.objects.filter(pk=resultId).first() 239 | result.happiness = resultsDic['happiness'][2] 240 | result.sadness = resultsDic['sadness'][2] 241 | result.disgust = resultsDic['disgust'][2] 242 | result.fear = resultsDic['fear'][2] 243 | result.neutral = resultsDic['neutral'][2] 244 | result.save() 245 | payload = { 246 | "faceResult" : { 247 | 'happiness' : resultsDic['happiness'][0], 248 | 'sadness' : resultsDic['sadness'][0], 249 | 'disgust' : resultsDic['disgust'][0], 250 | 'fear' : resultsDic['fear'][0], 251 | 'neutral' : resultsDic['neutral'][0], 252 | }, 253 | "eegResult" : { 254 | 'happiness' : resultsDic['happiness'][1], 255 | 'sadness' : resultsDic['sadness'][1], 256 | 'disgust' : resultsDic['disgust'][1], 257 | 'fear' : resultsDic['fear'][1], 258 | 'neutral' : resultsDic['neutral'][1], 259 | }, 260 | "multiResult" : { 261 | 'happiness' : resultsDic['happiness'][2], 262 | 'sadness' : resultsDic['sadness'][2], 263 | 'disgust' : resultsDic['disgust'][2], 264 | 'fear' : resultsDic['fear'][2], 265 | 'neutral' : resultsDic['neutral'][2], 266 | } 267 | } 268 | resultsDic = {} 269 | # Save accumulated EEG signal. 270 | filePath = os.path.join(dirPath, "all_signal.txt") 271 | destination = os.path.join(dateDirPath, "eeg") 272 | while True: 273 | if os.path.isfile(filePath): 274 | dest = shutil.move(filePath, destination) 275 | break 276 | return JsonResponse(payload) -------------------------------------------------------------------------------- /src/analyze/face/face_emotion.py: -------------------------------------------------------------------------------- 1 | # import 2 | import os 3 | import copy 4 | import numpy as np 5 | import cv2 6 | import torch 7 | import torch.nn as nn 8 | import torch.nn.functional as F 9 | import torchvision 10 | import torchvision.transforms as transforms 11 | from torch.utils.data import TensorDataset, DataLoader, Dataset 12 | import pandas as pd 13 | from sklearn.model_selection import train_test_split 14 | from sklearn.metrics import classification_report 15 | from sklearn.metrics import plot_confusion_matrix 16 | # import seaborn as sns 17 | # import matplotlib.pyplot as plt 18 | from pandas import DataFrame 19 | 20 | 21 | 22 | 23 | dataset_path = "dataset/FER2013/fer2013/fer2013.csv" 24 | image_size = (48,48) 25 | 26 | # 하이퍼 파라미터 설정 27 | batch_size = 64 28 | num_epochs = 100 29 | # num_epochs = 10 30 | input_shape = (1, 48, 48) 31 | validation_split = 0.2 32 | verbose = 1 33 | num_classes = 7 34 | patience = 50 35 | 36 | learning_rate = 0.001 37 | l2_regularization = 0.01 38 | 39 | 40 | 41 | class MyDataset(Dataset): 42 | def __init__(self, df_data, transform=None): 43 | super().__init__() 44 | pixels = df_data['pixels'].tolist() 45 | width, height = 48, 48 46 | faces = [] 47 | for pixel_sequence in pixels: 48 | face = [int(pixel) for pixel in pixel_sequence.split(' ')] 49 | face = np.asarray(face).reshape(1, width, height) 50 | # face = cv2.resize(face.astype('uint8'), image_size) 51 | faces.append(face.astype('float32')) 52 | 53 | self.faces = np.asarray(faces) 54 | # self.faces = np.expand_dims(faces, -1) 55 | # self.emotions = pd.get_dummies(df_data['emotion']).values 56 | self.emotions = df_data['emotion'].values 57 | self.transform = transform 58 | 59 | def __len__(self): 60 | return self.emotions.shape[0] 61 | 62 | def __getitem__(self, index): 63 | return self.faces[index], self.emotions[index] 64 | 65 | 66 | 67 | class FaceEmotion(nn.Module): 68 | def 
__init__(self, num_classes=num_classes): 69 | super(FaceEmotion, self).__init__() 70 | 71 | # base block 72 | self.base_block = nn.Sequential( 73 | nn.Conv2d(in_channels=1, out_channels=8, kernel_size=3, stride=1, padding=1, bias=False), 74 | nn.BatchNorm2d(8), 75 | nn.ReLU(inplace=True), 76 | nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3, stride=1, padding=1, bias=False), 77 | nn.BatchNorm2d(8), 78 | nn.ReLU(inplace=True)) 79 | 80 | # Residual black 1 - shortcut 81 | self.shortcut_1 = nn.Sequential( 82 | nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1, bias=False), 83 | nn.BatchNorm2d(16)) 84 | 85 | # Residual block 1 86 | self.residual_block_1 = nn.Sequential( 87 | nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1, bias=False), 88 | nn.BatchNorm2d(16), 89 | nn.ReLU(inplace=True), 90 | 91 | nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1, bias=False), 92 | nn.BatchNorm2d(16), 93 | 94 | # nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) 95 | ) 96 | 97 | # Residual black 2 - shortcut 98 | self.shortcut_2 = nn.Sequential( 99 | nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1, bias=False), 100 | nn.BatchNorm2d(32)) 101 | 102 | # Residual block 2 103 | self.residual_block_2 = nn.Sequential( 104 | nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1, bias=False), 105 | nn.BatchNorm2d(32), 106 | nn.ReLU(inplace=True), 107 | 108 | nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1, bias=False), 109 | nn.BatchNorm2d(32), 110 | 111 | # nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) 112 | ) 113 | 114 | # Residual black 3 - shortcut 115 | self.shortcut_3 = nn.Sequential( 116 | nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False), 117 | nn.BatchNorm2d(64)) 118 | 119 | # Residual block 3 120 | self.residual_block_3 = nn.Sequential( 121 | nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False), 122 | nn.BatchNorm2d(64), 123 | nn.ReLU(inplace=True), 124 | 125 | nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False), 126 | nn.BatchNorm2d(64), 127 | 128 | # nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) 129 | ) 130 | 131 | # Residual black 4 - shortcut 132 | self.shortcut_4 = nn.Sequential( 133 | nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False), 134 | nn.BatchNorm2d(128)) 135 | 136 | # Residual block 4 137 | self.residual_block_4 = nn.Sequential( 138 | nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1, bias=False), 139 | nn.BatchNorm2d(128), 140 | nn.ReLU(inplace=True), 141 | 142 | nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1, bias=False), 143 | nn.BatchNorm2d(128), 144 | 145 | # nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) 146 | ) 147 | 148 | # 아웃 채널은 클래스의 수 7가지... 149 | self.last_block = nn.Sequential( 150 | nn.Conv2d(in_channels=128, out_channels=num_classes, kernel_size=3), 151 | nn.AdaptiveAvgPool2d((1, 1)) # 인자로 주는 값은 결과물의 H x W : 전역평균 152 | ) 153 | 154 | def forward(self, x): 155 | x = self.base_block(x) 156 | x = self.residual_block_1(x) + self.shortcut_1(x) 157 | x = self.residual_block_2(x) + self.shortcut_2(x) 158 | x = self.residual_block_3(x) + self.shortcut_3(x) 159 | x = self.residual_block_4(x) + self.shortcut_4(x) 160 | 161 | x = self.last_block(x) 162 | 163 | # 벡터로 펴준다. 
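# Flatten the (N, num_classes, 1, 1) output of the global average pool into
# (N, num_classes) class logits.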
164 | x = x.view(x.size(0), -1) 165 | 166 | # output = F.softmax(x, dim=0) 167 | 168 | return x 169 | 170 | def predict(self, x): 171 | x = self.forward(x) 172 | output = F.softmax(x, dim=1) 173 | 174 | return output 175 | 176 | 177 | 178 | def loading_dataset(): 179 | # Dataset 불러오기 180 | print("Dataset Loading...") 181 | trans_train = transforms.Compose([transforms.ToTensor(), 182 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), 183 | transforms.RandomRotation(10), 184 | transforms.RandomHorizontalFlip(), 185 | ]) 186 | trans_val = transforms.Compose([transforms.ToTensor(), 187 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), 188 | ]) 189 | 190 | data = pd.read_csv(dataset_path) 191 | print(f"Dataset size : {len(data)}") 192 | train, test = train_test_split(data, test_size=0.2, shuffle=True) 193 | train, val = train_test_split(train, test_size=validation_split, shuffle=False) 194 | print(f"train : {len(train)} + val {len(val)} / test : {len(test)}") 195 | 196 | dataset_train = MyDataset(df_data=train, transform=trans_train) 197 | dataset_val = MyDataset(df_data=val, transform=trans_val) 198 | dataset_test = MyDataset(df_data=test,transform=None) 199 | 200 | loader_train = DataLoader(dataset=dataset_train, batch_size=batch_size, shuffle=True, num_workers=0) 201 | loader_val = DataLoader(dataset=dataset_val, batch_size=batch_size, shuffle=True, num_workers=0) 202 | loader_test = DataLoader(dataset=dataset_test, batch_size=batch_size, shuffle=True, num_workers=0) 203 | 204 | return loader_train, loader_val, loader_test 205 | 206 | 207 | 208 | def training_model(): 209 | # GPU 설정 210 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 211 | # GPU 설정 확인 212 | print("GPU : ", torch.cuda.is_available()) 213 | 214 | # model 객체 생성 215 | model = FaceEmotion() 216 | print("Network 생성") 217 | model.cuda() 218 | 219 | criterion = nn.CrossEntropyLoss() 220 | # optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) 221 | optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9) 222 | 223 | best_acc = 0 224 | best_model = model 225 | 226 | for epoch in range(0, num_epochs): 227 | # Training 228 | model.train(True) 229 | running_loss = 0.0 230 | running_corrects = 0 231 | running_total = 0 232 | total_step = len(loader_train) 233 | for i, (images, labels) in enumerate(loader_train): 234 | images = images.to(device) 235 | labels = labels.to(device) 236 | outputs = model(images) 237 | 238 | y_true = labels.cpu().numpy() 239 | _, predicted = torch.max(outputs.data, 1) 240 | # predicted = predicted.cpu().numpy() 241 | 242 | optimizer.zero_grad() 243 | loss = criterion(outputs, labels) 244 | loss.backward() 245 | optimizer.step() 246 | 247 | running_loss += loss.item() 248 | # running_corrects += torch.sum(predicted == labels.data) 249 | # running_corrects += (predicted == labels).sum().item() 250 | running_corrects += sum(1 for a, b in zip(predicted, labels) if a == b) 251 | running_total += len(y_true) 252 | 253 | # Traing Epoch 한 번 끝. 
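# End of one training epoch: average the loss over batches and compute
# accuracy over all training samples in the epoch.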
254 | epoch_loss = running_loss / len(loader_train) 255 | epoch_acc = 100 * running_corrects / running_total 256 | 257 | # if (i%100) == 0: 258 | # print(outputs[0]) 259 | # print(f"labels shape : {labels.shape}, outputs shape : {outputs.shape}") 260 | 261 | print(f"Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{total_step}], Train Accuracy : {epoch_acc}, Train Loss : {loss.item()}") 262 | 263 | # validation 264 | model.train(False) 265 | running_loss = 0.0 266 | running_corrects = 0 267 | running_total = 0 268 | total_step = len(loader_val) 269 | for i, (images, labels) in enumerate(loader_val): 270 | images = images.to(device) 271 | labels = labels.to(device) 272 | outputs = model(images) 273 | 274 | y_true = labels.cpu().numpy() 275 | _, predicted = torch.max(outputs.data, 1) 276 | predicted = predicted.cpu().numpy() 277 | 278 | 279 | loss = criterion(outputs, labels) 280 | 281 | running_loss += loss.item() 282 | # running_corrects += torch.sum(predicted == labels.data) 283 | # running_corrects += (predicted == labels).sum().item() 284 | running_corrects += sum(1 for a, b in zip(predicted, labels) if a == b) 285 | running_total += len(y_true) 286 | 287 | # Validation Epoch 한 번 끝 288 | # 평균 loss, accuracy 구하기 289 | epoch_loss = running_loss / len(loader_val) 290 | epoch_acc = 100 * running_corrects / running_total 291 | 292 | print(f"Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{total_step}], Val accuracy : {epoch_acc}") 293 | # 가장 성능이 좋았던 epoch 에서의 모델을 저장. 294 | if epoch_acc > best_acc: 295 | print("Best Model is saved...") 296 | best_acc = epoch_acc 297 | best_model = copy.deepcopy(model) 298 | # class_names = ['Angry', 'Fear','Disgust', 'Happy', 'Sad', 'Surprised', 'Neutral'] 299 | # print(classification_report(y_true, predicted, zero_division=0)) 300 | 301 | print("학습 끝. 
모델 저장.") 302 | torch.save(best_model.state_dict(), "FaceEmotionModel.pt") 303 | 304 | 305 | 306 | def confusion_matrix(preds, labels, conf_matrix): 307 | preds = torch.argmax(preds, 1) 308 | for p, t in zip(preds, labels): 309 | conf_matrix[p, t] += 1 310 | return conf_matrix 311 | 312 | 313 | 314 | def test_model(): 315 | # GPU 설정 316 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 317 | # GPU 설정 확인 318 | print("GPU : ", torch.cuda.is_available()) 319 | 320 | model = FaceEmotion() 321 | model.load_state_dict(torch.load("FaceEmotionModel.pt")) 322 | model.cuda() 323 | model.train(False) 324 | 325 | conf_matrix = np.zeros((7, 7)) 326 | 327 | class_total = [0, 0, 0, 0, 0, 0, 0] 328 | class_correct = [0, 0, 0, 0, 0, 0, 0] 329 | 330 | running_loss = 0.0 331 | running_corrects = 0 332 | running_total = 0 333 | 334 | total_step = len(loader_test) 335 | for i, (images, labels) in enumerate(loader_test): 336 | images = images.to(device) 337 | labels = labels.to(device) 338 | outputs = model(images) 339 | 340 | conf_matrix = confusion_matrix(outputs, labels, conf_matrix) 341 | 342 | y_true = labels.cpu().numpy() 343 | _, predicted = torch.max(outputs.data, 1) 344 | 345 | c = (predicted == labels).squeeze() 346 | for idx in range(len(labels)): 347 | label = labels[idx] 348 | pred = predicted[idx] 349 | if label == pred: 350 | class_correct[label] += 1 351 | class_total[label] += 1 352 | 353 | if (i % 10 == 0): 354 | print (f"Step : {i+1} / {total_step}") 355 | 356 | 357 | class_names = ['Angry', 'Fear','Disgust', 'Happy', 'Sad', 'Surprised', 'Neutral'] 358 | for emotion in range(0, 7): 359 | print(f"Accuracy of {class_names[emotion]} : {100 * class_correct[emotion] / class_total[emotion]}") 360 | 361 | df = DataFrame(conf_matrix, index=class_names, columns=class_names) 362 | print(df) 363 | # plt.figure(figsize=(15, 15)) 364 | # sns.heatmap(df, annot=True) 365 | 366 | 367 | if __name__ == "__main__": 368 | loading_dataset() 369 | training_model() --------------------------------------------------------------------------------