import time

import tensorflow as tf
import numpy as np
from PyQt5 import QtCore, QtGui
from PyQt5.QtCore import QThread
import cv2
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Dense

from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure


class Camera_Thread_class(QThread):
    """Worker thread that grabs webcam frames, detects faces with a Haar
    cascade, classifies the expression with a MobileNetV2-based Keras model,
    and pushes the annotated frames / results into the PyQt5 UI.

    NOTE(review): UI labels are updated directly from this worker thread.
    Qt widgets are not thread-safe; this should ideally go through
    signals/slots — confirm before relying on it in production.
    """

    def __init__(self, ui):
        """:param ui: the generated ``Ui_MainWindow`` instance whose labels
        and layout this thread writes into."""
        super().__init__()
        self.running_flag = True  # run() loops while this stays True
        self.ui = ui
        # Build the Keras emotion classifier once, up front.
        self.model = self.get_student_model()
        # Index order must match the label order used to train weight.h5.
        self.types = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
        # Fix: load the Haar cascade ONCE here instead of re-reading the
        # XML file from disk on every single frame (was inside
        # getDetectCamera).
        self.face_classifier = cv2.CascadeClassifier(
            "haarcascade_frontalface_default.xml"
        )
        self.initEmojis()
        self.initBar()

    def get_student_model(self):
        """Build MobileNetV2 plus a 5-way softmax head and load weight.h5.

        :returns: a ``Sequential`` model ready for ``predict``.
        """
        stu_base = tf.keras.applications.MobileNetV2(
            input_shape=None,
            alpha=1.0,
            include_top=True,
            # weights='imagenet',
            input_tensor=None,
            pooling=None,
            classes=1000,
            classifier_activation='softmax'
        )
        # Wrap the base network and append the emotion head.
        student_model = Sequential(stu_base)
        student_model.add(Dense(5, activation="softmax"))
        student_model.load_weights("weight.h5")
        return student_model

    def initBar(self):
        """Embed the matplotlib probability bar chart into the UI grid."""
        self.F = MyFigure(width=3, height=2, dpi=100)
        self.ui.gridLayout.addWidget(self.F)

    def initCamera_timer(self):
        """Timer-driven alternative to the run() loop.

        NOTE(review): never called anywhere in this file — run() drives
        showCamera() directly. Kept for backward compatibility; a QTimer
        created in a worker thread needs that thread's event loop to fire.
        """
        self.timer_camera = QtCore.QTimer()
        self.timer_camera.start(10)
        self.timer_camera.timeout.connect(self.showCamera)

    def showCamera(self):
        """Grab one frame, show the raw preview, then run detection."""
        flag, self.image = self.cap.read()
        if not flag:
            # Fix: camera read failed (device busy/unplugged). Skip this
            # frame instead of crashing in cv2.resize on a None image.
            return
        show = cv2.resize(self.image, (160, 150))
        show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
        showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.ui.Cameralabel.setPixmap(QtGui.QPixmap.fromImage(showImage))

        self.getDetectCamera(self.image)

    # Helper for showCamera.
    def getDetectCamera(self, pics):
        """Detect faces in ``pics`` (BGR frame), draw cartoon overlays,
        show the annotated preview, and classify the detected face.

        If several faces are found, only the last rectangle's
        (x, y, w, h) survives the loop and is classified.
        """
        frame = pics
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame_raw = pics.copy()  # unannotated copy used for the model crop
        color = (255, 0, 0)
        # Detect faces; minSize filters out small/background faces.
        faceRects = self.face_classifier.detectMultiScale(
            gray, scaleFactor=1.2, minNeighbors=3, minSize=(150, 150))

        x, y, w, h = (0, 0, 0, 0)
        if len(faceRects):  # at least one face detected
            for x, y, w, h in faceRects:
                # Fix: face box previously used (x + h, y + w) — width and
                # height were swapped.
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 5)
                # Cartoon "eyes".
                cv2.circle(frame, (x + w // 4, y + h // 4 + 30),
                           min(w // 8, h // 8), color, 5)
                cv2.circle(frame, (x + 3 * w // 4, y + h // 4 + 30),
                           min(w // 8, h // 8), color, 5)
                # Cartoon "mouth".
                cv2.rectangle(frame, (x + 3 * w // 8, y + 3 * h // 4),
                              (x + 5 * w // 8, y + 7 * h // 8), color, 5)

        # Annotated preview is shown whether or not a face was found.
        show = cv2.resize(frame, (160, 150))
        show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
        showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.ui.Detectlabel.setPixmap(QtGui.QPixmap.fromImage(showImage))

        # Feed the (last) face crop into the deep-learning model.
        if len(faceRects) > 0:
            self.pridectEmotion(frame_raw[y:y + h, x:x + w])
        else:
            self.ui.emtiontextlabel.setText("未检测到人脸!")

    def initEmojis(self):
        """Load and pre-size the emoji icons shown next to the prediction."""
        shapee = (160, 150)

        def load(name):
            # Load BGR, convert to RGB (QImage expects RGB888), pre-resize.
            img = cv2.imread("./emoji_pics/" + name)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            return cv2.resize(img, shapee)

        self.angry_emoji = load("angry.png")
        self.happy_emoji = load("happy.png")
        self.neutral_emoji = load("neutral.png")
        self.sad_emoji = load("sad.png")
        self.surprise_emoji = load("surprise.png")

    def pridectEmotion(self, frame):
        """Classify the cropped face; update the emotion text, emoji icon
        and the probability bar chart.

        NOTE: method name typo ("pridect") kept for backward compatibility.
        """
        # Grayscale round-trip flattens color, then back to 3 channels for
        # the MobileNetV2 input.
        img = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_LINEAR)
        img_show = cv2.resize(img, (160, 150))
        showImage = QtGui.QImage(img_show.data, img_show.shape[1], img_show.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.ui.Face_Label.setPixmap(QtGui.QPixmap.fromImage(showImage))
        # Channel flip + [0, 1] scaling; presumably matches the
        # preprocessing used when weight.h5 was trained — TODO confirm.
        img = (img[..., ::-1].astype(np.float32)) / 255.0
        img = img.reshape((1, 224, 224, 3))

        pred = self.model.predict(img)
        emotion_possible = pred * 100  # softmax scores as percentages
        type_index = np.argmax(pred)
        self.ui.emtiontextlabel.setText(self.types[type_index])

        # Table lookup replaces five copy-pasted if-blocks; order matches
        # self.types.
        emojis = [self.angry_emoji, self.happy_emoji, self.neutral_emoji,
                  self.sad_emoji, self.surprise_emoji]
        img_showw = emojis[type_index]
        showImage = QtGui.QImage(img_showw.data, img_showw.shape[1], img_showw.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.ui.EmojiLabel.setPixmap(QtGui.QPixmap.fromImage(showImage))

        # Redraw the probability distribution bar chart.
        possible = list(emotion_possible[0])
        self.F.axes.cla()
        self.F.axes.set_ylim([0, 100])
        self.F.axes.bar(["angry", "happy", "neutral", "sad", "surprise"], possible,
                        color=["r", "gold", "deepskyblue", "slategray", "g"])
        self.F.axes.set_title("Prediction Probability Distribution")
        self.F.draw()

    def startRunning(self):
        """Restart detection; no-op (returns 1) when already running."""
        if self.running_flag:
            return 1
        self.running_flag = True
        self.start()

    def stopRunning(self):
        """Ask run() to stop and report the pause in the UI.

        NOTE(review): connected to a button, so this sleep runs on the GUI
        thread and freezes the UI for ~1 s while the loop winds down.
        """
        self.running_flag = False
        time.sleep(1)
        self.ui.emtiontextlabel.setText("检测暂停!")

    def run(self):
        """Thread body: open the webcam and process frames until stopped."""
        fps = 30  # target frame rate; adjust as needed
        self.cap = cv2.VideoCapture(0)
        while self.running_flag:
            self.showCamera()
            time.sleep(1 / fps)
        # Fix: release the camera when the loop exits so a stop/start cycle
        # does not leak the device handle.
        self.cap.release()


class MyFigure(FigureCanvas):
    """Matplotlib canvas embedded in the Qt grid layout; exposes ``axes``
    for the bar chart drawn by the worker thread."""

    def __init__(self, width, height, dpi):
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        super(MyFigure, self).__init__(self.fig)
        self.axes = self.fig.add_subplot(111)

    def refresh(self):
        # NOTE(review): re-runs the canvas __init__ on the same Figure;
        # dubious, but kept byte-for-byte for compatibility (unused here).
        super(MyFigure, self).__init__(self.fig)
-------------------------------------------------------------------------------- /emoji_pics/neutral.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhiyongm/DeepLearning-Emotion-Classifier-withGUI/37980625766a622b0d5b7a63538c71f06d97b802/emoji_pics/neutral.png -------------------------------------------------------------------------------- /emoji_pics/raw/angry.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhiyongm/DeepLearning-Emotion-Classifier-withGUI/37980625766a622b0d5b7a63538c71f06d97b802/emoji_pics/raw/angry.png -------------------------------------------------------------------------------- /emoji_pics/raw/happy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhiyongm/DeepLearning-Emotion-Classifier-withGUI/37980625766a622b0d5b7a63538c71f06d97b802/emoji_pics/raw/happy.png -------------------------------------------------------------------------------- /emoji_pics/raw/neutral.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhiyongm/DeepLearning-Emotion-Classifier-withGUI/37980625766a622b0d5b7a63538c71f06d97b802/emoji_pics/raw/neutral.png -------------------------------------------------------------------------------- /emoji_pics/raw/sad.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhiyongm/DeepLearning-Emotion-Classifier-withGUI/37980625766a622b0d5b7a63538c71f06d97b802/emoji_pics/raw/sad.png -------------------------------------------------------------------------------- /emoji_pics/raw/surprise.png: -------------------------------------------------------------------------------- 
import sys

import matplotlib
from PyQt5 import QtWidgets


from Camera_Thread_class import Camera_Thread_class

# Select the Qt5 backend before any figures are created so the worker's
# FigureCanvas renders inside the Qt window.
matplotlib.use("Qt5Agg")

import mainwindow2 as uiWindow


class FaceMain:
    """Application entry point: builds the main window, starts the camera
    worker thread, wires the start/stop buttons, and runs the Qt event
    loop until the window is closed.

    Note: the dead module-level ``ui = 0`` global was removed — it was
    never read and was shadowed by the local ``ui`` below.
    """

    def __init__(self):
        app = QtWidgets.QApplication(sys.argv)
        self.MainWindow = QtWidgets.QMainWindow()
        ui = uiWindow.Ui_MainWindow()
        ui.setupUi(self.MainWindow)
        self.ui = ui
        self.initfunc()
        self.MainWindow.show()
        # Blocks in the Qt event loop, then exits the process with its
        # return code.
        sys.exit(app.exec_())

    def initfunc(self):
        """Create and start the camera worker thread, then hook up signals."""
        self.camera_thread = Camera_Thread_class(self.ui)
        self.camera_thread.start()
        # Wire the button signals.
        self.initsignal()

    def initsignal(self):
        # The buttons only toggle the worker thread's running flag.
        self.ui.DetectOnBtn.clicked.connect(self.camera_thread.startRunning)
        self.ui.DetectOffBtn.clicked.connect(self.camera_thread.stopRunning)


if __name__ == '__main__':
    FaceMain()
9 | 10 | 11 | from PyQt5 import QtCore, QtGui, QtWidgets 12 | 13 | 14 | class Ui_MainWindow(object): 15 | def setupUi(self, MainWindow): 16 | MainWindow.setObjectName("MainWindow") 17 | MainWindow.resize(936, 518) 18 | MainWindow.setStyleSheet("#MainWindow{border-image:url(./ktj_background.png);}") 19 | self.centralwidget = QtWidgets.QWidget(MainWindow) 20 | self.centralwidget.setObjectName("centralwidget") 21 | self.DetectOnBtn = QtWidgets.QPushButton(self.centralwidget) 22 | self.DetectOnBtn.setGeometry(QtCore.QRect(20, 380, 161, 91)) 23 | font = QtGui.QFont() 24 | font.setPointSize(28) 25 | self.DetectOnBtn.setFont(font) 26 | self.DetectOnBtn.setObjectName("DetectOnBtn") 27 | self.Cameralabel = QtWidgets.QLabel(self.centralwidget) 28 | self.Cameralabel.setGeometry(QtCore.QRect(30, 10, 161, 151)) 29 | self.Cameralabel.setAutoFillBackground(False) 30 | self.Cameralabel.setStyleSheet("border-style: solid;\n" 31 | "border-width: 1px;") 32 | self.Cameralabel.setText("") 33 | self.Cameralabel.setObjectName("Cameralabel") 34 | self.Detectlabel = QtWidgets.QLabel(self.centralwidget) 35 | self.Detectlabel.setGeometry(QtCore.QRect(210, 10, 161, 151)) 36 | self.Detectlabel.setStyleSheet("border-style: solid;\n" 37 | "border-width: 1px;") 38 | self.Detectlabel.setText("") 39 | self.Detectlabel.setObjectName("Detectlabel") 40 | self.shibiejieguo = QtWidgets.QLabel(self.centralwidget) 41 | self.shibiejieguo.setGeometry(QtCore.QRect(30, 280, 151, 41)) 42 | font = QtGui.QFont() 43 | font.setPointSize(21) 44 | self.shibiejieguo.setFont(font) 45 | self.shibiejieguo.setStyleSheet("color:white") 46 | self.shibiejieguo.setObjectName("shibiejieguo") 47 | self.emtiontextlabel = QtWidgets.QLabel(self.centralwidget) 48 | self.emtiontextlabel.setGeometry(QtCore.QRect(140, 280, 151, 41)) 49 | font = QtGui.QFont() 50 | font.setPointSize(21) 51 | self.emtiontextlabel.setFont(font) 52 | self.emtiontextlabel.setStyleSheet("color:white") 53 | 
self.emtiontextlabel.setObjectName("emtiontextlabel") 54 | self.Face_Label = QtWidgets.QLabel(self.centralwidget) 55 | self.Face_Label.setGeometry(QtCore.QRect(390, 10, 161, 151)) 56 | self.Face_Label.setStyleSheet("border-style: solid;\n" 57 | "border-width: 1px;") 58 | self.Face_Label.setText("") 59 | self.Face_Label.setObjectName("Face_Label") 60 | self.EmojiLabel = QtWidgets.QLabel(self.centralwidget) 61 | self.EmojiLabel.setGeometry(QtCore.QRect(390, 170, 161, 151)) 62 | self.EmojiLabel.setStyleSheet("border-style: solid;\n" 63 | "border-width: 1px;") 64 | self.EmojiLabel.setText("") 65 | self.EmojiLabel.setObjectName("EmojiLabel") 66 | self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget) 67 | self.gridLayoutWidget.setGeometry(QtCore.QRect(570, 0, 361, 481)) 68 | self.gridLayoutWidget.setObjectName("gridLayoutWidget") 69 | self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget) 70 | self.gridLayout.setContentsMargins(0, 0, 0, 0) 71 | self.gridLayout.setObjectName("gridLayout") 72 | self.DetectOffBtn = QtWidgets.QPushButton(self.centralwidget) 73 | self.DetectOffBtn.setGeometry(QtCore.QRect(210, 380, 161, 91)) 74 | font = QtGui.QFont() 75 | font.setPointSize(28) 76 | self.DetectOffBtn.setFont(font) 77 | self.DetectOffBtn.setObjectName("DetectOffBtn") 78 | self.noticetext = QtWidgets.QTextBrowser(self.centralwidget) 79 | self.noticetext.setGeometry(QtCore.QRect(20, 170, 351, 101)) 80 | self.noticetext.setObjectName("noticetext") 81 | self.online = QtWidgets.QLabel(self.centralwidget) 82 | self.online.setGeometry(QtCore.QRect(140, 320, 151, 41)) 83 | font = QtGui.QFont() 84 | font.setPointSize(21) 85 | self.online.setFont(font) 86 | self.online.setStyleSheet("color:white") 87 | self.online.setObjectName("online") 88 | self.onlined = QtWidgets.QLabel(self.centralwidget) 89 | self.onlined.setGeometry(QtCore.QRect(30, 320, 151, 41)) 90 | font = QtGui.QFont() 91 | font.setPointSize(21) 92 | self.onlined.setFont(font) 93 | 
self.onlined.setStyleSheet("color:white") 94 | self.onlined.setObjectName("onlined") 95 | MainWindow.setCentralWidget(self.centralwidget) 96 | self.menuBar = QtWidgets.QMenuBar(MainWindow) 97 | self.menuBar.setGeometry(QtCore.QRect(0, 0, 936, 24)) 98 | self.menuBar.setObjectName("menuBar") 99 | MainWindow.setMenuBar(self.menuBar) 100 | 101 | self.retranslateUi(MainWindow) 102 | QtCore.QMetaObject.connectSlotsByName(MainWindow) 103 | 104 | def retranslateUi(self, MainWindow): 105 | _translate = QtCore.QCoreApplication.translate 106 | MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow")) 107 | self.DetectOnBtn.setText(_translate("MainWindow", "启动识别\n" 108 | "Start")) 109 | self.shibiejieguo.setText(_translate("MainWindow", "识别结果:")) 110 | self.emtiontextlabel.setText(_translate("MainWindow", "Loading........")) 111 | self.DetectOffBtn.setText(_translate("MainWindow", "停止识别\n" 112 | "Stop")) 113 | self.online.setText(_translate("MainWindow", "Loading........")) 114 | self.onlined.setText(_translate("MainWindow", "网络状态:")) 115 | -------------------------------------------------------------------------------- /mainwindow2.ui: -------------------------------------------------------------------------------- 1 | 2 | 3 | MainWindow 4 | 5 | 6 | 7 | 0 8 | 0 9 | 936 10 | 518 11 | 12 | 13 | 14 | MainWindow 15 | 16 | 17 | #MainWindow{border-image:url(./ktj_background.png);} 18 | 19 | 20 | 21 | 22 | 23 | 20 24 | 380 25 | 161 26 | 91 27 | 28 | 29 | 30 | 31 | 28 32 | 33 | 34 | 35 | 启动识别 36 | Start 37 | 38 | 39 | 40 | 41 | 42 | 30 43 | 10 44 | 161 45 | 151 46 | 47 | 48 | 49 | false 50 | 51 | 52 | border-style: solid; 53 | border-width: 1px; 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 210 63 | 10 64 | 161 65 | 151 66 | 67 | 68 | 69 | border-style: solid; 70 | border-width: 1px; 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 30 80 | 280 81 | 151 82 | 41 83 | 84 | 85 | 86 | 87 | 21 88 | 89 | 90 | 91 | color:white 92 | 93 | 94 | 识别结果: 95 | 96 | 97 | 98 | 99 | 100 | 140 101 | 
280 102 | 151 103 | 41 104 | 105 | 106 | 107 | 108 | 21 109 | 110 | 111 | 112 | color:white 113 | 114 | 115 | Loading........ 116 | 117 | 118 | 119 | 120 | 121 | 390 122 | 10 123 | 161 124 | 151 125 | 126 | 127 | 128 | border-style: solid; 129 | border-width: 1px; 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 390 139 | 170 140 | 161 141 | 151 142 | 143 | 144 | 145 | border-style: solid; 146 | border-width: 1px; 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 570 156 | 0 157 | 361 158 | 481 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 210 167 | 380 168 | 161 169 | 91 170 | 171 | 172 | 173 | 174 | 28 175 | 176 | 177 | 178 | 停止识别 179 | Stop 180 | 181 | 182 | 183 | 184 | 185 | 20 186 | 170 187 | 351 188 | 101 189 | 190 | 191 | 192 | 193 | 194 | 195 | 140 196 | 320 197 | 151 198 | 41 199 | 200 | 201 | 202 | 203 | 21 204 | 205 | 206 | 207 | color:white 208 | 209 | 210 | Loading........ 211 | 212 | 213 | 214 | 215 | 216 | 30 217 | 320 218 | 151 219 | 41 220 | 221 | 222 | 223 | 224 | 21 225 | 226 | 227 | 228 | color:white 229 | 230 | 231 | 网络状态: 232 | 233 | 234 | 235 | 236 | 237 | 238 | 0 239 | 0 240 | 936 241 | 24 242 | 243 | 244 | 245 | 246 | 247 | 248 | 249 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib==3.3.4 2 | opencv-python==4.5.1.48 3 | PyQt5==5.15.2 4 | tensorflow==2.4.1 5 | 6 | -------------------------------------------------------------------------------- /weight.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhiyongm/DeepLearning-Emotion-Classifier-withGUI/37980625766a622b0d5b7a63538c71f06d97b802/weight.h5 --------------------------------------------------------------------------------