├── README.md
├── app.py
├── app1.py
├── emotion_model1.h5
├── emotion_model1.json
├── haarcascade_frontalface_default.xml
├── requirements.txt
└── runtime.txt
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# WebCam-Face-Emotion-Detection-Streamlit
Real-time face emotion detection Streamlit web application for server deployment.
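To run the app locally, install the dependencies from `requirements.txt` and start it with `streamlit run app.py` (the simpler `app1.py` variant runs the same way); `runtime.txt` pins the Python version for the deployment platform.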
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2
import streamlit as st
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array
from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, RTCConfiguration, WebRtcMode

# emotion labels, in the order of the model's five softmax outputs
emotion_dict = {0: 'angry', 1: 'happy', 2: 'neutral', 3: 'sad', 4: 'surprise'}

# load the model architecture from JSON and create the model
json_file = open('emotion_model1.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
classifier = model_from_json(loaded_model_json)

# load the trained weights into the model
classifier.load_weights("emotion_model1.h5")
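# Note: the model above is re-loaded on every Streamlit script rerun. A
# possible refinement (a sketch, not part of the original app) is to wrap
# the loading in st.cache so it runs only once with this Streamlit
# version (~0.87):
#
#   @st.cache(allow_output_mutation=True)
#   def load_classifier():
#       with open('emotion_model1.json', 'r') as f:
#           model = model_from_json(f.read())
#       model.load_weights('emotion_model1.h5')
#       return model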
# load the face detector; cv2.CascadeClassifier does not raise on a missing
# file, so check empty() instead of catching an exception
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
if face_cascade.empty():
    st.write("Error loading cascade classifiers")

# a public STUN server so the WebRTC connection can be established across NAT
RTC_CONFIGURATION = RTCConfiguration({"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]})

class Faceemotion(VideoTransformerBase):
    def transform(self, frame):
        img = frame.to_ndarray(format="bgr24")

        # convert to grayscale and detect faces
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(
            image=img_gray, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img=img, pt1=(x, y), pt2=(x + w, y + h),
                          color=(255, 0, 0), thickness=2)
            # crop the face and resize it to the 48x48 grayscale input the model expects
            roi_gray = img_gray[y:y + h, x:x + w]
            roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
            if np.sum(roi_gray) != 0:
                roi = roi_gray.astype('float') / 255.0
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)
                prediction = classifier.predict(roi)[0]
                maxindex = int(np.argmax(prediction))
                output = emotion_dict[maxindex]
                label_position = (x, y)
                cv2.putText(img, output, label_position,
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        return img
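# Note: VideoTransformerBase and transform() are the older streamlit-webrtc
# API. Newer releases use a processor class whose recv() method receives and
# returns an av.VideoFrame; an equivalent sketch (not in the original code):
#
#   import av
#   from streamlit_webrtc import VideoProcessorBase
#
#   class FaceemotionProcessor(VideoProcessorBase):
#       def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
#           img = frame.to_ndarray(format="bgr24")
#           # ...same detection and annotation as transform() above...
#           return av.VideoFrame.from_ndarray(img, format="bgr24")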
def main():
    # Face Analysis Application #
    st.title("Real Time Face Emotion Detection Application")
    activities = ["Home", "Webcam Face Detection", "About"]
    choice = st.sidebar.selectbox("Select Activity", activities)
    st.sidebar.markdown(
        """ Developed by Mohammad Juned Khan
            Email : Mohammad.juned.z.khan@gmail.com
            [LinkedIn](https://www.linkedin.com/in/md-juned-khan)""")
    if choice == "Home":
        html_temp_home1 = """<div style="background-color:#6D7B8D;padding:10px">
                             <h4 style="color:white;text-align:center;">
                             Face Emotion detection application using OpenCV, Custom CNN model and Streamlit.</h4>
                             </div>
                             <br>"""
        st.markdown(html_temp_home1, unsafe_allow_html=True)
        st.write("""
                 The application has two functionalities.

                 1. Real-time face detection using the webcam feed.

                 2. Real-time face emotion recognition.

                 """)
    elif choice == "Webcam Face Detection":
        st.header("Webcam Live Feed")
        st.write("Click on start to use the webcam and detect your face emotion")
        webrtc_streamer(key="example", mode=WebRtcMode.SENDRECV, rtc_configuration=RTC_CONFIGURATION,
                        video_processor_factory=Faceemotion)

    elif choice == "About":
        st.subheader("About this app")
        html_temp_about1 = """<div style="background-color:#6D7B8D;padding:10px">
                              <h4 style="color:white;text-align:center;">
                              Real time face emotion detection application using OpenCV, Custom Trained CNN model and Streamlit.</h4>
                              </div>
                              <br>"""
        st.markdown(html_temp_about1, unsafe_allow_html=True)

        html_temp4 = """
                     <div style="background-color:#98AFC7;padding:10px">
                     <h4 style="color:white;text-align:center;">This Application is developed by Mohammad Juned Khan using Streamlit Framework, Opencv, Tensorflow and Keras library for demonstration purpose. If you're on LinkedIn and want to connect, just click on the link in sidebar and shoot me a request. If you have any suggestion or want to comment just write a mail at Mohammad.juned.z.khan@gmail.com.</h4>
                     <h4 style="color:white;text-align:center;">Thanks for Visiting</h4>
                     </div>
                     <br>
                     <br>"""

        st.markdown(html_temp4, unsafe_allow_html=True)

    else:
        pass


if __name__ == "__main__":
    main()
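# Standalone usage sketch (not part of the app): the same classifier can
# score a single image file. The file name below is hypothetical.
#
#   img_gray = cv2.imread('face.jpg', cv2.IMREAD_GRAYSCALE)
#   roi = cv2.resize(img_gray, (48, 48)).astype('float') / 255.0
#   roi = np.expand_dims(img_to_array(roi), axis=0)   # shape (1, 48, 48, 1)
#   print(emotion_dict[int(np.argmax(classifier.predict(roi)[0]))])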
--------------------------------------------------------------------------------
/app1.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2
import streamlit as st
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array
from streamlit_webrtc import webrtc_streamer, VideoTransformerBase

# emotion labels, in the order of the model's five softmax outputs
emotion_dict = {0: 'angry', 1: 'happy', 2: 'neutral', 3: 'sad', 4: 'surprise'}

# load the model architecture from JSON and create the model
json_file = open('emotion_model1.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
classifier = model_from_json(loaded_model_json)

# load the trained weights into the model
classifier.load_weights("emotion_model1.h5")

# load the face detector; cv2.CascadeClassifier does not raise on a missing
# file, so check empty() instead of catching an exception
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
if face_cascade.empty():
    st.write("Error loading cascade classifiers")
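# Note: unlike app.py, this variant passes no RTCConfiguration with a STUN
# server to webrtc_streamer (called in main() below), so the WebRTC
# connection may fail for clients behind NAT once the app is deployed to a
# remote server.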
class VideoTransformer(VideoTransformerBase):
    def transform(self, frame):
        img = frame.to_ndarray(format="bgr24")

        # convert to grayscale and detect faces
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(
            image=img_gray, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img=img, pt1=(x, y), pt2=(x + w, y + h),
                          color=(255, 0, 0), thickness=2)
            # crop the face and resize it to the 48x48 grayscale input the model expects
            roi_gray = img_gray[y:y + h, x:x + w]
            roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
            if np.sum(roi_gray) != 0:
                roi = roi_gray.astype('float') / 255.0
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)
                prediction = classifier.predict(roi)[0]
                maxindex = int(np.argmax(prediction))
                output = emotion_dict[maxindex]
                label_position = (x, y)
                cv2.putText(img, output, label_position,
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        return img
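# Note: webrtc_streamer in main() below uses the legacy
# video_transformer_factory parameter; newer streamlit-webrtc releases
# prefer video_processor_factory, which is what app.py uses.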
def main():
    # Face Analysis Application #
    st.title("Real Time Face Emotion Detection Application")
    activities = ["Home", "Webcam Face Detection", "About"]
    choice = st.sidebar.selectbox("Select Activity", activities)
    st.sidebar.markdown(
        """ Developed by Mohammad Juned Khan
            Email : Mohammad.juned.z.khan@gmail.com
            [LinkedIn](https://www.linkedin.com/in/md-juned-khan)""")
    if choice == "Home":
        html_temp_home1 = """<div style="background-color:#6D7B8D;padding:10px">
                             <h4 style="color:white;text-align:center;">
                             Face Emotion detection application using OpenCV, Custom CNN model and Streamlit.</h4>
                             </div>
                             <br>"""
        st.markdown(html_temp_home1, unsafe_allow_html=True)
        st.write("""
                 The application has two functionalities.

                 1. Real-time face detection using the webcam feed.

                 2. Real-time face emotion recognition.

                 """)
    elif choice == "Webcam Face Detection":
        st.header("Webcam Live Feed")
        st.write("Click on start to use the webcam and detect your face emotion")
        webrtc_streamer(key="example", video_transformer_factory=VideoTransformer)

    elif choice == "About":
        st.subheader("About this app")
        html_temp_about1 = """<div style="background-color:#6D7B8D;padding:10px">
                              <h4 style="color:white;text-align:center;">
                              Real time face emotion detection application using OpenCV, Custom Trained CNN model and Streamlit.</h4>
                              </div>
                              <br>"""
        st.markdown(html_temp_about1, unsafe_allow_html=True)

        html_temp4 = """
                     <div style="background-color:#98AFC7;padding:10px">
                     <h4 style="color:white;text-align:center;">This Application is developed by Mohammad Juned Khan using Streamlit Framework, Opencv, Tensorflow and Keras library for demonstration purpose. If you're on LinkedIn and want to connect, just click on the link in sidebar and shoot me a request. If you have any suggestion or want to comment just write a mail at Mohammad.juned.z.khan@gmail.com.</h4>
                     <h4 style="color:white;text-align:center;">Thanks for Visiting</h4>
                     </div>
                     <br>
                     <br>"""

        st.markdown(html_temp4, unsafe_allow_html=True)

    else:
        pass


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/emotion_model1.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MainakRepositor/Visual-Emotion-Detection/cede2f317dba928982cac1367841366ae2c0a51c/emotion_model1.h5
--------------------------------------------------------------------------------
/emotion_model1.json:
--------------------------------------------------------------------------------
{"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "conv2d_input"}}, {"class_name": "Conv2D", "config": {"name": "conv2d", "trainable": true, "batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "filters": 32, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_1", "trainable": true, "dtype": "float32", "filters": 64, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Dropout", "config": {"name": "dropout", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_2", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_1", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Conv2D", "config": {"name": "conv2d_3", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer":
{"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_2", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Dropout", "config": {"name": "dropout_1", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Flatten", "config": {"name": "flatten", "trainable": true, "dtype": "float32", "data_format": "channels_last"}}, {"class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 1024, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 720, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dropout", "config": {"name": "dropout_2", "trainable": true, "dtype": "float32", "rate": 0.5, "noise_shape": null, "seed": null}}, {"class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "dtype": "float32", "units": 480, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dropout", "config": {"name": "dropout_3", "trainable": true, "dtype": "float32", "rate": 0.5, "noise_shape": null, "seed": null}}, {"class_name": "Dense", "config": {"name": "dense_3", "trainable": true, "dtype": "float32", "units": 240, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dense", "config": {"name": "dense_4", "trainable": true, "dtype": "float32", "units": 5, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.6.0", "backend": "tensorflow"} -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy~=1.21.2 2 | streamlit~=0.87.0 3 | keras~=2.4.3 4 | opencv-contrib-python-headless 5 | tensorflow-cpu 6 | streamlit_webrtc 7 | -------------------------------------------------------------------------------- /runtime.txt: 
--------------------------------------------------------------------------------
python-3.7.11
--------------------------------------------------------------------------------