├── runtime.txt ├── .gitignore ├── packages.txt ├── code ├── new_models │ ├── readme.md │ ├── demo.md │ ├── app_local_streamlit_select_music.py │ ├── app_local_streamlit_with_input_button.py │ ├── app_local_streamlit_ui_improved.py │ ├── app_PySimpleGUI_withui_2.py │ └── app_local_streamlit_error_h.py ├── model │ └── fer2013_mini_XCEPTION.102-0.66.hdf5 ├── ui_interfaces │ ├── app_PySimpleGUI.py │ ├── cli_main.py │ └── app_local_streamlit.py └── deployment │ └── app.py ├── requirements.txt ├── LICENSE ├── CONTRIBUTING.md ├── CODE_OF_CONDUCT.md ├── README.md └── LEARN.md /runtime.txt: -------------------------------------------------------------------------------- 1 | python-3.10.13 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | venv 2 | .venv 3 | env 4 | .env -------------------------------------------------------------------------------- /packages.txt: -------------------------------------------------------------------------------- 1 | libgl1-mesa-glx 2 | ffmpeg 3 | -------------------------------------------------------------------------------- /code/new_models/readme.md: -------------------------------------------------------------------------------- 1 | ### Make new contributions here!! -------------------------------------------------------------------------------- /code/model/fer2013_mini_XCEPTION.102-0.66.hdf5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/HEAD/code/model/fer2013_mini_XCEPTION.102-0.66.hdf5 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow==2.15.0 2 | opencv-python-headless==4.8.0.76 3 | keras==2.15.0 4 | requests==2.31.0 5 | keras-models==0.0.7 6 | PySimpleGUI 7 | streamlit 8 | streamlit-webrtc 9 | numpy -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Shivam Gupta 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
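Before trying any of the interfaces, it can help to confirm that the packages pinned in requirements.txt can actually load the bundled model and OpenCV's Haar cascade. The snippet below is a minimal sketch and is not a file that exists in the repository (the name `check_environment.py` is hypothetical); it assumes you run it from the repository root so the relative model path resolves, and the `(1, 7)` output shape simply reflects the seven emotion labels used throughout the code.

```python
# check_environment.py — hypothetical smoke test, not part of this repository.
# Verifies that the stack pinned in requirements.txt can load the bundled
# FER model and the OpenCV Haar cascade before any of the UIs are launched.
import cv2
import numpy as np
from keras.models import load_model

MODEL_PATH = "code/model/fer2013_mini_XCEPTION.102-0.66.hdf5"  # relative to repo root
EMOTIONS = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

def main():
    model = load_model(MODEL_PATH)
    cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    assert not cascade.empty(), "Haar cascade not found in this OpenCV build"
    # The mini-XCEPTION model expects a 64x64, single-channel, normalized face crop.
    dummy_face = np.zeros((1, 64, 64, 1), dtype="float32")
    preds = model.predict(dummy_face)
    print("Model output shape:", preds.shape)  # expected (1, 7), one score per emotion
    print("Top label on blank input:", EMOTIONS[int(np.argmax(preds))])

if __name__ == "__main__":
    main()
```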
22 | -------------------------------------------------------------------------------- /code/new_models/demo.md: -------------------------------------------------------------------------------- 1 | ## 🎵 Music Recommendation by Facial Expressions – Demo Hub 2 | 3 | Welcome to the community demo showcase for the **Facial Expression-Based Music Recommendation System**. 4 | This page highlights video demonstrations contributed by developers showcasing their implementations, UI approaches, and creative twists on the core project. 5 | 6 | > 💡 Want to add your own demo? See [Issue #11 – Demo Videos](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/issues/11) for discussion and [CONTRIBUTING.md](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/blob/contribution/CONTRIBUTING.md) for instructions. 7 | 8 | --- 9 | 10 | This project demonstrates a facial expression-based music recommendation system. 11 | It uses real-time webcam input to detect user emotions and plays songs aligned with the current mood — happy, sad, angry, or neutral. 12 | 13 | --- 14 | 15 | ### 🎯 Overview 16 | 17 | - **Project Title**: Music Recommendation Using Facial Expressions 18 | - **Input**: Webcam feed to detect facial expressions 19 | - **Output**: Music suggestions based on emotion 20 | 21 | --- 22 | 23 | ### 🚀 Features 24 | 25 | - Real-time emotion detection via webcam 26 | - Multiple GUI options: CLI, Streamlit, PySimpleGUI 27 | - Emotion-to-music mapping for better user experience 28 | - Modular and customizable 29 | 30 | --- 31 | 32 | ## 🌍 Community Demos 33 | 34 | | Contributor | Demo Video | Notes | 35 | |-------------|------------|-------| 36 | | [Shivam Gupta (Project Admin)](https://github.com/SGCODEX) | [Demo Link](https://www.youtube.com/watch?v=Qj5yUBjSr7I) | CLI Demo Video | 37 | | [Liban Ansari](https://github.com/LibanAnsari) | [Demo Link](https://drive.google.com/drive/folders/1Cx2MG26FyNOZknlcM_01XF4koXhaq0WK) | Installation guide with all 3 ways to run the app (CLI, Streamlit, PySimpleGUI) | 38 | | ... | ... | ... | 39 | 40 | --- 41 | 42 | ### 🤝 How to Contribute 43 | 44 | We welcome all contributors to submit their own demos — whether you’ve improved the system, added a new UI, or just want to show your version in action! 45 | You can reference demos listed above as examples. 46 | 47 | 1. Comment or participate in [Issue #11 – Demo Videos](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/issues/11) 48 | 2. Follow the steps in [CONTRIBUTING.md](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/blob/contribution/CONTRIBUTING.md) 49 | 3. Add your video to the table above 50 | 51 | --- 52 | 53 | ### 📜 License 54 | 55 | This project is licensed under the MIT License. 
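If you are putting together your own demo, note that the emotion-to-music step shared by every interface in this repository reduces to turning the detected label into a YouTube search and opening the first result; no YouTube API key is involved. The sketch below mirrors the `play_first_song` logic in `cli_main.py` (the "background tunes" query wording comes from that file and can be changed freely).

```python
# Minimal sketch of the emotion-to-music step used by the apps in this repository.
# It scrapes the first video id from the YouTube results page with a regex,
# in the same way cli_main.py does.
import re
import webbrowser

import requests


def play_first_song(emotion: str) -> None:
    search_url = f"https://www.youtube.com/results?search_query={emotion}+background+tunes"
    response = requests.get(search_url, timeout=10)
    if response.status_code != 200:
        print("Failed to retrieve YouTube search results. Status code:", response.status_code)
        return
    # YouTube video ids are 11 characters drawn from letters, digits, '-' and '_'.
    match = re.search(r'/watch\?v=([\w-]{11})', response.text)
    if match:
        webbrowser.open(f"https://www.youtube.com/watch?v={match.group(1)}")
    else:
        print("No video found for the given query.")


if __name__ == "__main__":
    play_first_song("Happy")  # any of: Angry, Disgust, Fear, Happy, Sad, Surprise, Neutral
```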
56 | -------------------------------------------------------------------------------- /code/ui_interfaces/app_PySimpleGUI.py: -------------------------------------------------------------------------------- 1 | #This is the code with PySimpleGUI as GUI for the project 2 | #Importing the libraries 3 | import cv2 4 | import PySimpleGUI as sg 5 | from keras.models import load_model 6 | import numpy as np 7 | import webbrowser 8 | from threading import Thread 9 | import requests 10 | import re 11 | 12 | # Load the pre-trained facial expression recognition model 13 | model = load_model('code/model/fer2013_mini_XCEPTION.102-0.66.hdf5') 14 | emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] 15 | 16 | def detect_emotion(frame, face_cascade): 17 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 18 | faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5) 19 | for (x, y, w, h) in faces: 20 | roi_gray = gray[y:y + h, x:x + w] 21 | roi_gray = cv2.resize(roi_gray, (64, 64), interpolation=cv2.INTER_AREA) 22 | roi = roi_gray / 255.0 23 | roi = np.reshape(roi, (1, 64, 64, 1)) 24 | prediction = model.predict(roi) 25 | emotion_label = emotions[np.argmax(prediction)] 26 | cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2) 27 | cv2.putText(frame, emotion_label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2) 28 | return frame, emotion_label 29 | return None, None # Return None when no faces are detected 30 | 31 | def video_thread(window): 32 | cap = cv2.VideoCapture(0) 33 | face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml') 34 | current_emotion = None 35 | while True: 36 | ret, frame = cap.read() 37 | if ret: 38 | frame = cv2.resize(frame, (640, 480)) # Resize frame for better performance 39 | frame_with_faces, current_detected_emotion = detect_emotion(frame, face_cascade) 40 | if frame_with_faces is not None: 41 | imgbytes = cv2.imencode('.png', frame_with_faces)[1].tobytes() 42 | window['-IMAGE-'].update(data=imgbytes) 43 | if current_detected_emotion != current_emotion: 44 | window['-EMOTION-'].update(value=f'Detected Emotion: {current_detected_emotion}') 45 | current_emotion = current_detected_emotion 46 | 47 | def play_song_with_emotion(emotion, window): 48 | search_query = f"https://www.youtube.com/results?search_query={emotion}+weekend+beats" 49 | response = requests.get(search_query) 50 | html_content = response.text 51 | match = re.search(r'/watch\?v=([^\"]+)', html_content) 52 | if match: 53 | video_id = match.group(1) 54 | video_url = f"https://www.youtube.com/watch?v={video_id}" 55 | webbrowser.open(video_url) 56 | 57 | def gui_thread(): 58 | layout = [ 59 | [sg.Image(filename='', key='-IMAGE-')], 60 | [sg.Text('Detected Emotion: ', key='-EMOTION-')], 61 | [sg.Button('Capture Emotion', button_color=('black', 'orange'), key='-CAPTURE-EMOTION-')], 62 | [sg.Button('Play Song', button_color=('black', 'green'), key='-PLAY-')], 63 | [sg.Text(size=(30, 1), key='-RETURN-VALUE-')] 64 | ] 65 | 66 | window = sg.Window('Facial Expression Recognition', layout) 67 | 68 | Thread(target=video_thread, args=(window,), daemon=True).start() 69 | 70 | current_emotion = None # Store the latest captured emotion 71 | 72 | while True: 73 | event, values = window.read() 74 | if event == sg.WINDOW_CLOSED: 75 | break 76 | elif event == '-CAPTURE-EMOTION-': 77 | current_emotion = window['-EMOTION-'].DisplayText.split(":")[1].strip() 78 | window['-RETURN-VALUE-'].update(value=f'Detected Emotion: 
{current_emotion}') 79 | elif event == '-PLAY-' and current_emotion: 80 | play_song_with_emotion(current_emotion, window) 81 | 82 | window.close() 83 | 84 | if __name__ == "__main__": 85 | gui_thread() 86 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ⭐ Thank you for your interest in contributing!! 4 | 5 | Here's how you can contribute to this repository. 6 | 7 | ## Found a bug? 8 | 9 | If you find a bug in the source code, you can help us by [submitting an issue](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/issues/new?assignees=&labels=bug) to our GitHub Repository. Even better, you can submit a Pull Request with a fix. 10 | 11 | ## Missing a feature? 12 | 13 | So, you've got an awesome feature in mind? Throw it over to us by [creating an issue](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/issues/new?assignees=&labels=enhancement) on our GitHub Repo. 14 | 15 | If you don't feel ready to make a code contribution yet, no problem! You can also check out the [documentation issues](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/issues?q=is%3Aopen+is%3Aissue+label%3Adocumentation). 16 | 17 | # How do I make a code contribution? 18 | 19 | ## Good first issues 20 | 21 | Are you new to open source contribution? Wondering how contributions work in our project? Here's a quick rundown. 22 | 23 | Find an issue that you're interested in addressing, or a feature that you'd like to add. 24 | You can use [this view](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) which helps new contributors find easy gateways into our project. 25 | 26 | ## Step 1: Make a fork 27 | 28 | Fork the repository to your GitHub organization. This means that you'll have a copy of the repository under _your-GitHub-username/repository-name_. 29 | 30 | ## Step 2: Clone the repository to your local machine 31 | 32 | ```bash 33 | git clone https://github.com/<your-GitHub-username>/Music-Recommendation-Using-Facial-Expressions 34 | 35 | ``` 36 | 37 | ## Step 3: Make your changes 38 | 39 | Now that everything is set up and works as expected, you can start developing or updating the code with your bug fix or new feature. 40 | 41 | ## Step 4: Add the changes that are ready to be committed 42 | 43 | Stage the changes that are ready to be committed: 44 | 45 | ```bash 46 | git add . 47 | ``` 48 | 49 | ## Step 5: Commit the changes (Git) 50 | 51 | Commit the changes with a short message. (See below for more details on how we structure our commit messages) 52 | 53 | ```bash 54 | git commit -m "<feature-name>(<issue-number>): <short summary>" 55 | ``` 56 | 57 | ## Step 6: Push the changes to the remote repository 58 | 59 | Push the changes to the remote repository using: 60 | 61 | ```bash 62 | git push origin <branch-name> 63 | ``` 64 | 65 | ## Step 7: Create a Pull Request 66 | 67 | In GitHub, do the following to submit a pull request to the upstream repository: 68 | 69 | 1. Give the pull request a title and a short description of the changes made. Also include the issue or bug number associated with your change. Explain the changes that you made, any issues you think exist with the pull request you made, and any questions you have for the maintainer. 70 | 71 | Remember, it's okay if your pull request is not perfect (no pull request ever is). 
The reviewer will be able to help you fix any problems and improve it! 72 | 73 | 2. Wait for the pull request to be reviewed by a maintainer. 74 | 75 | 3. Make changes to the pull request if the reviewing maintainer recommends them. 76 | 77 | Celebrate your success after your pull request is merged :-) 78 | 79 | ## PR Template 80 | 81 | The name and description of a PR is important, because it will eventually become the commit that shows up in main. Here are the guidelines for how to name and describe a PR. 82 | 83 | ### PR name: [Feature Name] 84 | ### PR description: Keyword #{issue-number} Short summary 85 | 86 | ### For example: 87 | Name: Added Readme File 88 | 89 | Description: Fixes #1 90 | 91 | Added Readme File for the project 92 | 93 | ### Feature Name 94 | - A short-form version of the feature you're working on, or, alternatively the location you're making changes. Serves as an easy, at a glance, indicator of what the PR is touching. 95 | 96 | ### Issue Number 97 | - Following the keyword is the issue number - this is the issue the ticket addresses. 98 | 99 | ### Short Summary 100 | - This is a short summary of what your commit/PR is doing. 101 | 102 | ## Code of conduct 103 | 104 | Please note that this project is released with a Contributor Code of Conduct. By participating in this project you agree to abide by its terms. 105 | 106 | [Code of Conduct](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/blob/main/CODE_OF_CONDUCT.md) 107 | 108 | Our Code of Conduct means that you are responsible for treating everyone on the project with respect and courtesy. 109 | 110 | ⭐ Happy coding, and we look forward to your contributions! 111 | -------------------------------------------------------------------------------- /code/deployment/app.py: -------------------------------------------------------------------------------- 1 | from streamlit_webrtc import webrtc_streamer, VideoTransformerBase 2 | import streamlit as st 3 | import cv2 4 | from keras.models import load_model 5 | import numpy as np 6 | import webbrowser 7 | import requests 8 | import re 9 | import os 10 | import time 11 | 12 | # Load model and labels 13 | model = load_model("code/model/fer2013_mini_XCEPTION.102-0.66.hdf5") 14 | emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] 15 | face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml') 16 | 17 | # App config 18 | st.set_page_config(page_title="Emotion-Based Music Player", layout="centered") 19 | st.title("Facial Emotion Recognition App") 20 | st.write("This app detects your facial expression and plays a suitable song.") 21 | 22 | # Footer 23 | st.markdown( 24 | """ 25 | 42 | 43 | 48 | """, 49 | unsafe_allow_html=True 50 | ) 51 | 52 | # App state 53 | if "last_emotion" not in st.session_state: 54 | st.session_state.last_emotion = "Neutral" 55 | if "show_video" not in st.session_state: 56 | st.session_state.show_video = False 57 | 58 | # Streamlit WebRTC Video Transformer 59 | class EmotionDetector(VideoTransformerBase): 60 | def __init__(self): 61 | self.last_emotion = "Neutral" 62 | 63 | def transform(self, frame): 64 | img = frame.to_ndarray(format="bgr24") 65 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 66 | faces = face_cascade.detectMultiScale(gray, 1.3, 5) 67 | 68 | for (x, y, w, h) in faces: 69 | roi_gray = gray[y:y+h, x:x+w] 70 | roi_gray = cv2.resize(roi_gray, (64, 64)) 71 | roi = roi_gray.astype("float") / 255.0 72 | roi = np.expand_dims(roi, axis=0) 73 | roi = 
np.expand_dims(roi, axis=-1) 74 | preds = model.predict(roi)[0] 75 | self.last_emotion = emotions[np.argmax(preds)] 76 | st.session_state.last_emotion = self.last_emotion 77 | 78 | cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2) 79 | cv2.putText(img, self.last_emotion, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2) 80 | break 81 | 82 | return img 83 | 84 | # 🎥 Live Camera Detection Mode 85 | if not st.session_state.show_video: 86 | st.subheader("📷 Capturing Your Live Emotions") 87 | col1, col2 = st.columns([1, 2]) # Adjust the ratio as you prefer 88 | 89 | with col1: 90 | capture = st.button("🎵 Play Song on Last Captured Emotion") 91 | 92 | with col2: 93 | # ctx = webrtc_streamer(key="emotion", video_transformer_factory=EmotionDetector) 94 | # Google's STUN server helps WebRTC webcam work reliably across networks and firewalls. 95 | # Using public STUN server to establish webcam stream across NAT/firewalls. 96 | 97 | ctx = webrtc_streamer( 98 | key="emotion", 99 | video_transformer_factory=EmotionDetector, 100 | rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]} 101 | ) 102 | 103 | 104 | if capture: 105 | if ctx.video_transformer: 106 | st.session_state.last_emotion = ctx.video_transformer.last_emotion 107 | st.session_state.show_video = True 108 | st.rerun() 109 | 110 | # 🎧 Play Song For Detected Mood 111 | if st.session_state.show_video: 112 | st.markdown("## 🎧 Now Playing Music For Your Mood") 113 | st.markdown(f"**Last Detected Mood:** `{st.session_state.last_emotion}`") 114 | 115 | if st.button("🔁 Detect Emotions Again"): 116 | st.session_state.show_video = False 117 | st.rerun() 118 | 119 | search_query = f"https://www.youtube.com/results?search_query={st.session_state.last_emotion}+background+tunes" 120 | response = requests.get(search_query) 121 | 122 | if response.status_code != 200: 123 | print("Failed to retrieve YouTube search results. Status code:", response.status_code) 124 | 125 | html_content = response.text 126 | match = re.search(r'/watch\?v=([^\"]+)', html_content) 127 | if match: 128 | video_id = match.group(1) 129 | video_url = f"https://www.youtube.com/watch?v={video_id.encode('utf-8').decode('unicode_escape')}" 130 | st.video(video_url) 131 | print("Opening YouTube video:", video_url) 132 | -------------------------------------------------------------------------------- /code/ui_interfaces/cli_main.py: -------------------------------------------------------------------------------- 1 | #This is the raw code without using any fancy GUI or web tool 2 | #Importing the libraries 3 | import cv2 4 | from keras.models import load_model 5 | import numpy as np 6 | import webbrowser 7 | import requests 8 | import re 9 | import os 10 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 11 | 12 | # Function to play the first song from youtube queries 13 | def play_first_song(final_emotion): 14 | try: 15 | search_query = f"https://www.youtube.com/results?search_query={final_emotion}+background+tunes" 16 | 17 | # to fetch the search results page 18 | response = requests.get(search_query) 19 | 20 | # HTTP status code 200 = request was successful 21 | if response.status_code != 200: 22 | print("Failed to retrieve YouTube search results. 
Status code:", response.status_code) 23 | return 24 | 25 | html_content = response.text 26 | 27 | match = re.search(r'/watch\?v=([^\"]+)', html_content) 28 | if match: 29 | video_id = match.group(1) 30 | #video_url = f"https://www.youtube.com/watch?v={video_id}" 31 | video_url = f"https://www.youtube.com/watch?v={video_id.encode('utf-8').decode('unicode_escape')}" 32 | 33 | # printing the video URL for debugging purposes 34 | print("Opening YouTube video:", video_url) 35 | 36 | # opening the video in the default web browser 37 | webbrowser.open(video_url) 38 | else: 39 | print("No video found for the given query.") 40 | 41 | except requests.RequestException as e: 42 | print("An error occurred while connecting to YouTube:", e) 43 | except Exception as e: 44 | print("An unexpected error occurred:", e) 45 | 46 | # Load the pre-trained facial expression recognition model 47 | model = load_model("code/model/fer2013_mini_XCEPTION.102-0.66.hdf5") 48 | 49 | # Define the list of emotions 50 | emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] 51 | 52 | # Create a cascade classifier for detecting faces 53 | face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml') 54 | 55 | # Open a connection to the camera (0 is usually the default camera) 56 | cap = cv2.VideoCapture(0) 57 | 58 | # Variable to store the final emotion 59 | final_emotion = None 60 | 61 | # Flag to indicate whether emotion capture has occurred 62 | emotion_captured = False 63 | 64 | # Flag for music part 65 | Flag_final = False 66 | 67 | # Callback function for mouse click 68 | def on_button_click(event, x, y, flags, param): 69 | global final_emotion 70 | if event == cv2.EVENT_LBUTTONDOWN: 71 | final_emotion = emotion_label 72 | print("Final Emotion:", final_emotion) 73 | cap.release() 74 | cv2.destroyAllWindows() 75 | emotion_captured = True 76 | 77 | # Create a named window 78 | cv2.namedWindow('Facial Expression Recognition') 79 | 80 | # Set the mouse callback function 81 | cv2.setMouseCallback('Facial Expression Recognition', on_button_click) 82 | 83 | 84 | while True: 85 | # Read a frame from the camera 86 | ret, frame = cap.read() 87 | #print("Read frame:", ret) 88 | 89 | # Check if the emotion has been captured 90 | if ret == False: 91 | play_first_song(final_emotion) 92 | break 93 | #print("breaking the loop") 94 | 95 | # Convert the frame to grayscale for face detection 96 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 97 | 98 | # Detect faces in the frame 99 | faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5) 100 | 101 | for (x, y, w, h) in faces: 102 | # Extract the region of interest (ROI) which is the face 103 | roi_gray = gray[y:y + h, x:x + w] 104 | roi_gray = cv2.resize(roi_gray, (64, 64), interpolation=cv2.INTER_AREA) 105 | 106 | # Normalize the pixel values 107 | roi = roi_gray / 255.0 108 | 109 | # Reshape the image for the model 110 | roi = np.reshape(roi, (1, 64, 64, 1)) 111 | 112 | # Make a prediction using the pre-trained model 113 | prediction = model.predict(roi) 114 | emotion_label = emotions[np.argmax(prediction)] 115 | 116 | # Draw a rectangle around the face and display the predicted emotion 117 | cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2) 118 | cv2.putText(frame, emotion_label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2) 119 | 120 | # Display the button text 121 | button_text = "Capture Emotion" 122 | cv2.putText(frame, button_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, 
(0, 255, 0), 2) 123 | 124 | 125 | # Display the frame 126 | cv2.imshow('Facial Expression Recognition', frame) 127 | #print("Display frame") 128 | 129 | # Break the loop if the 'q' key is pressed 130 | if cv2.waitKey(1) & 0xFF == ord('q'): 131 | break 132 | #print("Key pressed") 133 | 134 | # Release the camera and close all OpenCV windows 135 | #print("About to release the camera and close windows") 136 | cap.release() 137 | cv2.destroyAllWindows() 138 | -------------------------------------------------------------------------------- /code/new_models/app_local_streamlit_select_music.py: -------------------------------------------------------------------------------- 1 | from streamlit_webrtc import webrtc_streamer, VideoTransformerBase 2 | import streamlit as st 3 | import cv2 4 | from keras.models import load_model 5 | import numpy as np 6 | import requests 7 | import re 8 | import time 9 | from urllib.parse import quote_plus 10 | 11 | # Load model and labels 12 | model = load_model("code/model/fer2013_mini_XCEPTION.102-0.66.hdf5") 13 | emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] 14 | face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml') 15 | 16 | # App config 17 | st.set_page_config(page_title="Emotion-Based Music Player", layout="centered") 18 | st.title("Facial Emotion Recognition App") 19 | st.write("This app detects your facial expression and plays a suitable song.") 20 | 21 | # Footer 22 | st.markdown( 23 | """ 24 | 41 | 42 | 47 | """, 48 | unsafe_allow_html=True 49 | ) 50 | 51 | # App state 52 | if "last_emotion" not in st.session_state: 53 | st.session_state.last_emotion = "Neutral" 54 | if "show_video" not in st.session_state: 55 | st.session_state.show_video = False 56 | 57 | # Streamlit WebRTC Video Transformer 58 | class EmotionDetector(VideoTransformerBase): 59 | def __init__(self): 60 | self.last_emotion = "Neutral" 61 | 62 | def transform(self, frame): 63 | img = frame.to_ndarray(format="bgr24") 64 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 65 | faces = face_cascade.detectMultiScale(gray, 1.3, 5) 66 | 67 | for (x, y, w, h) in faces: 68 | roi_gray = gray[y:y+h, x:x+w] 69 | roi_gray = cv2.resize(roi_gray, (64, 64)) 70 | roi = roi_gray.astype("float") / 255.0 71 | roi = np.expand_dims(roi, axis=0) 72 | roi = np.expand_dims(roi, axis=-1) 73 | preds = model.predict(roi)[0] 74 | self.last_emotion = emotions[np.argmax(preds)] 75 | st.session_state.last_emotion = self.last_emotion 76 | 77 | cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2) 78 | cv2.putText(img, self.last_emotion, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2) 79 | break 80 | 81 | return img 82 | 83 | # 🎥 Live Camera Detection Mode 84 | if not st.session_state.show_video: 85 | st.subheader("📷 Capturing Your Live Emotions") 86 | col1, col2 = st.columns([1, 2]) 87 | 88 | with col1: 89 | capture = st.button("🎵 Play Song on Last Captured Emotion") 90 | 91 | with col2: 92 | ctx = webrtc_streamer( 93 | key="emotion", 94 | video_transformer_factory=EmotionDetector, 95 | rtc_configuration={ 96 | "iceServers": [ 97 | {"urls": ["stun:stun.l.google.com:19302"]}, 98 | {"urls": ["stun:stun1.l.google.com:19302"]}, 99 | {"urls": ["stun:stun2.l.google.com:19302"]}, 100 | { 101 | "urls": ["turn:openrelay.metered.ca:80"], 102 | "username": "openrelayproject", 103 | "credential": "openrelayproject" 104 | } 105 | ] 106 | } 107 | ) 108 | 109 | if capture: 110 | if ctx.video_transformer: 111 | st.session_state.last_emotion = 
ctx.video_transformer.last_emotion 112 | st.session_state.show_video = True 113 | st.rerun() 114 | 115 | # 🎧 Play Song For Detected Mood 116 | if st.session_state.show_video: 117 | st.markdown(f"## 🎧 Choose a {st.session_state.last_emotion} song to play") 118 | st.markdown(f"**Last Detected Mood:** `{st.session_state.last_emotion}`") 119 | 120 | if st.button("🔁 Detect Emotions Again"): 121 | st.session_state.show_video = False 122 | st.rerun() 123 | 124 | search_query = f"https://www.youtube.com/results?search_query={quote_plus(st.session_state.last_emotion + ' background tunes')}" 125 | response = requests.get(search_query) 126 | 127 | if response.status_code != 200: 128 | st.error("Failed to retrieve YouTube search results.") 129 | else: 130 | html_content = response.text 131 | video_ids = re.findall(r'/watch\?v=([^"]{11})', html_content) 132 | unique_ids = list(dict.fromkeys(video_ids))[:5] # Top 5 unique videos 133 | 134 | if unique_ids: 135 | cols = st.columns(len(unique_ids)) 136 | for i, video_id in enumerate(unique_ids): 137 | with cols[i]: 138 | video_url = f"https://www.youtube.com/watch?v={video_id}" 139 | thumbnail_url = f"https://img.youtube.com/vi/{video_id}/0.jpg" 140 | try: 141 | st.image(thumbnail_url, use_container_width=True) 142 | except TypeError: 143 | st.image(thumbnail_url) 144 | if st.button(f"▶️ Play {i+1}", key=video_id): 145 | st.session_state.selected_video = video_url 146 | 147 | if "selected_video" in st.session_state: 148 | st.video(st.session_state.selected_video) 149 | else: 150 | st.warning("No music videos found.") 151 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | ## Table of Contents 4 | 5 | 1. [Statement of Purpose](#statement-of-purpose) 6 | 2. [Community Values](#community-values) 7 | 3. [Expected Behavior](#expected-behavior) 8 | 4. [Unacceptable Behavior](#unacceptable-behavior) 9 | 5. [Procedures for Reporting and Resolving Issues](#procedures-for-reporting-and-resolving-issues) 10 | 6. [Consequences of Unacceptable Behavior](#consequences-of-unacceptable-behavior) 11 | 7. [Scope](#scope) 12 | 8. [Acknowledgments](#acknowledgments) 13 | 9. [Get Involved](#get-involved) 14 | 15 | --- 16 | 17 | ## 1. Statement of Purpose 18 | 19 | This Python-based AI project aims at utilizing Computer Vision Technologies for facial recognition and analyzing facial expressions. By identifying the current user mood, the system should leverages YouTube's search capabilities to recommend music that aligns with the emotions. 20 | 21 | --- 22 | 23 | ## 2. Community Values 24 | 25 | We prioritize the following values: 26 | 27 | | **Value** | **Description** | 28 | |------------------|---------------------------------------------------------------------------------| 29 | | **Inclusivity** | We embrace diverse backgrounds and perspectives, enhancing our collective work.| 30 | | **Collaboration** | We believe in teamwork and encourage open communication and mutual support. | 31 | | **Respect** | We treat everyone with kindness and respect, fostering an environment where contributions are valued. | 32 | | **Innovation** | We encourage creative thinking and experimentation, supporting initiatives that drive our project forward. | 33 | 34 | --- 35 | 36 | ## 3. 
Expected Behavior 37 | 38 | As a participant in the community, you are expected to: 39 | 40 | - **Be Respectful and Inclusive**: Treat all individuals with respect and kindness, regardless of their background, identity, or experience level. 41 | - **Communicate Constructively**: Engage in discussions that are constructive, focusing on ideas and solutions rather than personal opinions. 42 | - **Welcome Collaboration**: Actively seek out diverse viewpoints and be open to feedback that can improve our projects. 43 | - **Support Others**: Offer help to fellow contributors, especially newcomers, and encourage a culture of mentorship. 44 | - **Practice Empathy**: Consider the feelings and experiences of others, striving to create an understanding environment. 45 | 46 | --- 47 | 48 | ## 4. Unacceptable Behavior 49 | 50 | The following behaviors are considered unacceptable within our community: 51 | 52 | | **Behavior Type** | **Description** | 53 | |--------------------------|-----------------------------------------------------------------| 54 | | **Harassment** | Any form of harassment, including offensive comments, intimidation, or unwanted attention. | 55 | | **Discrimination** | Actions or comments that discriminate based on race, ethnicity, gender, sexual orientation, disability, age, or any other characteristic. | 56 | | **Personal Attacks** | Engaging in personal insults or derogatory remarks directed at individuals or groups. | 57 | | **Disruption** | Disrupting discussions, meetings, or community spaces, hindering constructive dialogue. | 58 | | **Invasive Actions** | Unwanted physical contact or any other form of unwanted attention. | 59 | 60 | --- 61 | 62 | ## 5. Procedures for Reporting and Resolving Issues 63 | 64 | If you witness or experience unacceptable behavior, we encourage you to take the following steps: 65 | 66 | 1. **Identify the Incident**: Clearly identify the nature of the unacceptable behavior and gather any evidence, such as screenshots or messages. 67 | 68 | 2. **Report the Incident**: Reach out to a project maintainer or designated community leader through direct message or email. Include detailed information about the incident, such as: 69 | - Names of those involved (if known) 70 | - Dates and times of the incident 71 | - Context surrounding the behavior 72 | - Any relevant evidence 73 | 74 | 3. **Documentation**: Keep a record of your report and any responses received to ensure a transparent resolution process. 75 | 76 | 4. **Confidentiality**: All reports will be treated confidentially. Personal information will only be shared with those involved in the resolution process. 77 | 78 | 5. **Follow-Up**: You can request updates on the status of your report and any actions taken. 79 | 80 | --- 81 | 82 | ## 6. Consequences of Unacceptable Behavior 83 | 84 | Consequences for violating the Code of Conduct will be determined based on the severity and frequency of the behavior. Potential consequences include: 85 | 86 | | **Consequences** | **Description** | 87 | |--------------------------|---------------------------------------------------------------| 88 | | **Verbal Warning** | A private discussion addressing the behavior. | 89 | | **Written Warning** | A formal notice outlining the behavior and expected changes. | 90 | | **Temporary Suspension** | A temporary ban from contributing to the project or participating in community discussions. | 91 | | **Permanent Removal** | A permanent ban for severe violations or repeated offenses. | 92 | 93 | --- 94 | 95 | ## 7. 
Scope 96 | 97 | This Code of Conduct applies to all participants, including contributors, maintainers, and users, both online and offline. It encompasses interactions in the following areas: 98 | 99 | - GitHub discussions and issues 100 | - Community meetings and events 101 | - Social media platforms 102 | - Any other official channels related to CopilotKit 103 | 104 | --- 105 | 106 | ## 8. Acknowledgments 107 | 108 | This Code of Conduct draws inspiration from various open-source communities dedicated to inclusivity and respect. We appreciate their efforts in creating positive environments and strive to uphold similar standards in our community. Your feedback is always welcome to improve our Code of Conduct and practices. 109 | 110 | --- 111 | 112 | ## 9. Get Involved 113 | 114 | We invite you to contribute!! Whether through code, documentation, or community engagement, your participation is invaluable. For more information on how to contribute, please check our [contribution guide](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/blob/main/CONTRIBUTING.md). 115 | 116 | Thank you for being part of the community! Together, we can build a safe, respectful, and innovative space for all. 117 | -------------------------------------------------------------------------------- /code/ui_interfaces/app_local_streamlit.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import cv2 3 | from keras.models import load_model # Assuming you have Keras installed 4 | import numpy as np 5 | import webbrowser 6 | import requests 7 | import re 8 | import os 9 | import time 10 | 11 | # Load model and labels 12 | model = load_model("code/model/fer2013_mini_XCEPTION.102-0.66.hdf5") 13 | emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] 14 | face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml') 15 | 16 | # App config 17 | st.set_page_config(page_title="Emotion-Based Music Player", layout="centered") 18 | st.title("Facial Emotion Recognition App") 19 | st.write("This app detects your facial expression, displays the predicted emotion, and plays a suitable song.") 20 | 21 | #Footer 22 | st.markdown( 23 | """ 24 | 40 | 41 | 46 | """, 47 | unsafe_allow_html=True 48 | ) 49 | 50 | #st.title("🧠 Detected Emotion KPI") 51 | 52 | # App state 53 | if "last_emotion" not in st.session_state: 54 | st.session_state.last_emotion = "Neutral" 55 | if "show_video" not in st.session_state: 56 | st.session_state.show_video = False 57 | 58 | # Function to detect emotion 59 | def detect_emotion(frame): 60 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 61 | faces = face_cascade.detectMultiScale(gray, 1.3, 5) 62 | 63 | emotion = st.session_state.last_emotion 64 | for (x, y, w, h) in faces: 65 | roi_gray = gray[y:y+h, x:x+w] 66 | roi_gray = cv2.resize(roi_gray, (64, 64)) 67 | roi = roi_gray.astype("float") / 255.0 68 | roi = np.expand_dims(roi, axis=0) 69 | roi = np.expand_dims(roi, axis=-1) 70 | preds = model.predict(roi)[0] 71 | emotion = emotions[np.argmax(preds)] 72 | cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2) 73 | cv2.putText(frame, emotion, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2) 74 | break 75 | return frame, emotion 76 | 77 | # ------------------------------ 78 | # 🎥 Live Camera Detection Mode 79 | # ------------------------------ 80 | if not st.session_state.show_video: 81 | col1, col2 = st.columns([1, 2]) 82 | 83 | with col1: 84 | 
st.subheader("📊 Current Emotion") 85 | emotion_placeholder = st.empty() 86 | 87 | cap = cv2.VideoCapture(0) 88 | capture = False 89 | 90 | with col2: 91 | st.subheader("📷 Live Feed") 92 | image_placeholder = st.empty() 93 | st.markdown("
", unsafe_allow_html=True) 94 | capture = st.button("🎵 Play Song on Captured Emotion") 95 | 96 | 97 | st.markdown("---") 98 | 99 | while cap.isOpened() and not capture: 100 | ret, frame = cap.read() 101 | if not ret: 102 | break 103 | 104 | frame = cv2.resize(frame, (320, 240)) 105 | frame, detected_emotion = detect_emotion(frame) 106 | st.session_state.last_emotion = detected_emotion 107 | 108 | # Update live KPI and video feed 109 | 110 | emotion_colors = { 111 | "Happy": "#DFF2BF", 112 | "Sad": "#FFBABA", 113 | "Angry": "#FFAAAA", 114 | "Surprise": "#FFFFBA", 115 | "Neutral": "#E0E0E0", 116 | "Fear": "#D0BAFF", 117 | "Disgust": "#B0FFBA" 118 | } 119 | 120 | bg_color = emotion_colors.get(detected_emotion, "#f9fff9") 121 | 122 | emotion_placeholder.markdown( 123 | f""" 124 |
137 | {detected_emotion} 138 |
139 | """, 140 | unsafe_allow_html=True 141 | ) 142 | 143 | 144 | image_placeholder.image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), channels="RGB") 145 | 146 | if cv2.waitKey(1) & 0xFF == ord('q'): 147 | break 148 | time.sleep(0.1) # Limit refresh rate 149 | 150 | cap.release() 151 | st.session_state.show_video = True # switch mode 152 | 153 | # ------------------------------ 154 | # 🎧 Play Song For Detected Mood 155 | # ------------------------------ 156 | if st.session_state.show_video: 157 | st.markdown("## 🎧 Now Playing Music For Your Mood") 158 | st.markdown(f"**Detected Mood:** `{st.session_state.last_emotion}`") 159 | 160 | if st.button("🔁 Detect Emotions Again"): 161 | st.session_state.show_video = False 162 | st.rerun() 163 | 164 | search_query = f"https://www.youtube.com/results?search_query={st.session_state.last_emotion}+background+tunes" 165 | 166 | # to fetch the search results page 167 | response = requests.get(search_query) 168 | 169 | # HTTP status code 200 = request was successful 170 | if response.status_code != 200: 171 | print("Failed to retrieve YouTube search results. Status code:", response.status_code) 172 | 173 | html_content = response.text 174 | 175 | match = re.search(r'/watch\?v=([^\"]+)', html_content) 176 | if match: 177 | video_id = match.group(1) 178 | #video_url = f"https://www.youtube.com/watch?v={video_id}" 179 | video_url = f"https://www.youtube.com/watch?v={video_id.encode('utf-8').decode('unicode_escape')}" 180 | 181 | # printing the video URL for debugging purposes 182 | st.video(video_url) 183 | print("Opening YouTube video:", video_url) 184 | 185 | 186 | 187 | 188 | -------------------------------------------------------------------------------- /code/new_models/app_local_streamlit_with_input_button.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import cv2 3 | from keras.models import load_model 4 | import numpy as np 5 | import requests 6 | import re 7 | import time 8 | 9 | # Load model and labels 10 | model = load_model("code/model/fer2013_mini_XCEPTION.102-0.66.hdf5") 11 | emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] 12 | face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml') 13 | 14 | # App config 15 | st.set_page_config(page_title="Emotion-Based Music Player", layout="centered") 16 | st.title("Facial Emotion Recognition App") 17 | st.write("This app detects your facial expression or lets you select one, then plays music based on your mood.") 18 | 19 | # Footer 20 | st.markdown( 21 | """ 22 | 38 | 39 | 44 | """, 45 | unsafe_allow_html=True 46 | ) 47 | 48 | # Session state setup 49 | if "last_emotion" not in st.session_state: 50 | st.session_state.last_emotion = "Neutral" 51 | if "mode" not in st.session_state: 52 | st.session_state.mode = "input" # can be 'input' or 'video' 53 | 54 | # Emotion detection function 55 | def detect_emotion(frame): 56 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 57 | faces = face_cascade.detectMultiScale(gray, 1.3, 5) 58 | 59 | emotion = st.session_state.last_emotion 60 | for (x, y, w, h) in faces: 61 | roi_gray = gray[y:y+h, x:x+w] 62 | roi_gray = cv2.resize(roi_gray, (64, 64)) 63 | roi = roi_gray.astype("float") / 255.0 64 | roi = np.expand_dims(roi, axis=0) 65 | roi = np.expand_dims(roi, axis=-1) 66 | preds = model.predict(roi)[0] 67 | emotion = emotions[np.argmax(preds)] 68 | cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2) 69 | cv2.putText(frame, emotion, 
(x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2) 70 | break 71 | return frame, emotion 72 | 73 | # YouTube search function 74 | def search_youtube_video(emotion): 75 | search_query = f"https://www.youtube.com/results?search_query={emotion}+background+tunes" 76 | response = requests.get(search_query) 77 | if response.status_code != 200: 78 | return None 79 | match = re.search(r'/watch\?v=[\w-]+', response.text) 80 | if match: 81 | return f"https://www.youtube.com{match.group()}" 82 | return None 83 | 84 | # ----------------------- 85 | # 🎛️ Mode Switch Buttons 86 | # ----------------------- 87 | col1, col2 = st.columns([1, 3]) 88 | with col1: 89 | if st.session_state.mode == "input": 90 | if st.button("🎥 Switch to Video Mode"): 91 | st.session_state.mode = "video" 92 | st.rerun() 93 | else: 94 | if st.button("🎛️ Switch to Input Mode"): 95 | st.session_state.mode = "input" 96 | st.rerun() 97 | 98 | # ------------------------- 99 | # 🎚️ INPUT MODE (Default) 100 | # ------------------------- 101 | if st.session_state.mode == "input": 102 | input = st.selectbox("🎯 Select Emotion", emotions, index=0) 103 | if st.button("🔍 Search Music for Selected Emotion"): 104 | video_url = search_youtube_video(input) 105 | if video_url: 106 | st.video(video_url) 107 | else: 108 | st.warning("Unable to find a suitable video.") 109 | 110 | # ------------------------- 111 | # 📹 VIDEO DETECTION MODE 112 | # ------------------------- 113 | if st.session_state.mode == "video": 114 | col1, col2 = st.columns([1, 2]) 115 | 116 | with col1: 117 | st.subheader("📊 Current Emotion") 118 | emotion_placeholder = st.empty() 119 | 120 | cap = cv2.VideoCapture(0) 121 | capture = False 122 | 123 | with col2: 124 | st.subheader("📷 Live Feed") 125 | image_placeholder = st.empty() 126 | capture = st.button("🎵 Play Song on Captured Emotion") 127 | 128 | st.markdown("---") 129 | 130 | while cap.isOpened() and not capture: 131 | ret, frame = cap.read() 132 | if not ret: 133 | break 134 | 135 | frame = cv2.resize(frame, (320, 240)) 136 | frame, detected_emotion = detect_emotion(frame) 137 | st.session_state.last_emotion = detected_emotion 138 | 139 | emotion_colors = { 140 | "Happy": "#DFF2BF", 141 | "Sad": "#FFBABA", 142 | "Angry": "#FFAAAA", 143 | "Surprise": "#FFFFBA", 144 | "Neutral": "#E0E0E0", 145 | "Fear": "#D0BAFF", 146 | "Disgust": "#B0FFBA" 147 | } 148 | 149 | bg_color = emotion_colors.get(detected_emotion, "#f9fff9") 150 | emotion_placeholder.markdown( 151 | f""" 152 |
165 | {detected_emotion} 166 |
167 | """, 168 | unsafe_allow_html=True 169 | ) 170 | image_placeholder.image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), channels="RGB") 171 | 172 | if cv2.waitKey(1) & 0xFF == ord('q'): 173 | break 174 | time.sleep(0.1) 175 | 176 | cap.release() 177 | 178 | # Detected & now play music 179 | if capture: 180 | st.markdown("## 🎧 Now Playing Music For Your Mood") 181 | st.markdown(f"**Detected Mood:** `{st.session_state.last_emotion}`") 182 | video_url = search_youtube_video(st.session_state.last_emotion) 183 | if video_url: 184 | st.video(video_url) 185 | else: 186 | st.warning("Could not find a video for this emotion.") 187 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Music-Recommendation-Using-Facial-Expressions 2 | 3 | Please give us a ⭐ and fork this repo to get started. Thank you 🙌🙌. 4 | 5 | ### Project Overview 6 | 7 | This project is a Python-based application that uses OpenCV for real-time facial detection and a pre-trained deep learning model (fer2013_mini_XCEPTION.102-0.66.hdf5) to recognize and analyze facial expressions. By capturing live video feed from the user’s webcam, it identifies the user’s emotions—such as happiness, sadness, anger, or neutrality—based on facial cues. 8 | 9 | Once the emotion is detected, the application constructs a YouTube search query tailored to the identified mood. Using the webbrowser module, the application automatically opens relevant YouTube search results in the user’s default browser, allowing them to access music that aligns with their current emotional state. The requests library further supports this functionality by enabling API interactions for a smoother YouTube search experience. 10 | 11 | This project combines elements of computer vision and deep learning with web integration to create a personalized and interactive music recommendation system. It demonstrates the potential of AI-powered emotion detection in real-world applications, where user experience can be enhanced through real-time responsiveness and intelligent content recommendations. 12 | 13 | ### Installation 14 | 15 | 1. **Clone the Repository:** 16 | ```bash 17 | git clone https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions.git 18 | ``` 19 | 2. **Install Dependencies:** 20 | ```bash 21 | pip install -r requirements.txt --quiet 22 | ``` 23 | 24 | ### How to Run & Interface Options 25 | 26 | This project supports three ways to interact with the emotion-based music recommendation system: 27 | 28 | **a)CLI Mode (Terminal)** 29 | - Run the core logic directly via terminal (no GUI). 30 | ```bash 31 | python code\ui_interfaces\cli_main.py 32 | ``` 33 | **b)Web Interface (Streamlit)** 34 | - Clean, browser-based UI using Streamlit. 35 | ```bash 36 | streamlit run code\ui_interfaces\app_local_streamlit.py 37 | ``` 38 | **c)Desktop App (PySimpleGUI)** 39 | - Native desktop GUI that runs as a standalone application. 40 | ```bash 41 | python code\ui_interfaces\app_PySimpleGUI.py 42 | ``` 43 | **Ignore - Deployed File** 44 | ``` 45 | streamlit run code\deployment\app.py 46 | ``` 47 | 48 | ### Core Tech Stack & Libraries 49 | 50 | - Python: As the primary programming language for its versatility and extensive libraries. 51 | - OpenCV: For real-time image and video processing, including facial detection. 52 | - TensorFlow and Keras: For building and training the deep learning model to recognize facial expressions. 
53 | - fer2013_mini_XCEPTION.102-0.66.hdf5: A pre-trained model for facial emotion recognition. 54 | - webbrowser: To open web pages, specifically YouTube search results. 55 | - requests: For making HTTP requests to fetch YouTube search results. 56 | 57 | ### How it Works / Usage 58 | 59 | 1. **Facial Detection:** 60 | - The script captures a video feed from your webcam. 61 | - OpenCV is used to detect faces in each frame. 62 | 2. **Emotion Recognition:** 63 | - Detected faces are processed by the trained model. 64 | - The model predicts the dominant emotion (e.g., happy, sad, angry, neutral). 65 | - The script captures the emotion when you click on the video window; the clicked emotion is stored as the current emotion. 66 | 3. **Music Recommendation:** 67 | - Based on the predicted emotion, the script constructs a YouTube search query. 68 | - The `webbrowser` module opens the search results in your default browser. 69 | 70 | - Watch & Contribute to Community Demo Videos here: [Demo Videos](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/blob/contribution/code/new_models/demo.md) 71 | - Example Demo Video: [Demo Video for CLI Interface](https://www.youtube.com/watch?v=Qj5yUBjSr7I) 72 | 73 | ### Main Features 74 | **1. Real-time Facial Detection and Emotion Recognition:** 75 | - Uses OpenCV to capture live video feed from the user's webcam. 76 | - Employs a pre-trained deep learning model (fer2013_mini_XCEPTION.102-0.66.hdf5) to accurately identify facial expressions. 77 | - Recognizes a range of emotions, including happiness, sadness, anger, and neutrality. 78 | 79 | **2. Emotion-Based YouTube Search and Recommendation:** 80 | - Utilizes the webbrowser module to automatically open relevant search results in the user's default browser. 81 | - Leverages the requests library to fetch YouTube search results for a more efficient search process. 82 | - Constructs a YouTube search query based on the detected emotion. 83 | 84 | **3. Intuitive User Interface:** 85 | - Provides simple and user-friendly interfaces to interact with the application. 86 | - Displays the detected emotion in real time. 87 | - Presents a clear visual representation of the search results. 88 | 89 | ### Customization & Additional Considerations 90 | 91 | - **Model:** Experiment with different pre-trained models or fine-tune the existing one for more accurate emotion recognition. 92 | - **Search Queries:** Adjust the search query construction to refine the music recommendations. 93 | - **User Interface:** Consider creating a user-friendly GUI / Front end to enhance the experience. 94 | - **Privacy:** Be mindful of privacy concerns when capturing and processing facial data. 95 | - **Performance:** Optimize the code for real-time performance, especially on resource-constrained devices. 96 | - **Error Handling:** Implement robust error handling to gracefully handle exceptions. 97 | 98 | ### Contribution 99 | 100 | **NOTE: Please create PRs only to the contribution branch. All others will be automatically closed.** 101 | 102 | We welcome contributions to this project. Feel free to fork the repository, make improvements, and submit pull requests. 103 | We value all contributions, whether it's through code, documentation, creating demos or just spreading the word. 
104 | **If you have introduced a new Computer Vision Library based code or new model or using new library (such as fer), Please submit final code in new_models folder.** 105 | Here are a few useful resources to help you get started: 106 | - For contributions, [Check out the contribution guide](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/blob/main/CONTRIBUTING.md) . 107 | 108 | ### PR Template 109 | 110 | Please submit all PRs in this format: [PR Template](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/blob/main/CONTRIBUTING.md#pr-template) 111 | 112 | ### License 113 | 114 | This project is licensed under the [MIT License](https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions/blob/main/LICENSE) 115 | 116 | ### Contact Us 117 | 118 | For any questions or issues, please contact us at shivampilot2004@gmail.com 119 | -------------------------------------------------------------------------------- /code/new_models/app_local_streamlit_ui_improved.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import cv2 3 | from keras.models import load_model 4 | import numpy as np 5 | import requests 6 | import re 7 | import time 8 | 9 | model = load_model("code/model/fer2013_mini_XCEPTION.102-0.66.hdf5") 10 | emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] 11 | face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml') 12 | st.markdown(""" 13 | 58 | 59 | """, unsafe_allow_html=True) 60 | 61 | st.set_page_config(page_title=" Emotion-Based Music Player", layout="centered") 62 | 63 | st.markdown(""" 64 |

65 | Facial Emotion 66 | Recognition 67 | 📸 68 |

69 |

Your Mood, Your Music

70 |

Let AI read your mood & deliver a sound to soothe you!

71 | """, unsafe_allow_html=True) 72 | 73 | if "last_emotion" not in st.session_state: 74 | st.session_state.last_emotion = "Neutral" 75 | if "show_video" not in st.session_state: 76 | st.session_state.show_video = False 77 | 78 | def detect_emotion(frame): 79 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 80 | faces = face_cascade.detectMultiScale(gray, 1.3, 5) 81 | emotion = st.session_state.last_emotion 82 | for (x, y, w, h) in faces: 83 | roi_gray = gray[y:y+h, x:x+w] 84 | roi_gray = cv2.resize(roi_gray, (64, 64)) 85 | roi = roi_gray.astype("float") / 255.0 86 | roi = np.expand_dims(roi, axis=0) 87 | roi = np.expand_dims(roi, axis=-1) 88 | preds = model.predict(roi)[0] 89 | emotion = emotions[np.argmax(preds)] 90 | cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2) 91 | cv2.putText(frame, emotion, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2) 92 | break 93 | return frame, emotion 94 | 95 | if not st.session_state.show_video: 96 | st.markdown('
', unsafe_allow_html=True) 97 | cap = cv2.VideoCapture(0) 98 | capture = False 99 | st.markdown(""" 100 |

Emotion Detection

101 |

Allow camera access and see your mood in real time.

102 | """, unsafe_allow_html=True) 103 | emotion_placeholder = st.empty() 104 | image_placeholder = st.empty() 105 | capture = st.button(" Play Song Based on Emotion") 106 | st.markdown("
", unsafe_allow_html=True) 107 | st.markdown("---") 108 | 109 | while cap.isOpened() and not capture: 110 | ret, frame = cap.read() 111 | if not ret: 112 | break 113 | frame = cv2.resize(frame, (340, 260)) 114 | frame, detected_emotion = detect_emotion(frame) 115 | st.session_state.last_emotion = detected_emotion 116 | 117 | BADGE_GRADIENTS = { 118 | "Happy": "linear-gradient(90deg, #f9d423, #ff4e50)", 119 | "Sad": "linear-gradient(90deg, #485563, #29323c)", 120 | "Angry": "linear-gradient(90deg, #ee0979, #ff6a00)", 121 | "Surprise": "linear-gradient(90deg, #a8ff78, #78ffd6)", 122 | "Neutral": "linear-gradient(90deg, #ece9e6, #ffffff)", 123 | "Fear": "linear-gradient(90deg, #ba5370, #f4e2d8)", 124 | "Disgust": "linear-gradient(90deg, #3ca55c, #b5ac49)" 125 | } 126 | badge_gradient = BADGE_GRADIENTS.get(detected_emotion, "linear-gradient(90deg,#ece9e6, #ffffff)") 127 | 128 | emotion_placeholder.markdown( 129 | f""" 130 |
131 | {detected_emotion} 132 |
133 | """, unsafe_allow_html=True 134 | ) 135 | 136 | image_placeholder.image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), channels="RGB") 137 | if cv2.waitKey(1) & 0xFF == ord('q'): 138 | break 139 | time.sleep(0.13) 140 | cap.release() 141 | st.session_state.show_video = True 142 | 143 | if st.session_state.show_video: 144 | st.markdown('
', unsafe_allow_html=True) 145 | st.markdown(""" 146 |

147 | Curated Music For You 148 |

149 | """, unsafe_allow_html=True) 150 | st.markdown( 151 | f"

Detected Mood: {st.session_state.last_emotion}

", 152 | unsafe_allow_html=True) 153 | st.markdown('
', unsafe_allow_html=True) 154 | 155 | if st.button(" Detect Again"): 156 | st.session_state.show_video = False 157 | st.rerun() 158 | 159 | query = f"https://www.youtube.com/results?search_query={st.session_state.last_emotion}+relaxing+music" 160 | response = requests.get(query) 161 | if response.status_code == 200: 162 | match = re.search(r'/watch\?v=([^"]+)', response.text) 163 | if match: 164 | video_id = match.group(1) 165 | video_url = f"https://www.youtube.com/watch?v={video_id}" 166 | st.video(video_url) 167 | else: 168 | st.error("Unable to fetch music. Please check your connection.") 169 | else: 170 | st.error("Unable to fetch music. Please check your connection.") 171 | 172 | st.markdown(""" 173 |
174 | Project under GSSoC'25 176 | 177 |
178 | """, unsafe_allow_html=True) 179 | -------------------------------------------------------------------------------- /code/new_models/app_PySimpleGUI_withui_2.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import PySimpleGUI as sg 3 | from keras.models import load_model 4 | import numpy as np 5 | import webbrowser 6 | from threading import Thread, Event 7 | import requests 8 | import re 9 | import time 10 | 11 | model = load_model('code/model/fer2013_mini_XCEPTION.102-0.66.hdf5') 12 | emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] 13 | DEFAULT_CAMERA_INDEX = 0 14 | 15 | def emoji_for(emotion): 16 | mapping = { 17 | 'Angry': '😡', 'Disgust': '🤢', 'Fear': '😨', 'Happy': '😄', 18 | 'Sad': '😢', 'Surprise': '😲', 'Neutral': '😐' 19 | } 20 | return mapping.get(emotion, '🙂') 21 | 22 | def emotion_fill_percentage(emotion): 23 | levels = { 24 | 'Happy': 100, 'Surprise': 80, 'Neutral': 50, 25 | 'Sad': 30, 'Fear': 20, 'Disgust': 20, 'Angry': 10 26 | } 27 | return levels.get(emotion, 50) 28 | 29 | def color_for_emotion(emotion): 30 | mapping = { 31 | 'Happy': ('#1E88E5', '#E3F2FD'), 32 | 'Sad': ('#1976D2', '#E3F2FD'), 33 | 'Angry': ('#D32F2F', '#FFEBEE'), 34 | 'Surprise': ('#0288D1', '#E1F5FE'), 35 | 'Fear': ('#455A64', '#CFD8DC'), 36 | 'Disgust': ('#00796B', '#E0F2F1'), 37 | 'Neutral': ('#5C6BC0', '#E8EAF6') 38 | } 39 | return mapping.get(emotion, ('#2196F3', '#E3F2FD')) 40 | 41 | def detect_emotion(frame, face_cascade): 42 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 43 | faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5) 44 | for (x, y, w, h) in faces: 45 | roi_gray = gray[y:y + h, x:x + w] 46 | roi_gray = cv2.resize(roi_gray, (64, 64), interpolation=cv2.INTER_AREA) 47 | roi = roi_gray / 255.0 48 | roi = np.reshape(roi, (1, 64, 64, 1)) 49 | prediction = model.predict(roi) 50 | emotion_label = emotions[np.argmax(prediction)] 51 | cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 120, 255), 2) 52 | cv2.putText(frame, emotion_label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 120, 255), 2) 53 | return frame, emotion_label 54 | return None, None # Return None when no faces are detected 55 | 56 | def play_song_with_emotion(emotion, window): 57 | search_query = f"https://www.youtube.com/results?search_query={emotion}+weekend+beats" 58 | response = requests.get(search_query) 59 | html_content = response.text 60 | match = re.search(r'/watch\?v=([^\"]+)', html_content) 61 | if match: 62 | video_id = match.group(1) 63 | video_url = f"https://www.youtube.com/watch?v={video_id}" 64 | webbrowser.open(video_url) 65 | 66 | def video_worker(window, stop_event, cam_idx): 67 | cap = cv2.VideoCapture(cam_idx) 68 | if not cap.isOpened(): 69 | window.write_event_value('-STATUS-', ('Cannot open camera', 'bad')) 70 | return 71 | 72 | face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml') 73 | prev_time = time.time() 74 | fps = 0.0 75 | 76 | while not stop_event.is_set(): 77 | ret, frame = cap.read() 78 | if not ret: 79 | window.write_event_value('-STATUS-', ('Camera read failed', 'bad')) 80 | break 81 | 82 | frame = cv2.resize(frame, (640, 480)) 83 | frame_with_faces, detection = detect_emotion(frame, face_cascade) 84 | frame_to_show = frame_with_faces if frame_with_faces is not None else frame 85 | 86 | now = time.time() 87 | fps = 1.0 / (now - prev_time) if now != prev_time else fps 88 | prev_time = now 89 | 90 | imgbytes = cv2.imencode('.png', 
frame_to_show)[1].tobytes() 91 | window.write_event_value( 92 | '-FRAME-', 93 | {'img': imgbytes, 'emotion': detection, 'fps': fps} 94 | ) 95 | 96 | cap.release() 97 | 98 | def build_layout(): 99 | sg.theme('LightBlue2') 100 | 101 | header = [sg.Text('🎵 Music-Recommendation-Using-Facial-Expressions🎵', 102 | font=('Helvetica', 18, 'bold'), 103 | text_color='#0D47A1', 104 | justification='center', 105 | expand_x=True, 106 | pad=((0,0),(10,10)))] 107 | 108 | camera_panel = [ 109 | [sg.Image(key='-IMAGE-', size=(640, 480))], 110 | [sg.Text('FPS: --', key='-FPS-', size=(12, 1), text_color='#01579B')] 111 | ] 112 | 113 | main_panel = [ 114 | [sg.Text('Detected Emotion', font=('Helvetica', 17, 'bold'), text_color='#1565C0')], 115 | [sg.Text('—', key='-EMOTION-TEXT-', font=('Helvetica', 32, 'bold'), text_color='#1A237E')], 116 | [sg.ProgressBar(100, orientation='h', size=(30, 15), key='-EMOTION-BAR-', bar_color=('#1565C0', 'white'))], 117 | [sg.Button( 118 | '📸 Capture', 119 | key='-CAPTURE-EMOTION-', 120 | button_color=('white', '#1E88E5'), 121 | mouseover_colors=('white', '#0D47A1'), 122 | size=(17, 1), 123 | font=('Helvetica', 14, 'bold') 124 | ), 125 | sg.Button( 126 | '▶ Play Song', 127 | key='-PLAY-', 128 | button_color=('white', '#009688'), 129 | mouseover_colors=('white', '#00796B'), 130 | size=(17, 1), 131 | font=('Helvetica', 14, 'bold') 132 | )], 133 | [sg.Text('', key='-RETURN-VALUE-', size=(45, 1), text_color='#0477BF', font=('Helvetica', 14, 'italic'))] 134 | ] 135 | 136 | footer = [sg.Text("GSSoC'25", justification='center', expand_x=True, text_color='#1565C0')] 137 | 138 | layout = [ 139 | header, 140 | [sg.Column(camera_panel, element_justification='center'), 141 | sg.VSeperator(color='#90A4AE'), 142 | sg.Column(main_panel, element_justification='center')], 143 | [sg.HorizontalSeparator(color='#90A4AE')], 144 | footer 145 | ] 146 | return layout 147 | 148 | def gui_thread(): 149 | window = sg.Window('Facial Expression Recognition', 150 | build_layout(), resizable=True, finalize=True) 151 | stop_event = Event() 152 | video_thread = Thread(target=video_worker, args=(window, stop_event, DEFAULT_CAMERA_INDEX), daemon=True) 153 | video_thread.start() 154 | 155 | current_emotion = None 156 | window_closed = False 157 | 158 | while True: 159 | event, values = window.read(timeout=50) 160 | 161 | if event in (sg.WINDOW_CLOSED, sg.WIN_CLOSED): 162 | stop_event.set() 163 | window_closed = True 164 | break 165 | 166 | elif event == '-FRAME-': 167 | if window_closed: 168 | continue 169 | data = values.get('-FRAME-') 170 | if not data: 171 | continue 172 | try: 173 | window['-IMAGE-'].update(data=data['img']) 174 | window['-FPS-'].update(f"FPS: {data['fps']:.1f}") 175 | except Exception: 176 | continue 177 | 178 | if data['emotion']: 179 | current_emotion = data['emotion'] 180 | window['-EMOTION-TEXT-'].update(f"{emoji_for(current_emotion)} {current_emotion}") 181 | window['-EMOTION-BAR-'].update( 182 | emotion_fill_percentage(current_emotion), 183 | bar_color=color_for_emotion(current_emotion) 184 | ) 185 | else: 186 | current_emotion = None 187 | window['-EMOTION-TEXT-'].update("😶 No face detected") 188 | window['-EMOTION-BAR-'].update(0) 189 | 190 | elif event == '-CAPTURE-EMOTION-': 191 | if current_emotion is not None: 192 | window['-RETURN-VALUE-'].update( 193 | value=f"Detected Emotion: {current_emotion}" 194 | ) 195 | else: 196 | window['-RETURN-VALUE-'].update('No emotion detected yet.') 197 | 198 | elif event == '-PLAY-': 199 | if not current_emotion: 200 | 
window['-RETURN-VALUE-'].update('Capture an emotion first.') 201 | else: 202 | url = play_song_with_emotion(current_emotion, window) 203 | window['-RETURN-VALUE-'].update(f'Playing: {url}' if url else 'Failed to fetch a YouTube link.') 204 | 205 | stop_event.set() 206 | video_thread.join(timeout=2.0) 207 | try: 208 | window.close() 209 | except Exception: 210 | pass 211 | 212 | if __name__ == '__main__': 213 | gui_thread() 214 | -------------------------------------------------------------------------------- /LEARN.md: -------------------------------------------------------------------------------- 1 | # LEARN.md - Onboarding Guide for Music Recommendation via Facial Emotion 2 | 3 | Welcome to the Music-Recommendation-Using-Facial-Expressions project! This guide reflects existing repository content and key GitHub sections to help you navigate and contribute easily. 4 | 5 | ## Contents 6 | 1. [Project Overview](#project-overview) 7 | 2. [Key Features](#key-features) 8 | 3. [Directory Layout](#directory-layout) 9 | 4. [Tech Stack](#tech-stack) 10 | 5. [Interface Options](#interface-options) 11 | 6. [Getting Started Locally](#getting-started-locally) 12 | 7. [Dependencies](#dependencies) 13 | 8. [How to Contribute](#how-to-contribute) 14 | 9. [How to Extend the Project](#how-to-extend-the-project) 15 | 10. [Debugging Tips](#debugging-tips) 16 | 11. [Additional Resources](#additional-resources) 17 | 12. [Next Steps](#next-steps) 18 | 13. [Code of Conduct](#code-of-conduct) 19 | 14. [License](#license) 20 | 21 | --- 22 | 23 | ## Project Overview 24 | This project is a Python-based application that uses OpenCV for real-time facial detection and a pre-trained deep learning model (fer2013_mini_XCEPTION.102-0.66.hdf5) to recognize and analyze facial expressions. By capturing live video feed from the user’s webcam, it identifies the user’s emotions—such as happiness, sadness, anger, or neutrality—based on facial cues. 25 | 26 | Once the emotion is detected, the application constructs a YouTube search query tailored to the identified mood. Using the webbrowser module, the application automatically opens relevant YouTube search results in the user’s default browser, allowing them to access music that aligns with their current emotional state. The requests library further supports this functionality by enabling API interactions for a smoother YouTube search experience. 27 | 28 | This project combines elements of computer vision and deep learning with web integration to create a personalized and interactive music recommendation system. It demonstrates the potential of AI-powered emotion detection in real-world applications, where user experience can be enhanced through real-time responsiveness and intelligent content recommendations. 29 | 30 | --- 31 | 32 | 33 | 40 | 47 | 48 |
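For orientation, the pipeline described above can be sketched in a few lines. This is a simplified, hypothetical walk-through (single frame, default camera index, repository-relative model path, generic search suffix) rather than the exact code in `code/ui_interfaces/`; see those scripts for the full capture loops and UIs:

```python
import cv2
import numpy as np
import webbrowser
from keras.models import load_model

# Pre-trained FER model and OpenCV's bundled Haar cascade for face detection
model = load_model("code/model/fer2013_mini_XCEPTION.102-0.66.hdf5")
emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Grab a single frame from the default webcam
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cap.release()

if ret:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        # Crop the face, resize to the model's 64x64 input, scale to [0, 1]
        roi = cv2.resize(gray[y:y + h, x:x + w], (64, 64)).astype("float32") / 255.0
        roi = roi.reshape(1, 64, 64, 1)
        emotion = emotions[int(np.argmax(model.predict(roi)[0]))]
        # Turn the detected emotion into a YouTube search and open it in the default browser
        webbrowser.open(f"https://www.youtube.com/results?search_query={emotion}+music")
        break
```

The bundled interfaces wrap this same flow in a continuous capture loop plus a CLI, Streamlit, or PySimpleGUI front end; only the query-building and UI details differ between them.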
34 | _App screenshots: Screenshot 1 and Screenshot 2_
49 | 50 | 51 | ## Key Features 52 | 1. **Real-time Facial Detection and Emotion Recognition** 53 | - Captures live video from the webcam using OpenCV. 54 | - Uses a pre-trained model (`fer2013_mini_XCEPTION.102-0.66.hdf5`) to identify emotions like happiness, sadness, anger, and neutrality. 55 | 56 | 2. **Emotion-Based YouTube Search and Recommendation** 57 | - Constructs YouTube search queries based on detected emotions. 58 | - Opens results automatically in the default browser via the `webbrowser` module. 59 | - Optionally leverages `requests` for API-based searches. 60 | 61 | 3. **Intuitive User Interface** 62 | - Provides simple CLI, Streamlit, and PySimpleGUI interfaces. 63 | - Displays detected emotion in real time. 64 | - Shows a clear visual of search results. 65 | 66 | 4. **Customization & Considerations** 67 | - Swap or fine-tune models for improved accuracy. 68 | - Tweak search query logic to refine recommendations. 69 | - Enhance the front-end GUI for a better user experience. 70 | - Be mindful of privacy when processing facial data. 71 | 72 | --- 73 | 74 | ## Directory Layout 75 | ``` 76 | Music-Recommendation-Using-Facial-Expressions/ 77 | ├── code/ 78 | │ ├── deployment/ # Streamlit deployment script (app.py) 79 | │ ├── model/ # Pre-trained model files 80 | │ ├── new_models/ # Additional or alternative models 81 | │ └── ui_interfaces/ # CLI, Streamlit, and PySimpleGUI scripts 82 | ├── CODE_OF_CONDUCT.md 83 | ├── CONTRIBUTING.md 84 | ├── LEARN.md # This guide 85 | ├── LICENSE 86 | ├── README.md 87 | ├── packages.txt 88 | ├── requirements.txt 89 | ├── runtime.txt 90 | ``` 91 | 92 | --- 93 | ## Tech Stack 94 | - **Python 3.8+** 95 | - **OpenCV** for webcam video capture & face detection 96 | - **TensorFlow & Keras** (Xception-based model) for emotion classification 97 | - **Streamlit** for web UI 98 | - **PySimpleGUI** for desktop GUI 99 | - **webbrowser** for launching YouTube searches 100 | --- 101 | ## Code Structure Deep Dive 102 | 103 | 104 | - **code/model/** 105 | Contains the pre-trained emotion-recognition model `fer2013_mini_XCEPTION.102-0.66.hdf5`. 106 | 107 | - **code/new_models/** 108 | Stores any additional or experimental `.hdf5` models here. 
109 | 110 | - **code/ui_interfaces/cli_main.py** 111 | - Captures webcam frames via OpenCV 112 | - Converts to grayscale, detects faces (Haar cascades embedded in this script) 113 | - Loads the `.hdf5` model and predicts emotion 114 | - Maps emotion → YouTube keywords and opens the browser (`webbrowser.open()`) 115 | 116 | - **code/ui_interfaces/app_local_streamlit.py** 117 | - Initializes a Streamlit layout: video feed, emotion label, and “Play Music” button 118 | - On button click, runs the same detection + recommendation pipeline as in `cli_main.py` 119 | 120 | - **code/ui_interfaces/app_PySimpleGUI.py** 121 | - Builds a desktop GUI window with live camera feed 122 | - Displays current emotion label 123 | - “Play Music” button triggers YouTube search logic 124 | 125 | - **code/deployment/app.py** 126 | - Entry point for deploying the Streamlit app (e.g., to Heroku or Streamlit Cloud) 127 | - Wraps `app_local_streamlit.py` logic for production environments 128 | 129 | --- 130 | ## Interface Options 131 | - **CLI Mode:** 132 | ```bash 133 | python code/ui_interfaces/cli_main.py 134 | ``` 135 | - **Streamlit Web App:** 136 | ```bash 137 | streamlit run code/ui_interfaces/app_local_streamlit.py 138 | ``` 139 | - **PySimpleGUI Desktop App:** 140 | ```bash 141 | python code/ui_interfaces/app_PySimpleGUI.py 142 | ``` 143 | 144 | --- 145 | 146 | ## Getting Started Locally 147 | From **[README.md](README.md)**: 148 | ```bash 149 | git clone https://github.com/SGCODEX/Music-Recommendation-Using-Facial-Expressions.git 150 | cd Music-Recommendation-Using-Facial-Expressions 151 | pip install -r requirements.txt --quiet 152 | ``` 153 | --- 154 | 155 | ## Dependencies 156 | All Python dependencies are listed in **requirements.txt** and **packages.txt**. Install with: 157 | ```bash 158 | pip install -r requirements.txt --quiet 159 | ``` 160 | --- 161 | 162 | ## How to Contribute 163 | As detailed in **[CONTRIBUTING.md](CONTRIBUTING.md)**: 164 | 1. Fork the repository 165 | 2. Create a branch: 166 | ```bash 167 | git checkout -b feature/your-feature 168 | ``` 169 | 3. Make changes and commit with a clear message 170 | 4. Push to your fork and open a Pull Request against `main` 171 | 172 | --- 173 | 174 | ## How to Extend the Project 175 | Check the **Issues** tab on GitHub for open labels like `enhancement`, `feature`, or `good first issue` to find areas needing new functionality. Comment on an issue you’d like to tackle to let maintainers know, then follow the standard fork-and-pull workflow. 176 | 177 | When contributing, consider: 178 | 179 | => Code Quality: Follow existing patterns and conventions 180 | 181 | => Documentation: Update this LEARN.md file when adding features 182 | 183 | => Testing: Manually test all functionality 184 | 185 | => User Experience: Ensure changes enhance usability 186 | 187 | --- 188 | 189 | ## Debugging Tips 190 | - Confirm camera index and permissions if the webcam isn’t detected. 191 | - Ensure the model file exists under `code/model/`. 192 | - Verify that dependencies from **requirements.txt** are installed if imports fail. 193 | 194 | --- 195 | 196 | ## Additional Resources 197 | - **[README.md](README.md)**: Quick start and overview 198 | - **[CONTRIBUTING.md](CONTRIBUTING.md)**: Contribution guidelines 199 | - **GitHub Issues**: Track feature requests and bugs 200 | - **GitHub Actions**: Check the **Actions** tab for CI status 201 | --- 202 | 203 | ## Next Steps 204 | 205 | 1. **Explore the Code**: Open each file and read through the comments 206 | 2. 
**Make Changes**: Try modifying colors, text, or functionality 207 | 3. **Share Your Work**: Create a pull request with your improvements 208 | --- 209 | 210 | ## Code of Conduct 211 | Please review **[CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md)** for community guidelines and standards before contributing. 212 | 213 | --- 214 | 215 | ## License 216 | This project is licensed under the terms of the **MIT License** found in **LICENSE**. 217 | -------------------------------------------------------------------------------- /code/new_models/app_local_streamlit_error_h.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import cv2 3 | from keras.models import load_model 4 | import numpy as np 5 | import webbrowser 6 | import requests 7 | import re 8 | import os 9 | import time 10 | import logging 11 | 12 | # Set up logging to track errors for debugging 13 | logging.basicConfig(level=logging.INFO) 14 | logger = logging.getLogger(__name__) 15 | 16 | # App config: Set up the app's title and layout 17 | try: 18 | st.set_page_config(page_title="Emotion-Based Music Player", layout="centered") 19 | st.title("Facial Emotion Recognition App") 20 | st.write("This app detects your facial expression, displays the predicted emotion, and plays a suitable song.") 21 | except Exception as e: 22 | # Show error if app setup fails and stop the app 23 | st.error(f"Failed to initialize app configuration: {str(e)}") 24 | logger.error(f"App config error: {str(e)}") 25 | st.stop() 26 | 27 | # Load model and face detector: Try to load the emotion model and face detection file 28 | try: 29 | model = load_model("Music-Recommendation-Using-Facial-Expressions\\code\\model\\fer2013_mini_XCEPTION.102-0.66.hdf5") 30 | emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] 31 | face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml') 32 | 33 | # Check if face detector loaded correctly 34 | if face_cascade.empty(): 35 | raise ValueError("Failed to load Haar cascade classifier") 36 | except Exception as e: 37 | # Show error if model or face detector fails to load and stop the app 38 | st.error(f"Error loading model or cascade classifier: {str(e)}") 39 | logger.error(f"Model loading error: {str(e)}") 40 | st.stop() 41 | 42 | # Footer: Add a footer with project details 43 | try: 44 | st.markdown( 45 | """ 46 | 62 | 63 | 68 | """, 69 | unsafe_allow_html=True 70 | ) 71 | except Exception as e: 72 | # Show warning if footer fails to display, but keep app running 73 | st.warning(f"Failed to render footer: {str(e)}") 74 | logger.warning(f"Footer rendering error: {str(e)}") 75 | 76 | # App state: Initialize variables to track last detected emotion and video state 77 | if "last_emotion" not in st.session_state: 78 | st.session_state.last_emotion = "Neutral" 79 | if "show_video" not in st.session_state: 80 | st.session_state.show_video = False 81 | 82 | # Function to detect emotion: Process a frame to find faces and predict emotion 83 | def detect_emotion(frame): 84 | try: 85 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 86 | faces = face_cascade.detectMultiScale(gray, 1.3, 5) 87 | 88 | emotion = st.session_state.last_emotion 89 | for (x, y, w, h) in faces: 90 | roi_gray = gray[y:y+h, x:x+w] 91 | roi_gray = cv2.resize(roi_gray, (64, 64)) 92 | roi = roi_gray.astype("float") / 255.0 93 | roi = np.expand_dims(roi, axis=0) 94 | roi = np.expand_dims(roi, axis=-1) 95 | preds = model.predict(roi)[0] 96 | emotion = 
emotions[np.argmax(preds)] 97 | cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2) 98 | cv2.putText(frame, emotion, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2) 99 | break 100 | return frame, emotion 101 | except Exception as e: 102 | # If emotion detection fails, log error and return last known emotion 103 | logger.error(f"Emotion detection error: {str(e)}") 104 | return frame, st.session_state.last_emotion 105 | 106 | # Live Camera Detection Mode: Show live video feed and detect emotions 107 | if not st.session_state.show_video: 108 | try: 109 | col1, col2 = st.columns([1, 2]) 110 | 111 | with col1: 112 | st.subheader("📊 Current Emotion") 113 | emotion_placeholder = st.empty() 114 | 115 | # Camera handling: Try to access the webcam 116 | try: 117 | cap = cv2.VideoCapture(0) 118 | if not cap.isOpened(): 119 | raise ValueError("Unable to access webcam") 120 | except Exception as e: 121 | # Show error if webcam access fails and stop the app 122 | st.error(f"Camera initialization failed: {str(e)}") 123 | logger.error(f"Camera error: {str(e)}") 124 | st.stop() 125 | 126 | with col2: 127 | st.subheader("📷 Live Feed") 128 | image_placeholder = st.empty() 129 | st.markdown("
", unsafe_allow_html=True) 130 | capture = st.button("🎵 Play Song on Captured Emotion") 131 | 132 | st.markdown("---") 133 | 134 | # Frame processing: Process each frame from the camera 135 | while cap.isOpened() and not capture: 136 | ret, frame = cap.read() 137 | if not ret: 138 | # Show warning if frame capture fails and exit loop 139 | st.warning("Failed to capture frame from camera") 140 | logger.warning("Camera frame capture failed") 141 | break 142 | 143 | try: 144 | frame = cv2.resize(frame, (320, 240)) 145 | frame, detected_emotion = detect_emotion(frame) 146 | st.session_state.last_emotion = detected_emotion 147 | 148 | # Update live KPI and video feed 149 | emotion_colors = { 150 | "Happy": "#DFF2BF", 151 | "Sad": "#FFBABA", 152 | "Angry": "#FFAAAA", 153 | "Surprise": "#FFFFBA", 154 | "Neutral": "#E0E0E0", 155 | "Fear": "#D0BAFF", 156 | "Disgust": "#B0FFBA" 157 | } 158 | 159 | bg_color = emotion_colors.get(detected_emotion, "#f9fff9") 160 | 161 | emotion_placeholder.markdown( 162 | f""" 163 |
<div style="background-color:{bg_color};">
176 |                     {detected_emotion}
177 |                 </div>
178 | """, 179 | unsafe_allow_html=True 180 | ) 181 | 182 | image_placeholder.image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), channels="RGB") 183 | 184 | if cv2.waitKey(1) & 0xFF == ord('q'): 185 | break 186 | time.sleep(0.1) # Limit refresh rate to avoid overloading 187 | 188 | except Exception as e: 189 | # If frame processing fails, show warning and keep running 190 | st.warning(f"Error processing frame: {str(e)}") 191 | logger.warning(f"Frame processing error: {str(e)}") 192 | time.sleep(0.1) 193 | continue 194 | 195 | # Release camera when done 196 | cap.release() 197 | 198 | except Exception as e: 199 | # General stability: Handle any errors in camera loop and stop app gracefully 200 | st.error(f"Error in camera processing loop: {str(e)}") 201 | logger.error(f"Camera loop error: {str(e)}") 202 | if 'cap' in locals() and cap.isOpened(): 203 | cap.release() 204 | st.stop() 205 | 206 | st.session_state.show_video = True 207 | 208 | # Play Song For Detected Mood: Play a YouTube video based on the detected emotion 209 | if st.session_state.show_video: 210 | try: 211 | st.markdown("## 🎧 Now Playing Music For Your Mood") 212 | st.markdown(f"**Detected Mood:** `{st.session_state.last_emotion}`") 213 | 214 | if st.button("🔁 Detect Emotions Again"): 215 | st.session_state.show_video = False 216 | st.rerun() 217 | 218 | # YouTube integration: Search for a video matching the detected emotion 219 | search_query = f"https://www.youtube.com/results?search_query={st.session_state.last_emotion}+background+tunes" 220 | 221 | try: 222 | # Try to fetch YouTube search results with a timeout 223 | response = requests.get(search_query, timeout=10) 224 | if response.status_code != 200: 225 | raise Exception(f"HTTP {response.status_code}") 226 | 227 | html_content = response.text 228 | match = re.search(r'/watch\?v=([^\"]+)', html_content) 229 | if not match: 230 | raise Exception("No video found in search results") 231 | 232 | video_id = match.group(1) 233 | video_url = f"https://www.youtube.com/watch?v={video_id}" 234 | 235 | st.video(video_url) 236 | logger.info(f"Playing YouTube video: {video_url}") 237 | 238 | except Exception as e: 239 | # Non-critical error handling: Show error if video loading fails, but keep app running 240 | st.error(f"Failed to load music: {str(e)}") 241 | logger.error(f"YouTube video error: {str(e)}") 242 | st.markdown("Unable to play music. Please try again.") 243 | 244 | except Exception as e: 245 | # General stability: Handle any errors in music playback section 246 | st.error(f"Error in music playback section: {str(e)}") 247 | logger.error(f"Music playback error: {str(e)}") --------------------------------------------------------------------------------