├── README.md
├── emotion.jpg
├── labels.npy
├── model.h5
└── music.py

/README.md:
--------------------------------------------------------------------------------
# emotion-based-music

## Explanation Video

Emotion based music | ai | deep learning project | with code | ml project

## Description

Welcome to the emotion-based music project, built using Mediapipe and Keras. OpenCV and Streamlit are used to create the web app, and the streamlit-webrtc module captures the webcam feed in the browser. The video explains all of the code required to build the emotion-based music recommender web app.
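As a quick orientation before the full code below, this is the core streamlit-webrtc pattern the app builds on. The class and key names here are placeholders, a minimal sketch rather than this project's actual code:

```python
# Minimal sketch of the streamlit-webrtc pattern: a processor class
# whose recv() is called for every webcam frame captured in the browser
# and returns the (optionally annotated) frame to display.
import av
from streamlit_webrtc import webrtc_streamer

class PassthroughProcessor:
    def recv(self, frame):
        img = frame.to_ndarray(format="bgr24")  # frame as a BGR numpy array
        # ... per-frame processing (landmark detection, prediction) goes here ...
        return av.VideoFrame.from_ndarray(img, format="bgr24")

webrtc_streamer(key="demo", video_processor_factory=PassthroughProcessor)
```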

The model that classifies the different emotions comes from the live emoji project, whose code is explained in these videos:
- Data collection script: https://youtu.be/ZxZSGRdTLtE
- Data training and inference script: https://youtu.be/He_oZ-MnIrU
- Code for live emoji: https://github.com/Pawandeep-prog/liveEmoji
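The actual training code lives in the links above. As a rough sketch only, the classifier stored in model.h5 is a small dense network over a flattened landmark vector; the layer sizes and class count below are assumptions for illustration, not the repo's exact architecture:

```python
# Hypothetical stand-in for the liveEmoji classifier. Input size:
# 468 face landmarks + 21 landmarks per hand, each as (x, y) offsets
# => 468*2 + 21*2 + 21*2 = 1020 features per frame.
from keras.models import Sequential
from keras.layers import Dense, Input

num_classes = 4  # assumption: one output per collected emotion label

model = Sequential([
    Input(shape=(1020,)),
    Dense(512, activation="relu"),
    Dense(256, activation="relu"),
    Dense(num_classes, activation="softmax"),
])
model.compile(optimizer="rmsprop", loss="categorical_crossentropy",
              metrics=["accuracy"])
model.summary()
```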

## Connect with me

If you have any queries regarding any of the topics discussed in the video, feel free to reach out to me using the links below:
- Facebook: https://m.facebook.com/proogramminghub
- Instagram: @programming_hut
- Twitter: https://twitter.com/programming_hut
- GitHub: https://github.com/Pawandeep-prog
- Discord: https://discord.gg/G5Cunyg
- LinkedIn: https://www.linkedin.com/in/programminghut
- YouTube: https://www.youtube.com/c/programminghutofficial
--------------------------------------------------------------------------------
/emotion.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pawandeep-prog/emotion-based-music/a76cc6ff01838c45d5ecda7159340385a0333e36/emotion.jpg
--------------------------------------------------------------------------------
/labels.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pawandeep-prog/emotion-based-music/a76cc6ff01838c45d5ecda7159340385a0333e36/labels.npy
--------------------------------------------------------------------------------
/model.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pawandeep-prog/emotion-based-music/a76cc6ff01838c45d5ecda7159340385a0333e36/model.h5
--------------------------------------------------------------------------------
/music.py:
--------------------------------------------------------------------------------
import streamlit as st
from streamlit_webrtc import webrtc_streamer
import av
import cv2
import numpy as np
import mediapipe as mp
from keras.models import load_model
import webbrowser

# Load the trained emotion classifier and its label names.
model = load_model("model.h5")
label = np.load("labels.npy")

# Mediapipe holistic detects face and hand landmarks in each frame.
holistic = mp.solutions.holistic
hands = mp.solutions.hands
holis = holistic.Holistic()
drawing = mp.solutions.drawing_utils

st.header("Emotion Based Music Recommender")

if "run" not in st.session_state:
    st.session_state["run"] = "true"

# The latest detected emotion is passed between the video-processor
# thread and this script through a small .npy file.
try:
    emotion = np.load("emotion.npy")[0]
except Exception:
    emotion = ""

# Keep the webcam stream running until an emotion has been captured.
if not emotion:
    st.session_state["run"] = "true"
else:
    st.session_state["run"] = "false"

class EmotionProcessor:
    def recv(self, frame):
        frm = frame.to_ndarray(format="bgr24")

        ##############################
        frm = cv2.flip(frm, 1)

        res = holis.process(cv2.cvtColor(frm, cv2.COLOR_BGR2RGB))

        lst = []

        if res.face_landmarks:
            # Face landmark coordinates, expressed relative to
            # reference landmark 1 so the features are position-invariant.
            for i in res.face_landmarks.landmark:
                lst.append(i.x - res.face_landmarks.landmark[1].x)
                lst.append(i.y - res.face_landmarks.landmark[1].y)

            # Hand landmarks relative to landmark 8 (index fingertip);
            # zeros fill in when a hand is not visible.
            if res.left_hand_landmarks:
                for i in res.left_hand_landmarks.landmark:
                    lst.append(i.x - res.left_hand_landmarks.landmark[8].x)
                    lst.append(i.y - res.left_hand_landmarks.landmark[8].y)
            else:
                for i in range(42):
                    lst.append(0.0)

            if res.right_hand_landmarks:
                for i in res.right_hand_landmarks.landmark:
                    lst.append(i.x - res.right_hand_landmarks.landmark[8].x)
                    lst.append(i.y - res.right_hand_landmarks.landmark[8].y)
            else:
                for i in range(42):
                    lst.append(0.0)

            lst = np.array(lst).reshape(1, -1)

            # Classify the landmark vector, overlay the prediction on the
            # frame, and persist it for the main script to pick up.
            pred = label[np.argmax(model.predict(lst))]

            print(pred)
            cv2.putText(frm, pred, (50, 50), cv2.FONT_ITALIC, 1, (255, 0, 0), 2)

            np.save("emotion.npy", np.array([pred]))

        drawing.draw_landmarks(frm, res.face_landmarks, holistic.FACEMESH_TESSELATION,
                               landmark_drawing_spec=drawing.DrawingSpec(color=(0, 0, 255), thickness=-1, circle_radius=1),
                               connection_drawing_spec=drawing.DrawingSpec(thickness=1))
        drawing.draw_landmarks(frm, res.left_hand_landmarks, hands.HAND_CONNECTIONS)
        drawing.draw_landmarks(frm, res.right_hand_landmarks, hands.HAND_CONNECTIONS)
        ##############################

        return av.VideoFrame.from_ndarray(frm, format="bgr24")

lang = st.text_input("Language")
singer = st.text_input("singer")

if lang and singer and st.session_state["run"] != "false":
    webrtc_streamer(key="key", desired_playing_state=True,
                    video_processor_factory=EmotionProcessor)

btn = st.button("Recommend me songs")

if btn:
    if not emotion:
        st.warning("Please let me capture your emotion first")
        st.session_state["run"] = "true"
    else:
        webbrowser.open(f"https://www.youtube.com/results?search_query={lang}+{emotion}+song+{singer}")
        np.save("emotion.npy", np.array([""]))
        st.session_state["run"] = "false"
--------------------------------------------------------------------------------
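A note on the design: recv() runs in a worker thread managed by streamlit-webrtc, so the app passes the latest prediction back to the main Streamlit script through emotion.npy rather than through st.session_state. A minimal illustration of that handoff, with a hypothetical "happy" label standing in for a real prediction:

```python
# Minimal illustration of the file-based handoff in music.py: the video
# processor saves the newest prediction, and the Streamlit script (which
# reruns on every widget interaction) reads it back on the next run.
import numpy as np

np.save("emotion.npy", np.array(["happy"]))  # written inside recv()

emotion = np.load("emotion.npy")[0]          # read at the top of music.py
print(emotion)                               # -> happy
```

The app itself is started with `streamlit run music.py`; once an emotion has been captured, the "Recommend me songs" button opens a YouTube search combining the chosen language, the detected emotion, and the singer.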