├── .gitignore
├── .vscode
│   └── settings.json
├── CNN_m.h5
├── alarm.wav
├── CNN__model.h5
├── README.md
├── Train_model.py
└── detection.py

/.gitignore:
--------------------------------------------------------------------------------
Train dataset
Valid dataset
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
{
    "python.pythonPath": "C:\\Users\\swati\\anaconda3\\python.exe"
}
--------------------------------------------------------------------------------
/CNN_m.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhishek351/Driver-drowsiness-detection-CNN-Keras-OpenCV/HEAD/CNN_m.h5
--------------------------------------------------------------------------------
/alarm.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhishek351/Driver-drowsiness-detection-CNN-Keras-OpenCV/HEAD/alarm.wav
--------------------------------------------------------------------------------
/CNN__model.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhishek351/Driver-drowsiness-detection-CNN-Keras-OpenCV/HEAD/CNN__model.h5
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Driver-drowsiness-detection-using-Deep-Learning
Deep learning, TensorFlow, OpenCV.

Run the `detection.py` script with TensorFlow as the backend. The script continuously monitors the user's eye state and classifies whether the person is drowsy or not.
`Train_model.py` is the script used to train the CNN classifier model.

### The model was trained on a dataset of 11,000 eye images, labelled as open or closed for the left and right eye separately.
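For reference, here is a minimal sketch of the per-eye classification that `detection.py` performs on each cropped eye. It assumes TensorFlow 2.x/Keras, the saved `CNN__model.h5`, and a hypothetical cropped-eye file `eye.jpg`; the preprocessing mirrors training (100×100 grayscale, scaled to [0, 1]), and the 1 = open / 0 = closed mapping is the one `detection.py` assumes.

```python
# Illustrative sketch (assumes TensorFlow 2.x): classify one cropped eye image.
# "eye.jpg" is a hypothetical file name; the 0/1 meaning follows detection.py,
# which treats 1 as an open eye and 0 as a closed eye.
import cv2
from keras.models import load_model

model = load_model("CNN__model.h5")

eye = cv2.imread("eye.jpg", cv2.IMREAD_GRAYSCALE)   # grayscale, like the training data
eye = cv2.resize(eye, (100, 100)) / 255.0           # same size and scaling as training
eye = eye.reshape(1, 100, 100, 1)                   # batch of one, single channel

prob = float(model.predict(eye, verbose=0)[0][0])   # sigmoid output in [0, 1]
print("Open" if prob > 0.5 else "Closed", prob)
```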

# Video Link
https://drive.google.com/file/d/15_1B-n15fWxpHFIXsDOWWjkjlbLdQzTF/view?usp=sharing
--------------------------------------------------------------------------------
/Train_model.py:
--------------------------------------------------------------------------------
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten
from keras.preprocessing.image import ImageDataGenerator


# Data generators: augment the training set, only rescale the validation set.
train = ImageDataGenerator(rescale=1/255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
validation = ImageDataGenerator(rescale=1/255)

# 100x100 grayscale eye crops with binary labels (open vs. closed).
train_image_gen = train.flow_from_directory('Train dataset', target_size=(100, 100), batch_size=32, color_mode='grayscale', class_mode='binary')
valid_image_gen = validation.flow_from_directory('Valid dataset', target_size=(100, 100), batch_size=32, color_mode='grayscale', class_mode='binary')

batch_size = 32
SPE = len(train_image_gen.classes) // batch_size   # steps per epoch
VS = len(valid_image_gen.classes) // batch_size    # validation steps
print(SPE, VS)

print(train_image_gen.class_indices)   # which folder maps to label 0 and which to label 1

# Stack of Conv/MaxPool blocks followed by a small dense classifier.
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=(100, 100, 1), activation="relu"))
model.add(MaxPooling2D((2, 2)))

model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))

model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))

model.add(Conv2D(16, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))

model.add(Conv2D(16, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))

model.add(Flatten())
model.add(Dropout(0.5))

model.add(Dense(128, activation="relu"))
model.add(Dropout(0.2))

model.add(Dense(64, activation="relu"))
model.add(Dropout(0.2))

model.add(Dense(1, activation='sigmoid'))   # single sigmoid unit for the binary open/closed output

model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# fit() accepts generators directly; fit_generator() is deprecated in recent Keras.
results = model.fit(train_image_gen, validation_data=valid_image_gen, epochs=10, steps_per_epoch=SPE, validation_steps=VS)

model.save("CNN__model.h5")
--------------------------------------------------------------------------------
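As a sanity check on the saved classifier, the sketch below reloads `CNN__model.h5`, rebuilds the validation generator, and prints both `class_indices` (to confirm which folder became label 0 and which became label 1, since `detection.py` treats 0 as closed and 1 as open) and the held-out accuracy. It is an illustrative usage example rather than part of `Train_model.py`, and it assumes TensorFlow 2.x, where `model.evaluate` accepts a generator, and that the `Valid dataset` directory is present.

```python
# Sketch: reload the saved model and check it against the validation folder.
# Assumes TensorFlow 2.x (model.evaluate accepts generators) and that the
# "Valid dataset" directory used by Train_model.py is available.
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator

model = load_model("CNN__model.h5")

valid_gen = ImageDataGenerator(rescale=1 / 255).flow_from_directory(
    "Valid dataset",
    target_size=(100, 100),
    batch_size=32,
    color_mode="grayscale",
    class_mode="binary",
    shuffle=False,
)

# class_indices shows which folder became label 0 and which became label 1;
# detection.py assumes 0 == closed and 1 == open, so this is worth confirming.
print(valid_gen.class_indices)

loss, acc = model.evaluate(valid_gen)
print("validation accuracy:", round(acc, 3))
```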
/detection.py:
--------------------------------------------------------------------------------
import cv2
import os
from keras.models import load_model
import numpy as np
from pygame import mixer
import time


# Alarm sound played when the driver looks drowsy.
mixer.init()
sound = mixer.Sound('alarm.wav')

# Haar cascades for face and eye detection.
face = cv2.CascadeClassifier('haarcascade/haarcascade_frontalface_alt.xml')
leye = cv2.CascadeClassifier('haarcascade/haarcascade_lefteye_2splits.xml')
reye = cv2.CascadeClassifier('haarcascade/haarcascade_righteye_2splits.xml')
eyes = cv2.CascadeClassifier('haarcascade/haarcascade_eye.xml')

lbl = ['Closed eyes', 'Open eyes']

model = load_model('CNN__model.h5')
path = os.getcwd()
cap = cv2.VideoCapture(0)   # access the default camera
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
count = 0
score = 0     # grows while the eyes stay closed, shrinks while they are open
thicc = 2     # thickness of the red warning border
rpred = [99]  # placeholder predictions until the first eye is classified
lpred = [99]


while True:
    ret, frame = cap.read()
    if not ret:   # no frame available from the camera
        break
    height, width = frame.shape[:2]

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face.detectMultiScale(gray, minNeighbors=5, scaleFactor=1.1, minSize=(25, 25))   # face detection
    eye = eyes.detectMultiScale(gray)          # eye detection
    left_eye = leye.detectMultiScale(gray)     # left eye detection
    right_eye = reye.detectMultiScale(gray)    # right eye detection

    cv2.rectangle(frame, (0, height-50), (200, height), (0, 0, 0), thickness=cv2.FILLED)

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (150, 150, 150), 1)

    for (x, y, w, h) in eye:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (150, 150, 150), 1)

    for (x, y, w, h) in right_eye:
        r_eye = frame[y:y+h, x:x+w]
        count = count + 1
        # Same preprocessing as training: grayscale, 100x100, scaled to [0, 1].
        r_eye = cv2.cvtColor(r_eye, cv2.COLOR_BGR2GRAY)
        r_eye = cv2.resize(r_eye, (100, 100))
        r_eye = r_eye / 255
        r_eye = r_eye.reshape(100, 100, -1)
        r_eye = np.expand_dims(r_eye, axis=0)
        # predict_classes() was removed in recent Keras; threshold the sigmoid output instead.
        rpred = (model.predict(r_eye, verbose=0) > 0.5).astype(int)[0]

        if rpred[0] == 1:
            lbl = 'Open'
        if rpred[0] == 0:
            lbl = 'Closed'
        break

    for (x, y, w, h) in left_eye:
        l_eye = frame[y:y+h, x:x+w]
        count = count + 1
        l_eye = cv2.cvtColor(l_eye, cv2.COLOR_BGR2GRAY)
        l_eye = cv2.resize(l_eye, (100, 100))
        l_eye = l_eye / 255
        l_eye = l_eye.reshape(100, 100, -1)
        l_eye = np.expand_dims(l_eye, axis=0)
        lpred = (model.predict(l_eye, verbose=0) > 0.5).astype(int)[0]
        if lpred[0] == 1:
            lbl = 'Open'
        if lpred[0] == 0:
            lbl = 'Closed'
        break

    # Both eyes closed: raise the drowsiness score; otherwise lower it.
    if rpred[0] == 0 and lpred[0] == 0:
        score = score + 1
        cv2.putText(frame, "Closed", (10, height-20), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
    else:
        score = score - 1
        cv2.putText(frame, "Open", (10, height-20), font, 1, (255, 255, 255), 1, cv2.LINE_AA)


    if score < 0:
        score = 0
    cv2.putText(frame, 'Score:' + str(score), (100, height-20), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
    if score > 10:
        # The person has kept their eyes closed for a while, so sound the alarm.
        cv2.imwrite(os.path.join(path, 'image.jpg'), frame)
        try:
            sound.play()

        except:
            pass
        # Pulse the red border thickness as a visual warning.
        if thicc < 16:
            thicc = thicc + 2
        else:
            thicc = thicc - 2
            if thicc < 2:
                thicc = 2
        cv2.rectangle(frame, (0, 0), (width, height), (0, 0, 255), thicc)
    cv2.imshow('Driver drowsiness detection', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
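One practical note on `detection.py`: the directory tree at the top of this document does not include a `haarcascade/` folder, so the four cascade XML files have to be supplied separately. A hedged alternative, sketched below, is to point the classifiers at the copies bundled with `opencv-python` via `cv2.data.haarcascades`; this is an option rather than what the script itself does.

```python
# Sketch: load the Haar cascades that ship with opencv-python instead of a
# local haarcascade/ folder. This is an assumption about how the XML files
# are obtained, not part of detection.py.
import os
import cv2

cascade_dir = cv2.data.haarcascades   # directory bundled with opencv-python

face = cv2.CascadeClassifier(os.path.join(cascade_dir, "haarcascade_frontalface_alt.xml"))
leye = cv2.CascadeClassifier(os.path.join(cascade_dir, "haarcascade_lefteye_2splits.xml"))
reye = cv2.CascadeClassifier(os.path.join(cascade_dir, "haarcascade_righteye_2splits.xml"))
eyes = cv2.CascadeClassifier(os.path.join(cascade_dir, "haarcascade_eye.xml"))

# empty() is True when an XML file failed to load, so this is a quick sanity check.
assert not face.empty() and not eyes.empty(), "cascade files not found"
```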