└── FER.py

/FER.py:
--------------------------------------------------------------------------------
from google.colab import drive
drive.mount('/content/drive')
# Import necessary libraries
import os
import cv2
import numpy as np
import random
import tensorflow as tf
from tensorflow.keras import Sequential, layers
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau

# Dataset path
datasetPath = "/content/drive/MyDrive/facial-expression-dataset/images/new_validation"

# Image dimensions and lists to store images and labels
imageSize = (48, 48)
X_train = []
y_train = []

# Define emotion mapping based on folder names in the new dataset
emotion_label_mapping = {
    'angry': 0,
    'disgust': 1,
    'fear': 2,
    'happy': 3,
    'neutral': 4,
    'sad': 5,
    'surprise': 6
}

# Read the dataset, resize images, and convert to grayscale
for emotion_folder in os.listdir(datasetPath + "/train"):
    emotion_path = os.path.join(datasetPath + "/train", emotion_folder)
    if emotion_folder not in emotion_label_mapping:
        print(f"Skipping folder '{emotion_folder}' - not in emotion mapping.")
        continue

    if os.path.isdir(emotion_path):
        for image_file in os.listdir(emotion_path):
            img_path = os.path.join(emotion_path, image_file)
            try:
                img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                if img is not None:
                    img_resized = cv2.resize(img, imageSize)
                    X_train.append(img_resized)
                    y_train.append(emotion_label_mapping[emotion_folder])
                else:
                    print(f"Error loading image: {img_path}")
            except Exception as e:
                print(f"Error processing image {img_path}: {e}")

# Convert to numpy arrays
X_train = np.array(X_train)
y_train = np.array(y_train)

# Normalize pixel values
X_train = X_train.astype('float32') / 255.0

# One-hot encode labels
y_train = tf.keras.utils.to_categorical(y_train, num_classes=len(emotion_label_mapping))

# Split data into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)

# Reshape X_train to include the channel dimension
X_train = X_train.reshape(X_train.shape[0], imageSize[0], imageSize[1], 1)

# Reshape X_val to include the channel dimension as well
X_val = X_val.reshape(X_val.shape[0], imageSize[0], imageSize[1], 1)

# Data augmentation
data_gen = ImageDataGenerator(
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True
)

# Build a deeper CNN model with Batch Normalization
model = Sequential([
    layers.InputLayer(input_shape=(imageSize[0], imageSize[1], 1)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.BatchNormalization(),
    layers.MaxPooling2D((2, 2)),

    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.BatchNormalization(),
    layers.MaxPooling2D((2, 2)),

    layers.Conv2D(256, (3, 3), activation='relu'),
    layers.BatchNormalization(),
    layers.MaxPooling2D((2, 2)),

    layers.Conv2D(512, (3, 3), activation='relu'),
    layers.BatchNormalization(),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    layers.Dropout(0.5),

    layers.Dense(len(emotion_label_mapping), activation='softmax')
])

# Compile the model
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
              loss='categorical_crossentropy', metrics=['accuracy'])

# Callbacks for early stopping and learning rate reduction
early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
lr_reduction = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001)

# Train the model with data augmentation
history = model.fit(data_gen.flow(X_train, y_train, batch_size=64),
                    epochs=50,
                    validation_data=(X_val, y_val),
                    callbacks=[early_stopping, lr_reduction])

# Evaluate the model
loss, accuracy = model.evaluate(X_val, y_val)
print(f'Validation Accuracy: {accuracy * 100:.2f}%')

# Save the model
model.save('/content/drive/MyDrive/facial_expression_model.keras')

print("Model has been trained and saved successfully.")

from sklearn.metrics import confusion_matrix

# Predict on the validation set and recover class indices from the one-hot labels
y_pred = model.predict(X_val)
y_pred_classes = np.argmax(y_pred, axis=1)
y_true_classes = np.argmax(y_val, axis=1)

cm = confusion_matrix(y_true_classes, y_pred_classes)

# Print the confusion matrix
print("Confusion Matrix:")
print(cm)

# Optionally, visualize the confusion matrix using seaborn
import seaborn as sns
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 7))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted Labels')
plt.ylabel('True Labels')
plt.title('Confusion Matrix')
plt.show()
--------------------------------------------------------------------------------
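A minimal inference sketch, assuming the model file saved by the script above and the same 48x48 grayscale preprocessing; the test image path is only a placeholder and the emotion name list follows the index order of emotion_label_mapping:

# Sketch: reload the saved model and classify one face image (placeholder path)
import cv2
import numpy as np
import tensorflow as tf

# Index order matches emotion_label_mapping in FER.py (angry=0 ... surprise=6)
emotion_names = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']

model = tf.keras.models.load_model('/content/drive/MyDrive/facial_expression_model.keras')

# Load a single face image, convert to grayscale, resize to 48x48, normalize
img = cv2.imread('/content/drive/MyDrive/test_face.jpg', cv2.IMREAD_GRAYSCALE)  # placeholder path
img = cv2.resize(img, (48, 48)).astype('float32') / 255.0
img = img.reshape(1, 48, 48, 1)  # batch of one, single channel

# Predict class probabilities and report the most likely emotion
probs = model.predict(img)[0]
print(f"Predicted emotion: {emotion_names[np.argmax(probs)]} ({probs.max() * 100:.1f}%)")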