├── Artificial Neural Network ├── NNHW2.m ├── NNminiProject1_a.m ├── NNminiProject1_b.m └── NNminiProject1_c.m ├── Facial Recognition - Fusions ├── Part A - Multi-classifier Fusion │ ├── Calc_FRR_FAR.m │ ├── Calc_Genuine_Impostor.m │ ├── GenuineCalc.m │ ├── ImpostorCalc.m │ ├── LDAModel.m │ ├── LDA_Main.m │ ├── Label_TransformGI.m │ ├── MCS_Main.m │ ├── ModeOne.m │ ├── PCA.m │ ├── PCA_Main.m │ ├── PCA_Process.m │ ├── imgPrep.m │ ├── imgPrepModeOne.m │ ├── label.m │ └── label_GI.m └── Part B - Multi-instance Fusion │ ├── Calc_FRR_FAR.m │ ├── GICalc_Instance_LDA.m │ ├── GICalc_Instance_PCA.m │ ├── GICalc_LDA.m │ ├── GenuineCalc_PCA.m │ ├── ImpostorCalc_PCA.m │ ├── LDA_Instance_GILabel.m │ ├── LDA_Instance_Main.m │ ├── LDA_Label.m │ ├── LDA_Main.m │ ├── LDA_Model.m │ ├── LDA_imgPrep.m │ ├── MultiInstance_Main.m │ ├── PCA_Instance_Main.m │ ├── PCA_Label_TransformGI.m │ ├── PCA_Main.m │ ├── PCA_Model.m │ ├── PCA_imgPrep.m │ └── PCA_train_test.m ├── Facial Recognition-Autoencoder ├── AutoEncoder OldSchool │ ├── AutoEncoder_PartA.m │ ├── OneHotLabel.m │ ├── autoEncoderModel.m │ ├── imageCategory.m │ └── setupData.m ├── AutoEncoder Part A │ ├── AutoEncoder_PartA.m │ ├── GenuineLabel.m │ ├── ImposterLabel.m │ ├── OneHotLabel.m │ ├── autoEncoderModel.m │ ├── genuineMSE.m │ ├── imageCategory.m │ ├── imposterMSE.m │ ├── setupData.m │ ├── testGenuineMSE.m │ ├── testImposterMSE.m │ ├── trainGenuineMSE.m │ └── trainImposterMSE.m └── AutoEncoder Part B │ ├── AutoEncoder_PartB.m │ ├── Committee │ ├── AutoEncoder_PartB.m │ ├── OneHotLabel.m │ ├── autoencoderModel.m │ ├── imageCategory.m │ └── setupData.m │ ├── OneHotLabel.m │ ├── autoencoderModel.m │ ├── imageCategory.m │ └── setupData.m ├── Facial Recognition-LDA+PCA ├── Calc_Genuine_Impostor.m ├── LDA_for_PCA.m ├── PCA_LDA_Combo.m ├── PCA_Process.m ├── imgPrep.m └── label.m ├── Facial Recognition-LDA ├── Calc_Genuine_Impostor.m ├── LDAModel.m ├── LDA_Main.m ├── imgPrep.m └── label_GI.m ├── Facial Recognition-PCA ├── Mode One │ ├── GenuineCalc.m │ ├── ImpostorCalc.m │ ├── Label_TransformGI.m │ ├── ModeOne.m │ ├── PCA.m │ └── imgPrepModeOne.m └── Mode Two │ ├── ImpostorCalc.m │ ├── Label_TransformGI.m │ ├── ModeTwo.m │ ├── PCA_M2.m │ └── imgPrepModeTwo.m ├── Fingerprint Spoof Detector-Naive Bayes ├── Bayes_Fingerprint_Spoof_Detector.m ├── Bayes_Fingerprint_Spoof_Detector_prior.m └── loadData.m ├── Gesture Recognition-NARX ├── Committee │ ├── 3CV │ │ ├── committee3CV.m │ │ ├── confusionROCcommittee.m │ │ └── main3CV.m │ └── 6CV │ │ ├── committee6CV.m │ │ ├── concatAll6CV.m │ │ ├── confusionROCcommittee6CV.m │ │ └── main6CV.m ├── Pre-Fold │ ├── EncapsulatePerson.m │ ├── combExcelData.m │ ├── fiveTrainedInit.m │ ├── inputHiddenOutputDelay.m │ ├── movementTarget.m │ ├── preFold.m │ ├── prefoldConfusionROC.m │ └── shrinkConfusion.m ├── Subject Independent - 6CV │ ├── SubjectIndependent_6CV.m │ ├── confusionROCSixF.m │ ├── serializePersonXZ.m │ ├── sixFoldCV.m │ ├── slashConfusion.m │ └── stackFivePeople.m └── Subject Specific - 3CV │ ├── Encapsulate.m │ ├── SubjectSpecific_3CV.m │ ├── combExcelData.m │ ├── confusionROC.m │ ├── movementTarget.m │ ├── roundData.m │ ├── shrinkConfusion.m │ └── threeFoldCV.m ├── Gesture Recognition-TDNN ├── Prefold │ ├── EncapsulatePerson.m │ ├── depthsNodesDecision.m │ ├── fiveTrainedInit.m │ ├── preFold.m │ └── prefoldConfusionROC.m ├── Subject Independent-6CV │ ├── SubjectIndependent_6CV.m │ ├── confusionROCSixF.m │ ├── serializePersonXZ.m │ ├── sixFoldCV.m │ ├── slashConfusion.m │ └── stackFivePeople.m ├── Subject Specific-3CV │ 
├── Encapsulate.m │ ├── SubjectSpecific_3CV.m │ ├── confusionROC.m │ ├── roundData.m │ ├── shrinkConfusion.m │ └── threeFoldCV.m ├── combExcelData.m └── movementTarget.m ├── Portfolio Selection Final Project ├── 5 and 4 and 3 Star LV LB LG.xlsx ├── 5&4&3star.xlsx ├── 5star.xlsx ├── Final_Project_Portfolio_Selection.m ├── inforatio.m └── sharperatio.m ├── README.md └── Radial Basis Function (Exact & Regular) ├── MiniProject2.m ├── MiniProject2_Task.m ├── P.mat ├── T.mat └── parseData.m /Artificial Neural Network/NNHW2.m: -------------------------------------------------------------------------------- 1 | % Load dataset 2 | load bodyfat_dataset; 3 | 4 | % Variable X and T 5 | [X,T] = bodyfat_dataset; 6 | 7 | % Fitting Network 8 | hiddenLayerSize = 1; 9 | 10 | % Build the Net 11 | net = fitnet(hiddenLayerSize); 12 | 13 | % Number of Neurons in layer 14 | net.layers{1}.size = 10; 15 | 16 | % Data Vector excluded 50 testing 17 | allData = (1:202); 18 | 19 | % Indexes 20 | validate = datasample(1:202, 40); 21 | testData = 202:252; 22 | trainData = setdiff(allData,validate); 23 | 24 | % Train Input and Target 25 | TrainSetInput = X(trainData); 26 | TrainSetTarget = T(trainData); 27 | 28 | % Test Input and Target 29 | TestSetInput = X(testData); 30 | TestSetTarget = T(testData); 31 | 32 | % Divide Param Ratio 33 | net.divideParam.trainRatio = 0.6429; 34 | net.divideParam.valRatio = 0.1587; 35 | net.divideParam.testRatio = 0.1984; 36 | 37 | % Train the Network 38 | [net, tr] = train(net, TrainSetInput, TrainSetTarget); 39 | 40 | % Test the Network 41 | Testoutputs = net(TestSetInput); 42 | errors = gsubtract(Testoutputs, TestSetTarget); 43 | performance = perform(net, TestSetTarget, Testoutputs); 44 | 45 | %figure, plotconfusion(TestSetTarget,Testoutputs) -------------------------------------------------------------------------------- /Artificial Neural Network/NNminiProject1_a.m: -------------------------------------------------------------------------------- 1 | % --------------------------------------------------------------------------------- 2 | % Mini Project #1 - a : 10 Nodes in 1 Hidden Later. 3 | % Training (80%), Validation(20%) and Testing(0%). 4 | % Mean and variance of MSEs for training, validation 5 | % and testing. 
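% Editor's note (hedged sketch): the bodyfat scripts in this folder select
% samples with X(trainData), which linearly indexes the 13x252 feature matrix
% and returns single elements rather than whole observations, and datasample
% draws validation indices with replacement. A column-wise split, assuming
% [X,T] = bodyfat_dataset (13x252 inputs, 1x252 targets), could instead look
% like this; the index ranges below are illustrative, not the assignment's.
[X, T]   = bodyfat_dataset;            % 13 features x 252 samples
testIdx  = 203:size(X, 2);             % hold out the last 50 samples for testing
validIdx = randperm(202, 40);          % 40 unique validation indices
trainIdx = setdiff(1:202, validIdx);   % remaining samples for training
TrainSetInput = X(:, trainIdx);  TrainSetTarget = T(:, trainIdx);   % select columns (observations)
TestSetInput  = X(:, testIdx);   TestSetTarget  = T(:, testIdx);
ValidateInput = X(:, validIdx);  ValidateTarget = T(:, validIdx);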
6 | % --------------------------------------------------------------------------------- 7 | 8 | % Load dataset 9 | load bodyfat_dataset; 10 | 11 | % Variable X and T 12 | [X,T] = bodyfat_dataset; 13 | 14 | % Fitting Network 15 | hiddenLayerSize = 1; 16 | 17 | % Build the Net 18 | net = fitnet(hiddenLayerSize); 19 | 20 | % Number of Neurons in layer 21 | net.layers{1}.size = 10; 22 | 23 | % Number of Epochs 24 | net.trainParam.epochs = 10; 25 | 26 | % Data Vector excluded 50 testing 27 | allData = (1:202); 28 | 29 | % Indexes 30 | validate = datasample(1:202, 40); 31 | testData = 202:252; 32 | trainData = setdiff(allData,validate); 33 | 34 | % Train Input and Target 35 | TrainSetInput = X(trainData); 36 | TrainSetTarget = T(trainData); 37 | 38 | % Test Input and Target 39 | TestSetInput = X(testData); 40 | TestSetTarget = T(testData); 41 | 42 | % Validation 43 | ValidateInput = X(validate); 44 | ValidateTarget = T(validate); 45 | 46 | % Divide Param Ratio 47 | net.divideParam.trainRatio = 0.8; 48 | net.divideParam.valRatio = 0.2; 49 | net.divideParam.testRatio = 0; 50 | 51 | % Train the Network 52 | [net, tr] = train(net, TrainSetInput, TrainSetTarget); 53 | 54 | % Train Outputs 55 | TrainOutputs = net(TrainSetInput); 56 | 57 | % Test the Network 58 | TestOutputs = net(TestSetInput); 59 | 60 | % Validate Output 61 | ValidateOutputs = net(ValidateInput); 62 | 63 | % Errors and Performance 64 | TrainErrors = gsubtract(TrainOutputs, TrainSetTarget); 65 | TestErrors = gsubtract(TestOutputs, TestSetTarget); 66 | ValidateErrors = gsubtract(ValidateOutputs, ValidateTarget); 67 | performance = perform(net, TestSetTarget, TestOutputs); 68 | 69 | %figure, plotconfusion(TestSetTarget,Testoutputs) 70 | % MSEs 71 | TrainMSE = mse(net, TrainSetTarget, TrainOutputs, 'regularization',0.01); 72 | TestMSE = mse(net, TestSetTarget, TestOutputs, 'regularization',0.01); 73 | ValidationMSE = mse(net, ValidateTarget, ValidateOutputs, 'regularization',0.01); 74 | 75 | % Mean Errors 76 | meanTrainE = mean(TrainErrors); 77 | meanTestE = mean(TestErrors); 78 | meanValidateE = mean(ValidateErrors); 79 | 80 | % Variance Errors 81 | varTrainE = var(TrainErrors); 82 | varTestE = var(TestErrors); 83 | varValidateE = var(ValidateErrors); 84 | -------------------------------------------------------------------------------- /Artificial Neural Network/NNminiProject1_b.m: -------------------------------------------------------------------------------- 1 | % --------------------------------------------------------------------------------- 2 | % Mini Project #1 - b : 2 Nodes & 50 Nodes in 1 Hidden Later. 3 | % Training (80%), Validation(20%) and Testing(0%). 4 | % Mean and variance of MSEs for training, validation 5 | % and testing. 
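% Editor's note (hedged sketch): parts a and b ask for the mean and variance of
% the training, validation and testing MSEs. One way to obtain them, assuming
% the TrainSet*/TestSet* splits these scripts build, is to retrain over several
% random initializations and collect the per-run errors; the run count and
% variable names below are illustrative only.
nRuns     = 20;
trainMSEs = zeros(1, nRuns);
testMSEs  = zeros(1, nRuns);
for run = 1:nRuns
    net = fitnet(10);                                  % fresh 10-node network each run
    net.trainParam.showWindow = false;                 % suppress the training GUI
    net = train(net, TrainSetInput, TrainSetTarget);
    trainMSEs(run) = perform(net, TrainSetTarget, net(TrainSetInput));
    testMSEs(run)  = perform(net, TestSetTarget,  net(TestSetInput));
end
fprintf('train MSE: mean %.4f, var %.4f\n', mean(trainMSEs), var(trainMSEs));
fprintf('test  MSE: mean %.4f, var %.4f\n', mean(testMSEs), var(testMSEs));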
6 | % --------------------------------------------------------------------------------- 7 | 8 | % Load dataset 9 | load bodyfat_dataset; 10 | 11 | % Variable X and T 12 | [X,T] = bodyfat_dataset; 13 | 14 | % Fitting Network 15 | hiddenLayerSize = 1; 16 | 17 | % Build the Net 18 | net = fitnet(hiddenLayerSize); 19 | 20 | % Number of Neurons in layer 21 | net.layers{1}.size = 2; 22 | %net.layers{1}.size = 50; 23 | 24 | 25 | % Number of Epochs 26 | net.trainParam.epochs = 10; 27 | 28 | % Data Vector excluded 50 testing 29 | allData = (1:202); 30 | 31 | % Indexes 32 | validate = datasample(1:202, 40); 33 | testData = 202:252; 34 | trainData = setdiff(allData,validate); 35 | 36 | % Train Input and Target 37 | TrainSetInput = X(trainData); 38 | TrainSetTarget = T(trainData); 39 | 40 | % Test Input and Target 41 | TestSetInput = X(testData); 42 | TestSetTarget = T(testData); 43 | 44 | % Validation 45 | ValidateInput = X(validate); 46 | ValidateTarget = T(validate); 47 | 48 | % Divide Param Ratio 49 | net.divideParam.trainRatio = 0.6; 50 | net.divideParam.valRatio = 0.4; 51 | net.divideParam.testRatio = 0; 52 | 53 | % Train the Network 54 | [net, tr] = train(net, TrainSetInput, TrainSetTarget); 55 | 56 | % Train Outputs 57 | TrainOutputs = net(TrainSetInput); 58 | 59 | % Test the Network 60 | TestOutputs = net(TestSetInput); 61 | 62 | % Validate Output 63 | ValidateOutputs = net(ValidateInput); 64 | 65 | % Errors and Performance 66 | TrainErrors = gsubtract(TrainOutputs, TrainSetTarget); 67 | TestErrors = gsubtract(TestOutputs, TestSetTarget); 68 | ValidateErrors = gsubtract(ValidateOutputs, ValidateTarget); 69 | performance = perform(net, TestSetTarget, TestOutputs); 70 | 71 | %figure, plotconfusion(TestSetTarget,Testoutputs) 72 | % MSEs 73 | TrainMSE = mse(net, TrainSetTarget, TrainOutputs, 'regularization',0.01); 74 | TestMSE = mse(net, TestSetTarget, TestOutputs, 'regularization',0.01); 75 | ValidationMSE = mse(net, ValidateTarget, ValidateOutputs, 'regularization',0.01); 76 | 77 | % Mean Errors 78 | meanTrainE = mean(TrainErrors); 79 | meanTestE = mean(TestErrors); 80 | meanValidateE = mean(ValidateErrors); 81 | disp([meanTrainE, meanTestE, meanValidateE]); 82 | 83 | % Variance Errors 84 | varTrainE = var(TrainErrors); 85 | varTestE = var(TestErrors); 86 | varValidateE = var(ValidateErrors); 87 | disp([varTrainE, varTestE, varValidateE]); -------------------------------------------------------------------------------- /Artificial Neural Network/NNminiProject1_c.m: -------------------------------------------------------------------------------- 1 | % --------------------------------------------------------------------------------- 2 | % Mini Project #1 - c : Regularization 0.1 and 0.5. 
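% Editor's note (hedged): with net.performParam.regularization = gamma, the
% toolbox performance blends the mean squared error with the mean squared
% weights, roughly perf = (1-gamma)*mse + gamma*msw, so gamma = 0.1 and 0.5
% trade data fit against weight magnitude. Note that the MSE calls in this
% script pass 'regularization',0.01, i.e. they are evaluated with a different
% gamma than the one used for training. A manual check, assuming a trained
% fitnet object `net` and this script's test split, might look like:
gamma   = net.performParam.regularization;       % 0.1 or 0.5 for part c
wb      = getwb(net);                            % all weights and biases as one vector
msw     = mean(wb.^2);                           % mean squared weights
err     = TestSetTarget - net(TestSetInput);
perfReg = (1 - gamma)*mean(err.^2) + gamma*msw;  % regularized performance (sketch)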
3 | % --------------------------------------------------------------------------------- 4 | 5 | % Load dataset 6 | load bodyfat_dataset; 7 | 8 | % Variable X and T 9 | [X,T] = bodyfat_dataset; 10 | 11 | % Fitting Network 12 | hiddenLayerSize = 1; 13 | 14 | % Build the Net 15 | net = fitnet(hiddenLayerSize); 16 | 17 | % Number of Neurons in layer 18 | %net.layers{1}.size = 2; 19 | net.layers{1}.size = 10; 20 | 21 | 22 | % Number of Epochs 23 | net.trainParam.epochs = 10; 24 | 25 | % Regularization 26 | %net.performParam.regularization = 0.5; 27 | net.performParam.regularization = 0.1; 28 | 29 | % Data Vector excluded 50 testing 30 | allData = (1:202); 31 | 32 | % Indexes 33 | validate = datasample(1:202, 40); 34 | testData = 202:252; 35 | trainData = setdiff(allData,validate); 36 | 37 | % Train Input and Target 38 | TrainSetInput = X(trainData); 39 | TrainSetTarget = T(trainData); 40 | 41 | % Test Input and Target 42 | TestSetInput = X(testData); 43 | TestSetTarget = T(testData); 44 | 45 | % Validation 46 | ValidateInput = X(validate); 47 | ValidateTarget = T(validate); 48 | 49 | % Divide Param Ratio 50 | net.divideParam.trainRatio = 0.6429; 51 | net.divideParam.valRatio = 0.1587; 52 | net.divideParam.testRatio = 0.1984; 53 | 54 | % Train the Network 55 | [net, tr] = train(net, TrainSetInput, TrainSetTarget); 56 | 57 | % Train Outputs 58 | TrainOutputs = net(TrainSetInput); 59 | 60 | % Test the Network 61 | TestOutputs = net(TestSetInput); 62 | 63 | % Validate Output 64 | ValidateOutputs = net(ValidateInput); 65 | 66 | % Errors and Performance 67 | TrainErrors = gsubtract(TrainOutputs, TrainSetTarget); 68 | TestErrors = gsubtract(TestOutputs, TestSetTarget); 69 | ValidateErrors = gsubtract(ValidateOutputs, ValidateTarget); 70 | performance = perform(net, TestSetTarget, TestOutputs); 71 | 72 | %figure, plotconfusion(TestSetTarget,Testoutputs) 73 | % MSEs 74 | TrainMSE = mse(net, TrainSetTarget, TrainOutputs, 'regularization',0.01); 75 | TestMSE = mse(net, TestSetTarget, TestOutputs, 'regularization',0.01); 76 | ValidationMSE = mse(net, ValidateTarget, ValidateOutputs, 'regularization',0.01); 77 | 78 | % Mean Errors 79 | meanTrainE = mean(TrainErrors); 80 | meanTestE = mean(TestErrors); 81 | meanValidateE = mean(ValidateErrors); 82 | disp([meanTrainE, meanTestE, meanValidateE]); 83 | 84 | % Variance Errors 85 | varTrainE = var(TrainErrors); 86 | varTestE = var(TestErrors); 87 | varValidateE = var(ValidateErrors); 88 | disp([varTrainE, varTestE, varValidateE]); -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/Calc_FRR_FAR.m: -------------------------------------------------------------------------------- 1 | % Calculate FRR and FAR from ROC 2 | function [FRR, FAR] = Calc_FRR_FAR(roc) 3 | 4 | % GAR and FAR from ROC 5 | GAR = roc(1, :)'; 6 | 7 | % Get FRR and FAR 8 | FRR = round((GAR + 1)*10^4)/10^4; 9 | FAR = round((roc(2, :)')*10^4)/10^4; 10 | 11 | end 12 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/Calc_Genuine_Impostor.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine and Impostor Scores for each person 2 | function[GI] = Calc_Genuine_Impostor(trainImgD, testImgD, eigenVec) 3 | 4 | % Initialize to store Genuine and Impostor 5 | GI = []; 6 | 7 | % Loop through all 40 people 8 | for face = 1:40 9 | startImpostor = []; 10 | endImpostor = []; 11 
| % Calculate Genuine Scores 12 | G_score = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, face)*eigenVec)', 'euclidean'); 13 | % Person 1 14 | if face == 1 15 | GI= [GI, G_score]; 16 | for ifaceOne = 2:40 17 | iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, ifaceOne)*eigenVec)', 'euclidean'); 18 | GI = [GI, iscore]; 19 | end 20 | % Person 2 - 40 21 | else 22 | % Calculate the first part of the Impostor 23 | for iface = 1 : (face-1) 24 | start_iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, iface)*eigenVec)', 'euclidean'); 25 | startImpostor = [startImpostor, start_iscore]; 26 | end 27 | 28 | % Calculate the second part of the Impostor 29 | for iface2 = (face+1):40 30 | end_iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, iface2)*eigenVec)', 'euclidean'); 31 | endImpostor = [endImpostor, end_iscore]; 32 | end 33 | smashTogether = [startImpostor, G_score, endImpostor]; 34 | GI = [GI; smashTogether]; 35 | end 36 | 37 | end 38 | 39 | 40 | end 41 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/GenuineCalc.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine Scores 2 | 3 | function [GenuineVec, featureMatrixAgg, testProjectAgg] = GenuineCalc 4 | 5 | % Initialize vector 6 | GenuineVec = []; 7 | featureMatrixAgg = []; 8 | testProjectAgg = []; 9 | 10 | % Load Images to set up train and test data by person 11 | % Calculate Genuine Scores first 12 | for faceNum = 1 : 40 13 | [ trainData, testData, train_Label, test_Label, train_GLabel, testG_Label ] = imgPrepModeOne(faceNum, 0, 0); 14 | 15 | % Mode #1 - PCA 16 | [featureMatrix, testProject, GenuineScore] = PCA(trainData, testData); 17 | 18 | % Store each person's feature matrix in a multidimensional array 19 | featureMatrixAgg = cat(3, featureMatrixAgg, featureMatrix); 20 | testProjectAgg = cat(3, testProjectAgg, testProject); 21 | 22 | % Genuine Scores 23 | GenuineVec = [GenuineVec; GenuineScore]; 24 | 25 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/ImpostorCalc.m: -------------------------------------------------------------------------------- 1 | % Calculate Impostor Scores from feature matrix and projected value 2 | function[ImpostorVec] = ImpostorCalc(featureMatrixAgg, testProjectAgg) 3 | 4 | % Initialize vector 5 | ImpostorVec = []; 6 | 7 | % Loop through feature matrices 8 | for FM_idx = 1 : size(featureMatrixAgg, 3) 9 | PersonImpostor = []; 10 | % Loop through projected matrices to calculate scores 11 | for TP_idx = 1 : size(testProjectAgg, 3) 12 | 13 | % If not itself, do the math! 
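% Editor's note (hedged): the Part A genuine/impostor routines assemble one
% large score matrix in which each 10x10 diagonal block holds same-person
% (genuine) distances and every off-diagonal block holds impostor distances.
% The matching ground-truth mask can be built in one line; note the polarity
% is the opposite of label_GI in this folder, which marks genuine pairs with 0.
nPeople   = 40;                                      % subjects in the face set
nPerBlock = 10;                                      % scores per subject block
genuineMask = kron(eye(nPeople), ones(nPerBlock));   % 400x400, 1 = genuine pair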
14 | if FM_idx ~= TP_idx 15 | 16 | % Calc Euclidean Distance for impostor scores 17 | impostorScore = pdist2(featureMatrixAgg(:,:, TP_idx), testProjectAgg(:,:, FM_idx), 'euclidean'); 18 | PersonImpostor = [PersonImpostor, impostorScore]; 19 | end 20 | end 21 | 22 | % Store Impostsor Scores 23 | % Can't vertcat an empty vector with another value vector 24 | % So store the first Impostor directly in the Impostor 25 | if isempty(ImpostorVec) == 1 26 | ImpostorVec = PersonImpostor; 27 | 28 | % Any Impostors after 1st one are vertcat into the matrix 29 | else 30 | ImpostorVec = vertcat(ImpostorVec, PersonImpostor); 31 | %ImpostorVec = [ImpostorVec; PersonImpostor]; 32 | end 33 | 34 | end 35 | 36 | 37 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/LDAModel.m: -------------------------------------------------------------------------------- 1 | % Set up Train, Test Data and build LDA Model 2 | function[V, eigvector_sort, eigenVal, trainImg, testImg] = LDAModel() 3 | 4 | % Initialize 5 | trainImg = []; 6 | testImg = []; 7 | trainImg_mean_class = []; 8 | SB = zeros(10, 10); 9 | SW = zeros(10, 10); 10 | 11 | % Load in Images & Calculate Mean for Each Class 12 | for faceNum = 1:40 13 | [ train_img_vec, test_img_vec, train_Label, test_Label ] = imgPrep(faceNum); 14 | trainImg = cat(3, trainImg, cell2mat(train_img_vec')); 15 | testImg = cat(3, testImg, cell2mat(test_img_vec')); 16 | 17 | % Calculate the mean for each class 18 | trainImg_mean_class = [trainImg_mean_class; mean(trainImg(:, :, faceNum))]; 19 | 20 | end 21 | 22 | % Num of Observations for each class -- per image or per pixels ? 23 | % Average all mean classes 24 | mu = mean(trainImg_mean_class); 25 | 26 | % Center the data (data-mean) 27 | for idx = 1:40 28 | center = (trainImg(:, :, idx))-repmat(trainImg_mean_class(idx, :),size(trainImg(:, :, idx),1),1); 29 | %trainImg_center = cat(3, trainImg_center, center); 30 | 31 | % Calculate the within class variance (SW) 32 | within_class_var = center'*center; 33 | SW = SW + within_class_var; 34 | 35 | end 36 | 37 | % SW 38 | inv_SW=inv(SW); 39 | 40 | % Calculate between class variance (SB) 41 | for trainImgC = 1:40 42 | between_class_var = size(trainImg(:, :, trainImgC),1) * (trainImg_mean_class(trainImgC, :)-mu)'* (trainImg_mean_class(trainImgC,:)-mu); 43 | %SB = cat(3, SB, between_class_var); 44 | SB = SB +between_class_var; 45 | end 46 | 47 | % Calculate V 48 | V = inv_SW * SB; 49 | 50 | % Get Eigenvalue and Eigenvectors of V 51 | [eigenVec, eigenVal] = eig(V); 52 | 53 | eigval_diagnal = diag(eigenVal); 54 | [other, index] = sort(eigval_diagnal,'descend'); 55 | eigval_diagnal = eigval_diagnal(index); 56 | eigvector_sort = eigenVec(:, index); 57 | 58 | % Pick out eigen values based on threshold 59 | countNumEig = 0; 60 | for count = 1:size(eigval_diagnal,1) 61 | if(eigval_diagnal(count)>0) 62 | countNumEig = countNumEig + 1; 63 | end 64 | end 65 | 66 | % Filtered eigen vectors 67 | filteredEigVec = eigvector_sort(:, 1:countNumEig); 68 | 69 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/LDA_Main.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------------------------------------------------------------------- 2 | % Facial Recognition : LDA 3 | % 
--------------------------------------------------------------------------------------------------- 4 | 5 | function[LDA_GI, LDA_Labels] = LDA_Main() 6 | 7 | % Label 8 | [LDA_Labels] = label_GI(); 9 | 10 | % Build LDA Model 11 | [V, eigenVec, eigenVal, trainImg, testImg] = LDAModel(); 12 | 13 | % Calculate Genuine and Impostor Scores 14 | [LDA_GI] = Calc_Genuine_Impostor(trainImg, testImg, V); 15 | 16 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/Label_TransformGI.m: -------------------------------------------------------------------------------- 1 | % Create Label and transform GI vector to 0-1 2 | 3 | function [testLabels, GI_convert]=Label_TransformGI(GI) 4 | 5 | % Labels for data 6 | Labels = zeros(size(GI,1),size(GI,1)); 7 | for rowCount = 0:10:size(GI,1)-10 8 | Labels(rowCount+1:rowCount + 10, rowCount+1:rowCount + 10) = ones(10, 10); 9 | end 10 | 11 | % Convert GI to 0 or 1 : Cut off Point 0.9 12 | GI_convert = zeros(size(GI,1),size(GI,1)); 13 | 14 | for row = 1:size(GI,1) 15 | for col = 1:size(GI,2) 16 | if GI(row,col) < 0.9 17 | GI_convert(row, col) = 1; 18 | end 19 | 20 | end 21 | end 22 | testLabels = [ones(1, 10), zeros(1, size(GI,1)-10 )]'; 23 | 24 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/MCS_Main.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------------------------- 2 | % Final Project - Part A : Multi-classifier fusion ( LDA + PCA fusion ) 3 | % ------------------------------------------------------------------------------------- 4 | 5 | clc 6 | close all 7 | clear all 8 | 9 | 10 | % LDA Model 11 | [LDA_GI, LDA_Labels] = LDA_Main(); 12 | 13 | % PCA Model 14 | [PCA_GI, PCA_Labels] = PCA_Main(); 15 | 16 | % MSC Score Fusion 17 | % Average of LDA & PCA 18 | AVG_ScoreFusion_GI = (LDA_GI + PCA_GI)/2; 19 | % Minimum of LDA v.s. PCA 20 | MIN_ScoreFusion_GI = min(LDA_GI, PCA_GI); 21 | % Maximum of LDA v.s. PCA 22 | MAX_ScoreFusion_GI = max(LDA_GI, PCA_GI); 23 | 24 | % Plot ROC Curve 25 | [LDA_roc,LDA_EER,LDA_area,LDA_EERthr,LDA_ALLthr]= ezroc3(LDA_GI, LDA_Labels); 26 | [PCA_roc,PCA_EER,PCA_area,PCA_EERthr,PCA_ALLthr]= ezroc3(PCA_GI, PCA_Labels); 27 | [AVG_roc,AVG_EER,AVG_area,AVG_EERthr,AVG_ALLthr]= ezroc3(AVG_ScoreFusion_GI, PCA_Labels); 28 | [MIN_roc,MIN_EER,MIN_area,MIN_EERthr,MIN_ALLthr]= ezroc3(MIN_ScoreFusion_GI, PCA_Labels); 29 | [MAX_roc,MAX_EER,MAX_area,MAX_EERthr,MAX_ALLthr]= ezroc3(MAX_ScoreFusion_GI, PCA_Labels); 30 | 31 | % FRR + FAR for each method 32 | [LDA_FRR, LDA_FAR] = Calc_FRR_FAR(LDA_roc); 33 | [PCA_FRR, PCA_FAR] = Calc_FRR_FAR(PCA_roc); 34 | [AVG_FRR, AVG_FAR] = Calc_FRR_FAR(AVG_roc); 35 | [MIN_FRR, MIN_FAR] = Calc_FRR_FAR(MIN_roc); 36 | [MAX_FRR, MAX_FAR] = Calc_FRR_FAR(MAX_roc); 37 | 38 | % Plot FAR and FRR for all methods 39 | figure; 40 | plot(LDA_FAR, LDA_FRR, 'LineWidth', 2); 41 | title({'[ LDA & PCA Models ]'; 'Regular v.s. 
Score-Level Fusion'}); 42 | xlabel('FAR(FPR)'); 43 | ylabel('FRR(FNR)'); 44 | hold on; 45 | plot(PCA_FAR, PCA_FRR, 'LineWidth', 2); 46 | plot(AVG_FAR, AVG_FRR, 'LineWidth', 2); 47 | plot(MIN_FAR, MIN_FRR, 'LineWidth', 2); 48 | plot(MAX_FAR, MAX_FRR, 'LineWidth', 2); 49 | hold off; 50 | legend('LDA', 'PCA', 'AVG', 'MIN', 'MAX'); -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/ModeOne.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------------------------------------------------------------------- 2 | % PCA Mode 1 : 1-5 Images as Training and 6-10 Images as Testing 3 | % --------------------------------------------------------------------------------------------------- 4 | %clc 5 | %clear all 6 | %close all 7 | 8 | % Calculate Genuine Scores 9 | [GenuineVec, featureMatrixAgg, testProjectAgg] = GenuineCalc; 10 | 11 | % Calculate Impostor Scores 12 | [ImpostorVec] = ImpostorCalc(featureMatrixAgg, testProjectAgg); 13 | 14 | % Genuine + Impostor 15 | PCA_GI = [GenuineVec, ImpostorVec]; 16 | 17 | % Create Labels + Transform GI to 0-1 18 | [testLabels, GI_convert] = Label_TransformGI(PCA_GI); 19 | 20 | % Plot Graph 21 | [roc,EER,area,EERthr,ALLthr]= ezroc3(PCA_GI, testLabels, 2, 0, 1); 22 | 23 | % GAR and FAR from ROC 24 | GAR = roc(1, :)'; 25 | 26 | % Get FRR and FAR 27 | FRR = round((GAR + 1)*10^4)/10^4; 28 | FAR = round((roc(2, :)')*10^4)/10^4; 29 | 30 | % Obtain FRR values at 0%, 5%, 10% FAR 31 | case1 = []; 32 | case2 = []; 33 | case3 = []; 34 | 35 | for idx = 1:size(FAR,1) 36 | % FRR = 0% 37 | if (FAR(idx, 1) == 0) 38 | case1 = [case1,FRR(idx, 1)]; 39 | % FAR = 5% 40 | elseif (0.005 <= FAR(idx, 1) && FAR(idx, 1) <=0.0055) 41 | case2 = [case2,FRR(idx, 1)]; 42 | % FAR = 10% 43 | elseif (0.1 <= FAR(idx, 1) && FAR(idx, 1) <=0.1015) 44 | case3 = [case3,FRR(idx, 1)]; 45 | end 46 | end 47 | 48 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/PCA.m: -------------------------------------------------------------------------------- 1 | % Principal Component Analysis (PCA Model) 2 | 3 | function [featureMatrix, testProject, GenuineScore] = PCA(trainData, testData) 4 | 5 | % Unpack Cell to Vector 6 | trainData_V = cell2mat(trainData); 7 | testData_V = cell2mat(testData); 8 | 9 | % Get size of matrix 10 | [trainRow, trainCol] = size(trainData_V); 11 | [testRow, testCol] = size(testData_V); 12 | 13 | % Calculate the mean for each person's img data 14 | trainMean = mean((trainData_V)')'; 15 | testMean = mean((testData_V)')'; 16 | 17 | % Center the train data ( img - mean for each person's imgs ) 18 | trainCenter = trainData_V - repmat(trainMean,1,trainCol); 19 | 20 | % Calculate Covariance Matrix 21 | train_CoVar = trainCenter * trainCenter'; 22 | 23 | % Calculate Eignevalues and Eigenvectors 24 | [ eigvector,eignval ] = eig(train_CoVar); 25 | 26 | % Sort eigenvector in descending order by correspoding eigenvalue 27 | eigval_diagnal = diag(eignval); 28 | [other, index] = sort(eigval_diagnal,'descend'); 29 | eigval_diagnal = eigval_diagnal(index); 30 | eigvector_sort = eigvector(:, index); 31 | 32 | % Pick out eigen values based on threshold 33 | countNumEig = 0; 34 | for count = 1:size(eigval_diagnal,1) 35 | if(eigval_diagnal(count)>0) 36 | countNumEig = countNumEig + 1; 37 | end 38 | end 39 | 40 | % Filtered eigen vectors 41 | filteredEigVec 
= eigvector_sort(:, 1:countNumEig); 42 | 43 | % Calculate Feature Matrix (Train Projected Space) 44 | featureMatrix = filteredEigVec' * trainCenter; 45 | 46 | % Center the test data ( img - mean for each person's imgs ) 47 | testCenter = testData_V - repmat(testMean,1,testCol); 48 | 49 | % Project testing data to the Train Projected Space 50 | testProject = filteredEigVec' * testCenter; 51 | 52 | % Genuine Scores 53 | GenuineScore = pdist2(featureMatrix, testProject, 'euclidean'); 54 | 55 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/PCA_Main.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------------------------------------------------------------------- 2 | % PCA Mode 1 : 1-5 Images as Training and 6-10 Images as Testing 3 | % --------------------------------------------------------------------------------------------------- 4 | 5 | function[PCA_GI, PCA_Labels] = PCA_Main() 6 | 7 | % Calculate Genuine Scores 8 | [GenuineVec, featureMatrixAgg, testProjectAgg] = GenuineCalc; 9 | 10 | % Calculate Impostor Scores 11 | [ImpostorVec] = ImpostorCalc(featureMatrixAgg, testProjectAgg); 12 | 13 | % Genuine + Impostor 14 | PCA_GI = [GenuineVec, ImpostorVec]; 15 | 16 | % Create Labels + Transform GI to 0-1 17 | [PCA_Labels, GI_convert] = Label_TransformGI(PCA_GI); 18 | 19 | 20 | end 21 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/PCA_Process.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine Scores 2 | 3 | function [featureMatrixAgg, testProjectAgg] = PCA_Process 4 | 5 | % Initialize vector 6 | featureMatrixAgg = []; 7 | testProjectAgg = []; 8 | trainImg_mean_class = []; 9 | testImg_mean_class = []; 10 | train_center_all = []; 11 | test_center_all = []; 12 | trainImg = []; 13 | testImg = []; 14 | 15 | % Load Images to set up train and test data by person 16 | for faceNum = 1:40 17 | [ train_img_vec, test_img_vec, train_Label, test_Label ] = imgPrep(faceNum); 18 | trainImg = cat(3, trainImg, cell2mat(train_img_vec)); 19 | testImg = cat(3, testImg, cell2mat(test_img_vec)); 20 | 21 | % Calculate the mean for each class 22 | trainImg_mean_class = [trainImg_mean_class; mean(trainImg(:, :, faceNum))]; 23 | testImg_mean_class = [testImg_mean_class; mean(testImg(:, :, faceNum))]; 24 | 25 | end 26 | 27 | % Average all mean classes 28 | mu_train = mean(trainImg_mean_class); 29 | mu_test = mean(testImg_mean_class); 30 | 31 | % Center the data (data-mean) 32 | for idx = 1:40 33 | train_center = (trainImg(:, :, idx))-repmat(trainImg_mean_class(idx, :),size(trainImg(:, :, idx),1),1); 34 | test_center = (testImg(:, :, idx))-repmat(testImg_mean_class(idx, :),size(testImg(:, :, idx),1),1); 35 | 36 | % Store centers for each class 37 | train_center_all = [train_center_all; train_center]; 38 | test_center_all = [test_center_all; test_center]; 39 | 40 | end 41 | 42 | % Calculate Covariance Matrix 43 | train_co_var = train_center_all'*train_center_all; 44 | test_co_var = test_center_all'*test_center_all; 45 | 46 | % Calculate Eignevalues and Eigenvectors 47 | [ eigvector,eignval ] = eig(train_co_var); 48 | 49 | % Sort eigenvector in descending order by correspoding eigenvalue 50 | eigval_diagnal = diag(eignval); 51 | [other, index] = sort(eigval_diagnal,'descend'); 52 | eigval_diagnal = 
eigval_diagnal(index); 53 | eigvector_sort = eigvector(:, index); 54 | 55 | % Pick out eigen values based on threshold 56 | countNumEig = 0; 57 | for count = 1:size(eigval_diagnal,1) 58 | if(eigval_diagnal(count)>0) 59 | countNumEig = countNumEig + 1; 60 | end 61 | end 62 | 63 | % Filtered eigen vectors 64 | filteredEigVec = eigvector_sort(:, 1:countNumEig); 65 | 66 | % Calculate Feature Matrix (Train Projected Space) 67 | featureMatrix = train_co_var' * train_center_all'; 68 | 69 | % Project testing data to the Train Projected Space 70 | testProject = test_co_var' * test_center_all'; 71 | 72 | for i = 1:40 73 | % Store each person's feature matrix in a multidimensional array 74 | featureMatrixAgg = cat(3, featureMatrixAgg, featureMatrix(:, (i-1)*10+1:i*10)); 75 | testProjectAgg = cat(3, testProjectAgg, testProject(:, (i-1)*10+1:i*10)); 76 | end 77 | 78 | end 79 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/imgPrep.m: -------------------------------------------------------------------------------- 1 | % Load in images to create image vectos for testing and training / each person 2 | 3 | function [ train_img_vec, test_img_vec, train_Label, test_Label] = imgPrep(faceNum) 4 | % Read in image values for each person 5 | faceNumM = int2str(faceNum); 6 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 7 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 8 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 9 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 10 | img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 11 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 12 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 13 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 14 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 15 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 16 | 17 | % Resize image into 10 x 10 pixels 18 | resize1 = imresize(img1R,[10 10]); 19 | resize2 = imresize(img2R,[10 10]); 20 | resize3 = imresize(img3R,[10 10]); 21 | resize4= imresize(img4R,[10 10]); 22 | resize5 = imresize(img5R,[10 10]); 23 | resize6 = imresize(img6R,[10 10]); 24 | resize7 = imresize(img7R,[10 10]); 25 | resize8 = imresize(img8R,[10 10]); 26 | resize9 = imresize(img9R,[10 10]); 27 | resize10 = imresize(img10R,[10 10]); 28 | 29 | % Convert image pixels from unit8 to double 30 | img1 = im2double(resize1); 31 | img2 = im2double(resize2); 32 | img3 = im2double(resize3); 33 | img4= im2double(resize4); 34 | img5 = im2double(resize5); 35 | img6 = im2double(resize6); 36 | img7 = im2double(resize7); 37 | img8 = im2double(resize8); 38 | img9 = im2double(resize9); 39 | img10 = im2double(resize10); 40 | 41 | % Randomly pick 5 for training and 5 for testing 42 | data_all = (1:10); 43 | trainDataPick = randperm(10,5); 44 | testDataPick = setdiff(data_all, trainDataPick); 45 | 46 | % Grab the images for training picks and convert each image into cell array 47 | % representation 48 | train_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 
49 | {eval(strcat('img', int2str(trainDataPick(4))))} {eval(strcat('img', int2str(trainDataPick(5))))} ]; 50 | 51 | % Train Label for each category 52 | train_Label(1, 1:5) = faceNum; 53 | %train_Label2(1, 1:length(sort_TrainCombo)) = faceNum; 54 | 55 | % Same for testing picks 56 | test_img_vec = [{eval(strcat('img', int2str(testDataPick(1))))} {eval(strcat('img', int2str(testDataPick(2))))} {eval(strcat('img', int2str(testDataPick(3))))} ... 57 | {eval(strcat('img', int2str(testDataPick(4))))} {eval(strcat('img', int2str(testDataPick(5))))}]; 58 | 59 | test_Label(1, 1:5) = faceNum; 60 | %test_Label2(1, 1:length(sort_TestCombo)) = faceNum; 61 | 62 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/imgPrepModeOne.m: -------------------------------------------------------------------------------- 1 | % Load in images to create image vectos for testing and training / each person 2 | 3 | function [ train_img_vec, test_img_vec, train_Label, test_Label, train_Label2, test_Label2] = imgPrepModeOne(faceNum, sort_TrainCombo, sort_TestCombo) 4 | 5 | % Read in image values for each person 6 | faceNumM = int2str(faceNum); 7 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 8 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 9 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 10 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 11 | img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 12 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 13 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 14 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 15 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 16 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 17 | 18 | % Resize image into 10 x 10 pixels 19 | resize1 = imresize(img1R,[10 10]); 20 | resize2 = imresize(img2R,[10 10]); 21 | resize3 = imresize(img3R,[10 10]); 22 | resize4= imresize(img4R,[10 10]); 23 | resize5 = imresize(img5R,[10 10]); 24 | resize6 = imresize(img6R,[10 10]); 25 | resize7 = imresize(img7R,[10 10]); 26 | resize8 = imresize(img8R,[10 10]); 27 | resize9 = imresize(img9R,[10 10]); 28 | resize10 = imresize(img10R,[10 10]); 29 | 30 | % Convert image pixels from unit8 to double 31 | img1 = im2double(resize1); 32 | img2 = im2double(resize2); 33 | img3 = im2double(resize3); 34 | img4= im2double(resize4); 35 | img5 = im2double(resize5); 36 | img6 = im2double(resize6); 37 | img7 = im2double(resize7); 38 | img8 = im2double(resize8); 39 | img9 = im2double(resize9); 40 | img10 = im2double(resize10); 41 | 42 | % Randomly pick 5 for training and 5 for testing 43 | data_all = (1:10); 44 | trainDataPick = randperm(10,5); 45 | testDataPick = setdiff(data_all, trainDataPick); 46 | 47 | % Grab the images for training picks and convert each image into cell array 48 | % representation 49 | train_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 
50 | {eval(strcat('img', int2str(trainDataPick(4))))} {eval(strcat('img', int2str(trainDataPick(5))))} ]; 51 | 52 | % Train Label for each category 53 | train_Label(1, 1:5) = faceNum; 54 | train_Label2(1, 1:length(sort_TrainCombo)) = faceNum; 55 | 56 | % Same for testing picks 57 | test_img_vec = [{eval(strcat('img', int2str(testDataPick(1))))} {eval(strcat('img', int2str(testDataPick(2))))} {eval(strcat('img', int2str(testDataPick(3))))} ... 58 | {eval(strcat('img', int2str(testDataPick(4))))} {eval(strcat('img', int2str(testDataPick(5))))}]; 59 | 60 | test_Label(1, 1:5) = faceNum; 61 | test_Label2(1, 1:length(sort_TestCombo)) = faceNum; 62 | 63 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/label.m: -------------------------------------------------------------------------------- 1 | % Generate labels for Geunuine and Impostor 2 | function[label] = label() 3 | 4 | % Initialize label matrix as 1 5 | label = ones(400, 400); 6 | 7 | 8 | % Replace the label with 0 when it's the right class 9 | for face = 1:40 10 | label(10*(face-1) +1:10*face, 10*(face-1) +1:10*face) = 0; 11 | end 12 | 13 | 14 | end 15 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part A - Multi-classifier Fusion/label_GI.m: -------------------------------------------------------------------------------- 1 | % Generate labels for Geunuine and Impostor 2 | function[label] = label_GI() 3 | 4 | % Initialize label matrix as 1 5 | label = ones(400, 400); 6 | 7 | 8 | % Replace the label with 0 when it's the right class 9 | for face = 1:40 10 | label(10*(face-1) +1:10*face, 10*(face-1) +1:10*face) = 0; 11 | end 12 | 13 | 14 | end 15 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/Calc_FRR_FAR.m: -------------------------------------------------------------------------------- 1 | % Calculate FRR and FAR from ROC 2 | function [FRR, FAR] = Calc_FRR_FAR(roc) 3 | 4 | % GAR and FAR from ROC 5 | GAR = roc(1, :)'; 6 | 7 | % Get FRR and FAR 8 | FRR = round((GAR + 1)*10^4)/10^4; 9 | FAR = round((roc(2, :)')*10^4)/10^4; 10 | 11 | end 12 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/GICalc_Instance_LDA.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine and Impostor Scores for each person 2 | function[GI] = GICalc_Instance_LDA(trainImgD, testImgD, eigenVec) 3 | 4 | % Initialize to store Genuine and Impostor 5 | GI = []; 6 | 7 | % Loop through all 40 people 8 | for face = 1:40 9 | startImpostor = []; 10 | endImpostor = []; 11 | % Calculate Genuine Scores 12 | G_score = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, face)*eigenVec)', 'euclidean'); 13 | 14 | % Person 1 15 | if face == 1 16 | GI= [GI, mean(mean(G_score))]; 17 | for ifaceOne = 2:40 18 | iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, ifaceOne)*eigenVec)', 'euclidean'); 19 | GI = [GI, mean(mean(iscore))]; 20 | end 21 | 22 | % Person 2 - 40 23 | else 24 | % Calculate the first part of the Impostor 25 | for iface = 1 : (face-1) 26 | start_iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, iface)*eigenVec)', 'euclidean'); 27 | startImpostor = [startImpostor, mean(mean(start_iscore))]; 28 | end 29 | 30 | % Calculate the second 
part of the Impostor 31 | for iface2 = (face+1):40 32 | end_iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, iface2)*eigenVec)', 'euclidean'); 33 | endImpostor = [endImpostor, mean(mean(end_iscore))]; 34 | end 35 | smashTogether = [startImpostor, mean(mean(G_score)), endImpostor]; 36 | GI = [GI; smashTogether]; 37 | end 38 | 39 | end 40 | 41 | 42 | end 43 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/GICalc_Instance_PCA.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine and Impostor for PCA 2 | function [ GI ] = GICalc_Instance_PCA(featureMatrixAgg, testProjectAgg) 3 | 4 | % Initialize 5 | GI = []; 6 | 7 | 8 | % Loop through all 40 classes 9 | for person = 1:40 10 | tempGenuine = []; 11 | tempImpostor = []; 12 | starttempImpostor = []; 13 | endtempImpostor = []; 14 | startImpostor = []; 15 | endImpostor = []; 16 | 17 | % Calculate Genuine and Impostor Scores and take average of each images 18 | for imgCount = 1:5 19 | GenuineScore = pdist2(featureMatrixAgg(1:10,10*(imgCount-1)+1:10*imgCount, person), testProjectAgg(1:10,10*(imgCount-1)+1:10*imgCount, person)); 20 | tempGenuine = [tempGenuine, mean(mean(GenuineScore))]; 21 | end 22 | 23 | % Calculate Genuine and Impostor Scores 24 | % Stack up person 1 25 | if person == 1 26 | GI = [GI, mean(tempGenuine)]; 27 | for person2 = 2:40 28 | for mainImg = 1:5 29 | for imgCount2 = 1:5 30 | ImpostorScore = pdist2(featureMatrixAgg(1:10,10*(imgCount2-1)+1:10*imgCount2, person2), testProjectAgg(1:10,10*(mainImg-1)+1:10*mainImg, person)); 31 | tempImpostor = [tempImpostor, mean(mean(ImpostorScore))]; 32 | end 33 | end 34 | GI = [GI, mean(tempImpostor)]; 35 | end 36 | 37 | else 38 | % Do the same for Person 2 to 40 39 | for iface = 1 : (person-1) 40 | for mainImgL = 1:5 41 | for imgCount_start = 1:5 42 | ImpostorScore = pdist2(testProjectAgg(1:10,10*(imgCount_start-1)+1:10*imgCount_start, iface), testProjectAgg(1:10,10*(mainImgL-1)+1:10*mainImgL, person)); 43 | starttempImpostor = [starttempImpostor, mean(mean(ImpostorScore))]; 44 | end 45 | end 46 | startImpostor = [startImpostor, mean(starttempImpostor)]; 47 | end 48 | for iface2 = (person+1):40 49 | for mainImgL2 = 1:5 50 | for imgCount_end = 1:5 51 | ImpostorScore = pdist2(testProjectAgg(1:10,10*(imgCount_end-1)+1:10*imgCount_end, iface2), testProjectAgg(1:10,10*(mainImgL2-1)+1:10*mainImgL2, person)); 52 | endtempImpostor = [endtempImpostor, mean(mean(ImpostorScore))]; 53 | end 54 | end 55 | endImpostor = [endImpostor, mean(endtempImpostor)]; 56 | end 57 | stack = [startImpostor, mean(tempGenuine), endImpostor]; 58 | GI = [GI; stack]; 59 | end 60 | end 61 | 62 | end 63 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/GICalc_LDA.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine and Impostor Scores for each person for LDA 2 | function[GI] = GICalc_LDA(trainImgD, testImgD, eigenVec) 3 | 4 | % Initialize to store Genuine and Impostor 5 | GI = []; 6 | 7 | % Loop through all 40 people 8 | for face = 1:40 9 | startImpostor = []; 10 | endImpostor = []; 11 | % Calculate Genuine Scores 12 | G_score = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, face)*eigenVec)', 'euclidean'); 13 | % Person 1 14 | if face == 1 15 | GI= [GI, G_score]; 16 | for ifaceOne = 2:40 17 | iscore = 
pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, ifaceOne)*eigenVec)', 'euclidean'); 18 | GI = [GI, iscore]; 19 | end 20 | % Person 2 - 40 21 | else 22 | % Calculate the first part of the Impostor 23 | for iface = 1 : (face-1) 24 | start_iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, iface)*eigenVec)', 'euclidean'); 25 | startImpostor = [startImpostor, start_iscore]; 26 | end 27 | 28 | % Calculate the second part of the Impostor 29 | for iface2 = (face+1):40 30 | end_iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, iface2)*eigenVec)', 'euclidean'); 31 | endImpostor = [endImpostor, end_iscore]; 32 | end 33 | smashTogether = [startImpostor, G_score, endImpostor]; 34 | GI = [GI; smashTogether]; 35 | end 36 | 37 | end 38 | 39 | 40 | end 41 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/GenuineCalc_PCA.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine Scores 2 | 3 | function [GenuineVec, featureMatrixAgg, testProjectAgg] = GenuineCalc_PCA() 4 | 5 | % Initialize vector 6 | GenuineVec = []; 7 | featureMatrixAgg = []; 8 | testProjectAgg = []; 9 | 10 | % Load Images to set up train and test data by person 11 | % Calculate Genuine Scores first 12 | for faceNum = 1 : 40 13 | [ trainData, testData, train_Label, test_Label, train_GLabel, testG_Label ] = PCA_imgPrep(faceNum, 0, 0); 14 | 15 | % Mode #1 - PCA 16 | [featureMatrix, testProject, GenuineScore] = PCA_Model(trainData, testData); 17 | 18 | % Store each person's feature matrix in a multidimensional array 19 | featureMatrixAgg = cat(3, featureMatrixAgg, featureMatrix); 20 | testProjectAgg = cat(3, testProjectAgg, testProject); 21 | 22 | % Genuine Scores 23 | GenuineVec = [GenuineVec; GenuineScore]; 24 | 25 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/ImpostorCalc_PCA.m: -------------------------------------------------------------------------------- 1 | % Calculate Impostor Scores from feature matrix and projected value 2 | function[ImpostorVec] = ImpostorCalc_PCA(featureMatrixAgg, testProjectAgg) 3 | 4 | % Initialize vector 5 | ImpostorVec = []; 6 | 7 | % Loop through feature matrices 8 | for FM_idx = 1 : size(featureMatrixAgg, 3) 9 | PersonImpostor = []; 10 | % Loop through projected matrices to calculate scores 11 | for TP_idx = 1 : size(testProjectAgg, 3) 12 | 13 | % If not itself, do the math! 
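% Editor's note (hedged): for the multi-instance fusion in Part B, the
% GICalc_Instance_* routines above collapse each per-person block of pairwise
% distances into a single fused score by averaging, so the GI matrix shrinks
% from 400x400 (per image) to 40x40 (per person). The reduction is simply
fusedScore = mean(scoreBlock(:));   % scoreBlock: pairwise distances for one train/test subject pair (illustrative name)
% and other combiners (min, max, median of the block) could be swapped in at
% the same place if a different instance-fusion rule is preferred.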
14 | if FM_idx ~= TP_idx 15 | 16 | % Calc Euclidean Distance for impostor scores 17 | impostorScore = pdist2(featureMatrixAgg(:,:, TP_idx), testProjectAgg(:,:, FM_idx), 'euclidean'); 18 | PersonImpostor = [PersonImpostor, impostorScore]; 19 | end 20 | end 21 | 22 | % Store Impostsor Scores 23 | % Can't vertcat an empty vector with another value vector 24 | % So store the first Impostor directly in the Impostor 25 | if isempty(ImpostorVec) == 1 26 | ImpostorVec = PersonImpostor; 27 | 28 | % Any Impostors after 1st one are vertcat into the matrix 29 | else 30 | ImpostorVec = vertcat(ImpostorVec, PersonImpostor); 31 | %ImpostorVec = [ImpostorVec; PersonImpostor]; 32 | end 33 | 34 | end 35 | 36 | 37 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/LDA_Instance_GILabel.m: -------------------------------------------------------------------------------- 1 | % Generate labels for Geunuine and Impostor 2 | function[label] = LDA_Instance_GILabel(GIsize) 3 | 4 | % Initialize label matrix as 1 5 | label = ones(GIsize, GIsize); 6 | 7 | % Replace the label with 0 when it's the right class 8 | for face = 1:40 9 | if GIsize == 400 10 | label(10*(face-1) +1:10*face, 10*(face-1) +1:10*face) = 0; 11 | else 12 | label(face, face) = 0; 13 | end 14 | 15 | 16 | end 17 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/LDA_Instance_Main.m: -------------------------------------------------------------------------------- 1 | % --------------------------------------------------------- 2 | % LDA Model for Multi-Instance 3 | % --------------------------------------------------------- 4 | 5 | function[LDA_GI_Instance, LDA_Labels_Instance] = LDA_Instance_Main() 6 | 7 | % Build LDA Model 8 | [V, eigenVec, eigenVal, trainImg, testImg] = LDA_Model(); 9 | 10 | % Calculate Genuine and Impostor Scores 11 | [LDA_GI_Instance] = GICalc_Instance_LDA(trainImg, testImg, V); 12 | 13 | % Label 14 | [LDA_Labels_Instance] = LDA_Instance_GILabel(size(LDA_GI_Instance, 1)); 15 | 16 | 17 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/LDA_Label.m: -------------------------------------------------------------------------------- 1 | % Generate labels for Geunuine and Impostor 2 | function[label] = LDA_Label(size) 3 | 4 | % Initialize label matrix as 1 5 | label = ones(size, size); 6 | 7 | 8 | % Replace the label with 0 when it's the right class 9 | for face = 1:40 10 | label(10*(face-1) +1:10*face, 10*(face-1) +1:10*face) = 0; 11 | end 12 | 13 | 14 | end 15 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/LDA_Main.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------------------------------------------------------------------- 2 | % Facial Recognition : LDA 3 | % --------------------------------------------------------------------------------------------------- 4 | 5 | function[LDA_GI, LDA_Labels] = LDA_Main() 6 | 7 | % Build LDA Model 8 | [V, eigenVec, eigenVal, trainImg, testImg] = LDA_Model(); 9 | 10 | % Calculate Genuine and Impostor Scores 11 | [LDA_GI] = GICalc_LDA(trainImg, testImg, V); 12 | 13 | % Label 14 | [LDA_Labels] = LDA_Label(size(LDA_GI, 1)); 15 | end 
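% Editor's note (hedged): LDA_Model in the next file follows the Fisher recipe:
% accumulate the within-class scatter SW and the between-class scatter SB, then
% keep the eigenvectors of inv(SW)*SB with the largest eigenvalues as the
% projection. A compact sketch of that pipeline (X: d-by-n data, y: 1-by-n class
% labels; all names here are illustrative, not the repo's):
function W = ldaProjection(X, y, k)
    d  = size(X, 1);
    mu = mean(X, 2);                                       % global mean
    SW = zeros(d);  SB = zeros(d);
    for c = unique(y)
        Xc  = X(:, y == c);
        muc = mean(Xc, 2);
        Xc0 = Xc - repmat(muc, 1, size(Xc, 2));            % center class c
        SW  = SW + Xc0*Xc0';                               % within-class scatter
        SB  = SB + size(Xc, 2)*(muc - mu)*(muc - mu)';     % between-class scatter
    end
    [V, D] = eig(SW \ SB);                                 % Fisher directions
    [~, order] = sort(real(diag(D)), 'descend');
    W = real(V(:, order(1:k)));                            % top-k discriminant directions
end
% Note that LDA_Main above hands V = inv(SW)*SB itself to the scoring routine;
% projecting with the sorted eigenvectors (eigvector_sort / filteredEigVec
% computed in LDA_Model) would be the more conventional choice.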
-------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/LDA_Model.m: -------------------------------------------------------------------------------- 1 | % Set up Train, Test Data and build LDA Model 2 | function[V, eigvector_sort, eigenVal, trainImg, testImg] = LDA_Model() 3 | 4 | % Initialize 5 | trainImg = []; 6 | testImg = []; 7 | trainImg_mean_class = []; 8 | SB = zeros(10, 10); 9 | SW = zeros(10, 10); 10 | 11 | % Load in Images & Calculate Mean for Each Class 12 | for faceNum = 1:40 13 | [ train_img_vec, test_img_vec, train_Label, test_Label ] = LDA_imgPrep(faceNum); 14 | trainImg = cat(3, trainImg, cell2mat(train_img_vec')); 15 | testImg = cat(3, testImg, cell2mat(test_img_vec')); 16 | 17 | % Calculate the mean for each class 18 | trainImg_mean_class = [trainImg_mean_class; mean(trainImg(:, :, faceNum))]; 19 | 20 | end 21 | 22 | % Num of Observations for each class -- per image or per pixels ? 23 | % Average all mean classes 24 | mu = mean(trainImg_mean_class); 25 | 26 | % Center the data (data-mean) 27 | for idx = 1:40 28 | center = (trainImg(:, :, idx))-repmat(trainImg_mean_class(idx, :),size(trainImg(:, :, idx),1),1); 29 | %trainImg_center = cat(3, trainImg_center, center); 30 | 31 | % Calculate the within class variance (SW) 32 | within_class_var = center'*center; 33 | SW = SW + within_class_var; 34 | 35 | end 36 | 37 | % SW 38 | inv_SW=inv(SW); 39 | 40 | % Calculate between class variance (SB) 41 | for trainImgC = 1:40 42 | between_class_var = size(trainImg(:, :, trainImgC),1) * (trainImg_mean_class(trainImgC, :)-mu)'* (trainImg_mean_class(trainImgC,:)-mu); 43 | %SB = cat(3, SB, between_class_var); 44 | SB = SB +between_class_var; 45 | end 46 | 47 | % Calculate V 48 | V = inv_SW * SB; 49 | 50 | % Get Eigenvalue and Eigenvectors of V 51 | [eigenVec, eigenVal] = eig(V); 52 | 53 | eigval_diagnal = diag(eigenVal); 54 | [other, index] = sort(eigval_diagnal,'descend'); 55 | eigval_diagnal = eigval_diagnal(index); 56 | eigvector_sort = eigenVec(:, index); 57 | 58 | % Pick out eigen values based on threshold 59 | countNumEig = 0; 60 | for count = 1:size(eigval_diagnal,1) 61 | if(eigval_diagnal(count)>0) 62 | countNumEig = countNumEig + 1; 63 | end 64 | end 65 | 66 | % Filtered eigen vectors 67 | filteredEigVec = eigvector_sort(:, 1:countNumEig); 68 | 69 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/LDA_imgPrep.m: -------------------------------------------------------------------------------- 1 | % Load in images to create image vectos for testing and training / each person 2 | 3 | function [ train_img_vec, test_img_vec, train_Label, test_Label] = LDA_imgPrep(faceNum) 4 | % Read in image values for each person 5 | faceNumM = int2str(faceNum); 6 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 7 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 8 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 9 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 10 | img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 11 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 12 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 13 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 14 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 15 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 16 | 17 | % Resize image into 10 x 10 pixels 18 | resize1 = 
imresize(img1R,[10 10]); 19 | resize2 = imresize(img2R,[10 10]); 20 | resize3 = imresize(img3R,[10 10]); 21 | resize4= imresize(img4R,[10 10]); 22 | resize5 = imresize(img5R,[10 10]); 23 | resize6 = imresize(img6R,[10 10]); 24 | resize7 = imresize(img7R,[10 10]); 25 | resize8 = imresize(img8R,[10 10]); 26 | resize9 = imresize(img9R,[10 10]); 27 | resize10 = imresize(img10R,[10 10]); 28 | 29 | % Convert image pixels from unit8 to double 30 | img1 = im2double(resize1); 31 | img2 = im2double(resize2); 32 | img3 = im2double(resize3); 33 | img4= im2double(resize4); 34 | img5 = im2double(resize5); 35 | img6 = im2double(resize6); 36 | img7 = im2double(resize7); 37 | img8 = im2double(resize8); 38 | img9 = im2double(resize9); 39 | img10 = im2double(resize10); 40 | 41 | % Randomly pick 5 for training and 5 for testing 42 | data_all = (1:10); 43 | trainDataPick = randperm(10,5); 44 | testDataPick = setdiff(data_all, trainDataPick); 45 | 46 | % Grab the images for training picks and convert each image into cell array 47 | % representation 48 | train_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 49 | {eval(strcat('img', int2str(trainDataPick(4))))} {eval(strcat('img', int2str(trainDataPick(5))))} ]; 50 | 51 | % Train Label for each category 52 | train_Label(1, 1:5) = faceNum; 53 | %train_Label2(1, 1:length(sort_TrainCombo)) = faceNum; 54 | 55 | % Same for testing picks 56 | test_img_vec = [{eval(strcat('img', int2str(testDataPick(1))))} {eval(strcat('img', int2str(testDataPick(2))))} {eval(strcat('img', int2str(testDataPick(3))))} ... 57 | {eval(strcat('img', int2str(testDataPick(4))))} {eval(strcat('img', int2str(testDataPick(5))))}]; 58 | 59 | test_Label(1, 1:5) = faceNum; 60 | %test_Label2(1, 1:length(sort_TestCombo)) = faceNum; 61 | 62 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/MultiInstance_Main.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------------------------------------------------ 2 | % Final Project - Part B : Multi-Instance fusion ( LDA + PCA fusion ) 3 | % ------------------------------------------------------------------------------------------------------------ 4 | 5 | clc 6 | close all 7 | clear all 8 | 9 | % Original PCA and LDA Model 10 | [LDA_GI, LDA_Labels] = LDA_Main(); 11 | [PCA_GI, PCA_Labels] = PCA_Main(); 12 | 13 | 14 | % PCA Model Multi-instance -- Something still off 15 | [PCA_GI_Instance, PCA_Labels_Instance] = PCA_Instance_Main(); 16 | 17 | % LDA Model Multi-instance 18 | [LDA_GI_Instance, LDA_Labels_Instance] = LDA_Instance_Main(); 19 | 20 | % Plot ROC Curve 21 | [LDA_roc, LDA_EER, LDA_area, LDA_EERthr, LDA_ALLthr]= ezroc3(LDA_GI, LDA_Labels); 22 | [PCA_roc, PCA_EER, PCA_area, PCA_EERthr, PCA_ALLthr]= ezroc3(PCA_GI, PCA_Labels); 23 | [LDA_Instance_roc, LDA_Instance_EER, LDA_Instance_area, LDA_Instance_EERthr, LDA_Instance_ALLthr]= ezroc3(LDA_GI_Instance, LDA_Labels_Instance); 24 | [PCA_Instance_roc, PCA_Instance_EER, PCA_Instance_area, PCA_Instance_EERthr, PCA_Instance_ALLthr]= ezroc3(PCA_GI_Instance, PCA_Labels_Instance); 25 | 26 | % FRR + FAR for each method 27 | [LDA_FRR, LDA_FAR] = Calc_FRR_FAR(LDA_roc); 28 | [PCA_FRR, PCA_FAR] = Calc_FRR_FAR(PCA_roc); 29 | [LDA_Instance_FRR, LDA_Instance_FAR] = Calc_FRR_FAR(LDA_Instance_roc); 30 | 
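% Editor's note (hedged): Calc_FRR_FAR in this folder derives the false reject
% rate as round((GAR + 1)*10^4)/10^4, which pushes every FRR value above 1. If
% roc(1,:) from ezroc3 really is the genuine accept rate, its complement is the
% intended quantity, e.g. inside Calc_FRR_FAR:
%   FRR = round((1 - GAR)*10^4)/10^4;   % FRR = 1 - GAR, at 4-decimal precision
% The GAR + 1 version also shifts the FRR/FAR curves plotted here and in MCS_Main.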
[PCA_Instance_FRR, PCA_Instance_FAR] = Calc_FRR_FAR(PCA_Instance_roc); 31 | 32 | 33 | % Plot FAR and FRR to compare Regular v.s. Multi-Instance Fusion 34 | figure; 35 | plot(LDA_FAR, LDA_FRR, 'LineWidth', 2); 36 | title({'[ LDA & PCA Models ]'; 'Regular v.s. Multi-Instance Fusion'}); 37 | xlabel('FAR(FPR)'); 38 | ylabel('FRR(FNR)'); 39 | hold on; 40 | plot(PCA_FAR, PCA_FRR, 'LineWidth', 2); 41 | plot(LDA_Instance_FAR, LDA_Instance_FRR, 'LineWidth', 2); 42 | plot(PCA_Instance_FAR, PCA_Instance_FRR, 'LineWidth', 2); 43 | 44 | hold off; 45 | legend('LDA', 'PCA', 'LDA Instance', 'PCA Instance'); 46 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/PCA_Instance_Main.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------------------------------------------------------------------- 2 | % PCA Model for Multi-Instance 3 | % --------------------------------------------------------------------------------------------------- 4 | 5 | function[PCA_GI_Instance, PCA_Labels_Instance] = PCA_Instance_Main() 6 | 7 | % Train and test projected values 8 | [featureMatrixAgg, testProjectAgg] = PCA_train_test(); 9 | 10 | % Calculate Genuine Scores & Impostor Scores for PCA Instance 11 | [ PCA_GI_Instance ] = GICalc_Instance_PCA(featureMatrixAgg, testProjectAgg); 12 | 13 | % PCA Instance Labels 14 | % Initialize label matrix as 1 15 | PCA_Labels_Instance = ones(size(PCA_GI_Instance,1), size(PCA_GI_Instance,1)); 16 | 17 | % Replace the label with 0 when it's the right class 18 | for face = 1:40 19 | PCA_Labels_Instance(face, face) = 0; 20 | end 21 | 22 | end 23 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/PCA_Label_TransformGI.m: -------------------------------------------------------------------------------- 1 | % Create Label and transform GI vector to 0-1 2 | 3 | function [testLabels, GI_convert]=PCA_Label_TransformGI(GI) 4 | 5 | % Labels for data 6 | Labels = zeros(size(GI,1),size(GI,1)); 7 | for rowCount = 0:10:size(GI,1)-10 8 | Labels(rowCount+1:rowCount + 10, rowCount+1:rowCount + 10) = ones(10, 10); 9 | end 10 | 11 | % Convert GI to 0 or 1 : Cut off Point 0.9 12 | GI_convert = zeros(size(GI,1),size(GI,1)); 13 | 14 | for row = 1:size(GI,1) 15 | for col = 1:size(GI,2) 16 | if GI(row,col) < 0.9 17 | GI_convert(row, col) = 1; 18 | end 19 | 20 | end 21 | end 22 | testLabels = [ones(1, 10), zeros(1, size(GI,1)-10 )]'; 23 | 24 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/PCA_Main.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------------------------------------------------------------------- 2 | % PCA Mode 1 : 1-5 Images as Training and 6-10 Images as Testing 3 | % --------------------------------------------------------------------------------------------------- 4 | 5 | function[PCA_GI, PCA_Labels] = PCA_Main() 6 | 7 | % Calculate Genuine Scores 8 | [GenuineVec, featureMatrixAgg, testProjectAgg] = GenuineCalc_PCA(); 9 | 10 | % Calculate Impostor Scores 11 | [ImpostorVec] = ImpostorCalc_PCA(featureMatrixAgg, testProjectAgg); 12 | 13 | % Genuine + Impostor 14 | PCA_GI = [GenuineVec, ImpostorVec]; 15 | 16 | % Create Labels + Transform GI to 0-1 17 | [PCA_Labels, GI_convert] = 
PCA_Label_TransformGI(PCA_GI); 18 | 19 | 20 | end 21 | -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/PCA_Model.m: -------------------------------------------------------------------------------- 1 | % Principal Component Analysis (PCA Model) 2 | 3 | function [featureMatrix, testProject, GenuineScore] = PCA_Model(trainData, testData) 4 | 5 | % Unpack Cell to Vector 6 | trainData_V = cell2mat(trainData); 7 | testData_V = cell2mat(testData); 8 | 9 | % Get size of matrix 10 | [trainRow, trainCol] = size(trainData_V); 11 | [testRow, testCol] = size(testData_V); 12 | 13 | % Calculate the mean for each person's img data 14 | trainMean = mean((trainData_V)')'; 15 | testMean = mean((testData_V)')'; 16 | 17 | % Center the train data ( img - mean for each person's imgs ) 18 | trainCenter = trainData_V - repmat(trainMean,1,trainCol); 19 | 20 | % Calculate Covariance Matrix 21 | train_CoVar = trainCenter * trainCenter'; 22 | 23 | % Calculate Eignevalues and Eigenvectors 24 | [ eigvector,eignval ] = eig(train_CoVar); 25 | 26 | % Sort eigenvector in descending order by correspoding eigenvalue 27 | eigval_diagnal = diag(eignval); 28 | [other, index] = sort(eigval_diagnal,'descend'); 29 | eigval_diagnal = eigval_diagnal(index); 30 | eigvector_sort = eigvector(:, index); 31 | 32 | % Pick out eigen values based on threshold 33 | countNumEig = 0; 34 | for count = 1:size(eigval_diagnal,1) 35 | if(eigval_diagnal(count)>0) 36 | countNumEig = countNumEig + 1; 37 | end 38 | end 39 | 40 | % Filtered eigen vectors 41 | filteredEigVec = eigvector_sort(:, 1:countNumEig); 42 | 43 | % Calculate Feature Matrix (Train Projected Space) 44 | featureMatrix = filteredEigVec' * trainCenter; 45 | 46 | % Center the test data ( img - mean for each person's imgs ) 47 | testCenter = testData_V - repmat(testMean,1,testCol); 48 | 49 | % Project testing data to the Train Projected Space 50 | testProject = filteredEigVec' * testCenter; 51 | 52 | % Genuine Scores 53 | GenuineScore = pdist2(featureMatrix, testProject, 'euclidean'); 54 | 55 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/PCA_imgPrep.m: -------------------------------------------------------------------------------- 1 | % Load in images to create image vectos for testing and training / each person 2 | 3 | function [ train_img_vec, test_img_vec, train_Label, test_Label, train_Label2, test_Label2] = PCA_imgPrep(faceNum, sort_TrainCombo, sort_TestCombo) 4 | 5 | % Read in image values for each person 6 | faceNumM = int2str(faceNum); 7 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 8 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 9 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 10 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 11 | img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 12 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 13 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 14 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 15 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 16 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 17 | 18 | % Resize image into 10 x 10 pixels 19 | resize1 = imresize(img1R,[10 10]); 20 | resize2 = imresize(img2R,[10 10]); 21 | resize3 = imresize(img3R,[10 10]); 22 | resize4= imresize(img4R,[10 10]); 23 | resize5 = imresize(img5R,[10 10]); 24 | resize6 = 
imresize(img6R,[10 10]); 25 | resize7 = imresize(img7R,[10 10]); 26 | resize8 = imresize(img8R,[10 10]); 27 | resize9 = imresize(img9R,[10 10]); 28 | resize10 = imresize(img10R,[10 10]); 29 | 30 | % Convert image pixels from unit8 to double 31 | img1 = im2double(resize1); 32 | img2 = im2double(resize2); 33 | img3 = im2double(resize3); 34 | img4= im2double(resize4); 35 | img5 = im2double(resize5); 36 | img6 = im2double(resize6); 37 | img7 = im2double(resize7); 38 | img8 = im2double(resize8); 39 | img9 = im2double(resize9); 40 | img10 = im2double(resize10); 41 | 42 | % Randomly pick 5 for training and 5 for testing 43 | data_all = (1:10); 44 | trainDataPick = randperm(10,5); 45 | testDataPick = setdiff(data_all, trainDataPick); 46 | 47 | % Grab the images for training picks and convert each image into cell array 48 | % representation 49 | train_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 50 | {eval(strcat('img', int2str(trainDataPick(4))))} {eval(strcat('img', int2str(trainDataPick(5))))} ]; 51 | 52 | % Train Label for each category 53 | train_Label(1, 1:5) = faceNum; 54 | train_Label2(1, 1:length(sort_TrainCombo)) = faceNum; 55 | 56 | % Same for testing picks 57 | test_img_vec = [{eval(strcat('img', int2str(testDataPick(1))))} {eval(strcat('img', int2str(testDataPick(2))))} {eval(strcat('img', int2str(testDataPick(3))))} ... 58 | {eval(strcat('img', int2str(testDataPick(4))))} {eval(strcat('img', int2str(testDataPick(5))))}]; 59 | 60 | test_Label(1, 1:5) = faceNum; 61 | test_Label2(1, 1:length(sort_TestCombo)) = faceNum; 62 | 63 | end -------------------------------------------------------------------------------- /Facial Recognition - Fusions/Part B - Multi-instance Fusion/PCA_train_test.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine Scores 2 | 3 | function [featureMatrixAgg, testProjectAgg] = PCA_train_test() 4 | 5 | % Initialize vector 6 | GenuineVec = []; 7 | featureMatrixAgg = []; 8 | testProjectAgg = []; 9 | 10 | % Load Images to set up train and test data by person 11 | % Calculate Genuine Scores first 12 | for faceNum = 1 : 40 13 | [ trainData, testData, train_Label, test_Label, train_GLabel, testG_Label ] = PCA_imgPrep(faceNum, 0, 0); 14 | 15 | % Mode #1 - PCA 16 | [featureMatrix, testProject, GenuineScore] = PCA_Model(trainData, testData); 17 | 18 | % Store each person's feature matrix in a multidimensional array 19 | featureMatrixAgg = cat(3, featureMatrixAgg, featureMatrix); 20 | testProjectAgg = cat(3, testProjectAgg, testProject); 21 | 22 | 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder OldSchool/AutoEncoder_PartA.m: -------------------------------------------------------------------------------- 1 | % -------------------------------------------------------------------------------------------- 2 | % Final Project - Facial Recognition : AutoEncoder 3 | % Part A 4 | % -------------------------------------------------------------------------------------------- 5 | clc 6 | clear all 7 | 8 | % Setup data for all categories and labeled them as One Hot 9 | [train_Shuffle_D, train_T, test_Shuffle_D, test_T] = OneHotLabel(); 10 | 11 | % Autoencoder 12 | [autoenc1, feature1, autoenc2, feature2] = autoEncoderModel(train_Shuffle_D, train_T, test_Shuffle_D, test_T); 
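Note on the image-preparation helpers: the imgPrep, LDA_imgPrep, PCA_imgPrep and imageCategory files in this repository all repeat the same pattern -- read the ten faceImg\s<N>\<k>.pgm images for one subject, resize and convert them to double, then split them at random into training and testing picks, with each image held in its own img1..img10 variable and reassembled through eval. Below is a minimal loop-based sketch of that same pattern, not one of the original files; the function name prepSubjectImages and its arguments are hypothetical, while the folder layout, the imresize/im2double conversion and the randperm/setdiff split are taken from the files above.

% Minimal sketch (not part of the original sources): loop-based equivalent of
% the repeated imgPrep/imageCategory pattern. The function name and arguments
% are hypothetical; the faceImg\s<N>\<k>.pgm layout and the random split come
% from the files above.
function [train_img_vec, test_img_vec, train_Label, test_Label] = prepSubjectImages(faceNum, nTrain, imgSize)

    % Read all ten images for this subject into one cell array
    imgs = cell(1, 10);
    for k = 1:10
        raw = imread(fullfile('faceImg', ['s' int2str(faceNum)], [int2str(k) '.pgm']));
        imgs{k} = im2double(imresize(raw, imgSize));   % e.g. imgSize = [10 10]
    end

    % Random split into training and testing picks (e.g. nTrain = 5 or 6)
    trainPick = randperm(10, nTrain);
    testPick  = setdiff(1:10, trainPick);

    train_img_vec = imgs(trainPick);
    test_img_vec  = imgs(testPick);

    % One label per picked image, all equal to the subject index
    train_Label = repmat(faceNum, 1, numel(trainPick));
    test_Label  = repmat(faceNum, 1, numel(testPick));
end

Called as, for example, [tr, te, trL, teL] = prepSubjectImages(faceNum, 5, [10 10]) inside the per-subject loop, this produces the same cell-array-of-images and label-row outputs that the unrolled helpers return.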
-------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder OldSchool/OneHotLabel.m: -------------------------------------------------------------------------------- 1 | % Convert Labels into One Hot Encoding Labels 2 | function [train_Shuffle_D, train_T, test_Shuffle_D, test_T] = OneHotLabel() 3 | 4 | % Call function setupData for train and test shuffle data and labels 5 | [train_Shuffle_D, train_Shuffle_T, test_Shuffle_D, test_Shuffle_T] = setupData; 6 | 7 | % Initialize Targets as zeros 8 | train_T = zeros(40, 240); 9 | test_T = zeros(40, 160); 10 | 11 | % Mark 1 with the corresponding index for train and test labels 12 | for trainLoop = 1:length(train_Shuffle_T) 13 | train_T(train_Shuffle_T(trainLoop), trainLoop) = 1; 14 | end 15 | 16 | for testLoop = 1:length(test_Shuffle_T) 17 | test_T(test_Shuffle_T(testLoop), testLoop) = 1; 18 | end 19 | 20 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder OldSchool/autoEncoderModel.m: -------------------------------------------------------------------------------- 1 | function [autoenc1, feature1, autoenc2, feature2] = autoEncoderModel(train_Shuffle_D, train_T, test_Shuffle_D, test_T) 2 | 3 | % Autoencoder 4 | hiddenSize1 = 100; 5 | 6 | % Train First AutoEncoder 7 | autoenc1 = trainAutoencoder(train_Shuffle_D,hiddenSize1, ... 8 | 'MaxEpochs',400, ... 9 | 'L2WeightRegularization',0.004, ... 10 | 'SparsityRegularization',4, ... 11 | 'SparsityProportion',0.15, ... 12 | 'ScaleData', false); 13 | 14 | % Visualize weights of the first AutoEncoder 15 | figure(1) 16 | plotWeights(autoenc1); 17 | 18 | % Generate Features from the first AutoEncoder 19 | feature1 = encode(autoenc1, train_Shuffle_D); 20 | 21 | % Train Second AutoEncoder 22 | hiddenSize2 = 50; 23 | autoenc2 = trainAutoencoder(feature1,hiddenSize2, ... 24 | 'MaxEpochs',200, ... 25 | 'L2WeightRegularization',0.002, ... 26 | 'SparsityRegularization',1, ... 27 | 'SparsityProportion',0.3, ... 
28 | 'ScaleData', false); 29 | 30 | % Generate Second Set of Features by passing the previous feature 31 | feature2 = encode(autoenc2,feature1); 32 | 33 | % Train Final Layer : Softmax 34 | softmax = trainSoftmaxLayer(feature2, train_T,'MaxEpochs',400); 35 | 36 | % Stacked Neural Network 37 | StackedDeepNet = stack(autoenc1, autoenc2, softmax); 38 | 39 | % Reshape pixels in each image in the testing data 40 | imageWidth = 112; 41 | imageHeight = 92; 42 | inputSize = imageWidth*imageHeight; 43 | 44 | % Convert the test images into vectors and store in a matrix 45 | convertTest = zeros(inputSize,numel(test_Shuffle_D)); 46 | for i = 1:numel(test_Shuffle_D) 47 | convertTest(:,i) = test_Shuffle_D{i}(:); 48 | end 49 | 50 | % Confusion Matrix 51 | predictTest = StackedDeepNet(convertTest); 52 | plotconfusion(test_T,predictTest); 53 | set(findobj(gca,'type','text'),'fontsize',4.5) 54 | 55 | % Fine Tune the Model 56 | xTrain = zeros(inputSize,numel(train_Shuffle_D)); 57 | for i = 1:numel(train_Shuffle_D) 58 | xTrain(:,i) = train_Shuffle_D{i}(:); 59 | end 60 | 61 | % Create the deep net 62 | deepnet = train(StackedDeepNet,xTrain,train_T); 63 | 64 | predictTest_DN = deepnet(convertTest); 65 | plotconfusion(test_T,predictTest_DN); 66 | set(findobj(gca,'type','text'),'fontsize',4.5) 67 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder OldSchool/imageCategory.m: -------------------------------------------------------------------------------- 1 | % Load in images to create image vectors for testing and training / each person 2 | 3 | function [ train_img_vec, train_Label, test_img_vec, test_Label ] = imageCategory(faceNum) 4 | 5 | % Read in image values for each person 6 | faceNumM = int2str(faceNum); 7 | 8 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 9 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 10 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 11 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 12 | img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 13 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 14 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 15 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 16 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 17 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 18 | 19 | img1 = im2double(img1R); 20 | img2 = im2double(img2R); 21 | img3 = im2double(img3R); 22 | img4= im2double(img4R); 23 | img5 = im2double(img5R); 24 | img6 = im2double(img6R); 25 | img7 = im2double(img7R); 26 | img8 = im2double(img8R); 27 | img9 = im2double(img9R); 28 | img10 = im2double(img10R); 29 | 30 | % Randomly pick 6 for training and 4 for testing 31 | data_all = (1:10); 32 | trainDataPick = randperm(10,6); 33 | testDataPick = setdiff(data_all, trainDataPick); 34 | 35 | % Grab the images for training picks and convert each image into cell array 36 | % representation 37 | train_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 
38 | {eval(strcat('img', int2str(trainDataPick(4))))} {eval(strcat('img', int2str(trainDataPick(5))))} {eval(strcat('img', int2str(trainDataPick(6))))}]; 39 | 40 | % Train Label for each category 41 | train_Label(1, 1:6) = faceNum; 42 | 43 | % Same for testing picks 44 | test_img_vec = [{eval(strcat('img', int2str(testDataPick(1))))} {eval(strcat('img', int2str(testDataPick(2))))} {eval(strcat('img', int2str(testDataPick(3))))} ... 45 | {eval(strcat('img', int2str(testDataPick(4))))}]; 46 | test_Label(1, 1:4) = faceNum; 47 | 48 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder OldSchool/setupData.m: -------------------------------------------------------------------------------- 1 | % Set up train (60%) and testing data (40%) for all 40 people 2 | function [train_Shuffle_D, train_Shuffle_T, test_Shuffle_D, test_Shuffle_T] = setupData 3 | 4 | % Initialize Cell Arrays to store all train and test Data 5 | trainData ={}; 6 | testData = {}; 7 | trainTarget = []; 8 | testTarget = []; 9 | 10 | % Loop through all 40 people and prep for training and testing dataset 11 | for faceNum = 1:40 12 | [ train_img_vec, train_Label, test_img_vec, test_Label ] = imageCategory(faceNum); 13 | % Set up Train + Test Image Data 14 | trainData = [trainData train_img_vec]; 15 | testData = [testData test_img_vec]; 16 | 17 | % Set Up Train + Test Image Label 18 | trainTarget = [trainTarget train_Label]; 19 | testTarget = [testTarget test_Label]; 20 | end 21 | 22 | % Shuffle index for train and test 23 | trainShuffle_Idx = randperm(240,240); 24 | testShuffle_Idx = randperm(160,160); 25 | 26 | % Reorder data and label for training and testing 27 | train_Shuffle_D = {}; 28 | test_Shuffle_D = {}; 29 | train_Shuffle_T = []; 30 | test_Shuffle_T = []; 31 | 32 | % Shuffled Train Data + Label 33 | for trainShuf = 1:240 34 | train_Shuffle_D = [ train_Shuffle_D trainData(trainShuffle_Idx(trainShuf)) ]; 35 | train_Shuffle_T = [ train_Shuffle_T trainTarget(trainShuffle_Idx(trainShuf)) ]; 36 | end 37 | 38 | % Shuffle Test Data + Label 39 | for testShuf = 1:160 40 | %resizeTest = imresize(cell2mat(testData(testShuffle_Idx(testShuf)), [50, 50])); 41 | test_Shuffle_D = [ test_Shuffle_D testData(testShuffle_Idx(testShuf)) ]; 42 | test_Shuffle_T = [ test_Shuffle_T testTarget(testShuffle_Idx(testShuf)) ]; 43 | end 44 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/AutoEncoder_PartA.m: -------------------------------------------------------------------------------- 1 | % -------------------------------------------------------------------------------------------- 2 | % Final Project - Facial Recognition : AutoEncoder 3 | % Part A 4 | % -------------------------------------------------------------------------------------------- 5 | clc 6 | clear all 7 | 8 | % Load Images to set up train and test data & targets 9 | [trainData, trainTarget, testData, testTarget] = setupData; 10 | [ train_T, test_T ] = OneHotLabel(trainTarget, testTarget); 11 | 12 | % Train + Test AutoEncoder 13 | [autoenc1, feature1, autoenc2, feature2, predictTest, predictTest_DN ] = autoEncoderModel(trainData, train_T, testData, test_T); 14 | 15 | % Calculate MSE for Genuine and Imposter 16 | % Train - feature 1 & feature 2 17 | [genuineMSE_Train, reshapeFeatureTrain] = trainGenuineMSE(feature1); 18 | [imposterMSE_Train] = trainImposterMSE(reshapeFeatureTrain); 19 | 20 | [genuineMSE_Train2, 
reshapeFeatureTrain2] = trainGenuineMSE(feature2); 21 | [imposterMSE_Train2] = trainImposterMSE(reshapeFeatureTrain2); 22 | 23 | % Test 24 | [ genuineMSE_Test, reshapeFeatureTest] = testGenuineMSE(predictTest); 25 | [ imposterMSE_Test] = testImposterMSE(reshapeFeatureTest); 26 | % Deep Net Test 27 | [ genuineMSE_Test_DN, reshapeFeatureTest_DN] = testGenuineMSE(predictTest_DN); 28 | [ imposterMSE_Test_DN] = testImposterMSE(reshapeFeatureTest_DN); 29 | 30 | % Geuine + Imposter Data for Train and Test 31 | genuine_Imposter_Train = [genuineMSE_Train imposterMSE_Train]; 32 | genuine_Imposter_Test = [genuineMSE_Test imposterMSE_Test]; 33 | genuine_Imposter_Train2 = [genuineMSE_Train2 imposterMSE_Train2]; 34 | genuine_Imposter_Test_DN = [genuineMSE_Test_DN imposterMSE_Test_DN]; 35 | 36 | % Geuine + Imposter Labels for Train and Test 37 | [GT_Train, GT_Test] = GenuineLabel(genuineMSE_Train, genuineMSE_Test); 38 | [IT_Train, IT_Test] = ImposterLabel(imposterMSE_Train, imposterMSE_Test); 39 | 40 | GI_Train = [ GT_Train IT_Train]; 41 | GI_Test = [ GT_Test IT_Test]; 42 | 43 | ezroc3(predictTest, test_T); 44 | saveas(figure(2), [pwd '\Graphs\5\predictROC.fig']); 45 | ezroc3(predictTest_DN, test_T); 46 | saveas(figure(3), [pwd '\Graphs\5\predictDNROC.fig']); 47 | 48 | % ROC from Train and Test Geuine + Imposter Scores 49 | ezroc3(-genuine_Imposter_Train, GI_Train); 50 | saveas(figure(4), [pwd '\Graphs\5\GeuineImposter_TrainROC.fig']); 51 | 52 | ezroc3(-genuine_Imposter_Test, GI_Test); 53 | saveas(figure(5), [pwd '\Graphs\5\GeuineImposter_TestROC.fig']); 54 | 55 | ezroc3(-genuine_Imposter_Train2, GI_Train); 56 | saveas(figure(6), [pwd '\Graphs\5\GeuineImposter_Train2ROC.fig']); 57 | 58 | ezroc3(-genuine_Imposter_Test_DN, GI_Test); 59 | saveas(figure(7), [pwd '\Graphs\5\GeuineImposter_Test_DNROC.fig']); -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/GenuineLabel.m: -------------------------------------------------------------------------------- 1 | % Set up Genuine Labels for Train and Test 2 | function [GT_Train, GT_Test] = GenuineLabel(genuineMSE_Train, genuineMSE_Test) 3 | 4 | % Label/Targets for Geuine 5 | GT_Train = ones(1, length(genuineMSE_Train)); 6 | GT_Test = ones(1, length(genuineMSE_Test)); 7 | 8 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/ImposterLabel.m: -------------------------------------------------------------------------------- 1 | % Set up Labels for Imposter Train and Test 2 | 3 | function [IT_Train, IT_Test] = ImposterLabel(imposterMSE_Train, imposterMSE_Test) 4 | 5 | % Label/Targets for Imposter 6 | IT_Train = zeros(1, length(imposterMSE_Train)); 7 | IT_Test = zeros(1, length(imposterMSE_Test)); 8 | end 9 | -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/OneHotLabel.m: -------------------------------------------------------------------------------- 1 | % Convert Labels into One Hot Encoding Labels 2 | function [ train_T, test_T ] = OneHotLabel(trainTarget, testTarget) 3 | 4 | % Initialize Targets as zeros 5 | train_T = zeros(40, length(trainTarget)); 6 | test_T = zeros(40, length(testTarget)); 7 | 8 | % Mark 1 with the corresponding index for train and test labels 9 | for trainLoop = 1:length(trainTarget) 10 | train_T(trainTarget(trainLoop), trainLoop) = 1; 11 | end 12 | 13 | for testLoop = 1:length(testTarget) 14 | 
test_T(testTarget(testLoop), testLoop) = 1; 15 | end 16 | 17 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/autoEncoderModel.m: -------------------------------------------------------------------------------- 1 | function [autoenc1, feature1, autoenc2, feature2, predictTest, predictTest_DN ] = autoEncoderModel(trainData, train_T, testData, test_T) 2 | 3 | % Autoencoder 4 | hiddenSize1 = 100; 5 | 6 | % Train First AutoEncoder 7 | autoenc1 = trainAutoencoder(trainData,hiddenSize1, ... 8 | 'MaxEpochs',400, ... 9 | 'L2WeightRegularization',0.006, ... 10 | 'SparsityRegularization',5, ... 11 | 'SparsityProportion',0.2, ... 12 | 'ScaleData', false); 13 | 14 | % Visualize weights of the first AutoEncoder 15 | figure(1) 16 | plotWeights(autoenc1); 17 | 18 | % Generate Features from the first AutoEncoder 19 | feature1 = encode(autoenc1, trainData); 20 | 21 | % Train Second AutoEncoder 22 | hiddenSize2 = 50; 23 | autoenc2 = trainAutoencoder(feature1,hiddenSize2, ... 24 | 'MaxEpochs',200, ... 25 | 'L2WeightRegularization',0.004, ... 26 | 'SparsityRegularization',1, ... 27 | 'SparsityProportion',0.4, ... 28 | 'ScaleData', false); 29 | 30 | % Generate Second Set of Features by passing the previous feature 31 | feature2 = encode(autoenc2,feature1); 32 | 33 | % Train Final Layer : Softmax 34 | softmax = trainSoftmaxLayer(feature2, train_T,'MaxEpochs',400); 35 | 36 | % Stacked Nerual Network 37 | StackedDeepNet = stack(autoenc1, autoenc2, softmax); 38 | 39 | % Reshape pixels in each image in the testing data 40 | imageWidth = 112; 41 | imageHeight = 92; 42 | inputSize = imageWidth*imageHeight; 43 | 44 | % Convert the test images into vectors and store in a matrix 45 | convertTest = zeros(inputSize,numel(testData)); 46 | for i = 1:numel(testData) 47 | convertTest(:,i) = testData{i}(:); 48 | end 49 | 50 | % Confusion Matrix 51 | predictTest = StackedDeepNet(convertTest); 52 | %plotconfusion(test_T,predictTest); 53 | set(findobj(gca,'type','text'),'fontsize',4.5) 54 | 55 | % Fine Tune the Model 56 | xTrain = zeros(inputSize,numel(trainData)); 57 | for i = 1:numel(trainData) 58 | xTrain(:,i) = trainData{i}(:); 59 | end 60 | 61 | % Creat the deep net 62 | deepnet = train(StackedDeepNet,xTrain,train_T); 63 | 64 | % Another Confuion Matrix 65 | predictTest_DN = deepnet(convertTest); 66 | %plotconfusion(test_T,predictTest_DN); 67 | set(findobj(gca,'type','text'),'fontsize',4.5) 68 | 69 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/genuineMSE.m: -------------------------------------------------------------------------------- 1 | % Create pair images for input data 2 | % Calculate MSE and assign labels 3 | function[geuineMSE_Train, geuineMSE_Test, GT_Train, GT_Test] = genuineMSE(trainData, testData) 4 | 5 | geuineMSE_Train = []; 6 | geuineMSE_Test = []; 7 | 8 | for idx = 1:length(trainData) 9 | temp_mseImg = immse(trainData{1, idx}{1, 1}, trainData{1, idx}{1, 2}); 10 | geuineMSE_Train = [geuineMSE_Train temp_mseImg]; 11 | end 12 | 13 | for idx2 = 1:length(testData) 14 | temp_mseImgT = immse(testData{1, idx2}{1, 1}, testData{1, idx2}{1, 2}); 15 | geuineMSE_Test = [geuineMSE_Test temp_mseImgT]; 16 | end 17 | 18 | % Label/Targets for Geuine 19 | GT_Train = ones(1, length(geuineMSE_Train)); 20 | GT_Test = ones(1, length(geuineMSE_Test)); 21 | end 22 | 23 | -------------------------------------------------------------------------------- 
/Facial Recognition-Autoencoder/AutoEncoder Part A/imageCategory.m: -------------------------------------------------------------------------------- 1 | % Load in images to create image vectos for testing and training / each person 2 | 3 | function [ train_img_vec, test_img_vec, train_Label, test_Label, train_Label2, test_Label2] = imageCategory(faceNum, sort_TrainCombo, sort_TestCombo) 4 | 5 | % Read in image values for each person 6 | faceNumM = int2str(faceNum); 7 | faceNumM = int2str(1); 8 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 9 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 10 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 11 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 12 | img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 13 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 14 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 15 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 16 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 17 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 18 | 19 | % Resize image into 90 x 90 pixels 20 | resize1 = imresize(img1R,[90 90]); 21 | resize2 = imresize(img2R,[90 90]); 22 | resize3 = imresize(img3R,[90 90]); 23 | resize4= imresize(img4R,[90 90]); 24 | resize5 = imresize(img5R,[90 90]); 25 | resize6 = imresize(img6R,[90 90]); 26 | resize7 = imresize(img7R,[90 90]); 27 | resize8 = imresize(img8R,[90 90]); 28 | resize9 = imresize(img9R,[90 90]); 29 | resize10 = imresize(img10R,[90 90]); 30 | 31 | % Convert image pixels from unit8 to double 32 | img1 = im2double(img1R); 33 | img2 = im2double(img2R); 34 | img3 = im2double(img3R); 35 | img4= im2double(img4R); 36 | img5 = im2double(img5R); 37 | img6 = im2double(img6R); 38 | img7 = im2double(img7R); 39 | img8 = im2double(img8R); 40 | img9 = im2double(img9R); 41 | img10 = im2double(img10R); 42 | 43 | % Randomly pick 6 for training and 4 for testing 44 | data_all = (1:10); 45 | trainDataPick = randperm(10,6); 46 | testDataPick = setdiff(data_all, trainDataPick); 47 | 48 | % Grab the images for training picks and convert each image into cell array 49 | % representation 50 | train_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 51 | {eval(strcat('img', int2str(trainDataPick(4))))} {eval(strcat('img', int2str(trainDataPick(5))))} {eval(strcat('img', int2str(trainDataPick(6))))}]; 52 | 53 | % Train Label for each category 54 | train_Label(1, 1:6) = faceNum; 55 | train_Label2(1, 1:length(sort_TrainCombo)) = faceNum; 56 | 57 | % Same for testing picks 58 | test_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 
59 | {eval(strcat('img', int2str(trainDataPick(4))))}]; 60 | 61 | test_Label(1, 1:4) = faceNum; 62 | test_Label2(1, 1:length(sort_TestCombo)) = faceNum; 63 | 64 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/imposterMSE.m: -------------------------------------------------------------------------------- 1 | % Create pair images for input data 2 | % Calculate MSE and assign labels 3 | function[imposterMSE_Train, imposterMSE_Test, IT_Train, IT_Test] = imposterMSE(trainIData, testIData) 4 | 5 | imposterMSE_Train = []; 6 | imposterMSE_Test = []; 7 | 8 | for idx = 1:length(trainIData) 9 | temp_mseImg = immse(trainIData{1, idx}{1, 1}, trainIData{1, idx}{1, 2}); 10 | imposterMSE_Train = [imposterMSE_Train temp_mseImg]; 11 | end 12 | 13 | for idx2 = 1:length(testIData) 14 | temp_mseImgT = immse(testIData{1, idx2}{1, 1}, testIData{1, idx2}{1, 2}); 15 | imposterMSE_Test = [imposterMSE_Test temp_mseImgT]; 16 | end 17 | 18 | 19 | % Label/Targets for Imposter 20 | IT_Train = zeros(1, length(imposterMSE_Train)); 21 | IT_Test = zeros(1, length(imposterMSE_Test)); 22 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/setupData.m: -------------------------------------------------------------------------------- 1 | % Set up train (60%) and testing data (40%) for all 40 people 2 | function [trainData, trainTarget, testData, testTarget] = setupData 3 | 4 | % Initialize Cell Arrays to store all train and test Data 5 | trainData ={}; 6 | testData = {}; 7 | trainTarget = []; 8 | testTarget = []; 9 | 10 | % Loop through all 40 people and prep for training and testing dataset 11 | for faceNum = 1:40 12 | [ train_img_vec, test_img_vec, train_Label, test_Label, train_GLabel, testG_Label ] = imageCategory(faceNum, 0, 0); 13 | 14 | % Set up Train + Test Image Data 15 | trainData = [trainData train_img_vec]; 16 | testData = [testData test_img_vec]; 17 | 18 | % Set Up Train + Test Image Label 19 | trainTarget = [trainTarget train_Label]; 20 | testTarget = [testTarget test_Label]; 21 | end 22 | 23 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/testGenuineMSE.m: -------------------------------------------------------------------------------- 1 | % Test MSE for Genuine 2 | function [genuineMSE_Test, reshapeFeatureTest] = testGenuineMSE(predictTest) 3 | 4 | % Initialize 5 | reshapeFeatureTest = []; 6 | genuineMSE_Test = []; 7 | 8 | % Combo index list 9 | sort_TestCombo = combnk(1:4,2); 10 | 11 | % Resize Feature 12 | for num=1:length(predictTest) 13 | reshapeFeatureTest = [reshapeFeatureTest mean(predictTest(:, num))]; 14 | end 15 | 16 | featureGTest = reshape(reshapeFeatureTest, 40, 4); 17 | 18 | % Set up Train MSE for Gunuine 19 | for faceNum = 1:length(featureGTest) 20 | for col = 1:length(sort_TestCombo) 21 | temp_test_pair = [mse(featureGTest(faceNum, sort_TestCombo(col, 1)), featureGTest(faceNum, sort_TestCombo(col, 2)))]; 22 | genuineMSE_Test = [genuineMSE_Test temp_test_pair]; 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/testImposterMSE.m: -------------------------------------------------------------------------------- 1 | % Train MSE for Imposter 2 | function [imposterMSE_Test] = testImposterMSE(reshapeFeatureTest) 3 | 4 | % Combo index list 5 
| testIdx = combnk(1:length(reshapeFeatureTest), 2); 6 | 7 | % Find index numbers of pairs that compare an image with itself 8 | deleteTest = []; 9 | 10 | for num = 1:length(testIdx) 11 | if testIdx(num, 2) < 6 12 | deleteTest = [deleteTest num]; 13 | end 14 | end 15 | 16 | % Take out the rows that compare an image with itself 17 | for d = 1:length(deleteTest) 18 | testIdx(d, :) = []; 19 | end 20 | 21 | % Initialize vector to store all imposter test MSE scores 22 | imposterMSE_Test =[]; 23 | 24 | % Loop through the remaining index pairs and compute the imposter MSE 25 | for testLoop = 1:length(testIdx) 26 | temp_test_pair = [mse(reshapeFeatureTest(1, testIdx(testLoop, 1)), reshapeFeatureTest(1, testIdx(testLoop, 2)))]; 27 | imposterMSE_Test = [imposterMSE_Test temp_test_pair]; 28 | disp(testLoop); 29 | temp_test_pair = []; 30 | 31 | end 32 | 33 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/trainGenuineMSE.m: -------------------------------------------------------------------------------- 1 | % Train MSE for Genuine - Person 2 | function [genuineMSE_Train, reshapeFeatureTrain] = trainGenuineMSE(feature) 3 | 4 | % Initialize 5 | reshapeFeatureTrain = []; 6 | genuineMSE_Train = []; 7 | 8 | % Collapse each feature column to its mean value 9 | for num=1:length(feature) 10 | reshapeFeatureTrain = [reshapeFeatureTrain mean(feature(:, num))]; 11 | end 12 | 13 | featureG = reshape(reshapeFeatureTrain, 40, 6); 14 | 15 | % Combo index list for the 6 training images per person 16 | sort_TrainCombo = combnk(1:6,2); 17 | 18 | % Set up Train MSE for Genuine 19 | for faceNum = 1:length(featureG) 20 | for col = 1:length(sort_TrainCombo) 21 | temp_train_pair = [mse(featureG(faceNum, sort_TrainCombo(col, 1)), featureG(faceNum, sort_TrainCombo(col, 2)))]; 22 | genuineMSE_Train = [genuineMSE_Train temp_train_pair]; 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part A/trainImposterMSE.m: -------------------------------------------------------------------------------- 1 | % Train MSE for Imposter 2 | function [imposterMSE_Train] = trainImposterMSE(reshapeFeatureTrain) 3 | 4 | % Combo index list 5 | trainIdx = combnk(1:length(reshapeFeatureTrain),2); 6 | 7 | % Find index numbers of pairs that compare an image with itself 8 | deleteTrain = []; 9 | %deleteTest = []; 10 | 11 | for num1 = 1:length(trainIdx) 12 | if trainIdx(num1, 2) < 6 13 | deleteTrain = [deleteTrain num1]; 14 | end 15 | end 16 | 17 | % Take out the rows that compare an image with itself 18 | for d = 1:length(deleteTrain) 19 | trainIdx(d, :) = []; 20 | end 21 | 22 | % Initialize vector to store all imposter train MSE scores 23 | imposterMSE_Train =[]; 24 | 25 | % Loop through the remaining index pairs and compute the imposter MSE 26 | for trainLoop = 1:length(trainIdx) 27 | temp_train_pair = [mse(reshapeFeatureTrain(1, trainIdx(trainLoop, 1)), reshapeFeatureTrain(1, trainIdx(trainLoop, 2)))]; 28 | imposterMSE_Train = [imposterMSE_Train temp_train_pair]; 29 | disp(trainLoop); 30 | temp_train_pair = []; 31 | 32 | end 33 | 34 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part B/AutoEncoder_PartB.m: -------------------------------------------------------------------------------- 1 | % -------------------------------------------------------------------------------------------- 2 | % Final Project - Facial Recognition : AutoEncoder 3 | % Part B - Train Autoencoder by person 4 | % -------------------------------------------------------------------------------------------- 5 | clc 6 | clear all 7 | 8 | % Initialize empty 
vector to store train and test result from all 40 people 9 | personTrainResult = []; 10 | personTestResult = []; 11 | 12 | 13 | % Load Images to set up all train and test data & targets 14 | [ trainData, trainTarget, testData, testTarget ] = setupData; 15 | [ train_T, test_T ] = OneHotLabel(trainTarget, testTarget); 16 | 17 | % Train/Test Autocencoder for each person 18 | for person = 1:40 19 | % Train and Test Targets 20 | trainPT = train_T(person, :); 21 | testPT = test_T(person, :); 22 | 23 | % AutoEncoder Model 24 | [autoenc1, feature1, autoenc2, feature2, predictTest, predictTrain] = autoencoderModel(trainData, trainPT, testData); 25 | 26 | % Store the train and test results from each person 27 | personTrainResult = [personTrainResult; predictTrain]; 28 | personTestResult = [personTestResult; predictTest]; 29 | 30 | end 31 | 32 | % Plot Train + Test ROC 33 | ezroc3(personTrainResult, train_T); 34 | saveas(figure(2), [pwd '\Graphs\TrainROC.fig']); 35 | ezroc3(personTestResult, test_T); 36 | saveas(figure(3), [pwd '\Graphs\TestROC.fig']); 37 | 38 | -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part B/Committee/AutoEncoder_PartB.m: -------------------------------------------------------------------------------- 1 | % -------------------------------------------------------------------------------------------- 2 | % Final Project - Facial Recognition : AutoEncoder 3 | % Part B - Train Autoencoder by person 4 | % -------------------------------------------------------------------------------------------- 5 | clc 6 | clear all 7 | 8 | % Initialize empty vector to store train and test result from all 40 people 9 | % And two zero 3D vectors to store committee train and test results 10 | personTrainResult = []; 11 | personTestResult = []; 12 | personTrain = zeros(40, 240, 3); 13 | personTest = zeros(40, 160, 3); 14 | 15 | % Load Images to set up all train and test data & targets 16 | [ trainData, trainTarget, testData, testTarget ] = setupData; 17 | [ train_T, test_T ] = OneHotLabel(trainTarget, testTarget); 18 | 19 | % Train/Test Autocencoder for each person 20 | for combo = 1:3 21 | for person = 1:40 22 | % Train and Test Targets 23 | trainPT = train_T(person, :); 24 | testPT = test_T(person, :); 25 | 26 | % AutoEncoder Model 27 | [autoenc1, feature1, autoenc2, feature2, predictTest, predictTrain] = ... 
28 | autoencoderModel(trainData, trainPT, testData, combo); 29 | 30 | % Store the train and test results from each person 31 | personTrainResult = [personTrainResult; predictTrain]; 32 | personTestResult = [personTestResult; predictTest]; 33 | fprintf(strcat('>>>>>>>>>>>>>>>>> Person ' ,int2str(person), '\n')) 34 | 35 | end 36 | % Store train and test for different parameters in autoencoder 37 | personTrain(:, :, combo) = personTrainResult; 38 | personTest(:, :, combo) = personTestResult; 39 | end 40 | 41 | % Take an average on train and test results for committee 42 | personTrainCommittee = (personTrain(:, :, 1)+personTrain(:, :, 2)+personTrain(:, :, 3))/3; 43 | personTestCommittee = (personTest(:, :, 1)+personTest(:, :, 2)+personTest(:, :, 3))/3; 44 | 45 | 46 | % Plot Train + Test ROC for the best optimized net 47 | ezroc3(personTrain(:, :, 1), train_T); 48 | saveas(figure(2), [pwd '\Graphs\TrainROC3.fig']); 49 | ezroc3(personTest(:, :, 1), test_T); 50 | saveas(figure(3), [pwd '\Graphs\TestROC3.fig']); 51 | 52 | % Plot Train + Test ROC for the committee 53 | ezroc3(personTrainCommittee, train_T); 54 | saveas(figure(4), [pwd '\Graphs\TrainROC_Committee.fig']); 55 | ezroc3(personTestCommittee, test_T); 56 | saveas(figure(5), [pwd '\Graphs\TestROC_Committee.fig']); -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part B/Committee/OneHotLabel.m: -------------------------------------------------------------------------------- 1 | % Convert Labels into One Hot Encoding Labels 2 | function [ train_T, test_T ] = OneHotLabel(trainTarget, testTarget) 3 | 4 | % Initialize Targets as zeros 5 | train_T = zeros(40, length(trainTarget)); 6 | test_T = zeros(40, length(testTarget)); 7 | 8 | % Mark 1 with the corresponding index for train and test labels 9 | for trainLoop = 1:length(trainTarget) 10 | train_T(trainTarget(trainLoop), trainLoop) = 1; 11 | end 12 | 13 | for testLoop = 1:length(testTarget) 14 | test_T(testTarget(testLoop), testLoop) = 1; 15 | end 16 | 17 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part B/Committee/autoencoderModel.m: -------------------------------------------------------------------------------- 1 | % AutoEncoder Model 2 | 3 | function [autoenc1, feature1, autoenc2, feature2, predictTest, predictTrain] = ... 4 | autoencoderModel(trainData, train_person_T, testData, combo) 5 | 6 | % Committee Hidden Size 1 & 2 7 | hiddenSize1List = [100, 140, 100]; 8 | hiddenSize2List = [50, 70, 50]; 9 | 10 | % Committee L2 1 & 2 11 | L2One = [0.006, 0.004, 0.008]; 12 | L2Two = [0.004, 0.002, 0.006]; 13 | 14 | % Committee Sparsity Regularization 1 & 2 15 | SparReg1 = [5, 4, 6]; 16 | SparReg2 = ones(1, 3); 17 | 18 | % Committee Sparsity Proportion 1 & 2 19 | SparProp1 = [0.2, 0.15, 0.1]; 20 | SparProp2 = [0.4, 0.3, 0.2]; 21 | 22 | % Autoencoder 23 | hiddenSize1 = hiddenSize1List(combo); 24 | 25 | % Train First AutoEncoder 26 | autoenc1 = trainAutoencoder(trainData,hiddenSize1, ... 27 | 'MaxEpochs',400, ... 28 | 'L2WeightRegularization',L2One(combo), ... 29 | 'SparsityRegularization',SparReg1(combo), ... 30 | 'SparsityProportion',SparProp1(combo), ... 
31 | 'ScaleData', false); 32 | 33 | % Visualize weights of the first AutoEncoder 34 | figure(1) 35 | plotWeights(autoenc1); 36 | 37 | % Generate Features from the first AutoEncoder 38 | feature1 = encode(autoenc1, trainData); 39 | 40 | % Train Second AutoEncoder 41 | hiddenSize2 = hiddenSize2List(combo); 42 | autoenc2 = trainAutoencoder(feature1,hiddenSize2, ... 43 | 'MaxEpochs',200, ... 44 | 'L2WeightRegularization',L2Two(combo), ... 45 | 'SparsityRegularization',SparReg2(combo), ... 46 | 'SparsityProportion',SparProp2(combo), ... 47 | 'ScaleData', false); 48 | 49 | % Generate Second Set of Features by passing the previous feature 50 | feature2 = encode(autoenc2,feature1); 51 | 52 | % Train Final Layer using Loss Function Cross Enropy 53 | cross_entropy = trainSoftmaxLayer(feature2, train_person_T, ... 54 | 'MaxEpochs',400, 'LossFunction', 'crossentropy'); 55 | 56 | % Stacked Nerual Network 57 | StackedDeepNet = stack(autoenc1, autoenc2, cross_entropy); 58 | 59 | % Reshape pixels in each image in the testing data 60 | imageWidth = 112; 61 | imageHeight = 92; 62 | inputSize = imageWidth*imageHeight; 63 | convertTrain = zeros(inputSize,numel(trainData)); 64 | 65 | for i = 1:numel(trainData) 66 | convertTrain(:,i) = trainData{i}(:); 67 | end 68 | 69 | predictTrain = StackedDeepNet(convertTrain); 70 | 71 | % Convert the test images into vectors and store in a matrix 72 | convertTest = zeros(inputSize,numel(testData)); 73 | for i = 1:numel(testData) 74 | convertTest(:,i) = testData{i}(:); 75 | end 76 | 77 | % Test Output 78 | predictTest = StackedDeepNet(convertTest); 79 | -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part B/Committee/imageCategory.m: -------------------------------------------------------------------------------- 1 | % Load in images to create image vectos for testing and training / each person 2 | 3 | function [ train_img_vec, test_img_vec, train_Label, test_Label, train_Label2, test_Label2] = imageCategory(faceNum, sort_TrainCombo, sort_TestCombo) 4 | 5 | % Read in image values for each person 6 | faceNumM = int2str(faceNum); 7 | faceNumM = int2str(1); 8 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 9 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 10 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 11 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 12 | img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 13 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 14 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 15 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 16 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 17 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 18 | 19 | % Resize image into 90 x 90 pixels 20 | resize1 = imresize(img1R,[90 90]); 21 | resize2 = imresize(img2R,[90 90]); 22 | resize3 = imresize(img3R,[90 90]); 23 | resize4= imresize(img4R,[90 90]); 24 | resize5 = imresize(img5R,[90 90]); 25 | resize6 = imresize(img6R,[90 90]); 26 | resize7 = imresize(img7R,[90 90]); 27 | resize8 = imresize(img8R,[90 90]); 28 | resize9 = imresize(img9R,[90 90]); 29 | resize10 = imresize(img10R,[90 90]); 30 | 31 | % Convert image pixels from unit8 to double 32 | img1 = im2double(img1R); 33 | img2 = im2double(img2R); 34 | img3 = im2double(img3R); 35 | img4= im2double(img4R); 36 | img5 = im2double(img5R); 37 | img6 = im2double(img6R); 38 | img7 = im2double(img7R); 39 | img8 = im2double(img8R); 40 | img9 = 
im2double(img9R); 41 | img10 = im2double(img10R); 42 | 43 | % Randomly pick 6 for training and 4 for testing 44 | data_all = (1:10); 45 | trainDataPick = randperm(10,6); 46 | testDataPick = setdiff(data_all, trainDataPick); 47 | 48 | % Grab the images for training picks and convert each image into cell array 49 | % representation 50 | train_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 51 | {eval(strcat('img', int2str(trainDataPick(4))))} {eval(strcat('img', int2str(trainDataPick(5))))} {eval(strcat('img', int2str(trainDataPick(6))))}]; 52 | 53 | % Train Label for each category 54 | train_Label(1, 1:6) = faceNum; 55 | train_Label2(1, 1:length(sort_TrainCombo)) = faceNum; 56 | 57 | % Same for testing picks 58 | test_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 59 | {eval(strcat('img', int2str(trainDataPick(4))))}]; 60 | 61 | test_Label(1, 1:4) = faceNum; 62 | test_Label2(1, 1:length(sort_TestCombo)) = faceNum; 63 | 64 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part B/Committee/setupData.m: -------------------------------------------------------------------------------- 1 | % Set up train (60%) and testing data (40%) for all 40 people 2 | function [trainData, trainTarget, testData, testTarget] = setupData 3 | 4 | % Initialize Cell Arrays to store all train and test Data 5 | trainData ={}; 6 | testData = {}; 7 | trainTarget = []; 8 | testTarget = []; 9 | 10 | % Loop through all 40 people and prep for training and testing dataset 11 | for faceNum = 1:40 12 | [ train_img_vec, test_img_vec, train_Label, test_Label, train_GLabel, testG_Label ] = imageCategory(faceNum, 0, 0); 13 | 14 | % Set up Train + Test Image Data 15 | trainData = [trainData train_img_vec]; 16 | testData = [testData test_img_vec]; 17 | 18 | % Set Up Train + Test Image Label 19 | trainTarget = [trainTarget train_Label]; 20 | testTarget = [testTarget test_Label]; 21 | end 22 | 23 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part B/OneHotLabel.m: -------------------------------------------------------------------------------- 1 | % Convert Labels into One Hot Encoding Labels 2 | function [ train_T, test_T ] = OneHotLabel(trainTarget, testTarget) 3 | 4 | % Initialize Targets as zeros 5 | train_T = zeros(40, length(trainTarget)); 6 | test_T = zeros(40, length(testTarget)); 7 | 8 | % Mark 1 with the corresponding index for train and test labels 9 | for trainLoop = 1:length(trainTarget) 10 | train_T(trainTarget(trainLoop), trainLoop) = 1; 11 | end 12 | 13 | for testLoop = 1:length(testTarget) 14 | test_T(testTarget(testLoop), testLoop) = 1; 15 | end 16 | 17 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part B/autoencoderModel.m: -------------------------------------------------------------------------------- 1 | function [autoenc1, feature1, autoenc2, feature2, predictTest, predictTrain] = autoencoderModel(trainData, train_person_T, testData) 2 | 3 | % Autoencoder 4 | hiddenSize1 = 100; 5 | 6 | % Train First AutoEncoder 7 | autoenc1 = trainAutoencoder(trainData,hiddenSize1, ... 8 | 'MaxEpochs',400, ... 9 | 'L2WeightRegularization',0.006, ... 
10 | 'SparsityRegularization',5, ... 11 | 'SparsityProportion',0.2, ... 12 | 'ScaleData', false); 13 | 14 | % Visualize weights of the first AutoEncoder 15 | figure(1) 16 | plotWeights(autoenc1); 17 | 18 | % Generate Features from the first AutoEncoder 19 | feature1 = encode(autoenc1, trainData); 20 | 21 | % Train Second AutoEncoder 22 | hiddenSize2 = 50; 23 | autoenc2 = trainAutoencoder(feature1,hiddenSize2, ... 24 | 'MaxEpochs',200, ... 25 | 'L2WeightRegularization',0.004, ... 26 | 'SparsityRegularization',1, ... 27 | 'SparsityProportion',0.4, ... 28 | 'ScaleData', false); 29 | 30 | % Generate Second Set of Features by passing the previous feature 31 | feature2 = encode(autoenc2,feature1); 32 | 33 | % Train Final Layer using Loss Function Cross Enropy 34 | cross_entropy = trainSoftmaxLayer(feature2, train_person_T, ... 35 | 'MaxEpochs',400, 'LossFunction', 'crossentropy'); 36 | 37 | % Stacked Nerual Network 38 | StackedDeepNet = stack(autoenc1, autoenc2, cross_entropy); 39 | 40 | % Reshape pixels in each image in the testing data 41 | imageWidth = 112; 42 | imageHeight = 92; 43 | inputSize = imageWidth*imageHeight; 44 | convertTrain = zeros(inputSize,numel(trainData)); 45 | 46 | for i = 1:numel(trainData) 47 | convertTrain(:,i) = trainData{i}(:); 48 | end 49 | 50 | predictTrain = StackedDeepNet(convertTrain); 51 | 52 | % Convert the test images into vectors and store in a matrix 53 | convertTest = zeros(inputSize,numel(testData)); 54 | for i = 1:numel(testData) 55 | convertTest(:,i) = testData{i}(:); 56 | end 57 | 58 | % Test Output 59 | predictTest = StackedDeepNet(convertTest); 60 | -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part B/imageCategory.m: -------------------------------------------------------------------------------- 1 | % Load in images to create image vectos for testing and training / each person 2 | 3 | function [ train_img_vec, test_img_vec, train_Label, test_Label, train_Label2, test_Label2] = imageCategory(faceNum, sort_TrainCombo, sort_TestCombo) 4 | 5 | % Read in image values for each person 6 | faceNumM = int2str(faceNum); 7 | faceNumM = int2str(1); 8 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 9 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 10 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 11 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 12 | img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 13 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 14 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 15 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 16 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 17 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 18 | 19 | % Resize image into 90 x 90 pixels 20 | resize1 = imresize(img1R,[90 90]); 21 | resize2 = imresize(img2R,[90 90]); 22 | resize3 = imresize(img3R,[90 90]); 23 | resize4= imresize(img4R,[90 90]); 24 | resize5 = imresize(img5R,[90 90]); 25 | resize6 = imresize(img6R,[90 90]); 26 | resize7 = imresize(img7R,[90 90]); 27 | resize8 = imresize(img8R,[90 90]); 28 | resize9 = imresize(img9R,[90 90]); 29 | resize10 = imresize(img10R,[90 90]); 30 | 31 | % Convert image pixels from unit8 to double 32 | img1 = im2double(img1R); 33 | img2 = im2double(img2R); 34 | img3 = im2double(img3R); 35 | img4= im2double(img4R); 36 | img5 = im2double(img5R); 37 | img6 = im2double(img6R); 38 | img7 = im2double(img7R); 39 | img8 = im2double(img8R); 40 | img9 = 
im2double(img9R); 41 | img10 = im2double(img10R); 42 | 43 | % Randomly pick 6 for training and 4 for testing 44 | data_all = (1:10); 45 | trainDataPick = randperm(10,6); 46 | testDataPick = setdiff(data_all, trainDataPick); 47 | 48 | % Grab the images for training picks and convert each image into cell array 49 | % representation 50 | train_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 51 | {eval(strcat('img', int2str(trainDataPick(4))))} {eval(strcat('img', int2str(trainDataPick(5))))} {eval(strcat('img', int2str(trainDataPick(6))))}]; 52 | 53 | % Train Label for each category 54 | train_Label(1, 1:6) = faceNum; 55 | train_Label2(1, 1:length(sort_TrainCombo)) = faceNum; 56 | 57 | % Same for testing picks 58 | test_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 59 | {eval(strcat('img', int2str(trainDataPick(4))))}]; 60 | 61 | test_Label(1, 1:4) = faceNum; 62 | test_Label2(1, 1:length(sort_TestCombo)) = faceNum; 63 | 64 | end -------------------------------------------------------------------------------- /Facial Recognition-Autoencoder/AutoEncoder Part B/setupData.m: -------------------------------------------------------------------------------- 1 | % Set up train (60%) and testing data (40%) for all 40 people 2 | function [trainData, trainTarget, testData, testTarget] = setupData 3 | 4 | % Initialize Cell Arrays to store all train and test Data 5 | trainData ={}; 6 | testData = {}; 7 | trainTarget = []; 8 | testTarget = []; 9 | 10 | % Loop through all 40 people and prep for training and testing dataset 11 | for faceNum = 1:40 12 | [ train_img_vec, test_img_vec, train_Label, test_Label, train_GLabel, testG_Label ] = imageCategory(faceNum, 0, 0); 13 | 14 | % Set up Train + Test Image Data 15 | trainData = [trainData train_img_vec]; 16 | testData = [testData test_img_vec]; 17 | 18 | % Set Up Train + Test Image Label 19 | trainTarget = [trainTarget train_Label]; 20 | testTarget = [testTarget test_Label]; 21 | end 22 | 23 | end -------------------------------------------------------------------------------- /Facial Recognition-LDA+PCA/Calc_Genuine_Impostor.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine and Impostor Scores for each person 2 | function[GI] = Calc_Genuine_Impostor(trainImgD, testImgD, eigenVec) 3 | 4 | % Initialize to store Genuine and Impostor 5 | GI = []; 6 | 7 | % Loop through all 40 people 8 | for face = 1:40 9 | startImpostor = []; 10 | endImpostor = []; 11 | % Calculate Genuine Scores 12 | G_score = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, face)*eigenVec)', 'euclidean'); 13 | % Person 1 14 | if face == 1 15 | GI= [GI, G_score]; 16 | for ifaceOne = 2:40 17 | iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, ifaceOne)*eigenVec)', 'euclidean'); 18 | GI = [GI, iscore]; 19 | end 20 | % Person 2 - 40 21 | else 22 | % Calculate the first part of the Impostor 23 | for iface = 1 : (face-1) 24 | start_iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, iface)*eigenVec)', 'euclidean'); 25 | startImpostor = [startImpostor, start_iscore]; 26 | end 27 | 28 | % Calculate the second part of the Impostor 29 | for iface2 = (face+1):40 30 | end_iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, iface2)*eigenVec)', 'euclidean'); 31 | 
endImpostor = [endImpostor, end_iscore]; 32 | end 33 | smashTogether = [startImpostor, G_score, endImpostor]; 34 | GI = [GI; smashTogether]; 35 | end 36 | 37 | end 38 | 39 | 40 | end 41 | -------------------------------------------------------------------------------- /Facial Recognition-LDA+PCA/LDA_for_PCA.m: -------------------------------------------------------------------------------- 1 | % Set up Train, Test Data and build LDA Model 2 | function[V, eigvector_sort, eigenVal] = LDA_for_PCA(train_feature) 3 | 4 | % Initialize 5 | trainImg_mean_class = []; 6 | SB = zeros(10, 10); 7 | SW = zeros(10, 10); 8 | 9 | % Calculate Mean for Each Class 10 | for faceNum = 1:40 11 | % Calculate the mean for each class 12 | trainImg_mean_class = [trainImg_mean_class; mean(train_feature(:, :, faceNum))]; 13 | 14 | end 15 | 16 | % Num of Observations for each class -- per image or per pixels ? 17 | % Average all mean classes 18 | mu = mean(trainImg_mean_class); 19 | 20 | % Center the data (data-mean) 21 | for idx = 1:40 22 | center = (train_feature(:, :, idx))-repmat(trainImg_mean_class(idx, :),size(train_feature(:, :, idx),1),1); 23 | 24 | % Calculate the within class variance (SW) 25 | within_class_var = center'*center; 26 | SW = SW + within_class_var; 27 | 28 | end 29 | 30 | % SW 31 | inv_SW=inv(SW); 32 | 33 | % Calculate between class variance (SB) 34 | for trainImgC = 1:40 35 | between_class_var = size(train_feature(:, :, trainImgC),1) * (trainImg_mean_class(trainImgC, :)-mu)'* (trainImg_mean_class(trainImgC,:)-mu); 36 | SB = SB +between_class_var; 37 | end 38 | 39 | % Calculate V 40 | V = inv_SW * SB; 41 | 42 | % Get Eigenvalue and Eigenvectors of V 43 | [eigenVec, eigenVal] = eig(V); 44 | 45 | eigval_diagnal = diag(eigenVal); 46 | [other, index] = sort(eigval_diagnal,'descend'); 47 | eigval_diagnal = eigval_diagnal(index); 48 | eigvector_sort = eigenVec(:, index); 49 | 50 | % Pick out eigen values based on threshold 51 | countNumEig = 0; 52 | for count = 1:size(eigval_diagnal,1) 53 | if(eigval_diagnal(count)>0) 54 | countNumEig = countNumEig + 1; 55 | end 56 | end 57 | 58 | % Filtered eigen vectors 59 | filteredEigVec = eigvector_sort(:, 1:countNumEig); 60 | 61 | end -------------------------------------------------------------------------------- /Facial Recognition-LDA+PCA/PCA_LDA_Combo.m: -------------------------------------------------------------------------------- 1 | % ----------------------------------------------------------------------------------------------------------------------------------- 2 | % PCA + LDA Combo Model 3 | % Perform LDA on feature vectors obtained using PCA (1 to 10 images) 4 | % Calculate the system using Mode #1 rules in PCA for LDA model 5 | % ----------------------------------------------------------------------------------------------------------------------------------- 6 | 7 | clc 8 | clear all 9 | close all 10 | 11 | % Initialize vectors 12 | trainfeature = []; 13 | testfeature = []; 14 | 15 | % Build LDA Model 16 | [trainfeature, testfeature] = PCA_Process; 17 | [V, eigvector_sort, eigenVal] = LDA_for_PCA(trainfeature); 18 | 19 | 20 | % Calculate Genuine and Impostor Scores 21 | [GI] = Calc_Genuine_Impostor(trainfeature, testfeature, V); 22 | 23 | % Label 24 | [Label] = label(); 25 | 26 | % Plot ROC Curve 27 | [roc,EER,area,EERthr,ALLthr] = ezroc3(GI, Label, 2, 0, 1); 28 | 29 | % GAR and FAR from ROC 30 | GAR = roc(1, :)'; 31 | 32 | % Get FRR and FAR 33 | FRR = round((GAR + 1)*10^4)/10^4; 34 | FAR = round((roc(2, :)')*10^4)/10^4; 35 | 36 | % Obtain FRR 
values at 0%, 5%, 10% FAR 37 | case1 = []; 38 | case2 = []; 39 | case3 = []; 40 | 41 | % Obtain EER for corresponding threshold 42 | for idx = 1:size(FAR,1) 43 | % FRR = 0% 44 | if (FAR(idx, 1) == 0) 45 | case1 = [case1,FRR(idx, 1)]; 46 | % FAR = 5% 47 | elseif (0.005 <= FAR(idx, 1) && FAR(idx, 1) <=0.0055) 48 | case2 = [case2,FRR(idx, 1)]; 49 | % FAR = 10% 50 | elseif (0.1 <= FAR(idx, 1) && FAR(idx, 1) <=0.1015) 51 | case3 = [case3,FRR(idx, 1)]; 52 | end 53 | end 54 | 55 | -------------------------------------------------------------------------------- /Facial Recognition-LDA+PCA/PCA_Process.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine Scores 2 | 3 | function [featureMatrixAgg, testProjectAgg] = PCA_Process 4 | 5 | % Initialize vector 6 | featureMatrixAgg = []; 7 | testProjectAgg = []; 8 | trainImg_mean_class = []; 9 | testImg_mean_class = []; 10 | train_center_all = []; 11 | test_center_all = []; 12 | trainImg = []; 13 | testImg = []; 14 | 15 | % Load Images to set up train and test data by person 16 | for faceNum = 1:40 17 | [ train_img_vec, test_img_vec, train_Label, test_Label ] = imgPrep(faceNum); 18 | trainImg = cat(3, trainImg, cell2mat(train_img_vec)); 19 | testImg = cat(3, testImg, cell2mat(test_img_vec)); 20 | 21 | % Calculate the mean for each class 22 | trainImg_mean_class = [trainImg_mean_class; mean(trainImg(:, :, faceNum))]; 23 | testImg_mean_class = [testImg_mean_class; mean(testImg(:, :, faceNum))]; 24 | 25 | end 26 | 27 | % Average all mean classes 28 | mu_train = mean(trainImg_mean_class); 29 | mu_test = mean(testImg_mean_class); 30 | 31 | % Center the data (data-mean) 32 | for idx = 1:40 33 | train_center = (trainImg(:, :, idx))-repmat(trainImg_mean_class(idx, :),size(trainImg(:, :, idx),1),1); 34 | test_center = (testImg(:, :, idx))-repmat(testImg_mean_class(idx, :),size(testImg(:, :, idx),1),1); 35 | 36 | % Store centers for each class 37 | train_center_all = [train_center_all; train_center]; 38 | test_center_all = [test_center_all; test_center]; 39 | 40 | end 41 | 42 | % Calculate Covariance Matrix 43 | train_co_var = train_center_all'*train_center_all; 44 | test_co_var = test_center_all'*test_center_all; 45 | 46 | % Calculate Eignevalues and Eigenvectors 47 | [ eigvector,eignval ] = eig(train_co_var); 48 | 49 | % Sort eigenvector in descending order by correspoding eigenvalue 50 | eigval_diagnal = diag(eignval); 51 | [other, index] = sort(eigval_diagnal,'descend'); 52 | eigval_diagnal = eigval_diagnal(index); 53 | eigvector_sort = eigvector(:, index); 54 | 55 | % Pick out eigen values based on threshold 56 | countNumEig = 0; 57 | for count = 1:size(eigval_diagnal,1) 58 | if(eigval_diagnal(count)>0) 59 | countNumEig = countNumEig + 1; 60 | end 61 | end 62 | 63 | % Filtered eigen vectors 64 | filteredEigVec = eigvector_sort(:, 1:countNumEig); 65 | 66 | % Calculate Feature Matrix (Train Projected Space) 67 | featureMatrix = train_co_var' * train_center_all'; 68 | 69 | % Project testing data to the Train Projected Space 70 | testProject = test_co_var' * test_center_all'; 71 | 72 | for i = 1:40 73 | % Store each person's feature matrix in a multidimensional array 74 | featureMatrixAgg = cat(3, featureMatrixAgg, featureMatrix(:, (i-1)*10+1:i*10)); 75 | testProjectAgg = cat(3, testProjectAgg, testProject(:, (i-1)*10+1:i*10)); 76 | end 77 | 78 | end 79 | -------------------------------------------------------------------------------- /Facial Recognition-LDA+PCA/imgPrep.m: 
-------------------------------------------------------------------------------- 1 | % Load in images to create image vectos for testing and training / each person 2 | 3 | function [ train_img_vec, test_img_vec, train_Label, test_Label] = imgPrep(faceNum) 4 | % Read in image values for each person 5 | faceNumM = int2str(faceNum); 6 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 7 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 8 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 9 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 10 | img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 11 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 12 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 13 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 14 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 15 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 16 | 17 | % Resize image into 10 x 10 pixels 18 | resize1 = imresize(img1R,[10 10]); 19 | resize2 = imresize(img2R,[10 10]); 20 | resize3 = imresize(img3R,[10 10]); 21 | resize4= imresize(img4R,[10 10]); 22 | resize5 = imresize(img5R,[10 10]); 23 | resize6 = imresize(img6R,[10 10]); 24 | resize7 = imresize(img7R,[10 10]); 25 | resize8 = imresize(img8R,[10 10]); 26 | resize9 = imresize(img9R,[10 10]); 27 | resize10 = imresize(img10R,[10 10]); 28 | 29 | % Convert image pixels from unit8 to double 30 | img1 = im2double(resize1); 31 | img2 = im2double(resize2); 32 | img3 = im2double(resize3); 33 | img4= im2double(resize4); 34 | img5 = im2double(resize5); 35 | img6 = im2double(resize6); 36 | img7 = im2double(resize7); 37 | img8 = im2double(resize8); 38 | img9 = im2double(resize9); 39 | img10 = im2double(resize10); 40 | 41 | % Randomly pick 5 for training and 5 for testing 42 | data_all = (1:10); 43 | trainDataPick = randperm(10,5); 44 | testDataPick = setdiff(data_all, trainDataPick); 45 | 46 | % Grab the images for training picks and convert each image into cell array 47 | % representation 48 | train_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 49 | {eval(strcat('img', int2str(trainDataPick(4))))} {eval(strcat('img', int2str(trainDataPick(5))))} ]; 50 | 51 | % Train Label for each category 52 | train_Label(1, 1:5) = faceNum; 53 | %train_Label2(1, 1:length(sort_TrainCombo)) = faceNum; 54 | 55 | % Same for testing picks 56 | test_img_vec = [{eval(strcat('img', int2str(testDataPick(1))))} {eval(strcat('img', int2str(testDataPick(2))))} {eval(strcat('img', int2str(testDataPick(3))))} ... 
57 | {eval(strcat('img', int2str(testDataPick(4))))} {eval(strcat('img', int2str(testDataPick(5))))}]; 58 | 59 | test_Label(1, 1:5) = faceNum; 60 | %test_Label2(1, 1:length(sort_TestCombo)) = faceNum; 61 | 62 | end -------------------------------------------------------------------------------- /Facial Recognition-LDA+PCA/label.m: -------------------------------------------------------------------------------- 1 | % Generate labels for Geunuine and Impostor 2 | function[label] = label() 3 | 4 | % Initialize label matrix as 1 5 | label = ones(400, 400); 6 | 7 | 8 | % Replace the label with 0 when it's the right class 9 | for face = 1:40 10 | label(10*(face-1) +1:10*face, 10*(face-1) +1:10*face) = 0; 11 | end 12 | 13 | 14 | end 15 | -------------------------------------------------------------------------------- /Facial Recognition-LDA/Calc_Genuine_Impostor.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine and Impostor Scores for each person 2 | function[GI] = Calc_Genuine_Impostor(trainImgD, testImgD, eigenVec) 3 | 4 | % Initialize to store Genuine and Impostor 5 | GI = []; 6 | 7 | % Loop through all 40 people 8 | for face = 1:40 9 | startImpostor = []; 10 | endImpostor = []; 11 | % Calculate Genuine Scores 12 | G_score = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, face)*eigenVec)', 'euclidean'); 13 | % Person 1 14 | if face == 1 15 | GI= [GI, G_score]; 16 | for ifaceOne = 2:40 17 | iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, ifaceOne)*eigenVec)', 'euclidean'); 18 | GI = [GI, iscore]; 19 | end 20 | % Person 2 - 40 21 | else 22 | % Calculate the first part of the Impostor 23 | for iface = 1 : (face-1) 24 | start_iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, iface)*eigenVec)', 'euclidean'); 25 | startImpostor = [startImpostor, start_iscore]; 26 | end 27 | 28 | % Calculate the second part of the Impostor 29 | for iface2 = (face+1):40 30 | end_iscore = pdist2((trainImgD(:, :, face)*eigenVec)', (testImgD(:, :, iface2)*eigenVec)', 'euclidean'); 31 | endImpostor = [endImpostor, end_iscore]; 32 | end 33 | smashTogether = [startImpostor, G_score, endImpostor]; 34 | GI = [GI; smashTogether]; 35 | end 36 | 37 | end 38 | 39 | 40 | end 41 | -------------------------------------------------------------------------------- /Facial Recognition-LDA/LDAModel.m: -------------------------------------------------------------------------------- 1 | % Set up Train, Test Data and build LDA Model 2 | function[V, eigvector_sort, eigenVal, trainImg, testImg] = LDAModel() 3 | 4 | % Initialize 5 | trainImg = []; 6 | testImg = []; 7 | trainImg_mean_class = []; 8 | SB = zeros(10, 10); 9 | SW = zeros(10, 10); 10 | 11 | % Load in Images & Calculate Mean for Each Class 12 | for faceNum = 1:40 13 | [ train_img_vec, test_img_vec, train_Label, test_Label ] = imgPrep(faceNum); 14 | trainImg = cat(3, trainImg, cell2mat(train_img_vec')); 15 | testImg = cat(3, testImg, cell2mat(test_img_vec')); 16 | 17 | % Calculate the mean for each class 18 | trainImg_mean_class = [trainImg_mean_class; mean(trainImg(:, :, faceNum))]; 19 | 20 | end 21 | 22 | % Num of Observations for each class -- per image or per pixels ? 
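% As written, the observation unit is a pixel row: each class stacks its 5
% resized 10x10 images into a 50x10 block, so size(trainImg(:, :, c), 1) = 50
% rows per class and 10 features (columns). The scatter matrices built below
% follow standard Fisher LDA, sketched here for reference (mu_c = class mean
% row, mu = mean of the class means):
%   SW = sum_c (Xc - mu_c)' * (Xc - mu_c)        % within-class scatter, 10x10
%   SB = sum_c n_c * (mu_c - mu)' * (mu_c - mu)  % between-class scatter, 10x10
%   V  = inv(SW) * SB; its eigenvectors, sorted by eigenvalue, are returned alongside V.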
23 | % Average all mean classes 24 | mu = mean(trainImg_mean_class); 25 | 26 | % Center the data (data-mean) 27 | for idx = 1:40 28 | center = (trainImg(:, :, idx))-repmat(trainImg_mean_class(idx, :),size(trainImg(:, :, idx),1),1); 29 | %trainImg_center = cat(3, trainImg_center, center); 30 | 31 | % Calculate the within class variance (SW) 32 | within_class_var = center'*center; 33 | SW = SW + within_class_var; 34 | 35 | end 36 | 37 | % SW 38 | inv_SW=inv(SW); 39 | 40 | % Calculate between class variance (SB) 41 | for trainImgC = 1:40 42 | between_class_var = size(trainImg(:, :, trainImgC),1) * (trainImg_mean_class(trainImgC, :)-mu)'* (trainImg_mean_class(trainImgC,:)-mu); 43 | %SB = cat(3, SB, between_class_var); 44 | SB = SB +between_class_var; 45 | end 46 | 47 | % Calculate V 48 | V = inv_SW * SB; 49 | 50 | % Get Eigenvalue and Eigenvectors of V 51 | [eigenVec, eigenVal] = eig(V); 52 | 53 | eigval_diagnal = diag(eigenVal); 54 | [other, index] = sort(eigval_diagnal,'descend'); 55 | eigval_diagnal = eigval_diagnal(index); 56 | eigvector_sort = eigenVec(:, index); 57 | 58 | % Pick out eigen values based on threshold 59 | countNumEig = 0; 60 | for count = 1:size(eigval_diagnal,1) 61 | if(eigval_diagnal(count)>0) 62 | countNumEig = countNumEig + 1; 63 | end 64 | end 65 | 66 | % Filtered eigen vectors 67 | filteredEigVec = eigvector_sort(:, 1:countNumEig); 68 | 69 | end -------------------------------------------------------------------------------- /Facial Recognition-LDA/LDA_Main.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------------------------------------------------------------------- 2 | % Facial Recognition : LDA 3 | % --------------------------------------------------------------------------------------------------- 4 | clc 5 | clear all 6 | close all 7 | 8 | % Label 9 | [label] = label_GI(); 10 | 11 | % Build LDA Model 12 | [V, eigenVec, eigenVal, trainImg, testImg] = LDAModel(); 13 | 14 | % Calculate Genuine and Impostor Scores 15 | [GI] = Calc_Genuine_Impostor(trainImg, testImg, V); 16 | 17 | % Plot ROC Curve 18 | [roc,EER,area,EERthr,ALLthr] = ezroc3(GI, label, 2, 0, 1); 19 | 20 | % GAR and FAR from ROC 21 | GAR = roc(1, :)'; 22 | 23 | % Get FRR and FAR 24 | FRR = round((GAR + 1)*10^4)/10^4; 25 | FAR = round((roc(2, :)')*10^4)/10^4; 26 | 27 | % Obtain FRR values at 0%, 5%, 10% FAR 28 | case1 = []; 29 | case2 = []; 30 | case3 = []; 31 | 32 | % Obtain EER for corresponding threshold 33 | for idx = 1:size(FAR,1) 34 | % FRR = 0% 35 | if (FAR(idx, 1) == 0) 36 | case1 = [case1,FRR(idx, 1)]; 37 | % FAR = 5% 38 | elseif (0.005 <= FAR(idx, 1) && FAR(idx, 1) <=0.0055) 39 | case2 = [case2,FRR(idx, 1)]; 40 | % FAR = 10% 41 | elseif (0.1 <= FAR(idx, 1) && FAR(idx, 1) <=0.1015) 42 | case3 = [case3,FRR(idx, 1)]; 43 | end 44 | end 45 | -------------------------------------------------------------------------------- /Facial Recognition-LDA/imgPrep.m: -------------------------------------------------------------------------------- 1 | % Load in images to create image vectos for testing and training / each person 2 | 3 | function [ train_img_vec, test_img_vec, train_Label, test_Label] = imgPrep(faceNum) 4 | % Read in image values for each person 5 | faceNumM = int2str(faceNum); 6 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 7 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 8 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 9 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 10 | 
img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 11 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 12 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 13 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 14 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 15 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 16 | 17 | % Resize image into 10 x 10 pixels 18 | resize1 = imresize(img1R,[10 10]); 19 | resize2 = imresize(img2R,[10 10]); 20 | resize3 = imresize(img3R,[10 10]); 21 | resize4= imresize(img4R,[10 10]); 22 | resize5 = imresize(img5R,[10 10]); 23 | resize6 = imresize(img6R,[10 10]); 24 | resize7 = imresize(img7R,[10 10]); 25 | resize8 = imresize(img8R,[10 10]); 26 | resize9 = imresize(img9R,[10 10]); 27 | resize10 = imresize(img10R,[10 10]); 28 | 29 | % Convert image pixels from unit8 to double 30 | img1 = im2double(resize1); 31 | img2 = im2double(resize2); 32 | img3 = im2double(resize3); 33 | img4= im2double(resize4); 34 | img5 = im2double(resize5); 35 | img6 = im2double(resize6); 36 | img7 = im2double(resize7); 37 | img8 = im2double(resize8); 38 | img9 = im2double(resize9); 39 | img10 = im2double(resize10); 40 | 41 | % Randomly pick 5 for training and 5 for testing 42 | data_all = (1:10); 43 | trainDataPick = randperm(10,5); 44 | testDataPick = setdiff(data_all, trainDataPick); 45 | 46 | % Grab the images for training picks and convert each image into cell array 47 | % representation 48 | train_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 49 | {eval(strcat('img', int2str(trainDataPick(4))))} {eval(strcat('img', int2str(trainDataPick(5))))} ]; 50 | 51 | % Train Label for each category 52 | train_Label(1, 1:5) = faceNum; 53 | %train_Label2(1, 1:length(sort_TrainCombo)) = faceNum; 54 | 55 | % Same for testing picks 56 | test_img_vec = [{eval(strcat('img', int2str(testDataPick(1))))} {eval(strcat('img', int2str(testDataPick(2))))} {eval(strcat('img', int2str(testDataPick(3))))} ... 
57 | {eval(strcat('img', int2str(testDataPick(4))))} {eval(strcat('img', int2str(testDataPick(5))))}]; 58 | 59 | test_Label(1, 1:5) = faceNum; 60 | %test_Label2(1, 1:length(sort_TestCombo)) = faceNum; 61 | 62 | end -------------------------------------------------------------------------------- /Facial Recognition-LDA/label_GI.m: -------------------------------------------------------------------------------- 1 | % Generate labels for Geunuine and Impostor 2 | function[label] = label_GI() 3 | 4 | % Initialize label matrix as 1 5 | label = ones(400, 400); 6 | 7 | 8 | % Replace the label with 0 when it's the right class 9 | for face = 1:40 10 | label(10*(face-1) +1:10*face, 10*(face-1) +1:10*face) = 0; 11 | end 12 | 13 | 14 | end 15 | -------------------------------------------------------------------------------- /Facial Recognition-PCA/Mode One/GenuineCalc.m: -------------------------------------------------------------------------------- 1 | % Calculate Genuine Scores 2 | 3 | function [GenuineVec, featureMatrixAgg, testProjectAgg] = GenuineCalc 4 | 5 | % Initialize vector 6 | GenuineVec = []; 7 | featureMatrixAgg = []; 8 | testProjectAgg = []; 9 | 10 | % Load Images to set up train and test data by person 11 | % Calculate Genuine Scores first 12 | for faceNum = 1 : 40 13 | [ trainData, testData, train_Label, test_Label, train_GLabel, testG_Label ] = imgPrepModeOne(faceNum, 0, 0); 14 | 15 | % Mode #1 - PCA 16 | [featureMatrix, testProject, GenuineScore] = PCA(trainData, testData); 17 | 18 | % Store each person's feature matrix in a multidimensional array 19 | featureMatrixAgg = cat(3, featureMatrixAgg, featureMatrix); 20 | testProjectAgg = cat(3, testProjectAgg, testProject); 21 | 22 | % Genuine Scores 23 | GenuineVec = [GenuineVec; GenuineScore]; 24 | 25 | end -------------------------------------------------------------------------------- /Facial Recognition-PCA/Mode One/ImpostorCalc.m: -------------------------------------------------------------------------------- 1 | % Calculate Impostor Scores from feature matrix and projected value 2 | function[ImpostorVec] = ImpostorCalc(featureMatrixAgg, testProjectAgg) 3 | 4 | % Initialize vector 5 | ImpostorVec = []; 6 | 7 | % Loop through feature matrices 8 | for FM_idx = 1 : size(featureMatrixAgg, 3) 9 | PersonImpostor = []; 10 | % Loop through projected matrices to calculate scores 11 | for TP_idx = 1 : size(testProjectAgg, 3) 12 | 13 | % If not itself, do the math! 
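% pdist2(A, B, 'euclidean') returns the distance between every row of A and
% every row of B, e.g. pdist2([0 0; 3 4], [0 0], 'euclidean') gives [0; 5].
% Each pass through this inner loop therefore yields one block of cross-person
% (impostor) distances, comparing one person's training feature matrix against
% a different person's projected test data; the same-person (genuine) blocks
% are produced separately in GenuineCalc.m.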
14 | if FM_idx ~= TP_idx 15 | 16 | % Calc Euclidean Distance for impostor scores 17 | impostorScore = pdist2(featureMatrixAgg(:,:, TP_idx), testProjectAgg(:,:, FM_idx), 'euclidean'); 18 | PersonImpostor = [PersonImpostor, impostorScore]; 19 | end 20 | end 21 | 22 | % Store Impostsor Scores 23 | % Can't vertcat an empty vector with another value vector 24 | % So store the first Impostor directly in the Impostor 25 | if isempty(ImpostorVec) == 1 26 | ImpostorVec = PersonImpostor; 27 | 28 | % Any Impostors after 1st one are vertcat into the matrix 29 | else 30 | ImpostorVec = vertcat(ImpostorVec, PersonImpostor); 31 | %ImpostorVec = [ImpostorVec; PersonImpostor]; 32 | end 33 | 34 | end 35 | 36 | 37 | end -------------------------------------------------------------------------------- /Facial Recognition-PCA/Mode One/Label_TransformGI.m: -------------------------------------------------------------------------------- 1 | % Create Label and transform GI vector to 0-1 2 | 3 | function [testLabels, GI_convert]=Label_TransformGI(GI) 4 | 5 | % Labels for data 6 | Labels = zeros(size(GI,1),size(GI,1)); 7 | for rowCount = 0:10:size(GI,1)-10 8 | Labels(rowCount+1:rowCount + 10, rowCount+1:rowCount + 10) = ones(10, 10); 9 | end 10 | 11 | % Convert GI to 0 or 1 : Cut off Point 0.9 12 | GI_convert = zeros(size(GI,1),size(GI,1)); 13 | 14 | for row = 1:size(GI,1) 15 | for col = 1:size(GI,2) 16 | if GI(row,col) < 0.9 17 | GI_convert(row, col) = 1; 18 | end 19 | 20 | end 21 | end 22 | testLabels = [ones(1, 10), zeros(1, size(GI,1)-10 )]'; 23 | 24 | end -------------------------------------------------------------------------------- /Facial Recognition-PCA/Mode One/ModeOne.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------------------------------------------------------------------- 2 | % Mode 1 : 1-5 Images as Training and 6-10 Images as Testing 3 | % --------------------------------------------------------------------------------------------------- 4 | clc 5 | clear all 6 | close all 7 | 8 | % Calculate Genuine Scores 9 | [GenuineVec, featureMatrixAgg, testProjectAgg] = GenuineCalc; 10 | 11 | % Calculate Impostor Scores 12 | [ImpostorVec] = ImpostorCalc(featureMatrixAgg, testProjectAgg); 13 | 14 | % Genuine + Impostor 15 | GI = [GenuineVec, ImpostorVec]; 16 | 17 | % Create Labels + Transform GI to 0-1 18 | [testLabels, GI_convert]=Label_TransformGI(GI); 19 | 20 | % Plot Graph 21 | [roc,EER,area,EERthr,ALLthr]= ezroc3(GI, testLabels, 2, 0, 1); 22 | 23 | % GAR and FAR from ROC 24 | GAR = roc(1, :)'; 25 | 26 | % Get FRR and FAR 27 | FRR = round((GAR + 1)*10^4)/10^4; 28 | FAR = round((roc(2, :)')*10^4)/10^4; 29 | 30 | % Obtain FRR values at 0%, 5%, 10% FAR 31 | case1 = []; 32 | case2 = []; 33 | case3 = []; 34 | 35 | for idx = 1:size(FAR,1) 36 | % FRR = 0% 37 | if (FAR(idx, 1) == 0) 38 | case1 = [case1,FRR(idx, 1)]; 39 | % FAR = 5% 40 | elseif (0.005 <= FAR(idx, 1) && FAR(idx, 1) <=0.0055) 41 | case2 = [case2,FRR(idx, 1)]; 42 | % FAR = 10% 43 | elseif (0.1 <= FAR(idx, 1) && FAR(idx, 1) <=0.1015) 44 | case3 = [case3,FRR(idx, 1)]; 45 | end 46 | end 47 | 48 | -------------------------------------------------------------------------------- /Facial Recognition-PCA/Mode One/PCA.m: -------------------------------------------------------------------------------- 1 | % Principal Component Analysis (PCA Model) 2 | 3 | function [featureMatrix, testProject, GenuineScore] = PCA(trainData, testData) 4 | 5 | % Unpack Cell to Vector 6 | trainData_V = 
cell2mat(trainData); 7 | testData_V = cell2mat(testData); 8 | 9 | % Get size of matrix 10 | [trainRow, trainCol] = size(trainData_V); 11 | [testRow, testCol] = size(testData_V); 12 | 13 | % Calculate the mean for each person's img data 14 | trainMean = mean((trainData_V)')'; 15 | testMean = mean((testData_V)')'; 16 | 17 | % Center the train data ( img - mean for each person's imgs ) 18 | trainCenter = trainData_V - repmat(trainMean,1,trainCol); 19 | 20 | % Calculate Covariance Matrix 21 | train_CoVar = trainCenter * trainCenter'; 22 | 23 | % Calculate Eignevalues and Eigenvectors 24 | [ eigvector,eignval ] = eig(train_CoVar); 25 | 26 | % Sort eigenvector in descending order by correspoding eigenvalue 27 | eigval_diagnal = diag(eignval); 28 | [other, index] = sort(eigval_diagnal,'descend'); 29 | eigval_diagnal = eigval_diagnal(index); 30 | eigvector_sort = eigvector(:, index); 31 | 32 | % Pick out eigen values based on threshold 33 | countNumEig = 0; 34 | for count = 1:size(eigval_diagnal,1) 35 | if(eigval_diagnal(count)>0) 36 | countNumEig = countNumEig + 1; 37 | end 38 | end 39 | 40 | % Filtered eigen vectors 41 | filteredEigVec = eigvector_sort(:, 1:countNumEig); 42 | 43 | % Calculate Feature Matrix (Train Projected Space) 44 | featureMatrix = filteredEigVec' * trainCenter; 45 | 46 | % Center the test data ( img - mean for each person's imgs ) 47 | testCenter = testData_V - repmat(testMean,1,testCol); 48 | 49 | % Project testing data to the Train Projected Space 50 | testProject = filteredEigVec' * testCenter; 51 | 52 | % Genuine Scores 53 | GenuineScore = pdist2(featureMatrix, testProject, 'euclidean'); 54 | 55 | end -------------------------------------------------------------------------------- /Facial Recognition-PCA/Mode One/imgPrepModeOne.m: -------------------------------------------------------------------------------- 1 | % Load in images to create image vectos for testing and training / each person 2 | 3 | function [ train_img_vec, test_img_vec, train_Label, test_Label, train_Label2, test_Label2] = imgPrepModeOne(faceNum, sort_TrainCombo, sort_TestCombo) 4 | 5 | % Read in image values for each person 6 | faceNumM = int2str(faceNum); 7 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 8 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 9 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 10 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 11 | img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 12 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 13 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 14 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 15 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 16 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 17 | 18 | % Resize image into 10 x 10 pixels 19 | resize1 = imresize(img1R,[10 10]); 20 | resize2 = imresize(img2R,[10 10]); 21 | resize3 = imresize(img3R,[10 10]); 22 | resize4= imresize(img4R,[10 10]); 23 | resize5 = imresize(img5R,[10 10]); 24 | resize6 = imresize(img6R,[10 10]); 25 | resize7 = imresize(img7R,[10 10]); 26 | resize8 = imresize(img8R,[10 10]); 27 | resize9 = imresize(img9R,[10 10]); 28 | resize10 = imresize(img10R,[10 10]); 29 | 30 | % Convert image pixels from unit8 to double 31 | img1 = im2double(resize1); 32 | img2 = im2double(resize2); 33 | img3 = im2double(resize3); 34 | img4= im2double(resize4); 35 | img5 = im2double(resize5); 36 | img6 = im2double(resize6); 37 | img7 = im2double(resize7); 38 | img8 = im2double(resize8); 39 | 
img9 = im2double(resize9); 40 | img10 = im2double(resize10); 41 | 42 | % Randomly pick 5 for training and 5 for testing 43 | data_all = (1:10); 44 | trainDataPick = randperm(10,5); 45 | testDataPick = setdiff(data_all, trainDataPick); 46 | 47 | % Grab the images for training picks and convert each image into cell array 48 | % representation 49 | train_img_vec = [{eval(strcat('img', int2str(trainDataPick(1))))} {eval(strcat('img', int2str(trainDataPick(2))))} {eval(strcat('img', int2str(trainDataPick(3))))} ... 50 | {eval(strcat('img', int2str(trainDataPick(4))))} {eval(strcat('img', int2str(trainDataPick(5))))} ]; 51 | 52 | % Train Label for each category 53 | train_Label(1, 1:5) = faceNum; 54 | train_Label2(1, 1:length(sort_TrainCombo)) = faceNum; 55 | 56 | % Same for testing picks 57 | test_img_vec = [{eval(strcat('img', int2str(testDataPick(1))))} {eval(strcat('img', int2str(testDataPick(2))))} {eval(strcat('img', int2str(testDataPick(3))))} ... 58 | {eval(strcat('img', int2str(testDataPick(4))))} {eval(strcat('img', int2str(testDataPick(5))))}]; 59 | 60 | test_Label(1, 1:5) = faceNum; 61 | test_Label2(1, 1:length(sort_TestCombo)) = faceNum; 62 | 63 | end -------------------------------------------------------------------------------- /Facial Recognition-PCA/Mode Two/ImpostorCalc.m: -------------------------------------------------------------------------------- 1 | % Calculate Impostor Scores from feature matrix and projected value 2 | function[ImpostorVec] = ImpostorCalc(featureMatrixAgg, testProjectAgg) 3 | 4 | % Initialize vector 5 | ImpostorVec = []; 6 | 7 | % Loop through feature matrices 8 | for FM_idx = 1 : size(featureMatrixAgg, 3) 9 | PersonImpostor = []; 10 | % Loop through projected matrices to calculate scores 11 | for TP_idx = 1 : size(testProjectAgg, 3) 12 | 13 | % If not itself, do the math! 
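% Smaller Euclidean distance means a stronger match. In ModeTwo.m the impostor
% rows built here are placed next to the genuine scores as GI = [GenuineVec,
% ImpostorVec], and ezroc3 sweeps a threshold over those distances against the
% 0/1 labels from Label_TransformGI to estimate the ROC curve and EER.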
14 | if FM_idx ~= TP_idx 15 | 16 | % Calc Euclidean Distance for impostor scores 17 | impostorScore = pdist2(featureMatrixAgg(:,:, TP_idx), testProjectAgg(:,:, FM_idx), 'euclidean'); 18 | PersonImpostor = [PersonImpostor, impostorScore]; 19 | end 20 | end 21 | 22 | % Store Impostsor Scores 23 | % Can't vertcat an empty vector with another value vector 24 | % So store the first Impostor directly in the Impostor 25 | if isempty(ImpostorVec) == 1 26 | ImpostorVec = PersonImpostor; 27 | 28 | % Any Impostors after 1st one are vertcat into the matrix 29 | else 30 | ImpostorVec = vertcat(ImpostorVec, PersonImpostor); 31 | %ImpostorVec = [ImpostorVec; PersonImpostor]; 32 | end 33 | 34 | end 35 | 36 | 37 | end -------------------------------------------------------------------------------- /Facial Recognition-PCA/Mode Two/Label_TransformGI.m: -------------------------------------------------------------------------------- 1 | % Create Label and transform GI vector to 0-1 2 | 3 | function [testLabels, GI_convert]=Label_TransformGI(GI) 4 | 5 | % Labels for data 6 | Labels = zeros(size(GI,1),size(GI,1)); 7 | for rowCount = 0:10:size(GI,1)-10 8 | Labels(rowCount+1:rowCount + 10, rowCount+1:rowCount + 10) = ones(10, 10); 9 | end 10 | 11 | % Convert GI to 0 or 1 : Cut off Point 0.9 12 | GI_convert = zeros(size(GI,1),size(GI,1)); 13 | 14 | for row = 1:size(GI,1) 15 | for col = 1:size(GI,2) 16 | if GI(row,col) < 0.9 17 | GI_convert(row, col) = 1; 18 | end 19 | 20 | end 21 | end 22 | testLabels = [ones(1, 10), zeros(1, size(GI,1)-10 )]'; 23 | 24 | end -------------------------------------------------------------------------------- /Facial Recognition-PCA/Mode Two/ModeTwo.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------------------------------------------------------------------- 2 | % Mode 2 : First 25 Subjects as Training, Last 15 as Testing 3 | % --------------------------------------------------------------------------------------------------- 4 | clc 5 | clear all 6 | close all 7 | 8 | % Set up testing and training data + Calculate Genuine Scores 9 | [ first_img_vec, second_img_vec, img_vec ] = imgPrepModeTwo; 10 | 11 | % PCA + Calculate Genuine Scores 12 | [featureMatrixAgg, testProjectAgg, GenuineVec] = PCA_M2(first_img_vec, second_img_vec, img_vec ); 13 | 14 | % Calculate Impostor Scores 15 | [ImpostorVec] = ImpostorCalc(featureMatrixAgg, testProjectAgg); 16 | 17 | % Genuine + Impostor 18 | GI = [GenuineVec, ImpostorVec]; 19 | 20 | % Create Labels + Transform GI to 0-1 21 | [testLabels, GI_convert]=Label_TransformGI(GI); 22 | 23 | % Plot Graph 24 | [roc,EER,area,EERthr,ALLthr]= ezroc3(GI, testLabels, 2, 0, 1); 25 | 26 | % GAR and FAR from ROC 27 | GAR = roc(1, :)'; 28 | 29 | % Get FRR and FAR 30 | FRR = round((GAR + 1)*10^4)/10^4; 31 | FAR = round((roc(2, :)')*10^4)/10^4; 32 | 33 | % Obtain FRR values at 0%, 5%, 10% FAR 34 | case1 = []; 35 | case2 = []; 36 | case3 = []; 37 | 38 | for idx = 1:size(FAR,1) 39 | % FRR = 0% 40 | if (FAR(idx, 1) == 0) 41 | case1 = [case1,FRR(idx, 1)]; 42 | % FAR = 5% 43 | elseif (0.005 <= FAR(idx, 1) && FAR(idx, 1) <=0.0055) 44 | case2 = [case2,FRR(idx, 1)]; 45 | % FAR = 10% 46 | elseif (0.1 <= FAR(idx, 1) && FAR(idx, 1) <=0.1015) 47 | case3 = [case3,FRR(idx, 1)]; 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /Facial Recognition-PCA/Mode Two/imgPrepModeTwo.m: 
-------------------------------------------------------------------------------- 1 | % Load in images to create image vectos for testing and training / each person 2 | 3 | function [ first_img_vec, second_img_vec, img_vec ] = imgPrepModeTwo() 4 | 5 | % Initialize vectors 6 | first_img_vec = []; 7 | second_img_vec = []; 8 | img_vec = []; 9 | 10 | for faceNum = 1:40 11 | % Read in image values for each person 12 | faceNumM = int2str(faceNum); 13 | img1R= imread(strcat('faceImg\s', faceNumM, '\1.pgm')); 14 | img2R= imread(strcat('faceImg\s', faceNumM, '\2.pgm')); 15 | img3R= imread(strcat('faceImg\s', faceNumM, '\3.pgm')); 16 | img4R= imread(strcat('faceImg\s', faceNumM, '\4.pgm')); 17 | img5R= imread(strcat('faceImg\s', faceNumM, '\5.pgm')); 18 | img6R= imread(strcat('faceImg\s', faceNumM, '\6.pgm')); 19 | img7R= imread(strcat('faceImg\s', faceNumM, '\7.pgm')); 20 | img8R= imread(strcat('faceImg\s', faceNumM, '\8.pgm')); 21 | img9R= imread(strcat('faceImg\s', faceNumM, '\9.pgm')); 22 | img10R= imread(strcat('faceImg\s', faceNumM, '\10.pgm')); 23 | 24 | % Resize image into 10 x 10 pixels 25 | resize1 = imresize(img1R,[10 10]); 26 | resize2 = imresize(img2R,[10 10]); 27 | resize3 = imresize(img3R,[10 10]); 28 | resize4= imresize(img4R,[10 10]); 29 | resize5 = imresize(img5R,[10 10]); 30 | resize6 = imresize(img6R,[10 10]); 31 | resize7 = imresize(img7R,[10 10]); 32 | resize8 = imresize(img8R,[10 10]); 33 | resize9 = imresize(img9R,[10 10]); 34 | resize10 = imresize(img10R,[10 10]); 35 | 36 | % Convert image pixels from unit8 to double 37 | img1 = im2double(resize1); 38 | img2 = im2double(resize2); 39 | img3 = im2double(resize3); 40 | img4= im2double(resize4); 41 | img5 = im2double(resize5); 42 | img6 = im2double(resize6); 43 | img7 = im2double(resize7); 44 | img8 = im2double(resize8); 45 | img9 = im2double(resize9); 46 | img10 = im2double(resize10); 47 | 48 | % After 25 people, load as testing data 49 | if faceNum > 25 50 | 51 | first_img_vec_P = [{eval(strcat('img', int2str(1)))} {eval(strcat('img', int2str(2)))} {eval(strcat('img', int2str(3)))} ... 52 | {eval(strcat('img', int2str(4)))} {eval(strcat('img', int2str(5)))}]; 53 | second_img_vec_P = [ {eval(strcat('img', int2str(6)))} {eval(strcat('img', int2str(7)))} ... 54 | {eval(strcat('img', int2str(8)))} {eval(strcat('img', int2str(9)))} {eval(strcat('img', int2str(10)))}]; 55 | 56 | first_img_vec = [first_img_vec ; first_img_vec_P]; 57 | second_img_vec = [second_img_vec ; second_img_vec_P]; 58 | else 59 | 60 | % Grab the images (Person 1- 25) for training. 61 | % Convert each image into cell array representation. 62 | img_vec_person = [{eval(strcat('img', int2str(1)))} {eval(strcat('img', int2str(2)))} {eval(strcat('img', int2str(3)))} ... 63 | {eval(strcat('img', int2str(4)))} {eval(strcat('img', int2str(5)))} {eval(strcat('img', int2str(6)))} {eval(strcat('img', int2str(7)))} ... 
64 | {eval(strcat('img', int2str(8)))} {eval(strcat('img', int2str(9)))} {eval(strcat('img', int2str(10)))} ]; 65 | 66 | img_vec = [img_vec; img_vec_person]; 67 | 68 | end 69 | 70 | 71 | end 72 | end -------------------------------------------------------------------------------- /Fingerprint Spoof Detector-Naive Bayes/Bayes_Fingerprint_Spoof_Detector.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------------------------------------ 2 | % Fingerprint Spoof Detector - Naive Bayes (Default Priors) 3 | % ------------------------------------------------------------------------------------------------ 4 | clc 5 | clear all 6 | 7 | % Load in test & train data : 8 | % Live fingerprints + Fake fingerprints for Gelatine & Latex 9 | [ liveTrain, liveTest, GelTrain, GelTest, LatexTrain, LatexTest] = loadData; 10 | 11 | % Concat live + Latex Train & Test 12 | liveLatexTrain = [liveTrain ; LatexTrain]; 13 | liveLatexTest = [liveTest ; LatexTest]; 14 | liveLatexTrainLabel = [ones(1000,1); zeros(200,1)]; 15 | liveLatexTestLabel = [ones(1000,1); zeros(200,1)]; 16 | 17 | % Concat live + Latex + Gelatine Test 18 | liveLatexGelTest = [liveLatexTest; GelTest]; 19 | liveLatexGelTestLabel = [ones(1000,1); zeros(400,1)]; 20 | 21 | % Train multiclass Naive Bayes Model - Live + Latex 22 | NBModel_LiveLatex = fitcnb(liveLatexTrain, liveLatexTrainLabel); 23 | 24 | % >> Two classes ================================================= 25 | % Test model - Live + Latex 26 | predictLabel_LiveLatex = predict(NBModel_LiveLatex, liveLatexTest); 27 | 28 | % Resubstitution Classification Error - Live + Latex 29 | LossLiveLatex = loss(NBModel_LiveLatex, liveLatexTest, liveLatexTestLabel); 30 | resubErrorLiveLatex = resubLoss(NBModel_LiveLatex,'LossFun','classiferror'); 31 | 32 | % >> Three classes ================================================ 33 | % Test model - Live + Latex + Gelatine 34 | predictLabel_LiveLatexGel = predict(NBModel_LiveLatex, liveLatexGelTest); 35 | 36 | % Resubstitution Classification Error - Live + Latex + Gelatine 37 | LossLiveLatexGel = loss(NBModel_LiveLatex, liveLatexGelTest, liveLatexGelTestLabel); 38 | resubErrorLiveLatexGel = resubLoss(NBModel_LiveLatex,'LossFun','classiferror'); -------------------------------------------------------------------------------- /Fingerprint Spoof Detector-Naive Bayes/Bayes_Fingerprint_Spoof_Detector_prior.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------------------------------------ 2 | % Fingerprint Spoof Detector - Naive Bayes (Priors[0.6 0.4]) 3 | % ------------------------------------------------------------------------------------------------ 4 | clc 5 | clear all 6 | 7 | % Load in test & train data : 8 | % Live fingerprints + Fake fingerprints for Gelatine & Latex 9 | [ liveTrain, liveTest, GelTrain, GelTest, LatexTrain, LatexTest] = loadData; 10 | 11 | % Concat live + Latex Train & Test 12 | liveLatexTrain = [liveTrain ; LatexTrain]; 13 | liveLatexTest = [liveTest ; LatexTest]; 14 | liveLatexTrainLabel = [ones(1000,1); zeros(200,1)]; 15 | liveLatexTestLabel = [ones(1000,1); zeros(200,1)]; 16 | 17 | % Concat live + Latex + Gelatine Test 18 | liveLatexGelTest = [liveLatexTest; GelTest]; 19 | liveLatexGelTestLabel = [ones(1000,1); zeros(400,1)]; 20 | 21 | % Train multiclass Naive Bayes Model - Live + Latex 22 | NBModel_LiveLatex_P = fitcnb(liveLatexTrain, 
liveLatexTrainLabel); 23 | 24 | % Set Prior to [0.6 0.4] 25 | NBModel_LiveLatex_P.Prior = [0.6 0.4]; 26 | 27 | % >> Two classes ================================================= 28 | % Test model - Live + Latex 29 | predictLabel_LiveLatex_P = predict(NBModel_LiveLatex_P, liveLatexTest); 30 | 31 | % Resubstitution Classification Error - Live + Latex 32 | LossLiveLatex_P = loss(NBModel_LiveLatex_P, liveLatexTest, liveLatexTestLabel); 33 | resubErrorLiveLatex_P = resubLoss(NBModel_LiveLatex_P,'LossFun','classiferror'); 34 | 35 | % >> Three classes ================================================ 36 | % Test model - Live + Latex + Gelatine 37 | predictLabel_LiveLatexGel_P = predict(NBModel_LiveLatex_P, liveLatexGelTest); 38 | 39 | % Resubstitution Classification Error - Live + Latex + Gelatine 40 | LossLiveLatexGel_P = loss(NBModel_LiveLatex_P, liveLatexGelTest, liveLatexGelTestLabel); 41 | resubErrorLiveLatexGel_P = resubLoss(NBModel_LiveLatex_P,'LossFun','classiferror'); -------------------------------------------------------------------------------- /Fingerprint Spoof Detector-Naive Bayes/loadData.m: -------------------------------------------------------------------------------- 1 | % Simple function to preprocess loaded data 2 | function [ liveTrain, liveTest, GelTrain, GelTest, LatexTrain, LatexTest] = loadData 3 | 4 | % Live fingerprints 5 | loadliveTrain = load('featureMat_liv_train_bioLBP.mat'); 6 | loadliveTest = load('featureMat_liv_test_bioLBP.mat'); 7 | liveTrain = loadliveTrain.featureMat_liv_train_bioLBP; 8 | liveTest = loadliveTest.featureMat_liv_test_bioLBP; 9 | 10 | % Fake fingerprints - Gelatine 11 | loadGelTrain = load('featureMat_Gelatine_train_bioLBP.mat'); 12 | loadGelTest = load('featureMat_Gelatine_test_bioLBP.mat'); 13 | GelTrain = loadGelTrain.featureMat_Gelatine_train_bioLBP; 14 | GelTest = loadGelTest.featureMat_Gelatine_test_bioLBP; 15 | 16 | % Fake fingerprints - Latex 17 | loadLatTrain = load('featureMat_Latex_train_bioLBP.mat'); 18 | loadLatTest = load('featureMat_Latex_test_bioLBP.mat'); 19 | LatexTrain = loadLatTrain.featureMat_Latex_train_bioLBP; 20 | LatexTest = loadLatTest.featureMat_Latex_test_bioLBP; 21 | 22 | 23 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Committee/3CV/committee3CV.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------------------------------------------------- 2 | % Mini Project #4 - Gesture Recognition : NARX Nerual Network 3 | % Subject Specific - 3 Fold CV Committee 4 | % ------------------------------------------------------------------------------------------------------------- 5 | 6 | % Load 3 NARX net & Run 3 CV on three different NARX nets for predicted values 7 | load('InDelay15FeedDelay10Hidden15_CNet.mat'); 8 | [ predictTest_P1a, predictTest_P2a, predictTest_P3a, predictTest_P4a, predictTest_P5a, predictTest_P6a, stackMovementTest ] = ... 9 | main3CV(NARXnet); 10 | load('InDelay30FeedDelay23Hidden15_CNet.mat'); 11 | [ predictTest_P1b, predictTest_P2b, predictTest_P3b, predictTest_P4b, predictTest_P5b, predictTest_P6b, stackMovementTest_b ] = ... 12 | main3CV(NARXnet); 13 | load('InDelay30FeedDelay28Hidden15_CNet.mat'); 14 | [ predictTest_P1c, predictTest_P2c, predictTest_P3c, predictTest_P4c, predictTest_P5c, predictTest_P6c, stackMovementTest_c ] = ... 
15 | main3CV(NARXnet); 16 | 17 | % ROC + Confusion Matrix for each person 18 | confusionROCcommittee('Person1', predictTest_P1a, predictTest_P1b, predictTest_P1c, stackMovementTest); 19 | confusionROCcommittee('Person2', predictTest_P2a, predictTest_P2b, predictTest_P2c, stackMovementTest); 20 | confusionROCcommittee('Person3', predictTest_P3a, predictTest_P3b, predictTest_P3c, stackMovementTest); 21 | confusionROCcommittee('Person4', predictTest_P4a, predictTest_P4b, predictTest_P4c, stackMovementTest); 22 | confusionROCcommittee('Person5', predictTest_P5a, predictTest_P5b, predictTest_P5c, stackMovementTest); 23 | confusionROCcommittee('Person6',predictTest_P6a, predictTest_P6b, predictTest_P6c, stackMovementTest); 24 | -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Committee/3CV/confusionROCcommittee.m: -------------------------------------------------------------------------------- 1 | % Plot confusion matrix and ROC for 3 different NARX nets results 2 | function confusionROCcommittee(person, predictTestT1, predictTestT2, predictTestT3, stackMovementTest) 3 | 4 | % Resize the matrix to 4x4 for the prediction output by calling function 5 | % 'shrinkConfusion' 6 | predictT1_4by4 = shrinkConfusion(predictTestT1); 7 | predictT2_4by4 = shrinkConfusion(predictTestT2); 8 | predictT3_4by4 = shrinkConfusion(predictTestT3); 9 | overall = shrinkConfusion(cat(3, predictTestT1, predictTestT2, predictTestT3)); 10 | 11 | % Plot ROC Curves for all three rounds individually and an overall ROC + 12 | % Save to folders! 13 | ezroc3( predictT1_4by4); 14 | saveas(figure(1), [pwd '\committee\', person , '\', person, '_net1ROC.fig']); 15 | ezroc3( predictT2_4by4 ); 16 | saveas(figure(2), [pwd '\committee\', person , '\', person, '_net2ROC.fig']); 17 | ezroc3( predictT3_4by4 ); 18 | saveas(figure(3), [pwd '\committee\', person , '\', person, '_net3ROC.fig']); 19 | ezroc3( overall ); 20 | saveas(figure(4), [pwd '\committee\', person , '\', person, '_triNetROC.fig']); 21 | 22 | % Plot Confusion Matrix 23 | % Class 1/--1 to 1/0 24 | threeDataStack = [ predictTestT1 predictTestT2 predictTestT3 ] ; 25 | movementLabel = [ stackMovementTest stackMovementTest stackMovementTest ]; 26 | InitMovemntTestModify = (movementLabel + 1 )/2; 27 | PredTestModify = (threeDataStack + 1)/2; 28 | 29 | % Plot Confusion Matrix 30 | confusionG = plotconfusion(InitMovemntTestModify, PredTestModify, person); 31 | % Save the matrix 32 | saveas(confusionG, [pwd '\committee\', person , '\', person, '_Confusion.fig']) 33 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Committee/3CV/main3CV.m: -------------------------------------------------------------------------------- 1 | % 3CV training function for specific NARX net 2 | function [ predictTest_P1, predictTest_P2, predictTest_P3, predictTest_P4, predictTest_P5, predictTest_P6, stackMovementTest_P1 ] = main3CV(NARXnet) 3 | 4 | % Load 3 Rounds of x, z of each movement for each Person 5 | [ P1_C, P1_T, P1_R, P1_D ] = Encapsulate('GestureData\Person1.xls'); 6 | [ P2_C, P2_T, P2_R, P2_D ] = Encapsulate('GestureData\Person2.xls'); 7 | [ P3_C, P3_T, P3_R, P3_D ] = Encapsulate('GestureData\Person3.xls'); 8 | [ P4_C, P4_T, P4_R, P4_D ] = Encapsulate('GestureData\Person4.xls'); 9 | [ P5_C, P5_T, P5_R, P5_D ] = Encapsulate('GestureData\Person5.xls'); 10 | [ P6_C, P6_T, P6_R, P6_D ] = Encapsulate('GestureData\Person6.xls'); 11 | 12 | % Load Time Delay Nerual Network 13 | 
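% Note: the load below recreates the variable NARXnet from the fixed file
% 'InDelay30FeedDelay28Hidden15_CNet.mat', overriding the NARXnet argument
% passed in from committee3CV.m; if each committee member is meant to keep its
% own pre-loaded net, this load is likely a leftover.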
load('InDelay30FeedDelay28Hidden15_CNet.mat'); 14 | 15 | % 3 Fold Cross Validation 16 | [trained3CVnet_P1, predictTestT1_P1, predictTestT2_P1, predictTest_P1, stackMovementTest_P1] ... 17 | = threeFoldCV(NARXnet, P1_C, P1_T, P1_R, P1_D); 18 | [trained3CVnet_P2, predictTestT1_P2, predictTestT2_P2, predictTest_P2, stackMovementTest_P2] ... 19 | = threeFoldCV(NARXnet, P2_C, P2_T, P2_R, P2_D); 20 | [trained3CVnet_P3, predictTestT1_P3, predictTestT2_P3, predictTest_P3, stackMovementTest_P3] ... 21 | = threeFoldCV(NARXnet, P3_C, P3_T, P3_R, P3_D); 22 | [trained3CVnet_P4, predictTestT1_P4, predictTestT2_P4, predictTest_P4, stackMovementTest_P4] ... 23 | = threeFoldCV(NARXnet, P4_C, P4_T, P4_R, P4_D); 24 | [trained3CVnet_P5, predictTestT1_P5, predictTestT2_P5, predictTest_P5, stackMovementTest_P5] ... 25 | = threeFoldCV(NARXnet, P5_C, P5_T, P5_R, P5_D); 26 | [trained3CVnet_P6, predictTestT1_P6, predictTestT2_P6, predictTest_P6, stackMovementTest_P6] ... 27 | = threeFoldCV(NARXnet, P6_C, P6_T, P6_R, P6_D); 28 | 29 | end 30 | -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Committee/6CV/committee6CV.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------------------------------------------------- 2 | % Mini Project #4 - Gesture Recognition : NARX Nerual Network 3 | % Subject Specific - 6 Fold CV Committee 4 | % ------------------------------------------------------------------------------------------------------------- 5 | 6 | % Load 3 NARX net & Run 6 CV on three different NARX nets for predicted values 7 | load('InDelay15FeedDelay10Hidden15_CNet.mat'); 8 | [trained6CVnet1, predictTestT1n1, predictTestT2n1, predictTestT3n1, predictTestT4n1, predictTestT5n1, predictTestT6n1, stackMovementTest1] ... 9 | = main6CV(NARXnet); 10 | load('InDelay30FeedDelay23Hidden15_CNet.mat'); 11 | [trained6CVnet2, predictTestT1n2, predictTestT2n2, predictTestT3n2, predictTestT4n2, predictTestT5n2, predictTestT6n2, stackMovementTest2] ... 12 | = main6CV(NARXnet); 13 | load('InDelay30FeedDelay28Hidden15_CNet.mat'); 14 | [trained6CVnet3, predictTestT1n3, predictTestT2n3, predictTestT3n3, predictTestT4n3, predictTestT5n3, predictTestT6n3, stackMovementTest3] ... 
15 | = main6CV(NARXnet); 16 | % Concat all three predictions 17 | [predictTestT1] = concatAll6CV(predictTestT1n1, predictTestT1n2, predictTestT1n3); 18 | [predictTestT2] = concatAll6CV(predictTestT2n1, predictTestT2n2, predictTestT2n3); 19 | [predictTestT3] = concatAll6CV(predictTestT3n1, predictTestT3n2, predictTestT3n3); 20 | [predictTestT4] = concatAll6CV(predictTestT4n1, predictTestT4n2, predictTestT4n3); 21 | [predictTestT5] = concatAll6CV(predictTestT5n1, predictTestT5n2, predictTestT5n3); 22 | [predictTestT6] = concatAll6CV(predictTestT6n1, predictTestT6n2, predictTestT6n3); 23 | 24 | % ROC + Confusion Matrix 25 | confusionROCcommittee6CV(predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6, stackMovementTest1); -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Committee/6CV/concatAll6CV.m: -------------------------------------------------------------------------------- 1 | % Simple function to stack up all predicted output from the NARX nets 2 | function[predictTestT] = concatAll6CV(predictTestTn1, predictTestTn2, predictTestTn3) 3 | 4 | predictTestT = [predictTestTn1 predictTestTn2 predictTestTn3]; 5 | 6 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Committee/6CV/confusionROCcommittee6CV.m: -------------------------------------------------------------------------------- 1 | % Confusion Matrix for every folds in a single graph -- 6CV 2 | function confusionROCcommittee6CV(predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6, stackMovementTest) 3 | 4 | % Resize the matrix to 4x4 for the prediction output by calling function 5 | % 'shrinkConfusion' 6 | predictT1_4by4 = slashConfusion(predictTestT1); 7 | predictT2_4by4 = slashConfusion(predictTestT2); 8 | predictT3_4by4 = slashConfusion(predictTestT3); 9 | predictT4_4by4 = slashConfusion(predictTestT4); 10 | predictT5_4by4 = slashConfusion(predictTestT5); 11 | predictT6_4by4 = slashConfusion(predictTestT6); 12 | overall = slashConfusion(cat(6, predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6)); 13 | 14 | % Plot ROC Curves for all three rounds individually and an overall ROC + 15 | % Save to folders! 
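% Each predictTX_4by4 above condenses a fold's raw 4 x (time steps) committee
% output into a single score per (output movement, true movement) pair,
% presumably analogous to shrinkConfusion in the 3CV committee, so ezroc3
% receives one compact 4x4 score matrix per fold. The saveas(figure(n), ...)
% calls assume ezroc3 opens figures numbered 1 through 7 in order, which only
% holds if no other figures are open beforehand.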
16 | ezroc3( predictT1_4by4 ); 17 | saveas(figure(1), [pwd '\Committee_6CV\Fold1ROC.fig']); 18 | ezroc3( predictT2_4by4 ); 19 | saveas(figure(2), [pwd '\Committee_6CV\Fold2ROC.fig']); 20 | ezroc3( predictT3_4by4 ); 21 | saveas(figure(3), [pwd '\Committee_6CV\Fold3ROC.fig']); 22 | ezroc3( predictT4_4by4 ); 23 | saveas(figure(4), [pwd '\Committee_6CV\Fold4ROC.fig']); 24 | ezroc3( predictT5_4by4 ); 25 | saveas(figure(5), [pwd '\Committee_6CV\Fold5ROC.fig']); 26 | ezroc3( predictT6_4by4 ); 27 | saveas(figure(6), [pwd '\Committee_6CV\Fold6ROC.fig']); 28 | ezroc3( overall ); 29 | saveas(figure(7), [pwd '\Committee_6CV\OverallROC.fig']); 30 | 31 | % Plot Confusion Matrix 32 | % Class 1/--1 to 1/0 33 | movementLabel = [ stackMovementTest stackMovementTest stackMovementTest ]; 34 | InitMovemntTestModify = (movementLabel + 1 )/2; 35 | sixDataStack = [predictTestT1 predictTestT2 predictTestT3 predictTestT4 predictTestT5 predictTestT6]; 36 | PredTestModify = (sixDataStack + 1)/2; 37 | 38 | % Plot Confusion Matrix 39 | confusionG = plotconfusion(InitMovemntTestModify, PredTestModify); 40 | 41 | % Save the matrix 42 | saveas(confusionG, [pwd '\Committee_6CV\Confusion.fig']) 43 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Committee/6CV/main6CV.m: -------------------------------------------------------------------------------- 1 | % 6CV training function for specific NARX net 2 | function [trained6CVnet, predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6, stackMovementTest] = main6CV(NARXnet) 3 | 4 | % Load 3 Rounds of x, z of each movement for each Person 5 | [ P1_xz ] = serializePersonXZ('GestureData\Person1.xls'); 6 | [ P2_xz ] = serializePersonXZ('GestureData\Person2.xls'); 7 | [ P3_xz] = serializePersonXZ('GestureData\Person3.xls'); 8 | [ P4_xz ] = serializePersonXZ('GestureData\Person4.xls'); 9 | [ P5_xz ] = serializePersonXZ('GestureData\Person5.xls'); 10 | [ P6_xz ] = serializePersonXZ('GestureData\Person6.xls'); 11 | 12 | 13 | % 6 Fold Cross Validation 14 | [ trained6CVnet, predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6, stackMovementTest] ... 
15 | = sixFoldCV(NARXnet, P1_xz, P2_xz, P3_xz,P4_xz,P5_xz,P6_xz); 16 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Pre-Fold/EncapsulatePerson.m: -------------------------------------------------------------------------------- 1 | % Encapsulate all data into 2 matrices : x and z per Person 2 | 3 | function [P_xz] = EncapsulatePerson(fileName) 4 | 5 | % Circle 6 | [R1C_x, R1C_z, R2C_x, R2C_z, R3C_x, R3C_z] = combExcelData(fileName, 'Circle'); 7 | circle_xz = [R1C_x, R1C_z; R2C_x, R2C_z; R3C_x, R3C_z]'; 8 | 9 | % Triangle 10 | [R1T_x, R1T_z, R2T_x, R2T_z, R3T_x, R3T_z] = combExcelData(fileName, 'Triangle'); 11 | triangle_xz = [R1T_x, R1T_z; R2T_x, R2T_z; R3T_x, R3T_z]'; 12 | 13 | % Right 14 | [R1R_x, R1R_z, R2R_x, R2R_z, R3R_x, R3R_z] = combExcelData(fileName, 'Right'); 15 | right_xz = [R1R_x, R1R_z; R2R_x, R2R_z; R3R_x, R3R_z]'; 16 | 17 | % Down 18 | [R1D_x, R1D_z, R2D_x, R2D_z, R3D_x, R3D_z] = combExcelData(fileName, 'Down'); 19 | down_xz = [R1D_x, R1D_z; R2D_x, R2D_z; R3D_x, R3D_z]'; 20 | 21 | % Stack up 4 movements for Person's x,z 22 | P_xz = [circle_xz; triangle_xz; right_xz; down_xz]; 23 | 24 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Pre-Fold/combExcelData.m: -------------------------------------------------------------------------------- 1 | % Parse x, z for each round for each movements 2 | 3 | function [R1_x, R1_z, R2_x, R2_z, R3_x, R3_z] = combExcelData(fileName, sheetName) 4 | 5 | % Load in all data from Person Excel File by Different Sheet Name 6 | personSheetsData = xlsread(fileName, sheetName); 7 | 8 | % Slice Round 1 x and z Columns 9 | R1_x = personSheetsData(:,1); 10 | R1_z = personSheetsData(:,3); 11 | 12 | % Slice Round 2 x and z Columns 13 | R2_x = personSheetsData(:,5); 14 | R2_z = personSheetsData(:,7); 15 | 16 | % Slice Round 3 x and z Columns 17 | R3_x = personSheetsData(:,9); 18 | R3_z = personSheetsData(:,11); 19 | 20 | % Concatenate all 3 Rounds of x and z into one column for each 21 | %AllRound_x = cat(1, Round1_x, Round2_x, Round3_x); 22 | %AllRound_z = cat(1, Round1_z, Round2_z, Round3_z); 23 | 24 | end 25 | 26 | -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Pre-Fold/fiveTrainedInit.m: -------------------------------------------------------------------------------- 1 | % Train 5 times for the Initial Net 2 | function[ initPredMatrix, Result4by4Matrix, stackFiveMatrixAvg ] = fiveTrainedInit(net, seqTrainxz, seqValxz, seqTrainxzT) 3 | 4 | initPredMatrix = zeros(4, 4, 5); 5 | stackFiveMatrix = zeros(4,1200, 5); 6 | 7 | for run = (1:5) 8 | predictTest = 0; 9 | 10 | % Train 11 | [Xs, Xi, Ai, Ts] = preparets(net, seqTrainxz, { }, seqTrainxzT); 12 | trainedNet = train(net, Xs, Ts, Xi, Ai); 13 | 14 | % Validation 15 | netc= closeloop(trainedNet); 16 | predictTest_pack = netc(seqValxz); 17 | 18 | % predictTest_pack = trainedNet(seqValxz, xic, aic); 19 | predictTest = cell2mat(predictTest_pack); 20 | 21 | % Stack up all prediction data into 4 rows 22 | stackFiveMatrix(:, :, run) = predictTest; 23 | 24 | % Unpack x,z into 4 Movements 25 | properConfusionPredict =shrinkConfusion(predictTest); 26 | 27 | % Store in the 4 x 4 x 5 Matrix for all 5 runs 28 | initPredMatrix(:, :, run) = properConfusionPredict; 29 | 30 | end 31 | 32 | 33 | % Average out into 4 x 4 x 1 Matrix 34 | Result4by4Matrix = (initPredMatrix(:, :, 1) + initPredMatrix(:, :, 2) + initPredMatrix(:, :, 3) + 
initPredMatrix(:, :, 4) + initPredMatrix(:, :, 5))/5; 35 | stackFiveMatrixAvg = (stackFiveMatrix(:, :, 1) + stackFiveMatrix(:, :, 2) + stackFiveMatrix(:, :, 3) + stackFiveMatrix(:, :, 4) + stackFiveMatrix(:, :, 5))/5; 36 | end 37 | 38 | 39 | -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Pre-Fold/inputHiddenOutputDelay.m: -------------------------------------------------------------------------------- 1 | % Train with 2 different input delay lines, 2 hidden layer size, and 3 2 | % different output delay lines 3 | % Under rule input delay has to be longer than output delay, total 4 | % combination : 8 5 | 6 | function inputHiddenOutputDelay(P2_xz) 7 | 8 | % Different combinations of input delay, hidden layer and output delay 9 | inputDelay = [15, 30]; 10 | hiddenLayer = [15, 30]; 11 | outputDelay = [10, 23, 28]; 12 | count = 1; 13 | 14 | % Generate 8 different nets for all possible combinations, excluding the 15 | % cases where input is shorter than output delay line 16 | 17 | % Input Delay Lines 18 | for inputIdx = (1:2) 19 | % Hidden Layer Size 20 | for hiddenIdx = (1:2) 21 | % Output Delay Lines 22 | for outputIdx = (1:3) 23 | % Only create the net when the output delay lines is shorter 24 | % than input delay lines 25 | if outputDelay(outputIdx) < inputDelay(inputIdx) 26 | % Train with 2 different input delay lines, 3 different output 27 | % delay lines and 2 different hidden layer size 28 | NARXnet = narxnet(1:inputDelay(inputIdx), 1:outputDelay(outputIdx), hiddenLayer(hiddenIdx), 'open','trainbr'); 29 | NARXnet.layers{2}.transferFcn = 'tansig'; 30 | NARXnet.divideParam.testRatio = 0; 31 | NARXnet.trainParam.epochs = 100; 32 | 33 | % Set up Training : Round 1 | Validation : Round 2 34 | trainxz = [ P2_xz(1:2, 1:300) P2_xz(3:4, 1:300) P2_xz(5:6, 1:300) P2_xz(7:8, 1:300) ]; 35 | valxz= [ P2_xz(1:2, 301:600) P2_xz(3:4, 301:600) P2_xz(5:6, 301:600) P2_xz(7:8, 301:600) ]; 36 | 37 | % Set up target values ( class : 1 | not-class : -1 ) 38 | [ stackMovementTrain, stackMovementVal ] = movementTarget('Prefold'); 39 | 40 | % To sequencial format 41 | seqTrainxz = con2seq( trainxz ); 42 | seqTrainxzT = con2seq( stackMovementTrain ); 43 | seqValxz = con2seq( valxz ); 44 | 45 | % 5 Simulation Matrices then Average 5 dimension 4 x 4 to 1 dimension 46 | [ initPredMatrix, Result4by4Matrix, stackFiveMatrixAvg ] = fiveTrainedInit(NARXnet, seqTrainxz, seqValxz, seqTrainxzT); 47 | 48 | % Initialize Empty Vectors 49 | comboMatrix = zeros(4, 4, 9); 50 | 51 | % Keep Track of depth and number of nodes 52 | %netname = strcat('InDelay', int2str(inputDelay(inputIdx)),'FeedDelay', int2str(outputDelay(outputIdx)), 'Hidden', int2str(hiddenLayer(hiddenIdx)), '_Net'); 53 | Cnetname = strcat('InDelay', int2str(inputDelay(inputIdx)),'FeedDelay', int2str(outputDelay(outputIdx)), 'Hidden', int2str(hiddenLayer(hiddenIdx)), '_CNet'); 54 | 55 | % Store Matrix 56 | comboMatrix(:, :, count) = Result4by4Matrix; 57 | 58 | % Plot + Save ROC + Confusion Matrix 59 | prefoldConfusionROC(stackMovementVal , initPredMatrix, stackFiveMatrixAvg, Cnetname, count); 60 | 61 | % Save Net 62 | save(Cnetname ,'NARXnet'); 63 | fprintf('Trained finished', '\n'); 64 | count = count + 1; 65 | end 66 | end 67 | end 68 | end 69 | 70 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Pre-Fold/movementTarget.m: -------------------------------------------------------------------------------- 1 | % One Hot Target for all 4 Movements 
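% Targets use a +/-1 one-vs-rest coding rather than 0/1: each of the four rows
% (Circle, Triangle, Right, Down) is +1 across its own block of time steps and
% -1 everywhere else, matching the tansig output layer configured for the NARX
% nets. For example, the 'Prefold' target rows look like (4 x 1200):
%   Circle row:   [ +1 x300  -1 x300  -1 x300  -1 x300 ]
%   Triangle row: [ -1 x300  +1 x300  -1 x300  -1 x300 ]   ... and so on.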
2 | function[ stackMovementTrain, stackMovementTest ] = movementTarget(foldType) 3 | 4 | % Vectors for 1s ( is Class ) and -1s ( Not Class ) 5 | isClass = ones(1,300); 6 | notClass = -ones(1,300); 7 | 8 | % Target for 3 CV 9 | if strcmp(foldType, '3 Fold') == 1 10 | % Circle 11 | CTarget_Train = [isClass, notClass, notClass, notClass, isClass, notClass, notClass, notClass]; 12 | CTarget_Test = [isClass, notClass, notClass, notClass]; 13 | 14 | % Triangle 15 | TTarget_Train = [notClass, isClass, notClass, notClass, notClass, isClass, notClass, notClass]; 16 | TTarget_Test = [notClass, isClass, notClass, notClass]; 17 | 18 | % Right 19 | RTarget_Train = [notClass, notClass, isClass, notClass, notClass, notClass, isClass, notClass]; 20 | RTarget_Test = [notClass, notClass, isClass, notClass]; 21 | 22 | % Down 23 | DTarget_Train = [notClass, notClass, notClass, isClass, notClass, notClass, notClass, isClass]; 24 | DTarget_Test = [notClass, notClass, notClass, isClass]; 25 | 26 | stackMovementTrain = [CTarget_Train; TTarget_Train; RTarget_Train; DTarget_Train]; 27 | stackMovementTest = [CTarget_Test; TTarget_Test; RTarget_Test; DTarget_Test]; 28 | 29 | % Target for 6 CV 30 | elseif strcmp(foldType, '6 Fold') == 1 31 | isClass6CV = ones(1,900); 32 | notClass6CV = -ones(1,900); 33 | % Circle 34 | C = [isClass6CV, notClass6CV, notClass6CV, notClass6CV]; 35 | CTarget_Train = repmat(C, 1, 5); 36 | CTarget_Test = C; 37 | 38 | % Triangle 39 | T = [notClass6CV, isClass6CV, notClass6CV, notClass6CV]; 40 | TTarget_Train = repmat(T, 1, 5); 41 | TTarget_Test = T; 42 | 43 | % Right 44 | R = [notClass6CV, notClass6CV, isClass6CV, notClass6CV]; 45 | RTarget_Train =repmat(R, 1, 5); 46 | RTarget_Test = R; 47 | 48 | % Down 49 | D = [notClass6CV, notClass6CV, notClass6CV, isClass6CV]; 50 | DTarget_Train =repmat(D, 1, 5); 51 | DTarget_Test = D; 52 | 53 | % Stack all 4 movements of targets for Training and Testing 54 | stackMovementTrain = [CTarget_Train; TTarget_Train; RTarget_Train; DTarget_Train]; 55 | stackMovementTest = [CTarget_Test; TTarget_Test; RTarget_Test; DTarget_Test]; 56 | 57 | % Target for Prefold 58 | elseif strcmp(foldType, 'Prefold') == 1 59 | preTarget_C = [isClass, notClass, notClass, notClass]; 60 | preTarget_T = [notClass, isClass, notClass, notClass]; 61 | preTarget_R = [notClass, notClass, isClass, notClass]; 62 | preTarget_D = [notClass, notClass, notClass, isClass]; 63 | 64 | stackMovementTrain = [preTarget_C; preTarget_T; preTarget_R; preTarget_D]; 65 | stackMovementTest = [preTarget_C; preTarget_T; preTarget_R; preTarget_D]; 66 | 67 | end 68 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Pre-Fold/preFold.m: -------------------------------------------------------------------------------- 1 | % --------------------------------------------------------------------------------------- 2 | % Mini Project #4 - Gesture Recognition : NARX Net 3 | % Pre-Fold 4 | % --------------------------------------------------------------------------------------- 5 | 6 | % Clear memory data 7 | clc 8 | clear 9 | close all 10 | 11 | % Load in Person 2's x, z Data ( Including all 3 rounds ) 12 | [ P2_xz ] = EncapsulatePerson('GestureData\Person2.xls'); 13 | 14 | % 2 kinds of Input Delay Lines x 2 kinds of Hidden Layer Size x 3 kinds of Output Delay Lines 15 | % Output Delay has to be smaller than Input Delay 16 | % Do 5 Simulation for each combination & encapsulate to 1 net 17 | % Save Net & plot ROC + Confusion Matrix 18 | 
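% Sweep every valid input-delay / hidden-size / output-delay combination on Person 2 ( Round 1 for training, Round 2 for validation )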
inputHiddenOutputDelay(P2_xz); -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Pre-Fold/prefoldConfusionROC.m: -------------------------------------------------------------------------------- 1 | % Plot and save ROC and Confusion Matrix for Pre-fold 2 | function prefoldConfusionROC(stackMovementVal , InitResultMatrix, stackFiveMatrixAvg, netname, count) 3 | 4 | % ROC Curve + Save 5 | 6 | ezroc3(InitResultMatrix); 7 | saveas(figure(count), [pwd '\Prefold\ROC\', netname , '_ROC.fig']); 8 | 9 | % Plot Confusion Matrix 10 | % Class 1/--1 to 1/0 11 | InitMovemntValModify = (stackMovementVal + 1 )/2; 12 | comboPredModify = (stackFiveMatrixAvg + 1)/2; 13 | 14 | % Plot Confusion Matrix 15 | confusionPre = plotconfusion(InitMovemntValModify, comboPredModify); 16 | 17 | % Save the matrix 18 | saveas(confusionPre, [pwd '\Prefold\Confusion\', netname , '_Confusion.jpg']) 19 | saveas(confusionPre, [pwd '\Prefold\Confusion\', netname , '_Confusion.fig']) 20 | 21 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Pre-Fold/shrinkConfusion.m: -------------------------------------------------------------------------------- 1 | % Downsize to 4x4 for Confusion Matrix 2 | 3 | function[confusionResize] = shrinkConfusion(predictTest) 4 | % Confusion Matrix with Correct Classifications 5 | originalConfusion = [ [1, -1, -1, -1]; [-1, 1, -1, -1]; [-1, -1, 1, -1]; [-1, -1, -1, 1];]; 6 | 7 | % Downsize 300 predicted values to 1 for all 4 movements 8 | circle_downsize = [ mean(predictTest(1, 1:300)), mean(predictTest(1, 301:600)), mean(predictTest(1, 601:900)), mean(predictTest(1, 901:1200)) ]; 9 | triangle_downsize = [ mean(predictTest(2, 1:300)), mean(predictTest(2, 301:600)), mean(predictTest(2, 601:900)), mean(predictTest(2, 901:1200)) ]; 10 | right_downsize = [ mean(predictTest(3, 1:300)), mean(predictTest(3, 301:600)), mean(predictTest(3, 601:900)), mean(predictTest(3, 901:1200)) ]; 11 | down_downsize = [ mean(predictTest(4, 1:300)), mean(predictTest(4, 301:600)), mean(predictTest(4, 601:900)), mean(predictTest(4, 901:1200)) ]; 12 | 13 | % A 4x4 Confusion Matrix with predicted values! 
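% Rows are the four network outputs ( Circle, Triangle, Right, Down detectors ); columns are the four 300-sample segments of the evaluated sequence in the same movement order, so each entry is the mean score for one detector / segment pair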
14 | confusionResize = [ circle_downsize; triangle_downsize; right_downsize; down_downsize ]; 15 | 16 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Independent - 6CV/SubjectIndependent_6CV.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------------------------------------------- 2 | % Mini Project #3 - Gesture Recognition : NARX Neural Network 3 | % Subject Independent - 6 Fold CV 4 | % ------------------------------------------------------------------------------------------------------- 5 | 6 | % Clear memory data 7 | clc 8 | clear 9 | close all 10 | 11 | % Load 3 Rounds of x, z of each movement for each Person 12 | [ P1_xz ] = serializePersonXZ('GestureData\Person1.xls'); 13 | [ P2_xz ] = serializePersonXZ('GestureData\Person2.xls'); 14 | [ P3_xz] = serializePersonXZ('GestureData\Person3.xls'); 15 | [ P4_xz ] = serializePersonXZ('GestureData\Person4.xls'); 16 | [ P5_xz ] = serializePersonXZ('GestureData\Person5.xls'); 17 | [ P6_xz ] = serializePersonXZ('GestureData\Person6.xls'); 18 | 19 | % Load the pre-trained NARX Neural Network 20 | load('InDelay30FeedDelay28Hidden15_CNet.mat'); 21 | 22 | % 6 Fold Cross Validation 23 | [ trained6CVnet, predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6, stackMovementTest] ... 24 | = sixFoldCV(NARXnet, P1_xz, P2_xz, P3_xz,P4_xz,P5_xz,P6_xz); 25 | 26 | % Plot ROC for each of the 6 folds and the overall CV + save all graphs 27 | confusionROCSixF(predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6, stackMovementTest); -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Independent - 6CV/confusionROCSixF.m: -------------------------------------------------------------------------------- 1 | % Confusion Matrix for every fold in a single graph -- 6CV 2 | function confusionROCSixF(predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6, stackMovementTest) 3 | 4 | % Resize the matrix to 4x4 for the prediction output by calling function 5 | % 'slashConfusion' 6 | predictT1_4by4 = slashConfusion(predictTestT1); 7 | predictT2_4by4 = slashConfusion(predictTestT2); 8 | predictT3_4by4 = slashConfusion(predictTestT3); 9 | predictT4_4by4 = slashConfusion(predictTestT4); 10 | predictT5_4by4 = slashConfusion(predictTestT5); 11 | predictT6_4by4 = slashConfusion(predictTestT6); 12 | overall = slashConfusion(cat(6, predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6)); 13 | 14 | % Plot ROC Curves for all six folds individually and an overall ROC + 15 | % Save to folders!
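% ezroc3 is assumed to be an external ROC helper available on the MATLAB path; each call below is expected to produce a new figure, which is then saved by figure number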
16 | ezroc3( predictT1_4by4 ); 17 | saveas(figure(1), [pwd '\6CV\Fold1ROC.fig']); 18 | ezroc3( predictT2_4by4 ); 19 | saveas(figure(2), [pwd '\6CV\Fold2ROC.fig']); 20 | ezroc3( predictT3_4by4 ); 21 | saveas(figure(3), [pwd '\6CV\Fold3ROC.fig']); 22 | ezroc3( predictT4_4by4 ); 23 | saveas(figure(4), [pwd '\6CV\Fold4ROC.fig']); 24 | ezroc3( predictT5_4by4 ); 25 | saveas(figure(5), [pwd '\6CV\Fold5ROC.fig']); 26 | ezroc3( predictT6_4by4 ); 27 | saveas(figure(6), [pwd '\6CV\Fold6ROC.fig']); 28 | ezroc3( overall ); 29 | saveas(figure(7), [pwd '\6CV\OverallROC.fig']); 30 | 31 | % Plot Confusion Matrix 32 | % Class 1/--1 to 1/0 33 | InitMovemntTestModify = (stackMovementTest + 1 )/2; 34 | lastRPredTestModify = (predictTestT6 + 1)/2; 35 | 36 | % Plot Confusion Matrix 37 | confusionG = plotconfusion(InitMovemntTestModify, lastRPredTestModify); 38 | 39 | % Save the matrix 40 | saveas(confusionG, [pwd '\6CV\Confusion.fig']) 41 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Independent - 6CV/serializePersonXZ.m: -------------------------------------------------------------------------------- 1 | % Serialize all three rounds of data into 2 rows : x and z for each person 2 | 3 | function [P_xz] = serializePersonXZ(fileName) 4 | 5 | % Circle 6 | [R1C_x, R1C_z, R2C_x, R2C_z, R3C_x, R3C_z] = combExcelData(fileName, 'Circle'); 7 | circle_xz = [R1C_x, R1C_z; R2C_x, R2C_z; R3C_x, R3C_z]'; 8 | 9 | % Triangle 10 | [R1T_x, R1T_z, R2T_x, R2T_z, R3T_x, R3T_z] = combExcelData(fileName, 'Triangle'); 11 | triangle_xz = [R1T_x, R1T_z; R2T_x, R2T_z; R3T_x, R3T_z]'; 12 | 13 | % Right 14 | [R1R_x, R1R_z, R2R_x, R2R_z, R3R_x, R3R_z] = combExcelData(fileName, 'Right'); 15 | right_xz = [R1R_x, R1R_z; R2R_x, R2R_z; R3R_x, R3R_z]'; 16 | 17 | % Down 18 | [R1D_x, R1D_z, R2D_x, R2D_z, R3D_x, R3D_z] = combExcelData(fileName, 'Down'); 19 | down_xz = [R1D_x, R1D_z; R2D_x, R2D_z; R3D_x, R3D_z]'; 20 | 21 | % Stack up 4 movements for Person's x,z 22 | P_xz = [circle_xz, triangle_xz, right_xz, down_xz]; 23 | 24 | end 25 | 26 | -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Independent - 6CV/slashConfusion.m: -------------------------------------------------------------------------------- 1 | % Downsize to 4x4 for Confusion Matrix for 6CV 2 | 3 | function[confusionResize] = slashConfusion(predictTest) 4 | % Confusion Matrix with Correct Classifications 5 | originalConfusion = [ [1, -1, -1, -1]; [-1, 1, -1, -1]; [-1, -1, 1, -1]; [-1, -1, -1, 1];]; 6 | 7 | % Downsize 300 predicted values to 1 for all 4 movements 8 | circle_downsize = [ mean(predictTest(1, 1:300)), mean(predictTest(1, 301:600)), mean(predictTest(1, 601:900)), mean(predictTest(1, 901:1200)), ... 9 | mean(predictTest(1, 1201:1500)), mean(predictTest(1, 1501:1800)), mean(predictTest(1, 1801:2100)), mean(predictTest(1, 2101:2400)), ... 10 | mean(predictTest(1, 2401:2700)), mean(predictTest(1, 2701:3000)), mean(predictTest(1, 3001:3300)), mean(predictTest(1, 3301:3600)) ]; 11 | 12 | triangle_downsize = [ mean(predictTest(2, 1:300)), mean(predictTest(2, 301:600)), mean(predictTest(2, 601:900)), mean(predictTest(2, 901:1200)), ... 13 | mean(predictTest(2, 1201:1500)), mean(predictTest(2, 1501:1800)), mean(predictTest(2, 1801:2100)), mean(predictTest(2, 2101:2400)), ... 
14 | mean(predictTest(2, 2401:2700)), mean(predictTest(2, 2701:3000)), mean(predictTest(2, 3001:3300)), mean(predictTest(2, 3301:3600)) ]; 15 | 16 | right_downsize = [ mean(predictTest(3, 1:300)), mean(predictTest(3, 301:600)), mean(predictTest(3, 601:900)), mean(predictTest(3, 901:1200)), ... 17 | mean(predictTest(3, 1201:1500)), mean(predictTest(3, 1501:1800)), mean(predictTest(3, 1801:2100)), mean(predictTest(3, 2101:2400)), ... 18 | mean(predictTest(3, 2401:2700)), mean(predictTest(3, 2701:3000)), mean(predictTest(3, 3001:3300)), mean(predictTest(3, 3301:3600)) ]; 19 | 20 | down_downsize = [ mean(predictTest(4, 1:300)), mean(predictTest(4, 301:600)), mean(predictTest(4, 601:900)), mean(predictTest(4, 901:1200)), ... 21 | mean(predictTest(4, 1201:1500)), mean(predictTest(4, 1501:1800)), mean(predictTest(4, 1801:2100)), mean(predictTest(4, 2101:2400)), ... 22 | mean(predictTest(4, 2401:2700)), mean(predictTest(4, 2701:3000)), mean(predictTest(4, 3001:3300)), mean(predictTest(4, 3301:3600)) ]; 23 | 24 | % Downsize again to go from 4x12 to 4x4 25 | circle_resize2 = [mean([circle_downsize(1,1), circle_downsize(1,5), circle_downsize(1,9)]), mean([circle_downsize(1,2), circle_downsize(1,6), circle_downsize(1,10)]), ... 26 | mean([circle_downsize(1,3), circle_downsize(1,7), circle_downsize(1,11)]), mean([circle_downsize(1,4), circle_downsize(1,8), circle_downsize(1,12)])]; 27 | 28 | triangle_resize2 = [mean([triangle_downsize(1,1), triangle_downsize(1,5), triangle_downsize(1,9)]), mean([triangle_downsize(1,2), triangle_downsize(1,6), triangle_downsize(1,10)]), ... 29 | mean([triangle_downsize(1,3), triangle_downsize(1,7), triangle_downsize(1,11)]), mean([triangle_downsize(1,4), triangle_downsize(1,8), triangle_downsize(1,12)])]; 30 | 31 | right_resize2 = [mean([right_downsize(1,1), right_downsize(1,5), right_downsize(1,9)]), mean([right_downsize(1,2), right_downsize(1,6), right_downsize(1,10)]), ... 32 | mean([right_downsize(1,3), right_downsize(1,7), right_downsize(1,11)]), mean([right_downsize(1,4), right_downsize(1,8), right_downsize(1,12)])]; 33 | 34 | down_resize2 = [mean([down_downsize(1,1), down_downsize(1,5), down_downsize(1,9)]), mean([down_downsize(1,2), down_downsize(1,6), down_downsize(1,10)]), ... 35 | mean([down_downsize(1,3), down_downsize(1,7), down_downsize(1,11)]), mean([down_downsize(1,4), down_downsize(1,8), down_downsize(1,12)])]; 36 | 37 | % A 4x4 Confusion Matrix with predicted values! 
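% Collapse the 4 x 12 block means to 4 x 4 by averaging every fourth block together ( blocks 1/5/9, 2/6/10, 3/7/11, 4/8/12 ), i.e. one column per movement assuming the test sequence cycles through the four movements in each round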
38 | confusionResize = [ circle_resize2; triangle_resize2; right_resize2; down_resize2 ]; 39 | 40 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Independent - 6CV/stackFivePeople.m: -------------------------------------------------------------------------------- 1 | % Stack 2 designated rounds for Training and set aside 1 person for Testing 2 | 3 | function[P1_data, P2_data,P3_data, P4_data, P5_data, P6_data] = stackFivePeople( P1_xz, P2_xz, P3_xz,P4_xz,P5_xz,P6_xz) 4 | 5 | % Person 1 6 | P1_data = [P1_xz(1:2, :), P1_xz(3:4, :), P1_xz(5:6, :)]; 7 | 8 | % Person 2 9 | P2_data = [P2_xz(1:2, :), P2_xz(3:4, :), P2_xz(5:6, :)]; 10 | 11 | % Person 3 12 | P3_data = [P3_xz(1:2, :), P3_xz(3:4, :), P3_xz(5:6, :)]; 13 | 14 | % Person 4 15 | P4_data = [P4_xz(1:2, :), P4_xz(3:4, :), P4_xz(5:6, :)]; 16 | 17 | % Person 5 18 | P5_data = [P5_xz(1:2, :), P5_xz(3:4, :), P5_xz(5:6, :)]; 19 | 20 | % Person 6 21 | P6_data= [P6_xz(1:2, :), P6_xz(3:4, :), P6_xz(5:6, :)]; 22 | 23 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Specific - 3CV/Encapsulate.m: -------------------------------------------------------------------------------- 1 | % Encapsulate all data into 2 matrices : x and z for each movement 2 | 3 | function [circle_xz, triangle_xz, right_xz, down_xz] = Encapsulate(fileName) 4 | 5 | % Circle 6 | [R1C_x, R1C_z, R2C_x, R2C_z, R3C_x, R3C_z] = combExcelData(fileName, 'Circle'); 7 | circle_xz = [R1C_x, R1C_z; R2C_x, R2C_z; R3C_x, R3C_z]'; 8 | 9 | % Triangle 10 | [R1T_x, R1T_z, R2T_x, R2T_z, R3T_x, R3T_z] = combExcelData(fileName, 'Triangle'); 11 | triangle_xz = [R1T_x, R1T_z; R2T_x, R2T_z; R3T_x, R3T_z]'; 12 | 13 | % Right 14 | [R1R_x, R1R_z, R2R_x, R2R_z, R3R_x, R3R_z] = combExcelData(fileName, 'Right'); 15 | right_xz = [R1R_x, R1R_z; R2R_x, R2R_z; R3R_x, R3R_z]'; 16 | 17 | % Down 18 | [R1D_x, R1D_z, R2D_x, R2D_z, R3D_x, R3D_z] = combExcelData(fileName, 'Down'); 19 | down_xz = [R1D_x, R1D_z; R2D_x, R2D_z; R3D_x, R3D_z]'; 20 | 21 | 22 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Specific - 3CV/SubjectSpecific_3CV.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------- 2 | % Mini Project #4 - Gesture Recognition : NARX Nerual Network 3 | % Subject Specific - 3 Fold CV 4 | % ------------------------------------------------------------------- 5 | 6 | % Clear memory data 7 | clc 8 | clear 9 | close all 10 | 11 | % Load 3 Rounds of x, z of each movement for each Person 12 | [ P1_C, P1_T, P1_R, P1_D ] = Encapsulate('GestureData\Person1.xls'); 13 | [ P2_C, P2_T, P2_R, P2_D ] = Encapsulate('GestureData\Person2.xls'); 14 | [ P3_C, P3_T, P3_R, P3_D ] = Encapsulate('GestureData\Person3.xls'); 15 | [ P4_C, P4_T, P4_R, P4_D ] = Encapsulate('GestureData\Person4.xls'); 16 | [ P5_C, P5_T, P5_R, P5_D ] = Encapsulate('GestureData\Person5.xls'); 17 | [ P6_C, P6_T, P6_R, P6_D ] = Encapsulate('GestureData\Person6.xls'); 18 | 19 | % Load Time Delay Nerual Network 20 | load('InDelay30FeedDelay28Hidden15_CNet.mat'); 21 | 22 | % 3 Fold Cross Validation 23 | [trained3CVnet_P1, predictTestT1_P1, predictTestT2_P1, predictTest_P1, stackMovementTest_P1] ... 24 | = threeFoldCV(NARXnet, P1_C, P1_T, P1_R, P1_D); 25 | [trained3CVnet_P2, predictTestT1_P2, predictTestT2_P2, predictTest_P2, stackMovementTest_P2] ... 
26 | = threeFoldCV(NARXnet, P2_C, P2_T, P2_R, P2_D); 27 | [trained3CVnet_P3, predictTestT1_P3, predictTestT2_P3, predictTest_P3, stackMovementTest_P3] ... 28 | = threeFoldCV(NARXnet, P3_C, P3_T, P3_R, P3_D); 29 | [trained3CVnet_P4, predictTestT1_P4, predictTestT2_P4, predictTest_P4, stackMovementTest_P4] ... 30 | = threeFoldCV(NARXnet, P4_C, P4_T, P4_R, P4_D); 31 | [trained3CVnet_P5, predictTestT1_P5, predictTestT2_P5, predictTest_P5, stackMovementTest_P5] ... 32 | = threeFoldCV(NARXnet, P5_C, P5_T, P5_R, P5_D); 33 | [trained3CVnet_P6, predictTestT1_P6, predictTestT2_P6, predictTest_P6, stackMovementTest_P6] ... 34 | = threeFoldCV(NARXnet, P6_C, P6_T, P6_R, P6_D); 35 | 36 | 37 | % Plot ROC for CV1, CV2, CV3 and overall CV + save all graphs 38 | confusionROC('Person1', predictTestT1_P1, predictTestT2_P1, predictTest_P1, stackMovementTest_P1); 39 | confusionROC('Person2', predictTestT1_P2, predictTestT2_P2, predictTest_P2, stackMovementTest_P2); 40 | confusionROC('Person3', predictTestT1_P3, predictTestT2_P3, predictTest_P3, stackMovementTest_P3); 41 | confusionROC('Person4', predictTestT1_P4, predictTestT2_P4, predictTest_P4, stackMovementTest_P4); 42 | confusionROC('Person5', predictTestT1_P5, predictTestT2_P5, predictTest_P5, stackMovementTest_P5); 43 | confusionROC('Person6', predictTestT1_P6, predictTestT2_P6, predictTest_P6, stackMovementTest_P6); 44 | -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Specific - 3CV/combExcelData.m: -------------------------------------------------------------------------------- 1 | % Parse x, z for each round for each movements 2 | 3 | function [R1_x, R1_z, R2_x, R2_z, R3_x, R3_z] = combExcelData(fileName, sheetName) 4 | 5 | % Load in all data from Person Excel File by Different Sheet Name 6 | personSheetsData = xlsread(fileName, sheetName); 7 | 8 | % Slice Round 1 x and z Columns 9 | R1_x = personSheetsData(:,1); 10 | R1_z = personSheetsData(:,3); 11 | 12 | % Slice Round 2 x and z Columns 13 | R2_x = personSheetsData(:,5); 14 | R2_z = personSheetsData(:,7); 15 | 16 | % Slice Round 3 x and z Columns 17 | R3_x = personSheetsData(:,9); 18 | R3_z = personSheetsData(:,11); 19 | 20 | % Concatenate all 3 Rounds of x and z into one column for each 21 | %AllRound_x = cat(1, Round1_x, Round2_x, Round3_x); 22 | %AllRound_z = cat(1, Round1_z, Round2_z, Round3_z); 23 | 24 | end 25 | 26 | -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Specific - 3CV/confusionROC.m: -------------------------------------------------------------------------------- 1 | % Confusion Matrix for each fold in a single graph 2 | function confusionROC(person, predictTestT1, predictTestT2, predictTestT3, stackMovementTest) 3 | 4 | % Resize the matrix to 4x4 for the prediction output by calling function 5 | % 'shrinkConfusion' 6 | predictT1_4by4 = shrinkConfusion(predictTestT1); 7 | predictT2_4by4 = shrinkConfusion(predictTestT2); 8 | predictT3_4by4 = shrinkConfusion(predictTestT3); 9 | overall = shrinkConfusion(cat(3, predictTestT1, predictTestT2, predictTestT3)); 10 | 11 | % Plot ROC Curves for all three rounds individually and an overall ROC + 12 | % Save to folders! 
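% One ROC figure is produced per fold plus an overall one, each saved into the per-person 3CV folder ( ezroc3 is assumed to be an external ROC helper on the path )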
13 | ezroc3( predictT1_4by4); 14 | saveas(figure(1), [pwd '\ROC\3CV\', person , '\', person, '_Fold1ROC.fig']); 15 | ezroc3( predictT2_4by4 ); 16 | saveas(figure(2), [pwd '\ROC\3CV\', person , '\', person, '_Fold2ROC.fig']); 17 | ezroc3( predictT3_4by4 ); 18 | saveas(figure(3), [pwd '\ROC\3CV\', person , '\', person, '_Fold3ROC.fig']); 19 | ezroc3( overall ); 20 | saveas(figure(4), [pwd '\ROC\3CV\', person , '\', person, '_OverallROC.fig']); 21 | 22 | % Plot Confusion Matrix 23 | % Class 1/--1 to 1/0 24 | InitMovemntTestModify = (stackMovementTest + 1 )/2; 25 | lastRPredTestModify = (predictTestT3 + 1)/2; 26 | 27 | % Plot Confusion Matrix 28 | confusionG = plotconfusion(InitMovemntTestModify, lastRPredTestModify, person); 29 | % Save the matrix 30 | saveas(confusionG, [pwd '\ROC\3CV\', person , '\', person, '_Confusion.fig']) 31 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Specific - 3CV/movementTarget.m: -------------------------------------------------------------------------------- 1 | % One Hot Target for all 4 Movements 2 | function[ stackMovementTrain, stackMovementTest ] = movementTarget(foldType) 3 | 4 | % Vectors for 1s ( is Class ) and -1s ( Not Class ) 5 | isClass = ones(1,300); 6 | notClass = -ones(1,300); 7 | 8 | % Target for 3 CV 9 | if strcmp(foldType, '3 Fold') == 1 10 | % Circle 11 | CTarget_Train = [isClass, notClass, notClass, notClass, isClass, notClass, notClass, notClass]; 12 | CTarget_Test = [isClass, notClass, notClass, notClass]; 13 | 14 | % Triangle 15 | TTarget_Train = [notClass, isClass, notClass, notClass, notClass, isClass, notClass, notClass]; 16 | TTarget_Test = [notClass, isClass, notClass, notClass]; 17 | 18 | % Right 19 | RTarget_Train = [notClass, notClass, isClass, notClass, notClass, notClass, isClass, notClass]; 20 | RTarget_Test = [notClass, notClass, isClass, notClass]; 21 | 22 | % Down 23 | DTarget_Train = [notClass, notClass, notClass, isClass, notClass, notClass, notClass, isClass]; 24 | DTarget_Test = [notClass, notClass, notClass, isClass]; 25 | 26 | stackMovementTrain = [CTarget_Train; TTarget_Train; RTarget_Train; DTarget_Train]; 27 | stackMovementTest = [CTarget_Test; TTarget_Test; RTarget_Test; DTarget_Test]; 28 | 29 | % Target for 6 CV 30 | elseif strcmp(foldType, '6 Fold') == 1 31 | isClass6CV = ones(1,900); 32 | notClass6CV = -ones(1,900); 33 | % Circle 34 | C = [isClass6CV, notClass6CV, notClass6CV, notClass6CV]; 35 | CTarget_Train = repmat(C, 1, 5); 36 | CTarget_Test = C; 37 | 38 | % Triangle 39 | T = [notClass6CV, isClass6CV, notClass6CV, notClass6CV]; 40 | TTarget_Train = repmat(T, 1, 5); 41 | TTarget_Test = T; 42 | 43 | % Right 44 | R = [notClass6CV, notClass6CV, isClass6CV, notClass6CV]; 45 | RTarget_Train =repmat(R, 1, 5); 46 | RTarget_Test = R; 47 | 48 | % Down 49 | D = [notClass6CV, notClass6CV, notClass6CV, isClass6CV]; 50 | DTarget_Train =repmat(D, 1, 5); 51 | DTarget_Test = D; 52 | 53 | % Stack all 4 movements of targets for Training and Testing 54 | stackMovementTrain = [CTarget_Train; TTarget_Train; RTarget_Train; DTarget_Train]; 55 | stackMovementTest = [CTarget_Test; TTarget_Test; RTarget_Test; DTarget_Test]; 56 | 57 | % Target for Prefold 58 | elseif strcmp(foldType, 'Prefold') == 1 59 | preTarget_C = [isClass, notClass, notClass, notClass]; 60 | preTarget_T = [notClass, isClass, notClass, notClass]; 61 | preTarget_R = [notClass, notClass, isClass, notClass]; 62 | preTarget_D = [notClass, notClass, notClass, isClass]; 63 | 64 | stackMovementTrain 
= [preTarget_C; preTarget_T; preTarget_R; preTarget_D]; 65 | stackMovementTest = [preTarget_C; preTarget_T; preTarget_R; preTarget_D]; 66 | 67 | end 68 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Specific - 3CV/roundData.m: -------------------------------------------------------------------------------- 1 | % Stack 2 designated rounds for Training and set aside 1 round for Testing per movement 2 | 3 | function[R1Train, R1Test, R2Train, R2Test, R3Train, R3Test] = roundData(P_movement) 4 | 5 | % Fold #1 -- Training : Round 1 & 2 + Testing : Round 3 6 | R1Train = [P_movement(1, 1:600); P_movement(2, 1:600) ]; 7 | R1Test = [P_movement(1, 601:900) ; P_movement(2, 601:900)]; 8 | 9 | % Fold #2 -- Training : Round 2 & 3 + Testing : Round 1 10 | R2Train = [P_movement(1, 301:900) ; P_movement(2, 301:900)]; 11 | R2Test = [P_movement(1, 1:300) ; P_movement(2, 1:300)]; 12 | 13 | % Fold #3 -- Training : Round 1 & 3 + Testing : Round 2 14 | R3Train = [P_movement(1, 1:300), P_movement(1, 601:900); P_movement(2, 1:300) P_movement(2, 601:900)]; 15 | R3Test = [P_movement(1, 301:600) ; P_movement(2, 301:600)]; 16 | 17 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Specific - 3CV/shrinkConfusion.m: -------------------------------------------------------------------------------- 1 | % Downsize to 4x4 for Confusion Matrix 2 | 3 | function[confusionResize] = shrinkConfusion(predictTest) 4 | % Confusion Matrix with Correct Classifications 5 | originalConfusion = [ [1, -1, -1, -1]; [-1, 1, -1, -1]; [-1, -1, 1, -1]; [-1, -1, -1, 1];]; 6 | 7 | % Downsize 300 predicted values to 1 for all 4 movements 8 | circle_downsize = [ mean(predictTest(1, 1:300)), mean(predictTest(1, 301:600)), mean(predictTest(1, 601:900)), mean(predictTest(1, 901:1200)) ]; 9 | triangle_downsize = [ mean(predictTest(2, 1:300)), mean(predictTest(2, 301:600)), mean(predictTest(2, 601:900)), mean(predictTest(2, 901:1200)) ]; 10 | right_downsize = [ mean(predictTest(3, 1:300)), mean(predictTest(3, 301:600)), mean(predictTest(3, 601:900)), mean(predictTest(3, 901:1200)) ]; 11 | down_downsize = [ mean(predictTest(4, 1:300)), mean(predictTest(4, 301:600)), mean(predictTest(4, 601:900)), mean(predictTest(4, 901:1200)) ]; 12 | 13 | % A 4x4 Confusion Matrix with predicted values! 
14 | confusionResize = [ circle_downsize; triangle_downsize; right_downsize; down_downsize ]; 15 | 16 | end -------------------------------------------------------------------------------- /Gesture Recognition-NARX/Subject Specific - 3CV/threeFoldCV.m: -------------------------------------------------------------------------------- 1 | % 3 Fold Cross Validation 2 | function[ trained3CVnet, predictTestT1, predictTestT2, predictTestT3, stackMovementTest] = threeFoldCV(net, P_C, P_T, P_R, P_D) 3 | % ------------------------------------------------------------------------------------------------------------------------------- 4 | % Create a single matrix to stack up 4 movements per Person 5 | [R1Train_C, R1Test_C, R2Train_C, R2Test_C, R3Train_C, R3Test_C] = roundData(P_C); 6 | [M1Train_T, M1Test_T, M2Train_T, M2Test_T, M3Train_T, M3Test_T] = roundData(P_T); 7 | [M1Train_R, M1Test_R, M2Train_R, M2Test_R, M3Train_R, M3Test_R] = roundData(P_R); 8 | [M1Train_D, M1Test_D, M2Train_D, M2Test_D, M3Train_D, M3Test_D] = roundData(P_D); 9 | 10 | % Fold #1 Train + Test Data 11 | fold1Train = [ R1Train_C M1Train_T M1Train_R M1Train_D]; 12 | fold1Test = [ R1Test_C M1Test_T M1Test_R M1Test_D]; 13 | 14 | % To sequencial format 15 | f1Train = con2seq(fold1Train); 16 | f1Test = con2seq(fold1Test); 17 | 18 | % Fold #2 Train + Test Data 19 | fold2Train = [ R2Train_C M2Train_T M2Train_R M2Train_D]; 20 | fold2Test = [ R2Test_C M2Test_T M2Test_R M2Test_D]; 21 | 22 | % To sequencial format 23 | f2Train = con2seq(fold2Train); 24 | f2Test = con2seq(fold2Test); 25 | 26 | % Fold #3 Train + Test Data 27 | fold3Train = [ R3Train_C M3Train_T M3Train_R M3Train_D]; 28 | fold3Test = [ R3Test_C M3Test_T M3Test_R M3Test_D]; 29 | 30 | % To sequencial format 31 | f3Train = con2seq(fold3Train); 32 | f3Test = con2seq(fold3Test); 33 | 34 | % Target 35 | [ stackMovementTrain, stackMovementTest ] = movementTarget('3 Fold'); 36 | 37 | % To sequencial format 38 | Mtrain = con2seq(stackMovementTrain); 39 | Mtest = con2seq(stackMovementTest); 40 | % ------------------------------------------------------------------------------------------------------------------------------- 41 | % TDNN prepare nets and train with 3 folds 42 | % >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fold #1 43 | % Train 1 44 | [X1s, X1i, A1i, T1s] = preparets(net, f1Train, {}, Mtrain); 45 | f1net = train(net, X1s, T1s, X1i, A1i); 46 | 47 | % Test 1 in closeloop 48 | netc1 = closeloop(f1net); 49 | predictTestT1_pack = netc1(f1Test); 50 | predictTestT1 = cell2mat(predictTestT1_pack); 51 | 52 | % >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fold #2 53 | % Train 2 54 | [X2s, X2i, A2i, T2s] = preparets(f1net, f2Train, {}, Mtrain); 55 | f2net = train(f1net, X2s, T2s, X2i, A2i); 56 | 57 | % Test 2 in closeloop 58 | netc2 = closeloop(f2net); 59 | predictTestT2_pack= netc2(f2Test); 60 | predictTestT2 = cell2mat(predictTestT2_pack); 61 | 62 | % >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fold #3 63 | % Train 3 64 | [X3s, X3i, A3i, T3s] = preparets(f2net, f3Train, {}, Mtrain); 65 | trained3CVnet= train(f2net, X3s, T3s, X3i, A3i); 66 | 67 | 68 | % Test 3 in closeloop 69 | netc3 = closeloop(trained3CVnet); 70 | predictTestT3_pack = netc3(f3Test); 71 | predictTestT3 = cell2mat(predictTestT3_pack); 72 | 73 | 74 | end -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Prefold/EncapsulatePerson.m: -------------------------------------------------------------------------------- 1 | % Encapsulate all data into 2 matrices : x and z per 
Person 2 | 3 | function [P_xz] = EncapsulatePerson(fileName) 4 | 5 | % Circle 6 | [R1C_x, R1C_z, R2C_x, R2C_z, R3C_x, R3C_z] = combExcelData(fileName, 'Circle'); 7 | circle_xz = [R1C_x, R1C_z; R2C_x, R2C_z; R3C_x, R3C_z]'; 8 | 9 | % Triangle 10 | [R1T_x, R1T_z, R2T_x, R2T_z, R3T_x, R3T_z] = combExcelData(fileName, 'Triangle'); 11 | triangle_xz = [R1T_x, R1T_z; R2T_x, R2T_z; R3T_x, R3T_z]'; 12 | 13 | % Right 14 | [R1R_x, R1R_z, R2R_x, R2R_z, R3R_x, R3R_z] = combExcelData(fileName, 'Right'); 15 | right_xz = [R1R_x, R1R_z; R2R_x, R2R_z; R3R_x, R3R_z]'; 16 | 17 | % Down 18 | [R1D_x, R1D_z, R2D_x, R2D_z, R3D_x, R3D_z] = combExcelData(fileName, 'Down'); 19 | down_xz = [R1D_x, R1D_z; R2D_x, R2D_z; R3D_x, R3D_z]'; 20 | 21 | % Stack up 4 movements for Person's x,z 22 | P_xz = [circle_xz; triangle_xz; right_xz; down_xz]; 23 | 24 | end -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Prefold/depthsNodesDecision.m: -------------------------------------------------------------------------------- 1 | % Train with 3 different depths and 3 different number of nodes = 9 combo 2 | function depthsNodesDecision(P2_xz) 3 | 4 | % Different combinations of depths and number of nodes 5 | depthInput = [15, 30, 60]; 6 | numNodes = [5, 15, 30]; 7 | count = 1; 8 | 9 | % Generate 9 different nets for all possible combinations 10 | for depthIdx = (1:3) 11 | for nodeIdx = (1:3) 12 | % Train with 3 different depths and 3 different number of nodes 13 | TDNNnet = timedelaynet(1:depthInput(depthIdx), numNodes(nodeIdx), 'trainbr'); 14 | TDNNnet.layers{2}.transferFcn = 'tansig'; 15 | TDNNnet.divideParam.testRatio = 0; 16 | TDNNnet.trainParam.epochs = 100; 17 | 18 | % Set up Training : Round 1 | Validation : Round 2 19 | trainxz = [ P2_xz(1:2, 1:300) P2_xz(3:4, 1:300) P2_xz(5:6, 1:300) P2_xz(7:8, 1:300) ]; 20 | valxz= [ P2_xz(1:2, 301:600) P2_xz(3:4, 301:600) P2_xz(5:6, 301:600) P2_xz(7:8, 301:600) ]; 21 | 22 | % Set up target values ( class : 1 | not-class : -1 ) 23 | [ stackMovementTrain, stackMovementVal ] = movementTarget('Prefold'); 24 | 25 | % To sequencial format 26 | seqTrainxz = con2seq( trainxz ); 27 | seqTrainxzT = con2seq( stackMovementTrain ); 28 | seqValxz = con2seq( valxz ); 29 | 30 | % 5 Simulation Matrices then Average 5 dimension 4 x 4 to 1 dimension 31 | [ initPredMatrix, Result4by4Matrix, stackFiveMatrixAvg ] = fiveTrainedInit(TDNNnet, seqTrainxz, seqValxz, seqTrainxzT); 32 | 33 | % Save 9 Different TDNN Nets 34 | %count = 1; 35 | 36 | % Initialize Empty Vectors 37 | comboMatrix = zeros(4, 4, 9); 38 | 39 | % Keep Track of depth and number of nodes 40 | netname = strcat('Depth', int2str(depthInput(depthIdx)), 'NodeNum', int2str(numNodes(nodeIdx)), '_Net'); 41 | 42 | % Store Matrix 43 | comboMatrix(:, :, count) = Result4by4Matrix; 44 | 45 | % Plot + Save ROC + Confusion Matrix 46 | prefoldConfusionROC(stackMovementVal , initPredMatrix, stackFiveMatrixAvg, netname, count); 47 | 48 | % Save Net 49 | save(netname ,'TDNNnet'); 50 | count = count + 1; 51 | 52 | end 53 | end 54 | end -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Prefold/fiveTrainedInit.m: -------------------------------------------------------------------------------- 1 | % Train 5 times for the Initial Net 2 | function[ initPredMatrix, Result4by4Matrix, stackFiveMatrixAvg ] = fiveTrainedInit(net, seqTrainxz, seqValxz, seqTrainxzT) 3 | 4 | initPredMatrix = zeros(4, 4, 5); 5 | stackFiveMatrix = zeros(4,1200, 5); 6 | 7 | for run = 
(1:5) 8 | predictTest = 0; 9 | 10 | % Train 11 | [Xs, Xi, Ai, Ts] = preparets(net, seqTrainxz, seqTrainxzT); 12 | trainedNet = train(net, Xs, Ts, Xi, Ai); 13 | 14 | % Validation 15 | predictTest_pack = trainedNet(seqValxz); 16 | predictTest = cell2mat(predictTest_pack); 17 | 18 | % Stack up all prediction data into 4 rows 19 | stackFiveMatrix(:, :, run) = predictTest; 20 | 21 | % Unpack x,z into 4 Movements 22 | properConfusionPredict =shrinkConfusion(predictTest); 23 | 24 | % Store in the 4 x 4 x 5 Matrix for all 5 runs 25 | initPredMatrix(:, :, run) = properConfusionPredict; 26 | 27 | end 28 | 29 | 30 | % Average out into 4 x 4 x 1 Matrix 31 | Result4by4Matrix = (initPredMatrix(:, :, 1) + initPredMatrix(:, :, 2) + initPredMatrix(:, :, 3) + initPredMatrix(:, :, 4) + initPredMatrix(:, :, 5))/5; 32 | stackFiveMatrixAvg = (stackFiveMatrix(:, :, 1) + stackFiveMatrix(:, :, 2) + stackFiveMatrix(:, :, 3) + stackFiveMatrix(:, :, 4) + stackFiveMatrix(:, :, 5))/5; 33 | end 34 | 35 | 36 | -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Prefold/preFold.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------- 2 | % Mini Project #3 - Gesture Recognition : Time Delay Nerual Network 3 | % Pre-Fold 4 | % ------------------------------------------------------------------- 5 | 6 | % Clear memory data 7 | clc 8 | clear 9 | close all 10 | 11 | % Load in Person 2's x, z Data ( Including all 3 rounds ) 12 | [ P2_xz ] = EncapsulatePerson('GestureData\Person2.xls'); 13 | 14 | % 9 Combinations (depths + number of nodes) 15 | % Do 5 Simulation for each combination & encapsulate to 1 net 16 | % Save Net & plot ROC + Confusion Matrix 17 | depthsNodesDecision(P2_xz); -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Prefold/prefoldConfusionROC.m: -------------------------------------------------------------------------------- 1 | % Plot and save ROC and Confusion Matrix for Pre-fold 2 | function prefoldConfusionROC(stackMovementVal , InitResultMatrix, stackFiveMatrixAvg, netname, count) 3 | 4 | % ROC Curve + Save 5 | 6 | ezroc3(InitResultMatrix); 7 | saveas(figure(count), [pwd '\ROC\Prefold\ROC\', netname , '_ROC.fig']); 8 | 9 | % Plot Confusion Matrix 10 | % Class 1/--1 to 1/0 11 | InitMovemntValModify = (stackMovementVal + 1 )/2; 12 | comboPredModify = (stackFiveMatrixAvg + 1)/2; 13 | 14 | % Plot Confusion Matrix 15 | confusionPre = plotconfusion(InitMovemntValModify, comboPredModify); 16 | 17 | % Save the matrix 18 | saveas(confusionPre, [pwd '\ROC\Prefold\Confusion\', netname , '_Confusion.fig']) 19 | 20 | end -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Subject Independent-6CV/SubjectIndependent_6CV.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------- 2 | % Mini Project #3 - Gesture Recognition : Time Delay Nerual Network 3 | % Subject Independent - 6 Fold CV 4 | % ------------------------------------------------------------------- 5 | 6 | % Clear memory data 7 | clc 8 | clear 9 | close all 10 | 11 | % Load 3 Rounds of x, z of each movement for each Person 12 | [ P1_xz ] = serializePersonXZ('GestureData\Person1.xls'); 13 | [ P2_xz ] = serializePersonXZ('GestureData\Person2.xls'); 14 | [ P3_xz] = 
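% Seven ROC figures in total ( one per fold plus the overall case ) are saved under the ROC\6CV folder by figure number; ezroc3 is assumed to be an external ROC helper on the path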
serializePersonXZ('GestureData\Person3.xls'); 15 | [ P4_xz ] = serializePersonXZ('GestureData\Person4.xls'); 16 | [ P5_xz ] = serializePersonXZ('GestureData\Person5.xls'); 17 | [ P6_xz ] = serializePersonXZ('GestureData\Person6.xls'); 18 | 19 | % Load Time Delay Nerual Network 20 | load('Depth15NodeNum30_Net.mat'); 21 | 22 | % 6 Fold Cross Validation 23 | [ trained6CVnet, predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6, stackMovementTest] ... 24 | = sixFoldCV(TDNNnet, P1_xz, P2_xz, P3_xz,P4_xz,P5_xz,P6_xz); 25 | 26 | % Plot ROC for CV1, CV2, CV3 and overall CV + save all graphs 27 | confusionROCSixF(predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6, stackMovementTest); -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Subject Independent-6CV/confusionROCSixF.m: -------------------------------------------------------------------------------- 1 | % Confusion Matrix for every folds in a single graph -- 6CV 2 | function confusionROCSixF(predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6, stackMovementTest) 3 | 4 | % Resize the matrix to 4x4 for the prediction output by calling function 5 | % 'shrinkConfusion' 6 | predictT1_4by4 = slashConfusion(predictTestT1); 7 | predictT2_4by4 = slashConfusion(predictTestT2); 8 | predictT3_4by4 = slashConfusion(predictTestT3); 9 | predictT4_4by4 = slashConfusion(predictTestT4); 10 | predictT5_4by4 = slashConfusion(predictTestT5); 11 | predictT6_4by4 = slashConfusion(predictTestT6); 12 | overall = slashConfusion(cat(6, predictTestT1, predictTestT2, predictTestT3, predictTestT4, predictTestT5, predictTestT6)); 13 | 14 | % Plot ROC Curves for all three rounds individually and an overall ROC + 15 | % Save to folders! 
16 | ezroc3( predictT1_4by4 ); 17 | saveas(figure(1), [pwd '\ROC\6CV\Fold1ROC.fig']); 18 | ezroc3( predictT2_4by4 ); 19 | saveas(figure(2), [pwd '\ROC\6CV\Fold2ROC.fig']); 20 | ezroc3( predictT3_4by4 ); 21 | saveas(figure(3), [pwd '\ROC\6CV\Fold3ROC.fig']); 22 | ezroc3( predictT4_4by4 ); 23 | saveas(figure(4), [pwd '\ROC\6CV\Fold4ROC.fig']); 24 | ezroc3( predictT5_4by4 ); 25 | saveas(figure(5), [pwd '\ROC\6CV\Fold5ROC.fig']); 26 | ezroc3( predictT6_4by4 ); 27 | saveas(figure(6), [pwd '\ROC\6CV\Fold6ROC.fig']); 28 | ezroc3( overall ); 29 | saveas(figure(7), [pwd '\ROC\6CV\OverallROC.fig']); 30 | 31 | % Plot Confusion Matrix 32 | % Class 1/--1 to 1/0 33 | InitMovemntTestModify = (stackMovementTest + 1 )/2; 34 | lastRPredTestModify = (predictTestT6 + 1)/2; 35 | 36 | % Plot Confusion Matrix 37 | confusionG = plotconfusion(InitMovemntTestModify, lastRPredTestModify); 38 | 39 | % Save the matrix 40 | saveas(confusionG, [pwd '\ROC\6CV\Confusion.fig']) 41 | end -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Subject Independent-6CV/serializePersonXZ.m: -------------------------------------------------------------------------------- 1 | % Serialize all three rounds of data into 2 rows : x and z for each person 2 | 3 | function [P_xz] = serializePersonXZ(fileName) 4 | 5 | % Circle 6 | [R1C_x, R1C_z, R2C_x, R2C_z, R3C_x, R3C_z] = combExcelData(fileName, 'Circle'); 7 | circle_xz = [R1C_x, R1C_z; R2C_x, R2C_z; R3C_x, R3C_z]'; 8 | 9 | % Triangle 10 | [R1T_x, R1T_z, R2T_x, R2T_z, R3T_x, R3T_z] = combExcelData(fileName, 'Triangle'); 11 | triangle_xz = [R1T_x, R1T_z; R2T_x, R2T_z; R3T_x, R3T_z]'; 12 | 13 | % Right 14 | [R1R_x, R1R_z, R2R_x, R2R_z, R3R_x, R3R_z] = combExcelData(fileName, 'Right'); 15 | right_xz = [R1R_x, R1R_z; R2R_x, R2R_z; R3R_x, R3R_z]'; 16 | 17 | % Down 18 | [R1D_x, R1D_z, R2D_x, R2D_z, R3D_x, R3D_z] = combExcelData(fileName, 'Down'); 19 | down_xz = [R1D_x, R1D_z; R2D_x, R2D_z; R3D_x, R3D_z]'; 20 | 21 | % Stack up 4 movements for Person's x,z 22 | P_xz = [circle_xz, triangle_xz, right_xz, down_xz]; 23 | 24 | end 25 | 26 | -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Subject Independent-6CV/stackFivePeople.m: -------------------------------------------------------------------------------- 1 | % Stack 2 designated rounds for Training and set aside 1 person for Testing 2 | 3 | function[P1_data, P2_data,P3_data, P4_data, P5_data, P6_data] = stackFivePeople( P1_xz, P2_xz, P3_xz,P4_xz,P5_xz,P6_xz) 4 | 5 | % Person 1 6 | P1_data = [P1_xz(1:2, :), P1_xz(3:4, :), P1_xz(5:6, :)]; 7 | 8 | % Person 2 9 | P2_data = [P2_xz(1:2, :), P2_xz(3:4, :), P2_xz(5:6, :)]; 10 | 11 | % Person 3 12 | P3_data = [P3_xz(1:2, :), P3_xz(3:4, :), P3_xz(5:6, :)]; 13 | 14 | % Person 4 15 | P4_data = [P4_xz(1:2, :), P4_xz(3:4, :), P4_xz(5:6, :)]; 16 | 17 | % Person 5 18 | P5_data = [P5_xz(1:2, :), P5_xz(3:4, :), P5_xz(5:6, :)]; 19 | 20 | % Person 6 21 | P6_data= [P6_xz(1:2, :), P6_xz(3:4, :), P6_xz(5:6, :)]; 22 | 23 | end -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Subject Specific-3CV/Encapsulate.m: -------------------------------------------------------------------------------- 1 | % Encapsulate all data into 2 matrices : x and z for each movement 2 | 3 | function [circle_xz, triangle_xz, right_xz, down_xz] = Encapsulate(fileName) 4 | 5 | % Circle 6 | [R1C_x, R1C_z, R2C_x, R2C_z, R3C_x, R3C_z] = combExcelData(fileName, 'Circle'); 7 | 
circle_xz = [R1C_x, R1C_z; R2C_x, R2C_z; R3C_x, R3C_z]'; 8 | 9 | % Triangle 10 | [R1T_x, R1T_z, R2T_x, R2T_z, R3T_x, R3T_z] = combExcelData(fileName, 'Triangle'); 11 | triangle_xz = [R1T_x, R1T_z; R2T_x, R2T_z; R3T_x, R3T_z]'; 12 | 13 | % Right 14 | [R1R_x, R1R_z, R2R_x, R2R_z, R3R_x, R3R_z] = combExcelData(fileName, 'Right'); 15 | right_xz = [R1R_x, R1R_z; R2R_x, R2R_z; R3R_x, R3R_z]'; 16 | 17 | % Down 18 | [R1D_x, R1D_z, R2D_x, R2D_z, R3D_x, R3D_z] = combExcelData(fileName, 'Down'); 19 | down_xz = [R1D_x, R1D_z; R2D_x, R2D_z; R3D_x, R3D_z]'; 20 | 21 | 22 | end -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Subject Specific-3CV/SubjectSpecific_3CV.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------- 2 | % Mini Project #3 - Gesture Recognition : Time Delay Nerual Network 3 | % Subject Specific - 3 Fold CV 4 | % ------------------------------------------------------------------- 5 | 6 | % Clear memory data 7 | clc 8 | clear 9 | close all 10 | 11 | % Load 3 Rounds of x, z of each movement for each Person 12 | [ P1_C, P1_T, P1_R, P1_D ] = Encapsulate('GestureData\Person1.xls'); 13 | [ P2_C, P2_T, P2_R, P2_D ] = Encapsulate('GestureData\Person2.xls'); 14 | [ P3_C, P3_T, P3_R, P3_D ] = Encapsulate('GestureData\Person3.xls'); 15 | [ P4_C, P4_T, P4_R, P4_D ] = Encapsulate('GestureData\Person4.xls'); 16 | [ P5_C, P5_T, P5_R, P5_D ] = Encapsulate('GestureData\Person5.xls'); 17 | [ P6_C, P6_T, P6_R, P6_D ] = Encapsulate('GestureData\Person6.xls'); 18 | 19 | % Load Time Delay Nerual Network 20 | load('Depth15NodeNum30_Net.mat'); 21 | 22 | % 3 Fold Cross Validation 23 | [trained3CVnet_P1, predictTestT1_P1, predictTestT2_P1, predictTest_P1, stackMovementTest_P1] ... 24 | = threeFoldCV(TDNNnet, P1_C, P1_T, P1_R, P1_D); 25 | [trained3CVnet_P2, predictTestT1_P2, predictTestT2_P2, predictTest_P2, stackMovementTest_P2] ... 26 | = threeFoldCV(TDNNnet, P2_C, P2_T, P2_R, P2_D); 27 | [trained3CVnet_P3, predictTestT1_P3, predictTestT2_P3, predictTest_P3, stackMovementTest_P3] ... 28 | = threeFoldCV(TDNNnet, P3_C, P3_T, P3_R, P3_D); 29 | [trained3CVnet_P4, predictTestT1_P4, predictTestT2_P4, predictTest_P4, stackMovementTest_P4] ... 30 | = threeFoldCV(TDNNnet, P4_C, P4_T, P4_R, P4_D); 31 | [trained3CVnet_P5, predictTestT1_P5, predictTestT2_P5, predictTest_P5, stackMovementTest_P5] ... 32 | = threeFoldCV(TDNNnet, P5_C, P5_T, P5_R, P5_D); 33 | [trained3CVnet_P6, predictTestT1_P6, predictTestT2_P6, predictTest_P6, stackMovementTest_P6] ... 
34 | = threeFoldCV(TDNNnet, P6_C, P6_T, P6_R, P6_D); 35 | 36 | 37 | % Plot ROC for CV1, CV2, CV3 and overall CV + save all graphs 38 | confusionROC('Person1', predictTestT1_P1, predictTestT2_P1, predictTest_P1, stackMovementTest_P1); 39 | confusionROC('Person2', predictTestT1_P2, predictTestT2_P2, predictTest_P2, stackMovementTest_P2); 40 | confusionROC('Person3', predictTestT1_P3, predictTestT2_P3, predictTest_P3, stackMovementTest_P3); 41 | confusionROC('Person4', predictTestT1_P4, predictTestT2_P4, predictTest_P4, stackMovementTest_P4); 42 | confusionROC('Person5', predictTestT1_P5, predictTestT2_P5, predictTest_P5, stackMovementTest_P5); 43 | confusionROC('Person6', predictTestT1_P6, predictTestT2_P6, predictTest_P6, stackMovementTest_P6); 44 | -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Subject Specific-3CV/confusionROC.m: -------------------------------------------------------------------------------- 1 | % Confusion Matrix for each fold in a single graph 2 | function confusionROC(person, predictTestT1, predictTestT2, predictTestT3, stackMovementTest) 3 | 4 | % Resize the matrix to 4x4 for the prediction output by calling function 5 | % 'shrinkConfusion' 6 | predictT1_4by4 = shrinkConfusion(predictTestT1); 7 | predictT2_4by4 = shrinkConfusion(predictTestT2); 8 | predictT3_4by4 = shrinkConfusion(predictTestT3); 9 | overall = shrinkConfusion(cat(3, predictTestT1, predictTestT2, predictTestT3)); 10 | 11 | % Plot ROC Curves for all three rounds individually and an overall ROC + 12 | % Save to folders! 13 | ezroc3( predictT1_4by4); 14 | saveas(figure(1), [pwd '\ROC\3CV\', person , '\', person, '_Fold1ROC.fig']); 15 | ezroc3( predictT2_4by4 ); 16 | saveas(figure(2), [pwd '\ROC\3CV\', person , '\', person, '_Fold2ROC.fig']); 17 | ezroc3( predictT3_4by4 ); 18 | saveas(figure(3), [pwd '\ROC\3CV\', person , '\', person, '_Fold3ROC.fig']); 19 | ezroc3( overall ); 20 | saveas(figure(4), [pwd '\ROC\3CV\', person , '\', person, '_OverallROC.fig']); 21 | 22 | % Plot Confusion Matrix 23 | % Class 1/--1 to 1/0 24 | InitMovemntTestModify = (stackMovementTest + 1 )/2; 25 | lastRPredTestModify = (predictTestT3 + 1)/2; 26 | 27 | % Plot Confusion Matrix 28 | confusionG = plotconfusion(InitMovemntTestModify, lastRPredTestModify, person); 29 | % Save the matrix 30 | saveas(confusionG, [pwd '\ROC\3CV\', person , '\', person, '_Confusion.fig']) 31 | end -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Subject Specific-3CV/roundData.m: -------------------------------------------------------------------------------- 1 | % Stack 2 designated rounds for Training and set aside 1 round for Testing per movement 2 | 3 | function[R1Train, R1Test, R2Train, R2Test, R3Train, R3Test] = roundData(P_movement) 4 | 5 | % Fold #1 -- Training : Round 1 & 2 + Testing : Round 3 6 | R1Train = [P_movement(1, 1:600); P_movement(2, 1:600) ]; 7 | R1Test = [P_movement(1, 601:900) ; P_movement(2, 601:900)]; 8 | 9 | % Fold #2 -- Training : Round 2 & 3 + Testing : Round 1 10 | R2Train = [P_movement(1, 301:900) ; P_movement(2, 301:900)]; 11 | R2Test = [P_movement(1, 1:300) ; P_movement(2, 1:300)]; 12 | 13 | % Fold #3 -- Training : Round 1 & 3 + Testing : Round 2 14 | R3Train = [P_movement(1, 1:300), P_movement(1, 601:900); P_movement(2, 1:300) P_movement(2, 601:900)]; 15 | R3Test = [P_movement(1, 301:600) ; P_movement(2, 301:600)]; 16 | 17 | end 
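% Note: each round spans 300 samples per movement, so columns 1-300, 301-600 and 601-900 correspond to Rounds 1, 2 and 3 respectively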
-------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Subject Specific-3CV/shrinkConfusion.m: -------------------------------------------------------------------------------- 1 | % Downsize to 4x4 for Confusion Matrix 2 | 3 | function[confusionResize] = shrinkConfusion(predictTest) 4 | % Confusion Matrix with Correct Classifications 5 | originalConfusion = [ [1, -1, -1, -1]; [-1, 1, -1, -1]; [-1, -1, 1, -1]; [-1, -1, -1, 1];]; 6 | 7 | % Downsize 300 predicted values to 1 for all 4 movements 8 | circle_downsize = [ mean(predictTest(1, 1:300)), mean(predictTest(1, 301:600)), mean(predictTest(1, 601:900)), mean(predictTest(1, 901:1200)) ]; 9 | triangle_downsize = [ mean(predictTest(2, 1:300)), mean(predictTest(2, 301:600)), mean(predictTest(2, 601:900)), mean(predictTest(2, 901:1200)) ]; 10 | right_downsize = [ mean(predictTest(3, 1:300)), mean(predictTest(3, 301:600)), mean(predictTest(3, 601:900)), mean(predictTest(3, 901:1200)) ]; 11 | down_downsize = [ mean(predictTest(4, 1:300)), mean(predictTest(4, 301:600)), mean(predictTest(4, 601:900)), mean(predictTest(4, 901:1200)) ]; 12 | 13 | % A 4x4 Confusion Matrix with predicted values! 14 | confusionResize = [ circle_downsize; triangle_downsize; right_downsize; down_downsize ]; 15 | 16 | end -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/Subject Specific-3CV/threeFoldCV.m: -------------------------------------------------------------------------------- 1 | % 3 Fold Cross Validation 2 | function[ trained3CVnet, predictTestT1, predictTestT2, predictTestT3, stackMovementTest] = threeFoldCV(net, P_C, P_T, P_R, P_D) 3 | % ------------------------------------------------------------------------------------------------------------------------------- 4 | % Create a single matrix to stack up 4 movements per Person 5 | [R1Train_C, R1Test_C, R2Train_C, R2Test_C, R3Train_C, R3Test_C] = roundData(P_C); 6 | [M1Train_T, M1Test_T, M2Train_T, M2Test_T, M3Train_T, M3Test_T] = roundData(P_T); 7 | [M1Train_R, M1Test_R, M2Train_R, M2Test_R, M3Train_R, M3Test_R] = roundData(P_R); 8 | [M1Train_D, M1Test_D, M2Train_D, M2Test_D, M3Train_D, M3Test_D] = roundData(P_D); 9 | 10 | % Fold #1 Train + Test Data 11 | fold1Train = [ R1Train_C M1Train_T M1Train_R M1Train_D]; 12 | fold1Test = [ R1Test_C M1Test_T M1Test_R M1Test_D]; 13 | 14 | % To sequencial format 15 | f1Train = con2seq(fold1Train); 16 | f1Test = con2seq(fold1Test); 17 | 18 | % Fold #2 Train + Test Data 19 | fold2Train = [ R2Train_C M2Train_T M2Train_R M2Train_D]; 20 | fold2Test = [ R2Test_C M2Test_T M2Test_R M2Test_D]; 21 | 22 | % To sequencial format 23 | f2Train = con2seq(fold2Train); 24 | f2Test = con2seq(fold2Test); 25 | 26 | % Fold #3 Train + Test Data 27 | fold3Train = [ R3Train_C M3Train_T M3Train_R M3Train_D]; 28 | fold3Test = [ R3Test_C M3Test_T M3Test_R M3Test_D]; 29 | 30 | % To sequencial format 31 | f3Train = con2seq(fold3Train); 32 | f3Test = con2seq(fold3Test); 33 | 34 | % Target 35 | [ stackMovementTrain, stackMovementTest ] = movementTarget('3 Fold'); 36 | 37 | % To sequencial format 38 | Mtrain = con2seq(stackMovementTrain); 39 | Mtest = con2seq(stackMovementTest); 40 | % ------------------------------------------------------------------------------------------------------------------------------- 41 | % TDNN prepare nets and train with 3 folds 42 | % >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fold #1 43 | % Train 1 44 | [X1s, X1i, A1i, T1s] = preparets(net, f1Train, Mtrain); 45 | 
f1net = train(net, X1s, T1s, X1i, A1i); 46 | 47 | % Test 1 48 | predictTestT1_pack = f1net(f1Test); 49 | predictTestT1 = cell2mat(predictTestT1_pack); 50 | 51 | % >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fold #2 52 | % Train 2 53 | [X2s, X2i, A2i, T2s] = preparets(f1net, f2Train, Mtrain); 54 | f2net = train(f1net, X2s, T2s, X2i, A2i); 55 | 56 | % Test 2 57 | predictTestT2_pack= f2net(f2Test); 58 | predictTestT2 = cell2mat(predictTestT2_pack); 59 | 60 | % >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fold #3 61 | % Train 3 62 | [X3s, X3i, A3i, T3s] = preparets(f2net, f3Train, Mtrain); 63 | trained3CVnet= train(f2net, X3s, T3s, X3i, A3i); 64 | 65 | 66 | % Test 3 67 | predictTestT3_pack = trained3CVnet(f3Test); 68 | predictTestT3 = cell2mat(predictTestT3_pack); 69 | 70 | 71 | end -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/combExcelData.m: -------------------------------------------------------------------------------- 1 | % Parse x, z for each round for each movements 2 | 3 | function [R1_x, R1_z, R2_x, R2_z, R3_x, R3_z] = combExcelData(fileName, sheetName) 4 | 5 | % Load in all data from Person Excel File by Different Sheet Name 6 | personSheetsData = xlsread(fileName, sheetName); 7 | 8 | % Slice Round 1 x and z Columns 9 | R1_x = personSheetsData(:,1); 10 | R1_z = personSheetsData(:,3); 11 | 12 | % Slice Round 2 x and z Columns 13 | R2_x = personSheetsData(:,5); 14 | R2_z = personSheetsData(:,7); 15 | 16 | % Slice Round 3 x and z Columns 17 | R3_x = personSheetsData(:,9); 18 | R3_z = personSheetsData(:,11); 19 | 20 | % Concatenate all 3 Rounds of x and z into one column for each 21 | %AllRound_x = cat(1, Round1_x, Round2_x, Round3_x); 22 | %AllRound_z = cat(1, Round1_z, Round2_z, Round3_z); 23 | 24 | end 25 | 26 | -------------------------------------------------------------------------------- /Gesture Recognition-TDNN/movementTarget.m: -------------------------------------------------------------------------------- 1 | % One Hot Target for all 4 Movements 2 | function[ stackMovementTrain, stackMovementTest ] = movementTarget(foldType) 3 | 4 | % Vectors for 1s ( is Class ) and -1s ( Not Class ) 5 | isClass = ones(1,300); 6 | notClass = -ones(1,300); 7 | 8 | % Target for 3 CV 9 | if strcmp(foldType, '3 Fold') == 1 10 | % Circle 11 | CTarget_Train = [isClass, notClass, notClass, notClass, isClass, notClass, notClass, notClass]; 12 | CTarget_Test = [isClass, notClass, notClass, notClass]; 13 | 14 | % Triangle 15 | TTarget_Train = [notClass, isClass, notClass, notClass, notClass, isClass, notClass, notClass]; 16 | TTarget_Test = [notClass, isClass, notClass, notClass]; 17 | 18 | % Right 19 | RTarget_Train = [notClass, notClass, isClass, notClass, notClass, notClass, isClass, notClass]; 20 | RTarget_Test = [notClass, notClass, isClass, notClass]; 21 | 22 | % Down 23 | DTarget_Train = [notClass, notClass, notClass, isClass, notClass, notClass, notClass, isClass]; 24 | DTarget_Test = [notClass, notClass, notClass, isClass]; 25 | 26 | stackMovementTrain = [CTarget_Train; TTarget_Train; RTarget_Train; DTarget_Train]; 27 | stackMovementTest = [CTarget_Test; TTarget_Test; RTarget_Test; DTarget_Test]; 28 | 29 | % Target for 6 CV 30 | elseif strcmp(foldType, '6 Fold') == 1 31 | isClass6CV = ones(1,900); 32 | notClass6CV = -ones(1,900); 33 | % Circle 34 | C = [isClass6CV, notClass6CV, notClass6CV, notClass6CV]; 35 | CTarget_Train = repmat(C, 1, 5); 36 | CTarget_Test = C; 37 | 38 | % Triangle 39 | T = [notClass6CV, isClass6CV, notClass6CV, 
notClass6CV]; 40 | TTarget_Train = repmat(T, 1, 5); 41 | TTarget_Test = T; 42 | 43 | % Right 44 | R = [notClass6CV, notClass6CV, isClass6CV, notClass6CV]; 45 | RTarget_Train =repmat(R, 1, 5); 46 | RTarget_Test = R; 47 | 48 | % Down 49 | D = [notClass6CV, notClass6CV, notClass6CV, isClass6CV]; 50 | DTarget_Train =repmat(D, 1, 5); 51 | DTarget_Test = D; 52 | 53 | % Stack all 4 movements of targets for Training and Testing 54 | stackMovementTrain = [CTarget_Train; TTarget_Train; RTarget_Train; DTarget_Train]; 55 | stackMovementTest = [CTarget_Test; TTarget_Test; RTarget_Test; DTarget_Test]; 56 | 57 | % Target for Prefold 58 | elseif strcmp(foldType, 'Prefold') == 1 59 | preTarget_C = [isClass, notClass, notClass, notClass]; 60 | preTarget_T = [notClass, isClass, notClass, notClass]; 61 | preTarget_R = [notClass, notClass, isClass, notClass]; 62 | preTarget_D = [notClass, notClass, notClass, isClass]; 63 | 64 | stackMovementTrain = [preTarget_C; preTarget_T; preTarget_R; preTarget_D]; 65 | stackMovementTest = [preTarget_C; preTarget_T; preTarget_R; preTarget_D]; 66 | 67 | end 68 | end -------------------------------------------------------------------------------- /Portfolio Selection Final Project/5 and 4 and 3 Star LV LB LG.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datarocksAmy/MATLAB/81bee7abfded6286e3e0aa58e9e45db4352dd9f5/Portfolio Selection Final Project/5 and 4 and 3 Star LV LB LG.xlsx -------------------------------------------------------------------------------- /Portfolio Selection Final Project/5&4&3star.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datarocksAmy/MATLAB/81bee7abfded6286e3e0aa58e9e45db4352dd9f5/Portfolio Selection Final Project/5&4&3star.xlsx -------------------------------------------------------------------------------- /Portfolio Selection Final Project/5star.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datarocksAmy/MATLAB/81bee7abfded6286e3e0aa58e9e45db4352dd9f5/Portfolio Selection Final Project/5star.xlsx -------------------------------------------------------------------------------- /Portfolio Selection Final Project/inforatio.m: -------------------------------------------------------------------------------- 1 | %Function Call-Calculate 3 Year and 5 Year Information Ratio 2 | 3 | function [inforatio3yr inforatio5yr indexret3yr indexret5yr] = inforatio(return3yr, return5yr) 4 | 5 | indexret3yr = 17.15; 6 | indexret5yr = 14.14; 7 | trackingerror3yr = std(return3yr-(indexret3yr/100)); 8 | trackingerror5yr = std(return5yr-(indexret5yr/100)); 9 | 10 | inforatio3yr = mean((return3yr-(indexret3yr/100))/trackingerror3yr); 11 | inforatio5yr = mean((return5yr-(indexret5yr/100))/trackingerror5yr); -------------------------------------------------------------------------------- /Portfolio Selection Final Project/sharperatio.m: -------------------------------------------------------------------------------- 1 | %Function Call-Calculate 3 Year and 5 Year Sharpe Ratio 2 | 3 | function [sharperatio3yr sharperatio5yr] = sharperatio(mean3yr, mean5yr, std3yr, std5yr) 4 | 5 | rf = 0.031; 6 | sharperatio3yr = (mean3yr-rf)/std3yr; 7 | sharperatio5yr = (mean5yr-rf)/std5yr; -------------------------------------------------------------------------------- /Radial Basis Function (Exact & Regular)/MiniProject2.m: 
--------------------------------------------------------------------------------
/Radial Basis Function (Exact & Regular)/MiniProject2.m:
--------------------------------------------------------------------------------
% ---------------------------------------------
% Mini Project #2 - RBF Neural Nets
% ( Find the optimized goal and spread value. )
% ---------------------------------------------

% Clear workspace and command window
clear all;
clc;

% Load formatted input and target data
load('P.mat');
load('T.mat');

% Split input data & store indexes ( 60% Train, 20% Validation, 20% Test )
[trainP, valP, testP, trainIdx, valIdx, testIdx] = dividerand(P, 0.6, 0.2, 0.2);

% Split target data using the same indexes ( 60% Train, 20% Validation, 20% Test )
[trainT, valT, testT] = divideind(T, trainIdx, valIdx, testIdx);

% Build the exact-fit RBF net ( the regular-base net is trained in the loop below )
RBF_exact = newrbe(trainP, trainT);

% ----- Simulate validation MSE for different goals and spreads -----

% Initialize goal and a matrix for storing simulation results
goal = 0.0;
simulation_results = [];

% Goal loop until it reaches 1
while (goal <= 1)

    % Initialize spread as 1
    reg_spread = 1;

    % Spread loop until it reaches 35
    while (reg_spread <= 35)

        % Train the regular RBF net
        RBF_regular = newrb(trainP, trainT, goal, reg_spread, 200, 50);

        % Network output on the validation set
        RBFRegular_output = sim(RBF_regular, valP);

        % Validation MSE
        Validation_MSE = mse(RBFRegular_output - valT);

        % Store the simulation result when validation MSE is between 0.5 and 0.51
        if (Validation_MSE >= 0.5 && Validation_MSE <= 0.51)

            % Store all simulation values: goal, spread, validation MSE
            results = [goal, reg_spread, Validation_MSE];
            simulation_results = vertcat(simulation_results, results);

            % Validation MSE hit 0.5 -- move on to the next goal iteration
            if (Validation_MSE == 0.5)
                break
            end

        end

        % Increment spread by 1 ( outside the if-block so the loop always terminates )
        reg_spread = reg_spread + 1;

    end

    % Increment goal by 0.1
    goal = goal + 0.1;
end

disp(simulation_results);
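MiniProject2.m collects candidate (goal, spread) pairs on the validation split but never evaluates either network on the held-out test set. A minimal follow-up sketch, assuming bestGoal and bestSpread are read off simulation_results by hand (the values below are placeholders, not outputs of the script):

% Test-set MSE of the exact-fit RBF net built above
Exact_test_MSE = mse(sim(RBF_exact, testP) - testT);

% Re-train a regular RBF with a chosen goal/spread and measure test-set MSE
bestGoal = 0.5;          % placeholder, taken from simulation_results
bestSpread = 10;         % placeholder, taken from simulation_results
RBF_best = newrb(trainP, trainT, bestGoal, bestSpread, 200, 50);
Regular_test_MSE = mse(sim(RBF_best, testP) - testT);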
--------------------------------------------------------------------------------
/Radial Basis Function (Exact & Regular)/P.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/datarocksAmy/MATLAB/81bee7abfded6286e3e0aa58e9e45db4352dd9f5/Radial Basis Function (Exact & Regular)/P.mat
--------------------------------------------------------------------------------
/Radial Basis Function (Exact & Regular)/T.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/datarocksAmy/MATLAB/81bee7abfded6286e3e0aa58e9e45db4352dd9f5/Radial Basis Function (Exact & Regular)/T.mat
--------------------------------------------------------------------------------
/Radial Basis Function (Exact & Regular)/parseData.m:
--------------------------------------------------------------------------------
% Transform network outputs and targets into a two-row
% ( Benign / Malignant ) one-hot format

function [modify_output, modify_T] = parseData(original_output, original_T)

% Get the total number of samples ( columns )
%colNum = size(Reg_test_output, 1);
rowNum = size(original_output, 2);

% Initialize both two-row matrices with zeros
modify_output = zeros(2, rowNum);
modify_T = zeros(2, rowNum);

% Start from the first sample
row = 1;

% Parse the data sample by sample
while row <= rowNum
    % Split into two rows marked 1 (yes) and 0 (no) for Malignant & Benign
    if (original_output(1, row) == mode(original_output))
        modify_output(2, row) = 1;    % Malignant (1)
    else
        modify_output(1, row) = 1;    % Benign (-1)
    end

    % Transform the original targets into two rows
    if (original_T(1, row) == -1)
        modify_T(1, row) = 1;         % Benign
    else
        modify_T(2, row) = 1;         % Malignant
    end

    % Move to the next sample
    row = row + 1;
end
end
--------------------------------------------------------------------------------
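A usage sketch for parseData, assuming the regular RBF net and test split from MiniProject2.m; the variable names here are illustrative and are not produced by parseData.m itself:

% Simulate the trained net on the test inputs, reshape outputs and targets
% into the two-row Benign/Malignant form, and plot a confusion matrix
Reg_test_output = sim(RBF_regular, testP);
[oneHotOutput, oneHotTarget] = parseData(Reg_test_output, testT);
plotconfusion(oneHotTarget, oneHotOutput);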